2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/infiniband/core/cm.c
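The diff below backports the CM rework that replaces the idr-based cm.local_id_table with an xarray, converts cm_id_private reference counting from atomic_t to refcount_t with kfree_rcu() teardown, and moves MAD field access to the IBA_SET()/IBA_GET() accessors. As a reading aid only (not part of the patch), the sketch below illustrates the xarray allocation pattern the new cm_alloc_id_priv()/cm_finalize_id() pair relies on; the example_table, example_next, example_alloc and example_publish names are invented for the illustration.

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_table);	/* illustrative only */
static u32 example_next;

/*
 * Reserve a cyclic 32-bit index; storing NULL keeps the slot busy but
 * invisible to lockless xa_load() readers, mirroring how cm_alloc_id_priv()
 * allocates an ID before it is published.
 */
static int example_alloc(u32 *out_id)
{
	u32 id;
	int ret;

	ret = xa_alloc_cyclic(&example_table, &id, NULL, xa_limit_32b,
			      &example_next, GFP_KERNEL);
	if (ret < 0)
		return ret;
	*out_id = id;
	return 0;
}

/*
 * Publish the entry once it is fully initialized, as cm_finalize_id()
 * does with xa_store().
 */
static void example_publish(u32 id, void *entry)
{
	xa_store(&example_table, id, entry, GFP_ATOMIC);
}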
@@ -1,36 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /*
 * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
 */

 #include <linux/completion.h>
@@ -52,6 +26,8 @@
 #include <rdma/ib_cache.h>
 #include <rdma/ib_cm.h>
 #include "cm_msgs.h"
+#include "core_priv.h"
+#include "cm_trace.h"

 MODULE_AUTHOR("Sean Hefty");
 MODULE_DESCRIPTION("InfiniBand CM");
@@ -91,6 +67,8 @@
 [IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
 [IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
 [IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
+ [IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
+ "vendor option is not supported",
 };

 const char *__attribute_const__ ibcm_reject_msg(int reason)
@@ -105,8 +83,22 @@
 }
 EXPORT_SYMBOL(ibcm_reject_msg);

-static void cm_add_one(struct ib_device *device);
+struct cm_id_private;
+struct cm_work;
+static int cm_add_one(struct ib_device *device);
 static void cm_remove_one(struct ib_device *device, void *client_data);
+static void cm_process_work(struct cm_id_private *cm_id_priv,
+ struct cm_work *work);
+static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
+ struct ib_cm_sidr_rep_param *param);
+static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
+ const void *private_data, u8 private_data_len);
+static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
+ void *private_data, u8 private_data_len);
+static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
+ enum ib_cm_rej_reason reason, void *ari,
+ u8 ari_length, const void *private_data,
+ u8 private_data_len);

 static struct ib_client cm_client = {
 .name = "cm",
@@ -124,7 +116,8 @@
 struct rb_root remote_qp_table;
 struct rb_root remote_id_table;
 struct rb_root remote_sidr_table;
- struct idr local_id_table;
+ struct xarray local_id_table;
+ u32 local_id_next;
 __be32 random_id_operand;
 struct list_head timewait_list;
 struct workqueue_struct *wq;
@@ -209,7 +202,6 @@
 struct cm_port {
 struct cm_device *cm_dev;
 struct ib_mad_agent *mad_agent;
- struct kobject port_obj;
 u8 port_num;
 struct list_head cm_priv_prim_list;
 struct list_head cm_priv_altr_list;
@@ -219,10 +211,9 @@
 struct cm_device {
 struct list_head list;
 struct ib_device *ib_device;
- struct device *device;
 u8 ack_delay;
 int going_down;
- struct cm_port *port[0];
+ struct cm_port *port[];
 };

 struct cm_av {
@@ -241,11 +232,11 @@
 __be32 local_id; /* Established / timewait */
 __be32 remote_id;
 struct ib_cm_event cm_event;
- struct sa_path_rec path[0];
+ struct sa_path_rec path[];
 };

 struct cm_timewait_info {
- struct cm_work work; /* Must be first. */
+ struct cm_work work;
 struct list_head list;
 struct rb_node remote_qp_node;
 struct rb_node remote_id_node;
@@ -262,10 +253,11 @@
 struct rb_node sidr_id_node;
 spinlock_t lock; /* Do not acquire inside cm.lock */
 struct completion comp;
- atomic_t refcount;
+ refcount_t refcount;
 /* Number of clients sharing this ib_cm_id. Only valid for listeners.
 * Protected by the cm.lock spinlock. */
 int listen_sharecount;
+ struct rcu_head rcu;

 struct ib_mad_send_buf *msg;
 struct cm_timewait_info *timewait_info;
@@ -285,7 +277,6 @@
 __be16 pkey;
 u8 private_data_len;
 u8 max_cm_retries;
- u8 peer_to_peer;
 u8 responder_resources;
 u8 initiator_depth;
 u8 retry_count;
@@ -301,13 +292,15 @@

 struct list_head work_list;
 atomic_t work_count;
+
+ struct rdma_ucm_ece ece;
 };

 static void cm_work_handler(struct work_struct *work);

 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
 {
- if (atomic_dec_and_test(&cm_id_priv->refcount))
+ if (refcount_dec_and_test(&cm_id_priv->refcount))
 complete(&cm_id_priv->comp);
 }

@@ -343,7 +336,7 @@
 ret = -ENODEV;
 goto out;
 }
- ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr);
+ ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr, 0);
 if (IS_ERR(ah)) {
 ret = PTR_ERR(ah);
 goto out;
@@ -355,7 +348,7 @@
 GFP_ATOMIC,
 IB_MGMT_BASE_VERSION);
 if (IS_ERR(m)) {
- rdma_destroy_ah(ah);
+ rdma_destroy_ah(ah, 0);
 ret = PTR_ERR(m);
 goto out;
 }
@@ -364,7 +357,7 @@
 m->ah = ah;
 m->retries = cm_id_priv->max_cm_retries;

- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
 m->context[0] = cm_id_priv;
 *msg = m;

@@ -400,7 +393,7 @@
 static void cm_free_msg(struct ib_mad_send_buf *msg)
 {
 if (msg->ah)
- rdma_destroy_ah(msg->ah);
+ rdma_destroy_ah(msg->ah, 0);
 if (msg->context[0])
 cm_deref_id(msg->context[0]);
 ib_free_send_mad(msg);
@@ -488,24 +481,19 @@
 grh, &av->ah_attr);
 }

-static int add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
- struct cm_av *av,
- struct cm_port *port)
+static void add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
+ struct cm_av *av, struct cm_port *port)
 {
 unsigned long flags;
- int ret = 0;

 spin_lock_irqsave(&cm.lock, flags);
-
 if (&cm_id_priv->av == av)
 list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
 else if (&cm_id_priv->alt_av == av)
 list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
 else
- ret = -EINVAL;
-
+ WARN_ON(true);
 spin_unlock_irqrestore(&cm.lock, flags);
- return ret;
 }

 static struct cm_port *
@@ -586,64 +574,26 @@
 return ret;

 av->timeout = path->packet_life_time + 1;
-
- ret = add_cm_id_to_port_list(cm_id_priv, av, port);
- if (ret) {
- rdma_destroy_ah_attr(&new_ah_attr);
- return ret;
- }
+ add_cm_id_to_port_list(cm_id_priv, av, port);
 rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
 return 0;
 }

-static int cm_alloc_id(struct cm_id_private *cm_id_priv)
+static u32 cm_local_id(__be32 local_id)
 {
- unsigned long flags;
- int id;
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(&cm.lock, flags);
-
- id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);
-
- spin_unlock_irqrestore(&cm.lock, flags);
- idr_preload_end();
-
- cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
- return id < 0 ? id : 0;
+ return (__force u32) (local_id ^ cm.random_id_operand);
 }

-static void cm_free_id(__be32 local_id)
-{
- spin_lock_irq(&cm.lock);
- idr_remove(&cm.local_id_table,
- (__force int) (local_id ^ cm.random_id_operand));
- spin_unlock_irq(&cm.lock);
-}
-
-static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
+static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
 {
 struct cm_id_private *cm_id_priv;

- cm_id_priv = idr_find(&cm.local_id_table,
- (__force int) (local_id ^ cm.random_id_operand));
- if (cm_id_priv) {
- if (cm_id_priv->id.remote_id == remote_id)
- atomic_inc(&cm_id_priv->refcount);
- else
- cm_id_priv = NULL;
- }
-
- return cm_id_priv;
-}
-
-static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
-{
- struct cm_id_private *cm_id_priv;
-
- spin_lock_irq(&cm.lock);
- cm_id_priv = cm_get_id(local_id, remote_id);
- spin_unlock_irq(&cm.lock);
+ rcu_read_lock();
+ cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
+ if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
+ !refcount_inc_not_zero(&cm_id_priv->refcount))
+ cm_id_priv = NULL;
+ rcu_read_unlock();

 return cm_id_priv;
 }
@@ -673,22 +623,44 @@
 return (__force u64) a > (__force u64) b;
 }

-static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
+/*
+ * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
+ * if the new ID was inserted, NULL if it could not be inserted due to a
+ * collision, or the existing cm_id_priv ready for shared usage.
+ */
+static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
+ ib_cm_handler shared_handler)
 {
 struct rb_node **link = &cm.listen_service_table.rb_node;
 struct rb_node *parent = NULL;
 struct cm_id_private *cur_cm_id_priv;
 __be64 service_id = cm_id_priv->id.service_id;
 __be64 service_mask = cm_id_priv->id.service_mask;
+ unsigned long flags;

+ spin_lock_irqsave(&cm.lock, flags);
 while (*link) {
 parent = *link;
 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
 service_node);
 if ((cur_cm_id_priv->id.service_mask & service_id) ==
 (service_mask & cur_cm_id_priv->id.service_id) &&
- (cm_id_priv->id.device == cur_cm_id_priv->id.device))
+ (cm_id_priv->id.device == cur_cm_id_priv->id.device)) {
+ /*
+ * Sharing an ib_cm_id with different handlers is not
+ * supported
+ */
+ if (cur_cm_id_priv->id.cm_handler != shared_handler ||
+ cur_cm_id_priv->id.context ||
+ WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
+ spin_unlock_irqrestore(&cm.lock, flags);
+ return NULL;
+ }
+ refcount_inc(&cur_cm_id_priv->refcount);
+ cur_cm_id_priv->listen_sharecount++;
+ spin_unlock_irqrestore(&cm.lock, flags);
 return cur_cm_id_priv;
+ }

 if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
 link = &(*link)->rb_left;
@@ -701,9 +673,11 @@
 else
 link = &(*link)->rb_right;
 }
+ cm_id_priv->listen_sharecount++;
 rb_link_node(&cm_id_priv->service_node, parent, link);
 rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
- return NULL;
+ spin_unlock_irqrestore(&cm.lock, flags);
+ return cm_id_priv;
 }

 static struct cm_id_private * cm_find_listen(struct ib_device *device,
@@ -716,9 +690,10 @@
 cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
 if ((cm_id_priv->id.service_mask & service_id) ==
 cm_id_priv->id.service_id &&
- (cm_id_priv->id.device == device))
+ (cm_id_priv->id.device == device)) {
+ refcount_inc(&cm_id_priv->refcount);
 return cm_id_priv;
-
+ }
 if (device < cm_id_priv->id.device)
 node = node->rb_left;
 else if (device > cm_id_priv->id.device)
@@ -763,12 +738,14 @@
 return NULL;
 }

-static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
- __be32 remote_id)
+static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
+ __be32 remote_id)
 {
 struct rb_node *node = cm.remote_id_table.rb_node;
 struct cm_timewait_info *timewait_info;
+ struct cm_id_private *res = NULL;

+ spin_lock_irq(&cm.lock);
 while (node) {
 timewait_info = rb_entry(node, struct cm_timewait_info,
 remote_id_node);
@@ -780,10 +757,14 @@
 node = node->rb_left;
 else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
 node = node->rb_right;
- else
- return timewait_info;
+ else {
+ res = cm_acquire_id(timewait_info->work.local_id,
+ timewait_info->work.remote_id);
+ break;
+ }
 }
- return NULL;
+ spin_unlock_irq(&cm.lock);
+ return res;
 }

 static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
@@ -850,21 +831,12 @@
 return NULL;
 }

-static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
- enum ib_cm_sidr_status status)
-{
- struct ib_cm_sidr_rep_param param;
-
- memset(&param, 0, sizeof param);
- param.status = status;
- ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
-}
-
-struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
- ib_cm_handler cm_handler,
- void *context)
+static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
+ ib_cm_handler cm_handler,
+ void *context)
 {
 struct cm_id_private *cm_id_priv;
+ u32 id;
 int ret;

 cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
@@ -876,22 +848,52 @@
 cm_id_priv->id.cm_handler = cm_handler;
 cm_id_priv->id.context = context;
 cm_id_priv->id.remote_cm_qpn = 1;
- ret = cm_alloc_id(cm_id_priv);
- if (ret)
- goto error;

+ RB_CLEAR_NODE(&cm_id_priv->service_node);
+ RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
 spin_lock_init(&cm_id_priv->lock);
 init_completion(&cm_id_priv->comp);
 INIT_LIST_HEAD(&cm_id_priv->work_list);
 INIT_LIST_HEAD(&cm_id_priv->prim_list);
 INIT_LIST_HEAD(&cm_id_priv->altr_list);
 atomic_set(&cm_id_priv->work_count, -1);
- atomic_set(&cm_id_priv->refcount, 1);
- return &cm_id_priv->id;
+ refcount_set(&cm_id_priv->refcount, 1);
+
+ ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
+ &cm.local_id_next, GFP_KERNEL);
+ if (ret < 0)
+ goto error;
+ cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
+
+ return cm_id_priv;

 error:
 kfree(cm_id_priv);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
+}
+
+/*
+ * Make the ID visible to the MAD handlers and other threads that use the
+ * xarray.
+ */
+static void cm_finalize_id(struct cm_id_private *cm_id_priv)
+{
+ xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
+ cm_id_priv, GFP_ATOMIC);
+}
+
+struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
+ ib_cm_handler cm_handler,
+ void *context)
+{
+ struct cm_id_private *cm_id_priv;
+
+ cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
+ if (IS_ERR(cm_id_priv))
+ return ERR_CAST(cm_id_priv);
+
+ cm_finalize_id(cm_id_priv);
+ return &cm_id_priv->id;
 }
 EXPORT_SYMBOL(ib_create_cm_id);

....@@ -912,6 +914,36 @@
912914 if (work->mad_recv_wc)
913915 ib_free_recv_mad(work->mad_recv_wc);
914916 kfree(work);
917
+}
918
+
919
+static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
920
+ struct cm_work *work)
921
+ __releases(&cm_id_priv->lock)
922
+{
923
+ bool immediate;
924
+
925
+ /*
926
+ * To deliver the event to the user callback we have the drop the
927
+ * spinlock, however, we need to ensure that the user callback is single
928
+ * threaded and receives events in the temporal order. If there are
929
+ * already events being processed then thread new events onto a list,
930
+ * the thread currently processing will pick them up.
931
+ */
932
+ immediate = atomic_inc_and_test(&cm_id_priv->work_count);
933
+ if (!immediate) {
934
+ list_add_tail(&work->list, &cm_id_priv->work_list);
935
+ /*
936
+ * This routine always consumes incoming reference. Once queued
937
+ * to the work_list then a reference is held by the thread
938
+ * currently running cm_process_work() and this reference is not
939
+ * needed.
940
+ */
941
+ cm_deref_id(cm_id_priv);
942
+ }
943
+ spin_unlock_irq(&cm_id_priv->lock);
944
+
945
+ if (immediate)
946
+ cm_process_work(cm_id_priv, work);
915947 }
916948
917949 static inline int cm_convert_to_ms(int iba_time)
....@@ -939,8 +971,10 @@
939971 return min(31, ack_timeout);
940972 }
941973
942
-static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
974
+static void cm_remove_remote(struct cm_id_private *cm_id_priv)
943975 {
976
+ struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;
977
+
944978 if (timewait_info->inserted_remote_id) {
945979 rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
946980 timewait_info->inserted_remote_id = 0;
....@@ -972,12 +1006,14 @@
9721006 unsigned long flags;
9731007 struct cm_device *cm_dev;
9741008
1009
+ lockdep_assert_held(&cm_id_priv->lock);
1010
+
9751011 cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
9761012 if (!cm_dev)
9771013 return;
9781014
9791015 spin_lock_irqsave(&cm.lock, flags);
980
- cm_cleanup_timewait(cm_id_priv->timewait_info);
1016
+ cm_remove_remote(cm_id_priv);
9811017 list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
9821018 spin_unlock_irqrestore(&cm.lock, flags);
9831019
....@@ -996,6 +1032,11 @@
9961032 msecs_to_jiffies(wait_time));
9971033 spin_unlock_irqrestore(&cm.lock, flags);
9981034
1035
+ /*
1036
+ * The timewait_info is converted into a work and gets freed during
1037
+ * cm_free_work() in cm_timewait_handler().
1038
+ */
1039
+ BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
9991040 cm_id_priv->timewait_info = NULL;
10001041 }
10011042
....@@ -1003,10 +1044,12 @@
10031044 {
10041045 unsigned long flags;
10051046
1047
+ lockdep_assert_held(&cm_id_priv->lock);
1048
+
10061049 cm_id_priv->id.state = IB_CM_IDLE;
10071050 if (cm_id_priv->timewait_info) {
10081051 spin_lock_irqsave(&cm.lock, flags);
1009
- cm_cleanup_timewait(cm_id_priv->timewait_info);
1052
+ cm_remove_remote(cm_id_priv);
10101053 spin_unlock_irqrestore(&cm.lock, flags);
10111054 kfree(cm_id_priv->timewait_info);
10121055 cm_id_priv->timewait_info = NULL;
....@@ -1019,92 +1062,97 @@
10191062 struct cm_work *work;
10201063
10211064 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1022
-retest:
10231065 spin_lock_irq(&cm_id_priv->lock);
1066
+retest:
10241067 switch (cm_id->state) {
10251068 case IB_CM_LISTEN:
1026
- spin_unlock_irq(&cm_id_priv->lock);
1027
-
1028
- spin_lock_irq(&cm.lock);
1069
+ spin_lock(&cm.lock);
10291070 if (--cm_id_priv->listen_sharecount > 0) {
10301071 /* The id is still shared. */
1072
+ WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
1073
+ spin_unlock(&cm.lock);
1074
+ spin_unlock_irq(&cm_id_priv->lock);
10311075 cm_deref_id(cm_id_priv);
1032
- spin_unlock_irq(&cm.lock);
10331076 return;
10341077 }
1078
+ cm_id->state = IB_CM_IDLE;
10351079 rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
1036
- spin_unlock_irq(&cm.lock);
1080
+ RB_CLEAR_NODE(&cm_id_priv->service_node);
1081
+ spin_unlock(&cm.lock);
10371082 break;
10381083 case IB_CM_SIDR_REQ_SENT:
10391084 cm_id->state = IB_CM_IDLE;
10401085 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1041
- spin_unlock_irq(&cm_id_priv->lock);
10421086 break;
10431087 case IB_CM_SIDR_REQ_RCVD:
1044
- spin_unlock_irq(&cm_id_priv->lock);
1045
- cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
1046
- spin_lock_irq(&cm.lock);
1047
- if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
1048
- rb_erase(&cm_id_priv->sidr_id_node,
1049
- &cm.remote_sidr_table);
1050
- spin_unlock_irq(&cm.lock);
1088
+ cm_send_sidr_rep_locked(cm_id_priv,
1089
+ &(struct ib_cm_sidr_rep_param){
1090
+ .status = IB_SIDR_REJECT });
1091
+ /* cm_send_sidr_rep_locked will not move to IDLE if it fails */
1092
+ cm_id->state = IB_CM_IDLE;
10511093 break;
10521094 case IB_CM_REQ_SENT:
10531095 case IB_CM_MRA_REQ_RCVD:
10541096 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1055
- spin_unlock_irq(&cm_id_priv->lock);
1056
- ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
1057
- &cm_id_priv->id.device->node_guid,
1058
- sizeof cm_id_priv->id.device->node_guid,
1059
- NULL, 0);
1097
+ cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
1098
+ &cm_id_priv->id.device->node_guid,
1099
+ sizeof(cm_id_priv->id.device->node_guid),
1100
+ NULL, 0);
10601101 break;
10611102 case IB_CM_REQ_RCVD:
10621103 if (err == -ENOMEM) {
10631104 /* Do not reject to allow future retries. */
10641105 cm_reset_to_idle(cm_id_priv);
1065
- spin_unlock_irq(&cm_id_priv->lock);
10661106 } else {
1067
- spin_unlock_irq(&cm_id_priv->lock);
1068
- ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
1069
- NULL, 0, NULL, 0);
1107
+ cm_send_rej_locked(cm_id_priv,
1108
+ IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
1109
+ NULL, 0);
10701110 }
10711111 break;
10721112 case IB_CM_REP_SENT:
10731113 case IB_CM_MRA_REP_RCVD:
10741114 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1075
- /* Fall through */
1115
+ cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
1116
+ 0, NULL, 0);
1117
+ goto retest;
10761118 case IB_CM_MRA_REQ_SENT:
10771119 case IB_CM_REP_RCVD:
10781120 case IB_CM_MRA_REP_SENT:
1079
- spin_unlock_irq(&cm_id_priv->lock);
1080
- ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
1081
- NULL, 0, NULL, 0);
1121
+ cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
1122
+ 0, NULL, 0);
10821123 break;
10831124 case IB_CM_ESTABLISHED:
1084
- spin_unlock_irq(&cm_id_priv->lock);
1085
- if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
1125
+ if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
1126
+ cm_id->state = IB_CM_IDLE;
10861127 break;
1087
- ib_send_cm_dreq(cm_id, NULL, 0);
1128
+ }
1129
+ cm_send_dreq_locked(cm_id_priv, NULL, 0);
10881130 goto retest;
10891131 case IB_CM_DREQ_SENT:
10901132 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
10911133 cm_enter_timewait(cm_id_priv);
1092
- spin_unlock_irq(&cm_id_priv->lock);
1093
- break;
1134
+ goto retest;
10941135 case IB_CM_DREQ_RCVD:
1095
- spin_unlock_irq(&cm_id_priv->lock);
1096
- ib_send_cm_drep(cm_id, NULL, 0);
1136
+ cm_send_drep_locked(cm_id_priv, NULL, 0);
1137
+ WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
1138
+ goto retest;
1139
+ case IB_CM_TIMEWAIT:
1140
+ /*
1141
+ * The cm_acquire_id in cm_timewait_handler will stop working
1142
+ * once we do xa_erase below, so just move to idle here for
1143
+ * consistency.
1144
+ */
1145
+ cm_id->state = IB_CM_IDLE;
10971146 break;
1098
- default:
1099
- spin_unlock_irq(&cm_id_priv->lock);
1147
+ case IB_CM_IDLE:
11001148 break;
11011149 }
1150
+ WARN_ON(cm_id->state != IB_CM_IDLE);
11021151
1103
- spin_lock_irq(&cm_id_priv->lock);
11041152 spin_lock(&cm.lock);
11051153 /* Required for cleanup paths related cm_req_handler() */
11061154 if (cm_id_priv->timewait_info) {
1107
- cm_cleanup_timewait(cm_id_priv->timewait_info);
1155
+ cm_remove_remote(cm_id_priv);
11081156 kfree(cm_id_priv->timewait_info);
11091157 cm_id_priv->timewait_info = NULL;
11101158 }
....@@ -1114,10 +1162,14 @@
11141162 if (!list_empty(&cm_id_priv->prim_list) &&
11151163 (!cm_id_priv->prim_send_port_not_ready))
11161164 list_del(&cm_id_priv->prim_list);
1165
+ WARN_ON(cm_id_priv->listen_sharecount);
1166
+ WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
1167
+ if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
1168
+ rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
11171169 spin_unlock(&cm.lock);
11181170 spin_unlock_irq(&cm_id_priv->lock);
11191171
1120
- cm_free_id(cm_id->local_id);
1172
+ xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
11211173 cm_deref_id(cm_id_priv);
11221174 wait_for_completion(&cm_id_priv->comp);
11231175 while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
....@@ -1126,7 +1178,7 @@
11261178 rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
11271179 rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
11281180 kfree(cm_id_priv->private_data);
1129
- kfree(cm_id_priv);
1181
+ kfree_rcu(cm_id_priv, rcu);
11301182 }
11311183
11321184 void ib_destroy_cm_id(struct ib_cm_id *cm_id)
....@@ -1135,8 +1187,27 @@
11351187 }
11361188 EXPORT_SYMBOL(ib_destroy_cm_id);
11371189
1190
+static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
1191
+ __be64 service_mask)
1192
+{
1193
+ service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
1194
+ service_id &= service_mask;
1195
+ if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
1196
+ (service_id != IB_CM_ASSIGN_SERVICE_ID))
1197
+ return -EINVAL;
1198
+
1199
+ if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
1200
+ cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
1201
+ cm_id_priv->id.service_mask = ~cpu_to_be64(0);
1202
+ } else {
1203
+ cm_id_priv->id.service_id = service_id;
1204
+ cm_id_priv->id.service_mask = service_mask;
1205
+ }
1206
+ return 0;
1207
+}
1208
+
11381209 /**
1139
- * __ib_cm_listen - Initiates listening on the specified service ID for
1210
+ * ib_cm_listen - Initiates listening on the specified service ID for
11401211 * connection and service ID resolution requests.
11411212 * @cm_id: Connection identifier associated with the listen request.
11421213 * @service_id: Service identifier matched against incoming connection
....@@ -1148,51 +1219,33 @@
11481219 * exactly. This parameter is ignored if %service_id is set to
11491220 * IB_CM_ASSIGN_SERVICE_ID.
11501221 */
1151
-static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
1152
- __be64 service_mask)
1153
-{
1154
- struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
1155
- int ret = 0;
1156
-
1157
- service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
1158
- service_id &= service_mask;
1159
- if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
1160
- (service_id != IB_CM_ASSIGN_SERVICE_ID))
1161
- return -EINVAL;
1162
-
1163
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1164
- if (cm_id->state != IB_CM_IDLE)
1165
- return -EINVAL;
1166
-
1167
- cm_id->state = IB_CM_LISTEN;
1168
- ++cm_id_priv->listen_sharecount;
1169
-
1170
- if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
1171
- cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
1172
- cm_id->service_mask = ~cpu_to_be64(0);
1173
- } else {
1174
- cm_id->service_id = service_id;
1175
- cm_id->service_mask = service_mask;
1176
- }
1177
- cur_cm_id_priv = cm_insert_listen(cm_id_priv);
1178
-
1179
- if (cur_cm_id_priv) {
1180
- cm_id->state = IB_CM_IDLE;
1181
- --cm_id_priv->listen_sharecount;
1182
- ret = -EBUSY;
1183
- }
1184
- return ret;
1185
-}
1186
-
11871222 int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
11881223 {
1224
+ struct cm_id_private *cm_id_priv =
1225
+ container_of(cm_id, struct cm_id_private, id);
11891226 unsigned long flags;
11901227 int ret;
11911228
1192
- spin_lock_irqsave(&cm.lock, flags);
1193
- ret = __ib_cm_listen(cm_id, service_id, service_mask);
1194
- spin_unlock_irqrestore(&cm.lock, flags);
1229
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
1230
+ if (cm_id_priv->id.state != IB_CM_IDLE) {
1231
+ ret = -EINVAL;
1232
+ goto out;
1233
+ }
11951234
1235
+ ret = cm_init_listen(cm_id_priv, service_id, service_mask);
1236
+ if (ret)
1237
+ goto out;
1238
+
1239
+ if (!cm_insert_listen(cm_id_priv, NULL)) {
1240
+ ret = -EBUSY;
1241
+ goto out;
1242
+ }
1243
+
1244
+ cm_id_priv->id.state = IB_CM_LISTEN;
1245
+ ret = 0;
1246
+
1247
+out:
1248
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
11961249 return ret;
11971250 }
11981251 EXPORT_SYMBOL(ib_cm_listen);
....@@ -1217,51 +1270,40 @@
12171270 ib_cm_handler cm_handler,
12181271 __be64 service_id)
12191272 {
1273
+ struct cm_id_private *listen_id_priv;
12201274 struct cm_id_private *cm_id_priv;
1221
- struct ib_cm_id *cm_id;
1222
- unsigned long flags;
12231275 int err = 0;
12241276
12251277 /* Create an ID in advance, since the creation may sleep */
1226
- cm_id = ib_create_cm_id(device, cm_handler, NULL);
1227
- if (IS_ERR(cm_id))
1228
- return cm_id;
1278
+ cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
1279
+ if (IS_ERR(cm_id_priv))
1280
+ return ERR_CAST(cm_id_priv);
12291281
1230
- spin_lock_irqsave(&cm.lock, flags);
1231
-
1232
- if (service_id == IB_CM_ASSIGN_SERVICE_ID)
1233
- goto new_id;
1234
-
1235
- /* Find an existing ID */
1236
- cm_id_priv = cm_find_listen(device, service_id);
1237
- if (cm_id_priv) {
1238
- if (cm_id->cm_handler != cm_handler || cm_id->context) {
1239
- /* Sharing an ib_cm_id with different handlers is not
1240
- * supported */
1241
- spin_unlock_irqrestore(&cm.lock, flags);
1242
- ib_destroy_cm_id(cm_id);
1243
- return ERR_PTR(-EINVAL);
1244
- }
1245
- atomic_inc(&cm_id_priv->refcount);
1246
- ++cm_id_priv->listen_sharecount;
1247
- spin_unlock_irqrestore(&cm.lock, flags);
1248
-
1249
- ib_destroy_cm_id(cm_id);
1250
- cm_id = &cm_id_priv->id;
1251
- return cm_id;
1252
- }
1253
-
1254
-new_id:
1255
- /* Use newly created ID */
1256
- err = __ib_cm_listen(cm_id, service_id, 0);
1257
-
1258
- spin_unlock_irqrestore(&cm.lock, flags);
1259
-
1282
+ err = cm_init_listen(cm_id_priv, service_id, 0);
12601283 if (err) {
1261
- ib_destroy_cm_id(cm_id);
1284
+ ib_destroy_cm_id(&cm_id_priv->id);
12621285 return ERR_PTR(err);
12631286 }
1264
- return cm_id;
1287
+
1288
+ spin_lock_irq(&cm_id_priv->lock);
1289
+ listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
1290
+ if (listen_id_priv != cm_id_priv) {
1291
+ spin_unlock_irq(&cm_id_priv->lock);
1292
+ ib_destroy_cm_id(&cm_id_priv->id);
1293
+ if (!listen_id_priv)
1294
+ return ERR_PTR(-EINVAL);
1295
+ return &listen_id_priv->id;
1296
+ }
1297
+ cm_id_priv->id.state = IB_CM_LISTEN;
1298
+ spin_unlock_irq(&cm_id_priv->lock);
1299
+
1300
+ /*
1301
+ * A listen ID does not need to be in the xarray since it does not
1302
+ * receive mads, is not placed in the remote_id or remote_qpn rbtree,
1303
+ * and does not enter timewait.
1304
+ */
1305
+
1306
+ return &cm_id_priv->id;
12651307 }
12661308 EXPORT_SYMBOL(ib_cm_insert_listen);
12671309
....@@ -1285,6 +1327,13 @@
12851327 hdr->tid = tid;
12861328 }
12871329
1330
+static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
1331
+ __be64 tid, u32 attr_mod)
1332
+{
1333
+ cm_format_mad_hdr(hdr, attr_id, tid);
1334
+ hdr->attr_mod = cpu_to_be32(attr_mod);
1335
+}
1336
+
12881337 static void cm_format_req(struct cm_req_msg *req_msg,
12891338 struct cm_id_private *cm_id_priv,
12901339 struct ib_cm_req_param *param)
....@@ -1297,57 +1346,75 @@
12971346 pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
12981347 pri_path->opa.slid);
12991348
1300
- cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
1301
- cm_form_tid(cm_id_priv));
1349
+ cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
1350
+ cm_form_tid(cm_id_priv), param->ece.attr_mod);
13021351
1303
- req_msg->local_comm_id = cm_id_priv->id.local_id;
1304
- req_msg->service_id = param->service_id;
1305
- req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
1306
- cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
1307
- cm_req_set_init_depth(req_msg, param->initiator_depth);
1308
- cm_req_set_remote_resp_timeout(req_msg,
1309
- param->remote_cm_response_timeout);
1352
+ IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
1353
+ be32_to_cpu(cm_id_priv->id.local_id));
1354
+ IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
1355
+ IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
1356
+ be64_to_cpu(cm_id_priv->id.device->node_guid));
1357
+ IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
1358
+ IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
1359
+ IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
1360
+ param->remote_cm_response_timeout);
13101361 cm_req_set_qp_type(req_msg, param->qp_type);
1311
- cm_req_set_flow_ctrl(req_msg, param->flow_control);
1312
- cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
1313
- cm_req_set_local_resp_timeout(req_msg,
1314
- param->local_cm_response_timeout);
1315
- req_msg->pkey = param->primary_path->pkey;
1316
- cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
1317
- cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
1362
+ IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
1363
+ IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
1364
+ IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
1365
+ param->local_cm_response_timeout);
1366
+ IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
1367
+ be16_to_cpu(param->primary_path->pkey));
1368
+ IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
1369
+ param->primary_path->mtu);
1370
+ IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);
13181371
13191372 if (param->qp_type != IB_QPT_XRC_INI) {
1320
- cm_req_set_resp_res(req_msg, param->responder_resources);
1321
- cm_req_set_retry_count(req_msg, param->retry_count);
1322
- cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
1323
- cm_req_set_srq(req_msg, param->srq);
1373
+ IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
1374
+ param->responder_resources);
1375
+ IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
1376
+ IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
1377
+ param->rnr_retry_count);
1378
+ IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
13241379 }
13251380
1326
- req_msg->primary_local_gid = pri_path->sgid;
1327
- req_msg->primary_remote_gid = pri_path->dgid;
1381
+ *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
1382
+ pri_path->sgid;
1383
+ *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
1384
+ pri_path->dgid;
13281385 if (pri_ext) {
1329
- req_msg->primary_local_gid.global.interface_id
1330
- = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
1331
- req_msg->primary_remote_gid.global.interface_id
1332
- = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
1386
+ IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
1387
+ ->global.interface_id =
1388
+ OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
1389
+ IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
1390
+ ->global.interface_id =
1391
+ OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
13331392 }
13341393 if (pri_path->hop_limit <= 1) {
1335
- req_msg->primary_local_lid = pri_ext ? 0 :
1336
- htons(ntohl(sa_path_get_slid(pri_path)));
1337
- req_msg->primary_remote_lid = pri_ext ? 0 :
1338
- htons(ntohl(sa_path_get_dlid(pri_path)));
1394
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1395
+ be16_to_cpu(pri_ext ? 0 :
1396
+ htons(ntohl(sa_path_get_slid(
1397
+ pri_path)))));
1398
+ IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
1399
+ be16_to_cpu(pri_ext ? 0 :
1400
+ htons(ntohl(sa_path_get_dlid(
1401
+ pri_path)))));
13391402 } else {
13401403 /* Work-around until there's a way to obtain remote LID info */
1341
- req_msg->primary_local_lid = IB_LID_PERMISSIVE;
1342
- req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
1404
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1405
+ be16_to_cpu(IB_LID_PERMISSIVE));
1406
+ IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
1407
+ be16_to_cpu(IB_LID_PERMISSIVE));
13431408 }
1344
- cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
1345
- cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
1346
- req_msg->primary_traffic_class = pri_path->traffic_class;
1347
- req_msg->primary_hop_limit = pri_path->hop_limit;
1348
- cm_req_set_primary_sl(req_msg, pri_path->sl);
1349
- cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
1350
- cm_req_set_primary_local_ack_timeout(req_msg,
1409
+ IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
1410
+ be32_to_cpu(pri_path->flow_label));
1411
+ IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
1412
+ IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
1413
+ IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
1414
+ IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
1415
+ IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
1416
+ (pri_path->hop_limit <= 1));
1417
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
13511418 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
13521419 pri_path->packet_life_time));
13531420
....@@ -1358,46 +1425,60 @@
13581425 alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
13591426 alt_path->opa.slid);
13601427
1361
- req_msg->alt_local_gid = alt_path->sgid;
1362
- req_msg->alt_remote_gid = alt_path->dgid;
1428
+ *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
1429
+ alt_path->sgid;
1430
+ *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
1431
+ alt_path->dgid;
13631432 if (alt_ext) {
1364
- req_msg->alt_local_gid.global.interface_id
1365
- = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
1366
- req_msg->alt_remote_gid.global.interface_id
1367
- = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
1433
+ IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
1434
+ req_msg)
1435
+ ->global.interface_id =
1436
+ OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
1437
+ IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
1438
+ req_msg)
1439
+ ->global.interface_id =
1440
+ OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
13681441 }
13691442 if (alt_path->hop_limit <= 1) {
1370
- req_msg->alt_local_lid = alt_ext ? 0 :
1371
- htons(ntohl(sa_path_get_slid(alt_path)));
1372
- req_msg->alt_remote_lid = alt_ext ? 0 :
1373
- htons(ntohl(sa_path_get_dlid(alt_path)));
1443
+ IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
1444
+ be16_to_cpu(
1445
+ alt_ext ? 0 :
1446
+ htons(ntohl(sa_path_get_slid(
1447
+ alt_path)))));
1448
+ IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
1449
+ be16_to_cpu(
1450
+ alt_ext ? 0 :
1451
+ htons(ntohl(sa_path_get_dlid(
1452
+ alt_path)))));
13741453 } else {
1375
- req_msg->alt_local_lid = IB_LID_PERMISSIVE;
1376
- req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
1454
+ IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
1455
+ be16_to_cpu(IB_LID_PERMISSIVE));
1456
+ IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
1457
+ be16_to_cpu(IB_LID_PERMISSIVE));
13771458 }
1378
- cm_req_set_alt_flow_label(req_msg,
1379
- alt_path->flow_label);
1380
- cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
1381
- req_msg->alt_traffic_class = alt_path->traffic_class;
1382
- req_msg->alt_hop_limit = alt_path->hop_limit;
1383
- cm_req_set_alt_sl(req_msg, alt_path->sl);
1384
- cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
1385
- cm_req_set_alt_local_ack_timeout(req_msg,
1459
+ IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
1460
+ be32_to_cpu(alt_path->flow_label));
1461
+ IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
1462
+ IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
1463
+ alt_path->traffic_class);
1464
+ IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
1465
+ alt_path->hop_limit);
1466
+ IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
1467
+ IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
1468
+ (alt_path->hop_limit <= 1));
1469
+ IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
13861470 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
13871471 alt_path->packet_life_time));
13881472 }
1473
+ IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);
13891474
13901475 if (param->private_data && param->private_data_len)
1391
- memcpy(req_msg->private_data, param->private_data,
1392
- param->private_data_len);
1476
+ IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
1477
+ param->private_data_len);
13931478 }
13941479
13951480 static int cm_validate_req_param(struct ib_cm_req_param *param)
13961481 {
1397
- /* peer-to-peer not supported */
1398
- if (param->peer_to_peer)
1399
- return -EINVAL;
1400
-
14011482 if (!param->primary_path)
14021483 return -EINVAL;
14031484
....@@ -1482,9 +1563,10 @@
14821563 cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
14831564 cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
14841565
1485
- cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
1486
- cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
1566
+ cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
1567
+ cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
14871568
1569
+ trace_icm_send_req(&cm_id_priv->id);
14881570 spin_lock_irqsave(&cm_id_priv->lock, flags);
14891571 ret = ib_post_send_mad(cm_id_priv->msg, NULL);
14901572 if (ret) {
....@@ -1520,16 +1602,21 @@
15201602 rej_msg = (struct cm_rej_msg *) msg->mad;
15211603
15221604 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
1523
- rej_msg->remote_comm_id = rcv_msg->local_comm_id;
1524
- rej_msg->local_comm_id = rcv_msg->remote_comm_id;
1525
- cm_rej_set_msg_rejected(rej_msg, msg_rejected);
1526
- rej_msg->reason = cpu_to_be16(reason);
1605
+ IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
1606
+ IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
1607
+ IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1608
+ IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
1609
+ IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
1610
+ IBA_SET(CM_REJ_REASON, rej_msg, reason);
15271611
15281612 if (ari && ari_length) {
1529
- cm_rej_set_reject_info_len(rej_msg, ari_length);
1530
- memcpy(rej_msg->ari, ari, ari_length);
1613
+ IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
1614
+ IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
15311615 }
15321616
1617
+ trace_icm_issue_rej(
1618
+ IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg),
1619
+ IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
15331620 ret = ib_post_send_mad(msg, NULL);
15341621 if (ret)
15351622 cm_free_msg(msg);
....@@ -1537,18 +1624,12 @@
15371624 return ret;
15381625 }
15391626
1540
-static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
1541
- __be32 local_qpn, __be32 remote_qpn)
1542
-{
1543
- return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
1544
- ((local_ca_guid == remote_ca_guid) &&
1545
- (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
1546
-}
1547
-
15481627 static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
15491628 {
1550
- return ((req_msg->alt_local_lid) ||
1551
- (ib_is_opa_gid(&req_msg->alt_local_gid)));
1629
+ return ((cpu_to_be16(
1630
+ IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
1631
+ (ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
1632
+ req_msg))));
15521633 }
15531634
15541635 static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
....@@ -1562,20 +1643,23 @@
15621643
15631644 static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
15641645 struct sa_path_rec *primary_path,
1565
- struct sa_path_rec *alt_path)
1646
+ struct sa_path_rec *alt_path,
1647
+ struct ib_wc *wc)
15661648 {
15671649 u32 lid;
15681650
15691651 if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1570
- sa_path_set_dlid(primary_path,
1571
- ntohs(req_msg->primary_local_lid));
1652
+ sa_path_set_dlid(primary_path, wc->slid);
15721653 sa_path_set_slid(primary_path,
1573
- ntohs(req_msg->primary_remote_lid));
1654
+ IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
1655
+ req_msg));
15741656 } else {
1575
- lid = opa_get_lid_from_gid(&req_msg->primary_local_gid);
1657
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1658
+ CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
15761659 sa_path_set_dlid(primary_path, lid);
15771660
1578
- lid = opa_get_lid_from_gid(&req_msg->primary_remote_gid);
1661
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1662
+ CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
15791663 sa_path_set_slid(primary_path, lid);
15801664 }
15811665
....@@ -1583,64 +1667,85 @@
15831667 return;
15841668
15851669 if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1586
- sa_path_set_dlid(alt_path, ntohs(req_msg->alt_local_lid));
1587
- sa_path_set_slid(alt_path, ntohs(req_msg->alt_remote_lid));
1670
+ sa_path_set_dlid(alt_path,
1671
+ IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
1672
+ req_msg));
1673
+ sa_path_set_slid(alt_path,
1674
+ IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
1675
+ req_msg));
15881676 } else {
1589
- lid = opa_get_lid_from_gid(&req_msg->alt_local_gid);
1677
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1678
+ CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
15901679 sa_path_set_dlid(alt_path, lid);
15911680
1592
- lid = opa_get_lid_from_gid(&req_msg->alt_remote_gid);
1681
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1682
+ CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
15931683 sa_path_set_slid(alt_path, lid);
15941684 }
15951685 }
15961686
15971687 static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
15981688 struct sa_path_rec *primary_path,
1599
- struct sa_path_rec *alt_path)
1689
+ struct sa_path_rec *alt_path,
1690
+ struct ib_wc *wc)
16001691 {
1601
- primary_path->dgid = req_msg->primary_local_gid;
1602
- primary_path->sgid = req_msg->primary_remote_gid;
1603
- primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
1604
- primary_path->hop_limit = req_msg->primary_hop_limit;
1605
- primary_path->traffic_class = req_msg->primary_traffic_class;
1692
+ primary_path->dgid =
1693
+ *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
1694
+ primary_path->sgid =
1695
+ *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
1696
+ primary_path->flow_label =
1697
+ cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
1698
+ primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
1699
+ primary_path->traffic_class =
1700
+ IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
16061701 primary_path->reversible = 1;
1607
- primary_path->pkey = req_msg->pkey;
1608
- primary_path->sl = cm_req_get_primary_sl(req_msg);
1702
+ primary_path->pkey =
1703
+ cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
1704
+ primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
16091705 primary_path->mtu_selector = IB_SA_EQ;
1610
- primary_path->mtu = cm_req_get_path_mtu(req_msg);
1706
+ primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
16111707 primary_path->rate_selector = IB_SA_EQ;
1612
- primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
1708
+ primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
16131709 primary_path->packet_life_time_selector = IB_SA_EQ;
16141710 primary_path->packet_life_time =
1615
- cm_req_get_primary_local_ack_timeout(req_msg);
1711
+ IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
16161712 primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1617
- primary_path->service_id = req_msg->service_id;
1713
+ primary_path->service_id =
1714
+ cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
16181715 if (sa_path_is_roce(primary_path))
16191716 primary_path->roce.route_resolved = false;
16201717
16211718 if (cm_req_has_alt_path(req_msg)) {
1622
- alt_path->dgid = req_msg->alt_local_gid;
1623
- alt_path->sgid = req_msg->alt_remote_gid;
1624
- alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
1625
- alt_path->hop_limit = req_msg->alt_hop_limit;
1626
- alt_path->traffic_class = req_msg->alt_traffic_class;
1719
+ alt_path->dgid = *IBA_GET_MEM_PTR(
1720
+ CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
1721
+ alt_path->sgid = *IBA_GET_MEM_PTR(
1722
+ CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
1723
+ alt_path->flow_label = cpu_to_be32(
1724
+ IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
1725
+ alt_path->hop_limit =
1726
+ IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
1727
+ alt_path->traffic_class =
1728
+ IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
16271729 alt_path->reversible = 1;
1628
- alt_path->pkey = req_msg->pkey;
1629
- alt_path->sl = cm_req_get_alt_sl(req_msg);
1730
+ alt_path->pkey =
1731
+ cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
1732
+ alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
16301733 alt_path->mtu_selector = IB_SA_EQ;
1631
- alt_path->mtu = cm_req_get_path_mtu(req_msg);
1734
+ alt_path->mtu =
1735
+ IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
16321736 alt_path->rate_selector = IB_SA_EQ;
1633
- alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
1737
+ alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
16341738 alt_path->packet_life_time_selector = IB_SA_EQ;
16351739 alt_path->packet_life_time =
1636
- cm_req_get_alt_local_ack_timeout(req_msg);
1740
+ IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
16371741 alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1638
- alt_path->service_id = req_msg->service_id;
1742
+ alt_path->service_id =
1743
+ cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
16391744
16401745 if (sa_path_is_roce(alt_path))
16411746 alt_path->roce.route_resolved = false;
16421747 }
1643
- cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
1748
+ cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc);
16441749 }
16451750
16461751 static u16 cm_get_bth_pkey(struct cm_work *work)
....@@ -1710,23 +1815,28 @@
17101815 } else {
17111816 param->alternate_path = NULL;
17121817 }
1713
- param->remote_ca_guid = req_msg->local_ca_guid;
1714
- param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
1715
- param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
1818
+ param->remote_ca_guid =
1819
+ cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
1820
+ param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
1821
+ param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
17161822 param->qp_type = cm_req_get_qp_type(req_msg);
1717
- param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
1718
- param->responder_resources = cm_req_get_init_depth(req_msg);
1719
- param->initiator_depth = cm_req_get_resp_res(req_msg);
1823
+ param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
1824
+ param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
1825
+ param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
17201826 param->local_cm_response_timeout =
1721
- cm_req_get_remote_resp_timeout(req_msg);
1722
- param->flow_control = cm_req_get_flow_ctrl(req_msg);
1827
+ IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
1828
+ param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
17231829 param->remote_cm_response_timeout =
1724
- cm_req_get_local_resp_timeout(req_msg);
1725
- param->retry_count = cm_req_get_retry_count(req_msg);
1726
- param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1727
- param->srq = cm_req_get_srq(req_msg);
1830
+ IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
1831
+ param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
1832
+ param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
1833
+ param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
17281834 param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
1729
- work->cm_event.private_data = &req_msg->private_data;
1835
+ param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
1836
+ param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);
1837
+
1838
+ work->cm_event.private_data =
1839
+ IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
17301840 }
17311841
17321842 static void cm_process_work(struct cm_id_private *cm_id_priv,
....@@ -1760,54 +1870,63 @@
17601870 const void *private_data, u8 private_data_len)
17611871 {
17621872 cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1763
- cm_mra_set_msg_mraed(mra_msg, msg_mraed);
1764
- mra_msg->local_comm_id = cm_id_priv->id.local_id;
1765
- mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
1766
- cm_mra_set_service_timeout(mra_msg, service_timeout);
1873
+ IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
1874
+ IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
1875
+ be32_to_cpu(cm_id_priv->id.local_id));
1876
+ IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
1877
+ be32_to_cpu(cm_id_priv->id.remote_id));
1878
+ IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
17671879
17681880 if (private_data && private_data_len)
1769
- memcpy(mra_msg->private_data, private_data, private_data_len);
1881
+ IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
1882
+ private_data_len);
17701883 }
17711884
17721885 static void cm_format_rej(struct cm_rej_msg *rej_msg,
17731886 struct cm_id_private *cm_id_priv,
1774
- enum ib_cm_rej_reason reason,
1775
- void *ari,
1776
- u8 ari_length,
1777
- const void *private_data,
1778
- u8 private_data_len)
1887
+ enum ib_cm_rej_reason reason, void *ari,
1888
+ u8 ari_length, const void *private_data,
1889
+ u8 private_data_len, enum ib_cm_state state)
17791890 {
1780
- cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1781
- rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
1891
+ lockdep_assert_held(&cm_id_priv->lock);
17821892
1783
- switch(cm_id_priv->id.state) {
1893
+ cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1894
+ IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
1895
+ be32_to_cpu(cm_id_priv->id.remote_id));
1896
+
1897
+ switch (state) {
17841898 case IB_CM_REQ_RCVD:
1785
- rej_msg->local_comm_id = 0;
1786
- cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1899
+ IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
1900
+ IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
17871901 break;
17881902 case IB_CM_MRA_REQ_SENT:
1789
- rej_msg->local_comm_id = cm_id_priv->id.local_id;
1790
- cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1903
+ IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1904
+ be32_to_cpu(cm_id_priv->id.local_id));
1905
+ IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
17911906 break;
17921907 case IB_CM_REP_RCVD:
17931908 case IB_CM_MRA_REP_SENT:
1794
- rej_msg->local_comm_id = cm_id_priv->id.local_id;
1795
- cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
1909
+ IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1910
+ be32_to_cpu(cm_id_priv->id.local_id));
1911
+ IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
17961912 break;
17971913 default:
1798
- rej_msg->local_comm_id = cm_id_priv->id.local_id;
1799
- cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
1914
+ IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1915
+ be32_to_cpu(cm_id_priv->id.local_id));
1916
+ IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
1917
+ CM_MSG_RESPONSE_OTHER);
18001918 break;
18011919 }
18021920
1803
- rej_msg->reason = cpu_to_be16(reason);
1921
+ IBA_SET(CM_REJ_REASON, rej_msg, reason);
18041922 if (ari && ari_length) {
1805
- cm_rej_set_reject_info_len(rej_msg, ari_length);
1806
- memcpy(rej_msg->ari, ari, ari_length);
1923
+ IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
1924
+ IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
18071925 }
18081926
18091927 if (private_data && private_data_len)
1810
- memcpy(rej_msg->private_data, private_data, private_data_len);
1928
+ IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
1929
+ private_data_len);
18111930 }
18121931
18131932 static void cm_dup_req_handler(struct cm_work *work,
....@@ -1820,8 +1939,12 @@
18201939 counter[CM_REQ_COUNTER]);
18211940
18221941 /* Quick state check to discard duplicate REQs. */
1823
- if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
1942
+ spin_lock_irq(&cm_id_priv->lock);
1943
+ if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
1944
+ spin_unlock_irq(&cm_id_priv->lock);
18241945 return;
1946
+ }
1947
+ spin_unlock_irq(&cm_id_priv->lock);
18251948
18261949 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
18271950 if (ret)
....@@ -1836,14 +1959,16 @@
18361959 cm_id_priv->private_data_len);
18371960 break;
18381961 case IB_CM_TIMEWAIT:
1839
- cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
1840
- IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
1962
+ cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
1963
+ IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
1964
+ IB_CM_TIMEWAIT);
18411965 break;
18421966 default:
18431967 goto unlock;
18441968 }
18451969 spin_unlock_irq(&cm_id_priv->lock);
18461970
1971
+ trace_icm_send_dup_req(&cm_id_priv->id);
18471972 ret = ib_post_send_mad(msg, NULL);
18481973 if (ret)
18491974 goto free;
....@@ -1859,7 +1984,6 @@
18591984 struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
18601985 struct cm_timewait_info *timewait_info;
18611986 struct cm_req_msg *req_msg;
1862
- struct ib_cm_id *cm_id;
18631987
18641988 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
18651989
....@@ -1867,7 +1991,7 @@
18671991 spin_lock_irq(&cm.lock);
18681992 timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
18691993 if (timewait_info) {
1870
- cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
1994
+ cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
18711995 timewait_info->work.remote_id);
18721996 spin_unlock_irq(&cm.lock);
18731997 if (cur_cm_id_priv) {
....@@ -1880,8 +2004,8 @@
18802004 /* Check for stale connections. */
18812005 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
18822006 if (timewait_info) {
1883
- cm_cleanup_timewait(cm_id_priv->timewait_info);
1884
- cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
2007
+ cm_remove_remote(cm_id_priv);
2008
+ cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
18852009 timewait_info->work.remote_id);
18862010
18872011 spin_unlock_irq(&cm.lock);
....@@ -1889,30 +2013,25 @@
18892013 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
18902014 NULL, 0);
18912015 if (cur_cm_id_priv) {
1892
- cm_id = &cur_cm_id_priv->id;
1893
- ib_send_cm_dreq(cm_id, NULL, 0);
2016
+ ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
18942017 cm_deref_id(cur_cm_id_priv);
18952018 }
18962019 return NULL;
18972020 }
18982021
18992022 /* Find matching listen request. */
1900
- listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
1901
- req_msg->service_id);
2023
+ listen_cm_id_priv = cm_find_listen(
2024
+ cm_id_priv->id.device,
2025
+ cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
19022026 if (!listen_cm_id_priv) {
1903
- cm_cleanup_timewait(cm_id_priv->timewait_info);
2027
+ cm_remove_remote(cm_id_priv);
19042028 spin_unlock_irq(&cm.lock);
19052029 cm_issue_rej(work->port, work->mad_recv_wc,
19062030 IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
19072031 NULL, 0);
1908
- goto out;
2032
+ return NULL;
19092033 }
1910
- atomic_inc(&listen_cm_id_priv->refcount);
1911
- atomic_inc(&cm_id_priv->refcount);
1912
- cm_id_priv->id.state = IB_CM_REQ_RCVD;
1913
- atomic_inc(&cm_id_priv->work_count);
19142034 spin_unlock_irq(&cm.lock);
1915
-out:
19162035 return listen_cm_id_priv;
19172036 }
19182037
....@@ -1923,30 +2042,37 @@
19232042 */
19242043 static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
19252044 {
1926
- if (!cm_req_get_primary_subnet_local(req_msg)) {
1927
- if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
1928
- req_msg->primary_local_lid = ib_lid_be16(wc->slid);
1929
- cm_req_set_primary_sl(req_msg, wc->sl);
2045
+ if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
2046
+ if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
2047
+ req_msg)) == IB_LID_PERMISSIVE) {
2048
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
2049
+ be16_to_cpu(ib_lid_be16(wc->slid)));
2050
+ IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
19302051 }
19312052
1932
- if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
1933
- req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
2053
+ if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
2054
+ req_msg)) == IB_LID_PERMISSIVE)
2055
+ IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
2056
+ wc->dlid_path_bits);
19342057 }
19352058
1936
- if (!cm_req_get_alt_subnet_local(req_msg)) {
1937
- if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
1938
- req_msg->alt_local_lid = ib_lid_be16(wc->slid);
1939
- cm_req_set_alt_sl(req_msg, wc->sl);
2059
+ if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
2060
+ if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
2061
+ req_msg)) == IB_LID_PERMISSIVE) {
2062
+ IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
2063
+ be16_to_cpu(ib_lid_be16(wc->slid)));
2064
+ IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
19402065 }
19412066
1942
- if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
1943
- req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
2067
+ if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
2068
+ req_msg)) == IB_LID_PERMISSIVE)
2069
+ IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
2070
+ wc->dlid_path_bits);
19442071 }
19452072 }
19462073
19472074 static int cm_req_handler(struct cm_work *work)
19482075 {
1949
- struct ib_cm_id *cm_id;
19502076 struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
19512077 struct cm_req_msg *req_msg;
19522078 const struct ib_global_route *grh;
....@@ -1955,12 +2081,33 @@
19552081
19562082 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
19572083
1958
- cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
1959
- if (IS_ERR(cm_id))
1960
- return PTR_ERR(cm_id);
2084
+ cm_id_priv =
2085
+ cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
2086
+ if (IS_ERR(cm_id_priv))
2087
+ return PTR_ERR(cm_id_priv);
19612088
1962
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1963
- cm_id_priv->id.remote_id = req_msg->local_comm_id;
2089
+ cm_id_priv->id.remote_id =
2090
+ cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
2091
+ cm_id_priv->id.service_id =
2092
+ cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
2093
+ cm_id_priv->id.service_mask = ~cpu_to_be64(0);
2094
+ cm_id_priv->tid = req_msg->hdr.tid;
2095
+ cm_id_priv->timeout_ms = cm_convert_to_ms(
2096
+ IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
2097
+ cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
2098
+ cm_id_priv->remote_qpn =
2099
+ cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
2100
+ cm_id_priv->initiator_depth =
2101
+ IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
2102
+ cm_id_priv->responder_resources =
2103
+ IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
2104
+ cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
2105
+ cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
2106
+ cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
2107
+ cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
2108
+ cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
2109
+ cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
2110
+
19642111 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
19652112 work->mad_recv_wc->recv_buf.grh,
19662113 &cm_id_priv->av);
....@@ -1973,24 +2120,27 @@
19732120 cm_id_priv->timewait_info = NULL;
19742121 goto destroy;
19752122 }
1976
- cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
1977
- cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
1978
- cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
2123
+ cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
2124
+ cm_id_priv->timewait_info->remote_ca_guid =
2125
+ cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
2126
+ cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;
2127
+
2128
+ /*
2129
+ * Note that the ID pointer is not in the xarray at this point,
2130
+ * so this set is only visible to the local thread.
2131
+ */
2132
+ cm_id_priv->id.state = IB_CM_REQ_RCVD;
19792133
19802134 listen_cm_id_priv = cm_match_req(work, cm_id_priv);
19812135 if (!listen_cm_id_priv) {
1982
- pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
1983
- be32_to_cpu(cm_id->local_id));
2136
+ trace_icm_no_listener_err(&cm_id_priv->id);
2137
+ cm_id_priv->id.state = IB_CM_IDLE;
19842138 ret = -EINVAL;
19852139 goto destroy;
19862140 }
19872141
1988
- cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
1989
- cm_id_priv->id.context = listen_cm_id_priv->id.context;
1990
- cm_id_priv->id.service_id = req_msg->service_id;
1991
- cm_id_priv->id.service_mask = ~cpu_to_be64(0);
1992
-
1993
- cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
2142
+ if (cm_id_priv->av.ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE)
2143
+ cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
19942144
19952145 memset(&work->path[0], 0, sizeof(work->path[0]));
19962146 if (cm_req_has_alt_path(req_msg))
....@@ -1998,20 +2148,22 @@
19982148 grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
19992149 gid_attr = grh->sgid_attr;
20002150
2001
- if (gid_attr && gid_attr->ndev) {
2151
+ if (gid_attr &&
2152
+ rdma_protocol_roce(work->port->cm_dev->ib_device,
2153
+ work->port->port_num)) {
20022154 work->path[0].rec_type =
20032155 sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
20042156 } else {
2005
- /* If no GID attribute or ndev is null, it is not RoCE. */
2006
- cm_path_set_rec_type(work->port->cm_dev->ib_device,
2007
- work->port->port_num,
2008
- &work->path[0],
2009
- &req_msg->primary_local_gid);
2157
+ cm_path_set_rec_type(
2158
+ work->port->cm_dev->ib_device, work->port->port_num,
2159
+ &work->path[0],
2160
+ IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
2161
+ req_msg));
20102162 }
20112163 if (cm_req_has_alt_path(req_msg))
20122164 work->path[1].rec_type = work->path[0].rec_type;
20132165 cm_format_paths_from_req(req_msg, &work->path[0],
2014
- &work->path[1]);
2166
+ &work->path[1], work->mad_recv_wc->wc);
20152167 if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
20162168 sa_path_set_dmac(&work->path[0],
20172169 cm_id_priv->av.ah_attr.roce.dmac);
....@@ -2025,10 +2177,10 @@
20252177 work->port->port_num, 0,
20262178 &work->path[0].sgid);
20272179 if (err)
2028
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
2180
+ ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
20292181 NULL, 0, NULL, 0);
20302182 else
2031
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
2183
+ ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
20322184 &work->path[0].sgid,
20332185 sizeof(work->path[0].sgid),
20342186 NULL, 0);
....@@ -2038,36 +2190,38 @@
20382190 ret = cm_init_av_by_path(&work->path[1], NULL,
20392191 &cm_id_priv->alt_av, cm_id_priv);
20402192 if (ret) {
2041
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
2193
+ ib_send_cm_rej(&cm_id_priv->id,
2194
+ IB_CM_REJ_INVALID_ALT_GID,
20422195 &work->path[0].sgid,
20432196 sizeof(work->path[0].sgid), NULL, 0);
20442197 goto rejected;
20452198 }
20462199 }
2047
- cm_id_priv->tid = req_msg->hdr.tid;
2048
- cm_id_priv->timeout_ms = cm_convert_to_ms(
2049
- cm_req_get_local_resp_timeout(req_msg));
2050
- cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
2051
- cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
2052
- cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
2053
- cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
2054
- cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
2055
- cm_id_priv->pkey = req_msg->pkey;
2056
- cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
2057
- cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
2058
- cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
2059
- cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
20602200
2201
+ cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
2202
+ cm_id_priv->id.context = listen_cm_id_priv->id.context;
20612203 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
2062
- cm_process_work(cm_id_priv, work);
2204
+
2205
+ /* Now MAD handlers can see the new ID */
2206
+ spin_lock_irq(&cm_id_priv->lock);
2207
+ cm_finalize_id(cm_id_priv);
2208
+
2209
+ /* Refcount belongs to the event, pairs with cm_process_work() */
2210
+ refcount_inc(&cm_id_priv->refcount);
2211
+ cm_queue_work_unlock(cm_id_priv, work);
2212
+ /*
2213
+ * Since this ID was just created and was not made visible to other MAD
2214
+ * handlers until the cm_finalize_id() above we know that the
2215
+ * cm_process_work() will deliver the event and the listen_cm_id
2216
+ * embedded in the event can be derefed here.
2217
+ */
20632218 cm_deref_id(listen_cm_id_priv);
20642219 return 0;
20652220
20662221 rejected:
2067
- atomic_dec(&cm_id_priv->refcount);
20682222 cm_deref_id(listen_cm_id_priv);
20692223 destroy:
2070
- ib_destroy_cm_id(cm_id);
2224
+ ib_destroy_cm_id(&cm_id_priv->id);
20712225 return ret;
20722226 }
20732227
....@@ -2075,30 +2229,41 @@
20752229 struct cm_id_private *cm_id_priv,
20762230 struct ib_cm_rep_param *param)
20772231 {
2078
- cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
2079
- rep_msg->local_comm_id = cm_id_priv->id.local_id;
2080
- rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2081
- cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
2082
- rep_msg->resp_resources = param->responder_resources;
2083
- cm_rep_set_target_ack_delay(rep_msg,
2084
- cm_id_priv->av.port->cm_dev->ack_delay);
2085
- cm_rep_set_failover(rep_msg, param->failover_accepted);
2086
- cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
2087
- rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
2232
+ cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
2233
+ param->ece.attr_mod);
2234
+ IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
2235
+ be32_to_cpu(cm_id_priv->id.local_id));
2236
+ IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
2237
+ be32_to_cpu(cm_id_priv->id.remote_id));
2238
+ IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
2239
+ IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
2240
+ param->responder_resources);
2241
+ IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
2242
+ cm_id_priv->av.port->cm_dev->ack_delay);
2243
+ IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
2244
+ IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
2245
+ IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
2246
+ be64_to_cpu(cm_id_priv->id.device->node_guid));
20882247
20892248 if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
2090
- rep_msg->initiator_depth = param->initiator_depth;
2091
- cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
2092
- cm_rep_set_srq(rep_msg, param->srq);
2093
- cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
2249
+ IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
2250
+ param->initiator_depth);
2251
+ IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
2252
+ param->flow_control);
2253
+ IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
2254
+ IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
20942255 } else {
2095
- cm_rep_set_srq(rep_msg, 1);
2096
- cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
2256
+ IBA_SET(CM_REP_SRQ, rep_msg, 1);
2257
+ IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
20972258 }
20982259
2260
+ IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
2261
+ IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
2262
+ IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);
2263
+
20992264 if (param->private_data && param->private_data_len)
2100
- memcpy(rep_msg->private_data, param->private_data,
2101
- param->private_data_len);
2265
+ IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
2266
+ param->private_data_len);
21022267 }
21032268
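[Annotation, not part of the patch] The ECE vendor ID written above is spread over three one-byte fields (VENDOR_ID_L/M/H), and cm_format_rep_event() further down reassembles it with the mirror-image shifts. A quick worked example with a made-up value shows the byte split; the 8-bit fields presumably keep only the low byte of whatever is passed in (the round-trip in this patch only works if they do), which is why plain >> shifts are enough:

	u32 vendor_id = 0xABCDEF;	/* example value only */
	u32 decoded;

	IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, vendor_id);	/* stores 0xEF */
	IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, vendor_id >> 8);	/* stores 0xCD */
	IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, vendor_id >> 16);	/* stores 0xAB */

	/* Receive side, as in cm_format_rep_event(): */
	decoded = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
	decoded |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
	decoded |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);	/* 0xABCDEF again */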
21042269 int ib_send_cm_rep(struct ib_cm_id *cm_id,
....@@ -2118,8 +2283,7 @@
21182283 spin_lock_irqsave(&cm_id_priv->lock, flags);
21192284 if (cm_id->state != IB_CM_REQ_RCVD &&
21202285 cm_id->state != IB_CM_MRA_REQ_SENT) {
2121
- pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__,
2122
- be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
2286
+ trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state);
21232287 ret = -EINVAL;
21242288 goto out;
21252289 }
....@@ -2133,6 +2297,7 @@
21332297 msg->timeout_ms = cm_id_priv->timeout_ms;
21342298 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
21352299
2300
+ trace_icm_send_rep(cm_id);
21362301 ret = ib_post_send_mad(msg, NULL);
21372302 if (ret) {
21382303 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
....@@ -2144,7 +2309,10 @@
21442309 cm_id_priv->msg = msg;
21452310 cm_id_priv->initiator_depth = param->initiator_depth;
21462311 cm_id_priv->responder_resources = param->responder_resources;
2147
- cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
2312
+ cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2313
+ WARN_ONCE(param->qp_num & 0xFF000000,
2314
+ "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
2315
+ param->qp_num);
21482316 cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
21492317
21502318 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
....@@ -2158,11 +2326,14 @@
21582326 u8 private_data_len)
21592327 {
21602328 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2161
- rtu_msg->local_comm_id = cm_id_priv->id.local_id;
2162
- rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
2329
+ IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
2330
+ be32_to_cpu(cm_id_priv->id.local_id));
2331
+ IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
2332
+ be32_to_cpu(cm_id_priv->id.remote_id));
21632333
21642334 if (private_data && private_data_len)
2165
- memcpy(rtu_msg->private_data, private_data, private_data_len);
2335
+ IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
2336
+ private_data_len);
21662337 }
21672338
21682339 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
....@@ -2186,8 +2357,7 @@
21862357 spin_lock_irqsave(&cm_id_priv->lock, flags);
21872358 if (cm_id->state != IB_CM_REP_RCVD &&
21882359 cm_id->state != IB_CM_MRA_REP_SENT) {
2189
- pr_debug("%s: local_id %d, cm_id->state %d\n", __func__,
2190
- be32_to_cpu(cm_id->local_id), cm_id->state);
2360
+ trace_icm_send_cm_rtu_err(cm_id);
21912361 ret = -EINVAL;
21922362 goto error;
21932363 }
....@@ -2199,6 +2369,7 @@
21992369 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
22002370 private_data, private_data_len);
22012371
2372
+ trace_icm_send_rtu(cm_id);
22022373 ret = ib_post_send_mad(msg, NULL);
22032374 if (ret) {
22042375 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
....@@ -2225,18 +2396,25 @@
22252396
22262397 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
22272398 param = &work->cm_event.param.rep_rcvd;
2228
- param->remote_ca_guid = rep_msg->local_ca_guid;
2229
- param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
2399
+ param->remote_ca_guid =
2400
+ cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2401
+ param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
22302402 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2231
- param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
2232
- param->responder_resources = rep_msg->initiator_depth;
2233
- param->initiator_depth = rep_msg->resp_resources;
2234
- param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2235
- param->failover_accepted = cm_rep_get_failover(rep_msg);
2236
- param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
2237
- param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2238
- param->srq = cm_rep_get_srq(rep_msg);
2239
- work->cm_event.private_data = &rep_msg->private_data;
2403
+ param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
2404
+ param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2405
+ param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2406
+ param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2407
+ param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
2408
+ param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
2409
+ param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2410
+ param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
2411
+ param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
2412
+ param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
2413
+ param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
2414
+ param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);
2415
+
2416
+ work->cm_event.private_data =
2417
+ IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
22402418 }
22412419
22422420 static void cm_dup_rep_handler(struct cm_work *work)
....@@ -2247,8 +2425,9 @@
22472425 int ret;
22482426
22492427 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2250
- cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
2251
- rep_msg->local_comm_id);
2428
+ cm_id_priv = cm_acquire_id(
2429
+ cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
2430
+ cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
22522431 if (!cm_id_priv)
22532432 return;
22542433
....@@ -2272,6 +2451,7 @@
22722451 goto unlock;
22732452 spin_unlock_irq(&cm_id_priv->lock);
22742453
2454
+ trace_icm_send_dup_rep(&cm_id_priv->id);
22752455 ret = ib_post_send_mad(msg, NULL);
22762456 if (ret)
22772457 goto free;
....@@ -2288,15 +2468,15 @@
22882468 struct cm_rep_msg *rep_msg;
22892469 int ret;
22902470 struct cm_id_private *cur_cm_id_priv;
2291
- struct ib_cm_id *cm_id;
22922471 struct cm_timewait_info *timewait_info;
22932472
22942473 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2295
- cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
2474
+ cm_id_priv = cm_acquire_id(
2475
+ cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
22962476 if (!cm_id_priv) {
22972477 cm_dup_rep_handler(work);
2298
- pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
2299
- be32_to_cpu(rep_msg->remote_comm_id));
2478
+ trace_icm_remote_no_priv_err(
2479
+ IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
23002480 return -EINVAL;
23012481 }
23022482
....@@ -2308,17 +2488,19 @@
23082488 case IB_CM_MRA_REQ_RCVD:
23092489 break;
23102490 default:
2311
- spin_unlock_irq(&cm_id_priv->lock);
23122491 ret = -EINVAL;
2313
- pr_debug("%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
2314
- __func__, cm_id_priv->id.state,
2315
- be32_to_cpu(rep_msg->local_comm_id),
2316
- be32_to_cpu(rep_msg->remote_comm_id));
2492
+ trace_icm_rep_unknown_err(
2493
+ IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2494
+ IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg),
2495
+ cm_id_priv->id.state);
2496
+ spin_unlock_irq(&cm_id_priv->lock);
23172497 goto error;
23182498 }
23192499
2320
- cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
2321
- cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
2500
+ cm_id_priv->timewait_info->work.remote_id =
2501
+ cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2502
+ cm_id_priv->timewait_info->remote_ca_guid =
2503
+ cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
23222504 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
23232505
23242506 spin_lock(&cm.lock);
....@@ -2327,17 +2509,15 @@
23272509 spin_unlock(&cm.lock);
23282510 spin_unlock_irq(&cm_id_priv->lock);
23292511 ret = -EINVAL;
2330
- pr_debug("%s: Failed to insert remote id %d\n", __func__,
2331
- be32_to_cpu(rep_msg->remote_comm_id));
2512
+ trace_icm_insert_failed_err(
2513
+ IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
23322514 goto error;
23332515 }
23342516 /* Check for a stale connection. */
23352517 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
23362518 if (timewait_info) {
2337
- rb_erase(&cm_id_priv->timewait_info->remote_id_node,
2338
- &cm.remote_id_table);
2339
- cm_id_priv->timewait_info->inserted_remote_id = 0;
2340
- cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
2519
+ cm_remove_remote(cm_id_priv);
2520
+ cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
23412521 timewait_info->work.remote_id);
23422522
23432523 spin_unlock(&cm.lock);
....@@ -2346,13 +2526,12 @@
23462526 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
23472527 NULL, 0);
23482528 ret = -EINVAL;
2349
- pr_debug("%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
2350
- __func__, be32_to_cpu(rep_msg->local_comm_id),
2351
- be32_to_cpu(rep_msg->remote_comm_id));
2529
+ trace_icm_staleconn_err(
2530
+ IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2531
+ IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
23522532
23532533 if (cur_cm_id_priv) {
2354
- cm_id = &cur_cm_id_priv->id;
2355
- ib_send_cm_dreq(cm_id, NULL, 0);
2534
+ ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
23562535 cm_deref_id(cur_cm_id_priv);
23572536 }
23582537
....@@ -2361,13 +2540,17 @@
23612540 spin_unlock(&cm.lock);
23622541
23632542 cm_id_priv->id.state = IB_CM_REP_RCVD;
2364
- cm_id_priv->id.remote_id = rep_msg->local_comm_id;
2543
+ cm_id_priv->id.remote_id =
2544
+ cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
23652545 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2366
- cm_id_priv->initiator_depth = rep_msg->resp_resources;
2367
- cm_id_priv->responder_resources = rep_msg->initiator_depth;
2368
- cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
2369
- cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2370
- cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2546
+ cm_id_priv->initiator_depth =
2547
+ IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2548
+ cm_id_priv->responder_resources =
2549
+ IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2550
+ cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2551
+ cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2552
+ cm_id_priv->target_ack_delay =
2553
+ IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
23712554 cm_id_priv->av.timeout =
23722555 cm_ack_timeout(cm_id_priv->target_ack_delay,
23732556 cm_id_priv->av.timeout - 1);
....@@ -2375,18 +2558,8 @@
23752558 cm_ack_timeout(cm_id_priv->target_ack_delay,
23762559 cm_id_priv->alt_av.timeout - 1);
23772560
2378
- /* todo: handle peer_to_peer */
2379
-
23802561 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2381
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
2382
- if (!ret)
2383
- list_add_tail(&work->list, &cm_id_priv->work_list);
2384
- spin_unlock_irq(&cm_id_priv->lock);
2385
-
2386
- if (ret)
2387
- cm_process_work(cm_id_priv, work);
2388
- else
2389
- cm_deref_id(cm_id_priv);
2562
+ cm_queue_work_unlock(cm_id_priv, work);
23902563 return 0;
23912564
23922565 error:
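[Annotation, not part of the patch] Every receive handler converted in this patch (REP here, plus RTU, DREQ, DREP, REJ, MRA, LAP and the establish handler below) swaps the same open-coded work-queuing sequence for a single cm_queue_work_unlock() call that also drops cm_id_priv->lock. The helper's body is not shown in this diff; reconstructed from the deleted lines it is presumably equivalent to the sketch below (the __releases() annotation and the comments are assumptions, not copied from the kernel):

static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
				 struct cm_work *work)
	__releases(&cm_id_priv->lock)
{
	bool immediate;

	/* atomic_inc_and_test() is true when the count reaches zero,
	 * i.e. no other work is currently in flight for this cm_id. */
	immediate = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!immediate)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (immediate)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
}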
....@@ -2397,7 +2570,6 @@
23972570 static int cm_establish_handler(struct cm_work *work)
23982571 {
23992572 struct cm_id_private *cm_id_priv;
2400
- int ret;
24012573
24022574 /* See comment in cm_establish about lookup. */
24032575 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
....@@ -2411,15 +2583,7 @@
24112583 }
24122584
24132585 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2414
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
2415
- if (!ret)
2416
- list_add_tail(&work->list, &cm_id_priv->work_list);
2417
- spin_unlock_irq(&cm_id_priv->lock);
2418
-
2419
- if (ret)
2420
- cm_process_work(cm_id_priv, work);
2421
- else
2422
- cm_deref_id(cm_id_priv);
2586
+ cm_queue_work_unlock(cm_id_priv, work);
24232587 return 0;
24242588 out:
24252589 cm_deref_id(cm_id_priv);
....@@ -2430,15 +2594,16 @@
24302594 {
24312595 struct cm_id_private *cm_id_priv;
24322596 struct cm_rtu_msg *rtu_msg;
2433
- int ret;
24342597
24352598 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2436
- cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
2437
- rtu_msg->local_comm_id);
2599
+ cm_id_priv = cm_acquire_id(
2600
+ cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
2601
+ cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
24382602 if (!cm_id_priv)
24392603 return -EINVAL;
24402604
2441
- work->cm_event.private_data = &rtu_msg->private_data;
2605
+ work->cm_event.private_data =
2606
+ IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);
24422607
24432608 spin_lock_irq(&cm_id_priv->lock);
24442609 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
....@@ -2451,15 +2616,7 @@
24512616 cm_id_priv->id.state = IB_CM_ESTABLISHED;
24522617
24532618 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2454
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
2455
- if (!ret)
2456
- list_add_tail(&work->list, &cm_id_priv->work_list);
2457
- spin_unlock_irq(&cm_id_priv->lock);
2458
-
2459
- if (ret)
2460
- cm_process_work(cm_id_priv, work);
2461
- else
2462
- cm_deref_id(cm_id_priv);
2619
+ cm_queue_work_unlock(cm_id_priv, work);
24632620 return 0;
24642621 out:
24652622 cm_deref_id(cm_id_priv);
....@@ -2473,43 +2630,42 @@
24732630 {
24742631 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
24752632 cm_form_tid(cm_id_priv));
2476
- dreq_msg->local_comm_id = cm_id_priv->id.local_id;
2477
- dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
2478
- cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
2633
+ IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
2634
+ be32_to_cpu(cm_id_priv->id.local_id));
2635
+ IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
2636
+ be32_to_cpu(cm_id_priv->id.remote_id));
2637
+ IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
2638
+ be32_to_cpu(cm_id_priv->remote_qpn));
24792639
24802640 if (private_data && private_data_len)
2481
- memcpy(dreq_msg->private_data, private_data, private_data_len);
2641
+ IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
2642
+ private_data_len);
24822643 }
24832644
2484
-int ib_send_cm_dreq(struct ib_cm_id *cm_id,
2485
- const void *private_data,
2486
- u8 private_data_len)
2645
+static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
2646
+ const void *private_data, u8 private_data_len)
24872647 {
2488
- struct cm_id_private *cm_id_priv;
24892648 struct ib_mad_send_buf *msg;
2490
- unsigned long flags;
24912649 int ret;
2650
+
2651
+ lockdep_assert_held(&cm_id_priv->lock);
24922652
24932653 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
24942654 return -EINVAL;
24952655
2496
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2497
- spin_lock_irqsave(&cm_id_priv->lock, flags);
2498
- if (cm_id->state != IB_CM_ESTABLISHED) {
2499
- pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
2500
- be32_to_cpu(cm_id->local_id), cm_id->state);
2501
- ret = -EINVAL;
2502
- goto out;
2656
+ if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2657
+ trace_icm_dreq_skipped(&cm_id_priv->id);
2658
+ return -EINVAL;
25032659 }
25042660
2505
- if (cm_id->lap_state == IB_CM_LAP_SENT ||
2506
- cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
2661
+ if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2662
+ cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25072663 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
25082664
25092665 ret = cm_alloc_msg(cm_id_priv, &msg);
25102666 if (ret) {
25112667 cm_enter_timewait(cm_id_priv);
2512
- goto out;
2668
+ return ret;
25132669 }
25142670
25152671 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
....@@ -2517,17 +2673,30 @@
25172673 msg->timeout_ms = cm_id_priv->timeout_ms;
25182674 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
25192675
2676
+ trace_icm_send_dreq(&cm_id_priv->id);
25202677 ret = ib_post_send_mad(msg, NULL);
25212678 if (ret) {
25222679 cm_enter_timewait(cm_id_priv);
2523
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
25242680 cm_free_msg(msg);
25252681 return ret;
25262682 }
25272683
2528
- cm_id->state = IB_CM_DREQ_SENT;
2684
+ cm_id_priv->id.state = IB_CM_DREQ_SENT;
25292685 cm_id_priv->msg = msg;
2530
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2686
+ return 0;
2687
+}
2688
+
2689
+int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
2690
+ u8 private_data_len)
2691
+{
2692
+ struct cm_id_private *cm_id_priv =
2693
+ container_of(cm_id, struct cm_id_private, id);
2694
+ unsigned long flags;
2695
+ int ret;
2696
+
2697
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
2698
+ ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
2699
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
25312700 return ret;
25322701 }
25332702 EXPORT_SYMBOL(ib_send_cm_dreq);
....@@ -2538,58 +2707,68 @@
25382707 u8 private_data_len)
25392708 {
25402709 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2541
- drep_msg->local_comm_id = cm_id_priv->id.local_id;
2542
- drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2710
+ IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2711
+ be32_to_cpu(cm_id_priv->id.local_id));
2712
+ IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2713
+ be32_to_cpu(cm_id_priv->id.remote_id));
25432714
25442715 if (private_data && private_data_len)
2545
- memcpy(drep_msg->private_data, private_data, private_data_len);
2716
+ IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
2717
+ private_data_len);
25462718 }
25472719
2548
-int ib_send_cm_drep(struct ib_cm_id *cm_id,
2549
- const void *private_data,
2550
- u8 private_data_len)
2720
+static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
2721
+ void *private_data, u8 private_data_len)
25512722 {
2552
- struct cm_id_private *cm_id_priv;
25532723 struct ib_mad_send_buf *msg;
2554
- unsigned long flags;
2555
- void *data;
25562724 int ret;
2725
+
2726
+ lockdep_assert_held(&cm_id_priv->lock);
25572727
25582728 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
25592729 return -EINVAL;
2730
+
2731
+ if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2732
+ trace_icm_send_drep_err(&cm_id_priv->id);
2733
+ kfree(private_data);
2734
+ return -EINVAL;
2735
+ }
2736
+
2737
+ cm_set_private_data(cm_id_priv, private_data, private_data_len);
2738
+ cm_enter_timewait(cm_id_priv);
2739
+
2740
+ ret = cm_alloc_msg(cm_id_priv, &msg);
2741
+ if (ret)
2742
+ return ret;
2743
+
2744
+ cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2745
+ private_data, private_data_len);
2746
+
2747
+ trace_icm_send_drep(&cm_id_priv->id);
2748
+ ret = ib_post_send_mad(msg, NULL);
2749
+ if (ret) {
2750
+ cm_free_msg(msg);
2751
+ return ret;
2752
+ }
2753
+ return 0;
2754
+}
2755
+
2756
+int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
2757
+ u8 private_data_len)
2758
+{
2759
+ struct cm_id_private *cm_id_priv =
2760
+ container_of(cm_id, struct cm_id_private, id);
2761
+ unsigned long flags;
2762
+ void *data;
2763
+ int ret;
25602764
25612765 data = cm_copy_private_data(private_data, private_data_len);
25622766 if (IS_ERR(data))
25632767 return PTR_ERR(data);
25642768
2565
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
25662769 spin_lock_irqsave(&cm_id_priv->lock, flags);
2567
- if (cm_id->state != IB_CM_DREQ_RCVD) {
2568
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2569
- kfree(data);
2570
- pr_debug("%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n",
2571
- __func__, be32_to_cpu(cm_id->local_id), cm_id->state);
2572
- return -EINVAL;
2573
- }
2574
-
2575
- cm_set_private_data(cm_id_priv, data, private_data_len);
2576
- cm_enter_timewait(cm_id_priv);
2577
-
2578
- ret = cm_alloc_msg(cm_id_priv, &msg);
2579
- if (ret)
2580
- goto out;
2581
-
2582
- cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2583
- private_data, private_data_len);
2584
-
2585
- ret = ib_post_send_mad(msg, NULL);
2586
- if (ret) {
2587
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2588
- cm_free_msg(msg);
2589
- return ret;
2590
- }
2591
-
2592
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2770
+ ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
2771
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
25932772 return ret;
25942773 }
25952774 EXPORT_SYMBOL(ib_send_cm_drep);
....@@ -2610,9 +2789,14 @@
26102789 drep_msg = (struct cm_drep_msg *) msg->mad;
26112790
26122791 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2613
- drep_msg->remote_comm_id = dreq_msg->local_comm_id;
2614
- drep_msg->local_comm_id = dreq_msg->remote_comm_id;
2792
+ IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2793
+ IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
2794
+ IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2795
+ IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
26152796
2797
+ trace_icm_issue_drep(
2798
+ IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2799
+ IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
26162800 ret = ib_post_send_mad(msg, NULL);
26172801 if (ret)
26182802 cm_free_msg(msg);
....@@ -2625,25 +2809,27 @@
26252809 struct cm_id_private *cm_id_priv;
26262810 struct cm_dreq_msg *dreq_msg;
26272811 struct ib_mad_send_buf *msg = NULL;
2628
- int ret;
26292812
26302813 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2631
- cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
2632
- dreq_msg->local_comm_id);
2814
+ cm_id_priv = cm_acquire_id(
2815
+ cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
2816
+ cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
26332817 if (!cm_id_priv) {
26342818 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26352819 counter[CM_DREQ_COUNTER]);
26362820 cm_issue_drep(work->port, work->mad_recv_wc);
2637
- pr_debug("%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
2638
- __func__, be32_to_cpu(dreq_msg->local_comm_id),
2639
- be32_to_cpu(dreq_msg->remote_comm_id));
2821
+ trace_icm_no_priv_err(
2822
+ IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2823
+ IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
26402824 return -EINVAL;
26412825 }
26422826
2643
- work->cm_event.private_data = &dreq_msg->private_data;
2827
+ work->cm_event.private_data =
2828
+ IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);
26442829
26452830 spin_lock_irq(&cm_id_priv->lock);
2646
- if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
2831
+ if (cm_id_priv->local_qpn !=
2832
+ cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
26472833 goto unlock;
26482834
26492835 switch (cm_id_priv->id.state) {
....@@ -2679,22 +2865,12 @@
26792865 counter[CM_DREQ_COUNTER]);
26802866 goto unlock;
26812867 default:
2682
- pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2683
- __func__, be32_to_cpu(cm_id_priv->id.local_id),
2684
- cm_id_priv->id.state);
2868
+ trace_icm_dreq_unknown_err(&cm_id_priv->id);
26852869 goto unlock;
26862870 }
26872871 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
26882872 cm_id_priv->tid = dreq_msg->hdr.tid;
2689
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
2690
- if (!ret)
2691
- list_add_tail(&work->list, &cm_id_priv->work_list);
2692
- spin_unlock_irq(&cm_id_priv->lock);
2693
-
2694
- if (ret)
2695
- cm_process_work(cm_id_priv, work);
2696
- else
2697
- cm_deref_id(cm_id_priv);
2873
+ cm_queue_work_unlock(cm_id_priv, work);
26982874 return 0;
26992875
27002876 unlock: spin_unlock_irq(&cm_id_priv->lock);
....@@ -2706,15 +2882,16 @@
27062882 {
27072883 struct cm_id_private *cm_id_priv;
27082884 struct cm_drep_msg *drep_msg;
2709
- int ret;
27102885
27112886 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2712
- cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
2713
- drep_msg->local_comm_id);
2887
+ cm_id_priv = cm_acquire_id(
2888
+ cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
2889
+ cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
27142890 if (!cm_id_priv)
27152891 return -EINVAL;
27162892
2717
- work->cm_event.private_data = &drep_msg->private_data;
2893
+ work->cm_event.private_data =
2894
+ IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);
27182895
27192896 spin_lock_irq(&cm_id_priv->lock);
27202897 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
....@@ -2725,80 +2902,82 @@
27252902 cm_enter_timewait(cm_id_priv);
27262903
27272904 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2728
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
2729
- if (!ret)
2730
- list_add_tail(&work->list, &cm_id_priv->work_list);
2731
- spin_unlock_irq(&cm_id_priv->lock);
2732
-
2733
- if (ret)
2734
- cm_process_work(cm_id_priv, work);
2735
- else
2736
- cm_deref_id(cm_id_priv);
2905
+ cm_queue_work_unlock(cm_id_priv, work);
27372906 return 0;
27382907 out:
27392908 cm_deref_id(cm_id_priv);
27402909 return -EINVAL;
27412910 }
27422911
2743
-int ib_send_cm_rej(struct ib_cm_id *cm_id,
2744
- enum ib_cm_rej_reason reason,
2745
- void *ari,
2746
- u8 ari_length,
2747
- const void *private_data,
2748
- u8 private_data_len)
2912
+static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
2913
+ enum ib_cm_rej_reason reason, void *ari,
2914
+ u8 ari_length, const void *private_data,
2915
+ u8 private_data_len)
27492916 {
2750
- struct cm_id_private *cm_id_priv;
2917
+ enum ib_cm_state state = cm_id_priv->id.state;
27512918 struct ib_mad_send_buf *msg;
2752
- unsigned long flags;
27532919 int ret;
2920
+
2921
+ lockdep_assert_held(&cm_id_priv->lock);
27542922
27552923 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
27562924 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
27572925 return -EINVAL;
27582926
2759
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2927
+ trace_icm_send_rej(&cm_id_priv->id, reason);
27602928
2761
- spin_lock_irqsave(&cm_id_priv->lock, flags);
2762
- switch (cm_id->state) {
2929
+ switch (state) {
27632930 case IB_CM_REQ_SENT:
27642931 case IB_CM_MRA_REQ_RCVD:
27652932 case IB_CM_REQ_RCVD:
27662933 case IB_CM_MRA_REQ_SENT:
27672934 case IB_CM_REP_RCVD:
27682935 case IB_CM_MRA_REP_SENT:
2769
- ret = cm_alloc_msg(cm_id_priv, &msg);
2770
- if (!ret)
2771
- cm_format_rej((struct cm_rej_msg *) msg->mad,
2772
- cm_id_priv, reason, ari, ari_length,
2773
- private_data, private_data_len);
2774
-
27752936 cm_reset_to_idle(cm_id_priv);
2937
+ ret = cm_alloc_msg(cm_id_priv, &msg);
2938
+ if (ret)
2939
+ return ret;
2940
+ cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2941
+ ari, ari_length, private_data, private_data_len,
2942
+ state);
27762943 break;
27772944 case IB_CM_REP_SENT:
27782945 case IB_CM_MRA_REP_RCVD:
2779
- ret = cm_alloc_msg(cm_id_priv, &msg);
2780
- if (!ret)
2781
- cm_format_rej((struct cm_rej_msg *) msg->mad,
2782
- cm_id_priv, reason, ari, ari_length,
2783
- private_data, private_data_len);
2784
-
27852946 cm_enter_timewait(cm_id_priv);
2947
+ ret = cm_alloc_msg(cm_id_priv, &msg);
2948
+ if (ret)
2949
+ return ret;
2950
+ cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2951
+ ari, ari_length, private_data, private_data_len,
2952
+ state);
27862953 break;
27872954 default:
2788
- pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
2789
- be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
2790
- ret = -EINVAL;
2791
- goto out;
2955
+ trace_icm_send_unknown_rej_err(&cm_id_priv->id);
2956
+ return -EINVAL;
27922957 }
27932958
2794
- if (ret)
2795
- goto out;
2796
-
27972959 ret = ib_post_send_mad(msg, NULL);
2798
- if (ret)
2960
+ if (ret) {
27992961 cm_free_msg(msg);
2962
+ return ret;
2963
+ }
28002964
2801
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2965
+ return 0;
2966
+}
2967
+
2968
+int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
2969
+ void *ari, u8 ari_length, const void *private_data,
2970
+ u8 private_data_len)
2971
+{
2972
+ struct cm_id_private *cm_id_priv =
2973
+ container_of(cm_id, struct cm_id_private, id);
2974
+ unsigned long flags;
2975
+ int ret;
2976
+
2977
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
2978
+ ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
2979
+ private_data, private_data_len);
2980
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
28022981 return ret;
28032982 }
28042983 EXPORT_SYMBOL(ib_send_cm_rej);
....@@ -2810,42 +2989,33 @@
28102989
28112990 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
28122991 param = &work->cm_event.param.rej_rcvd;
2813
- param->ari = rej_msg->ari;
2814
- param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2815
- param->reason = __be16_to_cpu(rej_msg->reason);
2816
- work->cm_event.private_data = &rej_msg->private_data;
2992
+ param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
2993
+ param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
2994
+ param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
2995
+ work->cm_event.private_data =
2996
+ IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
28172997 }
28182998
28192999 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
28203000 {
2821
- struct cm_timewait_info *timewait_info;
28223001 struct cm_id_private *cm_id_priv;
28233002 __be32 remote_id;
28243003
2825
- remote_id = rej_msg->local_comm_id;
3004
+ remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));
28263005
2827
- if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2828
- spin_lock_irq(&cm.lock);
2829
- timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
2830
- remote_id);
2831
- if (!timewait_info) {
2832
- spin_unlock_irq(&cm.lock);
2833
- return NULL;
2834
- }
2835
- cm_id_priv = idr_find(&cm.local_id_table, (__force int)
2836
- (timewait_info->work.local_id ^
2837
- cm.random_id_operand));
2838
- if (cm_id_priv) {
2839
- if (cm_id_priv->id.remote_id == remote_id)
2840
- atomic_inc(&cm_id_priv->refcount);
2841
- else
2842
- cm_id_priv = NULL;
2843
- }
2844
- spin_unlock_irq(&cm.lock);
2845
- } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2846
- cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
3006
+ if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
3007
+ cm_id_priv = cm_find_remote_id(
3008
+ *((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
3009
+ remote_id);
3010
+ } else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
3011
+ CM_MSG_RESPONSE_REQ)
3012
+ cm_id_priv = cm_acquire_id(
3013
+ cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3014
+ 0);
28473015 else
2848
- cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
3016
+ cm_id_priv = cm_acquire_id(
3017
+ cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3018
+ remote_id);
28493019
28503020 return cm_id_priv;
28513021 }
....@@ -2854,7 +3024,6 @@
28543024 {
28553025 struct cm_id_private *cm_id_priv;
28563026 struct cm_rej_msg *rej_msg;
2857
- int ret;
28583027
28593028 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
28603029 cm_id_priv = cm_acquire_rejected_id(rej_msg);
....@@ -2870,17 +3039,17 @@
28703039 case IB_CM_REP_SENT:
28713040 case IB_CM_MRA_REP_RCVD:
28723041 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2873
- /* fall through */
3042
+ fallthrough;
28743043 case IB_CM_REQ_RCVD:
28753044 case IB_CM_MRA_REQ_SENT:
2876
- if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
3045
+ if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
28773046 cm_enter_timewait(cm_id_priv);
28783047 else
28793048 cm_reset_to_idle(cm_id_priv);
28803049 break;
28813050 case IB_CM_DREQ_SENT:
28823051 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2883
- /* fall through */
3052
+ fallthrough;
28843053 case IB_CM_REP_RCVD:
28853054 case IB_CM_MRA_REP_SENT:
28863055 cm_enter_timewait(cm_id_priv);
....@@ -2894,25 +3063,14 @@
28943063 cm_enter_timewait(cm_id_priv);
28953064 break;
28963065 }
2897
- /* fall through */
3066
+ fallthrough;
28983067 default:
3068
+ trace_icm_rej_unknown_err(&cm_id_priv->id);
28993069 spin_unlock_irq(&cm_id_priv->lock);
2900
- pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2901
- __func__, be32_to_cpu(cm_id_priv->id.local_id),
2902
- cm_id_priv->id.state);
2903
- ret = -EINVAL;
29043070 goto out;
29053071 }
29063072
2907
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
2908
- if (!ret)
2909
- list_add_tail(&work->list, &cm_id_priv->work_list);
2910
- spin_unlock_irq(&cm_id_priv->lock);
2911
-
2912
- if (ret)
2913
- cm_process_work(cm_id_priv, work);
2914
- else
2915
- cm_deref_id(cm_id_priv);
3073
+ cm_queue_work_unlock(cm_id_priv, work);
29163074 return 0;
29173075 out:
29183076 cm_deref_id(cm_id_priv);
....@@ -2961,11 +3119,9 @@
29613119 msg_response = CM_MSG_RESPONSE_OTHER;
29623120 break;
29633121 }
2964
- /* fall through */
3122
+ fallthrough;
29653123 default:
2966
- pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2967
- __func__, be32_to_cpu(cm_id_priv->id.local_id),
2968
- cm_id_priv->id.state);
3124
+ trace_icm_send_mra_unknown_err(&cm_id_priv->id);
29693125 ret = -EINVAL;
29703126 goto error1;
29713127 }
....@@ -2978,6 +3134,7 @@
29783134 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
29793135 msg_response, service_timeout,
29803136 private_data, private_data_len);
3137
+ trace_icm_send_mra(cm_id);
29813138 ret = ib_post_send_mad(msg, NULL);
29823139 if (ret)
29833140 goto error2;
....@@ -3003,13 +3160,16 @@
30033160
30043161 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
30053162 {
3006
- switch (cm_mra_get_msg_mraed(mra_msg)) {
3163
+ switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
30073164 case CM_MSG_RESPONSE_REQ:
3008
- return cm_acquire_id(mra_msg->remote_comm_id, 0);
3165
+ return cm_acquire_id(
3166
+ cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3167
+ 0);
30093168 case CM_MSG_RESPONSE_REP:
30103169 case CM_MSG_RESPONSE_OTHER:
3011
- return cm_acquire_id(mra_msg->remote_comm_id,
3012
- mra_msg->local_comm_id);
3170
+ return cm_acquire_id(
3171
+ cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3172
+ cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
30133173 default:
30143174 return NULL;
30153175 }
....@@ -3019,37 +3179,41 @@
30193179 {
30203180 struct cm_id_private *cm_id_priv;
30213181 struct cm_mra_msg *mra_msg;
3022
- int timeout, ret;
3182
+ int timeout;
30233183
30243184 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
30253185 cm_id_priv = cm_acquire_mraed_id(mra_msg);
30263186 if (!cm_id_priv)
30273187 return -EINVAL;
30283188
3029
- work->cm_event.private_data = &mra_msg->private_data;
3189
+ work->cm_event.private_data =
3190
+ IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
30303191 work->cm_event.param.mra_rcvd.service_timeout =
3031
- cm_mra_get_service_timeout(mra_msg);
3032
- timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
3192
+ IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
3193
+ timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
30333194 cm_convert_to_ms(cm_id_priv->av.timeout);
30343195
30353196 spin_lock_irq(&cm_id_priv->lock);
30363197 switch (cm_id_priv->id.state) {
30373198 case IB_CM_REQ_SENT:
3038
- if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
3199
+ if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3200
+ CM_MSG_RESPONSE_REQ ||
30393201 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30403202 cm_id_priv->msg, timeout))
30413203 goto out;
30423204 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
30433205 break;
30443206 case IB_CM_REP_SENT:
3045
- if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
3207
+ if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3208
+ CM_MSG_RESPONSE_REP ||
30463209 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30473210 cm_id_priv->msg, timeout))
30483211 goto out;
30493212 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
30503213 break;
30513214 case IB_CM_ESTABLISHED:
3052
- if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
3215
+ if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3216
+ CM_MSG_RESPONSE_OTHER ||
30533217 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
30543218 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30553219 cm_id_priv->msg, timeout)) {
....@@ -3065,25 +3229,15 @@
30653229 case IB_CM_MRA_REP_RCVD:
30663230 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30673231 counter[CM_MRA_COUNTER]);
3068
- /* fall through */
3232
+ fallthrough;
30693233 default:
3070
- pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
3071
- __func__, be32_to_cpu(cm_id_priv->id.local_id),
3072
- cm_id_priv->id.state);
3234
+ trace_icm_mra_unknown_err(&cm_id_priv->id);
30733235 goto out;
30743236 }
30753237
30763238 cm_id_priv->msg->context[1] = (void *) (unsigned long)
30773239 cm_id_priv->id.state;
3078
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
3079
- if (!ret)
3080
- list_add_tail(&work->list, &cm_id_priv->work_list);
3081
- spin_unlock_irq(&cm_id_priv->lock);
3082
-
3083
- if (ret)
3084
- cm_process_work(cm_id_priv, work);
3085
- else
3086
- cm_deref_id(cm_id_priv);
3240
+ cm_queue_work_unlock(cm_id_priv, work);
30873241 return 0;
30883242 out:
30893243 spin_unlock_irq(&cm_id_priv->lock);
....@@ -3091,117 +3245,23 @@
30913245 return -EINVAL;
30923246 }
30933247
3094
-static void cm_format_lap(struct cm_lap_msg *lap_msg,
3095
- struct cm_id_private *cm_id_priv,
3096
- struct sa_path_rec *alternate_path,
3097
- const void *private_data,
3098
- u8 private_data_len)
3099
-{
3100
- bool alt_ext = false;
3101
-
3102
- if (alternate_path->rec_type == SA_PATH_REC_TYPE_OPA)
3103
- alt_ext = opa_is_extended_lid(alternate_path->opa.dlid,
3104
- alternate_path->opa.slid);
3105
- cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
3106
- cm_form_tid(cm_id_priv));
3107
- lap_msg->local_comm_id = cm_id_priv->id.local_id;
3108
- lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
3109
- cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
3110
- /* todo: need remote CM response timeout */
3111
- cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
3112
- lap_msg->alt_local_lid =
3113
- htons(ntohl(sa_path_get_slid(alternate_path)));
3114
- lap_msg->alt_remote_lid =
3115
- htons(ntohl(sa_path_get_dlid(alternate_path)));
3116
- lap_msg->alt_local_gid = alternate_path->sgid;
3117
- lap_msg->alt_remote_gid = alternate_path->dgid;
3118
- if (alt_ext) {
3119
- lap_msg->alt_local_gid.global.interface_id
3120
- = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.slid));
3121
- lap_msg->alt_remote_gid.global.interface_id
3122
- = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.dlid));
3123
- }
3124
- cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
3125
- cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
3126
- lap_msg->alt_hop_limit = alternate_path->hop_limit;
3127
- cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
3128
- cm_lap_set_sl(lap_msg, alternate_path->sl);
3129
- cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
3130
- cm_lap_set_local_ack_timeout(lap_msg,
3131
- cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
3132
- alternate_path->packet_life_time));
3133
-
3134
- if (private_data && private_data_len)
3135
- memcpy(lap_msg->private_data, private_data, private_data_len);
3136
-}
3137
-
3138
-int ib_send_cm_lap(struct ib_cm_id *cm_id,
3139
- struct sa_path_rec *alternate_path,
3140
- const void *private_data,
3141
- u8 private_data_len)
3142
-{
3143
- struct cm_id_private *cm_id_priv;
3144
- struct ib_mad_send_buf *msg;
3145
- unsigned long flags;
3146
- int ret;
3147
-
3148
- if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
3149
- return -EINVAL;
3150
-
3151
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3152
- spin_lock_irqsave(&cm_id_priv->lock, flags);
3153
- if (cm_id->state != IB_CM_ESTABLISHED ||
3154
- (cm_id->lap_state != IB_CM_LAP_UNINIT &&
3155
- cm_id->lap_state != IB_CM_LAP_IDLE)) {
3156
- ret = -EINVAL;
3157
- goto out;
3158
- }
3159
-
3160
- ret = cm_init_av_by_path(alternate_path, NULL, &cm_id_priv->alt_av,
3161
- cm_id_priv);
3162
- if (ret)
3163
- goto out;
3164
- cm_id_priv->alt_av.timeout =
3165
- cm_ack_timeout(cm_id_priv->target_ack_delay,
3166
- cm_id_priv->alt_av.timeout - 1);
3167
-
3168
- ret = cm_alloc_msg(cm_id_priv, &msg);
3169
- if (ret)
3170
- goto out;
3171
-
3172
- cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
3173
- alternate_path, private_data, private_data_len);
3174
- msg->timeout_ms = cm_id_priv->timeout_ms;
3175
- msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
3176
-
3177
- ret = ib_post_send_mad(msg, NULL);
3178
- if (ret) {
3179
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3180
- cm_free_msg(msg);
3181
- return ret;
3182
- }
3183
-
3184
- cm_id->lap_state = IB_CM_LAP_SENT;
3185
- cm_id_priv->msg = msg;
3186
-
3187
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3188
- return ret;
3189
-}
3190
-EXPORT_SYMBOL(ib_send_cm_lap);
3191
-
31923248 static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
31933249 struct sa_path_rec *path)
31943250 {
31953251 u32 lid;
31963252
31973253 if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
3198
- sa_path_set_dlid(path, ntohs(lap_msg->alt_local_lid));
3199
- sa_path_set_slid(path, ntohs(lap_msg->alt_remote_lid));
3254
+ sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
3255
+ lap_msg));
3256
+ sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
3257
+ lap_msg));
32003258 } else {
3201
- lid = opa_get_lid_from_gid(&lap_msg->alt_local_gid);
3259
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3260
+ CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
32023261 sa_path_set_dlid(path, lid);
32033262
3204
- lid = opa_get_lid_from_gid(&lap_msg->alt_remote_gid);
3263
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3264
+ CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
32053265 sa_path_set_slid(path, lid);
32063266 }
32073267 }
....@@ -3210,20 +3270,23 @@
32103270 struct sa_path_rec *path,
32113271 struct cm_lap_msg *lap_msg)
32123272 {
3213
- path->dgid = lap_msg->alt_local_gid;
3214
- path->sgid = lap_msg->alt_remote_gid;
3215
- path->flow_label = cm_lap_get_flow_label(lap_msg);
3216
- path->hop_limit = lap_msg->alt_hop_limit;
3217
- path->traffic_class = cm_lap_get_traffic_class(lap_msg);
3273
+ path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
3274
+ path->sgid =
3275
+ *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
3276
+ path->flow_label =
3277
+ cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
3278
+ path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
3279
+ path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
32183280 path->reversible = 1;
32193281 path->pkey = cm_id_priv->pkey;
3220
- path->sl = cm_lap_get_sl(lap_msg);
3282
+ path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
32213283 path->mtu_selector = IB_SA_EQ;
32223284 path->mtu = cm_id_priv->path_mtu;
32233285 path->rate_selector = IB_SA_EQ;
3224
- path->rate = cm_lap_get_packet_rate(lap_msg);
3286
+ path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
32253287 path->packet_life_time_selector = IB_SA_EQ;
3226
- path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
3288
+ path->packet_life_time =
3289
+ IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
32273290 path->packet_life_time -= (path->packet_life_time > 0);
32283291 cm_format_path_lid_from_lap(lap_msg, path);
32293292 }
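
The two hunks above replace direct access to packed struct cm_lap_msg members (and the cm_lap_get_*/cm_lap_set_* wrappers) with IBA_GET()/IBA_SET()/IBA_GET_MEM_PTR() accessors that name each CM MAD field and hand back host-endian values, which is why the call sites now carry explicit cpu_to_be32()/be64_to_cpu() conversions. A minimal userspace sketch of the underlying idea, reading and writing a big-endian wire field through a named accessor instead of a struct member; the FLOW_LABEL offset and function names are invented for illustration and are not the kernel macros:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* htonl()/ntohl() */

/* Hypothetical descriptor: a 32-bit big-endian field at byte offset 8. */
#define EXAMPLE_FLOW_LABEL_OFFSET 8

/* GET: read the big-endian wire field, return it in host order. */
static uint32_t example_get_flow_label(const uint8_t *msg)
{
	uint32_t be;

	memcpy(&be, msg + EXAMPLE_FLOW_LABEL_OFFSET, sizeof(be));
	return ntohl(be);
}

/* SET: take a host-order value, store it big-endian into the buffer. */
static void example_set_flow_label(uint8_t *msg, uint32_t val)
{
	uint32_t be = htonl(val);

	memcpy(msg + EXAMPLE_FLOW_LABEL_OFFSET, &be, sizeof(be));
}

int main(void)
{
	uint8_t msg[64] = { 0 };

	example_set_flow_label(msg, 0xabcde);
	printf("flow label: 0x%x\n", example_get_flow_label(msg));
	return 0;
}

The real accessors also cope with fields that are narrower than a byte or straddle byte boundaries; the sketch only shows the offset-plus-endianness part.
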
....@@ -3245,20 +3308,22 @@
32453308
32463309 /* todo: verify LAP request and send reject APR if invalid. */
32473310 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
3248
- cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
3249
- lap_msg->local_comm_id);
3311
+ cm_id_priv = cm_acquire_id(
3312
+ cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
3313
+ cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
32503314 if (!cm_id_priv)
32513315 return -EINVAL;
32523316
32533317 param = &work->cm_event.param.lap_rcvd;
32543318 memset(&work->path[0], 0, sizeof(work->path[1]));
32553319 cm_path_set_rec_type(work->port->cm_dev->ib_device,
3256
- work->port->port_num,
3257
- &work->path[0],
3258
- &lap_msg->alt_local_gid);
3320
+ work->port->port_num, &work->path[0],
3321
+ IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
3322
+ lap_msg));
32593323 param->alternate_path = &work->path[0];
32603324 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
3261
- work->cm_event.private_data = &lap_msg->private_data;
3325
+ work->cm_event.private_data =
3326
+ IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);
32623327
32633328 spin_lock_irq(&cm_id_priv->lock);
32643329 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
....@@ -3307,15 +3372,7 @@
33073372
33083373 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
33093374 cm_id_priv->tid = lap_msg->hdr.tid;
3310
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
3311
- if (!ret)
3312
- list_add_tail(&work->list, &cm_id_priv->work_list);
3313
- spin_unlock_irq(&cm_id_priv->lock);
3314
-
3315
- if (ret)
3316
- cm_process_work(cm_id_priv, work);
3317
- else
3318
- cm_deref_id(cm_id_priv);
3375
+ cm_queue_work_unlock(cm_id_priv, work);
33193376 return 0;
33203377
33213378 unlock: spin_unlock_irq(&cm_id_priv->lock);
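
The LAP, APR and timewait handlers in this patch all drop the same open-coded dispatch sequence (atomic_inc_and_test() on work_count, conditional list_add_tail(), unlock, then cm_process_work() or cm_deref_id()) in favour of cm_queue_work_unlock(). The helper itself is not visible in these hunks; judging only by the lines it replaces, it presumably looks roughly like this kernel-style sketch:

/* Reconstructed from the removed call sites; the real helper in cm.c may
 * differ in detail. Called with cm_id_priv->lock held, returns unlocked. */
static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
				 struct cm_work *work)
{
	bool immediate;

	/* The first work item on an otherwise idle ID is handled directly. */
	immediate = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!immediate)
		/* Otherwise queue it behind the work already in flight. */
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (immediate)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
}

Centralising the sequence removes three copies of easy-to-get-wrong reference counting and locking logic from the receive handlers.
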
....@@ -3323,77 +3380,10 @@
33233380 return -EINVAL;
33243381 }
33253382
3326
-static void cm_format_apr(struct cm_apr_msg *apr_msg,
3327
- struct cm_id_private *cm_id_priv,
3328
- enum ib_cm_apr_status status,
3329
- void *info,
3330
- u8 info_length,
3331
- const void *private_data,
3332
- u8 private_data_len)
3333
-{
3334
- cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
3335
- apr_msg->local_comm_id = cm_id_priv->id.local_id;
3336
- apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
3337
- apr_msg->ap_status = (u8) status;
3338
-
3339
- if (info && info_length) {
3340
- apr_msg->info_length = info_length;
3341
- memcpy(apr_msg->info, info, info_length);
3342
- }
3343
-
3344
- if (private_data && private_data_len)
3345
- memcpy(apr_msg->private_data, private_data, private_data_len);
3346
-}
3347
-
3348
-int ib_send_cm_apr(struct ib_cm_id *cm_id,
3349
- enum ib_cm_apr_status status,
3350
- void *info,
3351
- u8 info_length,
3352
- const void *private_data,
3353
- u8 private_data_len)
3354
-{
3355
- struct cm_id_private *cm_id_priv;
3356
- struct ib_mad_send_buf *msg;
3357
- unsigned long flags;
3358
- int ret;
3359
-
3360
- if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
3361
- (info && info_length > IB_CM_APR_INFO_LENGTH))
3362
- return -EINVAL;
3363
-
3364
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3365
- spin_lock_irqsave(&cm_id_priv->lock, flags);
3366
- if (cm_id->state != IB_CM_ESTABLISHED ||
3367
- (cm_id->lap_state != IB_CM_LAP_RCVD &&
3368
- cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
3369
- ret = -EINVAL;
3370
- goto out;
3371
- }
3372
-
3373
- ret = cm_alloc_msg(cm_id_priv, &msg);
3374
- if (ret)
3375
- goto out;
3376
-
3377
- cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
3378
- info, info_length, private_data, private_data_len);
3379
- ret = ib_post_send_mad(msg, NULL);
3380
- if (ret) {
3381
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3382
- cm_free_msg(msg);
3383
- return ret;
3384
- }
3385
-
3386
- cm_id->lap_state = IB_CM_LAP_IDLE;
3387
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3388
- return ret;
3389
-}
3390
-EXPORT_SYMBOL(ib_send_cm_apr);
3391
-
33923383 static int cm_apr_handler(struct cm_work *work)
33933384 {
33943385 struct cm_id_private *cm_id_priv;
33953386 struct cm_apr_msg *apr_msg;
3396
- int ret;
33973387
33983388 /* Currently Alternate path messages are not supported for
33993389 * RoCE link layer.
....@@ -3403,15 +3393,20 @@
34033393 return -EINVAL;
34043394
34053395 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3406
- cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
3407
- apr_msg->local_comm_id);
3396
+ cm_id_priv = cm_acquire_id(
3397
+ cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
3398
+ cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
34083399 if (!cm_id_priv)
34093400 return -EINVAL; /* Unmatched reply. */
34103401
3411
- work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
3412
- work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
3413
- work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
3414
- work->cm_event.private_data = &apr_msg->private_data;
3402
+ work->cm_event.param.apr_rcvd.ap_status =
3403
+ IBA_GET(CM_APR_AR_STATUS, apr_msg);
3404
+ work->cm_event.param.apr_rcvd.apr_info =
3405
+ IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
3406
+ work->cm_event.param.apr_rcvd.info_len =
3407
+ IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
3408
+ work->cm_event.private_data =
3409
+ IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);
34153410
34163411 spin_lock_irq(&cm_id_priv->lock);
34173412 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
....@@ -3423,16 +3418,7 @@
34233418 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
34243419 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
34253420 cm_id_priv->msg = NULL;
3426
-
3427
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
3428
- if (!ret)
3429
- list_add_tail(&work->list, &cm_id_priv->work_list);
3430
- spin_unlock_irq(&cm_id_priv->lock);
3431
-
3432
- if (ret)
3433
- cm_process_work(cm_id_priv, work);
3434
- else
3435
- cm_deref_id(cm_id_priv);
3421
+ cm_queue_work_unlock(cm_id_priv, work);
34363422 return 0;
34373423 out:
34383424 cm_deref_id(cm_id_priv);
....@@ -3443,9 +3429,8 @@
34433429 {
34443430 struct cm_timewait_info *timewait_info;
34453431 struct cm_id_private *cm_id_priv;
3446
- int ret;
34473432
3448
- timewait_info = (struct cm_timewait_info *)work;
3433
+ timewait_info = container_of(work, struct cm_timewait_info, work);
34493434 spin_lock_irq(&cm.lock);
34503435 list_del(&timewait_info->list);
34513436 spin_unlock_irq(&cm.lock);
....@@ -3462,15 +3447,7 @@
34623447 goto out;
34633448 }
34643449 cm_id_priv->id.state = IB_CM_IDLE;
3465
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
3466
- if (!ret)
3467
- list_add_tail(&work->list, &cm_id_priv->work_list);
3468
- spin_unlock_irq(&cm_id_priv->lock);
3469
-
3470
- if (ret)
3471
- cm_process_work(cm_id_priv, work);
3472
- else
3473
- cm_deref_id(cm_id_priv);
3450
+ cm_queue_work_unlock(cm_id_priv, work);
34743451 return 0;
34753452 out:
34763453 cm_deref_id(cm_id_priv);
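
In cm_timewait_handler() the plain cast from the work pointer to struct cm_timewait_info is replaced by container_of(work, struct cm_timewait_info, work), which derives the address of the enclosing structure from the address of the embedded member and therefore keeps working even if the member ever stops being the first field. A self-contained illustration of the same idiom; the structure names below are invented for the example:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel macro: subtract the member's offset from the
 * member pointer to recover the enclosing object. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {
	int pending;
};

struct timewait_info {
	long expires;
	struct work_item work;	/* embedded member, deliberately not first */
};

static void handle(struct work_item *w)
{
	struct timewait_info *tw = container_of(w, struct timewait_info, work);

	printf("expires=%ld\n", tw->expires);
}

int main(void)
{
	struct timewait_info tw = { .expires = 42 };

	handle(&tw.work);
	return 0;
}
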
....@@ -3483,13 +3460,16 @@
34833460 {
34843461 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
34853462 cm_form_tid(cm_id_priv));
3486
- sidr_req_msg->request_id = cm_id_priv->id.local_id;
3487
- sidr_req_msg->pkey = param->path->pkey;
3488
- sidr_req_msg->service_id = param->service_id;
3463
+ IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
3464
+ be32_to_cpu(cm_id_priv->id.local_id));
3465
+ IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
3466
+ be16_to_cpu(param->path->pkey));
3467
+ IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
3468
+ be64_to_cpu(param->service_id));
34893469
34903470 if (param->private_data && param->private_data_len)
3491
- memcpy(sidr_req_msg->private_data, param->private_data,
3492
- param->private_data_len);
3471
+ IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
3472
+ param->private_data, param->private_data_len);
34933473 }
34943474
34953475 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
....@@ -3525,10 +3505,12 @@
35253505 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
35263506
35273507 spin_lock_irqsave(&cm_id_priv->lock, flags);
3528
- if (cm_id->state == IB_CM_IDLE)
3508
+ if (cm_id->state == IB_CM_IDLE) {
3509
+ trace_icm_send_sidr_req(&cm_id_priv->id);
35293510 ret = ib_post_send_mad(msg, NULL);
3530
- else
3511
+ } else {
35313512 ret = -EINVAL;
3513
+ }
35323514
35333515 if (ret) {
35343516 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
....@@ -3553,31 +3535,40 @@
35533535 sidr_req_msg = (struct cm_sidr_req_msg *)
35543536 work->mad_recv_wc->recv_buf.mad;
35553537 param = &work->cm_event.param.sidr_req_rcvd;
3556
- param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
3538
+ param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
35573539 param->listen_id = listen_id;
3558
- param->service_id = sidr_req_msg->service_id;
3540
+ param->service_id =
3541
+ cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
35593542 param->bth_pkey = cm_get_bth_pkey(work);
35603543 param->port = work->port->port_num;
35613544 param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
3562
- work->cm_event.private_data = &sidr_req_msg->private_data;
3545
+ work->cm_event.private_data =
3546
+ IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
35633547 }
35643548
35653549 static int cm_sidr_req_handler(struct cm_work *work)
35663550 {
3567
- struct ib_cm_id *cm_id;
3568
- struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
3551
+ struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
35693552 struct cm_sidr_req_msg *sidr_req_msg;
35703553 struct ib_wc *wc;
35713554 int ret;
35723555
3573
- cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
3574
- if (IS_ERR(cm_id))
3575
- return PTR_ERR(cm_id);
3576
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3556
+ cm_id_priv =
3557
+ cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
3558
+ if (IS_ERR(cm_id_priv))
3559
+ return PTR_ERR(cm_id_priv);
35773560
35783561 /* Record SGID/SLID and request ID for lookup. */
35793562 sidr_req_msg = (struct cm_sidr_req_msg *)
35803563 work->mad_recv_wc->recv_buf.mad;
3564
+
3565
+ cm_id_priv->id.remote_id =
3566
+ cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
3567
+ cm_id_priv->id.service_id =
3568
+ cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3569
+ cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3570
+ cm_id_priv->tid = sidr_req_msg->hdr.tid;
3571
+
35813572 wc = work->mad_recv_wc->wc;
35823573 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
35833574 cm_id_priv->av.dgid.global.interface_id = 0;
....@@ -3587,38 +3578,45 @@
35873578 if (ret)
35883579 goto out;
35893580
3590
- cm_id_priv->id.remote_id = sidr_req_msg->request_id;
3591
- cm_id_priv->tid = sidr_req_msg->hdr.tid;
3592
- atomic_inc(&cm_id_priv->work_count);
3593
-
35943581 spin_lock_irq(&cm.lock);
3595
- cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3596
- if (cur_cm_id_priv) {
3582
+ listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3583
+ if (listen_cm_id_priv) {
35973584 spin_unlock_irq(&cm.lock);
35983585 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35993586 counter[CM_SIDR_REQ_COUNTER]);
36003587 goto out; /* Duplicate message. */
36013588 }
36023589 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3603
- cur_cm_id_priv = cm_find_listen(cm_id->device,
3604
- sidr_req_msg->service_id);
3605
- if (!cur_cm_id_priv) {
3590
+ listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
3591
+ cm_id_priv->id.service_id);
3592
+ if (!listen_cm_id_priv) {
36063593 spin_unlock_irq(&cm.lock);
3607
- cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
3594
+ ib_send_cm_sidr_rep(&cm_id_priv->id,
3595
+ &(struct ib_cm_sidr_rep_param){
3596
+ .status = IB_SIDR_UNSUPPORTED });
36083597 goto out; /* No match. */
36093598 }
3610
- atomic_inc(&cur_cm_id_priv->refcount);
3611
- atomic_inc(&cm_id_priv->refcount);
36123599 spin_unlock_irq(&cm.lock);
36133600
3614
- cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
3615
- cm_id_priv->id.context = cur_cm_id_priv->id.context;
3616
- cm_id_priv->id.service_id = sidr_req_msg->service_id;
3617
- cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3601
+ cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
3602
+ cm_id_priv->id.context = listen_cm_id_priv->id.context;
36183603
3619
- cm_format_sidr_req_event(work, cm_id_priv, &cur_cm_id_priv->id);
3620
- cm_process_work(cm_id_priv, work);
3621
- cm_deref_id(cur_cm_id_priv);
3604
+ /*
3605
+ * A SIDR ID does not need to be in the xarray since it does not receive
3606
+ * mads, is not placed in the remote_id or remote_qpn rbtree, and does
3607
+ * not enter timewait.
3608
+ */
3609
+
3610
+ cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
3611
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
3612
+ cm_free_work(work);
3613
+ /*
3614
+ * A pointer to the listen_cm_id is held in the event, so this deref
3615
+ * must be after the event is delivered above.
3616
+ */
3617
+ cm_deref_id(listen_cm_id_priv);
3618
+ if (ret)
3619
+ cm_destroy_id(&cm_id_priv->id, ret);
36223620 return 0;
36233621 out:
36243622 ib_destroy_cm_id(&cm_id_priv->id);
....@@ -3629,57 +3627,59 @@
36293627 struct cm_id_private *cm_id_priv,
36303628 struct ib_cm_sidr_rep_param *param)
36313629 {
3632
- cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3633
- cm_id_priv->tid);
3634
- sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
3635
- sidr_rep_msg->status = param->status;
3636
- cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
3637
- sidr_rep_msg->service_id = cm_id_priv->id.service_id;
3638
- sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
3630
+ cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3631
+ cm_id_priv->tid, param->ece.attr_mod);
3632
+ IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
3633
+ be32_to_cpu(cm_id_priv->id.remote_id));
3634
+ IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
3635
+ IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
3636
+ IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
3637
+ be64_to_cpu(cm_id_priv->id.service_id));
3638
+ IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
3639
+ IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
3640
+ param->ece.vendor_id & 0xFF);
3641
+ IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
3642
+ (param->ece.vendor_id >> 8) & 0xFF);
36393643
36403644 if (param->info && param->info_length)
3641
- memcpy(sidr_rep_msg->info, param->info, param->info_length);
3645
+ IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
3646
+ param->info, param->info_length);
36423647
36433648 if (param->private_data && param->private_data_len)
3644
- memcpy(sidr_rep_msg->private_data, param->private_data,
3645
- param->private_data_len);
3649
+ IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
3650
+ param->private_data, param->private_data_len);
36463651 }
36473652
3648
-int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3649
- struct ib_cm_sidr_rep_param *param)
3653
+static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
3654
+ struct ib_cm_sidr_rep_param *param)
36503655 {
3651
- struct cm_id_private *cm_id_priv;
36523656 struct ib_mad_send_buf *msg;
36533657 unsigned long flags;
36543658 int ret;
3659
+
3660
+ lockdep_assert_held(&cm_id_priv->lock);
36553661
36563662 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
36573663 (param->private_data &&
36583664 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
36593665 return -EINVAL;
36603666
3661
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3662
- spin_lock_irqsave(&cm_id_priv->lock, flags);
3663
- if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
3664
- ret = -EINVAL;
3665
- goto error;
3666
- }
3667
+ if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
3668
+ return -EINVAL;
36673669
36683670 ret = cm_alloc_msg(cm_id_priv, &msg);
36693671 if (ret)
3670
- goto error;
3672
+ return ret;
36713673
36723674 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
36733675 param);
3676
+ trace_icm_send_sidr_rep(&cm_id_priv->id);
36743677 ret = ib_post_send_mad(msg, NULL);
36753678 if (ret) {
3676
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
36773679 cm_free_msg(msg);
36783680 return ret;
36793681 }
3680
- cm_id->state = IB_CM_IDLE;
3681
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3682
-
3682
+ cm_id_priv->id.state = IB_CM_IDLE;
36833683 spin_lock_irqsave(&cm.lock, flags);
36843684 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
36853685 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
....@@ -3687,8 +3687,19 @@
36873687 }
36883688 spin_unlock_irqrestore(&cm.lock, flags);
36893689 return 0;
3690
+}
36903691
3691
-error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3692
+int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3693
+ struct ib_cm_sidr_rep_param *param)
3694
+{
3695
+ struct cm_id_private *cm_id_priv =
3696
+ container_of(cm_id, struct cm_id_private, id);
3697
+ unsigned long flags;
3698
+ int ret;
3699
+
3700
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
3701
+ ret = cm_send_sidr_rep_locked(cm_id_priv, param);
3702
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
36923703 return ret;
36933704 }
36943705 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
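
ib_send_cm_sidr_rep() is split into cm_send_sidr_rep_locked(), which contains the real logic, asserts via lockdep_assert_held() that cm_id_priv->lock is already held, and returns errors directly instead of jumping to an unlock label, plus a thin exported wrapper that only takes and drops the lock. A generic kernel-context sketch of that split; struct foo, FOO_READY and the function names are made up to show the shape of the pattern, not taken from cm.c:

struct foo {
	spinlock_t lock;
	int state;
	int pending_arg;
};

#define FOO_READY 1

/* The "_locked" variant holds the caller's lock and does the work. */
static int foo_do_thing_locked(struct foo *f, int arg)
{
	lockdep_assert_held(&f->lock);

	if (f->state != FOO_READY)
		return -EINVAL;		/* plain returns, no unlock label */

	f->pending_arg = arg;
	return 0;
}

/* The public entry point is reduced to lock, call, unlock. */
int foo_do_thing(struct foo *f, int arg)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&f->lock, flags);
	ret = foo_do_thing_locked(f, arg);
	spin_unlock_irqrestore(&f->lock, flags);
	return ret;
}

The payoff is that other paths inside the driver which already hold the lock can call the locked variant without re-acquiring it.
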
....@@ -3702,13 +3713,16 @@
37023713 sidr_rep_msg = (struct cm_sidr_rep_msg *)
37033714 work->mad_recv_wc->recv_buf.mad;
37043715 param = &work->cm_event.param.sidr_rep_rcvd;
3705
- param->status = sidr_rep_msg->status;
3706
- param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
3707
- param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
3708
- param->info = &sidr_rep_msg->info;
3709
- param->info_len = sidr_rep_msg->info_length;
3716
+ param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
3717
+ param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
3718
+ param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
3719
+ param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
3720
+ sidr_rep_msg);
3721
+ param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
3722
+ sidr_rep_msg);
37103723 param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
3711
- work->cm_event.private_data = &sidr_rep_msg->private_data;
3724
+ work->cm_event.private_data =
3725
+ IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
37123726 }
37133727
37143728 static int cm_sidr_rep_handler(struct cm_work *work)
....@@ -3718,7 +3732,8 @@
37183732
37193733 sidr_rep_msg = (struct cm_sidr_rep_msg *)
37203734 work->mad_recv_wc->recv_buf.mad;
3721
- cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
3735
+ cm_id_priv = cm_acquire_id(
3736
+ cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
37223737 if (!cm_id_priv)
37233738 return -EINVAL; /* Unmatched reply. */
37243739
....@@ -3756,8 +3771,7 @@
37563771 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
37573772 goto discard;
37583773
3759
- pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
3760
- state, ib_wc_status_msg(wc_status));
3774
+ trace_icm_mad_send_err(state, wc_status);
37613775 switch (state) {
37623776 case IB_CM_REQ_SENT:
37633777 case IB_CM_MRA_REQ_RCVD:
....@@ -3880,7 +3894,7 @@
38803894 ret = cm_timewait_handler(work);
38813895 break;
38823896 default:
3883
- pr_debug("cm_event.event: 0x%x\n", work->cm_event.event);
3897
+ trace_icm_handler_err(work->cm_event.event);
38843898 ret = -EINVAL;
38853899 break;
38863900 }
....@@ -3916,8 +3930,7 @@
39163930 ret = -EISCONN;
39173931 break;
39183932 default:
3919
- pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
3920
- be32_to_cpu(cm_id->local_id), cm_id->state);
3933
+ trace_icm_establish_err(cm_id);
39213934 ret = -EINVAL;
39223935 break;
39233936 }
....@@ -4060,8 +4073,7 @@
40604073 atomic_long_inc(&port->counter_group[CM_RECV].
40614074 counter[attr_id - CM_ATTR_ID_OFFSET]);
40624075
4063
- work = kmalloc(sizeof(*work) + sizeof(struct sa_path_rec) * paths,
4064
- GFP_KERNEL);
4076
+ work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
40654077 if (!work) {
40664078 ib_free_recv_mad(mad_recv_wc);
40674079 return;
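
The open-coded allocation size sizeof(*work) + sizeof(struct sa_path_rec) * paths becomes struct_size(work, path, paths), which computes the size of a structure ending in a flexible array and, in the kernel, saturates on multiplication or addition overflow so that an absurd element count fails the allocation instead of silently wrapping. A plain C sketch of roughly what the expression evaluates to, without the overflow protection; work_size() and the structures are invented for the example:

#include <stdio.h>
#include <stdlib.h>

struct path_rec {
	unsigned int dlid;
	unsigned int slid;
};

struct work {
	int type;
	struct path_rec path[];		/* trailing flexible array member */
};

/* Roughly what struct_size(w, path, n) works out to, minus the
 * saturating arithmetic the kernel helper adds. */
static size_t work_size(size_t n)
{
	return sizeof(struct work) + n * sizeof(struct path_rec);
}

int main(void)
{
	size_t paths = 2;
	struct work *w = malloc(work_size(paths));

	if (!w)
		return 1;
	w->type = 0;
	w->path[1].dlid = 7;		/* second element of the array */
	printf("allocated %zu bytes\n", work_size(paths));
	free(w);
	return 0;
}
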
....@@ -4115,9 +4127,7 @@
41154127 ret = 0;
41164128 break;
41174129 default:
4118
- pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
4119
- __func__, be32_to_cpu(cm_id_priv->id.local_id),
4120
- cm_id_priv->id.state);
4130
+ trace_icm_qp_init_err(&cm_id_priv->id);
41214131 ret = -EINVAL;
41224132 break;
41234133 }
....@@ -4165,9 +4175,7 @@
41654175 ret = 0;
41664176 break;
41674177 default:
4168
- pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
4169
- __func__, be32_to_cpu(cm_id_priv->id.local_id),
4170
- cm_id_priv->id.state);
4178
+ trace_icm_qp_rtr_err(&cm_id_priv->id);
41714179 ret = -EINVAL;
41724180 break;
41734181 }
....@@ -4204,7 +4212,7 @@
42044212 qp_attr->retry_cnt = cm_id_priv->retry_count;
42054213 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
42064214 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
4207
- /* fall through */
4215
+ fallthrough;
42084216 case IB_QPT_XRC_TGT:
42094217 *qp_attr_mask |= IB_QP_TIMEOUT;
42104218 qp_attr->timeout = cm_id_priv->av.timeout;
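
The /* fall through */ comment becomes the fallthrough pseudo-keyword, which the kernel maps to the compiler's fallthrough statement attribute when available, so that -Wimplicit-fallthrough can tell an intentional fall-through from a forgotten break. A small standalone illustration; outside the kernel there is no fallthrough macro, so the example defines one (this form needs GCC 7+ or a recent Clang):

#include <stdio.h>

/* Userspace stand-in for the kernel's macro. */
#define fallthrough __attribute__((__fallthrough__))

static const char *qp_role(int qp_type)
{
	switch (qp_type) {
	case 1:				/* initiator-style QP */
		printf("initiator attributes apply\n");
		fallthrough;		/* deliberately continue into case 2 */
	case 2:				/* target-style QP */
		return "target attributes also apply";
	default:
		return "unsupported";
	}
}

int main(void)
{
	puts(qp_role(1));
	return 0;
}
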
....@@ -4227,9 +4235,7 @@
42274235 ret = 0;
42284236 break;
42294237 default:
4230
- pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
4231
- __func__, be32_to_cpu(cm_id_priv->id.local_id),
4232
- cm_id_priv->id.state);
4238
+ trace_icm_qp_rts_err(&cm_id_priv->id);
42334239 ret = -EINVAL;
42344240 break;
42354241 }
....@@ -4285,49 +4291,16 @@
42854291 .default_attrs = cm_counter_default_attrs
42864292 };
42874293
4288
-static void cm_release_port_obj(struct kobject *obj)
4289
-{
4290
- struct cm_port *cm_port;
4291
-
4292
- cm_port = container_of(obj, struct cm_port, port_obj);
4293
- kfree(cm_port);
4294
-}
4295
-
4296
-static struct kobj_type cm_port_obj_type = {
4297
- .release = cm_release_port_obj
4298
-};
4299
-
4300
-static char *cm_devnode(struct device *dev, umode_t *mode)
4301
-{
4302
- if (mode)
4303
- *mode = 0666;
4304
- return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
4305
-}
4306
-
4307
-struct class cm_class = {
4308
- .owner = THIS_MODULE,
4309
- .name = "infiniband_cm",
4310
- .devnode = cm_devnode,
4311
-};
4312
-EXPORT_SYMBOL(cm_class);
4313
-
43144294 static int cm_create_port_fs(struct cm_port *port)
43154295 {
43164296 int i, ret;
43174297
4318
- ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
4319
- &port->cm_dev->device->kobj,
4320
- "%d", port->port_num);
4321
- if (ret) {
4322
- kfree(port);
4323
- return ret;
4324
- }
4325
-
43264298 for (i = 0; i < CM_COUNTER_GROUPS; i++) {
4327
- ret = kobject_init_and_add(&port->counter_group[i].obj,
4328
- &cm_counter_obj_type,
4329
- &port->port_obj,
4330
- "%s", counter_group_names[i]);
4299
+ ret = ib_port_register_module_stat(port->cm_dev->ib_device,
4300
+ port->port_num,
4301
+ &port->counter_group[i].obj,
4302
+ &cm_counter_obj_type,
4303
+ counter_group_names[i]);
43314304 if (ret)
43324305 goto error;
43334306 }
....@@ -4336,8 +4309,7 @@
43364309
43374310 error:
43384311 while (i--)
4339
- kobject_put(&port->counter_group[i].obj);
4340
- kobject_put(&port->port_obj);
4312
+ ib_port_unregister_module_stat(&port->counter_group[i].obj);
43414313 return ret;
43424314
43434315 }
....@@ -4347,12 +4319,11 @@
43474319 int i;
43484320
43494321 for (i = 0; i < CM_COUNTER_GROUPS; i++)
4350
- kobject_put(&port->counter_group[i].obj);
4322
+ ib_port_unregister_module_stat(&port->counter_group[i].obj);
43514323
4352
- kobject_put(&port->port_obj);
43534324 }
43544325
4355
-static void cm_add_one(struct ib_device *ib_device)
4326
+static int cm_add_one(struct ib_device *ib_device)
43564327 {
43574328 struct cm_device *cm_dev;
43584329 struct cm_port *port;
....@@ -4366,32 +4337,27 @@
43664337 unsigned long flags;
43674338 int ret;
43684339 int count = 0;
4369
- u8 i;
4340
+ unsigned int i;
43704341
43714342 cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
43724343 GFP_KERNEL);
43734344 if (!cm_dev)
4374
- return;
4345
+ return -ENOMEM;
43754346
43764347 cm_dev->ib_device = ib_device;
43774348 cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
43784349 cm_dev->going_down = 0;
4379
- cm_dev->device = device_create(&cm_class, &ib_device->dev,
4380
- MKDEV(0, 0), NULL,
4381
- "%s", ib_device->name);
4382
- if (IS_ERR(cm_dev->device)) {
4383
- kfree(cm_dev);
4384
- return;
4385
- }
43864350
43874351 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
4388
- for (i = 1; i <= ib_device->phys_port_cnt; i++) {
4352
+ rdma_for_each_port (ib_device, i) {
43894353 if (!rdma_cap_ib_cm(ib_device, i))
43904354 continue;
43914355
43924356 port = kzalloc(sizeof *port, GFP_KERNEL);
4393
- if (!port)
4357
+ if (!port) {
4358
+ ret = -ENOMEM;
43944359 goto error1;
4360
+ }
43954361
43964362 cm_dev->port[i-1] = port;
43974363 port->cm_dev = cm_dev;
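
cm_add_one() now reports failure: it returns int, and each error path fills in ret (-ENOMEM for allocation failures, PTR_ERR() of the mad agent, -EOPNOTSUPP when no port supports the CM) before jumping to the cleanup labels. The port walk also switches from a hand-rolled 1-based loop over phys_port_cnt to rdma_for_each_port(), with the iterator widened from u8 to unsigned int. A kernel-context sketch of the iteration idiom as used here; example_walk_ports() is an invented name and the loop body just mirrors the shape of the code above:

/* Walk every port of an ib_device, skipping ports without IB CM support. */
static void example_walk_ports(struct ib_device *ib_device)
{
	unsigned int i;

	rdma_for_each_port(ib_device, i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;
		/* Port numbers are 1-based, so per-port state kept in a
		 * 0-based array is indexed with i - 1, as above. */
		pr_info("port %u supports the IB CM\n", i);
	}
}
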
....@@ -4412,8 +4378,10 @@
44124378 cm_recv_handler,
44134379 port,
44144380 0);
4415
- if (IS_ERR(port->mad_agent))
4381
+ if (IS_ERR(port->mad_agent)) {
4382
+ ret = PTR_ERR(port->mad_agent);
44164383 goto error2;
4384
+ }
44174385
44184386 ret = ib_modify_port(ib_device, i, 0, &port_modify);
44194387 if (ret)
....@@ -4422,15 +4390,17 @@
44224390 count++;
44234391 }
44244392
4425
- if (!count)
4393
+ if (!count) {
4394
+ ret = -EOPNOTSUPP;
44264395 goto free;
4396
+ }
44274397
44284398 ib_set_client_data(ib_device, &cm_client, cm_dev);
44294399
44304400 write_lock_irqsave(&cm.device_lock, flags);
44314401 list_add_tail(&cm_dev->list, &cm.device_list);
44324402 write_unlock_irqrestore(&cm.device_lock, flags);
4433
- return;
4403
+ return 0;
44344404
44354405 error3:
44364406 ib_unregister_mad_agent(port->mad_agent);
....@@ -4439,6 +4409,7 @@
44394409 error1:
44404410 port_modify.set_port_cap_mask = 0;
44414411 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
4412
+ kfree(port);
44424413 while (--i) {
44434414 if (!rdma_cap_ib_cm(ib_device, i))
44444415 continue;
....@@ -4447,10 +4418,11 @@
44474418 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
44484419 ib_unregister_mad_agent(port->mad_agent);
44494420 cm_remove_port_fs(port);
4421
+ kfree(port);
44504422 }
44514423 free:
4452
- device_unregister(cm_dev->device);
44534424 kfree(cm_dev);
4425
+ return ret;
44544426 }
44554427
44564428 static void cm_remove_one(struct ib_device *ib_device, void *client_data)
....@@ -4463,10 +4435,7 @@
44634435 .clr_port_cap_mask = IB_PORT_CM_SUP
44644436 };
44654437 unsigned long flags;
4466
- int i;
4467
-
4468
- if (!cm_dev)
4469
- return;
4438
+ unsigned int i;
44704439
44714440 write_lock_irqsave(&cm.device_lock, flags);
44724441 list_del(&cm_dev->list);
....@@ -4476,7 +4445,7 @@
44764445 cm_dev->going_down = 1;
44774446 spin_unlock_irq(&cm.lock);
44784447
4479
- for (i = 1; i <= ib_device->phys_port_cnt; i++) {
4448
+ rdma_for_each_port (ib_device, i) {
44804449 if (!rdma_cap_ib_cm(ib_device, i))
44814450 continue;
44824451
....@@ -4501,9 +4470,9 @@
45014470 spin_unlock_irq(&cm.state_lock);
45024471 ib_unregister_mad_agent(cur_mad_agent);
45034472 cm_remove_port_fs(port);
4473
+ kfree(port);
45044474 }
45054475
4506
- device_unregister(cm_dev->device);
45074476 kfree(cm_dev);
45084477 }
45094478
....@@ -4511,7 +4480,6 @@
45114480 {
45124481 int ret;
45134482
4514
- memset(&cm, 0, sizeof cm);
45154483 INIT_LIST_HEAD(&cm.device_list);
45164484 rwlock_init(&cm.device_lock);
45174485 spin_lock_init(&cm.lock);
....@@ -4521,15 +4489,9 @@
45214489 cm.remote_id_table = RB_ROOT;
45224490 cm.remote_qp_table = RB_ROOT;
45234491 cm.remote_sidr_table = RB_ROOT;
4524
- idr_init(&cm.local_id_table);
4492
+ xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
45254493 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
45264494 INIT_LIST_HEAD(&cm.timewait_list);
4527
-
4528
- ret = class_register(&cm_class);
4529
- if (ret) {
4530
- ret = -ENOMEM;
4531
- goto error1;
4532
- }
45334495
45344496 cm.wq = alloc_workqueue("ib_cm", 0, 1);
45354497 if (!cm.wq) {
....@@ -4545,9 +4507,6 @@
45454507 error3:
45464508 destroy_workqueue(cm.wq);
45474509 error2:
4548
- class_unregister(&cm_class);
4549
-error1:
4550
- idr_destroy(&cm.local_id_table);
45514510 return ret;
45524511 }
45534512
....@@ -4568,10 +4527,8 @@
45684527 kfree(timewait_info);
45694528 }
45704529
4571
- class_unregister(&cm_class);
4572
- idr_destroy(&cm.local_id_table);
4530
+ WARN_ON(!xa_empty(&cm.local_id_table));
45734531 }
45744532
45754533 module_init(ib_cm_init);
45764534 module_exit(ib_cm_cleanup);
4577
-
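
The last hunks retire the IDR behind cm.local_id_table in favour of an XArray created with XA_FLAGS_ALLOC, and module cleanup now merely warns if the table is not empty, since an empty XArray needs no explicit destruction. A kernel-context sketch of the allocating-XArray idiom this enables, using invented wrapper names; the real local-ID handling in cm.c also mixes in cm.random_id_operand and has its own locking and lifetime rules, which this does not show:

/* Allocate a free u32 index for an object, look it up, and remove it,
 * using the include/linux/xarray.h allocation API. */
static DEFINE_XARRAY_ALLOC(example_id_table);

static int example_insert(struct cm_id_private *priv, u32 *out_id)
{
	/* xa_alloc() stores 'priv' at an unused index within the limit. */
	return xa_alloc(&example_id_table, out_id, priv, xa_limit_32b,
			GFP_KERNEL);
}

static struct cm_id_private *example_lookup(u32 id)
{
	return xa_load(&example_id_table, id);
}

static void example_remove(u32 id)
{
	xa_erase(&example_id_table, id);
}
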