2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1,38 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /*
  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- *	- Redistributions of source code must retain the above
- *	  copyright notice, this list of conditions and the following
- *	  disclaimer.
- *
- *	- Redistributions in binary form must reproduce the above
- *	  copyright notice, this list of conditions and the following
- *	  disclaimer in the documentation and/or other materials
- *	  provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
  */
 
 #include <linux/dma-mapping.h>
 #include <net/addrconf.h>
+#include <rdma/uverbs_ioctl.h>
 #include "rxe.h"
 #include "rxe_loc.h"
 #include "rxe_queue.h"
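[review note] The 28 deleted comment lines were the dual-license text itself;
the SPDX expression "GPL-2.0 OR Linux-OpenIB" encodes the same choice (GPLv2
or the OpenIB.org BSD variant) in one machine-readable line.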
@@ -56,12 +30,7 @@
 {
        struct rxe_dev *rxe = to_rdev(dev);
        struct rxe_port *port;
-       int rc = -EINVAL;
-
-       if (unlikely(port_num != 1)) {
-               pr_warn("invalid port_number %d\n", port_num);
-               goto out;
-       }
+       int rc;
 
        port = &rxe->port;
 
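[review note] The driver-side port_num sanity checks (here and in the hunks
below) can go because ib_core validates the port against the device before
dispatching to the driver. The core helper looks roughly like this (from
include/rdma/ib_verbs.h):

        static inline bool rdma_is_port_valid(const struct ib_device *device,
                                              unsigned int port)
        {
                return (port >= rdma_start_port(device) &&
                        port <= rdma_end_port(device));
        }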
@@ -71,56 +40,37 @@
        mutex_lock(&rxe->usdev_lock);
        rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
                              &attr->active_width);
+
+       if (attr->state == IB_PORT_ACTIVE)
+               attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+       else if (dev_get_flags(rxe->ndev) & IFF_UP)
+               attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
+       else
+               attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+
        mutex_unlock(&rxe->usdev_lock);
 
-out:
        return rc;
-}
-
-static struct net_device *rxe_get_netdev(struct ib_device *device,
-                                        u8 port_num)
-{
-       struct rxe_dev *rxe = to_rdev(device);
-
-       if (rxe->ndev) {
-               dev_hold(rxe->ndev);
-               return rxe->ndev;
-       }
-
-       return NULL;
 }
 
 static int rxe_query_pkey(struct ib_device *device,
                          u8 port_num, u16 index, u16 *pkey)
 {
-       struct rxe_dev *rxe = to_rdev(device);
-       struct rxe_port *port;
+       if (index > 0)
+               return -EINVAL;
 
-       if (unlikely(port_num != 1)) {
-               dev_warn(device->dev.parent, "invalid port_num = %d\n",
-                        port_num);
-               goto err1;
-       }
-
-       port = &rxe->port;
-
-       if (unlikely(index >= port->attr.pkey_tbl_len)) {
-               dev_warn(device->dev.parent, "invalid index = %d\n",
-                        index);
-               goto err1;
-       }
-
-       *pkey = port->pkey_tbl[index];
+       *pkey = IB_DEFAULT_PKEY_FULL;
        return 0;
-
-err1:
-       return -EINVAL;
 }
 
 static int rxe_modify_device(struct ib_device *dev,
                             int mask, struct ib_device_modify *attr)
 {
        struct rxe_dev *rxe = to_rdev(dev);
+
+       if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+                    IB_DEVICE_MODIFY_NODE_DESC))
+               return -EOPNOTSUPP;
 
        if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
                rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);
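[review note] rxe is a RoCE device and so has exactly one pkey; the per-port
pkey table and its bounds checks collapse into "index 0 returns the default
full-membership key":

        /* from include/rdma/ib_mad.h */
        #define IB_DEFAULT_PKEY_FULL    0xFFFF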
@@ -139,11 +89,6 @@
        struct rxe_dev *rxe = to_rdev(dev);
        struct rxe_port *port;
 
-       if (unlikely(port_num != 1)) {
-               pr_warn("invalid port_num = %d\n", port_num);
-               goto err1;
-       }
-
        port = &rxe->port;
 
        port->attr.port_cap_flags |= attr->set_port_cap_mask;
@@ -153,35 +98,27 @@
        port->attr.qkey_viol_cntr = 0;
 
        return 0;
-
-err1:
-       return -EINVAL;
 }
 
 static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
                                               u8 port_num)
 {
-       struct rxe_dev *rxe = to_rdev(dev);
-
-       return rxe_link_layer(rxe, port_num);
+       return IB_LINK_LAYER_ETHERNET;
 }
 
-static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
-                                             struct ib_udata *udata)
+static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
-       struct rxe_dev *rxe = to_rdev(dev);
-       struct rxe_ucontext *uc;
+       struct rxe_dev *rxe = to_rdev(uctx->device);
+       struct rxe_ucontext *uc = to_ruc(uctx);
 
-       uc = rxe_alloc(&rxe->uc_pool);
-       return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
+       return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
 }
 
-static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
+static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
 {
        struct rxe_ucontext *uc = to_ruc(ibuc);
 
        rxe_drop_ref(uc);
-       return 0;
 }
 
 static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
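[review note] alloc_ucontext no longer allocates: the core now allocates the
driver object using the size declared by INIT_RDMA_OBJ_SIZE() in rxe_dev_ops
(see the ops table later in this patch), hands the embedded ib_ucontext to
the driver, and frees it after dealloc_ucontext (hence the void return). A
sketch of the assumed layout from rxe_verbs.h:

        struct rxe_ucontext {
                struct ib_ucontext      ibuc;   /* must be at offset 0 */
                struct rxe_pool_entry   pelem;
        };

        static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
        {
                return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
        }

The same pattern drives the rxe_alloc_pd, rxe_create_ah, rxe_create_srq and
rxe_create_cq conversions below.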
@@ -203,18 +140,15 @@
        return 0;
 }
 
-static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
-                                 struct ib_ucontext *context,
-                                 struct ib_udata *udata)
+static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
-       struct rxe_dev *rxe = to_rdev(dev);
-       struct rxe_pd *pd;
+       struct rxe_dev *rxe = to_rdev(ibpd->device);
+       struct rxe_pd *pd = to_rpd(ibpd);
 
-       pd = rxe_alloc(&rxe->pd_pool);
-       return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
+       return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
 }
 
-static int rxe_dealloc_pd(struct ib_pd *ibpd)
+static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
        struct rxe_pd *pd = to_rpd(ibpd);
 
@@ -222,36 +156,25 @@
        return 0;
 }
 
-static void rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
-                       struct rxe_av *av)
-{
-       rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
-       rxe_av_fill_ip_info(av, attr);
-}
-
-static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
-                                  struct rdma_ah_attr *attr,
-                                  struct ib_udata *udata)
+static int rxe_create_ah(struct ib_ah *ibah,
+                        struct rdma_ah_init_attr *init_attr,
+                        struct ib_udata *udata)
 
 {
        int err;
-       struct rxe_dev *rxe = to_rdev(ibpd->device);
-       struct rxe_pd *pd = to_rpd(ibpd);
-       struct rxe_ah *ah;
+       struct rxe_dev *rxe = to_rdev(ibah->device);
+       struct rxe_ah *ah = to_rah(ibah);
 
-       err = rxe_av_chk_attr(rxe, attr);
+       err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
        if (err)
-               return ERR_PTR(err);
+               return err;
 
-       ah = rxe_alloc(&rxe->ah_pool);
-       if (!ah)
-               return ERR_PTR(-ENOMEM);
+       err = rxe_add_to_pool(&rxe->ah_pool, &ah->pelem);
+       if (err)
+               return err;
 
-       rxe_add_ref(pd);
-       ah->pd = pd;
-
-       rxe_init_av(rxe, attr, &ah->av);
-       return &ah->ibah;
+       rxe_init_av(init_attr->ah_attr, &ah->av);
+       return 0;
 }
 
 static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
@@ -264,7 +187,7 @@
        if (err)
                return err;
 
-       rxe_init_av(rxe, attr, &ah->av);
+       rxe_init_av(attr, &ah->av);
        return 0;
 }
 
@@ -278,11 +201,10 @@
        return 0;
 }
 
-static int rxe_destroy_ah(struct ib_ah *ibah)
+static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
 {
        struct rxe_ah *ah = to_rah(ibah);
 
-       rxe_drop_ref(ah->pd);
        rxe_drop_ref(ah);
        return 0;
 }
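[review note] Dropping the driver's own pd reference is safe because the
core now owns the AH-to-PD relationship; rdma_create_ah() pins the PD for
the AH's lifetime, roughly:

        /* drivers/infiniband/core/verbs.c (simplified) */
        ah->device = pd->device;
        ah->pd = pd;
        atomic_inc(&pd->usecnt);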
@@ -334,20 +256,18 @@
        return err;
 }
 
-static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
-                                    struct ib_srq_init_attr *init,
-                                    struct ib_udata *udata)
+static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
+                         struct ib_udata *udata)
 {
        int err;
-       struct rxe_dev *rxe = to_rdev(ibpd->device);
-       struct rxe_pd *pd = to_rpd(ibpd);
-       struct rxe_srq *srq;
-       struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
+       struct rxe_dev *rxe = to_rdev(ibsrq->device);
+       struct rxe_pd *pd = to_rpd(ibsrq->pd);
+       struct rxe_srq *srq = to_rsrq(ibsrq);
        struct rxe_create_srq_resp __user *uresp = NULL;
 
        if (udata) {
                if (udata->outlen < sizeof(*uresp))
-                       return ERR_PTR(-EINVAL);
+                       return -EINVAL;
                uresp = udata->outbuf;
        }
 
@@ -355,28 +275,24 @@
        if (err)
                goto err1;
 
-       srq = rxe_alloc(&rxe->srq_pool);
-       if (!srq) {
-               err = -ENOMEM;
+       err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
+       if (err)
                goto err1;
-       }
 
-       rxe_add_index(srq);
        rxe_add_ref(pd);
        srq->pd = pd;
 
-       err = rxe_srq_from_init(rxe, srq, init, context, uresp);
+       err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
        if (err)
                goto err2;
 
-       return &srq->ibsrq;
+       return 0;
 
 err2:
        rxe_drop_ref(pd);
-       rxe_drop_index(srq);
        rxe_drop_ref(srq);
 err1:
-       return ERR_PTR(err);
+       return err;
 }
 
 static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
@@ -401,7 +317,7 @@
        if (err)
                goto err1;
 
-       err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
+       err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
        if (err)
                goto err1;
 
@@ -424,7 +340,7 @@
        return 0;
 }
 
-static int rxe_destroy_srq(struct ib_srq *ibsrq)
+static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
        struct rxe_srq *srq = to_rsrq(ibsrq);
 
@@ -432,9 +348,7 @@
                rxe_queue_cleanup(srq->rq.queue);
 
        rxe_drop_ref(srq->pd);
-       rxe_drop_index(srq);
        rxe_drop_ref(srq);
-
        return 0;
 }
 
@@ -498,7 +412,7 @@
 
        rxe_add_index(qp);
 
-       err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd);
+       err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
        if (err)
                goto err3;
 
@@ -544,7 +458,7 @@
        return 0;
 }
 
-static int rxe_destroy_qp(struct ib_qp *ibqp)
+static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
        struct rxe_qp *qp = to_rqp(ibqp);
 
@@ -602,7 +516,7 @@
        switch (wr->opcode) {
        case IB_WR_RDMA_WRITE_WITH_IMM:
                wr->ex.imm_data = ibwr->ex.imm_data;
-               /* fall through */
+               fallthrough;
        case IB_WR_RDMA_READ:
        case IB_WR_RDMA_WRITE:
                wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
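[review note] The /* fall through */ comment becomes the fallthrough
pseudo-keyword, which survives compilers and static checkers that only honor
the attribute form. Its definition is approximately:

        /* include/linux/compiler_attributes.h (simplified) */
        #if __has_attribute(__fallthrough__)
        # define fallthrough __attribute__((__fallthrough__))
        #else
        # define fallthrough do {} while (0)  /* fallthrough */
        #endif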
@@ -835,49 +749,37 @@
        return err;
 }
 
-static struct ib_cq *rxe_create_cq(struct ib_device *dev,
-                                  const struct ib_cq_init_attr *attr,
-                                  struct ib_ucontext *context,
-                                  struct ib_udata *udata)
+static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+                        struct ib_udata *udata)
 {
        int err;
+       struct ib_device *dev = ibcq->device;
        struct rxe_dev *rxe = to_rdev(dev);
-       struct rxe_cq *cq;
+       struct rxe_cq *cq = to_rcq(ibcq);
        struct rxe_create_cq_resp __user *uresp = NULL;
 
        if (udata) {
                if (udata->outlen < sizeof(*uresp))
-                       return ERR_PTR(-EINVAL);
+                       return -EINVAL;
                uresp = udata->outbuf;
        }
 
        if (attr->flags)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
        if (err)
-               goto err1;
+               return err;
 
-       cq = rxe_alloc(&rxe->cq_pool);
-       if (!cq) {
-               err = -ENOMEM;
-               goto err1;
-       }
-
-       err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
-                              context, uresp);
+       err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
+                              uresp);
        if (err)
-               goto err2;
+               return err;
 
-       return &cq->ibcq;
-
-err2:
-       rxe_drop_ref(cq);
-err1:
-       return ERR_PTR(err);
+       return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
 }
 
-static int rxe_destroy_cq(struct ib_cq *ibcq)
+static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
        struct rxe_cq *cq = to_rcq(ibcq);
 
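[review note] rxe_add_to_pool() becomes the last step in rxe_create_cq():
all fallible work (attr check, queue init) happens before the CQ is inserted
into the pool, so every error path is a plain return with nothing to unwind,
presumably why the err1/err2 labels could be deleted.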
@@ -904,7 +806,7 @@
        if (err)
                goto err1;
 
-       err = rxe_cq_resize_queue(cq, cqe, uresp);
+       err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
        if (err)
                goto err1;
 
@@ -966,30 +868,16 @@
        struct rxe_dev *rxe = to_rdev(ibpd->device);
        struct rxe_pd *pd = to_rpd(ibpd);
        struct rxe_mem *mr;
-       int err;
 
        mr = rxe_alloc(&rxe->mr_pool);
-       if (!mr) {
-               err = -ENOMEM;
-               goto err1;
-       }
+       if (!mr)
+               return ERR_PTR(-ENOMEM);
 
        rxe_add_index(mr);
-
        rxe_add_ref(pd);
-
-       err = rxe_mem_init_dma(pd, access, mr);
-       if (err)
-               goto err2;
+       rxe_mem_init_dma(pd, access, mr);
 
        return &mr->ibmr;
-
-err2:
-       rxe_drop_ref(pd);
-       rxe_drop_index(mr);
-       rxe_drop_ref(mr);
-err1:
-       return ERR_PTR(err);
 }
 
 static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
@@ -1028,19 +916,18 @@
        return ERR_PTR(err);
 }
 
-static int rxe_dereg_mr(struct ib_mr *ibmr)
+static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
        struct rxe_mem *mr = to_rmr(ibmr);
 
        mr->state = RXE_MEM_STATE_ZOMBIE;
-       rxe_drop_ref(mr->pd);
+       rxe_drop_ref(mr_pd(mr));
        rxe_drop_index(mr);
        rxe_drop_ref(mr);
        return 0;
 }
 
-static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
-                                 enum ib_mr_type mr_type,
+static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
                                  u32 max_num_sg)
 {
        struct rxe_dev *rxe = to_rdev(ibpd->device);
@@ -1143,41 +1030,104 @@
 static ssize_t parent_show(struct device *device,
                           struct device_attribute *attr, char *buf)
 {
-       struct rxe_dev *rxe = container_of(device, struct rxe_dev,
-                                          ib_dev.dev);
+       struct rxe_dev *rxe =
+               rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);
 
        return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1));
 }
 
 static DEVICE_ATTR_RO(parent);
 
-static struct device_attribute *rxe_dev_attributes[] = {
-       &dev_attr_parent,
+static struct attribute *rxe_dev_attributes[] = {
+       &dev_attr_parent.attr,
+       NULL
 };
 
-int rxe_register_device(struct rxe_dev *rxe)
+static const struct attribute_group rxe_attr_group = {
+       .attrs = rxe_dev_attributes,
+};
+
+static int rxe_enable_driver(struct ib_device *ib_dev)
+{
+       struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
+
+       rxe_set_port_state(rxe);
+       dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
+       return 0;
+}
+
+static const struct ib_device_ops rxe_dev_ops = {
+       .owner = THIS_MODULE,
+       .driver_id = RDMA_DRIVER_RXE,
+       .uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,
+
+       .alloc_hw_stats = rxe_ib_alloc_hw_stats,
+       .alloc_mr = rxe_alloc_mr,
+       .alloc_pd = rxe_alloc_pd,
+       .alloc_ucontext = rxe_alloc_ucontext,
+       .attach_mcast = rxe_attach_mcast,
+       .create_ah = rxe_create_ah,
+       .create_cq = rxe_create_cq,
+       .create_qp = rxe_create_qp,
+       .create_srq = rxe_create_srq,
+       .dealloc_driver = rxe_dealloc,
+       .dealloc_pd = rxe_dealloc_pd,
+       .dealloc_ucontext = rxe_dealloc_ucontext,
+       .dereg_mr = rxe_dereg_mr,
+       .destroy_ah = rxe_destroy_ah,
+       .destroy_cq = rxe_destroy_cq,
+       .destroy_qp = rxe_destroy_qp,
+       .destroy_srq = rxe_destroy_srq,
+       .detach_mcast = rxe_detach_mcast,
+       .enable_driver = rxe_enable_driver,
+       .get_dma_mr = rxe_get_dma_mr,
+       .get_hw_stats = rxe_ib_get_hw_stats,
+       .get_link_layer = rxe_get_link_layer,
+       .get_port_immutable = rxe_port_immutable,
+       .map_mr_sg = rxe_map_mr_sg,
+       .mmap = rxe_mmap,
+       .modify_ah = rxe_modify_ah,
+       .modify_device = rxe_modify_device,
+       .modify_port = rxe_modify_port,
+       .modify_qp = rxe_modify_qp,
+       .modify_srq = rxe_modify_srq,
+       .peek_cq = rxe_peek_cq,
+       .poll_cq = rxe_poll_cq,
+       .post_recv = rxe_post_recv,
+       .post_send = rxe_post_send,
+       .post_srq_recv = rxe_post_srq_recv,
+       .query_ah = rxe_query_ah,
+       .query_device = rxe_query_device,
+       .query_pkey = rxe_query_pkey,
+       .query_port = rxe_query_port,
+       .query_qp = rxe_query_qp,
+       .query_srq = rxe_query_srq,
+       .reg_user_mr = rxe_reg_user_mr,
+       .req_notify_cq = rxe_req_notify_cq,
+       .resize_cq = rxe_resize_cq,
+
+       INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
+       INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
+       INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
+       INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
+       INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
+};
+
+int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
 {
        int err;
-       int i;
        struct ib_device *dev = &rxe->ib_dev;
        struct crypto_shash *tfm;
 
-       strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
        strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
 
-       dev->owner = THIS_MODULE;
        dev->node_type = RDMA_NODE_IB_CA;
        dev->phys_port_cnt = 1;
        dev->num_comp_vectors = num_possible_cpus();
-       dev->dev.parent = rxe_dma_device(rxe);
        dev->local_dma_lkey = 0;
        addrconf_addr_eui48((unsigned char *)&dev->node_guid,
                            rxe->ndev->dev_addr);
-       dev->dev.dma_ops = &dma_virt_ops;
-       dma_coerce_mask_and_coherent(&dev->dev,
-                                    dma_get_required_mask(&dev->dev));
 
-       dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
        dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
                | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
                | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
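[review note] The long list of dev->xxx assignments removed in the next hunk
collapses into the const rxe_dev_ops table above; ib_set_device_ops() copies
each non-NULL member into the device. The core does roughly:

        /* drivers/infiniband/core/device.c (simplified) */
        #define SET_DEVICE_OP(ptr, name)                                \
                do {                                                    \
                        if (ops->name && !(ptr)->name)                  \
                                (ptr)->name = ops->name;                \
                } while (0)

        void ib_set_device_ops(struct ib_device *dev,
                               const struct ib_device_ops *ops)
        {
                struct ib_device_ops *dev_ops = &dev->ops;

                SET_DEVICE_OP(dev_ops, alloc_pd);
                SET_DEVICE_OP(dev_ops, create_ah);
                /* ...one SET_DEVICE_OP() per callback... */
        }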
@@ -1211,49 +1161,10 @@
                | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
                ;
 
-       dev->query_device = rxe_query_device;
-       dev->modify_device = rxe_modify_device;
-       dev->query_port = rxe_query_port;
-       dev->modify_port = rxe_modify_port;
-       dev->get_link_layer = rxe_get_link_layer;
-       dev->get_netdev = rxe_get_netdev;
-       dev->query_pkey = rxe_query_pkey;
-       dev->alloc_ucontext = rxe_alloc_ucontext;
-       dev->dealloc_ucontext = rxe_dealloc_ucontext;
-       dev->mmap = rxe_mmap;
-       dev->get_port_immutable = rxe_port_immutable;
-       dev->alloc_pd = rxe_alloc_pd;
-       dev->dealloc_pd = rxe_dealloc_pd;
-       dev->create_ah = rxe_create_ah;
-       dev->modify_ah = rxe_modify_ah;
-       dev->query_ah = rxe_query_ah;
-       dev->destroy_ah = rxe_destroy_ah;
-       dev->create_srq = rxe_create_srq;
-       dev->modify_srq = rxe_modify_srq;
-       dev->query_srq = rxe_query_srq;
-       dev->destroy_srq = rxe_destroy_srq;
-       dev->post_srq_recv = rxe_post_srq_recv;
-       dev->create_qp = rxe_create_qp;
-       dev->modify_qp = rxe_modify_qp;
-       dev->query_qp = rxe_query_qp;
-       dev->destroy_qp = rxe_destroy_qp;
-       dev->post_send = rxe_post_send;
-       dev->post_recv = rxe_post_recv;
-       dev->create_cq = rxe_create_cq;
-       dev->destroy_cq = rxe_destroy_cq;
-       dev->resize_cq = rxe_resize_cq;
-       dev->poll_cq = rxe_poll_cq;
-       dev->peek_cq = rxe_peek_cq;
-       dev->req_notify_cq = rxe_req_notify_cq;
-       dev->get_dma_mr = rxe_get_dma_mr;
-       dev->reg_user_mr = rxe_reg_user_mr;
-       dev->dereg_mr = rxe_dereg_mr;
-       dev->alloc_mr = rxe_alloc_mr;
-       dev->map_mr_sg = rxe_map_mr_sg;
-       dev->attach_mcast = rxe_attach_mcast;
-       dev->detach_mcast = rxe_detach_mcast;
-       dev->get_hw_stats = rxe_ib_get_hw_stats;
-       dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;
+       ib_set_device_ops(dev, &rxe_dev_ops);
+       err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
+       if (err)
+               return err;
 
        tfm = crypto_alloc_shash("crc32", 0, 0);
        if (IS_ERR(tfm)) {
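[review note] ib_device_set_netdev() supersedes the deleted rxe_get_netdev()
callback: the netdev association is registered once for port 1 and the core
manages the dev_hold()/dev_put() lifetime itself instead of calling back
into the driver.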
@@ -1263,41 +1174,14 @@
        }
        rxe->tfm = tfm;
 
-       dev->driver_id = RDMA_DRIVER_RXE;
-       err = ib_register_device(dev, NULL);
-       if (err) {
+       rdma_set_device_sysfs_group(dev, &rxe_attr_group);
+       err = ib_register_device(dev, ibdev_name, NULL);
+       if (err)
                pr_warn("%s failed with error %d\n", __func__, err);
-               goto err1;
-       }
 
-       for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
-               err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
-               if (err) {
-                       pr_warn("%s failed with error %d for attr number %d\n",
-                               __func__, err, i);
-                       goto err2;
-               }
-       }
-
-       return 0;
-
-err2:
-       ib_unregister_device(dev);
-err1:
-       crypto_free_shash(rxe->tfm);
-
+       /*
+        * Note that rxe may be invalid at this point if another thread
+        * unregistered it.
+        */
        return err;
-}
-
-int rxe_unregister_device(struct rxe_dev *rxe)
-{
-       int i;
-       struct ib_device *dev = &rxe->ib_dev;
-
-       for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
-               device_remove_file(&dev->dev, rxe_dev_attributes[i]);
-
-       ib_unregister_device(dev);
-
-       return 0;
-}
 }
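[review note] rxe_unregister_device() disappears because teardown moves into
the core: with .dealloc_driver wired to rxe_dealloc() and the sysfs group
registered via rdma_set_device_sysfs_group(), ib_unregister_device() removes
the attributes and frees the device itself. Module exit then reduces to
something like the following sketch (exact ordering in rxe.c may differ):

        static void __exit rxe_module_exit(void)
        {
                rxe_net_exit();
                ib_unregister_driver(RDMA_DRIVER_RXE); /* calls rxe_dealloc() */
                rxe_cache_exit();
        }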