forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -57,17 +57,16 @@
 {
     return gid_index * hr_dev->caps.num_ports + port;
 }
-EXPORT_SYMBOL_GPL(hns_get_gid_index);

 static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
 {
     u8 phy_port;
     u32 i = 0;

-    if (!memcmp(hr_dev->dev_addr[port], addr, MAC_ADDR_OCTET_NUM))
+    if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
         return 0;

-    for (i = 0; i < MAC_ADDR_OCTET_NUM; i++)
+    for (i = 0; i < ETH_ALEN; i++)
         hr_dev->dev_addr[port][i] = addr[i];

     phy_port = hr_dev->iboe.phy_port[port];
@@ -78,17 +77,12 @@
 {
     struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
     u8 port = attr->port_num - 1;
-    unsigned long flags;
     int ret;

     if (port >= hr_dev->caps.num_ports)
         return -EINVAL;

-    spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-
     ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);
-
-    spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

     return ret;
 }
@@ -96,19 +90,14 @@
 static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
 {
     struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
-    struct ib_gid_attr zattr = { };
+    struct ib_gid_attr zattr = {};
     u8 port = attr->port_num - 1;
-    unsigned long flags;
     int ret;

     if (port >= hr_dev->caps.num_ports)
         return -EINVAL;

-    spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-
     ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr);
-
-    spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

     return ret;
 }
@@ -122,7 +111,7 @@

     netdev = hr_dev->iboe.netdevs[port];
     if (!netdev) {
-        dev_err(dev, "port(%d) can't find netdev\n", port);
+        dev_err(dev, "Can't find netdev on port(%u)!\n", port);
         return -ENODEV;
     }

@@ -152,8 +141,8 @@
     struct net_device *dev = netdev_notifier_info_to_dev(ptr);
     struct hns_roce_ib_iboe *iboe = NULL;
     struct hns_roce_dev *hr_dev = NULL;
-    u8 port = 0;
-    int ret = 0;
+    int ret;
+    u8 port;

     hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
     iboe = &hr_dev->iboe;
@@ -196,6 +185,7 @@

     memset(props, 0, sizeof(*props));

+    props->fw_ver = hr_dev->caps.fw_ver;
     props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
     props->max_mr_size = (u64)(~(0ULL));
     props->page_size_cap = hr_dev->caps.page_size_cap;
@@ -215,30 +205,22 @@
     props->max_pd = hr_dev->caps.num_pds;
     props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
     props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
-    props->atomic_cap = IB_ATOMIC_NONE;
+    props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
+                        IB_ATOMIC_HCA : IB_ATOMIC_NONE;
     props->max_pkeys = 1;
     props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
+    if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+        props->max_srq = hr_dev->caps.num_srqs;
+        props->max_srq_wr = hr_dev->caps.max_srq_wrs;
+        props->max_srq_sge = hr_dev->caps.max_srq_sges;
+    }
+
+    if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
+        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+        props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
+    }

     return 0;
-}
-
-static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev,
-                                              u8 port_num)
-{
-    struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-    struct net_device *ndev;
-
-    if (port_num < 1 || port_num > hr_dev->caps.num_ports)
-        return NULL;
-
-    rcu_read_lock();
-
-    ndev = hr_dev->iboe.netdevs[port_num - 1];
-    if (ndev)
-        dev_hold(ndev);
-
-    rcu_read_unlock();
-    return ndev;
 }

 static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
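Note: the capability-flag driven attributes set in the hunk above are what userspace ultimately observes through the query-device verb. A minimal userspace sketch with plain libibverbs, assuming an already-opened ibv_context (this snippet is illustrative and not part of the patch):

    #include <stdio.h>
    #include <infiniband/verbs.h>

    /* Print a few of the attributes the hns driver fills in conditionally:
     * max_srq stays 0 unless the SRQ capability flag is set, and atomic_cap
     * reports IBV_ATOMIC_HCA only when the atomic flag is present. */
    int report_caps(struct ibv_context *ctx)
    {
        struct ibv_device_attr attr;

        if (ibv_query_device(ctx, &attr))
            return -1;

        printf("max_srq=%d max_srq_wr=%d atomic_cap=%d\n",
               attr.max_srq, attr.max_srq_wr, (int)attr.atomic_cap);
        return 0;
    }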
@@ -251,7 +233,6 @@
     enum ib_mtu mtu;
     u8 port;

-    assert(port_num > 0);
     port = port_num - 1;

     /* props being zeroed by the caller, avoid zeroing it here */
@@ -271,15 +252,18 @@
     net_dev = hr_dev->iboe.netdevs[port];
     if (!net_dev) {
         spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-        dev_err(dev, "find netdev %d failed!\r\n", port);
+        dev_err(dev, "Find netdev %u failed!\n", port);
         return -EINVAL;
     }

     mtu = iboe_get_mtu(net_dev->mtu);
     props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
-    props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
-                    IB_PORT_ACTIVE : IB_PORT_DOWN;
-    props->phys_state = (props->state == IB_PORT_ACTIVE) ? 5 : 3;
+    props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
+                   IB_PORT_ACTIVE :
+                   IB_PORT_DOWN;
+    props->phys_state = props->state == IB_PORT_ACTIVE ?
+                        IB_PORT_PHYS_STATE_LINK_UP :
+                        IB_PORT_PHYS_STATE_DISABLED;

     spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

@@ -320,107 +304,49 @@
     return 0;
 }

-static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
-                                struct ib_port_modify *props)
+static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
+                                   struct ib_udata *udata)
 {
-    return 0;
-}
-
-static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
-                                                   struct ib_udata *udata)
-{
-    int ret = 0;
-    struct hns_roce_ucontext *context;
+    int ret;
+    struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
     struct hns_roce_ib_alloc_ucontext_resp resp = {};
-    struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+    struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);

     if (!hr_dev->active)
-        return ERR_PTR(-EAGAIN);
+        return -EAGAIN;

     resp.qp_tab_size = hr_dev->caps.num_qps;
-
-    context = kmalloc(sizeof(*context), GFP_KERNEL);
-    if (!context)
-        return ERR_PTR(-ENOMEM);

     ret = hns_roce_uar_alloc(hr_dev, &context->uar);
     if (ret)
         goto error_fail_uar_alloc;

-    INIT_LIST_HEAD(&context->vma_list);
-    mutex_init(&context->vma_list_mutex);
     if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
         INIT_LIST_HEAD(&context->page_list);
         mutex_init(&context->page_mutex);
     }

-    ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
+    resp.cqe_size = hr_dev->caps.cqe_sz;
+
+    ret = ib_copy_to_udata(udata, &resp,
+                           min(udata->outlen, sizeof(resp)));
     if (ret)
         goto error_fail_copy_to_udata;

-    return &context->ibucontext;
+    return 0;

 error_fail_copy_to_udata:
     hns_roce_uar_free(hr_dev, &context->uar);

 error_fail_uar_alloc:
-    kfree(context);
-
-    return ERR_PTR(ret);
+    return ret;
 }

-static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
+static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
     struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);

     hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
-    kfree(context);
-
-    return 0;
-}
-
-static void hns_roce_vma_open(struct vm_area_struct *vma)
-{
-    vma->vm_ops = NULL;
-}
-
-static void hns_roce_vma_close(struct vm_area_struct *vma)
-{
-    struct hns_roce_vma_data *vma_data;
-
-    vma_data = (struct hns_roce_vma_data *)vma->vm_private_data;
-    vma_data->vma = NULL;
-    mutex_lock(vma_data->vma_list_mutex);
-    list_del(&vma_data->list);
-    mutex_unlock(vma_data->vma_list_mutex);
-    kfree(vma_data);
-}
-
-static const struct vm_operations_struct hns_roce_vm_ops = {
-    .open = hns_roce_vma_open,
-    .close = hns_roce_vma_close,
-};
-
-static int hns_roce_set_vma_data(struct vm_area_struct *vma,
-                                 struct hns_roce_ucontext *context)
-{
-    struct list_head *vma_head = &context->vma_list;
-    struct hns_roce_vma_data *vma_data;
-
-    vma_data = kzalloc(sizeof(*vma_data), GFP_KERNEL);
-    if (!vma_data)
-        return -ENOMEM;
-
-    vma_data->vma = vma;
-    vma_data->vma_list_mutex = &context->vma_list_mutex;
-    vma->vm_private_data = vma_data;
-    vma->vm_ops = &hns_roce_vm_ops;
-
-    mutex_lock(&context->vma_list_mutex);
-    list_add(&vma_data->list, vma_head);
-    mutex_unlock(&context->vma_list_mutex);
-
-    return 0;
 }

 static int hns_roce_mmap(struct ib_ucontext *context,
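Note: the alloc_ucontext conversion above follows the newer kernel verbs convention: the RDMA core sizes and allocates the driver's context from the INIT_RDMA_OBJ_SIZE() entry (added further below in hns_roce_dev_ops), so the callback only initializes the embedded ib_ucontext and returns an errno instead of an ERR_PTR. A generic sketch of the embedded-object pattern this relies on; the names mirror the driver but this is an illustration, not the driver's header:

    #include <linux/kernel.h>
    #include <rdma/ib_verbs.h>

    /* The core allocates the outer structure; the driver recovers it from
     * the embedded ib_ucontext with container_of(). */
    struct example_ucontext {
        struct ib_ucontext ibucontext;  /* must stay embedded, core-owned */
        /* driver-private fields follow ... */
    };

    static inline struct example_ucontext *
    to_example_ucontext(struct ib_ucontext *ibucontext)
    {
        return container_of(ibucontext, struct example_ucontext, ibucontext);
    }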
@@ -428,27 +354,31 @@
 {
     struct hns_roce_dev *hr_dev = to_hr_dev(context->device);

-    if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
-        return -EINVAL;
+    switch (vma->vm_pgoff) {
+    case 0:
+        return rdma_user_mmap_io(context, vma,
+                                 to_hr_ucontext(context)->uar.pfn,
+                                 PAGE_SIZE,
+                                 pgprot_device(vma->vm_page_prot),
+                                 NULL);

-    if (vma->vm_pgoff == 0) {
-        vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
-        if (io_remap_pfn_range(vma, vma->vm_start,
-                               to_hr_ucontext(context)->uar.pfn,
-                               PAGE_SIZE, vma->vm_page_prot))
-            return -EAGAIN;
-    } else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr &&
-               hr_dev->tptr_size) {
-        /* vm_pgoff: 1 -- TPTR */
-        if (io_remap_pfn_range(vma, vma->vm_start,
-                               hr_dev->tptr_dma_addr >> PAGE_SHIFT,
-                               hr_dev->tptr_size,
-                               vma->vm_page_prot))
-            return -EAGAIN;
-    } else
-        return -EINVAL;
+    /* vm_pgoff: 1 -- TPTR */
+    case 1:
+        if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
+            return -EINVAL;
+        /*
+         * FIXME: using io_remap_pfn_range on the dma address returned
+         * by dma_alloc_coherent is totally wrong.
+         */
+        return rdma_user_mmap_io(context, vma,
+                                 hr_dev->tptr_dma_addr >> PAGE_SHIFT,
+                                 hr_dev->tptr_size,
+                                 vma->vm_page_prot,
+                                 NULL);

-    return hns_roce_set_vma_data(vma, to_hr_ucontext(context));
+    default:
+        return -EINVAL;
+    }
 }

 static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
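Note: rdma_user_mmap_io() replaces the open-coded io_remap_pfn_range() plus the per-vma bookkeeping removed above; the core records the mapping and can tear it down when the ucontext is disassociated, which is why hns_roce_disassociate_ucontext() becomes empty in the next hunk. A hedged sketch of a single doorbell/UAR page mapping with this helper (the helper signature matches how it is called in this diff; example_mmap_uar and uar_pfn are invented names):

    #include <rdma/ib_verbs.h>

    /* Sketch only: map one UAR page for userspace, mirroring the case-0
     * branch above. 'uar_pfn' stands in for a pfn the driver has already
     * computed for this context. */
    static int example_mmap_uar(struct ib_ucontext *uctx,
                                struct vm_area_struct *vma,
                                unsigned long uar_pfn)
    {
        if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
            return -EINVAL;

        return rdma_user_mmap_io(uctx, vma, uar_pfn, PAGE_SIZE,
                                 pgprot_noncached(vma->vm_page_prot), NULL);
    }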
@@ -474,21 +404,6 @@

 static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
 {
-    struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
-    struct hns_roce_vma_data *vma_data, *n;
-    struct vm_area_struct *vma;
-
-    mutex_lock(&context->vma_list_mutex);
-    list_for_each_entry_safe(vma_data, n, &context->vma_list, list) {
-        vma = vma_data->vma;
-        zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
-
-        vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
-        vma->vm_ops = NULL;
-        list_del(&vma_data->list);
-        kfree(vma_data);
-    }
-    mutex_unlock(&context->vma_list_mutex);
 }

 static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
@@ -500,28 +415,87 @@
     ib_unregister_device(&hr_dev->ib_dev);
 }

+static const struct ib_device_ops hns_roce_dev_ops = {
+    .owner = THIS_MODULE,
+    .driver_id = RDMA_DRIVER_HNS,
+    .uverbs_abi_ver = 1,
+    .uverbs_no_driver_id_binding = 1,
+
+    .add_gid = hns_roce_add_gid,
+    .alloc_pd = hns_roce_alloc_pd,
+    .alloc_ucontext = hns_roce_alloc_ucontext,
+    .create_ah = hns_roce_create_ah,
+    .create_cq = hns_roce_create_cq,
+    .create_qp = hns_roce_create_qp,
+    .dealloc_pd = hns_roce_dealloc_pd,
+    .dealloc_ucontext = hns_roce_dealloc_ucontext,
+    .del_gid = hns_roce_del_gid,
+    .dereg_mr = hns_roce_dereg_mr,
+    .destroy_ah = hns_roce_destroy_ah,
+    .destroy_cq = hns_roce_destroy_cq,
+    .disassociate_ucontext = hns_roce_disassociate_ucontext,
+    .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
+    .get_dma_mr = hns_roce_get_dma_mr,
+    .get_link_layer = hns_roce_get_link_layer,
+    .get_port_immutable = hns_roce_port_immutable,
+    .mmap = hns_roce_mmap,
+    .modify_device = hns_roce_modify_device,
+    .modify_qp = hns_roce_modify_qp,
+    .query_ah = hns_roce_query_ah,
+    .query_device = hns_roce_query_device,
+    .query_pkey = hns_roce_query_pkey,
+    .query_port = hns_roce_query_port,
+    .reg_user_mr = hns_roce_reg_user_mr,
+
+    INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
+    INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
+    INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
+    INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
+};
+
+static const struct ib_device_ops hns_roce_dev_mr_ops = {
+    .rereg_user_mr = hns_roce_rereg_user_mr,
+};
+
+static const struct ib_device_ops hns_roce_dev_mw_ops = {
+    .alloc_mw = hns_roce_alloc_mw,
+    .dealloc_mw = hns_roce_dealloc_mw,
+
+    INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
+};
+
+static const struct ib_device_ops hns_roce_dev_frmr_ops = {
+    .alloc_mr = hns_roce_alloc_mr,
+    .map_mr_sg = hns_roce_map_mr_sg,
+};
+
+static const struct ib_device_ops hns_roce_dev_srq_ops = {
+    .create_srq = hns_roce_create_srq,
+    .destroy_srq = hns_roce_destroy_srq,
+
+    INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
+};
+
 static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 {
     int ret;
     struct hns_roce_ib_iboe *iboe = NULL;
     struct ib_device *ib_dev = NULL;
     struct device *dev = hr_dev->dev;
+    unsigned int i;

     iboe = &hr_dev->iboe;
     spin_lock_init(&iboe->lock);

     ib_dev = &hr_dev->ib_dev;
-    strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX);

-    ib_dev->owner               = THIS_MODULE;
-    ib_dev->node_type           = RDMA_NODE_IB_CA;
-    ib_dev->dev.parent          = dev;
+    ib_dev->node_type = RDMA_NODE_IB_CA;
+    ib_dev->dev.parent = dev;

-    ib_dev->phys_port_cnt       = hr_dev->caps.num_ports;
-    ib_dev->local_dma_lkey      = hr_dev->caps.reserved_lkey;
-    ib_dev->num_comp_vectors    = hr_dev->caps.num_comp_vectors;
-    ib_dev->uverbs_abi_ver      = 1;
-    ib_dev->uverbs_cmd_mask     =
+    ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
+    ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
+    ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
+    ib_dev->uverbs_cmd_mask =
         (1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
         (1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
         (1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
@@ -537,62 +511,48 @@
         (1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
         (1ULL << IB_USER_VERBS_CMD_DESTROY_QP);

-    ib_dev->uverbs_ex_cmd_mask |=
-        (1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
-
-    /* HCA||device||port */
-    ib_dev->modify_device = hns_roce_modify_device;
-    ib_dev->query_device = hns_roce_query_device;
-    ib_dev->query_port = hns_roce_query_port;
-    ib_dev->modify_port = hns_roce_modify_port;
-    ib_dev->get_link_layer = hns_roce_get_link_layer;
-    ib_dev->get_netdev = hns_roce_get_netdev;
-    ib_dev->add_gid = hns_roce_add_gid;
-    ib_dev->del_gid = hns_roce_del_gid;
-    ib_dev->query_pkey = hns_roce_query_pkey;
-    ib_dev->alloc_ucontext = hns_roce_alloc_ucontext;
-    ib_dev->dealloc_ucontext = hns_roce_dealloc_ucontext;
-    ib_dev->mmap = hns_roce_mmap;
-
-    /* PD */
-    ib_dev->alloc_pd = hns_roce_alloc_pd;
-    ib_dev->dealloc_pd = hns_roce_dealloc_pd;
-
-    /* AH */
-    ib_dev->create_ah = hns_roce_create_ah;
-    ib_dev->query_ah = hns_roce_query_ah;
-    ib_dev->destroy_ah = hns_roce_destroy_ah;
-
-    /* QP */
-    ib_dev->create_qp = hns_roce_create_qp;
-    ib_dev->modify_qp = hns_roce_modify_qp;
-    ib_dev->query_qp = hr_dev->hw->query_qp;
-    ib_dev->destroy_qp = hr_dev->hw->destroy_qp;
-    ib_dev->post_send = hr_dev->hw->post_send;
-    ib_dev->post_recv = hr_dev->hw->post_recv;
-
-    /* CQ */
-    ib_dev->create_cq = hns_roce_ib_create_cq;
-    ib_dev->modify_cq = hr_dev->hw->modify_cq;
-    ib_dev->destroy_cq = hns_roce_ib_destroy_cq;
-    ib_dev->req_notify_cq = hr_dev->hw->req_notify_cq;
-    ib_dev->poll_cq = hr_dev->hw->poll_cq;
-
-    /* MR */
-    ib_dev->get_dma_mr = hns_roce_get_dma_mr;
-    ib_dev->reg_user_mr = hns_roce_reg_user_mr;
-    ib_dev->dereg_mr = hns_roce_dereg_mr;
     if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
-        ib_dev->rereg_user_mr = hns_roce_rereg_user_mr;
         ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
+        ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
     }

-    /* OTHERS */
-    ib_dev->get_port_immutable = hns_roce_port_immutable;
-    ib_dev->disassociate_ucontext = hns_roce_disassociate_ucontext;
+    /* MW */
+    if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
+        ib_dev->uverbs_cmd_mask |=
+                    (1ULL << IB_USER_VERBS_CMD_ALLOC_MW) |
+                    (1ULL << IB_USER_VERBS_CMD_DEALLOC_MW);
+        ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
+    }

-    ib_dev->driver_id = RDMA_DRIVER_HNS;
-    ret = ib_register_device(ib_dev, NULL);
+    /* FRMR */
+    if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
+        ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);
+
+    /* SRQ */
+    if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+        ib_dev->uverbs_cmd_mask |=
+                (1ULL << IB_USER_VERBS_CMD_CREATE_SRQ) |
+                (1ULL << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+                (1ULL << IB_USER_VERBS_CMD_QUERY_SRQ) |
+                (1ULL << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+                (1ULL << IB_USER_VERBS_CMD_POST_SRQ_RECV);
+        ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
+        ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
+    }
+
+    ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
+    ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
+    for (i = 0; i < hr_dev->caps.num_ports; i++) {
+        if (!hr_dev->iboe.netdevs[i])
+            continue;
+
+        ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
+                                   i + 1);
+        if (ret)
+            return ret;
+    }
+    dma_set_max_seg_size(dev, UINT_MAX);
+    ret = ib_register_device(ib_dev, "hns_%d", dev);
     if (ret) {
         dev_err(dev, "ib_register_device failed!\n");
         return ret;
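Note: registration now composes several const ib_device_ops tables with ib_set_device_ops() instead of assigning function pointers one by one; as far as I can tell, the core only fills callbacks that are still unset, which is why the hw-specific tables are applied before the generic hns_roce_dev_ops above so that they take precedence. A small sketch of the pattern with invented names (not taken from this driver):

    #include <rdma/ib_verbs.h>

    /* Placeholder callback used by both tables below. */
    static int example_query_port(struct ib_device *ibdev, u8 port_num,
                                  struct ib_port_attr *props)
    {
        return -EOPNOTSUPP;
    }

    static const struct ib_device_ops example_specific_ops = {
        .query_port = example_query_port,
    };

    static const struct ib_device_ops example_generic_ops = {
        .owner = THIS_MODULE,
        .query_port = example_query_port, /* ignored: already set above */
    };

    static void example_compose(struct ib_device *ibdev)
    {
        /* Apply the more specific table first, then the generic one. */
        ib_set_device_ops(ibdev, &example_specific_ops);
        ib_set_device_ops(ibdev, &example_generic_ops);
    }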
@@ -622,38 +582,19 @@

 static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 {
-    int ret;
     struct device *dev = hr_dev->dev;
-
-    ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
-                                  HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
-                                  hr_dev->caps.num_mtt_segs, 1);
-    if (ret) {
-        dev_err(dev, "Failed to init MTT context memory, aborting.\n");
-        return ret;
-    }
-
-    if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
-        ret = hns_roce_init_hem_table(hr_dev,
-                                      &hr_dev->mr_table.mtt_cqe_table,
-                                      HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz,
-                                      hr_dev->caps.num_cqe_segs, 1);
-        if (ret) {
-            dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n");
-            goto err_unmap_cqe;
-        }
-    }
+    int ret;

     ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
                                   HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
                                   hr_dev->caps.num_mtpts, 1);
     if (ret) {
         dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
-        goto err_unmap_mtt;
+        return ret;
     }

     ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
-                                  HEM_TYPE_QPC, hr_dev->caps.qpc_entry_sz,
+                                  HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
                                   hr_dev->caps.num_qps, 1);
     if (ret) {
         dev_err(dev, "Failed to init QP context memory, aborting.\n");
@@ -679,7 +620,7 @@
                                       hr_dev->caps.num_qps, 1);
         if (ret) {
             dev_err(dev,
-                   "Failed to init trrl_table memory, aborting.\n");
+                    "Failed to init trrl_table memory, aborting.\n");
             goto err_unmap_irrl;
         }
     }
@@ -692,7 +633,71 @@
             goto err_unmap_trrl;
     }

+    if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+        ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
+                                      HEM_TYPE_SRQC,
+                                      hr_dev->caps.srqc_entry_sz,
+                                      hr_dev->caps.num_srqs, 1);
+        if (ret) {
+            dev_err(dev,
+                    "Failed to init SRQ context memory, aborting.\n");
+            goto err_unmap_cq;
+        }
+    }
+
+    if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
+        ret = hns_roce_init_hem_table(hr_dev,
+                                      &hr_dev->qp_table.sccc_table,
+                                      HEM_TYPE_SCCC,
+                                      hr_dev->caps.sccc_sz,
+                                      hr_dev->caps.num_qps, 1);
+        if (ret) {
+            dev_err(dev,
+                    "Failed to init SCC context memory, aborting.\n");
+            goto err_unmap_srq;
+        }
+    }
+
+    if (hr_dev->caps.qpc_timer_entry_sz) {
+        ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
+                                      HEM_TYPE_QPC_TIMER,
+                                      hr_dev->caps.qpc_timer_entry_sz,
+                                      hr_dev->caps.num_qpc_timer, 1);
+        if (ret) {
+            dev_err(dev,
+                    "Failed to init QPC timer memory, aborting.\n");
+            goto err_unmap_ctx;
+        }
+    }
+
+    if (hr_dev->caps.cqc_timer_entry_sz) {
+        ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
+                                      HEM_TYPE_CQC_TIMER,
+                                      hr_dev->caps.cqc_timer_entry_sz,
+                                      hr_dev->caps.num_cqc_timer, 1);
+        if (ret) {
+            dev_err(dev,
+                    "Failed to init CQC timer memory, aborting.\n");
+            goto err_unmap_qpc_timer;
+        }
+    }
+
     return 0;
+
+err_unmap_qpc_timer:
+    if (hr_dev->caps.qpc_timer_entry_sz)
+        hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);
+
+err_unmap_ctx:
+    if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
+        hns_roce_cleanup_hem_table(hr_dev,
+                                   &hr_dev->qp_table.sccc_table);
+err_unmap_srq:
+    if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
+        hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);
+
+err_unmap_cq:
+    hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);

 err_unmap_trrl:
     if (hr_dev->caps.trrl_entry_sz)
@@ -708,14 +713,6 @@
 err_unmap_dmpt:
     hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);

-err_unmap_mtt:
-    if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
-        hns_roce_cleanup_hem_table(hr_dev,
-                                   &hr_dev->mr_table.mtt_cqe_table);
-
-err_unmap_cqe:
-    hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
-
     return ret;
 }

@@ -726,8 +723,8 @@
  */
 static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 {
-    int ret;
     struct device *dev = hr_dev->dev;
+    int ret;

     spin_lock_init(&hr_dev->sm_lock);
     spin_lock_init(&hr_dev->bt_cmd_lock);
@@ -773,7 +770,19 @@
         goto err_cq_table_free;
     }

+    if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+        ret = hns_roce_init_srq_table(hr_dev);
+        if (ret) {
+            dev_err(dev,
+                    "Failed to init share receive queue table.\n");
+            goto err_qp_table_free;
+        }
+    }
+
     return 0;
+
+err_qp_table_free:
+    hns_roce_cleanup_qp_table(hr_dev);

 err_cq_table_free:
     hns_roce_cleanup_cq_table(hr_dev);
@@ -792,10 +801,54 @@
     return ret;
 }

+static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
+{
+    struct hns_roce_cq *hr_cq = to_hr_cq(cq);
+    unsigned long flags;
+
+    spin_lock_irqsave(&hr_cq->lock, flags);
+    if (cq->comp_handler) {
+        if (!hr_cq->is_armed) {
+            hr_cq->is_armed = 1;
+            list_add_tail(&hr_cq->node, cq_list);
+        }
+    }
+    spin_unlock_irqrestore(&hr_cq->lock, flags);
+}
+
+void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
+{
+    struct hns_roce_qp *hr_qp;
+    struct hns_roce_cq *hr_cq;
+    struct list_head cq_list;
+    unsigned long flags_qp;
+    unsigned long flags;
+
+    INIT_LIST_HEAD(&cq_list);
+
+    spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
+    list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
+        spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
+        if (hr_qp->sq.tail != hr_qp->sq.head)
+            check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
+        spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);
+
+        spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
+        if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
+            check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
+        spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
+    }
+
+    list_for_each_entry(hr_cq, &cq_list, node)
+        hns_roce_cq_completion(hr_dev, hr_cq->cqn);
+
+    spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
+}
+
 int hns_roce_init(struct hns_roce_dev *hr_dev)
 {
-    int ret;
     struct device *dev = hr_dev->dev;
+    int ret;

     if (hr_dev->hw->reset) {
         ret = hr_dev->hw->reset(hr_dev, true);
@@ -826,6 +879,7 @@
         goto error_failed_cmd_init;
     }

+    /* EQ depends on poll mode, event mode depends on EQ */
     ret = hr_dev->hw->init_eq(hr_dev);
     if (ret) {
         dev_err(dev, "eq init failed!\n");
@@ -835,8 +889,9 @@
     if (hr_dev->cmd_mod) {
         ret = hns_roce_cmd_use_events(hr_dev);
         if (ret) {
-            dev_err(dev, "Switch to event-driven cmd failed!\n");
-            goto error_failed_use_event;
+            dev_warn(dev,
+                     "Cmd event mode failed, set back to poll!\n");
+            hns_roce_cmd_use_polling(hr_dev);
         }
     }

@@ -860,6 +915,9 @@
         }
     }

+    INIT_LIST_HEAD(&hr_dev->qp_list);
+    spin_lock_init(&hr_dev->qp_list_lock);
+
     ret = hns_roce_register_device(hr_dev);
     if (ret)
         goto error_failed_register_device;
@@ -879,8 +937,6 @@
 error_failed_init_hem:
     if (hr_dev->cmd_mod)
         hns_roce_cmd_use_polling(hr_dev);
-
-error_failed_use_event:
     hr_dev->hw->cleanup_eq(hr_dev);

 error_failed_eq_table:
@@ -898,7 +954,6 @@

     return ret;
 }
-EXPORT_SYMBOL_GPL(hns_roce_init);

 void hns_roce_exit(struct hns_roce_dev *hr_dev)
 {
@@ -919,7 +974,6 @@
     if (hr_dev->hw->reset)
         hr_dev->hw->reset(hr_dev, false);
 }
-EXPORT_SYMBOL_GPL(hns_roce_exit);

 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");