2024-05-14 bedbef8ad3e75a304af6361af235302bcc61d06b
kernel/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -37,6 +37,7 @@
 #include <rdma/ib_smi.h>
 #include <rdma/ib_umem.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/uverbs_ioctl.h>
 
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -117,16 +118,6 @@
         props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
         props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                            props->max_mcast_grp;
-        /*
-         * If Sinai memory key optimization is being used, then only
-         * the 8-bit key portion will change. For other HCAs, the
-         * unused index bits will also be used for FMR remapping.
-         */
-        if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
-                props->max_map_per_fmr = 255;
-        else
-                props->max_map_per_fmr =
-                        (1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;
 
         err = 0;
 out:
@@ -300,17 +291,16 @@
         return err;
 }
 
-static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
-                                                struct ib_udata *udata)
+static int mthca_alloc_ucontext(struct ib_ucontext *uctx,
+                                struct ib_udata *udata)
 {
-        struct mthca_alloc_ucontext_resp uresp;
-        struct mthca_ucontext *context;
+        struct ib_device *ibdev = uctx->device;
+        struct mthca_alloc_ucontext_resp uresp = {};
+        struct mthca_ucontext *context = to_mucontext(uctx);
         int err;
 
         if (!(to_mdev(ibdev)->active))
-                return ERR_PTR(-EAGAIN);
-
-        memset(&uresp, 0, sizeof uresp);
+                return -EAGAIN;
 
         uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
         if (mthca_is_memfree(to_mdev(ibdev)))
@@ -318,44 +308,33 @@
         else
                 uresp.uarc_size = 0;
 
-        context = kmalloc(sizeof *context, GFP_KERNEL);
-        if (!context)
-                return ERR_PTR(-ENOMEM);
-
         err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
-        if (err) {
-                kfree(context);
-                return ERR_PTR(err);
-        }
+        if (err)
+                return err;
 
         context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
         if (IS_ERR(context->db_tab)) {
                 err = PTR_ERR(context->db_tab);
                 mthca_uar_free(to_mdev(ibdev), &context->uar);
-                kfree(context);
-                return ERR_PTR(err);
+                return err;
         }
 
-        if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
+        if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
                 mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
                 mthca_uar_free(to_mdev(ibdev), &context->uar);
-                kfree(context);
-                return ERR_PTR(-EFAULT);
+                return -EFAULT;
         }
 
         context->reg_mr_warned = 0;
 
-        return &context->ibucontext;
+        return 0;
 }
 
-static int mthca_dealloc_ucontext(struct ib_ucontext *context)
+static void mthca_dealloc_ucontext(struct ib_ucontext *context)
 {
         mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
                                   to_mucontext(context)->db_tab);
         mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
-        kfree(to_mucontext(context));
-
-        return 0;
 }
 
 static int mthca_mmap_uar(struct ib_ucontext *context,
@@ -374,144 +353,109 @@
         return 0;
 }
 
-static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
-                                    struct ib_ucontext *context,
-                                    struct ib_udata *udata)
+static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
-        struct mthca_pd *pd;
+        struct ib_device *ibdev = ibpd->device;
+        struct mthca_pd *pd = to_mpd(ibpd);
         int err;
 
-        pd = kmalloc(sizeof *pd, GFP_KERNEL);
-        if (!pd)
-                return ERR_PTR(-ENOMEM);
+        err = mthca_pd_alloc(to_mdev(ibdev), !udata, pd);
+        if (err)
+                return err;
 
-        err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
-        if (err) {
-                kfree(pd);
-                return ERR_PTR(err);
-        }
-
-        if (context) {
+        if (udata) {
                 if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
                         mthca_pd_free(to_mdev(ibdev), pd);
-                        kfree(pd);
-                        return ERR_PTR(-EFAULT);
+                        return -EFAULT;
                 }
         }
 
-        return &pd->ibpd;
+        return 0;
 }
 
-static int mthca_dealloc_pd(struct ib_pd *pd)
+static int mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
         mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
-        kfree(pd);
-
         return 0;
 }
 
-static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
-                                     struct rdma_ah_attr *ah_attr,
-                                     struct ib_udata *udata)
+static int mthca_ah_create(struct ib_ah *ibah,
+                           struct rdma_ah_init_attr *init_attr,
+                           struct ib_udata *udata)
 
 {
-        int err;
-        struct mthca_ah *ah;
+        struct mthca_ah *ah = to_mah(ibah);
 
-        ah = kmalloc(sizeof *ah, GFP_ATOMIC);
-        if (!ah)
-                return ERR_PTR(-ENOMEM);
-
-        err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
-        if (err) {
-                kfree(ah);
-                return ERR_PTR(err);
-        }
-
-        return &ah->ibah;
+        return mthca_create_ah(to_mdev(ibah->device), to_mpd(ibah->pd),
                               init_attr->ah_attr, ah);
 }
 
-static int mthca_ah_destroy(struct ib_ah *ah)
+static int mthca_ah_destroy(struct ib_ah *ah, u32 flags)
 {
         mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
-        kfree(ah);
-
         return 0;
 }
 
-static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
-                                       struct ib_srq_init_attr *init_attr,
-                                       struct ib_udata *udata)
+static int mthca_create_srq(struct ib_srq *ibsrq,
+                            struct ib_srq_init_attr *init_attr,
+                            struct ib_udata *udata)
 {
         struct mthca_create_srq ucmd;
-        struct mthca_ucontext *context = NULL;
-        struct mthca_srq *srq;
+        struct mthca_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mthca_ucontext, ibucontext);
+        struct mthca_srq *srq = to_msrq(ibsrq);
         int err;
 
         if (init_attr->srq_type != IB_SRQT_BASIC)
-                return ERR_PTR(-EOPNOTSUPP);
+                return -EOPNOTSUPP;
 
-        srq = kmalloc(sizeof *srq, GFP_KERNEL);
-        if (!srq)
-                return ERR_PTR(-ENOMEM);
+        if (udata) {
+                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+                        return -EFAULT;
 
-        if (pd->uobject) {
-                context = to_mucontext(pd->uobject->context);
-
-                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
-                        err = -EFAULT;
-                        goto err_free;
-                }
-
-                err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+                err = mthca_map_user_db(to_mdev(ibsrq->device), &context->uar,
                                         context->db_tab, ucmd.db_index,
                                         ucmd.db_page);
 
                 if (err)
-                        goto err_free;
+                        return err;
 
                 srq->mr.ibmr.lkey = ucmd.lkey;
                 srq->db_index = ucmd.db_index;
         }
 
-        err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
-                              &init_attr->attr, srq);
+        err = mthca_alloc_srq(to_mdev(ibsrq->device), to_mpd(ibsrq->pd),
+                              &init_attr->attr, srq, udata);
 
-        if (err && pd->uobject)
-                mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
+        if (err && udata)
+                mthca_unmap_user_db(to_mdev(ibsrq->device), &context->uar,
                                     context->db_tab, ucmd.db_index);
 
         if (err)
-                goto err_free;
+                return err;
 
-        if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
-                mthca_free_srq(to_mdev(pd->device), srq);
-                err = -EFAULT;
-                goto err_free;
+        if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
+                mthca_free_srq(to_mdev(ibsrq->device), srq);
+                return -EFAULT;
         }
 
-        return &srq->ibsrq;
-
-err_free:
-        kfree(srq);
-
-        return ERR_PTR(err);
+        return 0;
 }
 
-static int mthca_destroy_srq(struct ib_srq *srq)
+static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 {
-        struct mthca_ucontext *context;
-
-        if (srq->uobject) {
-                context = to_mucontext(srq->uobject->context);
+        if (udata) {
+                struct mthca_ucontext *context =
                        rdma_udata_to_drv_context(
                                udata,
                                struct mthca_ucontext,
                                ibucontext);
 
                 mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
                                     context->db_tab, to_msrq(srq)->db_index);
         }
 
         mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
-        kfree(srq);
-
         return 0;
 }
 
@@ -519,6 +463,8 @@
                                      struct ib_qp_init_attr *init_attr,
                                      struct ib_udata *udata)
 {
+        struct mthca_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mthca_ucontext, ibucontext);
         struct mthca_create_qp ucmd;
         struct mthca_qp *qp;
         int err;
@@ -531,15 +477,11 @@
         case IB_QPT_UC:
         case IB_QPT_UD:
         {
-                struct mthca_ucontext *context;
-
                 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
                 if (!qp)
                         return ERR_PTR(-ENOMEM);
 
-                if (pd->uobject) {
-                        context = to_mucontext(pd->uobject->context);
-
+                if (udata) {
                         if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                                 kfree(qp);
                                 return ERR_PTR(-EFAULT);
@@ -574,11 +516,9 @@
                                      to_mcq(init_attr->send_cq),
                                      to_mcq(init_attr->recv_cq),
                                      init_attr->qp_type, init_attr->sq_sig_type,
-                                     &init_attr->cap, qp);
+                                     &init_attr->cap, qp, udata);
 
-                if (err && pd->uobject) {
-                        context = to_mucontext(pd->uobject->context);
-
+                if (err && udata) {
                         mthca_unmap_user_db(to_mdev(pd->device),
                                             &context->uar,
                                             context->db_tab,
@@ -595,13 +535,14 @@
         case IB_QPT_SMI:
         case IB_QPT_GSI:
         {
-                /* Don't allow userspace to create special QPs */
-                if (pd->uobject)
-                        return ERR_PTR(-EINVAL);
-
-                qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
+                qp = kzalloc(sizeof(*qp), GFP_KERNEL);
                 if (!qp)
                         return ERR_PTR(-ENOMEM);
+                qp->sqp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
+                if (!qp->sqp) {
+                        kfree(qp);
+                        return ERR_PTR(-ENOMEM);
+                }
 
                 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
 
@@ -610,15 +551,16 @@
                                       to_mcq(init_attr->recv_cq),
                                       init_attr->sq_sig_type, &init_attr->cap,
                                       qp->ibqp.qp_num, init_attr->port_num,
-                                      to_msqp(qp));
+                                      qp, udata);
                 break;
         }
         default:
                 /* Don't support raw QPs */
-                return ERR_PTR(-ENOSYS);
+                return ERR_PTR(-EOPNOTSUPP);
         }
 
         if (err) {
+                kfree(qp->sqp);
                 kfree(qp);
                 return ERR_PTR(err);
         }
@@ -632,64 +574,69 @@
         return &qp->ibqp;
 }
 
-static int mthca_destroy_qp(struct ib_qp *qp)
+static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 {
-        if (qp->uobject) {
+        if (udata) {
+                struct mthca_ucontext *context =
                        rdma_udata_to_drv_context(
                                udata,
                                struct mthca_ucontext,
                                ibucontext);
+
                 mthca_unmap_user_db(to_mdev(qp->device),
-                                    &to_mucontext(qp->uobject->context)->uar,
-                                    to_mucontext(qp->uobject->context)->db_tab,
+                                    &context->uar,
+                                    context->db_tab,
                                     to_mqp(qp)->sq.db_index);
                 mthca_unmap_user_db(to_mdev(qp->device),
-                                    &to_mucontext(qp->uobject->context)->uar,
-                                    to_mucontext(qp->uobject->context)->db_tab,
+                                    &context->uar,
+                                    context->db_tab,
                                     to_mqp(qp)->rq.db_index);
         }
         mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
-        kfree(qp);
+        kfree(to_mqp(qp)->sqp);
+        kfree(to_mqp(qp));
         return 0;
 }
 
-static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
-                                     const struct ib_cq_init_attr *attr,
-                                     struct ib_ucontext *context,
-                                     struct ib_udata *udata)
+static int mthca_create_cq(struct ib_cq *ibcq,
                           const struct ib_cq_init_attr *attr,
                           struct ib_udata *udata)
 {
+        struct ib_device *ibdev = ibcq->device;
         int entries = attr->cqe;
         struct mthca_create_cq ucmd;
         struct mthca_cq *cq;
         int nent;
         int err;
+        struct mthca_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mthca_ucontext, ibucontext);
 
         if (attr->flags)
-                return ERR_PTR(-EINVAL);
+                return -EINVAL;
 
         if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
-                return ERR_PTR(-EINVAL);
+                return -EINVAL;
 
-        if (context) {
-                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
-                        return ERR_PTR(-EFAULT);
+        if (udata) {
+                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+                        return -EFAULT;
 
-                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
-                                        to_mucontext(context)->db_tab,
-                                        ucmd.set_db_index, ucmd.set_db_page);
+                err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
                                        context->db_tab, ucmd.set_db_index,
                                        ucmd.set_db_page);
                 if (err)
-                        return ERR_PTR(err);
+                        return err;
 
-                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
-                                        to_mucontext(context)->db_tab,
-                                        ucmd.arm_db_index, ucmd.arm_db_page);
+                err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
                                        context->db_tab, ucmd.arm_db_index,
                                        ucmd.arm_db_page);
                 if (err)
                         goto err_unmap_set;
         }
 
-        cq = kmalloc(sizeof *cq, GFP_KERNEL);
-        if (!cq) {
-                err = -ENOMEM;
-                goto err_unmap_arm;
-        }
+        cq = to_mcq(ibcq);
 
-        if (context) {
+        if (udata) {
                 cq->buf.mr.ibmr.lkey = ucmd.lkey;
                 cq->set_ci_db_index = ucmd.set_db_index;
                 cq->arm_db_index = ucmd.arm_db_index;
@@ -698,37 +645,33 @@
         for (nent = 1; nent <= entries; nent <<= 1)
                 ; /* nothing */
 
-        err = mthca_init_cq(to_mdev(ibdev), nent,
-                            context ? to_mucontext(context) : NULL,
-                            context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
+        err = mthca_init_cq(to_mdev(ibdev), nent, context,
                            udata ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
                             cq);
         if (err)
-                goto err_free;
+                goto err_unmap_arm;
 
-        if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
+        if (udata && ib_copy_to_udata(udata, &cq->cqn, sizeof(__u32))) {
                 mthca_free_cq(to_mdev(ibdev), cq);
                 err = -EFAULT;
-                goto err_free;
+                goto err_unmap_arm;
         }
 
         cq->resize_buf = NULL;
 
-        return &cq->ibcq;
-
-err_free:
-        kfree(cq);
+        return 0;
 
 err_unmap_arm:
-        if (context)
-                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
-                                    to_mucontext(context)->db_tab, ucmd.arm_db_index);
+        if (udata)
+                mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
                                    context->db_tab, ucmd.arm_db_index);
 
 err_unmap_set:
-        if (context)
-                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
-                                    to_mucontext(context)->db_tab, ucmd.set_db_index);
+        if (udata)
+                mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
                                    context->db_tab, ucmd.set_db_index);
 
-        return ERR_PTR(err);
+        return err;
 }
 
 static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
@@ -852,21 +795,25 @@
         return ret;
 }
 
-static int mthca_destroy_cq(struct ib_cq *cq)
+static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
-        if (cq->uobject) {
+        if (udata) {
+                struct mthca_ucontext *context =
                        rdma_udata_to_drv_context(
                                udata,
                                struct mthca_ucontext,
                                ibucontext);
+
                 mthca_unmap_user_db(to_mdev(cq->device),
-                                    &to_mucontext(cq->uobject->context)->uar,
-                                    to_mucontext(cq->uobject->context)->db_tab,
+                                    &context->uar,
+                                    context->db_tab,
                                     to_mcq(cq)->arm_db_index);
                 mthca_unmap_user_db(to_mdev(cq->device),
-                                    &to_mucontext(cq->uobject->context)->uar,
-                                    to_mucontext(cq->uobject->context)->db_tab,
+                                    &context->uar,
+                                    context->db_tab,
                                     to_mcq(cq)->set_ci_db_index);
         }
         mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
-        kfree(cq);
-
         return 0;
 }
 
@@ -906,22 +853,23 @@
                                        u64 virt, int acc, struct ib_udata *udata)
 {
         struct mthca_dev *dev = to_mdev(pd->device);
-        struct scatterlist *sg;
+        struct ib_block_iter biter;
+        struct mthca_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mthca_ucontext, ibucontext);
         struct mthca_mr *mr;
         struct mthca_reg_mr ucmd;
         u64 *pages;
-        int shift, n, len;
-        int i, k, entry;
+        int n, i;
         int err = 0;
         int write_mtt_size;
 
         if (udata->inlen < sizeof ucmd) {
-                if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
+                if (!context->reg_mr_warned) {
                         mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
                                    current->comm);
                         mthca_warn(dev, " Update libmthca to fix this.\n");
                 }
-                ++to_mucontext(pd->uobject->context)->reg_mr_warned;
+                ++context->reg_mr_warned;
                 ucmd.mr_attrs = 0;
         } else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                 return ERR_PTR(-EFAULT);
@@ -930,16 +878,13 @@
         if (!mr)
                 return ERR_PTR(-ENOMEM);
 
-        mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
-                               ucmd.mr_attrs & MTHCA_MR_DMASYNC);
-
+        mr->umem = ib_umem_get(pd->device, start, length, acc);
         if (IS_ERR(mr->umem)) {
                 err = PTR_ERR(mr->umem);
                 goto err;
         }
 
-        shift = mr->umem->page_shift;
-        n = mr->umem->nmap;
+        n = ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE);
 
         mr->mtt = mthca_alloc_mtt(dev, n);
         if (IS_ERR(mr->mtt)) {
@@ -957,21 +902,19 @@
 
         write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));
 
-        for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
-                len = sg_dma_len(sg) >> shift;
-                for (k = 0; k < len; ++k) {
-                        pages[i++] = sg_dma_address(sg) + (k << shift);
-                        /*
-                         * Be friendly to write_mtt and pass it chunks
-                         * of appropriate size.
-                         */
-                        if (i == write_mtt_size) {
-                                err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
-                                if (err)
-                                        goto mtt_done;
-                                n += i;
-                                i = 0;
-                        }
+        rdma_umem_for_each_dma_block(mr->umem, &biter, PAGE_SIZE) {
+                pages[i++] = rdma_block_iter_dma_address(&biter);
+
+                /*
+                 * Be friendly to write_mtt and pass it chunks
+                 * of appropriate size.
+                 */
+                if (i == write_mtt_size) {
+                        err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
+                        if (err)
+                                goto mtt_done;
+                        n += i;
+                        i = 0;
                 }
         }
@@ -982,7 +925,7 @@
         if (err)
                 goto err_mtt;
 
-        err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
+        err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, PAGE_SHIFT, virt, length,
                              convert_access(acc), mr);
 
         if (err)
@@ -1001,94 +944,33 @@
         return ERR_PTR(err);
 }
 
-static int mthca_dereg_mr(struct ib_mr *mr)
+static int mthca_dereg_mr(struct ib_mr *mr, struct ib_udata *udata)
 {
         struct mthca_mr *mmr = to_mmr(mr);
 
         mthca_free_mr(to_mdev(mr->device), mmr);
-        if (mmr->umem)
-                ib_umem_release(mmr->umem);
+        ib_umem_release(mmr->umem);
         kfree(mmr);
 
         return 0;
 }
 
-static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
-                                      struct ib_fmr_attr *fmr_attr)
-{
-        struct mthca_fmr *fmr;
-        int err;
-
-        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
-        if (!fmr)
-                return ERR_PTR(-ENOMEM);
-
-        memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
-        err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
-                              convert_access(mr_access_flags), fmr);
-
-        if (err) {
-                kfree(fmr);
-                return ERR_PTR(err);
-        }
-
-        return &fmr->ibmr;
-}
-
-static int mthca_dealloc_fmr(struct ib_fmr *fmr)
-{
-        struct mthca_fmr *mfmr = to_mfmr(fmr);
-        int err;
-
-        err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
-        if (err)
-                return err;
-
-        kfree(mfmr);
-        return 0;
-}
-
-static int mthca_unmap_fmr(struct list_head *fmr_list)
-{
-        struct ib_fmr *fmr;
-        int err;
-        struct mthca_dev *mdev = NULL;
-
-        list_for_each_entry(fmr, fmr_list, list) {
-                if (mdev && to_mdev(fmr->device) != mdev)
-                        return -EINVAL;
-                mdev = to_mdev(fmr->device);
-        }
-
-        if (!mdev)
-                return 0;
-
-        if (mthca_is_memfree(mdev)) {
-                list_for_each_entry(fmr, fmr_list, list)
-                        mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));
-
-                wmb();
-        } else
-                list_for_each_entry(fmr, fmr_list, list)
-                        mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));
-
-        err = mthca_SYNC_TPT(mdev);
-        return err;
-}
-
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
-                        char *buf)
+static ssize_t hw_rev_show(struct device *device,
                           struct device_attribute *attr, char *buf)
 {
         struct mthca_dev *dev =
-                container_of(device, struct mthca_dev, ib_dev.dev);
+                rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
+
         return sprintf(buf, "%x\n", dev->rev_id);
 }
+static DEVICE_ATTR_RO(hw_rev);
 
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
-                        char *buf)
+static ssize_t hca_type_show(struct device *device,
                             struct device_attribute *attr, char *buf)
 {
         struct mthca_dev *dev =
-                container_of(device, struct mthca_dev, ib_dev.dev);
+                rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
+
         switch (dev->pdev->device) {
         case PCI_DEVICE_ID_MELLANOX_TAVOR:
                 return sprintf(buf, "MT23108\n");
@@ -1103,23 +985,27 @@
                 return sprintf(buf, "unknown\n");
         }
 }
+static DEVICE_ATTR_RO(hca_type);
 
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
-                          char *buf)
+static ssize_t board_id_show(struct device *device,
                             struct device_attribute *attr, char *buf)
 {
         struct mthca_dev *dev =
-                container_of(device, struct mthca_dev, ib_dev.dev);
+                rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
+
         return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
 }
+static DEVICE_ATTR_RO(board_id);
 
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *mthca_dev_attributes[] = {
+        &dev_attr_hw_rev.attr,
+        &dev_attr_hca_type.attr,
+        &dev_attr_board_id.attr,
+        NULL
+};
 
-static struct device_attribute *mthca_dev_attributes[] = {
-        &dev_attr_hw_rev,
-        &dev_attr_hca_type,
-        &dev_attr_board_id
+static const struct attribute_group mthca_attr_group = {
+        .attrs = mthca_dev_attributes,
 };
 
 static int mthca_init_node_data(struct mthca_dev *dev)
@@ -1189,19 +1075,89 @@
                  (int) dev->fw_ver & 0xffff);
 }
 
+static const struct ib_device_ops mthca_dev_ops = {
+        .owner = THIS_MODULE,
+        .driver_id = RDMA_DRIVER_MTHCA,
+        .uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION,
+        .uverbs_no_driver_id_binding = 1,
+
+        .alloc_pd = mthca_alloc_pd,
+        .alloc_ucontext = mthca_alloc_ucontext,
+        .attach_mcast = mthca_multicast_attach,
+        .create_ah = mthca_ah_create,
+        .create_cq = mthca_create_cq,
+        .create_qp = mthca_create_qp,
+        .dealloc_pd = mthca_dealloc_pd,
+        .dealloc_ucontext = mthca_dealloc_ucontext,
+        .dereg_mr = mthca_dereg_mr,
+        .destroy_ah = mthca_ah_destroy,
+        .destroy_cq = mthca_destroy_cq,
+        .destroy_qp = mthca_destroy_qp,
+        .detach_mcast = mthca_multicast_detach,
+        .get_dev_fw_str = get_dev_fw_str,
+        .get_dma_mr = mthca_get_dma_mr,
+        .get_port_immutable = mthca_port_immutable,
+        .mmap = mthca_mmap_uar,
+        .modify_device = mthca_modify_device,
+        .modify_port = mthca_modify_port,
+        .modify_qp = mthca_modify_qp,
+        .poll_cq = mthca_poll_cq,
+        .process_mad = mthca_process_mad,
+        .query_ah = mthca_ah_query,
+        .query_device = mthca_query_device,
+        .query_gid = mthca_query_gid,
+        .query_pkey = mthca_query_pkey,
+        .query_port = mthca_query_port,
+        .query_qp = mthca_query_qp,
+        .reg_user_mr = mthca_reg_user_mr,
+        .resize_cq = mthca_resize_cq,
+
+        INIT_RDMA_OBJ_SIZE(ib_ah, mthca_ah, ibah),
+        INIT_RDMA_OBJ_SIZE(ib_cq, mthca_cq, ibcq),
+        INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
+        INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext),
+};
+
+static const struct ib_device_ops mthca_dev_arbel_srq_ops = {
+        .create_srq = mthca_create_srq,
+        .destroy_srq = mthca_destroy_srq,
+        .modify_srq = mthca_modify_srq,
+        .post_srq_recv = mthca_arbel_post_srq_recv,
+        .query_srq = mthca_query_srq,
+
+        INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
+};
+
+static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
+        .create_srq = mthca_create_srq,
+        .destroy_srq = mthca_destroy_srq,
+        .modify_srq = mthca_modify_srq,
+        .post_srq_recv = mthca_tavor_post_srq_recv,
+        .query_srq = mthca_query_srq,
+
+        INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
+};
+
+static const struct ib_device_ops mthca_dev_arbel_ops = {
+        .post_recv = mthca_arbel_post_receive,
+        .post_send = mthca_arbel_post_send,
+        .req_notify_cq = mthca_arbel_arm_cq,
+};
+
+static const struct ib_device_ops mthca_dev_tavor_ops = {
+        .post_recv = mthca_tavor_post_receive,
+        .post_send = mthca_tavor_post_send,
+        .req_notify_cq = mthca_tavor_arm_cq,
+};
+
 int mthca_register_device(struct mthca_dev *dev)
 {
         int ret;
-        int i;
 
         ret = mthca_init_node_data(dev);
         if (ret)
                 return ret;
 
-        strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
-        dev->ib_dev.owner = THIS_MODULE;
-
-        dev->ib_dev.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION;
         dev->ib_dev.uverbs_cmd_mask =
                 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
                 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
@@ -1224,26 +1180,8 @@
         dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
         dev->ib_dev.num_comp_vectors = 1;
         dev->ib_dev.dev.parent = &dev->pdev->dev;
-        dev->ib_dev.query_device = mthca_query_device;
-        dev->ib_dev.query_port = mthca_query_port;
-        dev->ib_dev.modify_device = mthca_modify_device;
-        dev->ib_dev.modify_port = mthca_modify_port;
-        dev->ib_dev.query_pkey = mthca_query_pkey;
-        dev->ib_dev.query_gid = mthca_query_gid;
-        dev->ib_dev.alloc_ucontext = mthca_alloc_ucontext;
-        dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext;
-        dev->ib_dev.mmap = mthca_mmap_uar;
-        dev->ib_dev.alloc_pd = mthca_alloc_pd;
-        dev->ib_dev.dealloc_pd = mthca_dealloc_pd;
-        dev->ib_dev.create_ah = mthca_ah_create;
-        dev->ib_dev.query_ah = mthca_ah_query;
-        dev->ib_dev.destroy_ah = mthca_ah_destroy;
 
         if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
-                dev->ib_dev.create_srq = mthca_create_srq;
-                dev->ib_dev.modify_srq = mthca_modify_srq;
-                dev->ib_dev.query_srq = mthca_query_srq;
-                dev->ib_dev.destroy_srq = mthca_destroy_srq;
                 dev->ib_dev.uverbs_cmd_mask |=
                         (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
                         (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
@@ -1251,64 +1189,26 @@
                         (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
 
                 if (mthca_is_memfree(dev))
-                        dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
+                        ib_set_device_ops(&dev->ib_dev,
                                          &mthca_dev_arbel_srq_ops);
                 else
-                        dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
+                        ib_set_device_ops(&dev->ib_dev,
                                          &mthca_dev_tavor_srq_ops);
         }
 
-        dev->ib_dev.create_qp = mthca_create_qp;
-        dev->ib_dev.modify_qp = mthca_modify_qp;
-        dev->ib_dev.query_qp = mthca_query_qp;
-        dev->ib_dev.destroy_qp = mthca_destroy_qp;
-        dev->ib_dev.create_cq = mthca_create_cq;
-        dev->ib_dev.resize_cq = mthca_resize_cq;
-        dev->ib_dev.destroy_cq = mthca_destroy_cq;
-        dev->ib_dev.poll_cq = mthca_poll_cq;
-        dev->ib_dev.get_dma_mr = mthca_get_dma_mr;
-        dev->ib_dev.reg_user_mr = mthca_reg_user_mr;
-        dev->ib_dev.dereg_mr = mthca_dereg_mr;
-        dev->ib_dev.get_port_immutable = mthca_port_immutable;
-        dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
+        ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops);
 
-        if (dev->mthca_flags & MTHCA_FLAG_FMR) {
-                dev->ib_dev.alloc_fmr = mthca_alloc_fmr;
-                dev->ib_dev.unmap_fmr = mthca_unmap_fmr;
-                dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr;
-                if (mthca_is_memfree(dev))
-                        dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
-                else
-                        dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
-        }
-
-        dev->ib_dev.attach_mcast = mthca_multicast_attach;
-        dev->ib_dev.detach_mcast = mthca_multicast_detach;
-        dev->ib_dev.process_mad = mthca_process_mad;
-
-        if (mthca_is_memfree(dev)) {
-                dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
-                dev->ib_dev.post_send = mthca_arbel_post_send;
-                dev->ib_dev.post_recv = mthca_arbel_post_receive;
-        } else {
-                dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
-                dev->ib_dev.post_send = mthca_tavor_post_send;
-                dev->ib_dev.post_recv = mthca_tavor_post_receive;
-        }
+        if (mthca_is_memfree(dev))
+                ib_set_device_ops(&dev->ib_dev, &mthca_dev_arbel_ops);
+        else
+                ib_set_device_ops(&dev->ib_dev, &mthca_dev_tavor_ops);
 
         mutex_init(&dev->cap_mask_mutex);
 
-        dev->ib_dev.driver_id = RDMA_DRIVER_MTHCA;
-        ret = ib_register_device(&dev->ib_dev, NULL);
+        rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
+        ret = ib_register_device(&dev->ib_dev, "mthca%d", &dev->pdev->dev);
         if (ret)
                 return ret;
-
-        for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) {
-                ret = device_create_file(&dev->ib_dev.dev,
-                                         mthca_dev_attributes[i]);
-                if (ret) {
-                        ib_unregister_device(&dev->ib_dev);
-                        return ret;
-                }
-        }
 
         mthca_start_catas_poll(dev);
 