
hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/infiniband/hw/cxgb4/device.c
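Note on the pattern this diff adopts: the driver's per-device IDR tables (cqidr, qpidr, mmidr, hwtid_idr, atid_idr, stid_idr) and the external devp->lock spinlock are replaced by XArrays, whose built-in lock is taken with xa_lock_irq()/xa_lock_irqsave() and whose entries are walked with xa_for_each(), so the count_idrs()/dump_*() callback helpers disappear. The following is a minimal standalone sketch of that pattern only; struct demo_dev, demo_dev_init(), demo_add_qp() and demo_count_qps() are illustrative names and are not part of the driver.

#include <linux/xarray.h>

/* Hypothetical container, mirroring how c4iw_dev keeps its lookup tables. */
struct demo_dev {
	struct xarray qps;		/* indexed by queue id */
};

static void demo_dev_init(struct demo_dev *dev)
{
	/* XA_FLAGS_LOCK_IRQ: the xa_lock is IRQ-safe, as required in this diff */
	xa_init_flags(&dev->qps, XA_FLAGS_LOCK_IRQ);
}

static int demo_add_qp(struct demo_dev *dev, unsigned long qid, void *qp)
{
	/* xa_insert_irq() takes the xa_lock itself; -EBUSY if qid is in use */
	return xa_insert_irq(&dev->qps, qid, qp, GFP_KERNEL);
}

static unsigned long demo_count_qps(struct demo_dev *dev)
{
	unsigned long index, count = 0;
	void *qp;

	/* Unlocked walk, like the counting loops in qp_open()/stag_open() */
	xa_for_each(&dev->qps, index, qp)
		count++;
	return count;
}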
@@ -81,14 +81,6 @@
 	int pos;
 };
 
-static int count_idrs(int id, void *p, void *data)
-{
-	int *countp = data;
-
-	*countp = *countp + 1;
-	return 0;
-}
-
 static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
 			    loff_t *ppos)
 {
@@ -250,13 +242,11 @@
 	}
 }
 
-static int dump_qp(int id, void *p, void *data)
+static int dump_qp(unsigned long id, struct c4iw_qp *qp,
+		   struct c4iw_debugfs_data *qpd)
 {
-	struct c4iw_qp *qp = p;
-	struct c4iw_debugfs_data *qpd = data;
 	int space;
 	int cc;
-
 	if (id != qp->wq.sq.qid)
 		return 0;
 
@@ -335,19 +325,24 @@
 
 static int qp_open(struct inode *inode, struct file *file)
 {
+	struct c4iw_qp *qp;
 	struct c4iw_debugfs_data *qpd;
+	unsigned long index;
 	int count = 1;
 
-	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
+	qpd = kmalloc(sizeof(*qpd), GFP_KERNEL);
 	if (!qpd)
 		return -ENOMEM;
 
 	qpd->devp = inode->i_private;
 	qpd->pos = 0;
 
-	spin_lock_irq(&qpd->devp->lock);
-	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
-	spin_unlock_irq(&qpd->devp->lock);
+	/*
+	 * No need to lock; we drop the lock to call vmalloc so it's racy
+	 * anyway. Someone who cares should switch this over to seq_file
+	 */
+	xa_for_each(&qpd->devp->qps, index, qp)
+		count++;
 
 	qpd->bufsize = count * 180;
 	qpd->buf = vmalloc(qpd->bufsize);
@@ -356,9 +351,10 @@
 		return -ENOMEM;
 	}
 
-	spin_lock_irq(&qpd->devp->lock);
-	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
-	spin_unlock_irq(&qpd->devp->lock);
+	xa_lock_irq(&qpd->devp->qps);
+	xa_for_each(&qpd->devp->qps, index, qp)
+		dump_qp(index, qp, qpd);
+	xa_unlock_irq(&qpd->devp->qps);
 
 	qpd->buf[qpd->pos++] = 0;
 	file->private_data = qpd;
@@ -373,9 +369,8 @@
 	.llseek  = default_llseek,
 };
 
-static int dump_stag(int id, void *p, void *data)
+static int dump_stag(unsigned long id, struct c4iw_debugfs_data *stagd)
 {
-	struct c4iw_debugfs_data *stagd = data;
 	int space;
 	int cc;
 	struct fw_ri_tpte tpte;
@@ -424,10 +419,12 @@
 static int stag_open(struct inode *inode, struct file *file)
 {
 	struct c4iw_debugfs_data *stagd;
+	void *p;
+	unsigned long index;
 	int ret = 0;
 	int count = 1;
 
-	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
+	stagd = kmalloc(sizeof(*stagd), GFP_KERNEL);
 	if (!stagd) {
 		ret = -ENOMEM;
 		goto out;
@@ -435,9 +432,8 @@
 	stagd->devp = inode->i_private;
 	stagd->pos = 0;
 
-	spin_lock_irq(&stagd->devp->lock);
-	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
-	spin_unlock_irq(&stagd->devp->lock);
+	xa_for_each(&stagd->devp->mrs, index, p)
+		count++;
 
 	stagd->bufsize = count * 256;
 	stagd->buf = vmalloc(stagd->bufsize);
@@ -446,9 +442,10 @@
 		goto err1;
 	}
 
-	spin_lock_irq(&stagd->devp->lock);
-	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
-	spin_unlock_irq(&stagd->devp->lock);
+	xa_lock_irq(&stagd->devp->mrs);
+	xa_for_each(&stagd->devp->mrs, index, p)
+		dump_stag(index, stagd);
+	xa_unlock_irq(&stagd->devp->mrs);
 
 	stagd->buf[stagd->pos++] = 0;
 	file->private_data = stagd;
@@ -558,10 +555,8 @@
 	.write   = stats_clear,
 };
 
-static int dump_ep(int id, void *p, void *data)
+static int dump_ep(struct c4iw_ep *ep, struct c4iw_debugfs_data *epd)
 {
-	struct c4iw_ep *ep = p;
-	struct c4iw_debugfs_data *epd = data;
 	int space;
 	int cc;
 
@@ -617,10 +612,9 @@
 	return 0;
 }
 
-static int dump_listen_ep(int id, void *p, void *data)
+static
+int dump_listen_ep(struct c4iw_listen_ep *ep, struct c4iw_debugfs_data *epd)
 {
-	struct c4iw_listen_ep *ep = p;
-	struct c4iw_debugfs_data *epd = data;
 	int space;
 	int cc;
 
@@ -674,6 +668,9 @@
 
 static int ep_open(struct inode *inode, struct file *file)
 {
+	struct c4iw_ep *ep;
+	struct c4iw_listen_ep *lep;
+	unsigned long index;
 	struct c4iw_debugfs_data *epd;
 	int ret = 0;
 	int count = 1;
@@ -686,11 +683,12 @@
 	epd->devp = inode->i_private;
 	epd->pos = 0;
 
-	spin_lock_irq(&epd->devp->lock);
-	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
-	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
-	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
-	spin_unlock_irq(&epd->devp->lock);
+	xa_for_each(&epd->devp->hwtids, index, ep)
+		count++;
+	xa_for_each(&epd->devp->atids, index, ep)
+		count++;
+	xa_for_each(&epd->devp->stids, index, lep)
+		count++;
 
 	epd->bufsize = count * 240;
 	epd->buf = vmalloc(epd->bufsize);
@@ -699,11 +697,18 @@
 		goto err1;
 	}
 
-	spin_lock_irq(&epd->devp->lock);
-	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
-	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
-	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
-	spin_unlock_irq(&epd->devp->lock);
+	xa_lock_irq(&epd->devp->hwtids);
+	xa_for_each(&epd->devp->hwtids, index, ep)
+		dump_ep(ep, epd);
+	xa_unlock_irq(&epd->devp->hwtids);
+	xa_lock_irq(&epd->devp->atids);
+	xa_for_each(&epd->devp->atids, index, ep)
+		dump_ep(ep, epd);
+	xa_unlock_irq(&epd->devp->atids);
+	xa_lock_irq(&epd->devp->stids);
+	xa_for_each(&epd->devp->stids, index, lep)
+		dump_listen_ep(lep, epd);
+	xa_unlock_irq(&epd->devp->stids);
 
 	file->private_data = epd;
 	goto out;
@@ -720,11 +725,8 @@
 	.read    = debugfs_read,
 };
 
-static int setup_debugfs(struct c4iw_dev *devp)
+static void setup_debugfs(struct c4iw_dev *devp)
 {
-	if (!devp->debugfs_root)
-		return -1;
-
 	debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
 				 (void *)devp, &qp_debugfs_fops, 4096);
 
@@ -740,7 +742,6 @@
 	if (c4iw_wr_log)
 		debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root,
 					 (void *)devp, &wr_log_debugfs_fops, 4096);
-	return 0;
 }
 
 void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
@@ -783,6 +784,7 @@
 static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 {
 	int err;
+	unsigned int factor;
 
 	c4iw_init_dev_ucontext(rdev, &rdev->uctx);
 
@@ -806,8 +808,18 @@
 		return -EINVAL;
 	}
 
-	rdev->qpmask = rdev->lldi.udb_density - 1;
-	rdev->cqmask = rdev->lldi.ucq_density - 1;
+	/* This implementation requires a sge_host_page_size <= PAGE_SIZE. */
+	if (rdev->lldi.sge_host_page_size > PAGE_SIZE) {
+		pr_err("%s: unsupported sge host page size %u\n",
+		       pci_name(rdev->lldi.pdev),
+		       rdev->lldi.sge_host_page_size);
+		return -EINVAL;
+	}
+
+	factor = PAGE_SIZE / rdev->lldi.sge_host_page_size;
+	rdev->qpmask = (rdev->lldi.udb_density * factor) - 1;
+	rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1;
+
 	pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
 		 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
 		 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
@@ -924,16 +936,12 @@
 void c4iw_dealloc(struct uld_ctx *ctx)
 {
 	c4iw_rdev_close(&ctx->dev->rdev);
-	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
-	idr_destroy(&ctx->dev->cqidr);
-	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
-	idr_destroy(&ctx->dev->qpidr);
-	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
-	idr_destroy(&ctx->dev->mmidr);
-	wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
-	idr_destroy(&ctx->dev->hwtid_idr);
-	idr_destroy(&ctx->dev->stid_idr);
-	idr_destroy(&ctx->dev->atid_idr);
+	WARN_ON(!xa_empty(&ctx->dev->cqs));
+	WARN_ON(!xa_empty(&ctx->dev->qps));
+	WARN_ON(!xa_empty(&ctx->dev->mrs));
+	wait_event(ctx->dev->wait, xa_empty(&ctx->dev->hwtids));
+	WARN_ON(!xa_empty(&ctx->dev->stids));
+	WARN_ON(!xa_empty(&ctx->dev->atids));
 	if (ctx->dev->rdev.bar2_kva)
 		iounmap(ctx->dev->rdev.bar2_kva);
 	if (ctx->dev->rdev.oc_mw_kva)
@@ -971,7 +979,7 @@
 		pr_info("%s: On-Chip Queues not supported on this device\n",
 			pci_name(infop->pdev));
 
-	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
+	devp = ib_alloc_device(c4iw_dev, ibdev);
 	if (!devp) {
 		pr_err("Cannot allocate ib device\n");
 		return ERR_PTR(-ENOMEM);
@@ -1038,13 +1046,12 @@
 		return ERR_PTR(ret);
 	}
 
-	idr_init(&devp->cqidr);
-	idr_init(&devp->qpidr);
-	idr_init(&devp->mmidr);
-	idr_init(&devp->hwtid_idr);
-	idr_init(&devp->stid_idr);
-	idr_init(&devp->atid_idr);
-	spin_lock_init(&devp->lock);
+	xa_init_flags(&devp->cqs, XA_FLAGS_LOCK_IRQ);
+	xa_init_flags(&devp->qps, XA_FLAGS_LOCK_IRQ);
+	xa_init_flags(&devp->mrs, XA_FLAGS_LOCK_IRQ);
+	xa_init_flags(&devp->hwtids, XA_FLAGS_LOCK_IRQ);
+	xa_init_flags(&devp->atids, XA_FLAGS_LOCK_IRQ);
+	xa_init_flags(&devp->stids, XA_FLAGS_LOCK_IRQ);
 	mutex_init(&devp->rdev.stats.lock);
 	mutex_init(&devp->db_mutex);
 	INIT_LIST_HEAD(&devp->db_fc_list);
@@ -1072,7 +1079,7 @@
 	pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
 		DRV_VERSION);
 
-	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx) {
 		ctx = ERR_PTR(-ENOMEM);
 		goto out;
@@ -1240,10 +1247,9 @@
 	case CXGB4_STATE_START_RECOVERY:
 		pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev));
 		if (ctx->dev) {
-			struct ib_event event;
+			struct ib_event event = {};
 
 			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
-			memset(&event, 0, sizeof event);
 			event.event = IB_EVENT_DEVICE_FATAL;
 			event.device = &ctx->dev->ibdev;
 			ib_dispatch_event(&event);
@@ -1259,34 +1265,21 @@
 	return 0;
 }
 
-static int disable_qp_db(int id, void *p, void *data)
-{
-	struct c4iw_qp *qp = p;
-
-	t4_disable_wq_db(&qp->wq);
-	return 0;
-}
-
 static void stop_queues(struct uld_ctx *ctx)
 {
-	unsigned long flags;
+	struct c4iw_qp *qp;
+	unsigned long index, flags;
 
-	spin_lock_irqsave(&ctx->dev->lock, flags);
+	xa_lock_irqsave(&ctx->dev->qps, flags);
 	ctx->dev->rdev.stats.db_state_transitions++;
 	ctx->dev->db_state = STOPPED;
-	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
-		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
-	else
+	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
+		xa_for_each(&ctx->dev->qps, index, qp)
+			t4_disable_wq_db(&qp->wq);
+	} else {
 		ctx->dev->rdev.status_page->db_off = 1;
-	spin_unlock_irqrestore(&ctx->dev->lock, flags);
-}
-
-static int enable_qp_db(int id, void *p, void *data)
-{
-	struct c4iw_qp *qp = p;
-
-	t4_enable_wq_db(&qp->wq);
-	return 0;
+	}
+	xa_unlock_irqrestore(&ctx->dev->qps, flags);
 }
 
 static void resume_rc_qp(struct c4iw_qp *qp)
@@ -1316,18 +1309,21 @@
 
 static void resume_queues(struct uld_ctx *ctx)
 {
-	spin_lock_irq(&ctx->dev->lock);
+	xa_lock_irq(&ctx->dev->qps);
 	if (ctx->dev->db_state != STOPPED)
 		goto out;
 	ctx->dev->db_state = FLOW_CONTROL;
 	while (1) {
 		if (list_empty(&ctx->dev->db_fc_list)) {
+			struct c4iw_qp *qp;
+			unsigned long index;
+
 			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
 			ctx->dev->db_state = NORMAL;
 			ctx->dev->rdev.stats.db_state_transitions++;
 			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
-				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
-					     NULL);
+				xa_for_each(&ctx->dev->qps, index, qp)
+					t4_enable_wq_db(&qp->wq);
 			} else {
 				ctx->dev->rdev.status_page->db_off = 0;
 			}
@@ -1339,12 +1335,12 @@
 			resume_a_chunk(ctx);
 		}
 		if (!list_empty(&ctx->dev->db_fc_list)) {
-			spin_unlock_irq(&ctx->dev->lock);
+			xa_unlock_irq(&ctx->dev->qps);
 			if (DB_FC_RESUME_DELAY) {
 				set_current_state(TASK_UNINTERRUPTIBLE);
 				schedule_timeout(DB_FC_RESUME_DELAY);
 			}
-			spin_lock_irq(&ctx->dev->lock);
+			xa_lock_irq(&ctx->dev->qps);
 			if (ctx->dev->db_state != FLOW_CONTROL)
 				break;
 		}
@@ -1353,30 +1349,13 @@
 out:
 	if (ctx->dev->db_state != NORMAL)
 		ctx->dev->rdev.stats.db_fc_interruptions++;
-	spin_unlock_irq(&ctx->dev->lock);
+	xa_unlock_irq(&ctx->dev->qps);
 }
 
 struct qp_list {
 	unsigned idx;
 	struct c4iw_qp **qps;
 };
-
-static int add_and_ref_qp(int id, void *p, void *data)
-{
-	struct qp_list *qp_listp = data;
-	struct c4iw_qp *qp = p;
-
-	c4iw_qp_add_ref(&qp->ibqp);
-	qp_listp->qps[qp_listp->idx++] = qp;
-	return 0;
-}
-
-static int count_qps(int id, void *p, void *data)
-{
-	unsigned *countp = data;
-	(*countp)++;
-	return 0;
-}
 
 static void deref_qps(struct qp_list *qp_list)
 {
@@ -1394,7 +1373,7 @@
 	for (idx = 0; idx < qp_list->idx; idx++) {
 		struct c4iw_qp *qp = qp_list->qps[idx];
 
-		spin_lock_irq(&qp->rhp->lock);
+		xa_lock_irq(&qp->rhp->qps);
 		spin_lock(&qp->lock);
 		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
 					  qp->wq.sq.qid,
@@ -1404,7 +1383,7 @@
 			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
 			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
 			spin_unlock(&qp->lock);
-			spin_unlock_irq(&qp->rhp->lock);
+			xa_unlock_irq(&qp->rhp->qps);
 			return;
 		}
 		qp->wq.sq.wq_pidx_inc = 0;
@@ -1418,12 +1397,12 @@
 			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
 			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
 			spin_unlock(&qp->lock);
-			spin_unlock_irq(&qp->rhp->lock);
+			xa_unlock_irq(&qp->rhp->qps);
 			return;
 		}
 		qp->wq.rq.wq_pidx_inc = 0;
 		spin_unlock(&qp->lock);
-		spin_unlock_irq(&qp->rhp->lock);
+		xa_unlock_irq(&qp->rhp->qps);
 
 		/* Wait for the dbfifo to drain */
 		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
@@ -1435,6 +1414,8 @@
 
 static void recover_queues(struct uld_ctx *ctx)
 {
+	struct c4iw_qp *qp;
+	unsigned long index;
 	int count = 0;
 	struct qp_list qp_list;
 	int ret;
@@ -1452,22 +1433,26 @@
 	}
 
 	/* Count active queues so we can build a list of queues to recover */
-	spin_lock_irq(&ctx->dev->lock);
+	xa_lock_irq(&ctx->dev->qps);
 	WARN_ON(ctx->dev->db_state != STOPPED);
 	ctx->dev->db_state = RECOVERY;
-	idr_for_each(&ctx->dev->qpidr, count_qps, &count);
+	xa_for_each(&ctx->dev->qps, index, qp)
+		count++;
 
 	qp_list.qps = kcalloc(count, sizeof(*qp_list.qps), GFP_ATOMIC);
 	if (!qp_list.qps) {
-		spin_unlock_irq(&ctx->dev->lock);
+		xa_unlock_irq(&ctx->dev->qps);
 		return;
 	}
 	qp_list.idx = 0;
 
 	/* add and ref each qp so it doesn't get freed */
-	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);
+	xa_for_each(&ctx->dev->qps, index, qp) {
+		c4iw_qp_add_ref(&qp->ibqp);
+		qp_list.qps[qp_list.idx++] = qp;
+	}
 
-	spin_unlock_irq(&ctx->dev->lock);
+	xa_unlock_irq(&ctx->dev->qps);
 
 	/* now traverse the list in a safe context to recover the db state*/
 	recover_lost_dbs(ctx, &qp_list);
@@ -1476,10 +1461,10 @@
 	deref_qps(&qp_list);
 	kfree(qp_list.qps);
 
-	spin_lock_irq(&ctx->dev->lock);
+	xa_lock_irq(&ctx->dev->qps);
 	WARN_ON(ctx->dev->db_state != RECOVERY);
 	ctx->dev->db_state = STOPPED;
-	spin_unlock_irq(&ctx->dev->lock);
+	xa_unlock_irq(&ctx->dev->qps);
 }
 
 static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
@@ -1554,8 +1539,6 @@
 		return err;
 
 	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
-	if (!c4iw_debugfs_root)
-		pr_warn("could not create debugfs entry, continuing\n");
 
 	reg_workq = create_singlethread_workqueue("Register_iWARP_device");
 	if (!reg_workq) {