...
 	}
 };
 
-static int count_idrs(int id, void *p, void *data)
-{
-	int *countp = data;
-
-	*countp = *countp + 1;
-	return 0;
-}
-
 static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
 			    loff_t *ppos)
 {
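
Note: the removed count_idrs() helper existed only because idr_for_each()
walks entries through a per-entry callback. The XArray replacement,
xa_for_each(), is an ordinary loop macro, so the later hunks open-code the
count at each call site. A minimal sketch of the two idioms (xa, idr and
count are illustrative locals, not names from this patch):

	/* IDR: iteration needs a callback plus an opaque cookie. */
	static int count_cb(int id, void *entry, void *data)
	{
		(*(int *)data)++;
		return 0;	/* non-zero would abort the walk */
	}
	...
	idr_for_each(&idr, count_cb, &count);

	/* XArray: xa_for_each() expands to a normal for loop. */
	unsigned long index;
	void *entry;

	xa_for_each(&xa, index, entry)
		count++;
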
...
 	}
 }
 
-static int dump_qp(int id, void *p, void *data)
+static int dump_qp(unsigned long id, struct c4iw_qp *qp,
+		   struct c4iw_debugfs_data *qpd)
 {
-	struct c4iw_qp *qp = p;
-	struct c4iw_debugfs_data *qpd = data;
 	int space;
 	int cc;
-
 	if (id != qp->wq.sq.qid)
 		return 0;
 
...
 
 static int qp_open(struct inode *inode, struct file *file)
 {
+	struct c4iw_qp *qp;
 	struct c4iw_debugfs_data *qpd;
+	unsigned long index;
 	int count = 1;
 
-	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
+	qpd = kmalloc(sizeof(*qpd), GFP_KERNEL);
 	if (!qpd)
 		return -ENOMEM;
 
 	qpd->devp = inode->i_private;
 	qpd->pos = 0;
 
-	spin_lock_irq(&qpd->devp->lock);
-	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
-	spin_unlock_irq(&qpd->devp->lock);
+	/*
+	 * No need to lock; we drop the lock to call vmalloc so it's racy
+	 * anyway.  Someone who cares should switch this over to seq_file
+	 */
+	xa_for_each(&qpd->devp->qps, index, qp)
+		count++;
 
 	qpd->bufsize = count * 180;
 	qpd->buf = vmalloc(qpd->bufsize);
...
 		return -ENOMEM;
 	}
 
-	spin_lock_irq(&qpd->devp->lock);
-	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
-	spin_unlock_irq(&qpd->devp->lock);
+	xa_lock_irq(&qpd->devp->qps);
+	xa_for_each(&qpd->devp->qps, index, qp)
+		dump_qp(index, qp, qpd);
+	xa_unlock_irq(&qpd->devp->qps);
 
 	qpd->buf[qpd->pos++] = 0;
 	file->private_data = qpd;
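
Note: the locking also changes shape in qp_open(). The IDR required the
driver-global devp->lock spinlock to be held around idr_for_each(), while
an XArray embeds its own spinlock, taken here via xa_lock_irq() and
xa_unlock_irq(). These are thin wrappers over that embedded lock,
approximately (paraphrasing include/linux/xarray.h):

	#define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
	#define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)

Iterating under the lock is fine here because dump_qp() only formats into
the preallocated buffer and never sleeps.
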
...
 	.llseek  = default_llseek,
 };
 
-static int dump_stag(int id, void *p, void *data)
+static int dump_stag(unsigned long id, struct c4iw_debugfs_data *stagd)
 {
-	struct c4iw_debugfs_data *stagd = data;
 	int space;
 	int cc;
 	struct fw_ri_tpte tpte;
...
 static int stag_open(struct inode *inode, struct file *file)
 {
 	struct c4iw_debugfs_data *stagd;
+	void *p;
+	unsigned long index;
 	int ret = 0;
 	int count = 1;
 
-	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
+	stagd = kmalloc(sizeof(*stagd), GFP_KERNEL);
 	if (!stagd) {
 		ret = -ENOMEM;
 		goto out;
...
 	stagd->devp = inode->i_private;
 	stagd->pos = 0;
 
-	spin_lock_irq(&stagd->devp->lock);
-	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
-	spin_unlock_irq(&stagd->devp->lock);
+	xa_for_each(&stagd->devp->mrs, index, p)
+		count++;
 
 	stagd->bufsize = count * 256;
 	stagd->buf = vmalloc(stagd->bufsize);
...
 		goto err1;
 	}
 
-	spin_lock_irq(&stagd->devp->lock);
-	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
-	spin_unlock_irq(&stagd->devp->lock);
+	xa_lock_irq(&stagd->devp->mrs);
+	xa_for_each(&stagd->devp->mrs, index, p)
+		dump_stag(index, stagd);
+	xa_unlock_irq(&stagd->devp->mrs);
 
 	stagd->buf[stagd->pos++] = 0;
 	file->private_data = stagd;
...
 	.write   = stats_clear,
 };
 
-static int dump_ep(int id, void *p, void *data)
+static int dump_ep(struct c4iw_ep *ep, struct c4iw_debugfs_data *epd)
 {
-	struct c4iw_ep *ep = p;
-	struct c4iw_debugfs_data *epd = data;
 	int space;
 	int cc;
 
...
 	return 0;
 }
 
-static int dump_listen_ep(int id, void *p, void *data)
+static
+int dump_listen_ep(struct c4iw_listen_ep *ep, struct c4iw_debugfs_data *epd)
 {
-	struct c4iw_listen_ep *ep = p;
-	struct c4iw_debugfs_data *epd = data;
 	int space;
 	int cc;
 
...
 
 static int ep_open(struct inode *inode, struct file *file)
 {
+	struct c4iw_ep *ep;
+	struct c4iw_listen_ep *lep;
+	unsigned long index;
 	struct c4iw_debugfs_data *epd;
 	int ret = 0;
 	int count = 1;
...
 	epd->devp = inode->i_private;
 	epd->pos = 0;
 
-	spin_lock_irq(&epd->devp->lock);
-	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
-	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
-	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
-	spin_unlock_irq(&epd->devp->lock);
+	xa_for_each(&epd->devp->hwtids, index, ep)
+		count++;
+	xa_for_each(&epd->devp->atids, index, ep)
+		count++;
+	xa_for_each(&epd->devp->stids, index, lep)
+		count++;
 
 	epd->bufsize = count * 240;
 	epd->buf = vmalloc(epd->bufsize);
...
 		goto err1;
 	}
 
-	spin_lock_irq(&epd->devp->lock);
-	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
-	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
-	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
-	spin_unlock_irq(&epd->devp->lock);
+	xa_lock_irq(&epd->devp->hwtids);
+	xa_for_each(&epd->devp->hwtids, index, ep)
+		dump_ep(ep, epd);
+	xa_unlock_irq(&epd->devp->hwtids);
+	xa_lock_irq(&epd->devp->atids);
+	xa_for_each(&epd->devp->atids, index, ep)
+		dump_ep(ep, epd);
+	xa_unlock_irq(&epd->devp->atids);
+	xa_lock_irq(&epd->devp->stids);
+	xa_for_each(&epd->devp->stids, index, lep)
+		dump_listen_ep(lep, epd);
+	xa_unlock_irq(&epd->devp->stids);
 
 	file->private_data = epd;
 	goto out;
...
 	.read    = debugfs_read,
 };
 
-static int setup_debugfs(struct c4iw_dev *devp)
+static void setup_debugfs(struct c4iw_dev *devp)
 {
-	if (!devp->debugfs_root)
-		return -1;
-
 	debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
 				 (void *)devp, &qp_debugfs_fops, 4096);
 
...
 	if (c4iw_wr_log)
 		debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root,
 					 (void *)devp, &wr_log_debugfs_fops, 4096);
-	return 0;
 }
 
 void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
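
Note: setup_debugfs() can return void because debugfs creation errors are
intended to be ignored: debugfs_create_dir() returns an ERR_PTR rather
than NULL on failure, and passing that pointer as the parent simply makes
the child debugfs_create_file_size() calls fail quietly. Illustrative
usage under that convention (names mirror this file):

	dir = debugfs_create_dir("iw_cxgb4", NULL);	/* no check needed */
	debugfs_create_file_size("qps", S_IWUSR, dir, devp,
				 &qp_debugfs_fops, 4096);
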
...
 static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 {
 	int err;
+	unsigned int factor;
 
 	c4iw_init_dev_ucontext(rdev, &rdev->uctx);
 
...
 		return -EINVAL;
 	}
 
-	rdev->qpmask = rdev->lldi.udb_density - 1;
-	rdev->cqmask = rdev->lldi.ucq_density - 1;
+	/* This implementation requires a sge_host_page_size <= PAGE_SIZE. */
+	if (rdev->lldi.sge_host_page_size > PAGE_SIZE) {
+		pr_err("%s: unsupported sge host page size %u\n",
+		       pci_name(rdev->lldi.pdev),
+		       rdev->lldi.sge_host_page_size);
+		return -EINVAL;
+	}
+
+	factor = PAGE_SIZE / rdev->lldi.sge_host_page_size;
+	rdev->qpmask = (rdev->lldi.udb_density * factor) - 1;
+	rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1;
+
 	pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
 		 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
 		 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
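
Note: a worked example of the new mask computation, with illustrative
numbers not taken from this patch: on a 64 KB-page kernel with the SGE
configured for 4 KB host pages,

	factor = PAGE_SIZE / sge_host_page_size;	/* 65536 / 4096 = 16 */
	qpmask = (udb_density * factor) - 1;		/* 1 * 16 - 1 = 15 */

so sixteen consecutive QP ids would share one kernel page of user
doorbells, where the old "udb_density - 1" computation assumed the SGE
page size and the kernel page size were equal.
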
...
 void c4iw_dealloc(struct uld_ctx *ctx)
 {
 	c4iw_rdev_close(&ctx->dev->rdev);
-	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
-	idr_destroy(&ctx->dev->cqidr);
-	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
-	idr_destroy(&ctx->dev->qpidr);
-	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
-	idr_destroy(&ctx->dev->mmidr);
-	wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
-	idr_destroy(&ctx->dev->hwtid_idr);
-	idr_destroy(&ctx->dev->stid_idr);
-	idr_destroy(&ctx->dev->atid_idr);
+	WARN_ON(!xa_empty(&ctx->dev->cqs));
+	WARN_ON(!xa_empty(&ctx->dev->qps));
+	WARN_ON(!xa_empty(&ctx->dev->mrs));
+	wait_event(ctx->dev->wait, xa_empty(&ctx->dev->hwtids));
+	WARN_ON(!xa_empty(&ctx->dev->stids));
+	WARN_ON(!xa_empty(&ctx->dev->atids));
 	if (ctx->dev->rdev.bar2_kva)
 		iounmap(ctx->dev->rdev.bar2_kva);
 	if (ctx->dev->rdev.oc_mw_kva)
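
Note: the idr_destroy() calls have no XArray equivalent here because an
XArray from which every entry has been erased holds no memory; checking
xa_empty() is sufficient, and a non-empty array at this point would be a
leak, hence the WARN_ON()s. Only hwtids keeps the wait_event(), since
endpoint teardown completes asynchronously to ULD detach.
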
...
 		pr_info("%s: On-Chip Queues not supported on this device\n",
 			pci_name(infop->pdev));
 
-	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
+	devp = ib_alloc_device(c4iw_dev, ibdev);
 	if (!devp) {
 		pr_err("Cannot allocate ib device\n");
 		return ERR_PTR(-ENOMEM);
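
Note: ib_alloc_device() is now a macro taking the driver structure type
and the name of its embedded struct ib_device member, removing the cast
and letting the core size the allocation. Approximately (paraphrasing
include/rdma/ib_verbs.h):

	#define ib_alloc_device(drv_struct, member)                        \
		container_of(_ib_alloc_device(sizeof(struct drv_struct)), \
			     struct drv_struct, member)

The real macro additionally build-asserts that member sits at offset
zero, which is what made the old cast-based form work at all.
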
...
 		return ERR_PTR(ret);
 	}
 
-	idr_init(&devp->cqidr);
-	idr_init(&devp->qpidr);
-	idr_init(&devp->mmidr);
-	idr_init(&devp->hwtid_idr);
-	idr_init(&devp->stid_idr);
-	idr_init(&devp->atid_idr);
-	spin_lock_init(&devp->lock);
+	xa_init_flags(&devp->cqs, XA_FLAGS_LOCK_IRQ);
+	xa_init_flags(&devp->qps, XA_FLAGS_LOCK_IRQ);
+	xa_init_flags(&devp->mrs, XA_FLAGS_LOCK_IRQ);
+	xa_init_flags(&devp->hwtids, XA_FLAGS_LOCK_IRQ);
+	xa_init_flags(&devp->atids, XA_FLAGS_LOCK_IRQ);
+	xa_init_flags(&devp->stids, XA_FLAGS_LOCK_IRQ);
 	mutex_init(&devp->rdev.stats.lock);
 	mutex_init(&devp->db_mutex);
 	INIT_LIST_HEAD(&devp->db_fc_list);
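
Note: XA_FLAGS_LOCK_IRQ records that the embedded lock is taken with
interrupts disabled, so the XArray's internal lock-drop-and-retake paths
(and lockdep) use the matching _irq variants. Callers then either take
xa_lock_irq() explicitly, as the debugfs hunks above do, or use the _irq
entry points. A hypothetical insertion site consistent with this
initialization (not part of this hunk):

	/* xa_insert_irq() takes xa_lock_irq() internally and returns
	 * -EBUSY if the id is already present. */
	err = xa_insert_irq(&devp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
	if (err)
		goto err_free_queue;	/* hypothetical label */
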
...
 	pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
 		DRV_VERSION);
 
-	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx) {
 		ctx = ERR_PTR(-ENOMEM);
 		goto out;
...
 	case CXGB4_STATE_START_RECOVERY:
 		pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev));
 		if (ctx->dev) {
-			struct ib_event event;
+			struct ib_event event = {};
 
 			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
-			memset(&event, 0, sizeof event);
 			event.event  = IB_EVENT_DEVICE_FATAL;
 			event.device = &ctx->dev->ibdev;
 			ib_dispatch_event(&event);
...
 	return 0;
 }
 
-static int disable_qp_db(int id, void *p, void *data)
-{
-	struct c4iw_qp *qp = p;
-
-	t4_disable_wq_db(&qp->wq);
-	return 0;
-}
-
 static void stop_queues(struct uld_ctx *ctx)
 {
-	unsigned long flags;
+	struct c4iw_qp *qp;
+	unsigned long index, flags;
 
-	spin_lock_irqsave(&ctx->dev->lock, flags);
+	xa_lock_irqsave(&ctx->dev->qps, flags);
 	ctx->dev->rdev.stats.db_state_transitions++;
 	ctx->dev->db_state = STOPPED;
-	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
-		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
-	else
+	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
+		xa_for_each(&ctx->dev->qps, index, qp)
+			t4_disable_wq_db(&qp->wq);
+	} else {
 		ctx->dev->rdev.status_page->db_off = 1;
-	spin_unlock_irqrestore(&ctx->dev->lock, flags);
-}
-
-static int enable_qp_db(int id, void *p, void *data)
-{
-	struct c4iw_qp *qp = p;
-
-	t4_enable_wq_db(&qp->wq);
-	return 0;
+	}
+	xa_unlock_irqrestore(&ctx->dev->qps, flags);
 }
 
 static void resume_rc_qp(struct c4iw_qp *qp)
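
Note: stop_queues() shows the irqsave flavor; like the _irq helpers it
maps one-to-one onto the spinlock primitive over the embedded lock,
approximately:

	#define xa_lock_irqsave(xa, flags) \
				spin_lock_irqsave(&(xa)->xa_lock, flags)

The removed disable_qp_db() and enable_qp_db() callbacks follow the same
pattern as count_idrs() earlier: one-line bodies folded into
xa_for_each() loops at their only call sites.
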
...
 
 static void resume_queues(struct uld_ctx *ctx)
 {
-	spin_lock_irq(&ctx->dev->lock);
+	xa_lock_irq(&ctx->dev->qps);
 	if (ctx->dev->db_state != STOPPED)
 		goto out;
 	ctx->dev->db_state = FLOW_CONTROL;
 	while (1) {
 		if (list_empty(&ctx->dev->db_fc_list)) {
+			struct c4iw_qp *qp;
+			unsigned long index;
+
 			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
 			ctx->dev->db_state = NORMAL;
 			ctx->dev->rdev.stats.db_state_transitions++;
 			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
-				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
-					     NULL);
+				xa_for_each(&ctx->dev->qps, index, qp)
+					t4_enable_wq_db(&qp->wq);
 			} else {
 				ctx->dev->rdev.status_page->db_off = 0;
 			}
...
 			resume_a_chunk(ctx);
 		}
 		if (!list_empty(&ctx->dev->db_fc_list)) {
-			spin_unlock_irq(&ctx->dev->lock);
+			xa_unlock_irq(&ctx->dev->qps);
 			if (DB_FC_RESUME_DELAY) {
 				set_current_state(TASK_UNINTERRUPTIBLE);
 				schedule_timeout(DB_FC_RESUME_DELAY);
 			}
-			spin_lock_irq(&ctx->dev->lock);
+			xa_lock_irq(&ctx->dev->qps);
 			if (ctx->dev->db_state != FLOW_CONTROL)
 				break;
 		}
...
 out:
 	if (ctx->dev->db_state != NORMAL)
 		ctx->dev->rdev.stats.db_fc_interruptions++;
-	spin_unlock_irq(&ctx->dev->lock);
+	xa_unlock_irq(&ctx->dev->qps);
 }
 
 struct qp_list {
 	unsigned idx;
 	struct c4iw_qp **qps;
 };
-
-static int add_and_ref_qp(int id, void *p, void *data)
-{
-	struct qp_list *qp_listp = data;
-	struct c4iw_qp *qp = p;
-
-	c4iw_qp_add_ref(&qp->ibqp);
-	qp_listp->qps[qp_listp->idx++] = qp;
-	return 0;
-}
-
-static int count_qps(int id, void *p, void *data)
-{
-	unsigned *countp = data;
-	(*countp)++;
-	return 0;
-}
 
 static void deref_qps(struct qp_list *qp_list)
 {
...
 	for (idx = 0; idx < qp_list->idx; idx++) {
 		struct c4iw_qp *qp = qp_list->qps[idx];
 
-		spin_lock_irq(&qp->rhp->lock);
+		xa_lock_irq(&qp->rhp->qps);
 		spin_lock(&qp->lock);
 		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
 					  qp->wq.sq.qid,
...
 			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
 			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
 			spin_unlock(&qp->lock);
-			spin_unlock_irq(&qp->rhp->lock);
+			xa_unlock_irq(&qp->rhp->qps);
 			return;
 		}
 		qp->wq.sq.wq_pidx_inc = 0;
...
 			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
 			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
 			spin_unlock(&qp->lock);
-			spin_unlock_irq(&qp->rhp->lock);
+			xa_unlock_irq(&qp->rhp->qps);
 			return;
 		}
 		qp->wq.rq.wq_pidx_inc = 0;
 		spin_unlock(&qp->lock);
-		spin_unlock_irq(&qp->rhp->lock);
+		xa_unlock_irq(&qp->rhp->qps);
 
 		/* Wait for the dbfifo to drain */
 		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
...
 
 static void recover_queues(struct uld_ctx *ctx)
 {
+	struct c4iw_qp *qp;
+	unsigned long index;
 	int count = 0;
 	struct qp_list qp_list;
 	int ret;
...
 	}
 
 	/* Count active queues so we can build a list of queues to recover */
-	spin_lock_irq(&ctx->dev->lock);
+	xa_lock_irq(&ctx->dev->qps);
 	WARN_ON(ctx->dev->db_state != STOPPED);
 	ctx->dev->db_state = RECOVERY;
-	idr_for_each(&ctx->dev->qpidr, count_qps, &count);
+	xa_for_each(&ctx->dev->qps, index, qp)
+		count++;
 
 	qp_list.qps = kcalloc(count, sizeof(*qp_list.qps), GFP_ATOMIC);
 	if (!qp_list.qps) {
-		spin_unlock_irq(&ctx->dev->lock);
+		xa_unlock_irq(&ctx->dev->qps);
 		return;
 	}
 	qp_list.idx = 0;
 
 	/* add and ref each qp so it doesn't get freed */
-	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);
+	xa_for_each(&ctx->dev->qps, index, qp) {
+		c4iw_qp_add_ref(&qp->ibqp);
+		qp_list.qps[qp_list.idx++] = qp;
+	}
 
-	spin_unlock_irq(&ctx->dev->lock);
+	xa_unlock_irq(&ctx->dev->qps);
 
 	/* now traverse the list in a safe context to recover the db state*/
 	recover_lost_dbs(ctx, &qp_list);
...
 	deref_qps(&qp_list);
 	kfree(qp_list.qps);
 
-	spin_lock_irq(&ctx->dev->lock);
+	xa_lock_irq(&ctx->dev->qps);
 	WARN_ON(ctx->dev->db_state != RECOVERY);
 	ctx->dev->db_state = STOPPED;
-	spin_unlock_irq(&ctx->dev->lock);
+	xa_unlock_irq(&ctx->dev->qps);
 }
 
 static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
...
 		return err;
 
 	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
-	if (!c4iw_debugfs_root)
-		pr_warn("could not create debugfs entry, continuing\n");
 
 	reg_workq = create_singlethread_workqueue("Register_iWARP_device");
 	if (!reg_workq) {
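
Note: the dropped NULL check follows the same debugfs convention as the
setup_debugfs() change above: in kernels of this vintage
debugfs_create_dir() reports failure with an ERR_PTR, never NULL, so the
warning was dead code, and debugfs failures are not supposed to change
driver behavior in any case.
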
---|