2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/net/rds/ib_rdma.c
@@ -34,15 +34,18 @@
 #include <linux/slab.h>
 #include <linux/rculist.h>
 #include <linux/llist.h>
-#include <linux/delay.h>
 
 #include "rds_single_path.h"
 #include "ib_mr.h"
+#include "rds.h"
 
 struct workqueue_struct *rds_ib_mr_wq;
+struct rds_ib_dereg_odp_mr {
+	struct work_struct work;
+	struct ib_mr *mr;
+};
 
-static DEFINE_PER_CPU(unsigned long, clean_list_grace);
-#define CLEAN_LIST_BUSY_BIT 0
+static void rds_ib_odp_mr_worker(struct work_struct *work);
 
 static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
 {
@@ -178,7 +181,7 @@
 	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;
 
 	iinfo->rdma_mr_max = pool_1m->max_items;
-	iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
+	iinfo->rdma_mr_size = pool_1m->max_pages;
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -188,7 +191,7 @@
 	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;
 
 	iinfo6->rdma_mr_max = pool_1m->max_items;
-	iinfo6->rdma_mr_size = pool_1m->fmr_attr.max_pages;
+	iinfo6->rdma_mr_size = pool_1m->max_pages;
 }
 #endif
 
@@ -196,12 +199,11 @@
 {
 	struct rds_ib_mr *ibmr = NULL;
 	struct llist_node *ret;
-	unsigned long *flag;
+	unsigned long flags;
 
-	preempt_disable();
-	flag = this_cpu_ptr(&clean_list_grace);
-	set_bit(CLEAN_LIST_BUSY_BIT, flag);
+	spin_lock_irqsave(&pool->clean_lock, flags);
 	ret = llist_del_first(&pool->clean_list);
+	spin_unlock_irqrestore(&pool->clean_lock, flags);
 	if (ret) {
 		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
 		if (pool->pool_type == RDS_IB_MR_8K_POOL)
@@ -210,27 +212,16 @@
 			rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
 	}
 
-	clear_bit(CLEAN_LIST_BUSY_BIT, flag);
-	preempt_enable();
 	return ibmr;
-}
-
-static inline void wait_clean_list_grace(void)
-{
-	int cpu;
-	unsigned long *flag;
-
-	for_each_online_cpu(cpu) {
-		flag = &per_cpu(clean_list_grace, cpu);
-		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
-			cpu_chill();
-	}
 }
 
 void rds_ib_sync_mr(void *trans_private, int direction)
 {
 	struct rds_ib_mr *ibmr = trans_private;
 	struct rds_ib_device *rds_ibdev = ibmr->device;
+
+	if (ibmr->odp)
+		return;
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:
@@ -325,8 +316,7 @@
  * of clusters. Each cluster has linked llist nodes of
  * MR_CLUSTER_SIZE mrs that are ready for reuse.
  */
-static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
-				struct list_head *list,
+static void list_to_llist_nodes(struct list_head *list,
 				struct llist_node **nodes_head,
 				struct llist_node **nodes_tail)
 {
@@ -403,41 +393,36 @@
 	 */
 	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
 	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
-	if (free_all)
+	if (free_all) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&pool->clean_lock, flags);
 		llist_append_to_list(&pool->clean_list, &unmap_list);
+		spin_unlock_irqrestore(&pool->clean_lock, flags);
+	}
 
 	free_goal = rds_ib_flush_goal(pool, free_all);
 
 	if (list_empty(&unmap_list))
 		goto out;
 
-	if (pool->use_fastreg)
-		rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);
-	else
-		rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);
+	rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);
 
 	if (!list_empty(&unmap_list)) {
-		/* we have to make sure that none of the things we're about
-		 * to put on the clean list would race with other cpus trying
-		 * to pull items off. The llist would explode if we managed to
-		 * remove something from the clean list and then add it back again
-		 * while another CPU was spinning on that same item in llist_del_first.
-		 *
-		 * This is pretty unlikely, but just in case wait for an llist grace period
-		 * here before adding anything back into the clean list.
-		 */
-		wait_clean_list_grace();
+		unsigned long flags;
 
-		list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
+		list_to_llist_nodes(&unmap_list, &clean_nodes, &clean_tail);
 		if (ibmr_ret) {
 			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
 			clean_nodes = clean_nodes->next;
 		}
 		/* more than one entry in llist nodes */
-		if (clean_nodes)
+		if (clean_nodes) {
+			spin_lock_irqsave(&pool->clean_lock, flags);
 			llist_add_batch(clean_nodes, clean_tail,
 					&pool->clean_list);
-
+			spin_unlock_irqrestore(&pool->clean_lock, flags);
+		}
 	}
 
 	atomic_sub(unpinned, &pool->free_pinned);
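
Note on the hunk above: pool->clean_lock replaces the per-CPU CLEAN_LIST_BUSY_BIT grace period. llist_del_first() callers must not race with each other, nor with a re-add of a just-popped node (the ABA hazard the deleted comment described), so both the pop in rds_ib_reuse_mr() and the batch re-add here take the lock; the lock-free llist_add() producer path in rds_ib_free_mr() can stay unlocked. A minimal userspace sketch of the same locking pattern, with a pthread spinlock standing in for the kernel spinlock (illustrative only, not part of the patch):

#include <pthread.h>
#include <stddef.h>

struct node {
	struct node *next;
};

struct pool {
	struct node *clean_list;	/* stands in for struct llist_head */
	pthread_spinlock_t clean_lock;	/* stands in for pool->clean_lock */
};

/* Consumer side: mirrors rds_ib_reuse_mr() after the patch. */
static struct node *pool_reuse(struct pool *p)
{
	struct node *n;

	pthread_spin_lock(&p->clean_lock);
	n = p->clean_list;		/* llist_del_first() */
	if (n)
		p->clean_list = n->next;
	pthread_spin_unlock(&p->clean_lock);
	return n;
}

/* Flush path: mirrors the locked llist_add_batch() re-add above. */
static void pool_add_batch(struct pool *p, struct node *head,
			   struct node *tail)
{
	pthread_spin_lock(&p->clean_lock);
	tail->next = p->clean_list;
	p->clean_list = head;
	pthread_spin_unlock(&p->clean_lock);
}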
@@ -472,7 +457,7 @@
 				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
 			else
 				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
-			return ERR_PTR(-EAGAIN);
+			break;
 		}
 
 		/* We do have some empty MRs. Flush them out. */
@@ -486,7 +471,7 @@
 			return ibmr;
 	}
 
-	return ibmr;
+	return NULL;
 }
 
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
@@ -504,11 +489,18 @@
 
 	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);
 
+	if (ibmr->odp) {
+		/* An MR created and marked as use_once. We use delayed work
+		 * because there is a chance that we are in interrupt context
+		 * and can't call ib_dereg_mr() directly.
+		 */
+		INIT_DELAYED_WORK(&ibmr->work, rds_ib_odp_mr_worker);
+		queue_delayed_work(rds_ib_mr_wq, &ibmr->work, 0);
+		return;
+	}
+
 	/* Return it to the pool's free list */
-	if (rds_ibdev->use_fastreg)
-		rds_ib_free_frmr_list(ibmr);
-	else
-		rds_ib_free_fmr_list(ibmr);
+	rds_ib_free_frmr_list(ibmr);
 
 	atomic_add(ibmr->sg_len, &pool->free_pinned);
 	atomic_inc(&pool->dirty_count);
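
Note on the ibmr->odp branch above: ib_dereg_mr() may sleep, while rds_ib_free_mr() can be reached from interrupt context, so the deregistration is pushed onto rds_ib_mr_wq instead of being done inline. The shape of that pattern, reduced to a minimal kernel-style sketch (illustrative; release_may_sleep() is a hypothetical stand-in for ib_dereg_mr()):

#include <linux/workqueue.h>
#include <linux/slab.h>

extern void release_may_sleep(void *res);	/* hypothetical */

/* Mirrors the delayed_work the patch embeds in struct rds_ib_mr. */
struct deferred_release {
	struct delayed_work work;
	void *res;			/* stands in for ibmr->u.mr */
};

static void deferred_release_worker(struct work_struct *work)
{
	struct deferred_release *d =
		container_of(work, struct deferred_release, work.work);

	/* Workqueue (process) context: sleeping is allowed here. */
	release_may_sleep(d->res);
	kfree(d);
}

/* Safe from any context, including hard IRQ: only queues the work. */
static void deferred_release(struct deferred_release *d)
{
	INIT_DELAYED_WORK(&d->work, deferred_release_worker);
	queue_delayed_work(system_wq, &d->work, 0);
}

The zero delay keeps the delayed-work plumbing while running the worker as soon as a kworker is available.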
@@ -548,9 +540,17 @@
 	up_read(&rds_ib_devices_lock);
 }
 
+u32 rds_ib_get_lkey(void *trans_private)
+{
+	struct rds_ib_mr *ibmr = trans_private;
+
+	return ibmr->u.mr->lkey;
+}
+
 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
 		    struct rds_sock *rs, u32 *key_ret,
-		    struct rds_connection *conn)
+		    struct rds_connection *conn,
+		    u64 start, u64 length, int need_odp)
 {
 	struct rds_ib_device *rds_ibdev;
 	struct rds_ib_mr *ibmr = NULL;
@@ -563,6 +563,51 @@
 		goto out;
 	}
 
+	if (need_odp == ODP_ZEROBASED || need_odp == ODP_VIRTUAL) {
+		u64 virt_addr = need_odp == ODP_ZEROBASED ? 0 : start;
+		int access_flags =
+			(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
+			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC |
+			 IB_ACCESS_ON_DEMAND);
+		struct ib_sge sge = {};
+		struct ib_mr *ib_mr;
+
+		if (!rds_ibdev->odp_capable) {
+			ret = -EOPNOTSUPP;
+			goto out;
+		}
+
+		ib_mr = ib_reg_user_mr(rds_ibdev->pd, start, length, virt_addr,
+				       access_flags);
+
+		if (IS_ERR(ib_mr)) {
+			rdsdebug("ib_reg_user_mr returned %ld\n",
				 PTR_ERR(ib_mr));
+			ret = PTR_ERR(ib_mr);
+			goto out;
+		}
+		if (key_ret)
+			*key_ret = ib_mr->rkey;
+
+		ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
+		if (!ibmr) {
+			ib_dereg_mr(ib_mr);
+			ret = -ENOMEM;
+			goto out;
+		}
+		ibmr->u.mr = ib_mr;
+		ibmr->odp = 1;
+
+		sge.addr = virt_addr;
+		sge.length = length;
+		sge.lkey = ib_mr->lkey;
+
+		ib_advise_mr(rds_ibdev->pd,
+			     IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
+			     IB_UVERBS_ADVISE_MR_FLAG_FLUSH, &sge, 1);
+		return ibmr;
+	}
+
 	if (conn)
 		ic = conn->c_transport_data;
 
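
For reference, the registration recipe the new branch uses — an on-demand-paging access flag plus a prefetch advise so the first RDMA access does not page-fault — has a direct userspace analogue in libibverbs. A minimal sketch, assuming the device reports ODP support and with error handling trimmed (not part of the patch):

#include <stdint.h>
#include <stddef.h>
#include <infiniband/verbs.h>

static struct ibv_mr *reg_odp_prefetched(struct ibv_pd *pd, void *buf,
					 size_t len)
{
	struct ibv_sge sge;
	struct ibv_mr *mr;

	mr = ibv_reg_mr(pd, buf, len,
			IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ |
			IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_ON_DEMAND);
	if (!mr)
		return NULL;

	sge.addr = (uintptr_t)buf;
	sge.length = (uint32_t)len;
	sge.lkey = mr->lkey;

	/* Best effort, like the kernel path: a failure here only means
	 * the first access faults instead of being pre-populated. */
	ibv_advise_mr(pd, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE,
		      IBV_ADVISE_MR_FLAG_FLUSH, &sge, 1);
	return mr;
}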
@@ -571,10 +616,7 @@
 		goto out;
 	}
 
-	if (rds_ibdev->use_fastreg)
-		ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
-	else
-		ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
+	ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
 	if (IS_ERR(ibmr)) {
 		ret = PTR_ERR(ibmr);
 		pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
@@ -611,25 +653,23 @@
 	init_llist_head(&pool->free_list);
 	init_llist_head(&pool->drop_list);
 	init_llist_head(&pool->clean_list);
+	spin_lock_init(&pool->clean_lock);
 	mutex_init(&pool->flush_lock);
 	init_waitqueue_head(&pool->flush_wait);
 	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
 
 	if (pool_type == RDS_IB_MR_1M_POOL) {
 		/* +1 allows for unaligned MRs */
-		pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1;
+		pool->max_pages = RDS_MR_1M_MSG_SIZE + 1;
 		pool->max_items = rds_ibdev->max_1m_mrs;
 	} else {
 		/* pool_type == RDS_IB_MR_8K_POOL */
-		pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1;
+		pool->max_pages = RDS_MR_8K_MSG_SIZE + 1;
 		pool->max_items = rds_ibdev->max_8k_mrs;
 	}
 
-	pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
-	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
-	pool->fmr_attr.page_shift = PAGE_SHIFT;
+	pool->max_free_pinned = pool->max_items * pool->max_pages / 4;
 	pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
-	pool->use_fastreg = rds_ibdev->use_fastreg;
 
 	return pool;
 }
@@ -650,3 +690,12 @@
 {
 	destroy_workqueue(rds_ib_mr_wq);
 }
+
+static void rds_ib_odp_mr_worker(struct work_struct *work)
+{
+	struct rds_ib_mr *ibmr;
+
+	ibmr = container_of(work, struct rds_ib_mr, work.work);
+	ib_dereg_mr(ibmr->u.mr);
+	kfree(ibmr);
+}