hc
2024-05-14 bedbef8ad3e75a304af6361af235302bcc61d06b
kernel/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1,5 +1,6 @@
 /*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2020 - Cornelis Networks, Inc.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -64,7 +65,6 @@
 
 #include "hfi.h"
 #include "sdma.h"
-#include "mmu_rb.h"
 #include "user_sdma.h"
 #include "verbs.h"  /* for the headers */
 #include "common.h" /* for struct hfi1_tid_info */
@@ -76,15 +76,10 @@
 
 static unsigned initial_pkt_count = 8;
 
-static int user_sdma_send_pkts(struct user_sdma_request *req,
-			       unsigned maxpkts);
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
-static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
-static int pin_vector_pages(struct user_sdma_request *req,
-			    struct user_sdma_iovec *iovec);
-static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
-			       unsigned start, unsigned npages);
+static void user_sdma_free_request(struct user_sdma_request *req);
 static int check_header_template(struct user_sdma_request *req,
 				 struct hfi1_pkt_header *hdr, u32 lrhlen,
 				 u32 datalen);
@@ -101,39 +96,40 @@
 
 static int defer_packet_queue(
 	struct sdma_engine *sde,
-	struct iowait *wait,
+	struct iowait_work *wait,
 	struct sdma_txreq *txreq,
 	uint seq,
 	bool pkts_sent);
 static void activate_packet_queue(struct iowait *wait, int reason);
 static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
 			   unsigned long len);
-static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode);
 static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
 			 void *arg2, bool *stop);
 static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
-static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
 
 static struct mmu_rb_ops sdma_rb_ops = {
 	.filter = sdma_rb_filter,
-	.insert = sdma_rb_insert,
 	.evict = sdma_rb_evict,
 	.remove = sdma_rb_remove,
-	.invalidate = sdma_rb_invalidate
 };
+
+static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
+					   struct user_sdma_txreq *tx,
+					   struct user_sdma_iovec *iovec,
+					   u32 *pkt_remaining);
 
 static int defer_packet_queue(
 	struct sdma_engine *sde,
-	struct iowait *wait,
+	struct iowait_work *wait,
 	struct sdma_txreq *txreq,
 	uint seq,
 	bool pkts_sent)
 {
 	struct hfi1_user_sdma_pkt_q *pq =
-		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
-	struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
+		container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
 
-	write_seqlock(&dev->iowait_lock);
+	write_seqlock(&sde->waitlock);
+	trace_hfi1_usdma_defer(pq, sde, &pq->busy);
 	if (sdma_progress(sde, seq, txreq))
 		goto eagain;
 	/*
@@ -142,12 +138,15 @@
 	 * it is supposed to be enqueued.
 	 */
 	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
-	if (list_empty(&pq->busy.list))
+	if (list_empty(&pq->busy.list)) {
+		pq->busy.lock = &sde->waitlock;
+		iowait_get_priority(&pq->busy);
 		iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
-	write_sequnlock(&dev->iowait_lock);
+	}
+	write_sequnlock(&sde->waitlock);
 	return -EBUSY;
 eagain:
-	write_sequnlock(&dev->iowait_lock);
+	write_sequnlock(&sde->waitlock);
 	return -EAGAIN;
 }
 
@@ -155,6 +154,8 @@
 {
 	struct hfi1_user_sdma_pkt_q *pq =
 		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
+
+	trace_hfi1_usdma_activate(pq, wait, reason);
 	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
 	wake_up(&wait->wait_dma);
 };
@@ -186,10 +187,9 @@
 	atomic_set(&pq->n_reqs, 0);
 	init_waitqueue_head(&pq->wait);
 	atomic_set(&pq->n_locked, 0);
-	pq->mm = fd->mm;
 
-	iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
-		    activate_packet_queue, NULL);
+	iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
+		    activate_packet_queue, NULL, NULL);
 	pq->reqidx = 0;
 
 	pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
@@ -198,9 +198,7 @@
 	if (!pq->reqs)
 		goto pq_reqs_nomem;
 
-	pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
-				 sizeof(*pq->req_in_use),
-				 GFP_KERNEL);
+	pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL);
 	if (!pq->req_in_use)
 		goto pq_reqs_no_in_use;
 
@@ -228,7 +226,7 @@
 
 	cq->nentries = hfi1_sdma_comp_ring_size;
 
-	ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
+	ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
				   &pq->handler);
 	if (ret) {
 		dd_dev_err(dd, "Failed to register with MMU %d", ret);
@@ -247,13 +245,28 @@
 cq_nomem:
 	kmem_cache_destroy(pq->txreq_cache);
 pq_txreq_nomem:
-	kfree(pq->req_in_use);
+	bitmap_free(pq->req_in_use);
 pq_reqs_no_in_use:
 	kfree(pq->reqs);
 pq_reqs_nomem:
 	kfree(pq);
 
 	return ret;
+}
+
+static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq)
+{
+	unsigned long flags;
+	seqlock_t *lock = pq->busy.lock;
+
+	if (!lock)
+		return;
+	write_seqlock_irqsave(lock, flags);
+	if (!list_empty(&pq->busy.list)) {
+		list_del_init(&pq->busy.list);
+		pq->busy.lock = NULL;
+	}
+	write_sequnlock_irqrestore(lock, flags);
 }
 
 int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
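The new flush_pq_iowait() above only works because defer_packet_queue() now records, in pq->busy.lock, which seqlock guards the wait list the packet queue was parked on. A rough userspace model of that idea follows (pthreads and a singly linked list; every name here is invented for illustration, and the kernel uses the iowait/seqlock list primitives instead):

/*
 * Userspace sketch, not kernel code: a waiter remembers which lock protects
 * the list it is queued on, so a later flush can detach it under that same
 * lock, and a second flush becomes a harmless no-op.
 */
#include <pthread.h>
#include <stdio.h>

struct waiter {
	struct waiter *next;
	pthread_mutex_t *lock;	/* lock of the list we are queued on, or NULL */
};

struct waitlist {
	pthread_mutex_t lock;
	struct waiter *head;
};

static void waiter_enqueue(struct waitlist *wl, struct waiter *w)
{
	pthread_mutex_lock(&wl->lock);
	w->next = wl->head;
	wl->head = w;
	w->lock = &wl->lock;	/* remember who protects our membership */
	pthread_mutex_unlock(&wl->lock);
}

/*
 * Mirrors the shape of flush_pq_iowait().  The kernel version does not need
 * the list head because list_del_init() unlinks the node directly; the list
 * head is passed here only to keep the model singly linked.
 */
static void waiter_flush(struct waitlist *wl, struct waiter *w)
{
	pthread_mutex_t *lock = w->lock;

	if (!lock)
		return;
	pthread_mutex_lock(lock);
	for (struct waiter **pp = &wl->head; *pp; pp = &(*pp)->next) {
		if (*pp == w) {
			*pp = w->next;
			w->lock = NULL;
			break;
		}
	}
	pthread_mutex_unlock(lock);
}

int main(void)
{
	struct waitlist wl = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct waiter w = { 0 };

	waiter_enqueue(&wl, &w);
	waiter_flush(&wl, &w);	/* detaches w under wl.lock */
	waiter_flush(&wl, &w);	/* no-op: w->lock was cleared */
	printf("still queued: %s\n", wl.head ? "yes" : "no");
	return 0;
}

Clearing the recorded lock pointer on detach is what makes it safe to call the flush both from the timeout path and again from queue teardown.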
@@ -271,16 +284,17 @@
 		spin_unlock(&fd->pq_rcu_lock);
 		synchronize_srcu(&fd->pq_srcu);
 		/* at this point there can be no more new requests */
-		if (pq->handler)
-			hfi1_mmu_rb_unregister(pq->handler);
 		iowait_sdma_drain(&pq->busy);
 		/* Wait until all requests have been freed. */
 		wait_event_interruptible(
 			pq->wait,
 			!atomic_read(&pq->n_reqs));
 		kfree(pq->reqs);
-		kfree(pq->req_in_use);
+		if (pq->handler)
+			hfi1_mmu_rb_unregister(pq->handler);
+		bitmap_free(pq->req_in_use);
 		kmem_cache_destroy(pq->txreq_cache);
+		flush_pq_iowait(pq);
 		kfree(pq);
 	} else {
 		spin_unlock(&fd->pq_rcu_lock);
@@ -433,6 +447,7 @@
 		ret = -EINVAL;
 		goto free_req;
 	}
+
 	/* Copy the header from the user buffer */
 	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
 			     sizeof(req->hdr));
@@ -507,9 +522,8 @@
 		memcpy(&req->iovs[i].iov,
 		       iovec + idx++,
 		       sizeof(req->iovs[i].iov));
-		ret = pin_vector_pages(req, &req->iovs[i]);
-		if (ret) {
-			req->data_iovs = i;
+		if (req->iovs[i].iov.iov_len == 0) {
+			ret = -EINVAL;
 			goto free_req;
 		}
 		req->data_len += req->iovs[i].iov.iov_len;
@@ -571,10 +585,6 @@
 
 	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
 	pq->state = SDMA_PKT_Q_ACTIVE;
-	/* Send the first N packets in the request to buy us some time */
-	ret = user_sdma_send_pkts(req, pcount);
-	if (unlikely(ret < 0 && ret != -EBUSY))
-		goto free_req;
 
 	/*
 	 * This is a somewhat blocking send implementation.
@@ -585,13 +595,18 @@
 	while (req->seqsubmitted != req->info.npkts) {
 		ret = user_sdma_send_pkts(req, pcount);
 		if (ret < 0) {
+			int we_ret;
+
 			if (ret != -EBUSY)
 				goto free_req;
-			wait_event_interruptible_timeout(
+			we_ret = wait_event_interruptible_timeout(
 				pq->busy.wait_dma,
-				(pq->state == SDMA_PKT_Q_ACTIVE),
+				pq->state == SDMA_PKT_Q_ACTIVE,
 				msecs_to_jiffies(
 					SDMA_IOWAIT_TIMEOUT));
+			trace_hfi1_usdma_we(pq, we_ret);
+			if (we_ret <= 0)
+				flush_pq_iowait(pq);
 		}
 	}
 	*count += idx;
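The submit loop above now looks at the return value of wait_event_interruptible_timeout(): a positive value means the queue went back to SDMA_PKT_Q_ACTIVE in time, while zero (timeout) or a negative value (interrupted) leaves the queue possibly still parked on an engine's wait list, which is why we_ret <= 0 triggers flush_pq_iowait(). A minimal userspace analogue of that bounded wait, using pthreads (the shape of the result is the point; all names are invented here):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct pkt_q {
	pthread_mutex_t lock;
	pthread_cond_t wait_dma;
	int active;			/* models SDMA_PKT_Q_ACTIVE vs. deferred */
};

/* Returns 1 if the queue became active, 0 on timeout. */
static int wait_active_timeout(struct pkt_q *pq, int timeout_ms)
{
	struct timespec ts;
	int ret = 1;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&pq->lock);
	while (!pq->active) {
		if (pthread_cond_timedwait(&pq->wait_dma, &pq->lock, &ts) == ETIMEDOUT) {
			ret = pq->active ? 1 : 0;	/* re-check the predicate once */
			break;
		}
	}
	pthread_mutex_unlock(&pq->lock);
	return ret;
}

int main(void)
{
	struct pkt_q pq = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait_dma = PTHREAD_COND_INITIALIZER,
		.active = 0,
	};

	/* Nothing marks the queue active here, so this times out -- the case
	 * in which the driver calls flush_pq_iowait() before retrying. */
	printf("wait result: %d\n", wait_active_timeout(&pq, 100));
	return 0;
}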
@@ -606,7 +621,7 @@
 	if (req->seqsubmitted)
 		wait_event(pq->busy.wait_dma,
 			   (req->seqcomp == req->seqsubmitted - 1));
-	user_sdma_free_request(req, true);
+	user_sdma_free_request(req);
 	pq_update(pq);
 	set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
 }
@@ -718,51 +733,10 @@
 	return ret;
 }
 
-static int user_sdma_txadd(struct user_sdma_request *req,
-			   struct user_sdma_txreq *tx,
-			   struct user_sdma_iovec *iovec, u32 datalen,
-			   u32 *queued_ptr, u32 *data_sent_ptr,
-			   u64 *iov_offset_ptr)
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
 {
-	int ret;
-	unsigned int pageidx, len;
-	unsigned long base, offset;
-	u64 iov_offset = *iov_offset_ptr;
-	u32 queued = *queued_ptr, data_sent = *data_sent_ptr;
-	struct hfi1_user_sdma_pkt_q *pq = req->pq;
-
-	base = (unsigned long)iovec->iov.iov_base;
-	offset = offset_in_page(base + iovec->offset + iov_offset);
-	pageidx = (((iovec->offset + iov_offset + base) - (base & PAGE_MASK)) >>
-		   PAGE_SHIFT);
-	len = offset + req->info.fragsize > PAGE_SIZE ?
-		PAGE_SIZE - offset : req->info.fragsize;
-	len = min((datalen - queued), len);
-	ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],
-			      offset, len);
-	if (ret) {
-		SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret);
-		return ret;
-	}
-	iov_offset += len;
-	queued += len;
-	data_sent += len;
-	if (unlikely(queued < datalen && pageidx == iovec->npages &&
-		     req->iov_idx < req->data_iovs - 1)) {
-		iovec->offset += iov_offset;
-		iovec = &req->iovs[++req->iov_idx];
-		iov_offset = 0;
-	}
-
-	*queued_ptr = queued;
-	*data_sent_ptr = data_sent;
-	*iov_offset_ptr = iov_offset;
-	return ret;
-}
-
-static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
-{
-	int ret = 0, count;
+	int ret = 0;
+	u16 count;
 	unsigned npkts = 0;
 	struct user_sdma_txreq *tx = NULL;
 	struct hfi1_user_sdma_pkt_q *pq = NULL;
@@ -790,8 +764,7 @@
 		maxpkts = req->info.npkts - req->seqnum;
 
 	while (npkts < maxpkts) {
-		u32 datalen = 0, queued = 0, data_sent = 0;
-		u64 iov_offset = 0;
+		u32 datalen = 0;
 
 		/*
 		 * Check whether any of the completions have come back
@@ -884,27 +857,17 @@
 			goto free_txreq;
 		}
 
-		/*
-		 * If the request contains any data vectors, add up to
-		 * fragsize bytes to the descriptor.
-		 */
-		while (queued < datalen &&
-		       (req->sent + data_sent) < req->data_len) {
-			ret = user_sdma_txadd(req, tx, iovec, datalen,
-					      &queued, &data_sent, &iov_offset);
-			if (ret)
-				goto free_txreq;
-		}
-		/*
-		 * The txreq was submitted successfully so we can update
-		 * the counters.
-		 */
 		req->koffset += datalen;
 		if (req_opcode(req->info.ctrl) == EXPECTED)
 			req->tidoffset += datalen;
-		req->sent += data_sent;
-		if (req->data_len)
-			iovec->offset += iov_offset;
+		req->sent += datalen;
+		while (datalen) {
+			ret = add_system_pages_to_sdma_packet(req, tx, iovec,
+							      &datalen);
+			if (ret)
+				goto free_txreq;
+			iovec = &req->iovs[req->iov_idx];
+		}
 		list_add_tail(&tx->txreq.list, &req->txps);
 		/*
 		 * It is important to increment this here as it is used to
@@ -915,7 +878,9 @@
 		npkts++;
 	}
 dosend:
-	ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
+	ret = sdma_send_txlist(req->sde,
			       iowait_get_ib_work(&pq->busy),
			       &req->txps, &count);
 	req->seqsubmitted += count;
 	if (req->seqsubmitted == req->info.npkts) {
 		/*
@@ -939,130 +904,12 @@
 static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
 {
 	struct evict_data evict_data;
+	struct mmu_rb_handler *handler = pq->handler;
 
 	evict_data.cleared = 0;
 	evict_data.target = npages;
-	hfi1_mmu_rb_evict(pq->handler, &evict_data);
+	hfi1_mmu_rb_evict(handler, &evict_data);
 	return evict_data.cleared;
-}
-
-static int pin_sdma_pages(struct user_sdma_request *req,
-			  struct user_sdma_iovec *iovec,
-			  struct sdma_mmu_node *node,
-			  int npages)
-{
-	int pinned, cleared;
-	struct page **pages;
-	struct hfi1_user_sdma_pkt_q *pq = req->pq;
-
-	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
-	if (!pages)
-		return -ENOMEM;
-	memcpy(pages, node->pages, node->npages * sizeof(*pages));
-
-	npages -= node->npages;
-retry:
-	if (!hfi1_can_pin_pages(pq->dd, pq->mm,
-				atomic_read(&pq->n_locked), npages)) {
-		cleared = sdma_cache_evict(pq, npages);
-		if (cleared >= npages)
-			goto retry;
-	}
-	pinned = hfi1_acquire_user_pages(pq->mm,
-					 ((unsigned long)iovec->iov.iov_base +
-					 (node->npages * PAGE_SIZE)), npages, 0,
-					 pages + node->npages);
-	if (pinned < 0) {
-		kfree(pages);
-		return pinned;
-	}
-	if (pinned != npages) {
-		unpin_vector_pages(pq->mm, pages, node->npages, pinned);
-		return -EFAULT;
-	}
-	kfree(node->pages);
-	node->rb.len = iovec->iov.iov_len;
-	node->pages = pages;
-	atomic_add(pinned, &pq->n_locked);
-	return pinned;
-}
-
-static void unpin_sdma_pages(struct sdma_mmu_node *node)
-{
-	if (node->npages) {
-		unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
-		atomic_sub(node->npages, &node->pq->n_locked);
-	}
-}
-
-static int pin_vector_pages(struct user_sdma_request *req,
-			    struct user_sdma_iovec *iovec)
-{
-	int ret = 0, pinned, npages;
-	struct hfi1_user_sdma_pkt_q *pq = req->pq;
-	struct sdma_mmu_node *node = NULL;
-	struct mmu_rb_node *rb_node;
-	struct iovec *iov;
-	bool extracted;
-
-	extracted =
-		hfi1_mmu_rb_remove_unless_exact(pq->handler,
-						(unsigned long)
-						iovec->iov.iov_base,
-						iovec->iov.iov_len, &rb_node);
-	if (rb_node) {
-		node = container_of(rb_node, struct sdma_mmu_node, rb);
-		if (!extracted) {
-			atomic_inc(&node->refcount);
-			iovec->pages = node->pages;
-			iovec->npages = node->npages;
-			iovec->node = node;
-			return 0;
-		}
-	}
-
-	if (!node) {
-		node = kzalloc(sizeof(*node), GFP_KERNEL);
-		if (!node)
-			return -ENOMEM;
-
-		node->rb.addr = (unsigned long)iovec->iov.iov_base;
-		node->pq = pq;
-		atomic_set(&node->refcount, 0);
-	}
-
-	iov = &iovec->iov;
-	npages = num_user_pages((unsigned long)iov->iov_base, iov->iov_len);
-	if (node->npages < npages) {
-		pinned = pin_sdma_pages(req, iovec, node, npages);
-		if (pinned < 0) {
-			ret = pinned;
-			goto bail;
-		}
-		node->npages += pinned;
-		npages = node->npages;
-	}
-	iovec->pages = node->pages;
-	iovec->npages = npages;
-	iovec->node = node;
-
-	ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
-	if (ret) {
-		iovec->node = NULL;
-		goto bail;
-	}
-	return 0;
-bail:
-	unpin_sdma_pages(node);
-	kfree(node);
-	return ret;
-}
-
-static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
-			       unsigned start, unsigned npages)
-{
-	hfi1_release_user_pages(mm, pages + start, npages, false);
-	kfree(pages);
 }
 
 static int check_header_template(struct user_sdma_request *req,
@@ -1128,7 +975,8 @@
 			 0xffffffull),
 	psn = val & mask;
 	if (expct)
-		psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
+		psn = (psn & ~HFI1_KDETH_BTH_SEQ_MASK) |
+			((psn + frags) & HFI1_KDETH_BTH_SEQ_MASK);
 	else
 		psn = psn + frags;
 	return psn & mask;
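The masked increment above advances only the KDETH BTH sequence field and lets it wrap without carrying into the rest of the PSN. A standalone sketch of that arithmetic (the 11-bit mask below is an assumed stand-in; the real width comes from HFI1_KDETH_BTH_SEQ_MASK in the driver headers):

#include <stdint.h>
#include <stdio.h>

#define SEQ_MASK 0x7ffu	/* assumed stand-in for HFI1_KDETH_BTH_SEQ_MASK */

static uint32_t advance_seq(uint32_t psn, uint32_t frags)
{
	/* Only the sequence field advances (and wraps); upper PSN bits are kept. */
	return (psn & ~SEQ_MASK) | ((psn + frags) & SEQ_MASK);
}

int main(void)
{
	uint32_t psn = 0x123007feu;	/* sequence field two short of wrapping */

	printf("before: 0x%08x\n", psn);
	printf("after : 0x%08x\n", advance_seq(psn, 4));	/* wraps inside the field */
	return 0;
}

With a plain psn + frags the carry would spill into the bits above the sequence field, which is what the masked form in the expct branch avoids.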
@@ -1405,7 +1253,7 @@
 	if (req->seqcomp != req->info.npkts - 1)
 		return;
 
-	user_sdma_free_request(req, false);
+	user_sdma_free_request(req);
 	set_comp_state(pq, cq, req->info.comp_idx, state, status);
 	pq_update(pq);
 }
@@ -1416,10 +1264,8 @@
 		wake_up(&pq->wait);
 }
 
-static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
+static void user_sdma_free_request(struct user_sdma_request *req)
 {
-	int i;
-
 	if (!list_empty(&req->txps)) {
 		struct sdma_txreq *t, *p;
 
@@ -1430,21 +1276,6 @@
 			sdma_txclean(req->pq->dd, t);
 			kmem_cache_free(req->pq->txreq_cache, tx);
 		}
-	}
-
-	for (i = 0; i < req->data_iovs; i++) {
-		struct sdma_mmu_node *node = req->iovs[i].node;
-
-		if (!node)
-			continue;
-
-		req->iovs[i].node = NULL;
-
-		if (unpin)
-			hfi1_mmu_rb_remove(req->pq->handler,
-					   &node->rb);
-		else
-			atomic_dec(&node->refcount);
 	}
 
 	kfree(req->tids);
@@ -1464,19 +1295,372 @@
 			idx, state, ret);
 }
 
+static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
+			       unsigned int start, unsigned int npages)
+{
+	hfi1_release_user_pages(mm, pages + start, npages, false);
+	kfree(pages);
+}
+
+static void free_system_node(struct sdma_mmu_node *node)
+{
+	if (node->npages) {
+		unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
+				   node->npages);
+		atomic_sub(node->npages, &node->pq->n_locked);
+	}
+	kfree(node);
+}
+
+/*
+ * kref_get()'s an additional kref on the returned rb_node to prevent rb_node
+ * from being released until after rb_node is assigned to an SDMA descriptor
+ * (struct sdma_desc) under add_system_iovec_to_sdma_packet(), even if the
+ * virtual address range for rb_node is invalidated between now and then.
+ */
+static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
+					      unsigned long start,
+					      unsigned long end)
+{
+	struct mmu_rb_node *rb_node;
+	unsigned long flags;
+
+	spin_lock_irqsave(&handler->lock, flags);
+	rb_node = hfi1_mmu_rb_get_first(handler, start, (end - start));
+	if (!rb_node) {
+		spin_unlock_irqrestore(&handler->lock, flags);
+		return NULL;
+	}
+
+	/* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
+	kref_get(&rb_node->refcount);
+	spin_unlock_irqrestore(&handler->lock, flags);
+
+	return container_of(rb_node, struct sdma_mmu_node, rb);
+}
+
+static int pin_system_pages(struct user_sdma_request *req,
+			    uintptr_t start_address, size_t length,
+			    struct sdma_mmu_node *node, int npages)
+{
+	struct hfi1_user_sdma_pkt_q *pq = req->pq;
+	int pinned, cleared;
+	struct page **pages;
+
+	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+retry:
+	if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked),
+				npages)) {
+		SDMA_DBG(req, "Evicting: nlocked %u npages %u",
+			 atomic_read(&pq->n_locked), npages);
+		cleared = sdma_cache_evict(pq, npages);
+		if (cleared >= npages)
+			goto retry;
+	}
+
+	SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u",
+		 start_address, node->npages, npages);
+	pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0,
+					 pages);
+
+	if (pinned < 0) {
+		kfree(pages);
+		SDMA_DBG(req, "pinned %d", pinned);
+		return pinned;
+	}
+	if (pinned != npages) {
+		unpin_vector_pages(current->mm, pages, node->npages, pinned);
+		SDMA_DBG(req, "npages %u pinned %d", npages, pinned);
+		return -EFAULT;
+	}
+	node->rb.addr = start_address;
+	node->rb.len = length;
+	node->pages = pages;
+	node->npages = npages;
+	atomic_add(pinned, &pq->n_locked);
+	SDMA_DBG(req, "done. pinned %d", pinned);
+	return 0;
+}
+
+/*
+ * kref refcount on *node_p will be 2 on successful addition: one kref from
+ * kref_init() for mmu_rb_handler and one kref to prevent *node_p from being
+ * released until after *node_p is assigned to an SDMA descriptor (struct
+ * sdma_desc) under add_system_iovec_to_sdma_packet(), even if the virtual
+ * address range for *node_p is invalidated between now and then.
+ */
+static int add_system_pinning(struct user_sdma_request *req,
+			      struct sdma_mmu_node **node_p,
+			      unsigned long start, unsigned long len)
+
+{
+	struct hfi1_user_sdma_pkt_q *pq = req->pq;
+	struct sdma_mmu_node *node;
+	int ret;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	/* First kref "moves" to mmu_rb_handler */
+	kref_init(&node->rb.refcount);
+
+	/* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
+	kref_get(&node->rb.refcount);
+
+	node->pq = pq;
+	ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
+	if (ret == 0) {
+		ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
+		if (ret)
+			free_system_node(node);
+		else
+			*node_p = node;
+
+		return ret;
+	}
+
+	kfree(node);
+	return ret;
+}
+
+static int get_system_cache_entry(struct user_sdma_request *req,
+				  struct sdma_mmu_node **node_p,
+				  size_t req_start, size_t req_len)
+{
+	struct hfi1_user_sdma_pkt_q *pq = req->pq;
+	u64 start = ALIGN_DOWN(req_start, PAGE_SIZE);
+	u64 end = PFN_ALIGN(req_start + req_len);
+	struct mmu_rb_handler *handler = pq->handler;
+	int ret;
+
+	if ((end - start) == 0) {
+		SDMA_DBG(req,
+			 "Request for empty cache entry req_start %lx req_len %lx start %llx end %llx",
+			 req_start, req_len, start, end);
+		return -EINVAL;
+	}
+
+	SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len);
+
+	while (1) {
+		struct sdma_mmu_node *node =
+			find_system_node(handler, start, end);
+		u64 prepend_len = 0;
+
+		SDMA_DBG(req, "node %p start %llx end %llu", node, start, end);
+		if (!node) {
+			ret = add_system_pinning(req, node_p, start,
+						 end - start);
+			if (ret == -EEXIST) {
+				/*
+				 * Another execution context has inserted a
+				 * conflicting entry first.
+				 */
+				continue;
+			}
+			return ret;
+		}
+
+		if (node->rb.addr <= start) {
+			/*
+			 * This entry covers at least part of the region. If it doesn't extend
+			 * to the end, then this will be called again for the next segment.
+			 */
+			*node_p = node;
+			return 0;
+		}
+
+		SDMA_DBG(req, "prepend: node->rb.addr %lx, node->rb.refcount %d",
+			 node->rb.addr, kref_read(&node->rb.refcount));
+		prepend_len = node->rb.addr - start;
+
+		/*
+		 * This node will not be returned, instead a new node
+		 * will be. So release the reference.
+		 */
+		kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
+
+		/* Prepend a node to cover the beginning of the allocation */
+		ret = add_system_pinning(req, node_p, start, prepend_len);
+		if (ret == -EEXIST) {
+			/* Another execution context has inserted a conflicting entry first. */
+			continue;
+		}
+		return ret;
+	}
+}
+
+static void sdma_mmu_rb_node_get(void *ctx)
+{
+	struct mmu_rb_node *node = ctx;
+
+	kref_get(&node->refcount);
+}
+
+static void sdma_mmu_rb_node_put(void *ctx)
+{
+	struct sdma_mmu_node *node = ctx;
+
+	kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
+}
+
+static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
+				      struct user_sdma_txreq *tx,
+				      struct sdma_mmu_node *cache_entry,
+				      size_t start,
+				      size_t from_this_cache_entry)
+{
+	struct hfi1_user_sdma_pkt_q *pq = req->pq;
+	unsigned int page_offset;
+	unsigned int from_this_page;
+	size_t page_index;
+	void *ctx;
+	int ret;
+
+	/*
+	 * Because the cache may be more fragmented than the memory that is being accessed,
+	 * it's not strictly necessary to have a descriptor per cache entry.
+	 */
+
+	while (from_this_cache_entry) {
+		page_index = PFN_DOWN(start - cache_entry->rb.addr);
+
+		if (page_index >= cache_entry->npages) {
+			SDMA_DBG(req,
+				 "Request for page_index %zu >= cache_entry->npages %u",
+				 page_index, cache_entry->npages);
+			return -EINVAL;
+		}
+
+		page_offset = start - ALIGN_DOWN(start, PAGE_SIZE);
+		from_this_page = PAGE_SIZE - page_offset;
+
+		if (from_this_page < from_this_cache_entry) {
+			ctx = NULL;
+		} else {
+			/*
+			 * In the case they are equal the next line has no practical effect,
+			 * but it's better to do a register to register copy than a conditional
+			 * branch.
+			 */
+			from_this_page = from_this_cache_entry;
+			ctx = cache_entry;
+		}
+
+		ret = sdma_txadd_page(pq->dd, &tx->txreq,
+				      cache_entry->pages[page_index],
+				      page_offset, from_this_page,
+				      ctx,
+				      sdma_mmu_rb_node_get,
+				      sdma_mmu_rb_node_put);
+		if (ret) {
+			/*
+			 * When there's a failure, the entire request is freed by
+			 * user_sdma_send_pkts().
+			 */
+			SDMA_DBG(req,
+				 "sdma_txadd_page failed %d page_index %lu page_offset %u from_this_page %u",
+				 ret, page_index, page_offset, from_this_page);
+			return ret;
+		}
+		start += from_this_page;
+		from_this_cache_entry -= from_this_page;
+	}
+	return 0;
+}
+
+static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
+					   struct user_sdma_txreq *tx,
+					   struct user_sdma_iovec *iovec,
+					   size_t from_this_iovec)
+{
+	while (from_this_iovec > 0) {
+		struct sdma_mmu_node *cache_entry;
+		size_t from_this_cache_entry;
+		size_t start;
+		int ret;
+
+		start = (uintptr_t)iovec->iov.iov_base + iovec->offset;
+		ret = get_system_cache_entry(req, &cache_entry, start,
+					     from_this_iovec);
+		if (ret) {
+			SDMA_DBG(req, "pin system segment failed %d", ret);
+			return ret;
+		}
+
+		from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr);
+		if (from_this_cache_entry > from_this_iovec)
+			from_this_cache_entry = from_this_iovec;
+
+		ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start,
+						 from_this_cache_entry);
+
+		/*
+		 * Done adding cache_entry to zero or more sdma_desc. Can
+		 * kref_put() the "safety" kref taken under
+		 * get_system_cache_entry().
+		 */
+		kref_put(&cache_entry->rb.refcount, hfi1_mmu_rb_release);
+
+		if (ret) {
+			SDMA_DBG(req, "add system segment failed %d", ret);
+			return ret;
+		}
+
+		iovec->offset += from_this_cache_entry;
+		from_this_iovec -= from_this_cache_entry;
+	}
+
+	return 0;
+}
+
+static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
+					   struct user_sdma_txreq *tx,
+					   struct user_sdma_iovec *iovec,
+					   u32 *pkt_data_remaining)
+{
+	size_t remaining_to_add = *pkt_data_remaining;
+	/*
+	 * Walk through iovec entries, ensure the associated pages
+	 * are pinned and mapped, add data to the packet until no more
+	 * data remains to be added.
+	 */
+	while (remaining_to_add > 0) {
+		struct user_sdma_iovec *cur_iovec;
+		size_t from_this_iovec;
+		int ret;
+
+		cur_iovec = iovec;
+		from_this_iovec = iovec->iov.iov_len - iovec->offset;
+
+		if (from_this_iovec > remaining_to_add) {
+			from_this_iovec = remaining_to_add;
+		} else {
+			/* The current iovec entry will be consumed by this pass. */
+			req->iov_idx++;
+			iovec++;
+		}
+
+		ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec,
+						      from_this_iovec);
+		if (ret)
+			return ret;
+
+		remaining_to_add -= from_this_iovec;
+	}
+	*pkt_data_remaining = remaining_to_add;
+
+	return 0;
+}
+
 static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
 			   unsigned long len)
 {
 	return (bool)(node->addr == addr);
-}
-
-static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
-{
-	struct sdma_mmu_node *node =
-		container_of(mnode, struct sdma_mmu_node, rb);
-
-	atomic_inc(&node->refcount);
-	return 0;
 }
 
 /*
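Per packet, the helpers added above reduce to a walk that splits a byte range into per-page chunks: a page index derived from the offset into the pinned region, an offset within that page, and a length capped at both the page boundary and the bytes still to be added. A self-contained sketch of that splitting (fixed 4096-byte pages and invented names; the kernel derives these values with PAGE_SIZE, ALIGN_DOWN() and PFN_DOWN() and feeds them to sdma_txadd_page()):

#include <stdio.h>
#include <stddef.h>

#define MODEL_PAGE_SIZE 4096ul

/* region_start is assumed page aligned, as cache entries are in the driver. */
static void split_range(unsigned long region_start, unsigned long start,
			size_t len)
{
	while (len) {
		size_t page_index = (start - region_start) / MODEL_PAGE_SIZE;
		size_t page_offset = start % MODEL_PAGE_SIZE;
		size_t this_page = MODEL_PAGE_SIZE - page_offset;

		if (this_page > len)
			this_page = len;	/* final, partial chunk */

		printf("page %zu offset %4zu len %4zu\n",
		       page_index, page_offset, this_page);

		start += this_page;
		len -= this_page;
	}
}

int main(void)
{
	/* A 10000-byte chunk of packet payload starting 100 bytes into page 0. */
	split_range(0x10000000ul, 0x10000000ul + 100, 10000);
	return 0;
}

Each printed tuple corresponds to one descriptor the driver would add with sdma_txadd_page(); only the first and last chunks can be shorter than a full page.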
@@ -1490,10 +1674,6 @@
 	struct sdma_mmu_node *node =
 		container_of(mnode, struct sdma_mmu_node, rb);
 	struct evict_data *evict_data = evict_arg;
-
-	/* is this node still being used? */
-	if (atomic_read(&node->refcount))
-		return 0; /* keep this node */
 
 	/* this node will be evicted, add its pages to our count */
 	evict_data->cleared += node->npages;
@@ -1510,16 +1690,5 @@
 	struct sdma_mmu_node *node =
 		container_of(mnode, struct sdma_mmu_node, rb);
 
-	unpin_sdma_pages(node);
-	kfree(node);
-}
-
-static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
-{
-	struct sdma_mmu_node *node =
-		container_of(mnode, struct sdma_mmu_node, rb);
-
-	if (!atomic_read(&node->refcount))
-		return 1;
-	return 0;
+	free_system_node(node);
 }
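With sdma_rb_insert()/sdma_rb_invalidate() and the old per-node atomic refcount gone, node lifetime now follows the kref pattern spelled out in the comments above: one reference owned by the mmu_rb handler, a temporary "safety" reference held while the node is being attached to descriptors, and one reference per descriptor, dropped again when the descriptor completes. A userspace model of that counting (C11 atomics and invented names; no cache locking is modeled):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	atomic_int refcount;
	int npages;
};

static struct node *node_create(int npages)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		exit(1);
	n->npages = npages;
	/* one ref for the cache + one temporary "safety" ref for the caller */
	atomic_init(&n->refcount, 2);
	return n;
}

static void node_get(struct node *n)
{
	atomic_fetch_add(&n->refcount, 1);
}

static void node_put(struct node *n)
{
	/* the last holder to drop its reference frees the node */
	if (atomic_fetch_sub(&n->refcount, 1) == 1) {
		printf("freeing node with %d pages\n", n->npages);
		free(n);
	}
}

int main(void)
{
	struct node *n = node_create(8);

	node_get(n);	/* descriptor 1 maps the node */
	node_get(n);	/* descriptor 2 maps the node */
	node_put(n);	/* caller drops the "safety" reference */
	node_put(n);	/* descriptor 1 completes */
	node_put(n);	/* descriptor 2 completes */
	node_put(n);	/* cache removal/eviction drops the last reference */
	return 0;
}

The ordering freedom is the point: invalidation or eviction can drop the cache's reference at any time without pulling pages out from under descriptors that are still in flight.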