forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1,5 +1,6 @@
 /*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2020 - Cornelis Networks, Inc.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -76,8 +77,7 @@
 
 static unsigned initial_pkt_count = 8;
 
-static int user_sdma_send_pkts(struct user_sdma_request *req,
-			       unsigned maxpkts);
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
 static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
@@ -101,7 +101,7 @@
 
 static int defer_packet_queue(
 	struct sdma_engine *sde,
-	struct iowait *wait,
+	struct iowait_work *wait,
 	struct sdma_txreq *txreq,
 	uint seq,
 	bool pkts_sent);
@@ -124,16 +124,15 @@
 
 static int defer_packet_queue(
 	struct sdma_engine *sde,
-	struct iowait *wait,
+	struct iowait_work *wait,
 	struct sdma_txreq *txreq,
 	uint seq,
 	bool pkts_sent)
 {
 	struct hfi1_user_sdma_pkt_q *pq =
-		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
-	struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
+		container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
 
-	write_seqlock(&dev->iowait_lock);
+	write_seqlock(&sde->waitlock);
 	if (sdma_progress(sde, seq, txreq))
 		goto eagain;
 	/*
@@ -142,12 +141,15 @@
 	 * it is supposed to be enqueued.
 	 */
 	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
-	if (list_empty(&pq->busy.list))
+	if (list_empty(&pq->busy.list)) {
+		pq->busy.lock = &sde->waitlock;
+		iowait_get_priority(&pq->busy);
 		iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
-	write_sequnlock(&dev->iowait_lock);
+	}
+	write_sequnlock(&sde->waitlock);
 	return -EBUSY;
 eagain:
-	write_sequnlock(&dev->iowait_lock);
+	write_sequnlock(&sde->waitlock);
 	return -EAGAIN;
 }
 
@@ -155,6 +157,7 @@
 {
 	struct hfi1_user_sdma_pkt_q *pq =
 		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
+	pq->busy.lock = NULL;
 	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
 	wake_up(&wait->wait_dma);
 };
@@ -186,10 +189,9 @@
 	atomic_set(&pq->n_reqs, 0);
 	init_waitqueue_head(&pq->wait);
 	atomic_set(&pq->n_locked, 0);
-	pq->mm = fd->mm;
 
-	iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
-		    activate_packet_queue, NULL);
+	iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
+		    activate_packet_queue, NULL, NULL);
 	pq->reqidx = 0;
 
 	pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
@@ -228,7 +230,7 @@
 
 	cq->nentries = hfi1_sdma_comp_ring_size;
 
-	ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
+	ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
 				   &pq->handler);
 	if (ret) {
 		dd_dev_err(dd, "Failed to register with MMU %d", ret);
@@ -256,6 +258,21 @@
 	return ret;
 }
 
+static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq)
+{
+	unsigned long flags;
+	seqlock_t *lock = pq->busy.lock;
+
+	if (!lock)
+		return;
+	write_seqlock_irqsave(lock, flags);
+	if (!list_empty(&pq->busy.list)) {
+		list_del_init(&pq->busy.list);
+		pq->busy.lock = NULL;
+	}
+	write_sequnlock_irqrestore(lock, flags);
+}
+
 int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
 			       struct hfi1_ctxtdata *uctxt)
 {
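
Note on the flush_pq_iowait() hunk above: when a packet queue is deferred it now records, in pq->busy.lock, which engine waitlock guards the list it was queued on, and the cleanup path later takes that same lock before unlinking the entry. The sketch below is a self-contained userspace illustration of that pattern, assuming pthread mutexes in place of the kernel seqlock; the names (engine, pktq, defer_pktq, flush_pktq_wait) are illustrative, not the driver's.

/*
 * Userspace sketch: a waiter remembers which lock protects the wait list it
 * parked itself on, so a later flush can take the right lock and unlink it.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *n) { n->prev = n->next = n; }
static bool list_empty(const struct list_node *n) { return n->next == n; }
static void list_add_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}
static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

struct engine {				/* stands in for sdma_engine */
	pthread_mutex_t waitlock;
	struct list_node dmawait;
};

struct pktq {				/* stands in for hfi1_user_sdma_pkt_q */
	struct list_node busy;
	pthread_mutex_t *busy_lock;	/* lock guarding the list we sit on */
};

/* Deferral path: queue the pktq on the engine and remember the lock. */
static void defer_pktq(struct engine *e, struct pktq *pq)
{
	pthread_mutex_lock(&e->waitlock);
	if (list_empty(&pq->busy)) {
		pq->busy_lock = &e->waitlock;
		list_add_tail(&pq->busy, &e->dmawait);
	}
	pthread_mutex_unlock(&e->waitlock);
}

/* Cleanup path: pull the pktq off whatever list it was left on. */
static void flush_pktq_wait(struct pktq *pq)
{
	pthread_mutex_t *lock = pq->busy_lock;

	if (!lock)
		return;
	pthread_mutex_lock(lock);
	if (!list_empty(&pq->busy)) {
		list_del_init(&pq->busy);
		pq->busy_lock = NULL;
	}
	pthread_mutex_unlock(lock);
}

int main(void)
{
	struct engine e = { .waitlock = PTHREAD_MUTEX_INITIALIZER };
	struct pktq pq = { .busy_lock = NULL };

	list_init(&e.dmawait);
	list_init(&pq.busy);

	defer_pktq(&e, &pq);
	printf("queued: %s\n", list_empty(&e.dmawait) ? "no" : "yes");
	flush_pktq_wait(&pq);	/* e.g. the wait timed out or the fd closed */
	printf("queued after flush: %s\n", list_empty(&e.dmawait) ? "no" : "yes");
	return 0;
}

Clearing the recorded lock pointer inside the critical section is what makes a second flush, or a flush racing with activation, a harmless no-op.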
@@ -281,6 +298,7 @@
 		kfree(pq->reqs);
 		kfree(pq->req_in_use);
 		kmem_cache_destroy(pq->txreq_cache);
+		flush_pq_iowait(pq);
 		kfree(pq);
 	} else {
 		spin_unlock(&fd->pq_rcu_lock);
@@ -571,10 +589,6 @@
 
 	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
 	pq->state = SDMA_PKT_Q_ACTIVE;
-	/* Send the first N packets in the request to buy us some time */
-	ret = user_sdma_send_pkts(req, pcount);
-	if (unlikely(ret < 0 && ret != -EBUSY))
-		goto free_req;
 
 	/*
 	 * This is a somewhat blocking send implementation.
@@ -587,11 +601,12 @@
 		if (ret < 0) {
 			if (ret != -EBUSY)
 				goto free_req;
-			wait_event_interruptible_timeout(
+			if (wait_event_interruptible_timeout(
 				pq->busy.wait_dma,
-				(pq->state == SDMA_PKT_Q_ACTIVE),
+				pq->state == SDMA_PKT_Q_ACTIVE,
 				msecs_to_jiffies(
-					SDMA_IOWAIT_TIMEOUT));
+					SDMA_IOWAIT_TIMEOUT)) <= 0)
+				flush_pq_iowait(pq);
 		}
 	}
 	*count += idx;
@@ -760,9 +775,10 @@
 	return ret;
 }
 
-static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
 {
-	int ret = 0, count;
+	int ret = 0;
+	u16 count;
 	unsigned npkts = 0;
 	struct user_sdma_txreq *tx = NULL;
 	struct hfi1_user_sdma_pkt_q *pq = NULL;
@@ -915,7 +931,9 @@
 		npkts++;
 	}
 dosend:
-	ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
+	ret = sdma_send_txlist(req->sde,
+			       iowait_get_ib_work(&pq->busy),
+			       &req->txps, &count);
 	req->seqsubmitted += count;
 	if (req->seqsubmitted == req->info.npkts) {
 		/*
@@ -962,13 +980,13 @@
 
 	npages -= node->npages;
 retry:
-	if (!hfi1_can_pin_pages(pq->dd, pq->mm,
+	if (!hfi1_can_pin_pages(pq->dd, current->mm,
 				atomic_read(&pq->n_locked), npages)) {
 		cleared = sdma_cache_evict(pq, npages);
 		if (cleared >= npages)
 			goto retry;
 	}
-	pinned = hfi1_acquire_user_pages(pq->mm,
+	pinned = hfi1_acquire_user_pages(current->mm,
 		((unsigned long)iovec->iov.iov_base +
 		 (node->npages * PAGE_SIZE)), npages, 0,
 		pages + node->npages);
@@ -977,7 +995,7 @@
 		return pinned;
 	}
 	if (pinned != npages) {
-		unpin_vector_pages(pq->mm, pages, node->npages, pinned);
+		unpin_vector_pages(current->mm, pages, node->npages, pinned);
 		return -EFAULT;
 	}
 	kfree(node->pages);
@@ -990,7 +1008,8 @@
 static void unpin_sdma_pages(struct sdma_mmu_node *node)
 {
 	if (node->npages) {
-		unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
+		unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
+				   node->npages);
 		atomic_sub(node->npages, &node->pq->n_locked);
 	}
 }
@@ -1128,7 +1147,8 @@
 			0xffffffull),
 		psn = val & mask;
 	if (expct)
-		psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
+		psn = (psn & ~HFI1_KDETH_BTH_SEQ_MASK) |
+			((psn + frags) & HFI1_KDETH_BTH_SEQ_MASK);
 	else
 		psn = psn + frags;
 	return psn & mask;
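
Note on the final hunk: for expected (TID) receives the PSN update now wraps only the KDETH BTH sequence bits rather than BTH_SEQ_MASK, leaving the bits above the sequence field untouched. The standalone sketch below illustrates that masking arithmetic; the 11-bit field width and the sample values are assumptions for illustration, not taken from the driver headers.

/*
 * Standalone sketch of the expected-receive PSN update: only the low
 * sequence bits advance and wrap, while the high bits of the PSN are kept.
 * SEQ_MASK is an assumed 11-bit mask standing in for HFI1_KDETH_BTH_SEQ_MASK.
 */
#include <stdint.h>
#include <stdio.h>

#define SEQ_MASK ((1u << 11) - 1)	/* assumed width of the wrapping field */

static uint32_t advance_expected_psn(uint32_t psn, uint32_t frags)
{
	/* keep the bits above the sequence field, wrap only the low bits */
	return (psn & ~SEQ_MASK) | ((psn + frags) & SEQ_MASK);
}

int main(void)
{
	uint32_t psn = 0x000127fe;	/* sequence field close to wrapping */

	for (int i = 0; i < 4; i++) {
		printf("psn = 0x%08x (seq = 0x%03x)\n", psn, psn & SEQ_MASK);
		psn = advance_expected_psn(psn, 1);
	}
	return 0;
}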