@@ -1,5 +1,6 @@
 /*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2020 - Cornelis Networks, Inc.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -76,8 +77,7 @@
 
 static unsigned initial_pkt_count = 8;
 
-static int user_sdma_send_pkts(struct user_sdma_request *req,
-			       unsigned maxpkts);
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
 static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
@@ -101,7 +101,7 @@
 
 static int defer_packet_queue(
 	struct sdma_engine *sde,
-	struct iowait *wait,
+	struct iowait_work *wait,
 	struct sdma_txreq *txreq,
 	uint seq,
 	bool pkts_sent);
@@ -124,16 +124,15 @@
 
 static int defer_packet_queue(
 	struct sdma_engine *sde,
-	struct iowait *wait,
+	struct iowait_work *wait,
 	struct sdma_txreq *txreq,
 	uint seq,
 	bool pkts_sent)
 {
 	struct hfi1_user_sdma_pkt_q *pq =
-		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
-	struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
+		container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
 
-	write_seqlock(&dev->iowait_lock);
+	write_seqlock(&sde->waitlock);
 	if (sdma_progress(sde, seq, txreq))
 		goto eagain;
 	/*
@@ -142,12 +141,15 @@
 	 * it is supposed to be enqueued.
 	 */
 	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
-	if (list_empty(&pq->busy.list))
+	if (list_empty(&pq->busy.list)) {
+		pq->busy.lock = &sde->waitlock;
+		iowait_get_priority(&pq->busy);
 		iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
-	write_sequnlock(&dev->iowait_lock);
+	}
+	write_sequnlock(&sde->waitlock);
 	return -EBUSY;
 eagain:
-	write_sequnlock(&dev->iowait_lock);
+	write_sequnlock(&sde->waitlock);
 	return -EAGAIN;
 }
 
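Note: the rewritten sleep callback above parks the packet queue on the engine's dmawait list under that engine's own waitlock, and records which lock guards the linkage (pq->busy.lock = &sde->waitlock) so that teardown can later find and take the correct lock. A minimal user-space analogue of that park-and-remember pattern; every name here (engine, packet_queue, defer_or_busy) is a hypothetical stand-in for the driver types, not driver API:

```c
/* User-space sketch of the park-and-remember pattern; hypothetical names. */
#include <pthread.h>
#include <stdbool.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

struct engine {                    /* stands in for sdma_engine */
	pthread_mutex_t waitlock;  /* stands in for sde->waitlock */
	struct list_head dmawait;  /* parked waiters */
};

struct packet_queue {              /* stands in for hfi1_user_sdma_pkt_q */
	struct list_head busy;      /* wait-list linkage */
	pthread_mutex_t *busy_lock; /* records which engine lock guards us */
};

/* Park pq on the engine's wait list; remember which lock guards it. */
static int defer_or_busy(struct engine *e, struct packet_queue *pq)
{
	pthread_mutex_lock(&e->waitlock);
	if (list_empty(&pq->busy)) {
		pq->busy_lock = &e->waitlock; /* remember our guard */
		list_add_tail(&pq->busy, &e->dmawait);
	}
	pthread_mutex_unlock(&e->waitlock);
	return -1; /* caller sees "busy" and retries after a wakeup */
}
```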
@@ -155,6 +157,7 @@
 {
 	struct hfi1_user_sdma_pkt_q *pq =
 		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
+	pq->busy.lock = NULL;
 	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
 	wake_up(&wait->wait_dma);
 };
@@ -186,10 +189,9 @@
 	atomic_set(&pq->n_reqs, 0);
 	init_waitqueue_head(&pq->wait);
 	atomic_set(&pq->n_locked, 0);
-	pq->mm = fd->mm;
 
-	iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
-		    activate_packet_queue, NULL);
+	iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
+		    activate_packet_queue, NULL, NULL);
 	pq->reqidx = 0;
 
 	pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
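Note: the two added NULL arguments track iowait_init() growing a second (TID) work item and an init_priority callback in this series. The prototype below is my reading of hfi1's iowait.h after that rework; treat it as an assumption to be checked against the targeted tree, not as something quoted from this patch:

```c
/* Assumed iowait_init() signature after the dual-leg rework (hfi1 iowait.h). */
void iowait_init(struct iowait *wait, u32 tx_limit,
		 void (*func)(struct work_struct *work),
		 void (*tidfunc)(struct work_struct *work),   /* new: TID leg */
		 int (*sleep)(struct sdma_engine *sde,
			      struct iowait_work *wait,
			      struct sdma_txreq *tx,
			      uint seq, bool pkts_sent),
		 void (*wakeup)(struct iowait *wait, int reason),
		 void (*sdma_drained)(struct iowait *wait),
		 void (*init_priority)(struct iowait *wait)); /* new */
```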
@@ -228,7 +230,7 @@
 
 	cq->nentries = hfi1_sdma_comp_ring_size;
 
-	ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
+	ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
 				   &pq->handler);
 	if (ret) {
 		dd_dev_err(dd, "Failed to register with MMU %d", ret);
@@ -256,6 +258,21 @@
 	return ret;
 }
 
+static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq)
+{
+	unsigned long flags;
+	seqlock_t *lock = pq->busy.lock;
+
+	if (!lock)
+		return;
+	write_seqlock_irqsave(lock, flags);
+	if (!list_empty(&pq->busy.list)) {
+		list_del_init(&pq->busy.list);
+		pq->busy.lock = NULL;
+	}
+	write_sequnlock_irqrestore(lock, flags);
+}
+
 int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
 			       struct hfi1_ctxtdata *uctxt)
 {
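Note: the new flush_pq_iowait() is the consumer of the lock pointer recorded in defer_packet_queue(): it takes whichever engine waitlock the queue was parked under, unlinks the iowait with list_del_init() (leaving the node self-linked so the list_empty() test in defer_packet_queue() stays correct), and clears the pointer. Continuing the user-space sketch from above, again with hypothetical names:

```c
/* Continuing the sketch above: remove pq from whichever engine list it
 * was parked on, using the recorded guard lock. Hypothetical names. */
static void flush_parked(struct packet_queue *pq)
{
	pthread_mutex_t *lock = pq->busy_lock;

	if (!lock)
		return;               /* never parked, nothing to do */
	pthread_mutex_lock(lock);
	if (!list_empty(&pq->busy)) {
		/* unlink and re-self-link, like list_del_init() */
		pq->busy.prev->next = pq->busy.next;
		pq->busy.next->prev = pq->busy.prev;
		list_init(&pq->busy);
		pq->busy_lock = NULL;
	}
	pthread_mutex_unlock(lock);
}
```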
@@ -281,6 +298,7 @@
 		kfree(pq->reqs);
 		kfree(pq->req_in_use);
 		kmem_cache_destroy(pq->txreq_cache);
+		flush_pq_iowait(pq);
 		kfree(pq);
 	} else {
 		spin_unlock(&fd->pq_rcu_lock);
@@ -571,10 +589,6 @@
 
 	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
 	pq->state = SDMA_PKT_Q_ACTIVE;
-	/* Send the first N packets in the request to buy us some time */
-	ret = user_sdma_send_pkts(req, pcount);
-	if (unlikely(ret < 0 && ret != -EBUSY))
-		goto free_req;
 
 	/*
 	 * This is a somewhat blocking send implementation.
@@ -587,11 +601,12 @@
 		if (ret < 0) {
 			if (ret != -EBUSY)
 				goto free_req;
-			wait_event_interruptible_timeout(
+			if (wait_event_interruptible_timeout(
 				pq->busy.wait_dma,
-				(pq->state == SDMA_PKT_Q_ACTIVE),
+				pq->state == SDMA_PKT_Q_ACTIVE,
 				msecs_to_jiffies(
-					SDMA_IOWAIT_TIMEOUT));
+					SDMA_IOWAIT_TIMEOUT)) <= 0)
+				flush_pq_iowait(pq);
 		}
 	}
 	*count += idx;
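Note: the reworked wait now acts on the return value. wait_event_interruptible_timeout() returns a negative errno if a signal arrived, 0 if it timed out with the condition still false, and the remaining jiffies (>= 1) once the condition holds; on `<= 0` there may have been no wakeup to dequeue the pq, so flush_pq_iowait() pulls it off the engine's wait list before the request can be torn down. Schematically (wq, cond, timeout, and the handlers are hypothetical placeholders):

```c
/* Sketch of the wait_event_interruptible_timeout() contract used above. */
long ret = wait_event_interruptible_timeout(wq, cond, timeout);
if (ret < 0)
	handle_signal();   /* -ERESTARTSYS: interrupted          */
else if (ret == 0)
	handle_timeout();  /* timed out, cond still false        */
else
	proceed();         /* cond true; ret = remaining jiffies */
```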
@@ -760,9 +775,10 @@
 	return ret;
 }
 
-static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
 {
-	int ret = 0, count;
+	int ret = 0;
+	u16 count;
 	unsigned npkts = 0;
 	struct user_sdma_txreq *tx = NULL;
 	struct hfi1_user_sdma_pkt_q *pq = NULL;
@@ -915,7 +931,9 @@
 		npkts++;
 	}
 dosend:
-	ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
+	ret = sdma_send_txlist(req->sde,
+			       iowait_get_ib_work(&pq->busy),
+			       &req->txps, &count);
 	req->seqsubmitted += count;
 	if (req->seqsubmitted == req->info.npkts) {
 		/*
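Note: with struct iowait split into per-leg iowait_work entries, sdma_send_txlist() now takes the work item rather than the iowait itself, and iowait_get_ib_work() presumably just returns the IB leg's entry (user SDMA has no TID leg in this path). A sketch of the accessor as I read it in iowait.h, offered as an assumption rather than quoted from this patch:

```c
/* Assumed accessor from hfi1's iowait.h: pick the IB-leg work item. */
static inline struct iowait_work *iowait_get_ib_work(struct iowait *w)
{
	return &w->wait[IOWAIT_IB_SE];
}
```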
@@ -962,13 +980,13 @@
 
 	npages -= node->npages;
 retry:
-	if (!hfi1_can_pin_pages(pq->dd, pq->mm,
+	if (!hfi1_can_pin_pages(pq->dd, current->mm,
 				atomic_read(&pq->n_locked), npages)) {
 		cleared = sdma_cache_evict(pq, npages);
 		if (cleared >= npages)
 			goto retry;
 	}
-	pinned = hfi1_acquire_user_pages(pq->mm,
+	pinned = hfi1_acquire_user_pages(current->mm,
 		((unsigned long)iovec->iov.iov_base +
 		 (node->npages * PAGE_SIZE)), npages, 0,
 		 pages + node->npages);
@@ -977,7 +995,7 @@
 		return pinned;
 	}
 	if (pinned != npages) {
-		unpin_vector_pages(pq->mm, pages, node->npages, pinned);
+		unpin_vector_pages(current->mm, pages, node->npages, pinned);
 		return -EFAULT;
 	}
 	kfree(node->pages);
| 983 | 1001 | kfree(node->pages); |
|---|
| .. | .. |
|---|
| 990 | 1008 | static void unpin_sdma_pages(struct sdma_mmu_node *node) |
|---|
| 991 | 1009 | { |
|---|
| 992 | 1010 | if (node->npages) { |
|---|
| 993 | | - unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages); |
|---|
| 1011 | + unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0, |
|---|
| 1012 | + node->npages); |
|---|
| 994 | 1013 | atomic_sub(node->npages, &node->pq->n_locked); |
|---|
| 995 | 1014 | } |
|---|
| 996 | 1015 | } |
|---|
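Note: with pq->mm removed, the unpin path needs another route to the owning mm_struct, and mm_from_sdma_node() is not shown in this diff. In my reading of the upstream change it recovers the mm from the node's MMU-notifier registration; a sketch under that assumption, with the field chain unverified:

```c
/* Assumed helper: recover the mm from the node's mmu_rb registration.
 * Field names (rb, handler, mn.mm) are unverified against this tree. */
static struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
{
	return node->rb.handler->mn.mm;
}
```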
@@ -1128,7 +1147,8 @@
 			  0xffffffull),
 	    psn = val & mask;
 	if (expct)
-		psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
+		psn = (psn & ~HFI1_KDETH_BTH_SEQ_MASK) |
+		       ((psn + frags) & HFI1_KDETH_BTH_SEQ_MASK);
 	else
 		psn = psn + frags;
 	return psn & mask;
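Note: for expected (TID) traffic the KDETH BTH sequence field must wrap within its mask while the generation bits above it are preserved, which is what the two-step mask expression computes. A standalone check of that arithmetic, assuming an 11-bit sequence field (the old BTH_SEQ_MASK was 0x7ff; that HFI1_KDETH_BTH_SEQ_MASK keeps the same width is an assumption here):

```c
/* Standalone check of the wrap-within-mask PSN update. */
#include <stdio.h>
#include <stdint.h>

#define SEQ_MASK 0x7ffull /* assumed 11-bit KDETH BTH sequence field */

static uint64_t next_psn_expected(uint64_t psn, uint32_t frags)
{
	/* keep generation bits above the mask, wrap the sequence below it */
	return (psn & ~SEQ_MASK) | ((psn + frags) & SEQ_MASK);
}

int main(void)
{
	uint64_t psn = 0x1800 | 0x7fe; /* near the sequence wrap */

	printf("0x%llx -> 0x%llx\n",
	       (unsigned long long)psn,
	       (unsigned long long)next_psn_expected(psn, 3));
	/* prints 0x1ffe -> 0x1801: sequence wrapped, generation kept */
	return 0;
}
```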