@@ -49,6 +49,7 @@
 #include <rdma/ib_addr.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/uverbs_ioctl.h>
 
 #include "pvrdma.h"
 
@@ -91,22 +92,19 @@
 
 /**
  * pvrdma_create_cq - create completion queue
- * @ibdev: the device
+ * @ibcq: Allocated CQ
  * @attr: completion queue attributes
- * @context: user context
  * @udata: user data
  *
- * @return: ib_cq completion queue pointer on success,
- *          otherwise returns negative errno.
+ * @return: 0 on success
  */
-struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
-			       const struct ib_cq_init_attr *attr,
-			       struct ib_ucontext *context,
-			       struct ib_udata *udata)
+int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+		     struct ib_udata *udata)
 {
+	struct ib_device *ibdev = ibcq->device;
 	int entries = attr->cqe;
 	struct pvrdma_dev *dev = to_vdev(ibdev);
-	struct pvrdma_cq *cq;
+	struct pvrdma_cq *cq = to_vcq(ibcq);
 	int ret;
 	int npages;
 	unsigned long flags;
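
The hunk above follows the RDMA core's move to core-allocated CQ objects: the core allocates the driver's struct pvrdma_cq (with the embedded struct ib_cq) before calling the driver, so pvrdma_create_cq() only initializes the object and returns 0 or a negative errno instead of an allocated pointer. A minimal sketch of how a driver typically advertises its CQ object size to the core is shown below; the ops-table change is not part of this hunk, so the exact pvrdma wiring here is an assumption.

/* Sketch (assumed, not shown in this hunk): declaring the CQ object size so
 * the core can allocate struct pvrdma_cq, pass &pvrdma_cq->ibcq down to
 * .create_cq, and free it again after .destroy_cq returns.
 */
static const struct ib_device_ops pvrdma_dev_ops = {
	.create_cq = pvrdma_create_cq,
	.destroy_cq = pvrdma_destroy_cq,
	INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq),
};
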
@@ -114,26 +112,22 @@
 	union pvrdma_cmd_resp rsp;
 	struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
 	struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
-	struct pvrdma_create_cq_resp cq_resp = {0};
+	struct pvrdma_create_cq_resp cq_resp = {};
 	struct pvrdma_create_cq ucmd;
+	struct pvrdma_ucontext *context = rdma_udata_to_drv_context(
+		udata, struct pvrdma_ucontext, ibucontext);
 
 	BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
 
 	entries = roundup_pow_of_two(entries);
 	if (entries < 1 || entries > dev->dsr->caps.max_cqe)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq))
-		return ERR_PTR(-ENOMEM);
-
-	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
-	if (!cq) {
-		atomic_dec(&dev->num_cqs);
-		return ERR_PTR(-ENOMEM);
-	}
+		return -ENOMEM;
 
 	cq->ibcq.cqe = entries;
-	cq->is_kernel = !context;
+	cq->is_kernel = !udata;
 
 	if (!cq->is_kernel) {
 		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
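
With the explicit struct ib_ucontext argument gone, the hunk above recovers the caller's context from udata via rdma_udata_to_drv_context() (hence the new <rdma/uverbs_ioctl.h> include) and uses udata itself to tell kernel and user callers apart. A short recap of the pattern, using only names from this patch:

/* Sketch of the pattern used above: for a user-space caller, context is the
 * creating process's pvrdma ucontext; for an in-kernel caller, udata is NULL,
 * rdma_udata_to_drv_context() yields NULL, and cq->is_kernel is set.
 */
struct pvrdma_ucontext *context = rdma_udata_to_drv_context(
	udata, struct pvrdma_ucontext, ibucontext);

cq->is_kernel = !udata;	/* kernel CQs have no udata and no ucontext */
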
@@ -141,14 +135,14 @@
 			goto err_cq;
 		}
 
-		cq->umem = ib_umem_get(context, ucmd.buf_addr, ucmd.buf_size,
-				       IB_ACCESS_LOCAL_WRITE, 1);
+		cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size,
+				       IB_ACCESS_LOCAL_WRITE);
 		if (IS_ERR(cq->umem)) {
 			ret = PTR_ERR(cq->umem);
 			goto err_cq;
 		}
 
-		npages = ib_umem_page_count(cq->umem);
+		npages = ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE);
 	} else {
 		/* One extra page for shared ring state */
 		npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
@@ -185,8 +179,7 @@
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
 	cmd->nchunks = npages;
-	cmd->ctx_handle = (context) ?
-			  (u64)to_vucontext(context)->ctx_handle : 0;
+	cmd->ctx_handle = context ? context->ctx_handle : 0;
 	cmd->cqe = entries;
 	cmd->pdir_dma = cq->pdir.dir_dma;
 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
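
The user-memory path of pvrdma_create_cq() (two hunks up) also tracks two core API changes: ib_umem_get() now takes the ib_device rather than the ucontext and has lost its dmasync argument, and the page count comes from ib_umem_num_dma_blocks() with an explicit block size instead of ib_umem_page_count(). A condensed sketch of the resulting call pattern, using the names from this patch, with error handling simplified:

/* Sketch of the updated umem calls; in the real function the error path
 * stores PTR_ERR() in ret and jumps to err_cq instead of returning directly.
 */
cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size,
		       IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem))
	return PTR_ERR(cq->umem);

npages = ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE);

ib_umem_release() is a no-op on a NULL umem, which is why the error and teardown paths in this patch call it unconditionally and drop their cq->is_kernel guards.
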
@@ -204,29 +197,26 @@
 	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
 
 	if (!cq->is_kernel) {
-		cq->uar = &(to_vucontext(context)->uar);
+		cq->uar = &context->uar;
 
 		/* Copy udata back. */
 		if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
 			dev_warn(&dev->pdev->dev,
 				 "failed to copy back udata\n");
-			pvrdma_destroy_cq(&cq->ibcq);
-			return ERR_PTR(-EINVAL);
+			pvrdma_destroy_cq(&cq->ibcq, udata);
+			return -EINVAL;
 		}
 	}
 
-	return &cq->ibcq;
+	return 0;
 
 err_page_dir:
 	pvrdma_page_dir_cleanup(dev, &cq->pdir);
 err_umem:
-	if (!cq->is_kernel)
-		ib_umem_release(cq->umem);
+	ib_umem_release(cq->umem);
 err_cq:
 	atomic_dec(&dev->num_cqs);
-	kfree(cq);
-
-	return ERR_PTR(ret);
+	return ret;
 }
 
 static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
@@ -235,20 +225,17 @@
 	complete(&cq->free);
 	wait_for_completion(&cq->free);
 
-	if (!cq->is_kernel)
-		ib_umem_release(cq->umem);
+	ib_umem_release(cq->umem);
 
 	pvrdma_page_dir_cleanup(dev, &cq->pdir);
-	kfree(cq);
 }
 
 /**
  * pvrdma_destroy_cq - destroy completion queue
  * @cq: the completion queue to destroy.
- *
- * @return: 0 for success.
+ * @udata: user data or null for kernel object
  */
-int pvrdma_destroy_cq(struct ib_cq *cq)
+int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
 	struct pvrdma_cq *vcq = to_vcq(cq);
 	union pvrdma_cmd_req req;
@@ -274,8 +261,7 @@
 
 	pvrdma_free_cq(dev, vcq);
 	atomic_dec(&dev->num_cqs);
-
-	return ret;
+	return 0;
 }
 
 static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
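
The destroy side mirrors the allocation change: pvrdma_destroy_cq() gains a udata parameter and now always returns 0, and pvrdma_free_cq() no longer kfree()s the CQ because the RDMA core frees the pvrdma_cq it allocated once the destroy callback returns. A condensed sketch of the resulting contract; the elided command/table teardown is an assumption about code not shown in these hunks:

/* Sketch only: the driver tears down its device and ring state but does not
 * free struct pvrdma_cq itself; the core owns that allocation.
 */
int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	struct pvrdma_cq *vcq = to_vcq(cq);
	struct pvrdma_dev *dev = to_vdev(cq->device);

	/* ... post the destroy-CQ device command and unlink from the CQ table ... */

	pvrdma_free_cq(dev, vcq);	/* releases umem and page directory, no kfree */
	atomic_dec(&dev->num_cqs);
	return 0;
}
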
@@ -378,7 +364,7 @@
 	wc->dlid_path_bits = cqe->dlid_path_bits;
 	wc->port_num = cqe->port_num;
 	wc->vendor_err = cqe->vendor_err;
-	wc->network_hdr_type = cqe->network_hdr_type;
+	wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
 
 	/* Update shared ring state */
 	pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
@@ -390,7 +376,7 @@
  * pvrdma_poll_cq - poll for work completion queue entries
  * @ibcq: completion queue
  * @num_entries: the maximum number of entries
- * @entry: pointer to work completion array
+ * @wc: pointer to work completion array
  *
  * @return: number of polled completion entries
  */