2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -90,56 +90,49 @@
 
 /**
  * pvrdma_create_srq - create shared receive queue
- * @pd: protection domain
+ * @ibsrq: the IB shared receive queue
  * @init_attr: shared receive queue attributes
  * @udata: user data
  *
- * @return: the ib_srq pointer on success, otherwise returns an errno.
+ * @return: 0 on success, otherwise returns an errno.
  */
-struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
-                                 struct ib_srq_init_attr *init_attr,
-                                 struct ib_udata *udata)
+int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
+                      struct ib_udata *udata)
 {
-        struct pvrdma_srq *srq = NULL;
-        struct pvrdma_dev *dev = to_vdev(pd->device);
+        struct pvrdma_srq *srq = to_vsrq(ibsrq);
+        struct pvrdma_dev *dev = to_vdev(ibsrq->device);
         union pvrdma_cmd_req req;
         union pvrdma_cmd_resp rsp;
         struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
         struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
-        struct pvrdma_create_srq_resp srq_resp = {0};
+        struct pvrdma_create_srq_resp srq_resp = {};
         struct pvrdma_create_srq ucmd;
         unsigned long flags;
         int ret;
 
-        if (!(pd->uobject && udata)) {
+        if (!udata) {
                 /* No support for kernel clients. */
                 dev_warn(&dev->pdev->dev,
                          "no shared receive queue support for kernel client\n");
-                return ERR_PTR(-EOPNOTSUPP);
+                return -EOPNOTSUPP;
         }
 
         if (init_attr->srq_type != IB_SRQT_BASIC) {
                 dev_warn(&dev->pdev->dev,
                          "shared receive queue type %d not supported\n",
                          init_attr->srq_type);
-                return ERR_PTR(-EINVAL);
+                return -EINVAL;
         }
 
         if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
             init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
                 dev_warn(&dev->pdev->dev,
                          "shared receive queue size invalid\n");
-                return ERR_PTR(-EINVAL);
+                return -EINVAL;
         }
 
         if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
-                return ERR_PTR(-ENOMEM);
-
-        srq = kmalloc(sizeof(*srq), GFP_KERNEL);
-        if (!srq) {
-                ret = -ENOMEM;
-                goto err_srq;
-        }
+                return -ENOMEM;
 
         spin_lock_init(&srq->lock);
         refcount_set(&srq->refcnt, 1);
@@ -153,15 +146,13 @@
                 goto err_srq;
         }
 
-        srq->umem = ib_umem_get(pd->uobject->context,
-                                ucmd.buf_addr,
-                                ucmd.buf_size, 0, 0);
+        srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0);
         if (IS_ERR(srq->umem)) {
                 ret = PTR_ERR(srq->umem);
                 goto err_srq;
         }
 
-        srq->npages = ib_umem_page_count(srq->umem);
+        srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE);
 
         if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
                 dev_warn(&dev->pdev->dev,
@@ -183,7 +174,7 @@
         cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
         cmd->srq_type = init_attr->srq_type;
         cmd->nchunks = srq->npages;
-        cmd->pd_handle = to_vpd(pd)->pd_handle;
+        cmd->pd_handle = to_vpd(ibsrq->pd)->pd_handle;
         cmd->attrs.max_wr = init_attr->attr.max_wr;
         cmd->attrs.max_sge = init_attr->attr.max_sge;
         cmd->attrs.srq_limit = init_attr->attr.srq_limit;
@@ -206,21 +197,20 @@
         /* Copy udata back. */
         if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
                 dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
-                pvrdma_destroy_srq(&srq->ibsrq);
-                return ERR_PTR(-EINVAL);
+                pvrdma_destroy_srq(&srq->ibsrq, udata);
+                return -EINVAL;
         }
 
-        return &srq->ibsrq;
+        return 0;
 
 err_page_dir:
         pvrdma_page_dir_cleanup(dev, &srq->pdir);
 err_umem:
         ib_umem_release(srq->umem);
 err_srq:
-        kfree(srq);
         atomic_dec(&dev->num_srqs);
 
-        return ERR_PTR(ret);
+        return ret;
 }
 
 static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
@@ -240,18 +230,17 @@
 
         pvrdma_page_dir_cleanup(dev, &srq->pdir);
 
-        kfree(srq);
-
         atomic_dec(&dev->num_srqs);
 }
 
 /**
  * pvrdma_destroy_srq - destroy shared receive queue
  * @srq: the shared receive queue to destroy
+ * @udata: user data or null for kernel object
  *
  * @return: 0 for success.
  */
-int pvrdma_destroy_srq(struct ib_srq *srq)
+int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 {
         struct pvrdma_srq *vsrq = to_vsrq(srq);
         union pvrdma_cmd_req req;
@@ -270,7 +259,6 @@
                          ret);
 
         pvrdma_free_srq(dev, vsrq);
-
         return 0;
 }
 