2024-05-14 bedbef8ad3e75a304af6361af235302bcc61d06b
kernel/drivers/infiniband/hw/mlx5/cq.c
@@ -35,8 +35,10 @@
 #include <rdma/ib_user_verbs.h>
 #include <rdma/ib_cache.h>
 #include "mlx5_ib.h"
+#include "srq.h"
+#include "qp.h"

-static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
+static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
 {
 	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

@@ -81,7 +83,7 @@

 	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

-	if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
+	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
 	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
 		return cqe;
 	} else {
@@ -119,13 +121,13 @@
 	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
 	case MLX5_OPCODE_RDMA_WRITE_IMM:
 		wc->wc_flags |= IB_WC_WITH_IMM;
-		/* fall through */
+		fallthrough;
 	case MLX5_OPCODE_RDMA_WRITE:
 		wc->opcode = IB_WC_RDMA_WRITE;
 		break;
 	case MLX5_OPCODE_SEND_IMM:
 		wc->wc_flags |= IB_WC_WITH_IMM;
-		/* fall through */
+		fallthrough;
 	case MLX5_OPCODE_SEND:
 	case MLX5_OPCODE_SEND_INVAL:
 		wc->opcode = IB_WC_SEND;
@@ -166,7 +168,7 @@
 {
 	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
 	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
-	struct mlx5_ib_srq *srq;
+	struct mlx5_ib_srq *srq = NULL;
 	struct mlx5_ib_wq *wq;
 	u16 wqe_ctr;
 	u8 roce_packet_type;
@@ -177,9 +179,9 @@
 		struct mlx5_core_srq *msrq = NULL;

 		if (qp->ibqp.xrcd) {
-			msrq = mlx5_core_get_srq(dev->mdev,
-						 be32_to_cpu(cqe->srqn));
-			srq = to_mibsrq(msrq);
+			msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
+			if (msrq)
+				srq = to_mibsrq(msrq);
 		} else {
 			srq = to_msrq(qp->ibqp.srq);
 		}
@@ -187,8 +189,8 @@
 			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
 			wc->wr_id = srq->wrid[wqe_ctr];
 			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
-			if (msrq && atomic_dec_and_test(&msrq->refcount))
-				complete(&msrq->free);
+			if (msrq)
+				mlx5_core_res_put(&msrq->common);
 		}
 	} else {
 		wq = &qp->rq;
@@ -197,11 +199,11 @@
 	}
 	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

-	switch (cqe->op_own >> 4) {
+	switch (get_cqe_opcode(cqe)) {
 	case MLX5_CQE_RESP_WR_IMM:
 		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
 		wc->wc_flags = IB_WC_WITH_IMM;
-		wc->ex.imm_data = cqe->imm_inval_pkey;
+		wc->ex.imm_data = cqe->immediate;
 		break;
 	case MLX5_CQE_RESP_SEND:
 		wc->opcode = IB_WC_RECV;
@@ -213,12 +215,12 @@
 	case MLX5_CQE_RESP_SEND_IMM:
 		wc->opcode = IB_WC_RECV;
 		wc->wc_flags = IB_WC_WITH_IMM;
-		wc->ex.imm_data = cqe->imm_inval_pkey;
+		wc->ex.imm_data = cqe->immediate;
 		break;
 	case MLX5_CQE_RESP_SEND_INV:
 		wc->opcode = IB_WC_RECV;
 		wc->wc_flags = IB_WC_WITH_INVALIDATE;
-		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
+		wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
 		break;
 	}
 	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
@@ -226,7 +228,7 @@
 	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
 	wc->wc_flags |= g ? IB_WC_GRH : 0;
 	if (unlikely(is_qp1(qp->ibqp.qp_type))) {
-		u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
+		u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;

 		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
 				    &wc->pkey_index);
@@ -253,7 +255,7 @@

 	switch (roce_packet_type) {
 	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
-		wc->network_hdr_type = RDMA_NETWORK_IB;
+		wc->network_hdr_type = RDMA_NETWORK_ROCE_V1;
 		break;
 	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
 		wc->network_hdr_type = RDMA_NETWORK_IPV6;
@@ -330,50 +332,6 @@
 	dump_cqe(dev, cqe);
 }

-static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
-{
-	/* TBD: waiting decision
-	 */
-	return 0;
-}
-
-static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
-{
-	struct mlx5_wqe_data_seg *dpseg;
-	void *addr;
-
-	dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
-		sizeof(struct mlx5_wqe_raddr_seg) +
-		sizeof(struct mlx5_wqe_atomic_seg);
-	addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
-	return addr;
-}
-
-static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
-			  uint16_t idx)
-{
-	void *addr;
-	int byte_count;
-	int i;
-
-	if (!is_atomic_response(qp, idx))
-		return;
-
-	byte_count = be32_to_cpu(cqe64->byte_cnt);
-	addr = mlx5_get_atomic_laddr(qp, idx);
-
-	if (byte_count == 4) {
-		*(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
-	} else {
-		for (i = 0; i < byte_count; i += 8) {
-			*(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
-			addr += 8;
-		}
-	}
-
-	return;
-}
-
 static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 			   u16 tail, u16 head)
 {
@@ -381,7 +339,6 @@

 	do {
 		idx = tail & (qp->sq.wqe_cnt - 1);
-		handle_atomic(qp, cqe64, idx);
 		if (idx == head)
 			break;

@@ -393,7 +350,7 @@

 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
-	mlx5_frag_buf_free(dev->mdev, &buf->fbc.frag_buf);
+	mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
 }

 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -428,16 +385,15 @@
 	item->key = be32_to_cpu(cqe->mkey);
 }

-static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
-			 struct ib_wc *wc, int *npolled)
+static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
+		    int *npolled, bool is_send)
 {
 	struct mlx5_ib_wq *wq;
 	unsigned int cur;
-	unsigned int idx;
 	int np;
 	int i;

-	wq = &qp->sq;
+	wq = (is_send) ? &qp->sq : &qp->rq;
 	cur = wq->head - wq->tail;
 	np = *npolled;

@@ -445,39 +401,16 @@
 		return;

 	for (i = 0; i < cur && np < num_entries; i++) {
-		idx = wq->last_poll & (wq->wqe_cnt - 1);
+		unsigned int idx;
+
+		idx = (is_send) ? wq->last_poll : wq->tail;
+		idx &= (wq->wqe_cnt - 1);
 		wc->wr_id = wq->wrid[idx];
 		wc->status = IB_WC_WR_FLUSH_ERR;
 		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
 		wq->tail++;
-		np++;
-		wc->qp = &qp->ibqp;
-		wc++;
-		wq->last_poll = wq->w_list[idx].next;
-	}
-	*npolled = np;
-}
-
-static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
-			 struct ib_wc *wc, int *npolled)
-{
-	struct mlx5_ib_wq *wq;
-	unsigned int cur;
-	int np;
-	int i;
-
-	wq = &qp->rq;
-	cur = wq->head - wq->tail;
-	np = *npolled;
-
-	if (cur == 0)
-		return;
-
-	for (i = 0; i < cur && np < num_entries; i++) {
-		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
-		wc->status = IB_WC_WR_FLUSH_ERR;
-		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
-		wq->tail++;
+		if (is_send)
+			wq->last_poll = wq->w_list[idx].next;
 		np++;
 		wc->qp = &qp->ibqp;
 		wc++;
@@ -493,13 +426,13 @@
 	*npolled = 0;
 	/* Find uncompleted WQEs belonging to that cq and return mmics ones */
 	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
-		sw_send_comp(qp, num_entries, wc + *npolled, npolled);
+		sw_comp(qp, num_entries, wc + *npolled, npolled, true);
 		if (*npolled >= num_entries)
 			return;
 	}

 	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
-		sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
+		sw_comp(qp, num_entries, wc + *npolled, npolled, false);
 		if (*npolled >= num_entries)
 			return;
 	}
@@ -514,9 +447,6 @@
 	struct mlx5_cqe64 *cqe64;
 	struct mlx5_core_qp *mqp;
 	struct mlx5_ib_wq *wq;
-	struct mlx5_sig_err_cqe *sig_err_cqe;
-	struct mlx5_core_mkey *mmkey;
-	struct mlx5_ib_mr *mr;
 	uint8_t opcode;
 	uint32_t qpn;
 	u16 wqe_ctr;
@@ -537,7 +467,7 @@
 	 */
 	rmb();

-	opcode = cqe64->op_own >> 4;
+	opcode = get_cqe_opcode(cqe64);
 	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
 		if (likely(cq->resize_buf)) {
 			free_cq_buf(dev, &cq->buf);
@@ -556,7 +486,7 @@
 		 * because CQs will be locked while QPs are removed
 		 * from the table.
 		 */
-		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
+		mqp = radix_tree_lookup(&dev->qp_table.tree, qpn);
 		*cur_qp = to_mibqp(mqp);
 	}

@@ -611,26 +541,28 @@
 			}
 		}
 		break;
-	case MLX5_CQE_SIG_ERR:
-		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
+	case MLX5_CQE_SIG_ERR: {
+		struct mlx5_sig_err_cqe *sig_err_cqe =
+			(struct mlx5_sig_err_cqe *)cqe64;
+		struct mlx5_core_sig_ctx *sig;

-		read_lock(&dev->mdev->priv.mkey_table.lock);
-		mmkey = __mlx5_mr_lookup(dev->mdev,
-					 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
-		mr = to_mibmr(mmkey);
-		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
-		mr->sig->sig_err_exists = true;
-		mr->sig->sigerr_count++;
+		xa_lock(&dev->sig_mrs);
+		sig = xa_load(&dev->sig_mrs,
+			      mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
+		get_sig_err_item(sig_err_cqe, &sig->err_item);
+		sig->sig_err_exists = true;
+		sig->sigerr_count++;

 		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
-			     cq->mcq.cqn, mr->sig->err_item.key,
-			     mr->sig->err_item.err_type,
-			     mr->sig->err_item.sig_err_offset,
-			     mr->sig->err_item.expected,
-			     mr->sig->err_item.actual);
+			     cq->mcq.cqn, sig->err_item.key,
+			     sig->err_item.err_type,
+			     sig->err_item.sig_err_offset,
+			     sig->err_item.expected,
+			     sig->err_item.actual);

-		read_unlock(&dev->mdev->priv.mkey_table.lock);
+		xa_unlock(&dev->sig_mrs);
 		goto repoll;
+	}
 	}

 	return 0;
@@ -728,15 +660,10 @@
 			     int nent,
 			     int cqe_size)
 {
-	struct mlx5_frag_buf_ctrl *c = &buf->fbc;
-	struct mlx5_frag_buf *frag_buf = &c->frag_buf;
-	u32 cqc_buff[MLX5_ST_SZ_DW(cqc)] = {0};
+	struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
+	u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
+	u8 log_wq_sz = ilog2(cqe_size);
 	int err;
-
-	MLX5_SET(cqc, cqc_buff, log_cq_size, ilog2(cqe_size));
-	MLX5_SET(cqc, cqc_buff, cqe_sz, (cqe_size == 128) ? 1 : 0);
-
-	mlx5_core_init_cq_frag_buf(&buf->fbc, cqc_buff);

 	err = mlx5_frag_buf_alloc_node(dev->mdev,
 				       nent * cqe_size,
@@ -744,6 +671,8 @@
 				       dev->mdev->priv.numa_node);
 	if (err)
 		return err;
+
+	mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);

 	buf->cqe_size = cqe_size;
 	buf->nent = nent;
@@ -774,8 +703,7 @@
 }

 static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
-			  struct ib_ucontext *context, struct mlx5_ib_cq *cq,
-			  int entries, u32 **cqb,
+			  struct mlx5_ib_cq *cq, int entries, u32 **cqb,
 			  int *cqe_size, int *index, int *inlen)
 {
 	struct mlx5_ib_create_cq ucmd = {};
@@ -786,32 +714,35 @@
 	int ncont;
 	void *cqc;
 	int err;
+	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+		udata, struct mlx5_ib_ucontext, ibucontext);

-	ucmdlen = udata->inlen < sizeof(ucmd) ?
-		  (sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd);
+	ucmdlen = min(udata->inlen, sizeof(ucmd));
+	if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags))
+		return -EINVAL;

 	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
 		return -EFAULT;

-	if (ucmdlen == sizeof(ucmd) &&
-	    (ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD)))
+	if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |
+			    MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX)))
 		return -EINVAL;

-	if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
+	if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
+	    ucmd.reserved0 || ucmd.reserved1)
 		return -EINVAL;

 	*cqe_size = ucmd.cqe_size;

-	cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
-				   entries * ucmd.cqe_size,
-				   IB_ACCESS_LOCAL_WRITE, 1);
+	cq->buf.umem =
+		ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
+			    entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(cq->buf.umem)) {
 		err = PTR_ERR(cq->buf.umem);
 		return err;
 	}

-	err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
-				  &cq->db);
+	err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db);
 	if (err)
 		goto err_umem;

@@ -835,7 +766,14 @@
 	MLX5_SET(cqc, cqc, log_page_size,
 		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);

-	*index = to_mucontext(context)->bfregi.sys_pages[0];
+	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
+		*index = ucmd.uar_page_index;
+	} else if (context->bfregi.lib_uar_dyn) {
+		err = -EINVAL;
+		goto err_cqb;
+	} else {
+		*index = context->bfregi.sys_pages[0];
+	}

 	if (ucmd.cqe_comp_en == 1) {
 		int mini_cqe_format;
@@ -877,22 +815,26 @@
 		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
 	}

+	MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
 	return 0;

 err_cqb:
 	kvfree(*cqb);

 err_db:
-	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
+	mlx5_ib_db_unmap_user(context, &cq->db);

 err_umem:
 	ib_umem_release(cq->buf.umem);
 	return err;
 }

-static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
+static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
 {
-	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
+	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+		udata, struct mlx5_ib_ucontext, ibucontext);
+
+	mlx5_ib_db_unmap_user(context, &cq->db);
 	ib_umem_release(cq->buf.umem);
 }

@@ -933,7 +875,7 @@

 	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
 		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
-		 cq->buf.fbc.frag_buf.npages;
+		 cq->buf.frag_buf.npages;
 	*cqb = kvzalloc(*inlen, GFP_KERNEL);
 	if (!*cqb) {
 		err = -ENOMEM;
941883 }
942884
943885 pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
944
- mlx5_fill_page_frag_array(&cq->buf.fbc.frag_buf, pas);
886
+ mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);
945887
946888 cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
947889 MLX5_SET(cqc, cqc, log_page_size,
948
- cq->buf.fbc.frag_buf.page_shift -
890
+ cq->buf.frag_buf.page_shift -
949891 MLX5_ADAPTER_PAGE_SHIFT);
950892
951893 *index = dev->mdev->priv.uar->index;
@@ -974,38 +916,33 @@
 	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }

-struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
-				const struct ib_cq_init_attr *attr,
-				struct ib_ucontext *context,
-				struct ib_udata *udata)
+int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+		      struct ib_udata *udata)
 {
+	struct ib_device *ibdev = ibcq->device;
 	int entries = attr->cqe;
 	int vector = attr->comp_vector;
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	struct mlx5_ib_cq *cq;
-	int uninitialized_var(index);
-	int uninitialized_var(inlen);
+	struct mlx5_ib_cq *cq = to_mcq(ibcq);
+	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+	int index;
+	int inlen;
 	u32 *cqb = NULL;
 	void *cqc;
 	int cqe_size;
-	unsigned int irqn;
 	int eqn;
 	int err;

 	if (entries < 0 ||
 	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;

 	if (check_cq_create_flags(attr->flags))
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;

 	entries = roundup_pow_of_two(entries + 1);
 	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
-		return ERR_PTR(-EINVAL);
-
-	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
-	if (!cq)
-		return ERR_PTR(-ENOMEM);
+		return -EINVAL;

 	cq->ibcq.cqe = entries - 1;
 	mutex_init(&cq->resize_mutex);
@@ -1016,22 +953,22 @@
 	INIT_LIST_HEAD(&cq->list_send_qp);
 	INIT_LIST_HEAD(&cq->list_recv_qp);

-	if (context) {
-		err = create_cq_user(dev, udata, context, cq, entries,
-				     &cqb, &cqe_size, &index, &inlen);
+	if (udata) {
+		err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
+				     &index, &inlen);
 		if (err)
-			goto err_create;
+			return err;
 	} else {
 		cqe_size = cache_line_size() == 128 ? 128 : 64;
 		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
 				       &index, &inlen);
 		if (err)
-			goto err_create;
+			return err;

 		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
 	}

-	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
+	err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
 	if (err)
 		goto err_cqb;

@@ -1049,13 +986,12 @@
 	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
 		MLX5_SET(cqc, cqc, oi, 1);

-	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
+	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
 	if (err)
 		goto err_cqb;

 	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
-	cq->mcq.irqn = irqn;
-	if (context)
+	if (udata)
 		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
 	else
 		cq->mcq.comp = mlx5_ib_cq_comp;
@@ -1063,7 +999,7 @@

 	INIT_LIST_HEAD(&cq->wc_list);

-	if (context)
+	if (udata)
 		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
 			err = -EFAULT;
 			goto err_cmd;
@@ -1071,42 +1007,34 @@


 	kvfree(cqb);
-	return &cq->ibcq;
+	return 0;

 err_cmd:
 	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

 err_cqb:
 	kvfree(cqb);
-	if (context)
-		destroy_cq_user(cq, context);
+	if (udata)
+		destroy_cq_user(cq, udata);
 	else
 		destroy_cq_kernel(dev, cq);
-
-err_create:
-	kfree(cq);
-
-	return ERR_PTR(err);
+	return err;
 }

-
-int mlx5_ib_destroy_cq(struct ib_cq *cq)
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(cq->device);
 	struct mlx5_ib_cq *mcq = to_mcq(cq);
-	struct ib_ucontext *context = NULL;
+	int ret;

-	if (cq->uobject)
-		context = cq->uobject->context;
+	ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
+	if (ret)
+		return ret;

-	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
-	if (context)
-		destroy_cq_user(mcq, context);
+	if (udata)
+		destroy_cq_user(mcq, udata);
 	else
 		destroy_cq_kernel(dev, mcq);
-
-	kfree(mcq);
-
 	return 0;
 }

@@ -1204,7 +1132,6 @@
 	struct ib_umem *umem;
 	int err;
 	int npages;
-	struct ib_ucontext *context = cq->buf.umem->context;

 	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
 	if (err)
@@ -1217,9 +1144,9 @@
 	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
 		return -EINVAL;

-	umem = ib_umem_get(context, ucmd.buf_addr,
+	umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
 			   (size_t)ucmd.cqe_size * entries,
-			   IB_ACCESS_LOCAL_WRITE, 1);
+			   IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(umem)) {
 		err = PTR_ERR(umem);
 		return err;
@@ -1232,11 +1159,6 @@
 	*cqe_size = ucmd.cqe_size;

 	return 0;
-}
-
-static void un_resize_user(struct mlx5_ib_cq *cq)
-{
-	ib_umem_release(cq->resize_umem);
 }

 static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
@@ -1259,12 +1181,6 @@
 ex:
 	kfree(cq->resize_buf);
 	return err;
-}
-
-static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
-{
-	free_cq_buf(dev, cq->resize_buf);
-	cq->resize_buf = NULL;
 }

 static int copy_resize_cqes(struct mlx5_ib_cq *cq)
@@ -1296,7 +1212,7 @@
 		return -EINVAL;
 	}

-	while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
+	while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
 		dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
 					     (i + 1) & cq->resize_buf->nent);
 		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
13331249 __be64 *pas;
13341250 int page_shift;
13351251 int inlen;
1336
- int uninitialized_var(cqe_size);
1252
+ int cqe_size;
13371253 unsigned long flags;
13381254
13391255 if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
....@@ -1364,11 +1280,10 @@
13641280 cqe_size = 64;
13651281 err = resize_kernel(dev, cq, entries, cqe_size);
13661282 if (!err) {
1367
- struct mlx5_frag_buf_ctrl *c;
1283
+ struct mlx5_frag_buf *frag_buf = &cq->resize_buf->frag_buf;
13681284
1369
- c = &cq->resize_buf->fbc;
1370
- npas = c->frag_buf.npages;
1371
- page_shift = c->frag_buf.page_shift;
1285
+ npas = frag_buf->npages;
1286
+ page_shift = frag_buf->page_shift;
13721287 }
13731288 }
13741289
@@ -1389,8 +1304,7 @@
 		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
 				     pas, 0);
 	else
-		mlx5_fill_page_frag_array(&cq->resize_buf->fbc.frag_buf,
-					  pas);
+		mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);

 	MLX5_SET(modify_cq_in, in,
 		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
14491363 kvfree(in);
14501364
14511365 ex_resize:
1452
- if (udata)
1453
- un_resize_user(cq);
1454
- else
1455
- un_resize_kernel(dev, cq);
1366
+ ib_umem_release(cq->resize_umem);
1367
+ if (!udata) {
1368
+ free_cq_buf(dev, cq->resize_buf);
1369
+ cq->resize_buf = NULL;
1370
+ }
14561371 ex:
14571372 mutex_unlock(&cq->resize_mutex);
14581373 return err;
14591374 }
14601375
1461
-int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
1376
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
14621377 {
14631378 struct mlx5_ib_cq *cq;
14641379