| .. | .. |
| 35 | 35 | #include <rdma/ib_user_verbs.h> |
| 36 | 36 | #include <rdma/ib_cache.h> |
| 37 | 37 | #include "mlx5_ib.h" |
| 38 | +#include "srq.h" |
| 39 | +#include "qp.h" |
| 38 | 40 | |
| 39 | | -static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq) |
| 41 | +static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe) |
| 40 | 42 | { |
| 41 | 43 | struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; |
| 42 | 44 | |
| .. | .. |
| 81 | 83 | |
| 82 | 84 | cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; |
| 83 | 85 | |
| 84 | | - if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) && |
| 86 | + if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) && |
| 85 | 87 | !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { |
| 86 | 88 | return cqe; |
| 87 | 89 | } else { |
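The open-coded `(cqe64->op_own) >> 4` is replaced throughout this file by the `get_cqe_opcode()` helper. A minimal sketch of such a helper, assuming the opcode sits in the high nibble of `op_own` exactly as the removed expression reads it:

```c
/* Sketch: the CQE opcode occupies bits [7:4] of op_own; the low bits
 * carry the ownership flag tested against MLX5_CQE_OWNER_MASK above. */
static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
{
	return cqe->op_own >> 4;
}
```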
| .. | .. |
| 119 | 121 | switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { |
| 120 | 122 | case MLX5_OPCODE_RDMA_WRITE_IMM: |
| 121 | 123 | wc->wc_flags |= IB_WC_WITH_IMM; |
| 122 | | - /* fall through */ |
| 124 | + fallthrough; |
| 123 | 125 | case MLX5_OPCODE_RDMA_WRITE: |
| 124 | 126 | wc->opcode = IB_WC_RDMA_WRITE; |
| 125 | 127 | break; |
| 126 | 128 | case MLX5_OPCODE_SEND_IMM: |
| 127 | 129 | wc->wc_flags |= IB_WC_WITH_IMM; |
| 128 | | - /* fall through */ |
| 130 | + fallthrough; |
| 129 | 131 | case MLX5_OPCODE_SEND: |
| 130 | 132 | case MLX5_OPCODE_SEND_INVAL: |
| 131 | 133 | wc->opcode = IB_WC_SEND; |
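The `/* fall through */` comments become the `fallthrough;` pseudo-keyword, which the compiler can verify where a comment cannot. Roughly how the kernel defines it (see include/linux/compiler_attributes.h; shown here as an illustrative sketch):

```c
/* When the compiler understands the attribute, a missing annotation
 * still warns under -Wimplicit-fallthrough; otherwise the macro
 * degrades to a harmless empty statement before the next case label. */
#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif
```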
| .. | .. |
| 166 | 168 | { |
| 167 | 169 | enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1); |
| 168 | 170 | struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); |
| 169 | | - struct mlx5_ib_srq *srq; |
| 171 | + struct mlx5_ib_srq *srq = NULL; |
| 170 | 172 | struct mlx5_ib_wq *wq; |
| 171 | 173 | u16 wqe_ctr; |
| 172 | 174 | u8 roce_packet_type; |
| .. | .. |
| 177 | 179 | struct mlx5_core_srq *msrq = NULL; |
| 178 | 180 | |
| 179 | 181 | if (qp->ibqp.xrcd) { |
| 180 | | - msrq = mlx5_core_get_srq(dev->mdev, |
| 181 | | - be32_to_cpu(cqe->srqn)); |
| 182 | | - srq = to_mibsrq(msrq); |
| 182 | + msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn)); |
| 183 | + if (msrq) |
| 184 | + srq = to_mibsrq(msrq); |
| 183 | 185 | } else { |
| 184 | 186 | srq = to_msrq(qp->ibqp.srq); |
| 185 | 187 | } |
| .. | .. |
| 187 | 189 | wqe_ctr = be16_to_cpu(cqe->wqe_counter); |
| 188 | 190 | wc->wr_id = srq->wrid[wqe_ctr]; |
| 189 | 191 | mlx5_ib_free_srq_wqe(srq, wqe_ctr); |
| 190 | | - if (msrq && atomic_dec_and_test(&msrq->refcount)) |
| 191 | | - complete(&msrq->free); |
| 192 | + if (msrq) |
| 193 | + mlx5_core_res_put(&msrq->common); |
| 192 | 194 | } |
| 193 | 195 | } else { |
| 194 | 196 | wq = &qp->rq; |
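The removed `atomic_dec_and_test()`/`complete()` pair is the classic refcount-plus-completion teardown idiom; `mlx5_core_res_put()` presumably centralizes it for all tracked core resources via their `common` member. A hedged sketch of the put side under that assumption:

```c
/* Sketch: dropping the last reference wakes whoever waits on res->free
 * in the destroy path, mirroring the two removed lines above. */
static void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
{
	if (atomic_dec_and_test(&res->refcount))
		complete(&res->free);
}
```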
| .. | .. |
| 197 | 199 | } |
| 198 | 200 | wc->byte_len = be32_to_cpu(cqe->byte_cnt); |
| 199 | 201 | |
| 200 | | - switch (cqe->op_own >> 4) { |
| 202 | + switch (get_cqe_opcode(cqe)) { |
| 201 | 203 | case MLX5_CQE_RESP_WR_IMM: |
| 202 | 204 | wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; |
| 203 | 205 | wc->wc_flags = IB_WC_WITH_IMM; |
| 204 | | - wc->ex.imm_data = cqe->imm_inval_pkey; |
| 206 | + wc->ex.imm_data = cqe->immediate; |
| 205 | 207 | break; |
| 206 | 208 | case MLX5_CQE_RESP_SEND: |
| 207 | 209 | wc->opcode = IB_WC_RECV; |
| .. | .. |
| 213 | 215 | case MLX5_CQE_RESP_SEND_IMM: |
| 214 | 216 | wc->opcode = IB_WC_RECV; |
| 215 | 217 | wc->wc_flags = IB_WC_WITH_IMM; |
| 216 | | - wc->ex.imm_data = cqe->imm_inval_pkey; |
| 218 | + wc->ex.imm_data = cqe->immediate; |
| 217 | 219 | break; |
| 218 | 220 | case MLX5_CQE_RESP_SEND_INV: |
| 219 | 221 | wc->opcode = IB_WC_RECV; |
| 220 | 222 | wc->wc_flags = IB_WC_WITH_INVALIDATE; |
| 221 | | - wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); |
| 223 | + wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey); |
| 222 | 224 | break; |
| 223 | 225 | } |
| 224 | 226 | wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; |
| .. | .. |
| 226 | 228 | g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; |
| 227 | 229 | wc->wc_flags |= g ? IB_WC_GRH : 0; |
| 228 | 230 | if (unlikely(is_qp1(qp->ibqp.qp_type))) { |
| 229 | | - u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff; |
| 231 | + u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff; |
| 230 | 232 | |
| 231 | 233 | ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey, |
| 232 | 234 | &wc->pkey_index); |
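The single `imm_inval_pkey` dword is split into purpose-named fields. Since every new name reads the same 32-bit slot the old field covered, a plausible layout is an anonymous union inside `struct mlx5_cqe64` (sketch; surrounding field placement is illustrative):

```c
struct mlx5_cqe64 {
	/* ... preceding CQE fields ... */
	union {
		__be32 immediate;	/* RESP_WR_IMM / RESP_SEND_IMM */
		__be32 inval_rkey;	/* RESP_SEND_INV */
		__be32 pkey;		/* GSI (QP1) completions */
	};
	/* ... remaining CQE fields, op_own last ... */
};
```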
| .. | .. |
| 253 | 255 | |
| 254 | 256 | switch (roce_packet_type) { |
| 255 | 257 | case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH: |
| 256 | | - wc->network_hdr_type = RDMA_NETWORK_IB; |
| 258 | + wc->network_hdr_type = RDMA_NETWORK_ROCE_V1; |
| 257 | 259 | break; |
| 258 | 260 | case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6: |
| 259 | 261 | wc->network_hdr_type = RDMA_NETWORK_IPV6; |
| .. | .. |
| 330 | 332 | dump_cqe(dev, cqe); |
| 331 | 333 | } |
| 332 | 334 | |
| 333 | | -static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx) |
| 334 | | -{ |
| 335 | | - /* TBD: waiting decision |
| 336 | | - */ |
| 337 | | - return 0; |
| 338 | | -} |
| 339 | | - |
| 340 | | -static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx) |
| 341 | | -{ |
| 342 | | - struct mlx5_wqe_data_seg *dpseg; |
| 343 | | - void *addr; |
| 344 | | - |
| 345 | | - dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) + |
| 346 | | - sizeof(struct mlx5_wqe_raddr_seg) + |
| 347 | | - sizeof(struct mlx5_wqe_atomic_seg); |
| 348 | | - addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr); |
| 349 | | - return addr; |
| 350 | | -} |
| 351 | | - |
| 352 | | -static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, |
| 353 | | - uint16_t idx) |
| 354 | | -{ |
| 355 | | - void *addr; |
| 356 | | - int byte_count; |
| 357 | | - int i; |
| 358 | | - |
| 359 | | - if (!is_atomic_response(qp, idx)) |
| 360 | | - return; |
| 361 | | - |
| 362 | | - byte_count = be32_to_cpu(cqe64->byte_cnt); |
| 363 | | - addr = mlx5_get_atomic_laddr(qp, idx); |
| 364 | | - |
| 365 | | - if (byte_count == 4) { |
| 366 | | - *(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr)); |
| 367 | | - } else { |
| 368 | | - for (i = 0; i < byte_count; i += 8) { |
| 369 | | - *(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr)); |
| 370 | | - addr += 8; |
| 371 | | - } |
| 372 | | - } |
| 373 | | - |
| 374 | | - return; |
| 375 | | -} |
| 376 | | - |
| 377 | 335 | static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, |
| 378 | 336 | u16 tail, u16 head) |
| 379 | 337 | { |
| .. | .. |
| 381 | 339 | |
| 382 | 340 | do { |
| 383 | 341 | idx = tail & (qp->sq.wqe_cnt - 1); |
| 384 | | - handle_atomic(qp, cqe64, idx); |
| 385 | 342 | if (idx == head) |
| 386 | 343 | break; |
| 387 | 344 | |
| .. | .. |
| 393 | 350 | |
| 394 | 351 | static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf) |
| 395 | 352 | { |
| 396 | | - mlx5_frag_buf_free(dev->mdev, &buf->fbc.frag_buf); |
| 353 | + mlx5_frag_buf_free(dev->mdev, &buf->frag_buf); |
| 397 | 354 | } |
| 398 | 355 | |
| 399 | 356 | static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe, |
| .. | .. |
| 428 | 385 | item->key = be32_to_cpu(cqe->mkey); |
| 429 | 386 | } |
| 430 | 387 | |
| 431 | | -static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries, |
| 432 | | - struct ib_wc *wc, int *npolled) |
| 388 | +static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc, |
| 389 | + int *npolled, bool is_send) |
| 433 | 390 | { |
| 434 | 391 | struct mlx5_ib_wq *wq; |
| 435 | 392 | unsigned int cur; |
| 436 | | - unsigned int idx; |
| 437 | 393 | int np; |
| 438 | 394 | int i; |
| 439 | 395 | |
| 440 | | - wq = &qp->sq; |
| 396 | + wq = (is_send) ? &qp->sq : &qp->rq; |
| 441 | 397 | cur = wq->head - wq->tail; |
| 442 | 398 | np = *npolled; |
| 443 | 399 | |
| .. | .. |
| 445 | 401 | return; |
| 446 | 402 | |
| 447 | 403 | for (i = 0; i < cur && np < num_entries; i++) { |
| 448 | | - idx = wq->last_poll & (wq->wqe_cnt - 1); |
| 404 | + unsigned int idx; |
| 405 | + |
| 406 | + idx = (is_send) ? wq->last_poll : wq->tail; |
| 407 | + idx &= (wq->wqe_cnt - 1); |
| 449 | 408 | wc->wr_id = wq->wrid[idx]; |
| 450 | 409 | wc->status = IB_WC_WR_FLUSH_ERR; |
| 451 | 410 | wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR; |
| 452 | 411 | wq->tail++; |
| 453 | | - np++; |
| 454 | | - wc->qp = &qp->ibqp; |
| 455 | | - wc++; |
| 456 | | - wq->last_poll = wq->w_list[idx].next; |
| 457 | | - } |
| 458 | | - *npolled = np; |
| 459 | | -} |
| 460 | | - |
| 461 | | -static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries, |
| 462 | | - struct ib_wc *wc, int *npolled) |
| 463 | | -{ |
| 464 | | - struct mlx5_ib_wq *wq; |
| 465 | | - unsigned int cur; |
| 466 | | - int np; |
| 467 | | - int i; |
| 468 | | - |
| 469 | | - wq = &qp->rq; |
| 470 | | - cur = wq->head - wq->tail; |
| 471 | | - np = *npolled; |
| 472 | | - |
| 473 | | - if (cur == 0) |
| 474 | | - return; |
| 475 | | - |
| 476 | | - for (i = 0; i < cur && np < num_entries; i++) { |
| 477 | | - wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; |
| 478 | | - wc->status = IB_WC_WR_FLUSH_ERR; |
| 479 | | - wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR; |
| 480 | | - wq->tail++; |
| 412 | + if (is_send) |
| 413 | + wq->last_poll = wq->w_list[idx].next; |
| 481 | 414 | np++; |
| 482 | 415 | wc->qp = &qp->ibqp; |
| 483 | 416 | wc++; |
| .. | .. |
| 493 | 426 | *npolled = 0; |
| 494 | 427 | /* Find uncompleted WQEs belonging to that cq and return simulated completions for them */ |
| 495 | 428 | list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) { |
| 496 | | - sw_send_comp(qp, num_entries, wc + *npolled, npolled); |
| 429 | + sw_comp(qp, num_entries, wc + *npolled, npolled, true); |
| 497 | 430 | if (*npolled >= num_entries) |
| 498 | 431 | return; |
| 499 | 432 | } |
| 500 | 433 | |
| 501 | 434 | list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) { |
| 502 | | - sw_recv_comp(qp, num_entries, wc + *npolled, npolled); |
| 435 | + sw_comp(qp, num_entries, wc + *npolled, npolled, false); |
| 503 | 436 | if (*npolled >= num_entries) |
| 504 | 437 | return; |
| 505 | 438 | } |
| .. | .. |
| 514 | 447 | struct mlx5_cqe64 *cqe64; |
| 515 | 448 | struct mlx5_core_qp *mqp; |
| 516 | 449 | struct mlx5_ib_wq *wq; |
| 517 | | - struct mlx5_sig_err_cqe *sig_err_cqe; |
| 518 | | - struct mlx5_core_mkey *mmkey; |
| 519 | | - struct mlx5_ib_mr *mr; |
| 520 | 450 | uint8_t opcode; |
| 521 | 451 | uint32_t qpn; |
| 522 | 452 | u16 wqe_ctr; |
| .. | .. |
| 537 | 467 | */ |
| 538 | 468 | rmb(); |
| 539 | 469 | |
| 540 | | - opcode = cqe64->op_own >> 4; |
| 470 | + opcode = get_cqe_opcode(cqe64); |
| 541 | 471 | if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) { |
| 542 | 472 | if (likely(cq->resize_buf)) { |
| 543 | 473 | free_cq_buf(dev, &cq->buf); |
| .. | .. |
| 556 | 486 | * because CQs will be locked while QPs are removed |
| 557 | 487 | * from the table. |
| 558 | 488 | */ |
| 559 | | - mqp = __mlx5_qp_lookup(dev->mdev, qpn); |
| 489 | + mqp = radix_tree_lookup(&dev->qp_table.tree, qpn); |
| 560 | 490 | *cur_qp = to_mibqp(mqp); |
| 561 | 491 | } |
| 562 | 492 | |
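With `#include "qp.h"` added at the top of the file, the QP table now lives in the IB driver itself, so the lookup is open-coded against `dev->qp_table.tree`. The removed `__mlx5_qp_lookup()` was a thin wrapper over the same kind of structure; a sketch, assuming the old core-owned table was also radix-tree based:

```c
/* Sketch of the removed helper: a plain radix-tree lookup keyed by QPN,
 * safe here because the CQ lock pins QPs in the table (see the comment
 * in the surrounding hunk). */
static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev,
						    u32 qpn)
{
	return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
}
```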
| .. | .. |
| 611 | 541 | } |
| 612 | 542 | } |
| 613 | 543 | break; |
| 614 | | - case MLX5_CQE_SIG_ERR: |
| 615 | | - sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64; |
| 544 | + case MLX5_CQE_SIG_ERR: { |
| 545 | + struct mlx5_sig_err_cqe *sig_err_cqe = |
| 546 | + (struct mlx5_sig_err_cqe *)cqe64; |
| 547 | + struct mlx5_core_sig_ctx *sig; |
| 616 | 548 | |
| 617 | | - read_lock(&dev->mdev->priv.mkey_table.lock); |
| 618 | | - mmkey = __mlx5_mr_lookup(dev->mdev, |
| 619 | | - mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey))); |
| 620 | | - mr = to_mibmr(mmkey); |
| 621 | | - get_sig_err_item(sig_err_cqe, &mr->sig->err_item); |
| 622 | | - mr->sig->sig_err_exists = true; |
| 623 | | - mr->sig->sigerr_count++; |
| 549 | + xa_lock(&dev->sig_mrs); |
| 550 | + sig = xa_load(&dev->sig_mrs, |
| 551 | + mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey))); |
| 552 | + get_sig_err_item(sig_err_cqe, &sig->err_item); |
| 553 | + sig->sig_err_exists = true; |
| 554 | + sig->sigerr_count++; |
| 624 | 555 | |
| 625 | 556 | mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n", |
| 626 | | - cq->mcq.cqn, mr->sig->err_item.key, |
| 627 | | - mr->sig->err_item.err_type, |
| 628 | | - mr->sig->err_item.sig_err_offset, |
| 629 | | - mr->sig->err_item.expected, |
| 630 | | - mr->sig->err_item.actual); |
| 557 | + cq->mcq.cqn, sig->err_item.key, |
| 558 | + sig->err_item.err_type, |
| 559 | + sig->err_item.sig_err_offset, |
| 560 | + sig->err_item.expected, |
| 561 | + sig->err_item.actual); |
| 631 | 562 | |
| 632 | | - read_unlock(&dev->mdev->priv.mkey_table.lock); |
| 563 | + xa_unlock(&dev->sig_mrs); |
| 633 | 564 | goto repoll; |
| 565 | + } |
| 634 | 566 | } |
| 635 | 567 | |
| 636 | 568 | return 0; |
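The signature-error path trades the mkey radix tree plus its rwlock for an XArray keyed by base mkey: `xa_lock()`/`xa_load()` bundle the locking and the index structure the old code managed separately. A self-contained usage sketch of the same idiom (`lookup_sig_err_pending` is a hypothetical helper, not driver code):

```c
#include <linux/xarray.h>

/* Sketch: look up a signature context by base mkey and sample its error
 * flag under the XArray's internal spinlock. */
static bool lookup_sig_err_pending(struct xarray *sig_mrs, unsigned long mkey)
{
	struct mlx5_core_sig_ctx *sig;
	bool pending = false;

	xa_lock(sig_mrs);
	sig = xa_load(sig_mrs, mkey);
	if (sig)
		pending = sig->sig_err_exists;
	xa_unlock(sig_mrs);

	return pending;
}
```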
| .. | .. |
| 728 | 660 | int nent, |
| 729 | 661 | int cqe_size) |
| 730 | 662 | { |
| 731 | | - struct mlx5_frag_buf_ctrl *c = &buf->fbc; |
| 732 | | - struct mlx5_frag_buf *frag_buf = &c->frag_buf; |
| 733 | | - u32 cqc_buff[MLX5_ST_SZ_DW(cqc)] = {0}; |
| 663 | + struct mlx5_frag_buf *frag_buf = &buf->frag_buf; |
| 664 | + u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0); |
| 665 | + u8 log_wq_sz = ilog2(cqe_size); |
| 734 | 666 | int err; |
| 735 | | - |
| 736 | | - MLX5_SET(cqc, cqc_buff, log_cq_size, ilog2(cqe_size)); |
| 737 | | - MLX5_SET(cqc, cqc_buff, cqe_sz, (cqe_size == 128) ? 1 : 0); |
| 738 | | - |
| 739 | | - mlx5_core_init_cq_frag_buf(&buf->fbc, cqc_buff); |
| 740 | 667 | |
| 741 | 668 | err = mlx5_frag_buf_alloc_node(dev->mdev, |
| 742 | 669 | nent * cqe_size, |
| .. | .. |
| 744 | 671 | dev->mdev->priv.numa_node); |
| 745 | 672 | if (err) |
| 746 | 673 | return err; |
| 674 | + |
| 675 | + mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc); |
| 747 | 676 | |
| 748 | 677 | buf->cqe_size = cqe_size; |
| 749 | 678 | buf->nent = nent; |
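The rewritten allocator derives the fragment-buffer controller parameters directly instead of round-tripping them through a scratch `cqc`, and `mlx5_init_fbc()` now runs only after `mlx5_frag_buf_alloc_node()` has produced the frags it indexes. The stride arithmetic, as a tiny sketch:

```c
/* Sketch: log2 of the CQE stride; 64 B -> 2^6, 128 B -> 2^7, matching
 * the "6 + (cqe_size == 128 ? 1 : 0)" expression above. */
static u8 cqe_log_stride(int cqe_size)
{
	return 6 + (cqe_size == 128 ? 1 : 0);
}
```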
| .. | .. |
| 774 | 703 | } |
| 775 | 704 | |
| 776 | 705 | static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, |
| 777 | | - struct ib_ucontext *context, struct mlx5_ib_cq *cq, |
| 778 | | - int entries, u32 **cqb, |
| 706 | + struct mlx5_ib_cq *cq, int entries, u32 **cqb, |
| 779 | 707 | int *cqe_size, int *index, int *inlen) |
| 780 | 708 | { |
| 781 | 709 | struct mlx5_ib_create_cq ucmd = {}; |
| .. | .. |
| 786 | 714 | int ncont; |
| 787 | 715 | void *cqc; |
| 788 | 716 | int err; |
| 717 | + struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context( |
| 718 | + udata, struct mlx5_ib_ucontext, ibucontext); |
| 789 | 719 | |
| 790 | | - ucmdlen = udata->inlen < sizeof(ucmd) ? |
| 791 | | - (sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd); |
| 720 | + ucmdlen = min(udata->inlen, sizeof(ucmd)); |
| 721 | + if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags)) |
| 722 | + return -EINVAL; |
| 792 | 723 | |
| 793 | 724 | if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) |
| 794 | 725 | return -EFAULT; |
| 795 | 726 | |
| 796 | | - if (ucmdlen == sizeof(ucmd) && |
| 797 | | - (ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD))) |
| 727 | + if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD | |
| 728 | + MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX))) |
| 798 | 729 | return -EINVAL; |
| 799 | 730 | |
| 800 | | - if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128) |
| 731 | + if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) || |
| 732 | + ucmd.reserved0 || ucmd.reserved1) |
| 801 | 733 | return -EINVAL; |
| 802 | 734 | |
| 803 | 735 | *cqe_size = ucmd.cqe_size; |
| 804 | 736 | |
| 805 | | - cq->buf.umem = ib_umem_get(context, ucmd.buf_addr, |
| 806 | | - entries * ucmd.cqe_size, |
| 807 | | - IB_ACCESS_LOCAL_WRITE, 1); |
| 737 | + cq->buf.umem = |
| 738 | + ib_umem_get(&dev->ib_dev, ucmd.buf_addr, |
| 739 | + entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE); |
| 808 | 740 | if (IS_ERR(cq->buf.umem)) { |
| 809 | 741 | err = PTR_ERR(cq->buf.umem); |
| 810 | 742 | return err; |
| 811 | 743 | } |
| 812 | 744 | |
| 813 | | - err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr, |
| 814 | | - &cq->db); |
| 745 | + err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db); |
| 815 | 746 | if (err) |
| 816 | 747 | goto err_umem; |
| 817 | 748 | |
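The new length handling is the usual extensible-ABI pattern: accept a user struct no longer than the kernel's, require at least the pre-`flags` layout, and let the zero-initialized `ucmd` stand in for trailing fields an older userspace never sent. The shape of the pattern, extracted as a sketch:

```c
/* Sketch of the compat-length idiom used above. Zero-initializing ucmd
 * means fields beyond what userspace provided read as 0, so the later
 * flags check passes transparently for the old, shorter ABI. */
struct mlx5_ib_create_cq ucmd = {};
size_t ucmdlen = min(udata->inlen, sizeof(ucmd));

if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags))
	return -EINVAL;		/* shorter than even the old ABI */
if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
	return -EFAULT;
```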
| .. | .. |
| 835 | 766 | MLX5_SET(cqc, cqc, log_page_size, |
| 836 | 767 | page_shift - MLX5_ADAPTER_PAGE_SHIFT); |
| 837 | 768 | |
| 838 | | - *index = to_mucontext(context)->bfregi.sys_pages[0]; |
| 769 | + if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) { |
| 770 | + *index = ucmd.uar_page_index; |
| 771 | + } else if (context->bfregi.lib_uar_dyn) { |
| 772 | + err = -EINVAL; |
| 773 | + goto err_cqb; |
| 774 | + } else { |
| 775 | + *index = context->bfregi.sys_pages[0]; |
| 776 | + } |
| 839 | 777 | |
| 840 | 778 | if (ucmd.cqe_comp_en == 1) { |
| 841 | 779 | int mini_cqe_format; |
| .. | .. |
| 877 | 815 | cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD; |
| 878 | 816 | } |
| 879 | 817 | |
| 818 | + MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid); |
| 880 | 819 | return 0; |
| 881 | 820 | |
| 882 | 821 | err_cqb: |
| 883 | 822 | kvfree(*cqb); |
| 884 | 823 | |
| 885 | 824 | err_db: |
| 886 | | - mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); |
| 825 | + mlx5_ib_db_unmap_user(context, &cq->db); |
| 887 | 826 | |
| 888 | 827 | err_umem: |
| 889 | 828 | ib_umem_release(cq->buf.umem); |
| 890 | 829 | return err; |
| 891 | 830 | } |
| 892 | 831 | |
| 893 | | -static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context) |
| 832 | +static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata) |
| 894 | 833 | { |
| 895 | | - mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); |
| 834 | + struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context( |
| 835 | + udata, struct mlx5_ib_ucontext, ibucontext); |
| 836 | + |
| 837 | + mlx5_ib_db_unmap_user(context, &cq->db); |
| 896 | 838 | ib_umem_release(cq->buf.umem); |
| 897 | 839 | } |
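Dropping the `struct ib_ucontext *` parameter means the driver now recovers its private ucontext from the udata itself via `rdma_udata_to_drv_context()`, which yields NULL for kernel-owned objects. A usage sketch (the wrapper function is hypothetical):

```c
/* Sketch: resolve the mlx5 ucontext behind a udata; NULL when the CQ
 * belongs to a kernel consumer and there is no userspace context. */
static void example_unmap_cq_db(struct mlx5_ib_cq *cq, struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	if (context)
		mlx5_ib_db_unmap_user(context, &cq->db);
}
```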
| .. | .. |
| 933 | 875 | |
| 934 | 876 | *inlen = MLX5_ST_SZ_BYTES(create_cq_in) + |
| 935 | 877 | MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * |
| 936 | | - cq->buf.fbc.frag_buf.npages; |
| 878 | + cq->buf.frag_buf.npages; |
| 937 | 879 | *cqb = kvzalloc(*inlen, GFP_KERNEL); |
| 938 | 880 | if (!*cqb) { |
| 939 | 881 | err = -ENOMEM; |
| .. | .. |
| 941 | 883 | } |
| 942 | 884 | |
| 943 | 885 | pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas); |
| 944 | | - mlx5_fill_page_frag_array(&cq->buf.fbc.frag_buf, pas); |
| 886 | + mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas); |
| 945 | 887 | |
| 946 | 888 | cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context); |
| 947 | 889 | MLX5_SET(cqc, cqc, log_page_size, |
| 948 | | - cq->buf.fbc.frag_buf.page_shift - |
| 890 | + cq->buf.frag_buf.page_shift - |
| 949 | 891 | MLX5_ADAPTER_PAGE_SHIFT); |
| 950 | 892 | |
| 951 | 893 | *index = dev->mdev->priv.uar->index; |
| .. | .. |
| 974 | 916 | cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); |
| 975 | 917 | } |
| 976 | 918 | |
| 977 | | -struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, |
| 978 | | - const struct ib_cq_init_attr *attr, |
| 979 | | - struct ib_ucontext *context, |
| 980 | | - struct ib_udata *udata) |
| 919 | +int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, |
| 920 | + struct ib_udata *udata) |
| 981 | 921 | { |
| 922 | + struct ib_device *ibdev = ibcq->device; |
| 982 | 923 | int entries = attr->cqe; |
| 983 | 924 | int vector = attr->comp_vector; |
| 984 | 925 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
| 985 | | - struct mlx5_ib_cq *cq; |
| 986 | | - int uninitialized_var(index); |
| 987 | | - int uninitialized_var(inlen); |
| 926 | + struct mlx5_ib_cq *cq = to_mcq(ibcq); |
| 927 | + u32 out[MLX5_ST_SZ_DW(create_cq_out)]; |
| 928 | + int index; |
| 929 | + int inlen; |
| 988 | 930 | u32 *cqb = NULL; |
| 989 | 931 | void *cqc; |
| 990 | 932 | int cqe_size; |
| 991 | | - unsigned int irqn; |
| 992 | 933 | int eqn; |
| 993 | 934 | int err; |
| 994 | 935 | |
| 995 | 936 | if (entries < 0 || |
| 996 | 937 | (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))) |
| 997 | | - return ERR_PTR(-EINVAL); |
| 938 | + return -EINVAL; |
| 998 | 939 | |
| 999 | 940 | if (check_cq_create_flags(attr->flags)) |
| 1000 | | - return ERR_PTR(-EOPNOTSUPP); |
| 941 | + return -EOPNOTSUPP; |
| 1001 | 942 | |
| 1002 | 943 | entries = roundup_pow_of_two(entries + 1); |
| 1003 | 944 | if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) |
| 1004 | | - return ERR_PTR(-EINVAL); |
| 1005 | | - |
| 1006 | | - cq = kzalloc(sizeof(*cq), GFP_KERNEL); |
| 1007 | | - if (!cq) |
| 1008 | | - return ERR_PTR(-ENOMEM); |
| 945 | + return -EINVAL; |
| 1009 | 946 | |
| 1010 | 947 | cq->ibcq.cqe = entries - 1; |
| 1011 | 948 | mutex_init(&cq->resize_mutex); |
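The return-type change from `struct ib_cq *` to `int` goes with the RDMA core allocating the CQ container itself: the driver advertises its structure size, and the core performs the kzalloc/kfree that the removed `err_create` path used to do. A sketch of how a driver declares that (the ops table shown is illustrative, not the full mlx5 table):

```c
/* Sketch: with INIT_RDMA_OBJ_SIZE the core allocates mlx5_ib_cq and
 * hands the driver the embedded ib_cq already zeroed; create/destroy
 * only fill in and tear down driver state. */
static const struct ib_device_ops example_dev_ops = {
	.create_cq = mlx5_ib_create_cq,
	.destroy_cq = mlx5_ib_destroy_cq,
	INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
};
```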
| .. | .. |
| 1016 | 953 | INIT_LIST_HEAD(&cq->list_send_qp); |
| 1017 | 954 | INIT_LIST_HEAD(&cq->list_recv_qp); |
| 1018 | 955 | |
| 1019 | | - if (context) { |
| 1020 | | - err = create_cq_user(dev, udata, context, cq, entries, |
| 1021 | | - &cqb, &cqe_size, &index, &inlen); |
| 956 | + if (udata) { |
| 957 | + err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size, |
| 958 | + &index, &inlen); |
| 1022 | 959 | if (err) |
| 1023 | | - goto err_create; |
| 960 | + return err; |
| 1024 | 961 | } else { |
| 1025 | 962 | cqe_size = cache_line_size() == 128 ? 128 : 64; |
| 1026 | 963 | err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, |
| 1027 | 964 | &index, &inlen); |
| 1028 | 965 | if (err) |
| 1029 | | - goto err_create; |
| 966 | + return err; |
| 1030 | 967 | |
| 1031 | 968 | INIT_WORK(&cq->notify_work, notify_soft_wc_handler); |
| 1032 | 969 | } |
| 1033 | 970 | |
| 1034 | | - err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn); |
| 971 | + err = mlx5_vector2eqn(dev->mdev, vector, &eqn); |
| 1035 | 972 | if (err) |
| 1036 | 973 | goto err_cqb; |
| 1037 | 974 | |
| .. | .. |
| 1049 | 986 | if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN) |
| 1050 | 987 | MLX5_SET(cqc, cqc, oi, 1); |
| 1051 | 988 | |
| 1052 | | - err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen); |
| 989 | + err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out)); |
| 1053 | 990 | if (err) |
| 1054 | 991 | goto err_cqb; |
| 1055 | 992 | |
| 1056 | 993 | mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn); |
| 1057 | | - cq->mcq.irqn = irqn; |
| 1058 | | - if (context) |
| 994 | + if (udata) |
| 1059 | 995 | cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp; |
| 1060 | 996 | else |
| 1061 | 997 | cq->mcq.comp = mlx5_ib_cq_comp; |
| .. | .. |
| 1063 | 999 | |
| 1064 | 1000 | INIT_LIST_HEAD(&cq->wc_list); |
| 1065 | 1001 | |
| 1066 | | - if (context) |
| 1002 | + if (udata) |
| 1067 | 1003 | if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) { |
| 1068 | 1004 | err = -EFAULT; |
| 1069 | 1005 | goto err_cmd; |
| .. | .. |
| 1071 | 1007 | |
| 1072 | 1008 | |
| 1073 | 1009 | kvfree(cqb); |
| 1074 | | - return &cq->ibcq; |
| 1010 | + return 0; |
| 1075 | 1011 | |
| 1076 | 1012 | err_cmd: |
| 1077 | 1013 | mlx5_core_destroy_cq(dev->mdev, &cq->mcq); |
| 1078 | 1014 | |
| 1079 | 1015 | err_cqb: |
| 1080 | 1016 | kvfree(cqb); |
| 1081 | | - if (context) |
| 1082 | | - destroy_cq_user(cq, context); |
| 1017 | + if (udata) |
| 1018 | + destroy_cq_user(cq, udata); |
| 1083 | 1019 | else |
| 1084 | 1020 | destroy_cq_kernel(dev, cq); |
| 1085 | | - |
| 1086 | | -err_create: |
| 1087 | | - kfree(cq); |
| 1088 | | - |
| 1089 | | - return ERR_PTR(err); |
| 1021 | + return err; |
| 1090 | 1022 | } |
| 1091 | 1023 | |
| 1092 | | - |
| 1093 | | -int mlx5_ib_destroy_cq(struct ib_cq *cq) |
| 1024 | +int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) |
| 1094 | 1025 | { |
| 1095 | 1026 | struct mlx5_ib_dev *dev = to_mdev(cq->device); |
| 1096 | 1027 | struct mlx5_ib_cq *mcq = to_mcq(cq); |
| 1097 | | - struct ib_ucontext *context = NULL; |
| 1028 | + int ret; |
| 1098 | 1029 | |
| 1099 | | - if (cq->uobject) |
| 1100 | | - context = cq->uobject->context; |
| 1030 | + ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq); |
| 1031 | + if (ret) |
| 1032 | + return ret; |
| 1101 | 1033 | |
| 1102 | | - mlx5_core_destroy_cq(dev->mdev, &mcq->mcq); |
| 1103 | | - if (context) |
| 1104 | | - destroy_cq_user(mcq, context); |
| 1034 | + if (udata) |
| 1035 | + destroy_cq_user(mcq, udata); |
| 1105 | 1036 | else |
| 1106 | 1037 | destroy_cq_kernel(dev, mcq); |
| 1107 | | - |
| 1108 | | - kfree(mcq); |
| 1109 | | - |
| 1110 | 1038 | return 0; |
| 1111 | 1039 | } |
| 1112 | 1040 | |
| .. | .. |
| 1204 | 1132 | struct ib_umem *umem; |
| 1205 | 1133 | int err; |
| 1206 | 1134 | int npages; |
| 1207 | | - struct ib_ucontext *context = cq->buf.umem->context; |
| 1208 | 1135 | |
| 1209 | 1136 | err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); |
| 1210 | 1137 | if (err) |
| .. | .. |
| 1217 | 1144 | if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1) |
| 1218 | 1145 | return -EINVAL; |
| 1219 | 1146 | |
| 1220 | | - umem = ib_umem_get(context, ucmd.buf_addr, |
| 1147 | + umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr, |
| 1221 | 1148 | (size_t)ucmd.cqe_size * entries, |
| 1222 | | - IB_ACCESS_LOCAL_WRITE, 1); |
| 1149 | + IB_ACCESS_LOCAL_WRITE); |
| 1223 | 1150 | if (IS_ERR(umem)) { |
| 1224 | 1151 | err = PTR_ERR(umem); |
| 1225 | 1152 | return err; |
| .. | .. |
| 1232 | 1159 | *cqe_size = ucmd.cqe_size; |
| 1233 | 1160 | |
| 1234 | 1161 | return 0; |
| 1235 | | -} |
| 1236 | | - |
| 1237 | | -static void un_resize_user(struct mlx5_ib_cq *cq) |
| 1238 | | -{ |
| 1239 | | - ib_umem_release(cq->resize_umem); |
| 1240 | 1162 | } |
| 1241 | 1163 | |
| 1242 | 1164 | static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, |
| .. | .. |
| 1259 | 1181 | ex: |
| 1260 | 1182 | kfree(cq->resize_buf); |
| 1261 | 1183 | return err; |
| 1262 | | -} |
| 1263 | | - |
| 1264 | | -static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) |
| 1265 | | -{ |
| 1266 | | - free_cq_buf(dev, cq->resize_buf); |
| 1267 | | - cq->resize_buf = NULL; |
| 1268 | 1184 | } |
| 1269 | 1185 | |
| 1270 | 1186 | static int copy_resize_cqes(struct mlx5_ib_cq *cq) |
| .. | .. |
| 1296 | 1212 | return -EINVAL; |
| 1297 | 1213 | } |
| 1298 | 1214 | |
| 1299 | | - while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) { |
| 1215 | + while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) { |
| 1300 | 1216 | dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc, |
| 1301 | 1217 | (i + 1) & cq->resize_buf->nent); |
| 1302 | 1218 | dcqe64 = dsize == 64 ? dcqe : dcqe + 64; |
| .. | .. |
| 1333 | 1249 | __be64 *pas; |
| 1334 | 1250 | int page_shift; |
| 1335 | 1251 | int inlen; |
| 1336 | | - int uninitialized_var(cqe_size); |
| 1252 | + int cqe_size; |
| 1337 | 1253 | unsigned long flags; |
| 1338 | 1254 | |
| 1339 | 1255 | if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) { |
| .. | .. |
| 1364 | 1280 | cqe_size = 64; |
| 1365 | 1281 | err = resize_kernel(dev, cq, entries, cqe_size); |
| 1366 | 1282 | if (!err) { |
| 1367 | | - struct mlx5_frag_buf_ctrl *c; |
| 1283 | + struct mlx5_frag_buf *frag_buf = &cq->resize_buf->frag_buf; |
| 1368 | 1284 | |
| 1369 | | - c = &cq->resize_buf->fbc; |
| 1370 | | - npas = c->frag_buf.npages; |
| 1371 | | - page_shift = c->frag_buf.page_shift; |
| 1285 | + npas = frag_buf->npages; |
| 1286 | + page_shift = frag_buf->page_shift; |
| 1372 | 1287 | } |
| 1373 | 1288 | } |
| 1374 | 1289 | |
| .. | .. |
| 1389 | 1304 | mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift, |
| 1390 | 1305 | pas, 0); |
| 1391 | 1306 | else |
| 1392 | | - mlx5_fill_page_frag_array(&cq->resize_buf->fbc.frag_buf, |
| 1393 | | - pas); |
| 1307 | + mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas); |
| 1394 | 1308 | |
| 1395 | 1309 | MLX5_SET(modify_cq_in, in, |
| 1396 | 1310 | modify_field_select_resize_field_select.resize_field_select.resize_field_select, |
| .. | .. |
| 1449 | 1363 | kvfree(in); |
| 1450 | 1364 | |
| 1451 | 1365 | ex_resize: |
| 1452 | | - if (udata) |
| 1453 | | - un_resize_user(cq); |
| 1454 | | - else |
| 1455 | | - un_resize_kernel(dev, cq); |
| 1366 | + ib_umem_release(cq->resize_umem); |
| 1367 | + if (!udata) { |
| 1368 | + free_cq_buf(dev, cq->resize_buf); |
| 1369 | + cq->resize_buf = NULL; |
| 1370 | + } |
| 1456 | 1371 | ex: |
| 1457 | 1372 | mutex_unlock(&cq->resize_mutex); |
| 1458 | 1373 | return err; |
| 1459 | 1374 | } |
| 1460 | 1375 | |
| .. | .. |
| 1461 | | -int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq) |
| 1376 | +int mlx5_ib_get_cqe_size(struct ib_cq *ibcq) |
| 1462 | 1377 | { |
| 1463 | 1378 | struct mlx5_ib_cq *cq; |
| 1464 | 1379 | |