@@ -52,6 +52,9 @@
 
 #include "pvrdma.h"
 
+static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
+                                struct pvrdma_qp *qp);
+
 static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
                            struct pvrdma_cq **recv_cq)
 {
@@ -195,7 +198,9 @@
         union pvrdma_cmd_resp rsp;
         struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
         struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
+        struct pvrdma_cmd_create_qp_resp_v2 *resp_v2 = &rsp.create_qp_resp_v2;
         struct pvrdma_create_qp ucmd;
+        struct pvrdma_create_qp_resp qp_resp = {};
         unsigned long flags;
         int ret;
         bool is_srq = !!init_attr->srq;
@@ -212,7 +217,7 @@
             init_attr->qp_type != IB_QPT_GSI) {
                 dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
                          init_attr->qp_type);
-                return ERR_PTR(-EINVAL);
+                return ERR_PTR(-EOPNOTSUPP);
         }
 
         if (is_srq && !dev->dsr->caps.max_srq) {
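
Switching -EINVAL to -EOPNOTSUPP fits the convention that an unsupported QP
type is a missing capability, not a malformed request, and lets callers tell
the two apart. For orientation, the ERR_PTR encoding the caller unwraps works
roughly like this userspace model of include/linux/err.h (a sketch, not the
kernel source; EOPNOTSUPP hardcoded to its Linux value):

  #include <stdio.h>

  #define MAX_ERRNO   4095
  #define EOPNOTSUPP  95

  static inline void *ERR_PTR(long error) { return (void *)error; }
  static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
  static inline int IS_ERR(const void *ptr)
  {
          /* the top MAX_ERRNO addresses are reserved for errno values */
          return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
  }

  int main(void)
  {
          void *qp = ERR_PTR(-EOPNOTSUPP);

          if (IS_ERR(qp))
                  printf("create_qp failed: %ld\n", PTR_ERR(qp));
          return 0;
  }
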
@@ -227,13 +232,12 @@
         switch (init_attr->qp_type) {
         case IB_QPT_GSI:
                 if (init_attr->port_num == 0 ||
-                    init_attr->port_num > pd->device->phys_port_cnt ||
-                    udata) {
+                    init_attr->port_num > pd->device->phys_port_cnt) {
                         dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
                         ret = -EINVAL;
                         goto err_qp;
                 }
-                /* fall through */
+                fallthrough;
         case IB_QPT_RC:
         case IB_QPT_UD:
                 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
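
fallthrough; gives the compiler, not just the reader, the no-break intent, so
-Wimplicit-fallthrough stays quiet without relying on comment parsing. The
keyword is roughly this macro from include/linux/compiler_attributes.h (exact
feature guards vary by compiler and kernel version):

  #if __has_attribute(__fallthrough__)
  # define fallthrough    __attribute__((__fallthrough__))
  #else
  # define fallthrough    do {} while (0) /* fallthrough */
  #endif
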
@@ -249,7 +253,7 @@
                 init_completion(&qp->free);
 
                 qp->state = IB_QPS_RESET;
-                qp->is_kernel = !(pd->uobject && udata);
+                qp->is_kernel = !udata;
 
                 if (!qp->is_kernel) {
                         dev_dbg(&dev->pdev->dev,
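
A non-NULL udata already implies the verb was invoked from a user context, so
the pd->uobject half of the old conjunction was redundant; !udata is the
complete is_kernel test, and it keeps working as pd->uobject is phased out of
the drivers.
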
@@ -260,11 +264,20 @@
                         goto err_qp;
                 }
 
+                /* Userspace supports qpn and qp handles? */
+                if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION &&
+                    udata->outlen < sizeof(qp_resp)) {
+                        dev_warn(&dev->pdev->dev,
+                                 "create queuepair not supported\n");
+                        ret = -EOPNOTSUPP;
+                        goto err_qp;
+                }
+
                 if (!is_srq) {
                         /* set qp->sq.wqe_cnt, shift, buf_size.. */
-                        qp->rumem = ib_umem_get(pd->uobject->context,
-                                                ucmd.rbuf_addr,
-                                                ucmd.rbuf_size, 0, 0);
+                        qp->rumem =
+                                ib_umem_get(pd->device, ucmd.rbuf_addr,
+                                            ucmd.rbuf_size, 0);
                         if (IS_ERR(qp->rumem)) {
                                 ret = PTR_ERR(qp->rumem);
                                 goto err_qp;
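
Two separate changes meet in this hunk. First, the capability handshake: a
device at PVRDMA_QPHANDLE_VERSION or newer returns the v2 reply, so userspace
must pass a response buffer with room for struct pvrdma_create_qp_resp; a
short outlen identifies an old provider library that cannot be handed the qp
handle, so create fails with -EOPNOTSUPP. Second, ib_umem_get() now takes the
ib_device directly and has lost both the ucontext and the trailing dmasync
flag; consistent with the call sites here, its prototype (as of this series,
per include/rdma/ib_umem.h) is:

  struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
                              size_t size, int access);
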
@@ -275,9 +288,8 @@
                         qp->srq = to_vsrq(init_attr->srq);
                 }
 
-                qp->sumem = ib_umem_get(pd->uobject->context,
-                                        ucmd.sbuf_addr,
-                                        ucmd.sbuf_size, 0, 0);
+                qp->sumem = ib_umem_get(pd->device, ucmd.sbuf_addr,
+                                        ucmd.sbuf_size, 0);
                 if (IS_ERR(qp->sumem)) {
                         if (!is_srq)
                                 ib_umem_release(qp->rumem);
@@ -285,9 +297,11 @@
                         goto err_qp;
                 }
 
-                qp->npages_send = ib_umem_page_count(qp->sumem);
+                qp->npages_send =
+                        ib_umem_num_dma_blocks(qp->sumem, PAGE_SIZE);
                 if (!is_srq)
-                        qp->npages_recv = ib_umem_page_count(qp->rumem);
+                        qp->npages_recv = ib_umem_num_dma_blocks(
+                                qp->rumem, PAGE_SIZE);
                 else
                         qp->npages_recv = 0;
                 qp->npages = qp->npages_send + qp->npages_recv;
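
ib_umem_page_count() gives way to ib_umem_num_dma_blocks(umem, PAGE_SIZE),
which counts the pgsz-aligned blocks covering the mapped range rather than raw
pages, so an unaligned start address is accounted correctly. A self-contained
model of the arithmetic (illustration only; the real helper lives in
include/rdma/ib_umem.h and works on the umem's iova and length):

  #include <stdio.h>

  /* number of pgsz-aligned blocks covering [iova, iova + length) */
  static unsigned long num_dma_blocks(unsigned long iova, unsigned long length,
                                      unsigned long pgsz)
  {
          unsigned long start = iova & ~(pgsz - 1);                     /* ALIGN_DOWN */
          unsigned long end = (iova + length + pgsz - 1) & ~(pgsz - 1); /* ALIGN up */

          return (end - start) / pgsz;
  }

  int main(void)
  {
          /* 8192 bytes starting 100 bytes into a 4K page span 3 blocks */
          printf("%lu\n", num_dma_blocks(4196, 8192, 4096));
          return 0;
  }
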
@@ -381,24 +395,40 @@
         }
 
         /* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
-        qp->qp_handle = resp->qpn;
         qp->port = init_attr->port_num;
-        qp->ibqp.qp_num = resp->qpn;
+
+        if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION) {
+                qp->ibqp.qp_num = resp_v2->qpn;
+                qp->qp_handle = resp_v2->qp_handle;
+        } else {
+                qp->ibqp.qp_num = resp->qpn;
+                qp->qp_handle = resp->qpn;
+        }
+
         spin_lock_irqsave(&dev->qp_tbl_lock, flags);
         dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
         spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
+        if (udata) {
+                qp_resp.qpn = qp->ibqp.qp_num;
+                qp_resp.qp_handle = qp->qp_handle;
+
+                if (ib_copy_to_udata(udata, &qp_resp,
+                                     min(udata->outlen, sizeof(qp_resp)))) {
+                        dev_warn(&dev->pdev->dev,
+                                 "failed to copy back udata\n");
+                        __pvrdma_destroy_qp(dev, qp);
+                        return ERR_PTR(-EINVAL);
+                }
+        }
 
         return &qp->ibqp;
 
 err_pdir:
         pvrdma_page_dir_cleanup(dev, &qp->pdir);
 err_umem:
-        if (!qp->is_kernel) {
-                if (qp->rumem)
-                        ib_umem_release(qp->rumem);
-                if (qp->sumem)
-                        ib_umem_release(qp->sumem);
-        }
+        ib_umem_release(qp->rumem);
+        ib_umem_release(qp->sumem);
 err_qp:
         kfree(qp);
         atomic_dec(&dev->num_qps);
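
The min(udata->outlen, sizeof(qp_resp)) cap on the copy is the
backward-compatibility half of the handshake above: an old provider passing a
smaller outlen (accepted when the device predates PVRDMA_QPHANDLE_VERSION)
receives a truncated, qpn-only reply, while a new one gets both qpn and
qp_handle. A compilable userspace model of that truncation (struct and
function names here are illustrative, not the real uAPI):

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  struct qp_resp_v1 { uint32_t qpn; };                     /* old userspace */
  struct qp_resp_v2 { uint32_t qpn; uint32_t qp_handle; }; /* new userspace */

  /* stands in for ib_copy_to_udata() capped by min(outlen, sizeof(resp)) */
  static void copy_resp(void *out, size_t outlen)
  {
          struct qp_resp_v2 full = { .qpn = 17, .qp_handle = 42 };
          size_t n = outlen < sizeof(full) ? outlen : sizeof(full);

          memcpy(out, &full, n);
  }

  int main(void)
  {
          struct qp_resp_v1 old = { 0 };
          struct qp_resp_v2 new_resp = { 0 };

          copy_resp(&old, sizeof(old));           /* sees qpn only */
          copy_resp(&new_resp, sizeof(new_resp)); /* sees qpn and handle */
          printf("%u / %u %u\n", old.qpn, new_resp.qpn, new_resp.qp_handle);
          return 0;
  }

Note also that the copy-failure path cannot use the err_* labels: by this
point the QP exists on the device and sits in qp_tbl, so the unwind has to go
through the full __pvrdma_destroy_qp() teardown defined further down.
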
@@ -406,12 +436,34 @@
         return ERR_PTR(ret);
 }
 
+static void _pvrdma_free_qp(struct pvrdma_qp *qp)
+{
+        unsigned long flags;
+        struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
+
+        spin_lock_irqsave(&dev->qp_tbl_lock, flags);
+        dev->qp_tbl[qp->qp_handle] = NULL;
+        spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
+        if (refcount_dec_and_test(&qp->refcnt))
+                complete(&qp->free);
+        wait_for_completion(&qp->free);
+
+        ib_umem_release(qp->rumem);
+        ib_umem_release(qp->sumem);
+
+        pvrdma_page_dir_cleanup(dev, &qp->pdir);
+
+        kfree(qp);
+
+        atomic_dec(&dev->num_qps);
+}
+
 static void pvrdma_free_qp(struct pvrdma_qp *qp)
 {
-        struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
         struct pvrdma_cq *scq;
         struct pvrdma_cq *rcq;
-        unsigned long flags, scq_flags, rcq_flags;
+        unsigned long scq_flags, rcq_flags;
 
         /* In case cq is polling */
         get_cqs(qp, &scq, &rcq);
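
The body of _pvrdma_free_qp() is the old teardown moved into a helper; the
part worth keeping in mind is the drain idiom it preserves: drop our
reference, and if an event path still holds one, block on the completion until
it lets go. Modelled in userspace with C11 atomics and a condition variable
standing in for refcount_t and struct completion (a sketch of the idiom, not
kernel code; build with -pthread):

  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_int refcnt = 2;   /* creator + one in-flight user */
  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t freed = PTHREAD_COND_INITIALIZER;
  static int done;

  static void put_ref(void)
  {
          if (atomic_fetch_sub(&refcnt, 1) == 1) { /* refcount_dec_and_test */
                  pthread_mutex_lock(&lock);
                  done = 1;                        /* complete(&qp->free) */
                  pthread_cond_signal(&freed);
                  pthread_mutex_unlock(&lock);
          }
  }

  static void *user(void *arg)
  {
          put_ref();                /* in-flight user finishes */
          return NULL;
  }

  int main(void)
  {
          pthread_t t;

          pthread_create(&t, NULL, user, NULL);
          put_ref();                /* destroyer drops its reference */
          pthread_mutex_lock(&lock);
          while (!done)             /* wait_for_completion(&qp->free) */
                  pthread_cond_wait(&freed, &lock);
          pthread_mutex_unlock(&lock);
          pthread_join(&t, NULL);
          puts("safe to kfree(qp)");
          return 0;
  }
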
@@ -421,55 +473,55 @@
         if (scq != rcq)
                 _pvrdma_flush_cqe(qp, rcq);
 
-        spin_lock_irqsave(&dev->qp_tbl_lock, flags);
-        dev->qp_tbl[qp->qp_handle] = NULL;
-        spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
-
+        /*
+         * We're now unlocking the CQs before clearing out the qp handle;
+         * this should still be safe. We have destroyed the backend QP and
+         * flushed the CQEs, so there should be no other completions for this QP.
+         */
         pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
 
-        if (refcount_dec_and_test(&qp->refcnt))
-                complete(&qp->free);
-        wait_for_completion(&qp->free);
-
-        if (!qp->is_kernel) {
-                if (qp->rumem)
-                        ib_umem_release(qp->rumem);
-                if (qp->sumem)
-                        ib_umem_release(qp->sumem);
-        }
-
-        pvrdma_page_dir_cleanup(dev, &qp->pdir);
-
-        kfree(qp);
-
-        atomic_dec(&dev->num_qps);
+        _pvrdma_free_qp(qp);
 }
 
-/**
- * pvrdma_destroy_qp - destroy a queue pair
- * @qp: the queue pair to destroy
- *
- * @return: 0 on success.
- */
-int pvrdma_destroy_qp(struct ib_qp *qp)
+static inline void _pvrdma_destroy_qp_work(struct pvrdma_dev *dev,
+                                           u32 qp_handle)
 {
-        struct pvrdma_qp *vqp = to_vqp(qp);
         union pvrdma_cmd_req req;
         struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
         int ret;
 
         memset(cmd, 0, sizeof(*cmd));
         cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
-        cmd->qp_handle = vqp->qp_handle;
+        cmd->qp_handle = qp_handle;
 
-        ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0);
+        ret = pvrdma_cmd_post(dev, &req, NULL, 0);
         if (ret < 0)
-                dev_warn(&to_vdev(qp->device)->pdev->dev,
+                dev_warn(&dev->pdev->dev,
                          "destroy queuepair failed, error: %d\n", ret);
+}
 
+/**
+ * pvrdma_destroy_qp - destroy a queue pair
+ * @qp: the queue pair to destroy
+ * @udata: user data or NULL for kernel object
+ *
+ * @return: always 0.
+ */
+int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
+{
+        struct pvrdma_qp *vqp = to_vqp(qp);
+
+        _pvrdma_destroy_qp_work(to_vdev(qp->device), vqp->qp_handle);
         pvrdma_free_qp(vqp);
 
         return 0;
+}
+
+static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
+                                struct pvrdma_qp *qp)
+{
+        _pvrdma_destroy_qp_work(dev, qp->qp_handle);
+        _pvrdma_free_qp(qp);
 }
 
 /**
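
After the split, both destroy paths funnel through the same two helpers:

  pvrdma_destroy_qp()            /* verbs destroy, user or kernel */
      _pvrdma_destroy_qp_work()  /* posts PVRDMA_CMD_DESTROY_QP */
      pvrdma_free_qp()           /* flushes CQEs under the CQ locks, then */
          _pvrdma_free_qp()      /* clears qp_tbl, drains, frees */

  __pvrdma_destroy_qp()          /* create-path unwind on udata copy failure */
      _pvrdma_destroy_qp_work()
      _pvrdma_free_qp()          /* no CQE flush, presumably because no work
                                    was ever posted on this QP */

This is also why the forward declaration appears at the top of the file:
__pvrdma_destroy_qp() is called from pvrdma_create_qp(), which precedes its
definition.
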
@@ -499,7 +551,7 @@
         next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;
 
         if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
-                                attr_mask, IB_LINK_LAYER_ETHERNET)) {
+                                attr_mask)) {
                 ret = -EINVAL;
                 goto out;
         }
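
ib_modify_qp_is_ok() dropped its link-layer argument upstream (the core no
longer consults it), so the IB_LINK_LAYER_ETHERNET parameter simply goes away.
Consistent with this call site, its prototype (as of this series, per
include/rdma/ib_verbs.h) is:

  bool ib_modify_qp_is_ok(enum ib_qp_state cur_state,
                          enum ib_qp_state next_state,
                          enum ib_qp_type type, enum ib_qp_attr_mask mask);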
|---|