@@ -72,7 +72,7 @@
                         return err;
         }
 
-        err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN, false);
+        err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
         if (err) {
                 iser_err("Failed to set up Data-IN RDMA\n");
                 return err;
@@ -126,8 +126,8 @@
                         return err;
         }
 
-        err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT,
-                                buf_out->data_len == imm_sz);
+        err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
+                                   buf_out->data_len == imm_sz);
         if (err != 0) {
                 iser_err("Failed to register write cmd RDMA mem\n");
                 return err;
@@ -250,8 +250,8 @@
         iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
         iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
 
-        if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max,
-                                           iser_conn->pages_per_mr))
+        if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
+                                    iser_conn->pages_per_mr))
                 goto create_rdma_reg_res_failed;
 
         if (iser_alloc_login_buf(iser_conn))
@@ -293,7 +293,7 @@
 rx_desc_alloc_fail:
         iser_free_login_buf(iser_conn);
 alloc_login_buf_fail:
-        device->reg_ops->free_reg_res(ib_conn);
+        iser_free_fastreg_pool(ib_conn);
 create_rdma_reg_res_failed:
         iser_err("failed allocating rx descriptors / data buffers\n");
         return -ENOMEM;
@@ -306,8 +306,7 @@
         struct ib_conn *ib_conn = &iser_conn->ib_conn;
         struct iser_device *device = ib_conn->device;
 
-        if (device->reg_ops->free_reg_res)
-                device->reg_ops->free_reg_res(ib_conn);
+        iser_free_fastreg_pool(ib_conn);
 
         rx_desc = iser_conn->rx_descs;
         for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
@@ -358,6 +357,8 @@
 
 /**
  * iser_send_command - send command PDU
+ * @conn: link to matching iscsi connection
+ * @task: SCSI command task
  */
 int iser_send_command(struct iscsi_conn *conn,
                       struct iscsi_task *task)
@@ -429,6 +430,9 @@
 
 /**
  * iser_send_data_out - send data out PDU
+ * @conn: link to matching iscsi connection
+ * @task: SCSI command task
+ * @hdr: pointer to the LLD's iSCSI message header
  */
 int iser_send_data_out(struct iscsi_conn *conn,
                        struct iscsi_task *task,
@@ -592,14 +596,13 @@
 static inline int
 iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
 {
-        if (likely(rkey == desc->rsc.mr->rkey)) {
-                desc->rsc.mr_valid = 0;
-        } else if (likely(desc->pi_ctx && rkey == desc->pi_ctx->sig_mr->rkey)) {
-                desc->pi_ctx->sig_mr_valid = 0;
-        } else {
+        if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
+                     (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
                 iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
                 return -EINVAL;
         }
+
+        desc->rsc.mr_valid = 0;
 
         return 0;
 }
@@ -750,6 +753,9 @@
         iser_task->prot[ISER_DIR_IN].data_len = 0;
         iser_task->prot[ISER_DIR_OUT].data_len = 0;
 
+        iser_task->prot[ISER_DIR_IN].dma_nents = 0;
+        iser_task->prot[ISER_DIR_OUT].dma_nents = 0;
+
         memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
                sizeof(struct iser_mem_reg));
         memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
@@ -761,7 +767,7 @@
         int prot_count = scsi_prot_sg_count(iser_task->sc);
 
         if (iser_task->dir[ISER_DIR_IN]) {
-                iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
+                iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN);
                 iser_dma_unmap_task_data(iser_task,
                                          &iser_task->data[ISER_DIR_IN],
                                          DMA_FROM_DEVICE);
@@ -772,7 +778,7 @@
         }
 
         if (iser_task->dir[ISER_DIR_OUT]) {
-                iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
+                iser_unreg_mem_fastreg(iser_task, ISER_DIR_OUT);
                 iser_dma_unmap_task_data(iser_task,
                                          &iser_task->data[ISER_DIR_OUT],
                                          DMA_TO_DEVICE);
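
The common thread in the hunks above is the removal of the per-device memory-registration vtable (device->reg_ops) in favor of direct calls to the fast-registration helpers (iser_alloc_fastreg_pool, iser_free_fastreg_pool, iser_reg_mem_fastreg, iser_unreg_mem_fastreg). The stand-alone sketch below only illustrates that refactoring pattern in simplified form; apart from the helper-name analogy, every type and function in it is a made-up placeholder, not kernel code.

/* sketch.c -- illustrative only; simplified stand-ins, not the kernel code */
#include <stdbool.h>
#include <stdio.h>

enum example_dir { EXAMPLE_DIR_IN, EXAMPLE_DIR_OUT };

struct example_task { int id; };

/* Before: a per-device ops table chose the registration scheme at runtime. */
struct example_reg_ops {
        int (*reg_mem)(struct example_task *task, enum example_dir dir, bool all_imm);
};

struct example_device {
        const struct example_reg_ops *reg_ops;
};

/* The one scheme that remains: fast registration. */
static int example_reg_mem_fastreg(struct example_task *task,
                                   enum example_dir dir, bool all_imm)
{
        printf("task %d: fastreg dir=%d all_imm=%d\n", task->id, dir, all_imm);
        return 0;
}

/* Old style: indirect call through the vtable. */
static int old_style_prepare(struct example_device *dev, struct example_task *task)
{
        return dev->reg_ops->reg_mem(task, EXAMPLE_DIR_IN, false);
}

/* New style (the pattern the diff adopts): call the helper directly. */
static int new_style_prepare(struct example_task *task)
{
        return example_reg_mem_fastreg(task, EXAMPLE_DIR_IN, false);
}

int main(void)
{
        static const struct example_reg_ops ops = { .reg_mem = example_reg_mem_fastreg };
        struct example_device dev = { .reg_ops = &ops };
        struct example_task task = { .id = 1 };

        old_style_prepare(&dev, &task);
        new_style_prepare(&task);
        return 0;
}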