```diff
@@ -38,62 +38,13 @@
 #include <linux/scatterlist.h>
 
 #include "iscsi_iser.h"
-static
-int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
-                      struct iser_data_buf *mem,
-                      struct iser_reg_resources *rsc,
-                      struct iser_mem_reg *mem_reg);
-static
-int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
-                     struct iser_data_buf *mem,
-                     struct iser_reg_resources *rsc,
-                     struct iser_mem_reg *mem_reg);
-
-static const struct iser_reg_ops fastreg_ops = {
-        .alloc_reg_res = iser_alloc_fastreg_pool,
-        .free_reg_res = iser_free_fastreg_pool,
-        .reg_mem = iser_fast_reg_mr,
-        .unreg_mem = iser_unreg_mem_fastreg,
-        .reg_desc_get = iser_reg_desc_get_fr,
-        .reg_desc_put = iser_reg_desc_put_fr,
-};
-
-static const struct iser_reg_ops fmr_ops = {
-        .alloc_reg_res = iser_alloc_fmr_pool,
-        .free_reg_res = iser_free_fmr_pool,
-        .reg_mem = iser_fast_reg_fmr,
-        .unreg_mem = iser_unreg_mem_fmr,
-        .reg_desc_get = iser_reg_desc_get_fmr,
-        .reg_desc_put = iser_reg_desc_put_fmr,
-};
 
 void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
 {
         iser_err_comp(wc, "memreg");
 }
 
-int iser_assign_reg_ops(struct iser_device *device)
-{
-        struct ib_device *ib_dev = device->ib_device;
-
-        /* Assign function handles - based on FMR support */
-        if (ib_dev->alloc_fmr && ib_dev->dealloc_fmr &&
-            ib_dev->map_phys_fmr && ib_dev->unmap_fmr) {
-                iser_info("FMR supported, using FMR for registration\n");
-                device->reg_ops = &fmr_ops;
-        } else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
-                iser_info("FastReg supported, using FastReg for registration\n");
-                device->reg_ops = &fastreg_ops;
-                device->remote_inv_sup = iser_always_reg;
-        } else {
-                iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
-                return -1;
-        }
-
-        return 0;
-}
-
-struct iser_fr_desc *
+static struct iser_fr_desc *
 iser_reg_desc_get_fr(struct ib_conn *ib_conn)
 {
         struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
@@ -109,7 +60,7 @@
         return desc;
 }
 
-void
+static void
 iser_reg_desc_put_fr(struct ib_conn *ib_conn,
                      struct iser_fr_desc *desc)
 {
@@ -119,45 +70,6 @@
         spin_lock_irqsave(&fr_pool->lock, flags);
         list_add(&desc->list, &fr_pool->list);
         spin_unlock_irqrestore(&fr_pool->lock, flags);
-}
-
-struct iser_fr_desc *
-iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
-{
-        struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
-
-        return list_first_entry(&fr_pool->list,
-                                struct iser_fr_desc, list);
-}
-
-void
-iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
-                      struct iser_fr_desc *desc)
-{
-}
-
-static void iser_data_buf_dump(struct iser_data_buf *data,
-                               struct ib_device *ibdev)
-{
-        struct scatterlist *sg;
-        int i;
-
-        for_each_sg(data->sg, sg, data->dma_nents, i)
-                iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
-                         "off:0x%x sz:0x%x dma_len:0x%x\n",
-                         i, (unsigned long)ib_sg_dma_address(ibdev, sg),
-                         sg_page(sg), sg->offset,
-                         sg->length, ib_sg_dma_len(ibdev, sg));
-}
-
-static void iser_dump_page_vec(struct iser_page_vec *page_vec)
-{
-        int i;
-
-        iser_err("page vec npages %d data length %lld\n",
-                 page_vec->npages, page_vec->fake_mr.length);
-        for (i = 0; i < page_vec->npages; i++)
-                iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);
 }
 
 int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
@@ -171,7 +83,7 @@
         dev = iser_task->iser_conn->ib_conn.device->ib_device;
 
         data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
-        if (data->dma_nents == 0) {
+        if (unlikely(data->dma_nents == 0)) {
                 iser_err("dma_map_sg failed!!!\n");
                 return -EINVAL;
         }
@@ -204,8 +116,8 @@
                 reg->rkey = device->pd->unsafe_global_rkey;
         else
                 reg->rkey = 0;
-        reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
-        reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
+        reg->sge.addr = sg_dma_address(&sg[0]);
+        reg->sge.length = sg_dma_len(&sg[0]);
 
         iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
                  " length=0x%x\n", reg->sge.lkey, reg->rkey,
@@ -214,94 +126,31 @@
         return 0;
 }
 
-static int iser_set_page(struct ib_mr *mr, u64 addr)
-{
-        struct iser_page_vec *page_vec =
-                container_of(mr, struct iser_page_vec, fake_mr);
-
-        page_vec->pages[page_vec->npages++] = addr;
-
-        return 0;
-}
-
-static
-int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
-                      struct iser_data_buf *mem,
-                      struct iser_reg_resources *rsc,
-                      struct iser_mem_reg *reg)
-{
-        struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
-        struct iser_device *device = ib_conn->device;
-        struct iser_page_vec *page_vec = rsc->page_vec;
-        struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
-        struct ib_pool_fmr *fmr;
-        int ret, plen;
-
-        page_vec->npages = 0;
-        page_vec->fake_mr.page_size = SIZE_4K;
-        plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
-                              mem->dma_nents, NULL, iser_set_page);
-        if (unlikely(plen < mem->dma_nents)) {
-                iser_err("page vec too short to hold this SG\n");
-                iser_data_buf_dump(mem, device->ib_device);
-                iser_dump_page_vec(page_vec);
-                return -EINVAL;
-        }
-
-        fmr = ib_fmr_pool_map_phys(fmr_pool, page_vec->pages,
-                                   page_vec->npages, page_vec->pages[0]);
-        if (IS_ERR(fmr)) {
-                ret = PTR_ERR(fmr);
-                iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
-                return ret;
-        }
-
-        reg->sge.lkey = fmr->fmr->lkey;
-        reg->rkey = fmr->fmr->rkey;
-        reg->sge.addr = page_vec->fake_mr.iova;
-        reg->sge.length = page_vec->fake_mr.length;
-        reg->mem_h = fmr;
-
-        iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
-                 " length=0x%x\n", reg->sge.lkey, reg->rkey,
-                 reg->sge.addr, reg->sge.length);
-
-        return 0;
-}
-
-/**
- * Unregister (previosuly registered using FMR) memory.
- * If memory is non-FMR does nothing.
- */
-void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
-                        enum iser_data_dir cmd_dir)
-{
-        struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
-        int ret;
-
-        if (!reg->mem_h)
-                return;
-
-        iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);
-
-        ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
-        if (ret)
-                iser_err("ib_fmr_pool_unmap failed %d\n", ret);
-
-        reg->mem_h = NULL;
-}
-
 void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
                             enum iser_data_dir cmd_dir)
 {
-        struct iser_device *device = iser_task->iser_conn->ib_conn.device;
         struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
+        struct iser_fr_desc *desc;
+        struct ib_mr_status mr_status;
 
-        if (!reg->mem_h)
+        desc = reg->mem_h;
+        if (!desc)
                 return;
 
-        device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
-                                      reg->mem_h);
+        /*
+         * The signature MR cannot be invalidated and reused without checking.
+         * libiscsi calls the check_protection transport handler only if
+         * SCSI-Response is received. And the signature MR is not checked if
+         * the task is completed for some other reason like a timeout or error
+         * handling. That's why we must check the signature MR here before
+         * putting it to the free pool.
+         */
+        if (unlikely(desc->sig_protected)) {
+                desc->sig_protected = false;
+                ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
+                                   &mr_status);
+        }
+        iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->mem_h);
         reg->mem_h = NULL;
 }
 
```
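The hunk above now checks a still-armed signature MR with ib_check_mr_status() before the descriptor goes back to the free pool; the return value is ignored here because the driver only needs the check to have run before the MR is reused. For reference, a minimal sketch of what that verbs call reports, assuming the stock ib_verbs.h definitions; the helper name and the error mapping are hypothetical and not part of this patch:

```c
#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/*
 * Sketch only: query a signature/integrity MR after a transfer completed
 * and log any T10-DIF mismatch it recorded.
 */
static int example_check_sig_mr(struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret)
		return ret;		/* querying the MR itself failed */

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		/* err_type: IB_SIG_BAD_GUARD / IB_SIG_BAD_REFTAG / IB_SIG_BAD_APPTAG */
		pr_err("PI error %d at offset %llu, expected 0x%x, actual 0x%x\n",
		       mr_status.sig_err.err_type,
		       mr_status.sig_err.sig_err_offset,
		       mr_status.sig_err.expected,
		       mr_status.sig_err.actual);
		return -EILSEQ;
	}

	return 0;
}
```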
```diff
@@ -308,6 +157,5 @@
 static void
-iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
-                    struct ib_sig_domain *domain)
+iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
 {
         domain->sig_type = IB_SIG_TYPE_T10_DIF;
         domain->sig.dif.pi_interval = scsi_prot_interval(sc);
@@ -330,21 +178,21 @@
         case SCSI_PROT_WRITE_INSERT:
         case SCSI_PROT_READ_STRIP:
                 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
-                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
+                iser_set_dif_domain(sc, &sig_attrs->wire);
                 sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
                 break;
         case SCSI_PROT_READ_INSERT:
         case SCSI_PROT_WRITE_STRIP:
                 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
-                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
+                iser_set_dif_domain(sc, &sig_attrs->mem);
                 sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
                                                  IB_T10DIF_CSUM : IB_T10DIF_CRC;
                 break;
         case SCSI_PROT_READ_PASS:
         case SCSI_PROT_WRITE_PASS:
-                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
+                iser_set_dif_domain(sc, &sig_attrs->wire);
                 sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
-                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
+                iser_set_dif_domain(sc, &sig_attrs->mem);
                 sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
                                                  IB_T10DIF_CSUM : IB_T10DIF_CRC;
                 break;
@@ -370,27 +218,29 @@
 static inline void
 iser_inv_rkey(struct ib_send_wr *inv_wr,
               struct ib_mr *mr,
-              struct ib_cqe *cqe)
+              struct ib_cqe *cqe,
+              struct ib_send_wr *next_wr)
 {
         inv_wr->opcode = IB_WR_LOCAL_INV;
         inv_wr->wr_cqe = cqe;
         inv_wr->ex.invalidate_rkey = mr->rkey;
         inv_wr->send_flags = 0;
         inv_wr->num_sge = 0;
+        inv_wr->next = next_wr;
 }
 
 static int
 iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
-                struct iser_pi_context *pi_ctx,
-                struct iser_mem_reg *data_reg,
-                struct iser_mem_reg *prot_reg,
+                struct iser_data_buf *mem,
+                struct iser_data_buf *sig_mem,
+                struct iser_reg_resources *rsc,
                 struct iser_mem_reg *sig_reg)
 {
         struct iser_tx_desc *tx_desc = &iser_task->desc;
-        struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
         struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
-        struct ib_sig_handover_wr *wr;
-        struct ib_mr *mr = pi_ctx->sig_mr;
+        struct ib_mr *mr = rsc->sig_mr;
+        struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
+        struct ib_reg_wr *wr = &tx_desc->reg_wr;
         int ret;
 
         memset(sig_attrs, 0, sizeof(*sig_attrs));
@@ -400,33 +250,36 @@
 
         iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
 
-        if (pi_ctx->sig_mr_valid)
-                iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe);
+        if (rsc->mr_valid)
+                iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
 
         ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 
-        wr = container_of(iser_tx_next_wr(tx_desc), struct ib_sig_handover_wr,
-                          wr);
-        wr->wr.opcode = IB_WR_REG_SIG_MR;
+        ret = ib_map_mr_sg_pi(mr, mem->sg, mem->dma_nents, NULL,
+                              sig_mem->sg, sig_mem->dma_nents, NULL, SZ_4K);
+        if (unlikely(ret)) {
+                iser_err("failed to map PI sg (%d)\n",
+                         mem->dma_nents + sig_mem->dma_nents);
+                goto err;
+        }
+
+        memset(wr, 0, sizeof(*wr));
+        wr->wr.next = &tx_desc->send_wr;
+        wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
         wr->wr.wr_cqe = cqe;
-        wr->wr.sg_list = &data_reg->sge;
-        wr->wr.num_sge = 1;
+        wr->wr.num_sge = 0;
         wr->wr.send_flags = 0;
-        wr->sig_attrs = sig_attrs;
-        wr->sig_mr = mr;
-        if (scsi_prot_sg_count(iser_task->sc))
-                wr->prot = &prot_reg->sge;
-        else
-                wr->prot = NULL;
-        wr->access_flags = IB_ACCESS_LOCAL_WRITE |
-                           IB_ACCESS_REMOTE_READ |
-                           IB_ACCESS_REMOTE_WRITE;
-        pi_ctx->sig_mr_valid = 1;
+        wr->mr = mr;
+        wr->key = mr->rkey;
+        wr->access = IB_ACCESS_LOCAL_WRITE |
+                     IB_ACCESS_REMOTE_READ |
+                     IB_ACCESS_REMOTE_WRITE;
+        rsc->mr_valid = 1;
 
         sig_reg->sge.lkey = mr->lkey;
         sig_reg->rkey = mr->rkey;
-        sig_reg->sge.addr = 0;
-        sig_reg->sge.length = scsi_transfer_length(iser_task->sc);
+        sig_reg->sge.addr = mr->iova;
+        sig_reg->sge.length = mr->length;
 
         iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
                  sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
```
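iser_reg_sig_mr() above replaces the old IB_WR_REG_SIG_MR handover WR with ib_map_mr_sg_pi() plus an IB_WR_REG_MR_INTEGRITY registration WR that is linked, via wr.next, in front of the task's send WR (and behind an optional LOCAL_INV). A minimal sketch of that chaining pattern, assuming the standard verbs API; the function name, parameters, and the immediate ib_post_send() are hypothetical, since the real driver keeps these WRs in the tx descriptor and posts them from its send path:

```c
#include <rdma/ib_verbs.h>

/*
 * Sketch only: register data + protection SGLs on one integrity MR and
 * chain LOCAL_INV -> REG_MR_INTEGRITY -> <I/O work request> into a
 * single post.
 */
static int example_post_pi_chain(struct ib_qp *qp, struct ib_mr *sig_mr,
				 bool mr_valid, struct ib_cqe *reg_cqe,
				 struct scatterlist *data_sg, int data_nents,
				 struct scatterlist *prot_sg, int prot_nents,
				 struct ib_send_wr *io_wr)
{
	struct ib_send_wr inv_wr = {};
	struct ib_reg_wr reg_wr = {};
	struct ib_send_wr *first = &reg_wr.wr;
	int ret;

	/* Lay the data and protection scatterlists over the integrity MR. */
	ret = ib_map_mr_sg_pi(sig_mr, data_sg, data_nents, NULL,
			      prot_sg, prot_nents, NULL, SZ_4K);
	if (ret)
		return ret;

	if (mr_valid) {
		/* Invalidate the old rkey first, as iser_inv_rkey() does. */
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.wr_cqe = reg_cqe;
		inv_wr.ex.invalidate_rkey = sig_mr->rkey;
		inv_wr.next = &reg_wr.wr;
		first = &inv_wr;
	}

	ib_update_fast_reg_key(sig_mr, ib_inc_rkey(sig_mr->rkey));

	reg_wr.wr.opcode = IB_WR_REG_MR_INTEGRITY;
	reg_wr.wr.wr_cqe = reg_cqe;
	reg_wr.wr.next = io_wr;		/* registration precedes the I/O WR */
	reg_wr.mr = sig_mr;
	reg_wr.key = sig_mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_READ |
			IB_ACCESS_REMOTE_WRITE;

	return ib_post_send(qp, first, NULL);
}
```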
```diff
@@ -443,22 +296,22 @@
         struct iser_tx_desc *tx_desc = &iser_task->desc;
         struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
         struct ib_mr *mr = rsc->mr;
-        struct ib_reg_wr *wr;
+        struct ib_reg_wr *wr = &tx_desc->reg_wr;
         int n;
 
         if (rsc->mr_valid)
-                iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe);
+                iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
 
         ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 
-        n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K);
+        n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SZ_4K);
         if (unlikely(n != mem->dma_nents)) {
                 iser_err("failed to map sg (%d/%d)\n",
                          n, mem->dma_nents);
                 return n < 0 ? n : -EINVAL;
         }
 
-        wr = container_of(iser_tx_next_wr(tx_desc), struct ib_reg_wr, wr);
+        wr->wr.next = &tx_desc->send_wr;
         wr->wr.opcode = IB_WR_REG_MR;
         wr->wr.wr_cqe = cqe;
         wr->wr.send_flags = 0;
```
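With the registration WRs now embedded in the tx descriptor (tx_desc->inv_wr and tx_desc->reg_wr, both pre-linked towards tx_desc->send_wr), the send path only has to pick the head of the chain. A sketch of that selection, with a hypothetical helper name and flags; the actual posting logic lives outside these hunks and needs the driver's iscsi_iser.h definitions:

```c
/*
 * Sketch only: choose the first WR to post for a task whose registration
 * WRs were pre-chained by iser_reg_sig_mr() or iser_fast_reg_mr().
 */
static struct ib_send_wr *example_tx_first_wr(struct iser_tx_desc *tx_desc,
					      bool posted_inv, bool posted_reg)
{
	if (posted_inv)
		return &tx_desc->inv_wr;	/* LOCAL_INV -> REG_MR[_INTEGRITY] -> send */
	if (posted_reg)
		return &tx_desc->reg_wr.wr;	/* REG_MR[_INTEGRITY] -> send */
	return &tx_desc->send_wr;		/* nothing to register */
}
```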
```diff
@@ -483,21 +336,6 @@
 }
 
 static int
-iser_reg_prot_sg(struct iscsi_iser_task *task,
-                 struct iser_data_buf *mem,
-                 struct iser_fr_desc *desc,
-                 bool use_dma_key,
-                 struct iser_mem_reg *reg)
-{
-        struct iser_device *device = task->iser_conn->ib_conn.device;
-
-        if (use_dma_key)
-                return iser_reg_dma(device, mem, reg);
-
-        return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
-}
-
-static int
 iser_reg_data_sg(struct iscsi_iser_task *task,
                  struct iser_data_buf *mem,
                  struct iser_fr_desc *desc,
@@ -509,18 +347,16 @@
         if (use_dma_key)
                 return iser_reg_dma(device, mem, reg);
 
-        return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
+        return iser_fast_reg_mr(task, mem, &desc->rsc, reg);
 }
 
-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
-                      enum iser_data_dir dir,
-                      bool all_imm)
+int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
+                         enum iser_data_dir dir,
+                         bool all_imm)
 {
         struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
-        struct iser_device *device = ib_conn->device;
         struct iser_data_buf *mem = &task->data[dir];
         struct iser_mem_reg *reg = &task->rdma_reg[dir];
-        struct iser_mem_reg *data_reg;
         struct iser_fr_desc *desc = NULL;
         bool use_dma_key;
         int err;
@@ -529,51 +365,29 @@
                       scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;
 
         if (!use_dma_key) {
-                desc = device->reg_ops->reg_desc_get(ib_conn);
+                desc = iser_reg_desc_get_fr(ib_conn);
                 reg->mem_h = desc;
         }
 
-        if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL)
-                data_reg = reg;
-        else
-                data_reg = &task->desc.data_reg;
-
-        err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
-        if (unlikely(err))
-                goto err_reg;
-
-        if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
-                struct iser_mem_reg *prot_reg = &task->desc.prot_reg;
-
-                if (scsi_prot_sg_count(task->sc)) {
-                        mem = &task->prot[dir];
-                        err = iser_reg_prot_sg(task, mem, desc,
-                                               use_dma_key, prot_reg);
-                        if (unlikely(err))
-                                goto err_reg;
-                }
-
-                err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg,
-                                      prot_reg, reg);
+        if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) {
+                err = iser_reg_data_sg(task, mem, desc, use_dma_key, reg);
+                if (unlikely(err))
+                        goto err_reg;
+        } else {
+                err = iser_reg_sig_mr(task, mem, &task->prot[dir],
+                                      &desc->rsc, reg);
                 if (unlikely(err))
                         goto err_reg;
 
-                desc->pi_ctx->sig_protected = 1;
+                desc->sig_protected = true;
         }
 
         return 0;
 
 err_reg:
         if (desc)
-                device->reg_ops->reg_desc_put(ib_conn, desc);
+                iser_reg_desc_put_fr(ib_conn, desc);
 
         return err;
 }
-
-void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
-                         enum iser_data_dir dir)
-{
-        struct iser_device *device = task->iser_conn->ib_conn.device;
-
-        device->reg_ops->unreg_mem(task, dir);
-}
```
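Finally, the exported entry point is renamed from iser_reg_rdma_mem() to iser_reg_mem_fastreg(), and the iser_unreg_rdma_mem() wrapper (which only bounced through reg_ops->unreg_mem) is dropped in favour of calling iser_unreg_mem_fastreg() directly. A hypothetical caller-side illustration, not part of these hunks; ISER_DIR_IN is the driver's read-direction constant and the real call sites would be updated alongside this change:

```c
/*
 * Hypothetical illustration only: tasks now call the fastreg helpers
 * directly instead of going through the removed reg_ops indirection.
 */
static int example_setup_read_rdma(struct iscsi_iser_task *iser_task)
{
	/* formerly: iser_reg_rdma_mem(iser_task, ISER_DIR_IN, false) */
	int err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);

	if (err)
		return err;

	/*
	 * Teardown later calls iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN)
	 * directly instead of the removed iser_unreg_rdma_mem() wrapper.
	 */
	return 0;
}
```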