From bedbef8ad3e75a304af6361af235302bcc61d06b Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 14 May 2024 06:39:01 +0000
Subject: [PATCH] Modify kernel path

Bring iser_verbs.c under the vendored kernel/ tree in line with the
upstream RDMA core: drop the FMR registration path and the per-device
completion-queue array, take CQs from the shared CQ pool, move T10-PI
support to the integrity MR API (ib_alloc_mr_integrity,
IB_QP_CREATE_INTEGRITY_EN), and switch to rdma_connect_locked() and
dev_name()-style device naming.

---
 kernel/drivers/infiniband/ulp/iser/iser_verbs.c | 455 +++++++++++++++++---------------------------------
 1 file changed, 136 insertions(+), 319 deletions(-)

diff --git a/kernel/drivers/infiniband/ulp/iser/iser_verbs.c b/kernel/drivers/infiniband/ulp/iser/iser_verbs.c
index bee8c0b..2bd18b0 100644
--- a/kernel/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/kernel/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -55,261 +55,50 @@
 {
 	iser_err("async event %s (%d) on device %s port %d\n",
 		 ib_event_msg(event->event), event->event,
-		 event->device->name, event->element.port_num);
+		 dev_name(&event->device->dev), event->element.port_num);
 }
 
-/**
+/*
  * iser_create_device_ib_res - creates Protection Domain (PD), Completion
  * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
- * the adapator.
+ * the adaptor.
  *
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
  */
 static int iser_create_device_ib_res(struct iser_device *device)
 {
 	struct ib_device *ib_dev = device->ib_device;
-	int ret, i, max_cqe;
 
-	ret = iser_assign_reg_ops(device);
-	if (ret)
-		return ret;
-
-	device->comps_used = min_t(int, num_online_cpus(),
-				 ib_dev->num_comp_vectors);
-
-	device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
-				GFP_KERNEL);
-	if (!device->comps)
-		goto comps_err;
-
-	max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe);
-
-	iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
-		  device->comps_used, ib_dev->name,
-		  ib_dev->num_comp_vectors, max_cqe);
+	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+		iser_err("IB device does not support memory registrations\n");
+		return -1;
+	}
 
 	device->pd = ib_alloc_pd(ib_dev,
 		iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
 	if (IS_ERR(device->pd))
 		goto pd_err;
 
-	for (i = 0; i < device->comps_used; i++) {
-		struct iser_comp *comp = &device->comps[i];
-
-		comp->cq = ib_alloc_cq(ib_dev, comp, max_cqe, i,
-				       IB_POLL_SOFTIRQ);
-		if (IS_ERR(comp->cq)) {
-			comp->cq = NULL;
-			goto cq_err;
-		}
-	}
-
 	INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
 			      iser_event_handler);
 	ib_register_event_handler(&device->event_handler);
 	return 0;
 
-cq_err:
-	for (i = 0; i < device->comps_used; i++) {
-		struct iser_comp *comp = &device->comps[i];
-
-		if (comp->cq)
-			ib_free_cq(comp->cq);
-	}
-	ib_dealloc_pd(device->pd);
 pd_err:
-	kfree(device->comps);
-comps_err:
 	iser_err("failed to allocate an IB resource\n");
 	return -1;
 }
 
-/**
+/*
  * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
- * CQ and PD created with the device associated with the adapator.
+ * CQ and PD created with the device associated with the adaptor.
  */
 static void iser_free_device_ib_res(struct iser_device *device)
 {
-	int i;
-
-	for (i = 0; i < device->comps_used; i++) {
-		struct iser_comp *comp = &device->comps[i];
-
-		ib_free_cq(comp->cq);
-		comp->cq = NULL;
-	}
-
 	ib_unregister_event_handler(&device->event_handler);
 	ib_dealloc_pd(device->pd);
 
-	kfree(device->comps);
-	device->comps = NULL;
 	device->pd = NULL;
-}
-
-/**
- * iser_alloc_fmr_pool - Creates FMR pool and page_vector
- *
- * returns 0 on success, or errno code on failure
- */
-int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
-			unsigned cmds_max,
-			unsigned int size)
-{
-	struct iser_device *device = ib_conn->device;
-	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
-	struct iser_page_vec *page_vec;
-	struct iser_fr_desc *desc;
-	struct ib_fmr_pool *fmr_pool;
-	struct ib_fmr_pool_param params;
-	int ret;
-
-	INIT_LIST_HEAD(&fr_pool->list);
-	spin_lock_init(&fr_pool->lock);
-
-	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-	if (!desc)
-		return -ENOMEM;
-
-	page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size),
-			   GFP_KERNEL);
-	if (!page_vec) {
-		ret = -ENOMEM;
-		goto err_frpl;
-	}
-
-	page_vec->pages = (u64 *)(page_vec + 1);
-
-	params.page_shift = SHIFT_4K;
-	params.max_pages_per_fmr = size;
-	/* make the pool size twice the max number of SCSI commands *
-	 * the ML is expected to queue, watermark for unmap at 50%  */
-	params.pool_size = cmds_max * 2;
-	params.dirty_watermark = cmds_max;
-	params.cache = 0;
-	params.flush_function = NULL;
-	params.access = (IB_ACCESS_LOCAL_WRITE |
-			 IB_ACCESS_REMOTE_WRITE |
-			 IB_ACCESS_REMOTE_READ);
-
-	fmr_pool = ib_create_fmr_pool(device->pd, &params);
-	if (IS_ERR(fmr_pool)) {
-		ret = PTR_ERR(fmr_pool);
-		iser_err("FMR allocation failed, err %d\n", ret);
-		goto err_fmr;
-	}
-
-	desc->rsc.page_vec = page_vec;
-	desc->rsc.fmr_pool = fmr_pool;
-	list_add(&desc->list, &fr_pool->list);
-
-	return 0;
-
-err_fmr:
-	kfree(page_vec);
-err_frpl:
-	kfree(desc);
-
-	return ret;
-}
-
-/**
- * iser_free_fmr_pool - releases the FMR pool and page vec
- */
-void iser_free_fmr_pool(struct ib_conn *ib_conn)
-{
-	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
-	struct iser_fr_desc *desc;
-
-	desc = list_first_entry(&fr_pool->list,
-				struct iser_fr_desc, list);
-	list_del(&desc->list);
-
-	iser_info("freeing conn %p fmr pool %p\n",
-		  ib_conn, desc->rsc.fmr_pool);
-
-	ib_destroy_fmr_pool(desc->rsc.fmr_pool);
-	kfree(desc->rsc.page_vec);
-	kfree(desc);
-}
-
-static int
-iser_alloc_reg_res(struct iser_device *device,
-		   struct ib_pd *pd,
-		   struct iser_reg_resources *res,
-		   unsigned int size)
-{
-	struct ib_device *ib_dev = device->ib_device;
-	enum ib_mr_type mr_type;
-	int ret;
-
-	if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
-		mr_type = IB_MR_TYPE_SG_GAPS;
-	else
-		mr_type = IB_MR_TYPE_MEM_REG;
-
-	res->mr = ib_alloc_mr(pd, mr_type, size);
-	if (IS_ERR(res->mr)) {
-		ret = PTR_ERR(res->mr);
-		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
-		return ret;
-	}
-	res->mr_valid = 0;
-
-	return 0;
-}
-
-static void
-iser_free_reg_res(struct iser_reg_resources *rsc)
-{
-	ib_dereg_mr(rsc->mr);
-}
-
-static int
-iser_alloc_pi_ctx(struct iser_device *device,
-		  struct ib_pd *pd,
-		  struct iser_fr_desc *desc,
-		  unsigned int size)
-{
-	struct iser_pi_context *pi_ctx = NULL;
-	int ret;
-
-	desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
-	if (!desc->pi_ctx)
-		return -ENOMEM;
-
-	pi_ctx = desc->pi_ctx;
-
-	ret = iser_alloc_reg_res(device, pd, &pi_ctx->rsc, size);
-	if (ret) {
-		iser_err("failed to allocate reg_resources\n");
-		goto alloc_reg_res_err;
-	}
-
-	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
-	if (IS_ERR(pi_ctx->sig_mr)) {
-		ret = PTR_ERR(pi_ctx->sig_mr);
-		goto sig_mr_failure;
-	}
-	pi_ctx->sig_mr_valid = 0;
-	desc->pi_ctx->sig_protected = 0;
-
-	return 0;
-
-sig_mr_failure:
-	iser_free_reg_res(&pi_ctx->rsc);
-alloc_reg_res_err:
-	kfree(desc->pi_ctx);
-
-	return ret;
-}
-
-static void
-iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
-{
-	iser_free_reg_res(&pi_ctx->rsc);
-	ib_dereg_mr(pi_ctx->sig_mr);
-	kfree(pi_ctx);
 }
 
 static struct iser_fr_desc *
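Note: the hunk above removes the legacy FMR pool code (ib_create_fmr_pool and
friends) together with the per-device comps[] completion-queue array, so
device setup shrinks to one capability gate plus a PD allocation. A minimal
sketch of that gate, assuming only <rdma/ib_verbs.h>; the helper name is
illustrative, not part of the patch:

	/* FRWR (fast registration work requests) is now mandatory, so
	 * refuse devices that cannot do memory registrations at all. */
	static bool ulp_has_fastreg(struct ib_device *ib_dev)
	{
		return !!(ib_dev->attrs.device_cap_flags &
			  IB_DEVICE_MEM_MGT_EXTENSIONS);
	}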
@@ -319,36 +108,66 @@
 			 unsigned int size)
 {
 	struct iser_fr_desc *desc;
+	struct ib_device *ib_dev = device->ib_device;
+	enum ib_mr_type mr_type;
 	int ret;
 
 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 	if (!desc)
 		return ERR_PTR(-ENOMEM);
 
-	ret = iser_alloc_reg_res(device, pd, &desc->rsc, size);
-	if (ret)
-		goto reg_res_alloc_failure;
+	if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+		mr_type = IB_MR_TYPE_SG_GAPS;
+	else
+		mr_type = IB_MR_TYPE_MEM_REG;
+
+	desc->rsc.mr = ib_alloc_mr(pd, mr_type, size);
+	if (IS_ERR(desc->rsc.mr)) {
+		ret = PTR_ERR(desc->rsc.mr);
+		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
+		goto err_alloc_mr;
+	}
 
 	if (pi_enable) {
-		ret = iser_alloc_pi_ctx(device, pd, desc, size);
-		if (ret)
-			goto pi_ctx_alloc_failure;
+		desc->rsc.sig_mr = ib_alloc_mr_integrity(pd, size, size);
+		if (IS_ERR(desc->rsc.sig_mr)) {
+			ret = PTR_ERR(desc->rsc.sig_mr);
+			iser_err("Failed to allocate sig_mr err=%d\n", ret);
+			goto err_alloc_mr_integrity;
+		}
 	}
+	desc->rsc.mr_valid = 0;
 
 	return desc;
 
-pi_ctx_alloc_failure:
-	iser_free_reg_res(&desc->rsc);
-reg_res_alloc_failure:
+err_alloc_mr_integrity:
+	ib_dereg_mr(desc->rsc.mr);
+err_alloc_mr:
 	kfree(desc);
 
 	return ERR_PTR(ret);
 }
 
+static void iser_destroy_fastreg_desc(struct iser_fr_desc *desc)
+{
+	struct iser_reg_resources *res = &desc->rsc;
+
+	ib_dereg_mr(res->mr);
+	if (res->sig_mr) {
+		ib_dereg_mr(res->sig_mr);
+		res->sig_mr = NULL;
+	}
+	kfree(desc);
+}
+
 /**
  * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
  * for fast registration work requests.
- * returns 0 on success, or errno code on failure
+ * @ib_conn: connection RDMA resources
+ * @cmds_max: max number of SCSI commands for this connection
+ * @size: max number of pages per map request
+ *
+ * Return: 0 on success, or errno code on failure
  */
 int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
 			    unsigned cmds_max,
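Note: with iser_pi_context gone, each descriptor above carries the data MR
and, when T10-PI is enabled, a single integrity MR allocated via
ib_alloc_mr_integrity(), which covers data and protection metadata in one
registration (the old code used a separate IB_MR_TYPE_SIGNATURE MR). A
trimmed sketch of the pairing and its unwind order, assuming
<rdma/ib_verbs.h>, with `size` meaning max pages per request as in the hunk:

	struct ib_mr *mr, *sig_mr;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, size);
	if (IS_ERR(mr))
		return PTR_ERR(mr);
	/* one SG list for data, one for protection metadata */
	sig_mr = ib_alloc_mr_integrity(pd, size, size);
	if (IS_ERR(sig_mr)) {
		ib_dereg_mr(mr);	/* unwind in reverse order */
		return PTR_ERR(sig_mr);
	}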
@@ -385,6 +204,7 @@
 
 /**
  * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
+ * @ib_conn: connection RDMA resources
  */
 void iser_free_fastreg_pool(struct ib_conn *ib_conn)
 {
@@ -399,10 +219,7 @@
 
 	list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
 		list_del(&desc->all_list);
-		iser_free_reg_res(&desc->rsc);
-		if (desc->pi_ctx)
-			iser_free_pi_ctx(desc->pi_ctx);
-		kfree(desc);
+		iser_destroy_fastreg_desc(desc);
 		++i;
 	}
 
@@ -411,10 +228,10 @@
 		  fr_pool->size - i);
 }
 
-/**
+/*
  * iser_create_ib_conn_res - Queue-Pair (QP)
 *
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
  */
 static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
 {
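Note: the QP hunk below switches from the driver-private "pick the
least-loaded comps[] entry" scheme to the core's shared CQ pool. The pool
spreads users over completion vectors itself; the connection only records
cq_size so the CQE quota can be returned on teardown. A sketch of the
get/put pairing, assuming <rdma/ib_verbs.h>; a comp_vector hint of -1 lets
the core choose:

	struct ib_cq *cq;

	cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq))
		return PTR_ERR(cq);
	/* ... use cq as both send_cq and recv_cq of the QP ... */
	ib_cq_pool_put(cq, cq_size);	/* give the CQEs back on teardown */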
@@ -423,75 +240,63 @@
 	struct ib_device *ib_dev;
 	struct ib_qp_init_attr init_attr;
 	int ret = -ENOMEM;
-	int index, min_index = 0;
+	unsigned int max_send_wr, cq_size;
 
 	BUG_ON(ib_conn->device == NULL);
 
 	device = ib_conn->device;
 	ib_dev = device->ib_device;
 
-	memset(&init_attr, 0, sizeof init_attr);
+	if (ib_conn->pi_support)
+		max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
+	else
+		max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
+	max_send_wr = min_t(unsigned int, max_send_wr,
+			    (unsigned int)ib_dev->attrs.max_qp_wr);
 
-	mutex_lock(&ig.connlist_mutex);
-	/* select the CQ with the minimal number of usages */
-	for (index = 0; index < device->comps_used; index++) {
-		if (device->comps[index].active_qps <
-		    device->comps[min_index].active_qps)
-			min_index = index;
+	cq_size = max_send_wr + ISER_QP_MAX_RECV_DTOS;
+	ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);
+	if (IS_ERR(ib_conn->cq)) {
+		ret = PTR_ERR(ib_conn->cq);
+		goto cq_err;
 	}
-	ib_conn->comp = &device->comps[min_index];
-	ib_conn->comp->active_qps++;
-	mutex_unlock(&ig.connlist_mutex);
-	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+	ib_conn->cq_size = cq_size;
+
+	memset(&init_attr, 0, sizeof(init_attr));
 
 	init_attr.event_handler = iser_qp_event_callback;
 	init_attr.qp_context = (void *)ib_conn;
-	init_attr.send_cq = ib_conn->comp->cq;
-	init_attr.recv_cq = ib_conn->comp->cq;
+	init_attr.send_cq = ib_conn->cq;
+	init_attr.recv_cq = ib_conn->cq;
 	init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
 	init_attr.cap.max_send_sge = 2;
 	init_attr.cap.max_recv_sge = 1;
 	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 	init_attr.qp_type = IB_QPT_RC;
-	if (ib_conn->pi_support) {
-		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
-		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
-		iser_conn->max_cmds =
-			ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
-	} else {
-		if (ib_dev->attrs.max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
-			init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS + 1;
-			iser_conn->max_cmds =
-				ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
-		} else {
-			init_attr.cap.max_send_wr = ib_dev->attrs.max_qp_wr;
-			iser_conn->max_cmds =
-				ISER_GET_MAX_XMIT_CMDS(ib_dev->attrs.max_qp_wr);
-			iser_dbg("device %s supports max_send_wr %d\n",
-				 device->ib_device->name, ib_dev->attrs.max_qp_wr);
-		}
-	}
+	init_attr.cap.max_send_wr = max_send_wr;
+	if (ib_conn->pi_support)
+		init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
+	iser_conn->max_cmds = ISER_GET_MAX_XMIT_CMDS(max_send_wr - 1);
 
 	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
 	if (ret)
 		goto out_err;
 
 	ib_conn->qp = ib_conn->cma_id->qp;
-	iser_info("setting conn %p cma_id %p qp %p\n",
+	iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n",
 		  ib_conn, ib_conn->cma_id,
-		  ib_conn->cma_id->qp);
+		  ib_conn->cma_id->qp, max_send_wr);
 	return ret;
 
 out_err:
-	mutex_lock(&ig.connlist_mutex);
-	ib_conn->comp->active_qps--;
-	mutex_unlock(&ig.connlist_mutex);
+	ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
+cq_err:
 	iser_err("unable to alloc mem or create resource, err %d\n", ret);
 
 	return ret;
 }
 
-/**
+/*
  * based on the resolved device node GUID see if there already allocated
  * device for this device. If there's no such, create one.
  */
@@ -542,9 +347,9 @@
 	mutex_unlock(&ig.device_list_mutex);
 }
 
-/**
+/*
  * Called with state mutex held
- **/
+ */
 static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
 				     enum iser_conn_state comp,
 				     enum iser_conn_state exch)
@@ -597,10 +402,8 @@
 		     iser_conn, ib_conn->cma_id, ib_conn->qp);
 
 	if (ib_conn->qp != NULL) {
-		mutex_lock(&ig.connlist_mutex);
-		ib_conn->comp->active_qps--;
-		mutex_unlock(&ig.connlist_mutex);
 		rdma_destroy_qp(ib_conn->cma_id);
+		ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
 		ib_conn->qp = NULL;
 	}
 
@@ -616,7 +419,8 @@
 }
 
 /**
- * Frees all conn objects and deallocs conn descriptor
+ * iser_conn_release - Frees all conn objects and deallocs conn descriptor
+ * @iser_conn: iSER connection context
  */
 void iser_conn_release(struct iser_conn *iser_conn)
 {
@@ -650,7 +454,10 @@
 }
 
 /**
- * triggers start of the disconnect procedures and wait for them to be done
+ * iser_conn_terminate - triggers start of the disconnect procedures and
+ * waits for them to be done
+ * @iser_conn: iSER connection context
+ *
 * Called with state mutex held
 */
 int iser_conn_terminate(struct iser_conn *iser_conn)
@@ -687,9 +494,9 @@
 	return 1;
 }
 
-/**
+/*
 * Called with state mutex held
- **/
+ */
 static void iser_connect_error(struct rdma_cm_id *cma_id)
 {
 	struct iser_conn *iser_conn;
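Note: the next hunk recomputes the supported scatter-gather table size from
max_sectors and the device's fast-reg page-list limit (using the PI-specific
limit when protection is on). A worked example under assumed values,
max_sectors = 1024 (a 512 KiB I/O) on a device without IB_DEVICE_SG_GAPS_REG
whose limit does not clamp the result:

	/* sg_tablesize      = DIV_ROUND_UP(1024 * SECTOR_SIZE, SZ_4K) = 128
	 * reserved_mr_pages = 1   (spare entry for a misaligned head/tail)
	 * pages_per_mr      = 128 + 1 = 129
	 */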
@@ -706,36 +513,35 @@
 	struct ib_device_attr *attr = &device->ib_device->attrs;
 	unsigned short sg_tablesize, sup_sg_tablesize;
 	unsigned short reserved_mr_pages;
+	u32 max_num_sg;
 
 	/*
-	 * FRs without SG_GAPS or FMRs can only map up to a (device) page per
-	 * entry, but if the first entry is misaligned we'll end up using two
-	 * entries (head and tail) for a single page worth data, so one
-	 * additional entry is required.
+	 * FRs without SG_GAPS can only map up to a (device) page per entry,
+	 * but if the first entry is misaligned we'll end up using two entries
+	 * (head and tail) for a single page worth data, so one additional
+	 * entry is required.
 	 */
-	if ((attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) &&
-	    (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+	if (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
 		reserved_mr_pages = 0;
 	else
 		reserved_mr_pages = 1;
 
-	sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
-	if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
-		sup_sg_tablesize =
-			min_t(
-			 uint, ISCSI_ISER_MAX_SG_TABLESIZE,
-			 attr->max_fast_reg_page_list_len - reserved_mr_pages);
+	if (iser_conn->ib_conn.pi_support)
+		max_num_sg = attr->max_pi_fast_reg_page_list_len;
 	else
-		sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
+		max_num_sg = attr->max_fast_reg_page_list_len;
 
+	sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
+	sup_sg_tablesize = min_t(uint, ISCSI_ISER_MAX_SG_TABLESIZE,
+				 max_num_sg - reserved_mr_pages);
 	iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
 	iser_conn->pages_per_mr =
 		iser_conn->scsi_sg_tablesize + reserved_mr_pages;
 }
 
-/**
+/*
  * Called with state mutex held
- **/
+ */
 static void iser_addr_handler(struct rdma_cm_id *cma_id)
 {
 	struct iser_device *device;
@@ -761,10 +567,10 @@
 	/* connection T10-PI support */
 	if (iser_pi_enable) {
 		if (!(device->ib_device->attrs.device_cap_flags &
-		      IB_DEVICE_SIGNATURE_HANDOVER)) {
+		      IB_DEVICE_INTEGRITY_HANDOVER)) {
 			iser_warn("T10-PI requested but not supported on %s, "
 				  "continue without T10-PI\n",
-				  ib_conn->device->ib_device->name);
+				  dev_name(&ib_conn->device->ib_device->dev));
 			ib_conn->pi_support = false;
 		} else {
 			ib_conn->pi_support = true;
@@ -781,9 +587,9 @@
 	}
 }
 
-/**
+/*
  * Called with state mutex held
- **/
+ */
 static void iser_route_handler(struct rdma_cm_id *cma_id)
 {
 	struct rdma_conn_param conn_param;
@@ -791,7 +597,7 @@
 	struct iser_cm_hdr req_hdr;
 	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
 	struct ib_conn *ib_conn = &iser_conn->ib_conn;
-	struct iser_device *device = ib_conn->device;
+	struct ib_device *ib_dev = ib_conn->device->ib_device;
 
 	if (iser_conn->state != ISER_CONN_PENDING)
 		/* bailout */
@@ -802,19 +608,19 @@
 		goto failure;
 
 	memset(&conn_param, 0, sizeof conn_param);
-	conn_param.responder_resources = device->ib_device->attrs.max_qp_rd_atom;
+	conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
 	conn_param.initiator_depth = 1;
 	conn_param.retry_count = 7;
 	conn_param.rnr_retry_count = 6;
 
 	memset(&req_hdr, 0, sizeof(req_hdr));
 	req_hdr.flags = ISER_ZBVA_NOT_SUP;
-	if (!device->remote_inv_sup)
+	if (!iser_always_reg)
 		req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
 	conn_param.private_data = (void *)&req_hdr;
 	conn_param.private_data_len = sizeof(struct iser_cm_hdr);
 
-	ret = rdma_connect(cma_id, &conn_param);
+	ret = rdma_connect_locked(cma_id, &conn_param);
 	if (ret) {
 		iser_err("failure connecting: %d\n", ret);
 		goto failure;
@@ -905,7 +711,7 @@
 	case RDMA_CM_EVENT_REJECTED:
 		iser_info("Connection rejected: %s\n",
 			  rdma_reject_msg(cma_id, event->status));
-		/* FALLTHROUGH */
+		fallthrough;
 	case RDMA_CM_EVENT_ADDR_ERROR:
 	case RDMA_CM_EVENT_ROUTE_ERROR:
 	case RDMA_CM_EVENT_CONNECT_ERROR:
@@ -1068,7 +874,7 @@
 
 	ib_conn->post_recv_buf_count += count;
 	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL);
-	if (ib_ret) {
+	if (unlikely(ib_ret)) {
 		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
 		ib_conn->post_recv_buf_count -= count;
 	} else
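Note: in the send path below, the descriptor's work requests are pre-linked
into an invalidate -> registration -> send chain, and iser_post_send() simply
starts from the first prepared link; a single ib_post_send() call then rings
one doorbell and the HCA executes the chain in order. A generic sketch of the
pattern with hypothetical local WRs, assuming <rdma/ib_verbs.h>:

	struct ib_reg_wr reg_wr = {};
	struct ib_send_wr send_wr = {}, *first = &send_wr;
	bool need_reg = true;	/* hypothetical: this task needs a reg WR */
	int ret;

	if (need_reg) {
		reg_wr.wr.next = &send_wr;	/* reg completes before send */
		first = &reg_wr.wr;
	}
	ret = ib_post_send(qp, first, NULL);	/* one doorbell for the chain */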
@@ -1079,14 +885,18 @@
 
 /**
- * iser_start_send - Initiate a Send DTO operation
+ * iser_post_send - Initiate a Send DTO operation
+ * @ib_conn: connection RDMA resources
+ * @tx_desc: iSER TX descriptor
+ * @signal: true to send work request as SIGNALED
  *
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
  */
 int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
 		   bool signal)
 {
-	struct ib_send_wr *wr = iser_tx_next_wr(tx_desc);
+	struct ib_send_wr *wr = &tx_desc->send_wr;
+	struct ib_send_wr *first_wr;
 	int ib_ret;
 
 	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
@@ -1100,8 +910,15 @@
 	wr->opcode = IB_WR_SEND;
 	wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
 
-	ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, NULL);
-	if (ib_ret)
+	if (tx_desc->inv_wr.next)
+		first_wr = &tx_desc->inv_wr;
+	else if (tx_desc->reg_wr.wr.next)
+		first_wr = &tx_desc->reg_wr.wr;
+	else
+		first_wr = wr;
+
+	ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL);
+	if (unlikely(ib_ret))
 		iser_err("ib_post_send failed, ret:%d opcode:%d\n",
 			 ib_ret, wr->opcode);
 
@@ -1117,12 +934,12 @@
 	struct ib_mr_status mr_status;
 	int ret;
 
-	if (desc && desc->pi_ctx->sig_protected) {
-		desc->pi_ctx->sig_protected = 0;
-		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
+	if (desc && desc->sig_protected) {
+		desc->sig_protected = false;
+		ret = ib_check_mr_status(desc->rsc.sig_mr,
 					 IB_MR_CHECK_SIG_STATUS, &mr_status);
 		if (ret) {
-			pr_err("ib_check_mr_status failed, ret %d\n", ret);
+			iser_err("ib_check_mr_status failed, ret %d\n", ret);
 			/* Not a lot we can do, return ambiguous guard error */
 			*sector = 0;
 			return 0x1;
@@ -1134,7 +951,7 @@
 			sector_div(sector_off, sector_size + 8);
 			*sector = scsi_get_lba(iser_task->sc) + sector_off;
 
-			pr_err("PI error found type %d at sector %llx "
+			iser_err("PI error found type %d at sector %llx "
 				"expected %x vs actual %x\n",
 				mr_status.sig_err.err_type,
 				(unsigned long long)*sector,
-- 
Gitblit v1.6.2