From bedbef8ad3e75a304af6361af235302bcc61d06b Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 14 May 2024 06:39:01 +0000
Subject: [PATCH] Modify kernel path

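In iser_memory.c, this diff removes the legacy FMR registration path
(fmr_ops, iser_fast_reg_fmr, iser_unreg_mem_fmr) and registers memory via
fast registration only. Protected (T10-PI) transfers now use the integrity
MR API (ib_map_mr_sg_pi / IB_WR_REG_MR_INTEGRITY), and the signature MR
status is checked before a descriptor is returned to the free pool.
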
---
 kernel/drivers/infiniband/ulp/iser/iser_memory.c |  340 ++++++++++++-------------------------------------------
 1 file changed, 77 insertions(+), 263 deletions(-)

diff --git a/kernel/drivers/infiniband/ulp/iser/iser_memory.c b/kernel/drivers/infiniband/ulp/iser/iser_memory.c
index 379bc0d..d4e057f 100644
--- a/kernel/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/kernel/drivers/infiniband/ulp/iser/iser_memory.c
@@ -38,62 +38,13 @@
 #include <linux/scatterlist.h>
 
 #include "iscsi_iser.h"
-static
-int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
-		      struct iser_data_buf *mem,
-		      struct iser_reg_resources *rsc,
-		      struct iser_mem_reg *mem_reg);
-static
-int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
-		     struct iser_data_buf *mem,
-		     struct iser_reg_resources *rsc,
-		     struct iser_mem_reg *mem_reg);
-
-static const struct iser_reg_ops fastreg_ops = {
-	.alloc_reg_res	= iser_alloc_fastreg_pool,
-	.free_reg_res	= iser_free_fastreg_pool,
-	.reg_mem	= iser_fast_reg_mr,
-	.unreg_mem	= iser_unreg_mem_fastreg,
-	.reg_desc_get	= iser_reg_desc_get_fr,
-	.reg_desc_put	= iser_reg_desc_put_fr,
-};
-
-static const struct iser_reg_ops fmr_ops = {
-	.alloc_reg_res	= iser_alloc_fmr_pool,
-	.free_reg_res	= iser_free_fmr_pool,
-	.reg_mem	= iser_fast_reg_fmr,
-	.unreg_mem	= iser_unreg_mem_fmr,
-	.reg_desc_get	= iser_reg_desc_get_fmr,
-	.reg_desc_put	= iser_reg_desc_put_fmr,
-};
 
 void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
 {
 	iser_err_comp(wc, "memreg");
 }
 
-int iser_assign_reg_ops(struct iser_device *device)
-{
-	struct ib_device *ib_dev = device->ib_device;
-
-	/* Assign function handles  - based on FMR support */
-	if (ib_dev->alloc_fmr && ib_dev->dealloc_fmr &&
-	    ib_dev->map_phys_fmr && ib_dev->unmap_fmr) {
-		iser_info("FMR supported, using FMR for registration\n");
-		device->reg_ops = &fmr_ops;
-	} else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
-		iser_info("FastReg supported, using FastReg for registration\n");
-		device->reg_ops = &fastreg_ops;
-		device->remote_inv_sup = iser_always_reg;
-	} else {
-		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
-		return -1;
-	}
-
-	return 0;
-}
-
-struct iser_fr_desc *
+static struct iser_fr_desc *
 iser_reg_desc_get_fr(struct ib_conn *ib_conn)
 {
 	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
@@ -109,7 +60,7 @@
 	return desc;
 }
 
-void
+static void
 iser_reg_desc_put_fr(struct ib_conn *ib_conn,
 		     struct iser_fr_desc *desc)
 {
@@ -119,45 +70,6 @@
 	spin_lock_irqsave(&fr_pool->lock, flags);
 	list_add(&desc->list, &fr_pool->list);
 	spin_unlock_irqrestore(&fr_pool->lock, flags);
-}
-
-struct iser_fr_desc *
-iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
-{
-	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
-
-	return list_first_entry(&fr_pool->list,
-				struct iser_fr_desc, list);
-}
-
-void
-iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
-		      struct iser_fr_desc *desc)
-{
-}
-
-static void iser_data_buf_dump(struct iser_data_buf *data,
-			       struct ib_device *ibdev)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(data->sg, sg, data->dma_nents, i)
-		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
-			 "off:0x%x sz:0x%x dma_len:0x%x\n",
-			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
-			 sg_page(sg), sg->offset,
-			 sg->length, ib_sg_dma_len(ibdev, sg));
-}
-
-static void iser_dump_page_vec(struct iser_page_vec *page_vec)
-{
-	int i;
-
-	iser_err("page vec npages %d data length %lld\n",
-		 page_vec->npages, page_vec->fake_mr.length);
-	for (i = 0; i < page_vec->npages; i++)
-		iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);
 }
 
 int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
@@ -171,7 +83,7 @@
 	dev = iser_task->iser_conn->ib_conn.device->ib_device;
 
 	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
-	if (data->dma_nents == 0) {
+	if (unlikely(data->dma_nents == 0)) {
 		iser_err("dma_map_sg failed!!!\n");
 		return -EINVAL;
 	}
@@ -204,8 +116,8 @@
 		reg->rkey = device->pd->unsafe_global_rkey;
 	else
 		reg->rkey = 0;
-	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
-	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
+	reg->sge.addr = sg_dma_address(&sg[0]);
+	reg->sge.length = sg_dma_len(&sg[0]);
 
 	iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
 		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
@@ -214,100 +126,36 @@
 	return 0;
 }
 
-static int iser_set_page(struct ib_mr *mr, u64 addr)
-{
-	struct iser_page_vec *page_vec =
-		container_of(mr, struct iser_page_vec, fake_mr);
-
-	page_vec->pages[page_vec->npages++] = addr;
-
-	return 0;
-}
-
-static
-int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
-		      struct iser_data_buf *mem,
-		      struct iser_reg_resources *rsc,
-		      struct iser_mem_reg *reg)
-{
-	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
-	struct iser_device *device = ib_conn->device;
-	struct iser_page_vec *page_vec = rsc->page_vec;
-	struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
-	struct ib_pool_fmr *fmr;
-	int ret, plen;
-
-	page_vec->npages = 0;
-	page_vec->fake_mr.page_size = SIZE_4K;
-	plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
-			      mem->dma_nents, NULL, iser_set_page);
-	if (unlikely(plen < mem->dma_nents)) {
-		iser_err("page vec too short to hold this SG\n");
-		iser_data_buf_dump(mem, device->ib_device);
-		iser_dump_page_vec(page_vec);
-		return -EINVAL;
-	}
-
-	fmr  = ib_fmr_pool_map_phys(fmr_pool, page_vec->pages,
-				    page_vec->npages, page_vec->pages[0]);
-	if (IS_ERR(fmr)) {
-		ret = PTR_ERR(fmr);
-		iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
-		return ret;
-	}
-
-	reg->sge.lkey = fmr->fmr->lkey;
-	reg->rkey = fmr->fmr->rkey;
-	reg->sge.addr = page_vec->fake_mr.iova;
-	reg->sge.length = page_vec->fake_mr.length;
-	reg->mem_h = fmr;
-
-	iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
-		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
-		 reg->sge.addr, reg->sge.length);
-
-	return 0;
-}
-
-/**
- * Unregister (previosuly registered using FMR) memory.
- * If memory is non-FMR does nothing.
- */
-void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
-			enum iser_data_dir cmd_dir)
-{
-	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
-	int ret;
-
-	if (!reg->mem_h)
-		return;
-
-	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);
-
-	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
-	if (ret)
-		iser_err("ib_fmr_pool_unmap failed %d\n", ret);
-
-	reg->mem_h = NULL;
-}
-
 void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
 			    enum iser_data_dir cmd_dir)
 {
-	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
 	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
+	struct iser_fr_desc *desc;
+	struct ib_mr_status mr_status;
 
-	if (!reg->mem_h)
+	desc = reg->mem_h;
+	if (!desc)
 		return;
 
-	device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
-				     reg->mem_h);
+	/*
+	 * The signature MR cannot be invalidated and reused without checking.
+	 * libiscsi calls the check_protection transport handler only if
+	 * SCSI-Response is received. And the signature MR is not checked if
+	 * the task is completed for some other reason like a timeout or error
+	 * handling. That's why we must check the signature MR here before
+	 * putting it to the free pool.
+	 */
+	if (unlikely(desc->sig_protected)) {
+		desc->sig_protected = false;
+		ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
+				   &mr_status);
+	}
+	iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->mem_h);
 	reg->mem_h = NULL;
 }
 
 static void
-iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
-		    struct ib_sig_domain *domain)
+iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
 {
 	domain->sig_type = IB_SIG_TYPE_T10_DIF;
 	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
@@ -330,21 +178,21 @@
 	case SCSI_PROT_WRITE_INSERT:
 	case SCSI_PROT_READ_STRIP:
 		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
-		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
+		iser_set_dif_domain(sc, &sig_attrs->wire);
 		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
 		break;
 	case SCSI_PROT_READ_INSERT:
 	case SCSI_PROT_WRITE_STRIP:
 		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
-		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
+		iser_set_dif_domain(sc, &sig_attrs->mem);
 		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
 						IB_T10DIF_CSUM : IB_T10DIF_CRC;
 		break;
 	case SCSI_PROT_READ_PASS:
 	case SCSI_PROT_WRITE_PASS:
-		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
+		iser_set_dif_domain(sc, &sig_attrs->wire);
 		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
-		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
+		iser_set_dif_domain(sc, &sig_attrs->mem);
 		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
 						IB_T10DIF_CSUM : IB_T10DIF_CRC;
 		break;
@@ -370,27 +218,29 @@
 static inline void
 iser_inv_rkey(struct ib_send_wr *inv_wr,
 	      struct ib_mr *mr,
-	      struct ib_cqe *cqe)
+	      struct ib_cqe *cqe,
+	      struct ib_send_wr *next_wr)
 {
 	inv_wr->opcode = IB_WR_LOCAL_INV;
 	inv_wr->wr_cqe = cqe;
 	inv_wr->ex.invalidate_rkey = mr->rkey;
 	inv_wr->send_flags = 0;
 	inv_wr->num_sge = 0;
+	inv_wr->next = next_wr;
 }
 
 static int
 iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
-		struct iser_pi_context *pi_ctx,
-		struct iser_mem_reg *data_reg,
-		struct iser_mem_reg *prot_reg,
+		struct iser_data_buf *mem,
+		struct iser_data_buf *sig_mem,
+		struct iser_reg_resources *rsc,
 		struct iser_mem_reg *sig_reg)
 {
 	struct iser_tx_desc *tx_desc = &iser_task->desc;
-	struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
 	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
-	struct ib_sig_handover_wr *wr;
-	struct ib_mr *mr = pi_ctx->sig_mr;
+	struct ib_mr *mr = rsc->sig_mr;
+	struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
+	struct ib_reg_wr *wr = &tx_desc->reg_wr;
 	int ret;
 
 	memset(sig_attrs, 0, sizeof(*sig_attrs));
@@ -400,33 +250,36 @@
 
 	iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
 
-	if (pi_ctx->sig_mr_valid)
-		iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe);
+	if (rsc->mr_valid)
+		iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
 
 	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 
-	wr = container_of(iser_tx_next_wr(tx_desc), struct ib_sig_handover_wr,
-			  wr);
-	wr->wr.opcode = IB_WR_REG_SIG_MR;
+	ret = ib_map_mr_sg_pi(mr, mem->sg, mem->dma_nents, NULL,
+			      sig_mem->sg, sig_mem->dma_nents, NULL, SZ_4K);
+	if (unlikely(ret)) {
+		iser_err("failed to map PI sg (%d)\n",
+			 mem->dma_nents + sig_mem->dma_nents);
+		goto err;
+	}
+
+	memset(wr, 0, sizeof(*wr));
+	wr->wr.next = &tx_desc->send_wr;
+	wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
 	wr->wr.wr_cqe = cqe;
-	wr->wr.sg_list = &data_reg->sge;
-	wr->wr.num_sge = 1;
+	wr->wr.num_sge = 0;
 	wr->wr.send_flags = 0;
-	wr->sig_attrs = sig_attrs;
-	wr->sig_mr = mr;
-	if (scsi_prot_sg_count(iser_task->sc))
-		wr->prot = &prot_reg->sge;
-	else
-		wr->prot = NULL;
-	wr->access_flags = IB_ACCESS_LOCAL_WRITE |
-			   IB_ACCESS_REMOTE_READ |
-			   IB_ACCESS_REMOTE_WRITE;
-	pi_ctx->sig_mr_valid = 1;
+	wr->mr = mr;
+	wr->key = mr->rkey;
+	wr->access = IB_ACCESS_LOCAL_WRITE |
+		     IB_ACCESS_REMOTE_READ |
+		     IB_ACCESS_REMOTE_WRITE;
+	rsc->mr_valid = 1;
 
 	sig_reg->sge.lkey = mr->lkey;
 	sig_reg->rkey = mr->rkey;
-	sig_reg->sge.addr = 0;
-	sig_reg->sge.length = scsi_transfer_length(iser_task->sc);
+	sig_reg->sge.addr = mr->iova;
+	sig_reg->sge.length = mr->length;
 
 	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
 		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
@@ -443,22 +296,22 @@
 	struct iser_tx_desc *tx_desc = &iser_task->desc;
 	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
 	struct ib_mr *mr = rsc->mr;
-	struct ib_reg_wr *wr;
+	struct ib_reg_wr *wr = &tx_desc->reg_wr;
 	int n;
 
 	if (rsc->mr_valid)
-		iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe);
+		iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
 
 	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 
-	n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K);
+	n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SZ_4K);
 	if (unlikely(n != mem->dma_nents)) {
 		iser_err("failed to map sg (%d/%d)\n",
 			 n, mem->dma_nents);
 		return n < 0 ? n : -EINVAL;
 	}
 
-	wr = container_of(iser_tx_next_wr(tx_desc), struct ib_reg_wr, wr);
+	wr->wr.next = &tx_desc->send_wr;
 	wr->wr.opcode = IB_WR_REG_MR;
 	wr->wr.wr_cqe = cqe;
 	wr->wr.send_flags = 0;
@@ -483,21 +336,6 @@
 }
 
 static int
-iser_reg_prot_sg(struct iscsi_iser_task *task,
-		 struct iser_data_buf *mem,
-		 struct iser_fr_desc *desc,
-		 bool use_dma_key,
-		 struct iser_mem_reg *reg)
-{
-	struct iser_device *device = task->iser_conn->ib_conn.device;
-
-	if (use_dma_key)
-		return iser_reg_dma(device, mem, reg);
-
-	return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
-}
-
-static int
 iser_reg_data_sg(struct iscsi_iser_task *task,
 		 struct iser_data_buf *mem,
 		 struct iser_fr_desc *desc,
@@ -509,18 +347,16 @@
 	if (use_dma_key)
 		return iser_reg_dma(device, mem, reg);
 
-	return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
+	return iser_fast_reg_mr(task, mem, &desc->rsc, reg);
 }
 
-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
-		      enum iser_data_dir dir,
-		      bool all_imm)
+int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
+			 enum iser_data_dir dir,
+			 bool all_imm)
 {
 	struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
-	struct iser_device *device = ib_conn->device;
 	struct iser_data_buf *mem = &task->data[dir];
 	struct iser_mem_reg *reg = &task->rdma_reg[dir];
-	struct iser_mem_reg *data_reg;
 	struct iser_fr_desc *desc = NULL;
 	bool use_dma_key;
 	int err;
@@ -529,51 +365,29 @@
 		      scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;
 
 	if (!use_dma_key) {
-		desc = device->reg_ops->reg_desc_get(ib_conn);
+		desc = iser_reg_desc_get_fr(ib_conn);
 		reg->mem_h = desc;
 	}
 
-	if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL)
-		data_reg = reg;
-	else
-		data_reg = &task->desc.data_reg;
-
-	err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
-	if (unlikely(err))
-		goto err_reg;
-
-	if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
-		struct iser_mem_reg *prot_reg = &task->desc.prot_reg;
-
-		if (scsi_prot_sg_count(task->sc)) {
-			mem = &task->prot[dir];
-			err = iser_reg_prot_sg(task, mem, desc,
-					       use_dma_key, prot_reg);
-			if (unlikely(err))
-				goto err_reg;
-		}
-
-		err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg,
-				      prot_reg, reg);
+	if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) {
+		err = iser_reg_data_sg(task, mem, desc, use_dma_key, reg);
+		if (unlikely(err))
+			goto err_reg;
+	} else {
+		err = iser_reg_sig_mr(task, mem, &task->prot[dir],
+				      &desc->rsc, reg);
 		if (unlikely(err))
 			goto err_reg;
 
-		desc->pi_ctx->sig_protected = 1;
+		desc->sig_protected = true;
 	}
 
 	return 0;
 
 err_reg:
 	if (desc)
-		device->reg_ops->reg_desc_put(ib_conn, desc);
+		iser_reg_desc_put_fr(ib_conn, desc);
 
 	return err;
 }
 
-void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
-			 enum iser_data_dir dir)
-{
-	struct iser_device *device = task->iser_conn->ib_conn.device;
-
-	device->reg_ops->unreg_mem(task, dir);
-}

--
Gitblit v1.6.2