From 95099d4622f8cb224d94e314c7a8e0df60b13f87 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 08:38:01 +0000
Subject: [PATCH] scsi: virtio_scsi: backport upstream driver updates

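Backport of upstream virtio_scsi changes:

- replace the license boilerplate with an SPDX tag
- drop the per-target queue state and manual queue steering; blk-mq
  now picks the hardware queue, and map_queues uses the qmap-based
  blk_mq_virtio_map_queues() signature
- batch virtqueue notifications: queuecommand kicks only on SCMD_LAST
  and the new .commit_rqs callback flushes partial batches
- rescan for hot-unplugged LUNs when events were missed, and rescan
  the whole target when a transport-reset event addresses LUN 0
- drop bidirectional command handling, which the SCSI core removed
- use __virtio_native_type() in the config accessors, struct_size()
  for the host allocation, clamp num_queues to nr_cpu_ids, and rely
  on NULL-safe mempool/kmem_cache destroy in the init error path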
---
 kernel/drivers/scsi/virtio_scsi.c |  205 +++++++++++++++++++++++++-------------------------
 1 file changed, 103 insertions(+), 102 deletions(-)

diff --git a/kernel/drivers/scsi/virtio_scsi.c b/kernel/drivers/scsi/virtio_scsi.c
index 50e8782..6dac58a 100644
--- a/kernel/drivers/scsi/virtio_scsi.c
+++ b/kernel/drivers/scsi/virtio_scsi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Virtio SCSI HBA driver
  *
@@ -7,10 +8,6 @@
  * Authors:
  *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
  *  Paolo Bonzini   <pbonzini@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -32,6 +29,8 @@
 #include <scsi/scsi_devinfo.h>
 #include <linux/seqlock.h>
 #include <linux/blk-mq-virtio.h>
+
+#include "sd.h"
 
 #define VIRTIO_SCSI_MEMPOOL_SZ 64
 #define VIRTIO_SCSI_EVENT_LEN 8
@@ -68,33 +67,6 @@
 	struct virtqueue *vq;
 };
 
-/*
- * Per-target queue state.
- *
- * This struct holds the data needed by the queue steering policy.  When a
- * target is sent multiple requests, we need to drive them to the same queue so
- * that FIFO processing order is kept.  However, if a target was idle, we can
- * choose a queue arbitrarily.  In this case the queue is chosen according to
- * the current VCPU, so the driver expects the number of request queues to be
- * equal to the number of VCPUs.  This makes it easy and fast to select the
- * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
- * (each virtqueue's affinity is set to the CPU that "owns" the queue).
- *
- * tgt_seq is held to serialize reading and writing req_vq.
- *
- * Decrements of reqs are never concurrent with writes of req_vq: before the
- * decrement reqs will be != 0; after the decrement the virtqueue completion
- * routine will not use the req_vq so it can be changed by a new request.
- * Thus they can happen outside the tgt_seq, provided of course we make reqs
- * an atomic_t.
- */
-struct virtio_scsi_target_state {
-	seqcount_t tgt_seq;
-
-	/* Currently active virtqueue for requests sent to this target. */
-	struct virtio_scsi_vq *req_vq;
-};
-
 /* Driver instance state */
 struct virtio_scsi {
 	struct virtio_device *vdev;
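
With the per-target steering state gone, queue selection is left to
blk-mq: each hardware context maps 1:1 onto a request virtqueue, so
per-queue FIFO ordering still holds without the seqcount dance. The
driver's existing virtscsi_pick_vq_mq() helper (not part of this diff;
reproduced here as a sketch of the upstream code) reduces to a
tag-to-hwq lookup:

/* Sketch of the helper this patch relies on: blk-mq has already
 * chosen a hardware queue, so just translate that choice into the
 * matching virtqueue. */
static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
						  struct scsi_cmnd *sc)
{
	u32 tag = blk_mq_unique_tag(sc->request);
	u16 hwq = blk_mq_unique_tag_to_hwq(tag);

	return &vscsi->req_vqs[hwq];
}
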
@@ -103,9 +75,6 @@
 	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
 
 	u32 num_queues;
-
-	/* If the affinity hint is set for virtqueues */
-	bool affinity_hint_set;
 
 	struct hlist_node node;
 
@@ -127,19 +96,11 @@
 
 static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
 {
-	if (!resid)
-		return;
-
-	if (!scsi_bidi_cmnd(sc)) {
+	if (resid)
 		scsi_set_resid(sc, resid);
-		return;
-	}
-
-	scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
-	scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
 }
 
-/**
+/*
  * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
  *
  * Called with vq_lock held.
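
(Both the resid handling here and the scatterlist selection further
down follow from the SCSI core dropping bidirectional command support:
scsi_bidi_cmnd(), scsi_in() and scsi_out() are gone, and a command's
single data buffer is reached through sc->sdb. The comment above
virtscsi_complete_cmd() also drops from kernel-doc /** to a plain /*
since it never documented the function's parameters.)
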
@@ -187,7 +148,7 @@
 	default:
 		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
 			    resp->response);
-		/* fall through */
+		fallthrough;
 	case VIRTIO_SCSI_S_FAILURE:
 		set_host_byte(sc, DID_ERROR);
 		break;
@@ -323,7 +284,12 @@
 
 	switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
 	case VIRTIO_SCSI_EVT_RESET_RESCAN:
-		scsi_add_device(shost, 0, target, lun);
+		if (lun == 0) {
+			scsi_scan_target(&shost->shost_gendev, 0, target,
+					 SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
+		} else {
+			scsi_add_device(shost, 0, target, lun);
+		}
 		break;
 	case VIRTIO_SCSI_EVT_RESET_REMOVED:
 		sdev = scsi_device_lookup(shost, 0, target, lun);
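
(Rescanning the whole target when the event addresses LUN 0 matches
how SCSI discovery works: peer LUNs are found by sending REPORT LUNS
to LUN 0, so when LUN 0 (re)appears only a SCAN_WILD_CARD pass over
the target will pick up its siblings.)
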
@@ -365,6 +331,44 @@
 	scsi_device_put(sdev);
 }
 
+static void virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
+{
+	struct scsi_device *sdev;
+	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
+	int result, inquiry_len, inq_result_len = 256;
+	char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);
+
+	shost_for_each_device(sdev, shost) {
+		inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
+
+		memset(scsi_cmd, 0, sizeof(scsi_cmd));
+		scsi_cmd[0] = INQUIRY;
+		scsi_cmd[4] = (unsigned char) inquiry_len;
+
+		memset(inq_result, 0, inq_result_len);
+
+		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
+					  inq_result, inquiry_len, NULL,
+					  SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+
+		if (result == 0 && inq_result[0] >> 5) {
+			/* PQ indicates the LUN is not attached */
+			scsi_remove_device(sdev);
+		} else if (host_byte(result) == DID_BAD_TARGET) {
+			/*
+			 * If all LUNs of a virtio-scsi device are unplugged
+			 * it will respond with BAD TARGET on any INQUIRY
+			 * command.
+			 * Remove the device in this case as well.
+			 */
+			scsi_remove_device(sdev);
+		}
+	}
+
+	kfree(inq_result);
+}
+
 static void virtscsi_handle_event(struct work_struct *work)
 {
 	struct virtio_scsi_event_node *event_node =
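
The inq_result[0] >> 5 test reads the peripheral qualifier (PQ), bits
7..5 of byte 0 of standard INQUIRY data: PQ 0 means a device is
attached at the addressed LUN, any nonzero value means it is not. A
minimal helper expressing the same check (illustrative only, not part
of the patch):

/* Byte 0 of standard INQUIRY data is PQ (bits 7..5) | PDT (bits 4..0).
 * PQ == 0: a device is connected at the addressed LUN.
 * PQ != 0 (e.g. 011b "not supported"): nothing is attached there. */
static inline bool inquiry_lun_attached(const unsigned char *inq)
{
	return (inq[0] >> 5) == 0;
}
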
@@ -376,6 +380,7 @@
 	    cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
 		event->event &= ~cpu_to_virtio32(vscsi->vdev,
 						   VIRTIO_SCSI_T_EVENTS_MISSED);
+		virtscsi_rescan_hotunplug(vscsi);
 		scsi_scan_host(virtio_scsi_host(vscsi->vdev));
 	}
 
@@ -410,14 +415,7 @@
 	virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
 };
 
-/**
- * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
- * @vq		: the struct virtqueue we're talking about
- * @cmd		: command structure
- * @req_size	: size of the request buffer
- * @resp_size	: size of the response buffer
- */
-static int virtscsi_add_cmd(struct virtqueue *vq,
+static int __virtscsi_add_cmd(struct virtqueue *vq,
 			    struct virtio_scsi_cmd *cmd,
 			    size_t req_size, size_t resp_size)
 {
@@ -430,9 +428,9 @@
 
 	if (sc && sc->sc_data_direction != DMA_NONE) {
 		if (sc->sc_data_direction != DMA_FROM_DEVICE)
-			out = &scsi_out(sc)->table;
+			out = &sc->sdb.table;
 		if (sc->sc_data_direction != DMA_TO_DEVICE)
-			in = &scsi_in(sc)->table;
+			in = &sc->sdb.table;
 	}
 
 	/* Request header.  */
@@ -462,17 +460,39 @@
 	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
 }
 
-static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
+static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
+{
+	bool needs_kick;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vq->vq_lock, flags);
+	needs_kick = virtqueue_kick_prepare(vq->vq);
+	spin_unlock_irqrestore(&vq->vq_lock, flags);
+
+	if (needs_kick)
+		virtqueue_notify(vq->vq);
+}
+
+/**
+ * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
+ * @vq		: the struct virtqueue we're talking about
+ * @cmd		: command structure
+ * @req_size	: size of the request buffer
+ * @resp_size	: size of the response buffer
+ * @kick	: whether to kick the virtqueue immediately
+ */
+static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
 			     struct virtio_scsi_cmd *cmd,
-			     size_t req_size, size_t resp_size)
+			     size_t req_size, size_t resp_size,
+			     bool kick)
 {
 	unsigned long flags;
 	int err;
 	bool needs_kick = false;
 
 	spin_lock_irqsave(&vq->vq_lock, flags);
-	err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
-	if (!err)
+	err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
+	if (!err && kick)
 		needs_kick = virtqueue_kick_prepare(vq->vq);
 
 	spin_unlock_irqrestore(&vq->vq_lock, flags);
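
The rework separates enqueuing (__virtscsi_add_cmd(), under vq_lock)
from notification (virtscsi_kick_vq(), outside the lock, since
virtqueue_notify() may trap to the host). That lets callers batch
several submissions behind one doorbell. A hypothetical caller
queueing two commands (error handling omitted):

/* Illustrative usage, not from the patch: enqueue twice, notify once. */
virtscsi_add_cmd(vq, cmd1, req_size, resp_size, false);	/* no kick */
virtscsi_add_cmd(vq, cmd2, req_size, resp_size, false);	/* no kick */
virtscsi_kick_vq(vq);	/* one kick_prepare()/notify() for the batch */
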
@@ -537,6 +557,7 @@
 	struct virtio_scsi *vscsi = shost_priv(shost);
 	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
 	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+	bool kick;
 	unsigned long flags;
 	int req_size;
 	int ret;
@@ -566,7 +587,8 @@
 		req_size = sizeof(cmd->req.cmd);
 	}
 
-	ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+	kick = (sc->flags & SCMD_LAST) != 0;
+	ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
 	if (ret == -EIO) {
 		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
 		spin_lock_irqsave(&req_vq->vq_lock, flags);
@@ -584,8 +606,8 @@
 	int ret = FAILED;
 
 	cmd->comp = &comp;
-	if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
-			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)
+	if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
+			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
 		goto out;
 
 	wait_for_completion(&comp);
@@ -691,34 +713,19 @@
 	return virtscsi_tmf(vscsi, cmd);
 }
 
-static int virtscsi_target_alloc(struct scsi_target *starget)
-{
-	struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
-	struct virtio_scsi *vscsi = shost_priv(sh);
-
-	struct virtio_scsi_target_state *tgt =
-				kmalloc(sizeof(*tgt), GFP_KERNEL);
-	if (!tgt)
-		return -ENOMEM;
-
-	seqcount_init(&tgt->tgt_seq);
-	tgt->req_vq = &vscsi->req_vqs[0];
-
-	starget->hostdata = tgt;
-	return 0;
-}
-
-static void virtscsi_target_destroy(struct scsi_target *starget)
-{
-	struct virtio_scsi_target_state *tgt = starget->hostdata;
-	kfree(tgt);
-}
-
 static int virtscsi_map_queues(struct Scsi_Host *shost)
 {
 	struct virtio_scsi *vscsi = shost_priv(shost);
+	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
 
-	return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2);
+	return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
+}
+
+static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
+{
+	struct virtio_scsi *vscsi = shost_priv(shost);
+
+	virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
 }
 
 /*
@@ -738,6 +745,7 @@
 	.this_id = -1,
 	.cmd_size = sizeof(struct virtio_scsi_cmd),
 	.queuecommand = virtscsi_queuecommand,
+	.commit_rqs = virtscsi_commit_rqs,
 	.change_queue_depth = virtscsi_change_queue_depth,
 	.eh_abort_handler = virtscsi_abort,
 	.eh_device_reset_handler = virtscsi_device_reset,
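
Together with the SCMD_LAST check in virtscsi_queuecommand() above,
this wires the driver into blk-mq's doorbell batching:

/*
 * blk-mq batching contract (illustrative summary, not from the patch):
 *
 * - queuecommand() kicks the virtqueue only when SCMD_LAST is set,
 *   i.e. for the final request of a dispatch batch (bd->last);
 * - if blk-mq ends a batch without such a request reaching the
 *   driver (e.g. resource exhaustion), it calls .commit_rqs() on the
 *   hardware queue, and virtscsi_commit_rqs() rings the doorbell for
 *   the matching request virtqueue so no submission is left unkicked.
 */
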
@@ -745,24 +753,20 @@
 	.slave_alloc = virtscsi_device_alloc,
 
 	.dma_boundary = UINT_MAX,
-	.use_clustering = ENABLE_CLUSTERING,
-	.target_alloc = virtscsi_target_alloc,
-	.target_destroy = virtscsi_target_destroy,
 	.map_queues = virtscsi_map_queues,
 	.track_queue_depth = 1,
-	.force_blk_mq = 1,
 };
 
 #define virtscsi_config_get(vdev, fld) \
 	({ \
-		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
+		__virtio_native_type(struct virtio_scsi_config, fld) __val; \
 		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
 		__val; \
 	})
 
 #define virtscsi_config_set(vdev, fld, val) \
 	do { \
-		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
+		__virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
 		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
 	} while(0)
 
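__virtio_native_type() (from <linux/virtio_config.h>) maps a config
field's on-the-wire type (e.g. __virtio32) to the matching
native-endian type, which the old typeof() trick cannot do once the
struct fields carry endianness annotations. Usage is unchanged, e.g.:

/* Reads the device's num_queues config field into a native u32. */
u32 num_queues = virtscsi_config_get(vdev, num_queues);
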
@@ -853,11 +857,12 @@
 
 	/* We need to know how many queues before we allocate. */
 	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
+	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
 
 	num_targets = virtscsi_config_get(vdev, max_target) + 1;
 
 	shost = scsi_host_alloc(&virtscsi_host_template,
-		sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
+				struct_size(vscsi, req_vqs, num_queues));
 	if (!shost)
 		return -ENOMEM;
 
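(struct_size() from <linux/overflow.h> computes sizeof(*vscsi) +
num_queues * sizeof(vscsi->req_vqs[0]) for the trailing array and
saturates to SIZE_MAX on overflow, so an absurd num_queues makes the
allocation fail rather than come back undersized. The min_t() clamp
above it caps the queue count at nr_cpu_ids, since request virtqueues
beyond the CPU count buy nothing.)
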
@@ -1010,14 +1015,10 @@
 	return 0;
 
 error:
-	if (virtscsi_cmd_pool) {
-		mempool_destroy(virtscsi_cmd_pool);
-		virtscsi_cmd_pool = NULL;
-	}
-	if (virtscsi_cmd_cache) {
-		kmem_cache_destroy(virtscsi_cmd_cache);
-		virtscsi_cmd_cache = NULL;
-	}
+	mempool_destroy(virtscsi_cmd_pool);
+	virtscsi_cmd_pool = NULL;
+	kmem_cache_destroy(virtscsi_cmd_cache);
+	virtscsi_cmd_cache = NULL;
 	return ret;
 }
 
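(Both mempool_destroy() and kmem_cache_destroy() are no-ops on a NULL
pointer, so the guards around them were redundant.)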

--
Gitblit v1.6.2