2024-01-31 f9004dbfff8a3fbbd7e2a88c8a4327c7f2f8e5b2
kernel/drivers/vhost/scsi.c
@@ -52,7 +52,6 @@
 #define VHOST_SCSI_VERSION "v0.1"
 #define VHOST_SCSI_NAMELEN 256
 #define VHOST_SCSI_MAX_CDB_SIZE 32
-#define VHOST_SCSI_DEFAULT_TAGS 256
 #define VHOST_SCSI_PREALLOC_SGLS 2048
 #define VHOST_SCSI_PREALLOC_UPAGES 2048
 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
@@ -140,6 +139,7 @@
 	struct se_portal_group se_tpg;
 	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
 	struct vhost_scsi *vhost_scsi;
+	struct list_head tmf_queue;
 };
 
 struct vhost_scsi_tport {
@@ -189,6 +189,9 @@
 	 * Writers must also take dev mutex and flush under it.
 	 */
 	int inflight_idx;
+	struct vhost_scsi_cmd *scsi_cmds;
+	struct sbitmap scsi_tags;
+	int max_cmds;
 };
 
 struct vhost_scsi {
@@ -207,6 +210,34 @@
 
 	bool vs_events_missed; /* any missed events, protected by vq->mutex */
 	int vs_events_nr; /* num of pending events, protected by vq->mutex */
+};
+
+struct vhost_scsi_tmf {
+	struct vhost_work vwork;
+	struct vhost_scsi_tpg *tpg;
+	struct vhost_scsi *vhost;
+	struct vhost_scsi_virtqueue *svq;
+	struct list_head queue_entry;
+
+	struct se_cmd se_cmd;
+	u8 scsi_resp;
+	struct vhost_scsi_inflight *inflight;
+	struct iovec resp_iov;
+	int in_iovs;
+	int vq_desc;
+};
+
+/*
+ * Context for processing request and control queue operations.
+ */
+struct vhost_scsi_ctx {
+	int head;
+	unsigned int out, in;
+	size_t req_size, rsp_size;
+	size_t out_size, in_size;
+	u8 *target, *lunp;
+	void *req;
+	struct iov_iter out_iter;
 };
 
 static struct workqueue_struct *vhost_scsi_workqueue;
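
The two structs added above move state out of the I/O path: struct vhost_scsi_tmf is preallocated and parked on tpg->tmf_queue so a LUN RESET never allocates in the request handler, and struct vhost_scsi_ctx gathers the per-descriptor parsing state that the queue handlers below thread through vhost_scsi_get_desc()/vhost_scsi_chk_size()/vhost_scsi_get_req(). A minimal userspace sketch of the ctx-plus-size-check idea (struct layout and the header sizes are illustrative, not the kernel API):

	#include <stdio.h>
	#include <stddef.h>

	struct ctx {
		size_t req_size, rsp_size;	/* expected header sizes */
		size_t out_size, in_size;	/* what the guest actually posted */
	};

	/* Mirrors the vhost_scsi_chk_size() contract: reject undersized buffers early. */
	static int chk_size(const struct ctx *vc)
	{
		if (vc->in_size < vc->rsp_size)
			return -22;	/* cf. -EINVAL: no room even for a response */
		if (vc->out_size < vc->req_size)
			return -5;	/* cf. -EIO: respond with bad target */
		return 0;
	}

	int main(void)
	{
		/* A WRITE whose 4096 payload bytes ride behind an assumed 51-byte header. */
		struct ctx vc = { .req_size = 51, .rsp_size = 108,
				  .out_size = 51 + 4096, .in_size = 108 };
		printf("chk_size = %d\n", chk_size(&vc));	/* prints 0 */
		return 0;
	}
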
@@ -278,11 +309,6 @@
 	return 0;
 }
 
-static char *vhost_scsi_get_fabric_name(void)
-{
-	return "vhost";
-}
-
 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
@@ -312,11 +338,13 @@
 	return 1;
 }
 
-static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
+static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
 {
 	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
 				struct vhost_scsi_cmd, tvc_se_cmd);
-	struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
+	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
+				struct vhost_scsi_virtqueue, vq);
+	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
 	int i;
 
 	if (tv_cmd->tvc_sgl_count) {
@@ -328,8 +356,36 @@
 		put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
 	}
 
-	vhost_scsi_put_inflight(tv_cmd->inflight);
-	target_free_tag(se_sess, se_cmd);
+	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
+	vhost_scsi_put_inflight(inflight);
+}
+
+static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
+{
+	struct vhost_scsi_tpg *tpg = tmf->tpg;
+	struct vhost_scsi_inflight *inflight = tmf->inflight;
+
+	mutex_lock(&tpg->tv_tpg_mutex);
+	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
+	mutex_unlock(&tpg->tv_tpg_mutex);
+	vhost_scsi_put_inflight(inflight);
+}
+
+static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
+{
+	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
+				struct vhost_scsi_tmf, se_cmd);
+
+		vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
+	} else {
+		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+					struct vhost_scsi_cmd, tvc_se_cmd);
+		struct vhost_scsi *vs = cmd->tvc_vhost;
+
+		llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
+		vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+	}
 }
 
 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
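
Note the split above: vhost_scsi_release_cmd_res() frees a command's resources, while the vhost_scsi_release_cmd() fabric callback only decides whose completion path to queue, using SCF_SCSI_TMR_CDB to recover the enclosing wrapper from the embedded se_cmd. A self-contained sketch of that container_of() dispatch (the struct names below are invented for illustration):

	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct se_cmd { unsigned flags; };
	struct scsi_cmd { int tag;  struct se_cmd se_cmd; };	/* cf. vhost_scsi_cmd */
	struct scsi_tmf { int resp; struct se_cmd se_cmd; };	/* cf. vhost_scsi_tmf */

	#define FLAG_TMR 0x1	/* stands in for SCF_SCSI_TMR_CDB */

	static void release(struct se_cmd *se_cmd)
	{
		if (se_cmd->flags & FLAG_TMR) {
			struct scsi_tmf *tmf = container_of(se_cmd, struct scsi_tmf, se_cmd);
			printf("queue TMF response work, resp=%d\n", tmf->resp);
		} else {
			struct scsi_cmd *cmd = container_of(se_cmd, struct scsi_cmd, se_cmd);
			printf("add cmd tag=%d to completion list\n", cmd->tag);
		}
	}

	int main(void)
	{
		struct scsi_cmd cmd = { .tag = 7, .se_cmd = { .flags = 0 } };
		struct scsi_tmf tmf = { .resp = 0, .se_cmd = { .flags = FLAG_TMR } };
		release(&cmd.se_cmd);	/* regular command path */
		release(&tmf.se_cmd);	/* TMF path */
		return 0;
	}
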
@@ -344,11 +400,6 @@
 	return 0;
 }
 
-static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
-{
-	return 0;
-}
-
 static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
 {
 	return;
@@ -359,34 +410,25 @@
 	return 0;
 }
 
-static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
-{
-	struct vhost_scsi *vs = cmd->tvc_vhost;
-
-	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
-
-	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
-}
-
 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
 {
-	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
-				struct vhost_scsi_cmd, tvc_se_cmd);
-	vhost_scsi_complete_cmd(cmd);
+	transport_generic_free_cmd(se_cmd, 0);
 	return 0;
 }
 
 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
 {
-	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
-				struct vhost_scsi_cmd, tvc_se_cmd);
-	vhost_scsi_complete_cmd(cmd);
+	transport_generic_free_cmd(se_cmd, 0);
 	return 0;
 }
 
 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
 {
-	return;
+	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
+						  se_cmd);
+
+	tmf->scsi_resp = se_cmd->se_tmr_req->response;
+	transport_generic_free_cmd(&tmf->se_cmd, 0);
 }
 
 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
@@ -426,15 +468,6 @@
 	return evt;
 }
 
-static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
-{
-	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
-
-	/* TODO locking against target/backend threads? */
-	transport_generic_free_cmd(se_cmd, 0);
-
-}
-
 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
 {
 	return target_put_sess_cmd(se_cmd);
@@ -449,7 +482,7 @@
 	unsigned out, in;
 	int head, ret;
 
-	if (!vq->private_data) {
+	if (!vhost_vq_get_backend(vq)) {
 		vs->vs_events_missed = true;
 		return;
 	}
@@ -553,7 +586,7 @@
 		} else
 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
 
-		vhost_scsi_free_cmd(cmd);
+		vhost_scsi_release_cmd_res(se_cmd);
 	}
 
 	vq = -1;
@@ -563,31 +596,31 @@
 }
 
 static struct vhost_scsi_cmd *
-vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
+vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
 		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
 		   u32 exp_data_len, int data_direction)
 {
+	struct vhost_scsi_virtqueue *svq = container_of(vq,
+				struct vhost_scsi_virtqueue, vq);
 	struct vhost_scsi_cmd *cmd;
 	struct vhost_scsi_nexus *tv_nexus;
-	struct se_session *se_sess;
 	struct scatterlist *sg, *prot_sg;
 	struct page **pages;
-	int tag, cpu;
+	int tag;
 
 	tv_nexus = tpg->tpg_nexus;
 	if (!tv_nexus) {
 		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
 		return ERR_PTR(-EIO);
 	}
-	se_sess = tv_nexus->tvn_se_sess;
 
-	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+	tag = sbitmap_get(&svq->scsi_tags, 0, false);
 	if (tag < 0) {
 		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
-	cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
+	cmd = &svq->scsi_cmds[tag];
 	sg = cmd->tvc_sgl;
 	prot_sg = cmd->tvc_prot_sgl;
 	pages = cmd->tvc_upages;
@@ -596,7 +629,6 @@
 	cmd->tvc_prot_sgl = prot_sg;
 	cmd->tvc_upages = pages;
 	cmd->tvc_se_cmd.map_tag = tag;
-	cmd->tvc_se_cmd.map_cpu = cpu;
 	cmd->tvc_tag = scsi_tag;
 	cmd->tvc_lun = lun;
 	cmd->tvc_task_attr = task_attr;
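
With the session-wide tag pool gone, each virtqueue now owns a private sbitmap sized to its command array: a set bit reserves the matching scsi_cmds[] slot in vhost_scsi_get_cmd(), and vhost_scsi_release_cmd_res() clears it. A toy userspace analogue of that allocate/clear cycle (one 64-bit word standing in for struct sbitmap):

	#include <stdio.h>
	#include <stdint.h>

	static uint64_t tags;	/* one bit per preallocated command, max 64 here */

	static int tag_get(void)
	{
		for (int i = 0; i < 64; i++) {
			if (!(tags & (1ULL << i))) {
				tags |= 1ULL << i;
				return i;	/* index into the scsi_cmds[] array */
			}
		}
		return -1;	/* queue full: maps to the ERR_PTR(-ENOMEM) path */
	}

	static void tag_put(int tag)
	{
		tags &= ~(1ULL << tag);
	}

	int main(void)
	{
		int a = tag_get(), b = tag_get();
		printf("a=%d b=%d\n", a, b);		/* a=0 b=1 */
		tag_put(a);
		printf("reused=%d\n", tag_get());	/* reused=0 */
		return 0;
	}
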
@@ -806,113 +838,176 @@
 		pr_err("Faulted on virtio_scsi_cmd_resp\n");
 }
 
+static int
+vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+		    struct vhost_scsi_ctx *vc)
+{
+	int ret = -ENXIO;
+
+	vc->head = vhost_get_vq_desc(vq, vq->iov,
+				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
+				     NULL, NULL);
+
+	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
+		 vc->head, vc->out, vc->in);
+
+	/* On error, stop handling until the next kick. */
+	if (unlikely(vc->head < 0))
+		goto done;
+
+	/* Nothing new? Wait for eventfd to tell us they refilled. */
+	if (vc->head == vq->num) {
+		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
+			vhost_disable_notify(&vs->dev, vq);
+			ret = -EAGAIN;
+		}
+		goto done;
+	}
+
+	/*
+	 * Get the size of request and response buffers.
+	 * FIXME: Not correct for BIDI operation
+	 */
+	vc->out_size = iov_length(vq->iov, vc->out);
+	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
+
+	/*
+	 * Copy over the virtio-scsi request header, which for a
+	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
+	 * single iovec may contain both the header + outgoing
+	 * WRITE payloads.
+	 *
+	 * copy_from_iter() will advance out_iter, so that it will
+	 * point at the start of the outgoing WRITE payload, if
+	 * DMA_TO_DEVICE is set.
+	 */
+	iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
+	ret = 0;
+
+done:
+	return ret;
+}
+
+static int
+vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
+{
+	if (unlikely(vc->in_size < vc->rsp_size)) {
+		vq_err(vq,
+		       "Response buf too small, need min %zu bytes got %zu",
+		       vc->rsp_size, vc->in_size);
+		return -EINVAL;
+	} else if (unlikely(vc->out_size < vc->req_size)) {
+		vq_err(vq,
+		       "Request buf too small, need min %zu bytes got %zu",
+		       vc->req_size, vc->out_size);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int
+vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
+		   struct vhost_scsi_tpg **tpgp)
+{
+	int ret = -EIO;
+
+	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
+					  &vc->out_iter))) {
+		vq_err(vq, "Faulted on copy_from_iter_full\n");
+	} else if (unlikely(*vc->lunp != 1)) {
+		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
+		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
+	} else {
+		struct vhost_scsi_tpg **vs_tpg, *tpg;
+
+		vs_tpg = vhost_vq_get_backend(vq);	/* validated at handler entry */
+
+		tpg = READ_ONCE(vs_tpg[*vc->target]);
+		if (unlikely(!tpg)) {
+			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
+		} else {
+			if (tpgp)
+				*tpgp = tpg;
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+static u16 vhost_buf_to_lun(u8 *lun_buf)
+{
+	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
+}
+
 static void
 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 {
 	struct vhost_scsi_tpg **vs_tpg, *tpg;
 	struct virtio_scsi_cmd_req v_req;
 	struct virtio_scsi_cmd_req_pi v_req_pi;
+	struct vhost_scsi_ctx vc;
 	struct vhost_scsi_cmd *cmd;
-	struct iov_iter out_iter, in_iter, prot_iter, data_iter;
+	struct iov_iter in_iter, prot_iter, data_iter;
 	u64 tag;
 	u32 exp_data_len, data_direction;
-	unsigned int out = 0, in = 0;
-	int head, ret, prot_bytes, c = 0;
-	size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
-	size_t out_size, in_size;
+	int ret, prot_bytes, c = 0;
 	u16 lun;
-	u8 *target, *lunp, task_attr;
+	u8 task_attr;
 	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
-	void *req, *cdb;
+	void *cdb;
 
 	mutex_lock(&vq->mutex);
 	/*
 	 * We can handle the vq only after the endpoint is setup by calling the
 	 * VHOST_SCSI_SET_ENDPOINT ioctl.
 	 */
-	vs_tpg = vq->private_data;
+	vs_tpg = vhost_vq_get_backend(vq);
 	if (!vs_tpg)
 		goto out;
+
+	memset(&vc, 0, sizeof(vc));
+	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
 
 	vhost_disable_notify(&vs->dev, vq);
 
 	do {
-		head = vhost_get_vq_desc(vq, vq->iov,
-					 ARRAY_SIZE(vq->iov), &out, &in,
-					 NULL, NULL);
-		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
-			 head, out, in);
-		/* On error, stop handling until the next kick. */
-		if (unlikely(head < 0))
-			break;
-		/* Nothing new? Wait for eventfd to tell us they refilled. */
-		if (head == vq->num) {
-			if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
-				vhost_disable_notify(&vs->dev, vq);
-				continue;
-			}
-			break;
-		}
-		/*
-		 * Check for a sane response buffer so we can report early
-		 * errors back to the guest.
-		 */
-		if (unlikely(vq->iov[out].iov_len < rsp_size)) {
-			vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
-				" size, got %zu bytes\n", vq->iov[out].iov_len);
-			break;
-		}
+		ret = vhost_scsi_get_desc(vs, vq, &vc);
+		if (ret)
+			goto err;
+
 		/*
 		 * Setup pointers and values based upon different virtio-scsi
 		 * request header if T10_PI is enabled in KVM guest.
 		 */
 		if (t10_pi) {
-			req = &v_req_pi;
-			req_size = sizeof(v_req_pi);
-			lunp = &v_req_pi.lun[0];
-			target = &v_req_pi.lun[1];
+			vc.req = &v_req_pi;
+			vc.req_size = sizeof(v_req_pi);
+			vc.lunp = &v_req_pi.lun[0];
+			vc.target = &v_req_pi.lun[1];
 		} else {
-			req = &v_req;
-			req_size = sizeof(v_req);
-			lunp = &v_req.lun[0];
-			target = &v_req.lun[1];
+			vc.req = &v_req;
+			vc.req_size = sizeof(v_req);
+			vc.lunp = &v_req.lun[0];
+			vc.target = &v_req.lun[1];
 		}
-		/*
-		 * FIXME: Not correct for BIDI operation
-		 */
-		out_size = iov_length(vq->iov, out);
-		in_size = iov_length(&vq->iov[out], in);
 
 		/*
-		 * Copy over the virtio-scsi request header, which for a
-		 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
-		 * single iovec may contain both the header + outgoing
-		 * WRITE payloads.
-		 *
-		 * copy_from_iter() will advance out_iter, so that it will
-		 * point at the start of the outgoing WRITE payload, if
-		 * DMA_TO_DEVICE is set.
+		 * Validate the size of request and response buffers.
+		 * Check for a sane response buffer so we can report
+		 * early errors back to the guest.
 		 */
-		iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
+		ret = vhost_scsi_chk_size(vq, &vc);
+		if (ret)
+			goto err;
 
-		if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
-			vq_err(vq, "Faulted on copy_from_iter\n");
-			vhost_scsi_send_bad_target(vs, vq, head, out);
-			continue;
-		}
-		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
-		if (unlikely(*lunp != 1)) {
-			vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
-			vhost_scsi_send_bad_target(vs, vq, head, out);
-			continue;
-		}
+		ret = vhost_scsi_get_req(vq, &vc, &tpg);
+		if (ret)
+			goto err;
 
-		tpg = READ_ONCE(vs_tpg[*target]);
-		if (unlikely(!tpg)) {
-			/* Target does not exist, fail the request */
-			vhost_scsi_send_bad_target(vs, vq, head, out);
-			continue;
-		}
+		ret = -EIO;	/* bad target on any error from here on */
+
 		/*
 		 * Determine data_direction by calculating the total outgoing
 		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
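
vhost_buf_to_lun() above decodes the 8-byte virtio-scsi LUN field: byte 0 must be 1, byte 1 selects the target, and bytes 2-3 carry the LUN in SAM flat format, whose format bits the 0x3FFF mask strips. The same bit math as a runnable userspace check:

	#include <stdio.h>
	#include <stdint.h>

	static uint16_t buf_to_lun(const uint8_t *lun_buf)
	{
		return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
	}

	int main(void)
	{
		/* Guest encodes LUN 298 as { 1, target, 0x40 | (298 >> 8), 298 & 0xff, ... } */
		uint8_t lun_buf[8] = { 1, 0, 0x40 | 0x01, 0x2a, 0, 0, 0, 0 };
		printf("lun = %u\n", buf_to_lun(lun_buf));	/* lun = 298 */
		return 0;
	}
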
@@ -930,17 +1025,17 @@
 		 */
 		prot_bytes = 0;
 
-		if (out_size > req_size) {
+		if (vc.out_size > vc.req_size) {
 			data_direction = DMA_TO_DEVICE;
-			exp_data_len = out_size - req_size;
-			data_iter = out_iter;
-		} else if (in_size > rsp_size) {
+			exp_data_len = vc.out_size - vc.req_size;
+			data_iter = vc.out_iter;
+		} else if (vc.in_size > vc.rsp_size) {
 			data_direction = DMA_FROM_DEVICE;
-			exp_data_len = in_size - rsp_size;
+			exp_data_len = vc.in_size - vc.rsp_size;
 
-			iov_iter_init(&in_iter, READ, &vq->iov[out], in,
-				      rsp_size + exp_data_len);
-			iov_iter_advance(&in_iter, rsp_size);
+			iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
+				      vc.rsp_size + exp_data_len);
+			iov_iter_advance(&in_iter, vc.rsp_size);
 			data_iter = in_iter;
 		} else {
 			data_direction = DMA_NONE;
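
The chain above infers the transfer direction purely from buffer sizes: any out bytes beyond the request header are WRITE payload, any in space beyond the response header is room for READ data, and neither means a data-less command. A compact sketch of the heuristic (the 51/108 header sizes are assumptions for illustration):

	#include <stdio.h>
	#include <stddef.h>

	enum dir { NONE, TO_DEVICE, FROM_DEVICE };	/* cf. the DMA_* constants */

	static enum dir direction(size_t out_size, size_t req_size,
				  size_t in_size, size_t rsp_size)
	{
		if (out_size > req_size)
			return TO_DEVICE;	/* WRITE: extra out bytes follow the header */
		if (in_size > rsp_size)
			return FROM_DEVICE;	/* READ: extra in space after the response */
		return NONE;			/* e.g. TEST UNIT READY */
	}

	int main(void)
	{
		printf("%d\n", direction(51 + 512, 51, 108, 108));	/* 1: WRITE */
		printf("%d\n", direction(51, 51, 108 + 512, 108));	/* 2: READ */
		printf("%d\n", direction(51, 51, 108, 108));		/* 0: no data */
		return 0;
	}
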
@@ -956,16 +1051,14 @@
 				if (data_direction != DMA_TO_DEVICE) {
 					vq_err(vq, "Received non zero pi_bytesout,"
 						" but wrong data_direction\n");
-					vhost_scsi_send_bad_target(vs, vq, head, out);
-					continue;
+					goto err;
 				}
 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
 			} else if (v_req_pi.pi_bytesin) {
 				if (data_direction != DMA_FROM_DEVICE) {
 					vq_err(vq, "Received non zero pi_bytesin,"
 						" but wrong data_direction\n");
-					vhost_scsi_send_bad_target(vs, vq, head, out);
-					continue;
+					goto err;
 				}
 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
 			}
@@ -986,12 +1079,12 @@
 			tag = vhost64_to_cpu(vq, v_req_pi.tag);
 			task_attr = v_req_pi.task_attr;
 			cdb = &v_req_pi.cdb[0];
-			lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
+			lun = vhost_buf_to_lun(v_req_pi.lun);
 		} else {
 			tag = vhost64_to_cpu(vq, v_req.tag);
 			task_attr = v_req.task_attr;
 			cdb = &v_req.cdb[0];
-			lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
+			lun = vhost_buf_to_lun(v_req.lun);
 		}
 		/*
@@ -1004,22 +1097,20 @@
 			vq_err(vq, "Received SCSI CDB with command_size: %d that"
 				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
 				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
-			vhost_scsi_send_bad_target(vs, vq, head, out);
-			continue;
+			goto err;
 		}
-		cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
+		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
 					 exp_data_len + prot_bytes,
 					 data_direction);
 		if (IS_ERR(cmd)) {
-			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
+			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
 			       PTR_ERR(cmd));
-			vhost_scsi_send_bad_target(vs, vq, head, out);
-			continue;
+			goto err;
 		}
 		cmd->tvc_vhost = vs;
 		cmd->tvc_vq = vq;
-		cmd->tvc_resp_iov = vq->iov[out];
-		cmd->tvc_in_iovs = in;
+		cmd->tvc_resp_iov = vq->iov[vc.out];
+		cmd->tvc_in_iovs = vc.in;
 
 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
 			 cmd->tvc_cdb[0], cmd->tvc_lun);
@@ -1027,14 +1118,12 @@
 			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
 
 		if (data_direction != DMA_NONE) {
-			ret = vhost_scsi_mapal(cmd,
-					prot_bytes, &prot_iter,
-					exp_data_len, &data_iter);
-			if (unlikely(ret)) {
+			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
+						      &prot_iter, exp_data_len,
+						      &data_iter))) {
 				vq_err(vq, "Failed to map iov to sgl\n");
-				vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
-				vhost_scsi_send_bad_target(vs, vq, head, out);
-				continue;
+				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
+				goto err;
 			}
 		}
 		/*
@@ -1042,7 +1131,7 @@
 		 * complete the virtio-scsi request in TCM callback context via
 		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
 		 */
-		cmd->tvc_vq_desc = head;
+		cmd->tvc_vq_desc = vc.head;
 		/*
 		 * Dispatch cmd descriptor for cmwq execution in process
 		 * context provided by vhost_scsi_workqueue. This also ensures
@@ -1051,6 +1140,241 @@
 		 */
 		INIT_WORK(&cmd->work, vhost_scsi_submission_work);
 		queue_work(vhost_scsi_workqueue, &cmd->work);
+		ret = 0;
+err:
+		/*
+		 * ENXIO:  No more requests, or read error, wait for next kick
+		 * EINVAL: Invalid response buffer, drop the request
+		 * EIO:    Respond with bad target
+		 * EAGAIN: Pending request
+		 */
+		if (ret == -ENXIO)
+			break;
+		else if (ret == -EIO)
+			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
+out:
+	mutex_unlock(&vq->mutex);
+}
+
+static void
+vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+			 int in_iovs, int vq_desc, struct iovec *resp_iov,
+			 int tmf_resp_code)
+{
+	struct virtio_scsi_ctrl_tmf_resp rsp;
+	struct iov_iter iov_iter;
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	memset(&rsp, 0, sizeof(rsp));
+	rsp.response = tmf_resp_code;
+
+	iov_iter_init(&iov_iter, READ, resp_iov, in_iovs, sizeof(rsp));
+
+	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+	if (likely(ret == sizeof(rsp)))
+		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
+	else
+		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
+}
+
+static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
+{
+	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
+						  vwork);
+	int resp_code;
+
+	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
+		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+	else
+		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+
+	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
+				 tmf->vq_desc, &tmf->resp_iov, resp_code);
+	vhost_scsi_release_tmf_res(tmf);
+}
+
+static void
+vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
+		      struct vhost_virtqueue *vq,
+		      struct virtio_scsi_ctrl_tmf_req *vtmf,
+		      struct vhost_scsi_ctx *vc)
+{
+	struct vhost_scsi_virtqueue *svq = container_of(vq,
+				struct vhost_scsi_virtqueue, vq);
+	struct vhost_scsi_tmf *tmf;
+
+	if (vhost32_to_cpu(vq, vtmf->subtype) !=
+	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
+		goto send_reject;
+
+	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
+		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
+		goto send_reject;
+	}
+
+	mutex_lock(&tpg->tv_tpg_mutex);
+	if (list_empty(&tpg->tmf_queue)) {
+		pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		goto send_reject;
+	}
+
+	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
+			       queue_entry);
+	list_del_init(&tmf->queue_entry);
+	mutex_unlock(&tpg->tv_tpg_mutex);
+
+	tmf->tpg = tpg;
+	tmf->vhost = vs;
+	tmf->svq = svq;
+	tmf->resp_iov = vq->iov[vc->out];
+	tmf->vq_desc = vc->head;
+	tmf->in_iovs = vc->in;
+	tmf->inflight = vhost_scsi_get_inflight(vq);
+
+	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
+			      vhost_buf_to_lun(vtmf->lun), NULL,
+			      TMR_LUN_RESET, GFP_KERNEL, 0,
+			      TARGET_SCF_ACK_KREF) < 0) {
+		vhost_scsi_release_tmf_res(tmf);
+		goto send_reject;
+	}
+
+	return;
+
+send_reject:
+	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
+				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
+}
+
+static void
+vhost_scsi_send_an_resp(struct vhost_scsi *vs,
+			struct vhost_virtqueue *vq,
+			struct vhost_scsi_ctx *vc)
+{
+	struct virtio_scsi_ctrl_an_resp rsp;
+	struct iov_iter iov_iter;
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
+	rsp.response = VIRTIO_SCSI_S_OK;
+
+	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
+
+	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+	if (likely(ret == sizeof(rsp)))
+		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+	else
+		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
+}
+
+static void
+vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+{
+	struct vhost_scsi_tpg *tpg;
+	union {
+		__virtio32 type;
+		struct virtio_scsi_ctrl_an_req an;
+		struct virtio_scsi_ctrl_tmf_req tmf;
+	} v_req;
+	struct vhost_scsi_ctx vc;
+	size_t typ_size;
+	int ret, c = 0;
+
+	mutex_lock(&vq->mutex);
+	/*
+	 * We can handle the vq only after the endpoint is setup by calling the
+	 * VHOST_SCSI_SET_ENDPOINT ioctl.
+	 */
+	if (!vhost_vq_get_backend(vq))
+		goto out;
+
+	memset(&vc, 0, sizeof(vc));
+
+	vhost_disable_notify(&vs->dev, vq);
+
+	do {
+		ret = vhost_scsi_get_desc(vs, vq, &vc);
+		if (ret)
+			goto err;
+
+		/*
+		 * Get the request type first in order to setup
+		 * other parameters dependent on the type.
+		 */
+		vc.req = &v_req.type;
+		typ_size = sizeof(v_req.type);
+
+		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
+						  &vc.out_iter))) {
+			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
+			/*
+			 * The size of the response buffer depends on the
+			 * request type and must be validated against it.
+			 * Since the request type is not known, don't send
+			 * a response.
+			 */
+			continue;
+		}
+
+		switch (vhost32_to_cpu(vq, v_req.type)) {
+		case VIRTIO_SCSI_T_TMF:
+			vc.req = &v_req.tmf;
+			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
+			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
+			vc.lunp = &v_req.tmf.lun[0];
+			vc.target = &v_req.tmf.lun[1];
+			break;
+		case VIRTIO_SCSI_T_AN_QUERY:
+		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
+			vc.req = &v_req.an;
+			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
+			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
+			vc.lunp = &v_req.an.lun[0];
+			vc.target = NULL;
+			break;
+		default:
+			vq_err(vq, "Unknown control request %d", v_req.type);
+			continue;
+		}
+
+		/*
+		 * Validate the size of request and response buffers.
+		 * Check for a sane response buffer so we can report
+		 * early errors back to the guest.
+		 */
+		ret = vhost_scsi_chk_size(vq, &vc);
+		if (ret)
+			goto err;
+
+		/*
+		 * Get the rest of the request now that its size is known.
+		 */
+		vc.req += typ_size;
+		vc.req_size -= typ_size;
+
+		ret = vhost_scsi_get_req(vq, &vc, &tpg);
+		if (ret)
+			goto err;
+
+		if (v_req.type == VIRTIO_SCSI_T_TMF)
+			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
+		else
+			vhost_scsi_send_an_resp(vs, vq, &vc);
+err:
+		/*
+		 * ENXIO:  No more requests, or read error, wait for next kick
+		 * EINVAL: Invalid response buffer, drop the request
+		 * EIO:    Respond with bad target
+		 * EAGAIN: Pending request
+		 */
+		if (ret == -ENXIO)
+			break;
+		else if (ret == -EIO)
+			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
 out:
 	mutex_unlock(&vq->mutex);
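
Both queue handlers now funnel every failure to a single err: label and act on the errno, exactly as the comment block above spells out. A small sketch of that contract, with the loop action each code triggers:

	#include <errno.h>
	#include <stdio.h>

	static const char *action(int ret)
	{
		switch (ret) {
		case 0:		return "request queued";
		case -ENXIO:	return "break: wait for the next kick";
		case -EINVAL:	return "drop the request (bad response buffer)";
		case -EIO:	return "send a bad-target response";
		case -EAGAIN:	return "loop again: more requests pending";
		default:	return "unexpected";
		}
	}

	int main(void)
	{
		const int codes[] = { 0, -ENXIO, -EINVAL, -EIO, -EAGAIN };
		for (unsigned i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
			printf("%d -> %s\n", codes[i], action(codes[i]));
		return 0;
	}
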
@@ -1058,7 +1382,12 @@
 
 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
 {
+	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+						  poll.work);
+	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
 	pr_debug("%s: The handling func for control queue.\n", __func__);
+	vhost_scsi_ctl_handle_vq(vs, vq);
 }
 
 static void
@@ -1098,7 +1427,7 @@
 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
 
 	mutex_lock(&vq->mutex);
-	if (!vq->private_data)
+	if (!vhost_vq_get_backend(vq))
 		goto out;
 
 	if (vs->vs_events_missed)
@@ -1147,6 +1476,83 @@
 	/* Wait for all reqs issued before the flush to be finished */
 	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
 		wait_for_completion(&old_inflight[i]->comp);
+}
+
+static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
+{
+	struct vhost_scsi_virtqueue *svq = container_of(vq,
+				struct vhost_scsi_virtqueue, vq);
+	struct vhost_scsi_cmd *tv_cmd;
+	unsigned int i;
+
+	if (!svq->scsi_cmds)
+		return;
+
+	for (i = 0; i < svq->max_cmds; i++) {
+		tv_cmd = &svq->scsi_cmds[i];
+
+		kfree(tv_cmd->tvc_sgl);
+		kfree(tv_cmd->tvc_prot_sgl);
+		kfree(tv_cmd->tvc_upages);
+	}
+
+	sbitmap_free(&svq->scsi_tags);
+	kfree(svq->scsi_cmds);
+	svq->scsi_cmds = NULL;
+}
+
+static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
+{
+	struct vhost_scsi_virtqueue *svq = container_of(vq,
+				struct vhost_scsi_virtqueue, vq);
+	struct vhost_scsi_cmd *tv_cmd;
+	unsigned int i;
+
+	if (svq->scsi_cmds)
+		return 0;
+
+	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
+			      NUMA_NO_NODE))
+		return -ENOMEM;
+	svq->max_cmds = max_cmds;
+
+	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
+	if (!svq->scsi_cmds) {
+		sbitmap_free(&svq->scsi_tags);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < max_cmds; i++) {
+		tv_cmd = &svq->scsi_cmds[i];
+
+		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
+					  sizeof(struct scatterlist),
+					  GFP_KERNEL);
+		if (!tv_cmd->tvc_sgl) {
+			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
+			goto out;
+		}
+
+		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
+					     sizeof(struct page *),
+					     GFP_KERNEL);
+		if (!tv_cmd->tvc_upages) {
+			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
+			goto out;
+		}
+
+		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
+					       sizeof(struct scatterlist),
+					       GFP_KERNEL);
+		if (!tv_cmd->tvc_prot_sgl) {
+			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
+			goto out;
+		}
+	}
+	return 0;
+out:
+	vhost_scsi_destroy_vq_cmds(vq);
+	return -ENOMEM;
 }
 
 /*
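
vhost_scsi_setup_vq_cmds() uses the kernel's standard partial-failure idiom: allocate per-command buffers in a loop and, on the first failure, jump to a label whose cleanup tolerates half-initialized state (kfree(NULL) is a no-op, and the destroy helper bails if scsi_cmds was never set). A userspace sketch of the same shape (the counts and element sizes are illustrative):

	#include <stdio.h>
	#include <stdlib.h>

	struct cmd { void *sgl, *pages; };

	static struct cmd *cmds;
	static int ncmds;

	static void destroy_cmds(void)
	{
		if (!cmds)
			return;		/* cf. the !svq->scsi_cmds early return */
		for (int i = 0; i < ncmds; i++) {
			free(cmds[i].sgl);	/* free(NULL) is safe, like kfree */
			free(cmds[i].pages);
		}
		free(cmds);
		cmds = NULL;
		ncmds = 0;
	}

	static int setup_cmds(int max)
	{
		cmds = calloc(max, sizeof(*cmds));
		if (!cmds)
			return -1;
		ncmds = max;
		for (int i = 0; i < max; i++) {
			cmds[i].sgl = calloc(2048, 16);		/* cf. VHOST_SCSI_PREALLOC_SGLS */
			cmds[i].pages = calloc(2048, sizeof(void *));
			if (!cmds[i].sgl || !cmds[i].pages)
				goto out;	/* unwind everything allocated so far */
		}
		return 0;
	out:
		destroy_cmds();
		return -1;
	}

	int main(void)
	{
		printf("setup: %d\n", setup_cmds(128));
		destroy_cmds();
		return 0;
	}
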
@@ -1203,10 +1609,9 @@
 
 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
 			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
-				kfree(vs_tpg);
 				mutex_unlock(&tpg->tv_tpg_mutex);
 				ret = -EEXIST;
-				goto out;
+				goto undepend;
 			}
 			/*
 			 * In order to ensure individual vhost-scsi configfs
@@ -1217,15 +1622,13 @@
 			se_tpg = &tpg->se_tpg;
 			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
 			if (ret) {
-				pr_warn("configfs_depend_item() failed: %d\n", ret);
-				kfree(vs_tpg);
+				pr_warn("target_depend_item() failed: %d\n", ret);
 				mutex_unlock(&tpg->tv_tpg_mutex);
-				goto out;
+				goto undepend;
 			}
 			tpg->tv_tpg_vhost_count++;
 			tpg->vhost_scsi = vs;
 			vs_tpg[tpg->tport_tpgt] = tpg;
-			smp_mb__after_atomic();
 			match = true;
 		}
 		mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1234,10 +1637,21 @@
 	if (match) {
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
 		       sizeof(vs->vs_vhost_wwpn));
+
+		for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+			vq = &vs->vqs[i].vq;
+			if (!vhost_vq_is_setup(vq))
+				continue;
+
+			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
+			if (ret)
+				goto destroy_vq_cmds;
+		}
+
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
 			vq = &vs->vqs[i].vq;
 			mutex_lock(&vq->mutex);
-			vq->private_data = vs_tpg;
+			vhost_vq_set_backend(vq, vs_tpg);
 			vhost_vq_init_access(vq);
 			mutex_unlock(&vq->mutex);
 		}
@@ -1253,7 +1667,22 @@
 	vhost_scsi_flush(vs);
 	kfree(vs->vs_tpg);
 	vs->vs_tpg = vs_tpg;
+	goto out;
 
+destroy_vq_cmds:
+	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
+		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
+			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
+	}
+undepend:
+	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
+		tpg = vs_tpg[i];
+		if (tpg) {
+			tpg->tv_tpg_vhost_count--;
+			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
+		}
+	}
+	kfree(vs_tpg);
 out:
 	mutex_unlock(&vs->dev.mutex);
 	mutex_unlock(&vhost_scsi_mutex);
@@ -1324,8 +1753,14 @@
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
 			vq = &vs->vqs[i].vq;
 			mutex_lock(&vq->mutex);
-			vq->private_data = NULL;
+			vhost_vq_set_backend(vq, NULL);
 			mutex_unlock(&vq->mutex);
+			/*
+			 * Make sure cmds are not running before tearing them
+			 * down.
+			 */
+			vhost_scsi_flush(vs);
+			vhost_scsi_destroy_vq_cmds(vq);
 		}
 	}
 	/*
@@ -1405,7 +1840,7 @@
 		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
 	}
 	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
-		       VHOST_SCSI_WEIGHT, 0);
+		       VHOST_SCSI_WEIGHT, 0, true, NULL);
 
 	vhost_scsi_init_inflight(vs, NULL);
 
@@ -1504,21 +1939,11 @@
 	}
 }
 
-#ifdef CONFIG_COMPAT
-static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
-				    unsigned long arg)
-{
-	return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
-}
-#endif
-
 static const struct file_operations vhost_scsi_fops = {
 	.owner		= THIS_MODULE,
 	.release	= vhost_scsi_release,
 	.unlocked_ioctl	= vhost_scsi_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl	= vhost_scsi_compat_ioctl,
-#endif
+	.compat_ioctl	= compat_ptr_ioctl,
 	.open		= vhost_scsi_open,
 	.llseek		= noop_llseek,
 };
@@ -1598,11 +2023,19 @@
 {
 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_tmf *tmf;
+
+	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
+	if (!tmf)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&tmf->queue_entry);
+	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
 
 	mutex_lock(&vhost_scsi_mutex);
 
 	mutex_lock(&tpg->tv_tpg_mutex);
 	tpg->tv_tpg_port_count++;
+	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
 	mutex_unlock(&tpg->tv_tpg_mutex);
 
 	vhost_scsi_hotplug(tpg, lun);
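
The port-link hook above is where the single reserved TMF for a port is allocated and parked on tpg->tmf_queue; the control-queue handler only pops it and vhost_scsi_release_tmf_res() parks it again, so LUN RESET handling never allocates on the I/O path. A sketch of that reserve/pop/return cycle, simplified to a singly linked stack with no locking:

	#include <stdio.h>
	#include <stdlib.h>

	struct tmf { struct tmf *next; };

	static struct tmf *tmf_queue;	/* cf. tpg->tmf_queue */

	static int port_link(void)	/* configfs path: allocation allowed */
	{
		struct tmf *tmf = calloc(1, sizeof(*tmf));
		if (!tmf)
			return -1;
		tmf->next = tmf_queue;
		tmf_queue = tmf;
		return 0;
	}

	static struct tmf *handle_lun_reset(void)	/* I/O path: no allocation */
	{
		struct tmf *tmf = tmf_queue;
		if (!tmf)
			return NULL;	/* "Missing reserve TMF": reject the request */
		tmf_queue = tmf->next;
		return tmf;
	}

	static void release_tmf(struct tmf *tmf)	/* completion: park it again */
	{
		tmf->next = tmf_queue;
		tmf_queue = tmf;
	}

	int main(void)
	{
		if (port_link())
			return 1;
		struct tmf *t = handle_lun_reset();
		printf("reserved tmf available: %s\n", t ? "yes" : "no");
		if (t)
			release_tmf(t);
		return 0;
	}
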
@@ -1617,33 +2050,21 @@
 {
 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_tmf *tmf;
 
 	mutex_lock(&vhost_scsi_mutex);
 
 	mutex_lock(&tpg->tv_tpg_mutex);
 	tpg->tv_tpg_port_count--;
+	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
+			       queue_entry);
+	list_del(&tmf->queue_entry);
+	kfree(tmf);
 	mutex_unlock(&tpg->tv_tpg_mutex);
 
 	vhost_scsi_hotunplug(tpg, lun);
 
 	mutex_unlock(&vhost_scsi_mutex);
-}
-
-static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
-{
-	struct vhost_scsi_cmd *tv_cmd;
-	unsigned int i;
-
-	if (!se_sess->sess_cmd_map)
-		return;
-
-	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
-		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
-
-		kfree(tv_cmd->tvc_sgl);
-		kfree(tv_cmd->tvc_prot_sgl);
-		kfree(tv_cmd->tvc_upages);
-	}
 }
 
 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
@@ -1685,45 +2106,6 @@
 	NULL,
 };
 
-static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
-			       struct se_session *se_sess, void *p)
-{
-	struct vhost_scsi_cmd *tv_cmd;
-	unsigned int i;
-
-	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
-		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
-
-		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
-					  sizeof(struct scatterlist),
-					  GFP_KERNEL);
-		if (!tv_cmd->tvc_sgl) {
-			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
-			goto out;
-		}
-
-		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
-					     sizeof(struct page *),
-					     GFP_KERNEL);
-		if (!tv_cmd->tvc_upages) {
-			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
-			goto out;
-		}
-
-		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
-					       sizeof(struct scatterlist),
-					       GFP_KERNEL);
-		if (!tv_cmd->tvc_prot_sgl) {
-			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
-			goto out;
-		}
-	}
-	return 0;
-out:
-	vhost_scsi_free_cmd_map_res(se_sess);
-	return -ENOMEM;
-}
-
 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
 				 const char *name)
 {
@@ -1747,12 +2129,9 @@
 	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
 	 * the SCSI Initiator port name of the passed configfs group 'name'.
 	 */
-	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
-					VHOST_SCSI_DEFAULT_TAGS,
-					sizeof(struct vhost_scsi_cmd),
+	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
 					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
-					(unsigned char *)name, tv_nexus,
-					vhost_scsi_nexus_cb);
+					(unsigned char *)name, tv_nexus, NULL);
 	if (IS_ERR(tv_nexus->tvn_se_sess)) {
 		mutex_unlock(&tpg->tv_tpg_mutex);
 		kfree(tv_nexus);
@@ -1802,7 +2181,6 @@
 		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
 		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
 
-	vhost_scsi_free_cmd_map_res(se_sess);
 	/*
 	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
 	 */
@@ -1942,6 +2320,7 @@
 	}
 	mutex_init(&tpg->tv_tpg_mutex);
 	INIT_LIST_HEAD(&tpg->tv_tpg_list);
+	INIT_LIST_HEAD(&tpg->tmf_queue);
 	tpg->tport = tport;
 	tpg->tport_tpgt = tpgt;
@@ -2066,8 +2445,8 @@
 
 static const struct target_core_fabric_ops vhost_scsi_ops = {
 	.module				= THIS_MODULE,
-	.name				= "vhost",
-	.get_fabric_name		= vhost_scsi_get_fabric_name,
+	.fabric_name			= "vhost",
+	.max_data_sg_nents		= VHOST_SCSI_PREALLOC_SGLS,
 	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
 	.tpg_get_tag			= vhost_scsi_get_tpgt,
 	.tpg_check_demo_mode		= vhost_scsi_check_true,
@@ -2081,7 +2460,6 @@
 	.sess_get_index			= vhost_scsi_sess_get_index,
 	.sess_get_initiator_sid		= NULL,
 	.write_pending			= vhost_scsi_write_pending,
-	.write_pending_status		= vhost_scsi_write_pending_status,
 	.set_default_node_attributes	= vhost_scsi_set_default_node_attrs,
 	.get_cmd_state			= vhost_scsi_get_cmd_state,
 	.queue_data_in			= vhost_scsi_queue_data_in,