+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Virtio SCSI HBA driver
  *
...
  * Authors:
  *  Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
  *  Paolo Bonzini <pbonzini@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
...
 #include <scsi/scsi_devinfo.h>
 #include <linux/seqlock.h>
 #include <linux/blk-mq-virtio.h>
+
+#include "sd.h"
 
 #define VIRTIO_SCSI_MEMPOOL_SZ 64
 #define VIRTIO_SCSI_EVENT_LEN 8
...
 	struct virtqueue *vq;
 };
 
-/*
- * Per-target queue state.
- *
- * This struct holds the data needed by the queue steering policy. When a
- * target is sent multiple requests, we need to drive them to the same queue so
- * that FIFO processing order is kept. However, if a target was idle, we can
- * choose a queue arbitrarily. In this case the queue is chosen according to
- * the current VCPU, so the driver expects the number of request queues to be
- * equal to the number of VCPUs. This makes it easy and fast to select the
- * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
- * (each virtqueue's affinity is set to the CPU that "owns" the queue).
- *
- * tgt_seq is held to serialize reading and writing req_vq.
- *
- * Decrements of reqs are never concurrent with writes of req_vq: before the
- * decrement reqs will be != 0; after the decrement the virtqueue completion
- * routine will not use the req_vq so it can be changed by a new request.
- * Thus they can happen outside the tgt_seq, provided of course we make reqs
- * an atomic_t.
- */
-struct virtio_scsi_target_state {
-	seqcount_t tgt_seq;
-
-	/* Currently active virtqueue for requests sent to this target. */
-	struct virtio_scsi_vq *req_vq;
-};
-
 /* Driver instance state */
 struct virtio_scsi {
 	struct virtio_device *vdev;
...
 	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
 
 	u32 num_queues;
-
-	/* If the affinity hint is set for virtqueues */
-	bool affinity_hint_set;
 
 	struct hlist_node node;
 
...
 
 static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
 {
-	if (!resid)
-		return;
-
-	if (!scsi_bidi_cmnd(sc)) {
+	if (resid)
 		scsi_set_resid(sc, resid);
-		return;
-	}
-
-	scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
-	scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
 }
 
-/**
+/*
  * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
  *
  * Called with vq_lock held.
...
 	default:
 		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
 			    resp->response);
-		/* fall through */
+		fallthrough;
 	case VIRTIO_SCSI_S_FAILURE:
 		set_host_byte(sc, DID_ERROR);
 		break;
...
 
 	switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
 	case VIRTIO_SCSI_EVT_RESET_RESCAN:
-		scsi_add_device(shost, 0, target, lun);
+		if (lun == 0) {
+			scsi_scan_target(&shost->shost_gendev, 0, target,
+					 SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
+		} else {
+			scsi_add_device(shost, 0, target, lun);
+		}
 		break;
 	case VIRTIO_SCSI_EVT_RESET_REMOVED:
 		sdev = scsi_device_lookup(shost, 0, target, lun);
...
 	scsi_device_put(sdev);
 }
 
+static void virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
+{
+	struct scsi_device *sdev;
+	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
+	int result, inquiry_len, inq_result_len = 256;
+	char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);
+
+	shost_for_each_device(sdev, shost) {
+		inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
+
+		memset(scsi_cmd, 0, sizeof(scsi_cmd));
+		scsi_cmd[0] = INQUIRY;
+		scsi_cmd[4] = (unsigned char) inquiry_len;
+
+		memset(inq_result, 0, inq_result_len);
+
+		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
+					  inq_result, inquiry_len, NULL,
+					  SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+
+		if (result == 0 && inq_result[0] >> 5) {
+			/* PQ indicates the LUN is not attached */
+			scsi_remove_device(sdev);
+		} else if (host_byte(result) == DID_BAD_TARGET) {
+			/*
+			 * If all LUNs of a virtio-scsi device are unplugged
+			 * it will respond with BAD TARGET on any INQUIRY
+			 * command.
+			 * Remove the device in this case as well.
+			 */
+			scsi_remove_device(sdev);
+		}
+	}
+
+	kfree(inq_result);
+}
+
 static void virtscsi_handle_event(struct work_struct *work)
 {
 	struct virtio_scsi_event_node *event_node =
...
 	    cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
 		event->event &= ~cpu_to_virtio32(vscsi->vdev,
 						 VIRTIO_SCSI_T_EVENTS_MISSED);
+		virtscsi_rescan_hotunplug(vscsi);
 		scsi_scan_host(virtio_scsi_host(vscsi->vdev));
 	}
 
...
 	virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
 };
 
-/**
- * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
- * @vq : the struct virtqueue we're talking about
- * @cmd : command structure
- * @req_size : size of the request buffer
- * @resp_size : size of the response buffer
- */
-static int virtscsi_add_cmd(struct virtqueue *vq,
+static int __virtscsi_add_cmd(struct virtqueue *vq,
			      struct virtio_scsi_cmd *cmd,
			      size_t req_size, size_t resp_size)
 {
...
 
 	if (sc && sc->sc_data_direction != DMA_NONE) {
 		if (sc->sc_data_direction != DMA_FROM_DEVICE)
-			out = &scsi_out(sc)->table;
+			out = &sc->sdb.table;
 		if (sc->sc_data_direction != DMA_TO_DEVICE)
-			in = &scsi_in(sc)->table;
+			in = &sc->sdb.table;
 	}
 
 	/* Request header. */
...
 	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
 }
 
-static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
+static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
+{
+	bool needs_kick;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vq->vq_lock, flags);
+	needs_kick = virtqueue_kick_prepare(vq->vq);
+	spin_unlock_irqrestore(&vq->vq_lock, flags);
+
+	if (needs_kick)
+		virtqueue_notify(vq->vq);
+}
+
+/**
+ * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
+ * @vq : the struct virtqueue we're talking about
+ * @cmd : command structure
+ * @req_size : size of the request buffer
+ * @resp_size : size of the response buffer
+ * @kick : whether to kick the virtqueue immediately
+ */
+static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
 			     struct virtio_scsi_cmd *cmd,
-			     size_t req_size, size_t resp_size)
+			     size_t req_size, size_t resp_size,
+			     bool kick)
 {
 	unsigned long flags;
 	int err;
 	bool needs_kick = false;
 
 	spin_lock_irqsave(&vq->vq_lock, flags);
-	err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
-	if (!err)
+	err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
+	if (!err && kick)
 		needs_kick = virtqueue_kick_prepare(vq->vq);
 
 	spin_unlock_irqrestore(&vq->vq_lock, flags);
...
 	struct virtio_scsi *vscsi = shost_priv(shost);
 	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
 	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+	bool kick;
 	unsigned long flags;
 	int req_size;
 	int ret;
...
 		req_size = sizeof(cmd->req.cmd);
 	}
 
-	ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+	kick = (sc->flags & SCMD_LAST) != 0;
+	ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
 	if (ret == -EIO) {
 		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
 		spin_lock_irqsave(&req_vq->vq_lock, flags);
...
 	int ret = FAILED;
 
 	cmd->comp = &comp;
-	if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
-			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)
+	if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
+			     sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
 		goto out;
 
 	wait_for_completion(&comp);
...
 	return virtscsi_tmf(vscsi, cmd);
 }
 
-static int virtscsi_target_alloc(struct scsi_target *starget)
-{
-	struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
-	struct virtio_scsi *vscsi = shost_priv(sh);
-
-	struct virtio_scsi_target_state *tgt =
-				kmalloc(sizeof(*tgt), GFP_KERNEL);
-	if (!tgt)
-		return -ENOMEM;
-
-	seqcount_init(&tgt->tgt_seq);
-	tgt->req_vq = &vscsi->req_vqs[0];
-
-	starget->hostdata = tgt;
-	return 0;
-}
-
-static void virtscsi_target_destroy(struct scsi_target *starget)
-{
-	struct virtio_scsi_target_state *tgt = starget->hostdata;
-	kfree(tgt);
-}
-
 static int virtscsi_map_queues(struct Scsi_Host *shost)
 {
 	struct virtio_scsi *vscsi = shost_priv(shost);
+	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
 
-	return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2);
+	return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
+}
+
+static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
+{
+	struct virtio_scsi *vscsi = shost_priv(shost);
+
+	virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
 }
 
 /*
...
 	.this_id = -1,
 	.cmd_size = sizeof(struct virtio_scsi_cmd),
 	.queuecommand = virtscsi_queuecommand,
+	.commit_rqs = virtscsi_commit_rqs,
 	.change_queue_depth = virtscsi_change_queue_depth,
 	.eh_abort_handler = virtscsi_abort,
 	.eh_device_reset_handler = virtscsi_device_reset,
...
 	.slave_alloc = virtscsi_device_alloc,
 
 	.dma_boundary = UINT_MAX,
-	.use_clustering = ENABLE_CLUSTERING,
-	.target_alloc = virtscsi_target_alloc,
-	.target_destroy = virtscsi_target_destroy,
 	.map_queues = virtscsi_map_queues,
 	.track_queue_depth = 1,
-	.force_blk_mq = 1,
 };
 
 #define virtscsi_config_get(vdev, fld) \
 	({ \
-		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
+		__virtio_native_type(struct virtio_scsi_config, fld) __val; \
 		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
 		__val; \
 	})
 
 #define virtscsi_config_set(vdev, fld, val) \
 	do { \
-		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
+		__virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
 		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
 	} while(0)
 
...
 
 	/* We need to know how many queues before we allocate. */
 	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
+	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
 
 	num_targets = virtscsi_config_get(vdev, max_target) + 1;
 
 	shost = scsi_host_alloc(&virtscsi_host_template,
-		sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
+				struct_size(vscsi, req_vqs, num_queues));
 	if (!shost)
 		return -ENOMEM;
 
...
 	return 0;
 
 error:
-	if (virtscsi_cmd_pool) {
-		mempool_destroy(virtscsi_cmd_pool);
-		virtscsi_cmd_pool = NULL;
-	}
-	if (virtscsi_cmd_cache) {
-		kmem_cache_destroy(virtscsi_cmd_cache);
-		virtscsi_cmd_cache = NULL;
-	}
+	mempool_destroy(virtscsi_cmd_pool);
+	virtscsi_cmd_pool = NULL;
+	kmem_cache_destroy(virtscsi_cmd_cache);
+	virtscsi_cmd_cache = NULL;
 	return ret;
 }
 