```diff
+// SPDX-License-Identifier: GPL-2.0-only
 /* sunvdc.c: Sun LDOM Virtual Disk Client.
  *
  * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
...
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/hdreg.h>
 #include <linux/genhd.h>
 #include <linux/cdrom.h>
...

         u64 max_xfer_size;
         u32 vdisk_block_size;
+        u32 drain;

         u64 ldc_timeout;
-        struct timer_list ldc_reset_timer;
+        struct delayed_work ldc_reset_timer_work;
         struct work_struct ldc_reset_work;

         /* The server fills these in for us in the disk attribute
...
         u8 vdisk_mtype;
         u32 vdisk_phys_blksz;

+        struct blk_mq_tag_set tag_set;
+
         char disk_name[32];
 };

 static void vdc_ldc_reset(struct vdc_port *port);
 static void vdc_ldc_reset_work(struct work_struct *work);
-static void vdc_ldc_reset_timer(struct timer_list *t);
+static void vdc_ldc_reset_timer_work(struct work_struct *work);

 static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
 {
```
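The struct changes above replace the LDC-reset timer with a delayed work item and add the blk-mq tag set plus a `drain` flag. The timer has to go because the new drain path (see `vdc_queue_drain()` further down) calls `blk_mq_freeze_queue()`, which can sleep, so the handler must run in process context rather than in softirq (timer) context. A minimal sketch of that timer-to-delayed-work pattern, using illustrative `my_*` names that are not part of sunvdc:

```c
#include <linux/workqueue.h>
#include <linux/timer.h>        /* round_jiffies() */
#include <linux/jiffies.h>
#include <linux/printk.h>

struct my_port {
        struct delayed_work reset_timer_work;   /* was: struct timer_list */
        unsigned long timeout;                  /* seconds */
};

/* Runs in process context, so it may sleep (a timer callback may not). */
static void my_reset_timer_work(struct work_struct *work)
{
        struct my_port *port =
                container_of(work, struct my_port, reset_timer_work.work);

        /* ... handle the "link still down after timeout" case ... */
        pr_info("link down for %lu seconds, resetting\n", port->timeout);
}

static void my_port_init(struct my_port *port)
{
        INIT_DELAYED_WORK(&port->reset_timer_work, my_reset_timer_work);
}

static void my_port_arm(struct my_port *port)
{
        /* Counterpart of mod_timer(): (re)schedule the handler. */
        mod_delayed_work(system_wq, &port->reset_timer_work,
                         round_jiffies(jiffies + HZ * port->timeout));
}

static void my_port_teardown(struct my_port *port)
{
        /* Counterpart of del_timer_sync(). */
        cancel_delayed_work_sync(&port->reset_timer_work);
}
```

The later hunks make the corresponding substitutions in the driver: `cancel_delayed_work()`/`cancel_delayed_work_sync()` where `del_timer()`/`del_timer_sync()` were used, and `mod_delayed_work()` where `mod_timer()` was used.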
```diff
         .owner = THIS_MODULE,
         .getgeo = vdc_getgeo,
         .ioctl = vdc_ioctl,
+        .compat_ioctl = blkdev_compat_ptr_ioctl,
 };

 static void vdc_blk_queue_start(struct vdc_port *port)
...
          * handshake completes, so check for initial handshake before we've
          * allocated a disk.
          */
-        if (port->disk && blk_queue_stopped(port->disk->queue) &&
-            vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) {
-                blk_start_queue(port->disk->queue);
-        }
-
+        if (port->disk && vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
+                blk_mq_start_stopped_hw_queues(port->disk->queue, true);
 }

 static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
...
 {
         struct vdc_port *port = to_vdc_port(vio);

-        del_timer(&port->ldc_reset_timer);
+        cancel_delayed_work(&port->ldc_reset_timer_work);
         vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
         vdc_blk_queue_start(port);
 }
...

         rqe->req = NULL;

-        __blk_end_request(req, (desc->status ? BLK_STS_IOERR : 0), desc->size);
+        blk_mq_end_request(req, desc->status ? BLK_STS_IOERR : 0);

         vdc_blk_queue_start(port);
 }
...
         return err;
 }

-static void do_vdc_request(struct request_queue *rq)
+static blk_status_t vdc_queue_rq(struct blk_mq_hw_ctx *hctx,
+                                 const struct blk_mq_queue_data *bd)
 {
-        struct request *req;
+        struct vdc_port *port = hctx->queue->queuedata;
+        struct vio_dring_state *dr;
+        unsigned long flags;

-        while ((req = blk_peek_request(rq)) != NULL) {
-                struct vdc_port *port;
-                struct vio_dring_state *dr;
+        dr = &port->vio.drings[VIO_DRIVER_TX_RING];

-                port = req->rq_disk->private_data;
-                dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-                if (unlikely(vdc_tx_dring_avail(dr) < 1))
-                        goto wait;
+        blk_mq_start_request(bd->rq);

-                blk_start_request(req);
+        spin_lock_irqsave(&port->vio.lock, flags);

-                if (__send_request(req) < 0) {
-                        blk_requeue_request(rq, req);
-wait:
-                        /* Avoid pointless unplugs. */
-                        blk_stop_queue(rq);
-                        break;
-                }
+        /*
+         * Doing drain, just end the request in error
+         */
+        if (unlikely(port->drain)) {
+                spin_unlock_irqrestore(&port->vio.lock, flags);
+                return BLK_STS_IOERR;
         }
+
+        if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
+                spin_unlock_irqrestore(&port->vio.lock, flags);
+                blk_mq_stop_hw_queue(hctx);
+                return BLK_STS_DEV_RESOURCE;
+        }
+
+        if (__send_request(bd->rq) < 0) {
+                spin_unlock_irqrestore(&port->vio.lock, flags);
+                return BLK_STS_IOERR;
+        }
+
+        spin_unlock_irqrestore(&port->vio.lock, flags);
+        return BLK_STS_OK;
 }

 static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
```
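Under blk-mq there is no queue to peek and stop from a `request_fn`; instead `->queue_rq()` receives one prepared request at a time and reports its fate through a `blk_status_t`. Two return values matter in the hunk above: `BLK_STS_IOERR` fails the request outright, while `BLK_STS_DEV_RESOURCE` makes blk-mq requeue it without rerunning the queue on its own, so the driver stops the hardware queue and restarts it later, which `vdc_blk_queue_start()` now does with `blk_mq_start_stopped_hw_queues()` from the completion path. A condensed sketch of that contract, with hypothetical `my_*` helpers standing in for the driver's ring handling:

```c
#include <linux/blk-mq.h>

struct my_dev {
        struct request_queue *queue;
        /* ... descriptor-ring state ... */
};

/* Stand-ins for the driver's ring accounting and submission paths. */
static inline bool my_ring_full(struct my_dev *dev) { return false; }
static inline int my_submit(struct my_dev *dev, struct request *rq) { return 0; }

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                const struct blk_mq_queue_data *bd)
{
        struct my_dev *dev = hctx->queue->queuedata;

        blk_mq_start_request(bd->rq);

        if (my_ring_full(dev)) {
                /* Hold dispatch until the completion path restarts us. */
                blk_mq_stop_hw_queue(hctx);
                return BLK_STS_DEV_RESOURCE;
        }

        if (my_submit(dev, bd->rq) < 0)
                return BLK_STS_IOERR;           /* request is failed */

        return BLK_STS_OK;                      /* completion comes later */
}

/* Completion path: finish the request and let stopped queues run again. */
static void my_complete(struct my_dev *dev, struct request *rq, bool failed)
{
        blk_mq_end_request(rq, failed ? BLK_STS_IOERR : BLK_STS_OK);
        blk_mq_start_stopped_hw_queues(dev->queue, true);
}
```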
```diff
         case VD_OP_GET_EFI:
         case VD_OP_SET_EFI:
                 return -EOPNOTSUPP;
-                break;
-        };
+        }

         map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

...
         vio_ldc_free(&port->vio);
 }

+static const struct blk_mq_ops vdc_mq_ops = {
+        .queue_rq = vdc_queue_rq,
+};
+
+static void cleanup_queue(struct request_queue *q)
+{
+        struct vdc_port *port = q->queuedata;
+
+        blk_cleanup_queue(q);
+        blk_mq_free_tag_set(&port->tag_set);
+}
+
+static struct request_queue *init_queue(struct vdc_port *port)
+{
+        struct request_queue *q;
+
+        q = blk_mq_init_sq_queue(&port->tag_set, &vdc_mq_ops, VDC_TX_RING_SIZE,
+                                 BLK_MQ_F_SHOULD_MERGE);
+        if (IS_ERR(q))
+                return q;
+
+        q->queuedata = port;
+        return q;
+}
+
 static int probe_disk(struct vdc_port *port)
 {
         struct request_queue *q;
```
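`blk_mq_init_sq_queue()` is a convenience helper for single-hardware-queue drivers: it fills in and allocates the tag set, then creates the request queue, with the queue depth set to `VDC_TX_RING_SIZE` so the number of tags matches the number of TX descriptor-ring slots. Roughly what `init_queue()` would look like if that helper were open-coded (a sketch of what the helper does, not the driver's code):

```c
#include <linux/blk-mq.h>
#include <linux/err.h>
#include <linux/numa.h>
#include <linux/string.h>

static struct request_queue *init_queue_open_coded(struct vdc_port *port)
{
        struct blk_mq_tag_set *set = &port->tag_set;
        struct request_queue *q;
        int err;

        memset(set, 0, sizeof(*set));
        set->ops = &vdc_mq_ops;
        set->nr_hw_queues = 1;                  /* single hardware queue */
        set->queue_depth = VDC_TX_RING_SIZE;    /* one tag per ring slot */
        set->numa_node = NUMA_NO_NODE;
        set->flags = BLK_MQ_F_SHOULD_MERGE;

        err = blk_mq_alloc_tag_set(set);
        if (err)
                return ERR_PTR(err);

        q = blk_mq_init_queue(set);
        if (IS_ERR(q)) {
                blk_mq_free_tag_set(set);
                return q;
        }

        q->queuedata = port;
        return q;
}
```

Either way, the tag set lives in `struct vdc_port`, which is why `cleanup_queue()` pairs `blk_cleanup_queue()` with `blk_mq_free_tag_set()`.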
```diff
                                     (u64)geom.num_sec);
         }

-        q = blk_init_queue(do_vdc_request, &port->vio.lock);
-        if (!q) {
+        q = init_queue(port);
+        if (IS_ERR(q)) {
                 printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
                        port->vio.name);
-                return -ENOMEM;
+                return PTR_ERR(q);
         }
         g = alloc_disk(1 << PARTITION_SHIFT);
         if (!g) {
                 printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
                        port->vio.name);
-                blk_cleanup_queue(q);
+                cleanup_queue(q);
                 return -ENOMEM;
         }

...
                 port->vdisk_size, (port->vdisk_size >> (20 - 9)),
                 port->vio.ver.major, port->vio.ver.minor);

-        device_add_disk(&port->vio.vdev->dev, g);
+        device_add_disk(&port->vio.vdev->dev, g, NULL);

         return 0;
 }
...
         print_version();

         hp = mdesc_grab();
+        if (!hp)
+                return -ENODEV;

         err = -ENODEV;
         if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
...
          */
         ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
         port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
-        timer_setup(&port->ldc_reset_timer, vdc_ldc_reset_timer, 0);
+        INIT_DELAYED_WORK(&port->ldc_reset_timer_work, vdc_ldc_reset_timer_work);
         INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);

         err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
...
         struct vdc_port *port = dev_get_drvdata(&vdev->dev);

         if (port) {
-                unsigned long flags;
-
-                spin_lock_irqsave(&port->vio.lock, flags);
-                blk_stop_queue(port->disk->queue);
-                spin_unlock_irqrestore(&port->vio.lock, flags);
+                blk_mq_stop_hw_queues(port->disk->queue);

                 flush_work(&port->ldc_reset_work);
-                del_timer_sync(&port->ldc_reset_timer);
+                cancel_delayed_work_sync(&port->ldc_reset_timer_work);
                 del_timer_sync(&port->vio.timer);

                 del_gendisk(port->disk);
-                blk_cleanup_queue(port->disk->queue);
+                cleanup_queue(port->disk->queue);
                 put_disk(port->disk);
                 port->disk = NULL;

...
                 }

                 rqe->req = NULL;
-                blk_requeue_request(port->disk->queue, req);
+                blk_mq_requeue_request(req, false);
         }
 }

 static void vdc_queue_drain(struct vdc_port *port)
 {
-        struct request *req;
+        struct request_queue *q = port->disk->queue;

-        while ((req = blk_fetch_request(port->disk->queue)) != NULL)
-                __blk_end_request_all(req, BLK_STS_IOERR);
+        /*
+         * Mark the queue as draining, then freeze/quiesce to ensure
+         * that all existing requests are seen in ->queue_rq() and killed
+         */
+        port->drain = 1;
+        spin_unlock_irq(&port->vio.lock);
+
+        blk_mq_freeze_queue(q);
+        blk_mq_quiesce_queue(q);
+
+        spin_lock_irq(&port->vio.lock);
+        port->drain = 0;
+        blk_mq_unquiesce_queue(q);
+        blk_mq_unfreeze_queue(q);
 }

-static void vdc_ldc_reset_timer(struct timer_list *t)
+static void vdc_ldc_reset_timer_work(struct work_struct *work)
 {
-        struct vdc_port *port = from_timer(port, t, ldc_reset_timer);
-        struct vio_driver_state *vio = &port->vio;
-        unsigned long flags;
+        struct vdc_port *port;
+        struct vio_driver_state *vio;

-        spin_lock_irqsave(&vio->lock, flags);
+        port = container_of(work, struct vdc_port, ldc_reset_timer_work.work);
+        vio = &port->vio;
+
+        spin_lock_irq(&vio->lock);
         if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
                 pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
                         port->disk_name, port->ldc_timeout);
                 vdc_queue_drain(port);
                 vdc_blk_queue_start(port);
         }
-        spin_unlock_irqrestore(&vio->lock, flags);
+        spin_unlock_irq(&vio->lock);
 }

 static void vdc_ldc_reset_work(struct work_struct *work)
```
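Two distinct blk-mq mechanisms are combined in the new `vdc_queue_drain()` above: freezing waits for every request that has already entered the queue to complete (they all fail quickly while `port->drain` is set, because `vdc_queue_rq()` returns `BLK_STS_IOERR`), and quiescing additionally guarantees that no `->queue_rq()` invocation is still running when it returns. The vio lock is dropped first because both calls may sleep. A generic sketch of the same pattern, with a hypothetical `my_dev` rather than sunvdc's `vdc_port`:

```c
#include <linux/blk-mq.h>
#include <linux/spinlock.h>

struct my_dev {
        spinlock_t lock;                /* plays the role of port->vio.lock */
        struct request_queue *queue;
        u32 drain;                      /* ->queue_rq() fails requests while set */
};

/* Called with dev->lock held and interrupts disabled; returns the same way. */
static void my_queue_drain(struct my_dev *dev)
{
        struct request_queue *q = dev->queue;

        dev->drain = 1;
        spin_unlock_irq(&dev->lock);    /* the calls below may sleep */

        /* Wait for every request already in the queue to complete (in error). */
        blk_mq_freeze_queue(q);
        /* Wait for any ->queue_rq() invocation still running to return. */
        blk_mq_quiesce_queue(q);

        spin_lock_irq(&dev->lock);
        dev->drain = 0;
        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);
}
```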
```diff
         assert_spin_locked(&port->vio.lock);

         pr_warn(PFX "%s ldc link reset\n", port->disk_name);
-        blk_stop_queue(port->disk->queue);
+        blk_mq_stop_hw_queues(port->disk->queue);
         vdc_requeue_inflight(port);
         vdc_port_down(port);

...
         }

         if (port->ldc_timeout)
-                mod_timer(&port->ldc_reset_timer,
+                mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
                           round_jiffies(jiffies + HZ * port->ldc_timeout));
         mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
         return;
```