2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/drivers/mtd/mtd_blkdevs.c
+++ b/kernel/drivers/mtd/mtd_blkdevs.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Interface to Linux block layer for MTD 'translation layers'.
  *
  * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
  */
 
 #include <linux/kernel.h>
@@ -27,6 +13,7 @@
 #include <linux/mtd/blktrans.h>
 #include <linux/mtd/mtd.h>
 #include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/blkpg.h>
 #include <linux/spinlock.h>
 #include <linux/hdreg.h>
@@ -45,6 +32,8 @@
 
         dev->disk->private_data = NULL;
         blk_cleanup_queue(dev->rq);
+        blk_mq_free_tag_set(dev->tag_set);
+        kfree(dev->tag_set);
         put_disk(dev->disk);
         list_del(&dev->list);
         kfree(dev);
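
A note on the release order above: the request queue references the tag set, so blk_mq_free_tag_set() may only run after blk_cleanup_queue() has drained and released the queue, and the kfree() matches the kzalloc() done at device creation. A minimal sketch of the same ordering, with the my_dev shape being hypothetical:

static void my_dev_release(struct my_dev *dev)
{
        blk_cleanup_queue(dev->rq);        /* drain and release the queue */
        blk_mq_free_tag_set(dev->tag_set); /* release tags and requests */
        kfree(dev->tag_set);               /* the set itself was kzalloc'd */
}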
@@ -134,28 +123,39 @@
 }
 EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
 
-static void mtd_blktrans_work(struct work_struct *work)
+static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
 {
-        struct mtd_blktrans_dev *dev =
-                container_of(work, struct mtd_blktrans_dev, work);
+        struct request *rq;
+
+        rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
+        if (rq) {
+                list_del_init(&rq->queuelist);
+                blk_mq_start_request(rq);
+                return rq;
+        }
+
+        return NULL;
+}
+
+static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
+        __releases(&dev->queue_lock)
+        __acquires(&dev->queue_lock)
+{
         struct mtd_blktrans_ops *tr = dev->tr;
-        struct request_queue *rq = dev->rq;
         struct request *req = NULL;
         int background_done = 0;
-
-        spin_lock_irq(rq->queue_lock);
 
         while (1) {
                 blk_status_t res;
 
                 dev->bg_stop = false;
-                if (!req && !(req = blk_fetch_request(rq))) {
+                if (!req && !(req = mtd_next_request(dev))) {
                         if (tr->background && !background_done) {
-                                spin_unlock_irq(rq->queue_lock);
+                                spin_unlock_irq(&dev->queue_lock);
                                 mutex_lock(&dev->lock);
                                 tr->background(dev);
                                 mutex_unlock(&dev->lock);
-                                spin_lock_irq(rq->queue_lock);
+                                spin_lock_irq(&dev->queue_lock);
                                 /*
                                  * Do background processing just once per idle
                                  * period.
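
mtd_blktrans_work() changes from a workqueue callback into a plain helper that is entered with dev->queue_lock held and drops it around the actual transfer; the __releases()/__acquires() annotations document that interior imbalance for sparse's lock-context checking. A stripped-down sketch of the pattern, all names hypothetical:

static void my_process(struct my_dev *dev)
        __releases(&dev->queue_lock)
        __acquires(&dev->queue_lock)
{
        struct request *rq;

        while ((rq = my_next_request(dev)) != NULL) {
                spin_unlock_irq(&dev->queue_lock);
                my_transfer(dev, rq);   /* may sleep */
                spin_lock_irq(&dev->queue_lock);
        }
}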
@@ -166,35 +166,39 @@
                         break;
                 }
 
-                spin_unlock_irq(rq->queue_lock);
+                spin_unlock_irq(&dev->queue_lock);
 
                 mutex_lock(&dev->lock);
                 res = do_blktrans_request(dev->tr, dev, req);
                 mutex_unlock(&dev->lock);
 
-                spin_lock_irq(rq->queue_lock);
-
-                if (!__blk_end_request_cur(req, res))
+                if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
+                        __blk_mq_end_request(req, res);
                         req = NULL;
+                }
 
                 background_done = 0;
+                spin_lock_irq(&dev->queue_lock);
         }
-
-        spin_unlock_irq(rq->queue_lock);
 }
 
-static void mtd_blktrans_request(struct request_queue *rq)
+static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
+                                 const struct blk_mq_queue_data *bd)
 {
         struct mtd_blktrans_dev *dev;
-        struct request *req = NULL;
 
-        dev = rq->queuedata;
+        dev = hctx->queue->queuedata;
+        if (!dev) {
+                blk_mq_start_request(bd->rq);
+                return BLK_STS_IOERR;
+        }
 
-        if (!dev)
-                while ((req = blk_fetch_request(rq)) != NULL)
-                        __blk_end_request_all(req, BLK_STS_IOERR);
-        else
-                queue_work(dev->wq, &dev->work);
+        spin_lock_irq(&dev->queue_lock);
+        list_add_tail(&bd->rq->queuelist, &dev->rq_list);
+        mtd_blktrans_work(dev);
+        spin_unlock_irq(&dev->queue_lock);
+
+        return BLK_STS_OK;
 }
 
 static int blktrans_open(struct block_device *bdev, fmode_t mode)
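
Two things change shape here. Completion is split: blk_update_request() retires the current chunk and returns false once the whole request is done, at which point __blk_mq_end_request() replaces the old __blk_end_request_cur(). And instead of pulling work with blk_fetch_request(), blk-mq pushes each request into .queue_rq; this driver parks it on its private rq_list and drains the list inline, which may sleep and is therefore only legal because the tag set is created with BLK_MQ_F_BLOCKING (see the init hunk below). A hedged sketch of that contract, hypothetical names throughout:

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                const struct blk_mq_queue_data *bd)
{
        struct my_dev *dev = hctx->queue->queuedata;

        if (!dev) {                     /* device already torn down */
                blk_mq_start_request(bd->rq);
                return BLK_STS_IOERR;
        }

        spin_lock_irq(&dev->queue_lock);
        list_add_tail(&bd->rq->queuelist, &dev->rq_list);
        my_process(dev);                /* drain; see sketch above */
        spin_unlock_irq(&dev->queue_lock);

        return BLK_STS_OK;
}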
@@ -329,6 +333,10 @@
         .getgeo         = blktrans_getgeo,
 };
 
+static const struct blk_mq_ops mtd_mq_ops = {
+        .queue_rq       = mtd_queue_rq,
+};
+
 int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 {
         struct mtd_blktrans_ops *tr = new->tr;
@@ -416,10 +424,19 @@
 
         /* Create the request queue */
         spin_lock_init(&new->queue_lock);
-        new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);
+        INIT_LIST_HEAD(&new->rq_list);
 
-        if (!new->rq)
+        new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
+        if (!new->tag_set)
                 goto error3;
+
+        new->rq = blk_mq_init_sq_queue(new->tag_set, &mtd_mq_ops, 2,
+                        BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+        if (IS_ERR(new->rq)) {
+                ret = PTR_ERR(new->rq);
+                new->rq = NULL;
+                goto error4;
+        }
 
         if (tr->flush)
                 blk_queue_write_cache(new->rq, true, false);
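
Unlike blk_init_queue(), which returned NULL on failure, blk_mq_init_sq_queue() returns an ERR_PTR, hence the IS_ERR()/PTR_ERR() handling above. It covers the common single-hardware-queue case: it fills in and allocates the zeroed tag set and builds a request queue on top of it. A sketch of the allocation pair as a hypothetical helper:

static struct request_queue *my_create_queue(struct blk_mq_tag_set **setp)
{
        struct blk_mq_tag_set *set;
        struct request_queue *q;

        set = kzalloc(sizeof(*set), GFP_KERNEL);
        if (!set)
                return ERR_PTR(-ENOMEM);

        /* one hardware queue, depth 2: one request in flight while
         * the next is being prepared; ->queue_rq() may sleep */
        q = blk_mq_init_sq_queue(set, &mtd_mq_ops, 2,
                        BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
        if (IS_ERR(q)) {
                kfree(set);
                return q;
        }

        *setp = set;
        return q;
}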
@@ -437,17 +454,10 @@
 
         gd->queue = new->rq;
 
-        /* Create processing workqueue */
-        new->wq = alloc_workqueue("%s%d", 0, 0,
-                                  tr->name, new->mtd->index);
-        if (!new->wq)
-                goto error4;
-        INIT_WORK(&new->work, mtd_blktrans_work);
-
         if (new->readonly)
                 set_disk_ro(gd, 1);
 
-        device_add_disk(&new->mtd->dev, gd);
+        device_add_disk(&new->mtd->dev, gd, NULL);
 
         if (new->disk_attributes) {
                 ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
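
The extra NULL is because device_add_disk() grew a third parameter for default sysfs attribute groups; passing NULL preserves the old behavior:

void device_add_disk(struct device *parent, struct gendisk *disk,
                     const struct attribute_group **groups);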
@@ -456,7 +466,7 @@
         }
         return 0;
 error4:
-        blk_cleanup_queue(new->rq);
+        kfree(new->tag_set);
 error3:
         put_disk(new->disk);
 error2:
@@ -481,15 +491,17 @@
         /* Stop new requests to arrive */
         del_gendisk(old->disk);
 
-        /* Stop workqueue. This will perform any pending request. */
-        destroy_workqueue(old->wq);
-
         /* Kill current requests */
         spin_lock_irqsave(&old->queue_lock, flags);
         old->rq->queuedata = NULL;
-        blk_start_queue(old->rq);
         spin_unlock_irqrestore(&old->queue_lock, flags);
 
+        /* freeze+quiesce queue to ensure all requests are flushed */
+        blk_mq_freeze_queue(old->rq);
+        blk_mq_quiesce_queue(old->rq);
+        blk_mq_unquiesce_queue(old->rq);
+        blk_mq_unfreeze_queue(old->rq);
+
         /* If the device is currently open, tell trans driver to close it,
            then put mtd device, and don't touch it again */
         mutex_lock(&old->lock);
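
The old drain (destroy_workqueue() plus a final blk_start_queue() pass) is replaced by the freeze/quiesce pair: freezing waits for every in-flight request to complete, quiescing waits for any still-running .queue_rq() call to return. Since queuedata was cleared under the lock just before, anything dispatched afterwards is failed fast with BLK_STS_IOERR in mtd_queue_rq(). The idiom as a hypothetical helper:

static void my_drain_queue(struct request_queue *q)
{
        blk_mq_freeze_queue(q);         /* wait out in-flight requests */
        blk_mq_quiesce_queue(q);        /* wait out running ->queue_rq() */
        blk_mq_unquiesce_queue(q);      /* allow dispatch again */
        blk_mq_unfreeze_queue(q);       /* late arrivals now fail fast */
}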