2023-12-11  1f93a7dfd1f8d5ff7a5c53246c7534fe2332d6f4
--- a/kernel/block/bsg-lib.c
+++ b/kernel/block/bsg-lib.c
@@ -1,27 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * BSG helper library
  *
  * Copyright (C) 2008 James Smart, Emulex Corporation
  * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
  * Copyright (C) 2011 Mike Christie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 */
 #include <linux/slab.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/delay.h>
 #include <linux/scatterlist.h>
 #include <linux/bsg-lib.h>
@@ -30,6 +16,12 @@
 #include <scsi/sg.h>
 
 #define uptr64(val) ((void __user *)(uintptr_t)(val))
+
+struct bsg_set {
+        struct blk_mq_tag_set tag_set;
+        bsg_job_fn *job_fn;
+        bsg_timeout_fn *timeout_fn;
+};
 
 static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
 {
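
The new struct bsg_set bundles the blk-mq tag set with the two handler pointers that used to hang off the request_queue (the old q->bsg_job_fn field). Since every blk-mq queue records its tag set, any callback that only sees a request can climb back to the wrapping bsg_set. A minimal sketch of that recovery idiom (the helper name is hypothetical; the patch open-codes the container_of() at each call site):

    /* Inside bsg-lib.c, where struct bsg_set is visible: */
    static inline struct bsg_set *bsg_set_from_queue(struct request_queue *q)
    {
            return container_of(q->tag_set, struct bsg_set, tag_set);
    }
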
@@ -45,11 +37,40 @@
                 fmode_t mode)
 {
         struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+        int ret;
 
         job->request_len = hdr->request_len;
         job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
+        if (IS_ERR(job->request))
+                return PTR_ERR(job->request);
 
-        return PTR_ERR_OR_ZERO(job->request);
+        if (hdr->dout_xfer_len && hdr->din_xfer_len) {
+                job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0);
+                if (IS_ERR(job->bidi_rq)) {
+                        ret = PTR_ERR(job->bidi_rq);
+                        goto out;
+                }
+
+                ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
+                                uptr64(hdr->din_xferp), hdr->din_xfer_len,
+                                GFP_KERNEL);
+                if (ret)
+                        goto out_free_bidi_rq;
+
+                job->bidi_bio = job->bidi_rq->bio;
+        } else {
+                job->bidi_rq = NULL;
+                job->bidi_bio = NULL;
+        }
+
+        return 0;
+
+out_free_bidi_rq:
+        if (job->bidi_rq)
+                blk_put_request(job->bidi_rq);
+out:
+        kfree(job->request);
+        return ret;
 }
 
 static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
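
The hunk above replaces the old rq->next_rq bidi plumbing: when a single sg_io_v4 carries both a dout and a din transfer, bsg-lib now allocates its own second request (job->bidi_rq) and maps the din buffer into it. For context, a userspace sketch of what such a bidirectional submission looks like, assuming a bsg device node already opened as bsg_fd (buffers and lengths are placeholders):

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <scsi/sg.h>            /* SG_IO */
    #include <linux/bsg.h>          /* struct sg_io_v4, BSG_PROTOCOL_SCSI, ... */

    int send_bidi(int bsg_fd, void *req, unsigned int req_len,
                  void *out, unsigned int out_len,
                  void *in, unsigned int in_len)
    {
            struct sg_io_v4 hdr;

            memset(&hdr, 0, sizeof(hdr));
            hdr.guard = 'Q';
            hdr.protocol = BSG_PROTOCOL_SCSI;
            hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
            hdr.request = (uintptr_t)req;
            hdr.request_len = req_len;
            hdr.dout_xferp = (uintptr_t)out;        /* data to the device */
            hdr.dout_xfer_len = out_len;
            hdr.din_xferp = (uintptr_t)in;          /* response data back */
            hdr.din_xfer_len = in_len;

            return ioctl(bsg_fd, SG_IO, &hdr);
    }
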
@@ -87,7 +108,7 @@
         /* we assume all request payload was transferred, residual == 0 */
         hdr->dout_resid = 0;
 
-        if (rq->next_rq) {
+        if (job->bidi_rq) {
                 unsigned int rsp_len = job->reply_payload.payload_len;
 
                 if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
@@ -104,6 +125,11 @@
 static void bsg_transport_free_rq(struct request *rq)
 {
         struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+
+        if (job->bidi_rq) {
+                blk_rq_unmap_user(job->bidi_bio);
+                blk_put_request(job->bidi_rq);
+        }
 
         kfree(job->request);
 }
@@ -129,7 +155,7 @@
         kfree(job->request_payload.sg_list);
         kfree(job->reply_payload.sg_list);
 
-        blk_end_request_all(rq, BLK_STS_OK);
+        blk_mq_end_request(rq, BLK_STS_OK);
 }
 
 void bsg_job_put(struct bsg_job *job)
@@ -155,17 +181,20 @@
 void bsg_job_done(struct bsg_job *job, int result,
                   unsigned int reply_payload_rcv_len)
 {
+        struct request *rq = blk_mq_rq_from_pdu(job);
+
         job->result = result;
         job->reply_payload_rcv_len = reply_payload_rcv_len;
-        blk_complete_request(blk_mq_rq_from_pdu(job));
+        if (likely(!blk_should_fake_timeout(rq->q)))
+                blk_mq_complete_request(rq);
 }
 EXPORT_SYMBOL_GPL(bsg_job_done);
 
 /**
- * bsg_softirq_done - softirq done routine for destroying the bsg requests
+ * bsg_complete - softirq done routine for destroying the bsg requests
  * @rq: BSG request that holds the job to be destroyed
  */
-static void bsg_softirq_done(struct request *rq)
+static void bsg_complete(struct request *rq)
 {
         struct bsg_job *job = blk_mq_rq_to_pdu(rq);
 
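
From an LLD's point of view nothing changes at completion time: it still calls bsg_job_done(), which now routes through blk_mq_complete_request() into the bsg_complete() callback instead of the old softirq path. A hypothetical driver-side completion handler (the mydrv_* names are placeholders):

    #include <linux/bsg-lib.h>
    #include <scsi/scsi_cmnd.h>     /* SCSI_SENSE_BUFFERSIZE */

    static void mydrv_cmd_done(struct bsg_job *job, int result,
                               unsigned int bytes_rcvd)
    {
            /* job->reply was allocated by bsg_init_rq() below */
            job->reply_len = SCSI_SENSE_BUFFERSIZE;
            bsg_job_done(job, result, bytes_rcvd);
    }
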
@@ -178,7 +207,7 @@
 
         BUG_ON(!req->nr_phys_segments);
 
-        buf->sg_list = kzalloc(sz, GFP_KERNEL);
+        buf->sg_list = kmalloc(sz, GFP_KERNEL);
         if (!buf->sg_list)
                 return -ENOMEM;
         sg_init_table(buf->sg_list, req->nr_phys_segments);
@@ -194,7 +223,6 @@
  */
 static bool bsg_prepare_job(struct device *dev, struct request *req)
 {
-        struct request *rsp = req->next_rq;
         struct bsg_job *job = blk_mq_rq_to_pdu(req);
         int ret;
 
@@ -205,8 +233,8 @@
                 if (ret)
                         goto failjob_rls_job;
         }
-        if (rsp && rsp->bio) {
-                ret = bsg_map_buffer(&job->reply_payload, rsp);
+        if (job->bidi_rq) {
+                ret = bsg_map_buffer(&job->reply_payload, job->bidi_rq);
                 if (ret)
                         goto failjob_rls_rqst_payload;
         }
@@ -224,54 +252,50 @@
 }
 
 /**
- * bsg_request_fn - generic handler for bsg requests
- * @q: request queue to manage
+ * bsg_queue_rq - generic handler for bsg requests
+ * @hctx: hardware queue
+ * @bd: queue data
  *
  * On error the create_bsg_job function should return a -Exyz error value
  * that will be set to ->result.
  *
  * Drivers/subsys should pass this to the queue init function.
  */
-static void bsg_request_fn(struct request_queue *q)
-        __releases(q->queue_lock)
-        __acquires(q->queue_lock)
+static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
+                                 const struct blk_mq_queue_data *bd)
 {
+        struct request_queue *q = hctx->queue;
         struct device *dev = q->queuedata;
-        struct request *req;
+        struct request *req = bd->rq;
+        struct bsg_set *bset =
+                container_of(q->tag_set, struct bsg_set, tag_set);
+        blk_status_t sts = BLK_STS_IOERR;
         int ret;
 
+        blk_mq_start_request(req);
+
         if (!get_device(dev))
-                return;
+                return BLK_STS_IOERR;
 
-        while (1) {
-                req = blk_fetch_request(q);
-                if (!req)
-                        break;
-                spin_unlock_irq(q->queue_lock);
+        if (!bsg_prepare_job(dev, req))
+                goto out;
 
-                if (!bsg_prepare_job(dev, req)) {
-                        blk_end_request_all(req, BLK_STS_OK);
-                        spin_lock_irq(q->queue_lock);
-                        continue;
-                }
+        ret = bset->job_fn(blk_mq_rq_to_pdu(req));
+        if (!ret)
+                sts = BLK_STS_OK;
 
-                ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
-                spin_lock_irq(q->queue_lock);
-                if (ret)
-                        break;
-        }
-
-        spin_unlock_irq(q->queue_lock);
+out:
         put_device(dev);
-        spin_lock_irq(q->queue_lock);
+        return sts;
 }
 
 /* called right after the request is allocated for the request_queue */
-static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
+static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req,
+                       unsigned int hctx_idx, unsigned int numa_node)
 {
         struct bsg_job *job = blk_mq_rq_to_pdu(req);
 
-        job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
+        job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
         if (!job->reply)
                 return -ENOMEM;
         return 0;
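
bsg_queue_rq() dispatches one request at a time to the job_fn stashed in the bsg_set: a zero return means the job was accepted and will finish asynchronously via bsg_job_done(); any other value is reported as BLK_STS_IOERR. A hypothetical handler, just to show the shape of the contract:

    #include <linux/bsg-lib.h>

    static int mydrv_bsg_job(struct bsg_job *job)
    {
            /*
             * job->request / job->request_payload describe the command;
             * queue it to the hardware here and return 0.  The completion
             * path later calls bsg_job_done(job, result, bytes_received).
             */
            return 0;
    }
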
@@ -289,43 +313,87 @@
         job->dd_data = job + 1;
 }
 
-static void bsg_exit_rq(struct request_queue *q, struct request *req)
+static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
+                       unsigned int hctx_idx)
 {
         struct bsg_job *job = blk_mq_rq_to_pdu(req);
 
         kfree(job->reply);
 }
 
+void bsg_remove_queue(struct request_queue *q)
+{
+        if (q) {
+                struct bsg_set *bset =
+                        container_of(q->tag_set, struct bsg_set, tag_set);
+
+                bsg_unregister_queue(q);
+                blk_cleanup_queue(q);
+                blk_mq_free_tag_set(&bset->tag_set);
+                kfree(bset);
+        }
+}
+EXPORT_SYMBOL_GPL(bsg_remove_queue);
+
+static enum blk_eh_timer_return bsg_timeout(struct request *rq, bool reserved)
+{
+        struct bsg_set *bset =
+                container_of(rq->q->tag_set, struct bsg_set, tag_set);
+
+        if (!bset->timeout_fn)
+                return BLK_EH_DONE;
+        return bset->timeout_fn(rq);
+}
+
+static const struct blk_mq_ops bsg_mq_ops = {
+        .queue_rq         = bsg_queue_rq,
+        .init_request     = bsg_init_rq,
+        .exit_request     = bsg_exit_rq,
+        .initialize_rq_fn = bsg_initialize_rq,
+        .complete         = bsg_complete,
+        .timeout          = bsg_timeout,
+};
+
 /**
  * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
  * @dev: device to attach bsg device to
  * @name: device to give bsg device
  * @job_fn: bsg job handler
+ * @timeout: timeout handler function pointer
  * @dd_job_size: size of LLD data needed for each job
  */
 struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
-                bsg_job_fn *job_fn, int dd_job_size)
+                bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
 {
+        struct bsg_set *bset;
+        struct blk_mq_tag_set *set;
         struct request_queue *q;
-        int ret;
+        int ret = -ENOMEM;
 
-        q = blk_alloc_queue(GFP_KERNEL);
-        if (!q)
+        bset = kzalloc(sizeof(*bset), GFP_KERNEL);
+        if (!bset)
                 return ERR_PTR(-ENOMEM);
-        q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
-        q->init_rq_fn = bsg_init_rq;
-        q->exit_rq_fn = bsg_exit_rq;
-        q->initialize_rq_fn = bsg_initialize_rq;
-        q->request_fn = bsg_request_fn;
 
-        ret = blk_init_allocated_queue(q);
-        if (ret)
-                goto out_cleanup_queue;
+        bset->job_fn = job_fn;
+        bset->timeout_fn = timeout;
+
+        set = &bset->tag_set;
+        set->ops = &bsg_mq_ops;
+        set->nr_hw_queues = 1;
+        set->queue_depth = 128;
+        set->numa_node = NUMA_NO_NODE;
+        set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
+        set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
+        if (blk_mq_alloc_tag_set(set))
+                goto out_tag_set;
+
+        q = blk_mq_init_queue(set);
+        if (IS_ERR(q)) {
+                ret = PTR_ERR(q);
+                goto out_queue;
+        }
 
         q->queuedata = dev;
-        q->bsg_job_fn = job_fn;
-        blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
-        blk_queue_softirq_done(q, bsg_softirq_done);
         blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
         ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
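
Putting the new API together: an LLD now passes its timeout handler to bsg_setup_queue() instead of poking queue fields, and tears down with bsg_remove_queue(). A sketch reusing the hypothetical mydrv_bsg_job from above:

    #include <linux/bsg-lib.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static enum blk_eh_timer_return mydrv_bsg_timeout(struct request *rq)
    {
            return BLK_EH_DONE;     /* or BLK_EH_RESET_TIMER to re-arm */
    }

    static int mydrv_attach_bsg(struct device *dev)
    {
            struct request_queue *q;

            q = bsg_setup_queue(dev, dev_name(dev), mydrv_bsg_job,
                                mydrv_bsg_timeout, 0 /* no LLD data */);
            if (IS_ERR(q))
                    return PTR_ERR(q);
            /* ... on detach: bsg_remove_queue(q); */
            return 0;
    }
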
@@ -338,6 +406,10 @@
         return q;
 out_cleanup_queue:
         blk_cleanup_queue(q);
+out_queue:
+        blk_mq_free_tag_set(set);
+out_tag_set:
+        kfree(bset);
         return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(bsg_setup_queue);