2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/include/linux/blk-mq.h
@@ -10,173 +10,374 @@
 struct blk_flush_queue;
 
 /**
- * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
+ * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
+ * block device
  */
 struct blk_mq_hw_ctx {
 	struct {
+		/** @lock: Protects the dispatch list. */
 		spinlock_t		lock;
+		/**
+		 * @dispatch: Used for requests that are ready to be
+		 * dispatched to the hardware but for some reason (e.g. lack of
+		 * resources) could not be sent to the hardware. As soon as the
+		 * driver can send new requests, requests at this list will
+		 * be sent first for a fairer dispatch.
+		 */
 		struct list_head	dispatch;
-		unsigned long		state;		/* BLK_MQ_S_* flags */
+		/**
+		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
+		 * queue (active, scheduled to restart, stopped).
+		 */
+		unsigned long		state;
 	} ____cacheline_aligned_in_smp;
 
+	/**
+	 * @run_work: Used for scheduling a hardware queue run at a later time.
+	 */
 	struct delayed_work	run_work;
+	/** @cpumask: Map of available CPUs where this hctx can run. */
 	cpumask_var_t		cpumask;
+	/**
+	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
+	 * selection from @cpumask.
+	 */
 	int			next_cpu;
+	/**
+	 * @next_cpu_batch: Counter of how many works left in the batch before
+	 * changing to the next CPU.
+	 */
 	int			next_cpu_batch;
 
-	unsigned long		flags;		/* BLK_MQ_F_* flags */
+	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
+	unsigned long		flags;
 
+	/**
+	 * @sched_data: Pointer owned by the IO scheduler attached to a request
+	 * queue. It's up to the IO scheduler how to use this pointer.
+	 */
 	void			*sched_data;
+	/**
+	 * @queue: Pointer to the request queue that owns this hardware context.
+	 */
 	struct request_queue	*queue;
+	/** @fq: Queue of requests that need to perform a flush operation. */
 	struct blk_flush_queue	*fq;
 
+	/**
+	 * @driver_data: Pointer to data owned by the block driver that created
+	 * this hctx
+	 */
 	void			*driver_data;
 
+	/**
+	 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
+	 * pending request in that software queue.
+	 */
 	struct sbitmap		ctx_map;
 
+	/**
+	 * @dispatch_from: Software queue to be used when no scheduler was
+	 * selected.
+	 */
 	struct blk_mq_ctx	*dispatch_from;
+	/**
+	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
+	 * decide if the hw_queue is busy using Exponential Weighted Moving
+	 * Average algorithm.
+	 */
 	unsigned int		dispatch_busy;
 
-	unsigned int		nr_ctx;
+	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
+	unsigned short		type;
+	/** @nr_ctx: Number of software queues. */
+	unsigned short		nr_ctx;
+	/** @ctxs: Array of software queues. */
 	struct blk_mq_ctx	**ctxs;
 
+	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
 	spinlock_t		dispatch_wait_lock;
+	/**
+	 * @dispatch_wait: Waitqueue to put requests when there is no tag
+	 * available at the moment, to wait for another try in the future.
+	 */
 	wait_queue_entry_t	dispatch_wait;
+
+	/**
+	 * @wait_index: Index of next available dispatch_wait queue to insert
+	 * requests.
+	 */
 	atomic_t		wait_index;
 
+	/**
+	 * @tags: Tags owned by the block driver. A tag at this set is only
+	 * assigned when a request is dispatched from a hardware queue.
+	 */
 	struct blk_mq_tags	*tags;
+	/**
+	 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
+	 * scheduler associated with a request queue, a tag is assigned when
+	 * that request is allocated. Else, this member is not used.
+	 */
 	struct blk_mq_tags	*sched_tags;
 
+	/** @queued: Number of queued requests. */
 	unsigned long		queued;
+	/** @run: Number of dispatched requests. */
 	unsigned long		run;
 #define BLK_MQ_MAX_DISPATCH_ORDER	7
+	/** @dispatched: Number of dispatch requests by queue. */
 	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
+	/** @numa_node: NUMA node the storage adapter has been connected to. */
 	unsigned int		numa_node;
+	/** @queue_num: Index of this hardware queue. */
 	unsigned int		queue_num;
 
+	/**
+	 * @nr_active: Number of active requests. Only used when a tag set is
+	 * shared across request queues.
+	 */
 	atomic_t		nr_active;
-	unsigned int		nr_expired;
 
+	/** @cpuhp_online: List to store request if CPU is going to die */
+	struct hlist_node	cpuhp_online;
+	/** @cpuhp_dead: List to store request if some CPU die. */
 	struct hlist_node	cpuhp_dead;
+	/** @kobj: Kernel object for sysfs. */
 	struct kobject		kobj;
 
+	/** @poll_considered: Count times blk_poll() was called. */
 	unsigned long		poll_considered;
+	/** @poll_invoked: Count how many requests blk_poll() polled. */
 	unsigned long		poll_invoked;
+	/** @poll_success: Count how many polled requests were completed. */
 	unsigned long		poll_success;
 
 #ifdef CONFIG_BLK_DEBUG_FS
+	/**
+	 * @debugfs_dir: debugfs directory for this hardware queue. Named
+	 * as cpu<cpu_number>.
+	 */
 	struct dentry		*debugfs_dir;
+	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
 	struct dentry		*sched_debugfs_dir;
 #endif
 
-	/* Must be the last member - see also blk_mq_hw_ctx_size(). */
-	struct srcu_struct	srcu[0];
+	/**
+	 * @hctx_list: if this hctx is not in use, this is an entry in
+	 * q->unused_hctx_list.
+	 */
+	struct list_head	hctx_list;
+
+	/**
+	 * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
+	 * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
+	 * blk_mq_hw_ctx_size().
+	 */
+	struct srcu_struct	srcu[];
 };
 
+/**
+ * struct blk_mq_queue_map - Map software queues to hardware queues
+ * @mq_map:       CPU ID to hardware queue index map. This is an array
+ *	with nr_cpu_ids elements. Each element has a value in the range
+ *	[@queue_offset, @queue_offset + @nr_queues).
+ * @nr_queues:    Number of hardware queues to map CPU IDs onto.
+ * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
+ *	driver to map each hardware queue type (enum hctx_type) onto a distinct
+ *	set of hardware queues.
+ */
+struct blk_mq_queue_map {
+	unsigned int *mq_map;
+	unsigned int nr_queues;
+	unsigned int queue_offset;
+};
+
+/**
+ * enum hctx_type - Type of hardware queue
+ * @HCTX_TYPE_DEFAULT:	All I/O not otherwise accounted for.
+ * @HCTX_TYPE_READ:	Just for READ I/O.
+ * @HCTX_TYPE_POLL:	Polled I/O of any kind.
+ * @HCTX_MAX_TYPES:	Number of types of hctx.
+ */
+enum hctx_type {
+	HCTX_TYPE_DEFAULT,
+	HCTX_TYPE_READ,
+	HCTX_TYPE_POLL,
+
+	HCTX_MAX_TYPES,
+};
+
+/**
+ * struct blk_mq_tag_set - tag set that can be shared between request queues
+ * @map:	   One or more ctx -> hctx mappings. One map exists for each
+ *		   hardware queue type (enum hctx_type) that the driver wishes
+ *		   to support. There are no restrictions on maps being of the
+ *		   same size, and it's perfectly legal to share maps between
+ *		   types.
+ * @nr_maps:	   Number of elements in the @map array. A number in the range
+ *		   [1, HCTX_MAX_TYPES].
+ * @ops:	   Pointers to functions that implement block driver behavior.
+ * @nr_hw_queues:  Number of hardware queues supported by the block driver that
+ *		   owns this data structure.
+ * @queue_depth:   Number of tags per hardware queue, reserved tags included.
+ * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
+ *		   allocations.
+ * @cmd_size:	   Number of additional bytes to allocate per request. The block
+ *		   driver owns these additional bytes.
+ * @numa_node:	   NUMA node the storage adapter has been connected to.
+ * @timeout:	   Request processing timeout in jiffies.
+ * @flags:	   Zero or more BLK_MQ_F_* flags.
+ * @driver_data:   Pointer to data owned by the block driver that created this
+ *		   tag set.
+ * @active_queues_shared_sbitmap:
+ *		   number of active request queues per tag set.
+ * @__bitmap_tags: A shared tags sbitmap, used over all hctx's
+ * @__breserved_tags:
+ *		   A shared reserved tags sbitmap, used over all hctx's
+ * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
+ *		   elements.
+ * @tag_list_lock: Serializes tag_list accesses.
+ * @tag_list:	   List of the request queues that use this tag set. See also
+ *		   request_queue.tag_set_list.
+ */
 struct blk_mq_tag_set {
-	unsigned int		*mq_map;
+	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
+	unsigned int		nr_maps;
 	const struct blk_mq_ops	*ops;
 	unsigned int		nr_hw_queues;
-	unsigned int		queue_depth;	/* max hw supported */
+	unsigned int		queue_depth;
 	unsigned int		reserved_tags;
-	unsigned int		cmd_size;	/* per-request extra data */
+	unsigned int		cmd_size;
 	int			numa_node;
 	unsigned int		timeout;
-	unsigned int		flags;		/* BLK_MQ_F_* */
+	unsigned int		flags;
 	void			*driver_data;
+	atomic_t		active_queues_shared_sbitmap;
 
+	struct sbitmap_queue	__bitmap_tags;
+	struct sbitmap_queue	__breserved_tags;
 	struct blk_mq_tags	**tags;
 
 	struct mutex		tag_list_lock;
 	struct list_head	tag_list;
 };
 
+/**
+ * struct blk_mq_queue_data - Data about a request inserted in a queue
+ *
+ * @rq:   Request pointer.
+ * @last: If it is the last request in the queue.
+ */
 struct blk_mq_queue_data {
 	struct request *rq;
 	bool last;
 };
 
-typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
-		const struct blk_mq_queue_data *);
-typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
-typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
-typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
-typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
-typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
-		unsigned int, unsigned int);
-typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
-		unsigned int);
-
-typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
+typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
 		bool);
-typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
-typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
-typedef void (cleanup_rq_fn)(struct request *);
+typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
 
-
+/**
+ * struct blk_mq_ops - Callback functions that implements block driver
+ *		       behaviour.
+ */
 struct blk_mq_ops {
-	/*
-	 * Queue request
+	/**
+	 * @queue_rq: Queue a new request from block IO.
 	 */
-	queue_rq_fn		*queue_rq;
+	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
+				 const struct blk_mq_queue_data *);
 
-	/*
-	 * Reserve budget before queue request, once .queue_rq is
+	/**
+	 * @commit_rqs: If a driver uses bd->last to judge when to submit
+	 * requests to hardware, it must define this function. In case of errors
+	 * that make us stop issuing further requests, this hook serves the
+	 * purpose of kicking the hardware (which the last request otherwise
+	 * would have done).
+	 */
+	void (*commit_rqs)(struct blk_mq_hw_ctx *);
+
+	/**
+	 * @get_budget: Reserve budget before queue request, once .queue_rq is
	 * run, it is driver's responsibility to release the
	 * reserved budget. Also we have to handle failure case
	 * of .get_budget for avoiding I/O deadlock.
	 */
-	get_budget_fn		*get_budget;
-	put_budget_fn		*put_budget;
+	bool (*get_budget)(struct request_queue *);
 
-	/*
-	 * Called on request timeout
+	/**
+	 * @put_budget: Release the reserved budget.
	 */
-	timeout_fn		*timeout;
+	void (*put_budget)(struct request_queue *);
 
-	/*
-	 * Called to poll for completion of a specific tag.
+	/**
+	 * @timeout: Called on request timeout.
	 */
-	poll_fn			*poll;
+	enum blk_eh_timer_return (*timeout)(struct request *, bool);
 
-	softirq_done_fn		*complete;
-
-	/*
-	 * Called when the block layer side of a hardware queue has been
-	 * set up, allowing the driver to allocate/init matching structures.
-	 * Ditto for exit/teardown.
+	/**
+	 * @poll: Called to poll for completion of a specific tag.
	 */
-	init_hctx_fn		*init_hctx;
-	exit_hctx_fn		*exit_hctx;
+	int (*poll)(struct blk_mq_hw_ctx *);
 
-	/*
-	 * Called for every command allocated by the block layer to allow
-	 * the driver to set up driver specific data.
+	/**
+	 * @complete: Mark the request as complete.
+	 */
+	void (*complete)(struct request *);
+
+	/**
+	 * @init_hctx: Called when the block layer side of a hardware queue has
+	 * been set up, allowing the driver to allocate/init matching
+	 * structures.
+	 */
+	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
+	/**
+	 * @exit_hctx: Ditto for exit/teardown.
+	 */
+	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+
+	/**
+	 * @init_request: Called for every command allocated by the block layer
+	 * to allow the driver to set up driver specific data.
	 *
	 * Tag greater than or equal to queue_depth is for setting up
	 * flush request.
-	 *
-	 * Ditto for exit/teardown.
	 */
-	init_request_fn		*init_request;
-	exit_request_fn		*exit_request;
-	/* Called from inside blk_get_request() */
+	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
+			    unsigned int, unsigned int);
+	/**
+	 * @exit_request: Ditto for exit/teardown.
+	 */
+	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
+			     unsigned int);
+
+	/**
+	 * @initialize_rq_fn: Called from inside blk_get_request().
+	 */
	void (*initialize_rq_fn)(struct request *rq);
 
-	/*
-	 * Called before freeing one request which isn't completed yet,
-	 * and usually for freeing the driver private data
+	/**
+	 * @cleanup_rq: Called before freeing one request which isn't completed
+	 * yet, and usually for freeing the driver private data.
	 */
-	cleanup_rq_fn		*cleanup_rq;
+	void (*cleanup_rq)(struct request *);
 
-	map_queues_fn		*map_queues;
+	/**
+	 * @busy: If set, returns whether or not this queue currently is busy.
+	 */
+	bool (*busy)(struct request_queue *);
+
+	/**
+	 * @map_queues: This allows drivers specify their own queue mapping by
+	 * overriding the setup-time function that builds the mq_map.
+	 */
+	int (*map_queues)(struct blk_mq_tag_set *set);
 
 #ifdef CONFIG_BLK_DEBUG_FS
-	/*
-	 * Used by the debugfs implementation to show driver-specific
+	/**
+	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
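
The hunk above documents the two registration structures a driver fills in, struct blk_mq_ops and struct blk_mq_tag_set. As a reading aid, here is a minimal, hedged sketch of how they are typically wired together; the "sketch_" identifiers, the queue depth, and the immediate completion are all hypothetical, not part of this header.

#include <linux/blk-mq.h>

/* Hypothetical per-request PDU; see cmd_size below. */
struct sketch_cmd {
	int status;
};

static blk_status_t sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	/* ... hand the request to the (imaginary) hardware here ... */
	blk_mq_end_request(rq, BLK_STS_OK);	/* immediate completion, for the sketch only */
	return BLK_STS_OK;
}

static const struct blk_mq_ops sketch_mq_ops = {
	.queue_rq	= sketch_queue_rq,
};

static struct blk_mq_tag_set sketch_set;

static int sketch_init_tag_set(void)
{
	sketch_set.ops		= &sketch_mq_ops;
	sketch_set.nr_hw_queues	= 1;
	sketch_set.nr_maps	= 1;		/* HCTX_TYPE_DEFAULT only */
	sketch_set.queue_depth	= 64;
	sketch_set.numa_node	= NUMA_NO_NODE;
	sketch_set.cmd_size	= sizeof(struct sketch_cmd);
	sketch_set.flags	= BLK_MQ_F_SHOULD_MERGE;

	return blk_mq_alloc_tag_set(&sketch_set);	/* declared further down in this header */
}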
@@ -185,16 +386,30 @@
 
 enum {
 	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
-	BLK_MQ_F_TAG_SHARED	= 1 << 1,
-	BLK_MQ_F_SG_MERGE	= 1 << 2,
+	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
+	/*
+	 * Set when this device requires underlying blk-mq device for
+	 * completing IO:
+	 */
+	BLK_MQ_F_STACKING	= 1 << 2,
+	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
 	BLK_MQ_F_BLOCKING	= 1 << 5,
+	/* Do not allow an I/O scheduler to be configured. */
 	BLK_MQ_F_NO_SCHED	= 1 << 6,
+	/*
+	 * Select 'none' during queue registration in case of a single hwq
+	 * or shared hwqs instead of 'mq-deadline'.
+	 */
+	BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7,
 	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
 	BLK_MQ_F_ALLOC_POLICY_BITS = 1,
 
 	BLK_MQ_S_STOPPED	= 0,
 	BLK_MQ_S_TAG_ACTIVE	= 1,
 	BLK_MQ_S_SCHED_RESTART	= 2,
+
+	/* hw queue is inactive after all its CPUs become offline */
+	BLK_MQ_S_INACTIVE	= 3,
 
 	BLK_MQ_MAX_DEPTH	= 10240,
 
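
A short, hedged illustration of how the BLK_MQ_F_* flags above end up in blk_mq_tag_set.flags; the helper name and the boolean knobs are hypothetical.

static unsigned int sketch_tag_set_flags(bool blocking, bool no_sched)
{
	unsigned int flags = BLK_MQ_F_SHOULD_MERGE;	/* allow bio merging */

	if (blocking)
		flags |= BLK_MQ_F_BLOCKING;	/* ->queue_rq may sleep */
	if (no_sched)
		flags |= BLK_MQ_F_NO_SCHED;	/* no I/O scheduler for this queue */

	return flags;
}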
@@ -208,9 +423,15 @@
 		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
+		void *queuedata);
 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
-		struct request_queue *q);
-int blk_mq_register_dev(struct device *, struct request_queue *);
+		struct request_queue *q,
+		bool elevator_init);
+struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
+		const struct blk_mq_ops *ops,
+		unsigned int queue_depth,
+		unsigned int set_flags);
 void blk_mq_unregister_dev(struct device *, struct request_queue *);
 
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
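
For the single-hardware-queue case, the blk_mq_init_sq_queue() helper added above wraps both the tag-set and the queue setup. A hedged sketch, reusing sketch_mq_ops from the earlier sketch; the depth and flags are illustrative.

#include <linux/err.h>

static struct blk_mq_tag_set sketch_sq_set;

static struct request_queue *sketch_create_queue(void)
{
	struct request_queue *q;

	q = blk_mq_init_sq_queue(&sketch_sq_set, &sketch_mq_ops,
				 64 /* queue_depth */,
				 BLK_MQ_F_SHOULD_MERGE);
	if (IS_ERR(q))
		return NULL;	/* the helper returns an ERR_PTR() on failure */
	return q;
}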
@@ -219,17 +440,16 @@
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 
 void blk_mq_free_request(struct request *rq);
-bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
+
+bool blk_mq_queue_inflight(struct request_queue *q);
 
 enum {
 	/* return when out of requests */
 	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
 	/* allocate from reserved pool */
 	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
-	/* allocate internal/sched tag */
-	BLK_MQ_REQ_INTERNAL	= (__force blk_mq_req_flags_t)(1 << 2),
-	/* set RQF_PREEMPT */
-	BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
+	/* set RQF_PM */
+	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
 };
 
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
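
The blk_mq_alloc_request() declaration is cut off at the hunk boundary above; its remaining parameter takes a mask built from the BLK_MQ_REQ_* values. A hedged sketch of allocating from the reserved pool without sleeping; the opcode choice is only illustrative.

static struct request *sketch_alloc_reserved_rq(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN,
				  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return NULL;	/* no reserved tag available right now */
	return rq;
}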
@@ -256,20 +476,34 @@
 	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
 }
 
-void __blk_mq_complete_request_remote_work(struct work_struct *work);
-int blk_mq_request_started(struct request *rq);
+/**
+ * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
+ * @rq: target request.
+ */
+static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
+{
+	return READ_ONCE(rq->state);
+}
+
+static inline int blk_mq_request_started(struct request *rq)
+{
+	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
+}
+
+static inline int blk_mq_request_completed(struct request *rq)
+{
+	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
+}
+
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, blk_status_t error);
 void __blk_mq_end_request(struct request *rq, blk_status_t error);
 
 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
-				bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 void blk_mq_complete_request(struct request *rq);
-bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
-		struct bio *bio);
+bool blk_mq_complete_request_remote(struct request *rq);
 bool blk_mq_queue_stopped(struct request_queue *q);
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
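
The blk_mq_rq_state(), blk_mq_request_started() and blk_mq_request_completed() helpers introduced above are what completion paths usually consult before calling blk_mq_complete_request(). A hedged sketch of an IRQ-side completion path; blk_mq_tag_to_rq() is declared elsewhere in this header, and the tag handling is imaginary.

static void sketch_complete_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct request *rq = blk_mq_tag_to_rq(hctx->tags, tag);

	if (!rq || !blk_mq_request_started(rq))
		return;			/* spurious or already recycled tag */

	if (!blk_mq_request_completed(rq))
		blk_mq_complete_request(rq);	/* ends up in ->complete() */
}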
@@ -280,10 +514,12 @@
 void blk_mq_quiesce_queue(struct request_queue *q);
 void blk_mq_unquiesce_queue(struct request_queue *q);
 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
+void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 		busy_tag_iter_fn *fn, void *priv);
+void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
 void blk_freeze_queue_start(struct request_queue *q);
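
blk_mq_tagset_busy_iter() plus the newly added blk_mq_tagset_wait_completed_request() form the usual teardown pair when a device stops answering. A hedged sketch; completing every request directly is only a placeholder for whatever error status a real driver would record first.

static bool sketch_cancel_rq(struct request *rq, void *data, bool reserved)
{
	blk_mq_complete_request(rq);	/* a real driver records an error status first */
	return true;			/* keep iterating over busy tags */
}

static void sketch_cancel_all(struct blk_mq_tag_set *set)
{
	blk_mq_tagset_busy_iter(set, sketch_cancel_rq, NULL);
	blk_mq_tagset_wait_completed_request(set);
}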
@@ -291,33 +527,45 @@
 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
 				     unsigned long timeout);
 
-int blk_mq_map_queues(struct blk_mq_tag_set *set);
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 
 void blk_mq_quiesce_queue_nowait(struct request_queue *q);
 
-/**
- * blk_mq_mark_complete() - Set request state to complete
- * @rq: request to set to complete state
- *
- * Returns true if request state was successfully set to complete. If
- * successful, the caller is responsibile for seeing this request is ended, as
- * blk_mq_complete_request will not work again.
- */
-static inline bool blk_mq_mark_complete(struct request *rq)
+unsigned int blk_mq_rq_cpu(struct request *rq);
+
+bool __blk_should_fake_timeout(struct request_queue *q);
+static inline bool blk_should_fake_timeout(struct request_queue *q)
 {
-	return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
-			MQ_RQ_IN_FLIGHT;
+	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
+	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
+		return __blk_should_fake_timeout(q);
+	return false;
 }
 
-/*
+/**
+ * blk_mq_rq_from_pdu - cast a PDU to a request
+ * @pdu: the PDU (Protocol Data Unit) to be casted
+ *
+ * Return: request
+ *
  * Driver command data is immediately after the request. So subtract request
- * size to get back to the original request, add request size to get the PDU.
+ * size to get back to the original request.
  */
 static inline struct request *blk_mq_rq_from_pdu(void *pdu)
 {
 	return pdu - sizeof(struct request);
 }
+
+/**
+ * blk_mq_rq_to_pdu - cast a request to a PDU
+ * @rq: the request to be casted
+ *
+ * Return: pointer to the PDU
+ *
+ * Driver command data is immediately after the request. So add request to get
+ * the PDU.
+ */
 static inline void *blk_mq_rq_to_pdu(struct request *rq)
 {
 	return rq + 1;
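
The two PDU helpers rely on the driver having asked for cmd_size extra bytes per request in its tag set. A hedged sketch of the round trip; struct sketch_pdu and its members are hypothetical.

struct sketch_pdu {
	u16	hw_tag;
	int	result;
};

/* In the tag set: sketch_set.cmd_size = sizeof(struct sketch_pdu); */

static void sketch_init_pdu(struct request *rq)
{
	struct sketch_pdu *pdu = blk_mq_rq_to_pdu(rq);

	pdu->result = 0;
	/* And back from the PDU to the request, e.g. in a completion path: */
	WARN_ON(blk_mq_rq_from_pdu(pdu) != rq);
}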
@@ -331,10 +579,22 @@
 	for ((i) = 0; (i) < (hctx)->nr_ctx && \
 	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
 
+static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
+		struct request *rq)
+{
+	if (rq->tag != -1)
+		return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
+
+	return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
+			BLK_QC_T_INTERNAL;
+}
+
 static inline void blk_mq_cleanup_rq(struct request *rq)
 {
 	if (rq->q->mq_ops->cleanup_rq)
 		rq->q->mq_ops->cleanup_rq(rq);
 }
 
+blk_qc_t blk_mq_submit_bio(struct bio *bio);
+
 #endif
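
Finally, blk_mq_cleanup_rq() above only forwards to the driver's optional ->cleanup_rq() hook. A hedged sketch of such a hook dropping a per-request resource; the bounce buffer is hypothetical and kfree() needs <linux/slab.h>.

struct sketch_cleanup_pdu {
	void *bounce_buf;	/* hypothetical per-request allocation */
};

static void sketch_cleanup_rq(struct request *rq)
{
	struct sketch_cleanup_pdu *pdu = blk_mq_rq_to_pdu(rq);

	kfree(pdu->bounce_buf);
	pdu->bounce_buf = NULL;
}
/* Wired up as .cleanup_rq = sketch_cleanup_rq in the blk_mq_ops. */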