.. | .. |
---|
4 | 4 | |
---|
5 | 5 | #include <linux/sched.h> |
---|
6 | 6 | #include <linux/sched/clock.h> |
---|
7 | | - |
---|
8 | | -#ifdef CONFIG_BLOCK |
---|
9 | | - |
---|
10 | 7 | #include <linux/major.h> |
---|
11 | 8 | #include <linux/genhd.h> |
---|
12 | 9 | #include <linux/list.h> |
---|
13 | 10 | #include <linux/llist.h> |
---|
| 11 | +#include <linux/minmax.h> |
---|
14 | 12 | #include <linux/timer.h> |
---|
15 | 13 | #include <linux/workqueue.h> |
---|
16 | 14 | #include <linux/pagemap.h> |
---|
.. | .. |
---|
27 | 25 | #include <linux/percpu-refcount.h> |
---|
28 | 26 | #include <linux/scatterlist.h> |
---|
29 | 27 | #include <linux/blkzoned.h> |
---|
| 28 | +#include <linux/pm.h> |
---|
| 29 | +#include <linux/android_kabi.h> |
---|
| 30 | +#include <linux/android_vendor.h> |
---|
30 | 31 | |
---|
31 | 32 | struct module; |
---|
32 | 33 | struct scsi_ioctl_command; |
---|
.. | .. |
---|
43 | 44 | struct rq_qos; |
---|
44 | 45 | struct blk_queue_stats; |
---|
45 | 46 | struct blk_stat_callback; |
---|
46 | | -struct keyslot_manager; |
---|
| 47 | +struct blk_keyslot_manager; |
---|
47 | 48 | |
---|
48 | 49 | #define BLKDEV_MIN_RQ 4 |
---|
49 | 50 | #define BLKDEV_MAX_RQ 128 /* Default maximum */ |
---|
.. | .. |
---|
51 | 52 | /* Must be consistent with blk_mq_poll_stats_bkt() */ |
---|
52 | 53 | #define BLK_MQ_POLL_STATS_BKTS 16 |
---|
53 | 54 | |
---|
| 55 | +/* Doing classic polling */ |
---|
| 56 | +#define BLK_MQ_POLL_CLASSIC -1 |
---|
| 57 | + |
---|
54 | 58 | /* |
---|
55 | 59 | * Maximum number of blkcg policies allowed to be registered concurrently. |
---|
56 | 60 | * Defined here to simplify include dependency. |
---|
57 | 61 | */ |
---|
58 | 62 | #define BLKCG_MAX_POLS 5 |
---|
59 | 63 | |
---|
| 64 | +static inline int blk_validate_block_size(unsigned int bsize) |
---|
| 65 | +{ |
---|
| 66 | + if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize)) |
---|
| 67 | + return -EINVAL; |
---|
| 68 | + |
---|
| 69 | + return 0; |
---|
| 70 | +} |
---|
| 71 | + |
---|
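A minimal usage sketch for the new helper above (the wrapper name is illustrative): a driver can reject an unsupported logical block size before applying it to its queue.

    static int example_set_block_size(struct request_queue *q, unsigned int bsize)
    {
            int ret;

            /* -EINVAL unless bsize is a power of two in [512, PAGE_SIZE] */
            ret = blk_validate_block_size(bsize);
            if (ret)
                    return ret;

            blk_queue_logical_block_size(q, bsize);
            return 0;
    }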
60 | 72 | typedef void (rq_end_io_fn)(struct request *, blk_status_t); |
---|
61 | | - |
---|
62 | | -#define BLK_RL_SYNCFULL (1U << 0) |
---|
63 | | -#define BLK_RL_ASYNCFULL (1U << 1) |
---|
64 | | - |
---|
65 | | -struct request_list { |
---|
66 | | - struct request_queue *q; /* the queue this rl belongs to */ |
---|
67 | | -#ifdef CONFIG_BLK_CGROUP |
---|
68 | | - struct blkcg_gq *blkg; /* blkg this request pool belongs to */ |
---|
69 | | -#endif |
---|
70 | | - /* |
---|
71 | | - * count[], starved[], and wait[] are indexed by |
---|
72 | | - * BLK_RW_SYNC/BLK_RW_ASYNC |
---|
73 | | - */ |
---|
74 | | - int count[2]; |
---|
75 | | - int starved[2]; |
---|
76 | | - mempool_t *rq_pool; |
---|
77 | | - wait_queue_head_t wait[2]; |
---|
78 | | - unsigned int flags; |
---|
79 | | -}; |
---|
80 | 73 | |
---|
81 | 74 | /* |
---|
82 | 75 | * request flags */ |
---|
.. | .. |
---|
86 | 79 | #define RQF_SORTED ((__force req_flags_t)(1 << 0)) |
---|
87 | 80 | /* drive already may have started this one */ |
---|
88 | 81 | #define RQF_STARTED ((__force req_flags_t)(1 << 1)) |
---|
89 | | -/* uses tagged queueing */ |
---|
90 | | -#define RQF_QUEUED ((__force req_flags_t)(1 << 2)) |
---|
91 | 82 | /* may not be passed by ioscheduler */ |
---|
92 | 83 | #define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3)) |
---|
93 | 84 | /* request for flush sequence */ |
---|
.. | .. |
---|
98 | 89 | #define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) |
---|
99 | 90 | /* don't call prep for this one */ |
---|
100 | 91 | #define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) |
---|
101 | | -/* set for "ide_preempt" requests and also for requests for which the SCSI |
---|
102 | | - "quiesce" state must be ignored. */ |
---|
103 | | -#define RQF_PREEMPT ((__force req_flags_t)(1 << 8)) |
---|
104 | | -/* contains copies of user pages */ |
---|
105 | | -#define RQF_COPY_USER ((__force req_flags_t)(1 << 9)) |
---|
106 | 92 | /* vaguely specified driver internal error. Ignored by the block layer */ |
---|
107 | 93 | #define RQF_FAILED ((__force req_flags_t)(1 << 10)) |
---|
108 | 94 | /* don't warn about errors */ |
---|
109 | 95 | #define RQF_QUIET ((__force req_flags_t)(1 << 11)) |
---|
110 | 96 | /* elevator private data attached */ |
---|
111 | 97 | #define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) |
---|
112 | | -/* account I/O stat */ |
---|
| 98 | +/* account into disk and partition IO statistics */ |
---|
113 | 99 | #define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) |
---|
114 | 100 | /* request came from our alloc pool */ |
---|
115 | 101 | #define RQF_ALLOCED ((__force req_flags_t)(1 << 14)) |
---|
.. | .. |
---|
117 | 103 | #define RQF_PM ((__force req_flags_t)(1 << 15)) |
---|
118 | 104 | /* on IO scheduler merge hash */ |
---|
119 | 105 | #define RQF_HASHED ((__force req_flags_t)(1 << 16)) |
---|
120 | | -/* IO stats tracking on */ |
---|
| 106 | +/* track IO completion time */ |
---|
121 | 107 | #define RQF_STATS ((__force req_flags_t)(1 << 17)) |
---|
122 | 108 | /* Look at ->special_vec for the actual data payload instead of the |
---|
123 | 109 | bio chain. */ |
---|
.. | .. |
---|
151 | 137 | struct request { |
---|
152 | 138 | struct request_queue *q; |
---|
153 | 139 | struct blk_mq_ctx *mq_ctx; |
---|
| 140 | + struct blk_mq_hw_ctx *mq_hctx; |
---|
154 | 141 | |
---|
155 | | - int cpu; |
---|
156 | 142 | unsigned int cmd_flags; /* op and common flags */ |
---|
157 | 143 | req_flags_t rq_flags; |
---|
158 | 144 | |
---|
| 145 | + int tag; |
---|
159 | 146 | int internal_tag; |
---|
160 | 147 | |
---|
161 | 148 | /* the following two fields are internal, NEVER access directly */ |
---|
162 | 149 | unsigned int __data_len; /* total data len */ |
---|
163 | | - int tag; |
---|
164 | 150 | sector_t __sector; /* sector cursor */ |
---|
165 | 151 | |
---|
166 | 152 | struct bio *bio; |
---|
.. | .. |
---|
177 | 163 | */ |
---|
178 | 164 | union { |
---|
179 | 165 | struct hlist_node hash; /* merge hash */ |
---|
180 | | - struct list_head ipi_list; |
---|
| 166 | + struct llist_node ipi_list; |
---|
181 | 167 | }; |
---|
182 | 168 | |
---|
183 | 169 | /* |
---|
.. | .. |
---|
213 | 199 | |
---|
214 | 200 | struct gendisk *rq_disk; |
---|
215 | 201 | struct hd_struct *part; |
---|
216 | | - /* Time that I/O was submitted to the kernel. */ |
---|
| 202 | +#ifdef CONFIG_BLK_RQ_ALLOC_TIME |
---|
| 203 | + /* Time that the first bio started allocating this request. */ |
---|
| 204 | + u64 alloc_time_ns; |
---|
| 205 | +#endif |
---|
| 206 | + /* Time that this request was allocated for this IO. */ |
---|
217 | 207 | u64 start_time_ns; |
---|
218 | 208 | /* Time that I/O was submitted to the device. */ |
---|
219 | 209 | u64 io_start_time_ns; |
---|
.. | .. |
---|
221 | 211 | #ifdef CONFIG_BLK_WBT |
---|
222 | 212 | unsigned short wbt_flags; |
---|
223 | 213 | #endif |
---|
224 | | -#ifdef CONFIG_BLK_DEV_THROTTLING_LOW |
---|
225 | | - unsigned short throtl_size; |
---|
226 | | -#endif |
---|
| 214 | + /* |
---|
| 215 | + * rq sectors used for blk stats. It has the same value |
---|
| 216 | + * with blk_rq_sectors(rq), except that it never be zeroed |
---|
| 217 | + * by completion. |
---|
| 218 | + */ |
---|
| 219 | + unsigned short stats_sectors; |
---|
227 | 220 | |
---|
228 | 221 | /* |
---|
229 | 222 | * Number of scatter-gather DMA addr+len pairs after |
---|
.. | .. |
---|
235 | 228 | unsigned short nr_integrity_segments; |
---|
236 | 229 | #endif |
---|
237 | 230 | |
---|
| 231 | +#ifdef CONFIG_BLK_INLINE_ENCRYPTION |
---|
| 232 | + struct bio_crypt_ctx *crypt_ctx; |
---|
| 233 | + struct blk_ksm_keyslot *crypt_keyslot; |
---|
| 234 | +#endif |
---|
| 235 | + |
---|
238 | 236 | unsigned short write_hint; |
---|
239 | 237 | unsigned short ioprio; |
---|
240 | | - |
---|
241 | | - void *special; /* opaque pointer available for LLD use */ |
---|
242 | | - |
---|
243 | | - unsigned int extra_len; /* length of alignment and padding */ |
---|
244 | 238 | |
---|
245 | 239 | enum mq_rq_state state; |
---|
246 | 240 | refcount_t ref; |
---|
247 | 241 | |
---|
248 | 242 | unsigned int timeout; |
---|
249 | | - |
---|
250 | | - /* access through blk_rq_set_deadline, blk_rq_deadline */ |
---|
251 | | - unsigned long __deadline; |
---|
252 | | - |
---|
253 | | - struct list_head timeout_list; |
---|
| 243 | + unsigned long deadline; |
---|
254 | 244 | |
---|
255 | 245 | union { |
---|
256 | 246 | struct __call_single_data csd; |
---|
.. | .. |
---|
263 | 253 | rq_end_io_fn *end_io; |
---|
264 | 254 | void *end_io_data; |
---|
265 | 255 | |
---|
266 | | - /* for bidi */ |
---|
267 | | - struct request *next_rq; |
---|
268 | | - |
---|
269 | | -#ifdef CONFIG_BLK_CGROUP |
---|
270 | | - struct request_list *rl; /* rl this rq is alloced from */ |
---|
271 | | -#endif |
---|
| 256 | + ANDROID_KABI_RESERVE(1); |
---|
272 | 257 | }; |
---|
273 | 258 | |
---|
274 | 259 | static inline bool blk_op_is_scsi(unsigned int op) |
---|
.. | .. |
---|
312 | 297 | |
---|
313 | 298 | struct blk_queue_ctx; |
---|
314 | 299 | |
---|
315 | | -typedef void (request_fn_proc) (struct request_queue *q); |
---|
316 | | -typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio); |
---|
317 | | -typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t); |
---|
318 | | -typedef int (prep_rq_fn) (struct request_queue *, struct request *); |
---|
319 | | -typedef void (unprep_rq_fn) (struct request_queue *, struct request *); |
---|
320 | | - |
---|
321 | 300 | struct bio_vec; |
---|
322 | | -typedef void (softirq_done_fn)(struct request *); |
---|
323 | | -typedef int (dma_drain_needed_fn)(struct request *); |
---|
324 | | -typedef int (lld_busy_fn) (struct request_queue *q); |
---|
325 | | -typedef int (bsg_job_fn) (struct bsg_job *); |
---|
326 | | -typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t); |
---|
327 | | -typedef void (exit_rq_fn)(struct request_queue *, struct request *); |
---|
328 | 301 | |
---|
329 | 302 | enum blk_eh_timer_return { |
---|
330 | 303 | BLK_EH_DONE, /* drivers has completed the command */ |
---|
331 | 304 | BLK_EH_RESET_TIMER, /* reset timer and try again */ |
---|
332 | 305 | }; |
---|
333 | 306 | |
---|
334 | | -typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *); |
---|
335 | | - |
---|
336 | 307 | enum blk_queue_state { |
---|
337 | 308 | Queue_down, |
---|
338 | 309 | Queue_up, |
---|
339 | 310 | }; |
---|
340 | 311 | |
---|
341 | | -struct blk_queue_tag { |
---|
342 | | - struct request **tag_index; /* map of busy tags */ |
---|
343 | | - unsigned long *tag_map; /* bit map of free/busy tags */ |
---|
344 | | - int max_depth; /* what we will send to device */ |
---|
345 | | - int real_max_depth; /* what the array can hold */ |
---|
346 | | - atomic_t refcnt; /* map can be shared */ |
---|
347 | | - int alloc_policy; /* tag allocation policy */ |
---|
348 | | - int next_tag; /* next tag */ |
---|
349 | | -}; |
---|
350 | 312 | #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ |
---|
351 | 313 | #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ |
---|
352 | 314 | |
---|
.. | .. |
---|
355 | 317 | |
---|
356 | 318 | /* |
---|
357 | 319 | * Zoned block device models (zoned limit). |
---|
| 320 | + * |
---|
| 321 | + * Note: This needs to be ordered from the least to the most severe |
---|
| 322 | + * restrictions for the inheritance in blk_stack_limits() to work. |
---|
358 | 323 | */ |
---|
359 | 324 | enum blk_zoned_model { |
---|
360 | | - BLK_ZONED_NONE, /* Regular block device */ |
---|
361 | | - BLK_ZONED_HA, /* Host-aware zoned block device */ |
---|
362 | | - BLK_ZONED_HM, /* Host-managed zoned block device */ |
---|
| 325 | + BLK_ZONED_NONE = 0, /* Regular block device */ |
---|
| 326 | + BLK_ZONED_HA, /* Host-aware zoned block device */ |
---|
| 327 | + BLK_ZONED_HM, /* Host-managed zoned block device */ |
---|
363 | 328 | }; |
---|
364 | 329 | |
---|
365 | 330 | struct queue_limits { |
---|
.. | .. |
---|
381 | 346 | unsigned int max_hw_discard_sectors; |
---|
382 | 347 | unsigned int max_write_same_sectors; |
---|
383 | 348 | unsigned int max_write_zeroes_sectors; |
---|
| 349 | + unsigned int max_zone_append_sectors; |
---|
384 | 350 | unsigned int discard_granularity; |
---|
385 | 351 | unsigned int discard_alignment; |
---|
386 | 352 | |
---|
.. | .. |
---|
390 | 356 | |
---|
391 | 357 | unsigned char misaligned; |
---|
392 | 358 | unsigned char discard_misaligned; |
---|
393 | | - unsigned char cluster; |
---|
394 | 359 | unsigned char raid_partial_stripes_expensive; |
---|
395 | 360 | enum blk_zoned_model zoned; |
---|
| 361 | + |
---|
| 362 | + ANDROID_KABI_RESERVE(1); |
---|
396 | 363 | }; |
---|
| 364 | + |
---|
| 365 | +typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx, |
---|
| 366 | + void *data); |
---|
| 367 | + |
---|
| 368 | +void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model); |
---|
397 | 369 | |
---|
398 | 370 | #ifdef CONFIG_BLK_DEV_ZONED |
---|
399 | 371 | |
---|
400 | | -struct blk_zone_report_hdr { |
---|
401 | | - unsigned int nr_zones; |
---|
402 | | - u8 padding[60]; |
---|
403 | | -}; |
---|
404 | | - |
---|
405 | | -extern int blkdev_report_zones(struct block_device *bdev, |
---|
406 | | - sector_t sector, struct blk_zone *zones, |
---|
407 | | - unsigned int *nr_zones, gfp_t gfp_mask); |
---|
408 | | -extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors, |
---|
409 | | - sector_t nr_sectors, gfp_t gfp_mask); |
---|
| 372 | +#define BLK_ALL_ZONES ((unsigned int)-1) |
---|
| 373 | +int blkdev_report_zones(struct block_device *bdev, sector_t sector, |
---|
| 374 | + unsigned int nr_zones, report_zones_cb cb, void *data); |
---|
| 375 | +unsigned int blkdev_nr_zones(struct gendisk *disk); |
---|
| 376 | +extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op, |
---|
| 377 | + sector_t sectors, sector_t nr_sectors, |
---|
| 378 | + gfp_t gfp_mask); |
---|
| 379 | +int blk_revalidate_disk_zones(struct gendisk *disk, |
---|
| 380 | + void (*update_driver_data)(struct gendisk *disk)); |
---|
410 | 381 | |
---|
411 | 382 | extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, |
---|
412 | 383 | unsigned int cmd, unsigned long arg); |
---|
413 | | -extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode, |
---|
414 | | - unsigned int cmd, unsigned long arg); |
---|
| 384 | +extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode, |
---|
| 385 | + unsigned int cmd, unsigned long arg); |
---|
415 | 386 | |
---|
416 | 387 | #else /* CONFIG_BLK_DEV_ZONED */ |
---|
| 388 | + |
---|
| 389 | +static inline unsigned int blkdev_nr_zones(struct gendisk *disk) |
---|
| 390 | +{ |
---|
| 391 | + return 0; |
---|
| 392 | +} |
---|
417 | 393 | |
---|
418 | 394 | static inline int blkdev_report_zones_ioctl(struct block_device *bdev, |
---|
419 | 395 | fmode_t mode, unsigned int cmd, |
---|
.. | .. |
---|
422 | 398 | return -ENOTTY; |
---|
423 | 399 | } |
---|
424 | 400 | |
---|
425 | | -static inline int blkdev_reset_zones_ioctl(struct block_device *bdev, |
---|
426 | | - fmode_t mode, unsigned int cmd, |
---|
427 | | - unsigned long arg) |
---|
| 401 | +static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev, |
---|
| 402 | + fmode_t mode, unsigned int cmd, |
---|
| 403 | + unsigned long arg) |
---|
428 | 404 | { |
---|
429 | 405 | return -ENOTTY; |
---|
430 | 406 | } |
---|
.. | .. |
---|
432 | 408 | #endif /* CONFIG_BLK_DEV_ZONED */ |
---|
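A minimal sketch of the callback-based zone reporting API above (helper names are illustrative). blkdev_report_zones() invokes the callback once per zone and returns the number of zones reported, or a negative errno.

    static int example_count_seq_zone(struct blk_zone *zone, unsigned int idx,
                                      void *data)
    {
            unsigned int *nr_seq = data;

            if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
                    (*nr_seq)++;
            return 0;
    }

    static int example_nr_seq_zones(struct block_device *bdev, unsigned int *nr_seq)
    {
            int ret;

            *nr_seq = 0;
            ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
                                      example_count_seq_zone, nr_seq);
            return ret < 0 ? ret : 0;
    }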
433 | 409 | |
---|
434 | 410 | struct request_queue { |
---|
435 | | - /* |
---|
436 | | - * Together with queue_head for cacheline sharing |
---|
437 | | - */ |
---|
438 | | - struct list_head queue_head; |
---|
439 | 411 | struct request *last_merge; |
---|
440 | 412 | struct elevator_queue *elevator; |
---|
441 | | - int nr_rqs[2]; /* # allocated [a]sync rqs */ |
---|
442 | | - int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ |
---|
| 413 | + |
---|
| 414 | + struct percpu_ref q_usage_counter; |
---|
443 | 415 | |
---|
444 | 416 | struct blk_queue_stats *stats; |
---|
445 | 417 | struct rq_qos *rq_qos; |
---|
446 | 418 | |
---|
447 | | - /* |
---|
448 | | - * If blkcg is not used, @q->root_rl serves all requests. If blkcg |
---|
449 | | - * is used, root blkg allocates from @q->root_rl and all other |
---|
450 | | - * blkgs from their own blkg->rl. Which one to use should be |
---|
451 | | - * determined using bio_request_list(). |
---|
452 | | - */ |
---|
453 | | - struct request_list root_rl; |
---|
454 | | - |
---|
455 | | - request_fn_proc *request_fn; |
---|
456 | | - make_request_fn *make_request_fn; |
---|
457 | | - poll_q_fn *poll_fn; |
---|
458 | | - prep_rq_fn *prep_rq_fn; |
---|
459 | | - unprep_rq_fn *unprep_rq_fn; |
---|
460 | | - softirq_done_fn *softirq_done_fn; |
---|
461 | | - rq_timed_out_fn *rq_timed_out_fn; |
---|
462 | | - dma_drain_needed_fn *dma_drain_needed; |
---|
463 | | - lld_busy_fn *lld_busy_fn; |
---|
464 | | - /* Called just after a request is allocated */ |
---|
465 | | - init_rq_fn *init_rq_fn; |
---|
466 | | - /* Called just before a request is freed */ |
---|
467 | | - exit_rq_fn *exit_rq_fn; |
---|
468 | | - /* Called from inside blk_get_request() */ |
---|
469 | | - void (*initialize_rq_fn)(struct request *rq); |
---|
470 | | - |
---|
471 | 419 | const struct blk_mq_ops *mq_ops; |
---|
472 | | - |
---|
473 | | - unsigned int *mq_map; |
---|
474 | 420 | |
---|
475 | 421 | /* sw queues */ |
---|
476 | 422 | struct blk_mq_ctx __percpu *queue_ctx; |
---|
477 | | - unsigned int nr_queues; |
---|
478 | 423 | |
---|
479 | 424 | unsigned int queue_depth; |
---|
480 | 425 | |
---|
481 | 426 | /* hw dispatch queues */ |
---|
482 | 427 | struct blk_mq_hw_ctx **queue_hw_ctx; |
---|
483 | 428 | unsigned int nr_hw_queues; |
---|
484 | | - |
---|
485 | | - /* |
---|
486 | | - * Dispatch queue sorting |
---|
487 | | - */ |
---|
488 | | - sector_t end_sector; |
---|
489 | | - struct request *boundary_rq; |
---|
490 | | - |
---|
491 | | - /* |
---|
492 | | - * Delayed queue handling |
---|
493 | | - */ |
---|
494 | | - struct delayed_work delay_work; |
---|
495 | 429 | |
---|
496 | 430 | struct backing_dev_info *backing_dev_info; |
---|
497 | 431 | |
---|
.. | .. |
---|
507 | 441 | unsigned long queue_flags; |
---|
508 | 442 | /* |
---|
509 | 443 | * Number of contexts that have called blk_set_pm_only(). If this |
---|
510 | | - * counter is above zero then only RQF_PM and RQF_PREEMPT requests are |
---|
511 | | - * processed. |
---|
| 444 | + * counter is above zero then only RQF_PM requests are processed. |
---|
512 | 445 | */ |
---|
513 | 446 | atomic_t pm_only; |
---|
514 | 447 | |
---|
.. | .. |
---|
523 | 456 | */ |
---|
524 | 457 | gfp_t bounce_gfp; |
---|
525 | 458 | |
---|
526 | | - /* |
---|
527 | | - * protects queue structures from reentrancy. ->__queue_lock should |
---|
528 | | - * _never_ be used directly, it is queue private. always use |
---|
529 | | - * ->queue_lock. |
---|
530 | | - */ |
---|
531 | | - spinlock_t __queue_lock; |
---|
532 | | - spinlock_t *queue_lock; |
---|
| 459 | + spinlock_t queue_lock; |
---|
533 | 460 | |
---|
534 | 461 | /* |
---|
535 | 462 | * queue kobject |
---|
.. | .. |
---|
539 | 466 | /* |
---|
540 | 467 | * mq queue kobject |
---|
541 | 468 | */ |
---|
542 | | - struct kobject mq_kobj; |
---|
| 469 | + struct kobject *mq_kobj; |
---|
543 | 470 | |
---|
544 | 471 | #ifdef CONFIG_BLK_DEV_INTEGRITY |
---|
545 | 472 | struct blk_integrity integrity; |
---|
.. | .. |
---|
547 | 474 | |
---|
548 | 475 | #ifdef CONFIG_PM |
---|
549 | 476 | struct device *dev; |
---|
550 | | - int rpm_status; |
---|
| 477 | + enum rpm_status rpm_status; |
---|
551 | 478 | unsigned int nr_pending; |
---|
552 | 479 | #endif |
---|
553 | 480 | |
---|
.. | .. |
---|
555 | 482 | * queue settings |
---|
556 | 483 | */ |
---|
557 | 484 | unsigned long nr_requests; /* Max # of requests */ |
---|
558 | | - unsigned int nr_congestion_on; |
---|
559 | | - unsigned int nr_congestion_off; |
---|
560 | | - unsigned int nr_batching; |
---|
561 | 485 | |
---|
562 | | - unsigned int dma_drain_size; |
---|
563 | | - void *dma_drain_buffer; |
---|
564 | 486 | unsigned int dma_pad_mask; |
---|
565 | 487 | unsigned int dma_alignment; |
---|
566 | 488 | |
---|
567 | | - struct blk_queue_tag *queue_tags; |
---|
568 | | - |
---|
569 | | - unsigned int nr_sorted; |
---|
570 | | - unsigned int in_flight[2]; |
---|
571 | | - |
---|
572 | | - /* |
---|
573 | | - * Number of active block driver functions for which blk_drain_queue() |
---|
574 | | - * must wait. Must be incremented around functions that unlock the |
---|
575 | | - * queue_lock internally, e.g. scsi_request_fn(). |
---|
576 | | - */ |
---|
577 | | - unsigned int request_fn_active; |
---|
578 | 489 | #ifdef CONFIG_BLK_INLINE_ENCRYPTION |
---|
579 | 490 | /* Inline crypto capabilities */ |
---|
580 | | - struct keyslot_manager *ksm; |
---|
| 491 | + struct blk_keyslot_manager *ksm; |
---|
581 | 492 | #endif |
---|
582 | 493 | |
---|
583 | 494 | unsigned int rq_timeout; |
---|
.. | .. |
---|
588 | 499 | |
---|
589 | 500 | struct timer_list timeout; |
---|
590 | 501 | struct work_struct timeout_work; |
---|
591 | | - struct list_head timeout_list; |
---|
| 502 | + |
---|
| 503 | + atomic_t nr_active_requests_shared_sbitmap; |
---|
592 | 504 | |
---|
593 | 505 | struct list_head icq_list; |
---|
594 | 506 | #ifdef CONFIG_BLK_CGROUP |
---|
.. | .. |
---|
599 | 511 | |
---|
600 | 512 | struct queue_limits limits; |
---|
601 | 513 | |
---|
| 514 | + unsigned int required_elevator_features; |
---|
| 515 | + |
---|
602 | 516 | #ifdef CONFIG_BLK_DEV_ZONED |
---|
603 | 517 | /* |
---|
604 | 518 | * Zoned block device information for request dispatch control. |
---|
605 | 519 | * nr_zones is the total number of zones of the device. This is always |
---|
606 | | - * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones |
---|
607 | | - * bits which indicates if a zone is conventional (bit clear) or |
---|
608 | | - * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones |
---|
| 520 | + * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones |
---|
| 521 | + * bits which indicates if a zone is conventional (bit set) or |
---|
| 522 | + * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones |
---|
609 | 523 | * bits which indicates if a zone is write locked, that is, if a write |
---|
610 | 524 | * request targeting the zone was dispatched. All three fields are |
---|
611 | 525 | * initialized by the low level device driver (e.g. scsi/sd.c). |
---|
.. | .. |
---|
618 | 532 | * blk_mq_unfreeze_queue(). |
---|
619 | 533 | */ |
---|
620 | 534 | unsigned int nr_zones; |
---|
621 | | - unsigned long *seq_zones_bitmap; |
---|
| 535 | + unsigned long *conv_zones_bitmap; |
---|
622 | 536 | unsigned long *seq_zones_wlock; |
---|
| 537 | + unsigned int max_open_zones; |
---|
| 538 | + unsigned int max_active_zones; |
---|
623 | 539 | #endif /* CONFIG_BLK_DEV_ZONED */ |
---|
624 | 540 | |
---|
625 | 541 | /* |
---|
.. | .. |
---|
628 | 544 | unsigned int sg_timeout; |
---|
629 | 545 | unsigned int sg_reserved_size; |
---|
630 | 546 | int node; |
---|
| 547 | + struct mutex debugfs_mutex; |
---|
631 | 548 | #ifdef CONFIG_BLK_DEV_IO_TRACE |
---|
632 | 549 | struct blk_trace __rcu *blk_trace; |
---|
633 | | - struct mutex blk_trace_mutex; |
---|
634 | 550 | #endif |
---|
635 | 551 | /* |
---|
636 | 552 | * for flush operations |
---|
.. | .. |
---|
642 | 558 | struct delayed_work requeue_work; |
---|
643 | 559 | |
---|
644 | 560 | struct mutex sysfs_lock; |
---|
| 561 | + struct mutex sysfs_dir_lock; |
---|
645 | 562 | |
---|
646 | | - int bypass_depth; |
---|
647 | | - atomic_t mq_freeze_depth; |
---|
| 563 | + /* |
---|
| 564 | + * for reusing dead hctx instance in case of updating |
---|
| 565 | + * nr_hw_queues |
---|
| 566 | + */ |
---|
| 567 | + struct list_head unused_hctx_list; |
---|
| 568 | + spinlock_t unused_hctx_lock; |
---|
648 | 569 | |
---|
649 | | - bsg_job_fn *bsg_job_fn; |
---|
| 570 | + int mq_freeze_depth; |
---|
| 571 | + |
---|
| 572 | +#if defined(CONFIG_BLK_DEV_BSG) |
---|
650 | 573 | struct bsg_class_device bsg_dev; |
---|
| 574 | +#endif |
---|
651 | 575 | |
---|
652 | 576 | #ifdef CONFIG_BLK_DEV_THROTTLING |
---|
653 | 577 | /* Throttle data */ |
---|
.. | .. |
---|
655 | 579 | #endif |
---|
656 | 580 | struct rcu_head rcu_head; |
---|
657 | 581 | wait_queue_head_t mq_freeze_wq; |
---|
658 | | - struct percpu_ref q_usage_counter; |
---|
659 | | - struct list_head all_q_node; |
---|
| 582 | + /* |
---|
| 583 | + * Protect concurrent access to q_usage_counter by |
---|
| 584 | + * percpu_ref_kill() and percpu_ref_reinit(). |
---|
| 585 | + */ |
---|
| 586 | + struct mutex mq_freeze_lock; |
---|
660 | 587 | |
---|
661 | 588 | struct blk_mq_tag_set *tag_set; |
---|
662 | 589 | struct list_head tag_set_list; |
---|
663 | 590 | struct bio_set bio_split; |
---|
664 | 591 | |
---|
665 | | -#ifdef CONFIG_BLK_DEBUG_FS |
---|
666 | 592 | struct dentry *debugfs_dir; |
---|
| 593 | + |
---|
| 594 | +#ifdef CONFIG_BLK_DEBUG_FS |
---|
667 | 595 | struct dentry *sched_debugfs_dir; |
---|
| 596 | + struct dentry *rqos_debugfs_dir; |
---|
668 | 597 | #endif |
---|
669 | 598 | |
---|
670 | 599 | bool mq_sysfs_init_done; |
---|
671 | 600 | |
---|
672 | 601 | size_t cmd_size; |
---|
673 | | - void *rq_alloc_data; |
---|
674 | | - |
---|
675 | | - struct work_struct release_work; |
---|
676 | 602 | |
---|
677 | 603 | #define BLK_MAX_WRITE_HINTS 5 |
---|
678 | 604 | u64 write_hints[BLK_MAX_WRITE_HINTS]; |
---|
| 605 | + |
---|
| 606 | + ANDROID_KABI_RESERVE(1); |
---|
| 607 | + ANDROID_KABI_RESERVE(2); |
---|
| 608 | + ANDROID_KABI_RESERVE(3); |
---|
| 609 | + ANDROID_KABI_RESERVE(4); |
---|
| 610 | + ANDROID_OEM_DATA(1); |
---|
679 | 611 | }; |
---|
680 | 612 | |
---|
681 | | -#define QUEUE_FLAG_QUEUED 0 /* uses generic tag queueing */ |
---|
682 | | -#define QUEUE_FLAG_STOPPED 1 /* queue is stopped */ |
---|
683 | | -#define QUEUE_FLAG_DYING 2 /* queue being torn down */ |
---|
684 | | -#define QUEUE_FLAG_BYPASS 3 /* act as dumb FIFO queue */ |
---|
685 | | -#define QUEUE_FLAG_BIDI 4 /* queue supports bidi requests */ |
---|
686 | | -#define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */ |
---|
687 | | -#define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */ |
---|
688 | | -#define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */ |
---|
689 | | -#define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */ |
---|
690 | | -#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ |
---|
691 | | -#define QUEUE_FLAG_IO_STAT 10 /* do IO stats */ |
---|
692 | | -#define QUEUE_FLAG_DISCARD 11 /* supports DISCARD */ |
---|
693 | | -#define QUEUE_FLAG_NOXMERGES 12 /* No extended merges */ |
---|
694 | | -#define QUEUE_FLAG_ADD_RANDOM 13 /* Contributes to random pool */ |
---|
695 | | -#define QUEUE_FLAG_SECERASE 14 /* supports secure erase */ |
---|
696 | | -#define QUEUE_FLAG_SAME_FORCE 15 /* force complete on same CPU */ |
---|
697 | | -#define QUEUE_FLAG_DEAD 16 /* queue tear-down finished */ |
---|
698 | | -#define QUEUE_FLAG_INIT_DONE 17 /* queue is initialized */ |
---|
699 | | -#define QUEUE_FLAG_NO_SG_MERGE 18 /* don't attempt to merge SG segments*/ |
---|
700 | | -#define QUEUE_FLAG_POLL 19 /* IO polling enabled if set */ |
---|
701 | | -#define QUEUE_FLAG_WC 20 /* Write back caching */ |
---|
702 | | -#define QUEUE_FLAG_FUA 21 /* device supports FUA writes */ |
---|
703 | | -#define QUEUE_FLAG_FLUSH_NQ 22 /* flush not queueuable */ |
---|
704 | | -#define QUEUE_FLAG_DAX 23 /* device supports DAX */ |
---|
705 | | -#define QUEUE_FLAG_STATS 24 /* track rq completion times */ |
---|
706 | | -#define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */ |
---|
707 | | -#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */ |
---|
708 | | -#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */ |
---|
709 | | -#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */ |
---|
710 | | - |
---|
711 | | -#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
---|
712 | | - (1 << QUEUE_FLAG_SAME_COMP) | \ |
---|
713 | | - (1 << QUEUE_FLAG_ADD_RANDOM)) |
---|
| 613 | +/* Keep blk_queue_flag_name[] in sync with the definitions below */ |
---|
| 614 | +#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */ |
---|
| 615 | +#define QUEUE_FLAG_DYING 1 /* queue being torn down */ |
---|
| 616 | +#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */ |
---|
| 617 | +#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */ |
---|
| 618 | +#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */ |
---|
| 619 | +#define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */ |
---|
| 620 | +#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ |
---|
| 621 | +#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */ |
---|
| 622 | +#define QUEUE_FLAG_DISCARD 8 /* supports DISCARD */ |
---|
| 623 | +#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */ |
---|
| 624 | +#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */ |
---|
| 625 | +#define QUEUE_FLAG_SECERASE 11 /* supports secure erase */ |
---|
| 626 | +#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ |
---|
| 627 | +#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */ |
---|
| 628 | +#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ |
---|
| 629 | +#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */ |
---|
| 630 | +#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */ |
---|
| 631 | +#define QUEUE_FLAG_WC 17 /* Write back caching */ |
---|
| 632 | +#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */ |
---|
| 633 | +#define QUEUE_FLAG_DAX 19 /* device supports DAX */ |
---|
| 634 | +#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */ |
---|
| 635 | +#define QUEUE_FLAG_POLL_STATS 21 /* collecting stats for hybrid polling */ |
---|
| 636 | +#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */ |
---|
| 637 | +#define QUEUE_FLAG_SCSI_PASSTHROUGH 23 /* queue supports SCSI commands */ |
---|
| 638 | +#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */ |
---|
| 639 | +#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */ |
---|
| 640 | +#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */ |
---|
| 641 | +#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */ |
---|
| 642 | +#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */ |
---|
| 643 | +#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */ |
---|
714 | 644 | |
---|
715 | 645 | #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
---|
716 | | - (1 << QUEUE_FLAG_SAME_COMP) | \ |
---|
717 | | - (1 << QUEUE_FLAG_POLL)) |
---|
| 646 | + (1 << QUEUE_FLAG_SAME_COMP) | \ |
---|
| 647 | + (1 << QUEUE_FLAG_NOWAIT)) |
---|
718 | 648 | |
---|
719 | 649 | void blk_queue_flag_set(unsigned int flag, struct request_queue *q); |
---|
720 | 650 | void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); |
---|
721 | 651 | bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); |
---|
722 | | -bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q); |
---|
723 | 652 | |
---|
724 | | -#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) |
---|
725 | 653 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) |
---|
726 | 654 | #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) |
---|
727 | 655 | #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) |
---|
728 | | -#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags) |
---|
729 | 656 | #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags) |
---|
730 | 657 | #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) |
---|
731 | 658 | #define blk_queue_noxmerges(q) \ |
---|
732 | 659 | test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) |
---|
733 | 660 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) |
---|
| 661 | +#define blk_queue_stable_writes(q) \ |
---|
| 662 | + test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags) |
---|
734 | 663 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) |
---|
735 | 664 | #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) |
---|
736 | 665 | #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) |
---|
| 666 | +#define blk_queue_zone_resetall(q) \ |
---|
| 667 | + test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags) |
---|
737 | 668 | #define blk_queue_secure_erase(q) \ |
---|
738 | 669 | (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags)) |
---|
739 | 670 | #define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) |
---|
740 | 671 | #define blk_queue_scsi_passthrough(q) \ |
---|
741 | 672 | test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags) |
---|
| 673 | +#define blk_queue_pci_p2pdma(q) \ |
---|
| 674 | + test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags) |
---|
| 675 | +#ifdef CONFIG_BLK_RQ_ALLOC_TIME |
---|
| 676 | +#define blk_queue_rq_alloc_time(q) \ |
---|
| 677 | + test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags) |
---|
| 678 | +#else |
---|
| 679 | +#define blk_queue_rq_alloc_time(q) false |
---|
| 680 | +#endif |
---|
742 | 681 | |
---|
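A short sketch of how a driver typically uses the flag helpers declared above while configuring its queue (the function name is illustrative).

    static void example_setup_queue_flags(struct request_queue *q)
    {
            blk_queue_flag_set(QUEUE_FLAG_NONROT, q);       /* non-rotational media */
            blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); /* no entropy contribution */
            blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);       /* REQ_NOWAIT supported */
    }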
743 | 682 | #define blk_noretry_request(rq) \ |
---|
744 | 683 | ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ |
---|
.. | .. |
---|
747 | 686 | #define blk_queue_pm_only(q) atomic_read(&(q)->pm_only) |
---|
748 | 687 | #define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags) |
---|
749 | 688 | #define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags) |
---|
| 689 | +#define blk_queue_nowait(q) test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags) |
---|
750 | 690 | |
---|
751 | 691 | extern void blk_set_pm_only(struct request_queue *q); |
---|
752 | 692 | extern void blk_clear_pm_only(struct request_queue *q); |
---|
753 | | - |
---|
754 | | -static inline int queue_in_flight(struct request_queue *q) |
---|
755 | | -{ |
---|
756 | | - return q->in_flight[0] + q->in_flight[1]; |
---|
757 | | -} |
---|
758 | 693 | |
---|
759 | 694 | static inline bool blk_account_rq(struct request *rq) |
---|
760 | 695 | { |
---|
761 | 696 | return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); |
---|
762 | 697 | } |
---|
763 | 698 | |
---|
764 | | -#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) |
---|
765 | | -#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) |
---|
766 | | -/* rq->queuelist of dequeued request must be list_empty() */ |
---|
767 | | -#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) |
---|
768 | | - |
---|
769 | 699 | #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) |
---|
770 | 700 | |
---|
771 | 701 | #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) |
---|
772 | 702 | |
---|
773 | | -/* |
---|
774 | | - * Driver can handle struct request, if it either has an old style |
---|
775 | | - * request_fn defined, or is blk-mq based. |
---|
776 | | - */ |
---|
777 | | -static inline bool queue_is_rq_based(struct request_queue *q) |
---|
778 | | -{ |
---|
779 | | - return q->request_fn || q->mq_ops; |
---|
780 | | -} |
---|
| 703 | +#define rq_dma_dir(rq) \ |
---|
| 704 | + (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE) |
---|
781 | 705 | |
---|
782 | | -static inline unsigned int blk_queue_cluster(struct request_queue *q) |
---|
| 706 | +#define dma_map_bvec(dev, bv, dir, attrs) \ |
---|
| 707 | + dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \ |
---|
| 708 | + (dir), (attrs)) |
---|
| 709 | + |
---|
| 710 | +static inline bool queue_is_mq(struct request_queue *q) |
---|
783 | 711 | { |
---|
784 | | - return q->limits.cluster; |
---|
| 712 | + return q->mq_ops; |
---|
785 | 713 | } |
---|
786 | 714 | |
---|
787 | 715 | static inline enum blk_zoned_model |
---|
788 | 716 | blk_queue_zoned_model(struct request_queue *q) |
---|
789 | 717 | { |
---|
790 | | - return q->limits.zoned; |
---|
| 718 | + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) |
---|
| 719 | + return q->limits.zoned; |
---|
| 720 | + return BLK_ZONED_NONE; |
---|
791 | 721 | } |
---|
792 | 722 | |
---|
793 | 723 | static inline bool blk_queue_is_zoned(struct request_queue *q) |
---|
.. | .. |
---|
801 | 731 | } |
---|
802 | 732 | } |
---|
803 | 733 | |
---|
804 | | -static inline unsigned int blk_queue_zone_sectors(struct request_queue *q) |
---|
| 734 | +static inline sector_t blk_queue_zone_sectors(struct request_queue *q) |
---|
805 | 735 | { |
---|
806 | 736 | return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; |
---|
807 | 737 | } |
---|
808 | 738 | |
---|
809 | 739 | #ifdef CONFIG_BLK_DEV_ZONED |
---|
| 740 | +static inline unsigned int blk_queue_nr_zones(struct request_queue *q) |
---|
| 741 | +{ |
---|
| 742 | + return blk_queue_is_zoned(q) ? q->nr_zones : 0; |
---|
| 743 | +} |
---|
| 744 | + |
---|
810 | 745 | static inline unsigned int blk_queue_zone_no(struct request_queue *q, |
---|
811 | 746 | sector_t sector) |
---|
812 | 747 | { |
---|
.. | .. |
---|
818 | 753 | static inline bool blk_queue_zone_is_seq(struct request_queue *q, |
---|
819 | 754 | sector_t sector) |
---|
820 | 755 | { |
---|
821 | | - if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap) |
---|
| 756 | + if (!blk_queue_is_zoned(q)) |
---|
822 | 757 | return false; |
---|
823 | | - return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap); |
---|
| 758 | + if (!q->conv_zones_bitmap) |
---|
| 759 | + return true; |
---|
| 760 | + return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap); |
---|
| 761 | +} |
---|
| 762 | + |
---|
| 763 | +static inline void blk_queue_max_open_zones(struct request_queue *q, |
---|
| 764 | + unsigned int max_open_zones) |
---|
| 765 | +{ |
---|
| 766 | + q->max_open_zones = max_open_zones; |
---|
| 767 | +} |
---|
| 768 | + |
---|
| 769 | +static inline unsigned int queue_max_open_zones(const struct request_queue *q) |
---|
| 770 | +{ |
---|
| 771 | + return q->max_open_zones; |
---|
| 772 | +} |
---|
| 773 | + |
---|
| 774 | +static inline void blk_queue_max_active_zones(struct request_queue *q, |
---|
| 775 | + unsigned int max_active_zones) |
---|
| 776 | +{ |
---|
| 777 | + q->max_active_zones = max_active_zones; |
---|
| 778 | +} |
---|
| 779 | + |
---|
| 780 | +static inline unsigned int queue_max_active_zones(const struct request_queue *q) |
---|
| 781 | +{ |
---|
| 782 | + return q->max_active_zones; |
---|
| 783 | +} |
---|
| 784 | +#else /* CONFIG_BLK_DEV_ZONED */ |
---|
| 785 | +static inline unsigned int blk_queue_nr_zones(struct request_queue *q) |
---|
| 786 | +{ |
---|
| 787 | + return 0; |
---|
| 788 | +} |
---|
| 789 | +static inline bool blk_queue_zone_is_seq(struct request_queue *q, |
---|
| 790 | + sector_t sector) |
---|
| 791 | +{ |
---|
| 792 | + return false; |
---|
| 793 | +} |
---|
| 794 | +static inline unsigned int blk_queue_zone_no(struct request_queue *q, |
---|
| 795 | + sector_t sector) |
---|
| 796 | +{ |
---|
| 797 | + return 0; |
---|
| 798 | +} |
---|
| 799 | +static inline unsigned int queue_max_open_zones(const struct request_queue *q) |
---|
| 800 | +{ |
---|
| 801 | + return 0; |
---|
| 802 | +} |
---|
| 803 | +static inline unsigned int queue_max_active_zones(const struct request_queue *q) |
---|
| 804 | +{ |
---|
| 805 | + return 0; |
---|
824 | 806 | } |
---|
825 | 807 | #endif /* CONFIG_BLK_DEV_ZONED */ |
---|
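A small sketch built on the zone helpers above (the function name is illustrative): only writes landing in a sequential zone need zone write locking or REQ_OP_ZONE_APPEND treatment.

    static bool example_rq_in_seq_zone(struct request *rq)
    {
            return op_is_write(req_op(rq)) &&
                   blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
    }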
826 | 808 | |
---|
827 | 809 | static inline bool rq_is_sync(struct request *rq) |
---|
828 | 810 | { |
---|
829 | 811 | return op_is_sync(rq->cmd_flags); |
---|
830 | | -} |
---|
831 | | - |
---|
832 | | -static inline bool blk_rl_full(struct request_list *rl, bool sync) |
---|
833 | | -{ |
---|
834 | | - unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; |
---|
835 | | - |
---|
836 | | - return rl->flags & flag; |
---|
837 | | -} |
---|
838 | | - |
---|
839 | | -static inline void blk_set_rl_full(struct request_list *rl, bool sync) |
---|
840 | | -{ |
---|
841 | | - unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; |
---|
842 | | - |
---|
843 | | - rl->flags |= flag; |
---|
844 | | -} |
---|
845 | | - |
---|
846 | | -static inline void blk_clear_rl_full(struct request_list *rl, bool sync) |
---|
847 | | -{ |
---|
848 | | - unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; |
---|
849 | | - |
---|
850 | | - rl->flags &= ~flag; |
---|
851 | 812 | } |
---|
852 | 813 | |
---|
853 | 814 | static inline bool rq_mergeable(struct request *rq) |
---|
.. | .. |
---|
859 | 820 | return false; |
---|
860 | 821 | |
---|
861 | 822 | if (req_op(rq) == REQ_OP_WRITE_ZEROES) |
---|
| 823 | + return false; |
---|
| 824 | + |
---|
| 825 | + if (req_op(rq) == REQ_OP_ZONE_APPEND) |
---|
862 | 826 | return false; |
---|
863 | 827 | |
---|
864 | 828 | if (rq->cmd_flags & REQ_NOMERGE_FLAGS) |
---|
.. | .. |
---|
885 | 849 | |
---|
886 | 850 | return q->nr_requests; |
---|
887 | 851 | } |
---|
888 | | - |
---|
889 | | -/* |
---|
890 | | - * q->prep_rq_fn return values |
---|
891 | | - */ |
---|
892 | | -enum { |
---|
893 | | - BLKPREP_OK, /* serve it */ |
---|
894 | | - BLKPREP_KILL, /* fatal error, kill, return -EIO */ |
---|
895 | | - BLKPREP_DEFER, /* leave on queue */ |
---|
896 | | - BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */ |
---|
897 | | -}; |
---|
898 | 852 | |
---|
899 | 853 | extern unsigned long blk_max_low_pfn, blk_max_pfn; |
---|
900 | 854 | |
---|
.. | .. |
---|
945 | 899 | __rq_for_each_bio(_iter.bio, _rq) \ |
---|
946 | 900 | bio_for_each_segment(bvl, _iter.bio, _iter.iter) |
---|
947 | 901 | |
---|
| 902 | +#define rq_for_each_bvec(bvl, _rq, _iter) \ |
---|
| 903 | + __rq_for_each_bio(_iter.bio, _rq) \ |
---|
| 904 | + bio_for_each_bvec(bvl, _iter.bio, _iter.iter) |
---|
| 905 | + |
---|
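A minimal sketch of the new iterator above (the helper name is illustrative): unlike rq_for_each_segment(), rq_for_each_bvec() yields full bvecs that may span multiple pages.

    static unsigned int example_rq_payload_bytes(struct request *rq)
    {
            struct req_iterator iter;
            struct bio_vec bv;
            unsigned int bytes = 0;

            rq_for_each_bvec(bv, rq, iter)
                    bytes += bv.bv_len;
            return bytes;
    }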
948 | 906 | #define rq_iter_last(bvec, _iter) \ |
---|
949 | 907 | (_iter.bio->bi_next == NULL && \ |
---|
950 | 908 | bio_iter_last(bvec, _iter.iter)) |
---|
.. | .. |
---|
962 | 920 | |
---|
963 | 921 | extern int blk_register_queue(struct gendisk *disk); |
---|
964 | 922 | extern void blk_unregister_queue(struct gendisk *disk); |
---|
965 | | -extern blk_qc_t generic_make_request(struct bio *bio); |
---|
966 | | -extern blk_qc_t direct_make_request(struct bio *bio); |
---|
| 923 | +blk_qc_t submit_bio_noacct(struct bio *bio); |
---|
967 | 924 | extern void blk_rq_init(struct request_queue *q, struct request *rq); |
---|
968 | | -extern void blk_init_request_from_bio(struct request *req, struct bio *bio); |
---|
969 | 925 | extern void blk_put_request(struct request *); |
---|
970 | | -extern void __blk_put_request(struct request_queue *, struct request *); |
---|
971 | 926 | extern struct request *blk_get_request(struct request_queue *, unsigned int op, |
---|
972 | 927 | blk_mq_req_flags_t flags); |
---|
973 | | -extern void blk_requeue_request(struct request_queue *, struct request *); |
---|
974 | 928 | extern int blk_lld_busy(struct request_queue *q); |
---|
975 | 929 | extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, |
---|
976 | 930 | struct bio_set *bs, gfp_t gfp_mask, |
---|
.. | .. |
---|
980 | 934 | extern blk_status_t blk_insert_cloned_request(struct request_queue *q, |
---|
981 | 935 | struct request *rq); |
---|
982 | 936 | extern int blk_rq_append_bio(struct request *rq, struct bio **bio); |
---|
983 | | -extern void blk_delay_queue(struct request_queue *, unsigned long); |
---|
984 | | -extern void blk_queue_split(struct request_queue *, struct bio **); |
---|
985 | | -extern void blk_recount_segments(struct request_queue *, struct bio *); |
---|
| 937 | +extern void blk_queue_split(struct bio **); |
---|
986 | 938 | extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); |
---|
987 | 939 | extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, |
---|
988 | 940 | unsigned int, void __user *); |
---|
.. | .. |
---|
990 | 942 | unsigned int, void __user *); |
---|
991 | 943 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, |
---|
992 | 944 | struct scsi_ioctl_command __user *); |
---|
| 945 | +extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp); |
---|
| 946 | +extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp); |
---|
993 | 947 | |
---|
994 | 948 | extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags); |
---|
995 | 949 | extern void blk_queue_exit(struct request_queue *q); |
---|
996 | | -extern void blk_start_queue(struct request_queue *q); |
---|
997 | | -extern void blk_start_queue_async(struct request_queue *q); |
---|
998 | | -extern void blk_stop_queue(struct request_queue *q); |
---|
999 | 950 | extern void blk_sync_queue(struct request_queue *q); |
---|
1000 | | -extern void __blk_stop_queue(struct request_queue *q); |
---|
1001 | | -extern void __blk_run_queue(struct request_queue *q); |
---|
1002 | | -extern void __blk_run_queue_uncond(struct request_queue *q); |
---|
1003 | | -extern void blk_run_queue(struct request_queue *); |
---|
1004 | | -extern void blk_run_queue_async(struct request_queue *q); |
---|
1005 | 951 | extern int blk_rq_map_user(struct request_queue *, struct request *, |
---|
1006 | 952 | struct rq_map_data *, void __user *, unsigned long, |
---|
1007 | 953 | gfp_t); |
---|
.. | .. |
---|
1015 | 961 | extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, |
---|
1016 | 962 | struct request *, int, rq_end_io_fn *); |
---|
1017 | 963 | |
---|
| 964 | +/* Helper to convert REQ_OP_XXX to its string format XXX */ |
---|
| 965 | +extern const char *blk_op_str(unsigned int op); |
---|
| 966 | + |
---|
1018 | 967 | int blk_status_to_errno(blk_status_t status); |
---|
1019 | 968 | blk_status_t errno_to_blk_status(int errno); |
---|
1020 | 969 | |
---|
1021 | | -bool blk_poll(struct request_queue *q, blk_qc_t cookie); |
---|
| 970 | +int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin); |
---|
1022 | 971 | |
---|
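A rough sketch of the updated polling interface above, assuming the queue has QUEUE_FLAG_POLL set and the cookie came from bio submission; the completion flag is illustrative and would be set by the bio's end_io handler.

    static void example_poll_until_done(struct request_queue *q, blk_qc_t cookie,
                                        bool *done)
    {
            while (!READ_ONCE(*done))
                    blk_poll(q, cookie, true);      /* spin == true: busy-poll */
    }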
1023 | 972 | static inline struct request_queue *bdev_get_queue(struct block_device *bdev) |
---|
1024 | 973 | { |
---|
.. | .. |
---|
1045 | 994 | * blk_rq_err_bytes() : bytes left till the next error boundary |
---|
1046 | 995 | * blk_rq_sectors() : sectors left in the entire request |
---|
1047 | 996 | * blk_rq_cur_sectors() : sectors left in the current segment |
---|
| 997 | + * blk_rq_stats_sectors() : sectors of the entire request used for stats |
---|
1048 | 998 | */ |
---|
1049 | 999 | static inline sector_t blk_rq_pos(const struct request *rq) |
---|
1050 | 1000 | { |
---|
.. | .. |
---|
1073 | 1023 | return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; |
---|
1074 | 1024 | } |
---|
1075 | 1025 | |
---|
| 1026 | +static inline unsigned int blk_rq_stats_sectors(const struct request *rq) |
---|
| 1027 | +{ |
---|
| 1028 | + return rq->stats_sectors; |
---|
| 1029 | +} |
---|
| 1030 | + |
---|
1076 | 1031 | #ifdef CONFIG_BLK_DEV_ZONED |
---|
| 1032 | + |
---|
| 1033 | +/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */ |
---|
| 1034 | +const char *blk_zone_cond_str(enum blk_zone_cond zone_cond); |
---|
| 1035 | + |
---|
1077 | 1036 | static inline unsigned int blk_rq_zone_no(struct request *rq) |
---|
1078 | 1037 | { |
---|
1079 | 1038 | return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); |
---|
.. | .. |
---|
1098 | 1057 | return blk_rq_bytes(rq); |
---|
1099 | 1058 | } |
---|
1100 | 1059 | |
---|
| 1060 | +/* |
---|
| 1061 | + * Return the first full biovec in the request. The caller needs to check that |
---|
| 1062 | + * there are any bvecs before calling this helper. |
---|
| 1063 | + */ |
---|
| 1064 | +static inline struct bio_vec req_bvec(struct request *rq) |
---|
| 1065 | +{ |
---|
| 1066 | + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) |
---|
| 1067 | + return rq->special_vec; |
---|
| 1068 | + return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter); |
---|
| 1069 | +} |
---|
| 1070 | + |
---|
1101 | 1071 | static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, |
---|
1102 | 1072 | int op) |
---|
1103 | 1073 | { |
---|
.. | .. |
---|
1119 | 1089 | * file system requests. |
---|
1120 | 1090 | */ |
---|
1121 | 1091 | static inline unsigned int blk_max_size_offset(struct request_queue *q, |
---|
1122 | | - sector_t offset) |
---|
| 1092 | + sector_t offset, |
---|
| 1093 | + unsigned int chunk_sectors) |
---|
1123 | 1094 | { |
---|
1124 | | - if (!q->limits.chunk_sectors) |
---|
1125 | | - return q->limits.max_sectors; |
---|
| 1095 | + if (!chunk_sectors) { |
---|
| 1096 | + if (q->limits.chunk_sectors) |
---|
| 1097 | + chunk_sectors = q->limits.chunk_sectors; |
---|
| 1098 | + else |
---|
| 1099 | + return q->limits.max_sectors; |
---|
| 1100 | + } |
---|
1126 | 1101 | |
---|
1127 | | - return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors - |
---|
1128 | | - (offset & (q->limits.chunk_sectors - 1)))); |
---|
| 1102 | + if (likely(is_power_of_2(chunk_sectors))) |
---|
| 1103 | + chunk_sectors -= offset & (chunk_sectors - 1); |
---|
| 1104 | + else |
---|
| 1105 | + chunk_sectors -= sector_div(offset, chunk_sectors); |
---|
| 1106 | + |
---|
| 1107 | + return min(q->limits.max_sectors, chunk_sectors); |
---|
1129 | 1108 | } |
---|
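A worked example of the boundary math above, with illustrative numbers: for a non-power-of-two chunk_sectors of 6144 and an offset of 10000, sector_div(offset, chunk_sectors) leaves a remainder of 10000 % 6144 = 3856, so 6144 - 3856 = 2288 sectors remain before the next chunk boundary and the function returns min(q->limits.max_sectors, 2288). With a power-of-two chunk size the same remainder is obtained by the cheaper mask operation.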
1130 | 1109 | |
---|
1131 | 1110 | static inline unsigned int blk_rq_get_max_sectors(struct request *rq, |
---|
.. | .. |
---|
1141 | 1120 | req_op(rq) == REQ_OP_SECURE_ERASE) |
---|
1142 | 1121 | return blk_queue_get_max_sectors(q, req_op(rq)); |
---|
1143 | 1122 | |
---|
1144 | | - return min(blk_max_size_offset(q, offset), |
---|
| 1123 | + return min(blk_max_size_offset(q, offset, 0), |
---|
1145 | 1124 | blk_queue_get_max_sectors(q, req_op(rq))); |
---|
1146 | 1125 | } |
---|
1147 | 1126 | |
---|
.. | .. |
---|
1156 | 1135 | return nr_bios; |
---|
1157 | 1136 | } |
---|
1158 | 1137 | |
---|
1159 | | -/* |
---|
1160 | | - * Request issue related functions. |
---|
1161 | | - */ |
---|
1162 | | -extern struct request *blk_peek_request(struct request_queue *q); |
---|
1163 | | -extern void blk_start_request(struct request *rq); |
---|
1164 | | -extern struct request *blk_fetch_request(struct request_queue *q); |
---|
1165 | | - |
---|
1166 | 1138 | void blk_steal_bios(struct bio_list *list, struct request *rq); |
---|
1167 | 1139 | |
---|
1168 | 1140 | /* |
---|
.. | .. |
---|
1170 | 1142 | * |
---|
1171 | 1143 | * blk_update_request() completes given number of bytes and updates |
---|
1172 | 1144 | * the request without completing it. |
---|
1173 | | - * |
---|
1174 | | - * blk_end_request() and friends. __blk_end_request() must be called |
---|
1175 | | - * with the request queue spinlock acquired. |
---|
1176 | | - * |
---|
1177 | | - * Several drivers define their own end_request and call |
---|
1178 | | - * blk_end_request() for parts of the original function. |
---|
1179 | | - * This prevents code duplication in drivers. |
---|
1180 | 1145 | */ |
---|
1181 | 1146 | extern bool blk_update_request(struct request *rq, blk_status_t error, |
---|
1182 | 1147 | unsigned int nr_bytes); |
---|
1183 | | -extern void blk_finish_request(struct request *rq, blk_status_t error); |
---|
1184 | | -extern bool blk_end_request(struct request *rq, blk_status_t error, |
---|
1185 | | - unsigned int nr_bytes); |
---|
1186 | | -extern void blk_end_request_all(struct request *rq, blk_status_t error); |
---|
1187 | | -extern bool __blk_end_request(struct request *rq, blk_status_t error, |
---|
1188 | | - unsigned int nr_bytes); |
---|
1189 | | -extern void __blk_end_request_all(struct request *rq, blk_status_t error); |
---|
1190 | | -extern bool __blk_end_request_cur(struct request *rq, blk_status_t error); |
---|
1191 | 1148 | |
---|
1192 | | -extern void blk_complete_request(struct request *); |
---|
1193 | | -extern void __blk_complete_request(struct request *); |
---|
1194 | 1149 | extern void blk_abort_request(struct request *); |
---|
1195 | | -extern void blk_unprep_request(struct request *); |
---|
1196 | 1150 | |
---|
1197 | 1151 | /* |
---|
1198 | 1152 | * Access functions for manipulating queue properties |
---|
1199 | 1153 | */ |
---|
1200 | | -extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, |
---|
1201 | | - spinlock_t *lock, int node_id); |
---|
1202 | | -extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); |
---|
1203 | | -extern int blk_init_allocated_queue(struct request_queue *); |
---|
1204 | 1154 | extern void blk_cleanup_queue(struct request_queue *); |
---|
1205 | | -extern void blk_queue_make_request(struct request_queue *, make_request_fn *); |
---|
1206 | 1155 | extern void blk_queue_bounce_limit(struct request_queue *, u64); |
---|
1207 | 1156 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); |
---|
1208 | 1157 | extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); |
---|
.. | .. |
---|
1217 | 1166 | extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, |
---|
1218 | 1167 | unsigned int max_write_same_sectors); |
---|
1219 | 1168 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); |
---|
| 1169 | +extern void blk_queue_max_zone_append_sectors(struct request_queue *q, |
---|
| 1170 | + unsigned int max_zone_append_sectors); |
---|
1220 | 1171 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); |
---|
1221 | 1172 | extern void blk_queue_alignment_offset(struct request_queue *q, |
---|
1222 | 1173 | unsigned int alignment); |
---|
| 1174 | +void blk_queue_update_readahead(struct request_queue *q); |
---|
1223 | 1175 | extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); |
---|
1224 | 1176 | extern void blk_queue_io_min(struct request_queue *q, unsigned int min); |
---|
1225 | 1177 | extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); |
---|
.. | .. |
---|
1229 | 1181 | extern void blk_set_stacking_limits(struct queue_limits *lim); |
---|
1230 | 1182 | extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, |
---|
1231 | 1183 | sector_t offset); |
---|
1232 | | -extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, |
---|
1233 | | - sector_t offset); |
---|
1234 | 1184 | extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, |
---|
1235 | 1185 | sector_t offset); |
---|
1236 | | -extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); |
---|
1237 | | -extern void blk_queue_dma_pad(struct request_queue *, unsigned int); |
---|
1238 | 1186 | extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); |
---|
1239 | | -extern int blk_queue_dma_drain(struct request_queue *q, |
---|
1240 | | - dma_drain_needed_fn *dma_drain_needed, |
---|
1241 | | - void *buf, unsigned int size); |
---|
1242 | | -extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); |
---|
1243 | 1187 | extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); |
---|
1244 | 1188 | extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); |
---|
1245 | | -extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); |
---|
1246 | | -extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); |
---|
1247 | 1189 | extern void blk_queue_dma_alignment(struct request_queue *, int); |
---|
1248 | 1190 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); |
---|
1249 | | -extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); |
---|
1250 | | -extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); |
---|
1251 | 1191 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); |
---|
1252 | | -extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); |
---|
1253 | 1192 | extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); |
---|
| 1193 | +extern void blk_queue_required_elevator_features(struct request_queue *q, |
---|
| 1194 | + unsigned int features); |
---|
| 1195 | +extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, |
---|
| 1196 | + struct device *dev); |
---|
1254 | 1197 | |
---|
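A minimal sketch of the elevator-feature hook declared above (the wrapper name is illustrative; ELEVATOR_F_ZBD_SEQ_WRITE comes from <linux/elevator.h>): a host-managed zoned driver can restrict scheduler selection to those preserving sequential write order.

    static void example_require_zoned_scheduler(struct request_queue *q)
    {
            blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
    }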
1255 | 1198 | /* |
---|
1256 | 1199 | * Number of physical segments as sent to the device. |
---|
.. | .. |
---|
1277 | 1220 | return max_t(unsigned short, rq->nr_phys_segments, 1); |
---|
1278 | 1221 | } |
---|
1279 | 1222 | |
---|
1280 | | -extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); |
---|
| 1223 | +int __blk_rq_map_sg(struct request_queue *q, struct request *rq, |
---|
| 1224 | + struct scatterlist *sglist, struct scatterlist **last_sg); |
---|
| 1225 | +static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, |
---|
| 1226 | + struct scatterlist *sglist) |
---|
| 1227 | +{ |
---|
| 1228 | + struct scatterlist *last_sg = NULL; |
---|
| 1229 | + |
---|
| 1230 | + return __blk_rq_map_sg(q, rq, sglist, &last_sg); |
---|
| 1231 | +} |
---|
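blk_rq_map_sg() is now a thin wrapper around __blk_rq_map_sg() with a throwaway last_sg cursor; a caller that needs the final scatterlist entry (for example to append a drain or padding element) uses the double-underscore variant directly. A hedged sketch of the common driver pattern, assuming the caller has sized the scatterlist table itself:

static int example_map_request(struct request *rq, struct scatterlist *sgl)
{
	int nents;

	/* sgl is assumed to hold at least blk_rq_nr_phys_segments(rq) entries */
	sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
	nents = blk_rq_map_sg(rq->q, rq, sgl);
	if (nents <= 0)
		return -EIO;
	return nents;	/* ready to hand to dma_map_sg() */
}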
1281 | 1232 | extern void blk_dump_rq_flags(struct request *, char *); |
---|
1282 | | -extern long nr_blockdev_pages(void); |
---|
1283 | 1233 | |
---|
1284 | 1234 | bool __must_check blk_get_queue(struct request_queue *); |
---|
1285 | | -struct request_queue *blk_alloc_queue(gfp_t); |
---|
1286 | | -struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, |
---|
1287 | | - spinlock_t *lock); |
---|
| 1235 | +struct request_queue *blk_alloc_queue(int node_id); |
---|
1288 | 1236 | extern void blk_put_queue(struct request_queue *); |
---|
1289 | 1237 | extern void blk_set_queue_dying(struct request_queue *); |
---|
1290 | 1238 | |
---|
1291 | | -/* |
---|
1292 | | - * block layer runtime pm functions |
---|
1293 | | - */ |
---|
1294 | | -#ifdef CONFIG_PM |
---|
1295 | | -extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); |
---|
1296 | | -extern int blk_pre_runtime_suspend(struct request_queue *q); |
---|
1297 | | -extern void blk_post_runtime_suspend(struct request_queue *q, int err); |
---|
1298 | | -extern void blk_pre_runtime_resume(struct request_queue *q); |
---|
1299 | | -extern void blk_post_runtime_resume(struct request_queue *q, int err); |
---|
1300 | | -extern void blk_set_runtime_active(struct request_queue *q); |
---|
1301 | | -#else |
---|
1302 | | -static inline void blk_pm_runtime_init(struct request_queue *q, |
---|
1303 | | - struct device *dev) {} |
---|
1304 | | -static inline int blk_pre_runtime_suspend(struct request_queue *q) |
---|
1305 | | -{ |
---|
1306 | | - return -ENOSYS; |
---|
1307 | | -} |
---|
1308 | | -static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} |
---|
1309 | | -static inline void blk_pre_runtime_resume(struct request_queue *q) {} |
---|
1310 | | -static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} |
---|
1311 | | -static inline void blk_set_runtime_active(struct request_queue *q) {} |
---|
1312 | | -#endif |
---|
1313 | | - |
---|
| 1239 | +#ifdef CONFIG_BLOCK |
---|
1314 | 1240 | /* |
---|
1315 | 1241 | * blk_plug permits building a queue of related requests by holding the I/O |
---|
1316 | 1242 | * fragments for a short period. This allows merging of sequential requests |
---|
.. | .. |
---|
1324 | 1250 | * schedule() where blk_schedule_flush_plug() is called. |
---|
1325 | 1251 | */ |
---|
1326 | 1252 | struct blk_plug { |
---|
1327 | | - struct list_head list; /* requests */ |
---|
1328 | 1253 | struct list_head mq_list; /* blk-mq requests */ |
---|
1329 | 1254 | struct list_head cb_list; /* md requires an unplug callback */ |
---|
| 1255 | + unsigned short rq_count; |
---|
| 1256 | + bool multiple_queues; |
---|
| 1257 | + bool nowait; |
---|
1330 | 1258 | }; |
---|
1331 | | -#define BLK_MAX_REQUEST_COUNT 16 |
---|
1332 | | -#define BLK_PLUG_FLUSH_SIZE (128 * 1024) |
---|
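To make the plugging lifecycle concrete, here is a minimal, hedged sketch (not part of this header) of a submitter batching bios under one on-stack plug so the block layer can merge adjacent requests before they reach the driver:

static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* requests collect in current->plug */
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);		/* unplug: flush the batch to the device */
}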
1333 | 1259 | |
---|
1334 | 1260 | struct blk_plug_cb; |
---|
1335 | 1261 | typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); |
---|
.. | .. |
---|
1365 | 1291 | struct blk_plug *plug = tsk->plug; |
---|
1366 | 1292 | |
---|
1367 | 1293 | return plug && |
---|
1368 | | - (!list_empty(&plug->list) || |
---|
1369 | | - !list_empty(&plug->mq_list) || |
---|
| 1294 | + (!list_empty(&plug->mq_list) || |
---|
1370 | 1295 | !list_empty(&plug->cb_list)); |
---|
1371 | 1296 | } |
---|
1372 | 1297 | |
---|
1373 | | -/* |
---|
1374 | | - * tag stuff |
---|
1375 | | - */ |
---|
1376 | | -extern int blk_queue_start_tag(struct request_queue *, struct request *); |
---|
1377 | | -extern struct request *blk_queue_find_tag(struct request_queue *, int); |
---|
1378 | | -extern void blk_queue_end_tag(struct request_queue *, struct request *); |
---|
1379 | | -extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int); |
---|
1380 | | -extern void blk_queue_free_tags(struct request_queue *); |
---|
1381 | | -extern int blk_queue_resize_tags(struct request_queue *, int); |
---|
1382 | | -extern struct blk_queue_tag *blk_init_tags(int, int); |
---|
1383 | | -extern void blk_free_tags(struct blk_queue_tag *); |
---|
| 1298 | +int blkdev_issue_flush(struct block_device *, gfp_t); |
---|
| 1299 | +long nr_blockdev_pages(void); |
---|
| 1300 | +#else /* CONFIG_BLOCK */ |
---|
| 1301 | +struct blk_plug { |
---|
| 1302 | +}; |
---|
1384 | 1303 | |
---|
1385 | | -static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, |
---|
1386 | | - int tag) |
---|
| 1304 | +static inline void blk_start_plug(struct blk_plug *plug) |
---|
1387 | 1305 | { |
---|
1388 | | - if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) |
---|
1389 | | - return NULL; |
---|
1390 | | - return bqt->tag_index[tag]; |
---|
1391 | 1306 | } |
---|
1392 | 1307 | |
---|
1393 | | -extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); |
---|
| 1308 | +static inline void blk_finish_plug(struct blk_plug *plug) |
---|
| 1309 | +{ |
---|
| 1310 | +} |
---|
| 1311 | + |
---|
| 1312 | +static inline void blk_flush_plug(struct task_struct *task) |
---|
| 1313 | +{ |
---|
| 1314 | +} |
---|
| 1315 | + |
---|
| 1316 | +static inline void blk_schedule_flush_plug(struct task_struct *task) |
---|
| 1317 | +{ |
---|
| 1318 | +} |
---|
| 1319 | + |
---|
| 1320 | + |
---|
| 1321 | +static inline bool blk_needs_flush_plug(struct task_struct *tsk) |
---|
| 1322 | +{ |
---|
| 1323 | + return false; |
---|
| 1324 | +} |
---|
| 1325 | + |
---|
| 1326 | +static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask) |
---|
| 1327 | +{ |
---|
| 1328 | + return 0; |
---|
| 1329 | +} |
---|
| 1330 | + |
---|
| 1331 | +static inline long nr_blockdev_pages(void) |
---|
| 1332 | +{ |
---|
| 1333 | + return 0; |
---|
| 1334 | +} |
---|
| 1335 | +#endif /* CONFIG_BLOCK */ |
---|
| 1336 | + |
---|
| 1337 | +extern void blk_io_schedule(void); |
---|
| 1338 | + |
---|
1394 | 1339 | extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, |
---|
1395 | 1340 | sector_t nr_sects, gfp_t gfp_mask, struct page *page); |
---|
1396 | 1341 | |
---|
.. | .. |
---|
1434 | 1379 | |
---|
1435 | 1380 | extern int blk_verify_command(unsigned char *cmd, fmode_t mode); |
---|
1436 | 1381 | |
---|
| 1382 | +static inline bool bdev_is_partition(struct block_device *bdev) |
---|
| 1383 | +{ |
---|
| 1384 | + return bdev->bd_partno; |
---|
| 1385 | +} |
---|
| 1386 | + |
---|
1437 | 1387 | enum blk_default_limits { |
---|
1438 | 1388 | BLK_MAX_SEGMENTS = 128, |
---|
1439 | 1389 | BLK_SAFE_MAX_SECTORS = 255, |
---|
.. | .. |
---|
1442 | 1392 | BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, |
---|
1443 | 1393 | }; |
---|
1444 | 1394 | |
---|
1445 | | -static inline unsigned long queue_segment_boundary(struct request_queue *q) |
---|
| 1395 | +static inline unsigned long queue_segment_boundary(const struct request_queue *q) |
---|
1446 | 1396 | { |
---|
1447 | 1397 | return q->limits.seg_boundary_mask; |
---|
1448 | 1398 | } |
---|
1449 | 1399 | |
---|
1450 | | -static inline unsigned long queue_virt_boundary(struct request_queue *q) |
---|
| 1400 | +static inline unsigned long queue_virt_boundary(const struct request_queue *q) |
---|
1451 | 1401 | { |
---|
1452 | 1402 | return q->limits.virt_boundary_mask; |
---|
1453 | 1403 | } |
---|
1454 | 1404 | |
---|
1455 | | -static inline unsigned int queue_max_sectors(struct request_queue *q) |
---|
| 1405 | +static inline unsigned int queue_max_sectors(const struct request_queue *q) |
---|
1456 | 1406 | { |
---|
1457 | 1407 | return q->limits.max_sectors; |
---|
1458 | 1408 | } |
---|
1459 | 1409 | |
---|
1460 | | -static inline unsigned int queue_max_hw_sectors(struct request_queue *q) |
---|
| 1410 | +static inline unsigned int queue_max_hw_sectors(const struct request_queue *q) |
---|
1461 | 1411 | { |
---|
1462 | 1412 | return q->limits.max_hw_sectors; |
---|
1463 | 1413 | } |
---|
1464 | 1414 | |
---|
1465 | | -static inline unsigned short queue_max_segments(struct request_queue *q) |
---|
| 1415 | +static inline unsigned short queue_max_segments(const struct request_queue *q) |
---|
1466 | 1416 | { |
---|
1467 | 1417 | return q->limits.max_segments; |
---|
1468 | 1418 | } |
---|
1469 | 1419 | |
---|
1470 | | -static inline unsigned short queue_max_discard_segments(struct request_queue *q) |
---|
| 1420 | +static inline unsigned short queue_max_discard_segments(const struct request_queue *q) |
---|
1471 | 1421 | { |
---|
1472 | 1422 | return q->limits.max_discard_segments; |
---|
1473 | 1423 | } |
---|
1474 | 1424 | |
---|
1475 | | -static inline unsigned int queue_max_segment_size(struct request_queue *q) |
---|
| 1425 | +static inline unsigned int queue_max_segment_size(const struct request_queue *q) |
---|
1476 | 1426 | { |
---|
1477 | 1427 | return q->limits.max_segment_size; |
---|
1478 | 1428 | } |
---|
1479 | 1429 | |
---|
1480 | | -static inline unsigned queue_logical_block_size(struct request_queue *q) |
---|
| 1430 | +static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q) |
---|
| 1431 | +{ |
---|
| 1432 | + |
---|
| 1433 | + const struct queue_limits *l = &q->limits; |
---|
| 1434 | + |
---|
| 1435 | + return min(l->max_zone_append_sectors, l->max_sectors); |
---|
| 1436 | +} |
---|
| 1437 | + |
---|
| 1438 | +static inline unsigned queue_logical_block_size(const struct request_queue *q) |
---|
1481 | 1439 | { |
---|
1482 | 1440 | int retval = 512; |
---|
1483 | 1441 | |
---|
.. | .. |
---|
1492 | 1450 | return queue_logical_block_size(bdev_get_queue(bdev)); |
---|
1493 | 1451 | } |
---|
1494 | 1452 | |
---|
1495 | | -static inline unsigned int queue_physical_block_size(struct request_queue *q) |
---|
| 1453 | +static inline unsigned int queue_physical_block_size(const struct request_queue *q) |
---|
1496 | 1454 | { |
---|
1497 | 1455 | return q->limits.physical_block_size; |
---|
1498 | 1456 | } |
---|
.. | .. |
---|
1502 | 1460 | return queue_physical_block_size(bdev_get_queue(bdev)); |
---|
1503 | 1461 | } |
---|
1504 | 1462 | |
---|
1505 | | -static inline unsigned int queue_io_min(struct request_queue *q) |
---|
| 1463 | +static inline unsigned int queue_io_min(const struct request_queue *q) |
---|
1506 | 1464 | { |
---|
1507 | 1465 | return q->limits.io_min; |
---|
1508 | 1466 | } |
---|
.. | .. |
---|
1512 | 1470 | return queue_io_min(bdev_get_queue(bdev)); |
---|
1513 | 1471 | } |
---|
1514 | 1472 | |
---|
1515 | | -static inline unsigned int queue_io_opt(struct request_queue *q) |
---|
| 1473 | +static inline unsigned int queue_io_opt(const struct request_queue *q) |
---|
1516 | 1474 | { |
---|
1517 | 1475 | return q->limits.io_opt; |
---|
1518 | 1476 | } |
---|
.. | .. |
---|
1522 | 1480 | return queue_io_opt(bdev_get_queue(bdev)); |
---|
1523 | 1481 | } |
---|
1524 | 1482 | |
---|
1525 | | -static inline int queue_alignment_offset(struct request_queue *q) |
---|
| 1483 | +static inline int queue_alignment_offset(const struct request_queue *q) |
---|
1526 | 1484 | { |
---|
1527 | 1485 | if (q->limits.misaligned) |
---|
1528 | 1486 | return -1; |
---|
.. | .. |
---|
1545 | 1503 | |
---|
1546 | 1504 | if (q->limits.misaligned) |
---|
1547 | 1505 | return -1; |
---|
1548 | | - |
---|
1549 | | - if (bdev != bdev->bd_contains) |
---|
1550 | | - return bdev->bd_part->alignment_offset; |
---|
1551 | | - |
---|
| 1506 | + if (bdev_is_partition(bdev)) |
---|
| 1507 | + return queue_limit_alignment_offset(&q->limits, |
---|
| 1508 | + bdev->bd_part->start_sect); |
---|
1552 | 1509 | return q->limits.alignment_offset; |
---|
1553 | 1510 | } |
---|
1554 | 1511 | |
---|
1555 | | -static inline int queue_discard_alignment(struct request_queue *q) |
---|
| 1512 | +static inline int queue_discard_alignment(const struct request_queue *q) |
---|
1556 | 1513 | { |
---|
1557 | 1514 | if (q->limits.discard_misaligned) |
---|
1558 | 1515 | return -1; |
---|
.. | .. |
---|
1583 | 1540 | return offset << SECTOR_SHIFT; |
---|
1584 | 1541 | } |
---|
1585 | 1542 | |
---|
| 1543 | +/* |
---|
| 1544 | + * Two cases of handling DISCARD merge: |
---|
| 1545 | + * If max_discard_segments > 1, the driver treats every bio as a |
---|
| 1546 | + * range and sends them to the controller together. The ranges |
---|
| 1547 | + * need not be contiguous. |
---|
| 1548 | + * Otherwise, the bios/requests are handled like any other request |
---|
| 1549 | + * and must be contiguous. |
---|
| 1550 | + */ |
---|
| 1551 | +static inline bool blk_discard_mergable(struct request *req) |
---|
| 1552 | +{ |
---|
| 1553 | + if (req_op(req) == REQ_OP_DISCARD && |
---|
| 1554 | + queue_max_discard_segments(req->q) > 1) |
---|
| 1555 | + return true; |
---|
| 1556 | + return false; |
---|
| 1557 | +} |
---|
| 1558 | + |
---|
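The comment above describes the two discard-merge modes; a merge decision therefore has roughly the shape sketched below. This is a simplified, hedged illustration, not the actual code in block/blk-merge.c:

static bool example_can_merge_discard(struct request *req, struct bio *bio)
{
	if (blk_discard_mergable(req))
		/* multi-range discard: contiguity is not required */
		return req->nr_phys_segments < queue_max_discard_segments(req->q);
	/* single-range discard: must be contiguous, like any other back merge */
	return blk_rq_pos(req) + blk_rq_sectors(req) == bio->bi_iter.bi_sector;
}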
1586 | 1559 | static inline int bdev_discard_alignment(struct block_device *bdev) |
---|
1587 | 1560 | { |
---|
1588 | 1561 | struct request_queue *q = bdev_get_queue(bdev); |
---|
1589 | 1562 | |
---|
1590 | | - if (bdev != bdev->bd_contains) |
---|
1591 | | - return bdev->bd_part->discard_alignment; |
---|
1592 | | - |
---|
| 1563 | + if (bdev_is_partition(bdev)) |
---|
| 1564 | + return queue_limit_discard_alignment(&q->limits, |
---|
| 1565 | + bdev->bd_part->start_sect); |
---|
1593 | 1566 | return q->limits.discard_alignment; |
---|
1594 | 1567 | } |
---|
1595 | 1568 | |
---|
.. | .. |
---|
1633 | 1606 | return false; |
---|
1634 | 1607 | } |
---|
1635 | 1608 | |
---|
1636 | | -static inline unsigned int bdev_zone_sectors(struct block_device *bdev) |
---|
| 1609 | +static inline sector_t bdev_zone_sectors(struct block_device *bdev) |
---|
1637 | 1610 | { |
---|
1638 | 1611 | struct request_queue *q = bdev_get_queue(bdev); |
---|
1639 | 1612 | |
---|
.. | .. |
---|
1642 | 1615 | return 0; |
---|
1643 | 1616 | } |
---|
1644 | 1617 | |
---|
1645 | | -static inline int queue_dma_alignment(struct request_queue *q) |
---|
| 1618 | +static inline unsigned int bdev_max_open_zones(struct block_device *bdev) |
---|
| 1619 | +{ |
---|
| 1620 | + struct request_queue *q = bdev_get_queue(bdev); |
---|
| 1621 | + |
---|
| 1622 | + if (q) |
---|
| 1623 | + return queue_max_open_zones(q); |
---|
| 1624 | + return 0; |
---|
| 1625 | +} |
---|
| 1626 | + |
---|
| 1627 | +static inline unsigned int bdev_max_active_zones(struct block_device *bdev) |
---|
| 1628 | +{ |
---|
| 1629 | + struct request_queue *q = bdev_get_queue(bdev); |
---|
| 1630 | + |
---|
| 1631 | + if (q) |
---|
| 1632 | + return queue_max_active_zones(q); |
---|
| 1633 | + return 0; |
---|
| 1634 | +} |
---|
| 1635 | + |
---|
| 1636 | +static inline int queue_dma_alignment(const struct request_queue *q) |
---|
1646 | 1637 | { |
---|
1647 | 1638 | return q ? q->dma_alignment : 511; |
---|
1648 | 1639 | } |
---|
.. | .. |
---|
1667 | 1658 | |
---|
1668 | 1659 | static inline unsigned int block_size(struct block_device *bdev) |
---|
1669 | 1660 | { |
---|
1670 | | - return bdev->bd_block_size; |
---|
1671 | | -} |
---|
1672 | | - |
---|
1673 | | -static inline bool queue_flush_queueable(struct request_queue *q) |
---|
1674 | | -{ |
---|
1675 | | - return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags); |
---|
1676 | | -} |
---|
1677 | | - |
---|
1678 | | -typedef struct {struct page *v;} Sector; |
---|
1679 | | - |
---|
1680 | | -unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); |
---|
1681 | | - |
---|
1682 | | -static inline void put_dev_sector(Sector p) |
---|
1683 | | -{ |
---|
1684 | | - put_page(p.v); |
---|
1685 | | -} |
---|
1686 | | - |
---|
1687 | | -static inline bool __bvec_gap_to_prev(struct request_queue *q, |
---|
1688 | | - struct bio_vec *bprv, unsigned int offset) |
---|
1689 | | -{ |
---|
1690 | | - return offset || |
---|
1691 | | - ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); |
---|
1692 | | -} |
---|
1693 | | - |
---|
1694 | | -/* |
---|
1695 | | - * Check if adding a bio_vec after bprv with offset would create a gap in |
---|
1696 | | - * the SG list. Most drivers don't care about this, but some do. |
---|
1697 | | - */ |
---|
1698 | | -static inline bool bvec_gap_to_prev(struct request_queue *q, |
---|
1699 | | - struct bio_vec *bprv, unsigned int offset) |
---|
1700 | | -{ |
---|
1701 | | - if (!queue_virt_boundary(q)) |
---|
1702 | | - return false; |
---|
1703 | | - return __bvec_gap_to_prev(q, bprv, offset); |
---|
1704 | | -} |
---|
1705 | | - |
---|
1706 | | -/* |
---|
1707 | | - * Check if the two bvecs from two bios can be merged to one segment. |
---|
1708 | | - * If yes, no need to check gap between the two bios since the 1st bio |
---|
1709 | | - * and the 1st bvec in the 2nd bio can be handled in one segment. |
---|
1710 | | - */ |
---|
1711 | | -static inline bool bios_segs_mergeable(struct request_queue *q, |
---|
1712 | | - struct bio *prev, struct bio_vec *prev_last_bv, |
---|
1713 | | - struct bio_vec *next_first_bv) |
---|
1714 | | -{ |
---|
1715 | | - if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv)) |
---|
1716 | | - return false; |
---|
1717 | | - if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv)) |
---|
1718 | | - return false; |
---|
1719 | | - if (prev->bi_seg_back_size + next_first_bv->bv_len > |
---|
1720 | | - queue_max_segment_size(q)) |
---|
1721 | | - return false; |
---|
1722 | | - return true; |
---|
1723 | | -} |
---|
1724 | | - |
---|
1725 | | -static inline bool bio_will_gap(struct request_queue *q, |
---|
1726 | | - struct request *prev_rq, |
---|
1727 | | - struct bio *prev, |
---|
1728 | | - struct bio *next) |
---|
1729 | | -{ |
---|
1730 | | - if (bio_has_data(prev) && queue_virt_boundary(q)) { |
---|
1731 | | - struct bio_vec pb, nb; |
---|
1732 | | - |
---|
1733 | | - /* |
---|
1734 | | - * don't merge if the 1st bio starts with non-zero |
---|
1735 | | - * offset, otherwise it is quite difficult to respect |
---|
1736 | | - * sg gap limit. We work hard to merge a huge number of small |
---|
1737 | | - * single bios in case of mkfs. |
---|
1738 | | - */ |
---|
1739 | | - if (prev_rq) |
---|
1740 | | - bio_get_first_bvec(prev_rq->bio, &pb); |
---|
1741 | | - else |
---|
1742 | | - bio_get_first_bvec(prev, &pb); |
---|
1743 | | - if (pb.bv_offset) |
---|
1744 | | - return true; |
---|
1745 | | - |
---|
1746 | | - /* |
---|
1747 | | - * We don't need to worry about the situation that the |
---|
1748 | | - * merged segment ends in unaligned virt boundary: |
---|
1749 | | - * |
---|
1750 | | - * - if 'pb' ends aligned, the merged segment ends aligned |
---|
1751 | | - * - if 'pb' ends unaligned, the next bio must include |
---|
1752 | | - * one single bvec of 'nb', otherwise the 'nb' can't |
---|
1753 | | - * merge with 'pb' |
---|
1754 | | - */ |
---|
1755 | | - bio_get_last_bvec(prev, &pb); |
---|
1756 | | - bio_get_first_bvec(next, &nb); |
---|
1757 | | - |
---|
1758 | | - if (!bios_segs_mergeable(q, prev, &pb, &nb)) |
---|
1759 | | - return __bvec_gap_to_prev(q, &pb, nb.bv_offset); |
---|
1760 | | - } |
---|
1761 | | - |
---|
1762 | | - return false; |
---|
1763 | | -} |
---|
1764 | | - |
---|
1765 | | -static inline bool req_gap_back_merge(struct request *req, struct bio *bio) |
---|
1766 | | -{ |
---|
1767 | | - return bio_will_gap(req->q, req, req->biotail, bio); |
---|
1768 | | -} |
---|
1769 | | - |
---|
1770 | | -static inline bool req_gap_front_merge(struct request *req, struct bio *bio) |
---|
1771 | | -{ |
---|
1772 | | - return bio_will_gap(req->q, NULL, bio, req->bio); |
---|
| 1661 | + return 1 << bdev->bd_inode->i_blkbits; |
---|
1773 | 1662 | } |
---|
1774 | 1663 | |
---|
1775 | 1664 | int kblockd_schedule_work(struct work_struct *work); |
---|
1776 | | -int kblockd_schedule_work_on(int cpu, struct work_struct *work); |
---|
1777 | 1665 | int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); |
---|
1778 | 1666 | |
---|
1779 | 1667 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ |
---|
.. | .. |
---|
1800 | 1688 | }; |
---|
1801 | 1689 | |
---|
1802 | 1690 | typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); |
---|
| 1691 | +typedef void (integrity_prepare_fn) (struct request *); |
---|
| 1692 | +typedef void (integrity_complete_fn) (struct request *, unsigned int); |
---|
1803 | 1693 | |
---|
1804 | 1694 | struct blk_integrity_profile { |
---|
1805 | 1695 | integrity_processing_fn *generate_fn; |
---|
1806 | 1696 | integrity_processing_fn *verify_fn; |
---|
| 1697 | + integrity_prepare_fn *prepare_fn; |
---|
| 1698 | + integrity_complete_fn *complete_fn; |
---|
1807 | 1699 | const char *name; |
---|
1808 | 1700 | }; |
---|
1809 | 1701 | |
---|
.. | .. |
---|
1813 | 1705 | extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, |
---|
1814 | 1706 | struct scatterlist *); |
---|
1815 | 1707 | extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); |
---|
1816 | | -extern bool blk_integrity_merge_rq(struct request_queue *, struct request *, |
---|
1817 | | - struct request *); |
---|
1818 | | -extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, |
---|
1819 | | - struct bio *); |
---|
1820 | 1708 | |
---|
1821 | 1709 | static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) |
---|
1822 | 1710 | { |
---|
.. | .. |
---|
1834 | 1722 | return blk_get_integrity(bdev->bd_disk); |
---|
1835 | 1723 | } |
---|
1836 | 1724 | |
---|
| 1725 | +static inline bool |
---|
| 1726 | +blk_integrity_queue_supports_integrity(struct request_queue *q) |
---|
| 1727 | +{ |
---|
| 1728 | + return q->integrity.profile; |
---|
| 1729 | +} |
---|
| 1730 | + |
---|
1837 | 1731 | static inline bool blk_integrity_rq(struct request *rq) |
---|
1838 | 1732 | { |
---|
1839 | 1733 | return rq->cmd_flags & REQ_INTEGRITY; |
---|
.. | .. |
---|
1846 | 1740 | } |
---|
1847 | 1741 | |
---|
1848 | 1742 | static inline unsigned short |
---|
1849 | | -queue_max_integrity_segments(struct request_queue *q) |
---|
| 1743 | +queue_max_integrity_segments(const struct request_queue *q) |
---|
1850 | 1744 | { |
---|
1851 | 1745 | return q->limits.max_integrity_segments; |
---|
1852 | | -} |
---|
1853 | | - |
---|
1854 | | -static inline bool integrity_req_gap_back_merge(struct request *req, |
---|
1855 | | - struct bio *next) |
---|
1856 | | -{ |
---|
1857 | | - struct bio_integrity_payload *bip = bio_integrity(req->bio); |
---|
1858 | | - struct bio_integrity_payload *bip_next = bio_integrity(next); |
---|
1859 | | - |
---|
1860 | | - return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], |
---|
1861 | | - bip_next->bip_vec[0].bv_offset); |
---|
1862 | | -} |
---|
1863 | | - |
---|
1864 | | -static inline bool integrity_req_gap_front_merge(struct request *req, |
---|
1865 | | - struct bio *bio) |
---|
1866 | | -{ |
---|
1867 | | - struct bio_integrity_payload *bip = bio_integrity(bio); |
---|
1868 | | - struct bio_integrity_payload *bip_next = bio_integrity(req->bio); |
---|
1869 | | - |
---|
1870 | | - return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], |
---|
1871 | | - bip_next->bip_vec[0].bv_offset); |
---|
1872 | 1746 | } |
---|
1873 | 1747 | |
---|
1874 | 1748 | /** |
---|
.. | .. |
---|
1891 | 1765 | unsigned int sectors) |
---|
1892 | 1766 | { |
---|
1893 | 1767 | return bio_integrity_intervals(bi, sectors) * bi->tuple_size; |
---|
| 1768 | +} |
---|
| 1769 | + |
---|
| 1770 | +/* |
---|
| 1771 | + * Return the first bvec that contains integrity data. Only drivers that are |
---|
| 1772 | + * limited to a single integrity segment should use this helper. |
---|
| 1773 | + */ |
---|
| 1774 | +static inline struct bio_vec *rq_integrity_vec(struct request *rq) |
---|
| 1775 | +{ |
---|
| 1776 | + if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1)) |
---|
| 1777 | + return NULL; |
---|
| 1778 | + return rq->bio->bi_integrity->bip_vec; |
---|
1894 | 1779 | } |
---|
1895 | 1780 | |
---|
1896 | 1781 | #else /* CONFIG_BLK_DEV_INTEGRITY */ |
---|
.. | .. |
---|
1923 | 1808 | { |
---|
1924 | 1809 | return NULL; |
---|
1925 | 1810 | } |
---|
| 1811 | +static inline bool |
---|
| 1812 | +blk_integrity_queue_supports_integrity(struct request_queue *q) |
---|
| 1813 | +{ |
---|
| 1814 | + return false; |
---|
| 1815 | +} |
---|
1926 | 1816 | static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) |
---|
1927 | 1817 | { |
---|
1928 | 1818 | return 0; |
---|
.. | .. |
---|
1938 | 1828 | unsigned int segs) |
---|
1939 | 1829 | { |
---|
1940 | 1830 | } |
---|
1941 | | -static inline unsigned short queue_max_integrity_segments(struct request_queue *q) |
---|
| 1831 | +static inline unsigned short queue_max_integrity_segments(const struct request_queue *q) |
---|
1942 | 1832 | { |
---|
1943 | 1833 | return 0; |
---|
1944 | | -} |
---|
1945 | | -static inline bool blk_integrity_merge_rq(struct request_queue *rq, |
---|
1946 | | - struct request *r1, |
---|
1947 | | - struct request *r2) |
---|
1948 | | -{ |
---|
1949 | | - return true; |
---|
1950 | | -} |
---|
1951 | | -static inline bool blk_integrity_merge_bio(struct request_queue *rq, |
---|
1952 | | - struct request *r, |
---|
1953 | | - struct bio *b) |
---|
1954 | | -{ |
---|
1955 | | - return true; |
---|
1956 | | -} |
---|
1957 | | - |
---|
1958 | | -static inline bool integrity_req_gap_back_merge(struct request *req, |
---|
1959 | | - struct bio *next) |
---|
1960 | | -{ |
---|
1961 | | - return false; |
---|
1962 | | -} |
---|
1963 | | -static inline bool integrity_req_gap_front_merge(struct request *req, |
---|
1964 | | - struct bio *bio) |
---|
1965 | | -{ |
---|
1966 | | - return false; |
---|
1967 | 1834 | } |
---|
1968 | 1835 | |
---|
1969 | 1836 | static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, |
---|
.. | .. |
---|
1978 | 1845 | return 0; |
---|
1979 | 1846 | } |
---|
1980 | 1847 | |
---|
| 1848 | +static inline struct bio_vec *rq_integrity_vec(struct request *rq) |
---|
| 1849 | +{ |
---|
| 1850 | + return NULL; |
---|
| 1851 | +} |
---|
| 1852 | + |
---|
1981 | 1853 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
---|
1982 | 1854 | |
---|
| 1855 | +#ifdef CONFIG_BLK_INLINE_ENCRYPTION |
---|
| 1856 | + |
---|
| 1857 | +bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q); |
---|
| 1858 | + |
---|
| 1859 | +void blk_ksm_unregister(struct request_queue *q); |
---|
| 1860 | + |
---|
| 1861 | +#else /* CONFIG_BLK_INLINE_ENCRYPTION */ |
---|
| 1862 | + |
---|
| 1863 | +static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm, |
---|
| 1864 | + struct request_queue *q) |
---|
| 1865 | +{ |
---|
| 1866 | + return true; |
---|
| 1867 | +} |
---|
| 1868 | + |
---|
| 1869 | +static inline void blk_ksm_unregister(struct request_queue *q) { } |
---|
| 1870 | + |
---|
| 1871 | +#endif /* CONFIG_BLK_INLINE_ENCRYPTION */ |
---|
| 1872 | + |
---|
| 1873 | + |
---|
1983 | 1874 | struct block_device_operations { |
---|
| 1875 | + blk_qc_t (*submit_bio) (struct bio *bio); |
---|
1984 | 1876 | int (*open) (struct block_device *, fmode_t); |
---|
1985 | 1877 | void (*release) (struct gendisk *, fmode_t); |
---|
1986 | 1878 | int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); |
---|
.. | .. |
---|
1988 | 1880 | int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); |
---|
1989 | 1881 | unsigned int (*check_events) (struct gendisk *disk, |
---|
1990 | 1882 | unsigned int clearing); |
---|
1991 | | - /* ->media_changed() is DEPRECATED, use ->check_events() instead */ |
---|
1992 | | - int (*media_changed) (struct gendisk *); |
---|
1993 | 1883 | void (*unlock_native_capacity) (struct gendisk *); |
---|
1994 | 1884 | int (*revalidate_disk) (struct gendisk *); |
---|
1995 | 1885 | int (*getgeo)(struct block_device *, struct hd_geometry *); |
---|
1996 | 1886 | /* this callback is with swap_lock and sometimes page table lock held */ |
---|
1997 | 1887 | void (*swap_slot_free_notify) (struct block_device *, unsigned long); |
---|
| 1888 | + int (*report_zones)(struct gendisk *, sector_t sector, |
---|
| 1889 | + unsigned int nr_zones, report_zones_cb cb, void *data); |
---|
| 1890 | + char *(*devnode)(struct gendisk *disk, umode_t *mode); |
---|
1998 | 1891 | struct module *owner; |
---|
1999 | 1892 | const struct pr_ops *pr_ops; |
---|
| 1893 | + |
---|
| 1894 | + ANDROID_KABI_RESERVE(1); |
---|
| 1895 | + ANDROID_KABI_RESERVE(2); |
---|
| 1896 | + ANDROID_OEM_DATA(1); |
---|
2000 | 1897 | }; |
---|
| 1898 | + |
---|
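As a hedged illustration (not part of this header), a minimal operations table for a bio based driver might look like the sketch below; both callbacks are trivial stand-ins invented for the example:

static int example_fops_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static blk_qc_t example_fops_submit(struct bio *bio)
{
	bio_io_error(bio);	/* a real driver would service the bio here */
	return BLK_QC_T_NONE;
}

static const struct block_device_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_fops_open,
	.submit_bio	= example_fops_submit,	/* bio based drivers only */
};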
| 1899 | +#ifdef CONFIG_COMPAT |
---|
| 1900 | +extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t, |
---|
| 1901 | + unsigned int, unsigned long); |
---|
| 1902 | +#else |
---|
| 1903 | +#define blkdev_compat_ptr_ioctl NULL |
---|
| 1904 | +#endif |
---|
2001 | 1905 | |
---|
2002 | 1906 | extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, |
---|
2003 | 1907 | unsigned long); |
---|
.. | .. |
---|
2007 | 1911 | |
---|
2008 | 1912 | #ifdef CONFIG_BLK_DEV_ZONED |
---|
2009 | 1913 | bool blk_req_needs_zone_write_lock(struct request *rq); |
---|
| 1914 | +bool blk_req_zone_write_trylock(struct request *rq); |
---|
2010 | 1915 | void __blk_req_zone_write_lock(struct request *rq); |
---|
2011 | 1916 | void __blk_req_zone_write_unlock(struct request *rq); |
---|
2012 | 1917 | |
---|
.. | .. |
---|
2058 | 1963 | } |
---|
2059 | 1964 | #endif /* CONFIG_BLK_DEV_ZONED */ |
---|
2060 | 1965 | |
---|
2061 | | -#else /* CONFIG_BLOCK */ |
---|
| 1966 | +static inline void blk_wake_io_task(struct task_struct *waiter) |
---|
| 1967 | +{ |
---|
| 1968 | + /* |
---|
| 1969 | + * If we're polling, the task itself is doing the completions. In |
---|
| 1970 | + * that case we don't need to signal a wakeup; it's enough to just |
---|
| 1971 | + * mark the task as RUNNING. |
---|
| 1972 | + */ |
---|
| 1973 | + if (waiter == current) |
---|
| 1974 | + __set_current_state(TASK_RUNNING); |
---|
| 1975 | + else |
---|
| 1976 | + wake_up_process(waiter); |
---|
| 1977 | +} |
---|
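blk_wake_io_task() is the completion half of the synchronous submit-and-wait pattern, where the submitter may be polling for its own completion. A hedged sketch of the state it pairs with; struct example_dio and its fields are assumptions of this example:

struct example_dio {
	struct task_struct	*waiter;	/* task sleeping (or polling) in the submit path */
	bool			done;
};

static void example_bio_end_io(struct bio *bio)
{
	struct example_dio *dio = bio->bi_private;

	WRITE_ONCE(dio->done, true);
	blk_wake_io_task(dio->waiter);	/* just marks the task RUNNING if it is polling */
}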
2062 | 1978 | |
---|
2063 | | -struct block_device; |
---|
| 1979 | +unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, |
---|
| 1980 | + unsigned int op); |
---|
| 1981 | +void disk_end_io_acct(struct gendisk *disk, unsigned int op, |
---|
| 1982 | + unsigned long start_time); |
---|
2064 | 1983 | |
---|
2065 | | -/* |
---|
2066 | | - * stubs for when the block layer is configured out |
---|
| 1984 | +unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part, |
---|
| 1985 | + struct bio *bio); |
---|
| 1986 | +void part_end_io_acct(struct hd_struct *part, struct bio *bio, |
---|
| 1987 | + unsigned long start_time); |
---|
| 1988 | + |
---|
| 1989 | +/** |
---|
| 1990 | + * bio_start_io_acct - start I/O accounting for bio based drivers |
---|
| 1991 | + * @bio: bio to start account for |
---|
| 1992 | + * |
---|
| 1993 | + * Returns the start time that should be passed back to bio_end_io_acct(). |
---|
2067 | 1994 | */ |
---|
2068 | | -#define buffer_heads_over_limit 0 |
---|
2069 | | - |
---|
2070 | | -static inline long nr_blockdev_pages(void) |
---|
| 1995 | +static inline unsigned long bio_start_io_acct(struct bio *bio) |
---|
2071 | 1996 | { |
---|
2072 | | - return 0; |
---|
| 1997 | + return disk_start_io_acct(bio->bi_disk, bio_sectors(bio), bio_op(bio)); |
---|
2073 | 1998 | } |
---|
2074 | 1999 | |
---|
2075 | | -struct blk_plug { |
---|
2076 | | -}; |
---|
2077 | | - |
---|
2078 | | -static inline void blk_start_plug(struct blk_plug *plug) |
---|
| 2000 | +/** |
---|
| 2001 | + * bio_end_io_acct - end I/O accounting for bio based drivers |
---|
| 2002 | + * @bio: bio to end account for |
---|
| 2003 | + * @start: start time returned by bio_start_io_acct() |
---|
| 2004 | + */ |
---|
| 2005 | +static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) |
---|
2079 | 2006 | { |
---|
| 2007 | + return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time); |
---|
2080 | 2008 | } |
---|
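A hedged usage sketch (not part of this header) of the two accounting helpers in a bio based driver; completing the bio synchronously right after submission is a simplification of the example:

static blk_qc_t example_submit_bio(struct bio *bio)
{
	unsigned long start_time = bio_start_io_acct(bio);

	/* ... service the bio, then on completion: */
	bio_end_io_acct(bio, start_time);
	bio_endio(bio);
	return BLK_QC_T_NONE;
}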
2081 | 2009 | |
---|
2082 | | -static inline void blk_finish_plug(struct blk_plug *plug) |
---|
2083 | | -{ |
---|
2084 | | -} |
---|
| 2010 | +int bdev_read_only(struct block_device *bdev); |
---|
| 2011 | +int set_blocksize(struct block_device *bdev, int size); |
---|
2085 | 2012 | |
---|
2086 | | -static inline void blk_flush_plug(struct task_struct *task) |
---|
2087 | | -{ |
---|
2088 | | -} |
---|
| 2013 | +const char *bdevname(struct block_device *bdev, char *buffer); |
---|
| 2014 | +struct block_device *lookup_bdev(const char *); |
---|
2089 | 2015 | |
---|
2090 | | -static inline void blk_schedule_flush_plug(struct task_struct *task) |
---|
2091 | | -{ |
---|
2092 | | -} |
---|
| 2016 | +void blkdev_show(struct seq_file *seqf, off_t offset); |
---|
2093 | 2017 | |
---|
2094 | | - |
---|
2095 | | -static inline bool blk_needs_flush_plug(struct task_struct *tsk) |
---|
2096 | | -{ |
---|
2097 | | - return false; |
---|
2098 | | -} |
---|
2099 | | - |
---|
2100 | | -static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, |
---|
2101 | | - sector_t *error_sector) |
---|
2102 | | -{ |
---|
2103 | | - return 0; |
---|
2104 | | -} |
---|
2105 | | - |
---|
2106 | | -#endif /* CONFIG_BLOCK */ |
---|
2107 | | - |
---|
| 2018 | +#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ |
---|
| 2019 | +#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ |
---|
| 2020 | +#ifdef CONFIG_BLOCK |
---|
| 2021 | +#define BLKDEV_MAJOR_MAX 512 |
---|
| 2022 | +#else |
---|
| 2023 | +#define BLKDEV_MAJOR_MAX 0 |
---|
2108 | 2024 | #endif |
---|
| 2025 | + |
---|
| 2026 | +struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, |
---|
| 2027 | + void *holder); |
---|
| 2028 | +struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder); |
---|
| 2029 | +int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole, |
---|
| 2030 | + void *holder); |
---|
| 2031 | +void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, |
---|
| 2032 | + void *holder); |
---|
| 2033 | +void blkdev_put(struct block_device *bdev, fmode_t mode); |
---|
| 2034 | + |
---|
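A hedged sketch of the claim/release lifecycle built on the helpers above: open a device exclusively by path, use it, and drop the claim again with the same mode flags:

static int example_claim_bdev(const char *path, void *holder)
{
	const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
	struct block_device *bdev;

	bdev = blkdev_get_by_path(path, mode, holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	/* ... do I/O against bdev ... */
	blkdev_put(bdev, mode);
	return 0;
}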
| 2035 | +struct block_device *I_BDEV(struct inode *inode); |
---|
| 2036 | +struct block_device *bdget_part(struct hd_struct *part); |
---|
| 2037 | +struct block_device *bdgrab(struct block_device *bdev); |
---|
| 2038 | +void bdput(struct block_device *); |
---|
| 2039 | + |
---|
| 2040 | +#ifdef CONFIG_BLOCK |
---|
| 2041 | +void invalidate_bdev(struct block_device *bdev); |
---|
| 2042 | +int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart, |
---|
| 2043 | + loff_t lend); |
---|
| 2044 | +int sync_blockdev(struct block_device *bdev); |
---|
| 2045 | +#else |
---|
| 2046 | +static inline void invalidate_bdev(struct block_device *bdev) |
---|
| 2047 | +{ |
---|
| 2048 | +} |
---|
| 2049 | +static inline int truncate_bdev_range(struct block_device *bdev, fmode_t mode, |
---|
| 2050 | + loff_t lstart, loff_t lend) |
---|
| 2051 | +{ |
---|
| 2052 | + return 0; |
---|
| 2053 | +} |
---|
| 2054 | +static inline int sync_blockdev(struct block_device *bdev) |
---|
| 2055 | +{ |
---|
| 2056 | + return 0; |
---|
| 2057 | +} |
---|
| 2058 | +#endif |
---|
| 2059 | +int fsync_bdev(struct block_device *bdev); |
---|
| 2060 | + |
---|
| 2061 | +int freeze_bdev(struct block_device *bdev); |
---|
| 2062 | +int thaw_bdev(struct block_device *bdev); |
---|
| 2063 | + |
---|
| 2064 | +#endif /* _LINUX_BLKDEV_H */ |
---|