```diff
@@ -1 +1 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
 */
 #ifndef __LINUX_BIO_H
 #define __LINUX_BIO_H
```
```diff
@@ -21 +8 @@
 #include <linux/highmem.h>
 #include <linux/mempool.h>
 #include <linux/ioprio.h>
-#include <linux/bug.h>
-#include <linux/bio-crypt-ctx.h>
-#include <linux/android_kabi.h>
-
-#ifdef CONFIG_BLOCK
-
-#include <asm/io.h>
-
 /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
 #include <linux/blk_types.h>
+#include <linux/android_kabi.h>

 #define BIO_DEBUG

```
```diff
@@ -40 +20 @@
 #define BIO_BUG_ON
 #endif

-#ifdef CONFIG_THP_SWAP
-#if HPAGE_PMD_NR > 256
-#define BIO_MAX_PAGES		HPAGE_PMD_NR
-#else
 #define BIO_MAX_PAGES		256
-#endif
-#else
-#define BIO_MAX_PAGES		256
-#endif

 #define bio_prio(bio)			(bio)->bi_ioprio
 #define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)
```
```diff
@@ -66 +38 @@
 #define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
 #define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
 #define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)
-
-#define bio_multiple_segments(bio)				\
-	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)

 #define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
 #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
```
```diff
@@ -97 +66 @@
 	return false;
 }

-static inline bool bio_no_advance_iter(struct bio *bio)
+static inline bool bio_no_advance_iter(const struct bio *bio)
 {
 	return bio_op(bio) == REQ_OP_DISCARD ||
 	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
```
```diff
@@ -129 +98 @@
 	return NULL;
 }

-static inline bool bio_full(struct bio *bio)
+/**
+ * bio_full - check if the bio is full
+ * @bio:	bio to check
+ * @len:	length of one segment to be added
+ *
+ * Return true if @bio is full and one segment with @len bytes can't be
+ * added to the bio, otherwise return false
+ */
+static inline bool bio_full(struct bio *bio, unsigned len)
{
-	return bio->bi_vcnt >= bio->bi_max_vecs;
+	if (bio->bi_vcnt >= bio->bi_max_vecs)
+		return true;
+
+	if (bio->bi_iter.bi_size > UINT_MAX - len)
+		return true;
+
+	return false;
 }

```
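The two-argument `bio_full()` now rejects an addition that would overflow `bi_iter.bi_size`, not just one that would exhaust the bvec slots. A minimal usage sketch follows; the helper name and error handling are hypothetical, only `bio_full()` and `__bio_add_page()` come from this header:

```c
#include <linux/bio.h>

/* Hypothetical helper: append a page to a bio only if it is sure to fit. */
static bool my_try_add_page(struct bio *bio, struct page *page,
			    unsigned int len, unsigned int off)
{
	if (bio_full(bio, len))
		return false;	/* caller submits this bio and starts a new one */

	__bio_add_page(bio, page, len, off);	/* cannot fail after the check */
	return true;
}
```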
```diff
@@ -137 +120 @@
-/*
- * will die
- */
-#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
+static inline bool bio_next_segment(const struct bio *bio,
+				    struct bvec_iter_all *iter)
+{
+	if (iter->idx >= bio->bi_vcnt)
+		return false;

-/*
- * merge helpers etc
- */
-
-/* Default implementation of BIOVEC_PHYS_MERGEABLE */
-#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
-	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-
-/*
- * allow arch override, for eg virtualized architectures (put in asm/io.h)
- */
-#ifndef BIOVEC_PHYS_MERGEABLE
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
-	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
-#endif
-
-#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
-	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
-#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
-	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
+	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
+	return true;
+}

```
```diff
@@ -163 +130 @@
 /*
  * drivers should _never_ use the all version - the bio may have been split
  * before it got to the driver and the driver won't own all of it
  */
-#define bio_for_each_segment_all(bvl, bio, i)				\
-	for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
+#define bio_for_each_segment_all(bvl, bio, iter) \
+	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

```
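`bio_for_each_segment_all()` now takes a `struct bvec_iter_all` instead of an integer index, so every caller must be converted. A sketch of an updated completion-side loop, assuming the caller owns the whole bio:

```c
#include <linux/bio.h>
#include <linux/bvec.h>
#include <linux/mm.h>

/* Hypothetical endio-style walk: mark every page of an owned bio dirty. */
static void my_mark_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		set_page_dirty_lock(bvec->bv_page);
}
```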
```diff
@@ -170 +137 @@
-static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
-				    unsigned bytes)
+static inline void bio_advance_iter(const struct bio *bio,
+				    struct bvec_iter *iter, unsigned int bytes)
 {
 	iter->bi_sector += bytes >> 9;

-	if (bio_no_advance_iter(bio)) {
+	if (bio_no_advance_iter(bio))
 		iter->bi_size -= bytes;
-		iter->bi_done += bytes;
-	} else {
+	else
 		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
 		/* TODO: It is reasonable to complete bio with error here. */
-	}
-}
-
-static inline bool bio_rewind_iter(struct bio *bio, struct bvec_iter *iter,
-				   unsigned int bytes)
-{
-	iter->bi_sector -= bytes >> 9;
-
-	if (bio_no_advance_iter(bio)) {
-		iter->bi_size += bytes;
-		iter->bi_done -= bytes;
-		return true;
-	}
-
-	return bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
 }

```
```diff
@@ -198 +149 @@
 #define __bio_for_each_segment(bvl, bio, iter, start)			\
@@ -203 +154 @@

 #define bio_for_each_segment(bvl, bio, iter)				\
 	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
+
+#define __bio_for_each_bvec(bvl, bio, iter, start)		\
+	for (iter = (start);					\
+	     (iter).bi_size &&					\
+		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
+	     bio_advance_iter((bio), &(iter), (bvl).bv_len))
+
+/* iterate over multi-page bvec */
+#define bio_for_each_bvec(bvl, bio, iter)			\
+	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
+
+/*
+ * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
+ * same reasons as bio_for_each_segment_all().
+ */
+#define bio_for_each_bvec_all(bvl, bio, i)		\
+	for (i = 0, bvl = bio_first_bvec_all(bio);	\
+	     i < (bio)->bi_vcnt; i++, bvl++)		\

 #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

```
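Unlike `bio_for_each_segment()`, which yields single-page segments, each bvec returned by the new `bio_for_each_bvec()` may span several contiguous pages. A sketch of the difference (hypothetical counting helper, not the kernel's real segment accounting):

```c
#include <linux/bio.h>

/* Hypothetical: count multi-page bvecs rather than single-page segments. */
static unsigned int my_count_bvecs(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int nr = 0;

	bio_for_each_bvec(bv, bio, iter)	/* bv.bv_len may exceed PAGE_SIZE */
		nr++;

	return nr;
}
```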
```diff
@@ -281 +250 @@

 static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
 {
-	*bv = bio_iovec(bio);
+	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
 }

 static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
@@ -289 +258 @@
 	struct bvec_iter iter = bio->bi_iter;
 	int idx;

-	if (unlikely(!bio_multiple_segments(bio))) {
-		*bv = bio_iovec(bio);
-		return;
-	}
+	bio_get_first_bvec(bio, bv);
+	if (bv->bv_len == bio->bi_iter.bi_size)
+		return;		/* this bio only has a single bvec */

 	bio_advance_iter(bio, &iter, iter.bi_size);

@@ -309 +277 @@
	 */
 	if (iter.bi_bvec_done)
 		bv->bv_len = iter.bi_bvec_done;
-}
-
-static inline unsigned bio_pages_all(struct bio *bio)
-{
-	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
-	return bio->bi_vcnt;
 }

 static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
```
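With `bio_multiple_segments()` removed, the "does this bio span more than one bvec" test is open-coded against the first multi-page bvec, exactly as `bio_get_last_bvec()` now does internally. A hedged sketch of the equivalent check (the helper name is hypothetical):

```c
#include <linux/bio.h>

/* Hypothetical replacement for the removed bio_multiple_segments(). */
static bool my_bio_spans_multiple_bvecs(struct bio *bio)
{
	struct bio_vec bv;

	bio_get_first_bvec(bio, &bv);
	return bv.bv_len != bio->bi_iter.bi_size;
}
```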
```diff
@@ -355 +317 @@
 	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
 	unsigned short		bip_flags;	/* control flags */

+	struct bvec_iter	bio_iter;	/* for rewinding parent bio */
+
 	struct work_struct	bip_work;	/* I/O completion */

 	struct bio_vec		*bip_vec;
@@ -362 +326 @@
 	ANDROID_KABI_RESERVE(1);
 	ANDROID_KABI_RESERVE(2);

-	struct bio_vec		bip_inline_vecs[0];/* embedded bvec array */
+	struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
 };

 #if defined(CONFIG_BLK_DEV_INTEGRITY)
```
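The `[0]` to `[]` change converts `bip_inline_vecs` to a C99 flexible array member; the trailing storage is still sized the same way. A sketch of how such an allocation is typically written, assuming `CONFIG_BLK_DEV_INTEGRITY` (the allocator choice here is illustrative, not how `bio-integrity.c` actually allocates):

```c
#include <linux/bio.h>
#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical: allocate a bip with nr_vecs inline bvecs appended. */
static struct bio_integrity_payload *my_alloc_bip(unsigned short nr_vecs)
{
	struct bio_integrity_payload *bip;

	bip = kzalloc(struct_size(bip, bip_inline_vecs, nr_vecs), GFP_NOIO);
	if (bip)
		bip->bip_max_vcnt = nr_vecs;
	return bip;
}
```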
```diff
@@ -460 +424 @@

 static inline void bio_wouldblock_error(struct bio *bio)
 {
+	bio_set_flag(bio, BIO_QUIET);
 	bio->bi_status = BLK_STS_AGAIN;
 	bio_endio(bio);
 }

 struct request_queue;
-extern int bio_phys_segments(struct request_queue *, struct bio *);

 extern int submit_bio_wait(struct bio *bio);
 extern void bio_advance(struct bio *, unsigned);
```
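`bio_wouldblock_error()` now also sets `BIO_QUIET`, so a retryable `REQ_NOWAIT` failure is not logged as a real I/O error. A hedged sketch of the call site pattern (the helper and the `need_to_block` condition are hypothetical):

```c
#include <linux/bio.h>

/* Hypothetical submit path that cannot make progress without blocking. */
static void my_submit(struct bio *bio, bool need_to_block)
{
	if (need_to_block && (bio->bi_opf & REQ_NOWAIT)) {
		bio_wouldblock_error(bio);	/* BLK_STS_AGAIN + BIO_QUIET */
		return;
	}
	submit_bio(bio);
}
```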
```diff
@@ -480 +444 @@
 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
 			   unsigned int, unsigned int);
 bool __bio_try_merge_page(struct bio *bio, struct page *page,
-		unsigned int len, unsigned int off);
+		unsigned int len, unsigned int off, bool *same_page);
 void __bio_add_page(struct bio *bio, struct page *page,
 		unsigned int len, unsigned int off);
 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
-struct rq_map_data;
-extern struct bio *bio_map_user_iov(struct request_queue *,
-				    struct iov_iter *, gfp_t);
-extern void bio_unmap_user(struct bio *);
-extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
-				gfp_t);
-extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
-				 gfp_t, int);
+void bio_release_pages(struct bio *bio, bool mark_dirty);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
-
-void generic_start_io_acct(struct request_queue *q, int op,
-			   unsigned long sectors, struct hd_struct *part);
-void generic_end_io_acct(struct request_queue *q, int op,
-			 struct hd_struct *part,
-			 unsigned long start_time);
-
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
-#endif
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-extern void bio_flush_dcache_pages(struct bio *bi);
-#else
-static inline void bio_flush_dcache_pages(struct bio *bi)
-{
-}
-#endif

 extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
 			       struct bio *src, struct bvec_iter *src_iter);
 extern void bio_copy_data(struct bio *dst, struct bio *src);
 extern void bio_list_copy_data(struct bio *dst, struct bio *src);
 extern void bio_free_pages(struct bio *bio);
-
-extern struct bio *bio_copy_user_iov(struct request_queue *,
-				     struct rq_map_data *,
-				     struct iov_iter *,
-				     gfp_t);
-extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
+void bio_truncate(struct bio *bio, unsigned new_size);
+void guard_bio_eod(struct bio *bio);

 static inline void zero_fill_bio(struct bio *bio)
 {
```
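`__bio_try_merge_page()` gained a `bool *same_page` out-parameter reporting whether the merge landed in a page the bio already references. A sketch of how a caller holding page references might use it; the helper is hypothetical, and the final add assumes the caller already checked `bio_full()`:

```c
#include <linux/bio.h>
#include <linux/mm.h>

/* Hypothetical: add a referenced page, dropping the duplicate reference
 * when the data merged into a page the bio already holds. */
static void my_add_page(struct bio *bio, struct page *page,
			unsigned int len, unsigned int off)
{
	bool same_page = false;

	if (__bio_try_merge_page(bio, page, len, off, &same_page)) {
		if (same_page)
			put_page(page);	/* bio already owns a ref to this page */
		return;
	}
	__bio_add_page(bio, page, len, off);	/* assumes !bio_full(bio, len) */
}
```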
```diff
@@ -541 +477 @@
 		bio_clear_flag(bio, BIO_THROTTLED);	\
 		(bio)->bi_disk = (bdev)->bd_disk;	\
 		(bio)->bi_partno = (bdev)->bd_partno;	\
+		bio_associate_blkg(bio);		\
 	} while (0)

 #define bio_copy_dev(dst, src)			\
 	do {					\
 		(dst)->bi_disk = (src)->bi_disk;	\
 		(dst)->bi_partno = (src)->bi_partno;	\
+		bio_clone_blkg_association(dst, src);	\
 	} while (0)

 #define bio_dev(bio) \
 	disk_devt((bio)->bi_disk)

-#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-int bio_associate_blkcg_from_page(struct bio *bio, struct page *page);
-#else
-static inline int bio_associate_blkcg_from_page(struct bio *bio,
-						struct page *page) { return 0; }
-#endif
-
 #ifdef CONFIG_BLK_CGROUP
-int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
-int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
-void bio_disassociate_task(struct bio *bio);
-void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
+void bio_associate_blkg(struct bio *bio);
+void bio_associate_blkg_from_css(struct bio *bio,
+				 struct cgroup_subsys_state *css);
+void bio_clone_blkg_association(struct bio *dst, struct bio *src);
 #else	/* CONFIG_BLK_CGROUP */
-static inline int bio_associate_blkcg(struct bio *bio,
-			struct cgroup_subsys_state *blkcg_css) { return 0; }
-static inline void bio_disassociate_task(struct bio *bio) { }
-static inline void bio_clone_blkcg_association(struct bio *dst,
-			struct bio *src) { }
+static inline void bio_associate_blkg(struct bio *bio) { }
+static inline void bio_associate_blkg_from_css(struct bio *bio,
+					       struct cgroup_subsys_state *css)
+{ }
+static inline void bio_clone_blkg_association(struct bio *dst,
+					      struct bio *src) { }
 #endif	/* CONFIG_BLK_CGROUP */

 #ifdef CONFIG_HIGHMEM
```
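Because `bio_set_dev()` now calls `bio_associate_blkg()` itself, a bio remapped to another device picks up the new disk's blkg without explicit cgroup calls at the remap site. A hedged sketch of a stacking-driver style path (the helper and offset handling are hypothetical):

```c
#include <linux/bio.h>

/* Hypothetical: redirect a bio to an underlying device and resubmit. */
static void my_remap_and_resubmit(struct bio *bio, struct block_device *lower,
				  sector_t start)
{
	bio_set_dev(bio, lower);	/* re-associates the bio's blkg too */
	bio->bi_iter.bi_sector += start;
	submit_bio(bio);
}
```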
```diff
@@ -873 +805 @@

 #endif /* CONFIG_BLK_DEV_INTEGRITY */

-#endif /* CONFIG_BLOCK */
+/*
+ * Mark a bio as polled. Note that for async polled IO, the caller must
+ * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
+ * We cannot block waiting for requests on polled IO, as those completions
+ * must be found by the caller. This is different than IRQ driven IO, where
+ * it's safe to wait for IO to complete.
+ */
+static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
+{
+	bio->bi_opf |= REQ_HIPRI;
+	if (!is_sync_kiocb(kiocb))
+		bio->bi_opf |= REQ_NOWAIT;
+}
+
 #endif /* __LINUX_BIO_H */
```
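A usage sketch for the new `bio_set_polled()`: a direct-I/O submission path opts into polled completion when the kiocb requests it. For async kiocbs the helper also sets `REQ_NOWAIT`, so the caller must be prepared to see `BLK_STS_AGAIN` instead of sleeping (the surrounding function is hypothetical):

```c
#include <linux/bio.h>
#include <linux/fs.h>

/* Hypothetical direct-IO submit step. */
static void my_dio_submit(struct kiocb *iocb, struct bio *bio)
{
	if (iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(bio, iocb);	/* REQ_HIPRI (+ REQ_NOWAIT if async) */

	submit_bio(bio);
}
```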