2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/include/linux/bio.h
@@ -1,19 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
  */
 #ifndef __LINUX_BIO_H
 #define __LINUX_BIO_H
@@ -21,16 +8,9 @@
 #include <linux/highmem.h>
 #include <linux/mempool.h>
 #include <linux/ioprio.h>
-#include <linux/bug.h>
-#include <linux/bio-crypt-ctx.h>
-#include <linux/android_kabi.h>
-
-#ifdef CONFIG_BLOCK
-
-#include <asm/io.h>
-
 /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
 #include <linux/blk_types.h>
+#include <linux/android_kabi.h>
 
 #define BIO_DEBUG
 
@@ -40,15 +20,7 @@
 #define BIO_BUG_ON
 #endif
 
-#ifdef CONFIG_THP_SWAP
-#if HPAGE_PMD_NR > 256
-#define BIO_MAX_PAGES		HPAGE_PMD_NR
-#else
 #define BIO_MAX_PAGES		256
-#endif
-#else
-#define BIO_MAX_PAGES		256
-#endif
 
 #define bio_prio(bio)			(bio)->bi_ioprio
 #define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)
@@ -66,9 +38,6 @@
 #define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
 #define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
 #define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)
-
-#define bio_multiple_segments(bio)				\
-	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
 
 #define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
 #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
@@ -97,7 +66,7 @@
 	return false;
 }
 
-static inline bool bio_no_advance_iter(struct bio *bio)
+static inline bool bio_no_advance_iter(const struct bio *bio)
 {
 	return bio_op(bio) == REQ_OP_DISCARD ||
 	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
@@ -129,70 +98,52 @@
 	return NULL;
 }
 
-static inline bool bio_full(struct bio *bio)
+/**
+ * bio_full - check if the bio is full
+ * @bio:	bio to check
+ * @len:	length of one segment to be added
+ *
+ * Return true if @bio is full and one segment with @len bytes can't be
+ * added to the bio, otherwise return false
+ */
+static inline bool bio_full(struct bio *bio, unsigned len)
 {
-	return bio->bi_vcnt >= bio->bi_max_vecs;
+	if (bio->bi_vcnt >= bio->bi_max_vecs)
+		return true;
+
+	if (bio->bi_iter.bi_size > UINT_MAX - len)
+		return true;
+
+	return false;
 }
 
-/*
- * will die
- */
-#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
+static inline bool bio_next_segment(const struct bio *bio,
+				    struct bvec_iter_all *iter)
+{
+	if (iter->idx >= bio->bi_vcnt)
+		return false;
 
-/*
- * merge helpers etc
- */
-
-/* Default implementation of BIOVEC_PHYS_MERGEABLE */
-#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
-	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-
-/*
- * allow arch override, for eg virtualized architectures (put in asm/io.h)
- */
-#ifndef BIOVEC_PHYS_MERGEABLE
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
-	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
-#endif
-
-#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
-	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
-#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
-	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
+	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
+	return true;
+}
 
 /*
  * drivers should _never_ use the all version - the bio may have been split
  * before it got to the driver and the driver won't own all of it
  */
-#define bio_for_each_segment_all(bvl, bio, i)				\
-	for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
+#define bio_for_each_segment_all(bvl, bio, iter) \
+	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
 
-static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
-				    unsigned bytes)
+static inline void bio_advance_iter(const struct bio *bio,
+				    struct bvec_iter *iter, unsigned int bytes)
 {
 	iter->bi_sector += bytes >> 9;
 
-	if (bio_no_advance_iter(bio)) {
+	if (bio_no_advance_iter(bio))
 		iter->bi_size -= bytes;
-		iter->bi_done += bytes;
-	} else {
+	else
 		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
 		/* TODO: It is reasonable to complete bio with error here. */
-	}
-}
-
-static inline bool bio_rewind_iter(struct bio *bio, struct bvec_iter *iter,
-				   unsigned int bytes)
-{
-	iter->bi_sector -= bytes >> 9;
-
-	if (bio_no_advance_iter(bio)) {
-		iter->bi_size += bytes;
-		iter->bi_done -= bytes;
-		return true;
-	}
-
-	return bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
 }
 
 #define __bio_for_each_segment(bvl, bio, iter, start)		\
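
[Example, not part of the patch: the hunk above converts bio_for_each_segment_all() from an integer index over bi_io_vec to a struct bvec_iter_all driven by bio_next_segment(). A minimal sketch of a migrated caller, assuming a hypothetical completion handler my_read_endio():]

	/* Sketch only: adapting to the new iterator. The old form used
	 *	int i;
	 *	bio_for_each_segment_all(bvl, bio, i) ...
	 * The new form needs a struct bvec_iter_all on the stack; each
	 * bvl is still a single-page segment.
	 */
	static void my_read_endio(struct bio *bio)	/* hypothetical */
	{
		struct bio_vec *bvl;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bvl, bio, iter_all)
			set_page_dirty(bvl->bv_page);
		bio_put(bio);
	}
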
@@ -203,6 +154,24 @@
 
 #define bio_for_each_segment(bvl, bio, iter)				\
 	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
+
+#define __bio_for_each_bvec(bvl, bio, iter, start)		\
+	for (iter = (start);						\
+	     (iter).bi_size &&						\
+		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
+	     bio_advance_iter((bio), &(iter), (bvl).bv_len))
+
+/* iterate over multi-page bvec */
+#define bio_for_each_bvec(bvl, bio, iter)			\
+	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
+
+/*
+ * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
+ * same reasons as bio_for_each_segment_all().
+ */
+#define bio_for_each_bvec_all(bvl, bio, i)		\
+	for (i = 0, bvl = bio_first_bvec_all(bio);	\
+	     i < (bio)->bi_vcnt; i++, bvl++)		\
 
 #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
 
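
[Example, not part of the patch: bio_for_each_bvec(), added above, walks multi-page bvecs, so a caller sees fewer, larger vectors than with the per-page bio_for_each_segment(). A sketch with a hypothetical helper:]

	/* Sketch only: bv is a value, not a pointer, and bv.bv_len may
	 * cover several physically contiguous pages.
	 */
	static unsigned int my_bio_bvec_bytes(struct bio *bio)	/* hypothetical */
	{
		struct bio_vec bv;
		struct bvec_iter iter;
		unsigned int bytes = 0;

		bio_for_each_bvec(bv, bio, iter)
			bytes += bv.bv_len;

		return bytes;
	}
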
@@ -281,7 +250,7 @@
 
 static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
 {
-	*bv = bio_iovec(bio);
+	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
 }
 
 static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
@@ -289,10 +258,9 @@
 	struct bvec_iter iter = bio->bi_iter;
 	int idx;
 
-	if (unlikely(!bio_multiple_segments(bio))) {
-		*bv = bio_iovec(bio);
-		return;
-	}
+	bio_get_first_bvec(bio, bv);
+	if (bv->bv_len == bio->bi_iter.bi_size)
+		return;		/* this bio only has a single bvec */
 
 	bio_advance_iter(bio, &iter, iter.bi_size);
 
@@ -309,12 +277,6 @@
 	 */
 	if (iter.bi_bvec_done)
 		bv->bv_len = iter.bi_bvec_done;
-}
-
-static inline unsigned bio_pages_all(struct bio *bio)
-{
-	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
-	return bio->bi_vcnt;
 }
 
 static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
@@ -355,6 +317,8 @@
 	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
 	unsigned short		bip_flags;	/* control flags */
 
+	struct bvec_iter	bio_iter;	/* for rewinding parent bio */
+
 	struct work_struct	bip_work;	/* I/O completion */
 
 	struct bio_vec		*bip_vec;
@@ -362,7 +326,7 @@
 	ANDROID_KABI_RESERVE(1);
 	ANDROID_KABI_RESERVE(2);
 
-	struct bio_vec		bip_inline_vecs[0];/* embedded bvec array */
+	struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
 };
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -460,12 +424,12 @@
 
 static inline void bio_wouldblock_error(struct bio *bio)
 {
+	bio_set_flag(bio, BIO_QUIET);
 	bio->bi_status = BLK_STS_AGAIN;
 	bio_endio(bio);
 }
 
 struct request_queue;
-extern int bio_phys_segments(struct request_queue *, struct bio *);
 
 extern int submit_bio_wait(struct bio *bio);
 extern void bio_advance(struct bio *, unsigned);
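
[Example, not part of the patch: the BIO_QUIET flag added to bio_wouldblock_error() above marks the expected -EAGAIN completion as quiet. A sketch of the call-site pattern; the queue-capability check is hypothetical:]

	/* Sketch only: a REQ_NOWAIT submitter fails the bio with
	 * BLK_STS_AGAIN instead of sleeping; BIO_QUIET keeps the
	 * completion path from logging this expected outcome as an error.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && my_queue_would_block(q)) {	/* hypothetical check */
		bio_wouldblock_error(bio);
		return BLK_QC_T_NONE;
	}
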
@@ -480,50 +444,22 @@
 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
 			   unsigned int, unsigned int);
 bool __bio_try_merge_page(struct bio *bio, struct page *page,
-		unsigned int len, unsigned int off);
+		unsigned int len, unsigned int off, bool *same_page);
 void __bio_add_page(struct bio *bio, struct page *page,
 		unsigned int len, unsigned int off);
 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
-struct rq_map_data;
-extern struct bio *bio_map_user_iov(struct request_queue *,
-				    struct iov_iter *, gfp_t);
-extern void bio_unmap_user(struct bio *);
-extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
-				gfp_t);
-extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
-				 gfp_t, int);
+void bio_release_pages(struct bio *bio, bool mark_dirty);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
-
-void generic_start_io_acct(struct request_queue *q, int op,
-			   unsigned long sectors, struct hd_struct *part);
-void generic_end_io_acct(struct request_queue *q, int op,
-			 struct hd_struct *part,
-			 unsigned long start_time);
-
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
-#endif
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-extern void bio_flush_dcache_pages(struct bio *bi);
-#else
-static inline void bio_flush_dcache_pages(struct bio *bi)
-{
-}
-#endif
 
 extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
 			       struct bio *src, struct bvec_iter *src_iter);
 extern void bio_copy_data(struct bio *dst, struct bio *src);
 extern void bio_list_copy_data(struct bio *dst, struct bio *src);
 extern void bio_free_pages(struct bio *bio);
-
-extern struct bio *bio_copy_user_iov(struct request_queue *,
-				     struct rq_map_data *,
-				     struct iov_iter *,
-				     gfp_t);
-extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
+void bio_truncate(struct bio *bio, unsigned new_size);
+void guard_bio_eod(struct bio *bio);
 
 static inline void zero_fill_bio(struct bio *bio)
 {
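
[Example, not part of the patch: bio_release_pages(), declared above, pairs with bio_iov_iter_get_pages() to drop the page references a direct-IO bio holds, optionally dirtying the pages. A sketch of a completion handler; my_dio_end_io() is hypothetical:]

	/* Sketch only: unpin (and, for reads, dirty) the user pages that
	 * bio_iov_iter_get_pages() took references on at submission time.
	 */
	static void my_dio_end_io(struct bio *bio)	/* hypothetical */
	{
		bio_release_pages(bio, bio_data_dir(bio) == READ);
		bio_put(bio);
	}
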
@@ -541,35 +477,31 @@
 	bio_clear_flag(bio, BIO_THROTTLED);\
 	(bio)->bi_disk = (bdev)->bd_disk;	\
 	(bio)->bi_partno = (bdev)->bd_partno;	\
+	bio_associate_blkg(bio);		\
 } while (0)
 
 #define bio_copy_dev(dst, src)			\
 do {						\
 	(dst)->bi_disk = (src)->bi_disk;	\
 	(dst)->bi_partno = (src)->bi_partno;	\
+	bio_clone_blkg_association(dst, src);	\
 } while (0)
 
 #define bio_dev(bio) \
 	disk_devt((bio)->bi_disk)
 
-#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-int bio_associate_blkcg_from_page(struct bio *bio, struct page *page);
-#else
-static inline int bio_associate_blkcg_from_page(struct bio *bio,
-						struct page *page) { return 0; }
-#endif
-
 #ifdef CONFIG_BLK_CGROUP
-int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
-int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
-void bio_disassociate_task(struct bio *bio);
-void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
+void bio_associate_blkg(struct bio *bio);
+void bio_associate_blkg_from_css(struct bio *bio,
+				 struct cgroup_subsys_state *css);
+void bio_clone_blkg_association(struct bio *dst, struct bio *src);
 #else	/* CONFIG_BLK_CGROUP */
-static inline int bio_associate_blkcg(struct bio *bio,
-			struct cgroup_subsys_state *blkcg_css) { return 0; }
-static inline void bio_disassociate_task(struct bio *bio) { }
-static inline void bio_clone_blkcg_association(struct bio *dst,
-			struct bio *src) { }
+static inline void bio_associate_blkg(struct bio *bio) { }
+static inline void bio_associate_blkg_from_css(struct bio *bio,
+					       struct cgroup_subsys_state *css)
+{ }
+static inline void bio_clone_blkg_association(struct bio *dst,
+					      struct bio *src) { }
 #endif	/* CONFIG_BLK_CGROUP */
 
 #ifdef CONFIG_HIGHMEM
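
[Example, not part of the patch: with bio_associate_blkg() folded into bio_set_dev() above, submitters pick up a blkcg association implicitly. A sketch, assuming bdev comes from the surrounding code:]

	/* Sketch only: no explicit blkcg/blkg association call is needed
	 * any more; bio_set_dev() now associates the bio with the current
	 * cgroup's blkg for this device as a side effect.
	 */
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);

	bio_set_dev(bio, bdev);		/* also calls bio_associate_blkg() */
	bio->bi_opf = REQ_OP_READ;
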
@@ -873,5 +805,18 @@
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
-#endif /* CONFIG_BLOCK */
+/*
+ * Mark a bio as polled. Note that for async polled IO, the caller must
+ * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
+ * We cannot block waiting for requests on polled IO, as those completions
+ * must be found by the caller. This is different than IRQ driven IO, where
+ * it's safe to wait for IO to complete.
+ */
+static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
+{
+	bio->bi_opf |= REQ_HIPRI;
+	if (!is_sync_kiocb(kiocb))
+		bio->bi_opf |= REQ_NOWAIT;
+}
+
 #endif /* __LINUX_BIO_H */
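
[Example, not part of the patch: how a direct-IO submission path is expected to use the bio_set_polled() helper added above; iocb and bio are assumed from the surrounding submission function:]

	/* Sketch only: polled IO is requested with IOCB_HIPRI. For an async
	 * kiocb, bio_set_polled() also sets REQ_NOWAIT, because the submitter
	 * cannot sleep waiting for a request it is expected to poll for.
	 */
	if (iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(bio, iocb);

	qc = submit_bio(bio);		/* qc: blk_qc_t cookie used for polling */
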