2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/block/blk.h
@@ -4,23 +4,19 @@
 
 #include <linux/idr.h>
 #include <linux/blk-mq.h>
+#include <linux/part_stat.h>
+#include <linux/blk-crypto.h>
+#include <xen/xen.h>
+#include "blk-crypto-internal.h"
 #include "blk-mq.h"
-
-/* Amount of time in which a process may batch requests */
-#define BLK_BATCH_TIME (HZ/50UL)
-
-/* Number of requests a "batching" process may submit */
-#define BLK_BATCH_REQ 32
+#include "blk-mq-sched.h"
 
 /* Max future timer expiry for timeouts */
 #define BLK_MAX_TIMEOUT (5 * HZ)
 
-#ifdef CONFIG_DEBUG_FS
 extern struct dentry *blk_debugfs_root;
-#endif
 
 struct blk_flush_queue {
-        unsigned int flush_queue_delayed:1;
         unsigned int flush_pending_idx:1;
         unsigned int flush_running_idx:1;
         blk_status_t rq_status;
@@ -29,94 +25,18 @@
         struct list_head flush_data_in_flight;
         struct request *flush_rq;
 
-        /*
-         * flush_rq shares tag with this rq, both can't be active
-         * at the same time
-         */
-        struct request *orig_rq;
+        struct lock_class_key key;
         spinlock_t mq_flush_lock;
 };
 
 extern struct kmem_cache *blk_requestq_cachep;
-extern struct kmem_cache *request_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;
 
-/*
- * @q->queue_lock is set while a queue is being initialized. Since we know
- * that no other threads access the queue object before @q->queue_lock has
- * been set, it is safe to manipulate queue flags without holding the
- * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
- * blk_init_allocated_queue().
- */
-static inline void queue_lockdep_assert_held(struct request_queue *q)
+static inline struct blk_flush_queue *
+blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
 {
-        if (q->queue_lock)
-                lockdep_assert_held(q->queue_lock);
-}
-
-static inline void queue_flag_set_unlocked(unsigned int flag,
-                struct request_queue *q)
-{
-        if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
-            kref_read(&q->kobj.kref))
-                lockdep_assert_held(q->queue_lock);
-        __set_bit(flag, &q->queue_flags);
-}
-
-static inline void queue_flag_clear_unlocked(unsigned int flag,
-                struct request_queue *q)
-{
-        if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
-            kref_read(&q->kobj.kref))
-                lockdep_assert_held(q->queue_lock);
-        __clear_bit(flag, &q->queue_flags);
-}
-
-static inline int queue_flag_test_and_clear(unsigned int flag,
-                struct request_queue *q)
-{
-        queue_lockdep_assert_held(q);
-
-        if (test_bit(flag, &q->queue_flags)) {
-                __clear_bit(flag, &q->queue_flags);
-                return 1;
-        }
-
-        return 0;
-}
-
-static inline int queue_flag_test_and_set(unsigned int flag,
-                struct request_queue *q)
-{
-        queue_lockdep_assert_held(q);
-
-        if (!test_bit(flag, &q->queue_flags)) {
-                __set_bit(flag, &q->queue_flags);
-                return 0;
-        }
-
-        return 1;
-}
-
-static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
-{
-        queue_lockdep_assert_held(q);
-        __set_bit(flag, &q->queue_flags);
-}
-
-static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
-{
-        queue_lockdep_assert_held(q);
-        __clear_bit(flag, &q->queue_flags);
-}
-
-static inline struct blk_flush_queue *blk_get_flush_queue(
-                struct request_queue *q, struct blk_mq_ctx *ctx)
-{
-        if (q->mq_ops)
-                return blk_mq_map_queue(q, ctx->cpu)->fq;
-        return q->fq;
+        return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
 }
 
 static inline void __blk_get_queue(struct request_queue *q)
@@ -124,48 +44,121 @@
         kobject_get(&q->kobj);
 }
 
-static inline bool
-is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
-{
-        return hctx->fq->flush_rq == req;
-}
+bool is_flush_rq(struct request *req);
 
-struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
-                int node, int cmd_size, gfp_t flags);
+struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
+                gfp_t flags);
 void blk_free_flush_queue(struct blk_flush_queue *q);
 
-int blk_init_rl(struct request_list *rl, struct request_queue *q,
-                gfp_t gfp_mask);
-void blk_exit_rl(struct request_queue *q, struct request_list *rl);
-void blk_exit_queue(struct request_queue *q);
-void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
-                struct bio *bio);
-void blk_queue_bypass_start(struct request_queue *q);
-void blk_queue_bypass_end(struct request_queue *q);
-void __blk_queue_free_tags(struct request_queue *q);
 void blk_freeze_queue(struct request_queue *q);
 
-static inline void blk_queue_enter_live(struct request_queue *q)
+static inline bool biovec_phys_mergeable(struct request_queue *q,
+                struct bio_vec *vec1, struct bio_vec *vec2)
 {
-        /*
-         * Given that running in generic_make_request() context
-         * guarantees that a live reference against q_usage_counter has
-         * been established, further references under that same context
-         * need not check that the queue has been frozen (marked dead).
-         */
-        percpu_ref_get(&q->q_usage_counter);
+        unsigned long mask = queue_segment_boundary(q);
+        phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
+        phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;
+
+        if (addr1 + vec1->bv_len != addr2)
+                return false;
+        if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
+                return false;
+        if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
+                return false;
+        return true;
+}
+
+static inline bool __bvec_gap_to_prev(struct request_queue *q,
+                struct bio_vec *bprv, unsigned int offset)
+{
+        return (offset & queue_virt_boundary(q)) ||
+                ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct request_queue *q,
+                struct bio_vec *bprv, unsigned int offset)
+{
+        if (!queue_virt_boundary(q))
+                return false;
+        return __bvec_gap_to_prev(q, bprv, offset);
+}
+
+static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
+                unsigned int nr_segs)
+{
+        rq->nr_phys_segments = nr_segs;
+        rq->__data_len = bio->bi_iter.bi_size;
+        rq->bio = rq->biotail = bio;
+        rq->ioprio = bio_prio(bio);
+
+        if (bio->bi_disk)
+                rq->rq_disk = bio->bi_disk;
 }
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 void blk_flush_integrity(void);
 bool __bio_integrity_endio(struct bio *);
+void bio_integrity_free(struct bio *bio);
 static inline bool bio_integrity_endio(struct bio *bio)
 {
         if (bio_integrity(bio))
                 return __bio_integrity_endio(bio);
         return true;
 }
-#else
+
+bool blk_integrity_merge_rq(struct request_queue *, struct request *,
+                struct request *);
+bool blk_integrity_merge_bio(struct request_queue *, struct request *,
+                struct bio *);
+
+static inline bool integrity_req_gap_back_merge(struct request *req,
+                struct bio *next)
+{
+        struct bio_integrity_payload *bip = bio_integrity(req->bio);
+        struct bio_integrity_payload *bip_next = bio_integrity(next);
+
+        return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+                                bip_next->bip_vec[0].bv_offset);
+}
+
+static inline bool integrity_req_gap_front_merge(struct request *req,
+                struct bio *bio)
+{
+        struct bio_integrity_payload *bip = bio_integrity(bio);
+        struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
+
+        return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+                                bip_next->bip_vec[0].bv_offset);
+}
+
+void blk_integrity_add(struct gendisk *);
+void blk_integrity_del(struct gendisk *);
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static inline bool blk_integrity_merge_rq(struct request_queue *rq,
+                struct request *r1, struct request *r2)
+{
+        return true;
+}
+static inline bool blk_integrity_merge_bio(struct request_queue *rq,
+                struct request *r, struct bio *b)
+{
+        return true;
+}
+static inline bool integrity_req_gap_back_merge(struct request *req,
+                struct bio *next)
+{
+        return false;
+}
+static inline bool integrity_req_gap_front_merge(struct request *req,
+                struct bio *bio)
+{
+        return false;
+}
+
 static inline void blk_flush_integrity(void)
 {
 }
@@ -173,48 +166,33 @@
 {
         return true;
 }
-#endif
+static inline void bio_integrity_free(struct bio *bio)
+{
+}
+static inline void blk_integrity_add(struct gendisk *disk)
+{
+}
+static inline void blk_integrity_del(struct gendisk *disk)
+{
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
 
-void blk_timeout_work(struct work_struct *work);
 unsigned long blk_rq_timeout(unsigned long timeout);
 void blk_add_timer(struct request *req);
-void blk_delete_timer(struct request *);
 
-
-bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
-                struct bio *bio);
-bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
-                struct bio *bio);
-bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
-                struct bio *bio);
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-                unsigned int *request_count,
-                struct request **same_queue_rq);
-unsigned int blk_plug_queued_count(struct request_queue *q);
+                unsigned int nr_segs, struct request **same_queue_rq);
+bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
+                struct bio *bio, unsigned int nr_segs);
 
-void blk_account_io_start(struct request *req, bool new_io);
-void blk_account_io_completion(struct request *req, unsigned int bytes);
+void blk_account_io_start(struct request *req);
 void blk_account_io_done(struct request *req, u64 now);
 
 /*
- * EH timer and IO completion will both attempt to 'grab' the request, make
- * sure that only one of them succeeds. Steal the bottom bit of the
- * __deadline field for this.
+ * Plug flush limits
  */
-static inline int blk_mark_rq_complete(struct request *rq)
-{
-        return test_and_set_bit(0, &rq->__deadline);
-}
-
-static inline void blk_clear_rq_complete(struct request *rq)
-{
-        clear_bit(0, &rq->__deadline);
-}
-
-static inline bool blk_rq_is_complete(struct request *rq)
-{
-        return test_bit(0, &rq->__deadline);
-}
+#define BLK_MAX_REQUEST_COUNT 32
+#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
 
 /*
@@ -223,94 +201,59 @@
 
 void blk_insert_flush(struct request *rq);
 
-static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
-{
-        struct elevator_queue *e = q->elevator;
-
-        if (e->type->ops.sq.elevator_activate_req_fn)
-                e->type->ops.sq.elevator_activate_req_fn(q, rq);
-}
-
-static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
-{
-        struct elevator_queue *e = q->elevator;
-
-        if (e->type->ops.sq.elevator_deactivate_req_fn)
-                e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
-}
-
-int elevator_init(struct request_queue *);
-int elevator_init_mq(struct request_queue *q);
+void elevator_init_mq(struct request_queue *q);
 int elevator_switch_mq(struct request_queue *q,
                 struct elevator_type *new_e);
-void elevator_exit(struct request_queue *, struct elevator_queue *);
-int elv_register_queue(struct request_queue *q);
+void __elevator_exit(struct request_queue *, struct elevator_queue *);
+int elv_register_queue(struct request_queue *q, bool uevent);
 void elv_unregister_queue(struct request_queue *q);
+
+static inline void elevator_exit(struct request_queue *q,
+                struct elevator_queue *e)
+{
+        lockdep_assert_held(&q->sysfs_lock);
+
+        blk_mq_sched_free_requests(q);
+        __elevator_exit(q, e);
+}
 
 struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
 
-#ifdef CONFIG_FAIL_IO_TIMEOUT
-int blk_should_fake_timeout(struct request_queue *);
+ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
+                char *buf);
+ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
+                char *buf);
+ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
+                char *buf);
+ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
+                char *buf);
+ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
+                const char *buf, size_t count);
 ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
 ssize_t part_timeout_store(struct device *, struct device_attribute *,
                 const char *, size_t);
-#else
-static inline int blk_should_fake_timeout(struct request_queue *q)
-{
-        return 0;
-}
-#endif
 
-int ll_back_merge_fn(struct request_queue *q, struct request *req,
-                struct bio *bio);
-int ll_front_merge_fn(struct request_queue *q, struct request *req,
-                struct bio *bio);
-struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
-struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
+void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
+int ll_back_merge_fn(struct request *req, struct bio *bio,
+                unsigned int nr_segs);
 int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                 struct request *next);
-void blk_recalc_rq_segments(struct request *rq);
+unsigned int blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
 
-void blk_queue_congestion_threshold(struct request_queue *q);
-
 int blk_dev_init(void);
-
-
-/*
- * Return the threshold (number of used requests) at which the queue is
- * considered to be congested. It include a little hysteresis to keep the
- * context switch rate down.
- */
-static inline int queue_congestion_on_threshold(struct request_queue *q)
-{
-        return q->nr_congestion_on;
-}
-
-/*
- * The threshold at which a queue is considered to be uncongested
- */
-static inline int queue_congestion_off_threshold(struct request_queue *q)
-{
-        return q->nr_congestion_off;
-}
-
-extern int blk_update_nr_requests(struct request_queue *, unsigned int);
 
 /*
  * Contribute to IO statistics IFF:
  *
  * a) it's attached to a gendisk, and
- * b) the queue had IO stats enabled when this request was started, and
- * c) it's a file system request
+ * b) the queue had IO stats enabled when this request was started
  */
 static inline bool blk_do_io_stat(struct request *rq)
 {
-        return rq->rq_disk &&
-               (rq->rq_flags & RQF_IO_STAT) &&
-                !blk_rq_is_passthrough(rq);
+        return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
 }
 
@@ -318,21 +261,6 @@
         req->cmd_flags |= REQ_NOMERGE;
         if (req == q->last_merge)
                 q->last_merge = NULL;
-}
-
-/*
- * Steal a bit from this field for legacy IO path atomic IO marking. Note that
- * setting the deadline clears the bottom bit, potentially clearing the
- * completed bit. The user has to be OK with this (current ones are fine).
- */
-static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
-{
-        rq->__deadline = time & ~0x1UL;
-}
-
-static inline unsigned long blk_rq_deadline(struct request *rq)
-{
-        return rq->__deadline & ~0x1UL;
 }
 
 /*
@@ -346,6 +274,20 @@
 }
 
 /*
+ * The max bio size which is aligned to q->limits.discard_granularity. This
+ * is a hint to split large discard bio in generic block layer, then if device
+ * driver needs to split the discard bio into smaller ones, their bi_size can
+ * be very probably and easily aligned to discard_granularity of the device's
+ * queue.
+ */
+static inline unsigned int bio_aligned_discard_max_sectors(
+                struct request_queue *q)
+{
+        return round_down(UINT_MAX, q->limits.discard_granularity) >>
+                        SECTOR_SHIFT;
+}
+
+/*
  * Internal io_context interface
  */
 void get_io_context(struct io_context *ioc);
@@ -356,55 +298,21 @@
 
 int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
 
-/**
- * rq_ioc - determine io_context for request allocation
- * @bio: request being allocated is for this bio (can be %NULL)
- *
- * Determine io_context to use for request allocation for @bio. May return
- * %NULL if %current->io_context doesn't exist.
- */
-static inline struct io_context *rq_ioc(struct bio *bio)
-{
-#ifdef CONFIG_BLK_CGROUP
-        if (bio && bio->bi_ioc)
-                return bio->bi_ioc;
-#endif
-        return current->io_context;
-}
-
-/**
- * create_io_context - try to create task->io_context
- * @gfp_mask: allocation mask
- * @node: allocation node
- *
- * If %current->io_context is %NULL, allocate a new io_context and install
- * it. Returns the current %current->io_context which may be %NULL if
- * allocation failed.
- *
- * Note that this function can't be called with IRQ disabled because
- * task_lock which protects %current->io_context is IRQ-unsafe.
- */
-static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
-{
-        WARN_ON_ONCE(irqs_disabled());
-        if (unlikely(!current->io_context))
-                create_task_io_context(current, gfp_mask, node);
-        return current->io_context;
-}
-
 /*
  * Internal throttling interface
  */
 #ifdef CONFIG_BLK_DEV_THROTTLING
-extern void blk_throtl_drain(struct request_queue *q);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 extern void blk_throtl_register_queue(struct request_queue *q);
+extern void blk_throtl_charge_bio_split(struct bio *bio);
+bool blk_throtl_bio(struct bio *bio);
 #else /* CONFIG_BLK_DEV_THROTTLING */
-static inline void blk_throtl_drain(struct request_queue *q) { }
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
 static inline void blk_throtl_register_queue(struct request_queue *q) { }
+static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
+static inline bool blk_throtl_bio(struct bio *bio) { return false; }
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
@@ -430,12 +338,114 @@
 }
 #endif /* CONFIG_BOUNCE */
 
-extern void blk_drain_queue(struct request_queue *q);
-
 #ifdef CONFIG_BLK_CGROUP_IOLATENCY
 extern int blk_iolatency_init(struct request_queue *q);
 #else
 static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
 #endif
 
+struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+void blk_queue_free_zone_bitmaps(struct request_queue *q);
+#else
+static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
+#endif
+
+struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector);
+
+int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
+void blk_free_devt(dev_t devt);
+void blk_invalidate_devt(dev_t devt);
+char *disk_name(struct gendisk *hd, int partno, char *buf);
+#define ADDPART_FLAG_NONE 0
+#define ADDPART_FLAG_RAID 1
+#define ADDPART_FLAG_WHOLEDISK 2
+void delete_partition(struct hd_struct *part);
+int bdev_add_partition(struct block_device *bdev, int partno,
+                sector_t start, sector_t length);
+int bdev_del_partition(struct block_device *bdev, int partno);
+int bdev_resize_partition(struct block_device *bdev, int partno,
+                sector_t start, sector_t length);
+int disk_expand_part_tbl(struct gendisk *disk, int target);
+int hd_ref_init(struct hd_struct *part);
+
+/* no need to get/put refcount of part0 */
+static inline int hd_struct_try_get(struct hd_struct *part)
+{
+        if (part->partno)
+                return percpu_ref_tryget_live(&part->ref);
+        return 1;
+}
+
+static inline void hd_struct_put(struct hd_struct *part)
+{
+        if (part->partno)
+                percpu_ref_put(&part->ref);
+}
+
+static inline void hd_free_part(struct hd_struct *part)
+{
+        free_percpu(part->dkstats);
+        kfree(part->info);
+        percpu_ref_exit(&part->ref);
+}
+
+/*
+ * Any access of part->nr_sects which is not protected by partition
+ * bd_mutex or gendisk bdev bd_mutex, should be done using this
+ * accessor function.
+ *
+ * Code written along the lines of i_size_read() and i_size_write().
+ * CONFIG_PREEMPTION case optimizes the case of UP kernel with preemption
+ * on.
+ */
+static inline sector_t part_nr_sects_read(struct hd_struct *part)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+        sector_t nr_sects;
+        unsigned seq;
+        do {
+                seq = read_seqcount_begin(&part->nr_sects_seq);
+                nr_sects = part->nr_sects;
+        } while (read_seqcount_retry(&part->nr_sects_seq, seq));
+        return nr_sects;
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
+        sector_t nr_sects;
+
+        preempt_disable();
+        nr_sects = part->nr_sects;
+        preempt_enable();
+        return nr_sects;
+#else
+        return part->nr_sects;
+#endif
+}
+
+/*
+ * Should be called with mutex lock held (typically bd_mutex) of partition
+ * to provide mutual exlusion among writers otherwise seqcount might be
+ * left in wrong state leaving the readers spinning infinitely.
+ */
+static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+        preempt_disable();
+        write_seqcount_begin(&part->nr_sects_seq);
+        part->nr_sects = size;
+        write_seqcount_end(&part->nr_sects_seq);
+        preempt_enable();
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
+        preempt_disable();
+        part->nr_sects = size;
+        preempt_enable();
+#else
+        part->nr_sects = size;
+#endif
+}
+
+int bio_add_hw_page(struct request_queue *q, struct bio *bio,
+                struct page *page, unsigned int len, unsigned int offset,
+                unsigned int max_sectors, bool *same_page);
+
 #endif /* BLK_INTERNAL_H */