2024-10-16 50a212ec906f7524620675f0c57357691c26c81f
kernel/block/blk-mq-sched.h
@@ -5,22 +5,20 @@
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
 
-void blk_mq_sched_free_hctx_data(struct request_queue *q,
-				 void (*exit)(struct blk_mq_hw_ctx *));
-
-void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio);
+void blk_mq_sched_assign_ioc(struct request *rq);
 
 void blk_mq_sched_request_inserted(struct request *rq);
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
-		struct request **merged_request);
-bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
+		unsigned int nr_segs, struct request **merged_request);
+bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+		unsigned int nr_segs);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 				 bool run_queue, bool async);
-void blk_mq_sched_insert_requests(struct request_queue *q,
+void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
 				  struct blk_mq_ctx *ctx,
 				  struct list_head *list, bool run_queue_async);
 
@@ -28,14 +26,16 @@
 
 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
 void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
+void blk_mq_sched_free_requests(struct request_queue *q);
 
 static inline bool
-blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
+blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+		unsigned int nr_segs)
 {
 	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
 		return false;
 
-	return __blk_mq_sched_bio_merge(q, bio);
+	return __blk_mq_sched_bio_merge(q, bio, nr_segs);
 }
 
 static inline bool
@@ -44,27 +44,18 @@
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (e && e->type->ops.mq.allow_merge)
-		return e->type->ops.mq.allow_merge(q, rq, bio);
+	if (e && e->type->ops.allow_merge)
+		return e->type->ops.allow_merge(q, rq, bio);
 
 	return true;
 }
 
-static inline void blk_mq_sched_completed_request(struct request *rq)
+static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
 {
 	struct elevator_queue *e = rq->q->elevator;
 
-	if (e && e->type->ops.mq.completed_request)
-		e->type->ops.mq.completed_request(rq);
-}
-
-static inline void blk_mq_sched_started_request(struct request *rq)
-{
-	struct request_queue *q = rq->q;
-	struct elevator_queue *e = q->elevator;
-
-	if (e && e->type->ops.mq.started_request)
-		e->type->ops.mq.started_request(rq);
+	if (e && e->type->ops.completed_request)
+		e->type->ops.completed_request(rq, now);
 }
 
 static inline void blk_mq_sched_requeue_request(struct request *rq)
@@ -72,16 +63,16 @@
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 
-	if (e && e->type->ops.mq.requeue_request)
-		e->type->ops.mq.requeue_request(rq);
+	if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)
+		e->type->ops.requeue_request(rq);
 }
 
 static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
 {
 	struct elevator_queue *e = hctx->queue->elevator;
 
-	if (e && e->type->ops.mq.has_work)
-		return e->type->ops.mq.has_work(hctx);
+	if (e && e->type->ops.has_work)
+		return e->type->ops.has_work(hctx);
 
 	return false;
 }
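
Note on the recurring change above: every hunk that touches a callback drops
the ".mq" level, i.e. the scheduler ops move from a union member
(e->type->ops.mq.*) to a flat per-type table (e->type->ops.*),
completed_request() now receives the completion timestamp from its caller,
and the merge path threads a precomputed segment count (nr_segs) instead of
each consumer recounting the bio. Below is a minimal sketch of a scheduler
ops table against the new layout; the sketch_* names are hypothetical, and
the field names are assumed from the elevator_mq_ops of kernels in this
range rather than taken from this header.

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/module.h>

/* Hypothetical callbacks, reduced to the trivial defaults. */
static bool sketch_allow_merge(struct request_queue *q, struct request *rq,
			       struct bio *bio)
{
	return true;		/* never veto a merge */
}

static void sketch_completed_request(struct request *rq, u64 now)
{
	/* "now" arrives from the completion path (typically ktime_get_ns()),
	 * so per-request latency can be derived without every scheduler
	 * reading the clock itself. */
}

static bool sketch_has_work(struct blk_mq_hw_ctx *hctx)
{
	return false;		/* this sketch never queues anything */
}

static struct elevator_type sketch_sched = {
	.ops = {		/* was a union member, .ops.mq = { ... } */
		.allow_merge		= sketch_allow_merge,
		.completed_request	= sketch_completed_request,
		.has_work		= sketch_has_work,
	},
	.elevator_name	= "sketch",
	.elevator_owner	= THIS_MODULE,
};

A real module would still register this with elv_register(&sketch_sched)
from its init hook; that boilerplate is omitted here.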