2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/block/bfq-cgroup.c
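NOTE (editor): summary inferred from the hunks below. This diff modernizes the cgroup support of the BFQ I/O scheduler: the GPL boilerplate is replaced by an SPDX tag; the debug statistics move from CONFIG_DEBUG_BLK_CGROUP to the new CONFIG_BFQ_CGROUP_DEBUG symbol, with BFQ gaining its own bfq_stat percpu-counter type plus bytes/ios rwstats; group lookup becomes bio-based (bfq_bio_bfqg() and bfq_link_bfqg() replace bfq_lookup_bfqg()/bfq_find_set_group()); moving queues between groups is hardened (an extra reference in bfq_bfqq_move(), merge-chain checks in __bfq_bic_change_cgroup(), the bfqg->online flag, and flushing the idle tree only after reparenting); and a per-device weight interface (bfq.weight_device) is added.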
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * cgroups support for the BFQ I/O scheduler.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -24,7 +15,83 @@
 
 #include "bfq-iosched.h"
 
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
+static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
+{
+	int ret;
+
+	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
+	if (ret)
+		return ret;
+
+	atomic64_set(&stat->aux_cnt, 0);
+	return 0;
+}
+
+static void bfq_stat_exit(struct bfq_stat *stat)
+{
+	percpu_counter_destroy(&stat->cpu_cnt);
+}
+
+/**
+ * bfq_stat_add - add a value to a bfq_stat
+ * @stat: target bfq_stat
+ * @val: value to add
+ *
+ * Add @val to @stat. The caller must ensure that IRQ on the same CPU
+ * don't re-enter this function for the same counter.
+ */
+static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
+{
+	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
+}
+
+/**
+ * bfq_stat_read - read the current value of a bfq_stat
+ * @stat: bfq_stat to read
+ */
+static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
+{
+	return percpu_counter_sum_positive(&stat->cpu_cnt);
+}
+
+/**
+ * bfq_stat_reset - reset a bfq_stat
+ * @stat: bfq_stat to reset
+ */
+static inline void bfq_stat_reset(struct bfq_stat *stat)
+{
+	percpu_counter_set(&stat->cpu_cnt, 0);
+	atomic64_set(&stat->aux_cnt, 0);
+}
+
+/**
+ * bfq_stat_add_aux - add a bfq_stat into another's aux count
+ * @to: the destination bfq_stat
+ * @from: the source
+ *
+ * Add @from's count including the aux one to @to's aux count.
+ */
+static inline void bfq_stat_add_aux(struct bfq_stat *to,
+				    struct bfq_stat *from)
+{
+	atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
+		     &to->aux_cnt);
+}
+
+/**
+ * blkg_prfill_stat - prfill callback for bfq_stat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the bfq_stat in @pd
+ *
+ * prfill callback for printing a bfq_stat.
+ */
+static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
+			    int off)
+{
+	return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
+}
 
 /* bfqg stats flags */
 enum bfqg_stats_flags {
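NOTE (editor): the bfq_stat helpers added above wrap a type that is declared outside this file. A minimal sketch of that declaration, assuming it matches the percpu_counter/aux_cnt usage visible in this hunk (upstream keeps it in block/bfq-iosched.h):

	#include <linux/atomic.h>
	#include <linux/percpu_counter.h>

	/* Sketch only: a per-CPU counter plus an auxiliary count that
	 * accumulates totals transferred from dying groups via
	 * bfq_stat_add_aux(). */
	struct bfq_stat {
		struct percpu_counter	cpu_cnt;
		atomic64_t		aux_cnt;
	};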
@@ -62,7 +129,7 @@
 
 	now = ktime_get_ns();
 	if (now > stats->start_group_wait_time)
-		blkg_stat_add(&stats->group_wait_time,
+		bfq_stat_add(&stats->group_wait_time,
 			      now - stats->start_group_wait_time);
 	bfqg_stats_clear_waiting(stats);
 }
@@ -91,14 +158,14 @@
 
 	now = ktime_get_ns();
 	if (now > stats->start_empty_time)
-		blkg_stat_add(&stats->empty_time,
+		bfq_stat_add(&stats->empty_time,
 			      now - stats->start_empty_time);
 	bfqg_stats_clear_empty(stats);
 }
 
 void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
 {
-	blkg_stat_add(&bfqg->stats.dequeue, 1);
+	bfq_stat_add(&bfqg->stats.dequeue, 1);
 }
 
 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
@@ -128,7 +195,7 @@
 	u64 now = ktime_get_ns();
 
 	if (now > stats->start_idle_time)
-		blkg_stat_add(&stats->idle_time,
+		bfq_stat_add(&stats->idle_time,
 			      now - stats->start_idle_time);
 	bfqg_stats_clear_idling(stats);
 }
@@ -146,9 +213,9 @@
 {
 	struct bfqg_stats *stats = &bfqg->stats;
 
-	blkg_stat_add(&stats->avg_queue_size_sum,
+	bfq_stat_add(&stats->avg_queue_size_sum,
 		      blkg_rwstat_total(&stats->queued));
-	blkg_stat_add(&stats->avg_queue_size_samples, 1);
+	bfq_stat_add(&stats->avg_queue_size_samples, 1);
 	bfqg_stats_update_group_wait_time(stats);
 }
 
@@ -185,7 +252,7 @@
 					io_start_time_ns - start_time_ns);
 }
 
-#else /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+#else /* CONFIG_BFQ_CGROUP_DEBUG */
 
 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
 			      unsigned int op) { }
@@ -199,7 +266,7 @@
 void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
 void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
 
-#endif /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+#endif /* CONFIG_BFQ_CGROUP_DEBUG */
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 
@@ -280,21 +347,32 @@
 	bfqg_put(bfqg);
 }
 
+void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
+{
+	struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);
+
+	if (!bfqg)
+		return;
+
+	blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
+	blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
+}
+
 /* @stats = 0 */
 static void bfqg_stats_reset(struct bfqg_stats *stats)
 {
-#ifdef CONFIG_DEBUG_BLK_CGROUP
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
 	/* queued stats shouldn't be cleared */
 	blkg_rwstat_reset(&stats->merged);
 	blkg_rwstat_reset(&stats->service_time);
 	blkg_rwstat_reset(&stats->wait_time);
-	blkg_stat_reset(&stats->time);
-	blkg_stat_reset(&stats->avg_queue_size_sum);
-	blkg_stat_reset(&stats->avg_queue_size_samples);
-	blkg_stat_reset(&stats->dequeue);
-	blkg_stat_reset(&stats->group_wait_time);
-	blkg_stat_reset(&stats->idle_time);
-	blkg_stat_reset(&stats->empty_time);
+	bfq_stat_reset(&stats->time);
+	bfq_stat_reset(&stats->avg_queue_size_sum);
+	bfq_stat_reset(&stats->avg_queue_size_samples);
+	bfq_stat_reset(&stats->dequeue);
+	bfq_stat_reset(&stats->group_wait_time);
+	bfq_stat_reset(&stats->idle_time);
+	bfq_stat_reset(&stats->empty_time);
 #endif
 }
 
@@ -304,19 +382,19 @@
 	if (!to || !from)
 		return;
 
-#ifdef CONFIG_DEBUG_BLK_CGROUP
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
 	/* queued stats shouldn't be cleared */
 	blkg_rwstat_add_aux(&to->merged, &from->merged);
 	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
 	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
-	blkg_stat_add_aux(&from->time, &from->time);
-	blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
-	blkg_stat_add_aux(&to->avg_queue_size_samples,
+	bfq_stat_add_aux(&from->time, &from->time);
+	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
+	bfq_stat_add_aux(&to->avg_queue_size_samples,
 			  &from->avg_queue_size_samples);
-	blkg_stat_add_aux(&to->dequeue, &from->dequeue);
-	blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
-	blkg_stat_add_aux(&to->idle_time, &from->idle_time);
-	blkg_stat_add_aux(&to->empty_time, &from->empty_time);
+	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
+	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
+	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
+	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
 #endif
 }
 
@@ -334,7 +412,7 @@
 
 	parent = bfqg_parent(bfqg);
 
-	lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);
+	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
 
 	if (unlikely(!parent))
 		return;
@@ -364,35 +442,41 @@
 
 static void bfqg_stats_exit(struct bfqg_stats *stats)
 {
-#ifdef CONFIG_DEBUG_BLK_CGROUP
+	blkg_rwstat_exit(&stats->bytes);
+	blkg_rwstat_exit(&stats->ios);
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
 	blkg_rwstat_exit(&stats->merged);
 	blkg_rwstat_exit(&stats->service_time);
 	blkg_rwstat_exit(&stats->wait_time);
 	blkg_rwstat_exit(&stats->queued);
-	blkg_stat_exit(&stats->time);
-	blkg_stat_exit(&stats->avg_queue_size_sum);
-	blkg_stat_exit(&stats->avg_queue_size_samples);
-	blkg_stat_exit(&stats->dequeue);
-	blkg_stat_exit(&stats->group_wait_time);
-	blkg_stat_exit(&stats->idle_time);
-	blkg_stat_exit(&stats->empty_time);
+	bfq_stat_exit(&stats->time);
+	bfq_stat_exit(&stats->avg_queue_size_sum);
+	bfq_stat_exit(&stats->avg_queue_size_samples);
+	bfq_stat_exit(&stats->dequeue);
+	bfq_stat_exit(&stats->group_wait_time);
+	bfq_stat_exit(&stats->idle_time);
+	bfq_stat_exit(&stats->empty_time);
 #endif
 }
 
 static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
 {
-#ifdef CONFIG_DEBUG_BLK_CGROUP
+	if (blkg_rwstat_init(&stats->bytes, gfp) ||
+	    blkg_rwstat_init(&stats->ios, gfp))
+		return -ENOMEM;
+
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
 	if (blkg_rwstat_init(&stats->merged, gfp) ||
 	    blkg_rwstat_init(&stats->service_time, gfp) ||
 	    blkg_rwstat_init(&stats->wait_time, gfp) ||
 	    blkg_rwstat_init(&stats->queued, gfp) ||
-	    blkg_stat_init(&stats->time, gfp) ||
-	    blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
-	    blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
-	    blkg_stat_init(&stats->dequeue, gfp) ||
-	    blkg_stat_init(&stats->group_wait_time, gfp) ||
-	    blkg_stat_init(&stats->idle_time, gfp) ||
-	    blkg_stat_init(&stats->empty_time, gfp)) {
+	    bfq_stat_init(&stats->time, gfp) ||
+	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
+	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
+	    bfq_stat_init(&stats->dequeue, gfp) ||
+	    bfq_stat_init(&stats->group_wait_time, gfp) ||
+	    bfq_stat_init(&stats->idle_time, gfp) ||
+	    bfq_stat_init(&stats->empty_time, gfp)) {
 		bfqg_stats_exit(stats);
 		return -ENOMEM;
 	}
@@ -434,11 +518,12 @@
 	kfree(cpd_to_bfqgd(cpd));
 }
 
-static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
+static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
+					     struct blkcg *blkcg)
 {
 	struct bfq_group *bfqg;
 
-	bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
+	bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
 	if (!bfqg)
 		return NULL;
 
@@ -468,6 +553,7 @@
 	 */
 	bfqg->bfqd = bfqd;
 	bfqg->active_entities = 0;
+	bfqg->online = true;
 	bfqg->rq_pos_tree = RB_ROOT;
 }
 
@@ -496,27 +582,10 @@
 	entity->sched_data = &parent->sched_data;
 }
 
-static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
-					 struct blkcg *blkcg)
+static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
 {
-	struct blkcg_gq *blkg;
-
-	blkg = blkg_lookup(blkcg, bfqd->queue);
-	if (likely(blkg))
-		return blkg_to_bfqg(blkg);
-	return NULL;
-}
-
-struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
-				     struct blkcg *blkcg)
-{
-	struct bfq_group *bfqg, *parent;
+	struct bfq_group *parent;
 	struct bfq_entity *entity;
-
-	bfqg = bfq_lookup_bfqg(bfqd, blkcg);
-
-	if (unlikely(!bfqg))
-		return NULL;
 
 	/*
 	 * Update chain of bfq_groups as we might be handling a leaf group
@@ -534,8 +603,28 @@
 			bfq_group_set_parent(curr_bfqg, parent);
 		}
 	}
+}
 
-	return bfqg;
+struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
+{
+	struct blkcg_gq *blkg = bio->bi_blkg;
+	struct bfq_group *bfqg;
+
+	while (blkg) {
+		if (!blkg->online) {
+			blkg = blkg->parent;
+			continue;
+		}
+		bfqg = blkg_to_bfqg(blkg);
+		if (bfqg->online) {
+			bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
+			return bfqg;
+		}
+		blkg = blkg->parent;
+	}
+	bio_associate_blkg_from_css(bio,
+				    &bfqg_to_blkg(bfqd->root_group)->blkcg->css);
+	return bfqd->root_group;
 }
 
 /**
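NOTE (editor): bfq_bio_bfqg() introduced above walks up the blkg hierarchy from the bio's cgroup until it finds a group whose blkg and bfqg are both still online, falling back to the root group, and re-associates the bio with the cgroup it actually settled on. Together with the bfqg->online flag set at group init above and cleared in the offline path (see the put_async_queues hunk below), this keeps new requests from being queued on a group that is being torn down.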
@@ -557,6 +646,18 @@
 {
 	struct bfq_entity *entity = &bfqq->entity;
 
+	/*
+	 * oom_bfqq is not allowed to move, oom_bfqq will hold ref to root_group
+	 * until elevator exit.
+	 */
+	if (bfqq == &bfqd->oom_bfqq)
+		return;
+	/*
+	 * Get extra reference to prevent bfqq from being freed in
+	 * next possible expire or deactivate.
+	 */
+	bfqq->ref++;
+
 	/* If bfqq is empty, then bfq_bfqq_expire also invokes
 	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
 	 * from data structures related to current group. Otherwise we
@@ -569,7 +670,7 @@
 
 	if (bfq_bfqq_busy(bfqq))
 		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
-	else if (entity->on_st)
+	else if (entity->on_st_or_in_serv)
 		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
 	bfqg_and_blkg_put(bfqq_group(bfqq));
 
@@ -579,12 +680,15 @@
 	bfqg_and_blkg_get(bfqg);
 
 	if (bfq_bfqq_busy(bfqq)) {
-		bfq_pos_tree_add_move(bfqd, bfqq);
+		if (unlikely(!bfqd->nonrot_with_queueing))
+			bfq_pos_tree_add_move(bfqd, bfqq);
 		bfq_activate_bfqq(bfqd, bfqq);
 	}
 
 	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
 		bfq_schedule_dispatch(bfqd);
+	/* release extra ref taken above, bfqq may happen to be freed now */
+	bfq_put_queue(bfqq);
 }
 
 /**
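NOTE (editor): the two additions to bfq_bfqq_move() above pair up. The queue takes an extra reference before it is expired or deactivated and drops it only after activation on the new group, so a queue that becomes empty and expires mid-move cannot be freed while the function still uses it; the oom_bfqq early return keeps the shared fallback queue pinned to the root group for the elevator's lifetime.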
@@ -596,41 +700,58 @@
  * Move bic to blkcg, assuming that bfqd->lock is held; which makes
  * sure that the reference to cgroup is valid across the call (see
  * comments in bfq_bic_update_cgroup on this issue)
- *
- * NOTE: an alternative approach might have been to store the current
- * cgroup in bfqq and getting a reference to it, reducing the lookup
- * time here, at the price of slightly more complex code.
  */
-static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
-						struct bfq_io_cq *bic,
-						struct blkcg *blkcg)
+static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
+				     struct bfq_io_cq *bic,
+				     struct bfq_group *bfqg)
 {
-	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
-	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
-	struct bfq_group *bfqg;
+	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false);
+	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true);
 	struct bfq_entity *entity;
-
-	bfqg = bfq_find_set_group(bfqd, blkcg);
-
-	if (unlikely(!bfqg))
-		bfqg = bfqd->root_group;
 
 	if (async_bfqq) {
 		entity = &async_bfqq->entity;
 
 		if (entity->sched_data != &bfqg->sched_data) {
-			bic_set_bfqq(bic, NULL, 0);
-			bfq_log_bfqq(bfqd, async_bfqq,
-				     "bic_change_group: %p %d",
-				     async_bfqq, async_bfqq->ref);
-			bfq_put_queue(async_bfqq);
+			bic_set_bfqq(bic, NULL, false);
+			bfq_release_process_ref(bfqd, async_bfqq);
 		}
 	}
 
 	if (sync_bfqq) {
-		entity = &sync_bfqq->entity;
-		if (entity->sched_data != &bfqg->sched_data)
-			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
+		if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
+			/* We are the only user of this bfqq, just move it */
+			if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
+				bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
+		} else {
+			struct bfq_queue *bfqq;
+
+			/*
+			 * The queue was merged to a different queue. Check
+			 * that the merge chain still belongs to the same
+			 * cgroup.
+			 */
+			for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
+				if (bfqq->entity.sched_data !=
+				    &bfqg->sched_data)
+					break;
+			if (bfqq) {
+				/*
+				 * Some queue changed cgroup so the merge is
+				 * not valid anymore. We cannot easily just
+				 * cancel the merge (by clearing new_bfqq) as
+				 * there may be other processes using this
+				 * queue and holding refs to all queues below
+				 * sync_bfqq->new_bfqq. Similarly if the merge
+				 * already happened, we need to detach from
+				 * bfqq now so that we cannot merge bio to a
+				 * request from the old cgroup.
+				 */
+				bfq_put_cooperator(sync_bfqq);
+				bic_set_bfqq(bic, NULL, true);
+				bfq_release_process_ref(bfqd, sync_bfqq);
+			}
+		}
 	}
 
 	return bfqg;
@@ -639,20 +760,24 @@
 void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
 {
 	struct bfq_data *bfqd = bic_to_bfqd(bic);
-	struct bfq_group *bfqg = NULL;
+	struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
 	uint64_t serial_nr;
 
-	rcu_read_lock();
-	serial_nr = bio_blkcg(bio)->css.serial_nr;
+	serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;
 
 	/*
	 * Check whether blkcg has changed. The condition may trigger
 	 * spuriously on a newly created cic but there's no harm.
 	 */
 	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
-		goto out;
+		return;
 
-	bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
+	/*
+	 * New cgroup for this process. Make sure it is linked to bfq internal
+	 * cgroup hierarchy.
+	 */
+	bfq_link_bfqg(bfqd, bfqg);
+	__bfq_bic_change_cgroup(bfqd, bic, bfqg);
 	/*
 	 * Update blkg_path for bfq_log_* functions. We cache this
 	 * path, and update it here, for the following
@@ -705,8 +830,6 @@
 	 */
 	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
 	bic->blkcg_serial_nr = serial_nr;
-out:
-	rcu_read_unlock();
 }
 
 /**
@@ -724,39 +847,53 @@
 /**
  * bfq_reparent_leaf_entity - move leaf entity to the root_group.
  * @bfqd: the device data structure with the root group.
- * @entity: the entity to move.
+ * @entity: the entity to move, if entity is a leaf; or the parent entity
+ *	    of an active leaf entity to move, if entity is not a leaf.
  */
 static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
-				     struct bfq_entity *entity)
+				     struct bfq_entity *entity,
+				     int ioprio_class)
 {
-	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
+	struct bfq_queue *bfqq;
+	struct bfq_entity *child_entity = entity;
 
+	while (child_entity->my_sched_data) { /* leaf not reached yet */
+		struct bfq_sched_data *child_sd = child_entity->my_sched_data;
+		struct bfq_service_tree *child_st = child_sd->service_tree +
+			ioprio_class;
+		struct rb_root *child_active = &child_st->active;
+
+		child_entity = bfq_entity_of(rb_first(child_active));
+
+		if (!child_entity)
+			child_entity = child_sd->in_service_entity;
+	}
+
+	bfqq = bfq_entity_to_bfqq(child_entity);
 	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
 }
 
 /**
- * bfq_reparent_active_entities - move to the root group all active
- *                                entities.
+ * bfq_reparent_active_queues - move to the root group all active queues.
  * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
- * @st: the service tree with the entities.
+ * @st: the service tree to start the search from.
 */
-static void bfq_reparent_active_entities(struct bfq_data *bfqd,
-					 struct bfq_group *bfqg,
-					 struct bfq_service_tree *st)
+static void bfq_reparent_active_queues(struct bfq_data *bfqd,
+				       struct bfq_group *bfqg,
+				       struct bfq_service_tree *st,
+				       int ioprio_class)
 {
 	struct rb_root *active = &st->active;
-	struct bfq_entity *entity = NULL;
+	struct bfq_entity *entity;
 
-	if (!RB_EMPTY_ROOT(&st->active))
-		entity = bfq_entity_of(rb_first(active));
-
-	for (; entity ; entity = bfq_entity_of(rb_first(active)))
-		bfq_reparent_leaf_entity(bfqd, entity);
+	while ((entity = bfq_entity_of(rb_first(active))))
+		bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);
 
 	if (bfqg->sched_data.in_service_entity)
 		bfq_reparent_leaf_entity(bfqd,
-					 bfqg->sched_data.in_service_entity);
+					 bfqg->sched_data.in_service_entity,
+					 ioprio_class);
 }
 
 /**
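NOTE (editor): the ioprio_class parameter added above lets bfq_reparent_leaf_entity() accept a group entity as well: it now descends through my_sched_data along the given class's active service tree (or the in-service entity) until it reaches a leaf bfq_queue, which is what the rename from bfq_reparent_active_entities() to bfq_reparent_active_queues() reflects.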
@@ -789,13 +926,6 @@
 		st = bfqg->sched_data.service_tree + i;
 
 		/*
-		 * The idle tree may still contain bfq_queues belonging
-		 * to exited task because they never migrated to a different
-		 * cgroup from the one being destroyed now.
-		 */
-		bfq_flush_idle_tree(st);
-
-		/*
 		 * It may happen that some queues are still active
 		 * (busy) upon group destruction (if the corresponding
 		 * processes have been forced to terminate). We move
@@ -807,13 +937,27 @@
 		 * There is no need to put the sync queues, as the
 		 * scheduler has taken no reference.
 		 */
-		bfq_reparent_active_entities(bfqd, bfqg, st);
+		bfq_reparent_active_queues(bfqd, bfqg, st, i);
+
+		/*
+		 * The idle tree may still contain bfq_queues
+		 * belonging to exited task because they never
+		 * migrated to a different cgroup from the one being
+		 * destroyed now. In addition, even
+		 * bfq_reparent_active_queues() may happen to add some
+		 * entities to the idle tree. It happens if, in some
+		 * of the calls to bfq_bfqq_move() performed by
+		 * bfq_reparent_active_queues(), the queue to move is
+		 * empty and gets expired.
+		 */
+		bfq_flush_idle_tree(st);
 	}
 
 	__bfq_deactivate_entity(entity, false);
 
 put_async_queues:
 	bfq_put_async_queues(bfqd, bfqg);
+	bfqg->online = false;
 
 	spin_unlock_irqrestore(&bfqd->lock, flags);
 	/*
@@ -837,7 +981,7 @@
 	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
 }
 
-static int bfq_io_show_weight(struct seq_file *sf, void *v)
+static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
 {
 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
 	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
@@ -849,6 +993,60 @@
 	seq_printf(sf, "%u\n", val);
 
 	return 0;
+}
+
+static u64 bfqg_prfill_weight_device(struct seq_file *sf,
+				     struct blkg_policy_data *pd, int off)
+{
+	struct bfq_group *bfqg = pd_to_bfqg(pd);
+
+	if (!bfqg->entity.dev_weight)
+		return 0;
+	return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
+}
+
+static int bfq_io_show_weight(struct seq_file *sf, void *v)
+{
+	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
+
+	seq_printf(sf, "default %u\n", bfqgd->weight);
+	blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
+			  &blkcg_policy_bfq, 0, false);
+	return 0;
+}
+
+static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
+{
+	weight = dev_weight ?: weight;
+
+	bfqg->entity.dev_weight = dev_weight;
+	/*
+	 * Setting the prio_changed flag of the entity
+	 * to 1 with new_weight == weight would re-set
+	 * the value of the weight to its ioprio mapping.
+	 * Set the flag only if necessary.
+	 */
+	if ((unsigned short)weight != bfqg->entity.new_weight) {
+		bfqg->entity.new_weight = (unsigned short)weight;
+		/*
+		 * Make sure that the above new value has been
+		 * stored in bfqg->entity.new_weight before
+		 * setting the prio_changed flag. In fact,
+		 * this flag may be read asynchronously (in
+		 * critical sections protected by a different
+		 * lock than that held here), and finding this
+		 * flag set may cause the execution of the code
+		 * for updating parameters whose value may
+		 * depend also on bfqg->entity.new_weight (in
+		 * __bfq_entity_update_weight_prio).
+		 * This barrier makes sure that the new value
+		 * of bfqg->entity.new_weight is correctly
+		 * seen in that code.
+		 */
+		smp_wmb();
+		bfqg->entity.prio_changed = 1;
+	}
 }
 
 static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
@@ -869,61 +1067,70 @@
 	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 		struct bfq_group *bfqg = blkg_to_bfqg(blkg);
 
-		if (!bfqg)
-			continue;
-		/*
-		 * Setting the prio_changed flag of the entity
-		 * to 1 with new_weight == weight would re-set
-		 * the value of the weight to its ioprio mapping.
-		 * Set the flag only if necessary.
-		 */
-		if ((unsigned short)val != bfqg->entity.new_weight) {
-			bfqg->entity.new_weight = (unsigned short)val;
-			/*
-			 * Make sure that the above new value has been
-			 * stored in bfqg->entity.new_weight before
-			 * setting the prio_changed flag. In fact,
-			 * this flag may be read asynchronously (in
-			 * critical sections protected by a different
-			 * lock than that held here), and finding this
-			 * flag set may cause the execution of the code
-			 * for updating parameters whose value may
-			 * depend also on bfqg->entity.new_weight (in
-			 * __bfq_entity_update_weight_prio).
-			 * This barrier makes sure that the new value
-			 * of bfqg->entity.new_weight is correctly
-			 * seen in that code.
-			 */
-			smp_wmb();
-			bfqg->entity.prio_changed = 1;
-		}
+		if (bfqg)
+			bfq_group_set_weight(bfqg, val, 0);
 	}
 	spin_unlock_irq(&blkcg->lock);
 
 	return ret;
 }
 
+static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
+					char *buf, size_t nbytes,
+					loff_t off)
+{
+	int ret;
+	struct blkg_conf_ctx ctx;
+	struct blkcg *blkcg = css_to_blkcg(of_css(of));
+	struct bfq_group *bfqg;
+	u64 v;
+
+	ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
+	if (ret)
+		return ret;
+
+	if (sscanf(ctx.body, "%llu", &v) == 1) {
+		/* require "default" on dfl */
+		ret = -ERANGE;
+		if (!v)
+			goto out;
+	} else if (!strcmp(strim(ctx.body), "default")) {
+		v = 0;
+	} else {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	bfqg = blkg_to_bfqg(ctx.blkg);
+
+	ret = -ERANGE;
+	if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
+		bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
+		ret = 0;
+	}
+out:
+	blkg_conf_finish(&ctx);
+	return ret ?: nbytes;
+}
+
 static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
 				 char *buf, size_t nbytes,
 				 loff_t off)
 {
-	u64 weight;
-	/* First unsigned long found in the file is used */
-	int ret = kstrtoull(strim(buf), 0, &weight);
+	char *endp;
+	int ret;
+	u64 v;
 
-	if (ret)
-		return ret;
+	buf = strim(buf);
 
-	ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
-	return ret ?: nbytes;
-}
+	/* "WEIGHT" or "default WEIGHT" sets the default weight */
+	v = simple_strtoull(buf, &endp, 0);
+	if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
+		ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
+		return ret ?: nbytes;
+	}
 
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-static int bfqg_print_stat(struct seq_file *sf, void *v)
-{
-	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
-			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
-	return 0;
+	return bfq_io_set_device_weight(of, buf, nbytes, off);
 }
 
 static int bfqg_print_rwstat(struct seq_file *sf, void *v)
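NOTE (editor): the resulting user interface, as implied by the parsing above: writing "100" or "default 100" to bfq.weight_device updates the group-wide default through bfq_io_set_weight_legacy(), while "MAJ:MIN 300" (e.g. "8:16 300", the device numbers being only an example) sets a per-device weight and "MAJ:MIN default" clears it again, since bfq_io_set_device_weight() stores "default" as a zero dev_weight and bfq_group_set_weight() lets a nonzero dev_weight override the default (weight = dev_weight ?: weight). The per-device value lands in the new dev_weight field of struct bfq_entity, declared outside this file.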
@@ -933,29 +1140,13 @@
 	return 0;
 }
 
-static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
-				      struct blkg_policy_data *pd, int off)
-{
-	u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
-					  &blkcg_policy_bfq, off);
-	return __blkg_prfill_u64(sf, pd, sum);
-}
-
 static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
 					struct blkg_policy_data *pd, int off)
 {
-	struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
-							   &blkcg_policy_bfq,
-							   off);
-	return __blkg_prfill_rwstat(sf, pd, &sum);
-}
+	struct blkg_rwstat_sample sum;
 
-static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
-{
-	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
-			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
-			  seq_cft(sf)->private, false);
-	return 0;
+	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
+	return __blkg_prfill_rwstat(sf, pd, &sum);
 }
 
 static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
@@ -966,10 +1157,52 @@
 	return 0;
 }
 
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
+static int bfqg_print_stat(struct seq_file *sf, void *v)
+{
+	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
+			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
+	return 0;
+}
+
+static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
+				      struct blkg_policy_data *pd, int off)
+{
+	struct blkcg_gq *blkg = pd_to_blkg(pd);
+	struct blkcg_gq *pos_blkg;
+	struct cgroup_subsys_state *pos_css;
+	u64 sum = 0;
+
+	lockdep_assert_held(&blkg->q->queue_lock);
+
+	rcu_read_lock();
+	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
+		struct bfq_stat *stat;
+
+		if (!pos_blkg->online)
+			continue;
+
+		stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
+		sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
+	}
+	rcu_read_unlock();
+
+	return __blkg_prfill_u64(sf, pd, sum);
+}
+
+static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
+{
+	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
+			  seq_cft(sf)->private, false);
+	return 0;
+}
+
 static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
 			       int off)
 {
-	u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
+	struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
+	u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);
 
 	return __blkg_prfill_u64(sf, pd, sum >> 9);
 }
@@ -984,12 +1217,13 @@
 static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
 					 struct blkg_policy_data *pd, int off)
 {
-	struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
-					offsetof(struct blkcg_gq, stat_bytes));
-	u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
-		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
+	struct blkg_rwstat_sample tmp;
 
-	return __blkg_prfill_u64(sf, pd, sum >> 9);
+	blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
+			offsetof(struct bfq_group, stats.bytes), &tmp);
+
+	return __blkg_prfill_u64(sf, pd,
+		(tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
 }
 
 static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
@@ -1004,11 +1238,11 @@
 				      struct blkg_policy_data *pd, int off)
 {
 	struct bfq_group *bfqg = pd_to_bfqg(pd);
-	u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
+	u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
 	u64 v = 0;
 
 	if (samples) {
-		v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
+		v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
 		v = div64_u64(v, samples);
 	}
 	__blkg_prfill_u64(sf, pd, v);
@@ -1023,7 +1257,7 @@
 			  0, false);
 	return 0;
 }
-#endif /* CONFIG_DEBUG_BLK_CGROUP */
+#endif /* CONFIG_BFQ_CGROUP_DEBUG */
 
 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
 {
@@ -1056,22 +1290,28 @@
 	{
 		.name = "bfq.weight",
 		.flags = CFTYPE_NOT_ON_ROOT,
-		.seq_show = bfq_io_show_weight,
+		.seq_show = bfq_io_show_weight_legacy,
 		.write_u64 = bfq_io_set_weight_legacy,
+	},
+	{
+		.name = "bfq.weight_device",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = bfq_io_show_weight,
+		.write = bfq_io_set_weight,
 	},
 
 	/* statistics, covers only the tasks in the bfqg */
 	{
 		.name = "bfq.io_service_bytes",
-		.private = (unsigned long)&blkcg_policy_bfq,
-		.seq_show = blkg_print_stat_bytes,
+		.private = offsetof(struct bfq_group, stats.bytes),
+		.seq_show = bfqg_print_rwstat,
 	},
 	{
 		.name = "bfq.io_serviced",
-		.private = (unsigned long)&blkcg_policy_bfq,
-		.seq_show = blkg_print_stat_ios,
+		.private = offsetof(struct bfq_group, stats.ios),
+		.seq_show = bfqg_print_rwstat,
 	},
-#ifdef CONFIG_DEBUG_BLK_CGROUP
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
 	{
 		.name = "bfq.time",
 		.private = offsetof(struct bfq_group, stats.time),
@@ -1101,20 +1341,20 @@
 		.private = offsetof(struct bfq_group, stats.queued),
 		.seq_show = bfqg_print_rwstat,
 	},
-#endif /* CONFIG_DEBUG_BLK_CGROUP */
+#endif /* CONFIG_BFQ_CGROUP_DEBUG */
 
-	/* the same statictics which cover the bfqg and its descendants */
+	/* the same statistics which cover the bfqg and its descendants */
 	{
 		.name = "bfq.io_service_bytes_recursive",
-		.private = (unsigned long)&blkcg_policy_bfq,
-		.seq_show = blkg_print_stat_bytes_recursive,
+		.private = offsetof(struct bfq_group, stats.bytes),
+		.seq_show = bfqg_print_rwstat_recursive,
 	},
 	{
 		.name = "bfq.io_serviced_recursive",
-		.private = (unsigned long)&blkcg_policy_bfq,
-		.seq_show = blkg_print_stat_ios_recursive,
+		.private = offsetof(struct bfq_group, stats.ios),
+		.seq_show = bfqg_print_rwstat_recursive,
 	},
-#ifdef CONFIG_DEBUG_BLK_CGROUP
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
 	{
 		.name = "bfq.time_recursive",
 		.private = offsetof(struct bfq_group, stats.time),
@@ -1168,7 +1408,7 @@
 		.private = offsetof(struct bfq_group, stats.dequeue),
 		.seq_show = bfqg_print_stat,
 	},
-#endif /* CONFIG_DEBUG_BLK_CGROUP */
+#endif /* CONFIG_BFQ_CGROUP_DEBUG */
 	{ }	/* terminate */
 };
 
@@ -1207,7 +1447,7 @@
 	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
 }
 
-struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
+struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
 {
 	return bfqd->root_group;
 }
@@ -1217,6 +1457,10 @@
 	return bfqq->bfqd->root_group;
 }
 
+void bfqg_and_blkg_get(struct bfq_group *bfqg) {}
+
+void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
+
 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
 {
 	struct bfq_group *bfqg;