2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
--- a/kernel/block/blk-wbt.c
+++ b/kernel/block/blk-wbt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * buffered writeback throttling. loosely based on CoDel. We can't drop
  * packets for IO scheduling, so the logic is something like this:
@@ -313,7 +314,7 @@
 	calc_wb_limits(rwb);
 	rwb->unknown_cnt = 0;
 	rwb_wake_all(rwb);
-	rwb_trace_step(rwb, "scale up");
+	rwb_trace_step(rwb, tracepoint_string("scale up"));
 }

 static void scale_down(struct rq_wb *rwb, bool hard_throttle)
@@ -322,7 +323,7 @@
 		return;
 	calc_wb_limits(rwb);
 	rwb->unknown_cnt = 0;
-	rwb_trace_step(rwb, "scale down");
+	rwb_trace_step(rwb, tracepoint_string("scale down"));
 }

 static void rwb_arm_timer(struct rq_wb *rwb)
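tracepoint_string() is the standard kernel tracing helper: with CONFIG_TRACING enabled, each expansion emits the literal once into the __tracepoint_str section and evaluates to a stable address, so the trace event records only a pointer that tooling can resolve back to text; with tracing disabled it degrades to the plain string. A minimal sketch of the pattern — the wrapper function below is illustrative, not part of the patch:

	/* Illustrative only: each tracepoint_string() expansion creates its
	 * own static string and returns its address, which stays valid for
	 * the kernel's lifetime.
	 */
	static const char *wbt_step_msg(bool up)
	{
		return up ? tracepoint_string("scale up")
			  : tracepoint_string("scale down");
	}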
@@ -405,7 +406,7 @@
 	rwb_arm_timer(rwb);
 }

-static void __wbt_update_limits(struct rq_wb *rwb)
+static void wbt_update_limits(struct rq_wb *rwb)
 {
 	struct rq_depth *rqd = &rwb->rq_depth;

@@ -416,14 +417,6 @@
 	calc_wb_limits(rwb);

 	rwb_wake_all(rwb);
-}
-
-void wbt_update_limits(struct request_queue *q)
-{
-	struct rq_qos *rqos = wbt_rq_qos(q);
-	if (!rqos)
-		return;
-	__wbt_update_limits(RQWB(rqos));
 }

 u64 wbt_get_min_lat(struct request_queue *q)
@@ -441,7 +434,7 @@
 		return;
 	RQWB(rqos)->min_lat_nsec = val;
 	RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
-	__wbt_update_limits(RQWB(rqos));
+	wbt_update_limits(RQWB(rqos));
 }

@@ -492,31 +485,21 @@
 }

 struct wbt_wait_data {
-	struct wait_queue_entry wq;
-	struct task_struct *task;
 	struct rq_wb *rwb;
-	struct rq_wait *rqw;
+	enum wbt_flags wb_acct;
 	unsigned long rw;
-	bool got_token;
 };

-static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
-			     int wake_flags, void *key)
+static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
 {
-	struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data,
-						  wq);
+	struct wbt_wait_data *data = private_data;
+	return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw));
+}

-	/*
-	 * If we fail to get a budget, return -1 to interrupt the wake up
-	 * loop in __wake_up_common.
-	 */
-	if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw)))
-		return -1;
-
-	data->got_token = true;
-	list_del_init(&curr->entry);
-	wake_up_process(data->task);
-	return 1;
+static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
+{
+	struct wbt_wait_data *data = private_data;
+	wbt_rqw_done(data->rwb, rqw, data->wb_acct);
 }

 /*
@@ -524,57 +507,16 @@
  * the timer to kick off queuing again.
  */
 static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
-		       unsigned long rw, spinlock_t *lock)
-	__releases(lock)
-	__acquires(lock)
+		       unsigned long rw)
 {
 	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
 	struct wbt_wait_data data = {
-		.wq = {
-			.func	= wbt_wake_function,
-			.entry	= LIST_HEAD_INIT(data.wq.entry),
-		},
-		.task = current,
 		.rwb = rwb,
-		.rqw = rqw,
+		.wb_acct = wb_acct,
 		.rw = rw,
 	};
-	bool has_sleeper;

-	has_sleeper = wq_has_sleeper(&rqw->wait);
-	if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
-		return;
-
-	prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
-	do {
-		if (data.got_token)
-			break;
-
-		if (!has_sleeper &&
-		    rq_wait_inc_below(rqw, get_limit(rwb, rw))) {
-			finish_wait(&rqw->wait, &data.wq);
-
-			/*
-			 * We raced with wbt_wake_function() getting a token,
-			 * which means we now have two. Put our local token
-			 * and wake anyone else potentially waiting for one.
-			 */
-			if (data.got_token)
-				wbt_rqw_done(rwb, rqw, wb_acct);
-			break;
-		}
-
-		if (lock) {
-			spin_unlock_irq(lock);
-			io_schedule();
-			spin_lock_irq(lock);
-		} else
-			io_schedule();
-
-		has_sleeper = false;
-	} while (1);
-
-	finish_wait(&rqw->wait, &data.wq);
+	rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
 }

 static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
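Context for the two new callbacks: the open-coded exclusive-wait loop removed above moves into the shared rq_qos_wait() helper, with wbt_inflight_cb() deciding whether a token can be taken (via rq_wait_inc_below() against the current limit) and wbt_cleanup_cb() returning a token when a woken waiter gives up. A self-contained userspace sketch of the compare-and-swap idea behind rq_wait_inc_below() — inc_below() and its parameters are illustrative names, not kernel code:

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Take an in-flight token only while the count is below the limit,
	 * retrying if a concurrent submitter moved the count under us.
	 */
	static bool inc_below(atomic_int *inflight, int limit)
	{
		int cur = atomic_load(inflight);

		do {
			if (cur >= limit)
				return false;	/* over limit: caller waits */
		} while (!atomic_compare_exchange_weak(inflight, &cur, cur + 1));

		return true;			/* token taken */
	}

rq_qos_wait() layers an exclusive waitqueue over this, so under contention tokens are handed to sleepers in wake-up order rather than raced for.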
@@ -587,7 +529,7 @@
 		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
 		    (REQ_SYNC | REQ_IDLE))
 			return false;
-		/* fallthrough */
+		fallthrough;
 	case REQ_OP_DISCARD:
 		return true;
 	default:
@@ -627,7 +569,7 @@
  * in an irq held spinlock, if it holds one when calling this function.
  * If we do sleep, we'll release and re-grab it.
  */
-static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
+static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
 {
 	struct rq_wb *rwb = RQWB(rqos);
 	enum wbt_flags flags;
@@ -639,7 +581,7 @@
 		return;
 	}

-	__wbt_wait(rwb, flags, bio->bi_opf, lock);
+	__wbt_wait(rwb, flags, bio->bi_opf);

 	if (!blk_stat_is_active(rwb->cb))
 		rwb_arm_timer(rwb);
@@ -651,7 +593,7 @@
 	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
 }

-void wbt_issue(struct rq_qos *rqos, struct request *rq)
+static void wbt_issue(struct rq_qos *rqos, struct request *rq)
 {
 	struct rq_wb *rwb = RQWB(rqos);

@@ -671,7 +613,7 @@
 	}
 }

-void wbt_requeue(struct rq_qos *rqos, struct request *rq)
+static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
 {
 	struct rq_wb *rwb = RQWB(rqos);
 	if (!rwb_enabled(rwb))
@@ -679,15 +621,6 @@
 	if (rq == rwb->sync_cookie) {
 		rwb->sync_issue = 0;
 		rwb->sync_cookie = NULL;
-	}
-}
-
-void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
-{
-	struct rq_qos *rqos = wbt_rq_qos(q);
-	if (rqos) {
-		RQWB(rqos)->rq_depth.queue_depth = depth;
-		__wbt_update_limits(RQWB(rqos));
 	}
 }

@@ -716,8 +649,7 @@
 	if (!blk_queue_registered(q))
 		return;

-	if ((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) ||
-	    (q->request_fn && IS_ENABLED(CONFIG_BLK_WBT_SQ)))
+	if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
 		wbt_init(q);
 }
 EXPORT_SYMBOL_GPL(wbt_enable_default);
@@ -747,6 +679,12 @@
 	return -1;
 }

+static void wbt_queue_depth_changed(struct rq_qos *rqos)
+{
+	RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
+	wbt_update_limits(RQWB(rqos));
+}
+
 static void wbt_exit(struct rq_qos *rqos)
 {
 	struct rq_wb *rwb = RQWB(rqos);
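The new queue_depth_changed callback (wired into wbt_rqos_ops below) turns depth updates into a pull model: rather than sysfs or driver code pushing a value through the now-removed wbt_set_queue_depth(), the rq_qos core notifies each policy and wbt re-reads blk_queue_depth() itself. A hedged sketch of the assumed core-side counterpart (matching the upstream blk_set_queue_depth()/rq_qos_queue_depth_changed() pairing; it is not part of this patch):

	/* Assumed core-side trigger: a driver changing the depth walks the
	 * queue's rq_qos chain, which ends up in wbt_queue_depth_changed().
	 */
	void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
	{
		q->queue_depth = depth;
		rq_qos_queue_depth_changed(q);
	}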
@@ -774,6 +712,93 @@
 }
 EXPORT_SYMBOL_GPL(wbt_disable_default);

+#ifdef CONFIG_BLK_DEBUG_FS
+static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
+{
+	struct rq_qos *rqos = data;
+	struct rq_wb *rwb = RQWB(rqos);
+
+	seq_printf(m, "%llu\n", rwb->cur_win_nsec);
+	return 0;
+}
+
+static int wbt_enabled_show(void *data, struct seq_file *m)
+{
+	struct rq_qos *rqos = data;
+	struct rq_wb *rwb = RQWB(rqos);
+
+	seq_printf(m, "%d\n", rwb->enable_state);
+	return 0;
+}
+
+static int wbt_id_show(void *data, struct seq_file *m)
+{
+	struct rq_qos *rqos = data;
+
+	seq_printf(m, "%u\n", rqos->id);
+	return 0;
+}
+
+static int wbt_inflight_show(void *data, struct seq_file *m)
+{
+	struct rq_qos *rqos = data;
+	struct rq_wb *rwb = RQWB(rqos);
+	int i;
+
+	for (i = 0; i < WBT_NUM_RWQ; i++)
+		seq_printf(m, "%d: inflight %d\n", i,
+			   atomic_read(&rwb->rq_wait[i].inflight));
+	return 0;
+}
+
+static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
+{
+	struct rq_qos *rqos = data;
+	struct rq_wb *rwb = RQWB(rqos);
+
+	seq_printf(m, "%lu\n", rwb->min_lat_nsec);
+	return 0;
+}
+
+static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
+{
+	struct rq_qos *rqos = data;
+	struct rq_wb *rwb = RQWB(rqos);
+
+	seq_printf(m, "%u\n", rwb->unknown_cnt);
+	return 0;
+}
+
+static int wbt_normal_show(void *data, struct seq_file *m)
+{
+	struct rq_qos *rqos = data;
+	struct rq_wb *rwb = RQWB(rqos);
+
+	seq_printf(m, "%u\n", rwb->wb_normal);
+	return 0;
+}
+
+static int wbt_background_show(void *data, struct seq_file *m)
+{
+	struct rq_qos *rqos = data;
+	struct rq_wb *rwb = RQWB(rqos);
+
+	seq_printf(m, "%u\n", rwb->wb_background);
+	return 0;
+}
+
+static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
+	{"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
+	{"enabled", 0400, wbt_enabled_show},
+	{"id", 0400, wbt_id_show},
+	{"inflight", 0400, wbt_inflight_show},
+	{"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
+	{"unknown_cnt", 0400, wbt_unknown_cnt_show},
+	{"wb_normal", 0400, wbt_normal_show},
+	{"wb_background", 0400, wbt_background_show},
+	{},
+};
+#endif

 static struct rq_qos_ops wbt_rqos_ops = {
 	.throttle = wbt_wait,
@@ -782,7 +807,11 @@
 	.requeue = wbt_requeue,
 	.done = wbt_done,
 	.cleanup = wbt_cleanup,
+	.queue_depth_changed = wbt_queue_depth_changed,
 	.exit = wbt_exit,
+#ifdef CONFIG_BLK_DEBUG_FS
+	.debugfs_attrs = wbt_debugfs_attrs,
+#endif
 };

 int wbt_init(struct request_queue *q)
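The debugfs_attrs table added above plugs into the blk-mq debugfs machinery, which typically creates one read-only file per entry under the queue's rqos/wbt debugfs directory. As an illustration of the pattern only — wbt_max_depth_show and its attribute entry are hypothetical and not part of the patch:

	/* Hypothetical extra attribute, showing the pattern. */
	static int wbt_max_depth_show(void *data, struct seq_file *m)
	{
		struct rq_qos *rqos = data;
		struct rq_wb *rwb = RQWB(rqos);

		seq_printf(m, "%u\n", rwb->rq_depth.max_depth);
		return 0;
	}

	/* ...and its entry in wbt_debugfs_attrs[]:
	 *	{"max_depth", 0400, wbt_max_depth_show},
	 */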
@@ -809,20 +838,17 @@
 	rwb->last_comp = rwb->last_issue = jiffies;
 	rwb->win_nsec = RWB_WINDOW_NSEC;
 	rwb->enable_state = WBT_STATE_ON_DEFAULT;
-	rwb->wc = 1;
+	rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags);
 	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
-	__wbt_update_limits(rwb);
+	rwb->min_lat_nsec = wbt_default_latency_nsec(q);
+
+	wbt_queue_depth_changed(&rwb->rqos);

 	/*
 	 * Assign rwb and add the stats callback.
 	 */
 	rq_qos_add(q, &rwb->rqos);
 	blk_stat_add_callback(q, rwb->cb);
-
-	rwb->min_lat_nsec = wbt_default_latency_nsec(q);
-
-	wbt_set_queue_depth(q, blk_queue_depth(q));
-	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

 	return 0;
 }