
Author: hc
Date:   2024-05-13
Commit: 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
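
Note: several of the store handlers in this diff switch from sysfs_strtoul()
to the stricter sysfs_strtoul_bool() and sysfs_strtoul_clamp() helpers. Those
macros are defined in bcache's sysfs.h, not in this file; the sketch below
shows roughly what the new call sites expand to (reconstructed from upstream,
so treat the exact bodies as an assumption, they are not part of this change):

	/* act only when `attr` names this macro's own sysfs file */
	#define sysfs_strtoul_bool(file, var)				\
	do {								\
		if (attr == &sysfs_ ## file) {				\
			unsigned long v = strtoul_or_return(buf);	\
									\
			var = v ? 1 : 0;	/* force strict 0/1 */	\
			return size;					\
		}							\
	} while (0)

	/* parse an unsigned value and clamp it into [min, max] */
	#define sysfs_strtoul_clamp(file, var, min, max)		\
	do {								\
		if (attr == &sysfs_ ## file) {				\
			unsigned long v = 0;				\
			ssize_t ret;					\
									\
			ret = strtoul_safe_clamp(buf, v, min, max);	\
			if (ret)					\
				return ret;				\
			var = v;					\
			return size;					\
		}							\
	} while (0)
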
diff --git a/kernel/drivers/md/bcache/sysfs.c b/kernel/drivers/md/bcache/sysfs.c
--- a/kernel/drivers/md/bcache/sysfs.c
+++ b/kernel/drivers/md/bcache/sysfs.c
@@ -11,12 +11,15 @@
 #include "btree.h"
 #include "request.h"
 #include "writeback.h"
+#include "features.h"
 
 #include <linux/blkdev.h>
 #include <linux/sort.h>
 #include <linux/sched/clock.h>
 
-/* Default is -1; we skip past it for struct cached_dev's cache mode */
+extern bool bcache_is_reboot;
+
+/* Default is 0 ("writethrough") */
 static const char * const bch_cache_modes[] = {
 	"writethrough",
 	"writeback",
@@ -31,7 +34,7 @@
 	NULL
 };
 
-/* Default is -1; we skip past it for stop_when_cache_set_failed */
+/* Default is 0 ("auto") */
 static const char * const bch_stop_on_failure_modes[] = {
 	"auto",
 	"always",
@@ -73,6 +76,8 @@
 read_attribute(btree_written);
 read_attribute(metadata_written);
 read_attribute(active_journal_entries);
+read_attribute(backing_dev_name);
+read_attribute(backing_dev_uuid);
 
 sysfs_time_stats_attribute(btree_gc, sec, ms);
 sysfs_time_stats_attribute(btree_split, sec, us);
@@ -84,16 +89,21 @@
 read_attribute(average_key_size);
 read_attribute(dirty_data);
 read_attribute(bset_tree_stats);
+read_attribute(feature_compat);
+read_attribute(feature_ro_compat);
+read_attribute(feature_incompat);
 
 read_attribute(state);
 read_attribute(cache_read_races);
 read_attribute(reclaim);
+read_attribute(reclaimed_journal_buckets);
 read_attribute(flush_write);
-read_attribute(retry_flush_write);
 read_attribute(writeback_keys_done);
 read_attribute(writeback_keys_failed);
 read_attribute(io_errors);
 read_attribute(congested);
+read_attribute(cutoff_writeback);
+read_attribute(cutoff_writeback_sync);
 rw_attribute(congested_read_threshold_us);
 rw_attribute(congested_write_threshold_us);
 
@@ -135,6 +145,8 @@
 rw_attribute(cache_replacement_policy);
 rw_attribute(btree_shrinker_disabled);
 rw_attribute(copy_gc_enabled);
+rw_attribute(idle_max_writeback_rate);
+rw_attribute(gc_after_writeback);
 rw_attribute(size);
 
 static ssize_t bch_snprint_string_list(char *buf,
@@ -146,7 +158,7 @@
 	size_t i;
 
 	for (i = 0; list[i]; i++)
-		out += snprintf(out, buf + size - out,
+		out += scnprintf(out, buf + size - out,
 				i == selected ? "[%s] " : "%s ", list[i]);
 
 	out[-1] = '\n';
@@ -252,6 +264,19 @@
 		return strlen(buf);
 	}
 
+	if (attr == &sysfs_backing_dev_name) {
+		snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
+		strcat(buf, "\n");
+		return strlen(buf);
+	}
+
+	if (attr == &sysfs_backing_dev_uuid) {
+		/* convert binary uuid into 36-byte string plus '\0' */
+		snprintf(buf, 36+1, "%pU", dc->sb.uuid);
+		strcat(buf, "\n");
+		return strlen(buf);
+	}
+
 #undef var
 	return 0;
 }
@@ -265,18 +290,23 @@
 	struct cache_set *c;
 	struct kobj_uevent_env *env;
 
+	/* no user space access if system is rebooting */
+	if (bcache_is_reboot)
+		return -EBUSY;
+
 #define d_strtoul(var)		sysfs_strtoul(var, dc->var)
 #define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
 #define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)
 
 	sysfs_strtoul(data_csum,	dc->disk.data_csum);
 	d_strtoul(verify);
-	d_strtoul(bypass_torture_test);
-	d_strtoul(writeback_metadata);
-	d_strtoul(writeback_running);
-	d_strtoul(writeback_delay);
+	sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
+	sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
+	sysfs_strtoul_bool(writeback_running, dc->writeback_running);
+	sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);
 
-	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
+	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
+			    0, bch_cutoff_writeback);
 
 	if (attr == &sysfs_writeback_rate) {
 		ssize_t ret;
@@ -322,8 +352,11 @@
 		bch_cache_accounting_clear(&dc->accounting);
 
 	if (attr == &sysfs_running &&
-	    strtoul_or_return(buf))
-		bch_cached_dev_run(dc);
+	    strtoul_or_return(buf)) {
+		v = bch_cached_dev_run(dc);
+		if (v)
+			return v;
+	}
 
 	if (attr == &sysfs_cache_mode) {
 		v = __sysfs_match_string(bch_cache_modes, -1, buf);
@@ -392,7 +425,7 @@
 			return size;
 		}
 		if (v == -ENOENT)
-			pr_err("Can't attach %s: cache set not found", buf);
+			pr_err("Can't attach %s: cache set not found\n", buf);
 		return v;
 	}
 
@@ -410,11 +443,32 @@
 	struct cached_dev *dc = container_of(kobj, struct cached_dev,
 					     disk.kobj);
 
+	/* no user space access if system is rebooting */
+	if (bcache_is_reboot)
+		return -EBUSY;
+
 	mutex_lock(&bch_register_lock);
 	size = __cached_dev_store(kobj, attr, buf, size);
 
-	if (attr == &sysfs_writeback_running)
-		bch_writeback_queue(dc);
+	if (attr == &sysfs_writeback_running) {
+		/* dc->writeback_running changed in __cached_dev_store() */
+		if (IS_ERR_OR_NULL(dc->writeback_thread)) {
+			/*
+			 * reject setting it to 1 via sysfs if writeback
+			 * kthread is not created yet.
+			 */
+			if (dc->writeback_running) {
+				dc->writeback_running = false;
+				pr_err("%s: failed to run non-existent writeback thread\n",
+						dc->disk.disk->disk_name);
+			}
+		} else
+			/*
+			 * writeback kthread will check if dc->writeback_running
+			 * is true or false.
+			 */
+			bch_writeback_queue(dc);
+	}
 
 	/*
 	 * Only set BCACHE_DEV_WB_RUNNING when cached device attached to
@@ -466,6 +520,8 @@
 	&sysfs_verify,
 	&sysfs_bypass_torture_test,
 #endif
+	&sysfs_backing_dev_name,
+	&sysfs_backing_dev_uuid,
 	NULL
 };
 KTYPE(bch_cached_dev);
@@ -494,6 +550,10 @@
 	struct bcache_device *d = container_of(kobj, struct bcache_device,
 					       kobj);
 	struct uuid_entry *u = &d->c->uuids[d->id];
+
+	/* no user space access if system is rebooting */
+	if (bcache_is_reboot)
+		return -EBUSY;
 
 	sysfs_strtoul(data_csum,	d->data_csum);
 
@@ -651,10 +711,10 @@
 {
 	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
 
-	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
+	sysfs_print(synchronous,		CACHE_SYNC(&c->cache->sb));
 	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
-	sysfs_hprint(bucket_size,		bucket_bytes(c));
-	sysfs_hprint(block_size,		block_bytes(c));
+	sysfs_hprint(bucket_size,		bucket_bytes(c->cache));
+	sysfs_hprint(block_size,		block_bytes(c->cache));
 	sysfs_print(tree_depth,			c->root->level);
 	sysfs_print(root_usage_percent,		bch_root_usage(c));
 
@@ -677,11 +737,11 @@
 	sysfs_print(reclaim,
 		    atomic_long_read(&c->reclaim));
 
+	sysfs_print(reclaimed_journal_buckets,
+		    atomic_long_read(&c->reclaimed_journal_buckets));
+
 	sysfs_print(flush_write,
 		    atomic_long_read(&c->flush_write));
-
-	sysfs_print(retry_flush_write,
-		    atomic_long_read(&c->retry_flush_write));
 
 	sysfs_print(writeback_keys_done,
 		    atomic_long_read(&c->writeback_keys_done));
@@ -703,6 +763,9 @@
 	sysfs_print(congested_write_threshold_us,
 		    c->congested_write_threshold_us);
 
+	sysfs_print(cutoff_writeback, bch_cutoff_writeback);
+	sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);
+
 	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
 	sysfs_printf(verify,			"%i", c->verify);
 	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
@@ -711,11 +774,21 @@
 	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
 	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
 	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
+	sysfs_printf(idle_max_writeback_rate,	"%i",
+		     c->idle_max_writeback_rate_enabled);
+	sysfs_printf(gc_after_writeback,	"%i", c->gc_after_writeback);
 	sysfs_printf(io_disable,		"%i",
 		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));
 
 	if (attr == &sysfs_bset_tree_stats)
 		return bch_bset_print_stats(c, buf);
+
+	if (attr == &sysfs_feature_compat)
+		return bch_print_cache_set_feature_compat(c, buf, PAGE_SIZE);
+	if (attr == &sysfs_feature_ro_compat)
+		return bch_print_cache_set_feature_ro_compat(c, buf, PAGE_SIZE);
+	if (attr == &sysfs_feature_incompat)
+		return bch_print_cache_set_feature_incompat(c, buf, PAGE_SIZE);
 
 	return 0;
 }
@@ -726,6 +799,10 @@
 	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
 	ssize_t v;
 
+	/* no user space access if system is rebooting */
+	if (bcache_is_reboot)
+		return -EBUSY;
+
 	if (attr == &sysfs_unregister)
 		bch_cache_set_unregister(c);
 
@@ -735,8 +812,8 @@
 	if (attr == &sysfs_synchronous) {
 		bool sync = strtoul_or_return(buf);
 
-		if (sync != CACHE_SYNC(&c->sb)) {
-			SET_CACHE_SYNC(&c->sb, sync);
+		if (sync != CACHE_SYNC(&c->cache->sb)) {
+			SET_CACHE_SYNC(&c->cache->sb, sync);
 			bcache_write_super(c);
 		}
 	}
@@ -760,21 +837,8 @@
 		bch_cache_accounting_clear(&c->accounting);
 	}
 
-	if (attr == &sysfs_trigger_gc) {
-		/*
-		 * Garbage collection thread only works when sectors_to_gc < 0,
-		 * when users write to sysfs entry trigger_gc, most of time
-		 * they want to forcibly triger gargage collection. Here -1 is
-		 * set to c->sectors_to_gc, to make gc_should_run() give a
-		 * chance to permit gc thread to run. "give a chance" means
-		 * before going into gc_should_run(), there is still chance
-		 * that c->sectors_to_gc being set to other positive value. So
-		 * writing sysfs entry trigger_gc won't always make sure gc
-		 * thread takes effect.
-		 */
-		atomic_set(&c->sectors_to_gc, -1);
-		wake_up_gc(c);
-	}
+	if (attr == &sysfs_trigger_gc)
+		force_wake_up_gc(c);
 
 	if (attr == &sysfs_prune_cache) {
 		struct shrink_control sc;
@@ -784,10 +848,12 @@
 		c->shrink.scan_objects(&c->shrink, &sc);
 	}
 
-	sysfs_strtoul(congested_read_threshold_us,
-		      c->congested_read_threshold_us);
-	sysfs_strtoul(congested_write_threshold_us,
-		      c->congested_write_threshold_us);
+	sysfs_strtoul_clamp(congested_read_threshold_us,
+			    c->congested_read_threshold_us,
+			    0, UINT_MAX);
+	sysfs_strtoul_clamp(congested_write_threshold_us,
+			    c->congested_write_threshold_us,
+			    0, UINT_MAX);
 
 	if (attr == &sysfs_errors) {
 		v = __sysfs_match_string(error_actions, -1, buf);
@@ -797,8 +863,7 @@
 		c->on_error = v;
 	}
 
-	if (attr == &sysfs_io_error_limit)
-		c->error_limit = strtoul_or_return(buf);
+	sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX);
 
 	/* See count_io_errors() for why 88 */
 	if (attr == &sysfs_io_error_halflife) {
@@ -818,21 +883,32 @@
 		if (v) {
 			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
 					     &c->flags))
-				pr_warn("CACHE_SET_IO_DISABLE already set");
+				pr_warn("CACHE_SET_IO_DISABLE already set\n");
 		} else {
 			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
 						&c->flags))
-				pr_warn("CACHE_SET_IO_DISABLE already cleared");
+				pr_warn("CACHE_SET_IO_DISABLE already cleared\n");
 		}
 	}
 
-	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
-	sysfs_strtoul(verify,			c->verify);
-	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
+	sysfs_strtoul_clamp(journal_delay_ms,
+			    c->journal_delay_ms,
+			    0, USHRT_MAX);
+	sysfs_strtoul_bool(verify,		c->verify);
+	sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
 	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
-	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
-	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
-	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);
+	sysfs_strtoul_bool(gc_always_rewrite,	c->gc_always_rewrite);
+	sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
+	sysfs_strtoul_bool(copy_gc_enabled,	c->copy_gc_enabled);
+	sysfs_strtoul_bool(idle_max_writeback_rate,
+			   c->idle_max_writeback_rate_enabled);
+
+	/*
+	 * write gc_after_writeback here may overwrite an already set
+	 * BCH_DO_AUTO_GC, it doesn't matter because this flag will be
+	 * set in next chance.
+	 */
+	sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);
 
 	return size;
 }
@@ -848,6 +924,10 @@
 STORE(bch_cache_set_internal)
 {
 	struct cache_set *c = container_of(kobj, struct cache_set, internal);
+
+	/* no user space access if system is rebooting */
+	if (bcache_is_reboot)
+		return -EBUSY;
 
 	return bch_cache_set_store(&c->kobj, attr, buf, size);
 }
@@ -898,8 +978,8 @@
 	&sysfs_bset_tree_stats,
 	&sysfs_cache_read_races,
 	&sysfs_reclaim,
+	&sysfs_reclaimed_journal_buckets,
 	&sysfs_flush_write,
-	&sysfs_retry_flush_write,
 	&sysfs_writeback_keys_done,
 	&sysfs_writeback_keys_failed,
 
@@ -913,13 +993,21 @@
 	&sysfs_gc_always_rewrite,
 	&sysfs_btree_shrinker_disabled,
 	&sysfs_copy_gc_enabled,
+	&sysfs_idle_max_writeback_rate,
+	&sysfs_gc_after_writeback,
 	&sysfs_io_disable,
+	&sysfs_cutoff_writeback,
+	&sysfs_cutoff_writeback_sync,
+	&sysfs_feature_compat,
+	&sysfs_feature_ro_compat,
+	&sysfs_feature_incompat,
 	NULL
 };
 KTYPE(bch_cache_set_internal);
 
 static int __bch_cache_cmp(const void *l, const void *r)
 {
+	cond_resched();
 	return *((uint16_t *)r) - *((uint16_t *)l);
 }
 
@@ -982,8 +1070,6 @@
 		       !cached[n - 1])
 			--n;
 
-		unused = ca->sb.nbuckets - n;
-
 		while (cached < p + n &&
 		       *cached == BTREE_PRIO)
 			cached++, n--;
@@ -1033,6 +1119,10 @@
 	struct cache *ca = container_of(kobj, struct cache, kobj);
 	ssize_t v;
 
+	/* no user space access if system is rebooting */
+	if (bcache_is_reboot)
+		return -EBUSY;
+
 	if (attr == &sysfs_discard) {
 		bool v = strtoul_or_return(buf);
 
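
Note: force_wake_up_gc(), called by the simplified trigger_gc store above, is
a small helper in bcache's btree.h that wraps exactly the open-coded sequence
this diff removes. A sketch reconstructed from the removed lines (the helper
itself is not part of this diff):

	static inline void force_wake_up_gc(struct cache_set *c)
	{
		/*
		 * The gc thread only runs while sectors_to_gc < 0, so store
		 * -1 to give gc_should_run() a chance to let the gc thread
		 * run, then wake it up.  As the removed comment explained,
		 * another CPU may still set c->sectors_to_gc to a positive
		 * value in between, so writing the trigger_gc sysfs entry
		 * does not guarantee that a gc pass actually happens.
		 */
		atomic_set(&c->sectors_to_gc, -1);
		wake_up_gc(c);
	}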