2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/md/bcache/bcache.h
@@ -176,7 +176,7 @@
  * - updates to non leaf nodes just happen synchronously (see btree_split()).
  */
 
-#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
+#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
 
 #include <linux/bcache.h>
 #include <linux/bio.h>
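With the trailing "\n" dropped from pr_fmt(), each pr_<level>() call site is expected to carry its own newline. A minimal, hypothetical sketch of the expansion (the helper and message below are illustrative, not part of the patch):

/* Hypothetical call site, assuming the reworked pr_fmt() above: */
static void example_log(void)
{
	pr_info("registered device\n");
	/*
	 * Expands roughly to:
	 *   printk(KERN_INFO "bcache: %s() " "registered device\n", __func__);
	 * i.e. the newline now comes from the caller, not from pr_fmt().
	 */
}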
@@ -301,6 +301,7 @@
 	struct block_device	*bdev;
 
 	struct cache_sb		sb;
+	struct cache_sb_disk	*sb_disk;
 	struct bio		sb_bio;
 	struct bio_vec		sb_bv[1];
 	struct closure		sb_write;
@@ -406,6 +407,7 @@
 struct cache {
 	struct cache_set	*set;
 	struct cache_sb		sb;
+	struct cache_sb_disk	*sb_disk;
 	struct bio		sb_bio;
 	struct bio_vec		sb_bv[1];
 
@@ -515,11 +517,7 @@
 	atomic_t		idle_counter;
 	atomic_t		at_max_writeback_rate;
 
-	struct cache_sb		sb;
-
-	struct cache		*cache[MAX_CACHES_PER_SET];
-	struct cache		*cache_by_alloc[MAX_CACHES_PER_SET];
-	int			caches_loaded;
+	struct cache		*cache;
 
 	struct bcache_device	**devices;
 	unsigned int		devices_max_used;
@@ -631,6 +629,20 @@
 	struct bkey		gc_done;
 
 	/*
+	 * For automatic garbage collection after writeback completes, this
+	 * variable is used as bit fields:
+	 * - 0000 0001b (BCH_ENABLE_AUTO_GC): enable gc after writeback
+	 * - 0000 0010b (BCH_DO_AUTO_GC): do gc after writeback
+	 * This is an optimization for write requests that follow once
+	 * writeback finishes, when the read hit rate would otherwise drop
+	 * because clean data on the cache has been discarded. Unless the
+	 * user explicitly sets it via sysfs, it won't be enabled.
+	 */
+#define BCH_ENABLE_AUTO_GC	1
+#define BCH_DO_AUTO_GC		2
+	uint8_t			gc_after_writeback;
+
+	/*
 	 * The allocation code needs gc_mark in struct bucket to be correct, but
 	 * it's not while a gc is in progress. Protected by bucket_lock.
 	 */
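The two flags are meant to work together: the sysfs knob enables the feature, and the writeback path then arms the actual gc trigger only when it is enabled. A hedged sketch of that interaction (the helper name is hypothetical, not part of this header):

/* Hypothetical helper built on the flags defined above: */
static inline void example_arm_gc_after_writeback(struct cache_set *c)
{
	/* Only request a post-writeback gc if the user enabled it via sysfs. */
	if (c->gc_after_writeback & BCH_ENABLE_AUTO_GC)
		c->gc_after_writeback |= BCH_DO_AUTO_GC;
}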
@@ -654,6 +666,7 @@
 	struct mutex		verify_lock;
 #endif
 
+	uint8_t			set_uuid[16];
 	unsigned int		nr_uuids;
 	struct uuid_entry	*uuids;
 	BKEY_PADDED(uuid_bucket);
@@ -662,7 +675,11 @@
 
 	/*
 	 * A btree node on disk could have too many bsets for an iterator to fit
-	 * on the stack - have to dynamically allocate them
+	 * on the stack - have to dynamically allocate them.
+	 * bch_cache_set_alloc() will make sure the pool can allocate iterators
+	 * with enough room to host
+	 * (sb.bucket_size / sb.block_size)
+	 * btree_iter_sets, which is more than the static MAX_BSETS.
 	 */
 	mempool_t		fill_iter;
 
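The sizing arithmetic the comment describes can be sketched as below; this is an illustration only, the helper is hypothetical, and the exact expression used by bch_cache_set_alloc() in super.c may differ slightly:

/* Hypothetical sizing sketch, assuming struct btree_iter / btree_iter_set: */
static inline size_t example_fill_iter_size(struct cache_sb *sb)
{
	/* One btree_iter_set per block in a bucket, beyond the fixed header. */
	return sizeof(struct btree_iter) +
	       (sb->bucket_size / sb->block_size) *
	       sizeof(struct btree_iter_set);
}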
@@ -691,8 +708,8 @@
 	atomic_long_t		writeback_keys_failed;
 
 	atomic_long_t		reclaim;
+	atomic_long_t		reclaimed_journal_buckets;
 	atomic_long_t		flush_write;
-	atomic_long_t		retry_flush_write;
 
 	enum			{
 		ON_ERROR_UNREGISTER,
@@ -709,6 +726,7 @@
 	unsigned int		gc_always_rewrite:1;
 	unsigned int		shrinker_disabled:1;
 	unsigned int		copy_gc_enabled:1;
+	unsigned int		idle_max_writeback_rate_enabled:1;
 
 #define BUCKET_HASH_BITS	12
 	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
@@ -737,15 +755,35 @@
 #define btree_default_blocks(c)						\
 	((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
 
-#define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
-#define bucket_bytes(c)		((c)->sb.bucket_size << 9)
-#define block_bytes(c)		((c)->sb.block_size << 9)
+#define bucket_bytes(ca)	((ca)->sb.bucket_size << 9)
+#define block_bytes(ca)		((ca)->sb.block_size << 9)
 
-#define prios_per_bucket(c)				\
-	((bucket_bytes(c) - sizeof(struct prio_set)) /	\
+static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
+{
+	unsigned int n, max_pages;
+
+	max_pages = min_t(unsigned int,
+			  __rounddown_pow_of_two(USHRT_MAX) / PAGE_SECTORS,
+			  MAX_ORDER_NR_PAGES);
+
+	n = sb->bucket_size / PAGE_SECTORS;
+	if (n > max_pages)
+		n = max_pages;
+
+	return n;
+}
+
+static inline unsigned int meta_bucket_bytes(struct cache_sb *sb)
+{
+	return meta_bucket_pages(sb) << PAGE_SHIFT;
+}
+
+#define prios_per_bucket(ca)						\
+	((meta_bucket_bytes(&(ca)->sb) - sizeof(struct prio_set)) /	\
 	 sizeof(struct bucket_disk))
-#define prio_buckets(c)					\
-	DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))
+
+#define prio_buckets(ca)						\
+	DIV_ROUND_UP((size_t) (ca)->sb.nbuckets, prios_per_bucket(ca))
 
 static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
 {
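A short usage sketch of how the new helpers compose; the helper below is hypothetical and simply mirrors what prio_buckets(ca) computes, with the metadata bucket size capped by meta_bucket_pages() rather than taken straight from sb.bucket_size:

/* Hypothetical helper built on the definitions above: */
static inline size_t example_prio_bucket_count(struct cache *ca)
{
	/* Metadata bytes available in one (possibly capped) meta bucket. */
	size_t avail = meta_bucket_bytes(&ca->sb) - sizeof(struct prio_set);

	/* Number of struct bucket_disk entries that fit per prio bucket... */
	size_t per_bucket = avail / sizeof(struct bucket_disk);

	/* ...and how many prio buckets are needed to cover all nbuckets. */
	return DIV_ROUND_UP((size_t)ca->sb.nbuckets, per_bucket);
}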
@@ -759,14 +797,14 @@
 
 static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
 {
-	return s & (c->sb.bucket_size - 1);
+	return s & (c->cache->sb.bucket_size - 1);
 }
 
 static inline struct cache *PTR_CACHE(struct cache_set *c,
				       const struct bkey *k,
				       unsigned int ptr)
 {
-	return c->cache[PTR_DEV(k, ptr)];
+	return c->cache;
 }
 
 static inline size_t PTR_BUCKET_NR(struct cache_set *c,
@@ -847,9 +885,6 @@
 
 /* Looping macros */
 
-#define for_each_cache(ca, cs, iter)					\
-	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
-
 #define for_each_bucket(b, ca)						\
 	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
 	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
@@ -891,11 +926,9 @@
 
 static inline void wake_up_allocators(struct cache_set *c)
 {
-	struct cache *ca;
-	unsigned int i;
+	struct cache *ca = c->cache;
 
-	for_each_cache(ca, c, i)
-		wake_up_process(ca->alloc_thread);
+	wake_up_process(ca->alloc_thread);
 }
 
 static inline void closure_bio_submit(struct cache_set *c,
@@ -908,7 +941,7 @@
 		bio_endio(bio);
 		return;
 	}
-	generic_make_request(bio);
+	submit_bio_noacct(bio);
 }
 
 /*
@@ -952,9 +985,9 @@
 
 long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
 int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-			   struct bkey *k, int n, bool wait);
+			   struct bkey *k, bool wait);
 int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-			 struct bkey *k, int n, bool wait);
+			 struct bkey *k, bool wait);
 bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
		       unsigned int sectors, unsigned int write_point,
		       unsigned int write_prio, bool wait);
@@ -968,6 +1001,7 @@
 
 extern struct workqueue_struct *bcache_wq;
 extern struct workqueue_struct *bch_journal_wq;
+extern struct workqueue_struct *bch_flush_wq;
 extern struct mutex bch_register_lock;
 extern struct list_head bch_cache_sets;
 
@@ -990,7 +1024,7 @@
 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
			  uint8_t *set_uuid);
 void bch_cached_dev_detach(struct cached_dev *dc);
-void bch_cached_dev_run(struct cached_dev *dc);
+int bch_cached_dev_run(struct cached_dev *dc);
 void bcache_device_stop(struct bcache_device *d);
 
 void bch_cache_set_unregister(struct cache_set *c);
@@ -1006,8 +1040,10 @@
 int bch_cache_allocator_start(struct cache *ca);
 
 void bch_debug_exit(void);
-void bch_debug_init(struct kobject *kobj);
+void bch_debug_init(void);
 void bch_request_exit(void);
 int bch_request_init(void);
+void bch_btree_exit(void);
+int bch_btree_init(void);
 
 #endif /* _BCACHE_H */