2024-05-16  commit 8d2a02b24d66aa359e83eebc1ed3c0f85367a1cb

--- a/kernel/include/linux/buffer_head.h
+++ b/kernel/include/linux/buffer_head.h
@@ -22,9 +22,6 @@
 	BH_Dirty,	/* Is dirty */
 	BH_Lock,	/* Is locked */
 	BH_Req,		/* Has been submitted for I/O */
-	BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
-			 * IO completion of other buffers in the page
-			 */
 
 	BH_Mapped,	/* Has a disk mapping */
 	BH_New,		/* Disk mapping was newly created by get_block */
@@ -76,49 +73,10 @@
 	struct address_space *b_assoc_map;	/* mapping this buffer is
 						   associated with */
 	atomic_t b_count;		/* users using this buffer_head */
-#ifdef CONFIG_PREEMPT_RT_BASE
-	spinlock_t b_uptodate_lock;
-#if IS_ENABLED(CONFIG_JBD2)
-	spinlock_t b_state_lock;
-	spinlock_t b_journal_head_lock;
-#endif
-#endif
+	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
+					 * serialise IO completion of other
+					 * buffers in the page */
 };
-
-static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
-{
-	unsigned long flags;
-
-#ifndef CONFIG_PREEMPT_RT_BASE
-	local_irq_save(flags);
-	bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
-#else
-	spin_lock_irqsave(&bh->b_uptodate_lock, flags);
-#endif
-	return flags;
-}
-
-static inline void
-bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
-{
-#ifndef CONFIG_PREEMPT_RT_BASE
-	bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
-	local_irq_restore(flags);
-#else
-	spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
-#endif
-}
-
-static inline void buffer_head_init_locks(struct buffer_head *bh)
-{
-#ifdef CONFIG_PREEMPT_RT_BASE
-	spin_lock_init(&bh->b_uptodate_lock);
-#if IS_ENABLED(CONFIG_JBD2)
-	spin_lock_init(&bh->b_state_lock);
-	spin_lock_init(&bh->b_journal_head_lock);
-#endif
-#endif
-}
 
 /*
  * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
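
Note: together with the first hunk, this removes the PREEMPT_RT-specific locking wholesale: the BH_Uptodate_Lock bit spinlock, the bh_uptodate_lock_irqsave()/bh_uptodate_unlock_irqrestore() wrappers, and buffer_head_init_locks() all go away, and b_uptodate_lock becomes a regular spinlock_t on every configuration. A minimal caller-side sketch of what an I/O-completion path looks like once the wrappers are gone (hypothetical function name, modeled on the end_buffer_async_read() path in fs/buffer.c, which this diff does not show):

#include <linux/buffer_head.h>

/* Sketch only; assumes the usual buffer_head environment of fs/buffer.c. */
static void end_buffer_async_read_sketch(struct buffer_head *bh, int uptodate)
{
	/*
	 * The first bh in the page owns the lock that serialises
	 * completion of every buffer in that page.
	 */
	struct buffer_head *first = page_buffers(bh->b_page);
	unsigned long flags;

	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	/* ... update this bh, test whether all buffers in the page are done ... */
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
}

The design point is that a plain spinlock_t works on both mainline and RT (where it becomes a sleeping lock), so the dual bit_spin_lock/spinlock code paths are no longer needed.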
@@ -159,7 +117,6 @@
  * of the form "mark_buffer_foo()". These are higher-level functions which
  * do something in addition to setting a b_state bit.
  */
-BUFFER_FNS(Uptodate, uptodate)
 BUFFER_FNS(Dirty, dirty)
 TAS_BUFFER_FNS(Dirty, dirty)
 BUFFER_FNS(Lock, locked)
@@ -176,6 +133,41 @@
 BUFFER_FNS(Meta, meta)
 BUFFER_FNS(Prio, prio)
 BUFFER_FNS(Defer_Completion, defer_completion)
+
+static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
+{
+	/*
+	 * If somebody else already set this uptodate, they will
+	 * have done the memory barrier, and a reader will thus
+	 * see *some* valid buffer state.
+	 *
+	 * Any other serialization (with IO errors or whatever that
+	 * might clear the bit) has to come from other state (eg BH_Lock).
+	 */
+	if (test_bit(BH_Uptodate, &bh->b_state))
+		return;
+
+	/*
+	 * make it consistent with folio_mark_uptodate
+	 * pairs with smp_load_acquire in buffer_uptodate
+	 */
+	smp_mb__before_atomic();
+	set_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
+{
+	clear_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline int buffer_uptodate(const struct buffer_head *bh)
+{
+	/*
+	 * make it consistent with folio_test_uptodate
+	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
+	 */
+	return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
+}
 
 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
 
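
Note: these open-coded helpers replace the generic BUFFER_FNS(Uptodate, uptodate) expansion removed in the previous hunk, so that the uptodate bit carries real ordering. smp_mb__before_atomic() orders all prior stores (the buffer contents) before the set_bit(), because set_bit() alone gives no ordering; the reader's smp_load_acquire() orders the bit test before any subsequent loads. A sketch of the contract this buys, with hypothetical producer/consumer functions (consume() and src are stand-ins, not real kernel API):

void consume(const void *data);	/* hypothetical sink for the payload */

static void producer_sketch(struct buffer_head *bh, const void *src)
{
	memcpy(bh->b_data, src, bh->b_size);	/* A: fill the buffer           */
	set_buffer_uptodate(bh);		/* B: full barrier, then set bit */
}

static void consumer_sketch(struct buffer_head *bh)
{
	if (buffer_uptodate(bh))	/* acquire load: if B is observed ...  */
		consume(bh->b_data);	/* ... A is guaranteed visible here    */
}

Without the barrier pair, the consumer could see the bit set yet read stale buffer contents on a weakly ordered machine; the comments in the hunk tie this to the matching folio_mark_uptodate()/folio_test_uptodate() behaviour.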
@@ -236,6 +228,8 @@
 struct buffer_head *__bread_gfp(struct block_device *,
 				sector_t block, unsigned size, gfp_t gfp);
 void invalidate_bh_lrus(void);
+void invalidate_bh_lrus_cpu(void);
+bool has_bh_in_lru(int cpu, void *dummy);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void unlock_buffer(struct buffer_head *bh);
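
Note: the two new declarations support draining the per-CPU buffer_head LRU caches selectively rather than on every CPU. has_bh_in_lru() is shaped as a per-CPU predicate (hence the unused void *dummy), and invalidate_bh_lrus_cpu() drains the LRU of whichever CPU it runs on. A sketch of the intended pattern, with a hypothetical work item (the real caller lives in mm/ and is not part of this diff):

#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/buffer_head.h>

static DEFINE_PER_CPU(struct work_struct, bh_drain_work);

static void bh_lru_drain_fn(struct work_struct *work)
{
	invalidate_bh_lrus_cpu();	/* runs on, and so drains, the target CPU */
}

static void drain_all_bh_lrus_sketch(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (!has_bh_in_lru(cpu, NULL))	/* skip CPUs with nothing cached */
			continue;
		INIT_WORK(&per_cpu(bh_drain_work, cpu), bh_lru_drain_fn);
		schedule_work_on(cpu, &per_cpu(bh_drain_work, cpu));
	}
}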
@@ -286,7 +280,7 @@
 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 				get_block_t get_block);
 /* Convert errno to return value from ->page_mkwrite() call */
-static inline int block_page_mkwrite_return(int err)
+static inline vm_fault_t block_page_mkwrite_return(int err)
 {
 	if (err == 0)
 		return VM_FAULT_LOCKED;
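
Note: block_page_mkwrite_return() now returns vm_fault_t, the dedicated type for fault-handler return codes, instead of a plain int; the body still translates errno values into VM_FAULT_* bits. A usage sketch for a filesystem's ->page_mkwrite handler (the myfs_* names, including the get_block_t implementation, are hypothetical):

static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	int err;

	/* myfs_get_block is a hypothetical get_block_t implementation. */
	err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
	return block_page_mkwrite_return(err);	/* errno -> VM_FAULT_* */
}

The type change lets sparse and the compiler catch handlers that accidentally return a raw errno where the VM core expects VM_FAULT_* codes.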
@@ -313,14 +307,6 @@
 /*
  * inline definitions
  */
-
-static inline void attach_page_buffers(struct page *page,
-				       struct buffer_head *head)
-{
-	get_page(page);
-	SetPagePrivate(page);
-	set_page_private(page, (unsigned long)head);
-}
 
 static inline void get_bh(struct buffer_head *bh)
 {
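
Note: attach_page_buffers() disappears from this header. Mainline made the same removal after converting callers to the generic attach_page_private() helper in <linux/pagemap.h>; assuming this tree follows suit, the replacement performs the same three steps, just with the private pointer stored before the flag is set (reference sketch reproduced from memory, not part of this diff):

static inline void attach_page_private(struct page *page, void *data)
{
	get_page(page);					/* pin the page            */
	set_page_private(page, (unsigned long)data);	/* stash e.g. the bh list  */
	SetPagePrivate(page);				/* mark ->private in use   */
}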
@@ -456,6 +442,9 @@
 static inline void invalidate_inode_buffers(struct inode *inode) {}
 static inline int remove_inode_buffers(struct inode *inode) { return 1; }
 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
+static inline void invalidate_bh_lrus_cpu(void) {}
+static inline bool has_bh_in_lru(int cpu, void *dummy) { return 0; }
+#define buffer_heads_over_limit 0
 
 #endif /* CONFIG_BLOCK */
 #endif /* _LINUX_BUFFER_HEAD_H */
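
Note: the new !CONFIG_BLOCK stubs mirror the declarations added earlier so generic code stays free of #ifdef clutter: with the block layer configured out, the calls compile to empty inlines and buffer_heads_over_limit to a constant-false test. A sketch of the effect on a hypothetical caller:

static void reclaim_path_sketch(void)
{
	if (buffer_heads_over_limit)		/* constant 0 without CONFIG_BLOCK */
		invalidate_bh_lrus_cpu();	/* empty inline stub there         */
}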