--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -22,9 +22,6 @@
 	BH_Dirty,	/* Is dirty */
 	BH_Lock,	/* Is locked */
 	BH_Req,		/* Has been submitted for I/O */
-	BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
-			 * IO completion of other buffers in the page
-			 */
 
 	BH_Mapped,	/* Has a disk mapping */
 	BH_New,		/* Disk mapping was newly created by get_block */
@@ -76,6 +73,9 @@
 	struct address_space *b_assoc_map;	/* mapping this buffer is
 						   associated with */
 	atomic_t b_count;		/* users using this buffer_head */
+	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
+					 * serialise IO completion of other
+					 * buffers in the page */
 };
 
 /*
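Together with the first hunk, this moves the page-level completion serialisation from a bit spinlock (the removed BH_Uptodate_Lock flag, taken via bit_spin_lock() on b_state) to a dedicated spinlock_t embedded in the buffer_head. As a rough sketch of how a completion path might use the new lock (the function name and body here are illustrative, not part of this diff):

```c
#include <linux/buffer_head.h>
#include <linux/spinlock.h>

/* Illustrative only: serialise completion of all buffers in a page by
 * taking the first buffer's b_uptodate_lock, where the old code did
 * bit_spin_lock(BH_Uptodate_Lock, &bh->b_state) on the first bh.
 */
static void example_end_buffer_async(struct buffer_head *bh, int uptodate)
{
	struct buffer_head *first = page_buffers(bh->b_page);
	unsigned long flags;

	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	/* ... check whether every bh in the page is now up to date ... */
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
}
```

Unlike a bit spinlock, a real spinlock_t stays preemption-friendly on PREEMPT_RT and is visible to lockdep, at the cost of a few extra bytes per buffer_head.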
@@ -117,7 +117,6 @@
  * of the form "mark_buffer_foo()". These are higher-level functions which
  * do something in addition to setting a b_state bit.
  */
-BUFFER_FNS(Uptodate, uptodate)
 BUFFER_FNS(Dirty, dirty)
 TAS_BUFFER_FNS(Dirty, dirty)
 BUFFER_FNS(Lock, locked)
@@ -134,6 +133,41 @@
 BUFFER_FNS(Meta, meta)
 BUFFER_FNS(Prio, prio)
 BUFFER_FNS(Defer_Completion, defer_completion)
+
+static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
+{
+	/*
+	 * If somebody else already set this uptodate, they will
+	 * have done the memory barrier, and a reader will thus
+	 * see *some* valid buffer state.
+	 *
+	 * Any other serialization (with IO errors or whatever that
+	 * might clear the bit) has to come from other state (eg BH_Lock).
+	 */
+	if (test_bit(BH_Uptodate, &bh->b_state))
+		return;
+
+	/*
+	 * make it consistent with folio_mark_uptodate
+	 * pairs with smp_load_acquire in buffer_uptodate
+	 */
+	smp_mb__before_atomic();
+	set_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
+{
+	clear_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline int buffer_uptodate(const struct buffer_head *bh)
+{
+	/*
+	 * make it consistent with folio_test_uptodate
+	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
+	 */
+	return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
+}
 
 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
 
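These open-coded helpers replace the plain BUFFER_FNS(Uptodate, uptodate) accessors removed in the previous hunk, so that the uptodate bit carries release/acquire semantics mirroring folio_mark_uptodate()/folio_test_uptodate(). A hypothetical producer/consumer pair (names invented for illustration) shows what the pairing buys:

```c
#include <linux/buffer_head.h>

/* Illustrative only. The smp_mb__before_atomic() in set_buffer_uptodate()
 * orders the payload store before the flag store; the smp_load_acquire()
 * in buffer_uptodate() orders the flag load before later data loads, so a
 * reader that sees the bit set is guaranteed to see the payload too.
 */
static void example_publish(struct buffer_head *bh, char byte)
{
	bh->b_data[0] = byte;		/* write the payload first */
	set_buffer_uptodate(bh);	/* then publish it */
}

static int example_consume(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh))	/* acquire: pairs with the publish */
		return -1;
	return bh->b_data[0];		/* sees the byte written above */
}
```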
@@ -194,6 +228,8 @@
 struct buffer_head *__bread_gfp(struct block_device *,
 				sector_t block, unsigned size, gfp_t gfp);
 void invalidate_bh_lrus(void);
+void invalidate_bh_lrus_cpu(void);
+bool has_bh_in_lru(int cpu, void *dummy);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void unlock_buffer(struct buffer_head *bh);
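The two new declarations expose per-CPU control over the buffer-head LRU cache. The (int cpu, void *dummy) shape of has_bh_in_lru() matches the kernel's smp_cond_func_t predicate type, which suggests it is meant for filtering conditional cross-CPU calls; a speculative caller (not part of this diff) might use it to skip CPUs whose LRU is already empty:

```c
#include <linux/buffer_head.h>
#include <linux/cpumask.h>

/* Illustrative only: build a mask of CPUs that actually hold cached
 * buffer_heads, so that per-CPU drain work (which would end up calling
 * invalidate_bh_lrus_cpu() locally) can skip the idle ones.
 */
static void example_mask_cpus_with_bhs(struct cpumask *mask)
{
	int cpu;

	cpumask_clear(mask);
	for_each_online_cpu(cpu)
		if (has_bh_in_lru(cpu, NULL))
			cpumask_set_cpu(cpu, mask);
}
```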
@@ -244,7 +280,7 @@
 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 				get_block_t get_block);
 /* Convert errno to return value from ->page_mkwrite() call */
-static inline int block_page_mkwrite_return(int err)
+static inline vm_fault_t block_page_mkwrite_return(int err)
 {
 	if (err == 0)
 		return VM_FAULT_LOCKED;
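Switching block_page_mkwrite_return() to vm_fault_t aligns it with the typed fault codes that ->page_mkwrite handlers return, so callers no longer mix errnos and VM_FAULT_* values in a plain int. A sketch of a filesystem handler using it (the example_* names and the get_block callback are invented for illustration):

```c
#include <linux/buffer_head.h>
#include <linux/mm.h>

/* Hypothetical get_block_t implementation, declared only for the sketch. */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);

/* Illustrative ->page_mkwrite handler: with the new return type, the
 * helper's result is already a vm_fault_t and needs no cast.
 */
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	int err = block_page_mkwrite(vmf->vma, vmf, example_get_block);

	return block_page_mkwrite_return(err);
}
```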
@@ -271,14 +307,6 @@
 /*
  * inline definitions
  */
-
-static inline void attach_page_buffers(struct page *page,
-		struct buffer_head *head)
-{
-	get_page(page);
-	SetPagePrivate(page);
-	set_page_private(page, (unsigned long)head);
-}
 
 static inline void get_bh(struct buffer_head *bh)
 {
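The removed attach_page_buffers() appears to correspond step for step to the generic attach_page_private() helper in <linux/pagemap.h> (get_page(), the private pointer, and the PagePrivate flag); assuming that is the intended replacement, a former caller would presumably migrate like this:

```c
#include <linux/pagemap.h>
#include <linux/buffer_head.h>

/* Illustrative migration only: the generic helper performs the same
 * get_page() + private-pointer + SetPagePrivate() steps as the removed
 * buffer-head-specific one.
 */
static void example_attach(struct page *page, struct buffer_head *head)
{
	attach_page_private(page, head);
}
```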
@@ -414,6 +442,9 @@
 static inline void invalidate_inode_buffers(struct inode *inode) {}
 static inline int remove_inode_buffers(struct inode *inode) { return 1; }
 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
+static inline void invalidate_bh_lrus_cpu(void) {}
+static inline bool has_bh_in_lru(int cpu, void *dummy) { return 0; }
+#define buffer_heads_over_limit 0
 
 #endif /* CONFIG_BLOCK */
 #endif /* _LINUX_BUFFER_HEAD_H */