```diff
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -22,9 +22,6 @@
 	BH_Dirty,	/* Is dirty */
 	BH_Lock,	/* Is locked */
 	BH_Req,		/* Has been submitted for I/O */
-	BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
-			 * IO completion of other buffers in the page
-			 */
 
 	BH_Mapped,	/* Has a disk mapping */
 	BH_New,		/* Disk mapping was newly created by get_block */
```
```diff
@@ -76,49 +73,10 @@
 	struct address_space *b_assoc_map;	/* mapping this buffer is
 						   associated with */
 	atomic_t b_count;		/* users using this buffer_head */
-#ifdef CONFIG_PREEMPT_RT_BASE
-	spinlock_t b_uptodate_lock;
-#if IS_ENABLED(CONFIG_JBD2)
-	spinlock_t b_state_lock;
-	spinlock_t b_journal_head_lock;
-#endif
-#endif
+	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
+					 * serialise IO completion of other
+					 * buffers in the page */
 };
-
-static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
-{
-	unsigned long flags;
-
-#ifndef CONFIG_PREEMPT_RT_BASE
-	local_irq_save(flags);
-	bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
-#else
-	spin_lock_irqsave(&bh->b_uptodate_lock, flags);
-#endif
-	return flags;
-}
-
-static inline void
-bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
-{
-#ifndef CONFIG_PREEMPT_RT_BASE
-	bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
-	local_irq_restore(flags);
-#else
-	spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
-#endif
-}
-
-static inline void buffer_head_init_locks(struct buffer_head *bh)
-{
-#ifdef CONFIG_PREEMPT_RT_BASE
-	spin_lock_init(&bh->b_uptodate_lock);
-#if IS_ENABLED(CONFIG_JBD2)
-	spin_lock_init(&bh->b_state_lock);
-	spin_lock_init(&bh->b_journal_head_lock);
-#endif
-#endif
-}
 
 /*
  * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
```
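Taken together, the two hunks above replace the `BH_Uptodate_Lock` bit spinlock (and its `CONFIG_PREEMPT_RT_BASE` wrappers) with an unconditional `spinlock_t b_uptodate_lock` in every buffer_head. A bit spinlock in `b_state` is a pure busy-wait taken with interrupts disabled, which PREEMPT_RT cannot tolerate; a regular `spinlock_t` keeps the same cost on non-RT kernels while becoming a sleeping lock on RT. Below is a minimal sketch of how an I/O completion path uses the new field, loosely modelled on `end_buffer_async_read()` in fs/buffer.c; error handling and page-state finalisation are elided, and the function name is ours:

```c
#include <linux/buffer_head.h>

/* Sketch only: simplified from end_buffer_async_read() in fs/buffer.c. */
static void example_end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	/* Every buffer in a page serialises on the *first* bh's lock. */
	struct buffer_head *first = page_buffers(bh->b_page);
	struct buffer_head *tmp;
	unsigned long flags;

	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);

	/* Was: flags = bh_uptodate_lock_irqsave(first); */
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);

	/* Is any sibling buffer in this page still under I/O? */
	for (tmp = bh->b_this_page; tmp != bh; tmp = tmp->b_this_page) {
		if (buffer_async_read(tmp)) {
			/* Was: bh_uptodate_unlock_irqrestore(first, flags); */
			spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	/* Last completion for this page: finalise page state here. */
}
```

Note that `buffer_head_init_locks()` disappears entirely: with the lock unconditional, initialisation becomes a plain `spin_lock_init()` in the buffer_head allocation path rather than a config-dependent helper.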
```diff
@@ -159,7 +117,6 @@
  * of the form "mark_buffer_foo()".  These are higher-level functions which
  * do something in addition to setting a b_state bit.
  */
-BUFFER_FNS(Uptodate, uptodate)
 BUFFER_FNS(Dirty, dirty)
 TAS_BUFFER_FNS(Dirty, dirty)
 BUFFER_FNS(Lock, locked)
```
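`BUFFER_FNS(Uptodate, uptodate)` is dropped from this list because the generic macro emits plain, unordered bit operations, while the uptodate accessors need memory barriers; they are open-coded in the next hunk. For reference, this is roughly what an instance such as `BUFFER_FNS(Dirty, dirty)` expands to (simplified from the macro defined earlier in this header):

```c
/* Approximate expansion of BUFFER_FNS(Dirty, dirty), for illustration. */
static __always_inline void set_buffer_dirty(struct buffer_head *bh)
{
	set_bit(BH_Dirty, &bh->b_state);
}

static __always_inline void clear_buffer_dirty(struct buffer_head *bh)
{
	clear_bit(BH_Dirty, &bh->b_state);
}

static __always_inline int buffer_dirty(const struct buffer_head *bh)
{
	return test_bit(BH_Dirty, &bh->b_state);
}
```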
```diff
@@ -176,6 +133,41 @@
 BUFFER_FNS(Meta, meta)
 BUFFER_FNS(Prio, prio)
 BUFFER_FNS(Defer_Completion, defer_completion)
+
+static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
+{
+	/*
+	 * If somebody else already set this uptodate, they will
+	 * have done the memory barrier, and a reader will thus
+	 * see *some* valid buffer state.
+	 *
+	 * Any other serialization (with IO errors or whatever that
+	 * might clear the bit) has to come from other state (e.g. BH_Lock).
+	 */
+	if (test_bit(BH_Uptodate, &bh->b_state))
+		return;
+
+	/*
+	 * make it consistent with folio_mark_uptodate
+	 * pairs with smp_load_acquire in buffer_uptodate
+	 */
+	smp_mb__before_atomic();
+	set_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
+{
+	clear_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline int buffer_uptodate(const struct buffer_head *bh)
+{
+	/*
+	 * make it consistent with folio_test_uptodate
+	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
+	 */
+	return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
+}
 
 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
 
```
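The open-coded replacements give `BH_Uptodate` release/acquire semantics matching `folio_mark_uptodate()` and `folio_test_uptodate()`: `smp_mb__before_atomic()` orders the filling of `bh->b_data` before the bit becomes visible, and `smp_load_acquire()` guarantees that a reader who observes the bit also observes the data it guards. A hypothetical writer/reader pair to make the pairing concrete (illustration only, not kernel code):

```c
#include <linux/buffer_head.h>
#include <linux/errno.h>

/* Writer, e.g. an I/O completion handler: publish the buffer contents. */
static void example_io_done(struct buffer_head *bh)
{
	/*
	 * bh->b_data has been filled by the device at this point. The
	 * smp_mb__before_atomic() inside set_buffer_uptodate() orders
	 * those stores before BH_Uptodate becomes visible.
	 */
	set_buffer_uptodate(bh);
}

/* Reader: only dereference b_data after acquiring the flag. */
static int example_read_first_byte(struct buffer_head *bh, char *out)
{
	if (!buffer_uptodate(bh))	/* smp_load_acquire() on b_state */
		return -EAGAIN;
	/* Seeing BH_Uptodate guarantees the data stores are visible too. */
	*out = bh->b_data[0];
	return 0;
}
```

The early `test_bit()` in `set_buffer_uptodate()` is a fast path: if the bit is already set, whoever set it first has already issued the barrier, so repeating it buys nothing.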
```diff
@@ -236,6 +228,8 @@
 struct buffer_head *__bread_gfp(struct block_device *,
 				sector_t block, unsigned size, gfp_t gfp);
 void invalidate_bh_lrus(void);
+void invalidate_bh_lrus_cpu(void);
+bool has_bh_in_lru(int cpu, void *dummy);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void unlock_buffer(struct buffer_head *bh);
```
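`has_bh_in_lru()` has the `smp_cond_func_t` signature (hence the otherwise unused `void *dummy`), so callers can skip cross-CPU work on CPUs whose per-CPU buffer_head LRU is empty, while `invalidate_bh_lrus_cpu()` drains only the local CPU's LRU. A sketch of the conditional cross-call pattern, modelled on `invalidate_bh_lrus()` in fs/buffer.c; `invalidate_bh_lru()` is a per-CPU callback private to that file and is assumed here:

```c
#include <linux/smp.h>

/* Assumed: the per-CPU drain callback, static in fs/buffer.c. */
static void invalidate_bh_lru(void *arg);

static void example_invalidate_all_bh_lrus(void)
{
	/*
	 * Run invalidate_bh_lru() on each CPU, but only where
	 * has_bh_in_lru() reports a non-empty LRU; the final
	 * argument waits for all the calls to finish.
	 */
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
}
```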
```diff
@@ -286,7 +280,7 @@
 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 				get_block_t get_block);
 /* Convert errno to return value from ->page_mkwrite() call */
-static inline int block_page_mkwrite_return(int err)
+static inline vm_fault_t block_page_mkwrite_return(int err)
 {
 	if (err == 0)
 		return VM_FAULT_LOCKED;
```
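The return type tightens from `int` to `vm_fault_t`, the bitwise type that `->page_mkwrite()` and the other fault handlers must return, so sparse can catch code that mixes errnos with `VM_FAULT_*` codes. A hedged example of a filesystem handler built on the helper; the `examplefs_*` names are hypothetical:

```c
#include <linux/buffer_head.h>
#include <linux/mm.h>

/* Hypothetical callback with the get_block_t signature. */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create);

static vm_fault_t examplefs_page_mkwrite(struct vm_fault *vmf)
{
	int err;

	/* Do the block-level work, which reports a plain errno... */
	err = block_page_mkwrite(vmf->vma, vmf, examplefs_get_block);

	/* ...then translate that errno into a vm_fault_t for the VM. */
	return block_page_mkwrite_return(err);
}
```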
```diff
@@ -313,14 +307,6 @@
 /*
  * inline definitions
  */
-
-static inline void attach_page_buffers(struct page *page,
-		struct buffer_head *head)
-{
-	get_page(page);
-	SetPagePrivate(page);
-	set_page_private(page, (unsigned long)head);
-}
 
 static inline void get_bh(struct buffer_head *bh)
 {
```
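`attach_page_buffers()` leaves this header because it was generalised: buffer code now calls `attach_page_private(page, head)` from linux/pagemap.h, which performs the same three steps for any user of `page->private`. Its behaviour, sketched here in simplified form (the real helper lives in linux/pagemap.h):

```c
#include <linux/pagemap.h>

/* Simplified sketch of what attach_page_private() does. */
static inline void example_attach_private(struct page *page, void *data)
{
	get_page(page);			/* hold a reference for ->private */
	set_page_private(page, (unsigned long)data);
	SetPagePrivate(page);		/* flag ->private as in use */
}
```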
```diff
@@ -456,6 +442,9 @@
 static inline void invalidate_inode_buffers(struct inode *inode) {}
 static inline int remove_inode_buffers(struct inode *inode) { return 1; }
 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
+static inline void invalidate_bh_lrus_cpu(void) {}
+static inline bool has_bh_in_lru(int cpu, void *dummy) { return 0; }
+#define buffer_heads_over_limit 0
 
 #endif /* CONFIG_BLOCK */
 #endif /* _LINUX_BUFFER_HEAD_H */
```
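The new `!CONFIG_BLOCK` stubs keep callers outside the block layer building: generic mm code may invoke these helpers unconditionally and let the compiler fold the dead branches away. A hypothetical caller showing the effect:

```c
#include <linux/buffer_head.h>

/* Hypothetical mm-side caller; compiles with CONFIG_BLOCK=n as well. */
static void example_cpu_drain(void)
{
	invalidate_bh_lrus_cpu();	/* no-op stub without CONFIG_BLOCK */

	if (buffer_heads_over_limit) {
		/* Constant 0 without CONFIG_BLOCK, so this branch
		 * is eliminated entirely at compile time. */
	}
}
```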