.. | .. |
---|
76 | 76 | struct address_space *b_assoc_map; /* mapping this buffer is |
---|
77 | 77 | associated with */ |
---|
78 | 78 | atomic_t b_count; /* users using this buffer_head */ |
---|
| 79 | +#ifdef CONFIG_PREEMPT_RT_BASE |
---|
| 80 | + spinlock_t b_uptodate_lock; |
---|
| 81 | +#if IS_ENABLED(CONFIG_JBD2) |
---|
| 82 | + spinlock_t b_state_lock; |
---|
| 83 | + spinlock_t b_journal_head_lock; |
---|
| 84 | +#endif |
---|
| 85 | +#endif |
---|
79 | 86 | }; |
---|
80 | 87 | |
---|
/*
 * Acquire the buffer's "uptodate" lock and return the saved IRQ flags.
 *
 * Non-RT: disables local interrupts, then takes the BH_Uptodate_Lock
 * bit spinlock embedded in bh->b_state.  The order matters — IRQs must
 * be off before the bit lock is taken, so an interrupt on this CPU
 * cannot spin on a lock its own context holds.
 *
 * RT (CONFIG_PREEMPT_RT_BASE): uses the dedicated bh->b_uptodate_lock
 * spinlock_t added to struct buffer_head instead of the bit spinlock.
 * NOTE(review): on RT a spinlock_t is presumably a sleeping lock, so
 * this path avoids hard-disabling IRQs — confirm against the RT tree.
 *
 * Returns the flags word to pass to bh_uptodate_unlock_irqrestore().
 */
static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
{
	unsigned long flags;

#ifndef CONFIG_PREEMPT_RT_BASE
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
#else
	spin_lock_irqsave(&bh->b_uptodate_lock, flags);
#endif
	return flags;
}
---|
| 100 | + |
---|
/*
 * Release the buffer's "uptodate" lock taken by
 * bh_uptodate_lock_irqsave(), restoring the IRQ state from @flags.
 *
 * Non-RT: drops the BH_Uptodate_Lock bit spinlock in bh->b_state
 * first, then restores interrupts — the mirror image of the acquire
 * path, keeping the lock held only while IRQs are disabled.
 *
 * RT (CONFIG_PREEMPT_RT_BASE): unlocks the dedicated
 * bh->b_uptodate_lock spinlock_t with the matching *_irqrestore call.
 */
static inline void
bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
{
#ifndef CONFIG_PREEMPT_RT_BASE
	bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
	local_irq_restore(flags);
#else
	spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
#endif
}
---|
| 111 | + |
---|
/*
 * Initialize the spinlocks added to struct buffer_head for RT.
 *
 * On CONFIG_PREEMPT_RT_BASE this sets up b_uptodate_lock (the
 * replacement for the BH_Uptodate_Lock bit spinlock) and, when JBD2 is
 * enabled, b_state_lock and b_journal_head_lock as well.
 *
 * On non-RT kernels the fields do not exist and this compiles to an
 * empty function, so callers can invoke it unconditionally wherever a
 * buffer_head is created.
 */
static inline void buffer_head_init_locks(struct buffer_head *bh)
{
#ifdef CONFIG_PREEMPT_RT_BASE
	spin_lock_init(&bh->b_uptodate_lock);
#if IS_ENABLED(CONFIG_JBD2)
	spin_lock_init(&bh->b_state_lock);
	spin_lock_init(&bh->b_journal_head_lock);
#endif
#endif
}
---|
| 122 | + |
---|
81 | 123 | /* |
---|
82 | 124 | * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() |
---|
83 | 125 | * and buffer_foo() functions. |
---|