@@ -13,6 +13,7 @@
 #include <linux/workqueue.h>
 #include <linux/kref.h>
 #include <linux/refcount.h>
+#include <linux/android_kabi.h>
 
 struct page;
 struct device;
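The newly included <linux/android_kabi.h> provides the ANDROID_KABI_RESERVE() padding macros used further down in this patch to keep struct layouts stable for the Android kernel ABI. As a rough sketch only (the real header routes through an extra indirection so a reserved slot can later be repurposed with ANDROID_KABI_USE() without changing offsets), the reserve macro amounts to a named u64 placeholder:

typedef unsigned long long u64;		/* kernel type, stubbed so the sketch stands alone */

/* Sketch: approximates what ANDROID_KABI_RESERVE() contributes to a struct. */
#define ANDROID_KABI_RESERVE(n)		u64 android_kabi_reserved##n

struct example {
	int real_member;
	ANDROID_KABI_RESERVE(1);	/* i.e. u64 android_kabi_reserved1; */
};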
@@ -33,8 +34,6 @@
 	WB_sync_congested,	/* The sync queue is getting full */
 };
 
-typedef int (congested_fn)(void *, int);
-
 enum wb_stat_item {
 	WB_RECLAIMABLE,
 	WB_WRITEBACK,
@@ -54,7 +53,6 @@
 	WB_REASON_SYNC,
 	WB_REASON_PERIODIC,
 	WB_REASON_LAPTOP_TIMER,
-	WB_REASON_FREE_MORE_MEM,
 	WB_REASON_FS_FREE_SPACE,
 	/*
 	 * There is no bdi forker thread any more and works are done
@@ -63,29 +61,30 @@
 	 * so it has a mismatch name.
 	 */
 	WB_REASON_FORKER_THREAD,
+	WB_REASON_FOREIGN_FLUSH,
 
 	WB_REASON_MAX,
 };
 
-/*
- * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
- * wb's can operate mostly independently but should share the congested
- * state.  To facilitate such sharing, the congested state is tracked using
- * the following struct which is created on demand, indexed by blkcg ID on
- * its bdi, and refcounted.
- */
-struct bdi_writeback_congested {
-	unsigned long state;		/* WB_[a]sync_congested flags */
-	refcount_t refcnt;		/* nr of attached wb's and blkg */
-
-#ifdef CONFIG_CGROUP_WRITEBACK
-	struct backing_dev_info *__bdi;	/* the associated bdi, set to NULL
-					 * on bdi unregistration. For memcg-wb
-					 * internal use only! */
-	int blkcg_id;			/* ID of the associated blkcg */
-	struct rb_node rb_node;		/* on bdi->cgwb_congestion_tree */
-#endif
+struct wb_completion {
+	atomic_t		cnt;
+	wait_queue_head_t	*waitq;
 };
+
+#define __WB_COMPLETION_INIT(_waitq)	\
+	(struct wb_completion){ .cnt = ATOMIC_INIT(1), .waitq = (_waitq) }
+
+/*
+ * If one wants to wait for one or more wb_writeback_works, each work's
+ * ->done should be set to a wb_completion defined using the following
+ * macro.  Once all work items are issued with wb_queue_work(), the caller
+ * can wait for the completion of all using wb_wait_for_completion().  Work
+ * items which are waited upon aren't freed automatically on completion.
+ */
+#define WB_COMPLETION_INIT(bdi)		__WB_COMPLETION_INIT(&(bdi)->wb_waitq)
+
+#define DEFINE_WB_COMPLETION(cmpl, bdi)	\
+	struct wb_completion cmpl = WB_COMPLETION_INIT(bdi)
 
 /*
  * Each wb (bdi_writeback) can perform writeback operations, is measured
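The usage pattern described in the new comment looks roughly like the sketch below, modeled on how fs/fs-writeback.c drives its wb_writeback_work items; wb_queue_work() is the existing queueing helper, and wb_wait_for_completion() is assumed here to take just the completion now that the wait queue is embedded via WB_COMPLETION_INIT():

/* Sketch: issue one or more works and wait for all of them. */
static void example_wait_for_writeback(struct backing_dev_info *bdi,
				       struct bdi_writeback *wb,
				       struct wb_writeback_work *work)
{
	DEFINE_WB_COMPLETION(done, bdi);	/* cnt = 1, waitq = &bdi->wb_waitq */

	work->done = &done;		/* each completing work drops a count */
	wb_queue_work(wb, work);	/* repeat for every work in the batch */

	/*
	 * Sleeps on bdi->wb_waitq until every queued work has signalled
	 * ->done; waited-upon works are not freed automatically.
	 */
	wb_wait_for_completion(&done);
}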
@@ -120,7 +119,7 @@
 
 	struct percpu_counter stat[NR_WB_STAT_ITEMS];
 
-	struct bdi_writeback_congested *congested;
+	unsigned long congested;	/* WB_[a]sync_congested flags */
 
 	unsigned long bw_time_stamp;	/* last time write bw is updated */
 	unsigned long dirtied_stamp;
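With ->congested turned from a pointer to a shared bdi_writeback_congested into a plain flag word, the WB_[a]sync_congested bits now live directly in each wb. A minimal sketch of how such a flag word is typically tested and updated (the example_* helper names are illustrative, not part of the patch):

/* Sketch: per-wb congestion bits stored directly in wb->congested. */
static inline bool example_wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	return wb->congested & cong_bits;	/* e.g. 1 << WB_sync_congested */
}

static inline void example_wb_set_congested(struct bdi_writeback *wb, int sync)
{
	set_bit(sync ? WB_sync_congested : WB_async_congested, &wb->congested);
}

static inline void example_wb_clear_congested(struct bdi_writeback *wb, int sync)
{
	clear_bit(sync ? WB_sync_congested : WB_async_congested, &wb->congested);
}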
@@ -162,16 +161,17 @@
 		struct rcu_head rcu;
 	};
 #endif
+
+	ANDROID_KABI_RESERVE(1);
+	ANDROID_KABI_RESERVE(2);
 };
 
 struct backing_dev_info {
+	u64 id;
+	struct rb_node rb_node;	/* keyed by ->id */
 	struct list_head bdi_list;
 	unsigned long ra_pages;	/* max readahead in PAGE_SIZE units */
 	unsigned long io_pages;	/* max allowed IO size */
-	congested_fn *congested_fn; /* Function pointer if device is md/dm */
-	void *congested_data;	/* Pointer to aux data for congested func */
-
-	const char *name;
 
 	struct kref refcnt;	/* Reference counter for the structure */
 	unsigned int capabilities; /* Device capabilities */
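The new ->id and ->rb_node pair indicates that bdis are now kept in an rb-tree keyed by a 64-bit id so they can be looked up by id (for instance when flushing is requested on behalf of a foreign bdi, cf. WB_REASON_FOREIGN_FLUSH above). A hedged sketch of such a lookup; the tree root, locking, and function name are assumptions, not part of this hunk:

/*
 * Sketch only: an id-keyed rb-tree walk over backing_dev_info.
 * Assumes <linux/rbtree.h>; the caller holds whatever lock protects the tree.
 */
static struct rb_root bdi_tree = RB_ROOT;

static struct backing_dev_info *example_bdi_lookup(u64 id)
{
	struct rb_node *node = bdi_tree.rb_node;

	while (node) {
		struct backing_dev_info *bdi =
			rb_entry(node, struct backing_dev_info, rb_node);

		if (id < bdi->id)
			node = node->rb_left;
		else if (id > bdi->id)
			node = node->rb_right;
		else
			return bdi;	/* found; caller handles refcounting */
	}
	return NULL;
}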
@@ -188,23 +188,23 @@
 	struct list_head wb_list; /* list of all wbs */
 #ifdef CONFIG_CGROUP_WRITEBACK
 	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
-	struct rb_root cgwb_congested_tree; /* their congested states */
 	struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
 	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
-#else
-	struct bdi_writeback_congested *wb_congested;
 #endif
 	wait_queue_head_t wb_waitq;
 
 	struct device *dev;
+	char dev_name[64];
 	struct device *owner;
 
 	struct timer_list laptop_mode_wb_timer;
 
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *debug_dir;
-	struct dentry *debug_stats;
 #endif
+
+	ANDROID_KABI_RESERVE(1);
+	ANDROID_KABI_RESERVE(2);
 };
 
 enum {
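dev_name[64] embeds a copy of the device name in the bdi itself, so the name can still be reported after ->dev has been released. A hedged sketch of the kind of accessor this enables (helper name and fallback string are illustrative):

/* Sketch: return a stable name without dereferencing a possibly-gone ->dev. */
static const char *example_bdi_dev_name(struct backing_dev_info *bdi)
{
	if (!bdi || !bdi->dev)
		return "(unknown)";	/* no device currently attached */
	return bdi->dev_name;		/* snapshot taken at registration time */
}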
@@ -212,18 +212,8 @@
 	BLK_RW_SYNC	= 1,
 };
 
-void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
-void set_wb_congested(struct bdi_writeback_congested *congested, int sync);
-
-static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
-{
-	clear_wb_congested(bdi->wb.congested, sync);
-}
-
-static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
-{
-	set_wb_congested(bdi->wb.congested, sync);
-}
+void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
+void set_bdi_congested(struct backing_dev_info *bdi, int sync);
 
 struct wb_lock_cookie {
 	bool locked;
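clear_bdi_congested() and set_bdi_congested() stop being static inlines that chase bdi->wb.congested as a pointer and become ordinary functions declared here. A hedged sketch of what the clear side could look like in the .c file; the congestion_wqh wait queues and the wake-up protocol are assumptions based on the long-standing congestion_wait() machinery, not shown in this hunk:

/* Sketch: possible out-of-line implementation of clear_bdi_congested(). */
void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum wb_congested_state bit =
		sync ? WB_sync_congested : WB_async_congested;

	if (test_and_clear_bit(bit, &bdi->wb.congested)) {
		/* wake anyone sleeping in congestion_wait() for this class;
		 * congestion_wqh[] is assumed to exist in the .c file */
		wait_queue_head_t *wqh = &congestion_wqh[sync];

		smp_mb__after_atomic();
		if (waitqueue_active(wqh))
			wake_up(wqh);
	}
}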