 .. |  .. |
 11 |  11 |  #include <linux/rwsem.h>
 12 |  12 |  #include <linux/memcontrol.h>
 13 |  13 |  #include <linux/highmem.h>
    |  14 | +#ifndef __GENKSYMS__
    |  15 | +#define PROTECT_TRACE_INCLUDE_PATH
    |  16 | +#include <trace/hooks/mm.h>
    |  17 | +#endif
 14 |  18 |
 15 |  19 |  /*
 16 |  20 |   * The anon_vma heads a list of private "related" vmas, to scan if
 .. |  .. |
 77 |  81 |  struct anon_vma_chain {
 78 |  82 |  	struct vm_area_struct *vma;
 79 |  83 |  	struct anon_vma *anon_vma;
 80 |     | -	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
    |  84 | +	struct list_head same_vma;	/* locked by mmap_lock & page_table_lock */
 81 |  85 |  	struct rb_node rb;		/* locked by anon_vma->rwsem */
 82 |  86 |  	unsigned long rb_subtree_last;
 83 |  87 |  #ifdef CONFIG_DEBUG_VM_RB
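Note on the field being re-commented: same_vma is the per-VMA list node, while the list head lives in vm_area_struct::anon_vma_chain. A minimal sketch of how an mmap_lock holder walks that list; the helper example_count_vma_anon_vmas() is hypothetical and only illustrates the locking rule stated in the comment above.

```c
#include <linux/mm.h>
#include <linux/rmap.h>

/* Illustrative only: count the anon_vmas this VMA is chained into.
 * The caller must hold mmap_lock, per the same_vma comment. */
static int example_count_vma_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;
	int nr = 0;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		nr++;

	return nr;
}
```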
 .. |  .. |
 91 |  95 |
 92 |  96 |  	TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
 93 |  97 |  	TTU_IGNORE_MLOCK	= 0x8,	/* ignore mlock */
 94 |     | -	TTU_IGNORE_ACCESS	= 0x10,	/* don't age */
    |  98 | +	TTU_SYNC		= 0x10,	/* avoid racy checks with PVMW_SYNC */
 95 |  99 |  	TTU_IGNORE_HWPOISON	= 0x20,	/* corrupted page is recoverable */
 96 | 100 |  	TTU_BATCH_FLUSH		= 0x40,	/* Batch TLB flushes where possible
 97 | 101 |  					 * and caller guarantees they will
 98 | 102 |  					 * do a final flush if necessary */
 99 | 103 |  	TTU_RMAP_LOCKED		= 0x80,	/* do not grab rmap lock:
100 | 104 |  					 * caller holds it */
101 |     | -	TTU_SPLIT_FREEZE	= 0x100, /* freeze pte under splitting thp */
102 |     | -	TTU_SYNC		= 0x200, /* avoid racy checks with PVMW_SYNC */
    | 105 | +	TTU_SPLIT_FREEZE	= 0x100, /* freeze pte under splitting thp */
103 | 106 |  };
104 | 107 |
105 | 108 |  #ifdef CONFIG_MMU
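With TTU_IGNORE_ACCESS gone, TTU_SYNC can move down to the freed 0x10 bit without colliding with any other flag. The TTU_* values form a plain bitmask; a hedged sketch of a caller, assuming the 5.10-era try_to_unmap(struct page *, enum ttu_flags) prototype from this header (the helper name is made up):

```c
/* Illustrative only: combine TTU_* bits and hand them to try_to_unmap().
 * Returns true when the page is no longer mapped anywhere. */
static bool example_unmap_for_reclaim(struct page *page)
{
	enum ttu_flags flags = TTU_BATCH_FLUSH | TTU_IGNORE_MLOCK;

	return try_to_unmap(page, flags);
}
```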
 .. |  .. |
129 | 132 |  static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
130 | 133 |  {
131 | 134 |  	down_read(&anon_vma->root->rwsem);
    | 135 | +}
    | 136 | +
    | 137 | +static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
    | 138 | +{
    | 139 | +	return down_read_trylock(&anon_vma->root->rwsem);
132 | 140 |  }
133 | 141 |
134 | 142 |  static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
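The new anon_vma_trylock_read() mirrors anon_vma_lock_read() but returns the down_read_trylock() result: non-zero on success, 0 when the root rwsem is contended. A sketch of the intended calling pattern; example_scan_anon() is hypothetical, not part of this patch.

```c
/* Illustrative only: try the lock and let the caller back off and retry
 * later instead of sleeping on a busy anon_vma. */
static bool example_scan_anon(struct page *page, struct anon_vma *anon_vma)
{
	if (!anon_vma_trylock_read(anon_vma))
		return false;		/* contended: caller should retry later */

	/* ... walk the anon_vma interval tree for mappings of @page ... */

	anon_vma_unlock_read(anon_vma);
	return true;
}
```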
 .. |  .. |
175 | 183 |  			    unsigned long, bool);
176 | 184 |  void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
177 | 185 |  			   unsigned long, int);
178 |     | -void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
179 |     | -			    unsigned long, bool);
    | 186 | +void __page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
    | 187 | +			      unsigned long address, bool compound);
    | 188 | +static inline void page_add_new_anon_rmap(struct page *page,
    | 189 | +					  struct vm_area_struct *vma,
    | 190 | +					  unsigned long address, bool compound)
    | 191 | +{
    | 192 | +	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
    | 193 | +	__page_add_new_anon_rmap(page, vma, address, compound);
    | 194 | +}
    | 195 | +
180 | 196 |  void page_add_file_rmap(struct page *, bool);
181 | 197 |  void page_remove_rmap(struct page *, bool);
182 | 198 |
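page_add_new_anon_rmap() keeps its old behaviour, including the VM_BUG_ON_VMA() range check, while the new __page_add_new_anon_rmap() lets callers that validate the address themselves skip the assertion. A hedged sketch of the usual fault-path usage, heavily simplified (the real path also sets the PTE and adds the page to the LRU):

```c
/* Illustrative only: map a freshly allocated anonymous page at the faulting
 * address; the inline wrapper asserts the address lies inside vmf->vma
 * before delegating to __page_add_new_anon_rmap(). */
static void example_map_new_anon_page(struct vm_fault *vmf, struct page *page)
{
	page_add_new_anon_rmap(page, vmf->vma, vmf->address, false);
}
```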
 .. |  .. |
187 | 203 |
188 | 204 |  static inline void page_dup_rmap(struct page *page, bool compound)
189 | 205 |  {
190 |     | -	atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
    | 206 | +	bool success = false;
    | 207 | +
    | 208 | +	if (!compound)
    | 209 | +		trace_android_vh_update_page_mapcount(page, true, compound, NULL, &success);
    | 210 | +	if (!success)
    | 211 | +		atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
191 | 212 |  }
192 | 213 |
193 | 214 |  /*
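The android_vh_update_page_mapcount vendor hook gets first refusal on the non-compound mapcount update; only if no attached module sets success does the stock atomic_inc() run, so behaviour is unchanged when no hook is registered. For context, a sketch of the classic page_dup_rmap() pattern, a fork-style PTE copy (illustrative, not part of this patch):

```c
/* Illustrative only: when a PTE is duplicated into a child, the page gains
 * one reference and one additional mapping. */
static void example_duplicate_mapping(struct page *page)
{
	get_page(page);			/* extra reference for the new PTE */
	page_dup_rmap(page, false);	/* account the extra mapping */
}
```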
 .. |  .. |
245 | 266 |
246 | 267 |  void remove_migration_ptes(struct page *old, struct page *new, bool locked);
247 | 268 |
248 |     | -/*
249 |     | - * Called by memory-failure.c to kill processes.
250 |     | - */
251 |     | -struct anon_vma *page_lock_anon_vma_read(struct page *page);
252 |     | -void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
253 | 269 |  int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
254 | 270 |
255 | 271 |  /*
256 | 272 |   * rmap_walk_control: To control rmap traversing for specific needs
257 | 273 |   *
258 | 274 |   * arg: passed to rmap_one() and invalid_vma()
    | 275 | + * try_lock: bail out if the rmap lock is contended
    | 276 | + * contended: indicate the rmap traversal bailed out due to lock contention
259 | 277 |   * rmap_one: executed on each vma where page is mapped
260 | 278 |   * done: for checking traversing termination condition
261 | 279 |   * anon_lock: for getting anon_lock by optimized way rather than default
 .. |  .. |
263 | 281 |   */
264 | 282 |  struct rmap_walk_control {
265 | 283 |  	void *arg;
    | 284 | +	bool try_lock;
    | 285 | +	bool contended;
266 | 286 |  	/*
267 | 287 |  	 * Return false if page table scanning in rmap_walk should be stopped.
268 | 288 |  	 * Otherwise, return true.
 .. |  .. |
270 | 290 |  	bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
271 | 291 |  			 unsigned long addr, void *arg);
272 | 292 |  	int (*done)(struct page *page);
273 |     | -	struct anon_vma *(*anon_lock)(struct page *page);
    | 293 | +	struct anon_vma *(*anon_lock)(struct page *page,
    | 294 | +				      struct rmap_walk_control *rwc);
274 | 295 |  	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
275 | 296 |  };
276 | 297 |
277 | 298 |  void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
278 | 299 |  void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
279 | 300 |
    | 301 | +/*
    | 302 | + * Called by memory-failure.c to kill processes.
    | 303 | + */
    | 304 | +struct anon_vma *page_lock_anon_vma_read(struct page *page,
    | 305 | +					 struct rmap_walk_control *rwc);
    | 306 | +void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
    | 307 | +
280 | 308 |  #else /* !CONFIG_MMU */
281 | 309 |
282 | 310 |  #define anon_vma_init()	do {} while (0)
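Taken together, the new try_lock/contended fields and the extra rmap_walk_control argument to anon_lock() and page_lock_anon_vma_read() let a walker opt into non-blocking locking: with try_lock set, a contended rmap lock makes the traversal bail out and report back through contended instead of sleeping, while callers without a control structure (such as memory-failure) can keep passing NULL. A hedged sketch of that calling pattern; example_walk_nonblocking() and its callback are hypothetical.

```c
/* Illustrative only: run an rmap walk that refuses to sleep on a contended
 * rmap lock and tells the caller whether it completed. */
static bool example_walk_nonblocking(struct page *page,
				     bool (*one)(struct page *, struct vm_area_struct *,
						 unsigned long, void *),
				     void *arg)
{
	struct rmap_walk_control rwc = {
		.arg		= arg,
		.rmap_one	= one,
		.try_lock	= true,
	};

	rmap_walk(page, &rwc);

	/* false means at least one lock was busy and the walk gave up */
	return !rwc.contended;
}
```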