@@ -11,6 +11,10 @@
 #include <linux/rwsem.h>
 #include <linux/memcontrol.h>
 #include <linux/highmem.h>
+#ifndef __GENKSYMS__
+#define PROTECT_TRACE_INCLUDE_PATH
+#include <trace/hooks/mm.h>
+#endif
 
 /*
  * The anon_vma heads a list of private "related" vmas, to scan if
@@ -38,13 +42,7 @@
          */
         atomic_t refcount;
 
-        /*
-         * Count of child anon_vmas and VMAs which points to this anon_vma.
-         *
-         * This counter is used for making decision about reusing anon_vma
-         * instead of forking new one. See comments in function anon_vma_clone.
-         */
-        unsigned degree;
+        unsigned degree;        /* ANDROID: KABI preservation, DO NOT USE! */
 
         struct anon_vma *parent;        /* Parent of this anon_vma */
 
@@ -59,6 +57,25 @@
 
         /* Interval tree of private "related" vmas */
         struct rb_root_cached rb_root;
+
+        /*
+         * ANDROID: KABI preservation, it's safe to put these at the end of this structure as it's
+         * only passed by a pointer everywhere, the size and internal structures are local to the
+         * core kernel.
+         */
+#ifndef __GENKSYMS__
+        /*
+         * Count of child anon_vmas. Equals to the count of all anon_vmas that
+         * have ->parent pointing to this one, including itself.
+         *
+         * This counter is used for making decision about reusing anon_vma
+         * instead of forking new one. See comments in function anon_vma_clone.
+         */
+        unsigned long num_children;
+        /* Count of VMAs whose ->anon_vma pointer points to this object. */
+        unsigned long num_active_vmas;
+#endif
+
 };
 
 /*
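
The two `unsigned long` counters above take over from the single `degree` field, which ambiguously mixed "child anon_vmas" and "VMAs using this anon_vma" in one number; `degree` is kept only as KABI padding. As a hedged illustration (not the exact code in this tree), the reuse decision referred to by the comment can now be expressed against the two populations separately:

```c
/*
 * Simplified sketch of the reuse check the split counters enable; the real
 * logic lives in mm/rmap.c:anon_vma_clone() and may differ in detail here.
 */
static bool anon_vma_reusable(struct anon_vma *anon_vma)
{
        /* "< 2" because num_children counts the anon_vma itself. */
        return anon_vma->num_children < 2 &&
               anon_vma->num_active_vmas == 0;
}
```

Keeping the two counts separate is what prevents an anon_vma from being reused while VMAs still point at it.
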
@@ -77,7 +94,7 @@
 struct anon_vma_chain {
         struct vm_area_struct *vma;
         struct anon_vma *anon_vma;
-        struct list_head same_vma;   /* locked by mmap_sem & page_table_lock */
+        struct list_head same_vma;   /* locked by mmap_lock & page_table_lock */
         struct rb_node rb;                      /* locked by anon_vma->rwsem */
         unsigned long rb_subtree_last;
 #ifdef CONFIG_DEBUG_VM_RB
91 | 108 | |
---|
92 | 109 | TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */ |
---|
93 | 110 | TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */ |
---|
94 | | - TTU_IGNORE_ACCESS = 0x10, /* don't age */ |
---|
| 111 | + TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */ |
---|
95 | 112 | TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */ |
---|
96 | 113 | TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible |
---|
97 | 114 | * and caller guarantees they will |
---|
98 | 115 | * do a final flush if necessary */ |
---|
99 | 116 | TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock: |
---|
100 | 117 | * caller holds it */ |
---|
101 | | - TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */ |
---|
102 | | - TTU_SYNC = 0x200, /* avoid racy checks with PVMW_SYNC */ |
---|
| 118 | + TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */ |
---|
103 | 119 | }; |
---|
104 | 120 | |
---|
105 | 121 | #ifdef CONFIG_MMU |
---|
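
With TTU_IGNORE_ACCESS removed, TTU_SYNC takes over the freed 0x10 bit so the flag values stay dense. Callers still combine these flags with bitwise OR; a small illustrative example (the caller and flag combination are hypothetical, not lifted from this tree, using the 5.10-era single-page try_to_unmap() interface):

```c
/* Illustrative caller: request synchronous page_vma_mapped_walk() checks so
 * a parallel rmap walk cannot race the unmap decision. */
enum ttu_flags flags = TTU_SYNC | TTU_IGNORE_MLOCK;

if (!try_to_unmap(page, flags))
        pr_debug("page %lx still mapped after try_to_unmap()\n",
                 page_to_pfn(page));
```
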
@@ -129,6 +145,11 @@
 static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
 {
         down_read(&anon_vma->root->rwsem);
+}
+
+static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
+{
+        return down_read_trylock(&anon_vma->root->rwsem);
 }
 
 static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
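
The new anon_vma_trylock_read() wraps down_read_trylock() on the shared root rwsem, giving walkers a way to avoid sleeping on a contended lock. A hedged sketch of the intended caller pattern (`may_block` and the bail-out path are hypothetical):

```c
/* Sketch: take the rmap read lock opportunistically, and only sleep for it
 * when the caller is allowed to block. */
if (!anon_vma_trylock_read(anon_vma)) {
        if (!may_block)
                return false;   /* hypothetical "walk skipped" result */
        anon_vma_lock_read(anon_vma);
}
/* ... walk anon_vma->rb_root under the read lock ... */
anon_vma_unlock_read(anon_vma);
```
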
@@ -175,8 +196,16 @@
                 unsigned long, bool);
 void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
                 unsigned long, int);
-void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
-                unsigned long, bool);
+void __page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
+                unsigned long address, bool compound);
+static inline void page_add_new_anon_rmap(struct page *page,
+                                          struct vm_area_struct *vma,
+                                          unsigned long address, bool compound)
+{
+        VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
+        __page_add_new_anon_rmap(page, vma, address, compound);
+}
+
 void page_add_file_rmap(struct page *, bool);
 void page_remove_rmap(struct page *, bool);
 
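
page_add_new_anon_rmap() keeps its previous behaviour for existing callers, but the VMA bounds assertion now sits in the inline wrapper, so the out-of-line __page_add_new_anon_rmap() can be used by paths that have already validated the address. Both calls below are illustrative, assuming a fault-handler context with `vmf` in scope:

```c
/* Normal path: the wrapper still asserts the address lies inside the VMA. */
page_add_new_anon_rmap(page, vma, vmf->address, false);

/* A path that has done its own bounds checking (for example a speculative
 * fault handler) can call the double-underscore variant directly and skip
 * the VM_BUG_ON_VMA() re-check. */
__page_add_new_anon_rmap(page, vma, vmf->address, false);
```
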
@@ -187,7 +216,12 @@
 
 static inline void page_dup_rmap(struct page *page, bool compound)
 {
-        atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
+        bool success = false;
+
+        if (!compound)
+                trace_android_vh_update_page_mapcount(page, true, compound, NULL, &success);
+        if (!success)
+                atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
 }
 
 /*
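
page_dup_rmap() now gives a vendor hook the first chance to account the mapcount of a non-compound page; the default atomic_inc() only runs if no handler claims the update by setting `success`. A sketch of such a handler, assuming the hook prototype mirrors the call site above (the parameter names and the registration call are illustrative, not taken from this tree):

```c
/* Hypothetical vendor-module handler for the update_page_mapcount hook. */
static void vh_update_page_mapcount(void *data, struct page *page, bool inc,
                                    bool compound, bool *first_mapping,
                                    bool *success)
{
        /* Only claim the simple "duplicate a PTE mapping" case. */
        if (compound || !inc)
                return;

        /* Vendor-specific accounting would go here; mirroring the default
         * keeps the example honest about what "claiming" the update means. */
        atomic_inc(&page->_mapcount);
        *success = true;        /* page_dup_rmap() skips its own atomic_inc() */
}

/* Typically wired up from module init, e.g.:
 *      register_trace_android_vh_update_page_mapcount(vh_update_page_mapcount,
 *                                                     NULL);
 */
```
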
@@ -245,17 +279,14 @@
 
 void remove_migration_ptes(struct page *old, struct page *new, bool locked);
 
-/*
- * Called by memory-failure.c to kill processes.
- */
-struct anon_vma *page_lock_anon_vma_read(struct page *page);
-void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
 /*
  * rmap_walk_control: To control rmap traversing for specific needs
  *
  * arg: passed to rmap_one() and invalid_vma()
+ * try_lock: bail out if the rmap lock is contended
+ * contended: indicate the rmap traversal bailed out due to lock contention
  * rmap_one: executed on each vma where page is mapped
  * done: for checking traversing termination condition
  * anon_lock: for getting anon_lock by optimized way rather than default
@@ -263,6 +294,8 @@
  */
 struct rmap_walk_control {
         void *arg;
+        bool try_lock;
+        bool contended;
         /*
          * Return false if page table scanning in rmap_walk should be stopped.
          * Otherwise, return true.
@@ -270,13 +303,21 @@
         bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
                                         unsigned long addr, void *arg);
         int (*done)(struct page *page);
-        struct anon_vma *(*anon_lock)(struct page *page);
+        struct anon_vma *(*anon_lock)(struct page *page,
+                                      struct rmap_walk_control *rwc);
         bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
 };
 
 void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
 
+/*
+ * Called by memory-failure.c to kill processes.
+ */
+struct anon_vma *page_lock_anon_vma_read(struct page *page,
+                                         struct rmap_walk_control *rwc);
+void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
+
 #else /* !CONFIG_MMU */
 
 #define anon_vma_init() do {} while (0)
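
Threading the rmap_walk_control into anon_lock() and page_lock_anon_vma_read() is what lets the locking helper honour try_lock and report back through contended. A sketch of a walker using the new fields, loosely modeled on page_referenced() in mm/rmap.c (`pra` and `page_referenced_one` come from that context; treat the exact wiring as illustrative):

```c
/* Sketch of a caller that prefers to stop the walk rather than sleep on a
 * contended anon_vma or i_mmap lock. */
struct rmap_walk_control rwc = {
        .arg            = (void *)&pra,         /* caller-private walk state */
        .rmap_one       = page_referenced_one,
        .anon_lock      = page_lock_anon_vma_read, /* new two-argument form */
        .try_lock       = true,                 /* bail instead of sleeping */
};

rmap_walk(page, &rwc);

if (rwc.contended) {
        /* The rmap lock was contended and the walk stopped early; the caller
         * decides whether to retry later or accept a partial result. */
}
```
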