2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/include/linux/rmap.h
@@ -11,6 +11,10 @@
 #include <linux/rwsem.h>
 #include <linux/memcontrol.h>
 #include <linux/highmem.h>
+#ifndef __GENKSYMS__
+#define PROTECT_TRACE_INCLUDE_PATH
+#include <trace/hooks/mm.h>
+#endif
 
 /*
  * The anon_vma heads a list of private "related" vmas, to scan if
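The #ifndef __GENKSYMS__ guard hides the vendor-hook header from genksyms, which defines __GENKSYMS__ while computing exported-symbol CRCs; pulling in trace/hooks/mm.h therefore does not perturb those CRCs (the Android KMI). The hook used further down is declared in that header; a sketch of what the declaration presumably looks like, following the Android DECLARE_HOOK() convention (the exact TP_PROTO is inferred from the call site below, not shown in this diff):

	/* include/trace/hooks/mm.h (sketch, assumed): a vendor hook */
	DECLARE_HOOK(android_vh_update_page_mapcount,
		TP_PROTO(struct page *page, bool inc, bool compound,
			 bool *first_mapping, bool *success),
		TP_ARGS(page, inc, compound, first_mapping, success));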
@@ -77,7 +81,7 @@
 struct anon_vma_chain {
 	struct vm_area_struct *vma;
 	struct anon_vma *anon_vma;
-	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
+	struct list_head same_vma;	/* locked by mmap_lock & page_table_lock */
 	struct rb_node rb;		/* locked by anon_vma->rwsem */
 	unsigned long rb_subtree_last;
 #ifdef CONFIG_DEBUG_VM_RB
@@ -91,15 +95,14 @@
 
 	TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
 	TTU_IGNORE_MLOCK	= 0x8,	/* ignore mlock */
-	TTU_IGNORE_ACCESS	= 0x10,	/* don't age */
+	TTU_SYNC		= 0x10,	/* avoid racy checks with PVMW_SYNC */
 	TTU_IGNORE_HWPOISON	= 0x20,	/* corrupted page is recoverable */
 	TTU_BATCH_FLUSH		= 0x40,	/* Batch TLB flushes where possible
 					 * and caller guarantees they will
 					 * do a final flush if necessary */
 	TTU_RMAP_LOCKED		= 0x80,	/* do not grab rmap lock:
 					 * caller holds it */
-	TTU_SPLIT_FREEZE	= 0x100, /* freeze pte under splitting thp */
-	TTU_SYNC		= 0x200, /* avoid racy checks with PVMW_SYNC */
+	TTU_SPLIT_FREEZE	= 0x100, /* freeze pte under splitting thp */
 };
 
 #ifdef CONFIG_MMU
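TTU_SYNC moves from 0x200 into the 0x10 slot vacated by the removed TTU_IGNORE_ACCESS, keeping the flag values dense. Callers OR these flags together; a hypothetical unmap call combining two of them (try_to_unmap() itself is unchanged by this hunk):

	/* Hypothetical caller: split any huge PMD mapping the page and
	 * serialize against concurrent page_vma_mapped_walk()s. */
	if (!try_to_unmap(page, TTU_SPLIT_HUGE_PMD | TTU_SYNC))
		pr_debug("page is still mapped\n");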
@@ -129,6 +132,11 @@
 static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
 {
 	down_read(&anon_vma->root->rwsem);
+}
+
+static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
+{
+	return down_read_trylock(&anon_vma->root->rwsem);
 }
 
 static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
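anon_vma_trylock_read() is the non-blocking counterpart of anon_vma_lock_read(), added so rmap walkers can honour the new try_lock mode without sleeping. A sketch of the expected acquisition pattern (mirroring what page_lock_anon_vma_read() plausibly does with its new rwc argument, see the last hunk):

	/* Sketch: opportunistic read-lock with a non-blocking bail-out. */
	if (!anon_vma_trylock_read(anon_vma)) {
		if (rwc && rwc->try_lock) {
			rwc->contended = true;	/* report contention */
			return NULL;		/* give up, don't sleep */
		}
		anon_vma_lock_read(anon_vma);	/* legacy behaviour: block */
	}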
@@ -175,8 +183,16 @@
 		unsigned long, bool);
 void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long, int);
-void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
-		unsigned long, bool);
+void __page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
+		unsigned long address, bool compound);
+static inline void page_add_new_anon_rmap(struct page *page,
+		struct vm_area_struct *vma,
+		unsigned long address, bool compound)
+{
+	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
+	__page_add_new_anon_rmap(page, vma, address, compound);
+}
+
 void page_add_file_rmap(struct page *, bool);
 void page_remove_rmap(struct page *, bool);
 
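The old page_add_new_anon_rmap() becomes an inline wrapper that keeps the VMA bounds assertion at the call site and delegates to a new out-of-line __page_add_new_anon_rmap(). The split presumably exists so callers that cannot satisfy the VM_BUG_ON_VMA() check (e.g. speculative fault paths in Android trees, not shown in this diff) can reach the worker directly:

	/* Typical caller: address is known to lie inside vma, so the
	 * wrapper asserts it and delegates to the worker. */
	page_add_new_anon_rmap(page, vma, address, false);

	/* Hypothetical caller with no stable vma bounds to check goes
	 * straight to the worker: */
	__page_add_new_anon_rmap(page, vma, address, false);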
@@ -187,7 +203,12 @@
 
 static inline void page_dup_rmap(struct page *page, bool compound)
 {
-	atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
+	bool success = false;
+
+	if (!compound)
+		trace_android_vh_update_page_mapcount(page, true, compound, NULL, &success);
+	if (!success)
+		atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
 }
 
 /*
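page_dup_rmap() now gives a vendor module first refusal on the mapcount update: the hook may account the increment itself and set *success, in which case the default atomic_inc() is skipped. A hedged sketch of the module side (the handler signature is inferred from the call site and the DECLARE_HOOK() sketch above; data is the cookie passed at registration):

	/* Inferred vendor-module handler (hypothetical): */
	static void vh_update_page_mapcount(void *data, struct page *page,
			bool inc, bool compound,
			bool *first_mapping, bool *success)
	{
		/* vendor-specific mapcount accounting goes here */
		*success = true;	/* skip the default atomic_inc() */
	}

	/* Registration, typically from the module's init function: */
	register_trace_android_vh_update_page_mapcount(vh_update_page_mapcount,
						       NULL);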
@@ -245,17 +266,14 @@
 
 void remove_migration_ptes(struct page *old, struct page *new, bool locked);
 
-/*
- * Called by memory-failure.c to kill processes.
- */
-struct anon_vma *page_lock_anon_vma_read(struct page *page);
-void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
 /*
  * rmap_walk_control: To control rmap traversing for specific needs
  *
  * arg: passed to rmap_one() and invalid_vma()
+ * try_lock: bail out if the rmap lock is contended
+ * contended: indicate the rmap traversal bailed out due to lock contention
  * rmap_one: executed on each vma where page is mapped
  * done: for checking traversing termination condition
  * anon_lock: for getting anon_lock by optimized way rather than default
@@ -263,6 +281,8 @@
  */
 struct rmap_walk_control {
 	void *arg;
+	bool try_lock;
+	bool contended;
 	/*
 	 * Return false if page table scanning in rmap_walk should be stopped.
 	 * Otherwise, return true.
@@ -270,13 +290,21 @@
 	bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
 		unsigned long addr, void *arg);
 	int (*done)(struct page *page);
-	struct anon_vma *(*anon_lock)(struct page *page);
+	struct anon_vma *(*anon_lock)(struct page *page,
+		struct rmap_walk_control *rwc);
 	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
 };
 
 void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
 
+/*
+ * Called by memory-failure.c to kill processes.
+ */
+struct anon_vma *page_lock_anon_vma_read(struct page *page,
+		struct rmap_walk_control *rwc);
+void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
+
 #else /* !CONFIG_MMU */
 
 #define anon_vma_init() do {} while (0)
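Taken together, the new try_lock/contended fields let a caller request a non-blocking traversal and detect when it bailed out; page_lock_anon_vma_read() (and the anon_lock callback) takes the rwc so the lock acquisition itself can honour the flag. A minimal sketch of a caller (the names my_rmap_one and walk_page_rmap_nonblocking are illustrative, not from this patch):

	#include <linux/rmap.h>

	static bool my_rmap_one(struct page *page, struct vm_area_struct *vma,
			unsigned long addr, void *arg)
	{
		/* per-mapping work goes here */
		return true;			/* true: continue the walk */
	}

	static void walk_page_rmap_nonblocking(struct page *page)
	{
		struct rmap_walk_control rwc = {
			.rmap_one = my_rmap_one,
			.try_lock = true,	/* don't sleep on the rmap lock */
		};

		rmap_walk(page, &rwc);
		if (rwc.contended) {
			/* walk gave up on a contended lock; caller may retry */
		}
	}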