2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/include/linux/vmalloc.h
@@ -9,6 +9,9 @@
 #include <asm/page.h>		/* pgprot_t */
 #include <linux/rbtree.h>
 #include <linux/overflow.h>
+#include <linux/android_vendor.h>
+
+#include <asm/vmalloc.h>
 
 struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
 struct notifier_block;		/* in notifier.h */
@@ -18,9 +21,24 @@
 #define VM_ALLOC		0x00000002	/* vmalloc() */
 #define VM_MAP			0x00000004	/* vmap()ed pages */
 #define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
+#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040	/* don't add guard page */
 #define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
+#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
+#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
+
+/*
+ * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
+ *
+ * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
+ * shadow memory has been mapped. It's used to handle allocation errors so that
+ * we don't try to poison shadow on free if it was never allocated.
+ *
+ * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
+ * determine which allocations need the module shadow freed.
+ */
+
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
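
These VM_* flags live in vm_struct::flags and are normally inspected through find_vm_area(), which this header already declares. A minimal sketch of that pattern (the helper name below is hypothetical, not part of the patch):

static bool my_is_dma_coherent_mapping(const void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	/* VM_DMA_COHERENT marks areas backing dma_alloc_coherent(). */
	return vm && (vm->flags & VM_DMA_COHERENT);
}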
@@ -40,25 +58,35 @@
 	unsigned int		nr_pages;
 	phys_addr_t		phys_addr;
 	const void		*caller;
+	ANDROID_OEM_DATA(1);
 };
 
 struct vmap_area {
 	unsigned long va_start;
 	unsigned long va_end;
-	unsigned long flags;
+
 	struct rb_node rb_node;		/* address sorted rbtree */
 	struct list_head list;		/* address sorted list */
-	struct llist_node purge_list;	/* "lazy purge" list */
-	struct vm_struct *vm;
-	struct rcu_head rcu_head;
+
+	/*
+	 * The following three variables can be packed, because
+	 * a vmap_area object is always one of the three states:
+	 *    1) in "free" tree (root is free_vmap_area_root)
+	 *    2) in "busy" tree (root is vmap_area_root)
+	 *    3) in purge list  (head is vmap_purge_list)
+	 */
+	union {
+		unsigned long subtree_max_size;	/* in "free" tree */
+		struct vm_struct *vm;		/* in "busy" tree */
+		struct llist_node purge_list;	/* in purge list */
+	};
 };
 
 /*
 *	Highlevel APIs for driver use
 */
 extern void vm_unmap_ram(const void *mem, unsigned int count);
-extern void *vm_map_ram(struct page **pages, unsigned int count,
-				int node, pgprot_t prot);
+extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
 extern void vm_unmap_aliases(void);
 
 #ifdef CONFIG_MMU
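
The union shrinks every vmap_area because the three states are mutually exclusive, so exactly one member is live at any time. A hedged sketch of the resulting discipline (the helpers and list head are hypothetical; only the struct layout comes from the patch):

#include <linux/llist.h>
#include <linux/vmalloc.h>

static LLIST_HEAD(my_purge_list);

static void my_mark_busy(struct vmap_area *va, struct vm_struct *vm)
{
	/* Removed from the free tree: subtree_max_size is dead now. */
	va->vm = vm;				/* "busy" member is live */
}

static void my_mark_lazy_free(struct vmap_area *va)
{
	/* Removed from the busy tree: va->vm must not be used again. */
	llist_add(&va->purge_list, &my_purge_list);	/* "purge" member is live */
}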
@@ -76,31 +104,22 @@
 extern void *vmalloc_user(unsigned long size);
 extern void *vmalloc_node(unsigned long size, int node);
 extern void *vzalloc_node(unsigned long size, int node);
-extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
 extern void *vmalloc_32_user(unsigned long size);
-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
-#ifndef CONFIG_MMU
-extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
-static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
-						gfp_t flags, void *caller)
-{
-	return __vmalloc_node_flags(size, node, flags);
-}
-#else
-extern void *__vmalloc_node_flags_caller(unsigned long size,
-					 int node, gfp_t flags, void *caller);
-#endif
+void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
+		int node, const void *caller);
 
 extern void vfree(const void *addr);
 extern void vfree_atomic(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
+void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
 extern void vunmap(const void *addr);
 
 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
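
Callers need only mechanical updates for these signature changes: __vmalloc() loses its pgprot_t argument (such mappings are always PAGE_KERNEL now) and vm_map_ram() likewise drops prot. A before/after sketch, assuming GFP_KERNEL callers:

#include <linux/numa.h>
#include <linux/vmalloc.h>

static void *alloc_buf(unsigned long size)
{
	/* was: __vmalloc(size, GFP_KERNEL, PAGE_KERNEL); */
	return __vmalloc(size, GFP_KERNEL);
}

static void *map_buf(struct page **pages, unsigned int count)
{
	/* was: vm_map_ram(pages, count, -1, PAGE_KERNEL); */
	return vm_map_ram(pages, count, NUMA_NO_NODE);
}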
@@ -109,8 +128,22 @@
 
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
-void vmalloc_sync_mappings(void);
-void vmalloc_sync_unmappings(void);
+
+/*
+ * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED
+ * values and let the generic vmalloc and ioremap code know when
+ * arch_sync_kernel_mappings() needs to be called.
+ */
+#ifndef ARCH_PAGE_TABLE_SYNC_MASK
+#define ARCH_PAGE_TABLE_SYNC_MASK 0
+#endif
+
+/*
+ * There is no default implementation for arch_sync_kernel_mappings(); the
+ * compiler is relied upon to optimize the calls out if
+ * ARCH_PAGE_TABLE_SYNC_MASK is 0.
+ */
+void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
 
 /*
 *	Lowlevel-APIs (not for driver use!)
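
An architecture opts in by defining ARCH_PAGE_TABLE_SYNC_MASK to the PGTBL_*_MODIFIED levels it must track and supplying the hook; the generic code then calls it whenever a covered page-table level was modified. A sketch of the shape this takes (illustrative only; sync_global_pgds() stands in for whatever the architecture actually needs to do):

/* asm/vmalloc.h (sketch) */
#define ARCH_PAGE_TABLE_SYNC_MASK	(PGTBL_PGD_MODIFIED | PGTBL_P4D_MODIFIED)

/* arch page-table code (sketch) */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	/* Propagate new top-level kernel entries to all page tables. */
	sync_global_pgds(start, end);
}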
@@ -129,22 +162,28 @@
 extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
 extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
-extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
-					unsigned long start, unsigned long end);
 extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
+void free_vm_area(struct vm_struct *area);
 extern struct vm_struct *remove_vm_area(const void *addr);
 extern struct vm_struct *find_vm_area(const void *addr);
 
-extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
-			struct page **pages);
 #ifdef CONFIG_MMU
 extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
+int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
+		struct page **pages);
 extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
+static inline void set_vm_flush_reset_perms(void *addr)
+{
+	struct vm_struct *vm = find_vm_area(addr);
+
+	if (vm)
+		vm->flags |= VM_FLUSH_RESET_PERMS;
+}
 #else
 static inline int
 map_kernel_range_noflush(unsigned long start, unsigned long size,
@@ -152,19 +191,16 @@
 {
	return size >> PAGE_SHIFT;
 }
+#define map_kernel_range map_kernel_range_noflush
 static inline void
 unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
 {
 }
-static inline void
-unmap_kernel_range(unsigned long addr, unsigned long size)
+#define unmap_kernel_range unmap_kernel_range_noflush
+static inline void set_vm_flush_reset_perms(void *addr)
 {
 }
 #endif
-
-/* Allocate/destroy a 'vmalloc' VM area. */
-extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
-extern void free_vm_area(struct vm_struct *area);
 
 /* for /dev/kmem */
 extern long vread(char *buf, char *addr, unsigned long count);
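
set_vm_flush_reset_perms() must be called before an area's permissions are tightened; vfree() will then reset the direct map and flush the TLB instead of leaving stale writable or executable aliases behind. A minimal sketch of the intended call pattern (error handling elided; the set_memory_x() step is illustrative, not mandated by the patch):

#include <linux/set_memory.h>
#include <linux/vmalloc.h>

static void *alloc_exec_region(unsigned long size)
{
	void *p = __vmalloc(size, GFP_KERNEL);

	if (!p)
		return NULL;

	set_vm_flush_reset_perms(p);	/* before changing permissions */
	set_memory_x((unsigned long)p, PAGE_ALIGN(size) >> PAGE_SHIFT);
	return p;
}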
@@ -209,4 +245,7 @@
 int register_vmap_purge_notifier(struct notifier_block *nb);
 int unregister_vmap_purge_notifier(struct notifier_block *nb);
 
+/* Allow disabling lazy TLB flushing */
+extern bool lazy_vunmap_enable;
+
 #endif /* _LINUX_VMALLOC_H */
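
lazy_vunmap_enable is a vendor-visible switch; the patch only declares it, so any consuming logic is speculative. One plausible use is gating the lazy-purge threshold, so that clearing the flag forces vmap areas to be purged (and the TLB flushed) immediately:

/* Sketch only: hypothetical consumer of the knob. */
static unsigned long my_lazy_max_pages(void)
{
	if (!lazy_vunmap_enable)
		return 0;	/* purge and flush immediately */
	return 32UL * 1024 * 1024 / PAGE_SIZE;	/* upstream-style heuristic */
}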