@@ -9,6 +9,9 @@
 #include <asm/page.h>		/* pgprot_t */
 #include <linux/rbtree.h>
 #include <linux/overflow.h>
+#include <linux/android_vendor.h>
+
+#include <asm/vmalloc.h>
 
 struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
 struct notifier_block;		/* in notifier.h */
@@ -18,9 +21,24 @@
 #define VM_ALLOC		0x00000002	/* vmalloc() */
 #define VM_MAP			0x00000004	/* vmap()ed pages */
 #define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
+#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040	/* don't add guard page */
 #define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
+#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
+#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
+
+/*
+ * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
+ *
+ * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
+ * shadow memory has been mapped. It's used to handle allocation errors so that
+ * we don't try to poison shadow on free if it was never allocated.
+ *
+ * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
+ * determine which allocations need the module shadow freed.
+ */
+
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
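The comment above describes two distinct uses of VM_KASAN. As a hedged illustration of the CONFIG_KASAN_VMALLOC case, the sketch below shows the free-path rule it implies; kasan_poison_vmalloc() is the real kernel hook, but the surrounding helper is hypothetical, not the kernel's actual free path.

```c
/* Illustrative only: skip poisoning if shadow was never mapped. */
static void example_free_shadow(const struct vm_struct *area)
{
	/* VM_KASAN is only set once shadow memory was mapped. */
	if (area->flags & VM_KASAN)
		kasan_poison_vmalloc(area->addr, area->size);
}
```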
@@ -40,25 +58,35 @@
 	unsigned int nr_pages;
 	phys_addr_t phys_addr;
 	const void *caller;
+	ANDROID_OEM_DATA(1);
 };
 
 struct vmap_area {
 	unsigned long va_start;
 	unsigned long va_end;
-	unsigned long flags;
+
 	struct rb_node rb_node;		/* address sorted rbtree */
 	struct list_head list;		/* address sorted list */
-	struct llist_node purge_list;	/* "lazy purge" list */
-	struct vm_struct *vm;
-	struct rcu_head rcu_head;
+
+	/*
+	 * The following three variables can be packed, because
+	 * a vmap_area object is always in one of three states:
+	 *    1) in "free" tree (root is free_vmap_area_root)
+	 *    2) in "busy" tree (root is vmap_area_root)
+	 *    3) in purge list  (head is vmap_purge_list)
+	 */
+	union {
+		unsigned long subtree_max_size;	/* in "free" tree */
+		struct vm_struct *vm;		/* in "busy" tree */
+		struct llist_node purge_list;	/* in purge list */
+	};
 };
 
 /*
  * Highlevel APIs for driver use
  */
 extern void vm_unmap_ram(const void *mem, unsigned int count);
-extern void *vm_map_ram(struct page **pages, unsigned int count,
-			int node, pgprot_t prot);
+extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
 extern void vm_unmap_aliases(void);
 
 #ifdef CONFIG_MMU
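The vm_map_ram() change in the hunk above drops the pgprot_t argument; mappings now always use PAGE_KERNEL. A minimal usage sketch of the new signature; the helper name is hypothetical and pages/count are assumed to come from the caller.

```c
/* Hypothetical helper: map pages temporarily, zero them, unmap. */
static int zero_pages_example(struct page **pages, unsigned int count)
{
	void *va = vm_map_ram(pages, count, NUMA_NO_NODE);

	if (!va)
		return -ENOMEM;
	memset(va, 0, (size_t)count * PAGE_SIZE);
	vm_unmap_ram(va, count);
	return 0;
}
```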
@@ -76,31 +104,22 @@
 extern void *vmalloc_user(unsigned long size);
 extern void *vmalloc_node(unsigned long size, int node);
 extern void *vzalloc_node(unsigned long size, int node);
-extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
 extern void *vmalloc_32_user(unsigned long size);
-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
 			pgprot_t prot, unsigned long vm_flags, int node,
 			const void *caller);
-#ifndef CONFIG_MMU
-extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
-static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
-					gfp_t flags, void *caller)
-{
-	return __vmalloc_node_flags(size, node, flags);
-}
-#else
-extern void *__vmalloc_node_flags_caller(unsigned long size,
-					int node, gfp_t flags, void *caller);
-#endif
+void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
+		int node, const void *caller);
 
 extern void vfree(const void *addr);
 extern void vfree_atomic(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
+void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
 extern void vunmap(const void *addr);
 
 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
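__vmalloc() likewise loses its pgprot_t parameter in this hunk; callers that need a non-default protection go through __vmalloc_node_range() instead. A minimal sketch of the two-argument form, with an illustrative size and function name (SZ_1M comes from <linux/sizes.h>):

```c
/* Hypothetical caller of the simplified __vmalloc() signature. */
static void *alloc_scratch_buffer(void)
{
	return __vmalloc(SZ_1M, GFP_KERNEL | __GFP_ZERO);
}
```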
@@ -109,8 +128,22 @@
 
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 							unsigned long pgoff);
-void vmalloc_sync_mappings(void);
-void vmalloc_sync_unmappings(void);
+
+/*
+ * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
+ * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
+ * needs to be called.
+ */
+#ifndef ARCH_PAGE_TABLE_SYNC_MASK
+#define ARCH_PAGE_TABLE_SYNC_MASK 0
+#endif
+
+/*
+ * There is no default implementation for arch_sync_kernel_mappings(); generic
+ * code relies on the compiler to optimize the calls out when
+ * ARCH_PAGE_TABLE_SYNC_MASK is 0.
+ */
+void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
 
 /*
  * Lowlevel-APIs (not for driver use!)
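A hedged sketch of how an architecture might opt in to the mechanism above; the mask value is plausible (PGTBL_PMD_MODIFIED belongs to the same page-table modification tracking scheme), but the body and the helper it calls are purely illustrative, not taken from any real arch.

```c
/* In an arch header: request syncs for PMD-level modifications. */
#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PMD_MODIFIED

/*
 * In arch code: propagate kernel page-table changes; the helper
 * called here is hypothetical.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	example_sync_top_level_tables(start, end);
}
```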
@@ -129,22 +162,28 @@
 extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
 extern struct vm_struct *get_vm_area_caller(unsigned long size,
 					unsigned long flags, const void *caller);
-extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
-					unsigned long start, unsigned long end);
 extern struct vm_struct *__get_vm_area_caller(unsigned long size,
 					unsigned long flags,
 					unsigned long start, unsigned long end,
 					const void *caller);
+void free_vm_area(struct vm_struct *area);
 extern struct vm_struct *remove_vm_area(const void *addr);
 extern struct vm_struct *find_vm_area(const void *addr);
 
-extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
-			struct page **pages);
 #ifdef CONFIG_MMU
 extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
 				    pgprot_t prot, struct page **pages);
+int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
+		struct page **pages);
 extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
+static inline void set_vm_flush_reset_perms(void *addr)
+{
+	struct vm_struct *vm = find_vm_area(addr);
+
+	if (vm)
+		vm->flags |= VM_FLUSH_RESET_PERMS;
+}
 #else
 static inline int
 map_kernel_range_noflush(unsigned long start, unsigned long size,
@@ -152,19 +191,16 @@
 {
 	return size >> PAGE_SHIFT;
 }
+#define map_kernel_range map_kernel_range_noflush
 static inline void
 unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
 {
 }
-static inline void
-unmap_kernel_range(unsigned long addr, unsigned long size)
+#define unmap_kernel_range unmap_kernel_range_noflush
+static inline void set_vm_flush_reset_perms(void *addr)
 {
 }
 #endif
-
-/* Allocate/destroy a 'vmalloc' VM area. */
-extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
-extern void free_vm_area(struct vm_struct *area);
 
 /* for /dev/kmem */
 extern long vread(char *buf, char *addr, unsigned long count);
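set_vm_flush_reset_perms(), added in the hunk above, tags an allocation so that vfree() resets the direct map and flushes the TLB (see the VM_FLUSH_RESET_PERMS flag). A usage sketch with a hypothetical allocator for soon-to-be-executable memory; note that such areas must not be freed from atomic context.

```c
/*
 * Hypothetical: allocate memory destined to become executable and
 * make sure permissions are reset when it is eventually vfree()d.
 */
static void *exec_area_alloc_example(unsigned long size)
{
	void *p = vmalloc(size);

	if (!p)
		return NULL;
	set_vm_flush_reset_perms(p);
	/* a real caller would set_memory_x() the range before use */
	return p;
}
```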
@@ -209,4 +245,7 @@
 int register_vmap_purge_notifier(struct notifier_block *nb);
 int unregister_vmap_purge_notifier(struct notifier_block *nb);
 
+/* Allow disabling lazy TLB flushing */
+extern bool lazy_vunmap_enable;
+
 #endif /* _LINUX_VMALLOC_H */
---|