```diff
@@ -6,7 +6,7 @@
 #ifndef _LINUX_MEMPOLICY_H
 #define _LINUX_MEMPOLICY_H 1
 
-
+#include <linux/sched.h>
 #include <linux/mmzone.h>
 #include <linux/dax.h>
 #include <linux/slab.h>
```
```diff
@@ -28,10 +28,10 @@
  * the process policy is used. Interrupts ignore the memory policy
  * of the current process.
  *
- * Locking policy for interlave:
+ * Locking policy for interleave:
  * In process context there is no locking because only the process accesses
  * its own state. All vma manipulation is somewhat protected by a down_read on
- * mmap_sem.
+ * mmap_lock.
  *
  * Freeing policy:
  * Mempolicy objects are reference counted. A mempolicy will be freed when
```
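The "Freeing policy" note above is the usual get/put refcount pattern. A minimal sketch of how a reader might pin another task's policy before using it, built on the real mpol_get()/mpol_put() helpers from this header; the function name and the exact locking shown are illustrative assumptions, not part of this diff, and the sketch only applies under CONFIG_NUMA:

```c
#include <linux/mempolicy.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task.h>	/* task_lock()/task_unlock() */

/* Hypothetical helper: safely look at another task's mempolicy. */
static void inspect_task_policy(struct task_struct *tsk)
{
	struct mempolicy *pol;

	task_lock(tsk);
	pol = tsk->mempolicy;
	mpol_get(pol);		/* take a reference before dropping task_lock */
	task_unlock(tsk);

	if (pol)
		pr_info("%s[%d] has a non-default mempolicy\n",
			tsk->comm, task_pid_nr(tsk));

	mpol_put(pol);		/* __mpol_put() frees it once the count hits zero */
}
```

Both mpol_get() and mpol_put() tolerate a NULL policy, so the helper works for tasks that simply use the default policy.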
```diff
@@ -152,6 +152,15 @@
 extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
 extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
 				const nodemask_t *mask);
+extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
+
+static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
+{
+	struct mempolicy *mpol = get_task_policy(current);
+
+	return policy_nodemask(gfp, mpol);
+}
+
 extern unsigned int mempolicy_slab_node(void);
 
 extern enum zone_type policy_zone;
```
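The new policy_nodemask_current() helper is why <linux/sched.h> is now pulled in at the top of the header: get_task_policy(current) needs the task_struct definition. A minimal sketch of how an allocation path might use it; the wrapper name is hypothetical, and __alloc_pages_nodemask() is the allocator entry point as it existed around this kernel version:

```c
#include <linux/gfp.h>
#include <linux/mempolicy.h>
#include <linux/nodemask.h>

/*
 * Hypothetical wrapper: allocate pages while honouring the calling
 * task's memory policy. policy_nodemask_current() returns NULL when
 * the policy imposes no node restriction (and always under
 * !CONFIG_NUMA, per the stub added later in this diff), which the
 * allocator treats as "all nodes allowed".
 */
static struct page *alloc_pages_current_policy(gfp_t gfp, unsigned int order)
{
	nodemask_t *nodemask = policy_nodemask_current(gfp);

	return __alloc_pages_nodemask(gfp, order, numa_mem_id(), nodemask);
}
```

Because the !CONFIG_NUMA stub also returns NULL, callers like this need no #ifdef around the lookup.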
```diff
@@ -173,34 +182,7 @@
 extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
 
 /* Check if a vma is migratable */
-static inline bool vma_migratable(struct vm_area_struct *vma)
-{
-	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
-		return false;
-
-	/*
-	 * DAX device mappings require predictable access latency, so avoid
-	 * incurring periodic faults.
-	 */
-	if (vma_is_dax(vma))
-		return false;
-
-#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
-	if (vma->vm_flags & VM_HUGETLB)
-		return false;
-#endif
-
-	/*
-	 * Migration allocates pages in the highest zone. If we cannot
-	 * do so then migration (at least from node to node) is not
-	 * possible.
-	 */
-	if (vma->vm_file &&
-		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
-								< policy_zone)
-		return false;
-	return true;
-}
+extern bool vma_migratable(struct vm_area_struct *vma);
 
 extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
 extern void mpol_put_task_policy(struct task_struct *);
```
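Callers are unaffected by the un-inlining: vma_migratable() keeps its signature and is still the filter applied to a VMA before page migration is attempted, presumably now defined out of line so it can use helpers that would drag heavy includes into this widely included header. A simplified, hypothetical caller-side check in the spirit of the mbind()/queue_pages path; the function name is an illustration, not code from this diff:

```c
#include <linux/mempolicy.h>	/* vma_migratable(), MPOL_MF_* flags */
#include <linux/mm.h>

/*
 * Illustrative only: decide whether a VMA should be queued for
 * migration during an address-range walk.
 */
static bool should_migrate_vma(struct vm_area_struct *vma, unsigned long flags)
{
	/* MPOL_MF_MOVE / MPOL_MF_MOVE_ALL request page migration */
	if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)))
		return false;

	/* VM_IO, VM_PFNMAP, DAX and (on some arches) hugetlb VMAs fail here */
	return vma_migratable(vma);
}
```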
```diff
@@ -308,5 +290,10 @@
 static inline void mpol_put_task_policy(struct task_struct *task)
 {
 }
+
+static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
+{
+	return NULL;
+}
 #endif /* CONFIG_NUMA */
 #endif
```