.. | ..
10 | 10 | typedef struct page *new_page_t(struct page *page, unsigned long private);
11 | 11 | typedef void free_page_t(struct page *page, unsigned long private);
12 | 12 |
 | 13 | +struct migration_target_control;
 | 14 | +
13 | 15 | /*
14 | 16 |  * Return values from address_space_operations.migratepage():
15 | 17 |  * - negative errno on page migration failure;
.. | ..
29 | 31 | };
30 | 32 |
31 | 33 | /* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
32 | | -extern char *migrate_reason_names[MR_TYPES];
33 | | -
34 | | -static inline struct page *new_page_nodemask(struct page *page,
35 | | -                int preferred_nid, nodemask_t *nodemask)
36 | | -{
37 | | -        gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
38 | | -        unsigned int order = 0;
39 | | -        struct page *new_page = NULL;
40 | | -
41 | | -        if (PageHuge(page))
42 | | -                return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
43 | | -                                preferred_nid, nodemask);
44 | | -
45 | | -        if (PageTransHuge(page)) {
46 | | -                gfp_mask |= GFP_TRANSHUGE;
47 | | -                order = HPAGE_PMD_ORDER;
48 | | -        }
49 | | -
50 | | -        if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
51 | | -                gfp_mask |= __GFP_HIGHMEM;
52 | | -
53 | | -        new_page = __alloc_pages_nodemask(gfp_mask, order,
54 | | -                        preferred_nid, nodemask);
55 | | -
56 | | -        if (new_page && PageTransHuge(new_page))
57 | | -                prep_transhuge_page(new_page);
58 | | -
59 | | -        return new_page;
60 | | -}
 | 34 | +extern const char *migrate_reason_names[MR_TYPES];
61 | 35 |
62 | 36 | #ifdef CONFIG_MIGRATION
63 | 37 |
.. | ..
67 | 41 |                 enum migrate_mode mode);
68 | 42 | extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
69 | 43 |                 unsigned long private, enum migrate_mode mode, int reason);
 | 44 | +extern struct page *alloc_migration_target(struct page *page, unsigned long private);
70 | 45 | extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
71 | 46 | extern void putback_movable_page(struct page *page);
72 | 47 |
73 | | -extern int migrate_prep(void);
74 | | -extern int migrate_prep_local(void);
75 | 48 | extern void migrate_page_states(struct page *newpage, struct page *page);
76 | 49 | extern void migrate_page_copy(struct page *newpage, struct page *page);
77 | 50 | extern int migrate_huge_page_move_mapping(struct address_space *mapping,
78 | 51 |                 struct page *newpage, struct page *page);
79 | 52 | extern int migrate_page_move_mapping(struct address_space *mapping,
80 | | -                struct page *newpage, struct page *page,
81 | | -                struct buffer_head *head, enum migrate_mode mode,
82 | | -                int extra_count);
 | 53 | +                struct page *newpage, struct page *page, int extra_count);
83 | 54 | #else
84 | 55 |
85 | 56 | static inline void putback_movable_pages(struct list_head *l) {}
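This hunk retires the open-coded new_page_nodemask() helper in favor of alloc_migration_target(), which receives its allocation policy through the opaque `private` argument: a pointer to the newly forward-declared struct migration_target_control. Below is a minimal sketch of how a caller might drive migrate_pages() with the new allocator. The struct layout shown (nid/nmask/gfp_mask) follows mm/internal.h from the same series and is an assumption here, as is the wrapper function itself.

```c
#include <linux/migrate.h>
#include <linux/gfp.h>
#include <linux/nodemask.h>

/*
 * Assumed layout: struct migration_target_control is private to mm/
 * (mm/internal.h at the time of this change), so this definition is
 * illustrative, not part of the public header.
 */
struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

/* Hypothetical wrapper: migrate already-isolated pages to one node. */
static int migrate_list_to_node(struct list_head *pagelist, int nid)
{
	struct migration_target_control mtc = {
		.nid = nid,
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	/* alloc_migration_target() decodes 'private' back into the control. */
	return migrate_pages(pagelist, alloc_migration_target, NULL,
			     (unsigned long)&mtc, MIGRATE_SYNC,
			     MR_MEMORY_HOTPLUG);
}
```

Compared with new_page_nodemask(), the gfp, hugetlb, and THP special cases now live in one place behind alloc_migration_target() instead of being repeated at each call site.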
.. | ..
87 | 58 |                 free_page_t free, unsigned long private, enum migrate_mode mode,
88 | 59 |                 int reason)
89 | 60 |         { return -ENOSYS; }
 | 61 | +static inline struct page *alloc_migration_target(struct page *page,
 | 62 | +                unsigned long private)
 | 63 | +        { return NULL; }
90 | 64 | static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
91 | 65 |         { return -EBUSY; }
92 | | -
93 | | -static inline int migrate_prep(void) { return -ENOSYS; }
94 | | -static inline int migrate_prep_local(void) { return -ENOSYS; }
95 | 66 |
96 | 67 | static inline void migrate_page_states(struct page *newpage, struct page *page)
97 | 68 | {
.. | ..
126 | 97 | #ifdef CONFIG_NUMA_BALANCING
127 | 98 | extern bool pmd_trans_migrating(pmd_t pmd);
128 | 99 | extern int migrate_misplaced_page(struct page *page,
129 | | -                struct vm_area_struct *vma, int node);
 | 100 | +                struct vm_fault *vmf, int node);
130 | 101 | #else
131 | 102 | static inline bool pmd_trans_migrating(pmd_t pmd)
132 | 103 | {
133 | 104 |         return false;
134 | 105 | }
135 | 106 | static inline int migrate_misplaced_page(struct page *page,
136 | | -                struct vm_area_struct *vma, int node)
 | 107 | +                struct vm_fault *vmf, int node)
137 | 108 | {
138 | 109 |         return -EAGAIN; /* can't migrate now */
139 | 110 | }
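migrate_misplaced_page() now takes the struct vm_fault instead of a bare VMA, so the callee can see the full fault context (address, PTE, flags). A hedged sketch of the new call-site shape, loosely modeled on a NUMA hinting fault handler; the wrapper and its return convention are hypothetical.

```c
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/numa.h>

/* Hypothetical fragment of a NUMA hinting fault path. */
static int numa_fault_migrate_sketch(struct vm_fault *vmf,
				     struct page *page, int target_nid)
{
	/* A nonzero return means the page was moved to target_nid. */
	if (migrate_misplaced_page(page, vmf, target_nid))
		return target_nid;

	return NUMA_NO_NODE;	/* migration refused or failed */
}
```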
.. | ..
168 | 139 | #define MIGRATE_PFN_MIGRATE     (1UL << 1)
169 | 140 | #define MIGRATE_PFN_LOCKED      (1UL << 2)
170 | 141 | #define MIGRATE_PFN_WRITE       (1UL << 3)
171 | | -#define MIGRATE_PFN_DEVICE      (1UL << 4)
172 | | -#define MIGRATE_PFN_ERROR       (1UL << 5)
173 | 142 | #define MIGRATE_PFN_SHIFT       6
174 | 143 |
175 | 144 | static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
.. | ..
184 | 153 |         return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
185 | 154 | }
186 | 155 |
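MIGRATE_PFN_DEVICE and MIGRATE_PFN_ERROR are dropped, but the encoding itself is unchanged: a pfn shifted up by MIGRATE_PFN_SHIFT with the flag bits packed underneath. A short sketch of the round trip a driver performs when publishing a destination page; `migrate`, `i`, and `dpage` are illustrative, and struct migrate_vma is the descriptor added further down in this patch.

```c
#include <linux/migrate.h>
#include <linux/pagemap.h>

/* Illustrative only: encode a destination page into a dst[] entry. */
static void fill_dst_entry_sketch(struct migrate_vma *migrate,
				  unsigned long i, struct page *dpage)
{
	lock_page(dpage);	/* dst pages must be locked before encoding */
	migrate->dst[i] = migrate_pfn(page_to_pfn(dpage)) |
			  MIGRATE_PFN_LOCKED;

	/* Decoding recovers the same page, or NULL without MIGRATE_PFN_VALID. */
	WARN_ON(migrate_pfn_to_page(migrate->dst[i]) != dpage);
}
```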
187 | | -/*
188 | | - * struct migrate_vma_ops - migrate operation callback
189 | | - *
190 | | - * @alloc_and_copy: alloc destination memory and copy source memory to it
191 | | - * @finalize_and_map: allow caller to map the successfully migrated pages
192 | | - *
193 | | - *
194 | | - * The alloc_and_copy() callback happens once all source pages have been locked,
195 | | - * unmapped and checked (checked whether pinned or not). All pages that can be
196 | | - * migrated will have an entry in the src array set with the pfn value of the
197 | | - * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set (other
198 | | - * flags might be set but should be ignored by the callback).
199 | | - *
200 | | - * The alloc_and_copy() callback can then allocate destination memory and copy
201 | | - * source memory to it for all those entries (ie with MIGRATE_PFN_VALID and
202 | | - * MIGRATE_PFN_MIGRATE flag set). Once these are allocated and copied, the
203 | | - * callback must update each corresponding entry in the dst array with the pfn
204 | | - * value of the destination page and with the MIGRATE_PFN_VALID and
205 | | - * MIGRATE_PFN_LOCKED flags set (destination pages must have their struct pages
206 | | - * locked, via lock_page()).
207 | | - *
208 | | - * At this point the alloc_and_copy() callback is done and returns.
209 | | - *
210 | | - * Note that the callback does not have to migrate all the pages that are
211 | | - * marked with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration
212 | | - * from device memory to system memory (ie the MIGRATE_PFN_DEVICE flag is also
213 | | - * set in the src array entry). If the device driver cannot migrate a device
214 | | - * page back to system memory, then it must set the corresponding dst array
215 | | - * entry to MIGRATE_PFN_ERROR. This will trigger a SIGBUS if CPU tries to
216 | | - * access any of the virtual addresses originally backed by this page. Because
217 | | - * a SIGBUS is such a severe result for the userspace process, the device
218 | | - * driver should avoid setting MIGRATE_PFN_ERROR unless it is really in an
219 | | - * unrecoverable state.
220 | | - *
221 | | - * For empty entry inside CPU page table (pte_none() or pmd_none() is true) we
222 | | - * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus
223 | | - * allowing device driver to allocate device memory for those unback virtual
224 | | - * address. For this the device driver simply have to allocate device memory
225 | | - * and properly set the destination entry like for regular migration. Note that
226 | | - * this can still fails and thus inside the device driver must check if the
227 | | - * migration was successful for those entry inside the finalize_and_map()
228 | | - * callback just like for regular migration.
229 | | - *
230 | | - * THE alloc_and_copy() CALLBACK MUST NOT CHANGE ANY OF THE SRC ARRAY ENTRIES
231 | | - * OR BAD THINGS WILL HAPPEN !
232 | | - *
233 | | - *
234 | | - * The finalize_and_map() callback happens after struct page migration from
235 | | - * source to destination (destination struct pages are the struct pages for the
236 | | - * memory allocated by the alloc_and_copy() callback). Migration can fail, and
237 | | - * thus the finalize_and_map() allows the driver to inspect which pages were
238 | | - * successfully migrated, and which were not. Successfully migrated pages will
239 | | - * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
240 | | - *
241 | | - * It is safe to update device page table from within the finalize_and_map()
242 | | - * callback because both destination and source page are still locked, and the
243 | | - * mmap_sem is held in read mode (hence no one can unmap the range being
244 | | - * migrated).
245 | | - *
246 | | - * Once callback is done cleaning up things and updating its page table (if it
247 | | - * chose to do so, this is not an obligation) then it returns. At this point,
248 | | - * the HMM core will finish up the final steps, and the migration is complete.
249 | | - *
250 | | - * THE finalize_and_map() CALLBACK MUST NOT CHANGE ANY OF THE SRC OR DST ARRAY
251 | | - * ENTRIES OR BAD THINGS WILL HAPPEN !
252 | | - */
253 | | -struct migrate_vma_ops {
254 | | -        void (*alloc_and_copy)(struct vm_area_struct *vma,
255 | | -                        const unsigned long *src,
256 | | -                        unsigned long *dst,
257 | | -                        unsigned long start,
258 | | -                        unsigned long end,
259 | | -                        void *private);
260 | | -        void (*finalize_and_map)(struct vm_area_struct *vma,
261 | | -                        const unsigned long *src,
262 | | -                        const unsigned long *dst,
263 | | -                        unsigned long start,
264 | | -                        unsigned long end,
265 | | -                        void *private);
 | 156 | +enum migrate_vma_direction {
 | 157 | +        MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
 | 158 | +        MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
266 | 159 | };
267 | 160 |
268 | | -#if defined(CONFIG_MIGRATE_VMA_HELPER)
269 | | -int migrate_vma(const struct migrate_vma_ops *ops,
270 | | -                struct vm_area_struct *vma,
271 | | -                unsigned long start,
272 | | -                unsigned long end,
273 | | -                unsigned long *src,
274 | | -                unsigned long *dst,
275 | | -                void *private);
276 | | -#else
277 | | -static inline int migrate_vma(const struct migrate_vma_ops *ops,
278 | | -                struct vm_area_struct *vma,
279 | | -                unsigned long start,
280 | | -                unsigned long end,
281 | | -                unsigned long *src,
282 | | -                unsigned long *dst,
283 | | -                void *private)
284 | | -{
285 | | -        return -EINVAL;
286 | | -}
287 | | -#endif /* IS_ENABLED(CONFIG_MIGRATE_VMA_HELPER) */
 | 161 | +struct migrate_vma {
 | 162 | +        struct vm_area_struct *vma;
 | 163 | +        /*
 | 164 | +         * Both src and dst array must be big enough for
 | 165 | +         * (end - start) >> PAGE_SHIFT entries.
 | 166 | +         *
 | 167 | +         * The src array must not be modified by the caller after
 | 168 | +         * migrate_vma_setup(), and the caller must not change the dst
 | 169 | +         * array after migrate_vma_pages() returns.
 | 170 | +         */
 | 171 | +        unsigned long *dst;
 | 172 | +        unsigned long *src;
 | 173 | +        unsigned long cpages;
 | 174 | +        unsigned long npages;
 | 175 | +        unsigned long start;
 | 176 | +        unsigned long end;
 | 177 | +
 | 178 | +        /*
 | 179 | +         * Set to the owner value also stored in page->pgmap->owner for
 | 180 | +         * migrating out of device private memory. The flags also need to
 | 181 | +         * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
 | 182 | +         * The caller should always set this field when using mmu notifier
 | 183 | +         * callbacks to avoid device MMU invalidations for device private
 | 184 | +         * pages that are not being migrated.
 | 185 | +         */
 | 186 | +        void *pgmap_owner;
 | 187 | +        unsigned long flags;
 | 188 | +};
 | 189 | +
 | 190 | +int migrate_vma_setup(struct migrate_vma *args);
 | 191 | +void migrate_vma_pages(struct migrate_vma *migrate);
 | 192 | +void migrate_vma_finalize(struct migrate_vma *migrate);
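With the migrate_vma_ops callbacks gone, a driver now drives migration directly through a three-phase API: migrate_vma_setup() collects, isolates, and unmaps the source pages; the driver allocates destination pages and copies data in between; migrate_vma_pages() and migrate_vma_finalize() then commit or roll back the result. A condensed sketch of that flow follows; the device-side helpers alloc_device_page() and device_copy_page(), the fixed-size arrays, and the wrapper itself are hypothetical.

```c
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#define SKETCH_MAX_PAGES 16	/* assumption: keep the range small */

/* Hypothetical device-side helpers, assumed to exist elsewhere. */
struct page *alloc_device_page(void);
void device_copy_page(struct page *spage, struct page *dpage);

static int migrate_range_to_device_sketch(struct vm_area_struct *vma,
					  unsigned long start,
					  unsigned long end, void *owner)
{
	unsigned long src[SKETCH_MAX_PAGES] = { 0 };
	unsigned long dst[SKETCH_MAX_PAGES] = { 0 };
	struct migrate_vma args = {
		.vma		= vma,
		.src		= src,
		.dst		= dst,
		.start		= start,
		.end		= end,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
		.pgmap_owner	= owner,
	};
	unsigned long i;
	int ret;

	if ((end - start) >> PAGE_SHIFT > SKETCH_MAX_PAGES)
		return -EINVAL;	/* src/dst must cover the whole range */

	/* Phase 1: collect, isolate, and unmap the source pages. */
	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	/* Phase 2: allocate destinations and copy, entry by entry. */
	for (i = 0; i < args.npages; i++) {
		struct page *spage, *dpage;

		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
			continue;	/* this page cannot be migrated */

		dpage = alloc_device_page();
		if (!dpage)
			continue;	/* an empty dst[i] skips the entry */

		spage = migrate_pfn_to_page(args.src[i]);
		if (spage)
			device_copy_page(spage, dpage);

		lock_page(dpage);
		args.dst[i] = migrate_pfn(page_to_pfn(dpage)) |
			      MIGRATE_PFN_LOCKED;
	}

	/* Phase 3: switch the page tables, then unlock and release. */
	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}
```

Per the comment in struct migrate_vma above, src is read-only for the caller once migrate_vma_setup() returns, and dst must not change after migrate_vma_pages(); migrate_vma_finalize() then restores or frees whatever did not migrate.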
288 | 193 |
289 | 194 | #endif /* CONFIG_MIGRATION */
290 | 195 |