...
  * will still exist later on and mmget_not_zero() has to be used before
  * accessing it.
  *
- * This is a preferred way to to pin @mm for a longer/unbounded amount
+ * This is a preferred way to pin @mm for a longer/unbounded amount
  * of time.
  *
  * Use mmdrop() to release the reference acquired by mmgrab().
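
As a reader aid: a minimal sketch of the two-level refcounting protocol described above, assuming the standard mm APIs (mmgrab()/mmdrop() for mm_count, mmget_not_zero()/mmput() for mm_users); example_pin_mm() is a made-up name.

```c
/* Illustrative only: pin an mm across an unbounded delay.
 * mmgrab() keeps the mm_struct itself alive, but the address space
 * may still be torn down, so mmget_not_zero() must succeed before
 * page tables or VMAs are touched.
 */
static void example_pin_mm(struct mm_struct *mm)
{
	mmgrab(mm);			/* pin mm_count: struct stays valid */

	/* ... arbitrary delay; mm_users may have dropped to zero ... */

	if (mmget_not_zero(mm)) {	/* re-acquire mm_users before use */
		/* the address space is safe to access here */
		mmput(mm);
	}
	mmdrop(mm);			/* release the mmgrab() reference */
}
```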
...
 		__mmdrop(mm);
 }
 
-#ifdef CONFIG_PREEMPT_RT_BASE
+#ifdef CONFIG_PREEMPT_RT
 extern void __mmdrop_delayed(struct rcu_head *rhp);
 static inline void mmdrop_delayed(struct mm_struct *mm)
 {
...
 	call_rcu(&mm->delayed_drop, __mmdrop_delayed);
 }
 #else
-# define mmdrop_delayed(mm)	mmdrop(mm)
+# define mmdrop_delayed(mm)	mmdrop(mm)
 #endif
-
-void mmdrop(struct mm_struct *mm);
-
-/*
- * This has to be called after a get_task_mm()/mmget_not_zero()
- * followed by taking the mmap_sem for writing before modifying the
- * vmas or anything the coredump pretends not to change from under it.
- *
- * It also has to be called when mmgrab() is used in the context of
- * the process, but then the mm_count refcount is transferred outside
- * the context of the process to run down_write() on that pinned mm.
- *
- * NOTE: find_extend_vma() called from GUP context is the only place
- * that can modify the "mm" (notably the vm_start/end) under mmap_sem
- * for reading and outside the context of the process, so it is also
- * the only case that holds the mmap_sem for reading that must call
- * this function. Generally if the mmap_sem is hold for reading
- * there's no need of this check after get_task_mm()/mmget_not_zero().
- *
- * This function can be obsoleted and the check can be removed, after
- * the coredump code will hold the mmap_sem for writing before
- * invoking the ->core_dump methods.
- */
-static inline bool mmget_still_valid(struct mm_struct *mm)
-{
-	return likely(!mm->core_state);
-}
 
 /**
  * mmget() - Pin the address space associated with a &struct mm_struct.
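
Worth spelling out why the PREEMPT_RT branch above exists: on RT, spinlocks sleep, so the final __mmdrop() cannot run from atomic contexts such as the last reference drop in the scheduler, and mmdrop_delayed() defers the free through RCU instead. A hedged sketch of a call site; example_put_lazy_mm() is a made-up name.

```c
/* Illustrative only: release the last mm_count reference from a
 * context that must not sleep. With CONFIG_PREEMPT_RT the free is
 * deferred via call_rcu(); otherwise this is exactly mmdrop().
 */
static void example_put_lazy_mm(struct mm_struct *mm)
{
	mmdrop_delayed(mm);
}
```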
...
 #endif /* CONFIG_MEMCG */
 
 #ifdef CONFIG_MMU
+#ifndef arch_get_mmap_end
+#define arch_get_mmap_end(addr)	(TASK_SIZE)
+#endif
+
+#ifndef arch_get_mmap_base
+#define arch_get_mmap_base(addr, base) (base)
+#endif
+
 extern void arch_pick_mmap_layout(struct mm_struct *mm,
 				  struct rlimit *rlim_stack);
 extern unsigned long
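
The #ifndef guards make these defaults overridable: an architecture that defines arch_get_mmap_end()/arch_get_mmap_base() before this point keeps its own versions. A sketch of what such an override can look like, loosely modeled on arm64's large-VA handling; DEFAULT_MAP_WINDOW and the exact policy are assumptions here, not part of this patch.

```c
/* Hypothetical arch header (illustrative): pick the mmap limit from
 * the hint address, only extending past the default window when the
 * caller explicitly asked for a high address.
 */
#define arch_get_mmap_end(addr) \
	((addr) > DEFAULT_MAP_WINDOW ? TASK_SIZE : DEFAULT_MAP_WINDOW)

#define arch_get_mmap_base(addr, base) \
	((addr) > DEFAULT_MAP_WINDOW ? \
		(base) + TASK_SIZE - DEFAULT_MAP_WINDOW : (base))
```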
...
  */
 static inline gfp_t current_gfp_context(gfp_t flags)
 {
-	/*
-	 * NOIO implies both NOIO and NOFS and it is a weaker context
-	 * so always make sure it makes precendence
-	 */
-	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
-		flags &= ~(__GFP_IO | __GFP_FS);
-	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
-		flags &= ~__GFP_FS;
+	unsigned int pflags = READ_ONCE(current->flags);
+
+	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
+		/*
+		 * NOIO implies both NOIO and NOFS and it is a weaker context
+		 * so always make sure it makes precedence
+		 */
+		if (pflags & PF_MEMALLOC_NOIO)
+			flags &= ~(__GFP_IO | __GFP_FS);
+		else if (pflags & PF_MEMALLOC_NOFS)
+			flags &= ~__GFP_FS;
+	}
 	return flags;
 }
 
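
Since current_gfp_context() is where the scoped NOIO/NOFS flags actually take effect, a short usage sketch may help; example_alloc() is a made-up helper, while current_gfp_context() and kmalloc() are the real APIs.

```c
#include <linux/slab.h>

/* Illustrative only: an allocation that honours the caller's scoped
 * GFP context. If the caller is inside memalloc_nofs_save(), the
 * __GFP_FS bit is filtered out here even though GFP_KERNEL was passed.
 */
static void *example_alloc(size_t size)
{
	gfp_t gfp = current_gfp_context(GFP_KERNEL);

	return kmalloc(size, gfp);
}
```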
...
  * @flags: Flags to restore.
  *
  * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
- * Always make sure that that the given flags is the return value from the
+ * Always make sure that the given flags is the return value from the
  * pairing memalloc_noio_save call.
  */
 static inline void memalloc_noio_restore(unsigned int flags)
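
A minimal pairing sketch for the save/restore discipline the comment requires; example_noio_section() is a made-up name.

```c
/* Illustrative only: everything between save and restore runs in an
 * implicit GFP_NOIO scope, so allocations cannot recurse into I/O.
 */
static void example_noio_section(void)
{
	unsigned int flags = memalloc_noio_save();

	/* reclaim-sensitive work, e.g. on a block device's writeout path */

	memalloc_noio_restore(flags);	/* pass back exactly what save returned */
}
```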
...
  * @flags: Flags to restore.
  *
  * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
- * Always make sure that that the given flags is the return value from the
+ * Always make sure that the given flags is the return value from the
  * pairing memalloc_nofs_save call.
  */
 static inline void memalloc_nofs_restore(unsigned int flags)
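
The pairing rule matters most when scopes nest; a hedged sketch, with example_nofs_nested() a made-up name.

```c
/* Illustrative only: each restore gets the value returned by its own
 * save, so the outer NOFS scope survives the inner section ending.
 */
static void example_nofs_nested(void)
{
	unsigned int outer = memalloc_nofs_save();
	unsigned int inner = memalloc_nofs_save();

	memalloc_nofs_restore(inner);	/* still NOFS: outer scope active */
	memalloc_nofs_restore(outer);	/* NOFS scope fully ended */
}
```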
...
 	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
 }
 
+#ifdef CONFIG_CMA
+static inline unsigned int memalloc_nocma_save(void)
+{
+	unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;
+
+	current->flags |= PF_MEMALLOC_NOCMA;
+	return flags;
+}
+
+static inline void memalloc_nocma_restore(unsigned int flags)
+{
+	current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
+}
+#else
+static inline unsigned int memalloc_nocma_save(void)
+{
+	return 0;
+}
+
+static inline void memalloc_nocma_restore(unsigned int flags)
+{
+}
+#endif
+
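
A usage sketch for the new nocma scope; the long-term-pinning motivation is the usual one cited for keeping allocations off CMA, and example_nocma_alloc() is a made-up name.

```c
/* Illustrative only: temporarily forbid CMA pageblocks while
 * allocating a movable page that will be pinned for a long time.
 */
static struct page *example_nocma_alloc(void)
{
	unsigned int flags = memalloc_nocma_save();
	struct page *page = alloc_page(GFP_HIGHUSER_MOVABLE);

	memalloc_nocma_restore(flags);
	return page;
}
```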
 #ifdef CONFIG_MEMCG
+DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
 /**
- * memalloc_use_memcg - Starts the remote memcg charging scope.
+ * set_active_memcg - Starts the remote memcg charging scope.
  * @memcg: memcg to charge.
  *
  * This function marks the beginning of the remote memcg charging scope. All the
  * __GFP_ACCOUNT allocations till the end of the scope will be charged to the
  * given memcg.
  *
- * NOTE: This function is not nesting safe.
+ * NOTE: This function can nest. Users must save the return value and
+ * reset the previous value after their own charging scope is over.
  */
-static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
 {
-	WARN_ON_ONCE(current->active_memcg);
-	current->active_memcg = memcg;
-}
+	struct mem_cgroup *old;
 
-/**
- * memalloc_unuse_memcg - Ends the remote memcg charging scope.
- *
- * This function marks the end of the remote memcg charging scope started by
- * memalloc_use_memcg().
- */
-static inline void memalloc_unuse_memcg(void)
-{
-	current->active_memcg = NULL;
+	if (in_interrupt()) {
+		old = this_cpu_read(int_active_memcg);
+		this_cpu_write(int_active_memcg, memcg);
+	} else {
+		old = current->active_memcg;
+		current->active_memcg = memcg;
+	}
+
+	return old;
 }
 #else
-static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
 {
-}
-
-static inline void memalloc_unuse_memcg(void)
-{
+	return NULL;
 }
 #endif
 
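
The nesting protocol in the NOTE is easy to get wrong, so a sketch of a correct caller; example_remote_charge() is a made-up name, while set_active_memcg() and GFP_KERNEL_ACCOUNT are real.

```c
/* Illustrative only: charge a __GFP_ACCOUNT allocation to a remote
 * memcg, saving and restoring the previous scope as the NOTE requires.
 */
static void *example_remote_charge(struct mem_cgroup *memcg, size_t size)
{
	struct mem_cgroup *old = set_active_memcg(memcg);
	void *p = kmalloc(size, GFP_KERNEL_ACCOUNT);

	set_active_memcg(old);
	return p;
}
```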
...
 	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
 	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
 	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
+	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY		= (1U << 6),
+	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ			= (1U << 7),
 };
 
 enum {
 	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
+	MEMBARRIER_FLAG_RSEQ		= (1U << 1),
 };
 
 #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
...
 	sync_core_before_usermode();
 }
 
-static inline void membarrier_execve(struct task_struct *t)
-{
-	atomic_set(&t->mm->membarrier_state, 0);
-}
+extern void membarrier_exec_mmap(struct mm_struct *mm);
+
 #else
 #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
 static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
...
 {
 }
 #endif
-static inline void membarrier_execve(struct task_struct *t)
+static inline void membarrier_exec_mmap(struct mm_struct *mm)
 {
 }
 static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)