@@ -23,7 +23,7 @@
  * will still exist later on and mmget_not_zero() has to be used before
  * accessing it.
  *
- * This is a preferred way to to pin @mm for a longer/unbounded amount
+ * This is a preferred way to pin @mm for a longer/unbounded amount
  * of time.
  *
  * Use mmdrop() to release the reference acquired by mmgrab().
@@ -47,44 +47,6 @@
  */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
-}
-
-#ifdef CONFIG_PREEMPT_RT_BASE
-extern void __mmdrop_delayed(struct rcu_head *rhp);
-static inline void mmdrop_delayed(struct mm_struct *mm)
-{
-	if (atomic_dec_and_test(&mm->mm_count))
-		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
-}
-#else
-# define mmdrop_delayed(mm)	mmdrop(mm)
-#endif
-
-void mmdrop(struct mm_struct *mm);
-
-/*
- * This has to be called after a get_task_mm()/mmget_not_zero()
- * followed by taking the mmap_sem for writing before modifying the
- * vmas or anything the coredump pretends not to change from under it.
- *
- * It also has to be called when mmgrab() is used in the context of
- * the process, but then the mm_count refcount is transferred outside
- * the context of the process to run down_write() on that pinned mm.
- *
- * NOTE: find_extend_vma() called from GUP context is the only place
- * that can modify the "mm" (notably the vm_start/end) under mmap_sem
- * for reading and outside the context of the process, so it is also
- * the only case that holds the mmap_sem for reading that must call
- * this function. Generally if the mmap_sem is hold for reading
- * there's no need of this check after get_task_mm()/mmget_not_zero().
- *
- * This function can be obsoleted and the check can be removed, after
- * the coredump code will hold the mmap_sem for writing before
- * invoking the ->core_dump methods.
- */
-static inline bool mmget_still_valid(struct mm_struct *mm)
-{
-	return likely(!mm->core_state);
 }
 
 /**
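For context: mmgrab()/mmdrop() pin only the mm_struct itself (mm->mm_count), not the address space, so mmget_not_zero() is still required before touching page tables or VMAs, exactly as the comment above says. A minimal usage sketch follows; struct my_worker and its field are hypothetical and not part of this patch:

/* Hypothetical consumer: keep a long-lived pointer to an mm_struct
 * alive across the owning task's exit.  mmgrab() only guarantees the
 * mm_struct allocation survives; the address space may already be
 * torn down, so mmget_not_zero() must still succeed before access.
 */
static void worker_adopt_mm(struct my_worker *w, struct mm_struct *mm)
{
	mmgrab(mm);			/* pins mm->mm_count */
	w->mm = mm;
}

static void worker_release_mm(struct my_worker *w)
{
	mmdrop(w->mm);			/* may free via __mmdrop() */
	w->mm = NULL;
}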
@@ -144,6 +106,14 @@
 #endif /* CONFIG_MEMCG */
 
 #ifdef CONFIG_MMU
+#ifndef arch_get_mmap_end
+#define arch_get_mmap_end(addr)	(TASK_SIZE)
+#endif
+
+#ifndef arch_get_mmap_base
+#define arch_get_mmap_base(addr, base)	(base)
+#endif
+
 extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
 extern unsigned long
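These two hooks let an architecture clamp or extend the address range the generic mmap code searches, per request. A sketch of an override, loosely modeled on the arm64 approach where a hint above the default window opts into the extended VA space (DEFAULT_MAP_WINDOW is arch-defined):

/* Hypothetical override in an arch header: only honour the full
 * TASK_SIZE span when the caller's hint explicitly asks for an
 * address above the default map window.
 */
#define arch_get_mmap_end(addr) \
	((addr) > DEFAULT_MAP_WINDOW ? TASK_SIZE : DEFAULT_MAP_WINDOW)

#define arch_get_mmap_base(addr, base) \
	((addr) > DEFAULT_MAP_WINDOW ? \
		(base) + TASK_SIZE - DEFAULT_MAP_WINDOW : (base))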
@@ -192,14 +162,18 @@
  */
 static inline gfp_t current_gfp_context(gfp_t flags)
 {
-	/*
-	 * NOIO implies both NOIO and NOFS and it is a weaker context
-	 * so always make sure it makes precendence
-	 */
-	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
-		flags &= ~(__GFP_IO | __GFP_FS);
-	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
-		flags &= ~__GFP_FS;
+	unsigned int pflags = READ_ONCE(current->flags);
+
+	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
+		/*
+		 * NOIO implies both NOIO and NOFS and it is a weaker context
+		 * so always make sure it makes precedence
+		 */
+		if (pflags & PF_MEMALLOC_NOIO)
+			flags &= ~(__GFP_IO | __GFP_FS);
+		else if (pflags & PF_MEMALLOC_NOFS)
+			flags &= ~__GFP_FS;
+	}
 	return flags;
 }
 
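current_gfp_context() is what makes the scoped memalloc_noio_save()/memalloc_nofs_save() APIs work: the allocator filters every gfp mask through it, so nested allocations need no explicit GFP_NOIO/GFP_NOFS. A sketch of a caller; my_device and do_resume_allocations() are hypothetical names:

/* Hypothetical driver resume path: every allocation between save and
 * restore, including ones buried in library calls, is implicitly
 * GFP_NOIO because current_gfp_context() strips __GFP_IO/__GFP_FS.
 */
static int my_device_resume(struct my_device *dev)
{
	unsigned int noio_flags = memalloc_noio_save();
	int ret;

	ret = do_resume_allocations(dev);	/* may allocate */
	memalloc_noio_restore(noio_flags);
	return ret;
}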
@@ -238,7 +212,7 @@
  * @flags: Flags to restore.
  *
  * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
- * Always make sure that that the given flags is the return value from the
+ * Always make sure that the given flags is the return value from the
  * pairing memalloc_noio_save call.
  */
 static inline void memalloc_noio_restore(unsigned int flags)
@@ -269,7 +243,7 @@
  * @flags: Flags to restore.
  *
  * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
- * Always make sure that that the given flags is the return value from the
+ * Always make sure that the given flags is the return value from the
  * pairing memalloc_nofs_save call.
  */
 static inline void memalloc_nofs_restore(unsigned int flags)
@@ -289,40 +263,63 @@
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
 }
 
+#ifdef CONFIG_CMA
+static inline unsigned int memalloc_nocma_save(void)
+{
+	unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;
+
+	current->flags |= PF_MEMALLOC_NOCMA;
+	return flags;
+}
+
+static inline void memalloc_nocma_restore(unsigned int flags)
+{
+	current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
+}
+#else
+static inline unsigned int memalloc_nocma_save(void)
+{
+	return 0;
+}
+
+static inline void memalloc_nocma_restore(unsigned int flags)
+{
+}
+#endif
+
 #ifdef CONFIG_MEMCG
+DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
 /**
- * memalloc_use_memcg - Starts the remote memcg charging scope.
+ * set_active_memcg - Starts the remote memcg charging scope.
  * @memcg: memcg to charge.
  *
  * This function marks the beginning of the remote memcg charging scope. All the
  * __GFP_ACCOUNT allocations till the end of the scope will be charged to the
  * given memcg.
  *
- * NOTE: This function is not nesting safe.
+ * NOTE: This function can nest. Users must save the return value and
+ * reset the previous value after their own charging scope is over.
  */
-static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
 {
-	WARN_ON_ONCE(current->active_memcg);
-	current->active_memcg = memcg;
-}
+	struct mem_cgroup *old;
 
-/**
- * memalloc_unuse_memcg - Ends the remote memcg charging scope.
- *
- * This function marks the end of the remote memcg charging scope started by
- * memalloc_use_memcg().
- */
-static inline void memalloc_unuse_memcg(void)
-{
-	current->active_memcg = NULL;
+	if (in_interrupt()) {
+		old = this_cpu_read(int_active_memcg);
+		this_cpu_write(int_active_memcg, memcg);
+	} else {
+		old = current->active_memcg;
+		current->active_memcg = memcg;
+	}
+
+	return old;
 }
 #else
-static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
 {
-}
-
-static inline void memalloc_unuse_memcg(void)
-{
+	return NULL;
 }
 #endif
 
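The new memalloc_nocma_save()/memalloc_nocma_restore() pair follows the same save/restore convention as the noio/nofs scopes. A sketch of the intended use; the caller is hypothetical, while get_user_pages_fast() is the existing GUP API:

/* Hypothetical long-term pinning path: keep allocations out of CMA
 * areas inside the scope so movable CMA pages are not pinned.
 */
static int pin_user_buffer(unsigned long start, int nr_pages,
			   struct page **pages)
{
	unsigned int flags = memalloc_nocma_save();
	int ret;

	ret = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
	memalloc_nocma_restore(flags);
	return ret;
}

And the nesting-safe pattern for the reworked set_active_memcg(): callers stash the returned scope and restore it afterwards, instead of unconditionally writing NULL as memalloc_unuse_memcg() did. A hedged sketch (alloc_charged is a hypothetical helper):

/* Hypothetical example: charge one __GFP_ACCOUNT allocation to @memcg,
 * then restore whatever charging scope (possibly another remote memcg)
 * was active before.
 */
static void *alloc_charged(struct mem_cgroup *memcg, size_t size)
{
	struct mem_cgroup *old_memcg = set_active_memcg(memcg);
	void *p = kmalloc(size, GFP_KERNEL_ACCOUNT);

	set_active_memcg(old_memcg);
	return p;
}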
@@ -334,10 +331,13 @@
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
+	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY		= (1U << 6),
+	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ			= (1U << 7),
 };
 
 enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
+	MEMBARRIER_FLAG_RSEQ		= (1U << 1),
 };
 
 #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
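These kernel-internal state bits back the MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ membarrier(2) command which, like the other expedited commands, must be registered before use. A hedged userspace sketch; the raw syscall takes a cpu_id argument in this series, and the local wrapper below is just for illustration:

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

static int membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	/* Register once per process, then fence: interrupted rseq
	 * critical sections on all threads of this process restart. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ, 0, 0))
		return 1;
	return membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ, 0, 0);
}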
@@ -354,10 +354,8 @@
	sync_core_before_usermode();
 }
 
-static inline void membarrier_execve(struct task_struct *t)
-{
-	atomic_set(&t->mm->membarrier_state, 0);
-}
+extern void membarrier_exec_mmap(struct mm_struct *mm);
+
 #else
 #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
 static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
@@ -366,7 +364,7 @@
 {
 }
 #endif
-static inline void membarrier_execve(struct task_struct *t)
+static inline void membarrier_exec_mmap(struct mm_struct *mm)
 {
 }
 static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
---|