@@ -23,7 +23,7 @@
  * will still exist later on and mmget_not_zero() has to be used before
  * accessing it.
  *
- * This is a preferred way to to pin @mm for a longer/unbounded amount
+ * This is a preferred way to pin @mm for a longer/unbounded amount
  * of time.
  *
  * Use mmdrop() to release the reference acquired by mmgrab().
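The comment above describes the mmgrab()/mmdrop() pairing: it pins the mm_struct allocation (mm_count), not the address space. The sketch below is only an illustration of that contract; the helper name and the deferred work are invented, not taken from this patch.

```c
#include <linux/sched/mm.h>

/* Hypothetical helper: keep @mm alive across some deferred bookkeeping.
 * mmgrab() only pins the mm_struct itself; the page tables and VMAs may
 * already be torn down, so mmget_not_zero() would still be required
 * before touching the address space.
 */
static void inspect_mm_later(struct mm_struct *mm)
{
	mmgrab(mm);		/* take an mm_count reference */

	/* ... stash @mm somewhere and come back to it later ... */

	mmdrop(mm);		/* release the reference taken by mmgrab() */
}
```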
@@ -49,32 +49,16 @@
 		__mmdrop(mm);
 }
 
-void mmdrop(struct mm_struct *mm);
-
-/*
- * This has to be called after a get_task_mm()/mmget_not_zero()
- * followed by taking the mmap_sem for writing before modifying the
- * vmas or anything the coredump pretends not to change from under it.
- *
- * It also has to be called when mmgrab() is used in the context of
- * the process, but then the mm_count refcount is transferred outside
- * the context of the process to run down_write() on that pinned mm.
- *
- * NOTE: find_extend_vma() called from GUP context is the only place
- * that can modify the "mm" (notably the vm_start/end) under mmap_sem
- * for reading and outside the context of the process, so it is also
- * the only case that holds the mmap_sem for reading that must call
- * this function. Generally if the mmap_sem is hold for reading
- * there's no need of this check after get_task_mm()/mmget_not_zero().
- *
- * This function can be obsoleted and the check can be removed, after
- * the coredump code will hold the mmap_sem for writing before
- * invoking the ->core_dump methods.
- */
-static inline bool mmget_still_valid(struct mm_struct *mm)
+#ifdef CONFIG_PREEMPT_RT
+extern void __mmdrop_delayed(struct rcu_head *rhp);
+static inline void mmdrop_delayed(struct mm_struct *mm)
 {
-	return likely(!mm->core_state);
+	if (atomic_dec_and_test(&mm->mm_count))
+		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
 }
+#else
+# define mmdrop_delayed(mm)	mmdrop(mm)
+#endif
 
 /**
  * mmget() - Pin the address space associated with a &struct mm_struct.
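The PREEMPT_RT variant above defers the final free through an RCU callback instead of running __mmdrop() directly, which relies on the delayed_drop rcu_head that the call_rcu() line references being added to struct mm_struct elsewhere in the series. A minimal sketch of a caller, assuming a context that must not block on RT kernels (the function name is invented):

```c
#include <linux/sched/mm.h>

/* Hypothetical caller running where blocking is not allowed on PREEMPT_RT
 * (e.g. late in a context switch).  On RT the final __mmdrop() is pushed
 * to an RCU callback; on !RT this compiles to a plain mmdrop().
 */
static void put_lazy_mm(struct mm_struct *mm)
{
	mmdrop_delayed(mm);
}
```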
@@ -133,6 +117,14 @@
 #endif /* CONFIG_MEMCG */
 
 #ifdef CONFIG_MMU
+#ifndef arch_get_mmap_end
+#define arch_get_mmap_end(addr)	(TASK_SIZE)
+#endif
+
+#ifndef arch_get_mmap_base
+#define arch_get_mmap_base(addr, base)	(base)
+#endif
+
 extern void arch_pick_mmap_layout(struct mm_struct *mm,
 				  struct rlimit *rlim_stack);
 extern unsigned long
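These two macros are fallbacks that an architecture may override before the generic mmap layout code picks them up: arch_get_mmap_end() defaults to TASK_SIZE and arch_get_mmap_base() defaults to the base it is handed. The snippet below only sketches how a get_unmapped_area()-style caller might bound its search with them; the helper name and the clamping logic are illustrative, not part of the patch.

```c
#include <linux/sched/mm.h>

/* Illustrative only: derive the search window for a top-down mapping
 * search from the new hooks.  Architectures that do not override the
 * macros get TASK_SIZE and the unmodified mmap base.
 */
static unsigned long mmap_search_limits(struct mm_struct *mm,
					unsigned long addr,
					unsigned long *high)
{
	*high = arch_get_mmap_end(addr);		/* upper bound of user space */
	return arch_get_mmap_base(addr, mm->mmap_base);	/* where the search starts */
}
```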
@@ -181,14 +173,18 @@
  */
 static inline gfp_t current_gfp_context(gfp_t flags)
 {
-	/*
-	 * NOIO implies both NOIO and NOFS and it is a weaker context
-	 * so always make sure it makes precendence
-	 */
-	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
-		flags &= ~(__GFP_IO | __GFP_FS);
-	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
-		flags &= ~__GFP_FS;
+	unsigned int pflags = READ_ONCE(current->flags);
+
+	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
+		/*
+		 * NOIO implies both NOIO and NOFS and it is a weaker context
+		 * so always make sure it makes precedence
+		 */
+		if (pflags & PF_MEMALLOC_NOIO)
+			flags &= ~(__GFP_IO | __GFP_FS);
+		else if (pflags & PF_MEMALLOC_NOFS)
+			flags &= ~__GFP_FS;
+	}
 	return flags;
 }
 
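current_gfp_context() is what turns the task-wide PF_MEMALLOC_NOIO/PF_MEMALLOC_NOFS scope flags into per-allocation GFP restrictions; the rewrite above just snapshots current->flags once with READ_ONCE() before masking. A minimal sketch of the intended interaction, assuming a GFP_KERNEL request issued inside a NOFS scope (the wrapper function is illustrative):

```c
#include <linux/gfp.h>
#include <linux/sched/mm.h>

/* Illustrative: inside a memalloc_nofs_save() scope, current_gfp_context()
 * strips __GFP_FS from the caller-supplied mask, so a GFP_KERNEL request
 * behaves like GFP_NOFS without patching every call site.
 */
static gfp_t effective_gfp_in_nofs_scope(void)
{
	unsigned int nofs = memalloc_nofs_save();
	gfp_t gfp = current_gfp_context(GFP_KERNEL);	/* __GFP_FS cleared here */

	memalloc_nofs_restore(nofs);
	return gfp;
}
```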
@@ -227,7 +223,7 @@
  * @flags: Flags to restore.
  *
  * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
- * Always make sure that that the given flags is the return value from the
+ * Always make sure that the given flags is the return value from the
  * pairing memalloc_noio_save call.
  */
 static inline void memalloc_noio_restore(unsigned int flags)
@@ -258,7 +254,7 @@
  * @flags: Flags to restore.
  *
  * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
- * Always make sure that that the given flags is the return value from the
+ * Always make sure that the given flags is the return value from the
  * pairing memalloc_nofs_save call.
  */
 static inline void memalloc_nofs_restore(unsigned int flags)
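Both restore helpers (NOIO and NOFS alike) expect the exact value returned by the matching save call, which is what lets the scopes nest. A hedged sketch of nested NOFS scopes showing why the saved flags matter:

```c
#include <linux/sched/mm.h>

/* Illustrative nesting: the inner restore hands back the outer scope's
 * state rather than unconditionally clearing PF_MEMALLOC_NOFS.
 */
static void nested_nofs_scopes(void)
{
	unsigned int outer = memalloc_nofs_save();
	{
		unsigned int inner = memalloc_nofs_save();	/* bit already set, remembered in 'inner' */
		/* ... allocations here must not recurse into the FS ... */
		memalloc_nofs_restore(inner);			/* NOFS still in force */
	}
	memalloc_nofs_restore(outer);				/* restriction lifted */
}
```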
@@ -278,40 +274,63 @@
 	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
 }
 
+#ifdef CONFIG_CMA
+static inline unsigned int memalloc_nocma_save(void)
+{
+	unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;
+
+	current->flags |= PF_MEMALLOC_NOCMA;
+	return flags;
+}
+
+static inline void memalloc_nocma_restore(unsigned int flags)
+{
+	current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
+}
+#else
+static inline unsigned int memalloc_nocma_save(void)
+{
+	return 0;
+}
+
+static inline void memalloc_nocma_restore(unsigned int flags)
+{
+}
+#endif
+
 #ifdef CONFIG_MEMCG
+DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
 /**
- * memalloc_use_memcg - Starts the remote memcg charging scope.
+ * set_active_memcg - Starts the remote memcg charging scope.
  * @memcg: memcg to charge.
  *
  * This function marks the beginning of the remote memcg charging scope. All the
  * __GFP_ACCOUNT allocations till the end of the scope will be charged to the
  * given memcg.
  *
- * NOTE: This function is not nesting safe.
+ * NOTE: This function can nest. Users must save the return value and
+ * reset the previous value after their own charging scope is over.
  */
-static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
 {
-	WARN_ON_ONCE(current->active_memcg);
-	current->active_memcg = memcg;
-}
+	struct mem_cgroup *old;
 
-/**
- * memalloc_unuse_memcg - Ends the remote memcg charging scope.
- *
- * This function marks the end of the remote memcg charging scope started by
- * memalloc_use_memcg().
- */
-static inline void memalloc_unuse_memcg(void)
-{
-	current->active_memcg = NULL;
+	if (in_interrupt()) {
+		old = this_cpu_read(int_active_memcg);
+		this_cpu_write(int_active_memcg, memcg);
+	} else {
+		old = current->active_memcg;
+		current->active_memcg = memcg;
+	}
+
+	return old;
 }
 #else
-static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
 {
-}
-
-static inline void memalloc_unuse_memcg(void)
-{
+	return NULL;
 }
 #endif
 
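The new NOTE spells out the nesting contract that replaces the old WARN_ON_ONCE() approach: callers keep the returned pointer and put it back when their charging scope ends. The sketch below shows that pattern; the wrapper name and the choice of kmalloc() are illustrative only. The memalloc_nocma_save()/memalloc_nocma_restore() pair added in the same hunk follows the same save/restore discipline as the other PF_MEMALLOC_* scope helpers.

```c
#include <linux/memcontrol.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Illustrative charging scope: remember the previous active memcg and put
 * it back afterwards so that nested users do not clobber each other.
 */
static void *alloc_charged_to(struct mem_cgroup *memcg, size_t size)
{
	struct mem_cgroup *old = set_active_memcg(memcg);
	void *p = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);

	set_active_memcg(old);		/* restore the previous scope, may be NULL */
	return p;
}
```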
@@ -323,10 +342,13 @@
 	MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3),
 	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4),
 	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5),
+	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = (1U << 6),
+	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = (1U << 7),
 };
 
 enum {
 	MEMBARRIER_FLAG_SYNC_CORE = (1U << 0),
+	MEMBARRIER_FLAG_RSEQ = (1U << 1),
 };
 
 #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
@@ -343,10 +365,8 @@
 	sync_core_before_usermode();
 }
 
-static inline void membarrier_execve(struct task_struct *t)
-{
-	atomic_set(&t->mm->membarrier_state, 0);
-}
+extern void membarrier_exec_mmap(struct mm_struct *mm);
+
 #else
 #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
 static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
@@ -355,7 +375,7 @@
 {
 }
 #endif
-static inline void membarrier_execve(struct task_struct *t)
+static inline void membarrier_exec_mmap(struct mm_struct *mm)
 {
 }
 static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
---|