2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/include/linux/sched/mm.h
@@ -23,7 +23,7 @@
  * will still exist later on and mmget_not_zero() has to be used before
  * accessing it.
  *
- * This is a preferred way to to pin @mm for a longer/unbounded amount
+ * This is a preferred way to pin @mm for a longer/unbounded amount
  * of time.
  *
  * Use mmdrop() to release the reference acquired by mmgrab().
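The comment this hunk touches distinguishes the two reference counts on an
mm: mmgrab()/mmdrop() pin the mm_struct itself (mm_count), while
mmget_not_zero()/mmput() pin the address space (mm_users). A minimal sketch
of the pattern, assuming a hypothetical driver context struct:

/* my_ctx is illustrative only; assumes task->mm is valid at call time. */
static void keep_mm(struct my_ctx *ctx, struct task_struct *task)
{
	mmgrab(task->mm);		/* bumps mm_count */
	ctx->mm = task->mm;
}

static void use_mm_later(struct my_ctx *ctx)
{
	/* The address space may already be gone; revalidate mm_users. */
	if (mmget_not_zero(ctx->mm)) {
		/* ... safe to access the address space here ... */
		mmput(ctx->mm);
	}
	mmdrop(ctx->mm);		/* drops the mmgrab() reference */
}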
@@ -49,7 +49,7 @@
 		__mmdrop(mm);
 }

-#ifdef CONFIG_PREEMPT_RT_BASE
+#ifdef CONFIG_PREEMPT_RT
 extern void __mmdrop_delayed(struct rcu_head *rhp);
 static inline void mmdrop_delayed(struct mm_struct *mm)
 {
@@ -57,35 +57,8 @@
 		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
 }
 #else
-# define mmdrop_delayed(mm)	mmdrop(mm)
+# define mmdrop_delayed(mm)	mmdrop(mm)
 #endif
-
-void mmdrop(struct mm_struct *mm);
-
-/*
- * This has to be called after a get_task_mm()/mmget_not_zero()
- * followed by taking the mmap_sem for writing before modifying the
- * vmas or anything the coredump pretends not to change from under it.
- *
- * It also has to be called when mmgrab() is used in the context of
- * the process, but then the mm_count refcount is transferred outside
- * the context of the process to run down_write() on that pinned mm.
- *
- * NOTE: find_extend_vma() called from GUP context is the only place
- * that can modify the "mm" (notably the vm_start/end) under mmap_sem
- * for reading and outside the context of the process, so it is also
- * the only case that holds the mmap_sem for reading that must call
- * this function. Generally if the mmap_sem is hold for reading
- * there's no need of this check after get_task_mm()/mmget_not_zero().
- *
- * This function can be obsoleted and the check can be removed, after
- * the coredump code will hold the mmap_sem for writing before
- * invoking the ->core_dump methods.
- */
-static inline bool mmget_still_valid(struct mm_struct *mm)
-{
-	return likely(!mm->core_state);
-}

 /**
  * mmget() - Pin the address space associated with a &struct mm_struct.
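The helper removed above guarded against a coredump racing with vma
modifications. Per its own comment, the expected call pattern was
get_task_mm()/mmget_not_zero(), then down_write() on mmap_sem, then the
check; a sketch of that pattern as the comment describes it:

static bool modify_vmas_safely(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	bool done = false;

	if (!mm)
		return false;
	down_write(&mm->mmap_sem);
	if (mmget_still_valid(mm)) {
		/* ... modify vmas here, no coredump in flight ... */
		done = true;
	}
	up_write(&mm->mmap_sem);
	mmput(mm);
	return done;
}

The removal follows the route the comment itself anticipated: once the
coredump code holds mmap_sem for writing, the check becomes unnecessary.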
@@ -144,6 +117,14 @@
 #endif /* CONFIG_MEMCG */

 #ifdef CONFIG_MMU
+#ifndef arch_get_mmap_end
+#define arch_get_mmap_end(addr)	(TASK_SIZE)
+#endif
+
+#ifndef arch_get_mmap_base
+#define arch_get_mmap_base(addr, base)	(base)
+#endif
+
 extern void arch_pick_mmap_layout(struct mm_struct *mm,
 				  struct rlimit *rlim_stack);
 extern unsigned long
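The two new macros give architectures a hook to clamp the range the mmap
layout code searches; generic code keeps the defaults (TASK_SIZE and the
unmodified base) unless an arch overrides them. For illustration, an
arm64-style override for a larger-than-default VA space looks roughly like
this (quoted from memory of arm64's 52-bit VA support, so treat the exact
form as an assumption):

#define arch_get_mmap_end(addr)	((addr > DEFAULT_MAP_WINDOW) ? \
					 TASK_SIZE : DEFAULT_MAP_WINDOW)
#define arch_get_mmap_base(addr, base)	((addr > DEFAULT_MAP_WINDOW) ? \
					 base + TASK_SIZE - DEFAULT_MAP_WINDOW : \
					 base)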
@@ -192,14 +173,18 @@
  */
 static inline gfp_t current_gfp_context(gfp_t flags)
 {
-	/*
-	 * NOIO implies both NOIO and NOFS and it is a weaker context
-	 * so always make sure it makes precendence
-	 */
-	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
-		flags &= ~(__GFP_IO | __GFP_FS);
-	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
-		flags &= ~__GFP_FS;
+	unsigned int pflags = READ_ONCE(current->flags);
+
+	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
+		/*
+		 * NOIO implies both NOIO and NOFS and it is a weaker context
+		 * so always make sure it makes precedence
+		 */
+		if (pflags & PF_MEMALLOC_NOIO)
+			flags &= ~(__GFP_IO | __GFP_FS);
+		else if (pflags & PF_MEMALLOC_NOFS)
+			flags &= ~__GFP_FS;
+	}
 	return flags;
 }

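The rewrite reads current->flags once and short-circuits the common case
where neither scope flag is set. Allocator paths use current_gfp_context()
to filter a caller's gfp mask through the task's scoped constraints; NOIO
takes precedence over NOFS because it is the stronger restriction (it
clears both __GFP_IO and __GFP_FS). A sketch of a hypothetical caller:

static struct page *scoped_alloc_page(gfp_t gfp)
{
	/* Under memalloc_noio_save(), GFP_KERNEL degrades to GFP_NOIO
	 * here; under memalloc_nofs_save(), to GFP_NOFS. */
	return alloc_page(current_gfp_context(gfp));
}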
@@ -238,7 +223,7 @@
  * @flags: Flags to restore.
  *
  * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
- * Always make sure that that the given flags is the return value from the
+ * Always make sure that the given flags is the return value from the
  * pairing memalloc_noio_save call.
  */
 static inline void memalloc_noio_restore(unsigned int flags)
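The save/restore pair must nest strictly: whatever memalloc_noio_save()
returned is what gets restored. A minimal sketch of the scope, with a
hypothetical helper:

static void reclaim_sensitive_path(void)
{
	unsigned int noio_flags = memalloc_noio_save();

	/* All allocations in this scope behave as if GFP_NOIO was
	 * requested, so reclaim cannot recurse into the I/O path. */
	do_work_that_allocates();	/* hypothetical helper */

	memalloc_noio_restore(noio_flags);
}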
@@ -269,7 +254,7 @@
  * @flags: Flags to restore.
  *
  * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
- * Always make sure that that the given flags is the return value from the
+ * Always make sure that the given flags is the return value from the
  * pairing memalloc_nofs_save call.
  */
 static inline void memalloc_nofs_restore(unsigned int flags)
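The NOFS scope follows the same pairing discipline; a filesystem would
typically bracket a transaction with it so reclaim cannot re-enter the
filesystem (sketch, with a hypothetical journal helper):

static void fs_transaction(void)
{
	unsigned int nofs_flags = memalloc_nofs_save();

	/* __GFP_FS is implicitly cleared for every allocation here. */
	journal_do_commit();		/* hypothetical helper */

	memalloc_nofs_restore(nofs_flags);
}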
@@ -289,40 +274,63 @@
 	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
 }

+#ifdef CONFIG_CMA
+static inline unsigned int memalloc_nocma_save(void)
+{
+	unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;
+
+	current->flags |= PF_MEMALLOC_NOCMA;
+	return flags;
+}
+
+static inline void memalloc_nocma_restore(unsigned int flags)
+{
+	current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
+}
+#else
+static inline unsigned int memalloc_nocma_save(void)
+{
+	return 0;
+}
+
+static inline void memalloc_nocma_restore(unsigned int flags)
+{
+}
+#endif
+
 #ifdef CONFIG_MEMCG
+DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
 /**
- * memalloc_use_memcg - Starts the remote memcg charging scope.
+ * set_active_memcg - Starts the remote memcg charging scope.
  * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All the
 * __GFP_ACCOUNT allocations till the end of the scope will be charged to the
 * given memcg.
 *
- * NOTE: This function is not nesting safe.
+ * NOTE: This function can nest. Users must save the return value and
+ * reset the previous value after their own charging scope is over.
 */
-static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
 {
-	WARN_ON_ONCE(current->active_memcg);
-	current->active_memcg = memcg;
-}
+	struct mem_cgroup *old;

-/**
- * memalloc_unuse_memcg - Ends the remote memcg charging scope.
- *
- * This function marks the end of the remote memcg charging scope started by
- * memalloc_use_memcg().
- */
-static inline void memalloc_unuse_memcg(void)
-{
-	current->active_memcg = NULL;
+	if (in_interrupt()) {
+		old = this_cpu_read(int_active_memcg);
+		this_cpu_write(int_active_memcg, memcg);
+	} else {
+		old = current->active_memcg;
+		current->active_memcg = memcg;
+	}
+
+	return old;
 }
 #else
-static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
 {
-}
-
-static inline void memalloc_unuse_memcg(void)
-{
+	return NULL;
 }
 #endif

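memalloc_nocma_save()/memalloc_nocma_restore() follow the same
save-flags/restore-flags pattern as the NOIO and NOFS scopes above, but for
excluding CMA pageblocks from allocation. The memcg change is bigger: the
old memalloc_use_memcg()/memalloc_unuse_memcg() pair could not nest,
whereas set_active_memcg() returns the previous scope so callers can stack,
and it works from interrupt context via the new int_active_memcg per-cpu
slot. The nesting discipline the NOTE requires, as a sketch:

static void charge_to(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old_memcg;

	old_memcg = set_active_memcg(memcg);
	/* __GFP_ACCOUNT allocations here are charged to @memcg, even
	 * if a caller further up set its own active memcg. */
	do_accounted_allocs();		/* hypothetical helper */
	set_active_memcg(old_memcg);	/* put the outer scope back */
}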
@@ -334,10 +342,13 @@
 	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
 	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
 	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
+	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY		= (1U << 6),
+	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ			= (1U << 7),
 };

 enum {
 	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
+	MEMBARRIER_FLAG_RSEQ		= (1U << 1),
 };

 #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
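The two new state bits and MEMBARRIER_FLAG_RSEQ back the rseq fence command
added to membarrier(2) in Linux 5.10. From userspace the feature is used
roughly like this (command names per the uapi header as merged; the third
syscall argument is the cpu_id, ignored without MEMBARRIER_CMD_FLAG_CPU,
so treat the exact calling convention as an assumption):

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Register once, then fence rseq critical sections on all threads. */
static int rseq_fence_all_threads(void)
{
	if (syscall(__NR_membarrier,
		    MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ, 0, 0))
		return -1;
	return syscall(__NR_membarrier,
		       MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ, 0, 0);
}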
@@ -354,10 +365,8 @@
 	sync_core_before_usermode();
 }

-static inline void membarrier_execve(struct task_struct *t)
-{
-	atomic_set(&t->mm->membarrier_state, 0);
-}
+extern void membarrier_exec_mmap(struct mm_struct *mm);
+
 #else
 #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
 static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
@@ -366,7 +375,7 @@
 {
 }
 #endif
-static inline void membarrier_execve(struct task_struct *t)
+static inline void membarrier_exec_mmap(struct mm_struct *mm)
 {
 }
 static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)