2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/mm/page_poison.c
@@ -8,34 +8,16 @@
 #include <linux/ratelimit.h>
 #include <linux/kasan.h>
 
-static bool want_page_poisoning __read_mostly;
+bool _page_poisoning_enabled_early;
+EXPORT_SYMBOL(_page_poisoning_enabled_early);
+DEFINE_STATIC_KEY_FALSE(_page_poisoning_enabled);
+EXPORT_SYMBOL(_page_poisoning_enabled);
 
 static int __init early_page_poison_param(char *buf)
 {
-        if (!buf)
-                return -EINVAL;
-        return strtobool(buf, &want_page_poisoning);
+        return kstrtobool(buf, &_page_poisoning_enabled_early);
 }
 early_param("page_poison", early_page_poison_param);
-
-/**
- * page_poisoning_enabled - check if page poisoning is enabled
- *
- * Return true if page poisoning is enabled, or false if not.
- */
-bool page_poisoning_enabled(void)
-{
-        /*
-         * Assumes that debug_pagealloc_enabled is set before
-         * free_all_bootmem.
-         * Page poisoning is debug page alloc for some arches. If
-         * either of those options are enabled, enable poisoning.
-         */
-        return (want_page_poisoning ||
-                (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
-                debug_pagealloc_enabled()));
-}
-EXPORT_SYMBOL_GPL(page_poisoning_enabled);
 
 static void poison_page(struct page *page)
 {
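
In the hunk above, the single want_page_poisoning bool is split into two pieces of state: the page_poison= early_param still writes a plain bool (_page_poisoning_enabled_early), because static keys cannot be flipped that early in boot, while the static key (_page_poisoning_enabled) carries the decision on fast paths. A minimal sketch of how the key could later be switched on from the early bool once jump labels are usable; the init hook name is an assumption and the real init-time change is not part of this file's hunks:

    /* Sketch, not from this diff: flip the static key once during boot,
     * after jump-label patching works, based on the early_param value. */
    static void __init page_poison_init_sketch(void)
    {
            if (_page_poisoning_enabled_early)
                    static_branch_enable(&_page_poisoning_enabled);
    }
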
@@ -43,12 +25,12 @@
 
         /* KASAN still think the page is in-use, so skip it. */
         kasan_disable_current();
-        memset(addr, PAGE_POISON, PAGE_SIZE);
+        memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
         kasan_enable_current();
         kunmap_atomic(addr);
 }
 
-static void poison_pages(struct page *page, int n)
+void __kernel_poison_pages(struct page *page, int n)
 {
         int i;
 
@@ -68,9 +50,6 @@
         static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
         unsigned char *start;
         unsigned char *end;
-
-        if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
-                return;
 
         start = memchr_inv(mem, PAGE_POISON, bytes);
         if (!start)
@@ -98,32 +77,23 @@
         void *addr;
 
         addr = kmap_atomic(page);
+        kasan_disable_current();
         /*
          * Page poisoning when enabled poisons each and every page
          * that is freed to buddy. Thus no extra check is done to
-         * see if a page was posioned.
+         * see if a page was poisoned.
          */
-        check_poison_mem(addr, PAGE_SIZE);
+        check_poison_mem(kasan_reset_tag(addr), PAGE_SIZE);
+        kasan_enable_current();
         kunmap_atomic(addr);
 }
 
-static void unpoison_pages(struct page *page, int n)
+void __kernel_unpoison_pages(struct page *page, int n)
 {
         int i;
 
         for (i = 0; i < n; i++)
                 unpoison_page(page + i);
-}
-
-void kernel_poison_pages(struct page *page, int numpages, int enable)
-{
-        if (!page_poisoning_enabled())
-                return;
-
-        if (enable)
-                unpoison_pages(page, numpages);
-        else
-                poison_pages(page, numpages);
 }
 
 #ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
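
With the runtime check removed from the now double-underscored poison/unpoison routines, callers are expected to gate them on the static key themselves. A rough sketch of what the caller-side wrappers could look like, assuming a counterpart header change that is not shown in this diff (only the exported symbol names come from the hunks above; the wrapper names and comments are assumptions):

    /* Sketch of header-side wrappers, not part of this file's diff. */
    extern bool _page_poisoning_enabled_early;
    DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
    extern void __kernel_poison_pages(struct page *page, int n);
    extern void __kernel_unpoison_pages(struct page *page, int n);

    /* Early boot: the key may not be patched yet, so report the bool. */
    static inline bool page_poisoning_enabled(void)
    {
            return _page_poisoning_enabled_early;
    }

    /* Hot paths: a static branch compiles to a patched jump, so a
     * disabled feature costs close to nothing per page. */
    static inline bool page_poisoning_enabled_static(void)
    {
            return static_branch_unlikely(&_page_poisoning_enabled);
    }

    static inline void kernel_poison_pages(struct page *page, int n)
    {
            if (page_poisoning_enabled_static())
                    __kernel_poison_pages(page, n);
    }

    static inline void kernel_unpoison_pages(struct page *page, int n)
    {
            if (page_poisoning_enabled_static())
                    __kernel_unpoison_pages(page, n);
    }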