2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/kernel/power/snapshot.c
+++ b/kernel/kernel/power/snapshot.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/kernel/power/snapshot.c
  *
@@ -5,12 +6,9 @@
  *
  * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
- *
- * This file is released under the GPLv2.
- *
  */
 
-#define pr_fmt(fmt) "PM: " fmt
+#define pr_fmt(fmt) "PM: hibernation: " fmt
 
 #include <linux/version.h>
 #include <linux/module.h>
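A note on the pr_fmt change above: pr_fmt() is prepended to the format string by the pr_*() logging macros at compile time, so every message in this file picks up the new prefix without touching individual call sites. For example, the pr_info("Preallocating image memory\n") call later in this diff now logs as

    PM: hibernation: Preallocating image memory

which makes console output attributable to the hibernation core rather than to other PM code sharing the old "PM: " prefix.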
@@ -23,7 +21,7 @@
 #include <linux/pm.h>
 #include <linux/device.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/nmi.h>
 #include <linux/syscalls.h>
 #include <linux/console.h>
@@ -36,7 +34,6 @@
 
 #include <linux/uaccess.h>
 #include <asm/mmu_context.h>
-#include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/io.h>
 
@@ -105,7 +102,7 @@
 
 void __init hibernate_image_size_init(void)
 {
-	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
+	image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
 }
 
 /*
@@ -738,7 +735,7 @@
 	 */
 
 	/*
-	 * If the zone we wish to scan is the the current zone and the
+	 * If the zone we wish to scan is the current zone and the
 	 * pfn falls into the current node then we do not need to walk
 	 * the tree.
 	 */
@@ -947,8 +944,7 @@
  * Register a range of page frames the contents of which should not be saved
  * during hibernation (to be used in the early initialization code).
  */
-void __init __register_nosave_region(unsigned long start_pfn,
-				     unsigned long end_pfn, int use_kmalloc)
+void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct nosave_region *region;
 
@@ -964,14 +960,12 @@
 			goto Report;
 		}
 	}
-	if (use_kmalloc) {
-		/* During init, this shouldn't fail */
-		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
-		BUG_ON(!region);
-	} else {
-		/* This allocation cannot fail */
-		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
-	}
+	/* This allocation cannot fail */
+	region = memblock_alloc(sizeof(struct nosave_region),
+				SMP_CACHE_BYTES);
+	if (!region)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(struct nosave_region));
 	region->start_pfn = start_pfn;
 	region->end_pfn = end_pfn;
 	list_add_tail(&region->list, &nosave_regions);
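The replacement above is the usual early-boot allocation idiom: once every caller is __init code, memblock_alloc() (which returns zeroed memory, or NULL on failure) can serve both former paths, and boot-time code conventionally panics rather than limping on. A minimal self-contained sketch of the same pattern; foo_table and foo_init() are illustrative names, not from this file:

	#include <linux/kernel.h>
	#include <linux/memblock.h>
	#include <linux/cache.h>
	#include <linux/init.h>

	struct foo {
		unsigned long a, b;
	};

	static struct foo *foo_table;

	void __init foo_init(void)
	{
		/* Boot-time allocation from memblock; memory comes back zeroed. */
		foo_table = memblock_alloc(sizeof(*foo_table), SMP_CACHE_BYTES);
		if (!foo_table)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*foo_table));
	}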
@@ -1143,7 +1137,15 @@
 	pr_debug("Basic memory bitmaps freed\n");
 }
 
-void clear_free_pages(void)
+static void clear_or_poison_free_page(struct page *page)
+{
+	if (page_poisoning_enabled_static())
+		__kernel_poison_pages(page, 1);
+	else if (want_init_on_free())
+		clear_highpage(page);
+}
+
+void clear_or_poison_free_pages(void)
 {
 	struct memory_bitmap *bm = free_pages_map;
 	unsigned long pfn;
@@ -1151,12 +1153,12 @@
 	if (WARN_ON(!(free_pages_map)))
 		return;
 
-	if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) {
+	if (page_poisoning_enabled() || want_init_on_free()) {
 		memory_bm_position_reset(bm);
 		pfn = memory_bm_next_pfn(bm);
 		while (pfn != BM_END_OF_MAP) {
 			if (pfn_valid(pfn))
-				clear_highpage(pfn_to_page(pfn));
+				clear_or_poison_free_page(pfn_to_page(pfn));
 
 			pfn = memory_bm_next_pfn(bm);
 		}
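The behavioral point of this pair of hunks: when page poisoning is active, a page freed back to the buddy allocator must contain the poison pattern rather than zeroes, or the unpoison sanity check on the first allocation after resume would report corruption; plain zeroing is only correct for the want_init_on_free() case. The outer condition runs once, so it can afford the non-static-key page_poisoning_enabled(), while the per-page helper uses page_poisoning_enabled_static(), the static-branch variant intended for hot paths.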
@@ -1221,14 +1223,16 @@
 	if (!pfn_valid(pfn))
 		return NULL;
 
-	page = pfn_to_page(pfn);
-	if (page_zone(page) != zone)
+	page = pfn_to_online_page(pfn);
+	if (!page || page_zone(page) != zone)
 		return NULL;
 
 	BUG_ON(!PageHighMem(page));
 
-	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
-	    PageReserved(page))
+	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
+		return NULL;
+
+	if (PageReserved(page) || PageOffline(page))
 		return NULL;
 
 	if (page_is_guard(page))
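This hunk and the next apply the same hardening to both page scanners (the highmem variant here, per the BUG_ON(!PageHighMem()), and its lowmem counterpart below): pfn_to_online_page() returns NULL for a pfn whose memory section is offline, and the new PageOffline() check additionally excludes pages that sit in an online section but are logically offline, such as pages inflated by a memory balloon. Touching either kind could fault or snapshot stale data, so both are treated as unsaveable.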
@@ -1283,13 +1287,16 @@
 	if (!pfn_valid(pfn))
 		return NULL;
 
-	page = pfn_to_page(pfn);
-	if (page_zone(page) != zone)
+	page = pfn_to_online_page(pfn);
+	if (!page || page_zone(page) != zone)
 		return NULL;
 
 	BUG_ON(PageHighMem(page));
 
 	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
+		return NULL;
+
+	if (PageOffline(page))
 		return NULL;
 
 	if (PageReserved(page)
@@ -1340,8 +1347,9 @@
  * safe_copy_page - Copy a page in a safe way.
  *
  * Check if the page we are going to copy is marked as present in the kernel
- * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
- * and in that case kernel_page_present() always returns 'true').
+ * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
+ * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
+ * always returns 'true'.
  */
 static void safe_copy_page(void *dst, struct page *s_page)
 {
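The function body is outside this hunk; in upstream kernels of roughly the same vintage it follows the shape sketched below. This is a sketch for orientation only, assuming the hibernate_map_page()/hibernate_unmap_page() helpers and do_copy_page() from the same file:

	static void safe_copy_page(void *dst, struct page *s_page)
	{
		if (kernel_page_present(s_page)) {
			do_copy_page(dst, page_address(s_page));
		} else {
			/*
			 * The page is not mapped in the direct map (debug
			 * pagealloc or a set_direct_map arch), so map it,
			 * copy it, and unmap it again.
			 */
			hibernate_map_page(s_page);
			do_copy_page(dst, page_address(s_page));
			hibernate_unmap_page(s_page);
		}
	}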
@@ -1558,9 +1566,7 @@
  */
 static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
 {
-	x *= multiplier;
-	do_div(x, base);
-	return (unsigned long)x;
+	return div64_u64(x * multiplier, base);
 }
 
 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
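Besides being shorter, the new form differs in divisor width: do_div() divides a u64 by a 32-bit divisor, so a u64 base was silently truncated, whereas div64_u64() performs a full 64-by-64 division. A small illustrative function (values chosen to show the difference, not taken from this file):

	#include <linux/math64.h>

	static unsigned long fraction_example(void)
	{
		u64 x = 1000, multiplier = 3;
		u64 base = 0x100000001ULL;	/* does not fit in 32 bits */

		/*
		 * do_div() would see only the low 32 bits of 'base' (== 1)
		 * and return 3000; div64_u64() divides by the full 64-bit
		 * value and returns 0.
		 */
		return div64_u64(x * multiplier, base);
	}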
@@ -1658,7 +1664,7 @@
 {
 	unsigned long size;
 
-	size = global_node_page_state(NR_SLAB_RECLAIMABLE)
+	size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
 		+ global_node_page_state(NR_ACTIVE_ANON)
 		+ global_node_page_state(NR_INACTIVE_ANON)
 		+ global_node_page_state(NR_ACTIVE_FILE)
@@ -1675,12 +1681,12 @@
  * hibernation for allocations made while saving the image and for device
  * drivers, in case they need to allocate memory from their hibernation
  * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
- * estimate) and reserverd_size divided by PAGE_SIZE (which is tunable through
+ * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
  * /sys/power/reserved_size, respectively). To make this happen, we compute the
  * total number of available page frames and allocate at least
  *
- * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
- *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
+ * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
+ *  - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
  *
  * of them, which corresponds to the maximum size of a hibernation image.
  *
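To make the corrected formula concrete, a worked example with assumed round numbers (not from the source): with 2,000,000 available page frames, 1,000 metadata pages, PAGES_FOR_IO = 1024, and reserved_size set to 1 MiB (256 pages of 4 KiB), the target is

    (2000000 - 1024 - 1000) / 2 - 2 * DIV_ROUND_UP(1048576, 4096)
        = 1997976 / 2 - 2 * 256
        = 998988 - 512
        = 998476 page frames

i.e. the image may occupy just under half of usable memory. The sign fix brings the comment in line with the max_size computation visible in a later hunk, max_size = (count - (size + PAGES_FOR_IO)) / 2 - ...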
@@ -1697,16 +1703,20 @@
 	ktime_t start, stop;
 	int error;
 
-	pr_info("Preallocating image memory... ");
+	pr_info("Preallocating image memory\n");
 	start = ktime_get();
 
 	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
-	if (error)
+	if (error) {
+		pr_err("Cannot allocate original bitmap\n");
 		goto err_out;
+	}
 
 	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
-	if (error)
+	if (error) {
+		pr_err("Cannot allocate copy bitmap\n");
 		goto err_out;
+	}
 
 	alloc_normal = 0;
 	alloc_highmem = 0;
@@ -1733,9 +1743,6 @@
 	avail_normal = count;
 	count += highmem;
 	count -= totalreserve_pages;
-
-	/* Add number of pages required for page keys (s390 only). */
-	size += page_key_additional_pages(saveable);
 
 	/* Compute the maximum number of saveable pages to leave in memory. */
 	max_size = (count - (size + PAGES_FOR_IO)) / 2
@@ -1796,8 +1803,11 @@
 		alloc -= pages;
 		pages += pages_highmem;
 		pages_highmem = preallocate_image_highmem(alloc);
-		if (pages_highmem < alloc)
+		if (pages_highmem < alloc) {
+			pr_err("Image allocation is %lu pages short\n",
+			       alloc - pages_highmem);
 			goto err_out;
+		}
 		pages += pages_highmem;
 		/*
 		 * size is the desired number of saveable pages to leave in
@@ -1828,13 +1838,12 @@
 
  out:
 	stop = ktime_get();
-	pr_cont("done (allocated %lu pages)\n", pages);
+	pr_info("Allocated %lu pages for snapshot\n", pages);
 	swsusp_show_speed(start, stop, pages, "Allocated");
 
 	return 0;
 
  err_out:
-	pr_cont("\n");
 	swsusp_free();
 	return -ENOMEM;
 }
@@ -1968,7 +1977,7 @@
 {
 	unsigned int nr_pages, nr_highmem;
 
-	pr_info("Creating hibernation image:\n");
+	pr_info("Creating image:\n");
 
 	drain_local_pages(NULL);
 	nr_pages = count_data_pages();
@@ -2002,7 +2011,7 @@
 	nr_copy_pages = nr_pages;
 	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
 
-	pr_info("Hibernation image created (%d pages copied)\n", nr_pages);
+	pr_info("Image created (%d pages copied)\n", nr_pages);
 
 	return 0;
 }
@@ -2015,7 +2024,7 @@
 	return 0;
 }
 
-static char *check_image_kernel(struct swsusp_info *info)
+static const char *check_image_kernel(struct swsusp_info *info)
 {
 	if (info->version_code != LINUX_VERSION_CODE)
 		return "kernel version";
@@ -2063,8 +2072,6 @@
 		buf[j] = memory_bm_next_pfn(bm);
 		if (unlikely(buf[j] == BM_END_OF_MAP))
 			break;
-		/* Save page key for data page (s390 only). */
-		page_key_read(buf + j);
 	}
 }
 
@@ -2170,7 +2177,7 @@
 
 static int check_header(struct swsusp_info *info)
 {
-	char *reason;
+	const char *reason;
 
 	reason = check_image_kernel(info);
 	if (!reason && info->num_physpages != get_num_physpages())
@@ -2213,9 +2220,6 @@
 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
 		if (unlikely(buf[j] == BM_END_OF_MAP))
 			break;
-
-		/* Extract and buffer page key for data page (s390 only). */
-		page_key_memorize(buf + j);
 
 		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
 			memory_bm_set_bit(bm, buf[j]);
@@ -2611,11 +2615,6 @@
 		if (error)
 			return error;
 
-		/* Allocate buffer for page keys. */
-		error = page_key_alloc(nr_copy_pages);
-		if (error)
-			return error;
-
 		hibernate_restore_protection_begin();
 	} else if (handle->cur <= nr_meta_pages + 1) {
 		error = unpack_orig_pfns(buffer, &copy_bm);
@@ -2637,8 +2636,6 @@
 		}
 	} else {
 		copy_last_highmem_page();
-		/* Restore page key for data page (s390 only). */
-		page_key_write(handle->buffer);
 		hibernate_restore_protect_page(handle->buffer);
 		handle->buffer = get_buffer(&orig_bm, &ca);
 		if (IS_ERR(handle->buffer))
@@ -2661,9 +2658,6 @@
 void snapshot_write_finalize(struct snapshot_handle *handle)
 {
 	copy_last_highmem_page();
-	/* Restore page key for data page (s390 only). */
-	page_key_write(handle->buffer);
-	page_key_free();
 	hibernate_restore_protect_page(handle->buffer);
 	/* Do that only if we have loaded the image entirely */
 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {