  .. |   .. |
     |    1 | +// SPDX-License-Identifier: GPL-2.0-only
   1 |    2 | /*
   2 |    3 |  * linux/kernel/power/swap.c
   3 |    4 |  *
  .. |   .. |
   7 |    8 |  * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
   8 |    9 |  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
   9 |   10 |  * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
  10 |      | - *
  11 |      | - * This file is released under the GPLv2.
  12 |      | - *
  13 |   11 |  */
  14 |   12 |
  15 |   13 | #define pr_fmt(fmt) "PM: " fmt
  .. |   .. |
 228 |  226 |         atomic_t count;
 229 |  227 |         wait_queue_head_t wait;
 230 |  228 |         blk_status_t error;
     |  229 | +        struct blk_plug plug;
 231 |  230 | };
 232 |  231 |
 233 |  232 | static void hib_init_batch(struct hib_bio_batch *hb)
  .. |   .. |
 235 |  234 |         atomic_set(&hb->count, 0);
 236 |  235 |         init_waitqueue_head(&hb->wait);
 237 |  236 |         hb->error = BLK_STS_OK;
     |  237 | +        blk_start_plug(&hb->plug);
     |  238 | +}
     |  239 | +
     |  240 | +static void hib_finish_batch(struct hib_bio_batch *hb)
     |  241 | +{
     |  242 | +        blk_finish_plug(&hb->plug);
 238 |  243 | }
 239 |  244 |
 240 |  245 | static void hib_end_io(struct bio *bio)
  .. |   .. |
 296 |  301 |
 297 |  302 | static int hib_wait_io(struct hib_bio_batch *hb)
 298 |  303 | {
     |  304 | +        /*
     |  305 | +         * We are relying on the behavior of blk_plug that a thread with
     |  306 | +         * a plug will flush the plug list before sleeping.
     |  307 | +         */
 299 |  308 |         wait_event(hb->wait, atomic_read(&hb->count) == 0);
 300 |  309 |         return blk_status_to_errno(hb->error);
 301 |  310 | }
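The plug embedded in struct hib_bio_batch follows the standard block-layer plugging pattern: bios submitted while a task holds a plug are queued on a per-task list and dispatched in batches, and the block layer also flushes that list when the plugging task goes to sleep, which is what the new comment in hib_wait_io() relies on. A minimal sketch of the same lifecycle, using hypothetical names (struct my_batch and the my_batch_*() helpers) rather than the swap.c code itself:

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/wait.h>

struct my_batch {
	atomic_t		count;	/* bios still in flight */
	wait_queue_head_t	wait;	/* woken when count drops to zero */
	struct blk_plug		plug;	/* per-task bio plug */
};

static void my_batch_init(struct my_batch *b)
{
	atomic_set(&b->count, 0);
	init_waitqueue_head(&b->wait);
	blk_start_plug(&b->plug);	/* subsequent bios queue up on this task's plug list */
}

static void my_batch_wait(struct my_batch *b)
{
	/* Going to sleep flushes the plug list, so the queued bios are issued. */
	wait_event(b->wait, atomic_read(&b->count) == 0);
}

static void my_batch_finish(struct my_batch *b)
{
	blk_finish_plug(&b->plug);	/* pairs with blk_start_plug() in my_batch_init() */
}

Keeping blk_finish_plug() in a separate helper rather than in the wait function lets each caller release the plug on its own cleanup path, which is why the hunks below add hib_finish_batch() after hib_wait_io() calls and under the out_clean: labels.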
  .. |   .. |
 337 |  346 | {
 338 |  347 |         int res;
 339 |  348 |
 340 |      | -        res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
 341 |      | -                        &hib_resume_bdev);
     |  349 | +        if (swsusp_resume_device)
     |  350 | +                res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
     |  351 | +        else
     |  352 | +                res = find_first_swap(&swsusp_resume_device);
 342 |  353 |         if (res < 0)
 343 |  354 |                 return res;
 344 |      | -
 345 |  355 |         root_swap = res;
 346 |      | -        res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
 347 |      | -        if (res)
 348 |      | -                return res;
     |  356 | +
     |  357 | +        hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, FMODE_WRITE,
     |  358 | +                        NULL);
     |  359 | +        if (IS_ERR(hib_resume_bdev))
     |  360 | +                return PTR_ERR(hib_resume_bdev);
 349 |  361 |
 350 |  362 |         res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
 351 |  363 |         if (res < 0)
 352 |  364 |                 blkdev_put(hib_resume_bdev, FMODE_WRITE);
 353 |  365 |
 354 |      | -        /*
 355 |      | -         * Update the resume device to the one actually used,
 356 |      | -         * so the test_resume mode can use it in case it is
 357 |      | -         * invoked from hibernate() to test the snapshot.
 358 |      | -         */
 359 |      | -        swsusp_resume_device = hib_resume_bdev->bd_dev;
 360 |  366 |         return res;
 361 |  367 | }
 362 |  368 |
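The reworked check resolves the resume device to a dev_t first, via swap_type_of() when the user specified a device and find_first_swap() otherwise, and only then opens it. Unlike the removed blkdev_get() call, blkdev_get_by_dev() returns the struct block_device itself and signals failure through an ERR_PTR, so the caller tests it with IS_ERR()/PTR_ERR(). A hedged sketch of that open-and-set-blocksize pattern, with the hypothetical helper open_resume_bdev() standing in for the inline code above:

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/fs.h>

static struct block_device *open_resume_bdev(dev_t devt)
{
	struct block_device *bdev;
	int err;

	bdev = blkdev_get_by_dev(devt, FMODE_WRITE, NULL);
	if (IS_ERR(bdev))
		return bdev;			/* e.g. an ERR_PTR(-ENXIO) for a bogus devt */

	err = set_blocksize(bdev, PAGE_SIZE);
	if (err < 0) {
		blkdev_put(bdev, FMODE_WRITE);	/* drop the reference taken above */
		return ERR_PTR(err);
	}
	return bdev;
}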
  .. |   .. |
 563 |  569 |                 nr_pages++;
 564 |  570 |         }
 565 |  571 |         err2 = hib_wait_io(&hb);
     |  572 | +        hib_finish_batch(&hb);
 566 |  573 |         stop = ktime_get();
 567 |  574 |         if (!ret)
 568 |  575 |                 ret = err2;
  .. |   .. |
 856 |  863 |         pr_info("Image saving done\n");
 857 |  864 |         swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 858 |  865 | out_clean:
     |  866 | +        hib_finish_batch(&hb);
 859 |  867 |         if (crc) {
 860 |  868 |                 if (crc->thr)
 861 |  869 |                         kthread_stop(crc->thr);
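Putting hib_finish_batch() right under the out_clean: label means the plug is released on every exit from the writer, whether the image was written successfully or an early failure jumped straight to cleanup. A sketch of that goto-unwind shape, reusing the hypothetical my_batch helpers from the earlier sketch plus a hypothetical do_io() submitter:

static int do_io(struct my_batch *b);	/* hypothetical: submits bios under b->plug */

static int write_something(void)
{
	struct my_batch b;
	int ret;

	my_batch_init(&b);
	ret = do_io(&b);
	if (ret)
		goto out_clean;
	my_batch_wait(&b);	/* wait for the submitted bios to complete */
out_clean:
	my_batch_finish(&b);	/* runs on both the success and the error path */
	return ret;
}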
  .. |   .. |
 976 |  984 |         last = handle->maps = NULL;
 977 |  985 |         offset = swsusp_header->image;
 978 |  986 |         while (offset) {
 979 |      | -                tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
     |  987 | +                tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
 980 |  988 |                 if (!tmp) {
 981 |  989 |                         release_swap_reader(handle);
 982 |  990 |                         return -ENOMEM;
 983 |  991 |                 }
 984 |      | -                memset(tmp, 0, sizeof(*tmp));
 985 |  992 |                 if (!handle->maps)
 986 |  993 |                         handle->maps = tmp;
 987 |  994 |                 if (last)
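kzalloc(size, flags) behaves like kmalloc(size, flags) followed by zeroing the allocation, so the separate memset() can go away; error handling is unchanged because kzalloc() also returns NULL on failure. Purely as an illustration of the transform (this is not additional swap.c code):

/* before: allocate, then zero by hand */
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
	return -ENOMEM;
memset(tmp, 0, sizeof(*tmp));

/* after: kzalloc() hands back already-zeroed memory (or NULL) */
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
	return -ENOMEM;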
  .. |   .. |
1087 | 1094 |                 nr_pages++;
1088 | 1095 |         }
1089 | 1096 |         err2 = hib_wait_io(&hb);
     | 1097 | +        hib_finish_batch(&hb);
1090 | 1098 |         stop = ktime_get();
1091 | 1099 |         if (!ret)
1092 | 1100 |                 ret = err2;
  .. |   .. |
1450 | 1458 |         }
1451 | 1459 |         swsusp_show_speed(start, stop, nr_to_read, "Read");
1452 | 1460 | out_clean:
     | 1461 | +        hib_finish_batch(&hb);
1453 | 1462 |         for (i = 0; i < ring_size; i++)
1454 | 1463 |                 free_page((unsigned long)page[i]);
1455 | 1464 |         if (crc) {
  .. |   .. |
1594 | 1603 | }
1595 | 1604 | #endif
1596 | 1605 |
1597 |      | -static int swsusp_header_init(void)
     | 1606 | +static int __init swsusp_header_init(void)
1598 | 1607 | {
1599 | 1608 |         swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
1600 | 1609 |         if (!swsusp_header)
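The __init annotation places swsusp_header_init() in the kernel's init section, which is freed once boot completes, so the page it allocates stays around but the function's code does not. That is only safe because the function runs exactly once during startup (swap.c registers it through an initcall). A minimal sketch of the pattern with a hypothetical my_setup():

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/init.h>

static void *my_page;

/*
 * Runs once at boot; afterwards the code is discarded, so nothing may
 * call an __init function from a non-init path later on.
 */
static int __init my_setup(void)
{
	my_page = (void *)__get_free_page(GFP_KERNEL);
	return my_page ? 0 : -ENOMEM;
}
core_initcall(my_setup);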