 .. |  .. |
    |   1 | +// SPDX-License-Identifier: GPL-2.0-only
  1 |   2 |  /*:
  2 |   3 |   * Hibernate support specific for ARM64
  3 |   4 |   *
 .. |  .. |
 11 |  12 |   * https://patchwork.kernel.org/patch/96442/
 12 |  13 |   *
 13 |  14 |   * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 14 |     | - *
 15 |     | - * License terms: GNU General Public License (GPL) version 2
 16 |  15 |   */
 17 |  16 |  #define pr_fmt(x) "hibernate: " x
 18 |  17 |  #include <linux/cpu.h>
 .. |  .. |
 22 |  21 |  #include <linux/sched.h>
 23 |  22 |  #include <linux/suspend.h>
 24 |  23 |  #include <linux/utsname.h>
 25 |     | -#include <linux/version.h>
 26 |  24 | 
 27 |  25 |  #include <asm/barrier.h>
 28 |  26 |  #include <asm/cacheflush.h>
 .. |  .. |
 32 |  30 |  #include <asm/kexec.h>
 33 |  31 |  #include <asm/memory.h>
 34 |  32 |  #include <asm/mmu_context.h>
    |  33 | +#include <asm/mte.h>
 35 |  34 |  #include <asm/pgalloc.h>
 36 |     | -#include <asm/pgtable.h>
 37 |  35 |  #include <asm/pgtable-hwdef.h>
 38 |  36 |  #include <asm/sections.h>
 39 |  37 |  #include <asm/smp.h>
 .. |  .. |
167 | 165 |  		sleep_cpu = -EINVAL;
168 | 166 |  		return -EINVAL;
169 | 167 |  	}
170 |     | -	if (!cpu_online(sleep_cpu)) {
171 |     | -		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
172 |     | -		ret = cpu_up(sleep_cpu);
173 |     | -		if (ret) {
174 |     | -			pr_err("Failed to bring hibernate-CPU up!\n");
175 |     | -			sleep_cpu = -EINVAL;
176 |     | -			return ret;
177 |     | -		}
    | 168 | +
    | 169 | +	ret = bringup_hibernate_cpu(sleep_cpu);
    | 170 | +	if (ret) {
    | 171 | +		sleep_cpu = -EINVAL;
    | 172 | +		return ret;
178 | 173 |  	}
179 | 174 | 
180 | 175 |  	resume_hdr = *hdr;
 .. |  .. |
183 | 178 |  }
184 | 179 |  EXPORT_SYMBOL(arch_hibernation_header_restore);
185 | 180 | 
    | 181 | +static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
    | 182 | +		       unsigned long dst_addr,
    | 183 | +		       pgprot_t pgprot)
    | 184 | +{
    | 185 | +	pgd_t *pgdp;
    | 186 | +	p4d_t *p4dp;
    | 187 | +	pud_t *pudp;
    | 188 | +	pmd_t *pmdp;
    | 189 | +	pte_t *ptep;
    | 190 | +
    | 191 | +	pgdp = pgd_offset_pgd(trans_pgd, dst_addr);
    | 192 | +	if (pgd_none(READ_ONCE(*pgdp))) {
    | 193 | +		pudp = (void *)get_safe_page(GFP_ATOMIC);
    | 194 | +		if (!pudp)
    | 195 | +			return -ENOMEM;
    | 196 | +		pgd_populate(&init_mm, pgdp, pudp);
    | 197 | +	}
    | 198 | +
    | 199 | +	p4dp = p4d_offset(pgdp, dst_addr);
    | 200 | +	if (p4d_none(READ_ONCE(*p4dp))) {
    | 201 | +		pudp = (void *)get_safe_page(GFP_ATOMIC);
    | 202 | +		if (!pudp)
    | 203 | +			return -ENOMEM;
    | 204 | +		p4d_populate(&init_mm, p4dp, pudp);
    | 205 | +	}
    | 206 | +
    | 207 | +	pudp = pud_offset(p4dp, dst_addr);
    | 208 | +	if (pud_none(READ_ONCE(*pudp))) {
    | 209 | +		pmdp = (void *)get_safe_page(GFP_ATOMIC);
    | 210 | +		if (!pmdp)
    | 211 | +			return -ENOMEM;
    | 212 | +		pud_populate(&init_mm, pudp, pmdp);
    | 213 | +	}
    | 214 | +
    | 215 | +	pmdp = pmd_offset(pudp, dst_addr);
    | 216 | +	if (pmd_none(READ_ONCE(*pmdp))) {
    | 217 | +		ptep = (void *)get_safe_page(GFP_ATOMIC);
    | 218 | +		if (!ptep)
    | 219 | +			return -ENOMEM;
    | 220 | +		pmd_populate_kernel(&init_mm, pmdp, ptep);
    | 221 | +	}
    | 222 | +
    | 223 | +	ptep = pte_offset_kernel(pmdp, dst_addr);
    | 224 | +	set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC));
    | 225 | +
    | 226 | +	return 0;
    | 227 | +}
    | 228 | +
186 | 229 |  /*
187 | 230 |   * Copies length bytes, starting at src_start into an new page,
188 |     | - * perform cache maintentance, then maps it at the specified address low
    | 231 | + * perform cache maintenance, then maps it at the specified address low
189 | 232 |   * address as executable.
190 | 233 |   *
191 | 234 |   * This is used by hibernate to copy the code it needs to execute when
 .. |  .. |
197 | 240 |   */
198 | 241 |  static int create_safe_exec_page(void *src_start, size_t length,
199 | 242 |  				 unsigned long dst_addr,
200 |     | -				 phys_addr_t *phys_dst_addr,
201 |     | -				 void *(*allocator)(gfp_t mask),
202 |     | -				 gfp_t mask)
    | 243 | +				 phys_addr_t *phys_dst_addr)
203 | 244 |  {
204 |     | -	int rc = 0;
    | 245 | +	void *page = (void *)get_safe_page(GFP_ATOMIC);
205 | 246 |  	pgd_t *trans_pgd;
206 |     | -	pgd_t *pgdp;
207 |     | -	pud_t *pudp;
208 |     | -	pmd_t *pmdp;
209 |     | -	pte_t *ptep;
210 |     | -	unsigned long dst = (unsigned long)allocator(mask);
    | 247 | +	int rc;
211 | 248 | 
212 |     | -	if (!dst) {
213 |     | -		rc = -ENOMEM;
214 |     | -		goto out;
215 |     | -	}
    | 249 | +	if (!page)
    | 250 | +		return -ENOMEM;
216 | 251 | 
217 |     | -	memcpy((void *)dst, src_start, length);
218 |     | -	__flush_icache_range(dst, dst + length);
    | 252 | +	memcpy(page, src_start, length);
    | 253 | +	__flush_icache_range((unsigned long)page, (unsigned long)page + length);
219 | 254 | 
220 |     | -	trans_pgd = allocator(mask);
221 |     | -	if (!trans_pgd) {
222 |     | -		rc = -ENOMEM;
223 |     | -		goto out;
224 |     | -	}
    | 255 | +	trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
    | 256 | +	if (!trans_pgd)
    | 257 | +		return -ENOMEM;
225 | 258 | 
226 |     | -	pgdp = pgd_offset_raw(trans_pgd, dst_addr);
227 |     | -	if (pgd_none(READ_ONCE(*pgdp))) {
228 |     | -		pudp = allocator(mask);
229 |     | -		if (!pudp) {
230 |     | -			rc = -ENOMEM;
231 |     | -			goto out;
232 |     | -		}
233 |     | -		pgd_populate(&init_mm, pgdp, pudp);
234 |     | -	}
235 |     | -
236 |     | -	pudp = pud_offset(pgdp, dst_addr);
237 |     | -	if (pud_none(READ_ONCE(*pudp))) {
238 |     | -		pmdp = allocator(mask);
239 |     | -		if (!pmdp) {
240 |     | -			rc = -ENOMEM;
241 |     | -			goto out;
242 |     | -		}
243 |     | -		pud_populate(&init_mm, pudp, pmdp);
244 |     | -	}
245 |     | -
246 |     | -	pmdp = pmd_offset(pudp, dst_addr);
247 |     | -	if (pmd_none(READ_ONCE(*pmdp))) {
248 |     | -		ptep = allocator(mask);
249 |     | -		if (!ptep) {
250 |     | -			rc = -ENOMEM;
251 |     | -			goto out;
252 |     | -		}
253 |     | -		pmd_populate_kernel(&init_mm, pmdp, ptep);
254 |     | -	}
255 |     | -
256 |     | -	ptep = pte_offset_kernel(pmdp, dst_addr);
257 |     | -	set_pte(ptep, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
    | 259 | +	rc = trans_pgd_map_page(trans_pgd, page, dst_addr,
    | 260 | +				PAGE_KERNEL_EXEC);
    | 261 | +	if (rc)
    | 262 | +		return rc;
258 | 263 | 
259 | 264 |  	/*
260 | 265 |  	 * Load our new page tables. A strict BBM approach requires that we
 .. |  .. |
270 | 275 |  	 */
271 | 276 |  	cpu_set_reserved_ttbr0();
272 | 277 |  	local_flush_tlb_all();
273 |     | -	write_sysreg(phys_to_ttbr(virt_to_phys(pgdp)), ttbr0_el1);
    | 278 | +	write_sysreg(phys_to_ttbr(virt_to_phys(trans_pgd)), ttbr0_el1);
274 | 279 |  	isb();
275 | 280 | 
276 |     | -	*phys_dst_addr = virt_to_phys((void *)dst);
    | 281 | +	*phys_dst_addr = virt_to_phys(page);
277 | 282 | 
278 |     | -out:
279 |     | -	return rc;
    | 283 | +	return 0;
280 | 284 |  }
281 | 285 | 
282 | 286 |  #define dcache_clean_range(start, end)	__flush_dcache_area(start, (end - start))
    | 287 | +
    | 288 | +#ifdef CONFIG_ARM64_MTE
    | 289 | +
    | 290 | +static DEFINE_XARRAY(mte_pages);
    | 291 | +
    | 292 | +static int save_tags(struct page *page, unsigned long pfn)
    | 293 | +{
    | 294 | +	void *tag_storage, *ret;
    | 295 | +
    | 296 | +	tag_storage = mte_allocate_tag_storage();
    | 297 | +	if (!tag_storage)
    | 298 | +		return -ENOMEM;
    | 299 | +
    | 300 | +	mte_save_page_tags(page_address(page), tag_storage);
    | 301 | +
    | 302 | +	ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
    | 303 | +	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
    | 304 | +		mte_free_tag_storage(tag_storage);
    | 305 | +		return xa_err(ret);
    | 306 | +	} else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
    | 307 | +		mte_free_tag_storage(ret);
    | 308 | +	}
    | 309 | +
    | 310 | +	return 0;
    | 311 | +}
    | 312 | +
    | 313 | +static void swsusp_mte_free_storage(void)
    | 314 | +{
    | 315 | +	XA_STATE(xa_state, &mte_pages, 0);
    | 316 | +	void *tags;
    | 317 | +
    | 318 | +	xa_lock(&mte_pages);
    | 319 | +	xas_for_each(&xa_state, tags, ULONG_MAX) {
    | 320 | +		mte_free_tag_storage(tags);
    | 321 | +	}
    | 322 | +	xa_unlock(&mte_pages);
    | 323 | +
    | 324 | +	xa_destroy(&mte_pages);
    | 325 | +}
    | 326 | +
    | 327 | +static int swsusp_mte_save_tags(void)
    | 328 | +{
    | 329 | +	struct zone *zone;
    | 330 | +	unsigned long pfn, max_zone_pfn;
    | 331 | +	int ret = 0;
    | 332 | +	int n = 0;
    | 333 | +
    | 334 | +	if (!system_supports_mte())
    | 335 | +		return 0;
    | 336 | +
    | 337 | +	for_each_populated_zone(zone) {
    | 338 | +		max_zone_pfn = zone_end_pfn(zone);
    | 339 | +		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
    | 340 | +			struct page *page = pfn_to_online_page(pfn);
    | 341 | +
    | 342 | +			if (!page)
    | 343 | +				continue;
    | 344 | +
    | 345 | +			if (!test_bit(PG_mte_tagged, &page->flags))
    | 346 | +				continue;
    | 347 | +
    | 348 | +			ret = save_tags(page, pfn);
    | 349 | +			if (ret) {
    | 350 | +				swsusp_mte_free_storage();
    | 351 | +				goto out;
    | 352 | +			}
    | 353 | +
    | 354 | +			n++;
    | 355 | +		}
    | 356 | +	}
    | 357 | +	pr_info("Saved %d MTE pages\n", n);
    | 358 | +
    | 359 | +out:
    | 360 | +	return ret;
    | 361 | +}
    | 362 | +
    | 363 | +static void swsusp_mte_restore_tags(void)
    | 364 | +{
    | 365 | +	XA_STATE(xa_state, &mte_pages, 0);
    | 366 | +	int n = 0;
    | 367 | +	void *tags;
    | 368 | +
    | 369 | +	xa_lock(&mte_pages);
    | 370 | +	xas_for_each(&xa_state, tags, ULONG_MAX) {
    | 371 | +		unsigned long pfn = xa_state.xa_index;
    | 372 | +		struct page *page = pfn_to_online_page(pfn);
    | 373 | +
    | 374 | +		/*
    | 375 | +		 * It is not required to invoke page_kasan_tag_reset(page)
    | 376 | +		 * at this point since the tags stored in page->flags are
    | 377 | +		 * already restored.
    | 378 | +		 */
    | 379 | +		mte_restore_page_tags(page_address(page), tags);
    | 380 | +
    | 381 | +		mte_free_tag_storage(tags);
    | 382 | +		n++;
    | 383 | +	}
    | 384 | +	xa_unlock(&mte_pages);
    | 385 | +
    | 386 | +	pr_info("Restored %d MTE pages\n", n);
    | 387 | +
    | 388 | +	xa_destroy(&mte_pages);
    | 389 | +}
    | 390 | +
    | 391 | +#else	/* CONFIG_ARM64_MTE */
    | 392 | +
    | 393 | +static int swsusp_mte_save_tags(void)
    | 394 | +{
    | 395 | +	return 0;
    | 396 | +}
    | 397 | +
    | 398 | +static void swsusp_mte_restore_tags(void)
    | 399 | +{
    | 400 | +}
    | 401 | +
    | 402 | +#endif	/* CONFIG_ARM64_MTE */
283 | 403 | 
284 | 404 |  int swsusp_arch_suspend(void)
285 | 405 |  {
 .. |  .. |
298 | 418 |  		/* make the crash dump kernel image visible/saveable */
299 | 419 |  		crash_prepare_suspend();
300 | 420 | 
    | 421 | +		ret = swsusp_mte_save_tags();
    | 422 | +		if (ret)
    | 423 | +			return ret;
    | 424 | +
301 | 425 |  		sleep_cpu = smp_processor_id();
302 | 426 |  		ret = swsusp_save();
303 | 427 |  	} else {
 .. |  .. |
310 | 434 |  			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
311 | 435 |  			dcache_clean_range(__hyp_text_start, __hyp_text_end);
312 | 436 |  		}
    | 437 | +
    | 438 | +		swsusp_mte_restore_tags();
313 | 439 | 
314 | 440 |  		/* make the crash dump kernel image protected again */
315 | 441 |  		crash_post_resume();
 .. |  .. |
328 | 454 |  		 * mitigation off behind our back, let's set the state
329 | 455 |  		 * to what we expect it to be.
330 | 456 |  		 */
331 |     | -		switch (arm64_get_ssbd_state()) {
332 |     | -		case ARM64_SSBD_FORCE_ENABLE:
333 |     | -		case ARM64_SSBD_KERNEL:
334 |     | -			arm64_set_ssbd_mitigation(true);
335 |     | -		}
    | 457 | +		spectre_v4_enable_mitigation(NULL);
336 | 458 |  	}
337 | 459 | 
338 | 460 |  	local_daif_restore(flags);
 .. |  .. |
423 | 545 |  	return 0;
424 | 546 |  }
425 | 547 | 
426 |     | -static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
    | 548 | +static int copy_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start,
427 | 549 |  		    unsigned long end)
428 | 550 |  {
429 | 551 |  	pud_t *dst_pudp;
 .. |  .. |
431 | 553 |  	unsigned long next;
432 | 554 |  	unsigned long addr = start;
433 | 555 | 
434 |     | -	if (pgd_none(READ_ONCE(*dst_pgdp))) {
    | 556 | +	if (p4d_none(READ_ONCE(*dst_p4dp))) {
435 | 557 |  		dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
436 | 558 |  		if (!dst_pudp)
437 | 559 |  			return -ENOMEM;
438 |     | -		pgd_populate(&init_mm, dst_pgdp, dst_pudp);
    | 560 | +		p4d_populate(&init_mm, dst_p4dp, dst_pudp);
439 | 561 |  	}
440 |     | -	dst_pudp = pud_offset(dst_pgdp, start);
    | 562 | +	dst_pudp = pud_offset(dst_p4dp, start);
441 | 563 | 
442 |     | -	src_pudp = pud_offset(src_pgdp, start);
    | 564 | +	src_pudp = pud_offset(src_p4dp, start);
443 | 565 |  	do {
444 | 566 |  		pud_t pud = READ_ONCE(*src_pudp);
445 | 567 | 
 .. |  .. |
451 | 573 |  				return -ENOMEM;
452 | 574 |  		} else {
453 | 575 |  			set_pud(dst_pudp,
454 |     | -				__pud(pud_val(pud) & ~PMD_SECT_RDONLY));
    | 576 | +				__pud(pud_val(pud) & ~PUD_SECT_RDONLY));
455 | 577 |  		}
456 | 578 |  	} while (dst_pudp++, src_pudp++, addr = next, addr != end);
    | 579 | +
    | 580 | +	return 0;
    | 581 | +}
    | 582 | +
    | 583 | +static int copy_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
    | 584 | +		    unsigned long end)
    | 585 | +{
    | 586 | +	p4d_t *dst_p4dp;
    | 587 | +	p4d_t *src_p4dp;
    | 588 | +	unsigned long next;
    | 589 | +	unsigned long addr = start;
    | 590 | +
    | 591 | +	dst_p4dp = p4d_offset(dst_pgdp, start);
    | 592 | +	src_p4dp = p4d_offset(src_pgdp, start);
    | 593 | +	do {
    | 594 | +		next = p4d_addr_end(addr, end);
    | 595 | +		if (p4d_none(READ_ONCE(*src_p4dp)))
    | 596 | +			continue;
    | 597 | +		if (copy_pud(dst_p4dp, src_p4dp, addr, next))
    | 598 | +			return -ENOMEM;
    | 599 | +	} while (dst_p4dp++, src_p4dp++, addr = next, addr != end);
457 | 600 | 
458 | 601 |  	return 0;
459 | 602 |  }
 .. |  .. |
465 | 608 |  	unsigned long addr = start;
466 | 609 |  	pgd_t *src_pgdp = pgd_offset_k(start);
467 | 610 | 
468 |     | -	dst_pgdp = pgd_offset_raw(dst_pgdp, start);
    | 611 | +	dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
469 | 612 |  	do {
470 | 613 |  		next = pgd_addr_end(addr, end);
471 | 614 |  		if (pgd_none(READ_ONCE(*src_pgdp)))
472 | 615 |  			continue;
473 |     | -		if (copy_pud(dst_pgdp, src_pgdp, addr, next))
    | 616 | +		if (copy_p4d(dst_pgdp, src_pgdp, addr, next))
474 | 617 |  			return -ENOMEM;
475 | 618 |  	} while (dst_pgdp++, src_pgdp++, addr = next, addr != end);
476 | 619 | 
477 | 620 |  	return 0;
    | 621 | +}
    | 622 | +
    | 623 | +static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
    | 624 | +				 unsigned long end)
    | 625 | +{
    | 626 | +	int rc;
    | 627 | +	pgd_t *trans_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
    | 628 | +
    | 629 | +	if (!trans_pgd) {
    | 630 | +		pr_err("Failed to allocate memory for temporary page tables.\n");
    | 631 | +		return -ENOMEM;
    | 632 | +	}
    | 633 | +
    | 634 | +	rc = copy_page_tables(trans_pgd, start, end);
    | 635 | +	if (!rc)
    | 636 | +		*dst_pgdp = trans_pgd;
    | 637 | +
    | 638 | +	return rc;
478 | 639 |  }
479 | 640 | 
480 | 641 |  /*
 .. |  .. |
485 | 646 |   */
486 | 647 |  int swsusp_arch_resume(void)
487 | 648 |  {
488 |     | -	int rc = 0;
    | 649 | +	int rc;
489 | 650 |  	void *zero_page;
490 | 651 |  	size_t exit_size;
491 | 652 |  	pgd_t *tmp_pg_dir;
 .. |  .. |
498 | 659 |  	 * Create a second copy of just the linear map, and use this when
499 | 660 |  	 * restoring.
500 | 661 |  	 */
501 |     | -	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
502 |     | -	if (!tmp_pg_dir) {
503 |     | -		pr_err("Failed to allocate memory for temporary page tables.\n");
504 |     | -		rc = -ENOMEM;
505 |     | -		goto out;
506 |     | -	}
507 |     | -	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
    | 662 | +	rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
508 | 663 |  	if (rc)
509 |     | -		goto out;
    | 664 | +		return rc;
510 | 665 | 
511 | 666 |  	/*
512 | 667 |  	 * We need a zero page that is zero before & after resume in order to
 .. |  .. |
515 | 670 |  	zero_page = (void *)get_safe_page(GFP_ATOMIC);
516 | 671 |  	if (!zero_page) {
517 | 672 |  		pr_err("Failed to allocate zero page.\n");
518 |     | -		rc = -ENOMEM;
519 |     | -		goto out;
    | 673 | +		return -ENOMEM;
520 | 674 |  	}
521 | 675 | 
522 | 676 |  	/*
.. | .. |
---|
531 | 685 | */ |
---|
532 | 686 | rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size, |
---|
533 | 687 | (unsigned long)hibernate_exit, |
---|
534 | | - &phys_hibernate_exit, |
---|
535 | | - (void *)get_safe_page, GFP_ATOMIC); |
---|
| 688 | + &phys_hibernate_exit); |
---|
536 | 689 | if (rc) { |
---|
537 | 690 | pr_err("Failed to create safe executable page for hibernate_exit code.\n"); |
---|
538 | | - goto out; |
---|
| 691 | + return rc; |
---|
539 | 692 | } |
---|
540 | 693 | |
---|
541 | 694 | /* |
---|
.. | .. |
---|
562 | 715 | resume_hdr.reenter_kernel, restore_pblist, |
---|
563 | 716 | resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page)); |
---|
564 | 717 | |
---|
565 | | -out: |
---|
566 | | - return rc; |
---|
| 718 | + return 0; |
---|
567 | 719 | } |
---|
568 | 720 | |
---|
569 | 721 | int hibernate_resume_nonboot_cpu_disable(void) |
---|