 .. |  .. |
    |   1 | +// SPDX-License-Identifier: GPL-2.0-only
  1 |   2 |  /*
  2 |   3 |   * ARM64 Specific Low-Level ACPI Boot Support
  3 |   4 |   *
 .. |  .. |
  7 |   8 |   * Author: Hanjun Guo <hanjun.guo@linaro.org>
  8 |   9 |   * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
  9 |  10 |   * Author: Naresh Bhat <naresh.bhat@linaro.org>
 10 |     | - *
 11 |     | - * This program is free software; you can redistribute it and/or modify
 12 |     | - * it under the terms of the GNU General Public License version 2 as
 13 |     | - * published by the Free Software Foundation.
 14 |  11 |   */
 15 |  12 |
 16 |  13 |  #define pr_fmt(fmt) "ACPI: " fmt
 17 |  14 |
 18 |  15 |  #include <linux/acpi.h>
 19 |     | -#include <linux/bootmem.h>
 20 |  16 |  #include <linux/cpumask.h>
 21 |  17 |  #include <linux/efi.h>
 22 |  18 |  #include <linux/efi-bgrt.h>
 23 |  19 |  #include <linux/init.h>
 24 |  20 |  #include <linux/irq.h>
 25 |  21 |  #include <linux/irqdomain.h>
    |  22 | +#include <linux/irq_work.h>
 26 |  23 |  #include <linux/memblock.h>
 27 |  24 |  #include <linux/of_fdt.h>
 28 |  25 |  #include <linux/smp.h>
 29 |  26 |  #include <linux/serial_core.h>
    |  27 | +#include <linux/pgtable.h>
 30 |  28 |
    |  29 | +#include <acpi/ghes.h>
 31 |  30 |  #include <asm/cputype.h>
 32 |  31 |  #include <asm/cpu_ops.h>
 33 |     | -#include <asm/pgtable.h>
    |  32 | +#include <asm/daifflags.h>
 34 |  33 |  #include <asm/smp_plat.h>
 35 |  34 |
 36 |  35 |  int acpi_noirq = 1;	/* skip ACPI IRQ initialization */
 .. |  .. |
261 | 260 |  		return __pgprot(PROT_NORMAL_NC);
262 | 261 |  	return __pgprot(PROT_DEVICE_nGnRnE);
263 | 262 |  }
    | 263 | +
    | 264 | +void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
    | 265 | +{
    | 266 | +	efi_memory_desc_t *md, *region = NULL;
    | 267 | +	pgprot_t prot;
    | 268 | +
    | 269 | +	if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
    | 270 | +		return NULL;
    | 271 | +
    | 272 | +	for_each_efi_memory_desc(md) {
    | 273 | +		u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
    | 274 | +
    | 275 | +		if (phys < md->phys_addr || phys >= end)
    | 276 | +			continue;
    | 277 | +
    | 278 | +		if (phys + size > end) {
    | 279 | +			pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
    | 280 | +			return NULL;
    | 281 | +		}
    | 282 | +		region = md;
    | 283 | +		break;
    | 284 | +	}
    | 285 | +
    | 286 | +	/*
    | 287 | +	 * It is fine for AML to remap regions that are not represented in the
    | 288 | +	 * EFI memory map at all, as it only describes normal memory, and MMIO
    | 289 | +	 * regions that require a virtual mapping to make them accessible to
    | 290 | +	 * the EFI runtime services.
    | 291 | +	 */
    | 292 | +	prot = __pgprot(PROT_DEVICE_nGnRnE);
    | 293 | +	if (region) {
    | 294 | +		switch (region->type) {
    | 295 | +		case EFI_LOADER_CODE:
    | 296 | +		case EFI_LOADER_DATA:
    | 297 | +		case EFI_BOOT_SERVICES_CODE:
    | 298 | +		case EFI_BOOT_SERVICES_DATA:
    | 299 | +		case EFI_CONVENTIONAL_MEMORY:
    | 300 | +		case EFI_PERSISTENT_MEMORY:
    | 301 | +			if (memblock_is_map_memory(phys) ||
    | 302 | +			    !memblock_is_region_memory(phys, size)) {
    | 303 | +				pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
    | 304 | +				return NULL;
    | 305 | +			}
    | 306 | +			/*
    | 307 | +			 * Mapping kernel memory is permitted if the region in
    | 308 | +			 * question is covered by a single memblock with the
    | 309 | +			 * NOMAP attribute set: this enables the use of ACPI
    | 310 | +			 * table overrides passed via initramfs, which are
    | 311 | +			 * reserved in memory using arch_reserve_mem_area()
    | 312 | +			 * below. As this particular use case only requires
    | 313 | +			 * read access, fall through to the R/O mapping case.
    | 314 | +			 */
    | 315 | +			fallthrough;
    | 316 | +
    | 317 | +		case EFI_RUNTIME_SERVICES_CODE:
    | 318 | +			/*
    | 319 | +			 * This would be unusual, but not problematic per se,
    | 320 | +			 * as long as we take care not to create a writable
    | 321 | +			 * mapping for executable code.
    | 322 | +			 */
    | 323 | +			prot = PAGE_KERNEL_RO;
    | 324 | +			break;
    | 325 | +
    | 326 | +		case EFI_ACPI_RECLAIM_MEMORY:
    | 327 | +			/*
    | 328 | +			 * ACPI reclaim memory is used to pass firmware tables
    | 329 | +			 * and other data that is intended for consumption by
    | 330 | +			 * the OS only, which may decide it wants to reclaim
    | 331 | +			 * that memory and use it for something else. We never
    | 332 | +			 * do that, but we usually add it to the linear map
    | 333 | +			 * anyway, in which case we should use the existing
    | 334 | +			 * mapping.
    | 335 | +			 */
    | 336 | +			if (memblock_is_map_memory(phys))
    | 337 | +				return (void __iomem *)__phys_to_virt(phys);
    | 338 | +			fallthrough;
    | 339 | +
    | 340 | +		default:
    | 341 | +			if (region->attribute & EFI_MEMORY_WB)
    | 342 | +				prot = PAGE_KERNEL;
    | 343 | +			else if (region->attribute & EFI_MEMORY_WT)
    | 344 | +				prot = __pgprot(PROT_NORMAL_WT);
    | 345 | +			else if (region->attribute & EFI_MEMORY_WC)
    | 346 | +				prot = __pgprot(PROT_NORMAL_NC);
    | 347 | +		}
    | 348 | +	}
    | 349 | +	return __ioremap(phys, size, prot);
    | 350 | +}
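
The new acpi_os_ioremap() above selects memory attributes for ACPI mapping requests by consulting the EFI memory map. A minimal usage sketch, assuming a caller that only reads a firmware table through the mapping; the function name, table address and size are illustrative, and teardown is deliberately elided because the returned pointer may alias the linear map for ACPI reclaim memory:

/*
 * Hedged sketch, not part of the patch: read a table header through
 * the arm64 acpi_os_ioremap() hook.  'table_phys' and 'table_size'
 * are illustrative inputs.
 */
static int example_peek_table(acpi_physical_address table_phys,
			      acpi_size table_size)
{
	struct acpi_table_header hdr;
	void __iomem *virt;

	virt = acpi_os_ioremap(table_phys, table_size);
	if (!virt)
		return -ENOMEM;

	/* Copy out the standard header; the mapping may be Device or Normal. */
	memcpy_fromio(&hdr, virt, sizeof(hdr));
	pr_info("table %.4s, length %u\n", hdr.signature, hdr.length);

	return 0;
}
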
    | 351 | +
    | 352 | +/*
    | 353 | + * Claim Synchronous External Aborts as a firmware first notification.
    | 354 | + *
    | 355 | + * Used by KVM and the arch do_sea handler.
    | 356 | + * @regs may be NULL when called from process context.
    | 357 | + */
    | 358 | +int apei_claim_sea(struct pt_regs *regs)
    | 359 | +{
    | 360 | +	int err = -ENOENT;
    | 361 | +	bool return_to_irqs_enabled;
    | 362 | +	unsigned long current_flags;
    | 363 | +
    | 364 | +	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
    | 365 | +		return err;
    | 366 | +
    | 367 | +	current_flags = local_daif_save_flags();
    | 368 | +
    | 369 | +	/* current_flags isn't useful here as daif doesn't tell us about pNMI */
    | 370 | +	return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags());
    | 371 | +
    | 372 | +	if (regs)
    | 373 | +		return_to_irqs_enabled = interrupts_enabled(regs);
    | 374 | +
    | 375 | +	/*
    | 376 | +	 * SEA can interrupt SError, mask it and describe this as an NMI so
    | 377 | +	 * that APEI defers the handling.
    | 378 | +	 */
    | 379 | +	local_daif_restore(DAIF_ERRCTX);
    | 380 | +	nmi_enter();
    | 381 | +	err = ghes_notify_sea();
    | 382 | +	nmi_exit();
    | 383 | +
    | 384 | +	/*
    | 385 | +	 * APEI NMI-like notifications are deferred to irq_work. Unless
    | 386 | +	 * we interrupted irqs-masked code, we can do that now.
    | 387 | +	 */
    | 388 | +	if (!err) {
    | 389 | +		if (return_to_irqs_enabled) {
    | 390 | +			local_daif_restore(DAIF_PROCCTX_NOIRQ);
    | 391 | +			__irq_enter();
    | 392 | +			irq_work_run();
    | 393 | +			__irq_exit();
    | 394 | +		} else {
    | 395 | +			pr_warn_ratelimited("APEI work queued but not completed");
    | 396 | +			err = -EINPROGRESS;
    | 397 | +		}
    | 398 | +	}
    | 399 | +
    | 400 | +	local_daif_restore(current_flags);
    | 401 | +
    | 402 | +	return err;
    | 403 | +}
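
The comment above names KVM and the arch do_sea handler as the consumers of apei_claim_sea(). A hedged sketch of how such a fault path might use it, loosely modelled on the arm64 do_sea() handler; apart from apei_claim_sea() itself, the names are illustrative:

/*
 * Hedged sketch, not part of the patch: let APEI/GHES claim a
 * synchronous external abort before falling back to the normal
 * fault reporting path.
 */
static int example_handle_sea(unsigned long addr, struct pt_regs *regs)
{
	/* 0 means a firmware-first error record claimed this abort. */
	if (apei_claim_sea(regs) == 0)
		return 0;

	/* Not claimed (or only queued): let the caller report the fault. */
	return -ENOENT;
}
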
    | 404 | +
    | 405 | +void arch_reserve_mem_area(acpi_physical_address addr, size_t size)
    | 406 | +{
    | 407 | +	memblock_mark_nomap(addr, size);
    | 408 | +}
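
arch_reserve_mem_area() closes the loop with the NOMAP check in acpi_os_ioremap(): it is how ACPI table overrides copied out of the initramfs end up in a NOMAP memblock. A hedged sketch of the expected caller on the generic table-upgrade path; the function and variable names are illustrative:

/*
 * Hedged sketch, not part of the patch: reserve the destination of
 * upgraded/override ACPI tables after copying them out of the initrd.
 */
static void example_reserve_override_tables(phys_addr_t dest, size_t len)
{
	/* Keep the copied tables out of the free memory pool... */
	memblock_reserve(dest, len);
	/*
	 * ...and mark them NOMAP on arm64, so a later acpi_os_ioremap()
	 * maps them read-only instead of rejecting them as kernel memory.
	 */
	arch_reserve_mem_area(dest, len);
}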