.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later
1 | 2 | /*
2 | 3 | * acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3 | 4 | *
.. | ..
6 | 7 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7 | 8 | * Copyright (c) 2008 Intel Corporation
8 | 9 | * Author: Matthew Wilcox <willy@linux.intel.com>
9 | | - *
10 | | - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 | | - *
12 | | - * This program is free software; you can redistribute it and/or modify
13 | | - * it under the terms of the GNU General Public License as published by
14 | | - * the Free Software Foundation; either version 2 of the License, or
15 | | - * (at your option) any later version.
16 | | - *
17 | | - * This program is distributed in the hope that it will be useful,
18 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 | | - * GNU General Public License for more details.
21 | | - *
22 | | - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 | | - *
24 | 10 | */
25 | 11 |
26 | 12 | #include <linux/module.h>
.. | ..
28 | 14 | #include <linux/slab.h>
29 | 15 | #include <linux/mm.h>
30 | 16 | #include <linux/highmem.h>
| 17 | +#include <linux/lockdep.h>
31 | 18 | #include <linux/pci.h>
32 | 19 | #include <linux/interrupt.h>
33 | 20 | #include <linux/kmod.h>
.. | ..
40 | 27 | #include <linux/list.h>
41 | 28 | #include <linux/jiffies.h>
42 | 29 | #include <linux/semaphore.h>
| 30 | +#include <linux/security.h>
43 | 31 |
44 | 32 | #include <asm/io.h>
45 | 33 | #include <linux/uaccess.h>
.. | ..
89 | 77 | void __iomem *virt;
90 | 78 | acpi_physical_address phys;
91 | 79 | acpi_size size;
92 | | - unsigned long refcount;
| 80 | + union {
| 81 | + unsigned long refcount;
| 82 | + struct rcu_work rwork;
| 83 | + } track;
93 | 84 | };
94 | 85 |
95 | 86 | static LIST_HEAD(acpi_ioremaps);
96 | 87 | static DEFINE_MUTEX(acpi_ioremap_lock);
| 88 | +#define acpi_ioremap_lock_held() lock_is_held(&acpi_ioremap_lock.dep_map)
97 | 89 |
98 | 90 | static void __init acpi_request_region (struct acpi_generic_address *gas,
99 | 91 | unsigned int length, char *desc)
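The union introduced in struct acpi_ioremap is safe because its two members are never live at the same time: track.refcount is only meaningful while the mapping is published on acpi_ioremaps, and track.rwork is only initialized after the final reference has been dropped and the entry unlinked with list_del_rcu(). A minimal sketch of the same pattern (the struct name here is hypothetical, not part of the patch):

#include <linux/list.h>
#include <linux/workqueue.h>

/* Sketch: one slot of storage does double duty because the two uses
 * are strictly separated in time.
 */
struct mapping {
	struct list_head list;
	union {
		unsigned long refcount;	/* valid while on the list */
		struct rcu_work rwork;	/* valid after list_del_rcu(),
					 * for deferred teardown */
	} track;
};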
.. | ..
194 | 186 | acpi_physical_address pa;
195 | 187 |
196 | 188 | #ifdef CONFIG_KEXEC
197 | | - if (acpi_rsdp)
| 189 | + /*
| 190 | + * We may have been provided with an RSDP on the command line,
| 191 | + * but if a malicious user has done so they may be pointing us
| 192 | + * at modified ACPI tables that could alter kernel behaviour -
| 193 | + * so, we check the lockdown status before making use of
| 194 | + * it. If we trust it then also stash it in an architecture
| 195 | + * specific location (if appropriate) so it can be carried
| 196 | + * over further kexec()s.
| 197 | + */
| 198 | + if (acpi_rsdp && !security_locked_down(LOCKDOWN_ACPI_TABLES)) {
| 199 | + acpi_arch_set_root_pointer(acpi_rsdp);
198 | 200 | return acpi_rsdp;
| 201 | + }
199 | 202 | #endif
200 | 203 | pa = acpi_arch_get_root_pointer();
201 | 204 | if (pa)
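acpi_arch_set_root_pointer() is the hook that lets an architecture remember a trusted RSDP address across kexec. On x86, for instance, the default implementations behind the x86_init.acpi indirection simply stash the address in boot_params; roughly (paraphrased from the arch code, not part of this patch):

/* arch/x86 default hooks, approximately: */
void x86_default_set_root_pointer(u64 addr)
{
	boot_params.acpi_rsdp_addr = addr;
}

u64 x86_default_get_root_pointer(void)
{
	return boot_params.acpi_rsdp_addr;
}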
.. | ..
220 | 223 | {
221 | 224 | struct acpi_ioremap *map;
222 | 225 |
223 | | - list_for_each_entry_rcu(map, &acpi_ioremaps, list)
| 226 | + list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
224 | 227 | if (map->phys <= phys &&
225 | 228 | phys + size <= map->phys + map->size)
226 | 229 | return map;
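The new fourth argument to list_for_each_entry_rcu() is a lockdep expression: iteration is then accepted either inside an RCU read-side critical section or whenever the condition is true. Here acpi_ioremap_lock_held() covers callers that walk the list while holding the update-side mutex instead of rcu_read_lock(). A self-contained sketch of the idiom (all names hypothetical):

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/rculist.h>

struct obj {
	struct list_head node;
	int key;
};

static LIST_HEAD(obj_list);
static DEFINE_MUTEX(obj_lock);	/* update-side lock */
#define obj_lock_held() lock_is_held(&obj_lock.dep_map)

/* Legal under rcu_read_lock() *or* with obj_lock held. */
static struct obj *obj_lookup(int key)
{
	struct obj *o;

	list_for_each_entry_rcu(o, &obj_list, node, obj_lock_held())
		if (o->key == key)
			return o;

	return NULL;
}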
.. | ..
250 | 253 | map = acpi_map_lookup(phys, size);
251 | 254 | if (map) {
252 | 255 | virt = map->virt + (phys - map->phys);
253 | | - map->refcount++;
| 256 | + map->track.refcount++;
254 | 257 | }
255 | 258 | mutex_unlock(&acpi_ioremap_lock);
256 | 259 | return virt;
.. | ..
263 | 266 | {
264 | 267 | struct acpi_ioremap *map;
265 | 268 |
266 | | - list_for_each_entry_rcu(map, &acpi_ioremaps, list)
| 269 | + list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
267 | 270 | if (map->virt <= virt &&
268 | 271 | virt + size <= map->virt + map->size)
269 | 272 | return map;
.. | ..
315 | 318 | * During early init (when acpi_permanent_mmap has not been set yet) this
316 | 319 | * routine simply calls __acpi_map_table() to get the job done.
317 | 320 | */
318 | | -void __iomem *__ref
319 | | -acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
| 321 | +void __iomem __ref
| 322 | +*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
320 | 323 | {
321 | 324 | struct acpi_ioremap *map;
322 | 325 | void __iomem *virt;
.. | ..
335 | 338 | /* Check if there's a suitable mapping already. */
336 | 339 | map = acpi_map_lookup(phys, size);
337 | 340 | if (map) {
338 | | - map->refcount++;
| 341 | + map->track.refcount++;
339 | 342 | goto out;
340 | 343 | }
341 | 344 |
.. | ..
347 | 350 |
348 | 351 | pg_off = round_down(phys, PAGE_SIZE);
349 | 352 | pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
350 | | - virt = acpi_map(pg_off, pg_sz);
| 353 | + virt = acpi_map(phys, size);
351 | 354 | if (!virt) {
352 | 355 | mutex_unlock(&acpi_ioremap_lock);
353 | 356 | kfree(map);
.. | ..
355 | 358 | }
356 | 359 |
357 | 360 | INIT_LIST_HEAD(&map->list);
358 | | - map->virt = virt;
| 361 | + map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK);
359 | 362 | map->phys = pg_off;
360 | 363 | map->size = pg_sz;
361 | | - map->refcount = 1;
| 364 | + map->track.refcount = 1;
362 | 365 |
363 | 366 | list_add_tail_rcu(&map->list, &acpi_ioremaps);
364 | 367 |
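Note the bookkeeping split introduced here: acpi_map() is now called with the caller's exact phys/size, while the tracking entry still records the page-aligned region, with map->virt masked back down so that map->virt and map->phys describe the same page boundary. A worked example with 4 KiB pages (illustrative values only):

/*
 * phys = 0x12345678, size = 0x10, PAGE_SIZE = 0x1000
 *
 * pg_off = round_down(0x12345678, 0x1000)               = 0x12345000
 * pg_sz  = round_up(0x12345678 + 0x10, 0x1000) - pg_off = 0x1000
 *
 * acpi_map() returns a virtual address for 0x12345678; masking it
 * with PAGE_MASK yields the mapping of 0x12345000, so later lookups
 * can still compute map->virt + (phys - map->phys).
 */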
.. | ..
374 | 377 | }
375 | 378 | EXPORT_SYMBOL_GPL(acpi_os_map_memory);
376 | 379 |
377 | | -/* Must be called with mutex_lock(&acpi_ioremap_lock) */
378 | | -static unsigned long acpi_os_drop_map_ref(struct acpi_ioremap *map)
| 380 | +static void acpi_os_map_remove(struct work_struct *work)
379 | 381 | {
380 | | - unsigned long refcount = --map->refcount;
| 382 | + struct acpi_ioremap *map = container_of(to_rcu_work(work),
| 383 | + struct acpi_ioremap,
| 384 | + track.rwork);
381 | 385 |
382 | | - if (!refcount)
383 | | - list_del_rcu(&map->list);
384 | | - return refcount;
385 | | -}
386 | | -
387 | | -static void acpi_os_map_cleanup(struct acpi_ioremap *map)
388 | | -{
389 | | - synchronize_rcu_expedited();
390 | 386 | acpi_unmap(map->phys, map->virt);
391 | 387 | kfree(map);
| 388 | +}
| 389 | +
| 390 | +/* Must be called with mutex_lock(&acpi_ioremap_lock) */
| 391 | +static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
| 392 | +{
| 393 | + if (--map->track.refcount)
| 394 | + return;
| 395 | +
| 396 | + list_del_rcu(&map->list);
| 397 | +
| 398 | + INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
| 399 | + queue_rcu_work(system_wq, &map->track.rwork);
392 | 400 | }
393 | 401 |
394 | 402 | /**
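queue_rcu_work() combines call_rcu() with a workqueue: the work function runs in process context, but only after a full RCU grace period, so lockless readers still traversing acpi_ioremaps can never observe acpi_unmap()/kfree() in flight. It also moves the teardown out from under acpi_ioremap_lock, unlike the old synchronize_rcu_expedited() path. A generic sketch of the pattern (the foo object is hypothetical, not from the patch):

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo {
	struct list_head node;
	struct rcu_work rwork;
};

static void foo_reclaim(struct work_struct *work)
{
	struct foo *f = container_of(to_rcu_work(work), struct foo, rwork);

	/* Runs in process context, one grace period after queuing. */
	kfree(f);
}

static void foo_unpublish(struct foo *f)
{
	list_del_rcu(&f->node);			/* readers can no longer find it */
	INIT_RCU_WORK(&f->rwork, foo_reclaim);
	queue_rcu_work(system_wq, &f->rwork);	/* reclaim after a grace period */
}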
.. | ..
397 | 405 | * @size: Size of the address range to drop a reference to.
398 | 406 | *
399 | 407 | * Look up the given virtual address range in the list of existing ACPI memory
400 | | - * mappings, drop a reference to it and unmap it if there are no more active
401 | | - * references to it.
| 408 | + * mappings, drop a reference to it and if there are no more active references
| 409 | + * to it, queue it up for later removal.
402 | 410 | *
403 | 411 | * During early init (when acpi_permanent_mmap has not been set yet) this
404 | 412 | * routine simply calls __acpi_unmap_table() to get the job done. Since
.. | ..
408 | 416 | void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
409 | 417 | {
410 | 418 | struct acpi_ioremap *map;
411 | | - unsigned long refcount;
412 | 419 |
413 | 420 | if (!acpi_permanent_mmap) {
414 | 421 | __acpi_unmap_table(virt, size);
.. | ..
416 | 423 | }
417 | 424 |
418 | 425 | mutex_lock(&acpi_ioremap_lock);
| 426 | +
419 | 427 | map = acpi_map_lookup_virt(virt, size);
420 | 428 | if (!map) {
421 | 429 | mutex_unlock(&acpi_ioremap_lock);
422 | 430 | WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
423 | 431 | return;
424 | 432 | }
425 | | - refcount = acpi_os_drop_map_ref(map);
426 | | - mutex_unlock(&acpi_ioremap_lock);
| 433 | + acpi_os_drop_map_ref(map);
427 | 434 |
428 | | - if (!refcount)
429 | | - acpi_os_map_cleanup(map);
| 435 | + mutex_unlock(&acpi_ioremap_lock);
430 | 436 | }
431 | 437 | EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
432 | 438 |
| 439 | +/**
| 440 | + * acpi_os_unmap_memory - Drop a memory mapping reference.
| 441 | + * @virt: Start of the address range to drop a reference to.
| 442 | + * @size: Size of the address range to drop a reference to.
| 443 | + */
433 | 444 | void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
434 | 445 | {
435 | | - return acpi_os_unmap_iomem((void __iomem *)virt, size);
| 446 | + acpi_os_unmap_iomem((void __iomem *)virt, size);
436 | 447 | }
437 | 448 | EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
438 | 449 |
439 | | -int acpi_os_map_generic_address(struct acpi_generic_address *gas)
| 450 | +void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *gas)
440 | 451 | {
441 | 452 | u64 addr;
442 | | - void __iomem *virt;
443 | 453 |
444 | 454 | if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
445 | | - return 0;
| 455 | + return NULL;
446 | 456 |
447 | 457 | /* Handle possible alignment issues */
448 | 458 | memcpy(&addr, &gas->address, sizeof(addr));
449 | 459 | if (!addr || !gas->bit_width)
450 | | - return -EINVAL;
| 460 | + return NULL;
451 | 461 |
452 | | - virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
453 | | - if (!virt)
454 | | - return -EIO;
455 | | -
456 | | - return 0;
| 462 | + return acpi_os_map_iomem(addr, gas->bit_width / 8);
457 | 463 | }
458 | 464 | EXPORT_SYMBOL(acpi_os_map_generic_address);
459 | 465 |
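The signature change ripples into the callers: instead of distinguishing 0, -EINVAL and -EIO, they now just test the returned mapping, which is what lets acpi_os_initialize() further down stash the GPE block mappings directly. A hypothetical caller in that style (not from the patch):

/* All three old outcomes collapse into a NULL return. */
static unsigned long map_gas_or_zero(struct acpi_generic_address *gas)
{
	void __iomem *addr = acpi_os_map_generic_address(gas);

	return (unsigned long)addr;	/* 0 if nothing was mapped */
}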
.. | ..
461 | 467 | {
462 | 468 | u64 addr;
463 | 469 | struct acpi_ioremap *map;
464 | | - unsigned long refcount;
465 | 470 |
466 | 471 | if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
467 | 472 | return;
.. | ..
472 | 477 | return;
473 | 478 |
474 | 479 | mutex_lock(&acpi_ioremap_lock);
| 480 | +
475 | 481 | map = acpi_map_lookup(addr, gas->bit_width / 8);
476 | 482 | if (!map) {
477 | 483 | mutex_unlock(&acpi_ioremap_lock);
478 | 484 | return;
479 | 485 | }
480 | | - refcount = acpi_os_drop_map_ref(map);
481 | | - mutex_unlock(&acpi_ioremap_lock);
| 486 | + acpi_os_drop_map_ref(map);
482 | 487 |
483 | | - if (!refcount)
484 | | - acpi_os_map_cleanup(map);
| 488 | + mutex_unlock(&acpi_ioremap_lock);
485 | 489 | }
486 | 490 | EXPORT_SYMBOL(acpi_os_unmap_generic_address);
487 | 491 |
.. | ..
775 | 779 | return AE_OK;
776 | 780 | }
777 | 781 |
| 782 | +#ifdef CONFIG_PCI
778 | 783 | acpi_status
779 | 784 | acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
780 | 785 | u64 *value, u32 width)
.. | ..
833 | 838 |
834 | 839 | return (result ? AE_ERROR : AE_OK);
835 | 840 | }
| 841 | +#endif
836 | 842 |
837 | 843 | static void acpi_os_execute_deferred(struct work_struct *work)
838 | 844 | {
.. | ..
1564 | 1570 | acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
1565 | 1571 | u32 level)
1566 | 1572 | {
| 1573 | + acpi_status status;
| 1574 | +
1567 | 1575 | if (!(res->flags & IORESOURCE_MEM))
1568 | 1576 | return AE_TYPE;
1569 | 1577 |
1570 | | - return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
1571 | | - acpi_deactivate_mem_region, NULL, res, NULL);
| 1578 | + status = acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
| 1579 | + acpi_deactivate_mem_region, NULL,
| 1580 | + res, NULL);
| 1581 | + if (ACPI_FAILURE(status))
| 1582 | + return status;
| 1583 | +
| 1584 | + /*
| 1585 | + * Wait for all of the mappings queued up for removal by
| 1586 | + * acpi_deactivate_mem_region() to actually go away.
| 1587 | + */
| 1588 | + synchronize_rcu();
| 1589 | + rcu_barrier();
| 1590 | + flush_scheduled_work();
| 1591 | +
| 1592 | + return AE_OK;
1572 | 1593 | }
1573 | 1594 | EXPORT_SYMBOL_GPL(acpi_release_memory);
1574 | 1595 |
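The three waits chase the deferred-unmap machinery introduced above through each of its stages; roughly (explanatory sketch, not extra code to copy):

/*
 * queue_rcu_work() = call_rcu() + a work item, so a mapping dropped
 * by acpi_deactivate_mem_region() goes away in stages:
 *
 * synchronize_rcu();       - a grace period elapses, so prior RCU
 *                            readers of acpi_ioremaps are done
 * rcu_barrier();           - the call_rcu() callbacks backing any
 *                            queue_rcu_work() have run, i.e. the
 *                            unmap work items are now on system_wq
 * flush_scheduled_work();  - system_wq has been drained, i.e.
 *                            acpi_unmap() and kfree() have finished
 */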
.. | ..
1596 | 1617 | */
1597 | 1618 |
1598 | 1619 | acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
| 1620 | + __acquires(lockp)
1599 | 1621 | {
1600 | 1622 | acpi_cpu_flags flags;
1601 | 1623 | spin_lock_irqsave(lockp, flags);
.. | ..
1607 | 1629 | */
1608 | 1630 |
1609 | 1631 | void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
| 1632 | + __releases(lockp)
1610 | 1633 | {
1611 | 1634 | spin_unlock_irqrestore(lockp, flags);
1612 | 1635 | }
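__acquires()/__releases() only matter to sparse's lock-context checking (make C=1); in a regular build they expand to nothing. From the kernel's compiler annotations, approximately:

#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif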
.. | ..
1721 | 1744 | {
1722 | 1745 | acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
1723 | 1746 | acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
1724 | | - acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
1725 | | - acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
| 1747 | +
| 1748 | + acpi_gbl_xgpe0_block_logical_address =
| 1749 | + (unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
| 1750 | + acpi_gbl_xgpe1_block_logical_address =
| 1751 | + (unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
| 1752 | +
1726 | 1753 | if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
1727 | 1754 | /*
1728 | 1755 | * Use acpi_os_map_generic_address to pre-map the reset
1729 | 1756 | * register if it's in system memory.
1730 | 1757 | */
1731 | | - int rv;
| 1758 | + void *rv;
1732 | 1759 |
1733 | 1760 | rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
1734 | | - pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
| 1761 | + pr_debug(PREFIX "%s: map reset_reg %s\n", __func__,
| 1762 | + rv ? "successful" : "failed");
1735 | 1763 | }
1736 | 1764 | acpi_os_initialized = true;
1737 | 1765 |
.. | ..
1759 | 1787 |
1760 | 1788 | acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
1761 | 1789 | acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
| 1790 | + acpi_gbl_xgpe0_block_logical_address = 0UL;
| 1791 | + acpi_gbl_xgpe1_block_logical_address = 0UL;
| 1792 | +
1762 | 1793 | acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
1763 | 1794 | acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
| 1795 | +
1764 | 1796 | if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
1765 | 1797 | acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);
1766 | 1798 |