2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/acpi/osl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * acpi_osl.c - OS-dependent functions ($Revision: 83 $)
  *
@@ -6,21 +7,6 @@
  * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  * Copyright (c) 2008 Intel Corporation
  * Author: Matthew Wilcox <willy@linux.intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
 */
 
 #include <linux/module.h>
@@ -28,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
+#include <linux/lockdep.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/kmod.h>
@@ -40,6 +27,7 @@
 #include <linux/list.h>
 #include <linux/jiffies.h>
 #include <linux/semaphore.h>
+#include <linux/security.h>
 
 #include <asm/io.h>
 #include <linux/uaccess.h>
@@ -89,11 +77,15 @@
         void __iomem *virt;
         acpi_physical_address phys;
         acpi_size size;
-        unsigned long refcount;
+        union {
+                unsigned long refcount;
+                struct rcu_work rwork;
+        } track;
 };
 
 static LIST_HEAD(acpi_ioremaps);
 static DEFINE_MUTEX(acpi_ioremap_lock);
+#define acpi_ioremap_lock_held() lock_is_held(&acpi_ioremap_lock.dep_map)
 
 static void __init acpi_request_region (struct acpi_generic_address *gas,
         unsigned int length, char *desc)
@@ -194,8 +186,19 @@
         acpi_physical_address pa;
 
 #ifdef CONFIG_KEXEC
-        if (acpi_rsdp)
+        /*
+         * We may have been provided with an RSDP on the command line,
+         * but if a malicious user has done so they may be pointing us
+         * at modified ACPI tables that could alter kernel behaviour -
+         * so, we check the lockdown status before making use of
+         * it. If we trust it then also stash it in an architecture
+         * specific location (if appropriate) so it can be carried
+         * over further kexec()s.
+         */
+        if (acpi_rsdp && !security_locked_down(LOCKDOWN_ACPI_TABLES)) {
+                acpi_arch_set_root_pointer(acpi_rsdp);
                 return acpi_rsdp;
+        }
 #endif
         pa = acpi_arch_get_root_pointer();
         if (pa)
@@ -220,7 +223,7 @@
 {
         struct acpi_ioremap *map;
 
-        list_for_each_entry_rcu(map, &acpi_ioremaps, list)
+        list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
                 if (map->phys <= phys &&
                     phys + size <= map->phys + map->size)
                         return map;
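
The fourth argument added above is the optional lockdep condition of list_for_each_entry_rcu(), which tells RCU-lockdep that holding the update-side mutex is as safe as rcu_read_lock() for this traversal. A minimal standalone sketch of the same pattern follows; the names demo_list, demo_lock, demo_entry and demo_lookup are invented for illustration, only the list/lockdep API is the kernel's:

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/rculist.h>

static LIST_HEAD(demo_list);            /* hypothetical RCU-protected list */
static DEFINE_MUTEX(demo_lock);         /* update-side lock for demo_list  */

struct demo_entry {
        struct list_head list;
        unsigned long key;
};

/* Legal either under rcu_read_lock() or with demo_lock held. */
static struct demo_entry *demo_lookup(unsigned long key)
{
        struct demo_entry *e;

        list_for_each_entry_rcu(e, &demo_list, list,
                                lock_is_held(&demo_lock.dep_map))
                if (e->key == key)
                        return e;

        return NULL;
}
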
@@ -250,7 +253,7 @@
         map = acpi_map_lookup(phys, size);
         if (map) {
                 virt = map->virt + (phys - map->phys);
-                map->refcount++;
+                map->track.refcount++;
         }
         mutex_unlock(&acpi_ioremap_lock);
         return virt;
@@ -263,7 +266,7 @@
 {
         struct acpi_ioremap *map;
 
-        list_for_each_entry_rcu(map, &acpi_ioremaps, list)
+        list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
                 if (map->virt <= virt &&
                     virt + size <= map->virt + map->size)
                         return map;
@@ -315,8 +318,8 @@
  * During early init (when acpi_permanent_mmap has not been set yet) this
  * routine simply calls __acpi_map_table() to get the job done.
  */
-void __iomem *__ref
-acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
+void __iomem __ref
+*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
 {
         struct acpi_ioremap *map;
         void __iomem *virt;
@@ -335,7 +338,7 @@
         /* Check if there's a suitable mapping already. */
         map = acpi_map_lookup(phys, size);
         if (map) {
-                map->refcount++;
+                map->track.refcount++;
                 goto out;
         }
 
@@ -347,7 +350,7 @@
 
         pg_off = round_down(phys, PAGE_SIZE);
         pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
-        virt = acpi_map(pg_off, pg_sz);
+        virt = acpi_map(phys, size);
         if (!virt) {
                 mutex_unlock(&acpi_ioremap_lock);
                 kfree(map);
@@ -355,10 +358,10 @@
         }
 
         INIT_LIST_HEAD(&map->list);
-        map->virt = virt;
+        map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK);
         map->phys = pg_off;
         map->size = pg_sz;
-        map->refcount = 1;
+        map->track.refcount = 1;
 
         list_add_tail_rcu(&map->list, &acpi_ioremaps);
 
@@ -374,21 +377,26 @@
 }
 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
 
-/* Must be called with mutex_lock(&acpi_ioremap_lock) */
-static unsigned long acpi_os_drop_map_ref(struct acpi_ioremap *map)
+static void acpi_os_map_remove(struct work_struct *work)
 {
-        unsigned long refcount = --map->refcount;
+        struct acpi_ioremap *map = container_of(to_rcu_work(work),
+                                                struct acpi_ioremap,
+                                                track.rwork);
 
-        if (!refcount)
-                list_del_rcu(&map->list);
-        return refcount;
-}
-
-static void acpi_os_map_cleanup(struct acpi_ioremap *map)
-{
-        synchronize_rcu_expedited();
         acpi_unmap(map->phys, map->virt);
         kfree(map);
+}
+
+/* Must be called with mutex_lock(&acpi_ioremap_lock) */
+static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
+{
+        if (--map->track.refcount)
+                return;
+
+        list_del_rcu(&map->list);
+
+        INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
+        queue_rcu_work(system_wq, &map->track.rwork);
 }
 
 /**
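
The hunk above replaces the blocking synchronize_rcu_expedited() in acpi_os_map_cleanup() with deferred removal via queue_rcu_work(), reusing the refcount storage for the rcu_work once the mapping is unlinked. A minimal sketch of that pattern with an invented demo_map type (only the rcu_work/workqueue API is real):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_map {
        struct list_head list;
        union {                           /* same trick as struct acpi_ioremap */
                unsigned long refcount;   /* valid while the object is live    */
                struct rcu_work rwork;    /* reused once it has been unlinked  */
        } track;
};

static void demo_map_free(struct work_struct *work)
{
        struct demo_map *m = container_of(to_rcu_work(work),
                                          struct demo_map, track.rwork);

        kfree(m);               /* runs only after an RCU grace period */
}

/* Caller holds the update-side lock protecting the list. */
static void demo_map_put(struct demo_map *m)
{
        if (--m->track.refcount)
                return;

        list_del_rcu(&m->list);
        INIT_RCU_WORK(&m->track.rwork, demo_map_free);
        queue_rcu_work(system_wq, &m->track.rwork);
}
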
@@ -397,8 +405,8 @@
  * @size: Size of the address range to drop a reference to.
  *
  * Look up the given virtual address range in the list of existing ACPI memory
- * mappings, drop a reference to it and unmap it if there are no more active
- * references to it.
+ * mappings, drop a reference to it and if there are no more active references
+ * to it, queue it up for later removal.
  *
  * During early init (when acpi_permanent_mmap has not been set yet) this
  * routine simply calls __acpi_unmap_table() to get the job done. Since
@@ -408,7 +416,6 @@
 void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
 {
         struct acpi_ioremap *map;
-        unsigned long refcount;
 
         if (!acpi_permanent_mmap) {
                 __acpi_unmap_table(virt, size);
@@ -416,44 +423,43 @@
         }
 
         mutex_lock(&acpi_ioremap_lock);
+
         map = acpi_map_lookup_virt(virt, size);
         if (!map) {
                 mutex_unlock(&acpi_ioremap_lock);
                 WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
                 return;
         }
-        refcount = acpi_os_drop_map_ref(map);
-        mutex_unlock(&acpi_ioremap_lock);
+        acpi_os_drop_map_ref(map);
 
-        if (!refcount)
-                acpi_os_map_cleanup(map);
+        mutex_unlock(&acpi_ioremap_lock);
 }
 EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
 
+/**
+ * acpi_os_unmap_memory - Drop a memory mapping reference.
+ * @virt: Start of the address range to drop a reference to.
+ * @size: Size of the address range to drop a reference to.
+ */
 void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
 {
-        return acpi_os_unmap_iomem((void __iomem *)virt, size);
+        acpi_os_unmap_iomem((void __iomem *)virt, size);
 }
 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
 
-int acpi_os_map_generic_address(struct acpi_generic_address *gas)
+void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *gas)
 {
         u64 addr;
-        void __iomem *virt;
 
         if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
-                return 0;
+                return NULL;
 
         /* Handle possible alignment issues */
         memcpy(&addr, &gas->address, sizeof(addr));
         if (!addr || !gas->bit_width)
-                return -EINVAL;
+                return NULL;
 
-        virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
-        if (!virt)
-                return -EIO;
-
-        return 0;
+        return acpi_os_map_iomem(addr, gas->bit_width / 8);
 }
 EXPORT_SYMBOL(acpi_os_map_generic_address);
 
@@ -461,7 +467,6 @@
 {
         u64 addr;
         struct acpi_ioremap *map;
-        unsigned long refcount;
 
         if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                 return;
@@ -472,16 +477,15 @@
                 return;
 
         mutex_lock(&acpi_ioremap_lock);
+
         map = acpi_map_lookup(addr, gas->bit_width / 8);
         if (!map) {
                 mutex_unlock(&acpi_ioremap_lock);
                 return;
         }
-        refcount = acpi_os_drop_map_ref(map);
-        mutex_unlock(&acpi_ioremap_lock);
+        acpi_os_drop_map_ref(map);
 
-        if (!refcount)
-                acpi_os_map_cleanup(map);
+        mutex_unlock(&acpi_ioremap_lock);
 }
 EXPORT_SYMBOL(acpi_os_unmap_generic_address);
 
@@ -775,6 +779,7 @@
         return AE_OK;
 }
 
+#ifdef CONFIG_PCI
 acpi_status
 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                                u64 *value, u32 width)
@@ -833,6 +838,7 @@
 
         return (result ? AE_ERROR : AE_OK);
 }
+#endif
 
 static void acpi_os_execute_deferred(struct work_struct *work)
 {
@@ -1564,11 +1570,26 @@
 acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
                                 u32 level)
 {
+        acpi_status status;
+
         if (!(res->flags & IORESOURCE_MEM))
                 return AE_TYPE;
 
-        return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
-                                   acpi_deactivate_mem_region, NULL, res, NULL);
+        status = acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
+                                     acpi_deactivate_mem_region, NULL,
+                                     res, NULL);
+        if (ACPI_FAILURE(status))
+                return status;
+
+        /*
+         * Wait for all of the mappings queued up for removal by
+         * acpi_deactivate_mem_region() to actually go away.
+         */
+        synchronize_rcu();
+        rcu_barrier();
+        flush_scheduled_work();
+
+        return AE_OK;
 }
 EXPORT_SYMBOL_GPL(acpi_release_memory);
 
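
Because unmappings are now queued rather than performed synchronously, acpi_release_memory() above has to wait for them explicitly. A hedged sketch of that wait sequence as a standalone helper (the helper name is hypothetical; the three calls and their ordering are taken from the hunk above):

#include <linux/rcupdate.h>
#include <linux/workqueue.h>

static void demo_wait_for_deferred_unmaps(void)
{
        /* Let pre-existing RCU readers of the mapping list finish. */
        synchronize_rcu();

        /*
         * Wait for the grace periods of already-queued rcu_work items,
         * i.e. until their work functions have been submitted to the
         * workqueue.
         */
        rcu_barrier();

        /* Wait for those work functions to finish running on system_wq. */
        flush_scheduled_work();
}
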
@@ -1596,6 +1617,7 @@
  */
 
 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
+        __acquires(lockp)
 {
         acpi_cpu_flags flags;
         spin_lock_irqsave(lockp, flags);
@@ -1607,6 +1629,7 @@
  */
 
 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
+        __releases(lockp)
 {
         spin_unlock_irqrestore(lockp, flags);
 }
@@ -1721,17 +1744,22 @@
 {
         acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
         acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
-        acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
-        acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
+
+        acpi_gbl_xgpe0_block_logical_address =
+                (unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
+        acpi_gbl_xgpe1_block_logical_address =
+                (unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
+
         if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
                 /*
                  * Use acpi_os_map_generic_address to pre-map the reset
                  * register if it's in system memory.
                  */
-                int rv;
+                void *rv;
 
                 rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
-                pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
+                pr_debug(PREFIX "%s: map reset_reg %s\n", __func__,
+                         rv ? "successful" : "failed");
         }
         acpi_os_initialized = true;
 
@@ -1759,8 +1787,12 @@
 
         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
+        acpi_gbl_xgpe0_block_logical_address = 0UL;
+        acpi_gbl_xgpe1_block_logical_address = 0UL;
+
         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
+
         if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
                 acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);
 