| .. | .. |
|---|
| 13 | 13 | #include <linux/delay.h> |
|---|
| 14 | 14 | #include <linux/dmi.h> |
|---|
| 15 | 15 | #include <linux/init.h> |
|---|
| 16 | +#include <linux/msi.h> |
|---|
| 16 | 17 | #include <linux/of.h> |
|---|
| 17 | | -#include <linux/of_pci.h> |
|---|
| 18 | 18 | #include <linux/pci.h> |
|---|
| 19 | 19 | #include <linux/pm.h> |
|---|
| 20 | 20 | #include <linux/slab.h> |
|---|
| .. | .. |
|---|
| 29 | 29 | #include <linux/pm_runtime.h> |
|---|
| 30 | 30 | #include <linux/pci_hotplug.h> |
|---|
| 31 | 31 | #include <linux/vmalloc.h> |
|---|
| 32 | | -#include <linux/pci-ats.h> |
|---|
| 33 | | -#include <asm/setup.h> |
|---|
| 34 | 32 | #include <asm/dma.h> |
|---|
| 35 | 33 | #include <linux/aer.h> |
|---|
| 34 | +#ifndef __GENKSYMS__ |
|---|
| 35 | +#include <trace/hooks/pci.h> |
|---|
| 36 | +#endif |
|---|
| 36 | 37 | #include "pci.h" |
|---|
| 37 | 38 | |
|---|
| 38 | 39 | DEFINE_MUTEX(pci_slot_mutex); |
|---|
| .. | .. |
|---|
| 48 | 49 | int pci_pci_problems; |
|---|
| 49 | 50 | EXPORT_SYMBOL(pci_pci_problems); |
|---|
| 50 | 51 | |
|---|
| 51 | | -unsigned int pci_pm_d3_delay; |
|---|
| 52 | +unsigned int pci_pm_d3hot_delay; |
|---|
| 52 | 53 | |
|---|
| 53 | 54 | static void pci_pme_list_scan(struct work_struct *work); |
|---|
| 54 | 55 | |
|---|
| .. | .. |
|---|
| 65 | 66 | |
|---|
| 66 | 67 | static void pci_dev_d3_sleep(struct pci_dev *dev) |
|---|
| 67 | 68 | { |
|---|
| 68 | | - unsigned int delay = dev->d3_delay; |
|---|
| 69 | + unsigned int delay = dev->d3hot_delay; |
|---|
| 70 | + int err = -EOPNOTSUPP; |
|---|
| 69 | 71 | |
|---|
| 70 | | - if (delay < pci_pm_d3_delay) |
|---|
| 71 | | - delay = pci_pm_d3_delay; |
|---|
| 72 | + if (delay < pci_pm_d3hot_delay) |
|---|
| 73 | + delay = pci_pm_d3hot_delay; |
|---|
| 72 | 74 | |
|---|
| 73 | | - if (delay) |
|---|
| 74 | | - msleep(delay); |
|---|
| 75 | + if (delay) { |
|---|
| 76 | + trace_android_rvh_pci_d3_sleep(dev, delay, &err); |
|---|
| 77 | + if (err == -EOPNOTSUPP) |
|---|
| 78 | + msleep(delay); |
|---|
| 79 | + } |
|---|
| 75 | 80 | } |
|---|
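The hunk above routes the D3hot settle delay through the new `trace_android_rvh_pci_d3_sleep` restricted vendor hook and only falls back to `msleep()` while `err` is still `-EOPNOTSUPP`. Below is a minimal sketch of a vendor-module handler; the probe signature (a leading `void *` cookie followed by the hook's arguments) and the `register_trace_android_rvh_pci_d3_sleep()` helper are assumed from the usual Android vendor-hook conventions and are not part of this diff.

```c
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <trace/hooks/pci.h>

/* Hypothetical handler: use a tighter hrtimer-backed sleep than msleep(). */
static void example_pci_d3_sleep(void *unused, struct pci_dev *dev,
				 unsigned int delay, int *err)
{
	usleep_range(delay * 1000, delay * 1000 + 500);
	*err = 0;	/* handled: core skips its msleep() fallback */
}

static int __init example_init(void)
{
	/* Restricted vendor hooks are register-once; no unregister path. */
	return register_trace_android_rvh_pci_d3_sleep(example_pci_d3_sleep,
						       NULL);
}
module_init(example_init);
MODULE_LICENSE("GPL");
```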
| 76 | 81 | |
|---|
| 77 | 82 | #ifdef CONFIG_PCI_DOMAINS |
|---|
| .. | .. |
|---|
| 85 | 90 | unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; |
|---|
| 86 | 91 | |
|---|
| 87 | 92 | #define DEFAULT_HOTPLUG_IO_SIZE (256) |
|---|
| 88 | | -#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024) |
|---|
| 89 | | -/* pci=hpmemsize=nnM,hpiosize=nn can override this */ |
|---|
| 93 | +#define DEFAULT_HOTPLUG_MMIO_SIZE (2*1024*1024) |
|---|
| 94 | +#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE (2*1024*1024) |
|---|
| 95 | +/* hpiosize=nn can override this */ |
|---|
| 90 | 96 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; |
|---|
| 91 | | -unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; |
|---|
| 97 | +/* |
|---|
| 98 | + * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size, |
|---|
| 99 | + * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size; |
|---|
| 100 | + * pci=hpmemsize=nnM overrides both |
|---|
| 101 | + */ |
|---|
| 102 | +unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE; |
|---|
| 103 | +unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE; |
|---|
| 92 | 104 | |
|---|
| 93 | 105 | #define DEFAULT_HOTPLUG_BUS_SIZE 1 |
|---|
| 94 | 106 | unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE; |
|---|
| 95 | 107 | |
|---|
| 108 | + |
|---|
| 109 | +/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */ |
|---|
| 110 | +#ifdef CONFIG_PCIE_BUS_TUNE_OFF |
|---|
| 111 | +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF; |
|---|
| 112 | +#elif defined CONFIG_PCIE_BUS_SAFE |
|---|
| 113 | +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; |
|---|
| 114 | +#elif defined CONFIG_PCIE_BUS_PERFORMANCE |
|---|
| 115 | +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE; |
|---|
| 116 | +#elif defined CONFIG_PCIE_BUS_PEER2PEER |
|---|
| 117 | +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER; |
|---|
| 118 | +#else |
|---|
| 96 | 119 | enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT; |
|---|
| 120 | +#endif |
|---|
| 97 | 121 | |
|---|
| 98 | 122 | /* |
|---|
| 99 | 123 | * The default CLS is used if arch didn't set CLS explicitly and not |
|---|
| .. | .. |
|---|
| 123 | 147 | { |
|---|
| 124 | 148 | return pcie_ats_disabled; |
|---|
| 125 | 149 | } |
|---|
| 150 | +EXPORT_SYMBOL_GPL(pci_ats_disabled); |
|---|
| 126 | 151 | |
|---|
| 127 | 152 | /* Disable bridge_d3 for all PCIe ports */ |
|---|
| 128 | 153 | static bool pci_bridge_d3_disable; |
|---|
| .. | .. |
|---|
| 138 | 163 | return 1; |
|---|
| 139 | 164 | } |
|---|
| 140 | 165 | __setup("pcie_port_pm=", pcie_port_pm_setup); |
|---|
| 141 | | - |
|---|
| 142 | | -/* Time to wait after a reset for device to become responsive */ |
|---|
| 143 | | -#define PCIE_RESET_READY_POLL_MS 60000 |
|---|
| 144 | 166 | |
|---|
| 145 | 167 | /** |
|---|
| 146 | 168 | * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children |
|---|
| .. | .. |
|---|
| 164 | 186 | } |
|---|
| 165 | 187 | EXPORT_SYMBOL_GPL(pci_bus_max_busnr); |
|---|
| 166 | 188 | |
|---|
| 189 | +/** |
|---|
| 190 | + * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS |
|---|
| 191 | + * @pdev: the PCI device |
|---|
| 192 | + * |
|---|
| 193 | + * Returns error bits set in PCI_STATUS and clears them. |
|---|
| 194 | + */ |
|---|
| 195 | +int pci_status_get_and_clear_errors(struct pci_dev *pdev) |
|---|
| 196 | +{ |
|---|
| 197 | + u16 status; |
|---|
| 198 | + int ret; |
|---|
| 199 | + |
|---|
| 200 | + ret = pci_read_config_word(pdev, PCI_STATUS, &status); |
|---|
| 201 | + if (ret != PCIBIOS_SUCCESSFUL) |
|---|
| 202 | + return -EIO; |
|---|
| 203 | + |
|---|
| 204 | + status &= PCI_STATUS_ERROR_BITS; |
|---|
| 205 | + if (status) |
|---|
| 206 | + pci_write_config_word(pdev, PCI_STATUS, status); |
|---|
| 207 | + |
|---|
| 208 | + return status; |
|---|
| 209 | +} |
|---|
| 210 | +EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors); |
|---|
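A minimal usage sketch for the new helper, with a hypothetical caller name: the return value is either a negative errno or the subset of PCI_STATUS_ERROR_BITS that was set (and has now been cleared).

```c
#include <linux/pci.h>

/* Hypothetical driver error path using the helper exported above. */
static void example_handle_bus_error(struct pci_dev *pdev)
{
	int status = pci_status_get_and_clear_errors(pdev);

	if (status < 0)
		return;		/* config space read failed */

	if (status & PCI_STATUS_DETECTED_PARITY)
		pci_warn(pdev, "parity error detected\n");
	if (status & PCI_STATUS_SIG_TARGET_ABORT)
		pci_warn(pdev, "target abort signaled\n");
}
```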
| 211 | + |
|---|
| 167 | 212 | #ifdef CONFIG_HAS_IOMEM |
|---|
| 168 | 213 | void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) |
|---|
| 169 | 214 | { |
|---|
| .. | .. |
|---|
| 176 | 221 | pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res); |
|---|
| 177 | 222 | return NULL; |
|---|
| 178 | 223 | } |
|---|
| 179 | | - return ioremap_nocache(res->start, resource_size(res)); |
|---|
| 224 | + return ioremap(res->start, resource_size(res)); |
|---|
| 180 | 225 | } |
|---|
| 181 | 226 | EXPORT_SYMBOL_GPL(pci_ioremap_bar); |
|---|
| 182 | 227 | |
|---|
| .. | .. |
|---|
| 197 | 242 | |
|---|
| 198 | 243 | /** |
|---|
| 199 | 244 | * pci_dev_str_match_path - test if a path string matches a device |
|---|
| 200 | | - * @dev: the PCI device to test |
|---|
| 201 | | - * @p: string to match the device against |
|---|
| 245 | + * @dev: the PCI device to test |
|---|
| 246 | + * @path: string to match the device against |
|---|
| 202 | 247 | * @endptr: pointer to the string after the match |
|---|
| 203 | 248 | * |
|---|
| 204 | 249 | * Test if a string (typically from a kernel parameter) formatted as a |
|---|
| .. | .. |
|---|
| 280 | 325 | |
|---|
| 281 | 326 | /** |
|---|
| 282 | 327 | * pci_dev_str_match - test if a string matches a device |
|---|
| 283 | | - * @dev: the PCI device to test |
|---|
| 284 | | - * @p: string to match the device against |
|---|
| 328 | + * @dev: the PCI device to test |
|---|
| 329 | + * @p: string to match the device against |
|---|
| 285 | 330 | * @endptr: pointer to the string after the match |
|---|
| 286 | 331 | * |
|---|
| 287 | 332 | * Test if a string (typically from a kernel parameter) matches a specified |
|---|
| .. | .. |
|---|
| 341 | 386 | } else { |
|---|
| 342 | 387 | /* |
|---|
| 343 | 388 | * PCI Bus, Device, Function IDs are specified |
|---|
| 344 | | - * (optionally, may include a path of devfns following it) |
|---|
| 389 | + * (optionally, may include a path of devfns following it) |
|---|
| 345 | 390 | */ |
|---|
| 346 | 391 | ret = pci_dev_str_match_path(dev, p, &p); |
|---|
| 347 | 392 | if (ret < 0) |
|---|
| .. | .. |
|---|
| 425 | 470 | * Tell if a device supports a given PCI capability. |
|---|
| 426 | 471 | * Returns the address of the requested capability structure within the |
|---|
| 427 | 472 | * device's PCI configuration space or 0 in case the device does not |
|---|
| 428 | | - * support it. Possible values for @cap: |
|---|
| 473 | + * support it. Possible values for @cap include: |
|---|
| 429 | 474 | * |
|---|
| 430 | 475 | * %PCI_CAP_ID_PM Power Management |
|---|
| 431 | 476 | * %PCI_CAP_ID_AGP Accelerated Graphics Port |
|---|
| .. | .. |
|---|
| 450 | 495 | |
|---|
| 451 | 496 | /** |
|---|
| 452 | 497 | * pci_bus_find_capability - query for devices' capabilities |
|---|
| 453 | | - * @bus: the PCI bus to query |
|---|
| 498 | + * @bus: the PCI bus to query |
|---|
| 454 | 499 | * @devfn: PCI device to query |
|---|
| 455 | | - * @cap: capability code |
|---|
| 500 | + * @cap: capability code |
|---|
| 456 | 501 | * |
|---|
| 457 | | - * Like pci_find_capability() but works for pci devices that do not have a |
|---|
| 502 | + * Like pci_find_capability() but works for PCI devices that do not have a |
|---|
| 458 | 503 | * pci_dev structure set up yet. |
|---|
| 459 | 504 | * |
|---|
| 460 | 505 | * Returns the address of the requested capability structure within the |
|---|
| .. | .. |
|---|
| 535 | 580 | * |
|---|
| 536 | 581 | * Returns the address of the requested extended capability structure |
|---|
| 537 | 582 | * within the device's PCI configuration space or 0 if the device does |
|---|
| 538 | | - * not support it. Possible values for @cap: |
|---|
| 583 | + * not support it. Possible values for @cap include: |
|---|
| 539 | 584 | * |
|---|
| 540 | 585 | * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting |
|---|
| 541 | 586 | * %PCI_EXT_CAP_ID_VC Virtual Channel |
|---|
| .. | .. |
|---|
| 547 | 592 | return pci_find_next_ext_capability(dev, 0, cap); |
|---|
| 548 | 593 | } |
|---|
| 549 | 594 | EXPORT_SYMBOL_GPL(pci_find_ext_capability); |
|---|
| 595 | + |
|---|
| 596 | +/** |
|---|
| 597 | + * pci_get_dsn - Read and return the 8-byte Device Serial Number |
|---|
| 598 | + * @dev: PCI device to query |
|---|
| 599 | + * |
|---|
| 600 | + * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial |
|---|
| 601 | + * Number. |
|---|
| 602 | + * |
|---|
| 603 | + * Returns the DSN, or zero if the capability does not exist. |
|---|
| 604 | + */ |
|---|
| 605 | +u64 pci_get_dsn(struct pci_dev *dev) |
|---|
| 606 | +{ |
|---|
| 607 | + u32 dword; |
|---|
| 608 | + u64 dsn; |
|---|
| 609 | + int pos; |
|---|
| 610 | + |
|---|
| 611 | + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); |
|---|
| 612 | + if (!pos) |
|---|
| 613 | + return 0; |
|---|
| 614 | + |
|---|
| 615 | + /* |
|---|
| 616 | + * The Device Serial Number is two dwords offset 4 bytes from the |
|---|
| 617 | + * capability position. The specification says that the first dword is |
|---|
| 618 | + * the lower half, and the second dword is the upper half. |
|---|
| 619 | + */ |
|---|
| 620 | + pos += 4; |
|---|
| 621 | + pci_read_config_dword(dev, pos, &dword); |
|---|
| 622 | + dsn = (u64)dword; |
|---|
| 623 | + pci_read_config_dword(dev, pos + 4, &dword); |
|---|
| 624 | + dsn |= ((u64)dword) << 32; |
|---|
| 625 | + |
|---|
| 626 | + return dsn; |
|---|
| 627 | +} |
|---|
| 628 | +EXPORT_SYMBOL_GPL(pci_get_dsn); |
|---|
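For illustration, a probe-time caller might use the new export as below (the probe function is hypothetical); a zero return simply means the device has no Device Serial Number capability.

```c
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	u64 dsn = pci_get_dsn(pdev);

	if (dsn)
		pci_info(pdev, "Device Serial Number: %016llx\n",
			 (unsigned long long)dsn);
	else
		pci_info(pdev, "no Device Serial Number capability\n");

	return 0;
}
```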
| 550 | 629 | |
|---|
| 551 | 630 | static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap) |
|---|
| 552 | 631 | { |
|---|
| .. | .. |
|---|
| 618 | 697 | EXPORT_SYMBOL_GPL(pci_find_ht_capability); |
|---|
| 619 | 698 | |
|---|
| 620 | 699 | /** |
|---|
| 621 | | - * pci_find_parent_resource - return resource region of parent bus of given region |
|---|
| 700 | + * pci_find_parent_resource - return resource region of parent bus of given |
|---|
| 701 | + * region |
|---|
| 622 | 702 | * @dev: PCI device structure contains resources to be searched |
|---|
| 623 | 703 | * @res: child resource record for which parent is sought |
|---|
| 624 | 704 | * |
|---|
| 625 | | - * For given resource region of given device, return the resource |
|---|
| 626 | | - * region of parent bus the given region is contained in. |
|---|
| 705 | + * For given resource region of given device, return the resource region of |
|---|
| 706 | + * parent bus the given region is contained in. |
|---|
| 627 | 707 | */ |
|---|
| 628 | 708 | struct resource *pci_find_parent_resource(const struct pci_dev *dev, |
|---|
| 629 | 709 | struct resource *res) |
|---|
| .. | .. |
|---|
| 673 | 753 | { |
|---|
| 674 | 754 | int i; |
|---|
| 675 | 755 | |
|---|
| 676 | | - for (i = 0; i < PCI_ROM_RESOURCE; i++) { |
|---|
| 756 | + for (i = 0; i < PCI_STD_NUM_BARS; i++) { |
|---|
| 677 | 757 | struct resource *r = &dev->resource[i]; |
|---|
| 678 | 758 | |
|---|
| 679 | 759 | if (r->start && resource_contains(r, res)) |
|---|
| .. | .. |
|---|
| 683 | 763 | return NULL; |
|---|
| 684 | 764 | } |
|---|
| 685 | 765 | EXPORT_SYMBOL(pci_find_resource); |
|---|
| 686 | | - |
|---|
| 687 | | -/** |
|---|
| 688 | | - * pci_find_pcie_root_port - return PCIe Root Port |
|---|
| 689 | | - * @dev: PCI device to query |
|---|
| 690 | | - * |
|---|
| 691 | | - * Traverse up the parent chain and return the PCIe Root Port PCI Device |
|---|
| 692 | | - * for a given PCI Device. |
|---|
| 693 | | - */ |
|---|
| 694 | | -struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev) |
|---|
| 695 | | -{ |
|---|
| 696 | | - struct pci_dev *bridge, *highest_pcie_bridge = dev; |
|---|
| 697 | | - |
|---|
| 698 | | - bridge = pci_upstream_bridge(dev); |
|---|
| 699 | | - while (bridge && pci_is_pcie(bridge)) { |
|---|
| 700 | | - highest_pcie_bridge = bridge; |
|---|
| 701 | | - bridge = pci_upstream_bridge(bridge); |
|---|
| 702 | | - } |
|---|
| 703 | | - |
|---|
| 704 | | - if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT) |
|---|
| 705 | | - return NULL; |
|---|
| 706 | | - |
|---|
| 707 | | - return highest_pcie_bridge; |
|---|
| 708 | | -} |
|---|
| 709 | | -EXPORT_SYMBOL(pci_find_pcie_root_port); |
|---|
| 710 | 766 | |
|---|
| 711 | 767 | /** |
|---|
| 712 | 768 | * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos |
|---|
| .. | .. |
|---|
| 732 | 788 | } |
|---|
| 733 | 789 | |
|---|
| 734 | 790 | return 0; |
|---|
| 791 | +} |
|---|
| 792 | + |
|---|
| 793 | +static int pci_acs_enable; |
|---|
| 794 | + |
|---|
| 795 | +/** |
|---|
| 796 | + * pci_request_acs - ask for ACS to be enabled if supported |
|---|
| 797 | + */ |
|---|
| 798 | +void pci_request_acs(void) |
|---|
| 799 | +{ |
|---|
| 800 | + pci_acs_enable = 1; |
|---|
| 801 | +} |
|---|
| 802 | + |
|---|
| 803 | +static const char *disable_acs_redir_param; |
|---|
| 804 | + |
|---|
| 805 | +/** |
|---|
| 806 | + * pci_disable_acs_redir - disable ACS redirect capabilities |
|---|
| 807 | + * @dev: the PCI device |
|---|
| 808 | + * |
|---|
| 809 | + * For only devices specified in the disable_acs_redir parameter. |
|---|
| 810 | + */ |
|---|
| 811 | +static void pci_disable_acs_redir(struct pci_dev *dev) |
|---|
| 812 | +{ |
|---|
| 813 | + int ret = 0; |
|---|
| 814 | + const char *p; |
|---|
| 815 | + int pos; |
|---|
| 816 | + u16 ctrl; |
|---|
| 817 | + |
|---|
| 818 | + if (!disable_acs_redir_param) |
|---|
| 819 | + return; |
|---|
| 820 | + |
|---|
| 821 | + p = disable_acs_redir_param; |
|---|
| 822 | + while (*p) { |
|---|
| 823 | + ret = pci_dev_str_match(dev, p, &p); |
|---|
| 824 | + if (ret < 0) { |
|---|
| 825 | + pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n", |
|---|
| 826 | + disable_acs_redir_param); |
|---|
| 827 | + |
|---|
| 828 | + break; |
|---|
| 829 | + } else if (ret == 1) { |
|---|
| 830 | + /* Found a match */ |
|---|
| 831 | + break; |
|---|
| 832 | + } |
|---|
| 833 | + |
|---|
| 834 | + if (*p != ';' && *p != ',') { |
|---|
| 835 | + /* End of param or invalid format */ |
|---|
| 836 | + break; |
|---|
| 837 | + } |
|---|
| 838 | + p++; |
|---|
| 839 | + } |
|---|
| 840 | + |
|---|
| 841 | + if (ret != 1) |
|---|
| 842 | + return; |
|---|
| 843 | + |
|---|
| 844 | + if (!pci_dev_specific_disable_acs_redir(dev)) |
|---|
| 845 | + return; |
|---|
| 846 | + |
|---|
| 847 | + pos = dev->acs_cap; |
|---|
| 848 | + if (!pos) { |
|---|
| 849 | + pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n"); |
|---|
| 850 | + return; |
|---|
| 851 | + } |
|---|
| 852 | + |
|---|
| 853 | + pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); |
|---|
| 854 | + |
|---|
| 855 | + /* P2P Request & Completion Redirect */ |
|---|
| 856 | + ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC); |
|---|
| 857 | + |
|---|
| 858 | + pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); |
|---|
| 859 | + |
|---|
| 860 | + pci_info(dev, "disabled ACS redirect\n"); |
|---|
| 861 | +} |
|---|
| 862 | + |
|---|
| 863 | +/** |
|---|
| 864 | + * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities |
|---|
| 865 | + * @dev: the PCI device |
|---|
| 866 | + */ |
|---|
| 867 | +static void pci_std_enable_acs(struct pci_dev *dev) |
|---|
| 868 | +{ |
|---|
| 869 | + int pos; |
|---|
| 870 | + u16 cap; |
|---|
| 871 | + u16 ctrl; |
|---|
| 872 | + |
|---|
| 873 | + pos = dev->acs_cap; |
|---|
| 874 | + if (!pos) |
|---|
| 875 | + return; |
|---|
| 876 | + |
|---|
| 877 | + pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); |
|---|
| 878 | + pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); |
|---|
| 879 | + |
|---|
| 880 | + /* Source Validation */ |
|---|
| 881 | + ctrl |= (cap & PCI_ACS_SV); |
|---|
| 882 | + |
|---|
| 883 | + /* P2P Request Redirect */ |
|---|
| 884 | + ctrl |= (cap & PCI_ACS_RR); |
|---|
| 885 | + |
|---|
| 886 | + /* P2P Completion Redirect */ |
|---|
| 887 | + ctrl |= (cap & PCI_ACS_CR); |
|---|
| 888 | + |
|---|
| 889 | + /* Upstream Forwarding */ |
|---|
| 890 | + ctrl |= (cap & PCI_ACS_UF); |
|---|
| 891 | + |
|---|
| 892 | + /* Enable Translation Blocking for external devices */ |
|---|
| 893 | + if (dev->external_facing || dev->untrusted) |
|---|
| 894 | + ctrl |= (cap & PCI_ACS_TB); |
|---|
| 895 | + |
|---|
| 896 | + pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); |
|---|
| 897 | +} |
|---|
| 898 | + |
|---|
| 899 | +/** |
|---|
| 900 | + * pci_enable_acs - enable ACS if hardware support it |
|---|
| 901 | + * @dev: the PCI device |
|---|
| 902 | + */ |
|---|
| 903 | +static void pci_enable_acs(struct pci_dev *dev) |
|---|
| 904 | +{ |
|---|
| 905 | + if (!pci_acs_enable) |
|---|
| 906 | + goto disable_acs_redir; |
|---|
| 907 | + |
|---|
| 908 | + if (!pci_dev_specific_enable_acs(dev)) |
|---|
| 909 | + goto disable_acs_redir; |
|---|
| 910 | + |
|---|
| 911 | + pci_std_enable_acs(dev); |
|---|
| 912 | + |
|---|
| 913 | +disable_acs_redir: |
|---|
| 914 | + /* |
|---|
| 915 | + * Note: pci_disable_acs_redir() must be called even if ACS was not |
|---|
| 916 | + * enabled by the kernel because it may have been enabled by |
|---|
| 917 | + * platform firmware. So if we are told to disable it, we should |
|---|
| 918 | + * always disable it after setting the kernel's default |
|---|
| 919 | + * preferences. |
|---|
| 920 | + */ |
|---|
| 921 | + pci_disable_acs_redir(dev); |
|---|
| 735 | 922 | } |
|---|
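pci_enable_acs() only programs the ACS control bits when something has called pci_request_acs() beforehand, typically an IOMMU driver during early init. A hedged sketch of such a caller, with an illustrative function name:

```c
#include <linux/init.h>
#include <linux/pci.h>

static int __init example_iommu_init(void)
{
	/*
	 * Sets pci_acs_enable so that pci_enable_acs() turns on Source
	 * Validation, Request/Completion Redirect and Upstream Forwarding
	 * for devices enumerated afterwards.
	 */
	pci_request_acs();

	/* ... remainder of IOMMU initialization ... */
	return 0;
}
```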
| 736 | 923 | |
|---|
| 737 | 924 | /** |
|---|
| .. | .. |
|---|
| 776 | 963 | return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN; |
|---|
| 777 | 964 | } |
|---|
| 778 | 965 | |
|---|
| 966 | +static inline void platform_pci_refresh_power_state(struct pci_dev *dev) |
|---|
| 967 | +{ |
|---|
| 968 | + if (pci_platform_pm && pci_platform_pm->refresh_state) |
|---|
| 969 | + pci_platform_pm->refresh_state(dev); |
|---|
| 970 | +} |
|---|
| 971 | + |
|---|
| 779 | 972 | static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev) |
|---|
| 780 | 973 | { |
|---|
| 781 | 974 | return pci_platform_pm ? |
|---|
| .. | .. |
|---|
| 793 | 986 | return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false; |
|---|
| 794 | 987 | } |
|---|
| 795 | 988 | |
|---|
| 989 | +static inline bool platform_pci_bridge_d3(struct pci_dev *dev) |
|---|
| 990 | +{ |
|---|
| 991 | + if (pci_platform_pm && pci_platform_pm->bridge_d3) |
|---|
| 992 | + return pci_platform_pm->bridge_d3(dev); |
|---|
| 993 | + return false; |
|---|
| 994 | +} |
|---|
| 995 | + |
|---|
| 796 | 996 | /** |
|---|
| 797 | 997 | * pci_raw_set_power_state - Use PCI PM registers to set the power state of |
|---|
| 798 | | - * given PCI device |
|---|
| 998 | + * given PCI device |
|---|
| 799 | 999 | * @dev: PCI device to handle. |
|---|
| 800 | 1000 | * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. |
|---|
| 801 | 1001 | * |
|---|
| .. | .. |
|---|
| 821 | 1021 | if (state < PCI_D0 || state > PCI_D3hot) |
|---|
| 822 | 1022 | return -EINVAL; |
|---|
| 823 | 1023 | |
|---|
| 824 | | - /* Validate current state: |
|---|
| 825 | | - * Can enter D0 from any state, but if we can only go deeper |
|---|
| 826 | | - * to sleep if we're already in a low power state |
|---|
| 1024 | + /* |
|---|
| 1025 | + * Validate transition: We can enter D0 from any state, but if |
|---|
| 1026 | + * we're already in a low-power state, we can only go deeper. E.g., |
|---|
| 1027 | + * we can go from D1 to D3, but we can't go directly from D3 to D1; |
|---|
| 1028 | + * we'd have to go from D3 to D0, then to D1. |
|---|
| 827 | 1029 | */ |
|---|
| 828 | 1030 | if (state != PCI_D0 && dev->current_state <= PCI_D3cold |
|---|
| 829 | 1031 | && dev->current_state > state) { |
|---|
| 830 | | - pci_err(dev, "invalid power transition (from state %d to %d)\n", |
|---|
| 831 | | - dev->current_state, state); |
|---|
| 1032 | + pci_err(dev, "invalid power transition (from %s to %s)\n", |
|---|
| 1033 | + pci_power_name(dev->current_state), |
|---|
| 1034 | + pci_power_name(state)); |
|---|
| 832 | 1035 | return -EINVAL; |
|---|
| 833 | 1036 | } |
|---|
| 834 | 1037 | |
|---|
| 835 | | - /* check if this device supports the desired state */ |
|---|
| 1038 | + /* Check if this device supports the desired state */ |
|---|
| 836 | 1039 | if ((state == PCI_D1 && !dev->d1_support) |
|---|
| 837 | 1040 | || (state == PCI_D2 && !dev->d2_support)) |
|---|
| 838 | 1041 | return -EIO; |
|---|
| 839 | 1042 | |
|---|
| 840 | 1043 | pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); |
|---|
| 1044 | + if (pmcsr == (u16) ~0) { |
|---|
| 1045 | + pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n", |
|---|
| 1046 | + pci_power_name(dev->current_state), |
|---|
| 1047 | + pci_power_name(state)); |
|---|
| 1048 | + return -EIO; |
|---|
| 1049 | + } |
|---|
| 841 | 1050 | |
|---|
| 842 | | - /* If we're (effectively) in D3, force entire word to 0. |
|---|
| 1051 | + /* |
|---|
| 1052 | + * If we're (effectively) in D3, force entire word to 0. |
|---|
| 843 | 1053 | * This doesn't affect PME_Status, disables PME_En, and |
|---|
| 844 | 1054 | * sets PowerState to 0. |
|---|
| 845 | 1055 | */ |
|---|
| .. | .. |
|---|
| 856 | 1066 | if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot |
|---|
| 857 | 1067 | && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) |
|---|
| 858 | 1068 | need_restore = true; |
|---|
| 859 | | - /* Fall-through: force to D0 */ |
|---|
| 1069 | + fallthrough; /* force to D0 */ |
|---|
| 860 | 1070 | default: |
|---|
| 861 | 1071 | pmcsr = 0; |
|---|
| 862 | 1072 | break; |
|---|
| 863 | 1073 | } |
|---|
| 864 | 1074 | |
|---|
| 865 | | - /* enter specified state */ |
|---|
| 1075 | + /* Enter specified state */ |
|---|
| 866 | 1076 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); |
|---|
| 867 | 1077 | |
|---|
| 868 | | - /* Mandatory power management transition delays */ |
|---|
| 869 | | - /* see PCI PM 1.1 5.6.1 table 18 */ |
|---|
| 1078 | + /* |
|---|
| 1079 | + * Mandatory power management transition delays; see PCI PM 1.1 |
|---|
| 1080 | + * 5.6.1 table 18 |
|---|
| 1081 | + */ |
|---|
| 870 | 1082 | if (state == PCI_D3hot || dev->current_state == PCI_D3hot) |
|---|
| 871 | 1083 | pci_dev_d3_sleep(dev); |
|---|
| 872 | 1084 | else if (state == PCI_D2 || dev->current_state == PCI_D2) |
|---|
| .. | .. |
|---|
| 874 | 1086 | |
|---|
| 875 | 1087 | pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); |
|---|
| 876 | 1088 | dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); |
|---|
| 877 | | - if (dev->current_state != state && printk_ratelimit()) |
|---|
| 878 | | - pci_info(dev, "Refused to change power state, currently in D%d\n", |
|---|
| 879 | | - dev->current_state); |
|---|
| 1089 | + if (dev->current_state != state) |
|---|
| 1090 | + pci_info_ratelimited(dev, "refused to change power state from %s to %s\n", |
|---|
| 1091 | + pci_power_name(dev->current_state), |
|---|
| 1092 | + pci_power_name(state)); |
|---|
| 880 | 1093 | |
|---|
| 881 | 1094 | /* |
|---|
| 882 | 1095 | * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT |
|---|
| .. | .. |
|---|
| 928 | 1141 | } |
|---|
| 929 | 1142 | |
|---|
| 930 | 1143 | /** |
|---|
| 1144 | + * pci_refresh_power_state - Refresh the given device's power state data |
|---|
| 1145 | + * @dev: Target PCI device. |
|---|
| 1146 | + * |
|---|
| 1147 | + * Ask the platform to refresh the device's power state information and invoke |
|---|
| 1148 | + * pci_update_current_state() to update its current PCI power state. |
|---|
| 1149 | + */ |
|---|
| 1150 | +void pci_refresh_power_state(struct pci_dev *dev) |
|---|
| 1151 | +{ |
|---|
| 1152 | + if (platform_pci_power_manageable(dev)) |
|---|
| 1153 | + platform_pci_refresh_power_state(dev); |
|---|
| 1154 | + |
|---|
| 1155 | + pci_update_current_state(dev, dev->current_state); |
|---|
| 1156 | +} |
|---|
| 1157 | + |
|---|
| 1158 | +/** |
|---|
| 931 | 1159 | * pci_platform_power_transition - Use platform to change device power state |
|---|
| 932 | 1160 | * @dev: PCI device to handle. |
|---|
| 933 | 1161 | * @state: State to put the device into. |
|---|
| 934 | 1162 | */ |
|---|
| 935 | | -static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state) |
|---|
| 1163 | +int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state) |
|---|
| 936 | 1164 | { |
|---|
| 937 | 1165 | int error; |
|---|
| 938 | 1166 | |
|---|
| .. | .. |
|---|
| 948 | 1176 | |
|---|
| 949 | 1177 | return error; |
|---|
| 950 | 1178 | } |
|---|
| 1179 | +EXPORT_SYMBOL_GPL(pci_platform_power_transition); |
|---|
| 951 | 1180 | |
|---|
| 952 | 1181 | /** |
|---|
| 953 | 1182 | * pci_wakeup - Wake up a PCI device |
|---|
| .. | .. |
|---|
| 971 | 1200 | pci_walk_bus(bus, pci_wakeup, NULL); |
|---|
| 972 | 1201 | } |
|---|
| 973 | 1202 | |
|---|
| 974 | | -/** |
|---|
| 975 | | - * __pci_start_power_transition - Start power transition of a PCI device |
|---|
| 976 | | - * @dev: PCI device to handle. |
|---|
| 977 | | - * @state: State to put the device into. |
|---|
| 978 | | - */ |
|---|
| 979 | | -static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) |
|---|
| 1203 | +static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) |
|---|
| 980 | 1204 | { |
|---|
| 981 | | - if (state == PCI_D0) { |
|---|
| 982 | | - pci_platform_power_transition(dev, PCI_D0); |
|---|
| 983 | | - /* |
|---|
| 984 | | - * Mandatory power management transition delays, see |
|---|
| 985 | | - * PCI Express Base Specification Revision 2.0 Section |
|---|
| 986 | | - * 6.6.1: Conventional Reset. Do not delay for |
|---|
| 987 | | - * devices powered on/off by corresponding bridge, |
|---|
| 988 | | - * because have already delayed for the bridge. |
|---|
| 989 | | - */ |
|---|
| 990 | | - if (dev->runtime_d3cold) { |
|---|
| 991 | | - if (dev->d3cold_delay) |
|---|
| 992 | | - msleep(dev->d3cold_delay); |
|---|
| 993 | | - /* |
|---|
| 994 | | - * When powering on a bridge from D3cold, the |
|---|
| 995 | | - * whole hierarchy may be powered on into |
|---|
| 996 | | - * D0uninitialized state, resume them to give |
|---|
| 997 | | - * them a chance to suspend again |
|---|
| 998 | | - */ |
|---|
| 999 | | - pci_wakeup_bus(dev->subordinate); |
|---|
| 1205 | + int delay = 1; |
|---|
| 1206 | + u32 id; |
|---|
| 1207 | + |
|---|
| 1208 | + /* |
|---|
| 1209 | + * After reset, the device should not silently discard config |
|---|
| 1210 | + * requests, but it may still indicate that it needs more time by |
|---|
| 1211 | + * responding to them with CRS completions. The Root Port will |
|---|
| 1212 | + * generally synthesize ~0 data to complete the read (except when |
|---|
| 1213 | + * CRS SV is enabled and the read was for the Vendor ID; in that |
|---|
| 1214 | + * case it synthesizes 0x0001 data). |
|---|
| 1215 | + * |
|---|
| 1216 | + * Wait for the device to return a non-CRS completion. Read the |
|---|
| 1217 | + * Command register instead of Vendor ID so we don't have to |
|---|
| 1218 | + * contend with the CRS SV value. |
|---|
| 1219 | + */ |
|---|
| 1220 | + pci_read_config_dword(dev, PCI_COMMAND, &id); |
|---|
| 1221 | + while (id == ~0) { |
|---|
| 1222 | + if (delay > timeout) { |
|---|
| 1223 | + pci_warn(dev, "not ready %dms after %s; giving up\n", |
|---|
| 1224 | + delay - 1, reset_type); |
|---|
| 1225 | + return -ENOTTY; |
|---|
| 1000 | 1226 | } |
|---|
| 1227 | + |
|---|
| 1228 | + if (delay > PCI_RESET_WAIT) |
|---|
| 1229 | + pci_info(dev, "not ready %dms after %s; waiting\n", |
|---|
| 1230 | + delay - 1, reset_type); |
|---|
| 1231 | + |
|---|
| 1232 | + msleep(delay); |
|---|
| 1233 | + delay *= 2; |
|---|
| 1234 | + pci_read_config_dword(dev, PCI_COMMAND, &id); |
|---|
| 1001 | 1235 | } |
|---|
| 1236 | + |
|---|
| 1237 | + if (delay > PCI_RESET_WAIT) |
|---|
| 1238 | + pci_info(dev, "ready %dms after %s\n", delay - 1, |
|---|
| 1239 | + reset_type); |
|---|
| 1240 | + |
|---|
| 1241 | + return 0; |
|---|
| 1242 | +} |
|---|
| 1243 | + |
|---|
| 1244 | +/** |
|---|
| 1245 | + * pci_power_up - Put the given device into D0 |
|---|
| 1246 | + * @dev: PCI device to power up |
|---|
| 1247 | + */ |
|---|
| 1248 | +int pci_power_up(struct pci_dev *dev) |
|---|
| 1249 | +{ |
|---|
| 1250 | + pci_platform_power_transition(dev, PCI_D0); |
|---|
| 1251 | + |
|---|
| 1252 | + /* |
|---|
| 1253 | + * Mandatory power management transition delays are handled in |
|---|
| 1254 | + * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the |
|---|
| 1255 | + * corresponding bridge. |
|---|
| 1256 | + */ |
|---|
| 1257 | + if (dev->runtime_d3cold) { |
|---|
| 1258 | + /* |
|---|
| 1259 | + * When powering on a bridge from D3cold, the whole hierarchy |
|---|
| 1260 | + * may be powered on into D0uninitialized state, resume them to |
|---|
| 1261 | + * give them a chance to suspend again |
|---|
| 1262 | + */ |
|---|
| 1263 | + pci_wakeup_bus(dev->subordinate); |
|---|
| 1264 | + } |
|---|
| 1265 | + |
|---|
| 1266 | + return pci_raw_set_power_state(dev, PCI_D0); |
|---|
| 1002 | 1267 | } |
|---|
| 1003 | 1268 | |
|---|
| 1004 | 1269 | /** |
|---|
| .. | .. |
|---|
| 1026 | 1291 | } |
|---|
| 1027 | 1292 | |
|---|
| 1028 | 1293 | /** |
|---|
| 1029 | | - * __pci_complete_power_transition - Complete power transition of a PCI device |
|---|
| 1030 | | - * @dev: PCI device to handle. |
|---|
| 1031 | | - * @state: State to put the device into. |
|---|
| 1032 | | - * |
|---|
| 1033 | | - * This function should not be called directly by device drivers. |
|---|
| 1034 | | - */ |
|---|
| 1035 | | -int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) |
|---|
| 1036 | | -{ |
|---|
| 1037 | | - int ret; |
|---|
| 1038 | | - |
|---|
| 1039 | | - if (state <= PCI_D0) |
|---|
| 1040 | | - return -EINVAL; |
|---|
| 1041 | | - ret = pci_platform_power_transition(dev, state); |
|---|
| 1042 | | - /* Power off the bridge may power off the whole hierarchy */ |
|---|
| 1043 | | - if (!ret && state == PCI_D3cold) |
|---|
| 1044 | | - pci_bus_set_current_state(dev->subordinate, PCI_D3cold); |
|---|
| 1045 | | - return ret; |
|---|
| 1046 | | -} |
|---|
| 1047 | | -EXPORT_SYMBOL_GPL(__pci_complete_power_transition); |
|---|
| 1048 | | - |
|---|
| 1049 | | -/** |
|---|
| 1050 | 1294 | * pci_set_power_state - Set the power state of a PCI device |
|---|
| 1051 | 1295 | * @dev: PCI device to handle. |
|---|
| 1052 | 1296 | * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. |
|---|
| .. | .. |
|---|
| 1067 | 1311 | { |
|---|
| 1068 | 1312 | int error; |
|---|
| 1069 | 1313 | |
|---|
| 1070 | | - /* bound the state we're entering */ |
|---|
| 1314 | + /* Bound the state we're entering */ |
|---|
| 1071 | 1315 | if (state > PCI_D3cold) |
|---|
| 1072 | 1316 | state = PCI_D3cold; |
|---|
| 1073 | 1317 | else if (state < PCI_D0) |
|---|
| 1074 | 1318 | state = PCI_D0; |
|---|
| 1075 | 1319 | else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) |
|---|
| 1320 | + |
|---|
| 1076 | 1321 | /* |
|---|
| 1077 | | - * If the device or the parent bridge do not support PCI PM, |
|---|
| 1078 | | - * ignore the request if we're doing anything other than putting |
|---|
| 1079 | | - * it into D0 (which would only happen on boot). |
|---|
| 1322 | + * If the device or the parent bridge do not support PCI |
|---|
| 1323 | + * PM, ignore the request if we're doing anything other |
|---|
| 1324 | + * than putting it into D0 (which would only happen on |
|---|
| 1325 | + * boot). |
|---|
| 1080 | 1326 | */ |
|---|
| 1081 | 1327 | return 0; |
|---|
| 1082 | 1328 | |
|---|
| .. | .. |
|---|
| 1084 | 1330 | if (dev->current_state == state) |
|---|
| 1085 | 1331 | return 0; |
|---|
| 1086 | 1332 | |
|---|
| 1087 | | - __pci_start_power_transition(dev, state); |
|---|
| 1333 | + if (state == PCI_D0) |
|---|
| 1334 | + return pci_power_up(dev); |
|---|
| 1088 | 1335 | |
|---|
| 1089 | | - /* This device is quirked not to be put into D3, so |
|---|
| 1090 | | - don't put it in D3 */ |
|---|
| 1336 | + /* |
|---|
| 1337 | + * This device is quirked not to be put into D3, so don't put it in |
|---|
| 1338 | + * D3 |
|---|
| 1339 | + */ |
|---|
| 1091 | 1340 | if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) |
|---|
| 1092 | 1341 | return 0; |
|---|
| 1093 | 1342 | |
|---|
| .. | .. |
|---|
| 1098 | 1347 | error = pci_raw_set_power_state(dev, state > PCI_D3hot ? |
|---|
| 1099 | 1348 | PCI_D3hot : state); |
|---|
| 1100 | 1349 | |
|---|
| 1101 | | - if (!__pci_complete_power_transition(dev, state)) |
|---|
| 1102 | | - error = 0; |
|---|
| 1350 | + if (pci_platform_power_transition(dev, state)) |
|---|
| 1351 | + return error; |
|---|
| 1103 | 1352 | |
|---|
| 1104 | | - return error; |
|---|
| 1353 | + /* Powering off a bridge may power off the whole hierarchy */ |
|---|
| 1354 | + if (state == PCI_D3cold) |
|---|
| 1355 | + pci_bus_set_current_state(dev->subordinate, PCI_D3cold); |
|---|
| 1356 | + |
|---|
| 1357 | + return 0; |
|---|
| 1105 | 1358 | } |
|---|
| 1106 | 1359 | EXPORT_SYMBOL(pci_set_power_state); |
|---|
| 1107 | | - |
|---|
| 1108 | | -/** |
|---|
| 1109 | | - * pci_power_up - Put the given device into D0 forcibly |
|---|
| 1110 | | - * @dev: PCI device to power up |
|---|
| 1111 | | - */ |
|---|
| 1112 | | -void pci_power_up(struct pci_dev *dev) |
|---|
| 1113 | | -{ |
|---|
| 1114 | | - __pci_start_power_transition(dev, PCI_D0); |
|---|
| 1115 | | - pci_raw_set_power_state(dev, PCI_D0); |
|---|
| 1116 | | - pci_update_current_state(dev, PCI_D0); |
|---|
| 1117 | | -} |
|---|
| 1118 | 1360 | |
|---|
| 1119 | 1361 | /** |
|---|
| 1120 | 1362 | * pci_choose_state - Choose the power state of a PCI device |
|---|
| 1121 | 1363 | * @dev: PCI device to be suspended |
|---|
| 1122 | 1364 | * @state: target sleep state for the whole system. This is the value |
|---|
| 1123 | | - * that is passed to suspend() function. |
|---|
| 1365 | + * that is passed to suspend() function. |
|---|
| 1124 | 1366 | * |
|---|
| 1125 | 1367 | * Returns PCI power state suitable for given device and given system |
|---|
| 1126 | 1368 | * message. |
|---|
| 1127 | 1369 | */ |
|---|
| 1128 | | - |
|---|
| 1129 | 1370 | pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) |
|---|
| 1130 | 1371 | { |
|---|
| 1131 | 1372 | pci_power_t ret; |
|---|
| .. | .. |
|---|
| 1226 | 1467 | pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]); |
|---|
| 1227 | 1468 | } |
|---|
| 1228 | 1469 | |
|---|
| 1229 | | - |
|---|
| 1230 | 1470 | static int pci_save_pcix_state(struct pci_dev *dev) |
|---|
| 1231 | 1471 | { |
|---|
| 1232 | 1472 | int pos; |
|---|
| .. | .. |
|---|
| 1263 | 1503 | pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]); |
|---|
| 1264 | 1504 | } |
|---|
| 1265 | 1505 | |
|---|
| 1506 | +static void pci_save_ltr_state(struct pci_dev *dev) |
|---|
| 1507 | +{ |
|---|
| 1508 | + int ltr; |
|---|
| 1509 | + struct pci_cap_saved_state *save_state; |
|---|
| 1510 | + u16 *cap; |
|---|
| 1511 | + |
|---|
| 1512 | + if (!pci_is_pcie(dev)) |
|---|
| 1513 | + return; |
|---|
| 1514 | + |
|---|
| 1515 | + ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); |
|---|
| 1516 | + if (!ltr) |
|---|
| 1517 | + return; |
|---|
| 1518 | + |
|---|
| 1519 | + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); |
|---|
| 1520 | + if (!save_state) { |
|---|
| 1521 | + pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n"); |
|---|
| 1522 | + return; |
|---|
| 1523 | + } |
|---|
| 1524 | + |
|---|
| 1525 | + cap = (u16 *)&save_state->cap.data[0]; |
|---|
| 1526 | + pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++); |
|---|
| 1527 | + pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++); |
|---|
| 1528 | +} |
|---|
| 1529 | + |
|---|
| 1530 | +static void pci_restore_ltr_state(struct pci_dev *dev) |
|---|
| 1531 | +{ |
|---|
| 1532 | + struct pci_cap_saved_state *save_state; |
|---|
| 1533 | + int ltr; |
|---|
| 1534 | + u16 *cap; |
|---|
| 1535 | + |
|---|
| 1536 | + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); |
|---|
| 1537 | + ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); |
|---|
| 1538 | + if (!save_state || !ltr) |
|---|
| 1539 | + return; |
|---|
| 1540 | + |
|---|
| 1541 | + cap = (u16 *)&save_state->cap.data[0]; |
|---|
| 1542 | + pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++); |
|---|
| 1543 | + pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++); |
|---|
| 1544 | +} |
|---|
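pci_save_ltr_state() expects a preallocated save buffer for the two 16-bit latency registers. The companion hunk is not visible in this excerpt, but judging from the error message above it presumably lives in pci_allocate_cap_save_buffers() and looks roughly like the fragment below (an assumption, not part of this diff).

```c
	/* Fragment, assumed context: inside pci_allocate_cap_save_buffers() */
	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
					    2 * sizeof(u16));
	if (error)
		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
```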
| 1266 | 1545 | |
|---|
| 1267 | 1546 | /** |
|---|
| 1268 | | - * pci_save_state - save the PCI configuration space of a device before suspending |
|---|
| 1269 | | - * @dev: - PCI device that we're dealing with |
|---|
| 1547 | + * pci_save_state - save the PCI configuration space of a device before |
|---|
| 1548 | + * suspending |
|---|
| 1549 | + * @dev: PCI device that we're dealing with |
|---|
| 1270 | 1550 | */ |
|---|
| 1271 | 1551 | int pci_save_state(struct pci_dev *dev) |
|---|
| 1272 | 1552 | { |
|---|
| 1273 | 1553 | int i; |
|---|
| 1274 | 1554 | /* XXX: 100% dword access ok here? */ |
|---|
| 1275 | | - for (i = 0; i < 16; i++) |
|---|
| 1555 | + for (i = 0; i < 16; i++) { |
|---|
| 1276 | 1556 | pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); |
|---|
| 1557 | + pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n", |
|---|
| 1558 | + i * 4, dev->saved_config_space[i]); |
|---|
| 1559 | + } |
|---|
| 1277 | 1560 | dev->state_saved = true; |
|---|
| 1278 | 1561 | |
|---|
| 1279 | 1562 | i = pci_save_pcie_state(dev); |
|---|
| .. | .. |
|---|
| 1284 | 1567 | if (i != 0) |
|---|
| 1285 | 1568 | return i; |
|---|
| 1286 | 1569 | |
|---|
| 1570 | + pci_save_ltr_state(dev); |
|---|
| 1571 | + pci_save_dpc_state(dev); |
|---|
| 1572 | + pci_save_aer_state(dev); |
|---|
| 1287 | 1573 | return pci_save_vc_state(dev); |
|---|
| 1288 | 1574 | } |
|---|
| 1289 | 1575 | EXPORT_SYMBOL(pci_save_state); |
|---|
| .. | .. |
|---|
| 1375 | 1661 | |
|---|
| 1376 | 1662 | /** |
|---|
| 1377 | 1663 | * pci_restore_state - Restore the saved state of a PCI device |
|---|
| 1378 | | - * @dev: - PCI device that we're dealing with |
|---|
| 1664 | + * @dev: PCI device that we're dealing with |
|---|
| 1379 | 1665 | */ |
|---|
| 1380 | 1666 | void pci_restore_state(struct pci_dev *dev) |
|---|
| 1381 | 1667 | { |
|---|
| 1382 | 1668 | if (!dev->state_saved) |
|---|
| 1383 | 1669 | return; |
|---|
| 1384 | 1670 | |
|---|
| 1385 | | - /* PCI Express register must be restored first */ |
|---|
| 1671 | + /* |
|---|
| 1672 | + * Restore max latencies (in the LTR capability) before enabling |
|---|
| 1673 | + * LTR itself (in the PCIe capability). |
|---|
| 1674 | + */ |
|---|
| 1675 | + pci_restore_ltr_state(dev); |
|---|
| 1676 | + |
|---|
| 1386 | 1677 | pci_restore_pcie_state(dev); |
|---|
| 1387 | 1678 | pci_restore_pasid_state(dev); |
|---|
| 1388 | 1679 | pci_restore_pri_state(dev); |
|---|
| 1389 | 1680 | pci_restore_ats_state(dev); |
|---|
| 1390 | 1681 | pci_restore_vc_state(dev); |
|---|
| 1391 | 1682 | pci_restore_rebar_state(dev); |
|---|
| 1683 | + pci_restore_dpc_state(dev); |
|---|
| 1392 | 1684 | |
|---|
| 1393 | | - pci_cleanup_aer_error_status_regs(dev); |
|---|
| 1685 | + pci_aer_clear_status(dev); |
|---|
| 1686 | + pci_restore_aer_state(dev); |
|---|
| 1394 | 1687 | |
|---|
| 1395 | 1688 | pci_restore_config_space(dev); |
|---|
| 1396 | 1689 | |
|---|
| .. | .. |
|---|
| 1407 | 1700 | |
|---|
| 1408 | 1701 | struct pci_saved_state { |
|---|
| 1409 | 1702 | u32 config_space[16]; |
|---|
| 1410 | | - struct pci_cap_saved_data cap[0]; |
|---|
| 1703 | + struct pci_cap_saved_data cap[]; |
|---|
| 1411 | 1704 | }; |
|---|
| 1412 | 1705 | |
|---|
| 1413 | 1706 | /** |
|---|
| .. | .. |
|---|
| 1546 | 1839 | * pci_reenable_device - Resume abandoned device |
|---|
| 1547 | 1840 | * @dev: PCI device to be resumed |
|---|
| 1548 | 1841 | * |
|---|
| 1549 | | - * Note this function is a backend of pci_default_resume and is not supposed |
|---|
| 1550 | | - * to be called by normal code, write proper resume handler and use it instead. |
|---|
| 1842 | + * NOTE: This function is a backend of pci_default_resume() and is not supposed |
|---|
| 1843 | + * to be called by normal code, write proper resume handler and use it instead. |
|---|
| 1551 | 1844 | */ |
|---|
| 1552 | 1845 | int pci_reenable_device(struct pci_dev *dev) |
|---|
| 1553 | 1846 | { |
|---|
| .. | .. |
|---|
| 1618 | 1911 | * pci_enable_device_io - Initialize a device for use with IO space |
|---|
| 1619 | 1912 | * @dev: PCI device to be initialized |
|---|
| 1620 | 1913 | * |
|---|
| 1621 | | - * Initialize device before it's used by a driver. Ask low-level code |
|---|
| 1622 | | - * to enable I/O resources. Wake up the device if it was suspended. |
|---|
| 1623 | | - * Beware, this function can fail. |
|---|
| 1914 | + * Initialize device before it's used by a driver. Ask low-level code |
|---|
| 1915 | + * to enable I/O resources. Wake up the device if it was suspended. |
|---|
| 1916 | + * Beware, this function can fail. |
|---|
| 1624 | 1917 | */ |
|---|
| 1625 | 1918 | int pci_enable_device_io(struct pci_dev *dev) |
|---|
| 1626 | 1919 | { |
|---|
| .. | .. |
|---|
| 1632 | 1925 | * pci_enable_device_mem - Initialize a device for use with Memory space |
|---|
| 1633 | 1926 | * @dev: PCI device to be initialized |
|---|
| 1634 | 1927 | * |
|---|
| 1635 | | - * Initialize device before it's used by a driver. Ask low-level code |
|---|
| 1636 | | - * to enable Memory resources. Wake up the device if it was suspended. |
|---|
| 1637 | | - * Beware, this function can fail. |
|---|
| 1928 | + * Initialize device before it's used by a driver. Ask low-level code |
|---|
| 1929 | + * to enable Memory resources. Wake up the device if it was suspended. |
|---|
| 1930 | + * Beware, this function can fail. |
|---|
| 1638 | 1931 | */ |
|---|
| 1639 | 1932 | int pci_enable_device_mem(struct pci_dev *dev) |
|---|
| 1640 | 1933 | { |
|---|
| .. | .. |
|---|
| 1646 | 1939 | * pci_enable_device - Initialize device before it's used by a driver. |
|---|
| 1647 | 1940 | * @dev: PCI device to be initialized |
|---|
| 1648 | 1941 | * |
|---|
| 1649 | | - * Initialize device before it's used by a driver. Ask low-level code |
|---|
| 1650 | | - * to enable I/O and memory. Wake up the device if it was suspended. |
|---|
| 1651 | | - * Beware, this function can fail. |
|---|
| 1942 | + * Initialize device before it's used by a driver. Ask low-level code |
|---|
| 1943 | + * to enable I/O and memory. Wake up the device if it was suspended. |
|---|
| 1944 | + * Beware, this function can fail. |
|---|
| 1652 | 1945 | * |
|---|
| 1653 | | - * Note we don't actually enable the device many times if we call |
|---|
| 1654 | | - * this function repeatedly (we just increment the count). |
|---|
| 1946 | + * Note we don't actually enable the device many times if we call |
|---|
| 1947 | + * this function repeatedly (we just increment the count). |
|---|
| 1655 | 1948 | */ |
|---|
| 1656 | 1949 | int pci_enable_device(struct pci_dev *dev) |
|---|
| 1657 | 1950 | { |
|---|
| .. | .. |
|---|
| 1660 | 1953 | EXPORT_SYMBOL(pci_enable_device); |
|---|
| 1661 | 1954 | |
|---|
| 1662 | 1955 | /* |
|---|
| 1663 | | - * Managed PCI resources. This manages device on/off, intx/msi/msix |
|---|
| 1664 | | - * on/off and BAR regions. pci_dev itself records msi/msix status, so |
|---|
| 1956 | + * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X |
|---|
| 1957 | + * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so |
|---|
| 1665 | 1958 | * there's no need to track it separately. pci_devres is initialized |
|---|
| 1666 | 1959 | * when a device is enabled using managed PCI device enable interface. |
|---|
| 1667 | 1960 | */ |
|---|
| .. | .. |
|---|
| 1779 | 2072 | } |
|---|
| 1780 | 2073 | |
|---|
| 1781 | 2074 | /** |
|---|
| 1782 | | - * pcibios_release_device - provide arch specific hooks when releasing device dev |
|---|
| 2075 | + * pcibios_release_device - provide arch specific hooks when releasing |
|---|
| 2076 | + * device dev |
|---|
| 1783 | 2077 | * @dev: the PCI device being released |
|---|
| 1784 | 2078 | * |
|---|
| 1785 | 2079 | * Permits the platform to provide architecture specific functionality when |
|---|
| .. | .. |
|---|
| 1870 | 2164 | * @dev: the PCIe device reset |
|---|
| 1871 | 2165 | * @state: Reset state to enter into |
|---|
| 1872 | 2166 | * |
|---|
| 1873 | | - * |
|---|
| 1874 | | - * Sets the PCIe reset state for the device. This is the default |
|---|
| 2167 | + * Set the PCIe reset state for the device. This is the default |
|---|
| 1875 | 2168 | * implementation. Architecture implementations can override this. |
|---|
| 1876 | 2169 | */ |
|---|
| 1877 | 2170 | int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev, |
|---|
| .. | .. |
|---|
| 1885 | 2178 | * @dev: the PCIe device reset |
|---|
| 1886 | 2179 | * @state: Reset state to enter into |
|---|
| 1887 | 2180 | * |
|---|
| 1888 | | - * |
|---|
| 1889 | 2181 | * Sets the PCI reset state for the device. |
|---|
| 1890 | 2182 | */ |
|---|
| 1891 | 2183 | int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) |
|---|
| .. | .. |
|---|
| 1893 | 2185 | return pcibios_set_pcie_reset_state(dev, state); |
|---|
| 1894 | 2186 | } |
|---|
| 1895 | 2187 | EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); |
|---|
| 2188 | + |
|---|
| 2189 | +void pcie_clear_device_status(struct pci_dev *dev) |
|---|
| 2190 | +{ |
|---|
| 2191 | + u16 sta; |
|---|
| 2192 | + |
|---|
| 2193 | + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta); |
|---|
| 2194 | + pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta); |
|---|
| 2195 | +} |
|---|
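The PCIe Device Status error bits are RW1C, so reading the register and writing the same value back clears exactly the bits that were set. A trivial sketch of an in-kernel call site (the function is not exported to modules in this hunk; the caller name is hypothetical):

```c
#include <linux/pci.h>

/* Hypothetical: drop stale error bits after a recovered PCIe error. */
static void example_clear_after_recovery(struct pci_dev *pdev)
{
	pcie_clear_device_status(pdev);
}
```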
| 1896 | 2196 | |
|---|
| 1897 | 2197 | /** |
|---|
| 1898 | 2198 | * pcie_clear_root_pme_status - Clear root port PME interrupt status. |
|---|
| .. | .. |
|---|
| 2147 | 2447 | int ret = 0; |
|---|
| 2148 | 2448 | |
|---|
| 2149 | 2449 | /* |
|---|
| 2150 | | - * Bridges can only signal wakeup on behalf of subordinate devices, |
|---|
| 2151 | | - * but that is set up elsewhere, so skip them. |
|---|
| 2450 | + * Bridges that are not power-manageable directly only signal |
|---|
| 2451 | + * wakeup on behalf of subordinate devices which is set up |
|---|
| 2452 | + * elsewhere, so skip them. However, bridges that are |
|---|
| 2453 | + * power-manageable may signal wakeup for themselves (for example, |
|---|
| 2454 | + * on a hotplug event) and they need to be covered here. |
|---|
| 2152 | 2455 | */ |
|---|
| 2153 | | - if (pci_has_subordinate(dev)) |
|---|
| 2456 | + if (!pci_power_manageable(dev)) |
|---|
| 2154 | 2457 | return 0; |
|---|
| 2155 | 2458 | |
|---|
| 2156 | 2459 | /* Don't do the same thing twice in a row for one device. */ |
|---|
| .. | .. |
|---|
| 2258 | 2561 | case PCI_D2: |
|---|
| 2259 | 2562 | if (pci_no_d1d2(dev)) |
|---|
| 2260 | 2563 | break; |
|---|
| 2261 | | - /* else: fall through */ |
|---|
| 2564 | + fallthrough; |
|---|
| 2262 | 2565 | default: |
|---|
| 2263 | 2566 | target_state = state; |
|---|
| 2264 | 2567 | } |
|---|
| .. | .. |
|---|
| 2297 | 2600 | } |
|---|
| 2298 | 2601 | |
|---|
| 2299 | 2602 | /** |
|---|
| 2300 | | - * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state |
|---|
| 2603 | + * pci_prepare_to_sleep - prepare PCI device for system-wide transition |
|---|
| 2604 | + * into a sleep state |
|---|
| 2301 | 2605 | * @dev: Device to handle. |
|---|
| 2302 | 2606 | * |
|---|
| 2303 | 2607 | * Choose the power state appropriate for the device depending on whether |
|---|
| .. | .. |
|---|
| 2325 | 2629 | EXPORT_SYMBOL(pci_prepare_to_sleep); |
|---|
| 2326 | 2630 | |
|---|
| 2327 | 2631 | /** |
|---|
| 2328 | | - * pci_back_from_sleep - turn PCI device on during system-wide transition into working state |
|---|
| 2632 | + * pci_back_from_sleep - turn PCI device on during system-wide transition |
|---|
| 2633 | + * into working state |
|---|
| 2329 | 2634 | * @dev: Device to handle. |
|---|
| 2330 | 2635 | * |
|---|
| 2331 | 2636 | * Disable device's system wake-up capability and put it into D0. |
|---|
| .. | .. |
|---|
| 2407 | 2712 | EXPORT_SYMBOL_GPL(pci_dev_run_wake); |
|---|
| 2408 | 2713 | |
|---|
| 2409 | 2714 | /** |
|---|
| 2410 | | - * pci_dev_keep_suspended - Check if the device can stay in the suspended state. |
|---|
| 2715 | + * pci_dev_need_resume - Check if it is necessary to resume the device. |
|---|
| 2411 | 2716 | * @pci_dev: Device to check. |
|---|
| 2412 | 2717 | * |
|---|
| 2413 | | - * Return 'true' if the device is runtime-suspended, it doesn't have to be |
|---|
| 2718 | + * Return 'true' if the device is not runtime-suspended or it has to be |
|---|
| 2414 | 2719 | * reconfigured due to wakeup settings difference between system and runtime |
|---|
| 2415 | | - * suspend and the current power state of it is suitable for the upcoming |
|---|
| 2416 | | - * (system) transition. |
|---|
| 2417 | | - * |
|---|
| 2418 | | - * If the device is not configured for system wakeup, disable PME for it before |
|---|
| 2419 | | - * returning 'true' to prevent it from waking up the system unnecessarily. |
|---|
| 2720 | + * suspend, or the current power state of it is not suitable for the upcoming |
|---|
| 2721 | + * (system-wide) transition. |
|---|
| 2420 | 2722 | */ |
|---|
| 2421 | | -bool pci_dev_keep_suspended(struct pci_dev *pci_dev) |
|---|
| 2723 | +bool pci_dev_need_resume(struct pci_dev *pci_dev) |
|---|
| 2422 | 2724 | { |
|---|
| 2423 | 2725 | struct device *dev = &pci_dev->dev; |
|---|
| 2424 | | - bool wakeup = device_may_wakeup(dev); |
|---|
| 2726 | + pci_power_t target_state; |
|---|
| 2425 | 2727 | |
|---|
| 2426 | | - if (!pm_runtime_suspended(dev) |
|---|
| 2427 | | - || pci_target_state(pci_dev, wakeup) != pci_dev->current_state |
|---|
| 2428 | | - || platform_pci_need_resume(pci_dev)) |
|---|
| 2429 | | - return false; |
|---|
| 2728 | + if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev)) |
|---|
| 2729 | + return true; |
|---|
| 2730 | + |
|---|
| 2731 | + target_state = pci_target_state(pci_dev, device_may_wakeup(dev)); |
|---|
| 2430 | 2732 | |
|---|
| 2431 | 2733 | /* |
|---|
| 2432 | | - * At this point the device is good to go unless it's been configured |
|---|
| 2433 | | - * to generate PME at the runtime suspend time, but it is not supposed |
|---|
| 2434 | | - * to wake up the system. In that case, simply disable PME for it |
|---|
| 2435 | | - * (it will have to be re-enabled on exit from system resume). |
|---|
| 2436 | | - * |
|---|
| 2437 | | - * If the device's power state is D3cold and the platform check above |
|---|
| 2438 | | - * hasn't triggered, the device's configuration is suitable and we don't |
|---|
| 2439 | | - * need to manipulate it at all. |
|---|
| 2734 | + * If the earlier platform check has not triggered, D3cold is just power |
|---|
| 2735 | + * removal on top of D3hot, so no need to resume the device in that |
|---|
| 2736 | + * case. |
|---|
| 2440 | 2737 | */ |
|---|
| 2738 | + return target_state != pci_dev->current_state && |
|---|
| 2739 | + target_state != PCI_D3cold && |
|---|
| 2740 | + pci_dev->current_state != PCI_D3hot; |
|---|
| 2741 | +} |
|---|
| 2742 | + |
|---|
| 2743 | +/** |
|---|
| 2744 | + * pci_dev_adjust_pme - Adjust PME setting for a suspended device. |
|---|
| 2745 | + * @pci_dev: Device to check. |
|---|
| 2746 | + * |
|---|
| 2747 | + * If the device is suspended and it is not configured for system wakeup, |
|---|
| 2748 | + * disable PME for it to prevent it from waking up the system unnecessarily. |
|---|
| 2749 | + * |
|---|
| 2750 | + * Note that if the device's power state is D3cold and the platform check in |
|---|
| 2751 | + * pci_dev_need_resume() has not triggered, the device's configuration need not |
|---|
| 2752 | + * be changed. |
|---|
| 2753 | + */ |
|---|
| 2754 | +void pci_dev_adjust_pme(struct pci_dev *pci_dev) |
|---|
| 2755 | +{ |
|---|
| 2756 | + struct device *dev = &pci_dev->dev; |
|---|
| 2757 | + |
|---|
| 2441 | 2758 | spin_lock_irq(&dev->power.lock); |
|---|
| 2442 | 2759 | |
|---|
| 2443 | | - if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold && |
|---|
| 2444 | | - !wakeup) |
|---|
| 2760 | + if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) && |
|---|
| 2761 | + pci_dev->current_state < PCI_D3cold) |
|---|
| 2445 | 2762 | __pci_pme_active(pci_dev, false); |
|---|
| 2446 | 2763 | |
|---|
| 2447 | 2764 | spin_unlock_irq(&dev->power.lock); |
|---|
| 2448 | | - return true; |
|---|
| 2449 | 2765 | } |
|---|
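The old pci_dev_keep_suspended() is effectively split in two here: pci_dev_need_resume() answers the question and pci_dev_adjust_pme() applies the PME fixup. A hedged sketch of how a PM-core callback would combine them; the surrounding function is illustrative, not the actual drivers/pci/pci-driver.c code.

```c
#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int example_pm_suspend_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (!pci_dev_need_resume(pci_dev)) {
		/* Stay runtime-suspended; just disable PME if wakeup is off. */
		pci_dev_adjust_pme(pci_dev);
		return 0;
	}

	/* ...otherwise resume the device and program it for system sleep. */
	return 0;
}
```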
| 2450 | 2766 | |
|---|
| 2451 | 2767 | /** |
|---|
| .. | .. |
|---|
| 2518 | 2834 | DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"), |
|---|
| 2519 | 2835 | }, |
|---|
| 2520 | 2836 | }, |
|---|
| 2837 | + { |
|---|
| 2838 | + /* |
|---|
| 2839 | + * Downstream device is not accessible after putting a root port |
|---|
| 2840 | + * into D3cold and back into D0 on Elo Continental Z2 board |
|---|
| 2841 | + */ |
|---|
| 2842 | + .ident = "Elo Continental Z2", |
|---|
| 2843 | + .matches = { |
|---|
| 2844 | + DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"), |
|---|
| 2845 | + DMI_MATCH(DMI_BOARD_NAME, "Geminilake"), |
|---|
| 2846 | + DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"), |
|---|
| 2847 | + }, |
|---|
| 2848 | + }, |
|---|
| 2521 | 2849 | #endif |
|---|
| 2522 | 2850 | { } |
|---|
| 2523 | 2851 | }; |
|---|
| .. | .. |
|---|
| 2553 | 2881 | |
|---|
| 2554 | 2882 | /* Even the oldest 2010 Thunderbolt controller supports D3. */ |
|---|
| 2555 | 2883 | if (bridge->is_thunderbolt) |
|---|
| 2884 | + return true; |
|---|
| 2885 | + |
|---|
| 2886 | + /* Platform might know better if the bridge supports D3 */ |
|---|
| 2887 | + if (platform_pci_bridge_d3(bridge)) |
|---|
| 2556 | 2888 | return true; |
|---|
| 2557 | 2889 | |
|---|
| 2558 | 2890 | /* |
|---|
| .. | .. |
|---|
| 2691 | 3023 | void pci_pm_init(struct pci_dev *dev) |
|---|
| 2692 | 3024 | { |
|---|
| 2693 | 3025 | int pm; |
|---|
| 3026 | + u16 status; |
|---|
| 2694 | 3027 | u16 pmc; |
|---|
| 2695 | 3028 | |
|---|
| 2696 | 3029 | pm_runtime_forbid(&dev->dev); |
|---|
| .. | .. |
|---|
| 2716 | 3049 | } |
|---|
| 2717 | 3050 | |
|---|
| 2718 | 3051 | dev->pm_cap = pm; |
|---|
| 2719 | | - dev->d3_delay = PCI_PM_D3_WAIT; |
|---|
| 3052 | + dev->d3hot_delay = PCI_PM_D3HOT_WAIT; |
|---|
| 2720 | 3053 | dev->d3cold_delay = PCI_PM_D3COLD_WAIT; |
|---|
| 2721 | 3054 | dev->bridge_d3 = pci_bridge_d3_possible(dev); |
|---|
| 2722 | 3055 | dev->d3cold_allowed = true; |
|---|
| .. | .. |
|---|
| 2730 | 3063 | dev->d2_support = true; |
|---|
| 2731 | 3064 | |
|---|
| 2732 | 3065 | if (dev->d1_support || dev->d2_support) |
|---|
| 2733 | | - pci_printk(KERN_DEBUG, dev, "supports%s%s\n", |
|---|
| 3066 | + pci_info(dev, "supports%s%s\n", |
|---|
| 2734 | 3067 | dev->d1_support ? " D1" : "", |
|---|
| 2735 | 3068 | dev->d2_support ? " D2" : ""); |
|---|
| 2736 | 3069 | } |
|---|
| 2737 | 3070 | |
|---|
| 2738 | 3071 | pmc &= PCI_PM_CAP_PME_MASK; |
|---|
| 2739 | 3072 | if (pmc) { |
|---|
| 2740 | | - pci_printk(KERN_DEBUG, dev, "PME# supported from%s%s%s%s%s\n", |
|---|
| 3073 | + pci_info(dev, "PME# supported from%s%s%s%s%s\n", |
|---|
| 2741 | 3074 | (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", |
|---|
| 2742 | 3075 | (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", |
|---|
| 2743 | 3076 | (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", |
|---|
| 2744 | | - (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", |
|---|
| 3077 | + (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "", |
|---|
| 2745 | 3078 | (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); |
|---|
| 2746 | 3079 | dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; |
|---|
| 2747 | 3080 | dev->pme_poll = true; |
|---|
| .. | .. |
|---|
| 2753 | 3086 | /* Disable the PME# generation functionality */ |
|---|
| 2754 | 3087 | pci_pme_active(dev, false); |
|---|
| 2755 | 3088 | } |
|---|
| 3089 | + |
|---|
| 3090 | + pci_read_config_word(dev, PCI_STATUS, &status); |
|---|
| 3091 | + if (status & PCI_STATUS_IMM_READY) |
|---|
| 3092 | + dev->imm_ready = 1; |
|---|
| 2756 | 3093 | } |
|---|
| 2757 | 3094 | |
|---|
| 2758 | 3095 | static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop) |
|---|
| .. | .. |
|---|
| 2901 | 3238 | res->flags = flags; |
|---|
| 2902 | 3239 | |
|---|
| 2903 | 3240 | if (bei <= PCI_EA_BEI_BAR5) |
|---|
| 2904 | | - pci_printk(KERN_DEBUG, dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", |
|---|
| 3241 | + pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", |
|---|
| 2905 | 3242 | bei, res, prop); |
|---|
| 2906 | 3243 | else if (bei == PCI_EA_BEI_ROM) |
|---|
| 2907 | | - pci_printk(KERN_DEBUG, dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n", |
|---|
| 3244 | + pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n", |
|---|
| 2908 | 3245 | res, prop); |
|---|
| 2909 | 3246 | else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5) |
|---|
| 2910 | | - pci_printk(KERN_DEBUG, dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", |
|---|
| 3247 | + pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", |
|---|
| 2911 | 3248 | bei - PCI_EA_BEI_VF_BAR0, res, prop); |
|---|
| 2912 | 3249 | else |
|---|
| 2913 | | - pci_printk(KERN_DEBUG, dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n", |
|---|
| 3250 | + pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n", |
|---|
| 2914 | 3251 | bei, res, prop); |
|---|
| 2915 | 3252 | |
|---|
| 2916 | 3253 | out: |
|---|
| .. | .. |
|---|
| 2954 | 3291 | |
|---|
| 2955 | 3292 | /** |
|---|
| 2956 | 3293 | * _pci_add_cap_save_buffer - allocate buffer for saving given |
|---|
| 2957 | | - * capability registers |
|---|
| 3294 | + * capability registers |
|---|
| 2958 | 3295 | * @dev: the PCI device |
|---|
| 2959 | 3296 | * @cap: the capability to allocate the buffer for |
|---|
| 2960 | 3297 | * @extended: Standard or Extended capability ID |
|---|
| .. | .. |
|---|
| 3013 | 3350 | if (error) |
|---|
| 3014 | 3351 | pci_err(dev, "unable to preallocate PCI-X save buffer\n"); |
|---|
| 3015 | 3352 | |
|---|
| 3353 | + error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR, |
|---|
| 3354 | + 2 * sizeof(u16)); |
|---|
| 3355 | + if (error) |
|---|
| 3356 | + pci_err(dev, "unable to allocate suspend buffer for LTR\n"); |
|---|
| 3357 | + |
|---|
| 3016 | 3358 | pci_allocate_vc_save_buffers(dev); |
|---|
| 3017 | 3359 | } |
|---|
| 3018 | 3360 | |
|---|
| .. | .. |
|---|
| 3059 | 3401 | } |
|---|
| 3060 | 3402 | } |
|---|
| 3061 | 3403 | |
|---|
| 3062 | | -static int pci_acs_enable; |
|---|
| 3063 | | - |
|---|
| 3064 | | -/** |
|---|
| 3065 | | - * pci_request_acs - ask for ACS to be enabled if supported |
|---|
| 3066 | | - */ |
|---|
| 3067 | | -void pci_request_acs(void) |
|---|
| 3068 | | -{ |
|---|
| 3069 | | - pci_acs_enable = 1; |
|---|
| 3070 | | -} |
|---|
| 3071 | | -EXPORT_SYMBOL_GPL(pci_request_acs); |
|---|
| 3072 | | - |
|---|
| 3073 | | -static const char *disable_acs_redir_param; |
|---|
| 3074 | | - |
|---|
| 3075 | | -/** |
|---|
| 3076 | | - * pci_disable_acs_redir - disable ACS redirect capabilities |
|---|
| 3077 | | - * @dev: the PCI device |
|---|
| 3078 | | - * |
|---|
| 3079 | | - * For only devices specified in the disable_acs_redir parameter. |
|---|
| 3080 | | - */ |
|---|
| 3081 | | -static void pci_disable_acs_redir(struct pci_dev *dev) |
|---|
| 3082 | | -{ |
|---|
| 3083 | | - int ret = 0; |
|---|
| 3084 | | - const char *p; |
|---|
| 3085 | | - int pos; |
|---|
| 3086 | | - u16 ctrl; |
|---|
| 3087 | | - |
|---|
| 3088 | | - if (!disable_acs_redir_param) |
|---|
| 3089 | | - return; |
|---|
| 3090 | | - |
|---|
| 3091 | | - p = disable_acs_redir_param; |
|---|
| 3092 | | - while (*p) { |
|---|
| 3093 | | - ret = pci_dev_str_match(dev, p, &p); |
|---|
| 3094 | | - if (ret < 0) { |
|---|
| 3095 | | - pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n", |
|---|
| 3096 | | - disable_acs_redir_param); |
|---|
| 3097 | | - |
|---|
| 3098 | | - break; |
|---|
| 3099 | | - } else if (ret == 1) { |
|---|
| 3100 | | - /* Found a match */ |
|---|
| 3101 | | - break; |
|---|
| 3102 | | - } |
|---|
| 3103 | | - |
|---|
| 3104 | | - if (*p != ';' && *p != ',') { |
|---|
| 3105 | | - /* End of param or invalid format */ |
|---|
| 3106 | | - break; |
|---|
| 3107 | | - } |
|---|
| 3108 | | - p++; |
|---|
| 3109 | | - } |
|---|
| 3110 | | - |
|---|
| 3111 | | - if (ret != 1) |
|---|
| 3112 | | - return; |
|---|
| 3113 | | - |
|---|
| 3114 | | - if (!pci_dev_specific_disable_acs_redir(dev)) |
|---|
| 3115 | | - return; |
|---|
| 3116 | | - |
|---|
| 3117 | | - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); |
|---|
| 3118 | | - if (!pos) { |
|---|
| 3119 | | - pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n"); |
|---|
| 3120 | | - return; |
|---|
| 3121 | | - } |
|---|
| 3122 | | - |
|---|
| 3123 | | - pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); |
|---|
| 3124 | | - |
|---|
| 3125 | | - /* P2P Request & Completion Redirect */ |
|---|
| 3126 | | - ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC); |
|---|
| 3127 | | - |
|---|
| 3128 | | - pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); |
|---|
| 3129 | | - |
|---|
| 3130 | | - pci_info(dev, "disabled ACS redirect\n"); |
|---|
| 3131 | | -} |
|---|
| 3132 | | - |
|---|
| 3133 | | -/** |
|---|
| 3134 | | - * pci_std_enable_acs - enable ACS on devices using standard ACS capabilites |
|---|
| 3135 | | - * @dev: the PCI device |
|---|
| 3136 | | - */ |
|---|
| 3137 | | -static void pci_std_enable_acs(struct pci_dev *dev) |
|---|
| 3138 | | -{ |
|---|
| 3139 | | - int pos; |
|---|
| 3140 | | - u16 cap; |
|---|
| 3141 | | - u16 ctrl; |
|---|
| 3142 | | - |
|---|
| 3143 | | - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); |
|---|
| 3144 | | - if (!pos) |
|---|
| 3145 | | - return; |
|---|
| 3146 | | - |
|---|
| 3147 | | - pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); |
|---|
| 3148 | | - pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); |
|---|
| 3149 | | - |
|---|
| 3150 | | - /* Source Validation */ |
|---|
| 3151 | | - ctrl |= (cap & PCI_ACS_SV); |
|---|
| 3152 | | - |
|---|
| 3153 | | - /* P2P Request Redirect */ |
|---|
| 3154 | | - ctrl |= (cap & PCI_ACS_RR); |
|---|
| 3155 | | - |
|---|
| 3156 | | - /* P2P Completion Redirect */ |
|---|
| 3157 | | - ctrl |= (cap & PCI_ACS_CR); |
|---|
| 3158 | | - |
|---|
| 3159 | | - /* Upstream Forwarding */ |
|---|
| 3160 | | - ctrl |= (cap & PCI_ACS_UF); |
|---|
| 3161 | | - |
|---|
| 3162 | | - pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); |
|---|
| 3163 | | -} |
|---|
| 3164 | | - |
|---|
| 3165 | | -/** |
|---|
| 3166 | | - * pci_enable_acs - enable ACS if hardware support it |
|---|
| 3167 | | - * @dev: the PCI device |
|---|
| 3168 | | - */ |
|---|
| 3169 | | -void pci_enable_acs(struct pci_dev *dev) |
|---|
| 3170 | | -{ |
|---|
| 3171 | | - if (!pci_acs_enable) |
|---|
| 3172 | | - goto disable_acs_redir; |
|---|
| 3173 | | - |
|---|
| 3174 | | - if (!pci_dev_specific_enable_acs(dev)) |
|---|
| 3175 | | - goto disable_acs_redir; |
|---|
| 3176 | | - |
|---|
| 3177 | | - pci_std_enable_acs(dev); |
|---|
| 3178 | | - |
|---|
| 3179 | | -disable_acs_redir: |
|---|
| 3180 | | - /* |
|---|
| 3181 | | - * Note: pci_disable_acs_redir() must be called even if ACS was not |
|---|
| 3182 | | - * enabled by the kernel because it may have been enabled by |
|---|
| 3183 | | - * platform firmware. So if we are told to disable it, we should |
|---|
| 3184 | | - * always disable it after setting the kernel's default |
|---|
| 3185 | | - * preferences. |
|---|
| 3186 | | - */ |
|---|
| 3187 | | - pci_disable_acs_redir(dev); |
|---|
| 3188 | | -} |
|---|
| 3189 | | - |
|---|
| 3190 | 3404 | static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags) |
|---|
| 3191 | 3405 | { |
|---|
| 3192 | 3406 | int pos; |
|---|
| 3193 | 3407 | u16 cap, ctrl; |
|---|
| 3194 | 3408 | |
|---|
| 3195 | | - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); |
|---|
| 3409 | + pos = pdev->acs_cap; |
|---|
| 3196 | 3410 | if (!pos) |
|---|
| 3197 | 3411 | return false; |
|---|
| 3198 | 3412 | |
|---|
| .. | .. |
|---|
| 3315 | 3529 | } while (pdev != end); |
|---|
| 3316 | 3530 | |
|---|
| 3317 | 3531 | return true; |
|---|
| 3532 | +} |
|---|
| 3533 | + |
|---|
| 3534 | +/** |
|---|
| 3535 | + * pci_acs_init - Initialize ACS if hardware supports it |
|---|
| 3536 | + * @dev: the PCI device |
|---|
| 3537 | + */ |
|---|
| 3538 | +void pci_acs_init(struct pci_dev *dev) |
|---|
| 3539 | +{ |
|---|
| 3540 | + dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); |
|---|
| 3541 | + |
|---|
| 3542 | + /* |
|---|
| 3543 | + * Attempt to enable ACS regardless of capability because some Root |
|---|
| 3544 | + * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have |
|---|
| 3545 | + * the standard ACS capability but still support ACS via those |
|---|
| 3546 | + * quirks. |
|---|
| 3547 | + */ |
|---|
| 3548 | + pci_enable_acs(dev); |
|---|
| 3318 | 3549 | } |
|---|
| 3319 | 3550 | |
|---|
| 3320 | 3551 | /** |
|---|
| .. | .. |
|---|
| 3484 | 3715 | } |
|---|
| 3485 | 3716 | |
|---|
| 3486 | 3717 | /* Ensure upstream ports don't block AtomicOps on egress */ |
|---|
| 3487 | | - if (!bridge->has_secondary_link) { |
|---|
| 3718 | + if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) { |
|---|
| 3488 | 3719 | pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, |
|---|
| 3489 | 3720 | &ctl2); |
|---|
| 3490 | 3721 | if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK) |
|---|
| .. | .. |
|---|
| 3561 | 3792 | EXPORT_SYMBOL_GPL(pci_common_swizzle); |
|---|
| 3562 | 3793 | |
|---|
| 3563 | 3794 | /** |
|---|
| 3564 | | - * pci_release_region - Release a PCI bar |
|---|
| 3565 | | - * @pdev: PCI device whose resources were previously reserved by pci_request_region |
|---|
| 3566 | | - * @bar: BAR to release |
|---|
| 3795 | + * pci_release_region - Release a PCI bar |
|---|
| 3796 | + * @pdev: PCI device whose resources were previously reserved by |
|---|
| 3797 | + * pci_request_region() |
|---|
| 3798 | + * @bar: BAR to release |
|---|
| 3567 | 3799 | * |
|---|
| 3568 | | - * Releases the PCI I/O and memory resources previously reserved by a |
|---|
| 3569 | | - * successful call to pci_request_region. Call this function only |
|---|
| 3570 | | - * after all use of the PCI regions has ceased. |
|---|
| 3800 | + * Releases the PCI I/O and memory resources previously reserved by a |
|---|
| 3801 | + * successful call to pci_request_region(). Call this function only |
|---|
| 3802 | + * after all use of the PCI regions has ceased. |
|---|
| 3571 | 3803 | */ |
|---|
| 3572 | 3804 | void pci_release_region(struct pci_dev *pdev, int bar) |
|---|
| 3573 | 3805 | { |
|---|
| .. | .. |
|---|
| 3589 | 3821 | EXPORT_SYMBOL(pci_release_region); |
|---|
| 3590 | 3822 | |
|---|
| 3591 | 3823 | /** |
|---|
| 3592 | | - * __pci_request_region - Reserved PCI I/O and memory resource |
|---|
| 3593 | | - * @pdev: PCI device whose resources are to be reserved |
|---|
| 3594 | | - * @bar: BAR to be reserved |
|---|
| 3595 | | - * @res_name: Name to be associated with resource. |
|---|
| 3596 | | - * @exclusive: whether the region access is exclusive or not |
|---|
| 3824 | + * __pci_request_region - Reserved PCI I/O and memory resource |
|---|
| 3825 | + * @pdev: PCI device whose resources are to be reserved |
|---|
| 3826 | + * @bar: BAR to be reserved |
|---|
| 3827 | + * @res_name: Name to be associated with resource. |
|---|
| 3828 | + * @exclusive: whether the region access is exclusive or not |
|---|
| 3597 | 3829 | * |
|---|
| 3598 | | - * Mark the PCI region associated with PCI device @pdev BR @bar as |
|---|
| 3599 | | - * being reserved by owner @res_name. Do not access any |
|---|
| 3600 | | - * address inside the PCI regions unless this call returns |
|---|
| 3601 | | - * successfully. |
|---|
| 3830 | + * Mark the PCI region associated with PCI device @pdev BAR @bar as |
|---|
| 3831 | + * being reserved by owner @res_name. Do not access any |
|---|
| 3832 | + * address inside the PCI regions unless this call returns |
|---|
| 3833 | + * successfully. |
|---|
| 3602 | 3834 | * |
|---|
| 3603 | | - * If @exclusive is set, then the region is marked so that userspace |
|---|
| 3604 | | - * is explicitly not allowed to map the resource via /dev/mem or |
|---|
| 3605 | | - * sysfs MMIO access. |
|---|
| 3835 | + * If @exclusive is set, then the region is marked so that userspace |
|---|
| 3836 | + * is explicitly not allowed to map the resource via /dev/mem or |
|---|
| 3837 | + * sysfs MMIO access. |
|---|
| 3606 | 3838 | * |
|---|
| 3607 | | - * Returns 0 on success, or %EBUSY on error. A warning |
|---|
| 3608 | | - * message is also printed on failure. |
|---|
| 3839 | + * Returns 0 on success, or %EBUSY on error. A warning |
|---|
| 3840 | + * message is also printed on failure. |
|---|
| 3609 | 3841 | */ |
|---|
| 3610 | 3842 | static int __pci_request_region(struct pci_dev *pdev, int bar, |
|---|
| 3611 | 3843 | const char *res_name, int exclusive) |
|---|
| .. | .. |
|---|
| 3639 | 3871 | } |
|---|
| 3640 | 3872 | |
|---|
| 3641 | 3873 | /** |
|---|
| 3642 | | - * pci_request_region - Reserve PCI I/O and memory resource |
|---|
| 3643 | | - * @pdev: PCI device whose resources are to be reserved |
|---|
| 3644 | | - * @bar: BAR to be reserved |
|---|
| 3645 | | - * @res_name: Name to be associated with resource |
|---|
| 3874 | + * pci_request_region - Reserve PCI I/O and memory resource |
|---|
| 3875 | + * @pdev: PCI device whose resources are to be reserved |
|---|
| 3876 | + * @bar: BAR to be reserved |
|---|
| 3877 | + * @res_name: Name to be associated with resource |
|---|
| 3646 | 3878 | * |
|---|
| 3647 | | - * Mark the PCI region associated with PCI device @pdev BAR @bar as |
|---|
| 3648 | | - * being reserved by owner @res_name. Do not access any |
|---|
| 3649 | | - * address inside the PCI regions unless this call returns |
|---|
| 3650 | | - * successfully. |
|---|
| 3879 | + * Mark the PCI region associated with PCI device @pdev BAR @bar as |
|---|
| 3880 | + * being reserved by owner @res_name. Do not access any |
|---|
| 3881 | + * address inside the PCI regions unless this call returns |
|---|
| 3882 | + * successfully. |
|---|
| 3651 | 3883 | * |
|---|
| 3652 | | - * Returns 0 on success, or %EBUSY on error. A warning |
|---|
| 3653 | | - * message is also printed on failure. |
|---|
| 3884 | + * Returns 0 on success, or %EBUSY on error. A warning |
|---|
| 3885 | + * message is also printed on failure. |
|---|
| 3654 | 3886 | */ |
|---|
| 3655 | 3887 | int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) |
|---|
| 3656 | 3888 | { |
|---|
| 3657 | 3889 | return __pci_request_region(pdev, bar, res_name, 0); |
|---|
| 3658 | 3890 | } |
|---|
| 3659 | 3891 | EXPORT_SYMBOL(pci_request_region); |
|---|
| 3660 | | - |
|---|
| 3661 | | -/** |
|---|
| 3662 | | - * pci_request_region_exclusive - Reserved PCI I/O and memory resource |
|---|
| 3663 | | - * @pdev: PCI device whose resources are to be reserved |
|---|
| 3664 | | - * @bar: BAR to be reserved |
|---|
| 3665 | | - * @res_name: Name to be associated with resource. |
|---|
| 3666 | | - * |
|---|
| 3667 | | - * Mark the PCI region associated with PCI device @pdev BR @bar as |
|---|
| 3668 | | - * being reserved by owner @res_name. Do not access any |
|---|
| 3669 | | - * address inside the PCI regions unless this call returns |
|---|
| 3670 | | - * successfully. |
|---|
| 3671 | | - * |
|---|
| 3672 | | - * Returns 0 on success, or %EBUSY on error. A warning |
|---|
| 3673 | | - * message is also printed on failure. |
|---|
| 3674 | | - * |
|---|
| 3675 | | - * The key difference that _exclusive makes it that userspace is |
|---|
| 3676 | | - * explicitly not allowed to map the resource via /dev/mem or |
|---|
| 3677 | | - * sysfs. |
|---|
| 3678 | | - */ |
|---|
| 3679 | | -int pci_request_region_exclusive(struct pci_dev *pdev, int bar, |
|---|
| 3680 | | - const char *res_name) |
|---|
| 3681 | | -{ |
|---|
| 3682 | | - return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE); |
|---|
| 3683 | | -} |
|---|
| 3684 | | -EXPORT_SYMBOL(pci_request_region_exclusive); |
|---|
| 3685 | 3892 | |
|---|
| 3686 | 3893 | /** |
|---|
| 3687 | 3894 | * pci_release_selected_regions - Release selected PCI I/O and memory resources |
|---|
| .. | .. |
|---|
| 3695 | 3902 | { |
|---|
| 3696 | 3903 | int i; |
|---|
| 3697 | 3904 | |
|---|
| 3698 | | - for (i = 0; i < 6; i++) |
|---|
| 3905 | + for (i = 0; i < PCI_STD_NUM_BARS; i++) |
|---|
| 3699 | 3906 | if (bars & (1 << i)) |
|---|
| 3700 | 3907 | pci_release_region(pdev, i); |
|---|
| 3701 | 3908 | } |
|---|
| .. | .. |
|---|
| 3706 | 3913 | { |
|---|
| 3707 | 3914 | int i; |
|---|
| 3708 | 3915 | |
|---|
| 3709 | | - for (i = 0; i < 6; i++) |
|---|
| 3916 | + for (i = 0; i < PCI_STD_NUM_BARS; i++) |
|---|
| 3710 | 3917 | if (bars & (1 << i)) |
|---|
| 3711 | 3918 | if (__pci_request_region(pdev, i, res_name, excl)) |
|---|
| 3712 | 3919 | goto err_out; |
|---|
| .. | .. |
|---|
| 3743 | 3950 | EXPORT_SYMBOL(pci_request_selected_regions_exclusive); |
|---|
| 3744 | 3951 | |
|---|
| 3745 | 3952 | /** |
|---|
| 3746 | | - * pci_release_regions - Release reserved PCI I/O and memory resources |
|---|
| 3747 | | - * @pdev: PCI device whose resources were previously reserved by pci_request_regions |
|---|
| 3953 | + * pci_release_regions - Release reserved PCI I/O and memory resources |
|---|
| 3954 | + * @pdev: PCI device whose resources were previously reserved by |
|---|
| 3955 | + * pci_request_regions() |
|---|
| 3748 | 3956 | * |
|---|
| 3749 | | - * Releases all PCI I/O and memory resources previously reserved by a |
|---|
| 3750 | | - * successful call to pci_request_regions. Call this function only |
|---|
| 3751 | | - * after all use of the PCI regions has ceased. |
|---|
| 3957 | + * Releases all PCI I/O and memory resources previously reserved by a |
|---|
| 3958 | + * successful call to pci_request_regions(). Call this function only |
|---|
| 3959 | + * after all use of the PCI regions has ceased. |
|---|
| 3752 | 3960 | */ |
|---|
| 3753 | 3961 | |
|---|
| 3754 | 3962 | void pci_release_regions(struct pci_dev *pdev) |
|---|
| 3755 | 3963 | { |
|---|
| 3756 | | - pci_release_selected_regions(pdev, (1 << 6) - 1); |
|---|
| 3964 | + pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1); |
|---|
| 3757 | 3965 | } |
|---|
| 3758 | 3966 | EXPORT_SYMBOL(pci_release_regions); |
|---|
| 3759 | 3967 | |
|---|
| 3760 | 3968 | /** |
|---|
| 3761 | | - * pci_request_regions - Reserved PCI I/O and memory resources |
|---|
| 3762 | | - * @pdev: PCI device whose resources are to be reserved |
|---|
| 3763 | | - * @res_name: Name to be associated with resource. |
|---|
| 3969 | + * pci_request_regions - Reserve PCI I/O and memory resources |
|---|
| 3970 | + * @pdev: PCI device whose resources are to be reserved |
|---|
| 3971 | + * @res_name: Name to be associated with resource. |
|---|
| 3764 | 3972 | * |
|---|
| 3765 | | - * Mark all PCI regions associated with PCI device @pdev as |
|---|
| 3766 | | - * being reserved by owner @res_name. Do not access any |
|---|
| 3767 | | - * address inside the PCI regions unless this call returns |
|---|
| 3768 | | - * successfully. |
|---|
| 3973 | + * Mark all PCI regions associated with PCI device @pdev as |
|---|
| 3974 | + * being reserved by owner @res_name. Do not access any |
|---|
| 3975 | + * address inside the PCI regions unless this call returns |
|---|
| 3976 | + * successfully. |
|---|
| 3769 | 3977 | * |
|---|
| 3770 | | - * Returns 0 on success, or %EBUSY on error. A warning |
|---|
| 3771 | | - * message is also printed on failure. |
|---|
| 3978 | + * Returns 0 on success, or %EBUSY on error. A warning |
|---|
| 3979 | + * message is also printed on failure. |
|---|
| 3772 | 3980 | */ |
|---|
| 3773 | 3981 | int pci_request_regions(struct pci_dev *pdev, const char *res_name) |
|---|
| 3774 | 3982 | { |
|---|
| 3775 | | - return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name); |
|---|
| 3983 | + return pci_request_selected_regions(pdev, |
|---|
| 3984 | + ((1 << PCI_STD_NUM_BARS) - 1), res_name); |
|---|
| 3776 | 3985 | } |
|---|
| 3777 | 3986 | EXPORT_SYMBOL(pci_request_regions); |
|---|
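
The reworded kernel-doc above describes the request/release pairing in the abstract; the sketch below shows the usual probe-time pattern a driver follows around pci_request_regions(). It is illustrative only and not part of this diff: the "my_drv" names are hypothetical, while pci_enable_device(), pci_request_regions(), pci_iomap(), pci_release_regions() and pci_disable_device() are the real helpers being exercised.

```c
#include <linux/pci.h>

/* Hypothetical driver probe; error paths unwind in reverse order. */
static int my_drv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* Reserve every standard BAR under this driver's name */
	ret = pci_request_regions(pdev, "my_drv");
	if (ret)
		goto err_disable;

	/* Map BAR 0 in full; stashing it in driver private data is omitted */
	regs = pci_iomap(pdev, 0, 0);
	if (!regs) {
		ret = -ENOMEM;
		goto err_release;
	}

	return 0;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return ret;
}
```
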
| 3778 | 3987 | |
|---|
| 3779 | 3988 | /** |
|---|
| 3780 | | - * pci_request_regions_exclusive - Reserved PCI I/O and memory resources |
|---|
| 3781 | | - * @pdev: PCI device whose resources are to be reserved |
|---|
| 3782 | | - * @res_name: Name to be associated with resource. |
|---|
| 3989 | + * pci_request_regions_exclusive - Reserve PCI I/O and memory resources |
|---|
| 3990 | + * @pdev: PCI device whose resources are to be reserved |
|---|
| 3991 | + * @res_name: Name to be associated with resource. |
|---|
| 3783 | 3992 | * |
|---|
| 3784 | | - * Mark all PCI regions associated with PCI device @pdev as |
|---|
| 3785 | | - * being reserved by owner @res_name. Do not access any |
|---|
| 3786 | | - * address inside the PCI regions unless this call returns |
|---|
| 3787 | | - * successfully. |
|---|
| 3993 | + * Mark all PCI regions associated with PCI device @pdev as being reserved |
|---|
| 3994 | + * by owner @res_name. Do not access any address inside the PCI regions |
|---|
| 3995 | + * unless this call returns successfully. |
|---|
| 3788 | 3996 | * |
|---|
| 3789 | | - * pci_request_regions_exclusive() will mark the region so that |
|---|
| 3790 | | - * /dev/mem and the sysfs MMIO access will not be allowed. |
|---|
| 3997 | + * pci_request_regions_exclusive() will mark the region so that /dev/mem |
|---|
| 3998 | + * and the sysfs MMIO access will not be allowed. |
|---|
| 3791 | 3999 | * |
|---|
| 3792 | | - * Returns 0 on success, or %EBUSY on error. A warning |
|---|
| 3793 | | - * message is also printed on failure. |
|---|
| 4000 | + * Returns 0 on success, or %EBUSY on error. A warning message is also |
|---|
| 4001 | + * printed on failure. |
|---|
| 3794 | 4002 | */ |
|---|
| 3795 | 4003 | int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) |
|---|
| 3796 | 4004 | { |
|---|
| 3797 | 4005 | return pci_request_selected_regions_exclusive(pdev, |
|---|
| 3798 | | - ((1 << 6) - 1), res_name); |
|---|
| 4006 | + ((1 << PCI_STD_NUM_BARS) - 1), res_name); |
|---|
| 3799 | 4007 | } |
|---|
| 3800 | 4008 | EXPORT_SYMBOL(pci_request_regions_exclusive); |
|---|
| 3801 | 4009 | |
|---|
| 3802 | 4010 | /* |
|---|
| 3803 | 4011 | * Record the PCI IO range (expressed as CPU physical address + size). |
|---|
| 3804 | | - * Return a negative value if an error has occured, zero otherwise |
|---|
| 4012 | + * Return a negative value if an error has occurred, zero otherwise |
|---|
| 3805 | 4013 | */ |
|---|
| 3806 | 4014 | int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, |
|---|
| 3807 | 4015 | resource_size_t size) |
|---|
| .. | .. |
|---|
| 3847 | 4055 | |
|---|
| 3848 | 4056 | return address; |
|---|
| 3849 | 4057 | } |
|---|
| 4058 | +EXPORT_SYMBOL_GPL(pci_pio_to_address); |
|---|
| 3850 | 4059 | |
|---|
| 3851 | 4060 | unsigned long __weak pci_address_to_pio(phys_addr_t address) |
|---|
| 3852 | 4061 | { |
|---|
| .. | .. |
|---|
| 3861 | 4070 | } |
|---|
| 3862 | 4071 | |
|---|
| 3863 | 4072 | /** |
|---|
| 3864 | | - * pci_remap_iospace - Remap the memory mapped I/O space |
|---|
| 3865 | | - * @res: Resource describing the I/O space |
|---|
| 3866 | | - * @phys_addr: physical address of range to be mapped |
|---|
| 4073 | + * pci_remap_iospace - Remap the memory mapped I/O space |
|---|
| 4074 | + * @res: Resource describing the I/O space |
|---|
| 4075 | + * @phys_addr: physical address of range to be mapped |
|---|
| 3867 | 4076 | * |
|---|
| 3868 | | - * Remap the memory mapped I/O space described by the @res |
|---|
| 3869 | | - * and the CPU physical address @phys_addr into virtual address space. |
|---|
| 3870 | | - * Only architectures that have memory mapped IO functions defined |
|---|
| 3871 | | - * (and the PCI_IOBASE value defined) should call this function. |
|---|
| 4077 | + * Remap the memory mapped I/O space described by the @res and the CPU |
|---|
| 4078 | + * physical address @phys_addr into virtual address space. Only |
|---|
| 4079 | + * architectures that have memory mapped IO functions defined (and the |
|---|
| 4080 | + * PCI_IOBASE value defined) should call this function. |
|---|
| 3872 | 4081 | */ |
|---|
| 3873 | 4082 | int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) |
|---|
| 3874 | 4083 | { |
|---|
| .. | .. |
|---|
| 3884 | 4093 | return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr, |
|---|
| 3885 | 4094 | pgprot_device(PAGE_KERNEL)); |
|---|
| 3886 | 4095 | #else |
|---|
| 3887 | | - /* this architecture does not have memory mapped I/O space, |
|---|
| 3888 | | - so this function should never be called */ |
|---|
| 4096 | + /* |
|---|
| 4097 | + * This architecture does not have memory mapped I/O space, |
|---|
| 4098 | + * so this function should never be called |
|---|
| 4099 | + */ |
|---|
| 3889 | 4100 | WARN_ONCE(1, "This architecture does not support memory mapped I/O\n"); |
|---|
| 3890 | 4101 | return -ENODEV; |
|---|
| 3891 | 4102 | #endif |
|---|
| .. | .. |
|---|
| 3893 | 4104 | EXPORT_SYMBOL(pci_remap_iospace); |
|---|
| 3894 | 4105 | |
|---|
| 3895 | 4106 | /** |
|---|
| 3896 | | - * pci_unmap_iospace - Unmap the memory mapped I/O space |
|---|
| 3897 | | - * @res: resource to be unmapped |
|---|
| 4107 | + * pci_unmap_iospace - Unmap the memory mapped I/O space |
|---|
| 4108 | + * @res: resource to be unmapped |
|---|
| 3898 | 4109 | * |
|---|
| 3899 | | - * Unmap the CPU virtual address @res from virtual address space. |
|---|
| 3900 | | - * Only architectures that have memory mapped IO functions defined |
|---|
| 3901 | | - * (and the PCI_IOBASE value defined) should call this function. |
|---|
| 4110 | + * Unmap the CPU virtual address @res from virtual address space. Only |
|---|
| 4111 | + * architectures that have memory mapped IO functions defined (and the |
|---|
| 4112 | + * PCI_IOBASE value defined) should call this function. |
|---|
| 3902 | 4113 | */ |
|---|
| 3903 | 4114 | void pci_unmap_iospace(struct resource *res) |
|---|
| 3904 | 4115 | { |
|---|
| .. | .. |
|---|
| 4141 | 4352 | if (cacheline_size == pci_cache_line_size) |
|---|
| 4142 | 4353 | return 0; |
|---|
| 4143 | 4354 | |
|---|
| 4144 | | - pci_printk(KERN_DEBUG, dev, "cache line size of %d is not supported\n", |
|---|
| 4355 | + pci_info(dev, "cache line size of %d is not supported\n", |
|---|
| 4145 | 4356 | pci_cache_line_size << 2); |
|---|
| 4146 | 4357 | |
|---|
| 4147 | 4358 | return -EINVAL; |
|---|
| .. | .. |
|---|
| 4244 | 4455 | * @pdev: the PCI device to operate on |
|---|
| 4245 | 4456 | * @enable: boolean: whether to enable or disable PCI INTx |
|---|
| 4246 | 4457 | * |
|---|
| 4247 | | - * Enables/disables PCI INTx for device dev |
|---|
| 4458 | + * Enables/disables PCI INTx for device @pdev |
|---|
| 4248 | 4459 | */ |
|---|
| 4249 | 4460 | void pci_intx(struct pci_dev *pdev, int enable) |
|---|
| 4250 | 4461 | { |
|---|
| .. | .. |
|---|
| 4320 | 4531 | * pci_check_and_mask_intx - mask INTx on pending interrupt |
|---|
| 4321 | 4532 | * @dev: the PCI device to operate on |
|---|
| 4322 | 4533 | * |
|---|
| 4323 | | - * Check if the device dev has its INTx line asserted, mask it and |
|---|
| 4324 | | - * return true in that case. False is returned if no interrupt was |
|---|
| 4325 | | - * pending. |
|---|
| 4534 | + * Check if the device dev has its INTx line asserted, mask it and return |
|---|
| 4535 | + * true in that case. False is returned if no interrupt was pending. |
|---|
| 4326 | 4536 | */ |
|---|
| 4327 | 4537 | bool pci_check_and_mask_intx(struct pci_dev *dev) |
|---|
| 4328 | 4538 | { |
|---|
| .. | .. |
|---|
| 4334 | 4544 | * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending |
|---|
| 4335 | 4545 | * @dev: the PCI device to operate on |
|---|
| 4336 | 4546 | * |
|---|
| 4337 | | - * Check if the device dev has its INTx line asserted, unmask it if not |
|---|
| 4338 | | - * and return true. False is returned and the mask remains active if |
|---|
| 4339 | | - * there was still an interrupt pending. |
|---|
| 4547 | + * Check if the device dev has its INTx line asserted, unmask it if not and |
|---|
| 4548 | + * return true. False is returned and the mask remains active if there was |
|---|
| 4549 | + * still an interrupt pending. |
|---|
| 4340 | 4550 | */ |
|---|
| 4341 | 4551 | bool pci_check_and_unmask_intx(struct pci_dev *dev) |
|---|
| 4342 | 4552 | { |
|---|
| .. | .. |
|---|
| 4345 | 4555 | EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx); |
|---|
| 4346 | 4556 | |
|---|
| 4347 | 4557 | /** |
|---|
| 4348 | | - * pci_wait_for_pending_transaction - waits for pending transaction |
|---|
| 4558 | + * pci_wait_for_pending_transaction - wait for pending transaction |
|---|
| 4349 | 4559 | * @dev: the PCI device to operate on |
|---|
| 4350 | 4560 | * |
|---|
| 4351 | 4561 | * Return 0 if transaction is pending 1 otherwise. |
|---|
| .. | .. |
|---|
| 4360 | 4570 | } |
|---|
| 4361 | 4571 | EXPORT_SYMBOL(pci_wait_for_pending_transaction); |
|---|
| 4362 | 4572 | |
|---|
| 4363 | | -static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) |
|---|
| 4364 | | -{ |
|---|
| 4365 | | - int delay = 1; |
|---|
| 4366 | | - u32 id; |
|---|
| 4367 | | - |
|---|
| 4368 | | - /* |
|---|
| 4369 | | - * After reset, the device should not silently discard config |
|---|
| 4370 | | - * requests, but it may still indicate that it needs more time by |
|---|
| 4371 | | - * responding to them with CRS completions. The Root Port will |
|---|
| 4372 | | - * generally synthesize ~0 data to complete the read (except when |
|---|
| 4373 | | - * CRS SV is enabled and the read was for the Vendor ID; in that |
|---|
| 4374 | | - * case it synthesizes 0x0001 data). |
|---|
| 4375 | | - * |
|---|
| 4376 | | - * Wait for the device to return a non-CRS completion. Read the |
|---|
| 4377 | | - * Command register instead of Vendor ID so we don't have to |
|---|
| 4378 | | - * contend with the CRS SV value. |
|---|
| 4379 | | - */ |
|---|
| 4380 | | - pci_read_config_dword(dev, PCI_COMMAND, &id); |
|---|
| 4381 | | - while (id == ~0) { |
|---|
| 4382 | | - if (delay > timeout) { |
|---|
| 4383 | | - pci_warn(dev, "not ready %dms after %s; giving up\n", |
|---|
| 4384 | | - delay - 1, reset_type); |
|---|
| 4385 | | - return -ENOTTY; |
|---|
| 4386 | | - } |
|---|
| 4387 | | - |
|---|
| 4388 | | - if (delay > 1000) |
|---|
| 4389 | | - pci_info(dev, "not ready %dms after %s; waiting\n", |
|---|
| 4390 | | - delay - 1, reset_type); |
|---|
| 4391 | | - |
|---|
| 4392 | | - msleep(delay); |
|---|
| 4393 | | - delay *= 2; |
|---|
| 4394 | | - pci_read_config_dword(dev, PCI_COMMAND, &id); |
|---|
| 4395 | | - } |
|---|
| 4396 | | - |
|---|
| 4397 | | - if (delay > 1000) |
|---|
| 4398 | | - pci_info(dev, "ready %dms after %s\n", delay - 1, |
|---|
| 4399 | | - reset_type); |
|---|
| 4400 | | - |
|---|
| 4401 | | - return 0; |
|---|
| 4402 | | -} |
|---|
| 4403 | | - |
|---|
| 4404 | 4573 | /** |
|---|
| 4405 | 4574 | * pcie_has_flr - check if a device supports function level resets |
|---|
| 4406 | | - * @dev: device to check |
|---|
| 4575 | + * @dev: device to check |
|---|
| 4407 | 4576 | * |
|---|
| 4408 | 4577 | * Returns true if the device advertises support for PCIe function level |
|---|
| 4409 | 4578 | * resets. |
|---|
| .. | .. |
|---|
| 4422 | 4591 | |
|---|
| 4423 | 4592 | /** |
|---|
| 4424 | 4593 | * pcie_flr - initiate a PCIe function level reset |
|---|
| 4425 | | - * @dev: device to reset |
|---|
| 4594 | + * @dev: device to reset |
|---|
| 4426 | 4595 | * |
|---|
| 4427 | 4596 | * Initiate a function level reset on @dev. The caller should ensure the |
|---|
| 4428 | 4597 | * device supports FLR before calling this function, e.g. by using the |
|---|
| .. | .. |
|---|
| 4434 | 4603 | pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n"); |
|---|
| 4435 | 4604 | |
|---|
| 4436 | 4605 | pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); |
|---|
| 4606 | + |
|---|
| 4607 | + if (dev->imm_ready) |
|---|
| 4608 | + return 0; |
|---|
| 4437 | 4609 | |
|---|
| 4438 | 4610 | /* |
|---|
| 4439 | 4611 | * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within |
|---|
| .. | .. |
|---|
| 4467 | 4639 | |
|---|
| 4468 | 4640 | /* |
|---|
| 4469 | 4641 | * Wait for Transaction Pending bit to clear. A word-aligned test |
|---|
| 4470 | | - * is used, so we use the conrol offset rather than status and shift |
|---|
| 4642 | + * is used, so we use the control offset rather than status and shift |
|---|
| 4471 | 4643 | * the test bit to match. |
|---|
| 4472 | 4644 | */ |
|---|
| 4473 | 4645 | if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL, |
|---|
| .. | .. |
|---|
| 4475 | 4647 | pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n"); |
|---|
| 4476 | 4648 | |
|---|
| 4477 | 4649 | pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); |
|---|
| 4650 | + |
|---|
| 4651 | + if (dev->imm_ready) |
|---|
| 4652 | + return 0; |
|---|
| 4478 | 4653 | |
|---|
| 4479 | 4654 | /* |
|---|
| 4480 | 4655 | * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006, |
|---|
| .. | .. |
|---|
| 4499 | 4674 | * |
|---|
| 4500 | 4675 | * NOTE: This causes the caller to sleep for twice the device power transition |
|---|
| 4501 | 4676 | * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms |
|---|
| 4502 | | - * by default (i.e. unless the @dev's d3_delay field has a different value). |
|---|
| 4677 | + * by default (i.e. unless the @dev's d3hot_delay field has a different value). |
|---|
| 4503 | 4678 | * Moreover, only devices in D0 can be reset by this function. |
|---|
| 4504 | 4679 | */ |
|---|
| 4505 | 4680 | static int pci_pm_reset(struct pci_dev *dev, int probe) |
|---|
| .. | .. |
|---|
| 4529 | 4704 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); |
|---|
| 4530 | 4705 | pci_dev_d3_sleep(dev); |
|---|
| 4531 | 4706 | |
|---|
| 4532 | | - return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS); |
|---|
| 4707 | + return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS); |
|---|
| 4533 | 4708 | } |
|---|
| 4709 | + |
|---|
| 4710 | +/** |
|---|
| 4711 | + * pcie_wait_for_link_delay - Wait until link is active or inactive |
|---|
| 4712 | + * @pdev: Bridge device |
|---|
| 4713 | + * @active: waiting for active or inactive? |
|---|
| 4714 | + * @delay: Delay to wait after link has become active (in ms) |
|---|
| 4715 | + * |
|---|
| 4716 | + * Use this to wait until the link becomes active or inactive.
|---|
| 4717 | + */ |
|---|
| 4718 | +static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, |
|---|
| 4719 | + int delay) |
|---|
| 4720 | +{ |
|---|
| 4721 | + int timeout = 1000; |
|---|
| 4722 | + bool ret; |
|---|
| 4723 | + u16 lnk_status; |
|---|
| 4724 | + |
|---|
| 4725 | + /* |
|---|
| 4726 | + * Some controllers might not implement link active reporting. In this |
|---|
| 4727 | + * case, we wait for 1000 ms + any delay requested by the caller. |
|---|
| 4728 | + */ |
|---|
| 4729 | + if (!pdev->link_active_reporting) { |
|---|
| 4730 | + msleep(timeout + delay); |
|---|
| 4731 | + return true; |
|---|
| 4732 | + } |
|---|
| 4733 | + |
|---|
| 4734 | + /* |
|---|
| 4735 | + * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20 ms,
|---|
| 4736 | + * after which we should expect the link to become active if the reset was
|---|
| 4737 | + * successful. If so, software must wait a minimum of 100 ms before sending
|---|
| 4738 | + * configuration requests to devices downstream of this port.
|---|
| 4739 | + * |
|---|
| 4740 | + * If the link fails to activate, either the device was physically |
|---|
| 4741 | + * removed or the link is permanently failed. |
|---|
| 4742 | + */ |
|---|
| 4743 | + if (active) |
|---|
| 4744 | + msleep(20); |
|---|
| 4745 | + for (;;) { |
|---|
| 4746 | + pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); |
|---|
| 4747 | + ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA); |
|---|
| 4748 | + if (ret == active) |
|---|
| 4749 | + break; |
|---|
| 4750 | + if (timeout <= 0) |
|---|
| 4751 | + break; |
|---|
| 4752 | + msleep(10); |
|---|
| 4753 | + timeout -= 10; |
|---|
| 4754 | + } |
|---|
| 4755 | + if (active && ret) |
|---|
| 4756 | + msleep(delay); |
|---|
| 4757 | + |
|---|
| 4758 | + return ret == active; |
|---|
| 4759 | +} |
|---|
| 4760 | + |
|---|
| 4534 | 4761 | /** |
|---|
| 4535 | 4762 | * pcie_wait_for_link - Wait until link is active or inactive |
|---|
| 4536 | 4763 | * @pdev: Bridge device |
|---|
| .. | .. |
|---|
| 4540 | 4767 | */ |
|---|
| 4541 | 4768 | bool pcie_wait_for_link(struct pci_dev *pdev, bool active) |
|---|
| 4542 | 4769 | { |
|---|
| 4543 | | - int timeout = 1000; |
|---|
| 4544 | | - bool ret; |
|---|
| 4545 | | - u16 lnk_status; |
|---|
| 4770 | + return pcie_wait_for_link_delay(pdev, active, 100); |
|---|
| 4771 | +} |
|---|
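
A condensed, hypothetical caller of pcie_wait_for_link() above, just to make the boolean polarity concrete (true waits for the link to come up, false for it to go down); the my_port_wait_link_up name is made up:

```c
#include <linux/pci.h>

/* Hypothetical: wait for the link behind a downstream port to train after a reset */
static int my_port_wait_link_up(struct pci_dev *bridge)
{
	/* true: wait for Data Link Layer Link Active; false would wait for link down */
	if (!pcie_wait_for_link(bridge, true))
		return -ETIMEDOUT;

	return 0;
}
```
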
| 4546 | 4772 | |
|---|
| 4547 | | - for (;;) { |
|---|
| 4548 | | - pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); |
|---|
| 4549 | | - ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA); |
|---|
| 4550 | | - if (ret == active) |
|---|
| 4551 | | - return true; |
|---|
| 4552 | | - if (timeout <= 0) |
|---|
| 4553 | | - break; |
|---|
| 4554 | | - msleep(10); |
|---|
| 4555 | | - timeout -= 10; |
|---|
| 4773 | +/* |
|---|
| 4774 | + * Find maximum D3cold delay required by all the devices on the bus. The |
|---|
| 4775 | + * spec says 100 ms, but firmware can lower it and we allow drivers to |
|---|
| 4776 | + * increase it as well. |
|---|
| 4777 | + * |
|---|
| 4778 | + * Called with @pci_bus_sem locked for reading. |
|---|
| 4779 | + */ |
|---|
| 4780 | +static int pci_bus_max_d3cold_delay(const struct pci_bus *bus) |
|---|
| 4781 | +{ |
|---|
| 4782 | + const struct pci_dev *pdev; |
|---|
| 4783 | + int min_delay = 100; |
|---|
| 4784 | + int max_delay = 0; |
|---|
| 4785 | + |
|---|
| 4786 | + list_for_each_entry(pdev, &bus->devices, bus_list) { |
|---|
| 4787 | + if (pdev->d3cold_delay < min_delay) |
|---|
| 4788 | + min_delay = pdev->d3cold_delay; |
|---|
| 4789 | + if (pdev->d3cold_delay > max_delay) |
|---|
| 4790 | + max_delay = pdev->d3cold_delay; |
|---|
| 4556 | 4791 | } |
|---|
| 4557 | 4792 | |
|---|
| 4558 | | - pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n", |
|---|
| 4559 | | - active ? "set" : "cleared"); |
|---|
| 4793 | + return max(min_delay, max_delay); |
|---|
| 4794 | +} |
|---|
| 4560 | 4795 | |
|---|
| 4561 | | - return false; |
|---|
| 4796 | +/** |
|---|
| 4797 | + * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible |
|---|
| 4798 | + * @dev: PCI bridge |
|---|
| 4799 | + * @reset_type: reset type in human-readable form |
|---|
| 4800 | + * @timeout: maximum time to wait for devices on secondary bus (milliseconds) |
|---|
| 4801 | + * |
|---|
| 4802 | + * Handle necessary delays before access to the devices on the secondary |
|---|
| 4803 | + * side of the bridge is permitted after a D3cold to D0 transition
|---|
| 4804 | + * or Conventional Reset. |
|---|
| 4805 | + * |
|---|
| 4806 | + * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For |
|---|
| 4807 | + * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section |
|---|
| 4808 | + * 4.3.2. |
|---|
| 4809 | + * |
|---|
| 4810 | + * Return 0 on success or -ENOTTY if the first device on the secondary bus |
|---|
| 4811 | + * failed to become accessible. |
|---|
| 4812 | + */ |
|---|
| 4813 | +int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type, |
|---|
| 4814 | + int timeout) |
|---|
| 4815 | +{ |
|---|
| 4816 | + struct pci_dev *child; |
|---|
| 4817 | + int delay; |
|---|
| 4818 | + |
|---|
| 4819 | + if (pci_dev_is_disconnected(dev)) |
|---|
| 4820 | + return 0; |
|---|
| 4821 | + |
|---|
| 4822 | + if (!pci_is_bridge(dev)) |
|---|
| 4823 | + return 0; |
|---|
| 4824 | + |
|---|
| 4825 | + down_read(&pci_bus_sem); |
|---|
| 4826 | + |
|---|
| 4827 | + /* |
|---|
| 4828 | + * We only deal with devices that are currently present on the bus.
|---|
| 4829 | + * For any hot-added devices the access delay is handled in pciehp |
|---|
| 4830 | + * board_added(). In case of ACPI hotplug the firmware is expected |
|---|
| 4831 | + * to configure the devices before the OS is notified.
|---|
| 4832 | + */ |
|---|
| 4833 | + if (!dev->subordinate || list_empty(&dev->subordinate->devices)) { |
|---|
| 4834 | + up_read(&pci_bus_sem); |
|---|
| 4835 | + return 0; |
|---|
| 4836 | + } |
|---|
| 4837 | + |
|---|
| 4838 | + /* Take d3cold_delay requirements into account */ |
|---|
| 4839 | + delay = pci_bus_max_d3cold_delay(dev->subordinate); |
|---|
| 4840 | + if (!delay) { |
|---|
| 4841 | + up_read(&pci_bus_sem); |
|---|
| 4842 | + return 0; |
|---|
| 4843 | + } |
|---|
| 4844 | + |
|---|
| 4845 | + child = list_first_entry(&dev->subordinate->devices, struct pci_dev, |
|---|
| 4846 | + bus_list); |
|---|
| 4847 | + up_read(&pci_bus_sem); |
|---|
| 4848 | + |
|---|
| 4849 | + /* |
|---|
| 4850 | + * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
|---|
| 4851 | + * accessing the device after reset (that is 1000 ms + 100 ms). |
|---|
| 4852 | + */ |
|---|
| 4853 | + if (!pci_is_pcie(dev)) { |
|---|
| 4854 | + pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay); |
|---|
| 4855 | + msleep(1000 + delay); |
|---|
| 4856 | + return 0; |
|---|
| 4857 | + } |
|---|
| 4858 | + |
|---|
| 4859 | + /* |
|---|
| 4860 | + * For PCIe downstream and root ports that do not support speeds |
|---|
| 4861 | + * greater than 5 GT/s, we need to wait a minimum of 100 ms. For higher
|---|
| 4862 | + * speeds (gen3) we need to wait first for the data link layer to |
|---|
| 4863 | + * become active. |
|---|
| 4864 | + * |
|---|
| 4865 | + * However, 100 ms is the minimum and the PCIe spec says the |
|---|
| 4866 | + * software must allow at least 1s before it can determine that the |
|---|
| 4867 | + * device that did not respond is a broken device. There is |
|---|
| 4868 | + * evidence that 100 ms is not always enough, for example a certain
|---|
| 4869 | + * Titan Ridge xHCI controller does not always respond to |
|---|
| 4870 | + * configuration requests if we only wait for 100 ms (see |
|---|
| 4871 | + * https://bugzilla.kernel.org/show_bug.cgi?id=203885). |
|---|
| 4872 | + * |
|---|
| 4873 | + * Therefore we wait for 100 ms and check for the device presence |
|---|
| 4874 | + * until the timeout expires. |
|---|
| 4875 | + */ |
|---|
| 4876 | + if (!pcie_downstream_port(dev)) |
|---|
| 4877 | + return 0; |
|---|
| 4878 | + |
|---|
| 4879 | + if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) { |
|---|
| 4880 | + pci_dbg(dev, "waiting %d ms for downstream link\n", delay); |
|---|
| 4881 | + msleep(delay); |
|---|
| 4882 | + } else { |
|---|
| 4883 | + pci_dbg(dev, "waiting %d ms for downstream link, after activation\n", |
|---|
| 4884 | + delay); |
|---|
| 4885 | + if (!pcie_wait_for_link_delay(dev, true, delay)) { |
|---|
| 4886 | + /* Did not train, no need to wait any further */ |
|---|
| 4887 | + pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n"); |
|---|
| 4888 | + return -ENOTTY; |
|---|
| 4889 | + } |
|---|
| 4890 | + } |
|---|
| 4891 | + |
|---|
| 4892 | + return pci_dev_wait(child, reset_type, timeout - delay); |
|---|
| 4562 | 4893 | } |
|---|
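
A quick sanity check on the conventional-PCI wait chosen above (and on the explicit comment removed from pci_reset_secondary_bus() just below): Trhfa, the reset-high-to-first-access time, is specified as 2^25 PCI clock cycles, which at the minimum 33 MHz clock works out to roughly one second; the remaining 100 ms cited in the comment corresponds to Tpvrh.

$$T_{\mathrm{rhfa}} = \frac{2^{25}\ \text{cycles}}{33 \times 10^{6}\ \text{Hz}} \approx 1.02\ \text{s} \approx 1000\ \text{ms}$$
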
| 4563 | 4894 | |
|---|
| 4564 | 4895 | void pci_reset_secondary_bus(struct pci_dev *dev) |
|---|
| .. | .. |
|---|
| 4577 | 4908 | |
|---|
| 4578 | 4909 | ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; |
|---|
| 4579 | 4910 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); |
|---|
| 4580 | | - |
|---|
| 4581 | | - /* |
|---|
| 4582 | | - * Trhfa for conventional PCI is 2^25 clock cycles. |
|---|
| 4583 | | - * Assuming a minimum 33MHz clock this results in a 1s |
|---|
| 4584 | | - * delay before we can consider subordinate devices to |
|---|
| 4585 | | - * be re-initialized. PCIe has some ways to shorten this, |
|---|
| 4586 | | - * but we don't make use of them yet. |
|---|
| 4587 | | - */ |
|---|
| 4588 | | - ssleep(1); |
|---|
| 4589 | 4911 | } |
|---|
| 4590 | 4912 | |
|---|
| 4591 | 4913 | void __weak pcibios_reset_secondary_bus(struct pci_dev *dev) |
|---|
| .. | .. |
|---|
| 4604 | 4926 | { |
|---|
| 4605 | 4927 | pcibios_reset_secondary_bus(dev); |
|---|
| 4606 | 4928 | |
|---|
| 4607 | | - return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS); |
|---|
| 4929 | + return pci_bridge_wait_for_secondary_bus(dev, "bus reset", |
|---|
| 4930 | + PCIE_RESET_READY_POLL_MS); |
|---|
| 4608 | 4931 | } |
|---|
| 4609 | 4932 | EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset); |
|---|
| 4610 | 4933 | |
|---|
| .. | .. |
|---|
| 4630 | 4953 | { |
|---|
| 4631 | 4954 | int rc = -ENOTTY; |
|---|
| 4632 | 4955 | |
|---|
| 4633 | | - if (!hotplug || !try_module_get(hotplug->ops->owner)) |
|---|
| 4956 | + if (!hotplug || !try_module_get(hotplug->owner)) |
|---|
| 4634 | 4957 | return rc; |
|---|
| 4635 | 4958 | |
|---|
| 4636 | 4959 | if (hotplug->ops->reset_slot) |
|---|
| 4637 | 4960 | rc = hotplug->ops->reset_slot(hotplug, probe); |
|---|
| 4638 | 4961 | |
|---|
| 4639 | | - module_put(hotplug->ops->owner); |
|---|
| 4962 | + module_put(hotplug->owner); |
|---|
| 4640 | 4963 | |
|---|
| 4641 | 4964 | return rc; |
|---|
| 4642 | 4965 | } |
|---|
| 4643 | 4966 | |
|---|
| 4644 | 4967 | static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe) |
|---|
| 4645 | 4968 | { |
|---|
| 4646 | | - struct pci_dev *pdev; |
|---|
| 4647 | | - |
|---|
| 4648 | | - if (dev->subordinate || !dev->slot || |
|---|
| 4969 | + if (dev->multifunction || dev->subordinate || !dev->slot || |
|---|
| 4649 | 4970 | dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) |
|---|
| 4650 | 4971 | return -ENOTTY; |
|---|
| 4651 | | - |
|---|
| 4652 | | - list_for_each_entry(pdev, &dev->bus->devices, bus_list) |
|---|
| 4653 | | - if (pdev != dev && pdev->slot == dev->slot) |
|---|
| 4654 | | - return -ENOTTY; |
|---|
| 4655 | 4972 | |
|---|
| 4656 | 4973 | return pci_reset_hotplug_slot(dev->slot->hotplug, probe); |
|---|
| 4657 | 4974 | } |
|---|
| 4658 | 4975 | |
|---|
| 4659 | 4976 | static void pci_dev_lock(struct pci_dev *dev) |
|---|
| 4660 | 4977 | { |
|---|
| 4661 | | - pci_cfg_access_lock(dev); |
|---|
| 4662 | 4978 | /* block PM suspend, driver probe, etc. */ |
|---|
| 4663 | 4979 | device_lock(&dev->dev); |
|---|
| 4980 | + pci_cfg_access_lock(dev); |
|---|
| 4664 | 4981 | } |
|---|
| 4665 | 4982 | |
|---|
| 4666 | 4983 | /* Return 1 on successful lock, 0 on contention */ |
|---|
| 4667 | 4984 | static int pci_dev_trylock(struct pci_dev *dev) |
|---|
| 4668 | 4985 | { |
|---|
| 4669 | | - if (pci_cfg_access_trylock(dev)) { |
|---|
| 4670 | | - if (device_trylock(&dev->dev)) |
|---|
| 4986 | + if (device_trylock(&dev->dev)) { |
|---|
| 4987 | + if (pci_cfg_access_trylock(dev)) |
|---|
| 4671 | 4988 | return 1; |
|---|
| 4672 | | - pci_cfg_access_unlock(dev); |
|---|
| 4989 | + device_unlock(&dev->dev); |
|---|
| 4673 | 4990 | } |
|---|
| 4674 | 4991 | |
|---|
| 4675 | 4992 | return 0; |
|---|
| .. | .. |
|---|
| 4677 | 4994 | |
|---|
| 4678 | 4995 | static void pci_dev_unlock(struct pci_dev *dev) |
|---|
| 4679 | 4996 | { |
|---|
| 4680 | | - device_unlock(&dev->dev); |
|---|
| 4681 | 4997 | pci_cfg_access_unlock(dev); |
|---|
| 4998 | + device_unlock(&dev->dev); |
|---|
| 4682 | 4999 | } |
|---|
| 4683 | 5000 | |
|---|
| 4684 | 5001 | static void pci_dev_save_and_disable(struct pci_dev *dev) |
|---|
| .. | .. |
|---|
| 4739 | 5056 | * |
|---|
| 4740 | 5057 | * The device function is presumed to be unused and the caller is holding |
|---|
| 4741 | 5058 | * the device mutex lock when this function is called. |
|---|
| 5059 | + * |
|---|
| 4742 | 5060 | * Resetting the device will make the contents of PCI configuration space |
|---|
| 4743 | 5061 | * random, so any caller of this must be prepared to reinitialise the |
|---|
| 4744 | 5062 | * device including MSI, bus mastering, BARs, decoding IO and memory spaces, |
|---|
| .. | .. |
|---|
| 5302 | 5620 | * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count |
|---|
| 5303 | 5621 | * @dev: PCI device to query |
|---|
| 5304 | 5622 | * |
|---|
| 5305 | | - * Returns mmrbc: maximum designed memory read count in bytes |
|---|
| 5306 | | - * or appropriate error value. |
|---|
| 5623 | + * Returns mmrbc: maximum designed memory read count in bytes or |
|---|
| 5624 | + * appropriate error value. |
|---|
| 5307 | 5625 | */ |
|---|
| 5308 | 5626 | int pcix_get_max_mmrbc(struct pci_dev *dev) |
|---|
| 5309 | 5627 | { |
|---|
| .. | .. |
|---|
| 5325 | 5643 | * pcix_get_mmrbc - get PCI-X maximum memory read byte count |
|---|
| 5326 | 5644 | * @dev: PCI device to query |
|---|
| 5327 | 5645 | * |
|---|
| 5328 | | - * Returns mmrbc: maximum memory read count in bytes |
|---|
| 5329 | | - * or appropriate error value. |
|---|
| 5646 | + * Returns mmrbc: maximum memory read count in bytes or appropriate error |
|---|
| 5647 | + * value. |
|---|
| 5330 | 5648 | */ |
|---|
| 5331 | 5649 | int pcix_get_mmrbc(struct pci_dev *dev) |
|---|
| 5332 | 5650 | { |
|---|
| .. | .. |
|---|
| 5350 | 5668 | * @mmrbc: maximum memory read count in bytes |
|---|
| 5351 | 5669 | * valid values are 512, 1024, 2048, 4096 |
|---|
| 5352 | 5670 | * |
|---|
| 5353 | | - * If possible sets maximum memory read byte count, some bridges have erratas |
|---|
| 5671 | + * If possible, set the maximum memory read byte count; some bridges have errata
|---|
| 5354 | 5672 | * that prevent this. |
|---|
| 5355 | 5673 | */ |
|---|
| 5356 | 5674 | int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) |
|---|
| .. | .. |
|---|
| 5395 | 5713 | * pcie_get_readrq - get PCI Express read request size |
|---|
| 5396 | 5714 | * @dev: PCI device to query |
|---|
| 5397 | 5715 | * |
|---|
| 5398 | | - * Returns maximum memory read request in bytes |
|---|
| 5399 | | - * or appropriate error value. |
|---|
| 5716 | + * Returns maximum memory read request in bytes or appropriate error value. |
|---|
| 5400 | 5717 | */ |
|---|
| 5401 | 5718 | int pcie_get_readrq(struct pci_dev *dev) |
|---|
| 5402 | 5719 | { |
|---|
| .. | .. |
|---|
| 5419 | 5736 | int pcie_set_readrq(struct pci_dev *dev, int rq) |
|---|
| 5420 | 5737 | { |
|---|
| 5421 | 5738 | u16 v; |
|---|
| 5739 | + int ret; |
|---|
| 5422 | 5740 | |
|---|
| 5423 | 5741 | if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) |
|---|
| 5424 | 5742 | return -EINVAL; |
|---|
| 5425 | 5743 | |
|---|
| 5426 | 5744 | /* |
|---|
| 5427 | | - * If using the "performance" PCIe config, we clamp the |
|---|
| 5428 | | - * read rq size to the max packet size to prevent the |
|---|
| 5429 | | - * host bridge generating requests larger than we can |
|---|
| 5430 | | - * cope with |
|---|
| 5745 | + * If using the "performance" PCIe config, we clamp the read rq |
|---|
| 5746 | + * size to the max packet size to keep the host bridge from |
|---|
| 5747 | + * generating requests larger than we can cope with. |
|---|
| 5431 | 5748 | */ |
|---|
| 5432 | 5749 | if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { |
|---|
| 5433 | 5750 | int mps = pcie_get_mps(dev); |
|---|
| .. | .. |
|---|
| 5438 | 5755 | |
|---|
| 5439 | 5756 | v = (ffs(rq) - 8) << 12; |
|---|
| 5440 | 5757 | |
|---|
| 5441 | | - return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, |
|---|
| 5758 | + ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, |
|---|
| 5442 | 5759 | PCI_EXP_DEVCTL_READRQ, v); |
|---|
| 5760 | + |
|---|
| 5761 | + return pcibios_err_to_errno(ret); |
|---|
| 5443 | 5762 | } |
|---|
| 5444 | 5763 | EXPORT_SYMBOL(pcie_set_readrq); |
|---|
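
A brief, hypothetical caller of the reworked pcie_set_readrq(): with the pcibios_err_to_errno() conversion above, a failing call now returns an ordinary negative errno rather than a raw PCIBIOS_* code, so callers can report it directly. Only pcie_set_readrq() and pcie_get_readrq() are real APIs here; my_drv_tune_mrrs is made up.

```c
#include <linux/pci.h>

/* Hypothetical helper clamping the Max Read Request Size of a device */
static void my_drv_tune_mrrs(struct pci_dev *pdev)
{
	int ret;

	ret = pcie_set_readrq(pdev, 512);	/* MRRS = 512 bytes */
	if (ret)				/* now e.g. -EINVAL or -ENODEV */
		pci_warn(pdev, "failed to set MRRS: %d\n", ret);
	else
		pci_dbg(pdev, "MRRS is now %d bytes\n", pcie_get_readrq(pdev));
}
```
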
| 5445 | 5764 | |
|---|
| .. | .. |
|---|
| 5470 | 5789 | int pcie_set_mps(struct pci_dev *dev, int mps) |
|---|
| 5471 | 5790 | { |
|---|
| 5472 | 5791 | u16 v; |
|---|
| 5792 | + int ret; |
|---|
| 5473 | 5793 | |
|---|
| 5474 | 5794 | if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) |
|---|
| 5475 | 5795 | return -EINVAL; |
|---|
| .. | .. |
|---|
| 5479 | 5799 | return -EINVAL; |
|---|
| 5480 | 5800 | v <<= 5; |
|---|
| 5481 | 5801 | |
|---|
| 5482 | | - return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, |
|---|
| 5802 | + ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, |
|---|
| 5483 | 5803 | PCI_EXP_DEVCTL_PAYLOAD, v); |
|---|
| 5804 | + |
|---|
| 5805 | + return pcibios_err_to_errno(ret); |
|---|
| 5484 | 5806 | } |
|---|
| 5485 | 5807 | EXPORT_SYMBOL(pcie_set_mps); |
|---|
| 5486 | 5808 | |
|---|
| .. | .. |
|---|
| 5563 | 5885 | * where only 2.5 GT/s and 5.0 GT/s speeds were defined. |
|---|
| 5564 | 5886 | */ |
|---|
| 5565 | 5887 | pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); |
|---|
| 5566 | | - if (lnkcap2) { /* PCIe r3.0-compliant */ |
|---|
| 5567 | | - if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) |
|---|
| 5568 | | - return PCIE_SPEED_16_0GT; |
|---|
| 5569 | | - else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) |
|---|
| 5570 | | - return PCIE_SPEED_8_0GT; |
|---|
| 5571 | | - else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) |
|---|
| 5572 | | - return PCIE_SPEED_5_0GT; |
|---|
| 5573 | | - else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) |
|---|
| 5574 | | - return PCIE_SPEED_2_5GT; |
|---|
| 5575 | | - return PCI_SPEED_UNKNOWN; |
|---|
| 5576 | | - } |
|---|
| 5888 | + |
|---|
| 5889 | + /* PCIe r3.0-compliant */ |
|---|
| 5890 | + if (lnkcap2) |
|---|
| 5891 | + return PCIE_LNKCAP2_SLS2SPEED(lnkcap2); |
|---|
| 5577 | 5892 | |
|---|
| 5578 | 5893 | pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); |
|---|
| 5579 | 5894 | if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB) |
|---|
| .. | .. |
|---|
| 5649 | 5964 | if (bw_avail >= bw_cap && verbose) |
|---|
| 5650 | 5965 | pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", |
|---|
| 5651 | 5966 | bw_cap / 1000, bw_cap % 1000, |
|---|
| 5652 | | - PCIE_SPEED2STR(speed_cap), width_cap); |
|---|
| 5967 | + pci_speed_string(speed_cap), width_cap); |
|---|
| 5653 | 5968 | else if (bw_avail < bw_cap) |
|---|
| 5654 | 5969 | pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", |
|---|
| 5655 | 5970 | bw_avail / 1000, bw_avail % 1000, |
|---|
| 5656 | | - PCIE_SPEED2STR(speed), width, |
|---|
| 5971 | + pci_speed_string(speed), width, |
|---|
| 5657 | 5972 | limiting_dev ? pci_name(limiting_dev) : "<unknown>", |
|---|
| 5658 | 5973 | bw_cap / 1000, bw_cap % 1000, |
|---|
| 5659 | | - PCIE_SPEED2STR(speed_cap), width_cap); |
|---|
| 5974 | + pci_speed_string(speed_cap), width_cap); |
|---|
| 5660 | 5975 | } |
|---|
| 5661 | 5976 | |
|---|
| 5662 | 5977 | /** |
|---|
| .. | .. |
|---|
| 5730 | 6045 | |
|---|
| 5731 | 6046 | if (flags & PCI_VGA_STATE_CHANGE_DECODES) { |
|---|
| 5732 | 6047 | pci_read_config_word(dev, PCI_COMMAND, &cmd); |
|---|
| 5733 | | - if (decode == true) |
|---|
| 6048 | + if (decode) |
|---|
| 5734 | 6049 | cmd |= command_bits; |
|---|
| 5735 | 6050 | else |
|---|
| 5736 | 6051 | cmd &= ~command_bits; |
|---|
| .. | .. |
|---|
| 5746 | 6061 | if (bridge) { |
|---|
| 5747 | 6062 | pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, |
|---|
| 5748 | 6063 | &cmd); |
|---|
| 5749 | | - if (decode == true) |
|---|
| 6064 | + if (decode) |
|---|
| 5750 | 6065 | cmd |= PCI_BRIDGE_CTL_VGA; |
|---|
| 5751 | 6066 | else |
|---|
| 5752 | 6067 | cmd &= ~PCI_BRIDGE_CTL_VGA; |
|---|
| .. | .. |
|---|
| 5758 | 6073 | return 0; |
|---|
| 5759 | 6074 | } |
|---|
| 5760 | 6075 | |
|---|
| 6076 | +#ifdef CONFIG_ACPI |
|---|
| 6077 | +bool pci_pr3_present(struct pci_dev *pdev) |
|---|
| 6078 | +{ |
|---|
| 6079 | + struct acpi_device *adev; |
|---|
| 6080 | + |
|---|
| 6081 | + if (acpi_disabled) |
|---|
| 6082 | + return false; |
|---|
| 6083 | + |
|---|
| 6084 | + adev = ACPI_COMPANION(&pdev->dev); |
|---|
| 6085 | + if (!adev) |
|---|
| 6086 | + return false; |
|---|
| 6087 | + |
|---|
| 6088 | + return adev->power.flags.power_resources && |
|---|
| 6089 | + acpi_has_method(adev->handle, "_PR3"); |
|---|
| 6090 | +} |
|---|
| 6091 | +EXPORT_SYMBOL_GPL(pci_pr3_present); |
|---|
| 6092 | +#endif |
|---|
| 6093 | + |
|---|
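A hedged usage sketch for the new helper: a driver might ask whether the upstream port exposes ACPI power resources (_PR3) before relying on the platform to power the device off. The flow below is illustrative and not taken from any particular driver:

```c
#include <linux/pci.h>

/* Illustrative only: decide whether the slot can be powered off via
 * ACPI power resources on the upstream bridge. */
static bool example_platform_can_poweroff(struct pci_dev *pdev)
{
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	return bridge && pci_pr3_present(bridge);
}
```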
| 5761 | 6094 | /** |
|---|
| 5762 | 6095 | * pci_add_dma_alias - Add a DMA devfn alias for a device |
|---|
| 5763 | 6096 | * @dev: the PCI device for which alias is added |
|---|
| 5764 | | - * @devfn: alias slot and function |
|---|
| 6097 | + * @devfn_from: alias slot and function |
|---|
| 6098 | + * @nr_devfns: number of subsequent devfns to alias |
|---|
| 5765 | 6099 | * |
|---|
| 5766 | 6100 | * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask |
|---|
| 5767 | 6101 | * which is used to program permissible bus-devfn source addresses for DMA |
|---|
| .. | .. |
|---|
| 5777 | 6111 | * cannot be left as a userspace activity). DMA aliases should therefore |
|---|
| 5778 | 6112 | * be configured via quirks, such as the PCI fixup header quirk. |
|---|
| 5779 | 6113 | */ |
|---|
| 5780 | | -void pci_add_dma_alias(struct pci_dev *dev, u8 devfn) |
|---|
| 6114 | +void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns) |
|---|
| 5781 | 6115 | { |
|---|
| 6116 | + int devfn_to; |
|---|
| 6117 | + |
|---|
| 6118 | + nr_devfns = min(nr_devfns, (unsigned) MAX_NR_DEVFNS - devfn_from); |
|---|
| 6119 | + devfn_to = devfn_from + nr_devfns - 1; |
|---|
| 6120 | + |
|---|
| 5782 | 6121 | if (!dev->dma_alias_mask) |
|---|
| 5783 | | - dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX), |
|---|
| 5784 | | - sizeof(long), GFP_KERNEL); |
|---|
| 6122 | + dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL); |
|---|
| 5785 | 6123 | if (!dev->dma_alias_mask) { |
|---|
| 5786 | 6124 | pci_warn(dev, "Unable to allocate DMA alias mask\n"); |
|---|
| 5787 | 6125 | return; |
|---|
| 5788 | 6126 | } |
|---|
| 5789 | 6127 | |
|---|
| 5790 | | - set_bit(devfn, dev->dma_alias_mask); |
|---|
| 5791 | | - pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n", |
|---|
| 5792 | | - PCI_SLOT(devfn), PCI_FUNC(devfn)); |
|---|
| 6128 | + bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns); |
|---|
| 6129 | + |
|---|
| 6130 | + if (nr_devfns == 1) |
|---|
| 6131 | + pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n", |
|---|
| 6132 | + PCI_SLOT(devfn_from), PCI_FUNC(devfn_from)); |
|---|
| 6133 | + else if (nr_devfns > 1) |
|---|
| 6134 | + pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n", |
|---|
| 6135 | + PCI_SLOT(devfn_from), PCI_FUNC(devfn_from), |
|---|
| 6136 | + PCI_SLOT(devfn_to), PCI_FUNC(devfn_to)); |
|---|
| 5793 | 6137 | } |
|---|
| 5794 | 6138 | |
|---|
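With the added nr_devfns argument, a header fixup can mark a whole range of requester IDs as DMA aliases in one call rather than looping per function. A sketch of such a quirk; the vendor/device IDs and the aliased slot are made up for illustration:

```c
#include <linux/pci.h>

/* Hypothetical quirk: the device may issue DMA with requester IDs
 * 00.0 through 00.7, so alias the whole slot in one call. */
static void quirk_example_alias_slot(struct pci_dev *pdev)
{
	pci_add_dma_alias(pdev, PCI_DEVFN(0, 0), 8);
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_example_alias_slot);
```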
| 5795 | 6139 | bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2) |
|---|
| .. | .. |
|---|
| 5797 | 6141 | return (dev1->dma_alias_mask && |
|---|
| 5798 | 6142 | test_bit(dev2->devfn, dev1->dma_alias_mask)) || |
|---|
| 5799 | 6143 | (dev2->dma_alias_mask && |
|---|
| 5800 | | - test_bit(dev1->devfn, dev2->dma_alias_mask)); |
|---|
| 6144 | + test_bit(dev1->devfn, dev2->dma_alias_mask)) || |
|---|
| 6145 | + pci_real_dma_dev(dev1) == dev2 || |
|---|
| 6146 | + pci_real_dma_dev(dev2) == dev1; |
|---|
| 5801 | 6147 | } |
|---|
| 5802 | 6148 | |
|---|
| 5803 | 6149 | bool pci_device_is_present(struct pci_dev *pdev) |
|---|
| 5804 | 6150 | { |
|---|
| 5805 | 6151 | u32 v; |
|---|
| 5806 | 6152 | |
|---|
| 6153 | + /* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */ |
|---|
| 6154 | + pdev = pci_physfn(pdev); |
|---|
| 5807 | 6155 | if (pci_dev_is_disconnected(pdev)) |
|---|
| 5808 | 6156 | return false; |
|---|
| 5809 | 6157 | return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); |
|---|
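The new pci_physfn() call matters because a VF's config space does not implement the Vendor ID register (reads return 0xffff), so presence has to be probed through the parent PF. Roughly, pci_physfn() behaves as sketched below (an approximation; the real helper in include/linux/pci.h only compiles the VF handling under CONFIG_PCI_IOV):

```c
#include <linux/pci.h>

/* Approximation of pci_physfn(): a VF maps to its parent PF, anything
 * else maps to itself.  Assumes CONFIG_PCI_IOV so dev->physfn exists. */
static struct pci_dev *example_physfn(struct pci_dev *dev)
{
	return dev->is_virtfn ? dev->physfn : dev;
}
```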
| .. | .. |
|---|
| 5821 | 6169 | } |
|---|
| 5822 | 6170 | EXPORT_SYMBOL_GPL(pci_ignore_hotplug); |
|---|
| 5823 | 6171 | |
|---|
| 6172 | +/** |
|---|
| 6173 | + * pci_real_dma_dev - Get PCI DMA device for PCI device |
|---|
| 6174 | + * @dev: the PCI device that may have a PCI DMA alias |
|---|
| 6175 | + * |
|---|
| 6176 | + * Permits the platform to provide architecture-specific functionality to |
|---|
| 6177 | + * devices needing to alias DMA to another PCI device on another PCI bus. If |
|---|
| 6178 | + * the PCI device is on the same bus, it is recommended to use |
|---|
| 6179 | + * pci_add_dma_alias(). This is the default implementation. Architecture |
|---|
| 6180 | + * implementations can override this. |
|---|
| 6181 | + */ |
|---|
| 6182 | +struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev) |
|---|
| 6183 | +{ |
|---|
| 6184 | + return dev; |
|---|
| 6185 | +} |
|---|
| 6186 | + |
|---|
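The weak default above simply returns the device itself; an architecture that funnels DMA for several endpoints through a single requester on another bus can override it. A heavily hedged sketch of what such an override could look like; example_lookup_dma_companion() is an invented placeholder for whatever platform-specific lookup an architecture would actually use:

```c
#include <linux/pci.h>

/* Made-up platform hook, declared here only so the sketch is complete. */
extern struct pci_dev *example_lookup_dma_companion(struct pci_dev *dev);

/* Illustrative arch override: hand back the companion device that
 * actually issues DMA on this device's behalf, if the platform has one. */
struct pci_dev *pci_real_dma_dev(struct pci_dev *dev)
{
	struct pci_dev *dma_dev = example_lookup_dma_companion(dev);

	return dma_dev ? dma_dev : dev;
}
```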
| 5824 | 6187 | resource_size_t __weak pcibios_default_alignment(void) |
|---|
| 5825 | 6188 | { |
|---|
| 5826 | 6189 | return 0; |
|---|
| 5827 | 6190 | } |
|---|
| 5828 | 6191 | |
|---|
| 5829 | | -#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE |
|---|
| 5830 | | -static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; |
|---|
| 6192 | +/* |
|---|
| 6193 | + * Arches that don't want to expose struct resource to userland as-is in |
|---|
| 6194 | + * sysfs and /proc can implement their own pci_resource_to_user(). |
|---|
| 6195 | + */ |
|---|
| 6196 | +void __weak pci_resource_to_user(const struct pci_dev *dev, int bar, |
|---|
| 6197 | + const struct resource *rsrc, |
|---|
| 6198 | + resource_size_t *start, resource_size_t *end) |
|---|
| 6199 | +{ |
|---|
| 6200 | + *start = rsrc->start; |
|---|
| 6201 | + *end = rsrc->end; |
|---|
| 6202 | +} |
|---|
| 6203 | + |
|---|
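pci_resource_to_user() determines the start/end values user space sees in per-device files such as /sys/bus/pci/devices/&lt;BDF&gt;/resource, which carry one "start end flags" hex triple per line. A small user-space reader of that format, for illustration (the device path is a placeholder):

```c
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	/* Placeholder BDF; substitute a device that exists on your system. */
	FILE *f = fopen("/sys/bus/pci/devices/0000:00:02.0/resource", "r");
	uint64_t start, end, flags;

	if (!f) {
		perror("open resource file");
		return 1;
	}

	while (fscanf(f, "%" SCNx64 " %" SCNx64 " %" SCNx64,
		      &start, &end, &flags) == 3) {
		if (start || end)	/* skip unused BARs (all zeroes) */
			printf("%#" PRIx64 " - %#" PRIx64 " (flags %#" PRIx64 ")\n",
			       start, end, flags);
	}

	fclose(f);
	return 0;
}
```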
| 6204 | +static char *resource_alignment_param; |
|---|
| 5831 | 6205 | static DEFINE_SPINLOCK(resource_alignment_lock); |
|---|
| 5832 | 6206 | |
|---|
| 5833 | 6207 | /** |
|---|
| .. | .. |
|---|
| 5848 | 6222 | |
|---|
| 5849 | 6223 | spin_lock(&resource_alignment_lock); |
|---|
| 5850 | 6224 | p = resource_alignment_param; |
|---|
| 5851 | | - if (!*p && !align) |
|---|
| 6225 | + if (!p || !*p) |
|---|
| 5852 | 6226 | goto out; |
|---|
| 5853 | 6227 | if (pci_has_flag(PCI_PROBE_ONLY)) { |
|---|
| 5854 | 6228 | align = 0; |
|---|
| .. | .. |
|---|
| 6001 | 6375 | * to enable the kernel to reassign new resource |
|---|
| 6002 | 6376 | * window later on. |
|---|
| 6003 | 6377 | */ |
|---|
| 6004 | | - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && |
|---|
| 6005 | | - (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { |
|---|
| 6378 | + if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { |
|---|
| 6006 | 6379 | for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { |
|---|
| 6007 | 6380 | r = &dev->resource[i]; |
|---|
| 6008 | 6381 | if (!(r->flags & IORESOURCE_MEM)) |
|---|
| .. | .. |
|---|
| 6015 | 6388 | } |
|---|
| 6016 | 6389 | } |
|---|
| 6017 | 6390 | |
|---|
| 6018 | | -static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count) |
|---|
| 6391 | +static ssize_t resource_alignment_show(struct bus_type *bus, char *buf) |
|---|
| 6019 | 6392 | { |
|---|
| 6020 | | - if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1) |
|---|
| 6021 | | - count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1; |
|---|
| 6393 | + size_t count = 0; |
|---|
| 6394 | + |
|---|
| 6022 | 6395 | spin_lock(&resource_alignment_lock); |
|---|
| 6023 | | - strncpy(resource_alignment_param, buf, count); |
|---|
| 6024 | | - resource_alignment_param[count] = '\0'; |
|---|
| 6396 | + if (resource_alignment_param) |
|---|
| 6397 | + count = scnprintf(buf, PAGE_SIZE, "%s", resource_alignment_param); |
|---|
| 6025 | 6398 | spin_unlock(&resource_alignment_lock); |
|---|
| 6399 | + |
|---|
| 6400 | + /* |
|---|
| 6401 | + * When set by the command line, resource_alignment_param will not |
|---|
| 6402 | + * have a trailing line feed, which is ugly. So conditionally add |
|---|
| 6403 | + * it here. |
|---|
| 6404 | + */ |
|---|
| 6405 | + if (count >= 2 && buf[count - 2] != '\n' && count < PAGE_SIZE - 1) { |
|---|
| 6406 | + buf[count - 1] = '\n'; |
|---|
| 6407 | + buf[count++] = 0; |
|---|
| 6408 | + } |
|---|
| 6409 | + |
|---|
| 6026 | 6410 | return count; |
|---|
| 6027 | 6411 | } |
|---|
| 6028 | 6412 | |
|---|
| 6029 | | -static ssize_t pci_get_resource_alignment_param(char *buf, size_t size) |
|---|
| 6030 | | -{ |
|---|
| 6031 | | - size_t count; |
|---|
| 6032 | | - spin_lock(&resource_alignment_lock); |
|---|
| 6033 | | - count = snprintf(buf, size, "%s", resource_alignment_param); |
|---|
| 6034 | | - spin_unlock(&resource_alignment_lock); |
|---|
| 6035 | | - return count; |
|---|
| 6036 | | -} |
|---|
| 6037 | | - |
|---|
| 6038 | | -static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf) |
|---|
| 6039 | | -{ |
|---|
| 6040 | | - return pci_get_resource_alignment_param(buf, PAGE_SIZE); |
|---|
| 6041 | | -} |
|---|
| 6042 | | - |
|---|
| 6043 | | -static ssize_t pci_resource_alignment_store(struct bus_type *bus, |
|---|
| 6413 | +static ssize_t resource_alignment_store(struct bus_type *bus, |
|---|
| 6044 | 6414 | const char *buf, size_t count) |
|---|
| 6045 | 6415 | { |
|---|
| 6046 | | - return pci_set_resource_alignment_param(buf, count); |
|---|
| 6416 | + char *param = kstrndup(buf, count, GFP_KERNEL); |
|---|
| 6417 | + |
|---|
| 6418 | + if (!param) |
|---|
| 6419 | + return -ENOMEM; |
|---|
| 6420 | + |
|---|
| 6421 | + spin_lock(&resource_alignment_lock); |
|---|
| 6422 | + kfree(resource_alignment_param); |
|---|
| 6423 | + resource_alignment_param = param; |
|---|
| 6424 | + spin_unlock(&resource_alignment_lock); |
|---|
| 6425 | + return count; |
|---|
| 6047 | 6426 | } |
|---|
| 6048 | 6427 | |
|---|
| 6049 | | -static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show, |
|---|
| 6050 | | - pci_resource_alignment_store); |
|---|
| 6428 | +static BUS_ATTR_RW(resource_alignment); |
|---|
| 6051 | 6429 | |
|---|
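The rename to resource_alignment_show()/resource_alignment_store() is what lets BUS_ATTR_RW() work: the macro builds a bus_attribute named bus_attr_&lt;name&gt; wired to &lt;name&gt;_show and &lt;name&gt;_store. A minimal sketch of the same pattern for a hypothetical attribute:

```c
#include <linux/device.h>
#include <linux/kernel.h>

/* Hypothetical read/write bus attribute following the <name>_show /
 * <name>_store naming convention that BUS_ATTR_RW() relies on. */
static ssize_t example_param_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static ssize_t example_param_store(struct bus_type *bus, const char *buf,
				   size_t count)
{
	/* A real attribute would parse and apply the value here. */
	return count;
}
static BUS_ATTR_RW(example_param);
```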
| 6052 | 6430 | static int __init pci_resource_alignment_sysfs_init(void) |
|---|
| 6053 | 6431 | { |
|---|
| .. | .. |
|---|
| 6078 | 6456 | |
|---|
| 6079 | 6457 | if (parent) |
|---|
| 6080 | 6458 | domain = of_get_pci_domain_nr(parent->of_node); |
|---|
| 6459 | + |
|---|
| 6081 | 6460 | /* |
|---|
| 6082 | 6461 | * Check DT domain and use_dt_domains values. |
|---|
| 6083 | 6462 | * |
|---|
| .. | .. |
|---|
| 6172 | 6551 | } else if (!strncmp(str, "cbmemsize=", 10)) { |
|---|
| 6173 | 6552 | pci_cardbus_mem_size = memparse(str + 10, &str); |
|---|
| 6174 | 6553 | } else if (!strncmp(str, "resource_alignment=", 19)) { |
|---|
| 6175 | | - pci_set_resource_alignment_param(str + 19, |
|---|
| 6176 | | - strlen(str + 19)); |
|---|
| 6554 | + resource_alignment_param = str + 19; |
|---|
| 6177 | 6555 | } else if (!strncmp(str, "ecrc=", 5)) { |
|---|
| 6178 | 6556 | pcie_ecrc_get_policy(str + 5); |
|---|
| 6179 | 6557 | } else if (!strncmp(str, "hpiosize=", 9)) { |
|---|
| 6180 | 6558 | pci_hotplug_io_size = memparse(str + 9, &str); |
|---|
| 6559 | + } else if (!strncmp(str, "hpmmiosize=", 11)) { |
|---|
| 6560 | + pci_hotplug_mmio_size = memparse(str + 11, &str); |
|---|
| 6561 | + } else if (!strncmp(str, "hpmmioprefsize=", 15)) { |
|---|
| 6562 | + pci_hotplug_mmio_pref_size = memparse(str + 15, &str); |
|---|
| 6181 | 6563 | } else if (!strncmp(str, "hpmemsize=", 10)) { |
|---|
| 6182 | | - pci_hotplug_mem_size = memparse(str + 10, &str); |
|---|
| 6564 | + pci_hotplug_mmio_size = memparse(str + 10, &str); |
|---|
| 6565 | + pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size; |
|---|
| 6183 | 6566 | } else if (!strncmp(str, "hpbussize=", 10)) { |
|---|
| 6184 | 6567 | pci_hotplug_bus_size = |
|---|
| 6185 | 6568 | simple_strtoul(str + 10, &str, 0); |
|---|
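All of these size options are parsed with memparse(), so values such as hpmmiosize=128M are scaled by their suffix. A simplified user-space sketch of that parsing (the real helper also understands T/P/E suffixes and advances the caller's cursor):

```c
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for memparse(): a number with an optional K/M/G suffix. */
static unsigned long long parse_size(const char *s)
{
	char *end;
	unsigned long long v = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': v <<= 10; /* fall through */
	case 'M': case 'm': v <<= 10; /* fall through */
	case 'K': case 'k': v <<= 10;
		break;
	}
	return v;
}

int main(void)
{
	printf("hpmmiosize=128M     -> %llu bytes\n", parse_size("128M"));
	printf("hpmmioprefsize=512M -> %llu bytes\n", parse_size("512M"));
	return 0;
}
```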
| .. | .. |
|---|
| 6198 | 6581 | } else if (!strncmp(str, "disable_acs_redir=", 18)) { |
|---|
| 6199 | 6582 | disable_acs_redir_param = str + 18; |
|---|
| 6200 | 6583 | } else { |
|---|
| 6201 | | - printk(KERN_ERR "PCI: Unknown option `%s'\n", |
|---|
| 6202 | | - str); |
|---|
| 6584 | + pr_err("PCI: Unknown option `%s'\n", str); |
|---|
| 6203 | 6585 | } |
|---|
| 6204 | 6586 | } |
|---|
| 6205 | 6587 | str = k; |
|---|
| .. | .. |
|---|
| 6209 | 6591 | early_param("pci", pci_setup); |
|---|
| 6210 | 6592 | |
|---|
| 6211 | 6593 | /* |
|---|
| 6212 | | - * 'disable_acs_redir_param' is initialized in pci_setup(), above, to point |
|---|
| 6213 | | - * to data in the __initdata section which will be freed after the init |
|---|
| 6214 | | - * sequence is complete. We can't allocate memory in pci_setup() because some |
|---|
| 6215 | | - * architectures do not have any memory allocation service available during |
|---|
| 6216 | | - * an early_param() call. So we allocate memory and copy the variable here |
|---|
| 6217 | | - * before the init section is freed. |
|---|
| 6594 | + * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized |
|---|
| 6595 | + * in pci_setup(), above, to point to data in the __initdata section which |
|---|
| 6596 | + * will be freed after the init sequence is complete. We can't allocate memory |
|---|
| 6597 | + * in pci_setup() because some architectures do not have any memory allocation |
|---|
| 6598 | + * service available during an early_param() call. So we allocate memory and |
|---|
| 6599 | + * copy the variables here before the init section is freed. |
|---|
| 6600 | + * |
|---|
| 6218 | 6601 | */ |
|---|
| 6219 | 6602 | static int __init pci_realloc_setup_params(void) |
|---|
| 6220 | 6603 | { |
|---|
| 6604 | + resource_alignment_param = kstrdup(resource_alignment_param, |
|---|
| 6605 | + GFP_KERNEL); |
|---|
| 6221 | 6606 | disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL); |
|---|
| 6222 | 6607 | |
|---|
| 6223 | 6608 | return 0; |
|---|
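Reduced to a minimal sketch, the lifetime pattern looks like this: the early_param() handler only records the pointer (allocators may not be up that early, and the string lives in init memory), and a later initcall duplicates it before the init sections are discarded. All names below are hypothetical, and the pure_initcall() level is one reasonable choice rather than a requirement:

```c
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical option mirroring the pattern above. */
static const char *example_opt_param;

static int __init example_opt_setup(char *str)
{
	example_opt_param = str;	/* still points into __initdata */
	return 0;
}
early_param("example_opt", example_opt_setup);

static int __init example_opt_realloc(void)
{
	/* kstrdup(NULL, ...) is a safe no-op if the option was never given */
	example_opt_param = kstrdup(example_opt_param, GFP_KERNEL);
	return 0;
}
pure_initcall(example_opt_realloc);
```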