.. | .. |
---|
14 | 14 | #include <linux/msi.h> |
---|
15 | 15 | #include <linux/pci_hotplug.h> |
---|
16 | 16 | #include <linux/module.h> |
---|
17 | | -#include <linux/pci-aspm.h> |
---|
18 | 17 | #include <linux/pci-acpi.h> |
---|
19 | 18 | #include <linux/pm_runtime.h> |
---|
20 | 19 | #include <linux/pm_qos.h> |
---|
.. | .. |
---|
118 | 117 | return (phys_addr_t)mcfg_addr; |
---|
119 | 118 | } |
---|
120 | 119 | |
---|
| 120 | +/* _HPX PCI Setting Record (Type 0); same as _HPP */ |
---|
| 121 | +struct hpx_type0 { |
---|
| 122 | + u32 revision; /* Not present in _HPP */ |
---|
| 123 | + u8 cache_line_size; /* Not applicable to PCIe */ |
---|
| 124 | + u8 latency_timer; /* Not applicable to PCIe */ |
---|
| 125 | + u8 enable_serr; |
---|
| 126 | + u8 enable_perr; |
---|
| 127 | +}; |
---|
| 128 | + |
---|
| 129 | +static struct hpx_type0 pci_default_type0 = { |
---|
| 130 | + .revision = 1, |
---|
| 131 | + .cache_line_size = 8, |
---|
| 132 | + .latency_timer = 0x40, |
---|
| 133 | + .enable_serr = 0, |
---|
| 134 | + .enable_perr = 0, |
---|
| 135 | +}; |
---|
| 136 | + |
---|
| 137 | +static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx) |
---|
| 138 | +{ |
---|
| 139 | + u16 pci_cmd, pci_bctl; |
---|
| 140 | + |
---|
| 141 | + if (!hpx) |
---|
| 142 | + hpx = &pci_default_type0; |
---|
| 143 | + |
---|
| 144 | + if (hpx->revision > 1) { |
---|
| 145 | + pci_warn(dev, "PCI settings rev %d not supported; using defaults\n", |
---|
| 146 | + hpx->revision); |
---|
| 147 | + hpx = &pci_default_type0; |
---|
| 148 | + } |
---|
| 149 | + |
---|
| 150 | + pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size); |
---|
| 151 | + pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer); |
---|
| 152 | + pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); |
---|
| 153 | + if (hpx->enable_serr) |
---|
| 154 | + pci_cmd |= PCI_COMMAND_SERR; |
---|
| 155 | + if (hpx->enable_perr) |
---|
| 156 | + pci_cmd |= PCI_COMMAND_PARITY; |
---|
| 157 | + pci_write_config_word(dev, PCI_COMMAND, pci_cmd); |
---|
| 158 | + |
---|
| 159 | + /* Program bridge control value */ |
---|
| 160 | + if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { |
---|
| 161 | + pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, |
---|
| 162 | + hpx->latency_timer); |
---|
| 163 | + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); |
---|
| 164 | + if (hpx->enable_perr) |
---|
| 165 | + pci_bctl |= PCI_BRIDGE_CTL_PARITY; |
---|
| 166 | + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); |
---|
| 167 | + } |
---|
| 168 | +} |
---|
| 169 | + |
---|
121 | 170 | static acpi_status decode_type0_hpx_record(union acpi_object *record, |
---|
122 | | - struct hotplug_params *hpx) |
---|
| 171 | + struct hpx_type0 *hpx0) |
---|
123 | 172 | { |
---|
124 | 173 | int i; |
---|
125 | 174 | union acpi_object *fields = record->package.elements; |
---|
.. | .. |
---|
132 | 181 | for (i = 2; i < 6; i++) |
---|
133 | 182 | if (fields[i].type != ACPI_TYPE_INTEGER) |
---|
134 | 183 | return AE_ERROR; |
---|
135 | | - hpx->t0 = &hpx->type0_data; |
---|
136 | | - hpx->t0->revision = revision; |
---|
137 | | - hpx->t0->cache_line_size = fields[2].integer.value; |
---|
138 | | - hpx->t0->latency_timer = fields[3].integer.value; |
---|
139 | | - hpx->t0->enable_serr = fields[4].integer.value; |
---|
140 | | - hpx->t0->enable_perr = fields[5].integer.value; |
---|
| 184 | + hpx0->revision = revision; |
---|
| 185 | + hpx0->cache_line_size = fields[2].integer.value; |
---|
| 186 | + hpx0->latency_timer = fields[3].integer.value; |
---|
| 187 | + hpx0->enable_serr = fields[4].integer.value; |
---|
| 188 | + hpx0->enable_perr = fields[5].integer.value; |
---|
141 | 189 | break; |
---|
142 | 190 | default: |
---|
143 | | - printk(KERN_WARNING |
---|
144 | | - "%s: Type 0 Revision %d record not supported\n", |
---|
| 191 | + pr_warn("%s: Type 0 Revision %d record not supported\n", |
---|
145 | 192 | __func__, revision); |
---|
146 | 193 | return AE_ERROR; |
---|
147 | 194 | } |
---|
148 | 195 | return AE_OK; |
---|
149 | 196 | } |
---|
150 | 197 | |
---|
| 198 | +/* _HPX PCI-X Setting Record (Type 1) */ |
---|
| 199 | +struct hpx_type1 { |
---|
| 200 | + u32 revision; |
---|
| 201 | + u8 max_mem_read; |
---|
| 202 | + u8 avg_max_split; |
---|
| 203 | + u16 tot_max_split; |
---|
| 204 | +}; |
---|
| 205 | + |
---|
| 206 | +static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx) |
---|
| 207 | +{ |
---|
| 208 | + int pos; |
---|
| 209 | + |
---|
| 210 | + if (!hpx) |
---|
| 211 | + return; |
---|
| 212 | + |
---|
| 213 | + pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); |
---|
| 214 | + if (!pos) |
---|
| 215 | + return; |
---|
| 216 | + |
---|
| 217 | + pci_warn(dev, "PCI-X settings not supported\n"); |
---|
| 218 | +} |
---|
| 219 | + |
---|
151 | 220 | static acpi_status decode_type1_hpx_record(union acpi_object *record, |
---|
152 | | - struct hotplug_params *hpx) |
---|
| 221 | + struct hpx_type1 *hpx1) |
---|
153 | 222 | { |
---|
154 | 223 | int i; |
---|
155 | 224 | union acpi_object *fields = record->package.elements; |
---|
.. | .. |
---|
162 | 231 | for (i = 2; i < 5; i++) |
---|
163 | 232 | if (fields[i].type != ACPI_TYPE_INTEGER) |
---|
164 | 233 | return AE_ERROR; |
---|
165 | | - hpx->t1 = &hpx->type1_data; |
---|
166 | | - hpx->t1->revision = revision; |
---|
167 | | - hpx->t1->max_mem_read = fields[2].integer.value; |
---|
168 | | - hpx->t1->avg_max_split = fields[3].integer.value; |
---|
169 | | - hpx->t1->tot_max_split = fields[4].integer.value; |
---|
| 234 | + hpx1->revision = revision; |
---|
| 235 | + hpx1->max_mem_read = fields[2].integer.value; |
---|
| 236 | + hpx1->avg_max_split = fields[3].integer.value; |
---|
| 237 | + hpx1->tot_max_split = fields[4].integer.value; |
---|
170 | 238 | break; |
---|
171 | 239 | default: |
---|
172 | | - printk(KERN_WARNING |
---|
173 | | - "%s: Type 1 Revision %d record not supported\n", |
---|
| 240 | + pr_warn("%s: Type 1 Revision %d record not supported\n", |
---|
174 | 241 | __func__, revision); |
---|
175 | 242 | return AE_ERROR; |
---|
176 | 243 | } |
---|
177 | 244 | return AE_OK; |
---|
178 | 245 | } |
---|
179 | 246 | |
---|
| 247 | +static bool pcie_root_rcb_set(struct pci_dev *dev) |
---|
| 248 | +{ |
---|
| 249 | + struct pci_dev *rp = pcie_find_root_port(dev); |
---|
| 250 | + u16 lnkctl; |
---|
| 251 | + |
---|
| 252 | + if (!rp) |
---|
| 253 | + return false; |
---|
| 254 | + |
---|
| 255 | + pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl); |
---|
| 256 | + if (lnkctl & PCI_EXP_LNKCTL_RCB) |
---|
| 257 | + return true; |
---|
| 258 | + |
---|
| 259 | + return false; |
---|
| 260 | +} |
---|
| 261 | + |
---|
| 262 | +/* _HPX PCI Express Setting Record (Type 2) */ |
---|
| 263 | +struct hpx_type2 { |
---|
| 264 | + u32 revision; |
---|
| 265 | + u32 unc_err_mask_and; |
---|
| 266 | + u32 unc_err_mask_or; |
---|
| 267 | + u32 unc_err_sever_and; |
---|
| 268 | + u32 unc_err_sever_or; |
---|
| 269 | + u32 cor_err_mask_and; |
---|
| 270 | + u32 cor_err_mask_or; |
---|
| 271 | + u32 adv_err_cap_and; |
---|
| 272 | + u32 adv_err_cap_or; |
---|
| 273 | + u16 pci_exp_devctl_and; |
---|
| 274 | + u16 pci_exp_devctl_or; |
---|
| 275 | + u16 pci_exp_lnkctl_and; |
---|
| 276 | + u16 pci_exp_lnkctl_or; |
---|
| 277 | + u32 sec_unc_err_sever_and; |
---|
| 278 | + u32 sec_unc_err_sever_or; |
---|
| 279 | + u32 sec_unc_err_mask_and; |
---|
| 280 | + u32 sec_unc_err_mask_or; |
---|
| 281 | +}; |
---|
| 282 | + |
---|
| 283 | +static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx) |
---|
| 284 | +{ |
---|
| 285 | + int pos; |
---|
| 286 | + u32 reg32; |
---|
| 287 | + |
---|
| 288 | + if (!hpx) |
---|
| 289 | + return; |
---|
| 290 | + |
---|
| 291 | + if (!pci_is_pcie(dev)) |
---|
| 292 | + return; |
---|
| 293 | + |
---|
| 294 | + if (hpx->revision > 1) { |
---|
| 295 | + pci_warn(dev, "PCIe settings rev %d not supported\n", |
---|
| 296 | + hpx->revision); |
---|
| 297 | + return; |
---|
| 298 | + } |
---|
| 299 | + |
---|
| 300 | + /* |
---|
| 301 | + * Don't allow _HPX to change MPS or MRRS settings. We manage |
---|
| 302 | + * those to make sure they're consistent with the rest of the |
---|
| 303 | + * platform. |
---|
| 304 | + */ |
---|
| 305 | + hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD | |
---|
| 306 | + PCI_EXP_DEVCTL_READRQ; |
---|
| 307 | + hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD | |
---|
| 308 | + PCI_EXP_DEVCTL_READRQ); |
---|
| 309 | + |
---|
| 310 | + /* Initialize Device Control Register */ |
---|
| 311 | + pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, |
---|
| 312 | + ~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or); |
---|
| 313 | + |
---|
| 314 | + /* Initialize Link Control Register */ |
---|
| 315 | + if (pcie_cap_has_lnkctl(dev)) { |
---|
| 316 | + |
---|
| 317 | + /* |
---|
| 318 | + * If the Root Port supports Read Completion Boundary of |
---|
| 319 | + * 128, set RCB to 128. Otherwise, clear it. |
---|
| 320 | + */ |
---|
| 321 | + hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB; |
---|
| 322 | + hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB; |
---|
| 323 | + if (pcie_root_rcb_set(dev)) |
---|
| 324 | + hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB; |
---|
| 325 | + |
---|
| 326 | + pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, |
---|
| 327 | + ~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or); |
---|
| 328 | + } |
---|
| 329 | + |
---|
| 330 | + /* Find Advanced Error Reporting Enhanced Capability */ |
---|
| 331 | + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
---|
| 332 | + if (!pos) |
---|
| 333 | + return; |
---|
| 334 | + |
---|
| 335 | + /* Initialize Uncorrectable Error Mask Register */ |
---|
| 336 | + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, ®32); |
---|
| 337 | + reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or; |
---|
| 338 | + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32); |
---|
| 339 | + |
---|
| 340 | + /* Initialize Uncorrectable Error Severity Register */ |
---|
| 341 | + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, ®32); |
---|
| 342 | + reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or; |
---|
| 343 | + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32); |
---|
| 344 | + |
---|
| 345 | + /* Initialize Correctable Error Mask Register */ |
---|
| 346 | + pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, ®32); |
---|
| 347 | + reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or; |
---|
| 348 | + pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32); |
---|
| 349 | + |
---|
| 350 | + /* Initialize Advanced Error Capabilities and Control Register */ |
---|
| 351 | + pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); |
---|
| 352 | + reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or; |
---|
| 353 | + |
---|
| 354 | + /* Don't enable ECRC generation or checking if unsupported */ |
---|
| 355 | + if (!(reg32 & PCI_ERR_CAP_ECRC_GENC)) |
---|
| 356 | + reg32 &= ~PCI_ERR_CAP_ECRC_GENE; |
---|
| 357 | + if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC)) |
---|
| 358 | + reg32 &= ~PCI_ERR_CAP_ECRC_CHKE; |
---|
| 359 | + pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); |
---|
| 360 | + |
---|
| 361 | + /* |
---|
| 362 | + * FIXME: The following two registers are not supported yet. |
---|
| 363 | + * |
---|
| 364 | + * o Secondary Uncorrectable Error Severity Register |
---|
| 365 | + * o Secondary Uncorrectable Error Mask Register |
---|
| 366 | + */ |
---|
| 367 | +} |
---|
| 368 | + |
---|
180 | 369 | static acpi_status decode_type2_hpx_record(union acpi_object *record, |
---|
181 | | - struct hotplug_params *hpx) |
---|
| 370 | + struct hpx_type2 *hpx2) |
---|
182 | 371 | { |
---|
183 | 372 | int i; |
---|
184 | 373 | union acpi_object *fields = record->package.elements; |
---|
.. | .. |
---|
191 | 380 | for (i = 2; i < 18; i++) |
---|
192 | 381 | if (fields[i].type != ACPI_TYPE_INTEGER) |
---|
193 | 382 | return AE_ERROR; |
---|
194 | | - hpx->t2 = &hpx->type2_data; |
---|
195 | | - hpx->t2->revision = revision; |
---|
196 | | - hpx->t2->unc_err_mask_and = fields[2].integer.value; |
---|
197 | | - hpx->t2->unc_err_mask_or = fields[3].integer.value; |
---|
198 | | - hpx->t2->unc_err_sever_and = fields[4].integer.value; |
---|
199 | | - hpx->t2->unc_err_sever_or = fields[5].integer.value; |
---|
200 | | - hpx->t2->cor_err_mask_and = fields[6].integer.value; |
---|
201 | | - hpx->t2->cor_err_mask_or = fields[7].integer.value; |
---|
202 | | - hpx->t2->adv_err_cap_and = fields[8].integer.value; |
---|
203 | | - hpx->t2->adv_err_cap_or = fields[9].integer.value; |
---|
204 | | - hpx->t2->pci_exp_devctl_and = fields[10].integer.value; |
---|
205 | | - hpx->t2->pci_exp_devctl_or = fields[11].integer.value; |
---|
206 | | - hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value; |
---|
207 | | - hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value; |
---|
208 | | - hpx->t2->sec_unc_err_sever_and = fields[14].integer.value; |
---|
209 | | - hpx->t2->sec_unc_err_sever_or = fields[15].integer.value; |
---|
210 | | - hpx->t2->sec_unc_err_mask_and = fields[16].integer.value; |
---|
211 | | - hpx->t2->sec_unc_err_mask_or = fields[17].integer.value; |
---|
| 383 | + hpx2->revision = revision; |
---|
| 384 | + hpx2->unc_err_mask_and = fields[2].integer.value; |
---|
| 385 | + hpx2->unc_err_mask_or = fields[3].integer.value; |
---|
| 386 | + hpx2->unc_err_sever_and = fields[4].integer.value; |
---|
| 387 | + hpx2->unc_err_sever_or = fields[5].integer.value; |
---|
| 388 | + hpx2->cor_err_mask_and = fields[6].integer.value; |
---|
| 389 | + hpx2->cor_err_mask_or = fields[7].integer.value; |
---|
| 390 | + hpx2->adv_err_cap_and = fields[8].integer.value; |
---|
| 391 | + hpx2->adv_err_cap_or = fields[9].integer.value; |
---|
| 392 | + hpx2->pci_exp_devctl_and = fields[10].integer.value; |
---|
| 393 | + hpx2->pci_exp_devctl_or = fields[11].integer.value; |
---|
| 394 | + hpx2->pci_exp_lnkctl_and = fields[12].integer.value; |
---|
| 395 | + hpx2->pci_exp_lnkctl_or = fields[13].integer.value; |
---|
| 396 | + hpx2->sec_unc_err_sever_and = fields[14].integer.value; |
---|
| 397 | + hpx2->sec_unc_err_sever_or = fields[15].integer.value; |
---|
| 398 | + hpx2->sec_unc_err_mask_and = fields[16].integer.value; |
---|
| 399 | + hpx2->sec_unc_err_mask_or = fields[17].integer.value; |
---|
212 | 400 | break; |
---|
213 | 401 | default: |
---|
214 | | - printk(KERN_WARNING |
---|
215 | | - "%s: Type 2 Revision %d record not supported\n", |
---|
| 402 | + pr_warn("%s: Type 2 Revision %d record not supported\n", |
---|
216 | 403 | __func__, revision); |
---|
217 | 404 | return AE_ERROR; |
---|
218 | 405 | } |
---|
219 | 406 | return AE_OK; |
---|
220 | 407 | } |
---|
221 | 408 | |
---|
222 | | -static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx) |
---|
| 409 | +/* _HPX PCI Express Setting Record (Type 3) */ |
---|
| 410 | +struct hpx_type3 { |
---|
| 411 | + u16 device_type; |
---|
| 412 | + u16 function_type; |
---|
| 413 | + u16 config_space_location; |
---|
| 414 | + u16 pci_exp_cap_id; |
---|
| 415 | + u16 pci_exp_cap_ver; |
---|
| 416 | + u16 pci_exp_vendor_id; |
---|
| 417 | + u16 dvsec_id; |
---|
| 418 | + u16 dvsec_rev; |
---|
| 419 | + u16 match_offset; |
---|
| 420 | + u32 match_mask_and; |
---|
| 421 | + u32 match_value; |
---|
| 422 | + u16 reg_offset; |
---|
| 423 | + u32 reg_mask_and; |
---|
| 424 | + u32 reg_mask_or; |
---|
| 425 | +}; |
---|
| 426 | + |
---|
| 427 | +enum hpx_type3_dev_type { |
---|
| 428 | + HPX_TYPE_ENDPOINT = BIT(0), |
---|
| 429 | + HPX_TYPE_LEG_END = BIT(1), |
---|
| 430 | + HPX_TYPE_RC_END = BIT(2), |
---|
| 431 | + HPX_TYPE_RC_EC = BIT(3), |
---|
| 432 | + HPX_TYPE_ROOT_PORT = BIT(4), |
---|
| 433 | + HPX_TYPE_UPSTREAM = BIT(5), |
---|
| 434 | + HPX_TYPE_DOWNSTREAM = BIT(6), |
---|
| 435 | + HPX_TYPE_PCI_BRIDGE = BIT(7), |
---|
| 436 | + HPX_TYPE_PCIE_BRIDGE = BIT(8), |
---|
| 437 | +}; |
---|
| 438 | + |
---|
| 439 | +static u16 hpx3_device_type(struct pci_dev *dev) |
---|
| 440 | +{ |
---|
| 441 | + u16 pcie_type = pci_pcie_type(dev); |
---|
| 442 | + static const int pcie_to_hpx3_type[] = { |
---|
| 443 | + [PCI_EXP_TYPE_ENDPOINT] = HPX_TYPE_ENDPOINT, |
---|
| 444 | + [PCI_EXP_TYPE_LEG_END] = HPX_TYPE_LEG_END, |
---|
| 445 | + [PCI_EXP_TYPE_RC_END] = HPX_TYPE_RC_END, |
---|
| 446 | + [PCI_EXP_TYPE_RC_EC] = HPX_TYPE_RC_EC, |
---|
| 447 | + [PCI_EXP_TYPE_ROOT_PORT] = HPX_TYPE_ROOT_PORT, |
---|
| 448 | + [PCI_EXP_TYPE_UPSTREAM] = HPX_TYPE_UPSTREAM, |
---|
| 449 | + [PCI_EXP_TYPE_DOWNSTREAM] = HPX_TYPE_DOWNSTREAM, |
---|
| 450 | + [PCI_EXP_TYPE_PCI_BRIDGE] = HPX_TYPE_PCI_BRIDGE, |
---|
| 451 | + [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE, |
---|
| 452 | + }; |
---|
| 453 | + |
---|
| 454 | + if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type)) |
---|
| 455 | + return 0; |
---|
| 456 | + |
---|
| 457 | + return pcie_to_hpx3_type[pcie_type]; |
---|
| 458 | +} |
---|
| 459 | + |
---|
| 460 | +enum hpx_type3_fn_type { |
---|
| 461 | + HPX_FN_NORMAL = BIT(0), |
---|
| 462 | + HPX_FN_SRIOV_PHYS = BIT(1), |
---|
| 463 | + HPX_FN_SRIOV_VIRT = BIT(2), |
---|
| 464 | +}; |
---|
| 465 | + |
---|
| 466 | +static u8 hpx3_function_type(struct pci_dev *dev) |
---|
| 467 | +{ |
---|
| 468 | + if (dev->is_virtfn) |
---|
| 469 | + return HPX_FN_SRIOV_VIRT; |
---|
| 470 | + else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0) |
---|
| 471 | + return HPX_FN_SRIOV_PHYS; |
---|
| 472 | + else |
---|
| 473 | + return HPX_FN_NORMAL; |
---|
| 474 | +} |
---|
| 475 | + |
---|
| 476 | +static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id) |
---|
| 477 | +{ |
---|
| 478 | + u8 cap_ver = hpx3_cap_id & 0xf; |
---|
| 479 | + |
---|
| 480 | + if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id) |
---|
| 481 | + return true; |
---|
| 482 | + else if (cap_ver == pcie_cap_id) |
---|
| 483 | + return true; |
---|
| 484 | + |
---|
| 485 | + return false; |
---|
| 486 | +} |
---|
| 487 | + |
---|
/* Where in config space an _HPX3 descriptor's offsets are based */
enum hpx_type3_cfg_loc {
	HPX_CFG_PCICFG		= 0,	/* Plain config space */
	HPX_CFG_PCIE_CAP	= 1,	/* Relative to a PCI capability */
	HPX_CFG_PCIE_CAP_EXT	= 2,	/* Relative to an extended capability */
	HPX_CFG_VEND_CAP	= 3,	/* Vendor-specific (unsupported) */
	HPX_CFG_DVSEC		= 4,	/* DVSEC (unsupported) */
	HPX_CFG_MAX,
};
---|
| 496 | + |
---|
| 497 | +static void program_hpx_type3_register(struct pci_dev *dev, |
---|
| 498 | + const struct hpx_type3 *reg) |
---|
| 499 | +{ |
---|
| 500 | + u32 match_reg, write_reg, header, orig_value; |
---|
| 501 | + u16 pos; |
---|
| 502 | + |
---|
| 503 | + if (!(hpx3_device_type(dev) & reg->device_type)) |
---|
| 504 | + return; |
---|
| 505 | + |
---|
| 506 | + if (!(hpx3_function_type(dev) & reg->function_type)) |
---|
| 507 | + return; |
---|
| 508 | + |
---|
| 509 | + switch (reg->config_space_location) { |
---|
| 510 | + case HPX_CFG_PCICFG: |
---|
| 511 | + pos = 0; |
---|
| 512 | + break; |
---|
| 513 | + case HPX_CFG_PCIE_CAP: |
---|
| 514 | + pos = pci_find_capability(dev, reg->pci_exp_cap_id); |
---|
| 515 | + if (pos == 0) |
---|
| 516 | + return; |
---|
| 517 | + |
---|
| 518 | + break; |
---|
| 519 | + case HPX_CFG_PCIE_CAP_EXT: |
---|
| 520 | + pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id); |
---|
| 521 | + if (pos == 0) |
---|
| 522 | + return; |
---|
| 523 | + |
---|
| 524 | + pci_read_config_dword(dev, pos, &header); |
---|
| 525 | + if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header), |
---|
| 526 | + reg->pci_exp_cap_ver)) |
---|
| 527 | + return; |
---|
| 528 | + |
---|
| 529 | + break; |
---|
| 530 | + case HPX_CFG_VEND_CAP: |
---|
| 531 | + case HPX_CFG_DVSEC: |
---|
| 532 | + default: |
---|
| 533 | + pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location"); |
---|
| 534 | + return; |
---|
| 535 | + } |
---|
| 536 | + |
---|
| 537 | + pci_read_config_dword(dev, pos + reg->match_offset, &match_reg); |
---|
| 538 | + |
---|
| 539 | + if ((match_reg & reg->match_mask_and) != reg->match_value) |
---|
| 540 | + return; |
---|
| 541 | + |
---|
| 542 | + pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg); |
---|
| 543 | + orig_value = write_reg; |
---|
| 544 | + write_reg &= reg->reg_mask_and; |
---|
| 545 | + write_reg |= reg->reg_mask_or; |
---|
| 546 | + |
---|
| 547 | + if (orig_value == write_reg) |
---|
| 548 | + return; |
---|
| 549 | + |
---|
| 550 | + pci_write_config_dword(dev, pos + reg->reg_offset, write_reg); |
---|
| 551 | + |
---|
| 552 | + pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x", |
---|
| 553 | + pos, orig_value, write_reg); |
---|
| 554 | +} |
---|
| 555 | + |
---|
/* Apply an _HPX Type 3 descriptor; only meaningful for PCIe devices */
static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
{
	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	program_hpx_type3_register(dev, hpx);
}
---|
| 566 | + |
---|
| 567 | +static void parse_hpx3_register(struct hpx_type3 *hpx3_reg, |
---|
| 568 | + union acpi_object *reg_fields) |
---|
| 569 | +{ |
---|
| 570 | + hpx3_reg->device_type = reg_fields[0].integer.value; |
---|
| 571 | + hpx3_reg->function_type = reg_fields[1].integer.value; |
---|
| 572 | + hpx3_reg->config_space_location = reg_fields[2].integer.value; |
---|
| 573 | + hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value; |
---|
| 574 | + hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value; |
---|
| 575 | + hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value; |
---|
| 576 | + hpx3_reg->dvsec_id = reg_fields[6].integer.value; |
---|
| 577 | + hpx3_reg->dvsec_rev = reg_fields[7].integer.value; |
---|
| 578 | + hpx3_reg->match_offset = reg_fields[8].integer.value; |
---|
| 579 | + hpx3_reg->match_mask_and = reg_fields[9].integer.value; |
---|
| 580 | + hpx3_reg->match_value = reg_fields[10].integer.value; |
---|
| 581 | + hpx3_reg->reg_offset = reg_fields[11].integer.value; |
---|
| 582 | + hpx3_reg->reg_mask_and = reg_fields[12].integer.value; |
---|
| 583 | + hpx3_reg->reg_mask_or = reg_fields[13].integer.value; |
---|
| 584 | +} |
---|
| 585 | + |
---|
| 586 | +static acpi_status program_type3_hpx_record(struct pci_dev *dev, |
---|
| 587 | + union acpi_object *record) |
---|
| 588 | +{ |
---|
| 589 | + union acpi_object *fields = record->package.elements; |
---|
| 590 | + u32 desc_count, expected_length, revision; |
---|
| 591 | + union acpi_object *reg_fields; |
---|
| 592 | + struct hpx_type3 hpx3; |
---|
| 593 | + int i; |
---|
| 594 | + |
---|
| 595 | + revision = fields[1].integer.value; |
---|
| 596 | + switch (revision) { |
---|
| 597 | + case 1: |
---|
| 598 | + desc_count = fields[2].integer.value; |
---|
| 599 | + expected_length = 3 + desc_count * 14; |
---|
| 600 | + |
---|
| 601 | + if (record->package.count != expected_length) |
---|
| 602 | + return AE_ERROR; |
---|
| 603 | + |
---|
| 604 | + for (i = 2; i < expected_length; i++) |
---|
| 605 | + if (fields[i].type != ACPI_TYPE_INTEGER) |
---|
| 606 | + return AE_ERROR; |
---|
| 607 | + |
---|
| 608 | + for (i = 0; i < desc_count; i++) { |
---|
| 609 | + reg_fields = fields + 3 + i * 14; |
---|
| 610 | + parse_hpx3_register(&hpx3, reg_fields); |
---|
| 611 | + program_hpx_type3(dev, &hpx3); |
---|
| 612 | + } |
---|
| 613 | + |
---|
| 614 | + break; |
---|
| 615 | + default: |
---|
| 616 | + printk(KERN_WARNING |
---|
| 617 | + "%s: Type 3 Revision %d record not supported\n", |
---|
| 618 | + __func__, revision); |
---|
| 619 | + return AE_ERROR; |
---|
| 620 | + } |
---|
| 621 | + return AE_OK; |
---|
| 622 | +} |
---|
| 623 | + |
---|
| 624 | +static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle) |
---|
223 | 625 | { |
---|
224 | 626 | acpi_status status; |
---|
225 | 627 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; |
---|
226 | 628 | union acpi_object *package, *record, *fields; |
---|
| 629 | + struct hpx_type0 hpx0; |
---|
| 630 | + struct hpx_type1 hpx1; |
---|
| 631 | + struct hpx_type2 hpx2; |
---|
227 | 632 | u32 type; |
---|
228 | 633 | int i; |
---|
229 | | - |
---|
230 | | - /* Clear the return buffer with zeros */ |
---|
231 | | - memset(hpx, 0, sizeof(struct hotplug_params)); |
---|
232 | 634 | |
---|
233 | 635 | status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer); |
---|
234 | 636 | if (ACPI_FAILURE(status)) |
---|
.. | .. |
---|
257 | 659 | type = fields[0].integer.value; |
---|
258 | 660 | switch (type) { |
---|
259 | 661 | case 0: |
---|
260 | | - status = decode_type0_hpx_record(record, hpx); |
---|
| 662 | + memset(&hpx0, 0, sizeof(hpx0)); |
---|
| 663 | + status = decode_type0_hpx_record(record, &hpx0); |
---|
261 | 664 | if (ACPI_FAILURE(status)) |
---|
262 | 665 | goto exit; |
---|
| 666 | + program_hpx_type0(dev, &hpx0); |
---|
263 | 667 | break; |
---|
264 | 668 | case 1: |
---|
265 | | - status = decode_type1_hpx_record(record, hpx); |
---|
| 669 | + memset(&hpx1, 0, sizeof(hpx1)); |
---|
| 670 | + status = decode_type1_hpx_record(record, &hpx1); |
---|
266 | 671 | if (ACPI_FAILURE(status)) |
---|
267 | 672 | goto exit; |
---|
| 673 | + program_hpx_type1(dev, &hpx1); |
---|
268 | 674 | break; |
---|
269 | 675 | case 2: |
---|
270 | | - status = decode_type2_hpx_record(record, hpx); |
---|
| 676 | + memset(&hpx2, 0, sizeof(hpx2)); |
---|
| 677 | + status = decode_type2_hpx_record(record, &hpx2); |
---|
| 678 | + if (ACPI_FAILURE(status)) |
---|
| 679 | + goto exit; |
---|
| 680 | + program_hpx_type2(dev, &hpx2); |
---|
| 681 | + break; |
---|
| 682 | + case 3: |
---|
| 683 | + status = program_type3_hpx_record(dev, record); |
---|
271 | 684 | if (ACPI_FAILURE(status)) |
---|
272 | 685 | goto exit; |
---|
273 | 686 | break; |
---|
274 | 687 | default: |
---|
275 | | - printk(KERN_ERR "%s: Type %d record not supported\n", |
---|
| 688 | + pr_err("%s: Type %d record not supported\n", |
---|
276 | 689 | __func__, type); |
---|
277 | 690 | status = AE_ERROR; |
---|
278 | 691 | goto exit; |
---|
.. | .. |
---|
283 | 696 | return status; |
---|
284 | 697 | } |
---|
285 | 698 | |
---|
286 | | -static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) |
---|
| 699 | +static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle) |
---|
287 | 700 | { |
---|
288 | 701 | acpi_status status; |
---|
289 | 702 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
---|
290 | 703 | union acpi_object *package, *fields; |
---|
| 704 | + struct hpx_type0 hpx0; |
---|
291 | 705 | int i; |
---|
292 | 706 | |
---|
293 | | - memset(hpp, 0, sizeof(struct hotplug_params)); |
---|
| 707 | + memset(&hpx0, 0, sizeof(hpx0)); |
---|
294 | 708 | |
---|
295 | 709 | status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer); |
---|
296 | 710 | if (ACPI_FAILURE(status)) |
---|
.. | .. |
---|
311 | 725 | } |
---|
312 | 726 | } |
---|
313 | 727 | |
---|
314 | | - hpp->t0 = &hpp->type0_data; |
---|
315 | | - hpp->t0->revision = 1; |
---|
316 | | - hpp->t0->cache_line_size = fields[0].integer.value; |
---|
317 | | - hpp->t0->latency_timer = fields[1].integer.value; |
---|
318 | | - hpp->t0->enable_serr = fields[2].integer.value; |
---|
319 | | - hpp->t0->enable_perr = fields[3].integer.value; |
---|
| 728 | + hpx0.revision = 1; |
---|
| 729 | + hpx0.cache_line_size = fields[0].integer.value; |
---|
| 730 | + hpx0.latency_timer = fields[1].integer.value; |
---|
| 731 | + hpx0.enable_serr = fields[2].integer.value; |
---|
| 732 | + hpx0.enable_perr = fields[3].integer.value; |
---|
| 733 | + |
---|
| 734 | + program_hpx_type0(dev, &hpx0); |
---|
320 | 735 | |
---|
321 | 736 | exit: |
---|
322 | 737 | kfree(buffer.pointer); |
---|
323 | 738 | return status; |
---|
324 | 739 | } |
---|
325 | 740 | |
---|
326 | | -/* pci_get_hp_params |
---|
| 741 | +/* pci_acpi_program_hp_params |
---|
327 | 742 | * |
---|
328 | 743 | * @dev - the pci_dev for which we want parameters |
---|
329 | | - * @hpp - allocated by the caller |
---|
330 | 744 | */ |
---|
331 | | -int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) |
---|
| 745 | +int pci_acpi_program_hp_params(struct pci_dev *dev) |
---|
332 | 746 | { |
---|
333 | 747 | acpi_status status; |
---|
334 | 748 | acpi_handle handle, phandle; |
---|
.. | .. |
---|
351 | 765 | * this pci dev. |
---|
352 | 766 | */ |
---|
353 | 767 | while (handle) { |
---|
354 | | - status = acpi_run_hpx(handle, hpp); |
---|
| 768 | + status = acpi_run_hpx(dev, handle); |
---|
355 | 769 | if (ACPI_SUCCESS(status)) |
---|
356 | 770 | return 0; |
---|
357 | | - status = acpi_run_hpp(handle, hpp); |
---|
| 771 | + status = acpi_run_hpp(dev, handle); |
---|
358 | 772 | if (ACPI_SUCCESS(status)) |
---|
359 | 773 | return 0; |
---|
360 | 774 | if (acpi_is_root_bridge(handle)) |
---|
.. | .. |
---|
366 | 780 | } |
---|
367 | 781 | return -ENODEV; |
---|
368 | 782 | } |
---|
369 | | -EXPORT_SYMBOL_GPL(pci_get_hp_params); |
---|
370 | 783 | |
---|
371 | 784 | /** |
---|
372 | 785 | * pciehp_is_native - Check whether a hotplug port is handled by the OS |
---|
.. | .. |
---|
519 | 932 | return PCI_POWER_ERROR; |
---|
520 | 933 | } |
---|
521 | 934 | |
---|
| 935 | +static struct acpi_device *acpi_pci_find_companion(struct device *dev); |
---|
| 936 | + |
---|
| 937 | +static bool acpi_pci_bridge_d3(struct pci_dev *dev) |
---|
| 938 | +{ |
---|
| 939 | + const struct fwnode_handle *fwnode; |
---|
| 940 | + struct acpi_device *adev; |
---|
| 941 | + struct pci_dev *root; |
---|
| 942 | + u8 val; |
---|
| 943 | + |
---|
| 944 | + if (!dev->is_hotplug_bridge) |
---|
| 945 | + return false; |
---|
| 946 | + |
---|
| 947 | + /* Assume D3 support if the bridge is power-manageable by ACPI. */ |
---|
| 948 | + adev = ACPI_COMPANION(&dev->dev); |
---|
| 949 | + if (!adev && !pci_dev_is_added(dev)) { |
---|
| 950 | + adev = acpi_pci_find_companion(&dev->dev); |
---|
| 951 | + ACPI_COMPANION_SET(&dev->dev, adev); |
---|
| 952 | + } |
---|
| 953 | + |
---|
| 954 | + if (adev && acpi_device_power_manageable(adev)) |
---|
| 955 | + return true; |
---|
| 956 | + |
---|
| 957 | + /* |
---|
| 958 | + * Look for a special _DSD property for the root port and if it |
---|
| 959 | + * is set we know the hierarchy behind it supports D3 just fine. |
---|
| 960 | + */ |
---|
| 961 | + root = pcie_find_root_port(dev); |
---|
| 962 | + if (!root) |
---|
| 963 | + return false; |
---|
| 964 | + |
---|
| 965 | + adev = ACPI_COMPANION(&root->dev); |
---|
| 966 | + if (root == dev) { |
---|
| 967 | + /* |
---|
| 968 | + * It is possible that the ACPI companion is not yet bound |
---|
| 969 | + * for the root port so look it up manually here. |
---|
| 970 | + */ |
---|
| 971 | + if (!adev && !pci_dev_is_added(root)) |
---|
| 972 | + adev = acpi_pci_find_companion(&root->dev); |
---|
| 973 | + } |
---|
| 974 | + |
---|
| 975 | + if (!adev) |
---|
| 976 | + return false; |
---|
| 977 | + |
---|
| 978 | + fwnode = acpi_fwnode_handle(adev); |
---|
| 979 | + if (fwnode_property_read_u8(fwnode, "HotPlugSupportInD3", &val)) |
---|
| 980 | + return false; |
---|
| 981 | + |
---|
| 982 | + return val == 1; |
---|
| 983 | +} |
---|
| 984 | + |
---|
522 | 985 | static bool acpi_pci_power_manageable(struct pci_dev *dev) |
---|
523 | 986 | { |
---|
524 | 987 | struct acpi_device *adev = ACPI_COMPANION(&dev->dev); |
---|
.. | .. |
---|
548 | 1011 | error = -EBUSY; |
---|
549 | 1012 | break; |
---|
550 | 1013 | } |
---|
| 1014 | + fallthrough; |
---|
551 | 1015 | case PCI_D0: |
---|
552 | 1016 | case PCI_D1: |
---|
553 | 1017 | case PCI_D2: |
---|
.. | .. |
---|
577 | 1041 | if (!adev || !acpi_device_power_manageable(adev)) |
---|
578 | 1042 | return PCI_UNKNOWN; |
---|
579 | 1043 | |
---|
580 | | - if (acpi_device_get_power(adev, &state) || state == ACPI_STATE_UNKNOWN) |
---|
| 1044 | + state = adev->power.state; |
---|
| 1045 | + if (state == ACPI_STATE_UNKNOWN) |
---|
581 | 1046 | return PCI_UNKNOWN; |
---|
582 | 1047 | |
---|
583 | 1048 | return state_conv[state]; |
---|
| 1049 | +} |
---|
| 1050 | + |
---|
| 1051 | +static void acpi_pci_refresh_power_state(struct pci_dev *dev) |
---|
| 1052 | +{ |
---|
| 1053 | + struct acpi_device *adev = ACPI_COMPANION(&dev->dev); |
---|
| 1054 | + |
---|
| 1055 | + if (adev && acpi_device_power_manageable(adev)) |
---|
| 1056 | + acpi_device_update_power(adev, NULL); |
---|
584 | 1057 | } |
---|
585 | 1058 | |
---|
586 | 1059 | static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable) |
---|
.. | .. |
---|
636 | 1109 | } |
---|
637 | 1110 | |
---|
638 | 1111 | static const struct pci_platform_pm_ops acpi_pci_platform_pm = { |
---|
| 1112 | + .bridge_d3 = acpi_pci_bridge_d3, |
---|
639 | 1113 | .is_manageable = acpi_pci_power_manageable, |
---|
640 | 1114 | .set_state = acpi_pci_set_power_state, |
---|
641 | 1115 | .get_state = acpi_pci_get_power_state, |
---|
| 1116 | + .refresh_state = acpi_pci_refresh_power_state, |
---|
642 | 1117 | .choose_state = acpi_pci_choose_state, |
---|
643 | 1118 | .set_wakeup = acpi_pci_wakeup, |
---|
644 | 1119 | .need_resume = acpi_pci_need_resume, |
---|
.. | .. |
---|
663 | 1138 | return; |
---|
664 | 1139 | |
---|
665 | 1140 | obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3, |
---|
666 | | - RESET_DELAY_DSM, NULL); |
---|
| 1141 | + DSM_PCI_POWER_ON_RESET_DELAY, NULL); |
---|
667 | 1142 | if (!obj) |
---|
668 | 1143 | return; |
---|
669 | 1144 | |
---|
.. | .. |
---|
702 | 1177 | * @pdev: the PCI device whose delay is to be updated |
---|
703 | 1178 | * @handle: ACPI handle of this device |
---|
704 | 1179 | * |
---|
705 | | - * Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM |
---|
| 1180 | + * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM |
---|
706 | 1181 | * control method of either the device itself or the PCI host bridge. |
---|
707 | 1182 | * |
---|
708 | 1183 | * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI |
---|
.. | .. |
---|
728 | 1203 | pdev->d3cold_delay = 0; |
---|
729 | 1204 | |
---|
730 | 1205 | obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3, |
---|
731 | | - FUNCTION_DELAY_DSM, NULL); |
---|
| 1206 | + DSM_PCI_DEVICE_READINESS_DURATIONS, NULL); |
---|
732 | 1207 | if (!obj) |
---|
733 | 1208 | return; |
---|
734 | 1209 | |
---|
.. | .. |
---|
741 | 1216 | } |
---|
742 | 1217 | if (elements[3].type == ACPI_TYPE_INTEGER) { |
---|
743 | 1218 | value = (int)elements[3].integer.value / 1000; |
---|
744 | | - if (value < PCI_PM_D3_WAIT) |
---|
745 | | - pdev->d3_delay = value; |
---|
| 1219 | + if (value < PCI_PM_D3HOT_WAIT) |
---|
| 1220 | + pdev->d3hot_delay = value; |
---|
746 | 1221 | } |
---|
747 | 1222 | } |
---|
748 | 1223 | ACPI_FREE(obj); |
---|
| 1224 | +} |
---|
| 1225 | + |
---|
| 1226 | +static void pci_acpi_set_external_facing(struct pci_dev *dev) |
---|
| 1227 | +{ |
---|
| 1228 | + u8 val; |
---|
| 1229 | + |
---|
| 1230 | + if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) |
---|
| 1231 | + return; |
---|
| 1232 | + if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val)) |
---|
| 1233 | + return; |
---|
| 1234 | + |
---|
| 1235 | + /* |
---|
| 1236 | + * These root ports expose PCIe (including DMA) outside of the |
---|
| 1237 | + * system. Everything downstream from them is external. |
---|
| 1238 | + */ |
---|
| 1239 | + if (val) |
---|
| 1240 | + dev->external_facing = 1; |
---|
749 | 1241 | } |
---|
750 | 1242 | |
---|
751 | 1243 | static void pci_acpi_setup(struct device *dev) |
---|
.. | .. |
---|
757 | 1249 | return; |
---|
758 | 1250 | |
---|
759 | 1251 | pci_acpi_optimize_delay(pci_dev, adev->handle); |
---|
| 1252 | + pci_acpi_set_external_facing(pci_dev); |
---|
| 1253 | + pci_acpi_add_edr_notifier(pci_dev); |
---|
760 | 1254 | |
---|
761 | 1255 | pci_acpi_add_pm_notifier(adev, pci_dev); |
---|
762 | 1256 | if (!adev->wakeup.flags.valid) |
---|
.. | .. |
---|
773 | 1267 | device_wakeup_enable(dev); |
---|
774 | 1268 | |
---|
775 | 1269 | acpi_pci_wakeup(pci_dev, false); |
---|
| 1270 | + acpi_device_power_add_dependent(adev, dev); |
---|
776 | 1271 | } |
---|
777 | 1272 | |
---|
778 | 1273 | static void pci_acpi_cleanup(struct device *dev) |
---|
.. | .. |
---|
783 | 1278 | if (!adev) |
---|
784 | 1279 | return; |
---|
785 | 1280 | |
---|
| 1281 | + pci_acpi_remove_edr_notifier(pci_dev); |
---|
786 | 1282 | pci_acpi_remove_pm_notifier(adev); |
---|
787 | 1283 | if (adev->wakeup.flags.valid) { |
---|
| 1284 | + acpi_device_power_remove_dependent(adev, dev); |
---|
788 | 1285 | if (pci_dev->bridge_d3) |
---|
789 | 1286 | device_wakeup_disable(dev); |
---|
790 | 1287 | |
---|