@@ ... @@
 /*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2019 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ ... @@
  */
 
 /*
- * Code to adjust PCIe capabilities.
- */
-static void tune_pcie_caps(struct hfi1_devdata *);
-
-/*
  * Do all the common PCIe setup and initialization.
- * devdata is not yet allocated, and is not allocated until after this
- * routine returns success. Therefore dd_dev_err() can't be used for error
- * printing.
  */
-int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+int hfi1_pcie_init(struct hfi1_devdata *dd)
 {
         int ret;
+        struct pci_dev *pdev = dd->pcidev;
 
         ret = pci_enable_device(pdev);
         if (ret) {
@@ ... @@
                  * about that, it appears. If the original BAR was retained
                  * in the kernel data structures, this may be OK.
                  */
-                hfi1_early_err(&pdev->dev, "pci enable failed: error %d\n",
-                               -ret);
-                goto done;
+                dd_dev_err(dd, "pci enable failed: error %d\n", -ret);
+                return ret;
         }
 
         ret = pci_request_regions(pdev, DRIVER_NAME);
         if (ret) {
-                hfi1_early_err(&pdev->dev,
-                               "pci_request_regions fails: err %d\n", -ret);
+                dd_dev_err(dd, "pci_request_regions fails: err %d\n", -ret);
                 goto bail;
         }
 
@@ ... @@
          */
         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
         if (ret) {
-                hfi1_early_err(&pdev->dev,
-                               "Unable to set DMA mask: %d\n", ret);
+                dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);
                 goto bail;
         }
         ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
@@ ... @@
                 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
         }
         if (ret) {
-                hfi1_early_err(&pdev->dev,
-                               "Unable to set DMA consistent mask: %d\n", ret);
+                dd_dev_err(dd, "Unable to set DMA consistent mask: %d\n", ret);
                 goto bail;
         }
 
         pci_set_master(pdev);
         (void)pci_enable_pcie_error_reporting(pdev);
-        goto done;
+        return 0;
 
 bail:
         hfi1_pcie_cleanup(pdev);
-done:
         return ret;
 }
 
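Review note: hfi1_pcie_init() now takes the devdata instead of the raw pci_dev, which means devdata must already be allocated when PCIe init runs; that is what allows the hfi1_early_err() calls above to become dd_dev_err(), and why the stale "devdata is not yet allocated" comment is deleted. The early `return ret` on pci_enable_device() failure is also correct: nothing has been acquired yet, so skipping hfi1_pcie_cleanup() is safe. A plausible sketch of the updated call site (everything other than hfi1_pcie_init() itself is an assumption, not taken from this patch):

```c
/* Sketch of the probe-time ordering after this change (hypothetical
 * helper names): dd exists before PCIe init, so dd_dev_err() is usable
 * inside hfi1_pcie_init(). */
static int probe_sketch(struct pci_dev *pdev)
{
        struct hfi1_devdata *dd;
        int ret;

        dd = hfi1_alloc_devdata(pdev, 2 * sizeof(struct hfi1_pportdata));
        if (IS_ERR(dd))
                return PTR_ERR(dd);

        ret = hfi1_pcie_init(dd);       /* pdev now reached via dd->pcidev */
        if (ret)
                hfi1_free_devdata(dd);
        return ret;
}
```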
@@ ... @@
                 return -EINVAL;
         }
 
-        dd->kregbase1 = ioremap_nocache(addr, RCV_ARRAY);
+        dd->kregbase1 = ioremap(addr, RCV_ARRAY);
         if (!dd->kregbase1) {
                 dd_dev_err(dd, "UC mapping of kregbase1 failed\n");
                 return -ENOMEM;
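Review note: ioremap_nocache() was an alias for ioremap(), which maps MMIO uncached by default on all architectures; the alias has since been removed upstream, so this rename (here and in the kregbase2 hunk below) is mechanical with no behavioral change. A minimal sketch of the pattern, with placeholder names:

```c
#include <linux/io.h>

/* Minimal sketch (hypothetical names): map a CSR region, read one
 * register, unmap. ioremap() is uncached by default, which is exactly
 * what the _nocache variant provided. */
static u32 peek_csr(phys_addr_t bar0_addr, unsigned long csr_offset)
{
        void __iomem *regs = ioremap(bar0_addr, PAGE_SIZE);
        u32 val;

        if (!regs)
                return 0;
        val = readl(regs + csr_offset);  /* use readl/writel accessors */
        iounmap(regs);
        return val;
}
```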
@@ ... @@
         dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count);
         dd->base2_start = RCV_ARRAY + rcv_array_count * 8;
 
-        dd->kregbase2 = ioremap_nocache(
+        dd->kregbase2 = ioremap(
                 addr + dd->base2_start,
                 TXE_PIO_SEND - dd->base2_start);
         if (!dd->kregbase2) {
@@ ... @@
                 dd_dev_err(dd, "WC mapping of send buffers failed\n");
                 goto nomem;
         }
-        dd_dev_info(dd, "WC piobase: %p\n for %x", dd->piobase, TXE_PIO_SIZE);
+        dd_dev_info(dd, "WC piobase: %p for %x\n", dd->piobase, TXE_PIO_SIZE);
 
         dd->physaddr = addr;        /* used for io_remap, etc. */
 
@@ ... @@
         ret = pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &linkcap);
         if (ret) {
                 dd_dev_err(dd, "Unable to read from PCI config\n");
-                return ret;
+                return pcibios_err_to_errno(ret);
         }
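Review note: the PCI config accessors (pci_read_config_*, pcie_capability_read_*) return positive PCIBIOS_* codes rather than errnos, so returning `ret` unconverted leaked a positive, non-errno value to callers that expect 0 or -Exxx. pcibios_err_to_errno() performs the mapping. A short sketch of the pattern:

```c
/* Sketch: convert PCIBIOS_* codes at the boundary where the function's
 * contract is "0 on success, negative errno on failure". */
static int read_link_cap(struct pci_dev *pdev, u32 *linkcap)
{
        int ret = pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, linkcap);

        /* e.g. PCIBIOS_DEVICE_NOT_FOUND becomes -ENODEV */
        return ret ? pcibios_err_to_errno(ret) : 0;
}
```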
 
         if ((linkcap & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_8_0GB) {
@@ ... @@
         return 0;
 }
 
-/*
- * Returns:
- * - actual number of interrupts allocated or
- * - error
+/**
+ * Restore command and BARs after a reset has wiped them out
+ *
+ * Returns 0 on success, otherwise a negative error value
  */
-int request_msix(struct hfi1_devdata *dd, u32 msireq)
-{
-        int nvec;
-
-        nvec = pci_alloc_irq_vectors(dd->pcidev, msireq, msireq, PCI_IRQ_MSIX);
-        if (nvec < 0) {
-                dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", nvec);
-                return nvec;
-        }
-
-        tune_pcie_caps(dd);
-
-        return nvec;
-}
-
-/* restore command and BARs after a reset has wiped them out */
 int restore_pci_variables(struct hfi1_devdata *dd)
 {
-        int ret = 0;
+        int ret;
 
         ret = pci_write_config_word(dd->pcidev, PCI_COMMAND, dd->pci_command);
         if (ret)
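Review note: request_msix() was a thin wrapper around pci_alloc_irq_vectors() plus the tune_pcie_caps() call; with it deleted, the MSI-X setup path makes both calls directly, which is why tune_pcie_caps() loses its `static` later in this patch. A plausible sketch of the replacement (the function name and request count are assumptions, not the driver's exact code):

```c
/* Sketch only: one plausible shape of the MSI-X setup that replaces
 * the request_msix() wrapper. */
static int msix_initialize(struct hfi1_devdata *dd, u32 nreq)
{
        int nvec = pci_alloc_irq_vectors(dd->pcidev, nreq, nreq,
                                         PCI_IRQ_MSIX);

        if (nvec < 0) {
                dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", nvec);
                return nvec;
        }

        tune_pcie_caps(dd);     /* now called directly, hence non-static */
        return 0;
}
```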
@@ ... @@
 
 error:
         dd_dev_err(dd, "Unable to write to PCI config\n");
-        return ret;
+        return pcibios_err_to_errno(ret);
 }
 
-/* Save BARs and command to rewrite after device reset */
+/**
+ * Save BARs and command to rewrite after device reset
+ *
+ * Returns 0 on success, otherwise a negative error value
+ */
 int save_pci_variables(struct hfi1_devdata *dd)
 {
-        int ret = 0;
+        int ret;
 
         ret = pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
                                     &dd->pcibar0);
@@ ... @@
 
 error:
         dd_dev_err(dd, "Unable to read from PCI config\n");
-        return ret;
+        return pcibios_err_to_errno(ret);
 }
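Review note: both helpers now document the 0-or-negative-errno contract and return converted PCI errors on their failure paths. They are used as a pair bracketing a device reset; a sketch of that pairing, with the reset step itself elided:

```c
/* Sketch: save config state, reset the device, then restore state. */
static int reset_with_pci_save(struct hfi1_devdata *dd)
{
        int ret = save_pci_variables(dd);

        if (ret)
                return ret;

        /* ... trigger the reset; command register and BARs are wiped ... */

        return restore_pci_variables(dd);
}
```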
 
 /*
@@ ... @@
  * Check and optionally adjust them to maximize our throughput.
  */
 static int hfi1_pcie_caps;
-module_param_named(pcie_caps, hfi1_pcie_caps, int, S_IRUGO);
+module_param_named(pcie_caps, hfi1_pcie_caps, int, 0444);
 MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
 
-uint aspm_mode = ASPM_MODE_DISABLED;
-module_param_named(aspm, aspm_mode, uint, S_IRUGO);
-MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
-
-static void tune_pcie_caps(struct hfi1_devdata *dd)
+/**
+ * tune_pcie_caps() - Code to adjust PCIe capabilities.
+ * @dd: Valid device data structure
+ *
+ */
+void tune_pcie_caps(struct hfi1_devdata *dd)
 {
         struct pci_dev *parent;
         u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
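Review note: S_IRUGO to 0444 is checkpatch's preferred plain-octal permission style, with no functional change. The aspm parameter is removed from this file; its new home is not visible in this diff, so this hunk alone does not imply the feature is gone. For context on what tune_pcie_caps() adjusts, the PCI core already exposes MaxPayload/MaxReadReq helpers; a sketch of those, not the function body:

```c
/* Sketch: core helpers that MPS/MRRS tuning code builds on. */
static void show_mps(struct hfi1_devdata *dd)
{
        int mps = pcie_get_mps(dd->pcidev);     /* current MaxPayload, bytes */
        int rrq = pcie_get_readrq(dd->pcidev);  /* current MaxReadReq, bytes */

        dd_dev_info(dd, "MPS %d MRRS %d\n", mps, rrq);
}
```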
@@ ... @@
         struct hfi1_devdata *dd = pci_get_drvdata(pdev);
 
         dd_dev_info(dd, "HFI1 resume function called\n");
-        pci_cleanup_aer_uncorrect_error_status(pdev);
         /*
          * Running jobs will fail, since it's asynchronous
          * unlike sysfs-requested reset. Better than
@@ ... @@
         const u8 (*ctle_tunings)[4];
         uint static_ctle_mode;
         int return_error = 0;
+        u32 target_width;
 
         /* PCIe Gen3 is for the ASIC only */
         if (dd->icode != ICODE_RTL_SILICON)
@@ ... @@
                          __func__);
                 return 0;
         }
+
+        /* Previous Gen1/Gen2 bus width */
+        target_width = dd->lbus_width;
 
         /*
          * Do the Gen3 transition. Steps are those of the PCIe Gen3
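Review note: the Gen3 transition previously verified only the speed, but a retrain can also come back with fewer lanes. Capturing the pre-transition width here lets the exit check below treat a narrower link as a failure and retry. For reference, a sketch of how the negotiated width can be read from config space (hfi1 caches this in dd->lbus_width elsewhere in the driver):

```c
/* Sketch: read the negotiated link width from the Link Status register. */
static u32 current_link_width(struct pci_dev *pdev)
{
        u16 lnksta = 0;

        pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
        return (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
}
```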
@@ ... @@
         dd_dev_info(dd, "%s: new speed and width: %s\n", __func__,
                     dd->lbus_info);
 
-        if (dd->lbus_speed != target_speed) { /* not target */
+        if (dd->lbus_speed != target_speed ||
+            dd->lbus_width < target_width) { /* not target */
                 /* maybe retry */
                 do_retry = retry_count < pcie_retry;
-                dd_dev_err(dd, "PCIe link speed did not switch to Gen%d%s\n",
-                           pcie_target, do_retry ? ", retrying" : "");
+                dd_dev_err(dd, "PCIe link speed or width did not match target%s\n",
+                           do_retry ? ", retrying" : "");
                 retry_count++;
                 if (do_retry) {
                         msleep(100); /* allow time to settle */
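Review note: the retry condition now fails the transition when either the speed misses the target or the width degraded below its pre-transition value, and the error message is generalized to match. Distilled to a sketch, the acceptance test is:

```c
/* Sketch of the acceptance test applied after the Gen3 transition. */
static bool gen3_transition_ok(u32 speed, u32 width,
                               u32 target_speed, u32 target_width)
{
        /* exact speed required; width may not shrink */
        return speed == target_speed && width >= target_width;
}
```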