2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/infiniband/hw/hfi1/pcie.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2019 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -61,19 +61,12 @@
  */
 
 /*
- * Code to adjust PCIe capabilities.
- */
-static void tune_pcie_caps(struct hfi1_devdata *);
-
-/*
  * Do all the common PCIe setup and initialization.
- * devdata is not yet allocated, and is not allocated until after this
- * routine returns success. Therefore dd_dev_err() can't be used for error
- * printing.
  */
-int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+int hfi1_pcie_init(struct hfi1_devdata *dd)
 {
 	int ret;
+	struct pci_dev *pdev = dd->pcidev;
 
 	ret = pci_enable_device(pdev);
 	if (ret) {
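
Note: hfi1_pcie_init() now takes the already-allocated hfi1_devdata rather
than the raw pci_dev, which is why the old comment about dd_dev_err() being
unusable is dropped. A minimal sketch of the assumed probe-side ordering
(the allocator name and signature are illustrative, not part of this hunk):

    /* sketch: dd is allocated first, so device-relative logging works */
    dd = hfi1_alloc_devdata(pdev, ent);     /* assumed allocator */
    if (IS_ERR(dd))
        return PTR_ERR(dd);
    ret = hfi1_pcie_init(dd);               /* was: hfi1_pcie_init(pdev, ent) */
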
@@ -89,15 +82,13 @@
 		 * about that, it appears. If the original BAR was retained
 		 * in the kernel data structures, this may be OK.
 		 */
-		hfi1_early_err(&pdev->dev, "pci enable failed: error %d\n",
-			       -ret);
-		goto done;
+		dd_dev_err(dd, "pci enable failed: error %d\n", -ret);
+		return ret;
 	}
 
 	ret = pci_request_regions(pdev, DRIVER_NAME);
 	if (ret) {
-		hfi1_early_err(&pdev->dev,
-			       "pci_request_regions fails: err %d\n", -ret);
+		dd_dev_err(dd, "pci_request_regions fails: err %d\n", -ret);
 		goto bail;
 	}
 
@@ -110,8 +101,7 @@
 	 */
 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (ret) {
-		hfi1_early_err(&pdev->dev,
-			       "Unable to set DMA mask: %d\n", ret);
+		dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);
 		goto bail;
 	}
 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -119,18 +109,16 @@
 		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 	}
 	if (ret) {
-		hfi1_early_err(&pdev->dev,
-			       "Unable to set DMA consistent mask: %d\n", ret);
+		dd_dev_err(dd, "Unable to set DMA consistent mask: %d\n", ret);
 		goto bail;
 	}
 
 	pci_set_master(pdev);
 	(void)pci_enable_pcie_error_reporting(pdev);
-	goto done;
+	return 0;
 
 bail:
 	hfi1_pcie_cleanup(pdev);
-done:
 	return ret;
 }
 
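
Note: the mask setup above keeps the legacy pci_set_dma_mask() /
pci_set_consistent_dma_mask() pair. Not part of this patch, but the same
64-bit-with-32-bit-fallback logic is normally written with the generic DMA
API on current kernels; a sketch:

    /* equivalent modern idiom: set streaming + coherent masks in one call */
    ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (ret)
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    if (ret)
        dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);
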
@@ -173,7 +161,7 @@
 		return -EINVAL;
 	}
 
-	dd->kregbase1 = ioremap_nocache(addr, RCV_ARRAY);
+	dd->kregbase1 = ioremap(addr, RCV_ARRAY);
 	if (!dd->kregbase1) {
 		dd_dev_err(dd, "UC mapping of kregbase1 failed\n");
 		return -ENOMEM;
@@ -191,7 +179,7 @@
 	dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count);
 	dd->base2_start = RCV_ARRAY + rcv_array_count * 8;
 
-	dd->kregbase2 = ioremap_nocache(
+	dd->kregbase2 = ioremap(
 		addr + dd->base2_start,
 		TXE_PIO_SEND - dd->base2_start);
 	if (!dd->kregbase2) {
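
Note: the two ioremap_nocache() -> ioremap() changes are mechanical.
ioremap() already returns an uncached (UC) mapping on all architectures,
and ioremap_nocache() was a pure alias before its removal, so behavior is
unchanged:

    void __iomem *base = ioremap(addr, RCV_ARRAY);  /* UC, same as _nocache */
    if (!base)
        return -ENOMEM;
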
@@ -206,7 +194,7 @@
 		dd_dev_err(dd, "WC mapping of send buffers failed\n");
 		goto nomem;
 	}
-	dd_dev_info(dd, "WC piobase: %p\n for %x", dd->piobase, TXE_PIO_SIZE);
+	dd_dev_info(dd, "WC piobase: %p for %x\n", dd->piobase, TXE_PIO_SIZE);
 
 	dd->physaddr = addr;        /* used for io_remap, etc. */
 
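
Note: a straight bug fix above; the newline was embedded mid-string
("%p\n for %x"), splitting the log message across two lines. Moving it to
the end keeps "WC piobase: <ptr> for <size>" on one line.
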
@@ -318,7 +306,7 @@
 	ret = pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &linkcap);
 	if (ret) {
 		dd_dev_err(dd, "Unable to read from PCI config\n");
-		return ret;
+		return pcibios_err_to_errno(ret);
 	}
 
 	if ((linkcap & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_8_0GB) {
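
Note: pcie_capability_read_dword() reports failures as positive PCIBIOS_*
codes rather than negative errnos, so returning ret unconverted would leak
a positive value to callers. pcibios_err_to_errno() maps them, roughly:

    /* paraphrased sketch of the helper's mapping */
    switch (err) {
    case PCIBIOS_SUCCESSFUL:        return 0;
    case PCIBIOS_DEVICE_NOT_FOUND:  return -ENODEV;
    case PCIBIOS_SET_FAILED:        return -EIO;
    /* ... remaining PCIBIOS_* codes map to other negative errnos */
    }
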
@@ -346,30 +334,14 @@
 	return 0;
 }
 
-/*
- * Returns:
- *	- actual number of interrupts allocated or
- *	- error
+/**
+ * Restore command and BARs after a reset has wiped them out
+ *
+ * Returns 0 on success, otherwise a negative error value
  */
-int request_msix(struct hfi1_devdata *dd, u32 msireq)
-{
-	int nvec;
-
-	nvec = pci_alloc_irq_vectors(dd->pcidev, msireq, msireq, PCI_IRQ_MSIX);
-	if (nvec < 0) {
-		dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", nvec);
-		return nvec;
-	}
-
-	tune_pcie_caps(dd);
-
-	return nvec;
-}
-
-/* restore command and BARs after a reset has wiped them out */
 int restore_pci_variables(struct hfi1_devdata *dd)
 {
-	int ret = 0;
+	int ret;
 
 	ret = pci_write_config_word(dd->pcidev, PCI_COMMAND, dd->pci_command);
 	if (ret)
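
Note: request_msix() goes away along with the tune_pcie_caps() forward
declaration removed in an earlier hunk; pcie.c no longer calls the tuning
code itself. Presumably the driver's MSI-X setup now owns allocation and
calls the newly exported function directly; a hypothetical caller:

    /* hypothetical call site in the MSI-X setup code */
    nvec = pci_alloc_irq_vectors(dd->pcidev, min_req, max_req, PCI_IRQ_MSIX);
    if (nvec < 0)
        return nvec;
    tune_pcie_caps(dd);     /* non-static as of this patch */
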
@@ -418,13 +390,17 @@
 
 error:
 	dd_dev_err(dd, "Unable to write to PCI config\n");
-	return ret;
+	return pcibios_err_to_errno(ret);
 }
 
-/* Save BARs and command to rewrite after device reset */
+/**
+ * Save BARs and command to rewrite after device reset
+ *
+ * Returns 0 on success, otherwise a negative error value
+ */
 int save_pci_variables(struct hfi1_devdata *dd)
 {
-	int ret = 0;
+	int ret;
 
 	ret = pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
 				    &dd->pcibar0);
@@ -473,7 +449,7 @@
 
 error:
 	dd_dev_err(dd, "Unable to read from PCI config\n");
-	return ret;
+	return pcibios_err_to_errno(ret);
 }
 
 /*
@@ -481,14 +457,15 @@
  * Check and optionally adjust them to maximize our throughput.
  */
 static int hfi1_pcie_caps;
-module_param_named(pcie_caps, hfi1_pcie_caps, int, S_IRUGO);
+module_param_named(pcie_caps, hfi1_pcie_caps, int, 0444);
 MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
 
-uint aspm_mode = ASPM_MODE_DISABLED;
-module_param_named(aspm, aspm_mode, uint, S_IRUGO);
-MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
-
-static void tune_pcie_caps(struct hfi1_devdata *dd)
+/**
+ * tune_pcie_caps() - Code to adjust PCIe capabilities.
+ * @dd: Valid device data structure
+ *
+ */
+void tune_pcie_caps(struct hfi1_devdata *dd)
 {
 	struct pci_dev *parent;
 	u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
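
Note: with the static dropped, a prototype for tune_pcie_caps() must be
exported from a driver header (assumed companion change, not shown here):

    void tune_pcie_caps(struct hfi1_devdata *dd);    /* assumed header change */

The aspm module parameter block removed above presumably moves to the
driver's ASPM code rather than disappearing outright.
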
@@ -652,7 +629,6 @@
 	struct hfi1_devdata *dd = pci_get_drvdata(pdev);
 
 	dd_dev_info(dd, "HFI1 resume function called\n");
-	pci_cleanup_aer_uncorrect_error_status(pdev);
 	/*
 	 * Running jobs will fail, since it's asynchronous
 	 * unlike sysfs-requested reset. Better than
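
Note: the pci_cleanup_aer_uncorrect_error_status() call is dropped because
clearing AER status on recovery is handled by the PCI core's error-recovery
path; drivers no longer need to do it by hand (the helper itself was later
renamed and such driver-side calls were removed tree-wide).
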
@@ -1031,6 +1007,7 @@
 	const u8 (*ctle_tunings)[4];
 	uint static_ctle_mode;
 	int return_error = 0;
+	u32 target_width;
 
 	/* PCIe Gen3 is for the ASIC only */
 	if (dd->icode != ICODE_RTL_SILICON)
@@ -1069,6 +1046,9 @@
 			   __func__);
 		return 0;
 	}
+
+	/* Previous Gen1/Gen2 bus width */
+	target_width = dd->lbus_width;
 
 	/*
 	 * Do the Gen3 transition. Steps are those of the PCIe Gen3
@@ -1438,11 +1418,12 @@
 	dd_dev_info(dd, "%s: new speed and width: %s\n", __func__,
 		    dd->lbus_info);
 
-	if (dd->lbus_speed != target_speed) { /* not target */
+	if (dd->lbus_speed != target_speed ||
+	    dd->lbus_width < target_width) { /* not target */
 		/* maybe retry */
 		do_retry = retry_count < pcie_retry;
-		dd_dev_err(dd, "PCIe link speed did not switch to Gen%d%s\n",
-			   pcie_target, do_retry ? ", retrying" : "");
+		dd_dev_err(dd, "PCIe link speed or width did not match target%s\n",
+			   do_retry ? ", retrying" : "");
 		retry_count++;
 		if (do_retry) {
 			msleep(100); /* allow time to settle */
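
Note: together with the target_width capture earlier, the retry condition
now treats the transition as failed when either the speed misses the Gen3
target or the link comes back narrower than it was before the switch. A
condensed sketch of the resulting logic (loop label is illustrative, not
the function's actual structure):

    target_width = dd->lbus_width;      /* saved before the transition */
retry:
    /* ... perform the Gen3 transition, re-read lbus_speed/lbus_width ... */
    if (dd->lbus_speed != target_speed ||
        dd->lbus_width < target_width) {
        if (retry_count++ < pcie_retry) {
            msleep(100);                /* allow time to settle */
            goto retry;
        }
    }
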