| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * Intel I/OAT DMA Linux driver |
|---|
| 3 | 4 | * Copyright(c) 2004 - 2015 Intel Corporation. |
|---|
| 4 | | - * |
|---|
| 5 | | - * This program is free software; you can redistribute it and/or modify it |
|---|
| 6 | | - * under the terms and conditions of the GNU General Public License, |
|---|
| 7 | | - * version 2, as published by the Free Software Foundation. |
|---|
| 8 | | - * |
|---|
| 9 | | - * This program is distributed in the hope that it will be useful, but WITHOUT |
|---|
| 10 | | - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|---|
| 11 | | - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
|---|
| 12 | | - * more details. |
|---|
| 13 | | - * |
|---|
| 14 | | - * The full GNU General Public License is included in this distribution in |
|---|
| 15 | | - * the file called "COPYING". |
|---|
| 16 | | - * |
|---|
| 17 | 5 | */ |
|---|
| 18 | 6 | |
|---|
| 19 | 7 | #include <linux/init.h> |
|---|
| .. | .. |
|---|
| 119 | 107 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) }, |
|---|
| 120 | 108 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) }, |
|---|
| 121 | 109 | |
|---|
| 110 | + /* I/OAT v3.4 platforms */ |
|---|
| 111 | + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_ICX) }, |
|---|
| 112 | + |
|---|
| 122 | 113 | { 0, } |
|---|
| 123 | 114 | }; |
|---|
| 124 | 115 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); |
|---|
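
The entry added above is the first I/OAT v3.4 platform (the ICX device ID). For reference, `PCI_VDEVICE(INTEL, ...)` matches on Intel's vendor ID plus the given device ID and wildcards the subsystem IDs; the new table entry is roughly equivalent to the designated-initializer sketch below (illustration only, not part of the patch).

```c
/* Illustration only: approximate expansion of the added match-table entry.
 * PCI_VDEVICE() fills in vendor/device and wildcards the subsystem IDs.
 */
static const struct pci_device_id ioat_icx_entry = {
	.vendor    = PCI_VENDOR_ID_INTEL,
	.device    = PCI_DEVICE_ID_INTEL_IOAT_ICX,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
};
```
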
| .. | .. |
|---|
| 135 | 126 | static int ioat_dca_enabled = 1; |
|---|
| 136 | 127 | module_param(ioat_dca_enabled, int, 0644); |
|---|
| 137 | 128 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); |
|---|
| 138 | | -int ioat_pending_level = 4; |
|---|
| 129 | +int ioat_pending_level = 7; |
|---|
| 139 | 130 | module_param(ioat_pending_level, int, 0644); |
|---|
| 140 | 131 | MODULE_PARM_DESC(ioat_pending_level, |
|---|
| 141 | | - "high-water mark for pushing ioat descriptors (default: 4)"); |
|---|
| 132 | + "high-water mark for pushing ioat descriptors (default: 7)"); |
|---|
| 142 | 133 | static char ioat_interrupt_style[32] = "msix"; |
|---|
| 143 | 134 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, |
|---|
| 144 | 135 | sizeof(ioat_interrupt_style), 0644); |
|---|
| .. | .. |
|---|
| 565 | 556 | ioat_kobject_del(ioat_dma); |
|---|
| 566 | 557 | |
|---|
| 567 | 558 | dma_async_device_unregister(dma); |
|---|
| 568 | | - |
|---|
| 569 | | - dma_pool_destroy(ioat_dma->completion_pool); |
|---|
| 570 | | - |
|---|
| 571 | | - INIT_LIST_HEAD(&dma->channels); |
|---|
| 572 | 559 | } |
|---|
| 573 | 560 | |
|---|
| 574 | 561 | /** |
|---|
| .. | .. |
|---|
| 598 | 585 | dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); |
|---|
| 599 | 586 | |
|---|
| 600 | 587 | for (i = 0; i < dma->chancnt; i++) { |
|---|
| 601 | | - ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); |
|---|
| 588 | + ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL); |
|---|
| 602 | 589 | if (!ioat_chan) |
|---|
| 603 | 590 | break; |
|---|
| 604 | 591 | |
|---|
| .. | .. |
|---|
| 615 | 602 | |
|---|
| 616 | 603 | /** |
|---|
| 617 | 604 | * ioat_free_chan_resources - release all the descriptors |
|---|
| 618 | | - * @chan: the channel to be cleaned |
|---|
| 605 | + * @c: the channel to be cleaned |
|---|
| 619 | 606 | */ |
|---|
| 620 | 607 | static void ioat_free_chan_resources(struct dma_chan *c) |
|---|
| 621 | 608 | { |
|---|
| .. | .. |
|---|
| 633 | 620 | return; |
|---|
| 634 | 621 | |
|---|
| 635 | 622 | ioat_stop(ioat_chan); |
|---|
| 636 | | - ioat_reset_hw(ioat_chan); |
|---|
| 623 | + |
|---|
| 624 | + if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) { |
|---|
| 625 | + ioat_reset_hw(ioat_chan); |
|---|
| 626 | + |
|---|
| 627 | + /* Put LTR to idle */ |
|---|
| 628 | + if (ioat_dma->version >= IOAT_VER_3_4) |
|---|
| 629 | + writeb(IOAT_CHAN_LTR_SWSEL_IDLE, |
|---|
| 630 | + ioat_chan->reg_base + |
|---|
| 631 | + IOAT_CHAN_LTR_SWSEL_OFFSET); |
|---|
| 632 | + } |
|---|
| 637 | 633 | |
|---|
| 638 | 634 | spin_lock_bh(&ioat_chan->cleanup_lock); |
|---|
| 639 | 635 | spin_lock_bh(&ioat_chan->prep_lock); |
|---|
| .. | .. |
|---|
| 655 | 651 | } |
|---|
| 656 | 652 | |
|---|
| 657 | 653 | for (i = 0; i < ioat_chan->desc_chunks; i++) { |
|---|
| 658 | | - dma_free_coherent(to_dev(ioat_chan), SZ_2M, |
|---|
| 654 | + dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE, |
|---|
| 659 | 655 | ioat_chan->descs[i].virt, |
|---|
| 660 | 656 | ioat_chan->descs[i].hw); |
|---|
| 661 | 657 | ioat_chan->descs[i].virt = NULL; |
|---|
| .. | .. |
|---|
| 724 | 720 | spin_unlock_bh(&ioat_chan->prep_lock); |
|---|
| 725 | 721 | spin_unlock_bh(&ioat_chan->cleanup_lock); |
|---|
| 726 | 722 | |
|---|
| 723 | + /* Setting up LTR values for 3.4 or later */ |
|---|
| 724 | + if (ioat_chan->ioat_dma->version >= IOAT_VER_3_4) { |
|---|
| 725 | + u32 lat_val; |
|---|
| 726 | + |
|---|
| 727 | + lat_val = IOAT_CHAN_LTR_ACTIVE_SNVAL | |
|---|
| 728 | + IOAT_CHAN_LTR_ACTIVE_SNLATSCALE | |
|---|
| 729 | + IOAT_CHAN_LTR_ACTIVE_SNREQMNT; |
|---|
| 730 | + writel(lat_val, ioat_chan->reg_base + |
|---|
| 731 | + IOAT_CHAN_LTR_ACTIVE_OFFSET); |
|---|
| 732 | + |
|---|
| 733 | + lat_val = IOAT_CHAN_LTR_IDLE_SNVAL | |
|---|
| 734 | + IOAT_CHAN_LTR_IDLE_SNLATSCALE | |
|---|
| 735 | + IOAT_CHAN_LTR_IDLE_SNREQMNT; |
|---|
| 736 | + writel(lat_val, ioat_chan->reg_base + |
|---|
| 737 | + IOAT_CHAN_LTR_IDLE_OFFSET); |
|---|
| 738 | + |
|---|
| 739 | + /* Select to active */ |
|---|
| 740 | + writeb(IOAT_CHAN_LTR_SWSEL_ACTIVE, |
|---|
| 741 | + ioat_chan->reg_base + |
|---|
| 742 | + IOAT_CHAN_LTR_SWSEL_OFFSET); |
|---|
| 743 | + } |
|---|
| 744 | + |
|---|
| 727 | 745 | ioat_start_null_desc(ioat_chan); |
|---|
| 728 | 746 | |
|---|
| 729 | 747 | /* check that we got off the ground */ |
|---|
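
Together with the `ioat_free_chan_resources()` hunk above, this gives v3.4 channels a simple Latency Tolerance Reporting (LTR) lifecycle: the active and idle snoop-latency values are programmed when channel resources are allocated, the software select is switched to ACTIVE while the channel is in service, and it drops back to IDLE when the channel is torn down. A condensed sketch of that pairing, using the register and bit names from the hunks (the helper names are hypothetical):

```c
/* Hypothetical helpers condensing the v3.4 LTR handling from the hunks above. */
static void ioat34_ltr_active(struct ioatdma_chan *ioat_chan)
{
	/* Advertise the snoop latency tolerance for the active state ... */
	writel(IOAT_CHAN_LTR_ACTIVE_SNVAL |
	       IOAT_CHAN_LTR_ACTIVE_SNLATSCALE |
	       IOAT_CHAN_LTR_ACTIVE_SNREQMNT,
	       ioat_chan->reg_base + IOAT_CHAN_LTR_ACTIVE_OFFSET);

	/* ... and for the idle state ... */
	writel(IOAT_CHAN_LTR_IDLE_SNVAL |
	       IOAT_CHAN_LTR_IDLE_SNLATSCALE |
	       IOAT_CHAN_LTR_IDLE_SNREQMNT,
	       ioat_chan->reg_base + IOAT_CHAN_LTR_IDLE_OFFSET);

	/* ... then report the channel as active while it is in use. */
	writeb(IOAT_CHAN_LTR_SWSEL_ACTIVE,
	       ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
}

static void ioat34_ltr_idle(struct ioatdma_chan *ioat_chan)
{
	/* Fall back to the idle LTR values once the channel is freed. */
	writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
	       ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
}
```
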
| .. | .. |
|---|
| 749 | 767 | struct ioatdma_chan *ioat_chan, int idx) |
|---|
| 750 | 768 | { |
|---|
| 751 | 769 | struct dma_device *dma = &ioat_dma->dma_dev; |
|---|
| 752 | | - struct dma_chan *c = &ioat_chan->dma_chan; |
|---|
| 753 | | - unsigned long data = (unsigned long) c; |
|---|
| 754 | 770 | |
|---|
| 755 | 771 | ioat_chan->ioat_dma = ioat_dma; |
|---|
| 756 | 772 | ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1)); |
|---|
| .. | .. |
|---|
| 760 | 776 | list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels); |
|---|
| 761 | 777 | ioat_dma->idx[idx] = ioat_chan; |
|---|
| 762 | 778 | timer_setup(&ioat_chan->timer, ioat_timer_event, 0); |
|---|
| 763 | | - tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data); |
|---|
| 779 | + tasklet_setup(&ioat_chan->cleanup_task, ioat_cleanup_event); |
|---|
| 764 | 780 | } |
|---|
| 765 | 781 | |
|---|
| 766 | 782 | #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ |
|---|
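
The `tasklet_init()` call becomes `tasklet_setup()`, so the cleanup callback no longer takes an `unsigned long` cookie; it receives the `struct tasklet_struct *` itself and recovers the channel with `from_tasklet()`. The corresponding change to `ioat_cleanup_event()` is not visible in the hunks shown; with this API its callback side follows the usual pattern (a minimal sketch, assuming the tasklet is embedded as `cleanup_task` as in the hunk above):

```c
/* Sketch of the callback side of the tasklet_setup() conversion. */
static void ioat_cleanup_event(struct tasklet_struct *t)
{
	/* Recover the channel from the embedded tasklet instead of the
	 * old 'unsigned long data' argument.
	 */
	struct ioatdma_chan *ioat_chan = from_tasklet(ioat_chan, t, cleanup_task);

	ioat_cleanup(ioat_chan);	/* the cleanup work itself is unchanged */
}
```
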
| .. | .. |
|---|
| 1177 | 1193 | /* disable relaxed ordering */ |
|---|
| 1178 | 1194 | err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16); |
|---|
| 1179 | 1195 | if (err) |
|---|
| 1180 | | - return err; |
|---|
| 1196 | + return pcibios_err_to_errno(err); |
|---|
| 1181 | 1197 | |
|---|
| 1182 | 1198 | /* clear relaxed ordering enable */ |
|---|
| 1183 | 1199 | val16 &= ~IOAT_DEVCTRL_ROE; |
|---|
| 1184 | 1200 | err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16); |
|---|
| 1185 | 1201 | if (err) |
|---|
| 1186 | | - return err; |
|---|
| 1202 | + return pcibios_err_to_errno(err); |
|---|
| 1203 | + |
|---|
| 1204 | + if (ioat_dma->cap & IOAT_CAP_DPS) |
|---|
| 1205 | + writeb(ioat_pending_level + 1, |
|---|
| 1206 | + ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET); |
|---|
| 1187 | 1207 | |
|---|
| 1188 | 1208 | return 0; |
|---|
| 1189 | 1209 | } |
|---|
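
Two small fixes land together here: `pcie_capability_read_word()`/`pcie_capability_write_word()` report failures as `PCIBIOS_*` codes rather than negative errnos, so their results are now run through `pcibios_err_to_errno()` before being returned, and on hardware that advertises descriptor pre-fetch (`IOAT_CAP_DPS`) the pre-fetch limit register is programmed from the same `ioat_pending_level` parameter whose default was raised from 4 to 7 earlier in the patch. A sketch of the pre-fetch side in isolation (the helper name is hypothetical; register and capability names come from the hunk):

```c
/* Hypothetical helper isolating the descriptor pre-fetch setup shown above. */
static void ioat_config_prefetch_limit(struct ioatdma_device *ioat_dma)
{
	if (!(ioat_dma->cap & IOAT_CAP_DPS))
		return;				/* no descriptor pre-fetch support */

	/* Program the pre-fetch limit to one more than the pending-level
	 * high-water mark (module parameter, default 7).
	 */
	writeb(ioat_pending_level + 1,
	       ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
}
```
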
| .. | .. |
|---|
| 1245 | 1265 | #define DRV_NAME "ioatdma" |
|---|
| 1246 | 1266 | |
|---|
| 1247 | 1267 | static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev, |
|---|
| 1248 | | - enum pci_channel_state error) |
|---|
| 1268 | + pci_channel_state_t error) |
|---|
| 1249 | 1269 | { |
|---|
| 1250 | 1270 | dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error); |
|---|
| 1251 | 1271 | |
|---|
| .. | .. |
|---|
| 1258 | 1278 | static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev) |
|---|
| 1259 | 1279 | { |
|---|
| 1260 | 1280 | pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED; |
|---|
| 1261 | | - int err; |
|---|
| 1262 | 1281 | |
|---|
| 1263 | 1282 | dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME); |
|---|
| 1264 | 1283 | |
|---|
| .. | .. |
|---|
| 1271 | 1290 | pci_restore_state(pdev); |
|---|
| 1272 | 1291 | pci_save_state(pdev); |
|---|
| 1273 | 1292 | pci_wake_from_d3(pdev, false); |
|---|
| 1274 | | - } |
|---|
| 1275 | | - |
|---|
| 1276 | | - err = pci_cleanup_aer_uncorrect_error_status(pdev); |
|---|
| 1277 | | - if (err) { |
|---|
| 1278 | | - dev_err(&pdev->dev, |
|---|
| 1279 | | - "AER uncorrect error status clear failed: %#x\n", err); |
|---|
| 1280 | 1293 | } |
|---|
| 1281 | 1294 | |
|---|
| 1282 | 1295 | return result; |
|---|
| .. | .. |
|---|
| 1307 | 1320 | .err_handler = &ioat_err_handler, |
|---|
| 1308 | 1321 | }; |
|---|
| 1309 | 1322 | |
|---|
| 1323 | +static void release_ioatdma(struct dma_device *device) |
|---|
| 1324 | +{ |
|---|
| 1325 | + struct ioatdma_device *d = to_ioatdma_device(device); |
|---|
| 1326 | + int i; |
|---|
| 1327 | + |
|---|
| 1328 | + for (i = 0; i < IOAT_MAX_CHANS; i++) |
|---|
| 1329 | + kfree(d->idx[i]); |
|---|
| 1330 | + |
|---|
| 1331 | + dma_pool_destroy(d->completion_pool); |
|---|
| 1332 | + kfree(d); |
|---|
| 1333 | +} |
|---|
| 1334 | + |
|---|
| 1310 | 1335 | static struct ioatdma_device * |
|---|
| 1311 | 1336 | alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) |
|---|
| 1312 | 1337 | { |
|---|
| 1313 | | - struct device *dev = &pdev->dev; |
|---|
| 1314 | | - struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); |
|---|
| 1338 | + struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL); |
|---|
| 1315 | 1339 | |
|---|
| 1316 | 1340 | if (!d) |
|---|
| 1317 | 1341 | return NULL; |
|---|
| 1318 | 1342 | d->pdev = pdev; |
|---|
| 1319 | 1343 | d->reg_base = iobase; |
|---|
| 1344 | + d->dma_dev.device_release = release_ioatdma; |
|---|
| 1320 | 1345 | return d; |
|---|
| 1321 | 1346 | } |
|---|
| 1322 | 1347 | |
|---|
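
These hunks change who owns the driver's memory: the per-channel structures (earlier hunk in the channel-enumeration loop) and the `ioatdma_device` itself switch from devm-managed allocations to plain `kzalloc()`, and the completion-pool teardown removed from the teardown path in an earlier hunk now happens in `release_ioatdma()`. The new `device_release` callback is invoked by the dmaengine core once the last reference to the `dma_device` is dropped, so this state can outlive driver unbind while channels are still referenced. The same function, restated with comments tying each free back to its allocation site:

```c
/* release_ioatdma() from the hunk above, annotated: each free here pairs
 * with a plain allocation elsewhere in the driver, and runs only when the
 * dmaengine core drops its last reference to the dma_device.
 */
static void release_ioatdma(struct dma_device *device)
{
	struct ioatdma_device *d = to_ioatdma_device(device);
	int i;

	for (i = 0; i < IOAT_MAX_CHANS; i++)
		kfree(d->idx[i]);		/* kzalloc()'d during channel enumeration */

	dma_pool_destroy(d->completion_pool);	/* no longer destroyed in the remove path */
	kfree(d);				/* kzalloc()'d in alloc_ioatdma() */
}
```
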
| .. | .. |
|---|
| 1357 | 1382 | pci_set_drvdata(pdev, device); |
|---|
| 1358 | 1383 | |
|---|
| 1359 | 1384 | device->version = readb(device->reg_base + IOAT_VER_OFFSET); |
|---|
| 1385 | + if (device->version >= IOAT_VER_3_4) |
|---|
| 1386 | + ioat_dca_enabled = 0; |
|---|
| 1360 | 1387 | if (device->version >= IOAT_VER_3_0) { |
|---|
| 1361 | 1388 | if (is_skx_ioat(pdev)) |
|---|
| 1362 | 1389 | device->version = IOAT_VER_3_2; |
|---|
| .. | .. |
|---|
| 1383 | 1410 | if (!device) |
|---|
| 1384 | 1411 | return; |
|---|
| 1385 | 1412 | |
|---|
| 1413 | + ioat_shutdown(pdev); |
|---|
| 1414 | + |
|---|
| 1386 | 1415 | dev_err(&pdev->dev, "Removing dma and dca services\n"); |
|---|
| 1387 | 1416 | if (device->dca) { |
|---|
| 1388 | 1417 | unregister_dca_provider(device->dca, &pdev->dev); |
|---|