.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * VFIO PCI config space virtualization |
---|
3 | 4 | * |
---|
4 | 5 | * Copyright (C) 2012 Red Hat, Inc. All rights reserved. |
---|
5 | 6 | * Author: Alex Williamson <alex.williamson@redhat.com> |
---|
6 | | - * |
---|
7 | | - * This program is free software; you can redistribute it and/or modify |
---|
8 | | - * it under the terms of the GNU General Public License version 2 as |
---|
9 | | - * published by the Free Software Foundation. |
---|
10 | 7 | * |
---|
11 | 8 | * Derived from original vfio: |
---|
12 | 9 | * Copyright 2010 Cisco Systems, Inc. All rights reserved. |
---|
.. | .. |
---|
409 | 406 | * PF SR-IOV capability, there's therefore no need to trigger |
---|
410 | 407 | * faults based on the virtual value. |
---|
411 | 408 | */ |
---|
412 | | - return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY); |
---|
| 409 | + return pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY); |
---|
413 | 410 | } |
---|
414 | 411 | |
---|
415 | 412 | /* |
---|
.. | .. |
---|
426 | 423 | if (pdev->is_virtfn) |
---|
427 | 424 | return; |
---|
428 | 425 | |
---|
429 | | - pr_info("%s: %s reset recovery - restoring bars\n", |
---|
430 | | - __func__, dev_name(&pdev->dev)); |
---|
| 426 | + pci_info(pdev, "%s: reset recovery - restoring BARs\n", __func__); |
---|
431 | 427 | |
---|
432 | 428 | for (i = PCI_BASE_ADDRESS_0; i <= PCI_BASE_ADDRESS_5; i += 4, rbar++) |
---|
433 | 429 | pci_user_write_config_dword(pdev, i, *rbar); |
---|
.. | .. |
---|
468 | 464 | { |
---|
469 | 465 | struct pci_dev *pdev = vdev->pdev; |
---|
470 | 466 | int i; |
---|
471 | | - __le32 *bar; |
---|
| 467 | + __le32 *vbar; |
---|
472 | 468 | u64 mask; |
---|
473 | 469 | |
---|
474 | | - bar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0]; |
---|
| 470 | + if (!vdev->bardirty) |
---|
| 471 | + return; |
---|
475 | 472 | |
---|
476 | | - for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++, bar++) { |
---|
477 | | - if (!pci_resource_start(pdev, i)) { |
---|
478 | | - *bar = 0; /* Unmapped by host = unimplemented to user */ |
---|
| 473 | + vbar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0]; |
---|
| 474 | + |
---|
| 475 | + for (i = 0; i < PCI_STD_NUM_BARS; i++, vbar++) { |
---|
| 476 | + int bar = i + PCI_STD_RESOURCES; |
---|
| 477 | + |
---|
| 478 | + if (!pci_resource_start(pdev, bar)) { |
---|
| 479 | + *vbar = 0; /* Unmapped by host = unimplemented to user */ |
---|
479 | 480 | continue; |
---|
480 | 481 | } |
---|
481 | 482 | |
---|
482 | | - mask = ~(pci_resource_len(pdev, i) - 1); |
---|
| 483 | + mask = ~(pci_resource_len(pdev, bar) - 1); |
---|
483 | 484 | |
---|
484 | | - *bar &= cpu_to_le32((u32)mask); |
---|
485 | | - *bar |= vfio_generate_bar_flags(pdev, i); |
---|
| 485 | + *vbar &= cpu_to_le32((u32)mask); |
---|
| 486 | + *vbar |= vfio_generate_bar_flags(pdev, bar); |
---|
486 | 487 | |
---|
487 | | - if (*bar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) { |
---|
488 | | - bar++; |
---|
489 | | - *bar &= cpu_to_le32((u32)(mask >> 32)); |
---|
| 488 | + if (*vbar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) { |
---|
| 489 | + vbar++; |
---|
| 490 | + *vbar &= cpu_to_le32((u32)(mask >> 32)); |
---|
490 | 491 | i++; |
---|
491 | 492 | } |
---|
492 | 493 | } |
---|
493 | 494 | |
---|
494 | | - bar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS]; |
---|
| 495 | + vbar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS]; |
---|
495 | 496 | |
---|
496 | 497 | /* |
---|
497 | 498 | * NB. REGION_INFO will have reported zero size if we weren't able |
---|
.. | .. |
---|
501 | 502 | if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) { |
---|
502 | 503 | mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1); |
---|
503 | 504 | mask |= PCI_ROM_ADDRESS_ENABLE; |
---|
504 | | - *bar &= cpu_to_le32((u32)mask); |
---|
| 505 | + *vbar &= cpu_to_le32((u32)mask); |
---|
505 | 506 | } else if (pdev->resource[PCI_ROM_RESOURCE].flags & |
---|
506 | 507 | IORESOURCE_ROM_SHADOW) { |
---|
507 | 508 | mask = ~(0x20000 - 1); |
---|
508 | 509 | mask |= PCI_ROM_ADDRESS_ENABLE; |
---|
509 | | - *bar &= cpu_to_le32((u32)mask); |
---|
| 510 | + *vbar &= cpu_to_le32((u32)mask); |
---|
510 | 511 | } else |
---|
511 | | - *bar = 0; |
---|
| 512 | + *vbar = 0; |
---|
512 | 513 | |
---|
513 | 514 | vdev->bardirty = false; |
---|
514 | 515 | } |
---|
.. | .. |
---|
522 | 523 | |
---|
523 | 524 | count = vfio_default_config_read(vdev, pos, count, perm, offset, val); |
---|
524 | 525 | |
---|
525 | | - /* Mask in virtual memory enable for SR-IOV devices */ |
---|
526 | | - if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) { |
---|
| 526 | + /* Mask in virtual memory enable */ |
---|
| 527 | + if (offset == PCI_COMMAND && vdev->pdev->no_command_memory) { |
---|
527 | 528 | u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); |
---|
528 | 529 | u32 tmp_val = le32_to_cpu(*val); |
---|
529 | 530 | |
---|
.. | .. |
---|
591 | 592 | * shows it disabled (phys_mem/io), then the device has |
---|
592 | 593 | * undergone some kind of backdoor reset and needs to be |
---|
593 | 594 | * restored before we allow it to enable the bars. |
---|
594 | | - * SR-IOV devices will trigger this, but we catch them later |
---|
| 595 | + * SR-IOV devices will trigger this - for mem enable let's |
---|
| 596 | + * catch this now and for io enable it will be caught later |
---|
595 | 597 | */ |
---|
596 | | - if ((new_mem && virt_mem && !phys_mem) || |
---|
| 598 | + if ((new_mem && virt_mem && !phys_mem && |
---|
| 599 | + !pdev->no_command_memory) || |
---|
597 | 600 | (new_io && virt_io && !phys_io) || |
---|
598 | 601 | vfio_need_bar_restore(vdev)) |
---|
599 | 602 | vfio_bar_restore(vdev); |
---|
.. | .. |
---|
715 | 718 | break; |
---|
716 | 719 | } |
---|
717 | 720 | |
---|
718 | | - pci_set_power_state(vdev->pdev, state); |
---|
| 721 | + vfio_pci_set_power_state(vdev, state); |
---|
719 | 722 | } |
---|
720 | 723 | |
---|
721 | 724 | return count; |
---|
.. | .. |
---|
1328 | 1331 | else |
---|
1329 | 1332 | return PCI_SATA_SIZEOF_SHORT; |
---|
1330 | 1333 | default: |
---|
1331 | | - pr_warn("%s: %s unknown length for pci cap 0x%x@0x%x\n", |
---|
1332 | | - dev_name(&pdev->dev), __func__, cap, pos); |
---|
| 1334 | + pci_warn(pdev, "%s: unknown length for PCI cap %#x@%#x\n", |
---|
| 1335 | + __func__, cap, pos); |
---|
1333 | 1336 | } |
---|
1334 | 1337 | |
---|
1335 | 1338 | return 0; |
---|
.. | .. |
---|
1402 | 1405 | } |
---|
1403 | 1406 | return PCI_TPH_BASE_SIZEOF; |
---|
1404 | 1407 | default: |
---|
1405 | | - pr_warn("%s: %s unknown length for pci ecap 0x%x@0x%x\n", |
---|
1406 | | - dev_name(&pdev->dev), __func__, ecap, epos); |
---|
| 1408 | + pci_warn(pdev, "%s: unknown length for PCI ecap %#x@%#x\n", |
---|
| 1409 | + __func__, ecap, epos); |
---|
1407 | 1410 | } |
---|
1408 | 1411 | |
---|
1409 | 1412 | return 0; |
---|
.. | .. |
---|
1509 | 1512 | } |
---|
1510 | 1513 | |
---|
1511 | 1514 | if (!len) { |
---|
1512 | | - pr_info("%s: %s hiding cap 0x%x\n", |
---|
1513 | | - __func__, dev_name(&pdev->dev), cap); |
---|
| 1515 | + pci_info(pdev, "%s: hiding cap %#x@%#x\n", __func__, |
---|
| 1516 | + cap, pos); |
---|
1514 | 1517 | *prev = next; |
---|
1515 | 1518 | pos = next; |
---|
1516 | 1519 | continue; |
---|
.. | .. |
---|
1521 | 1524 | if (likely(map[pos + i] == PCI_CAP_ID_INVALID)) |
---|
1522 | 1525 | continue; |
---|
1523 | 1526 | |
---|
1524 | | - pr_warn("%s: %s pci config conflict @0x%x, was cap 0x%x now cap 0x%x\n", |
---|
1525 | | - __func__, dev_name(&pdev->dev), |
---|
1526 | | - pos + i, map[pos + i], cap); |
---|
| 1527 | + pci_warn(pdev, "%s: PCI config conflict @%#x, was cap %#x now cap %#x\n", |
---|
| 1528 | + __func__, pos + i, map[pos + i], cap); |
---|
1527 | 1529 | } |
---|
1528 | 1530 | |
---|
1529 | 1531 | BUILD_BUG_ON(PCI_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT); |
---|
.. | .. |
---|
1584 | 1586 | } |
---|
1585 | 1587 | |
---|
1586 | 1588 | if (!len) { |
---|
1587 | | - pr_info("%s: %s hiding ecap 0x%x@0x%x\n", |
---|
1588 | | - __func__, dev_name(&pdev->dev), ecap, epos); |
---|
| 1589 | + pci_info(pdev, "%s: hiding ecap %#x@%#x\n", |
---|
| 1590 | + __func__, ecap, epos); |
---|
1589 | 1591 | |
---|
1590 | 1592 | /* If not the first in the chain, we can skip over it */ |
---|
1591 | 1593 | if (prev) { |
---|
.. | .. |
---|
1607 | 1609 | if (likely(map[epos + i] == PCI_CAP_ID_INVALID)) |
---|
1608 | 1610 | continue; |
---|
1609 | 1611 | |
---|
1610 | | - pr_warn("%s: %s pci config conflict @0x%x, was ecap 0x%x now ecap 0x%x\n", |
---|
1611 | | - __func__, dev_name(&pdev->dev), |
---|
1612 | | - epos + i, map[epos + i], ecap); |
---|
| 1612 | + pci_warn(pdev, "%s: PCI config conflict @%#x, was ecap %#x now ecap %#x\n", |
---|
| 1613 | + __func__, epos + i, map[epos + i], ecap); |
---|
1613 | 1614 | } |
---|
1614 | 1615 | |
---|
1615 | 1616 | /* |
---|
.. | .. |
---|
1738 | 1739 | vconfig[PCI_INTERRUPT_PIN]); |
---|
1739 | 1740 | |
---|
1740 | 1741 | vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ |
---|
1741 | | - |
---|
| 1742 | + } |
---|
| 1743 | + if (pdev->no_command_memory) { |
---|
1742 | 1744 | /* |
---|
1743 | | - * VFs do no implement the memory enable bit of the COMMAND |
---|
1744 | | - * register therefore we'll not have it set in our initial |
---|
1745 | | - * copy of config space after pci_enable_device(). For |
---|
1746 | | - * consistency with PFs, set the virtual enable bit here. |
---|
| 1745 | + * VFs and devices that set pdev->no_command_memory do not |
---|
| 1746 | + * implement the memory enable bit of the COMMAND register |
---|
| 1747 | + * therefore we'll not have it set in our initial copy of |
---|
| 1748 | + * config space after pci_enable_device(). For consistency |
---|
| 1749 | + * with PFs, set the virtual enable bit here. |
---|
1747 | 1750 | */ |
---|
1748 | 1751 | *(__le16 *)&vconfig[PCI_COMMAND] |= |
---|
1749 | 1752 | cpu_to_le16(PCI_COMMAND_MEMORY); |
---|