.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * Copyright (C) 2016, Semihalf |
---|
3 | 4 | * Author: Tomasz Nowicki <tn@semihalf.com> |
---|
4 | | - * |
---|
5 | | - * This program is free software; you can redistribute it and/or modify it |
---|
6 | | - * under the terms and conditions of the GNU General Public License, |
---|
7 | | - * version 2, as published by the Free Software Foundation. |
---|
8 | | - * |
---|
9 | | - * This program is distributed in the hope it will be useful, but WITHOUT |
---|
10 | | - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
---|
11 | | - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
---|
12 | | - * more details. |
---|
13 | 5 | * |
---|
14 | 6 | * This file implements early detection/parsing of I/O mapping |
---|
15 | 7 | * reported to OS through firmware via I/O Remapping Table (IORT) |
---|
.. | .. |
---|
19 | 11 | #define pr_fmt(fmt) "ACPI: IORT: " fmt |
---|
20 | 12 | |
---|
21 | 13 | #include <linux/acpi_iort.h> |
---|
| 14 | +#include <linux/bitfield.h> |
---|
22 | 15 | #include <linux/iommu.h> |
---|
23 | 16 | #include <linux/kernel.h> |
---|
24 | 17 | #include <linux/list.h> |
---|
25 | 18 | #include <linux/pci.h> |
---|
26 | 19 | #include <linux/platform_device.h> |
---|
27 | 20 | #include <linux/slab.h> |
---|
| 21 | +#include <linux/dma-map-ops.h> |
---|
28 | 22 | |
---|
29 | 23 | #define IORT_TYPE_MASK(type) (1 << (type)) |
---|
30 | 24 | #define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP) |
---|
.. | .. |
---|
50 | 44 | * iort_set_fwnode() - Create iort_fwnode and use it to register |
---|
51 | 45 | * iommu data in the iort_fwnode_list |
---|
52 | 46 | * |
---|
53 | | - * @node: IORT table node associated with the IOMMU |
---|
| 47 | + * @iort_node: IORT table node associated with the IOMMU |
---|
54 | 48 | * @fwnode: fwnode associated with the IORT node |
---|
55 | 49 | * |
---|
56 | 50 | * Returns: 0 on success |
---|
.. | .. |
---|
271 | 265 | |
---|
272 | 266 | if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) { |
---|
273 | 267 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; |
---|
274 | | - struct acpi_device *adev = to_acpi_device_node(dev->fwnode); |
---|
| 268 | + struct acpi_device *adev; |
---|
275 | 269 | struct acpi_iort_named_component *ncomp; |
---|
| 270 | + struct device *nc_dev = dev; |
---|
| 271 | + |
---|
| 272 | + /* |
---|
| 273 | + * Walk the device tree to find a device with an |
---|
| 274 | + * ACPI companion; there is no point in scanning |
---|
| 275 | + * IORT for a device matching a named component if |
---|
| 276 | + * the device does not have an ACPI companion to |
---|
| 277 | + * start with. |
---|
| 278 | + */ |
---|
| 279 | + do { |
---|
| 280 | + adev = ACPI_COMPANION(nc_dev); |
---|
| 281 | + if (adev) |
---|
| 282 | + break; |
---|
| 283 | + |
---|
| 284 | + nc_dev = nc_dev->parent; |
---|
| 285 | + } while (nc_dev); |
---|
276 | 286 | |
---|
277 | 287 | if (!adev) |
---|
278 | 288 | goto out; |
---|
279 | 289 | |
---|
280 | 290 | status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf); |
---|
281 | 291 | if (ACPI_FAILURE(status)) { |
---|
282 | | - dev_warn(dev, "Can't get device full path name\n"); |
---|
| 292 | + dev_warn(nc_dev, "Can't get device full path name\n"); |
---|
283 | 293 | goto out; |
---|
284 | 294 | } |
---|
285 | 295 | |
---|
.. | .. |
---|
307 | 317 | } |
---|
308 | 318 | |
---|
309 | 319 | static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, |
---|
310 | | - u32 *rid_out) |
---|
| 320 | + u32 *rid_out, bool check_overlap) |
---|
311 | 321 | { |
---|
312 | 322 | /* Single mapping does not care for input id */ |
---|
313 | 323 | if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { |
---|
.. | .. |
---|
323 | 333 | } |
---|
324 | 334 | |
---|
325 | 335 | if (rid_in < map->input_base || |
---|
326 | | - (rid_in >= map->input_base + map->id_count)) |
---|
| 336 | + (rid_in > map->input_base + map->id_count)) |
---|
327 | 337 | return -ENXIO; |
---|
328 | 338 | |
---|
| 339 | + if (check_overlap) { |
---|
| 340 | + /* |
---|
| 341 | + * We already found a mapping for this input ID at the end of |
---|
| 342 | + * another region. If it coincides with the start of this |
---|
| 343 | + * region, we assume the prior match was due to the off-by-1 |
---|
| 344 | + * issue mentioned below, and allow it to be superseded. |
---|
| 345 | + * Otherwise, things are *really* broken, and we just disregard |
---|
| 346 | + * duplicate matches entirely to retain compatibility. |
---|
| 347 | + */ |
---|
| 348 | + pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n", |
---|
| 349 | + map, rid_in); |
---|
| 350 | + if (rid_in != map->input_base) |
---|
| 351 | + return -ENXIO; |
---|
| 352 | + |
---|
| 353 | + pr_err(FW_BUG "applying workaround.\n"); |
---|
| 354 | + } |
---|
| 355 | + |
---|
329 | 356 | *rid_out = map->output_base + (rid_in - map->input_base); |
---|
| 357 | + |
---|
| 358 | + /* |
---|
| 359 | + * Due to confusion regarding the meaning of the id_count field (which |
---|
| 360 | + * carries the number of IDs *minus 1*), we may have to disregard this |
---|
| 361 | + * match if it is at the end of the range, and overlaps with the start |
---|
| 362 | + * of another one. |
---|
| 363 | + */ |
---|
| 364 | + if (map->id_count > 0 && rid_in == map->input_base + map->id_count) |
---|
| 365 | + return -EAGAIN; |
---|
330 | 366 | return 0; |
---|
331 | 367 | } |
---|
332 | 368 | |
---|
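The range check above changes from `>=` to `>` because the IORT `id_count` field holds the number of IDs in the range minus one, so `input_base + id_count` is the last valid input. A minimal standalone sketch of that single-range translation, using a simplified stand-in for `struct acpi_iort_id_mapping` (the struct below is illustrative only, not the ACPICA layout):

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct acpi_iort_id_mapping, for illustration. */
struct id_mapping {
	uint32_t input_base;
	uint32_t id_count;	/* number of IDs in the range, MINUS one */
	uint32_t output_base;
};

/* Returns 0 and writes *out on a match, -1 otherwise. */
static int map_id(const struct id_mapping *m, uint32_t in, uint32_t *out)
{
	/* id_count is "N - 1", so the last valid input is base + id_count */
	if (in < m->input_base || in > m->input_base + m->id_count)
		return -1;

	*out = m->output_base + (in - m->input_base);
	return 0;
}

int main(void)
{
	/* Inputs 0x100..0x10f (16 IDs, so id_count = 15) map to 0x8000.. */
	struct id_mapping m = { 0x100, 15, 0x8000 };
	uint32_t out;

	if (!map_id(&m, 0x10f, &out))		/* last ID of the range */
		printf("0x10f -> %#x\n", out);	/* prints 0x10f -> 0x800f */

	if (map_id(&m, 0x110, &out))		/* one past the end */
		printf("0x110 -> no match\n");

	return 0;
}
```

Firmware that mistakenly writes the raw ID count instead of count-minus-one makes the last input of one range collide with the first input of the next; the `check_overlap`/`-EAGAIN` handling above exists so the later range wins in exactly that case.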
.. | .. |
---|
356 | 392 | if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { |
---|
357 | 393 | if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || |
---|
358 | 394 | node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX || |
---|
359 | | - node->type == ACPI_IORT_NODE_SMMU_V3) { |
---|
| 395 | + node->type == ACPI_IORT_NODE_SMMU_V3 || |
---|
| 396 | + node->type == ACPI_IORT_NODE_PMCG) { |
---|
360 | 397 | *id_out = map->output_base; |
---|
361 | 398 | return parent; |
---|
362 | 399 | } |
---|
.. | .. |
---|
368 | 405 | static int iort_get_id_mapping_index(struct acpi_iort_node *node) |
---|
369 | 406 | { |
---|
370 | 407 | struct acpi_iort_smmu_v3 *smmu; |
---|
| 408 | + struct acpi_iort_pmcg *pmcg; |
---|
371 | 409 | |
---|
372 | 410 | switch (node->type) { |
---|
373 | 411 | case ACPI_IORT_NODE_SMMU_V3: |
---|
.. | .. |
---|
394 | 432 | } |
---|
395 | 433 | |
---|
396 | 434 | return smmu->id_mapping_index; |
---|
| 435 | + case ACPI_IORT_NODE_PMCG: |
---|
| 436 | + pmcg = (struct acpi_iort_pmcg *)node->node_data; |
---|
| 437 | + if (pmcg->overflow_gsiv || node->mapping_count == 0) |
---|
| 438 | + return -EINVAL; |
---|
| 439 | + |
---|
| 440 | + return 0; |
---|
397 | 441 | default: |
---|
398 | 442 | return -EINVAL; |
---|
399 | 443 | } |
---|
.. | .. |
---|
408 | 452 | /* Parse the ID mapping tree to find specified node type */ |
---|
409 | 453 | while (node) { |
---|
410 | 454 | struct acpi_iort_id_mapping *map; |
---|
411 | | - int i, index; |
---|
| 455 | + int i, index, rc = 0; |
---|
| 456 | + u32 out_ref = 0, map_id = id; |
---|
412 | 457 | |
---|
413 | 458 | if (IORT_TYPE_MASK(node->type) & type_mask) { |
---|
414 | 459 | if (id_out) |
---|
.. | .. |
---|
442 | 487 | if (i == index) |
---|
443 | 488 | continue; |
---|
444 | 489 | |
---|
445 | | - if (!iort_id_map(map, node->type, id, &id)) |
---|
| 490 | + rc = iort_id_map(map, node->type, map_id, &id, out_ref); |
---|
| 491 | + if (!rc) |
---|
446 | 492 | break; |
---|
| 493 | + if (rc == -EAGAIN) |
---|
| 494 | + out_ref = map->output_reference; |
---|
447 | 495 | } |
---|
448 | 496 | |
---|
449 | | - if (i == node->mapping_count) |
---|
| 497 | + if (i == node->mapping_count && !out_ref) |
---|
450 | 498 | goto fail_map; |
---|
451 | 499 | |
---|
452 | 500 | node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, |
---|
453 | | - map->output_reference); |
---|
| 501 | + rc ? out_ref : map->output_reference); |
---|
454 | 502 | } |
---|
455 | 503 | |
---|
456 | 504 | fail_map: |
---|
.. | .. |
---|
503 | 551 | node = iort_get_iort_node(dev->fwnode); |
---|
504 | 552 | if (node) |
---|
505 | 553 | return node; |
---|
506 | | - |
---|
507 | 554 | /* |
---|
508 | 555 | * if not, then it should be a platform device defined in |
---|
509 | 556 | * DSDT/SSDT (with Named Component node in IORT) |
---|
.. | .. |
---|
512 | 559 | iort_match_node_callback, dev); |
---|
513 | 560 | } |
---|
514 | 561 | |
---|
515 | | - /* Find a PCI root bus */ |
---|
516 | 562 | pbus = to_pci_dev(dev)->bus; |
---|
517 | | - while (!pci_is_root_bus(pbus)) |
---|
518 | | - pbus = pbus->parent; |
---|
519 | 563 | |
---|
520 | 564 | return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, |
---|
521 | 565 | iort_match_node_callback, &pbus->dev); |
---|
522 | 566 | } |
---|
523 | 567 | |
---|
524 | 568 | /** |
---|
525 | | - * iort_msi_map_rid() - Map a MSI requester ID for a device |
---|
| 569 | + * iort_msi_map_id() - Map a MSI input ID for a device |
---|
526 | 570 | * @dev: The device for which the mapping is to be done. |
---|
527 | | - * @req_id: The device requester ID. |
---|
| 571 | + * @input_id: The device input ID. |
---|
528 | 572 | * |
---|
529 | | - * Returns: mapped MSI RID on success, input requester ID otherwise |
---|
| 573 | + * Returns: mapped MSI ID on success, input ID otherwise |
---|
530 | 574 | */ |
---|
531 | | -u32 iort_msi_map_rid(struct device *dev, u32 req_id) |
---|
| 575 | +u32 iort_msi_map_id(struct device *dev, u32 input_id) |
---|
532 | 576 | { |
---|
533 | 577 | struct acpi_iort_node *node; |
---|
534 | 578 | u32 dev_id; |
---|
535 | 579 | |
---|
536 | 580 | node = iort_find_dev_node(dev); |
---|
537 | 581 | if (!node) |
---|
538 | | - return req_id; |
---|
| 582 | + return input_id; |
---|
539 | 583 | |
---|
540 | | - iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE); |
---|
| 584 | + iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE); |
---|
541 | 585 | return dev_id; |
---|
542 | 586 | } |
---|
543 | 587 | |
---|
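With the rename from `iort_msi_map_rid()` to `iort_msi_map_id()`, the input is a generic ID rather than strictly a PCI requester ID. A hedged usage sketch for the PCI case; `pci_dev_id()` and `iort_msi_map_id()` are existing interfaces, while `msi_dev_id_example()` is a hypothetical caller:

```c
#include <linux/acpi_iort.h>
#include <linux/pci.h>

/* Hypothetical helper: map a PCI device's requester ID to its ITS DeviceID. */
static u32 msi_dev_id_example(struct pci_dev *pdev)
{
	u32 rid = pci_dev_id(pdev);	/* bus and devfn packed as a RID */

	/* Falls back to the input ID when no IORT mapping exists */
	return iort_msi_map_id(&pdev->dev, rid);
}
```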
.. | .. |
---|
594 | 638 | /** |
---|
595 | 639 | * iort_dev_find_its_id() - Find the ITS identifier for a device |
---|
596 | 640 | * @dev: The device. |
---|
597 | | - * @req_id: Device's requester ID |
---|
| 641 | + * @id: Device's ID |
---|
598 | 642 | * @idx: Index of the ITS identifier list. |
---|
599 | 643 | * @its_id: ITS identifier. |
---|
600 | 644 | * |
---|
601 | 645 | * Returns: 0 on success, appropriate error value otherwise |
---|
602 | 646 | */ |
---|
603 | | -static int iort_dev_find_its_id(struct device *dev, u32 req_id, |
---|
| 647 | +static int iort_dev_find_its_id(struct device *dev, u32 id, |
---|
604 | 648 | unsigned int idx, int *its_id) |
---|
605 | 649 | { |
---|
606 | 650 | struct acpi_iort_its_group *its; |
---|
.. | .. |
---|
610 | 654 | if (!node) |
---|
611 | 655 | return -ENXIO; |
---|
612 | 656 | |
---|
613 | | - node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE); |
---|
| 657 | + node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE); |
---|
614 | 658 | if (!node) |
---|
615 | 659 | return -ENXIO; |
---|
616 | 660 | |
---|
.. | .. |
---|
629 | 673 | /** |
---|
630 | 674 | * iort_get_device_domain() - Find MSI domain related to a device |
---|
631 | 675 | * @dev: The device. |
---|
632 | | - * @req_id: Requester ID for the device. |
---|
| 676 | + * @id: Requester ID for the device. |
---|
| 677 | + * @bus_token: irq domain bus token. |
---|
633 | 678 | * |
---|
634 | 679 | * Returns: the MSI domain for this device, NULL otherwise |
---|
635 | 680 | */ |
---|
636 | | -struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id) |
---|
| 681 | +struct irq_domain *iort_get_device_domain(struct device *dev, u32 id, |
---|
| 682 | + enum irq_domain_bus_token bus_token) |
---|
637 | 683 | { |
---|
638 | 684 | struct fwnode_handle *handle; |
---|
639 | 685 | int its_id; |
---|
640 | 686 | |
---|
641 | | - if (iort_dev_find_its_id(dev, req_id, 0, &its_id)) |
---|
| 687 | + if (iort_dev_find_its_id(dev, id, 0, &its_id)) |
---|
642 | 688 | return NULL; |
---|
643 | 689 | |
---|
644 | 690 | handle = iort_find_domain_token(its_id); |
---|
645 | 691 | if (!handle) |
---|
646 | 692 | return NULL; |
---|
647 | 693 | |
---|
648 | | - return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI); |
---|
| 694 | + return irq_find_matching_fwnode(handle, bus_token); |
---|
649 | 695 | } |
---|
650 | 696 | |
---|
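Adding the `bus_token` argument lets the same ITS lookup serve both PCI and platform MSIs instead of hard-coding `DOMAIN_BUS_PCI_MSI`. A sketch of how callers might pick the token; the two wrapper names are hypothetical, the bus token values are real `irq_domain_bus_token` entries:

```c
#include <linux/acpi_iort.h>
#include <linux/irqdomain.h>

/* Hypothetical wrappers showing the two bus tokens callers would pass. */
static struct irq_domain *pci_msi_domain_example(struct device *dev, u32 rid)
{
	return iort_get_device_domain(dev, rid, DOMAIN_BUS_PCI_MSI);
}

static struct irq_domain *platform_msi_domain_example(struct device *dev, u32 id)
{
	return iort_get_device_domain(dev, id, DOMAIN_BUS_PLATFORM_MSI);
}
```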
651 | 697 | static void iort_set_device_domain(struct device *dev, |
---|
.. | .. |
---|
741 | 787 | dev_set_msi_domain(dev, msi_domain); |
---|
742 | 788 | } |
---|
743 | 789 | |
---|
744 | | -static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias, |
---|
745 | | - void *data) |
---|
746 | | -{ |
---|
747 | | - u32 *rid = data; |
---|
748 | | - |
---|
749 | | - *rid = alias; |
---|
750 | | - return 0; |
---|
751 | | -} |
---|
752 | | - |
---|
753 | | -static int arm_smmu_iort_xlate(struct device *dev, u32 streamid, |
---|
754 | | - struct fwnode_handle *fwnode, |
---|
755 | | - const struct iommu_ops *ops) |
---|
756 | | -{ |
---|
757 | | - int ret = iommu_fwspec_init(dev, fwnode, ops); |
---|
758 | | - |
---|
759 | | - if (!ret) |
---|
760 | | - ret = iommu_fwspec_add_ids(dev, &streamid, 1); |
---|
761 | | - |
---|
762 | | - return ret; |
---|
763 | | -} |
---|
764 | | - |
---|
765 | | -static inline bool iort_iommu_driver_enabled(u8 type) |
---|
766 | | -{ |
---|
767 | | - switch (type) { |
---|
768 | | - case ACPI_IORT_NODE_SMMU_V3: |
---|
769 | | - return IS_BUILTIN(CONFIG_ARM_SMMU_V3); |
---|
770 | | - case ACPI_IORT_NODE_SMMU: |
---|
771 | | - return IS_BUILTIN(CONFIG_ARM_SMMU); |
---|
772 | | - default: |
---|
773 | | - pr_warn("IORT node type %u does not describe an SMMU\n", type); |
---|
774 | | - return false; |
---|
775 | | - } |
---|
776 | | -} |
---|
777 | | - |
---|
778 | 790 | #ifdef CONFIG_IOMMU_API |
---|
779 | 791 | static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev) |
---|
780 | 792 | { |
---|
781 | 793 | struct acpi_iort_node *iommu; |
---|
782 | | - struct iommu_fwspec *fwspec = dev->iommu_fwspec; |
---|
| 794 | + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
---|
783 | 795 | |
---|
784 | 796 | iommu = iort_get_iort_node(fwspec->iommu_fwnode); |
---|
785 | 797 | |
---|
.. | .. |
---|
794 | 806 | return NULL; |
---|
795 | 807 | } |
---|
796 | 808 | |
---|
797 | | -static inline const struct iommu_ops *iort_fwspec_iommu_ops( |
---|
798 | | - struct iommu_fwspec *fwspec) |
---|
| 809 | +static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev) |
---|
799 | 810 | { |
---|
| 811 | + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
---|
| 812 | + |
---|
800 | 813 | return (fwspec && fwspec->ops) ? fwspec->ops : NULL; |
---|
801 | 814 | } |
---|
802 | 815 | |
---|
803 | | -static inline int iort_add_device_replay(const struct iommu_ops *ops, |
---|
804 | | - struct device *dev) |
---|
| 816 | +static inline int iort_add_device_replay(struct device *dev) |
---|
805 | 817 | { |
---|
806 | 818 | int err = 0; |
---|
807 | 819 | |
---|
808 | | - if (ops->add_device && dev->bus && !dev->iommu_group) |
---|
809 | | - err = ops->add_device(dev); |
---|
| 820 | + if (dev->bus && !device_iommu_mapped(dev)) |
---|
| 821 | + err = iommu_probe_device(dev); |
---|
810 | 822 | |
---|
811 | 823 | return err; |
---|
812 | 824 | } |
---|
.. | .. |
---|
824 | 836 | */ |
---|
825 | 837 | int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) |
---|
826 | 838 | { |
---|
| 839 | + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
---|
827 | 840 | struct acpi_iort_its_group *its; |
---|
828 | 841 | struct acpi_iort_node *iommu_node, *its_node = NULL; |
---|
829 | 842 | int i, resv = 0; |
---|
.. | .. |
---|
841 | 854 | * a given PCI or named component may map IDs to. |
---|
842 | 855 | */ |
---|
843 | 856 | |
---|
844 | | - for (i = 0; i < dev->iommu_fwspec->num_ids; i++) { |
---|
| 857 | + for (i = 0; i < fwspec->num_ids; i++) { |
---|
845 | 858 | its_node = iort_node_map_id(iommu_node, |
---|
846 | | - dev->iommu_fwspec->ids[i], |
---|
| 859 | + fwspec->ids[i], |
---|
847 | 860 | NULL, IORT_MSI_TYPE); |
---|
848 | 861 | if (its_node) |
---|
849 | 862 | break; |
---|
.. | .. |
---|
873 | 886 | |
---|
874 | 887 | return (resv == its->its_count) ? resv : -ENODEV; |
---|
875 | 888 | } |
---|
876 | | -#else |
---|
877 | | -static inline const struct iommu_ops *iort_fwspec_iommu_ops( |
---|
878 | | - struct iommu_fwspec *fwspec) |
---|
879 | | -{ return NULL; } |
---|
880 | | -static inline int iort_add_device_replay(const struct iommu_ops *ops, |
---|
881 | | - struct device *dev) |
---|
882 | | -{ return 0; } |
---|
883 | | -int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) |
---|
884 | | -{ return 0; } |
---|
885 | | -#endif |
---|
| 889 | + |
---|
| 890 | +static inline bool iort_iommu_driver_enabled(u8 type) |
---|
| 891 | +{ |
---|
| 892 | + switch (type) { |
---|
| 893 | + case ACPI_IORT_NODE_SMMU_V3: |
---|
| 894 | + return IS_ENABLED(CONFIG_ARM_SMMU_V3); |
---|
| 895 | + case ACPI_IORT_NODE_SMMU: |
---|
| 896 | + return IS_ENABLED(CONFIG_ARM_SMMU); |
---|
| 897 | + default: |
---|
| 898 | + pr_warn("IORT node type %u does not describe an SMMU\n", type); |
---|
| 899 | + return false; |
---|
| 900 | + } |
---|
| 901 | +} |
---|
| 902 | + |
---|
| 903 | +static int arm_smmu_iort_xlate(struct device *dev, u32 streamid, |
---|
| 904 | + struct fwnode_handle *fwnode, |
---|
| 905 | + const struct iommu_ops *ops) |
---|
| 906 | +{ |
---|
| 907 | + int ret = iommu_fwspec_init(dev, fwnode, ops); |
---|
| 908 | + |
---|
| 909 | + if (!ret) |
---|
| 910 | + ret = iommu_fwspec_add_ids(dev, &streamid, 1); |
---|
| 911 | + |
---|
| 912 | + return ret; |
---|
| 913 | +} |
---|
| 914 | + |
---|
| 915 | +static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node) |
---|
| 916 | +{ |
---|
| 917 | + struct acpi_iort_root_complex *pci_rc; |
---|
| 918 | + |
---|
| 919 | + pci_rc = (struct acpi_iort_root_complex *)node->node_data; |
---|
| 920 | + return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED; |
---|
| 921 | +} |
---|
886 | 922 | |
---|
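`iort_pci_rc_supports_ats()` feeds the `IOMMU_FWSPEC_PCI_RC_ATS` flag set further down in the configure path. A hedged sketch of how an IOMMU driver could combine that firmware-side flag with the endpoint's own ATS capability; `ats_usable_example()` is a made-up name, while `dev_iommu_fwspec_get()`, the flag and `pci_ats_supported()` are existing interfaces:

```c
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>

/* Hypothetical check: both the root complex and the endpoint must agree. */
static bool ats_usable_example(struct pci_dev *pdev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(&pdev->dev);

	return fwspec && (fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS) &&
	       pci_ats_supported(pdev);
}
```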
887 | 923 | static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node, |
---|
888 | 924 | u32 streamid) |
---|
.. | .. |
---|
929 | 965 | return iort_iommu_xlate(info->dev, parent, streamid); |
---|
930 | 966 | } |
---|
931 | 967 | |
---|
| 968 | +static void iort_named_component_init(struct device *dev, |
---|
| 969 | + struct acpi_iort_node *node) |
---|
| 970 | +{ |
---|
| 971 | + struct acpi_iort_named_component *nc; |
---|
| 972 | + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
---|
| 973 | + |
---|
| 974 | + if (!fwspec) |
---|
| 975 | + return; |
---|
| 976 | + |
---|
| 977 | + nc = (struct acpi_iort_named_component *)node->node_data; |
---|
| 978 | + fwspec->num_pasid_bits = FIELD_GET(ACPI_IORT_NC_PASID_BITS, |
---|
| 979 | + nc->node_flags); |
---|
| 980 | +} |
---|
| 981 | + |
---|
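The new `<linux/bitfield.h>` include exists for this `FIELD_GET()` call, which extracts the PASID-bits field from `node_flags` relative to a mask, with no hand-written shifting. A sketch of the pattern with a stand-in mask; `EXAMPLE_PASID_BITS` below is an assumption chosen for illustration, the driver itself uses the ACPICA-provided `ACPI_IORT_NC_PASID_BITS`:

```c
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Stand-in mask covering bits [5:1]; illustrative only. */
#define EXAMPLE_PASID_BITS	GENMASK(5, 1)

static u32 example_pasid_bits(u32 node_flags)
{
	/* FIELD_GET() masks and shifts according to the mask's position */
	return FIELD_GET(EXAMPLE_PASID_BITS, node_flags);
}
```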
| 982 | +static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node) |
---|
| 983 | +{ |
---|
| 984 | + struct acpi_iort_node *parent; |
---|
| 985 | + int err = -ENODEV, i = 0; |
---|
| 986 | + u32 streamid = 0; |
---|
| 987 | + |
---|
| 988 | + do { |
---|
| 989 | + |
---|
| 990 | + parent = iort_node_map_platform_id(node, &streamid, |
---|
| 991 | + IORT_IOMMU_TYPE, |
---|
| 992 | + i++); |
---|
| 993 | + |
---|
| 994 | + if (parent) |
---|
| 995 | + err = iort_iommu_xlate(dev, parent, streamid); |
---|
| 996 | + } while (parent && !err); |
---|
| 997 | + |
---|
| 998 | + return err; |
---|
| 999 | +} |
---|
| 1000 | + |
---|
| 1001 | +static int iort_nc_iommu_map_id(struct device *dev, |
---|
| 1002 | + struct acpi_iort_node *node, |
---|
| 1003 | + const u32 *in_id) |
---|
| 1004 | +{ |
---|
| 1005 | + struct acpi_iort_node *parent; |
---|
| 1006 | + u32 streamid; |
---|
| 1007 | + |
---|
| 1008 | + parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE); |
---|
| 1009 | + if (parent) |
---|
| 1010 | + return iort_iommu_xlate(dev, parent, streamid); |
---|
| 1011 | + |
---|
| 1012 | + return -ENODEV; |
---|
| 1013 | +} |
---|
| 1014 | + |
---|
| 1015 | + |
---|
| 1016 | +/** |
---|
| 1017 | + * iort_iommu_configure_id - Set-up IOMMU configuration for a device. |
---|
| 1018 | + * |
---|
| 1019 | + * @dev: device to configure |
---|
| 1020 | + * @id_in: optional input id const value pointer |
---|
| 1021 | + * |
---|
| 1022 | + * Returns: iommu_ops pointer on configuration success |
---|
| 1023 | + * NULL on configuration failure |
---|
| 1024 | + */ |
---|
| 1025 | +const struct iommu_ops *iort_iommu_configure_id(struct device *dev, |
---|
| 1026 | + const u32 *id_in) |
---|
| 1027 | +{ |
---|
| 1028 | + struct acpi_iort_node *node; |
---|
| 1029 | + const struct iommu_ops *ops; |
---|
| 1030 | + int err = -ENODEV; |
---|
| 1031 | + |
---|
| 1032 | + /* |
---|
| 1033 | + * If we already translated the fwspec there |
---|
| 1034 | + * is nothing left to do, return the iommu_ops. |
---|
| 1035 | + */ |
---|
| 1036 | + ops = iort_fwspec_iommu_ops(dev); |
---|
| 1037 | + if (ops) |
---|
| 1038 | + return ops; |
---|
| 1039 | + |
---|
| 1040 | + if (dev_is_pci(dev)) { |
---|
| 1041 | + struct iommu_fwspec *fwspec; |
---|
| 1042 | + struct pci_bus *bus = to_pci_dev(dev)->bus; |
---|
| 1043 | + struct iort_pci_alias_info info = { .dev = dev }; |
---|
| 1044 | + |
---|
| 1045 | + node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, |
---|
| 1046 | + iort_match_node_callback, &bus->dev); |
---|
| 1047 | + if (!node) |
---|
| 1048 | + return NULL; |
---|
| 1049 | + |
---|
| 1050 | + info.node = node; |
---|
| 1051 | + err = pci_for_each_dma_alias(to_pci_dev(dev), |
---|
| 1052 | + iort_pci_iommu_init, &info); |
---|
| 1053 | + |
---|
| 1054 | + fwspec = dev_iommu_fwspec_get(dev); |
---|
| 1055 | + if (fwspec && iort_pci_rc_supports_ats(node)) |
---|
| 1056 | + fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS; |
---|
| 1057 | + } else { |
---|
| 1058 | + node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, |
---|
| 1059 | + iort_match_node_callback, dev); |
---|
| 1060 | + if (!node) |
---|
| 1061 | + return NULL; |
---|
| 1062 | + |
---|
| 1063 | + err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) : |
---|
| 1064 | + iort_nc_iommu_map(dev, node); |
---|
| 1065 | + |
---|
| 1066 | + if (!err) |
---|
| 1067 | + iort_named_component_init(dev, node); |
---|
| 1068 | + } |
---|
| 1069 | + |
---|
| 1070 | + /* |
---|
| 1071 | + * If we have reason to believe the IOMMU driver missed the initial |
---|
| 1072 | + * add_device callback for dev, replay it to get things in order. |
---|
| 1073 | + */ |
---|
| 1074 | + if (!err) { |
---|
| 1075 | + ops = iort_fwspec_iommu_ops(dev); |
---|
| 1076 | + err = iort_add_device_replay(dev); |
---|
| 1077 | + } |
---|
| 1078 | + |
---|
| 1079 | + /* Ignore all other errors apart from EPROBE_DEFER */ |
---|
| 1080 | + if (err == -EPROBE_DEFER) { |
---|
| 1081 | + ops = ERR_PTR(err); |
---|
| 1082 | + } else if (err) { |
---|
| 1083 | + dev_dbg(dev, "Adding to IOMMU failed: %d\n", err); |
---|
| 1084 | + ops = NULL; |
---|
| 1085 | + } |
---|
| 1086 | + |
---|
| 1087 | + return ops; |
---|
| 1088 | +} |
---|
| 1089 | + |
---|
| 1090 | +#else |
---|
| 1091 | +int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) |
---|
| 1092 | +{ return 0; } |
---|
| 1093 | +const struct iommu_ops *iort_iommu_configure_id(struct device *dev, |
---|
| 1094 | + const u32 *input_id) |
---|
| 1095 | +{ return NULL; } |
---|
| 1096 | +#endif |
---|
| 1097 | + |
---|
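`iort_iommu_configure_id()` above has a three-way return contract: valid ops, `NULL` when there is no usable translation, or `ERR_PTR(-EPROBE_DEFER)` when the translation succeeded but the IOMMU driver has not probed yet. A hedged sketch of a caller honouring that contract; `configure_iommu_example()` is a hypothetical name:

```c
#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>

/* Hypothetical caller handling all three possible return values. */
static int configure_iommu_example(struct device *dev, const u32 *id_in)
{
	const struct iommu_ops *ops = iort_iommu_configure_id(dev, id_in);

	if (IS_ERR(ops))
		return PTR_ERR(ops);	/* typically -EPROBE_DEFER: retry later */
	if (!ops)
		return 0;		/* no IOMMU translation for this device */

	dev_dbg(dev, "IOMMU ops %ps configured\n", ops);
	return 0;
}
```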
932 | 1098 | static int nc_dma_get_range(struct device *dev, u64 *size) |
---|
933 | 1099 | { |
---|
934 | 1100 | struct acpi_iort_node *node; |
---|
.. | .. |
---|
940 | 1106 | return -ENODEV; |
---|
941 | 1107 | |
---|
942 | 1108 | ncomp = (struct acpi_iort_named_component *)node->node_data; |
---|
| 1109 | + |
---|
| 1110 | + if (!ncomp->memory_address_limit) { |
---|
| 1111 | + pr_warn(FW_BUG "Named component missing memory address limit\n"); |
---|
| 1112 | + return -EINVAL; |
---|
| 1113 | + } |
---|
943 | 1114 | |
---|
944 | 1115 | *size = ncomp->memory_address_limit >= 64 ? U64_MAX : |
---|
945 | 1116 | 1ULL<<ncomp->memory_address_limit; |
---|
.. | .. |
---|
960 | 1131 | |
---|
961 | 1132 | rc = (struct acpi_iort_root_complex *)node->node_data; |
---|
962 | 1133 | |
---|
| 1134 | + if (!rc->memory_address_limit) { |
---|
| 1135 | + pr_warn(FW_BUG "Root complex missing memory address limit\n"); |
---|
| 1136 | + return -EINVAL; |
---|
| 1137 | + } |
---|
| 1138 | + |
---|
963 | 1139 | *size = rc->memory_address_limit >= 64 ? U64_MAX : |
---|
964 | 1140 | 1ULL<<rc->memory_address_limit; |
---|
965 | 1141 | |
---|
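Both `nc_dma_get_range()` and `rc_dma_get_range()` convert the IORT memory address limit (a width in bits) into a byte size, clamping at 64 to avoid the undefined `1ULL << 64`, and the new checks above reject a zero limit as a firmware bug. A standalone worked example of the conversion:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirror of the limit-to-size conversion used by the *_dma_get_range() helpers. */
static uint64_t limit_to_size(unsigned int memory_address_limit)
{
	/* 1ULL << 64 is undefined, so a width of 64 or more means "no limit" */
	return memory_address_limit >= 64 ? UINT64_MAX
					  : 1ULL << memory_address_limit;
}

int main(void)
{
	printf("32 -> %#llx\n", (unsigned long long)limit_to_size(32)); /* 4 GiB */
	printf("48 -> %#llx\n", (unsigned long long)limit_to_size(48)); /* 256 TiB */
	printf("64 -> %#llx\n", (unsigned long long)limit_to_size(64)); /* no limit */
	return 0;
}
```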
.. | .. |
---|
971 | 1147 | * |
---|
972 | 1148 | * @dev: device to configure |
---|
973 | 1149 | * @dma_addr: device DMA address result pointer |
---|
974 | | - * @size: DMA range size result pointer |
---|
| 1150 | + * @dma_size: DMA range size result pointer |
---|
975 | 1151 | */ |
---|
976 | 1152 | void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) |
---|
977 | 1153 | { |
---|
978 | | - u64 mask, dmaaddr = 0, size = 0, offset = 0; |
---|
979 | | - int ret, msb; |
---|
| 1154 | + u64 end, mask, dmaaddr = 0, size = 0, offset = 0; |
---|
| 1155 | + int ret; |
---|
980 | 1156 | |
---|
981 | 1157 | /* |
---|
982 | 1158 | * If @dev is expected to be DMA-capable then the bus code that created |
---|
.. | .. |
---|
994 | 1170 | else |
---|
995 | 1171 | size = 1ULL << 32; |
---|
996 | 1172 | |
---|
997 | | - if (dev_is_pci(dev)) { |
---|
998 | | - ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size); |
---|
999 | | - if (ret == -ENODEV) |
---|
1000 | | - ret = rc_dma_get_range(dev, &size); |
---|
1001 | | - } else { |
---|
1002 | | - ret = nc_dma_get_range(dev, &size); |
---|
1003 | | - } |
---|
| 1173 | + ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size); |
---|
| 1174 | + if (ret == -ENODEV) |
---|
| 1175 | + ret = dev_is_pci(dev) ? rc_dma_get_range(dev, &size) |
---|
| 1176 | + : nc_dma_get_range(dev, &size); |
---|
1004 | 1177 | |
---|
1005 | 1178 | if (!ret) { |
---|
1006 | | - msb = fls64(dmaaddr + size - 1); |
---|
1007 | 1179 | /* |
---|
1008 | | - * Round-up to the power-of-two mask or set |
---|
1009 | | - * the mask to the whole 64-bit address space |
---|
1010 | | - * in case the DMA region covers the full |
---|
1011 | | - * memory window. |
---|
| 1180 | + * Limit coherent and dma mask based on size retrieved from |
---|
| 1181 | + * firmware. |
---|
1012 | 1182 | */ |
---|
1013 | | - mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1; |
---|
1014 | | - /* |
---|
1015 | | - * Limit coherent and dma mask based on size |
---|
1016 | | - * retrieved from firmware. |
---|
1017 | | - */ |
---|
1018 | | - dev->bus_dma_mask = mask; |
---|
1019 | | - dev->coherent_dma_mask = mask; |
---|
1020 | | - *dev->dma_mask = mask; |
---|
| 1183 | + end = dmaaddr + size - 1; |
---|
| 1184 | + mask = DMA_BIT_MASK(ilog2(end) + 1); |
---|
| 1185 | + dev->bus_dma_limit = end; |
---|
| 1186 | + dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask); |
---|
| 1187 | + *dev->dma_mask = min(*dev->dma_mask, mask); |
---|
1021 | 1188 | } |
---|
1022 | 1189 | |
---|
1023 | 1190 | *dma_addr = dmaaddr; |
---|
1024 | 1191 | *dma_size = size; |
---|
1025 | 1192 | |
---|
1026 | | - dev->dma_pfn_offset = PFN_DOWN(offset); |
---|
1027 | | - dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset); |
---|
1028 | | -} |
---|
| 1193 | + ret = dma_direct_set_offset(dev, dmaaddr + offset, dmaaddr, size); |
---|
1029 | 1194 | |
---|
1030 | | -/** |
---|
1031 | | - * iort_iommu_configure - Set-up IOMMU configuration for a device. |
---|
1032 | | - * |
---|
1033 | | - * @dev: device to configure |
---|
1034 | | - * |
---|
1035 | | - * Returns: iommu_ops pointer on configuration success |
---|
1036 | | - * NULL on configuration failure |
---|
1037 | | - */ |
---|
1038 | | -const struct iommu_ops *iort_iommu_configure(struct device *dev) |
---|
1039 | | -{ |
---|
1040 | | - struct acpi_iort_node *node, *parent; |
---|
1041 | | - const struct iommu_ops *ops; |
---|
1042 | | - u32 streamid = 0; |
---|
1043 | | - int err = -ENODEV; |
---|
1044 | | - |
---|
1045 | | - /* |
---|
1046 | | - * If we already translated the fwspec there |
---|
1047 | | - * is nothing left to do, return the iommu_ops. |
---|
1048 | | - */ |
---|
1049 | | - ops = iort_fwspec_iommu_ops(dev->iommu_fwspec); |
---|
1050 | | - if (ops) |
---|
1051 | | - return ops; |
---|
1052 | | - |
---|
1053 | | - if (dev_is_pci(dev)) { |
---|
1054 | | - struct pci_bus *bus = to_pci_dev(dev)->bus; |
---|
1055 | | - struct iort_pci_alias_info info = { .dev = dev }; |
---|
1056 | | - |
---|
1057 | | - node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, |
---|
1058 | | - iort_match_node_callback, &bus->dev); |
---|
1059 | | - if (!node) |
---|
1060 | | - return NULL; |
---|
1061 | | - |
---|
1062 | | - info.node = node; |
---|
1063 | | - err = pci_for_each_dma_alias(to_pci_dev(dev), |
---|
1064 | | - iort_pci_iommu_init, &info); |
---|
1065 | | - } else { |
---|
1066 | | - int i = 0; |
---|
1067 | | - |
---|
1068 | | - node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, |
---|
1069 | | - iort_match_node_callback, dev); |
---|
1070 | | - if (!node) |
---|
1071 | | - return NULL; |
---|
1072 | | - |
---|
1073 | | - do { |
---|
1074 | | - parent = iort_node_map_platform_id(node, &streamid, |
---|
1075 | | - IORT_IOMMU_TYPE, |
---|
1076 | | - i++); |
---|
1077 | | - |
---|
1078 | | - if (parent) |
---|
1079 | | - err = iort_iommu_xlate(dev, parent, streamid); |
---|
1080 | | - } while (parent && !err); |
---|
1081 | | - } |
---|
1082 | | - |
---|
1083 | | - /* |
---|
1084 | | - * If we have reason to believe the IOMMU driver missed the initial |
---|
1085 | | - * add_device callback for dev, replay it to get things in order. |
---|
1086 | | - */ |
---|
1087 | | - if (!err) { |
---|
1088 | | - ops = iort_fwspec_iommu_ops(dev->iommu_fwspec); |
---|
1089 | | - err = iort_add_device_replay(ops, dev); |
---|
1090 | | - } |
---|
1091 | | - |
---|
1092 | | - /* Ignore all other errors apart from EPROBE_DEFER */ |
---|
1093 | | - if (err == -EPROBE_DEFER) { |
---|
1094 | | - ops = ERR_PTR(err); |
---|
1095 | | - } else if (err) { |
---|
1096 | | - dev_dbg(dev, "Adding to IOMMU failed: %d\n", err); |
---|
1097 | | - ops = NULL; |
---|
1098 | | - } |
---|
1099 | | - |
---|
1100 | | - return ops; |
---|
| 1195 | + dev_dbg(dev, "dma_offset(%#08llx)%s\n", offset, ret ? " failed!" : ""); |
---|
1101 | 1196 | } |
---|
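The rewritten mask logic above computes the highest reachable address, `end = dmaaddr + size - 1`, sizes a mask with `ilog2(end) + 1` bits, and then only tightens the existing masks via `min()` instead of overwriting them. A standalone illustration of the arithmetic; in the kernel `DMA_BIT_MASK()` comes from `<linux/dma-mapping.h>` and `ilog2()` from `<linux/log2.h>`, so the two small helpers below only mirror them for the example:

```c
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's DMA_BIT_MASK() and ilog2(). */
static uint64_t dma_bit_mask(unsigned int n)
{
	return n >= 64 ? UINT64_MAX : (1ULL << n) - 1;
}

static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;	/* floor(log2(v)) for v >= 1 */
}

int main(void)
{
	uint64_t dmaaddr = 0, size = 1ULL << 32;	/* a 4 GiB window at 0 */
	uint64_t end = dmaaddr + size - 1;		/* 0xffffffff */
	uint64_t mask = dma_bit_mask(ilog2_u64(end) + 1);

	/* end = 0xffffffff, ilog2(end) = 31, so mask = DMA_BIT_MASK(32) */
	printf("end = %#llx, mask = %#llx\n",
	       (unsigned long long)end, (unsigned long long)mask);
	return 0;
}
```

A window that is not a power of two still gets a covering mask: for `end = 0x17fffffff` the result is `DMA_BIT_MASK(33)`, the smallest all-ones mask not below `end`.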
1102 | 1197 | |
---|
1103 | 1198 | static void __init acpi_iort_register_irq(int hwirq, const char *name, |
---|
.. | .. |
---|
1217 | 1312 | } |
---|
1218 | 1313 | } |
---|
1219 | 1314 | |
---|
1220 | | -static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node) |
---|
| 1315 | +static void __init arm_smmu_v3_dma_configure(struct device *dev, |
---|
| 1316 | + struct acpi_iort_node *node) |
---|
1221 | 1317 | { |
---|
1222 | 1318 | struct acpi_iort_smmu_v3 *smmu; |
---|
| 1319 | + enum dev_dma_attr attr; |
---|
1223 | 1320 | |
---|
1224 | 1321 | /* Retrieve SMMUv3 specific data */ |
---|
1225 | 1322 | smmu = (struct acpi_iort_smmu_v3 *)node->node_data; |
---|
1226 | 1323 | |
---|
1227 | | - return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE; |
---|
| 1324 | + attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ? |
---|
| 1325 | + DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; |
---|
| 1326 | + |
---|
| 1327 | + /* We expect the dma masks to be equivalent for all SMMUv3 set-ups */ |
---|
| 1328 | + dev->dma_mask = &dev->coherent_dma_mask; |
---|
| 1329 | + |
---|
| 1330 | + /* Configure DMA for the page table walker */ |
---|
| 1331 | + acpi_dma_configure(dev, attr); |
---|
1228 | 1332 | } |
---|
1229 | 1333 | |
---|
1230 | 1334 | #if defined(CONFIG_ACPI_NUMA) |
---|
.. | .. |
---|
1238 | 1342 | |
---|
1239 | 1343 | smmu = (struct acpi_iort_smmu_v3 *)node->node_data; |
---|
1240 | 1344 | if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { |
---|
1241 | | - int node = acpi_map_pxm_to_node(smmu->pxm); |
---|
| 1345 | + int dev_node = pxm_to_node(smmu->pxm); |
---|
1242 | 1346 | |
---|
1243 | | - if (node != NUMA_NO_NODE && !node_online(node)) |
---|
| 1347 | + if (dev_node != NUMA_NO_NODE && !node_online(dev_node)) |
---|
1244 | 1348 | return -EINVAL; |
---|
1245 | 1349 | |
---|
1246 | | - set_dev_node(dev, node); |
---|
| 1350 | + set_dev_node(dev, dev_node); |
---|
1247 | 1351 | pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", |
---|
1248 | 1352 | smmu->base_address, |
---|
1249 | 1353 | smmu->pxm); |
---|
.. | .. |
---|
1306 | 1410 | } |
---|
1307 | 1411 | } |
---|
1308 | 1412 | |
---|
1309 | | -static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node) |
---|
| 1413 | +static void __init arm_smmu_dma_configure(struct device *dev, |
---|
| 1414 | + struct acpi_iort_node *node) |
---|
1310 | 1415 | { |
---|
1311 | 1416 | struct acpi_iort_smmu *smmu; |
---|
| 1417 | + enum dev_dma_attr attr; |
---|
1312 | 1418 | |
---|
1313 | 1419 | /* Retrieve SMMU specific data */ |
---|
1314 | 1420 | smmu = (struct acpi_iort_smmu *)node->node_data; |
---|
1315 | 1421 | |
---|
1316 | | - return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK; |
---|
| 1422 | + attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ? |
---|
| 1423 | + DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; |
---|
| 1424 | + |
---|
| 1425 | + /* We expect the dma masks to be equivalent for SMMU set-ups */ |
---|
| 1426 | + dev->dma_mask = &dev->coherent_dma_mask; |
---|
| 1427 | + |
---|
| 1428 | + /* Configure DMA for the page table walker */ |
---|
| 1429 | + acpi_dma_configure(dev, attr); |
---|
| 1430 | +} |
---|
| 1431 | + |
---|
| 1432 | +static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node) |
---|
| 1433 | +{ |
---|
| 1434 | + struct acpi_iort_pmcg *pmcg; |
---|
| 1435 | + |
---|
| 1436 | + /* Retrieve PMCG specific data */ |
---|
| 1437 | + pmcg = (struct acpi_iort_pmcg *)node->node_data; |
---|
| 1438 | + |
---|
| 1439 | + /* |
---|
| 1440 | + * There are always 2 memory resources. |
---|
| 1441 | + * If the overflow_gsiv is present then add that for a total of 3. |
---|
| 1442 | + */ |
---|
| 1443 | + return pmcg->overflow_gsiv ? 3 : 2; |
---|
| 1444 | +} |
---|
| 1445 | + |
---|
| 1446 | +static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res, |
---|
| 1447 | + struct acpi_iort_node *node) |
---|
| 1448 | +{ |
---|
| 1449 | + struct acpi_iort_pmcg *pmcg; |
---|
| 1450 | + |
---|
| 1451 | + /* Retrieve PMCG specific data */ |
---|
| 1452 | + pmcg = (struct acpi_iort_pmcg *)node->node_data; |
---|
| 1453 | + |
---|
| 1454 | + res[0].start = pmcg->page0_base_address; |
---|
| 1455 | + res[0].end = pmcg->page0_base_address + SZ_4K - 1; |
---|
| 1456 | + res[0].flags = IORESOURCE_MEM; |
---|
| 1457 | + /* |
---|
| 1458 | + * The initial version in DEN0049C lacked a way to describe register |
---|
| 1459 | + * page 1, which makes it broken for most PMCG implementations; in |
---|
| 1460 | + * that case, just let the driver fail gracefully if it expects to |
---|
| 1461 | + * find a second memory resource. |
---|
| 1462 | + */ |
---|
| 1463 | + if (node->revision > 0) { |
---|
| 1464 | + res[1].start = pmcg->page1_base_address; |
---|
| 1465 | + res[1].end = pmcg->page1_base_address + SZ_4K - 1; |
---|
| 1466 | + res[1].flags = IORESOURCE_MEM; |
---|
| 1467 | + } |
---|
| 1468 | + |
---|
| 1469 | + if (pmcg->overflow_gsiv) |
---|
| 1470 | + acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow", |
---|
| 1471 | + ACPI_EDGE_SENSITIVE, &res[2]); |
---|
| 1472 | +} |
---|
| 1473 | + |
---|
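A PMCG node becomes an `arm-smmu-v3-pmcg` platform device with one or two 4K register pages and an optional overflow interrupt. A hedged sketch of how a PMU driver's probe might pick these up; `pmcg_probe_example()` is a hypothetical name, the accessors are the standard platform-device helpers:

```c
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Hypothetical probe consuming the resources laid out above. */
static int pmcg_probe_example(struct platform_device *pdev)
{
	struct resource *page1;
	void __iomem *page0;
	int irq;

	page0 = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(page0))
		return PTR_ERR(page0);

	/* NULL when the IORT revision predates the page 1 field */
	page1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	/* Negative when the PMCG signals overflow via MSI instead */
	irq = platform_get_irq_optional(pdev, 0);

	dev_info(&pdev->dev, "page1 %s, overflow irq %d\n",
		 page1 ? "present" : "absent", irq);
	return 0;
}
```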
| 1474 | +static struct acpi_platform_list pmcg_plat_info[] __initdata = { |
---|
| 1475 | + /* HiSilicon Hip08 Platform */ |
---|
| 1476 | + {"HISI ", "HIP08 ", 0, ACPI_SIG_IORT, greater_than_or_equal, |
---|
| 1477 | + "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08}, |
---|
| 1478 | + /* HiSilicon Hip09 Platform */ |
---|
| 1479 | + {"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal, |
---|
| 1480 | + "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09}, |
---|
| 1481 | + { } |
---|
| 1482 | +}; |
---|
| 1483 | + |
---|
| 1484 | +static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev) |
---|
| 1485 | +{ |
---|
| 1486 | + u32 model; |
---|
| 1487 | + int idx; |
---|
| 1488 | + |
---|
| 1489 | + idx = acpi_match_platform_list(pmcg_plat_info); |
---|
| 1490 | + if (idx >= 0) |
---|
| 1491 | + model = pmcg_plat_info[idx].data; |
---|
| 1492 | + else |
---|
| 1493 | + model = IORT_SMMU_V3_PMCG_GENERIC; |
---|
| 1494 | + |
---|
| 1495 | + return platform_device_add_data(pdev, &model, sizeof(model)); |
---|
1317 | 1496 | } |
---|
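Unlike the SMMU nodes, which get a pointer to their IORT node as platform data, a PMCG device gets a model value selected from `pmcg_plat_info` so the PMU driver can apply erratum workarounds. A hedged sketch of the consuming side; `pmcg_apply_model_example()` is hypothetical, `dev_get_platdata()` and the `IORT_SMMU_V3_PMCG_*` identifiers are the existing ones:

```c
#include <linux/acpi_iort.h>
#include <linux/platform_device.h>

/* Hypothetical consumer of the model value attached as platform data. */
static void pmcg_apply_model_example(struct platform_device *pdev)
{
	u32 *model = dev_get_platdata(&pdev->dev);

	if (model && *model != IORT_SMMU_V3_PMCG_GENERIC)
		dev_info(&pdev->dev, "applying quirks for PMCG model %u\n",
			 *model);
}
```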
1318 | 1497 | |
---|
1319 | 1498 | struct iort_dev_config { |
---|
1320 | 1499 | const char *name; |
---|
1321 | 1500 | int (*dev_init)(struct acpi_iort_node *node); |
---|
1322 | | - bool (*dev_is_coherent)(struct acpi_iort_node *node); |
---|
| 1501 | + void (*dev_dma_configure)(struct device *dev, |
---|
| 1502 | + struct acpi_iort_node *node); |
---|
1323 | 1503 | int (*dev_count_resources)(struct acpi_iort_node *node); |
---|
1324 | 1504 | void (*dev_init_resources)(struct resource *res, |
---|
1325 | 1505 | struct acpi_iort_node *node); |
---|
1326 | 1506 | int (*dev_set_proximity)(struct device *dev, |
---|
1327 | 1507 | struct acpi_iort_node *node); |
---|
| 1508 | + int (*dev_add_platdata)(struct platform_device *pdev); |
---|
1328 | 1509 | }; |
---|
1329 | 1510 | |
---|
1330 | 1511 | static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = { |
---|
1331 | 1512 | .name = "arm-smmu-v3", |
---|
1332 | | - .dev_is_coherent = arm_smmu_v3_is_coherent, |
---|
| 1513 | + .dev_dma_configure = arm_smmu_v3_dma_configure, |
---|
1333 | 1514 | .dev_count_resources = arm_smmu_v3_count_resources, |
---|
1334 | 1515 | .dev_init_resources = arm_smmu_v3_init_resources, |
---|
1335 | 1516 | .dev_set_proximity = arm_smmu_v3_set_proximity, |
---|
.. | .. |
---|
1337 | 1518 | |
---|
1338 | 1519 | static const struct iort_dev_config iort_arm_smmu_cfg __initconst = { |
---|
1339 | 1520 | .name = "arm-smmu", |
---|
1340 | | - .dev_is_coherent = arm_smmu_is_coherent, |
---|
| 1521 | + .dev_dma_configure = arm_smmu_dma_configure, |
---|
1341 | 1522 | .dev_count_resources = arm_smmu_count_resources, |
---|
1342 | | - .dev_init_resources = arm_smmu_init_resources |
---|
| 1523 | + .dev_init_resources = arm_smmu_init_resources, |
---|
| 1524 | +}; |
---|
| 1525 | + |
---|
| 1526 | +static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = { |
---|
| 1527 | + .name = "arm-smmu-v3-pmcg", |
---|
| 1528 | + .dev_count_resources = arm_smmu_v3_pmcg_count_resources, |
---|
| 1529 | + .dev_init_resources = arm_smmu_v3_pmcg_init_resources, |
---|
| 1530 | + .dev_add_platdata = arm_smmu_v3_pmcg_add_platdata, |
---|
1343 | 1531 | }; |
---|
1344 | 1532 | |
---|
1345 | 1533 | static __init const struct iort_dev_config *iort_get_dev_cfg( |
---|
.. | .. |
---|
1350 | 1538 | return &iort_arm_smmu_v3_cfg; |
---|
1351 | 1539 | case ACPI_IORT_NODE_SMMU: |
---|
1352 | 1540 | return &iort_arm_smmu_cfg; |
---|
| 1541 | + case ACPI_IORT_NODE_PMCG: |
---|
| 1542 | + return &iort_arm_smmu_v3_pmcg_cfg; |
---|
1353 | 1543 | default: |
---|
1354 | 1544 | return NULL; |
---|
1355 | 1545 | } |
---|
.. | .. |
---|
1358 | 1548 | /** |
---|
1359 | 1549 | * iort_add_platform_device() - Allocate a platform device for IORT node |
---|
1360 | 1550 | * @node: Pointer to device ACPI IORT node |
---|
| 1551 | + * @ops: Pointer to IORT device config struct |
---|
1361 | 1552 | * |
---|
1362 | 1553 | * Returns: 0 on success, <0 failure |
---|
1363 | 1554 | */ |
---|
.. | .. |
---|
1367 | 1558 | struct fwnode_handle *fwnode; |
---|
1368 | 1559 | struct platform_device *pdev; |
---|
1369 | 1560 | struct resource *r; |
---|
1370 | | - enum dev_dma_attr attr; |
---|
1371 | 1561 | int ret, count; |
---|
1372 | 1562 | |
---|
1373 | 1563 | pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO); |
---|
.. | .. |
---|
1401 | 1591 | goto dev_put; |
---|
1402 | 1592 | |
---|
1403 | 1593 | /* |
---|
1404 | | - * Add a copy of IORT node pointer to platform_data to |
---|
1405 | | - * be used to retrieve IORT data information. |
---|
| 1594 | + * Platform devices based on PMCG nodes use platform_data to |
---|
| 1595 | + * pass the hardware model info to the driver. For others, add |
---|
| 1596 | + * a copy of IORT node pointer to platform_data to be used to |
---|
| 1597 | + * retrieve IORT data information. |
---|
1406 | 1598 | */ |
---|
1407 | | - ret = platform_device_add_data(pdev, &node, sizeof(node)); |
---|
| 1599 | + if (ops->dev_add_platdata) |
---|
| 1600 | + ret = ops->dev_add_platdata(pdev); |
---|
| 1601 | + else |
---|
| 1602 | + ret = platform_device_add_data(pdev, &node, sizeof(node)); |
---|
| 1603 | + |
---|
1408 | 1604 | if (ret) |
---|
1409 | 1605 | goto dev_put; |
---|
1410 | | - |
---|
1411 | | - /* |
---|
1412 | | - * We expect the dma masks to be equivalent for |
---|
1413 | | - * all SMMUs set-ups |
---|
1414 | | - */ |
---|
1415 | | - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; |
---|
1416 | 1606 | |
---|
1417 | 1607 | fwnode = iort_get_fwnode(node); |
---|
1418 | 1608 | |
---|
.. | .. |
---|
1423 | 1613 | |
---|
1424 | 1614 | pdev->dev.fwnode = fwnode; |
---|
1425 | 1615 | |
---|
1426 | | - attr = ops->dev_is_coherent && ops->dev_is_coherent(node) ? |
---|
1427 | | - DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; |
---|
1428 | | - |
---|
1429 | | - /* Configure DMA for the page table walker */ |
---|
1430 | | - acpi_dma_configure(&pdev->dev, attr); |
---|
| 1616 | + if (ops->dev_dma_configure) |
---|
| 1617 | + ops->dev_dma_configure(&pdev->dev, node); |
---|
1431 | 1618 | |
---|
1432 | 1619 | iort_set_device_domain(&pdev->dev, node); |
---|
1433 | 1620 | |
---|
.. | .. |
---|
1438 | 1625 | return 0; |
---|
1439 | 1626 | |
---|
1440 | 1627 | dma_deconfigure: |
---|
1441 | | - acpi_dma_deconfigure(&pdev->dev); |
---|
| 1628 | + arch_teardown_dma_ops(&pdev->dev); |
---|
1442 | 1629 | dev_put: |
---|
1443 | 1630 | platform_device_put(pdev); |
---|
1444 | 1631 | |
---|
1445 | 1632 | return ret; |
---|
1446 | 1633 | } |
---|
1447 | 1634 | |
---|
1448 | | -static bool __init iort_enable_acs(struct acpi_iort_node *iort_node) |
---|
| 1635 | +#ifdef CONFIG_PCI |
---|
| 1636 | +static void __init iort_enable_acs(struct acpi_iort_node *iort_node) |
---|
1449 | 1637 | { |
---|
| 1638 | + static bool acs_enabled __initdata; |
---|
| 1639 | + |
---|
| 1640 | + if (acs_enabled) |
---|
| 1641 | + return; |
---|
| 1642 | + |
---|
1450 | 1643 | if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { |
---|
1451 | 1644 | struct acpi_iort_node *parent; |
---|
1452 | 1645 | struct acpi_iort_id_mapping *map; |
---|
.. | .. |
---|
1468 | 1661 | if ((parent->type == ACPI_IORT_NODE_SMMU) || |
---|
1469 | 1662 | (parent->type == ACPI_IORT_NODE_SMMU_V3)) { |
---|
1470 | 1663 | pci_request_acs(); |
---|
1471 | | - return true; |
---|
| 1664 | + acs_enabled = true; |
---|
| 1665 | + return; |
---|
1472 | 1666 | } |
---|
1473 | 1667 | } |
---|
1474 | 1668 | } |
---|
1475 | | - |
---|
1476 | | - return false; |
---|
1477 | 1669 | } |
---|
| 1670 | +#else |
---|
| 1671 | +static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { } |
---|
| 1672 | +#endif |
---|
1478 | 1673 | |
---|
1479 | 1674 | static void __init iort_init_platform_devices(void) |
---|
1480 | 1675 | { |
---|
.. | .. |
---|
1482 | 1677 | struct acpi_table_iort *iort; |
---|
1483 | 1678 | struct fwnode_handle *fwnode; |
---|
1484 | 1679 | int i, ret; |
---|
1485 | | - bool acs_enabled = false; |
---|
1486 | 1680 | const struct iort_dev_config *ops; |
---|
1487 | 1681 | |
---|
1488 | 1682 | /* |
---|
.. | .. |
---|
1503 | 1697 | return; |
---|
1504 | 1698 | } |
---|
1505 | 1699 | |
---|
1506 | | - if (!acs_enabled) |
---|
1507 | | - acs_enabled = iort_enable_acs(iort_node); |
---|
| 1700 | + iort_enable_acs(iort_node); |
---|
1508 | 1701 | |
---|
1509 | 1702 | ops = iort_get_dev_cfg(iort_node); |
---|
1510 | 1703 | if (ops) { |
---|
.. | .. |
---|
1531 | 1724 | { |
---|
1532 | 1725 | acpi_status status; |
---|
1533 | 1726 | |
---|
| 1727 | + /* iort_table will be used at runtime after the iort init, |
---|
| 1728 | + * so we don't need to call acpi_put_table() to release |
---|
| 1729 | + * the IORT table mapping. |
---|
| 1730 | + */ |
---|
1534 | 1731 | status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table); |
---|
1535 | 1732 | if (ACPI_FAILURE(status)) { |
---|
1536 | 1733 | if (status != AE_NOT_FOUND) { |
---|
.. | .. |
---|
1544 | 1741 | |
---|
1545 | 1742 | iort_init_platform_devices(); |
---|
1546 | 1743 | } |
---|
| 1744 | + |
---|
| 1745 | +#ifdef CONFIG_ZONE_DMA |
---|
| 1746 | +/* |
---|
| 1747 | + * Extract the highest CPU physical address accessible to all DMA masters in |
---|
| 1748 | + * the system. PHYS_ADDR_MAX is returned when no constrained device is found. |
---|
| 1749 | + */ |
---|
| 1750 | +phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void) |
---|
| 1751 | +{ |
---|
| 1752 | + phys_addr_t limit = PHYS_ADDR_MAX; |
---|
| 1753 | + struct acpi_iort_node *node, *end; |
---|
| 1754 | + struct acpi_table_iort *iort; |
---|
| 1755 | + acpi_status status; |
---|
| 1756 | + int i; |
---|
| 1757 | + |
---|
| 1758 | + if (acpi_disabled) |
---|
| 1759 | + return limit; |
---|
| 1760 | + |
---|
| 1761 | + status = acpi_get_table(ACPI_SIG_IORT, 0, |
---|
| 1762 | + (struct acpi_table_header **)&iort); |
---|
| 1763 | + if (ACPI_FAILURE(status)) |
---|
| 1764 | + return limit; |
---|
| 1765 | + |
---|
| 1766 | + node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset); |
---|
| 1767 | + end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length); |
---|
| 1768 | + |
---|
| 1769 | + for (i = 0; i < iort->node_count; i++) { |
---|
| 1770 | + if (node >= end) |
---|
| 1771 | + break; |
---|
| 1772 | + |
---|
| 1773 | + switch (node->type) { |
---|
| 1774 | + struct acpi_iort_named_component *ncomp; |
---|
| 1775 | + struct acpi_iort_root_complex *rc; |
---|
| 1776 | + phys_addr_t local_limit; |
---|
| 1777 | + |
---|
| 1778 | + case ACPI_IORT_NODE_NAMED_COMPONENT: |
---|
| 1779 | + ncomp = (struct acpi_iort_named_component *)node->node_data; |
---|
| 1780 | + local_limit = DMA_BIT_MASK(ncomp->memory_address_limit); |
---|
| 1781 | + limit = min_not_zero(limit, local_limit); |
---|
| 1782 | + break; |
---|
| 1783 | + |
---|
| 1784 | + case ACPI_IORT_NODE_PCI_ROOT_COMPLEX: |
---|
| 1785 | + if (node->revision < 1) |
---|
| 1786 | + break; |
---|
| 1787 | + |
---|
| 1788 | + rc = (struct acpi_iort_root_complex *)node->node_data; |
---|
| 1789 | + local_limit = DMA_BIT_MASK(rc->memory_address_limit); |
---|
| 1790 | + limit = min_not_zero(limit, local_limit); |
---|
| 1791 | + break; |
---|
| 1792 | + } |
---|
| 1793 | + node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length); |
---|
| 1794 | + } |
---|
| 1795 | + acpi_put_table(&iort->header); |
---|
| 1796 | + return limit; |
---|
| 1797 | +} |
---|
| 1798 | +#endif |
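`acpi_iort_dma_get_max_cpu_address()` lets early architecture code size a DMA zone before any device probes, returning `PHYS_ADDR_MAX` when no master is constrained. A hedged sketch, in the spirit of the arm64 ZONE_DMA sizing, of turning that address into a zone width; `zone_dma_bits_example()` is a hypothetical name, `fls64()` and `min()` are the usual kernel helpers:

```c
#include <linux/acpi_iort.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical helper: number of address bits the DMA zone must cover. */
static unsigned int __init zone_dma_bits_example(void)
{
	phys_addr_t limit = acpi_iort_dma_get_max_cpu_address();

	/* PHYS_ADDR_MAX (nothing constrained) degenerates to the 32-bit default */
	return min(32U, (unsigned int)fls64(limit));
}
```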
---|