.. | ..
58 | 58 | #define pci_msi_teardown_msi_irqs	arch_teardown_msi_irqs
59 | 59 | #endif
60 | 60 |
| 61 | +#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
61 | 62 | /* Arch hooks */
62 | | -
63 | 63 | int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
64 | 64 | {
65 | 65 | 	struct msi_controller *chip = dev->bus->msi;
.. | ..
132 | 132 | {
133 | 133 | 	return default_teardown_msi_irqs(dev);
134 | 134 | }
| 135 | +#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
135 | 136 |
136 | 137 | static void default_restore_msi_irq(struct pci_dev *dev, int irq)
137 | 138 | {
.. | ..
192 | 193 |
193 | 194 | static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
194 | 195 | {
| 196 | +	if (desc->msi_attrib.is_virtual)
| 197 | +		return NULL;
| 198 | +
195 | 199 | 	return desc->mask_base +
196 | 200 | 		desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
197 | 201 | }
.. | ..
206 | 210 | u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
207 | 211 | {
208 | 212 | 	u32 mask_bits = desc->masked;
| 213 | +	void __iomem *desc_addr;
209 | 214 |
210 | 215 | 	if (pci_msi_ignore_mask)
| 216 | +		return 0;
| 217 | +
| 218 | +	desc_addr = pci_msix_desc_addr(desc);
| 219 | +	if (!desc_addr)
211 | 220 | 		return 0;
212 | 221 |
213 | 222 | 	mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
214 | 223 | 	if (flag & PCI_MSIX_ENTRY_CTRL_MASKBIT)
215 | 224 | 		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
216 | | -	writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL);
| 225 | +
| 226 | +	writel(mask_bits, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
217 | 227 |
218 | 228 | 	return mask_bits;
219 | 229 | }
.. | ..
237 | 247 | }
238 | 248 |
239 | 249 | /**
240 | | - * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts
| 250 | + * pci_msi_mask_irq - Generic IRQ chip callback to mask PCI/MSI interrupts
241 | 251 |  * @data: pointer to irqdata associated to that interrupt
242 | 252 |  */
243 | 253 | void pci_msi_mask_irq(struct irq_data *data)
.. | ..
247 | 257 | EXPORT_SYMBOL_GPL(pci_msi_mask_irq);
248 | 258 |
249 | 259 | /**
250 | | - * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts
| 260 | + * pci_msi_unmask_irq - Generic IRQ chip callback to unmask PCI/MSI interrupts
251 | 261 |  * @data: pointer to irqdata associated to that interrupt
252 | 262 |  */
253 | 263 | void pci_msi_unmask_irq(struct irq_data *data)
.. | ..
272 | 282 |
273 | 283 | 	if (entry->msi_attrib.is_msix) {
274 | 284 | 		void __iomem *base = pci_msix_desc_addr(entry);
| 285 | +
| 286 | +		if (!base) {
| 287 | +			WARN_ON(1);
| 288 | +			return;
| 289 | +		}
275 | 290 |
276 | 291 | 		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
277 | 292 | 		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
.. | ..
303 | 318 | 	} else if (entry->msi_attrib.is_msix) {
304 | 319 | 		void __iomem *base = pci_msix_desc_addr(entry);
305 | 320 | 		bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT);
| 321 | +
| 322 | +		if (!base)
| 323 | +			goto skip;
306 | 324 |
307 | 325 | 		/*
308 | 326 | 		 * The specification mandates that the entry is masked
.. | ..
347 | 365 | 		/* Ensure that the writes are visible in the device */
348 | 366 | 		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
349 | 367 | 	}
| 368 | +
| 369 | +skip:
350 | 370 | 	entry->msg = *msg;
| 371 | +
| 372 | +	if (entry->write_msi_msg)
| 373 | +		entry->write_msi_msg(entry, entry->write_msi_msg_data);
| 374 | +
351 | 375 | }
352 | 376 |
353 | 377 | void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
.. | ..
552 | 576 | }
553 | 577 |
554 | 578 | static struct msi_desc *
555 | | -msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
| 579 | +msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
556 | 580 | {
557 | | -	struct cpumask *masks = NULL;
| 581 | +	struct irq_affinity_desc *masks = NULL;
558 | 582 | 	struct msi_desc *entry;
559 | 583 | 	u16 control;
560 | 584 |
561 | 585 | 	if (affd)
562 | 586 | 		masks = irq_create_affinity_masks(nvec, affd);
563 | | -
564 | 587 |
565 | 588 | 	/* MSI Entry Initialization */
566 | 589 | 	entry = alloc_msi_entry(&dev->dev, nvec, masks);
.. | ..
574 | 597 |
575 | 598 | 	entry->msi_attrib.is_msix = 0;
576 | 599 | 	entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
| 600 | +	entry->msi_attrib.is_virtual = 0;
577 | 601 | 	entry->msi_attrib.entry_nr = 0;
578 | 602 | 	entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
579 | 603 | 	entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
.. | ..
612 | 636 |  * msi_capability_init - configure device's MSI capability structure
613 | 637 |  * @dev: pointer to the pci_dev data structure of MSI device function
614 | 638 |  * @nvec: number of interrupts to allocate
615 | | - * @affd: description of automatic irq affinity assignments (may be %NULL)
| 639 | + * @affd: description of automatic IRQ affinity assignments (may be %NULL)
616 | 640 |  *
617 | 641 |  * Setup the MSI capability structure of the device with the requested
618 | 642 |  * number of interrupts. A return value of zero indicates the successful
619 | | - * setup of an entry with the new MSI irq. A negative return value indicates
| 643 | + * setup of an entry with the new MSI IRQ. A negative return value indicates
620 | 644 |  * an error, and a positive return value indicates the number of interrupts
621 | 645 |  * which could have been allocated.
622 | 646 |  */
623 | 647 | static int msi_capability_init(struct pci_dev *dev, int nvec,
624 | | -			       const struct irq_affinity *affd)
| 648 | +			       struct irq_affinity *affd)
625 | 649 | {
626 | 650 | 	struct msi_desc *entry;
627 | 651 | 	int ret;
.. | ..
633 | 657 | 	if (!entry)
634 | 658 | 		return -ENOMEM;
635 | 659 |
636 | | -	/* All MSIs are unmasked by default, Mask them all */
| 660 | +	/* All MSIs are unmasked by default; mask them all */
637 | 661 | 	mask = msi_mask(entry->msi_attrib.multi_cap);
638 | 662 | 	msi_mask_irq(entry, mask, mask);
639 | 663 |
.. | ..
661 | 685 | 		return ret;
662 | 686 | 	}
663 | 687 |
664 | | -	/* Set MSI enabled bits */
| 688 | +	/* Set MSI enabled bits */
665 | 689 | 	pci_intx_for_msi(dev, 0);
666 | 690 | 	pci_msi_set_enable(dev, 1);
667 | 691 | 	dev->msi_enabled = 1;
.. | ..
688 | 712 | 	table_offset &= PCI_MSIX_TABLE_OFFSET;
689 | 713 | 	phys_addr = pci_resource_start(dev, bir) + table_offset;
690 | 714 |
691 | | -	return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
| 715 | +	return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
692 | 716 | }
693 | 717 |
694 | 718 | static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
695 | 719 | 			      struct msix_entry *entries, int nvec,
696 | | -			      const struct irq_affinity *affd)
| 720 | +			      struct irq_affinity *affd)
697 | 721 | {
698 | | -	struct cpumask *curmsk, *masks = NULL;
| 722 | +	struct irq_affinity_desc *curmsk, *masks = NULL;
699 | 723 | 	struct msi_desc *entry;
700 | 724 | 	void __iomem *addr;
701 | 725 | 	int ret, i;
| 726 | +	int vec_count = pci_msix_vec_count(dev);
702 | 727 |
703 | 728 | 	if (affd)
704 | 729 | 		masks = irq_create_affinity_masks(nvec, affd);
.. | ..
722 | 747 | 			entry->msi_attrib.entry_nr = entries[i].entry;
723 | 748 | 		else
724 | 749 | 			entry->msi_attrib.entry_nr = i;
| 750 | +
| 751 | +		entry->msi_attrib.is_virtual =
| 752 | +			entry->msi_attrib.entry_nr >= vec_count;
| 753 | +
725 | 754 | 		entry->msi_attrib.default_irq = dev->irq;
726 | 755 | 		entry->mask_base = base;
727 | 756 |
.. | ..
768 | 797 |  * @dev: pointer to the pci_dev data structure of MSI-X device function
769 | 798 |  * @entries: pointer to an array of struct msix_entry entries
770 | 799 |  * @nvec: number of @entries
771 | | - * @affd: Optional pointer to enable automatic affinity assignement
| 800 | + * @affd: Optional pointer to enable automatic affinity assignment
772 | 801 |  *
773 | 802 |  * Setup the MSI-X capability structure of device function with a
774 | | - * single MSI-X irq. A return of zero indicates the successful setup of
775 | | - * requested MSI-X entries with allocated irqs or non-zero for otherwise.
| 803 | + * single MSI-X IRQ. A return of zero indicates the successful setup of
| 804 | + * requested MSI-X entries with allocated IRQs, or non-zero otherwise.
776 | 805 |  **/
777 | 806 | static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
778 | | -				int nvec, const struct irq_affinity *affd)
| 807 | +				int nvec, struct irq_affinity *affd)
779 | 808 | {
780 | 809 | 	void __iomem *base;
781 | 810 | 	int ret, tsize;
.. | ..
838 | 867 | out_avail:
839 | 868 | 	if (ret < 0) {
840 | 869 | 		/*
841 | | -		 * If we had some success, report the number of irqs
| 870 | +		 * If we had some success, report the number of IRQs
842 | 871 | 		 * we succeeded in setting up.
843 | 872 | 		 */
844 | 873 | 		struct msi_desc *entry;
.. | ..
864 | 893 | /**
865 | 894 |  * pci_msi_supported - check whether MSI may be enabled on a device
866 | 895 |  * @dev: pointer to the pci_dev data structure of MSI device function
867 | | - * @nvec: how many MSIs have been requested ?
| 896 | + * @nvec: how many MSIs have been requested?
868 | 897 |  *
869 | 898 |  * Look at global flags, the device itself, and its parent buses
870 | 899 |  * to determine if MSI/-X are supported for the device. If MSI/-X is
.. | ..
878 | 907 | 	if (!pci_msi_enable)
879 | 908 | 		return 0;
880 | 909 |
881 | | -	if (!dev || dev->no_msi || dev->current_state != PCI_D0)
| 910 | +	if (!dev || dev->no_msi)
882 | 911 | 		return 0;
883 | 912 |
884 | 913 | 	/*
.. | ..
947 | 976 | 	mask = msi_mask(desc->msi_attrib.multi_cap);
948 | 977 | 	msi_mask_irq(desc, mask, 0);
949 | 978 |
950 | | -	/* Restore dev->irq to its default pin-assertion irq */
| 979 | +	/* Restore dev->irq to its default pin-assertion IRQ */
951 | 980 | 	dev->irq = desc->msi_attrib.default_irq;
952 | 981 | 	pcibios_alloc_irq(dev);
953 | 982 | }
.. | ..
983 | 1012 | EXPORT_SYMBOL(pci_msix_vec_count);
984 | 1013 |
985 | 1014 | static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
986 | | -			     int nvec, const struct irq_affinity *affd)
| 1015 | +			     int nvec, struct irq_affinity *affd, int flags)
987 | 1016 | {
988 | 1017 | 	int nr_entries;
989 | 1018 | 	int i, j;
990 | 1019 |
991 | | -	if (!pci_msi_supported(dev, nvec))
| 1020 | +	if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0)
992 | 1021 | 		return -EINVAL;
993 | 1022 |
994 | 1023 | 	nr_entries = pci_msix_vec_count(dev);
995 | 1024 | 	if (nr_entries < 0)
996 | 1025 | 		return nr_entries;
997 | | -	if (nvec > nr_entries)
| 1026 | +	if (nvec > nr_entries && !(flags & PCI_IRQ_VIRTUAL))
998 | 1027 | 		return nr_entries;
999 | 1028 |
1000 | 1029 | 	if (entries) {
.. | ..
1009 | 1038 | 		}
1010 | 1039 | 	}
1011 | 1040 |
1012 | | -	/* Check whether driver already requested for MSI irq */
| 1041 | +	/* Check whether driver already requested for MSI IRQ */
1013 | 1042 | 	if (dev->msi_enabled) {
1014 | 1043 | 		pci_info(dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
1015 | 1044 | 		return -EINVAL;
.. | ..
1067 | 1096 | EXPORT_SYMBOL(pci_msi_enabled);
1068 | 1097 |
1069 | 1098 | static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
1070 | | -				  const struct irq_affinity *affd)
| 1099 | +				  struct irq_affinity *affd)
1071 | 1100 | {
1072 | 1101 | 	int nvec;
1073 | 1102 | 	int rc;
1074 | 1103 |
1075 | | -	if (!pci_msi_supported(dev, minvec))
| 1104 | +	if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0)
1076 | 1105 | 		return -EINVAL;
1077 | 1106 |
1078 | | -	/* Check whether driver already requested MSI-X irqs */
| 1107 | +	/* Check whether driver already requested MSI-X IRQs */
1079 | 1108 | 	if (dev->msix_enabled) {
1080 | 1109 | 		pci_info(dev, "can't enable MSI (MSI-X already enabled)\n");
1081 | 1110 | 		return -EINVAL;
.. | ..
1128 | 1157 |
1129 | 1158 | static int __pci_enable_msix_range(struct pci_dev *dev,
1130 | 1159 | 				   struct msix_entry *entries, int minvec,
1131 | | -				   int maxvec, const struct irq_affinity *affd)
| 1160 | +				   int maxvec, struct irq_affinity *affd,
| 1161 | +				   int flags)
1132 | 1162 | {
1133 | 1163 | 	int rc, nvec = maxvec;
1134 | 1164 |
.. | ..
1145 | 1175 | 		return -ENOSPC;
1146 | 1176 | 	}
1147 | 1177 |
1148 | | -	rc = __pci_enable_msix(dev, entries, nvec, affd);
| 1178 | +	rc = __pci_enable_msix(dev, entries, nvec, affd, flags);
1149 | 1179 | 	if (rc == 0)
1150 | 1180 | 		return nvec;
1151 | 1181 |
.. | ..
1162 | 1192 |  * pci_enable_msix_range - configure device's MSI-X capability structure
1163 | 1193 |  * @dev: pointer to the pci_dev data structure of MSI-X device function
1164 | 1194 |  * @entries: pointer to an array of MSI-X entries
1165 | | - * @minvec: minimum number of MSI-X irqs requested
1166 | | - * @maxvec: maximum number of MSI-X irqs requested
| 1195 | + * @minvec: minimum number of MSI-X IRQs requested
| 1196 | + * @maxvec: maximum number of MSI-X IRQs requested
1167 | 1197 |  *
1168 | 1198 |  * Setup the MSI-X capability structure of device function with a maximum
1169 | 1199 |  * possible number of interrupts in the range between @minvec and @maxvec
.. | ..
1176 | 1206 | int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1177 | 1207 | 			  int minvec, int maxvec)
1178 | 1208 | {
1179 | | -	return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL);
| 1209 | +	return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL, 0);
1180 | 1210 | }
1181 | 1211 | EXPORT_SYMBOL(pci_enable_msix_range);
1182 | 1212 |
.. | ..
1200 | 1230 |  */
1201 | 1231 | int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1202 | 1232 | 				   unsigned int max_vecs, unsigned int flags,
1203 | | -				   const struct irq_affinity *affd)
| 1233 | +				   struct irq_affinity *affd)
1204 | 1234 | {
1205 | | -	static const struct irq_affinity msi_default_affd;
1206 | | -	int msix_vecs = -ENOSPC;
1207 | | -	int msi_vecs = -ENOSPC;
| 1235 | +	struct irq_affinity msi_default_affd = {0};
| 1236 | +	int nvecs = -ENOSPC;
1208 | 1237 |
1209 | 1238 | 	if (flags & PCI_IRQ_AFFINITY) {
1210 | 1239 | 		if (!affd)
.. | ..
1215 | 1244 | 	}
1216 | 1245 |
1217 | 1246 | 	if (flags & PCI_IRQ_MSIX) {
1218 | | -		msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs,
1219 | | -						    max_vecs, affd);
1220 | | -		if (msix_vecs > 0)
1221 | | -			return msix_vecs;
| 1247 | +		nvecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
| 1248 | +						affd, flags);
| 1249 | +		if (nvecs > 0)
| 1250 | +			return nvecs;
1222 | 1251 | 	}
1223 | 1252 |
1224 | 1253 | 	if (flags & PCI_IRQ_MSI) {
1225 | | -		msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs,
1226 | | -						  affd);
1227 | | -		if (msi_vecs > 0)
1228 | | -			return msi_vecs;
| 1254 | +		nvecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
| 1255 | +		if (nvecs > 0)
| 1256 | +			return nvecs;
1229 | 1257 | 	}
1230 | 1258 |
1231 | | -	/* use legacy irq if allowed */
| 1259 | +	/* use legacy IRQ if allowed */
1232 | 1260 | 	if (flags & PCI_IRQ_LEGACY) {
1233 | 1261 | 		if (min_vecs == 1 && dev->irq) {
| 1262 | +			/*
| 1263 | +			 * Invoke the affinity spreading logic to ensure that
| 1264 | +			 * the device driver can adjust queue configuration
| 1265 | +			 * for the single interrupt case.
| 1266 | +			 */
| 1267 | +			if (affd)
| 1268 | +				irq_create_affinity_masks(1, affd);
1234 | 1269 | 			pci_intx(dev, 1);
1235 | 1270 | 			return 1;
1236 | 1271 | 		}
1237 | 1272 | 	}
1238 | 1273 |
1239 | | -	if (msix_vecs == -ENOSPC)
1240 | | -		return -ENOSPC;
1241 | | -	return msi_vecs;
| 1274 | +	return nvecs;
1242 | 1275 | }
1243 | 1276 | EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
1244 | 1277 |
.. | ..
1257 | 1290 |
1258 | 1291 | /**
1259 | 1292 |  * pci_irq_vector - return Linux IRQ number of a device vector
1260 | | - * @dev: PCI device to operate on
1261 | | - * @nr: device-relative interrupt vector index (0-based).
| 1293 | + * @dev: PCI device to operate on
| 1294 | + * @nr: Interrupt vector index (0-based)
| 1295 | + *
| 1296 | + * @nr has the following meanings depending on the interrupt mode:
| 1297 | + *   MSI-X: The index in the MSI-X vector table
| 1298 | + *   MSI:   The index of the enabled MSI vectors
| 1299 | + *   INTx:  Must be 0
| 1300 | + *
| 1301 | + * Return: The Linux interrupt number or -EINVAL if @nr is out of range.
1262 | 1302 |  */
1263 | 1303 | int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1264 | 1304 | {
1265 | 1305 | 	if (dev->msix_enabled) {
1266 | 1306 | 		struct msi_desc *entry;
1267 | | -		int i = 0;
1268 | 1307 |
1269 | 1308 | 		for_each_pci_msi_entry(entry, dev) {
1270 | | -			if (i == nr)
| 1309 | +			if (entry->msi_attrib.entry_nr == nr)
1271 | 1310 | 				return entry->irq;
1272 | | -			i++;
1273 | 1311 | 		}
1274 | 1312 | 		WARN_ON_ONCE(1);
1275 | 1313 | 		return -EINVAL;
.. | ..
1290 | 1328 | EXPORT_SYMBOL(pci_irq_vector);
1291 | 1329 |
1292 | 1330 | /**
1293 | | - * pci_irq_get_affinity - return the affinity of a particular msi vector
| 1331 | + * pci_irq_get_affinity - return the affinity of a particular MSI vector
1294 | 1332 |  * @dev: PCI device to operate on
1295 | 1333 |  * @nr: device-relative interrupt vector index (0-based).
| 1334 | + *
| 1335 | + * @nr has the following meanings depending on the interrupt mode:
| 1336 | + *   MSI-X: The index in the MSI-X vector table
| 1337 | + *   MSI:   The index of the enabled MSI vectors
| 1338 | + *   INTx:  Must be 0
| 1339 | + *
| 1340 | + * Return: A cpumask pointer or NULL if @nr is out of range
1296 | 1341 |  */
1297 | 1342 | const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
1298 | 1343 | {
1299 | 1344 | 	if (dev->msix_enabled) {
1300 | 1345 | 		struct msi_desc *entry;
1301 | | -		int i = 0;
1302 | 1346 |
1303 | 1347 | 		for_each_pci_msi_entry(entry, dev) {
1304 | | -			if (i == nr)
1305 | | -				return entry->affinity;
1306 | | -			i++;
| 1348 | +			if (entry->msi_attrib.entry_nr == nr)
| 1349 | +				return &entry->affinity->mask;
1307 | 1350 | 		}
1308 | 1351 | 		WARN_ON_ONCE(1);
1309 | 1352 | 		return NULL;
.. | ..
1314 | 1357 | 				 nr >= entry->nvec_used))
1315 | 1358 | 			return NULL;
1316 | 1359 |
1317 | | -		return &entry->affinity[nr];
| 1360 | +		return &entry->affinity[nr].mask;
1318 | 1361 | 	} else {
1319 | 1362 | 		return cpu_possible_mask;
1320 | 1363 | 	}
1321 | 1364 | }
1322 | 1365 | EXPORT_SYMBOL(pci_irq_get_affinity);
1323 | | -
1324 | | -/**
1325 | | - * pci_irq_get_node - return the numa node of a particular msi vector
1326 | | - * @pdev: PCI device to operate on
1327 | | - * @vec: device-relative interrupt vector index (0-based).
1328 | | - */
1329 | | -int pci_irq_get_node(struct pci_dev *pdev, int vec)
1330 | | -{
1331 | | -	const struct cpumask *mask;
1332 | | -
1333 | | -	mask = pci_irq_get_affinity(pdev, vec);
1334 | | -	if (mask)
1335 | | -		return local_memory_node(cpu_to_node(cpumask_first(mask)));
1336 | | -	return dev_to_node(&pdev->dev);
1337 | | -}
1338 | | -EXPORT_SYMBOL(pci_irq_get_node);
1339 | 1366 |
1340 | 1367 | struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
1341 | 1368 | {
.. | ..
1371 | 1398 |
1372 | 1399 | /**
1373 | 1400 |  * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
1374 | | - * @dev: Pointer to the PCI device
1375 | | - * @desc: Pointer to the msi descriptor
| 1401 | + * @desc: Pointer to the MSI descriptor
1376 | 1402 |  *
1377 | 1403 |  * The ID number is only used within the irqdomain.
1378 | 1404 |  */
1379 | | -irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
1380 | | -					  struct msi_desc *desc)
| 1405 | +static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
1381 | 1406 | {
| 1407 | +	struct pci_dev *dev = msi_desc_to_pci_dev(desc);
| 1408 | +
1382 | 1409 | 	return (irq_hw_number_t)desc->msi_attrib.entry_nr |
1383 | | -		PCI_DEVID(dev->bus->number, dev->devfn) << 11 |
| 1410 | +		pci_dev_id(dev) << 11 |
1384 | 1411 | 		(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
1385 | 1412 | }
1386 | 1413 |
.. | ..
1390 | 1417 | }
1391 | 1418 |
1392 | 1419 | /**
1393 | | - * pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev
| 1420 | + * pci_msi_domain_check_cap - Verify that @domain supports the capabilities
| 1421 | + *			      for @dev
1394 | 1422 |  * @domain: The interrupt domain to check
1395 | 1423 |  * @info: The domain info for verification
1396 | 1424 |  * @dev: The device to check
.. | ..
1425 | 1453 | 	return error;
1426 | 1454 | }
1427 | 1455 |
1428 | | -#ifdef GENERIC_MSI_DOMAIN_OPS
1429 | 1456 | static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
1430 | 1457 | 				    struct msi_desc *desc)
1431 | 1458 | {
1432 | 1459 | 	arg->desc = desc;
1433 | | -	arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc),
1434 | | -					       desc);
| 1460 | +	arg->hwirq = pci_msi_domain_calc_hwirq(desc);
1435 | 1461 | }
1436 | | -#else
1437 | | -#define pci_msi_domain_set_desc NULL
1438 | | -#endif
1439 | 1462 |
1440 | 1463 | static struct msi_domain_ops pci_msi_domain_ops_default = {
1441 | 1464 | 	.set_desc = pci_msi_domain_set_desc,
.. | ..
1550 | 1573 | u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
1551 | 1574 | {
1552 | 1575 | 	struct device_node *of_node;
1553 | | -	u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
| 1576 | +	u32 rid = pci_dev_id(pdev);
1554 | 1577 |
1555 | 1578 | 	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
1556 | 1579 |
1557 | 1580 | 	of_node = irq_domain_get_of_node(domain);
1558 | | -	rid = of_node ? of_msi_map_rid(&pdev->dev, of_node, rid) :
1559 | | -			iort_msi_map_rid(&pdev->dev, rid);
| 1581 | +	rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) :
| 1582 | +			iort_msi_map_id(&pdev->dev, rid);
1560 | 1583 |
1561 | 1584 | 	return rid;
1562 | 1585 | }
.. | ..
1573 | 1596 | struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
1574 | 1597 | {
1575 | 1598 | 	struct irq_domain *dom;
1576 | | -	u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
| 1599 | +	u32 rid = pci_dev_id(pdev);
1577 | 1600 |
1578 | 1601 | 	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
1579 | | -	dom = of_msi_map_get_device_domain(&pdev->dev, rid);
| 1602 | +	dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI);
1580 | 1603 | 	if (!dom)
1581 | | -		dom = iort_get_device_domain(&pdev->dev, rid);
| 1604 | +		dom = iort_get_device_domain(&pdev->dev, rid,
| 1605 | +					     DOMAIN_BUS_PCI_MSI);
1582 | 1606 | 	return dom;
1583 | 1607 | }
| 1608 | +
| 1609 | +/**
| 1610 | + * pci_dev_has_special_msi_domain - Check whether the device is handled by
| 1611 | + *				    a non-standard PCI-MSI domain
| 1612 | + * @pdev: The PCI device to check.
| 1613 | + *
| 1614 | + * Returns: True if the device irqdomain or the bus irqdomain is
| 1615 | + * non-standard PCI/MSI.
| 1616 | + */
| 1617 | +bool pci_dev_has_special_msi_domain(struct pci_dev *pdev)
| 1618 | +{
| 1619 | +	struct irq_domain *dom = dev_get_msi_domain(&pdev->dev);
| 1620 | +
| 1621 | +	if (!dom)
| 1622 | +		dom = dev_get_msi_domain(&pdev->bus->dev);
| 1623 | +
| 1624 | +	if (!dom)
| 1625 | +		return true;
| 1626 | +
| 1627 | +	return dom->bus_token != DOMAIN_BUS_PCI_MSI;
| 1628 | +}
| 1629 | +
1584 | 1630 | #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
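
For context, the PCI_IRQ_VIRTUAL handling above lets a driver request more MSI-X vectors than the hardware table provides; entries past pci_msix_vec_count() are marked is_virtual and never touch the MSI-X table. A rough driver-side usage sketch follows (the foo_* names, the 32-vector cap, and the reserved admin vector are illustrative assumptions, not taken from this patch):

#include <linux/pci.h>

/* Hypothetical driver sketch: over-allocate MSI-X vectors via PCI_IRQ_VIRTUAL. */
static int foo_setup_irqs(struct pci_dev *pdev)
{
	struct irq_affinity affd = { .pre_vectors = 1 };	/* keep vector 0 unspread */
	int nvecs, irq;

	nvecs = pci_alloc_irq_vectors_affinity(pdev, 1, 32,
			PCI_IRQ_MSIX | PCI_IRQ_VIRTUAL | PCI_IRQ_AFFINITY,
			&affd);
	if (nvecs < 0)
		return nvecs;

	/* Device-relative index 0 maps to a Linux IRQ number. */
	irq = pci_irq_vector(pdev, 0);
	if (irq < 0)
		return irq;

	/* request_irq(irq, foo_admin_handler, 0, "foo-admin", pdev); */
	return nvecs;
}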