...
 };

 /**
+ * struct ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
+ * @dev_index:	TISCI device index
+ */
+struct ti_sci_inta_msi_desc {
+	u16	dev_index;
+};
+
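For orientation, a minimal sketch (not part of the patch) of how code sitting on top of a TISCI INTA based MSI domain might record the device index in this new per-descriptor field; the helper name and its caller are assumptions, and the @inta union member it touches is added to struct msi_desc further below in this patch.

/* Illustrative helper only: stash the TISCI device index on a descriptor so
 * the INTA irqchip can retrieve it from the msi_desc it is handed later. */
static inline void example_msi_set_inta_index(struct msi_desc *desc,
					      u16 dev_index)
{
	desc->inta.dev_index = dev_index;
}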
---|
+/**
  * struct msi_desc - Descriptor structure for MSI based interrupts
  * @list:	List head for management
  * @irq:	The base interrupt number
...
  * @dev:	Pointer to the device which uses this descriptor
  * @msg:	The last set MSI message cached for reuse
  * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
+ *
+ * @write_msi_msg:	Callback that may be called when the MSI message
+ *			address or data changes
+ * @write_msi_msg_data:	Data parameter for the callback.
  *
---|
  * @masked:	[PCI MSI/X] Mask bits
  * @is_msix:	[PCI MSI/X] True if MSI-X
...
  * @mask_base:	[PCI MSI-X] Mask register base address
  * @platform:	[platform] Platform device specific msi descriptor data
  * @fsl_mc:	[fsl-mc] FSL MC device specific msi descriptor data
+ * @inta:	[INTA] TISCI based INTA specific msi descriptor data
  */
 struct msi_desc {
 	/* Shared device/bus type independent data */
...
 	unsigned int			nvec_used;
 	struct device			*dev;
 	struct msi_msg			msg;
-	struct cpumask			*affinity;
+	struct irq_affinity_desc	*affinity;
+#ifdef CONFIG_IRQ_MSI_IOMMU
+	const void			*iommu_cookie;
+#endif
+
+	void (*write_msi_msg)(struct msi_desc *entry, void *data);
+	void *write_msi_msg_data;

 	union {
 		/* PCI MSI/X specific data */
 		struct {
 			u32 masked;
 			struct {
-				__u8	is_msix		: 1;
-				__u8	multiple	: 3;
-				__u8	multi_cap	: 3;
-				__u8	maskbit		: 1;
-				__u8	is_64		: 1;
-				__u16	entry_nr;
+				u8	is_msix		: 1;
+				u8	multiple	: 3;
+				u8	multi_cap	: 3;
+				u8	maskbit		: 1;
+				u8	is_64		: 1;
+				u8	is_virtual	: 1;
+				u16	entry_nr;
 				unsigned default_irq;
 			} msi_attrib;
 			union {
...
 		 */
 		struct platform_msi_desc platform;
 		struct fsl_mc_msi_desc fsl_mc;
+		struct ti_sci_inta_msi_desc inta;
 	};
 };

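To illustrate the new @write_msi_msg/@write_msi_msg_data pair, a minimal sketch of a driver-supplied callback; the register offsets and the priv structure are invented for the example, while the callback signature and the cached entry->msg come from this header (needs <linux/io.h> for writel_relaxed()).

/* Illustrative only: called when the MSI message of @entry changes, e.g.
 * after an affinity update, so the device's copy can be refreshed. */
struct example_msi_priv {
	void __iomem *regs;			/* hypothetical MSI registers */
};

static void example_write_msi_msg(struct msi_desc *entry, void *data)
{
	struct example_msi_priv *priv = data;

	writel_relaxed(entry->msg.address_lo, priv->regs + 0x00);
	writel_relaxed(entry->msg.address_hi, priv->regs + 0x04);
	writel_relaxed(entry->msg.data,       priv->regs + 0x08);
}

/* Wiring it up on a descriptor:
 *	desc->write_msi_msg      = example_write_msi_msg;
 *	desc->write_msi_msg_data = priv;
 */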
---|
...
 	for (__irq = (desc)->irq;					\
 	     __irq < ((desc)->irq + (desc)->nvec_used);			\
 	     __irq++)
+
+#ifdef CONFIG_IRQ_MSI_IOMMU
+static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
+{
+	return desc->iommu_cookie;
+}
+
+static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
+					     const void *iommu_cookie)
+{
+	desc->iommu_cookie = iommu_cookie;
+}
+#else
+static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
+{
+	return NULL;
+}
+
+static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
+					     const void *iommu_cookie)
+{
+}
+#endif
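A minimal sketch of how these accessors might be used to pass an opaque per-descriptor pointer between the MSI core and IOMMU code; where the cookie comes from is an assumption, the accessors themselves are the ones defined above.

/* Illustrative only: attach and look up an opaque IOMMU cookie. Both calls
 * degrade to no-ops/NULL when CONFIG_IRQ_MSI_IOMMU is disabled. */
static void example_msi_attach_cookie(struct msi_desc *desc,
				      const void *mapping)
{
	msi_desc_set_iommu_cookie(desc, mapping);
}

static const void *example_msi_lookup_cookie(struct msi_desc *desc)
{
	return msi_desc_get_iommu_cookie(desc);
}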
---|

 #ifdef CONFIG_PCI_MSI
 #define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
...
 #endif /* CONFIG_PCI_MSI */

 struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
-				 const struct cpumask *affinity);
+				 const struct irq_affinity_desc *affinity);
 void free_msi_entry(struct msi_desc *entry);
 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
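Since alloc_msi_entry() now takes a struct irq_affinity_desc array rather than a bare cpumask, a minimal allocation sketch; the wrapper name and how the affinity array is obtained are assumptions.

/* Illustrative only: allocate a descriptor covering @nvec vectors with an
 * optional per-vector affinity descriptor array (may be NULL). */
static struct msi_desc *example_alloc_desc(struct device *dev, int nvec,
					   const struct irq_affinity_desc *affd)
{
	struct msi_desc *desc = alloc_msi_entry(dev, nvec, affd);

	if (!desc)
		return NULL;

	/* callers typically add it to the device's MSI list afterwards */
	return desc;
}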
---|
...
 void pci_msi_mask_irq(struct irq_data *data);
 void pci_msi_unmask_irq(struct irq_data *data);

-/* Conversion helpers. Should be removed after merging */
-static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
-{
-	__pci_write_msi_msg(entry, msg);
-}
-static inline void write_msi_msg(int irq, struct msi_msg *msg)
-{
-	pci_write_msi_msg(irq, msg);
-}
-static inline void mask_msi_irq(struct irq_data *data)
-{
-	pci_msi_mask_irq(data);
-}
-static inline void unmask_msi_irq(struct irq_data *data)
-{
-	pci_msi_unmask_irq(data);
-}
-
---|
 /*
- * The arch hooks to setup up msi irqs. Those functions are
- * implemented as weak symbols so that they /can/ be overriden by
- * architecture specific code if needed.
+ * The arch hooks to set up MSI irqs. The default functions are implemented
+ * as weak symbols so that they /can/ be overridden by architecture-specific
+ * code if needed. These hooks must be enabled by the architecture or by
+ * drivers which depend on them via msi_controller based MSI handling.
+ *
+ * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected, they are replaced by
+ * stubs with warnings.
  */
+#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
 void arch_teardown_msi_irq(unsigned int irq);
 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
 void arch_teardown_msi_irqs(struct pci_dev *dev);
-void arch_restore_msi_irqs(struct pci_dev *dev);
-
 void default_teardown_msi_irqs(struct pci_dev *dev);
+#else
+static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+	WARN_ON_ONCE(1);
+	return -ENODEV;
+}
+
+static inline void arch_teardown_msi_irqs(struct pci_dev *dev)
+{
+	WARN_ON_ONCE(1);
+}
+#endif
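As a sketch of the fallback path described above: when CONFIG_PCI_MSI_ARCH_FALLBACKS is selected, an architecture can override the weak per-interrupt hook and let the generic loop drive it. The vector-allocation helper below is hypothetical; irq_set_msi_desc() and pci_write_msi_msg() are existing kernel interfaces.

/* Illustrative only: an architecture override of the weak fallback hook. */
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	struct msi_msg msg;
	int virq;

	/* hypothetical arch helper that picks a vector and composes @msg */
	virq = example_arch_alloc_msi_vector(dev, desc, &msg);
	if (virq < 0)
		return virq;

	irq_set_msi_desc(virq, desc);
	pci_write_msi_msg(virq, &msg);
	return 0;
}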
---|
+
+/*
+ * The restore hooks are still available as they are useful even
+ * for fully irq domain based setups. Courtesy of XEN/X86.
+ */
+void arch_restore_msi_irqs(struct pci_dev *dev);
 void default_restore_msi_irqs(struct pci_dev *dev);
---|

 struct msi_controller {
...
  * @msi_finish:	Optional callback to finalize the allocation
  * @set_desc:	Set the msi descriptor for an interrupt
  * @handle_error:	Optional error handler if the allocation fails
+ * @domain_alloc_irqs:	Optional function to override the default allocation
+ *			function.
+ * @domain_free_irqs:	Optional function to override the default free
+ *			function.
  *
  * @get_hwirq, @msi_init and @msi_free are callbacks used by
  * msi_create_irq_domain() and related interfaces
...
  * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
  * are callbacks used by msi_domain_alloc_irqs() and related
  * interfaces which are based on msi_desc.
+ *
+ * @domain_alloc_irqs and @domain_free_irqs can be used to override the
+ * default allocation/free functions (__msi_domain_alloc/free_irqs). This
+ * is initially for a wrapper around XEN's separate MSI universe, which
+ * can't be wrapped into the regular irq domain concepts by mere mortals.
+ * This allows msi_domain_alloc/free_irqs to be used universally without
+ * having to special-case XEN all over the place.
+ *
+ * Contrary to other operations, @domain_alloc_irqs and @domain_free_irqs
+ * are set to the default implementation if NULL, even when
+ * MSI_FLAG_USE_DEF_DOM_OPS is not set, to avoid breaking existing users
+ * and because these callbacks are obviously mandatory.
+ *
+ * This is NOT meant to be abused, but it can be useful to build wrappers
+ * for specialized MSI irq domains which need extra work before and after
+ * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs().
  */
 struct msi_domain_ops {
 	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
...
 				    struct msi_desc *desc);
 	int		(*handle_error)(struct irq_domain *domain,
 					struct msi_desc *desc, int error);
+	int		(*domain_alloc_irqs)(struct irq_domain *domain,
+					     struct device *dev, int nvec);
+	void		(*domain_free_irqs)(struct irq_domain *domain,
+					    struct device *dev);
 };
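A minimal sketch of the override mechanism described in the comment above: a wrapper that does extra work around the default __msi_domain_alloc_irqs()/__msi_domain_free_irqs() (both declared further down in this header); the prepare/release helpers are assumptions.

/* Illustrative only: wrap the default allocation/free with extra work, in
 * the spirit of the XEN wrapper mentioned above. */
static int example_domain_alloc_irqs(struct irq_domain *domain,
				     struct device *dev, int nvec)
{
	int ret;

	ret = example_prepare_backend(dev, nvec);	/* hypothetical */
	if (ret)
		return ret;

	return __msi_domain_alloc_irqs(domain, dev, nvec);
}

static void example_domain_free_irqs(struct irq_domain *domain,
				     struct device *dev)
{
	__msi_domain_free_irqs(domain, dev);
	example_release_backend(dev);			/* hypothetical */
}

static struct msi_domain_ops example_msi_domain_ops = {
	.domain_alloc_irqs	= example_domain_alloc_irqs,
	.domain_free_irqs	= example_domain_free_irqs,
};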
---|

 /**
...
 struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
 					 struct msi_domain_info *info,
 					 struct irq_domain *parent);
+int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
+			    int nvec);
 int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 			  int nvec);
+void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
 void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
---|

...
 int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
 			     int virq, int nvec, msi_alloc_info_t *args);
 struct irq_domain *
-platform_msi_create_device_domain(struct device *dev,
-				  unsigned int nvec,
-				  irq_write_msi_msg_t write_msi_msg,
-				  const struct irq_domain_ops *ops,
-				  void *host_data);
+__platform_msi_create_device_domain(struct device *dev,
+				    unsigned int nvec,
+				    bool is_tree,
+				    irq_write_msi_msg_t write_msi_msg,
+				    const struct irq_domain_ops *ops,
+				    void *host_data);
+
+#define platform_msi_create_device_domain(dev, nvec, write, ops, data)	\
+	__platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
+#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
+	__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)
+
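A minimal usage sketch of the wrappers above; the vector count, the inner irq_domain_ops and the message-writer body are placeholders, while the irq_write_msi_msg_t signature (struct msi_desc *, struct msi_msg *) is the one used by platform MSI.

/* Illustrative only: create a non-tree platform MSI device domain with a
 * driver-provided message writer. */
static void example_dev_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	/* program msg->address_lo/hi and msg->data into device registers */
}

static const struct irq_domain_ops example_inner_domain_ops = {
	/* .alloc / .free / .translate as required by the inner domain */
};

static struct irq_domain *example_create_msi_domain(struct device *dev)
{
	return platform_msi_create_device_domain(dev, 16 /* arbitrary */,
						 example_dev_write_msg,
						 &example_inner_domain_ops,
						 NULL);
}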
---|
 int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
 			      unsigned int nr_irqs);
 void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
...
 struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
 					     struct msi_domain_info *info,
 					     struct irq_domain *parent);
-irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
-					  struct msi_desc *desc);
 int pci_msi_domain_check_cap(struct irq_domain *domain,
 			     struct msi_domain_info *info, struct device *dev);
 u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
 struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
+bool pci_dev_has_special_msi_domain(struct pci_dev *pdev);
 #else
 static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
 {
---|