2024-09-20 cf4ce59b3b70238352c7f1729f0f7223214828ad
--- a/kernel/kernel/irq/irqdomain.c
+++ b/kernel/kernel/irq/irqdomain.c
@@ -25,13 +25,16 @@
 
 static struct irq_domain *irq_default_domain;
 
+static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
+					unsigned int nr_irqs, int node, void *arg,
+					bool realloc, const struct irq_affinity_desc *affinity);
 static void irq_domain_check_hierarchy(struct irq_domain *domain);
 
 struct irqchip_fwid {
 	struct fwnode_handle fwnode;
 	unsigned int type;
 	char *name;
-	void *data;
+	phys_addr_t *pa;
 };
 
 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
@@ -46,14 +49,14 @@
 EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
 
 /**
- * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
+ * __irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
  * identifying an irq domain
  * @type: Type of irqchip_fwnode. See linux/irqdomain.h
- * @name: Optional user provided domain name
  * @id: Optional user provided id if name != NULL
- * @data: Optional user-provided data
+ * @name: Optional user provided domain name
+ * @pa: Optional user-provided physical address
  *
- * Allocate a struct irqchip_fwid, and return a poiner to the embedded
+ * Allocate a struct irqchip_fwid, and return a pointer to the embedded
  * fwnode_handle (or NULL on failure).
  *
  * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
@@ -62,7 +65,8 @@
  * domain struct.
  */
 struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
-						const char *name, void *data)
+						const char *name,
+						phys_addr_t *pa)
 {
 	struct irqchip_fwid *fwid;
 	char *n;
@@ -77,7 +81,7 @@
 		n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
 		break;
 	default:
-		n = kasprintf(GFP_KERNEL, "irqchip@%p", data);
+		n = kasprintf(GFP_KERNEL, "irqchip@%pa", pa);
 		break;
 	}
 
@@ -89,8 +93,8 @@
 
 	fwid->type = type;
 	fwid->name = n;
-	fwid->data = data;
-	fwid->fwnode.ops = &irqchip_fwnode_ops;
+	fwid->pa = pa;
+	fwnode_init(&fwid->fwnode, &irqchip_fwnode_ops);
 	return &fwid->fwnode;
 }
 EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);
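The hunks above change how anonymous irqchip fwnodes are named and initialised. As a usage sketch only (the "demo" identifiers are invented and not part of this patch), a driver without a firmware node would normally go through the irq_domain_alloc_named_fwnode()/irq_domain_alloc_fwnode() wrappers rather than calling __irq_domain_alloc_fwnode() directly:

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Hypothetical driver; .map/.xlate callbacks elided for brevity. */
static const struct irq_domain_ops demo_domain_ops;

static struct irq_domain *demo_create_domain(void)
{
	struct fwnode_handle *fwnode;
	struct irq_domain *domain;

	/* IRQCHIP_FWNODE_NAMED: the core duplicates the name string. */
	fwnode = irq_domain_alloc_named_fwnode("demo-intc");
	if (!fwnode)
		return NULL;

	domain = irq_domain_create_linear(fwnode, 32, &demo_domain_ops, NULL);
	if (!domain)
		irq_domain_free_fwnode(fwnode);

	return domain;
}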
@@ -113,36 +117,24 @@
 }
 EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
 
-/**
- * __irq_domain_add() - Allocate a new irq_domain data structure
- * @fwnode: firmware node for the interrupt controller
- * @size: Size of linear map; 0 for radix mapping only
- * @hwirq_max: Maximum number of interrupts supported by controller
- * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
- * direct mapping
- * @ops: domain callbacks
- * @host_data: Controller private data pointer
- *
- * Allocates and initialize and irq_domain structure.
- * Returns pointer to IRQ domain, or NULL on failure.
- */
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
-				    irq_hw_number_t hwirq_max, int direct_max,
-				    const struct irq_domain_ops *ops,
-				    void *host_data)
+static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
+					      unsigned int size,
+					      irq_hw_number_t hwirq_max,
+					      int direct_max,
+					      const struct irq_domain_ops *ops,
+					      void *host_data)
 {
-	struct device_node *of_node = to_of_node(fwnode);
 	struct irqchip_fwid *fwid;
 	struct irq_domain *domain;
 
 	static atomic_t unknown_domains;
 
 	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
-			      GFP_KERNEL, of_node_to_nid(of_node));
-	if (WARN_ON(!domain))
+			      GFP_KERNEL, of_node_to_nid(to_of_node(fwnode)));
+	if (!domain)
 		return NULL;
 
-	if (fwnode && is_fwnode_irqchip(fwnode)) {
+	if (is_fwnode_irqchip(fwnode)) {
 		fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
 
 		switch (fwid->type) {
@@ -161,30 +153,16 @@
 			domain->name = fwid->name;
 			break;
 		}
-#ifdef CONFIG_ACPI
-	} else if (is_acpi_device_node(fwnode)) {
-		struct acpi_buffer buf = {
-			.length = ACPI_ALLOCATE_BUFFER,
-		};
-		acpi_handle handle;
-
-		handle = acpi_device_handle(to_acpi_device_node(fwnode));
-		if (acpi_get_name(handle, ACPI_FULL_PATHNAME, &buf) == AE_OK) {
-			domain->name = buf.pointer;
-			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
-		}
-
-		domain->fwnode = fwnode;
-#endif
-	} else if (of_node) {
+	} else if (is_of_node(fwnode) || is_acpi_device_node(fwnode) ||
+		   is_software_node(fwnode)) {
 		char *name;
 
 		/*
-		 * DT paths contain '/', which debugfs is legitimately
+		 * fwnode paths contain '/', which debugfs is legitimately
 		 * unhappy about. Replace them with ':', which does
 		 * the trick and is not as offensive as '\'...
 		 */
-		name = kasprintf(GFP_KERNEL, "%pOF", of_node);
+		name = kasprintf(GFP_KERNEL, "%pfw", fwnode);
 		if (!name) {
 			kfree(domain);
 			return NULL;
@@ -209,7 +187,8 @@
 		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 	}
 
-	of_node_get(of_node);
+	fwnode_handle_get(fwnode);
+	fwnode_dev_initialized(fwnode, true);
 
 	/* Fill structure */
 	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
@@ -221,12 +200,44 @@
 	domain->revmap_direct_max_irq = direct_max;
 	irq_domain_check_hierarchy(domain);
 
+	return domain;
+}
+
+static void __irq_domain_publish(struct irq_domain *domain)
+{
 	mutex_lock(&irq_domain_mutex);
 	debugfs_add_domain_dir(domain);
 	list_add(&domain->link, &irq_domain_list);
 	mutex_unlock(&irq_domain_mutex);
 
 	pr_debug("Added domain %s\n", domain->name);
+}
+
+/**
+ * __irq_domain_add() - Allocate a new irq_domain data structure
+ * @fwnode: firmware node for the interrupt controller
+ * @size: Size of linear map; 0 for radix mapping only
+ * @hwirq_max: Maximum number of interrupts supported by controller
+ * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
+ * direct mapping
+ * @ops: domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Allocates and initializes an irq_domain structure.
+ * Returns pointer to IRQ domain, or NULL on failure.
+ */
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
+				    irq_hw_number_t hwirq_max, int direct_max,
+				    const struct irq_domain_ops *ops,
+				    void *host_data)
+{
+	struct irq_domain *domain;
+
+	domain = __irq_domain_create(fwnode, size, hwirq_max, direct_max,
+				     ops, host_data);
+	if (domain)
+		__irq_domain_publish(domain);
+
 	return domain;
 }
 EXPORT_SYMBOL_GPL(__irq_domain_add);
@@ -258,7 +269,8 @@
 
 	pr_debug("Removed domain %s\n", domain->name);
 
-	of_node_put(irq_domain_get_of_node(domain));
+	fwnode_dev_initialized(domain->fwnode, false);
+	fwnode_handle_put(domain->fwnode);
 	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
 		kfree(domain->name);
 	kfree(domain);
@@ -460,6 +472,20 @@
 }
 EXPORT_SYMBOL_GPL(irq_set_default_host);
 
+/**
+ * irq_get_default_host() - Retrieve the "default" irq domain
+ *
+ * Returns: the default domain, if any.
+ *
+ * Modern code should never use this. This should only be used on
+ * systems that cannot implement a firmware->fwnode mapping (which
+ * both DT and ACPI provide).
+ */
+struct irq_domain *irq_get_default_host(void)
+{
+	return irq_default_domain;
+}
+
 static void irq_domain_clear_mapping(struct irq_domain *domain,
 				     irq_hw_number_t hwirq)
 {
@@ -495,6 +521,9 @@
 		return;
 
 	hwirq = irq_data->hwirq;
+
+	mutex_lock(&irq_domain_mutex);
+
 	irq_set_status_flags(irq, IRQ_NOREQUEST);
 
 	/* remove chip and handler */
@@ -514,10 +543,12 @@
 
 	/* Clear reverse map for this hwirq */
 	irq_domain_clear_mapping(domain, hwirq);
+
+	mutex_unlock(&irq_domain_mutex);
 }
 
-int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
-			 irq_hw_number_t hwirq)
+static int irq_domain_associate_locked(struct irq_domain *domain, unsigned int virq,
+				       irq_hw_number_t hwirq)
 {
 	struct irq_data *irq_data = irq_get_irq_data(virq);
 	int ret;
@@ -530,7 +561,6 @@
 	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
 		return -EINVAL;
 
-	mutex_lock(&irq_domain_mutex);
 	irq_data->hwirq = hwirq;
 	irq_data->domain = domain;
 	if (domain->ops->map) {
@@ -547,7 +577,6 @@
 			}
 			irq_data->domain = NULL;
 			irq_data->hwirq = 0;
-			mutex_unlock(&irq_domain_mutex);
 			return ret;
 		}
 
@@ -558,11 +587,22 @@
 
 	domain->mapcount++;
 	irq_domain_set_mapping(domain, hwirq, irq_data);
-	mutex_unlock(&irq_domain_mutex);
 
 	irq_clear_status_flags(virq, IRQ_NOREQUEST);
 
 	return 0;
+}
+
+int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
+			 irq_hw_number_t hwirq)
+{
+	int ret;
+
+	mutex_lock(&irq_domain_mutex);
+	ret = irq_domain_associate_locked(domain, virq, hwirq);
+	mutex_unlock(&irq_domain_mutex);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(irq_domain_associate);
 
@@ -623,50 +663,24 @@
 }
 EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
 
-/**
- * irq_create_mapping() - Map a hardware interrupt into linux irq space
- * @domain: domain owning this hardware interrupt or NULL for default domain
- * @hwirq: hardware irq number in that domain space
- *
- * Only one mapping per hardware interrupt is permitted. Returns a linux
- * irq number.
- * If the sense/trigger is to be specified, set_irq_type() should be called
- * on the number returned from that call.
- */
-unsigned int irq_create_mapping(struct irq_domain *domain,
-				irq_hw_number_t hwirq)
+static unsigned int irq_create_mapping_affinity_locked(struct irq_domain *domain,
+							irq_hw_number_t hwirq,
+							const struct irq_affinity_desc *affinity)
 {
-	struct device_node *of_node;
+	struct device_node *of_node = irq_domain_get_of_node(domain);
 	int virq;
 
 	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
 
-	/* Look for default domain if nececssary */
-	if (domain == NULL)
-		domain = irq_default_domain;
-	if (domain == NULL) {
-		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
-		return 0;
-	}
-	pr_debug("-> using domain @%p\n", domain);
-
-	of_node = irq_domain_get_of_node(domain);
-
-	/* Check if mapping already exists */
-	virq = irq_find_mapping(domain, hwirq);
-	if (virq) {
-		pr_debug("-> existing mapping on virq %d\n", virq);
-		return virq;
-	}
-
 	/* Allocate a virtual interrupt number */
-	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
+	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
+				      affinity);
	if (virq <= 0) {
 		pr_debug("-> virq allocation failed\n");
 		return 0;
 	}
 
-	if (irq_domain_associate(domain, virq, hwirq)) {
+	if (irq_domain_associate_locked(domain, virq, hwirq)) {
 		irq_free_desc(virq);
 		return 0;
 	}
@@ -676,7 +690,48 @@
 
 	return virq;
 }
-EXPORT_SYMBOL_GPL(irq_create_mapping);
+
+/**
+ * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space
+ * @domain: domain owning this hardware interrupt or NULL for default domain
+ * @hwirq: hardware irq number in that domain space
+ * @affinity: irq affinity
+ *
+ * Only one mapping per hardware interrupt is permitted. Returns a linux
+ * irq number.
+ * If the sense/trigger is to be specified, set_irq_type() should be called
+ * on the number returned from that call.
+ */
+unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
+					 irq_hw_number_t hwirq,
+					 const struct irq_affinity_desc *affinity)
+{
+	int virq;
+
+	/* Look for default domain if necessary */
+	if (domain == NULL)
+		domain = irq_default_domain;
+	if (domain == NULL) {
+		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
+		return 0;
+	}
+
+	mutex_lock(&irq_domain_mutex);
+
+	/* Check if mapping already exists */
+	virq = irq_find_mapping(domain, hwirq);
+	if (virq) {
+		pr_debug("existing mapping on virq %d\n", virq);
+		goto out;
+	}
+
+	virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity);
+out:
+	mutex_unlock(&irq_domain_mutex);
+
+	return virq;
+}
+EXPORT_SYMBOL_GPL(irq_create_mapping_affinity);
 
 /**
  * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
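The new irq_create_mapping_affinity() keeps the old irq_create_mapping() semantics when the affinity argument is NULL. A minimal usage sketch, illustrative only (the "demo" helper name and hwirq value 5 are invented, not taken from this patch):

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Hypothetical helper: map hwirq 5 of an already-created domain. */
static int demo_map_hwirq5(struct irq_domain *domain)
{
	unsigned int virq;

	/* A NULL affinity descriptor gives irq_create_mapping() behaviour. */
	virq = irq_create_mapping_affinity(domain, 5, NULL);
	if (!virq)
		return -ENOMEM;

	/* Sense/trigger is configured separately, as the kernel-doc notes. */
	irq_set_irq_type(virq, IRQ_TYPE_LEVEL_HIGH);

	return virq;
}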
@@ -731,16 +786,17 @@
 	return 0;
 }
 
-static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
+static void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
+				      unsigned int count,
 				      struct irq_fwspec *fwspec)
 {
 	int i;
 
-	fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
-	fwspec->param_count = irq_data->args_count;
+	fwspec->fwnode = np ? &np->fwnode : NULL;
+	fwspec->param_count = count;
 
-	for (i = 0; i < irq_data->args_count; i++)
-		fwspec->param[i] = irq_data->args[i];
+	for (i = 0; i < count; i++)
+		fwspec->param[i] = args[i];
 }
 
 unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
@@ -775,6 +831,8 @@
 	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
 		type &= IRQ_TYPE_SENSE_MASK;
 
+	mutex_lock(&irq_domain_mutex);
+
 	/*
 	 * If we've already configured this interrupt,
 	 * don't do it again, or hell will break loose.
@@ -787,7 +845,7 @@
 		 * interrupt number.
 		 */
 		if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
-			return virq;
+			goto out;
 
 		/*
 		 * If the trigger type has not been set yet, then set
@@ -795,40 +853,45 @@
 		 */
 		if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
 			irq_data = irq_get_irq_data(virq);
-			if (!irq_data)
-				return 0;
+			if (!irq_data) {
+				virq = 0;
+				goto out;
+			}
 
 			irqd_set_trigger_type(irq_data, type);
-			return virq;
+			goto out;
 		}
 
 		pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
 			hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
-		return 0;
+		virq = 0;
+		goto out;
 	}
 
 	if (irq_domain_is_hierarchy(domain)) {
-		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
-		if (virq <= 0)
-			return 0;
+		virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE,
+						    fwspec, false, NULL);
+		if (virq <= 0) {
+			virq = 0;
+			goto out;
+		}
 	} else {
 		/* Create mapping */
-		virq = irq_create_mapping(domain, hwirq);
+		virq = irq_create_mapping_affinity_locked(domain, hwirq, NULL);
 		if (!virq)
-			return virq;
+			goto out;
 	}
 
 	irq_data = irq_get_irq_data(virq);
-	if (!irq_data) {
-		if (irq_domain_is_hierarchy(domain))
-			irq_domain_free_irqs(virq, 1);
-		else
-			irq_dispose_mapping(virq);
-		return 0;
+	if (WARN_ON(!irq_data)) {
+		virq = 0;
+		goto out;
 	}
 
 	/* Store trigger type */
 	irqd_set_trigger_type(irq_data, type);
+out:
+	mutex_unlock(&irq_domain_mutex);
 
 	return virq;
 }
@@ -838,7 +901,9 @@
 {
 	struct irq_fwspec fwspec;
 
-	of_phandle_args_to_fwspec(irq_data, &fwspec);
+	of_phandle_args_to_fwspec(irq_data->np, irq_data->args,
+				  irq_data->args_count, &fwspec);
+
 	return irq_create_fwspec_mapping(&fwspec);
 }
 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
@@ -869,7 +934,7 @@
 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
 
 /**
- * irq_find_mapping() - Find a linux irq from an hw irq number.
+ * irq_find_mapping() - Find a linux irq from a hw irq number.
  * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 */
@@ -878,7 +943,7 @@
 {
 	struct irq_data *data;
 
-	/* Look for default domain if nececssary */
+	/* Look for default domain if necessary */
 	if (domain == NULL)
 		domain = irq_default_domain;
 	if (domain == NULL)
@@ -930,11 +995,10 @@
 			const u32 *intspec, unsigned int intsize,
 			irq_hw_number_t *out_hwirq, unsigned int *out_type)
 {
-	if (WARN_ON(intsize < 2))
-		return -EINVAL;
-	*out_hwirq = intspec[0];
-	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
-	return 0;
+	struct irq_fwspec fwspec;
+
+	of_phandle_args_to_fwspec(ctrlr, intspec, intsize, &fwspec);
+	return irq_domain_translate_twocell(d, &fwspec, out_hwirq, out_type);
 }
 EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
 
@@ -970,8 +1034,46 @@
 };
 EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
 
+/**
+ * irq_domain_translate_onecell() - Generic translate for direct one cell
+ * bindings
+ */
+int irq_domain_translate_onecell(struct irq_domain *d,
+				 struct irq_fwspec *fwspec,
+				 unsigned long *out_hwirq,
+				 unsigned int *out_type)
+{
+	if (WARN_ON(fwspec->param_count < 1))
+		return -EINVAL;
+	*out_hwirq = fwspec->param[0];
+	*out_type = IRQ_TYPE_NONE;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_translate_onecell);
+
+/**
+ * irq_domain_translate_twocell() - Generic translate for direct two cell
+ * bindings
+ *
+ * Device Tree IRQ specifier translation function which works with two cell
+ * bindings where the cell values map directly to the hwirq number
+ * and linux irq flags.
+ */
+int irq_domain_translate_twocell(struct irq_domain *d,
+				 struct irq_fwspec *fwspec,
+				 unsigned long *out_hwirq,
+				 unsigned int *out_type)
+{
+	if (WARN_ON(fwspec->param_count < 2))
+		return -EINVAL;
+	*out_hwirq = fwspec->param[0];
+	*out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_translate_twocell);
+
 int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
-			   int node, const struct cpumask *affinity)
+			   int node, const struct irq_affinity_desc *affinity)
 {
 	unsigned int hint;
 
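The generic translators added above are meant to be plugged into a domain's ops table or reused from its .alloc() path. A sketch with invented "demo" names (not from this patch), assuming a two-cell firmware binding:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static int demo_domain_alloc(struct irq_domain *d, unsigned int virq,
			     unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	unsigned int type;
	int ret;

	/* Reuse the generic two-cell decoder instead of open-coding it. */
	ret = irq_domain_translate_twocell(d, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	/* ...program hardware and call irq_domain_set_info() per irq... */
	return 0;
}

static const struct irq_domain_ops demo_domain_ops = {
	.translate = irq_domain_translate_twocell,
	.alloc     = demo_domain_alloc,
	.free      = irq_domain_free_irqs_common,
};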
@@ -992,6 +1094,18 @@
 
 	return virq;
 }
+
+/**
+ * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
+ * @irq_data: The pointer to irq_data
+ */
+void irq_domain_reset_irq_data(struct irq_data *irq_data)
+{
+	irq_data->hwirq = 0;
+	irq_data->chip = &no_irq_chip;
+	irq_data->chip_data = NULL;
+}
+EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);
 
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 /**
@@ -1019,12 +1133,15 @@
 	struct irq_domain *domain;
 
 	if (size)
-		domain = irq_domain_create_linear(fwnode, size, ops, host_data);
+		domain = __irq_domain_create(fwnode, size, size, 0, ops, host_data);
 	else
-		domain = irq_domain_create_tree(fwnode, ops, host_data);
+		domain = __irq_domain_create(fwnode, 0, ~0, 0, ops, host_data);
+
 	if (domain) {
 		domain->parent = parent;
 		domain->flags |= flags;
+
+		__irq_domain_publish(domain);
 	}
 
 	return domain;
@@ -1084,6 +1201,17 @@
 	return irq_data;
 }
 
+static void __irq_domain_free_hierarchy(struct irq_data *irq_data)
+{
+	struct irq_data *tmp;
+
+	while (irq_data) {
+		tmp = irq_data;
+		irq_data = irq_data->parent_data;
+		kfree(tmp);
+	}
+}
+
 static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
 {
 	struct irq_data *irq_data, *tmp;
@@ -1095,12 +1223,84 @@
 		irq_data->parent_data = NULL;
 		irq_data->domain = NULL;
 
-		while (tmp) {
-			irq_data = tmp;
-			tmp = tmp->parent_data;
-			kfree(irq_data);
+		__irq_domain_free_hierarchy(tmp);
+	}
+}
+
+/**
+ * irq_domain_disconnect_hierarchy - Mark the first unused level of a hierarchy
+ * @domain: IRQ domain from which the hierarchy is to be disconnected
+ * @virq: IRQ number where the hierarchy is to be trimmed
+ *
+ * Marks the @virq level belonging to @domain as disconnected.
+ * Returns -EINVAL if @virq doesn't have a valid irq_data pointing
+ * to @domain.
+ *
+ * Its only use is to be able to trim levels of hierarchy that do not
+ * have any real meaning for this interrupt, and that the driver marks
+ * as such from its .alloc() callback.
+ */
+int irq_domain_disconnect_hierarchy(struct irq_domain *domain,
+				    unsigned int virq)
+{
+	struct irq_data *irqd;
+
+	irqd = irq_domain_get_irq_data(domain, virq);
+	if (!irqd)
+		return -EINVAL;
+
+	irqd->chip = ERR_PTR(-ENOTCONN);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_disconnect_hierarchy);
+
+static int irq_domain_trim_hierarchy(unsigned int virq)
+{
+	struct irq_data *tail, *irqd, *irq_data;
+
+	irq_data = irq_get_irq_data(virq);
+	tail = NULL;
+
+	/* The first entry must have a valid irqchip */
+	if (!irq_data->chip || IS_ERR(irq_data->chip))
+		return -EINVAL;
+
+	/*
+	 * Validate that the irq_data chain is sane in the presence of
+	 * a hierarchy trimming marker.
+	 */
+	for (irqd = irq_data->parent_data; irqd; irq_data = irqd, irqd = irqd->parent_data) {
+		/* Can't have a valid irqchip after a trim marker */
+		if (irqd->chip && tail)
+			return -EINVAL;
+
+		/* Can't have an empty irqchip before a trim marker */
+		if (!irqd->chip && !tail)
+			return -EINVAL;
+
+		if (IS_ERR(irqd->chip)) {
+			/* Only -ENOTCONN is a valid trim marker */
+			if (PTR_ERR(irqd->chip) != -ENOTCONN)
+				return -EINVAL;
+
+			tail = irq_data;
 		}
 	}
+
+	/* No trim marker, nothing to do */
+	if (!tail)
+		return 0;
+
+	pr_info("IRQ%d: trimming hierarchy from %s\n",
+		virq, tail->parent_data->domain->name);
+
+	/* Sever the inner part of the hierarchy... */
+	irqd = tail;
+	tail = tail->parent_data;
+	irqd->parent_data = NULL;
+	__irq_domain_free_hierarchy(tail);
+
+	return 0;
 }
 
 static int irq_domain_alloc_irq_data(struct irq_domain *domain,
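irq_domain_disconnect_hierarchy() is meant to be called from a driver's .alloc() callback, so that irq_domain_trim_hierarchy() can later drop the marked level. A hypothetical sketch for a middle-level domain that contributes nothing for these interrupts (the "demo" names are invented):

#include <linux/irq.h>
#include <linux/irqdomain.h>

static int demo_middle_alloc(struct irq_domain *d, unsigned int virq,
			     unsigned int nr_irqs, void *arg)
{
	int i, ret;

	/* Forward the allocation to the parent domain first. */
	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
	if (ret)
		return ret;

	/* This level adds no chip or translation for these interrupts. */
	for (i = 0; i < nr_irqs; i++) {
		ret = irq_domain_disconnect_hierarchy(d, virq + i);
		if (ret)
			return ret;
	}

	return 0;
}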
@@ -1194,18 +1394,6 @@
 EXPORT_SYMBOL(irq_domain_set_info);
 
 /**
- * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
- * @irq_data: The pointer to irq_data
- */
-void irq_domain_reset_irq_data(struct irq_data *irq_data)
-{
-	irq_data->hwirq = 0;
-	irq_data->chip = &no_irq_chip;
-	irq_data->chip_data = NULL;
-}
-EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);
-
-/**
  * irq_domain_free_irqs_common - Clear irq_data and free the parent
  * @domain: Interrupt domain to match
  * @virq: IRQ number to start with
@@ -1243,7 +1431,6 @@
 	}
 	irq_domain_free_irqs_common(domain, virq, nr_irqs);
 }
-EXPORT_SYMBOL_GPL(irq_domain_free_irqs_top);
 
 static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
 					   unsigned int irq_base,
@@ -1272,39 +1459,11 @@
 	return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
 }
 
-/**
- * __irq_domain_alloc_irqs - Allocate IRQs from domain
- * @domain: domain to allocate from
- * @irq_base: allocate specified IRQ nubmer if irq_base >= 0
- * @nr_irqs: number of IRQs to allocate
- * @node: NUMA node id for memory allocation
- * @arg: domain specific argument
- * @realloc: IRQ descriptors have already been allocated if true
- * @affinity: Optional irq affinity mask for multiqueue devices
- *
- * Allocate IRQ numbers and initialized all data structures to support
- * hierarchy IRQ domains.
- * Parameter @realloc is mainly to support legacy IRQs.
- * Returns error code or allocated IRQ number
- *
- * The whole process to setup an IRQ has been split into two steps.
- * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
- * descriptor and required hardware resources. The second step,
- * irq_domain_activate_irq(), is to program hardwares with preallocated
- * resources. In this way, it's easier to rollback when failing to
- * allocate resources.
- */
-int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
-			    unsigned int nr_irqs, int node, void *arg,
-			    bool realloc, const struct cpumask *affinity)
+static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
+					unsigned int nr_irqs, int node, void *arg,
+					bool realloc, const struct irq_affinity_desc *affinity)
 {
 	int i, ret, virq;
-
-	if (domain == NULL) {
-		domain = irq_default_domain;
-		if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
-			return -EINVAL;
-	}
 
 	if (realloc && irq_base >= 0) {
 		virq = irq_base;
@@ -1324,15 +1483,18 @@
 		goto out_free_desc;
 	}
 
-	mutex_lock(&irq_domain_mutex);
 	ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
-	if (ret < 0) {
-		mutex_unlock(&irq_domain_mutex);
+	if (ret < 0)
 		goto out_free_irq_data;
+
+	for (i = 0; i < nr_irqs; i++) {
+		ret = irq_domain_trim_hierarchy(virq + i);
+		if (ret)
+			goto out_free_irq_data;
 	}
+
 	for (i = 0; i < nr_irqs; i++)
 		irq_domain_insert_irq(virq + i);
-	mutex_unlock(&irq_domain_mutex);
 
 	return virq;
 
@@ -1340,6 +1502,48 @@
 	irq_domain_free_irq_data(virq, nr_irqs);
 out_free_desc:
 	irq_free_descs(virq, nr_irqs);
+	return ret;
+}
+
+/**
+ * __irq_domain_alloc_irqs - Allocate IRQs from domain
+ * @domain: domain to allocate from
+ * @irq_base: allocate specified IRQ number if irq_base >= 0
+ * @nr_irqs: number of IRQs to allocate
+ * @node: NUMA node id for memory allocation
+ * @arg: domain specific argument
+ * @realloc: IRQ descriptors have already been allocated if true
+ * @affinity: Optional irq affinity mask for multiqueue devices
+ *
+ * Allocate IRQ numbers and initialized all data structures to support
+ * hierarchy IRQ domains.
+ * Parameter @realloc is mainly to support legacy IRQs.
+ * Returns error code or allocated IRQ number
+ *
+ * The whole process to setup an IRQ has been split into two steps.
+ * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
+ * descriptor and required hardware resources. The second step,
+ * irq_domain_activate_irq(), is to program the hardware with preallocated
+ * resources. In this way, it's easier to rollback when failing to
+ * allocate resources.
+ */
+int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
+			    unsigned int nr_irqs, int node, void *arg,
+			    bool realloc, const struct irq_affinity_desc *affinity)
+{
+	int ret;
+
+	if (domain == NULL) {
+		domain = irq_default_domain;
+		if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
+			return -EINVAL;
+	}
+
+	mutex_lock(&irq_domain_mutex);
+	ret = irq_domain_alloc_irqs_locked(domain, irq_base, nr_irqs, node, arg,
+					   realloc, affinity);
+	mutex_unlock(&irq_domain_mutex);
+
 	return ret;
 }
 
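The kernel-doc above describes the two-step allocate/activate split. A sketch of a hypothetical caller (the fwspec argument is assumed to come from elsewhere); the hardware programming step runs later, when the interrupt is actually requested:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static int demo_alloc_one(struct irq_domain *domain, struct irq_fwspec *fwspec)
{
	int virq;

	/* Step 1: allocate a descriptor and walk the domain hierarchy. */
	virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
	if (virq <= 0)
		return virq ? virq : -ENOSPC;

	/*
	 * Step 2: irq_domain_activate_irq() programs the preallocated
	 * resources; the core invokes it when the interrupt is started,
	 * e.g. via request_irq(), so nothing more is needed here.
	 */
	return virq;
}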
@@ -1700,6 +1904,13 @@
 	irq_set_handler_data(virq, handler_data);
 }
 
+static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
+					unsigned int nr_irqs, int node, void *arg,
+					bool realloc, const struct irq_affinity_desc *affinity)
+{
+	return -EINVAL;
+}
+
 static void irq_domain_check_hierarchy(struct irq_domain *domain)
 {
 }
@@ -1752,6 +1963,7 @@
 static void debugfs_remove_domain_dir(struct irq_domain *d)
 {
 	debugfs_remove(d->debugfs_file);
+	d->debugfs_file = NULL;
 }
 
 void __init irq_domain_debugfs_init(struct dentry *root)
@@ -1759,8 +1971,6 @@
 	struct irq_domain *d;
 
 	domain_dir = debugfs_create_dir("domains", root);
-	if (!domain_dir)
-		return;
 
 	debugfs_create_file("default", 0444, domain_dir, NULL,
 			    &irq_domain_debug_fops);