hc
2024-09-20 cf4ce59b3b70238352c7f1729f0f7223214828ad
kernel/kernel/irq/irqdomain.c
....@@ -25,6 +25,9 @@
2525
2626 static struct irq_domain *irq_default_domain;
2727
28
+static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
29
+ unsigned int nr_irqs, int node, void *arg,
30
+ bool realloc, const struct irq_affinity_desc *affinity);
2831 static void irq_domain_check_hierarchy(struct irq_domain *domain);
2932
3033 struct irqchip_fwid {
....@@ -53,7 +56,7 @@
5356 * @name: Optional user provided domain name
5457 * @pa: Optional user-provided physical address
5558 *
56
- * Allocate a struct irqchip_fwid, and return a poiner to the embedded
59
+ * Allocate a struct irqchip_fwid, and return a pointer to the embedded
5760 * fwnode_handle (or NULL on failure).
5861 *
5962 * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
....@@ -114,23 +117,12 @@
114117 }
115118 EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
116119
117
-/**
118
- * __irq_domain_add() - Allocate a new irq_domain data structure
119
- * @fwnode: firmware node for the interrupt controller
120
- * @size: Size of linear map; 0 for radix mapping only
121
- * @hwirq_max: Maximum number of interrupts supported by controller
122
- * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
123
- * direct mapping
124
- * @ops: domain callbacks
125
- * @host_data: Controller private data pointer
126
- *
127
- * Allocates and initializes an irq_domain structure.
128
- * Returns pointer to IRQ domain, or NULL on failure.
129
- */
130
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
131
- irq_hw_number_t hwirq_max, int direct_max,
132
- const struct irq_domain_ops *ops,
133
- void *host_data)
120
+static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
121
+ unsigned int size,
122
+ irq_hw_number_t hwirq_max,
123
+ int direct_max,
124
+ const struct irq_domain_ops *ops,
125
+ void *host_data)
134126 {
135127 struct irqchip_fwid *fwid;
136128 struct irq_domain *domain;
....@@ -208,12 +200,44 @@
208200 domain->revmap_direct_max_irq = direct_max;
209201 irq_domain_check_hierarchy(domain);
210202
203
+ return domain;
204
+}
205
+
206
+static void __irq_domain_publish(struct irq_domain *domain)
207
+{
211208 mutex_lock(&irq_domain_mutex);
212209 debugfs_add_domain_dir(domain);
213210 list_add(&domain->link, &irq_domain_list);
214211 mutex_unlock(&irq_domain_mutex);
215212
216213 pr_debug("Added domain %s\n", domain->name);
214
+}
215
+
216
+/**
217
+ * __irq_domain_add() - Allocate a new irq_domain data structure
218
+ * @fwnode: firmware node for the interrupt controller
219
+ * @size: Size of linear map; 0 for radix mapping only
220
+ * @hwirq_max: Maximum number of interrupts supported by controller
221
+ * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
222
+ * direct mapping
223
+ * @ops: domain callbacks
224
+ * @host_data: Controller private data pointer
225
+ *
226
+ * Allocates and initializes an irq_domain structure.
227
+ * Returns pointer to IRQ domain, or NULL on failure.
228
+ */
229
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
230
+ irq_hw_number_t hwirq_max, int direct_max,
231
+ const struct irq_domain_ops *ops,
232
+ void *host_data)
233
+{
234
+ struct irq_domain *domain;
235
+
236
+ domain = __irq_domain_create(fwnode, size, hwirq_max, direct_max,
237
+ ops, host_data);
238
+ if (domain)
239
+ __irq_domain_publish(domain);
240
+
217241 return domain;
218242 }
219243 EXPORT_SYMBOL_GPL(__irq_domain_add);
....@@ -497,6 +521,9 @@
497521 return;
498522
499523 hwirq = irq_data->hwirq;
524
+
525
+ mutex_lock(&irq_domain_mutex);
526
+
500527 irq_set_status_flags(irq, IRQ_NOREQUEST);
501528
502529 /* remove chip and handler */
....@@ -516,10 +543,12 @@
516543
517544 /* Clear reverse map for this hwirq */
518545 irq_domain_clear_mapping(domain, hwirq);
546
+
547
+ mutex_unlock(&irq_domain_mutex);
519548 }
520549
521
-int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
522
- irq_hw_number_t hwirq)
550
+static int irq_domain_associate_locked(struct irq_domain *domain, unsigned int virq,
551
+ irq_hw_number_t hwirq)
523552 {
524553 struct irq_data *irq_data = irq_get_irq_data(virq);
525554 int ret;
....@@ -532,7 +561,6 @@
532561 if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
533562 return -EINVAL;
534563
535
- mutex_lock(&irq_domain_mutex);
536564 irq_data->hwirq = hwirq;
537565 irq_data->domain = domain;
538566 if (domain->ops->map) {
....@@ -549,7 +577,6 @@
549577 }
550578 irq_data->domain = NULL;
551579 irq_data->hwirq = 0;
552
- mutex_unlock(&irq_domain_mutex);
553580 return ret;
554581 }
555582
....@@ -560,11 +587,22 @@
560587
561588 domain->mapcount++;
562589 irq_domain_set_mapping(domain, hwirq, irq_data);
563
- mutex_unlock(&irq_domain_mutex);
564590
565591 irq_clear_status_flags(virq, IRQ_NOREQUEST);
566592
567593 return 0;
594
+}
595
+
596
+int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
597
+ irq_hw_number_t hwirq)
598
+{
599
+ int ret;
600
+
601
+ mutex_lock(&irq_domain_mutex);
602
+ ret = irq_domain_associate_locked(domain, virq, hwirq);
603
+ mutex_unlock(&irq_domain_mutex);
604
+
605
+ return ret;
568606 }
569607 EXPORT_SYMBOL_GPL(irq_domain_associate);
570608
....@@ -625,6 +663,34 @@
625663 }
626664 EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
627665
666
+static unsigned int irq_create_mapping_affinity_locked(struct irq_domain *domain,
667
+ irq_hw_number_t hwirq,
668
+ const struct irq_affinity_desc *affinity)
669
+{
670
+ struct device_node *of_node = irq_domain_get_of_node(domain);
671
+ int virq;
672
+
673
+ pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
674
+
675
+ /* Allocate a virtual interrupt number */
676
+ virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
677
+ affinity);
678
+ if (virq <= 0) {
679
+ pr_debug("-> virq allocation failed\n");
680
+ return 0;
681
+ }
682
+
683
+ if (irq_domain_associate_locked(domain, virq, hwirq)) {
684
+ irq_free_desc(virq);
685
+ return 0;
686
+ }
687
+
688
+ pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
689
+ hwirq, of_node_full_name(of_node), virq);
690
+
691
+ return virq;
692
+}
693
+
628694 /**
629695 * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space
630696 * @domain: domain owning this hardware interrupt or NULL for default domain
....@@ -637,47 +703,31 @@
637703 * on the number returned from that call.
638704 */
639705 unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
640
- irq_hw_number_t hwirq,
641
- const struct irq_affinity_desc *affinity)
706
+ irq_hw_number_t hwirq,
707
+ const struct irq_affinity_desc *affinity)
642708 {
643
- struct device_node *of_node;
644709 int virq;
645710
646
- pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
647
-
648
- /* Look for default domain if nececssary */
711
+ /* Look for default domain if necessary */
649712 if (domain == NULL)
650713 domain = irq_default_domain;
651714 if (domain == NULL) {
652715 WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
653716 return 0;
654717 }
655
- pr_debug("-> using domain @%p\n", domain);
656718
657
- of_node = irq_domain_get_of_node(domain);
719
+ mutex_lock(&irq_domain_mutex);
658720
659721 /* Check if mapping already exists */
660722 virq = irq_find_mapping(domain, hwirq);
661723 if (virq) {
662
- pr_debug("-> existing mapping on virq %d\n", virq);
663
- return virq;
724
+ pr_debug("existing mapping on virq %d\n", virq);
725
+ goto out;
664726 }
665727
666
- /* Allocate a virtual interrupt number */
667
- virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
668
- affinity);
669
- if (virq <= 0) {
670
- pr_debug("-> virq allocation failed\n");
671
- return 0;
672
- }
673
-
674
- if (irq_domain_associate(domain, virq, hwirq)) {
675
- irq_free_desc(virq);
676
- return 0;
677
- }
678
-
679
- pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
680
- hwirq, of_node_full_name(of_node), virq);
728
+ virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity);
729
+out:
730
+ mutex_unlock(&irq_domain_mutex);
681731
682732 return virq;
683733 }
....@@ -781,6 +831,8 @@
781831 if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
782832 type &= IRQ_TYPE_SENSE_MASK;
783833
834
+ mutex_lock(&irq_domain_mutex);
835
+
784836 /*
785837 * If we've already configured this interrupt,
786838 * don't do it again, or hell will break loose.
....@@ -793,7 +845,7 @@
793845 * interrupt number.
794846 */
795847 if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
796
- return virq;
848
+ goto out;
797849
798850 /*
799851 * If the trigger type has not been set yet, then set
....@@ -801,40 +853,45 @@
801853 */
802854 if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
803855 irq_data = irq_get_irq_data(virq);
804
- if (!irq_data)
805
- return 0;
856
+ if (!irq_data) {
857
+ virq = 0;
858
+ goto out;
859
+ }
806860
807861 irqd_set_trigger_type(irq_data, type);
808
- return virq;
862
+ goto out;
809863 }
810864
811865 pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
812866 hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
813
- return 0;
867
+ virq = 0;
868
+ goto out;
814869 }
815870
816871 if (irq_domain_is_hierarchy(domain)) {
817
- virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
818
- if (virq <= 0)
819
- return 0;
872
+ virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE,
873
+ fwspec, false, NULL);
874
+ if (virq <= 0) {
875
+ virq = 0;
876
+ goto out;
877
+ }
820878 } else {
821879 /* Create mapping */
822
- virq = irq_create_mapping(domain, hwirq);
880
+ virq = irq_create_mapping_affinity_locked(domain, hwirq, NULL);
823881 if (!virq)
824
- return virq;
882
+ goto out;
825883 }
826884
827885 irq_data = irq_get_irq_data(virq);
828
- if (!irq_data) {
829
- if (irq_domain_is_hierarchy(domain))
830
- irq_domain_free_irqs(virq, 1);
831
- else
832
- irq_dispose_mapping(virq);
833
- return 0;
886
+ if (WARN_ON(!irq_data)) {
887
+ virq = 0;
888
+ goto out;
834889 }
835890
836891 /* Store trigger type */
837892 irqd_set_trigger_type(irq_data, type);
893
+out:
894
+ mutex_unlock(&irq_domain_mutex);
838895
839896 return virq;
840897 }
....@@ -886,7 +943,7 @@
886943 {
887944 struct irq_data *data;
888945
889
- /* Look for default domain if nececssary */
946
+ /* Look for default domain if necessary */
890947 if (domain == NULL)
891948 domain = irq_default_domain;
892949 if (domain == NULL)
....@@ -1076,12 +1133,15 @@
10761133 struct irq_domain *domain;
10771134
10781135 if (size)
1079
- domain = irq_domain_create_linear(fwnode, size, ops, host_data);
1136
+ domain = __irq_domain_create(fwnode, size, size, 0, ops, host_data);
10801137 else
1081
- domain = irq_domain_create_tree(fwnode, ops, host_data);
1138
+ domain = __irq_domain_create(fwnode, 0, ~0, 0, ops, host_data);
1139
+
10821140 if (domain) {
10831141 domain->parent = parent;
10841142 domain->flags |= flags;
1143
+
1144
+ __irq_domain_publish(domain);
10851145 }
10861146
10871147 return domain;
....@@ -1399,39 +1459,11 @@
13991459 return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
14001460 }
14011461
1402
-/**
1403
- * __irq_domain_alloc_irqs - Allocate IRQs from domain
1404
- * @domain: domain to allocate from
1405
- * @irq_base: allocate specified IRQ number if irq_base >= 0
1406
- * @nr_irqs: number of IRQs to allocate
1407
- * @node: NUMA node id for memory allocation
1408
- * @arg: domain specific argument
1409
- * @realloc: IRQ descriptors have already been allocated if true
1410
- * @affinity: Optional irq affinity mask for multiqueue devices
1411
- *
1412
- * Allocate IRQ numbers and initialized all data structures to support
1413
- * hierarchy IRQ domains.
1414
- * Parameter @realloc is mainly to support legacy IRQs.
1415
- * Returns error code or allocated IRQ number
1416
- *
1417
- * The whole process to setup an IRQ has been split into two steps.
1418
- * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
1419
- * descriptor and required hardware resources. The second step,
1420
- * irq_domain_activate_irq(), is to program hardwares with preallocated
1421
- * resources. In this way, it's easier to rollback when failing to
1422
- * allocate resources.
1423
- */
1424
-int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
1425
- unsigned int nr_irqs, int node, void *arg,
1426
- bool realloc, const struct irq_affinity_desc *affinity)
1462
+static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
1463
+ unsigned int nr_irqs, int node, void *arg,
1464
+ bool realloc, const struct irq_affinity_desc *affinity)
14271465 {
14281466 int i, ret, virq;
1429
-
1430
- if (domain == NULL) {
1431
- domain = irq_default_domain;
1432
- if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
1433
- return -EINVAL;
1434
- }
14351467
14361468 if (realloc && irq_base >= 0) {
14371469 virq = irq_base;
....@@ -1451,24 +1483,18 @@
14511483 goto out_free_desc;
14521484 }
14531485
1454
- mutex_lock(&irq_domain_mutex);
14551486 ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
1456
- if (ret < 0) {
1457
- mutex_unlock(&irq_domain_mutex);
1487
+ if (ret < 0)
14581488 goto out_free_irq_data;
1459
- }
14601489
14611490 for (i = 0; i < nr_irqs; i++) {
14621491 ret = irq_domain_trim_hierarchy(virq + i);
1463
- if (ret) {
1464
- mutex_unlock(&irq_domain_mutex);
1492
+ if (ret)
14651493 goto out_free_irq_data;
1466
- }
14671494 }
1468
-
1495
+
14691496 for (i = 0; i < nr_irqs; i++)
14701497 irq_domain_insert_irq(virq + i);
1471
- mutex_unlock(&irq_domain_mutex);
14721498
14731499 return virq;
14741500
....@@ -1476,6 +1502,48 @@
14761502 irq_domain_free_irq_data(virq, nr_irqs);
14771503 out_free_desc:
14781504 irq_free_descs(virq, nr_irqs);
1505
+ return ret;
1506
+}
1507
+
1508
+/**
1509
+ * __irq_domain_alloc_irqs - Allocate IRQs from domain
1510
+ * @domain: domain to allocate from
1511
+ * @irq_base: allocate specified IRQ number if irq_base >= 0
1512
+ * @nr_irqs: number of IRQs to allocate
1513
+ * @node: NUMA node id for memory allocation
1514
+ * @arg: domain specific argument
1515
+ * @realloc: IRQ descriptors have already been allocated if true
1516
+ * @affinity: Optional irq affinity mask for multiqueue devices
1517
+ *
1518
+ * Allocate IRQ numbers and initializes all data structures to support
1519
+ * hierarchy IRQ domains.
1520
+ * Parameter @realloc is mainly to support legacy IRQs.
1521
+ * Returns error code or allocated IRQ number
1522
+ *
1523
+ * The whole process to setup an IRQ has been split into two steps.
1524
+ * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
1525
+ * descriptor and required hardware resources. The second step,
1526
+ * irq_domain_activate_irq(), is to program the hardware with preallocated
1527
+ * resources. In this way, it's easier to rollback when failing to
1528
+ * allocate resources.
1529
+ */
1530
+int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
1531
+ unsigned int nr_irqs, int node, void *arg,
1532
+ bool realloc, const struct irq_affinity_desc *affinity)
1533
+{
1534
+ int ret;
1535
+
1536
+ if (domain == NULL) {
1537
+ domain = irq_default_domain;
1538
+ if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
1539
+ return -EINVAL;
1540
+ }
1541
+
1542
+ mutex_lock(&irq_domain_mutex);
1543
+ ret = irq_domain_alloc_irqs_locked(domain, irq_base, nr_irqs, node, arg,
1544
+ realloc, affinity);
1545
+ mutex_unlock(&irq_domain_mutex);
1546
+
14791547 return ret;
14801548 }
14811549
....@@ -1836,6 +1904,13 @@
18361904 irq_set_handler_data(virq, handler_data);
18371905 }
18381906
1907
+static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
1908
+ unsigned int nr_irqs, int node, void *arg,
1909
+ bool realloc, const struct irq_affinity_desc *affinity)
1910
+{
1911
+ return -EINVAL;
1912
+}
1913
+
18391914 static void irq_domain_check_hierarchy(struct irq_domain *domain)
18401915 {
18411916 }