forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 cde9070d9970eef1f7ec2360586c802a16230ad8
kernel/kernel/irq/irqdesc.c
@@ -31,7 +31,7 @@
 	cpulist_parse(str, irq_default_affinity);
 	/*
 	 * Set at least the boot cpu. We don't want to end up with
-	 * bugreports caused by random comandline masks
+	 * bugreports caused by random commandline masks
 	 */
 	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
 	return 1;
@@ -275,11 +275,12 @@
 	&actions_attr.attr,
 	NULL
 };
+ATTRIBUTE_GROUPS(irq);
 
 static struct kobj_type irq_kobj_type = {
 	.release	= irq_kobj_release,
 	.sysfs_ops	= &kobj_sysfs_ops,
-	.default_attrs	= irq_attrs,
+	.default_groups = irq_groups,
 };
 
 static void irq_sysfs_add(int irq, struct irq_desc *desc)
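
Note: mainline removed the .default_attrs field from struct kobj_type in favour of .default_groups, so the patch wraps the existing irq_attrs[] array with ATTRIBUTE_GROUPS(irq). A simplified sketch of what that macro is assumed to expand to (based on the <linux/sysfs.h> helper, with names generated from the "irq" prefix):

/* assumed expansion of ATTRIBUTE_GROUPS(irq), simplified */
static const struct attribute_group irq_group = {
	.attrs = irq_attrs,		/* the attribute array defined above */
};
static const struct attribute_group *irq_groups[] = {
	&irq_group,
	NULL,
};
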
@@ -287,22 +288,25 @@
 	if (irq_kobj_base) {
 		/*
 		 * Continue even in case of failure as this is nothing
-		 * crucial.
+		 * crucial and failures in the late irq_sysfs_init()
+		 * cannot be rolled back.
 		 */
 		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
 			pr_warn("Failed to add kobject for irq %d\n", irq);
+		else
+			desc->istate |= IRQS_SYSFS;
 	}
 }
 
 static void irq_sysfs_del(struct irq_desc *desc)
 {
 	/*
-	 * If irq_sysfs_init() has not yet been invoked (early boot), then
-	 * irq_kobj_base is NULL and the descriptor was never added.
-	 * kobject_del() complains about a object with no parent, so make
-	 * it conditional.
+	 * Only invoke kobject_del() when kobject_add() was successfully
+	 * invoked for the descriptor. This covers both early boot, where
+	 * sysfs is not initialized yet, and the case of a failed
+	 * kobject_add() invocation.
	 */
-	if (irq_kobj_base)
+	if (desc->istate & IRQS_SYSFS)
 		kobject_del(&desc->kobj);
 }
 
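
Note: irq_sysfs_add() now records a successful kobject_add() in desc->istate, and irq_sysfs_del() keys off that bit instead of irq_kobj_base, which also covers a failed kobject_add(). IRQS_SYSFS is assumed to be a new bit among the istate flags in kernel/irq/internals.h; the value below is illustrative only:

/* assumed companion change in kernel/irq/internals.h (bit value illustrative) */
enum {
	/* ... existing IRQS_* bits ... */
	IRQS_SYSFS		= 0x00004000,	/* descriptor was added to sysfs */
};
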
@@ -404,6 +408,7 @@
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	mutex_init(&desc->request_mutex);
 	init_rcu_head(&desc->rcu);
+	init_waitqueue_head(&desc->wait_for_threads);
 
 	desc_set_defaults(irq, desc, node, affinity, owner);
 	irqd_set(&desc->irq_data, flags);
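
Note: desc->wait_for_threads is the wait queue that synchronize_irq() and friends sleep on until all threaded handlers have completed; initializing it in alloc_desc() (and in early_irq_init() below) keeps it valid even for descriptors whose interrupt is never requested. A minimal sketch of the assumed waiter side, mirroring the mainline kernel/irq/manage.c pattern (helper name here is hypothetical):

/* hypothetical helper: block until no threaded handler is running */
static void wait_for_irq_threads(struct irq_desc *desc)
{
	wait_event(desc->wait_for_threads,
		   !atomic_read(&desc->threads_active));
}
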
@@ -463,30 +468,34 @@
 }
 
 static int alloc_descs(unsigned int start, unsigned int cnt, int node,
-		       const struct cpumask *affinity, struct module *owner)
+		       const struct irq_affinity_desc *affinity,
+		       struct module *owner)
 {
-	const struct cpumask *mask = NULL;
 	struct irq_desc *desc;
-	unsigned int flags;
 	int i;
 
 	/* Validate affinity mask(s) */
 	if (affinity) {
-		for (i = 0, mask = affinity; i < cnt; i++, mask++) {
-			if (cpumask_empty(mask))
+		for (i = 0; i < cnt; i++) {
+			if (cpumask_empty(&affinity[i].mask))
 				return -EINVAL;
 		}
 	}
 
-	flags = affinity ? IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0;
-	mask = NULL;
-
 	for (i = 0; i < cnt; i++) {
+		const struct cpumask *mask = NULL;
+		unsigned int flags = 0;
+
 		if (affinity) {
-			node = cpu_to_node(cpumask_first(affinity));
-			mask = affinity;
+			if (affinity->is_managed) {
+				flags = IRQD_AFFINITY_MANAGED |
+					IRQD_MANAGED_SHUTDOWN;
+			}
+			mask = &affinity->mask;
+			node = cpu_to_node(cpumask_first(mask));
 			affinity++;
 		}
+
 		desc = alloc_desc(start + i, node, flags, mask, owner);
 		if (!desc)
 			goto err;
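
Note: alloc_descs() now receives an array of struct irq_affinity_desc instead of a bare cpumask array, so each descriptor carries its own mask plus a managed/unmanaged flag, and IRQD_AFFINITY_MANAGED is set per entry rather than for the whole range. For reference, the layout assumed here matches the mainline definition in <linux/interrupt.h>:

/* assumed from <linux/interrupt.h>: one entry per allocated interrupt */
struct irq_affinity_desc {
	struct cpumask	mask;		/* CPUs this interrupt should be spread over */
	unsigned int	is_managed : 1;	/* kernel-managed: sets IRQD_AFFINITY_MANAGED */
};
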
@@ -568,6 +577,7 @@
 		raw_spin_lock_init(&desc[i].lock);
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
 		mutex_init(&desc[i].request_mutex);
+		init_waitqueue_head(&desc[i].wait_for_threads);
 		desc_set_defaults(i, &desc[i], node, NULL, NULL);
 	}
 	return arch_early_irq_init();
@@ -590,7 +600,7 @@
 }
 
 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
-			      const struct cpumask *affinity,
+			      const struct irq_affinity_desc *affinity,
 			      struct module *owner)
 {
 	u32 i;
@@ -633,9 +643,15 @@
 int generic_handle_irq(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_data *data;
 
 	if (!desc)
 		return -EINVAL;
+
+	data = irq_desc_get_irq_data(desc);
+	if (WARN_ON_ONCE(!in_irq() && handle_enforce_irqctx(data)))
+		return -EPERM;
+
 	generic_handle_irq_desc(desc);
 	return 0;
 }
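
Note: generic_handle_irq() now refuses to run a handler outside hard interrupt context when the irq chip demands real irq context. handle_enforce_irqctx() is assumed to be a thin wrapper over the IRQD_HANDLE_ENFORCE_IRQCTX state bit, along the lines of the mainline helper in kernel/irq/internals.h:

/* assumed helper, kernel/irq/internals.h */
static inline bool handle_enforce_irqctx(struct irq_data *data)
{
	return irqd_is_handle_enforce_irqctx(data);
}
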
@@ -656,9 +672,8 @@
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	unsigned int irq = hwirq;
+	struct irq_desc *desc;
 	int ret = 0;
-
-	irq_enter();
 
 #ifdef CONFIG_IRQ_DOMAIN
 	if (lookup)
@@ -669,17 +684,64 @@
 	 * Some hardware gives randomly wrong interrupts. Rather
 	 * than crashing, do something sensible.
 	 */
-	if (unlikely(!irq || irq >= nr_irqs)) {
+	if (unlikely(!irq || irq >= nr_irqs || !(desc = irq_to_desc(irq)))) {
 		ack_bad_irq(irq);
 		ret = -EINVAL;
-	} else {
-		generic_handle_irq(irq);
+		goto out;
 	}
 
-	irq_exit();
+	if (IS_ENABLED(CONFIG_ARCH_WANTS_IRQ_RAW) &&
+	    unlikely(irq_settings_is_raw(desc))) {
+		generic_handle_irq_desc(desc);
+	} else {
+		irq_enter();
+		generic_handle_irq_desc(desc);
+		irq_exit();
+	}
+
+out:
 	set_irq_regs(old_regs);
 	return ret;
 }
+
+#ifdef CONFIG_IRQ_DOMAIN
+/**
+ * handle_domain_nmi - Invoke the handler for a HW irq belonging to a domain
+ * @domain:	The domain where to perform the lookup
+ * @hwirq:	The HW irq number to convert to a logical one
+ * @regs:	Register file coming from the low-level handling code
+ *
+ * This function must be called from an NMI context.
+ *
+ * Returns:	0 on success, or -EINVAL if conversion has failed
+ */
+int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
+		      struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	unsigned int irq;
+	int ret = 0;
+
+	/*
+	 * NMI context needs to be setup earlier in order to deal with tracing.
+	 */
+	WARN_ON(!in_nmi());
+
+	irq = irq_find_mapping(domain, hwirq);
+
+	/*
+	 * ack_bad_irq is not NMI-safe, just report
+	 * an invalid interrupt.
+	 */
+	if (likely(irq))
+		generic_handle_irq(irq);
+	else
+		ret = -EINVAL;
+
+	set_irq_regs(old_regs);
+	return ret;
+}
+#endif
 #endif
 
 /* Dynamic interrupt handling */
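
Note: handle_domain_nmi() is the NMI counterpart of __handle_domain_irq(): the caller must already have entered NMI context, so there is no irq_enter()/irq_exit() pair and no ack_bad_irq() fallback. A hedged usage sketch for a root irqchip driver (my_chip_domain and my_chip_read_hwirq() are hypothetical names, not part of this patch):

/* illustrative only: low-level NMI entry of a hypothetical root irqchip */
static void my_chip_handle_nmi(struct pt_regs *regs)
{
	unsigned int hwirq = my_chip_read_hwirq();	/* hypothetical: read HW irq number */

	nmi_enter();	/* NMI context must be set up before calling handle_domain_nmi() */
	if (handle_domain_nmi(my_chip_domain, hwirq, regs))
		pr_warn_ratelimited("spurious NMI, hwirq %u\n", hwirq);
	nmi_exit();
}
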
@@ -706,7 +768,7 @@
 EXPORT_SYMBOL_GPL(irq_free_descs);
 
 /**
- * irq_alloc_descs - allocate and initialize a range of irq descriptors
+ * __irq_alloc_descs - allocate and initialize a range of irq descriptors
  * @irq:	Allocate for specific irq number if irq >= 0
  * @from:	Start the search from this irq number
  * @cnt:	Number of consecutive irqs to allocate.
@@ -720,7 +782,7 @@
  */
 int __ref
 __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
-		  struct module *owner, const struct cpumask *affinity)
+		  struct module *owner, const struct irq_affinity_desc *affinity)
 {
 	int start, ret;
 
@@ -847,6 +909,7 @@
 }
 
 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
+	__releases(&desc->lock)
 {
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	if (bus)
@@ -918,6 +981,12 @@
 	return desc && desc->kstat_irqs ?
 			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
 }
+EXPORT_SYMBOL_GPL(kstat_irqs_cpu);
+
+static bool irq_is_nmi(struct irq_desc *desc)
+{
+	return desc->istate & IRQS_NMI;
+}
 
 /**
  * kstat_irqs - Get the statistics for an interrupt
@@ -936,7 +1005,8 @@
 	if (!desc || !desc->kstat_irqs)
 		return 0;
 	if (!irq_settings_is_per_cpu_devid(desc) &&
-	    !irq_settings_is_per_cpu(desc))
+	    !irq_settings_is_per_cpu(desc) &&
+	    !irq_is_nmi(desc))
 		return desc->tot_count;
 
 	for_each_possible_cpu(cpu)
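
Note: kstat_irqs_cpu() is now exported for modules, and NMI-type interrupts skip the tot_count shortcut because their counts are updated outside the normal accounting path, so they must be summed per CPU. An illustrative module-side use of the newly exported helper (the function name is hypothetical):

#include <linux/kernel_stat.h>
#include <linux/cpumask.h>

/* sum the per-CPU counts for a Linux irq number the module owns */
static unsigned int my_total_irq_count(unsigned int irq)
{
	unsigned int cpu, sum = 0;

	for_each_possible_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);
	return sum;
}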