```diff
@@ -179,6 +179,7 @@
 static DEFINE_IDA(its_vpeid_ida);
 
 #define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
+#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
 #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
 #define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
 
```
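The new `gic_data_rdist_cpu(cpu)` accessor exists because the boot path below needs to touch *every* CPU's redistributor data, not just its own: `raw_cpu_ptr()` resolves against the calling CPU, while `per_cpu_ptr()` takes an explicit CPU index. A minimal standalone sketch of the two access patterns (the `rdist_state`/`pcpu_state` names are invented for illustration, not part of the driver):

```c
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

/* Hypothetical per-CPU state, standing in for gic_rdists->rdist */
struct rdist_state {
	struct page *pend_page;
};

static DEFINE_PER_CPU(struct rdist_state, pcpu_state);

static void __init preallocate_all(void)
{
	int cpu;

	/* Cross-CPU access needs an explicit CPU id: per_cpu_ptr() */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(&pcpu_state, cpu)->pend_page = NULL;
}

static void setup_this_cpu(void)
{
	/* "Whichever CPU I am running on": raw_cpu_ptr() */
	struct rdist_state *rs = raw_cpu_ptr(&pcpu_state);

	WARN_ON(rs->pend_page);	/* nothing should be set up yet */
}
```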
```diff
@@ -1661,7 +1662,7 @@
 			   get_order(LPI_PROPBASE_SZ));
 }
 
-static int __init its_alloc_lpi_tables(void)
+static int __init its_alloc_lpi_prop_table(void)
 {
 	phys_addr_t paddr;
 
@@ -2023,29 +2024,46 @@
 	return val;
 }
 
+static int __init allocate_lpi_tables(void)
+{
+	int err, cpu;
+
+	err = its_alloc_lpi_prop_table();
+	if (err)
+		return err;
+
+	/*
+	 * We allocate all the pending tables anyway, as we may have a
+	 * mix of RDs that have had LPIs enabled, and some that
+	 * don't. We'll free the unused ones as each CPU comes online.
+	 */
+	for_each_possible_cpu(cpu) {
+		struct page *pend_page;
+
+		pend_page = its_allocate_pending_table(GFP_NOWAIT);
+		if (!pend_page) {
+			pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
+			return -ENOMEM;
+		}
+
+		gic_data_rdist_cpu(cpu)->pend_page = pend_page;
+	}
+
+	return 0;
+}
+
 static void its_cpu_init_lpis(void)
 {
 	void __iomem *rbase = gic_data_rdist_rd_base();
 	struct page *pend_page;
+	phys_addr_t paddr;
 	u64 val, tmp;
 
-	/* If we didn't allocate the pending table yet, do it now */
+	if (gic_data_rdist()->lpi_enabled)
+		return;
+
 	pend_page = gic_data_rdist()->pend_page;
-	if (!pend_page) {
-		phys_addr_t paddr;
-
-		pend_page = its_allocate_pending_table(GFP_NOWAIT);
-		if (!pend_page) {
-			pr_err("Failed to allocate PENDBASE for CPU%d\n",
-			       smp_processor_id());
-			return;
-		}
-
-		paddr = page_to_phys(pend_page);
-		pr_info("CPU%d: using LPI pending table @%pa\n",
-			smp_processor_id(), &paddr);
-		gic_data_rdist()->pend_page = pend_page;
-	}
+	paddr = page_to_phys(pend_page);
 
 	/* set PROPBASE */
 	val = (page_to_phys(gic_rdists->prop_page) |
```
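Taken together, these two hunks are the core of the change: pending-table allocation for every possible CPU moves into a single init-time pass, and `its_cpu_init_lpis()` becomes an idempotent per-CPU enable step guarded by the new `lpi_enabled` flag. `GFP_NOWAIT` matters because this runs from early interrupt-controller init, where sleeping on memory reclaim is not an option. A sketch of the resulting split, with invented names (`early_alloc_all`, `cpu_enable`) and an arbitrary allocation order:

```c
#include <linux/gfp.h>
#include <linux/cpumask.h>
#include <linux/threads.h>
#include <linux/errno.h>
#include <linux/init.h>

static struct page *table[NR_CPUS];	/* stand-in for per-CPU pend_page */
static bool enabled[NR_CPUS];		/* stand-in for lpi_enabled */

static int __init early_alloc_all(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* non-blocking: a failure is reported, never waited out */
		table[cpu] = alloc_pages(GFP_NOWAIT | __GFP_ZERO, 2);
		if (!table[cpu])
			return -ENOMEM;
	}
	return 0;
}

static void cpu_enable(unsigned int cpu)
{
	if (enabled[cpu])	/* hotplug may re-enter: bail out early */
		return;

	/* ... point the redistributor at page_to_phys(table[cpu]) ... */
	enabled[cpu] = true;
}
```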
```diff
@@ -2128,6 +2146,10 @@
 
 	/* Make sure the GIC has seen the above */
 	dsb(sy);
+	gic_data_rdist()->lpi_enabled = true;
+	pr_info("GICv3: CPU%d: using LPI pending table @%pa\n",
+		smp_processor_id(),
+		&paddr);
 }
 
 static void its_cpu_init_collection(struct its_node *its)
```
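A small detail worth calling out in the relocated message: `%pa` is the printk format extension for `phys_addr_t`, and it takes the value *by reference*, which is why `&paddr` is passed (and why `paddr` had to be hoisted out of the deleted block above). For instance:

```c
#include <linux/printk.h>
#include <linux/types.h>

static void show_table(phys_addr_t paddr)
{
	/* %pa expects a pointer to phys_addr_t, hence &paddr */
	pr_info("GICv3: pending table @%pa\n", &paddr);
}
```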
```diff
@@ -3623,16 +3645,6 @@
 	u64 timeout = USEC_PER_SEC;
 	u64 val;
 
-	/*
-	 * If coming via a CPU hotplug event, we don't need to disable
-	 * LPIs before trying to re-enable them. They are already
-	 * configured and all is well in the world. Detect this case
-	 * by checking the allocation of the pending table for the
-	 * current CPU.
-	 */
-	if (gic_data_rdist()->pend_page)
-		return 0;
-
 	if (!gic_rdists_supports_plpis()) {
 		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
 		return -ENXIO;
@@ -3642,7 +3654,18 @@
 	if (!(val & GICR_CTLR_ENABLE_LPIS))
 		return 0;
 
-	pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
+	/*
+	 * If coming via a CPU hotplug event, we don't need to disable
+	 * LPIs before trying to re-enable them. They are already
+	 * configured and all is well in the world.
+	 */
+	if (gic_data_rdist()->lpi_enabled)
+		return 0;
+
+	/*
+	 * From that point on, we only try to do some damage control.
+	 */
+	pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
 		smp_processor_id());
 	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
 
```
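The hotplug short-circuit moves below the `GICR_CTLR_ENABLE_LPIS` read, and its trigger changes from `pend_page` to the new flag: now that a pending table is always allocated for every possible CPU at boot, its mere existence can no longer distinguish a hotplugged CPU from a first-time one. The resulting decision tree, written out as plain C for clarity (`hw_lpis_on` and `sw_enabled` are invented stand-ins for the EnableLPIs bit and `lpi_enabled`):

```c
#include <linux/types.h>

/* 0: nothing to disable; 1: LPIs inherited from a previous kernel */
static int classify(bool hw_lpis_on, bool sw_enabled)
{
	if (!hw_lpis_on)
		return 0;	/* clean boot: LPIs are off in hardware */

	if (sw_enabled)
		return 0;	/* hotplug re-entry: this kernel enabled them */

	return 1;		/* e.g. kexec: taint and do damage control */
}
```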
```diff
@@ -3898,7 +3921,8 @@
 	}
 
 	gic_rdists = rdists;
-	err = its_alloc_lpi_tables();
+
+	err = allocate_lpi_tables();
 	if (err)
 		return err;
 
```