hc
2024-01-04 1543e317f1da31b75942316931e8f491a8920811
kernel/arch/arm/mach-hisi/platmcpm.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2013-2014 Linaro Ltd.
  * Copyright (c) 2013-2014 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
  */
 #include <linux/init.h>
 #include <linux/smp.h>
@@ -61,7 +58,7 @@
 
 static void __iomem *sysctrl, *fabric;
 static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
-static DEFINE_RAW_SPINLOCK(boot_lock);
+static DEFINE_SPINLOCK(boot_lock);
 static u32 fabric_phys_addr;
 /*
  * [0]: bootwrapper physical address
@@ -113,7 +110,7 @@
 	if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
 		return -EINVAL;
 
-	raw_spin_lock_irq(&boot_lock);
+	spin_lock_irq(&boot_lock);
 
 	if (hip04_cpu_table[cluster][cpu])
 		goto out;
@@ -147,7 +144,7 @@
 
 out:
 	hip04_cpu_table[cluster][cpu]++;
-	raw_spin_unlock_irq(&boot_lock);
+	spin_unlock_irq(&boot_lock);
 
 	return 0;
 }
@@ -162,11 +159,11 @@
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 
-	raw_spin_lock(&boot_lock);
+	spin_lock(&boot_lock);
 	hip04_cpu_table[cluster][cpu]--;
 	if (hip04_cpu_table[cluster][cpu] == 1) {
 		/* A power_up request went ahead of us. */
-		raw_spin_unlock(&boot_lock);
+		spin_unlock(&boot_lock);
 		return;
 	} else if (hip04_cpu_table[cluster][cpu] > 1) {
 		pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
@@ -174,7 +171,7 @@
 	}
 
 	last_man = hip04_cluster_is_down(cluster);
-	raw_spin_unlock(&boot_lock);
+	spin_unlock(&boot_lock);
 	if (last_man) {
 		/* Since it's Cortex A15, disable L2 prefetching. */
 		asm volatile(
@@ -203,7 +200,7 @@
 	       cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
 
 	count = TIMEOUT_MSEC / POLL_MSEC;
-	raw_spin_lock_irq(&boot_lock);
+	spin_lock_irq(&boot_lock);
 	for (tries = 0; tries < count; tries++) {
 		if (hip04_cpu_table[cluster][cpu])
 			goto err;
@@ -211,10 +208,10 @@
 		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
 		if (data & CORE_WFI_STATUS(cpu))
 			break;
-		raw_spin_unlock_irq(&boot_lock);
+		spin_unlock_irq(&boot_lock);
 		/* Wait for clean L2 when the whole cluster is down. */
 		msleep(POLL_MSEC);
-		raw_spin_lock_irq(&boot_lock);
+		spin_lock_irq(&boot_lock);
 	}
 	if (tries >= count)
 		goto err;
@@ -231,10 +228,10 @@
 		goto err;
 	if (hip04_cluster_is_down(cluster))
 		hip04_set_snoop_filter(cluster, 0);
-	raw_spin_unlock_irq(&boot_lock);
+	spin_unlock_irq(&boot_lock);
 	return 1;
 err:
-	raw_spin_unlock_irq(&boot_lock);
+	spin_unlock_irq(&boot_lock);
 	return 0;
 }
 #endif
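
The hunks above convert the file's boot_lock from a raw spinlock to an ordinary spinlock, changing every lock and unlock site in lockstep (DEFINE_RAW_SPINLOCK to DEFINE_SPINLOCK, raw_spin_lock*/raw_spin_unlock* to their spin_lock*/spin_unlock* counterparts). On PREEMPT_RT kernels a spinlock_t may sleep, whereas a raw_spinlock_t always busy-waits with preemption disabled. Below is a minimal sketch of the resulting pattern, not part of the patch; the names demo_lock, demo_table, DEMO_MAX_CPUS and demo_power_up are invented for illustration.

/*
 * Hypothetical sketch of the locking pattern used after this change:
 * an ordinary spinlock guarding per-CPU bookkeeping, taken with IRQs
 * disabled on the power-up path. Kernel context assumed.
 */
#include <linux/spinlock.h>
#include <linux/errno.h>

#define DEMO_MAX_CPUS	4

static DEFINE_SPINLOCK(demo_lock);	/* was DEFINE_RAW_SPINLOCK() before the change */
static int demo_table[DEMO_MAX_CPUS];

static int demo_power_up(unsigned int cpu)
{
	if (cpu >= DEMO_MAX_CPUS)
		return -EINVAL;

	spin_lock_irq(&demo_lock);	/* replaces raw_spin_lock_irq() */
	demo_table[cpu]++;
	spin_unlock_irq(&demo_lock);	/* replaces raw_spin_unlock_irq() */

	return 0;
}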