commit | author | age
|
a07526
|
1 |
// SPDX-License-Identifier: GPL-2.0-only |
H |
2 |
/* linux/arch/arm/mach-exynos4/mct.c |
|
3 |
* |
|
4 |
* Copyright (c) 2011 Samsung Electronics Co., Ltd. |
|
5 |
* http://www.samsung.com |
|
6 |
* |
|
7 |
* Exynos4 MCT(Multi-Core Timer) support |
|
8 |
*/ |
|
9 |
|
|
10 |
#include <linux/interrupt.h> |
|
11 |
#include <linux/irq.h> |
|
12 |
#include <linux/err.h> |
|
13 |
#include <linux/clk.h> |
|
14 |
#include <linux/clockchips.h> |
|
15 |
#include <linux/cpu.h> |
|
16 |
#include <linux/delay.h> |
|
17 |
#include <linux/percpu.h> |
|
18 |
#include <linux/of.h> |
|
19 |
#include <linux/of_irq.h> |
|
20 |
#include <linux/of_address.h> |
|
21 |
#include <linux/clocksource.h> |
|
22 |
#include <linux/sched_clock.h> |
|
23 |
|
|
24 |
#define EXYNOS4_MCTREG(x) (x) |
|
25 |
#define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100) |
|
26 |
#define EXYNOS4_MCT_G_CNT_U EXYNOS4_MCTREG(0x104) |
|
27 |
#define EXYNOS4_MCT_G_CNT_WSTAT EXYNOS4_MCTREG(0x110) |
|
28 |
#define EXYNOS4_MCT_G_COMP0_L EXYNOS4_MCTREG(0x200) |
|
29 |
#define EXYNOS4_MCT_G_COMP0_U EXYNOS4_MCTREG(0x204) |
|
30 |
#define EXYNOS4_MCT_G_COMP0_ADD_INCR EXYNOS4_MCTREG(0x208) |
|
31 |
#define EXYNOS4_MCT_G_TCON EXYNOS4_MCTREG(0x240) |
|
32 |
#define EXYNOS4_MCT_G_INT_CSTAT EXYNOS4_MCTREG(0x244) |
|
33 |
#define EXYNOS4_MCT_G_INT_ENB EXYNOS4_MCTREG(0x248) |
|
34 |
#define EXYNOS4_MCT_G_WSTAT EXYNOS4_MCTREG(0x24C) |
|
35 |
#define _EXYNOS4_MCT_L_BASE EXYNOS4_MCTREG(0x300) |
|
36 |
#define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * x)) |
|
37 |
#define EXYNOS4_MCT_L_MASK (0xffffff00) |
|
38 |
|
|
39 |
#define MCT_L_TCNTB_OFFSET (0x00) |
|
40 |
#define MCT_L_ICNTB_OFFSET (0x08) |
|
41 |
#define MCT_L_TCON_OFFSET (0x20) |
|
42 |
#define MCT_L_INT_CSTAT_OFFSET (0x30) |
|
43 |
#define MCT_L_INT_ENB_OFFSET (0x34) |
|
44 |
#define MCT_L_WSTAT_OFFSET (0x40) |
|
45 |
#define MCT_G_TCON_START (1 << 8) |
|
46 |
#define MCT_G_TCON_COMP0_AUTO_INC (1 << 1) |
|
47 |
#define MCT_G_TCON_COMP0_ENABLE (1 << 0) |
|
48 |
#define MCT_L_TCON_INTERVAL_MODE (1 << 2) |
|
49 |
#define MCT_L_TCON_INT_START (1 << 1) |
|
50 |
#define MCT_L_TCON_TIMER_START (1 << 0) |
|
51 |
|
|
52 |
#define TICK_BASE_CNT 1 |
|
53 |
|
|
54 |
enum { |
|
55 |
MCT_INT_SPI, |
|
56 |
MCT_INT_PPI |
|
57 |
}; |
|
58 |
|
|
59 |
enum { |
|
60 |
MCT_G0_IRQ, |
|
61 |
MCT_G1_IRQ, |
|
62 |
MCT_G2_IRQ, |
|
63 |
MCT_G3_IRQ, |
|
64 |
MCT_L0_IRQ, |
|
65 |
MCT_L1_IRQ, |
|
66 |
MCT_L2_IRQ, |
|
67 |
MCT_L3_IRQ, |
|
68 |
MCT_L4_IRQ, |
|
69 |
MCT_L5_IRQ, |
|
70 |
MCT_L6_IRQ, |
|
71 |
MCT_L7_IRQ, |
|
72 |
MCT_NR_IRQS, |
|
73 |
}; |
|
74 |
|
|
75 |
static void __iomem *reg_base; |
|
76 |
static unsigned long clk_rate; |
|
77 |
static unsigned int mct_int_type; |
|
78 |
static int mct_irqs[MCT_NR_IRQS]; |
|
79 |
|
|
80 |
struct mct_clock_event_device { |
|
81 |
struct clock_event_device evt; |
|
82 |
unsigned long base; |
|
83 |
char name[10]; |
|
84 |
}; |
|
85 |
|
|
86 |
/*
 * exynos4_mct_write - write an MCT register and wait for the write to land
 * @value:  value to write
 * @offset: register offset from reg_base
 *
 * MCT register writes are posted; for most registers the hardware raises a
 * per-register bit in a write-status (WSTAT) register once the value has
 * actually been applied.  When such a bit exists, poll it for at most ~1 ms
 * and acknowledge it (write-1-to-clear).  Registers without a status bit
 * return right after the write.  Panics if the write never completes.
 */
static void exynos4_mct_write(unsigned int value, unsigned long offset)
{
	unsigned long stat_addr;
	u32 mask;
	u32 i;

	writel_relaxed(value, reg_base + offset);

	if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
		/* Local timer register: WSTAT sits at a fixed offset inside
		 * each per-CPU 0x100-byte window. */
		stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
		switch (offset & ~EXYNOS4_MCT_L_MASK) {
		case MCT_L_TCON_OFFSET:
			mask = 1 << 3;		/* L_TCON write status */
			break;
		case MCT_L_ICNTB_OFFSET:
			mask = 1 << 1;		/* L_ICNTB write status */
			break;
		case MCT_L_TCNTB_OFFSET:
			mask = 1 << 0;		/* L_TCNTB write status */
			break;
		default:
			/* No write-status bit for this register. */
			return;
		}
	} else {
		/* Global timer register. */
		switch (offset) {
		case EXYNOS4_MCT_G_TCON:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 16;		/* G_TCON write status */
			break;
		case EXYNOS4_MCT_G_COMP0_L:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 0;		/* G_COMP0_L write status */
			break;
		case EXYNOS4_MCT_G_COMP0_U:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 1;		/* G_COMP0_U write status */
			break;
		case EXYNOS4_MCT_G_COMP0_ADD_INCR:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 2;		/* G_COMP0_ADD_INCR w status */
			break;
		case EXYNOS4_MCT_G_CNT_L:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 0;		/* G_CNT_L write status */
			break;
		case EXYNOS4_MCT_G_CNT_U:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 1;		/* G_CNT_U write status */
			break;
		default:
			/* No write-status bit for this register. */
			return;
		}
	}

	/* Wait maximum 1 ms until written values are applied */
	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
		if (readl_relaxed(reg_base + stat_addr) & mask) {
			/* Ack: WSTAT bits are write-1-to-clear. */
			writel_relaxed(mask, reg_base + stat_addr);
			return;
		}

	panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
}
|
149 |
|
|
150 |
/* Clocksource handling */ |
|
151 |
static void exynos4_mct_frc_start(void) |
|
152 |
{ |
|
153 |
u32 reg; |
|
154 |
|
|
155 |
reg = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON); |
|
156 |
reg |= MCT_G_TCON_START; |
|
157 |
exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON); |
|
158 |
} |
|
159 |
|
|
160 |
/**
 * exynos4_read_count_64 - Read all 64-bits of the global counter
 *
 * This will read all 64-bits of the global counter taking care to make sure
 * that the upper and lower half match. Note that reading the MCT can be quite
 * slow (hundreds of nanoseconds) so you should use the 32-bit (lower half
 * only) version when possible.
 *
 * Returns the number of cycles in the global counter.
 */
static u64 exynos4_read_count_64(void)
{
	unsigned int lo, hi;
	u32 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);

	/*
	 * Re-read until the upper word is identical before and after the
	 * lower-word read, so a carry between the halves cannot be missed.
	 */
	do {
		hi = hi2;
		lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
		hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
	} while (hi != hi2);

	return ((u64)hi << 32) | lo;
}
|
183 |
|
|
184 |
/**
 * exynos4_read_count_32 - Read the lower 32-bits of the global counter
 *
 * This will read just the lower 32-bits of the global counter. This is marked
 * as notrace so it can be used by the scheduler clock.
 *
 * Returns the number of cycles in the global counter (lower 32 bits).
 */
static u32 notrace exynos4_read_count_32(void)
{
	/* Single relaxed MMIO read; also backs sched_clock and udelay. */
	return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
}
|
196 |
|
|
197 |
/* clocksource ->resume hook: (re)start the global free-running counter. */
static void exynos4_frc_resume(struct clocksource *cs)
{
	exynos4_mct_frc_start();
}
|
201 |
|
2f529f
|
202 |
/*
 * The free-running counter, exported as a user-mappable mmio clocksource.
 * NOTE(review): clocksource_user_mmio appears to come from the Dovetail/
 * Xenomai patch set rather than mainline — confirm against the tree.
 */
static struct clocksource_user_mmio mct_frc = {
	.mmio.clksrc = {
		.name = "mct-frc",
		.rating = 450,	/* use value higher than ARM arch timer */
		.read = clocksource_mmio_readl_up,
		.mask = CLOCKSOURCE_MASK(32),
		.flags = CLOCK_SOURCE_IS_CONTINUOUS,
		.resume = exynos4_frc_resume,
	},
};
H |
212 |
|
|
213 |
/* sched_clock callback: lower 32 bits of the free-running counter. */
static u64 notrace exynos4_read_sched_clock(void)
{
	return exynos4_read_count_32();
}
|
217 |
|
|
218 |
#if defined(CONFIG_ARM)
/* On ARM32, also back the udelay() current_timer with the MCT. */
static struct delay_timer exynos4_delay_timer;

static cycles_t exynos4_read_current_timer(void)
{
	/* Returning only 32 counter bits is lossless while cycles_t is u32. */
	BUILD_BUG_ON_MSG(sizeof(cycles_t) != sizeof(u32),
			 "cycles_t needs to move to 32-bit for ARM64 usage");
	return exynos4_read_count_32();
}
#endif
|
228 |
|
|
229 |
/*
 * Register the free-running counter as clocksource, sched_clock source and
 * (on ARM32) delay timer.  Always returns 0; a failed clocksource
 * registration panics.
 */
static int __init exynos4_clocksource_init(void)
{
	struct clocksource_mmio_regs mmr;

	exynos4_mct_frc_start();

#if defined(CONFIG_ARM)
	/* Calibrate udelay() against the MCT instead of a busy loop. */
	exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
	exynos4_delay_timer.freq = clk_rate;
	register_current_timer_delay(&exynos4_delay_timer);
#endif

	/* Expose only the lower 32 bits; no upper-half register is mapped. */
	mmr.reg_upper = NULL;
	mmr.reg_lower = reg_base + EXYNOS4_MCT_G_CNT_L;
	mmr.bits_upper = 0;
	mmr.bits_lower = 32;
	mmr.revmap = NULL;
	if (clocksource_user_mmio_init(&mct_frc, &mmr, clk_rate))
		panic("%s: can't register clocksource\n", mct_frc.mmio.clksrc.name);

	sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);

	return 0;
}
|
253 |
|
|
254 |
static void exynos4_mct_comp0_stop(void) |
|
255 |
{ |
|
256 |
unsigned int tcon; |
|
257 |
|
|
258 |
tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON); |
|
259 |
tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC); |
|
260 |
|
|
261 |
exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON); |
|
262 |
exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB); |
|
263 |
} |
|
264 |
|
|
265 |
/*
 * Arm global comparator 0 to fire @cycles ticks from now.  In periodic
 * mode the hardware re-arms itself by adding @cycles after each match.
 */
static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
{
	unsigned int tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
	u64 comp_cycle;

	if (periodic) {
		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
	}

	/* Program the match value relative to the current 64-bit count. */
	comp_cycle = exynos4_read_count_64() + cycles;
	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

	/* Unmask the comparator interrupt, then enable the comparator. */
	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

	exynos4_mct_write(tcon | MCT_G_TCON_COMP0_ENABLE, EXYNOS4_MCT_G_TCON);
}
|
286 |
|
|
287 |
static int exynos4_comp_set_next_event(unsigned long cycles, |
|
288 |
struct clock_event_device *evt) |
|
289 |
{ |
|
290 |
exynos4_mct_comp0_start(false, cycles); |
|
291 |
|
|
292 |
return 0; |
|
293 |
} |
|
294 |
|
|
295 |
/* Shared shutdown/oneshot/tick_resume handler for the comparator device. */
static int mct_set_state_shutdown(struct clock_event_device *evt)
{
	exynos4_mct_comp0_stop();
	return 0;
}
|
300 |
|
|
301 |
static int mct_set_state_periodic(struct clock_event_device *evt) |
|
302 |
{ |
|
303 |
unsigned long cycles_per_jiffy; |
|
304 |
|
|
305 |
cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult) |
|
306 |
>> evt->shift); |
|
307 |
exynos4_mct_comp0_stop(); |
|
308 |
exynos4_mct_comp0_start(true, cycles_per_jiffy); |
|
309 |
return 0; |
|
310 |
} |
|
311 |
|
|
312 |
/*
 * Global comparator 0 exposed as a clockevent, bound to CPU0 in
 * exynos4_clockevent_init().  NOTE(review): CLOCK_EVT_FEAT_PIPELINE looks
 * like a Dovetail/Xenomai feature flag, not mainline — confirm.
 */
static struct clock_event_device mct_comp_device = {
	.name		= "mct-comp",
	.features	= CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PIPELINE,
	.rating		= 250,
	.set_next_event		= exynos4_comp_set_next_event,
	.set_state_periodic	= mct_set_state_periodic,
	.set_state_shutdown	= mct_set_state_shutdown,
	.set_state_oneshot	= mct_set_state_shutdown,
	.set_state_oneshot_stopped = mct_set_state_shutdown,
	.tick_resume		= mct_set_state_shutdown,
};
|
325 |
|
|
326 |
/* Interrupt handler for global comparator 0. */
static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	/* Ack the comparator interrupt (write-1-to-clear CSTAT bit 0). */
	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

	clockevents_handle_event(evt);

	return IRQ_HANDLED;
}
|
336 |
|
|
337 |
/*
 * Register the comparator clockevent on CPU0 and request its IRQ.
 * NOTE(review): IRQF_OOB presumably marks the handler as out-of-band
 * (Dovetail) — confirm.  Always returns 0; request_irq() failure is only
 * logged.
 */
static int exynos4_clockevent_init(void)
{
	mct_comp_device.cpumask = cpumask_of(0);
	clockevents_config_and_register(&mct_comp_device, clk_rate,
					0xf, 0xffffffff);
	if (request_irq(mct_irqs[MCT_G0_IRQ], exynos4_mct_comp_isr,
			IRQF_TIMER | IRQF_IRQPOLL | IRQF_OOB, "mct_comp_irq",
			&mct_comp_device))
		pr_err("%s: request_irq() failed\n", "mct_comp_irq");

	return 0;
}
|
349 |
|
|
350 |
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

/* Clock event handling */

/*
 * Stop this CPU's local timer by clearing the interrupt-start and
 * timer-start bits in its L_TCON, if either is currently set.
 */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
	unsigned long tmp;
	unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
	unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;

	tmp = readl_relaxed(reg_base + offset);
	if (tmp & mask) {
		tmp &= ~mask;
		/* Only write (and poll WSTAT) when a change is needed. */
		exynos4_mct_write(tmp, offset);
	}
}
|
365 |
|
|
366 |
/*
 * Program this CPU's local timer to raise an interrupt after @cycles
 * ticks, using interval (auto-reload) mode.  The timer is stopped first,
 * the interrupt count buffer loaded, the interrupt unmasked, and finally
 * the timer started — in that order.
 */
static void exynos4_mct_tick_start(unsigned long cycles,
				   struct mct_clock_event_device *mevt)
{
	unsigned long tmp;

	exynos4_mct_tick_stop(mevt);

	tmp = (1 << 31) | cycles;	/* MCT_L_UPDATE_ICNTB */

	/* update interrupt count buffer */
	exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

	/* enable MCT tick interrupt */
	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

	tmp = readl_relaxed(reg_base + mevt->base + MCT_L_TCON_OFFSET);
	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
	       MCT_L_TCON_INTERVAL_MODE;
	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}
|
386 |
|
|
387 |
static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) |
|
388 |
{ |
|
389 |
/* Clear the MCT tick interrupt */ |
|
390 |
if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) |
|
391 |
exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); |
|
392 |
} |
|
393 |
|
|
394 |
static int exynos4_tick_set_next_event(unsigned long cycles, |
|
395 |
struct clock_event_device *evt) |
|
396 |
{ |
|
397 |
struct mct_clock_event_device *mevt; |
|
398 |
|
|
399 |
mevt = container_of(evt, struct mct_clock_event_device, evt); |
|
400 |
exynos4_mct_tick_start(cycles, mevt); |
|
401 |
return 0; |
|
402 |
} |
|
403 |
|
|
404 |
static int set_state_shutdown(struct clock_event_device *evt) |
|
405 |
{ |
|
406 |
struct mct_clock_event_device *mevt; |
|
407 |
|
|
408 |
mevt = container_of(evt, struct mct_clock_event_device, evt); |
|
409 |
exynos4_mct_tick_stop(mevt); |
|
410 |
exynos4_mct_tick_clear(mevt); |
|
411 |
return 0; |
|
412 |
} |
|
413 |
|
|
414 |
static int set_state_periodic(struct clock_event_device *evt) |
|
415 |
{ |
|
416 |
struct mct_clock_event_device *mevt; |
|
417 |
unsigned long cycles_per_jiffy; |
|
418 |
|
|
419 |
mevt = container_of(evt, struct mct_clock_event_device, evt); |
|
420 |
cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult) |
|
421 |
>> evt->shift); |
|
422 |
exynos4_mct_tick_stop(mevt); |
|
423 |
exynos4_mct_tick_start(cycles_per_jiffy, mevt); |
|
424 |
return 0; |
|
425 |
} |
|
426 |
|
|
427 |
/* Per-CPU local tick interrupt handler. */
static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
	struct mct_clock_event_device *mevt = dev_id;
	struct clock_event_device *evt = &mevt->evt;

	/*
	 * This is for supporting oneshot mode.
	 * Mct would generate interrupt periodically
	 * without explicit stopping.
	 */
	if (!clockevent_state_periodic(&mevt->evt))
		exynos4_mct_tick_stop(mevt);

	/* Ack the pending status before dispatching to the core. */
	exynos4_mct_tick_clear(mevt);

	clockevents_handle_event(evt);

	return IRQ_HANDLED;
}
|
446 |
|
|
447 |
/*
 * CPU hotplug "starting" callback: configure and register this CPU's
 * local MCT timer as its tick clockevent (rating 500 beats the ARM arch
 * timer).  With SPI routing, the CPU's own irq is affined here and
 * enabled (it was requested with IRQ_NOAUTOEN); with PPI routing the
 * shared percpu irq is enabled instead.
 */
static int exynos4_mct_starting_cpu(unsigned int cpu)
{
	struct mct_clock_event_device *mevt =
		per_cpu_ptr(&percpu_mct_tick, cpu);
	struct clock_event_device *evt = &mevt->evt;

	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
	snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);

	evt->name = mevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_state_periodic = set_state_periodic;
	evt->set_state_shutdown = set_state_shutdown;
	evt->set_state_oneshot = set_state_shutdown;
	evt->set_state_oneshot_stopped = set_state_shutdown;
	evt->tick_resume = set_state_shutdown;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | \
		CLOCK_EVT_FEAT_PIPELINE;
	evt->rating = 500;	/* use value higher than ARM arch timer */

	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

	if (mct_int_type == MCT_INT_SPI) {

		/* -1 means request_irq() failed during early setup. */
		if (evt->irq == -1)
			return -EIO;

		irq_force_affinity(evt->irq, cpumask_of(cpu));
		enable_irq(evt->irq);
	} else {
		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
	}
	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
					0xf, 0x7fffffff);

	return 0;
}
|
485 |
|
|
486 |
/*
 * CPU hotplug "dying" callback: shut this CPU's local tick down and
 * disable its interrupt.  For SPI routing, any pending status is also
 * acked so a stale interrupt cannot fire when the CPU comes back online.
 */
static int exynos4_mct_dying_cpu(unsigned int cpu)
{
	struct mct_clock_event_device *mevt =
		per_cpu_ptr(&percpu_mct_tick, cpu);
	struct clock_event_device *evt = &mevt->evt;

	evt->set_state_shutdown(evt);
	if (mct_int_type == MCT_INT_SPI) {
		if (evt->irq != -1)
			disable_irq_nosync(evt->irq);
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
	} else {
		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
	}
	return 0;
}
|
502 |
|
|
503 |
/*
 * Map the MCT register block and enable its clocks from DT.  The tick
 * clock ("fin_pll") determines clk_rate; the "mct" bus clock must be
 * running before any register access.  Consistent with the rest of this
 * function, any failure panics.  Returns 0 on success.
 */
static int __init exynos4_timer_resources(struct device_node *np)
{
	struct clk *mct_clk, *tick_clk;

	reg_base = of_iomap(np, 0);
	if (!reg_base)
		panic("%s: unable to ioremap mct address space\n", __func__);

	tick_clk = of_clk_get_by_name(np, "fin_pll");
	if (IS_ERR(tick_clk))
		panic("%s: unable to determine tick clock rate\n", __func__);
	clk_rate = clk_get_rate(tick_clk);

	mct_clk = of_clk_get_by_name(np, "mct");
	if (IS_ERR(mct_clk))
		panic("%s: unable to retrieve mct clock instance\n", __func__);
	/* Don't ignore a failed enable — register access would then hang. */
	if (clk_prepare_enable(mct_clk))
		panic("%s: unable to enable mct clock\n", __func__);

	return 0;
}
|
523 |
|
|
524 |
/*
 * Parse and request the MCT interrupts from DT.
 *
 * DT lists the four global timer irqs first, then one irq per local
 * timer; only global irq G0 is actually used.  With PPI routing a single
 * percpu irq serves all local timers.  With SPI routing each local timer
 * gets its own irq, requested here but kept disabled (IRQ_NOAUTOEN)
 * until the owning CPU enables it in exynos4_mct_starting_cpu().
 * Returns 0 on success or a negative errno from cpuhp_setup_state(),
 * after freeing any irqs requested here.
 */
static int __init exynos4_timer_interrupts(struct device_node *np,
					   unsigned int int_type)
{
	int nr_irqs, i, err, cpu;

	mct_int_type = int_type;

	/* This driver uses only one global timer interrupt */
	mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);

	/*
	 * Find out the number of local irqs specified. The local
	 * timer irqs are specified after the four global timer
	 * irqs are specified.
	 */
	nr_irqs = of_irq_count(np);
	if (nr_irqs > ARRAY_SIZE(mct_irqs)) {
		pr_err("exynos-mct: too many (%d) interrupts configured in DT\n",
			nr_irqs);
		nr_irqs = ARRAY_SIZE(mct_irqs);
	}
	for (i = MCT_L0_IRQ; i < nr_irqs; i++)
		mct_irqs[i] = irq_of_parse_and_map(np, i);

	if (mct_int_type == MCT_INT_PPI) {

		err = __request_percpu_irq(mct_irqs[MCT_L0_IRQ],
					 exynos4_mct_tick_isr, IRQF_TIMER,
					 "MCT", &percpu_mct_tick);
		WARN(err, "MCT: can't request IRQ %d (%d)\n",
		     mct_irqs[MCT_L0_IRQ], err);
	} else {
		for_each_possible_cpu(cpu) {
			int mct_irq;
			struct mct_clock_event_device *pcpu_mevt =
				per_cpu_ptr(&percpu_mct_tick, cpu);

			/* -1 marks "no usable irq" for this CPU. */
			pcpu_mevt->evt.irq = -1;
			if (MCT_L0_IRQ + cpu >= ARRAY_SIZE(mct_irqs))
				break;
			mct_irq = mct_irqs[MCT_L0_IRQ + cpu];

			/* Leave the irq off until the CPU brings it up. */
			irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
			if (request_irq(mct_irq,
					exynos4_mct_tick_isr,
					IRQF_TIMER | IRQF_NOBALANCING,
					pcpu_mevt->name, pcpu_mevt)) {
				pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
				       cpu);

				continue;
			}
			pcpu_mevt->evt.irq = mct_irq;
		}
	}

	/* Install hotplug callbacks which configure the timer on this CPU */
	err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
				"clockevents/exynos4/mct_timer:starting",
				exynos4_mct_starting_cpu,
				exynos4_mct_dying_cpu);
	if (err)
		goto out_irq;

	return 0;

out_irq:
	/* Unwind: release whichever irqs were successfully requested. */
	if (mct_int_type == MCT_INT_PPI) {
		free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
	} else {
		for_each_possible_cpu(cpu) {
			struct mct_clock_event_device *pcpu_mevt =
				per_cpu_ptr(&percpu_mct_tick, cpu);

			if (pcpu_mevt->evt.irq != -1) {
				free_irq(pcpu_mevt->evt.irq, pcpu_mevt);
				pcpu_mevt->evt.irq = -1;
			}
		}
	}
	return err;
}
|
606 |
|
|
607 |
/*
 * Common DT init path: map resources and clocks, wire up interrupts,
 * then register the clocksource and clockevents, stopping at the first
 * failure.
 */
static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
	int ret;

	ret = exynos4_timer_resources(np);
	if (ret)
		return ret;

	ret = exynos4_timer_interrupts(np, int_type);
	if (ret)
		return ret;

	ret = exynos4_clocksource_init();
	if (ret)
		return ret;

	return exynos4_clockevent_init();
}
|
625 |
|
|
626 |
|
|
627 |
/* TIMER_OF entry points: local-tick irq routing differs per SoC. */
static int __init mct_init_spi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_SPI);
}

static int __init mct_init_ppi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_PPI);
}
TIMER_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
TIMER_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);