2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/arch/powerpc/kernel/exceptions-64s.S
@@ -19,6 +19,718 @@
 #include <asm/cpuidle.h>
 #include <asm/head-64.h>
 #include <asm/feature-fixups.h>
+#include <asm/kup.h>
+
+/* PACA save area offsets (exgen, exmc, etc) */
+#define EX_R9		0
+#define EX_R10		8
+#define EX_R11		16
+#define EX_R12		24
+#define EX_R13		32
+#define EX_DAR		40
+#define EX_DSISR	48
+#define EX_CCR		52
+#define EX_CFAR		56
+#define EX_PPR		64
+#define EX_CTR		72
+.if EX_SIZE != 10
+	.error "EX_SIZE is wrong"
+.endif
+
+/*
+ * Following are fixed section helper macros.
+ *
+ * EXC_REAL_BEGIN/END - real, unrelocated exception vectors
+ * EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors
+ * TRAMP_REAL_BEGIN   - real, unrelocated helpers (virt may call these)
+ * TRAMP_VIRT_BEGIN   - virt, unreloc helpers (in practice, real can use)
+ * EXC_COMMON         - After switching to virtual, relocated mode.
+ */
+
+#define EXC_REAL_BEGIN(name, start, size)			\
+	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)
+
+#define EXC_REAL_END(name, start, size)				\
+	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)
+
+#define EXC_VIRT_BEGIN(name, start, size)			\
+	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)
+
+#define EXC_VIRT_END(name, start, size)				\
+	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)
+
+#define EXC_COMMON_BEGIN(name)					\
+	USE_TEXT_SECTION();					\
+	.balign IFETCH_ALIGN_BYTES;				\
+	.global name;						\
+	_ASM_NOKPROBE_SYMBOL(name);				\
+	DEFINE_FIXED_SYMBOL(name);				\
+name:
+
+#define TRAMP_REAL_BEGIN(name)					\
+	FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)
+
+#define TRAMP_VIRT_BEGIN(name)					\
+	FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)
+
+#define EXC_REAL_NONE(start, size)				\
+	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
+	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)
+
+#define EXC_VIRT_NONE(start, size)				\
+	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
+	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)
+
+/*
+ * We're short on space and time in the exception prolog, so we can't
+ * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
+ * Instead we get the base of the kernel from paca->kernelbase and or in the low
+ * part of label. This requires that the label be within 64KB of kernelbase, and
+ * that kernelbase be 64K aligned.
+ */
+#define LOAD_HANDLER(reg, label)				\
+	ld	reg,PACAKBASE(r13);	/* get high part of &label */ \
+	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)
+
+#define __LOAD_HANDLER(reg, label)				\
+	ld	reg,PACAKBASE(r13);				\
+	ori	reg,reg,(ABS_ADDR(label))@l
+
+/*
+ * Branches from unrelocated code (e.g., interrupts) to labels outside
+ * head-y require >64K offsets.
+ */
+#define __LOAD_FAR_HANDLER(reg, label)				\
+	ld	reg,PACAKBASE(r13);				\
+	ori	reg,reg,(ABS_ADDR(label))@l;			\
+	addis	reg,reg,(ABS_ADDR(label))@h
+
+/*
+ * Branch to label using its 0xC000 address. This results in instruction
+ * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
+ * on using mtmsr rather than rfid.
+ *
+ * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
+ * load KBASE for a slight optimisation.
+ */
+#define BRANCH_TO_C000(reg, label)				\
+	__LOAD_FAR_HANDLER(reg, label);				\
+	mtctr	reg;						\
+	bctr
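
A worked sketch of the two loader macros above (hypothetical numbers, not part of the patch): assume paca->kernelbase is 0xc000000000000000 and a handler sits at absolute offset 0x18100 from it. __LOAD_HANDLER can only reach labels within 64KB, while __LOAD_FAR_HANDLER adds the high halfword:

	/* __LOAD_HANDLER: label must lie within 64KB of kernelbase */
	ld	r10,PACAKBASE(r13)	/* r10 = 0xc000000000000000 */
	ori	r10,r10,0x8100		/* ori takes only the low 16 bits */

	/* __LOAD_FAR_HANDLER: reaches labels beyond 64KB */
	ld	r10,PACAKBASE(r13)
	ori	r10,r10,0x8100		/* low 16 bits of 0x18100 */
	addis	r10,r10,0x1		/* + (0x1 << 16): r10 = kernelbase + 0x18100 */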
+
+/*
+ * Interrupt code generation macros
+ */
+#define IVEC		.L_IVEC_\name\()	/* Interrupt vector address */
+#define IHSRR		.L_IHSRR_\name\()	/* Sets SRR or HSRR registers */
+#define IHSRR_IF_HVMODE	.L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */
+#define IAREA		.L_IAREA_\name\()	/* PACA save area */
+#define IVIRT		.L_IVIRT_\name\()	/* Has virt mode entry point */
+#define IISIDE		.L_IISIDE_\name\()	/* Uses SRR0/1 not DAR/DSISR */
+#define IDAR		.L_IDAR_\name\()	/* Uses DAR (or SRR0) */
+#define IDSISR		.L_IDSISR_\name\()	/* Uses DSISR (or SRR1) */
+#define ISET_RI		.L_ISET_RI_\name\()	/* Run common code w/ MSR[RI]=1 */
+#define IBRANCH_TO_COMMON	.L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
+#define IREALMODE_COMMON	.L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
+#define IMASK		.L_IMASK_\name\()	/* IRQ soft-mask bit */
+#define IKVM_SKIP	.L_IKVM_SKIP_\name\()	/* Generate KVM skip handler */
+#define IKVM_REAL	.L_IKVM_REAL_\name\()	/* Real entry tests KVM */
+#define __IKVM_REAL(name)	.L_IKVM_REAL_ ## name
+#define IKVM_VIRT	.L_IKVM_VIRT_\name\()	/* Virt entry tests KVM */
+#define ISTACK		.L_ISTACK_\name\()	/* Set regular kernel stack */
+#define __ISTACK(name)	.L_ISTACK_ ## name
+#define IRECONCILE	.L_IRECONCILE_\name\()	/* Do RECONCILE_IRQ_STATE */
+#define IKUAP		.L_IKUAP_\name\()	/* Do KUAP lock */
+
+#define INT_DEFINE_BEGIN(n)					\
+.macro int_define_ ## n name
+
+#define INT_DEFINE_END(n)					\
+.endm ;								\
+int_define_ ## n n ;						\
+do_define_int n
+
+.macro do_define_int name
+	.ifndef IVEC
+		.error "IVEC not defined"
+	.endif
+	.ifndef IHSRR
+		IHSRR=0
+	.endif
+	.ifndef IHSRR_IF_HVMODE
+		IHSRR_IF_HVMODE=0
+	.endif
+	.ifndef IAREA
+		IAREA=PACA_EXGEN
+	.endif
+	.ifndef IVIRT
+		IVIRT=1
+	.endif
+	.ifndef IISIDE
+		IISIDE=0
+	.endif
+	.ifndef IDAR
+		IDAR=0
+	.endif
+	.ifndef IDSISR
+		IDSISR=0
+	.endif
+	.ifndef ISET_RI
+		ISET_RI=1
+	.endif
+	.ifndef IBRANCH_TO_COMMON
+		IBRANCH_TO_COMMON=1
+	.endif
+	.ifndef IREALMODE_COMMON
+		IREALMODE_COMMON=0
+	.else
+		.if ! IBRANCH_TO_COMMON
+			.error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0"
+		.endif
+	.endif
+	.ifndef IMASK
+		IMASK=0
+	.endif
+	.ifndef IKVM_SKIP
+		IKVM_SKIP=0
+	.endif
+	.ifndef IKVM_REAL
+		IKVM_REAL=0
+	.endif
+	.ifndef IKVM_VIRT
+		IKVM_VIRT=0
+	.endif
+	.ifndef ISTACK
+		ISTACK=1
+	.endif
+	.ifndef IRECONCILE
+		IRECONCILE=1
+	.endif
+	.ifndef IKUAP
+		IKUAP=1
+	.endif
.endm
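
A minimal usage sketch (hypothetical vector number; the real definitions such as system_reset and data_access appear further down in this patch): only IVEC is mandatory, and anything left unset between BEGIN and END receives the defaults assigned by do_define_int above:

INT_DEFINE_BEGIN(example_int)
	IVEC=0x700		/* hypothetical */
	IKVM_REAL=1		/* override one default */
INT_DEFINE_END(example_int)
	/* unset flags default per do_define_int:
	 * IAREA=PACA_EXGEN, ISTACK=1, IRECONCILE=1, IKUAP=1, ... */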
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * All interrupts which set HSRR registers, as well as SRESET and MCE and
+ * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
+ * so they all generally need to test whether they were taken in guest context.
+ *
+ * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be
+ * taken with MSR[HV]=0.
+ *
+ * Interrupts which set SRR registers (with the above exceptions) do not
+ * elevate to MSR[HV]=1 mode, though most can be taken when running with
+ * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do
+ * not need to test whether a guest is running because they get delivered to
+ * the guest directly, including nested HV KVM guests.
+ *
+ * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host
+ * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the
+ * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be
+ * delivered to the real-mode entry point, therefore such interrupts only test
+ * KVM in their real mode handlers, and only when PR KVM is possible.
+ *
+ * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always
+ * delivered in real-mode when the MMU is in hash mode because the MMU
+ * registers are not set appropriately to translate host addresses. In nested
+ * radix mode these can be delivered in virt-mode as the host translations are
+ * used implicitly (see: effective LPID, effective PID).
+ */
+
+/*
+ * If an interrupt is taken while a guest is running, it is immediately routed
+ * to KVM to handle. If both HV and PR KVM are possible, KVM interrupts go
+ * first to kvmppc_interrupt_hv, which handles the PR guest case.
+ */
249
+#else
250
+#define kvmppc_interrupt kvmppc_interrupt_pr
251
+#endif
252
+
253
+.macro KVMTEST name
254
+ lbz r10,HSTATE_IN_GUEST(r13)
255
+ cmpwi r10,0
256
+ bne \name\()_kvm
257
+.endm
258
+
259
+.macro GEN_KVM name
260
+ .balign IFETCH_ALIGN_BYTES
261
+\name\()_kvm:
262
+
263
+ .if IKVM_SKIP
264
+ cmpwi r10,KVM_GUEST_MODE_SKIP
265
+ beq 89f
266
+ .else
267
+BEGIN_FTR_SECTION
268
+ ld r10,IAREA+EX_CFAR(r13)
269
+ std r10,HSTATE_CFAR(r13)
270
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
271
+ .endif
272
+
273
+ ld r10,IAREA+EX_CTR(r13)
274
+ mtctr r10
275
+BEGIN_FTR_SECTION
276
+ ld r10,IAREA+EX_PPR(r13)
277
+ std r10,HSTATE_PPR(r13)
278
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
279
+ ld r11,IAREA+EX_R11(r13)
280
+ ld r12,IAREA+EX_R12(r13)
281
+ std r12,HSTATE_SCRATCH0(r13)
282
+ sldi r12,r9,32
283
+ ld r9,IAREA+EX_R9(r13)
284
+ ld r10,IAREA+EX_R10(r13)
285
+ /* HSRR variants have the 0x2 bit added to their trap number */
286
+ .if IHSRR_IF_HVMODE
287
+ BEGIN_FTR_SECTION
288
+ ori r12,r12,(IVEC + 0x2)
289
+ FTR_SECTION_ELSE
290
+ ori r12,r12,(IVEC)
291
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
292
+ .elseif IHSRR
293
+ ori r12,r12,(IVEC+ 0x2)
294
+ .else
295
+ ori r12,r12,(IVEC)
296
+ .endif
297
+ b kvmppc_interrupt
298
+
299
+ .if IKVM_SKIP
300
+89: mtocrf 0x80,r9
301
+ ld r10,IAREA+EX_CTR(r13)
302
+ mtctr r10
303
+ ld r9,IAREA+EX_R9(r13)
304
+ ld r10,IAREA+EX_R10(r13)
305
+ ld r11,IAREA+EX_R11(r13)
306
+ ld r12,IAREA+EX_R12(r13)
307
+ .if IHSRR_IF_HVMODE
308
+ BEGIN_FTR_SECTION
309
+ b kvmppc_skip_Hinterrupt
310
+ FTR_SECTION_ELSE
311
+ b kvmppc_skip_interrupt
312
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
313
+ .elseif IHSRR
314
+ b kvmppc_skip_Hinterrupt
315
+ .else
316
+ b kvmppc_skip_interrupt
317
+ .endif
318
+ .endif
319
+.endm
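
A worked example of the trap-number encoding implied above (illustration only, not an addition to the patch): the sldi leaves the saved CR in the upper 32 bits of r12 and the ori fills the low bits with the vector, so an HSRR interrupt such as the hypervisor doorbell (IVEC=0xe80) is recorded as trap 0xe82, while an SRR interrupt such as the decrementer (IVEC=0x900) is recorded as 0x900 unchanged.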
+
+#else
+.macro KVMTEST name
+.endm
+.macro GEN_KVM name
+.endm
+#endif
+
+/*
+ * This is the BOOK3S interrupt entry code macro.
+ *
+ * This can result in one of several things happening:
+ * - Branch to the _common handler, relocated, in virtual mode.
+ *   These are normal interrupts (synchronous and asynchronous) handled by
+ *   the kernel.
+ * - Branch to KVM, relocated but real mode interrupts remain in real mode.
+ *   These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by
+ *   / intended for host or guest kernel, but KVM must always be involved
+ *   because the machine state is set for guest execution.
+ * - Branch to the masked handler, unrelocated.
+ *   These occur when maskable asynchronous interrupts are taken with the
+ *   irq_soft_mask set.
+ * - Branch to an "early" handler in real mode but relocated.
+ *   This is done if early=1. MCE and HMI use these to handle errors in real
+ *   mode.
+ * - Fall through and continue executing in real, unrelocated mode.
+ *   This is done if early=2.
+ */
+
+.macro GEN_BRANCH_TO_COMMON name, virt
+	.if IREALMODE_COMMON
+	LOAD_HANDLER(r10, \name\()_common)
+	mtctr	r10
+	bctr
+	.else
+	.if \virt
+#ifndef CONFIG_RELOCATABLE
+	b	\name\()_common_virt
+#else
+	LOAD_HANDLER(r10, \name\()_common_virt)
+	mtctr	r10
+	bctr
+#endif
+	.else
+	LOAD_HANDLER(r10, \name\()_common_real)
+	mtctr	r10
+	bctr
+	.endif
+	.endif
+.endm
+
+.macro GEN_INT_ENTRY name, virt, ool=0
+	SET_SCRATCH0(r13)			/* save r13 */
+	GET_PACA(r13)
+	std	r9,IAREA+EX_R9(r13)		/* save r9 */
+BEGIN_FTR_SECTION
+	mfspr	r9,SPRN_PPR
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+	HMT_MEDIUM
+	std	r10,IAREA+EX_R10(r13)		/* save r10 - r12 */
+BEGIN_FTR_SECTION
+	mfspr	r10,SPRN_CFAR
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+	.if \ool
+	.if !\virt
+	b	tramp_real_\name
+	.pushsection .text
+	TRAMP_REAL_BEGIN(tramp_real_\name)
+	.else
+	b	tramp_virt_\name
+	.pushsection .text
+	TRAMP_VIRT_BEGIN(tramp_virt_\name)
+	.endif
+	.endif
+
+BEGIN_FTR_SECTION
+	std	r9,IAREA+EX_PPR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+BEGIN_FTR_SECTION
+	std	r10,IAREA+EX_CFAR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+	INTERRUPT_TO_KERNEL
+	mfctr	r10
+	std	r10,IAREA+EX_CTR(r13)
+	mfcr	r9
+	std	r11,IAREA+EX_R11(r13)
+	std	r12,IAREA+EX_R12(r13)
+
+	/*
+	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
+	 * because a d-side MCE will clobber those registers so is
+	 * not recoverable if they are live.
+	 */
+	GET_SCRATCH0(r10)
+	std	r10,IAREA+EX_R13(r13)
+	.if IDAR && !IISIDE
+	.if IHSRR
+	mfspr	r10,SPRN_HDAR
+	.else
+	mfspr	r10,SPRN_DAR
+	.endif
+	std	r10,IAREA+EX_DAR(r13)
+	.endif
+	.if IDSISR && !IISIDE
+	.if IHSRR
+	mfspr	r10,SPRN_HDSISR
+	.else
+	mfspr	r10,SPRN_DSISR
+	.endif
+	stw	r10,IAREA+EX_DSISR(r13)
+	.endif
+
+	.if IHSRR_IF_HVMODE
+	BEGIN_FTR_SECTION
+	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
+	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
+	FTR_SECTION_ELSE
+	mfspr	r11,SPRN_SRR0		/* save SRR0 */
+	mfspr	r12,SPRN_SRR1		/* and SRR1 */
+	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+	.elseif IHSRR
+	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
+	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
+	.else
+	mfspr	r11,SPRN_SRR0		/* save SRR0 */
+	mfspr	r12,SPRN_SRR1		/* and SRR1 */
+	.endif
+
+	.if IBRANCH_TO_COMMON
+	GEN_BRANCH_TO_COMMON \name \virt
+	.endif
+
+	.if \ool
+	.popsection
+	.endif
.endm
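
For reference, the usage pattern this macro is built for (it appears verbatim for data_access later in this patch): the fixed-section vector contains only the generated entry, which saves state and branches to the common handler:

EXC_REAL_BEGIN(data_access, 0x300, 0x80)
	GEN_INT_ENTRY data_access, virt=0
EXC_REAL_END(data_access, 0x300, 0x80)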
+
+/*
+ * __GEN_COMMON_ENTRY is required to receive the branch from interrupt
+ * entry, except in the case of the real-mode handlers which require
+ * __GEN_REALMODE_COMMON_ENTRY.
+ *
+ * This switches to virtual mode and sets MSR[RI].
+ */
+.macro __GEN_COMMON_ENTRY name
+DEFINE_FIXED_SYMBOL(\name\()_common_real)
+\name\()_common_real:
+	.if IKVM_REAL
+		KVMTEST \name
+	.endif
+
+	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
+	/* MSR[RI] is clear iff using SRR regs */
+	.if IHSRR_IF_HVMODE
+	BEGIN_FTR_SECTION
+	xori	r10,r10,MSR_RI
+	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
+	.elseif ! IHSRR
+	xori	r10,r10,MSR_RI
+	.endif
+	mtmsrd	r10
+
+	.if IVIRT
+	.if IKVM_VIRT
+	b	1f /* skip the virt test coming from real */
+	.endif
+
+	.balign IFETCH_ALIGN_BYTES
+DEFINE_FIXED_SYMBOL(\name\()_common_virt)
+\name\()_common_virt:
+	.if IKVM_VIRT
+		KVMTEST \name
+1:
+	.endif
+	.endif /* IVIRT */
+.endm
+
+/*
+ * Don't switch to virt mode. Used for early MCE and HMI handlers that
+ * want to run in real mode.
+ */
+.macro __GEN_REALMODE_COMMON_ENTRY name
+DEFINE_FIXED_SYMBOL(\name\()_common_real)
+\name\()_common_real:
+	.if IKVM_REAL
+		KVMTEST \name
+	.endif
+.endm
+
+.macro __GEN_COMMON_BODY name
+	.if IMASK
+		.if ! ISTACK
+		.error "No support for masked interrupt to use custom stack"
+		.endif
+
+	/* If coming from user, skip soft-mask tests. */
+	andi.	r10,r12,MSR_PR
+	bne	2f
+
+	/* Kernel code running below __end_interrupts is implicitly
+	 * soft-masked */
+	LOAD_HANDLER(r10, __end_interrupts)
+	cmpld	r11,r10
+	li	r10,IMASK
+	blt-	1f
+
+	/* Test the soft mask state against our interrupt's bit */
+	lbz	r10,PACAIRQSOFTMASK(r13)
+1:	andi.	r10,r10,IMASK
+	/* Associate vector numbers with bits in paca->irq_happened */
+	.if IVEC == 0x500 || IVEC == 0xea0
+	li	r10,PACA_IRQ_EE
+	.elseif IVEC == 0x900
+	li	r10,PACA_IRQ_DEC
+	.elseif IVEC == 0xa00 || IVEC == 0xe80
+	li	r10,PACA_IRQ_DBELL
+	.elseif IVEC == 0xe60
+	li	r10,PACA_IRQ_HMI
+	.elseif IVEC == 0xf00
+	li	r10,PACA_IRQ_PMI
+	.else
+	.abort "Bad maskable vector"
+	.endif
+
+	.if IHSRR_IF_HVMODE
+	BEGIN_FTR_SECTION
+	bne	masked_Hinterrupt
+	FTR_SECTION_ELSE
+	bne	masked_interrupt
+	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+	.elseif IHSRR
+	bne	masked_Hinterrupt
+	.else
+	bne	masked_interrupt
+	.endif
+	.endif
+
+	.if ISTACK
+	andi.	r10,r12,MSR_PR		/* See if coming from user */
+2:	mr	r10,r1			/* Save r1 */
+	subi	r1,r1,INT_FRAME_SIZE	/* alloc frame on kernel stack */
+	beq-	100f
+	ld	r1,PACAKSAVE(r13)	/* kernel stack to use */
+100:	tdgei	r1,-INT_FRAME_SIZE	/* trap if r1 is in userspace */
+	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
+	.endif
+
+	std	r9,_CCR(r1)		/* save CR in stackframe */
+	std	r11,_NIP(r1)		/* save SRR0 in stackframe */
+	std	r12,_MSR(r1)		/* save SRR1 in stackframe */
+	std	r10,0(r1)		/* make stack chain pointer */
+	std	r0,GPR0(r1)		/* save r0 in stackframe */
+	std	r10,GPR1(r1)		/* save r1 in stackframe */
+
+	.if ISET_RI
+	li	r10,MSR_RI
+	mtmsrd	r10,1			/* Set MSR_RI */
+	.endif
+
+	.if ISTACK
+	.if IKUAP
+	kuap_save_amr_and_lock r9, r10, cr1, cr0
+	.endif
+	beq	101f			/* if from kernel mode */
+	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10)
+BEGIN_FTR_SECTION
+	ld	r9,IAREA+EX_PPR(r13)	/* Read PPR from paca */
+	std	r9,_PPR(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+101:
+	.else
+	.if IKUAP
+	kuap_save_amr_and_lock r9, r10, cr1
+	.endif
+	.endif
+
+	/* Save original regs values from save area to stack frame. */
+	ld	r9,IAREA+EX_R9(r13)	/* move r9, r10 to stackframe */
+	ld	r10,IAREA+EX_R10(r13)
+	std	r9,GPR9(r1)
+	std	r10,GPR10(r1)
+	ld	r9,IAREA+EX_R11(r13)	/* move r11 - r13 to stackframe */
+	ld	r10,IAREA+EX_R12(r13)
+	ld	r11,IAREA+EX_R13(r13)
+	std	r9,GPR11(r1)
+	std	r10,GPR12(r1)
+	std	r11,GPR13(r1)
+
+	SAVE_NVGPRS(r1)
+
+	.if IDAR
+	.if IISIDE
+	ld	r10,_NIP(r1)
+	.else
+	ld	r10,IAREA+EX_DAR(r13)
+	.endif
+	std	r10,_DAR(r1)
+	.endif
+
+	.if IDSISR
+	.if IISIDE
+	ld	r10,_MSR(r1)
+	lis	r11,DSISR_SRR1_MATCH_64S@h
+	and	r10,r10,r11
+	.else
+	lwz	r10,IAREA+EX_DSISR(r13)
+	.endif
+	std	r10,_DSISR(r1)
+	.endif
+
+BEGIN_FTR_SECTION
+	ld	r10,IAREA+EX_CFAR(r13)
+	std	r10,ORIG_GPR3(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+	ld	r10,IAREA+EX_CTR(r13)
+	std	r10,_CTR(r1)
+	std	r2,GPR2(r1)		/* save r2 in stackframe */
+	SAVE_4GPRS(3, r1)		/* save r3 - r6 in stackframe */
+	SAVE_2GPRS(7, r1)		/* save r7, r8 in stackframe */
+	mflr	r9			/* Get LR, later save to stack */
+	ld	r2,PACATOC(r13)		/* get kernel TOC into r2 */
+	std	r9,_LINK(r1)
+	lbz	r10,PACAIRQSOFTMASK(r13)
+	mfspr	r11,SPRN_XER		/* save XER in stackframe */
+	std	r10,SOFTE(r1)
+	std	r11,_XER(r1)
+	li	r9,IVEC
+	std	r9,_TRAP(r1)		/* set trap number */
+	li	r10,0
+	ld	r11,exception_marker@toc(r2)
+	std	r10,RESULT(r1)		/* clear regs->result */
+	std	r11,STACK_FRAME_OVERHEAD-16(r1)	/* mark the frame */
+
+	.if ISTACK
+	ACCOUNT_STOLEN_TIME
+	.endif
+
+	.if IRECONCILE
+	RECONCILE_IRQ_STATE(r10, r11)
+	.endif
.endm
+
+/*
+ * On entry r13 points to the paca, r9-r13 are saved in the paca,
+ * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
+ * SRR1, and relocation is on.
+ *
+ * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
+ * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
+ */
+.macro GEN_COMMON name
+	__GEN_COMMON_ENTRY \name
+	__GEN_COMMON_BODY \name
+.endm
+
+/*
+ * Restore all registers including H/SRR0/1 saved in a stack frame of a
+ * standard exception.
+ */
+.macro EXCEPTION_RESTORE_REGS hsrr=0
+	/* Move original SRR0 and SRR1 into the respective regs */
+	ld	r9,_MSR(r1)
+	.if \hsrr
+	mtspr	SPRN_HSRR1,r9
+	.else
+	mtspr	SPRN_SRR1,r9
+	.endif
+	ld	r9,_NIP(r1)
+	.if \hsrr
+	mtspr	SPRN_HSRR0,r9
+	.else
+	mtspr	SPRN_SRR0,r9
+	.endif
+	ld	r9,_CTR(r1)
+	mtctr	r9
+	ld	r9,_XER(r1)
+	mtxer	r9
+	ld	r9,_LINK(r1)
+	mtlr	r9
+	ld	r9,_CCR(r1)
+	mtcr	r9
+	REST_8GPRS(2, r1)
+	REST_4GPRS(10, r1)
+	REST_GPR(0, r1)
+	/* restore original r1. */
+	ld	r1,GPR1(r1)
.endm
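
A sketch of the intended pairing (matching the system reset path later in this patch): the caller restores the frame and then returns with the rfid/hrfid flavour corresponding to the hsrr argument:

	EXCEPTION_RESTORE_REGS		/* hsrr=0: reloads SRR0/SRR1 */
	RFI_TO_USER_OR_KERNEL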
+
+#define RUNLATCH_ON				\
+BEGIN_FTR_SECTION				\
+	ld	r3, PACA_THREAD_INFO(r13);	\
+	ld	r4,TI_LOCAL_FLAGS(r3);		\
+	andi.	r0,r4,_TLF_RUNLATCH;		\
+	beql	ppc64_runlatch_on_trampoline;	\
+END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
+
+/*
+ * When the idle code in power4_idle puts the CPU into NAP mode,
+ * it has to do so in a loop, and relies on the external interrupt
+ * and decrementer interrupt entry code to get it out of the loop.
+ * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
+ * to signal that it is in the loop and needs help to get out.
+ */
+#ifdef CONFIG_PPC_970_NAP
+#define FINISH_NAP				\
+BEGIN_FTR_SECTION				\
+	ld	r11, PACA_THREAD_INFO(r13);	\
+	ld	r9,TI_LOCAL_FLAGS(r11);		\
+	andi.	r10,r9,_TLF_NAPPING;		\
+	bnel	power4_fixup_nap;		\
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+#else
+#define FINISH_NAP
+#endif

 /*
 * There are a few constraints to be concerned with.
@@ -44,6 +756,9 @@
 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 * cause exceptions to be delivered in real mode.
 *
+ * The scv instructions are a special case. They get a 0x3000 offset applied.
+ * scv exceptions have unique reentrancy properties, see below.
+ *
 * It's impossible to receive interrupts below 0x300 via AIL.
 *
 * KVM: None of the virtual exceptions are from the guest. Anything that
@@ -53,8 +768,8 @@
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
- * 0x1900 - 0x3fff : Real mode trampolines
- * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
+ * 0x1900 - 0x2fff : Real mode trampolines
+ * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 * 0x5900 - 0x6fff : Relon mode trampolines
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 - .... : Common interrupt handlers, remaining early
@@ -65,9 +780,17 @@
 * vectors there.
 */
 OPEN_FIXED_SECTION(real_vectors, 0x0100, 0x1900)
-OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x4000)
-OPEN_FIXED_SECTION(virt_vectors, 0x4000, 0x5900)
+OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x3000)
+OPEN_FIXED_SECTION(virt_vectors, 0x3000, 0x5900)
 OPEN_FIXED_SECTION(virt_trampolines, 0x5900, 0x7000)
+
+#ifdef CONFIG_PPC_POWERNV
+	.globl start_real_trampolines
+	.globl end_real_trampolines
+	.globl start_virt_trampolines
+	.globl end_virt_trampolines
+#endif
+
 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
 /*
 * Data area reserved for FWNMI option.
@@ -94,10 +817,129 @@
 .globl __start_interrupts
 __start_interrupts:

+/**
+ * Interrupt 0x3000 - System Call Vectored Interrupt (syscall).
+ * This is a synchronous interrupt invoked with the "scv" instruction. The
+ * system call does not alter the HV bit, so it is directed to the OS.
+ *
+ * Handling:
+ * scv instructions enter the kernel without changing EE, RI, ME, or HV.
+ * In particular, this means we can take a maskable interrupt at any point
+ * in the scv handler, which is unlike any other interrupt. This is solved
+ * by treating the instruction addresses below __end_interrupts as being
+ * soft-masked.
+ *
+ * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
+ * ensure scv is never executed with relocation off, which means AIL-0
+ * should never happen.
+ *
+ * Before leaving the below __end_interrupts text, at least one of the
+ * following must be true:
+ * - MSR[PR]=1 (i.e., return to userspace)
+ * - MSR_EE|MSR_RI is set (no reentrant exceptions)
+ * - Standard kernel environment is set up (stack, paca, etc)
+ *
+ * Call convention:
+ *
+ * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
+ */
+EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
+	/* SCV 0 */
+	mr	r9,r13
+	GET_PACA(r13)
+	mflr	r11
+	mfctr	r12
+	li	r10,IRQS_ALL_DISABLED
+	stb	r10,PACAIRQSOFTMASK(r13)
+#ifdef CONFIG_RELOCATABLE
+	b	system_call_vectored_tramp
+#else
+	b	system_call_vectored_common
+#endif
+	nop
+
+	/* SCV 1 - 127 */
+	.rept	127
+	mr	r9,r13
+	GET_PACA(r13)
+	mflr	r11
+	mfctr	r12
+	li	r10,IRQS_ALL_DISABLED
+	stb	r10,PACAIRQSOFTMASK(r13)
+	li	r0,-1		/* cause failure */
+#ifdef CONFIG_RELOCATABLE
+	b	system_call_vectored_sigill_tramp
+#else
+	b	system_call_vectored_sigill
+#endif
+	.endr
+EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)
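
As a size check on the block above (assuming GET_PACA expands to a single instruction): each stub is eight 4-byte instructions, i.e. 0x20 bytes — note the nop padding SCV 0 to match the sigill stubs' extra li — so scv level N traps to 0x3000 + N*0x20, and the 1 + 127 stubs exactly fill the 0x1000 region.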
+
+#ifdef CONFIG_RELOCATABLE
+TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
+	__LOAD_HANDLER(r10, system_call_vectored_common)
+	mtctr	r10
+	bctr
+
+TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp)
+	__LOAD_HANDLER(r10, system_call_vectored_sigill)
+	mtctr	r10
+	bctr
+#endif
+
+
 /* No virt vectors corresponding with 0x0..0x100 */
 EXC_VIRT_NONE(0x4000, 0x100)


+/**
+ * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
+ * This is a non-maskable, asynchronous interrupt always taken in real-mode.
+ * It is caused by:
+ * - Wake from power-saving state, on powernv.
+ * - An NMI from another CPU, triggered by firmware or hypercall.
+ * - As crash/debug signal injected from BMC, firmware or hypervisor.
+ *
+ * Handling:
+ * Power-save wakeup is the only performance critical path, so this is
+ * determined as quickly as possible first. In this case volatile registers
+ * can be discarded and SPRs like CFAR don't need to be read.
+ *
+ * If not a powersave wakeup, then it's run as a regular interrupt, however
+ * it uses its own stack and PACA save area to preserve the regular kernel
+ * environment for debugging.
+ *
+ * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
+ * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely
+ * correct to switch to virtual mode to run the regular interrupt handler
+ * because it might be interrupted when the MMU is in a bad state (e.g., SLB
+ * is clear).
+ *
+ * FWNMI:
+ * PAPR specifies a "fwnmi" facility which sends the sreset to a different
+ * entry point with a different register set up. Some hypervisors will
+ * send the sreset to 0x100 in the guest if it is not fwnmi capable.
+ *
+ * KVM:
+ * Unlike most SRR interrupts, this may be taken by the host while executing
+ * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
+ * mode and then raise the sreset.
+ */
+INT_DEFINE_BEGIN(system_reset)
+	IVEC=0x100
+	IAREA=PACA_EXNMI
+	IVIRT=0 /* no virt entry point */
+	/*
+	 * MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
+	 * being used, so a nested NMI exception would corrupt it.
+	 */
+	ISET_RI=0
+	ISTACK=0
+	IRECONCILE=0
+	IKVM_REAL=1
+INT_DEFINE_END(system_reset)
+
+EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
 #ifdef CONFIG_PPC_P7_NAP
 /*
 * If running native on arch 2.06 or later, check if we are waking up
@@ -105,57 +947,65 @@
 * bits 46:47. A non-0 value indicates that we are coming from a power
 * saving state. The idle wakeup handler initially runs in real mode,
 * but we branch to the 0xc000... address so we can turn on relocation
- * with mtmsr.
+ * with mtmsrd later, after SPRs are restored.
+ *
+ * Careful to minimise cost for the fast path (idle wakeup) while
+ * also avoiding clobbering CFAR for the debug path (non-idle).
+ *
+ * For the idle wake case volatile registers can be clobbered, which
+ * is why we use those initially. If it turns out to not be an idle
+ * wake, carefully put everything back the way it was, so we can use
+ * common exception macros to handle it.
 */
-#define IDLETEST(n)						\
-	BEGIN_FTR_SECTION ;					\
-	mfspr	r10,SPRN_SRR1 ;					\
-	rlwinm.	r10,r10,47-31,30,31 ;				\
-	beq-	1f ;						\
-	cmpwi	cr3,r10,2 ;					\
-	BRANCH_TO_C000(r10, system_reset_idle_common) ;		\
-1:								\
-	KVMTEST_PR(n) ;						\
-	END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-#else
-#define IDLETEST NOTEST
+BEGIN_FTR_SECTION
+	SET_SCRATCH0(r13)
+	GET_PACA(r13)
+	std	r3,PACA_EXNMI+0*8(r13)
+	std	r4,PACA_EXNMI+1*8(r13)
+	std	r5,PACA_EXNMI+2*8(r13)
+	mfspr	r3,SPRN_SRR1
+	mfocrf	r4,0x80
+	rlwinm.	r5,r3,47-31,30,31
+	bne+	system_reset_idle_wake
+	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
+	mtocrf	0x80,r4
+	ld	r3,PACA_EXNMI+0*8(r13)
+	ld	r4,PACA_EXNMI+1*8(r13)
+	ld	r5,PACA_EXNMI+2*8(r13)
+	GET_SCRATCH0(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 #endif

-EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
-	SET_SCRATCH0(r13)
+	GEN_INT_ENTRY system_reset, virt=0
 /*
-	 * MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
-	 * being used, so a nested NMI exception would corrupt it.
+ * In theory, we should not enable relocation here if it was disabled
+ * in SRR1, because the MMU may not be configured to support it (e.g.,
+ * SLB may have been cleared). In practice, there should only be a few
+ * small windows where that's the case, and sreset is considered to
+ * be dangerous anyway.
 */
-	EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
-				IDLETEST, 0x100)
-
 EXC_REAL_END(system_reset, 0x100, 0x100)
 EXC_VIRT_NONE(0x4100, 0x100)
-TRAMP_KVM(PACA_EXNMI, 0x100)

 #ifdef CONFIG_PPC_P7_NAP
-EXC_COMMON_BEGIN(system_reset_idle_common)
-	mfspr	r12,SPRN_SRR1
-	b	pnv_powersave_wakeup
+TRAMP_REAL_BEGIN(system_reset_idle_wake)
+	/* We are waking up from idle, so may clobber any volatile register */
+	cmpwi	cr1,r5,2
+	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
+	BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
 #endif

+#ifdef CONFIG_PPC_PSERIES
 /*
- * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does
- * the right thing. We do not want to reconcile because that goes
- * through irq tracing which we don't want in NMI.
- *
- * Save PACAIRQHAPPENED because some code will do a hard disable
- * (e.g., xmon). So we want to restore this back to where it was
- * when we return. DAR is unused in the stack, so save it there.
+ * Vectors for the FWNMI option. Share common code.
 */
-#define ADD_RECONCILE_NMI				\
-	li	r10,IRQS_ALL_DISABLED;			\
-	stb	r10,PACAIRQSOFTMASK(r13);		\
-	lbz	r10,PACAIRQHAPPENED(r13);		\
-	std	r10,_DAR(r1)
+TRAMP_REAL_BEGIN(system_reset_fwnmi)
+	GEN_INT_ENTRY system_reset, virt=0
+
+#endif /* CONFIG_PPC_PSERIES */

 EXC_COMMON_BEGIN(system_reset_common)
+	__GEN_COMMON_ENTRY system_reset
 /*
 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
 * to recover, but nested NMI will notice in_nmi and not recover
@@ -171,15 +1021,27 @@
 	mr	r10,r1
 	ld	r1,PACA_NMI_EMERG_SP(r13)
 	subi	r1,r1,INT_FRAME_SIZE
-	EXCEPTION_COMMON_NORET_STACK(PACA_EXNMI, 0x100,
-			system_reset, system_reset_exception,
-			ADD_NVGPRS;ADD_RECONCILE_NMI)
+	__GEN_COMMON_BODY system_reset
+	/*
+	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
+	 * the right thing. We do not want to reconcile because that goes
+	 * through irq tracing which we don't want in NMI.
+	 *
+	 * Save PACAIRQHAPPENED to RESULT (otherwise unused), and set HARD_DIS
+	 * as we are running with MSR[EE]=0.
+	 */
+	li	r10,IRQS_ALL_DISABLED
+	stb	r10,PACAIRQSOFTMASK(r13)
+	lbz	r10,PACAIRQHAPPENED(r13)
+	std	r10,RESULT(r1)
+	ori	r10,r10,PACA_IRQ_HARD_DIS
+	stb	r10,PACAIRQHAPPENED(r13)

-	/* This (and MCE) can be simplified with mtmsrd L=1 */
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	system_reset_exception
+
 	/* Clear MSR_RI before setting SRR0 and SRR1. */
-	li	r0,MSR_RI
-	mfmsr	r9
-	andc	r9,r9,r0
+	li	r9,0
 	mtmsrd	r9,1

 /*
@@ -192,73 +1054,115 @@
 /*
 * Restore soft mask settings.
 */
-	ld	r10,_DAR(r1)
+	ld	r10,RESULT(r1)
 	stb	r10,PACAIRQHAPPENED(r13)
 	ld	r10,SOFTE(r1)
 	stb	r10,PACAIRQSOFTMASK(r13)

-	/*
-	 * Keep below code in synch with MACHINE_CHECK_HANDLER_WINDUP.
-	 * Should share common bits...
-	 */
-
-	/* Move original SRR0 and SRR1 into the respective regs */
-	ld	r9,_MSR(r1)
-	mtspr	SPRN_SRR1,r9
-	ld	r3,_NIP(r1)
-	mtspr	SPRN_SRR0,r3
-	ld	r9,_CTR(r1)
-	mtctr	r9
-	ld	r9,_XER(r1)
-	mtxer	r9
-	ld	r9,_LINK(r1)
-	mtlr	r9
-	REST_GPR(0, r1)
-	REST_8GPRS(2, r1)
-	REST_GPR(10, r1)
-	ld	r11,_CCR(r1)
-	mtcr	r11
-	REST_GPR(11, r1)
-	REST_2GPRS(12, r1)
-	/* restore original r1. */
-	ld	r1,GPR1(r1)
+	kuap_restore_amr r9, r10
+	EXCEPTION_RESTORE_REGS
 	RFI_TO_USER_OR_KERNEL

-#ifdef CONFIG_PPC_PSERIES
-/*
- * Vectors for the FWNMI option. Share common code.
- */
-TRAMP_REAL_BEGIN(system_reset_fwnmi)
-	SET_SCRATCH0(r13)		/* save r13 */
-	/* See comment at system_reset exception */
-	EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
-				NOTEST, 0x100)
-#endif /* CONFIG_PPC_PSERIES */
+	GEN_KVM system_reset

+
+/**
+ * Interrupt 0x200 - Machine Check Interrupt (MCE).
+ * This is a non-maskable interrupt always taken in real-mode. It can be
+ * synchronous or asynchronous, caused by hardware or software, and it may be
+ * taken in a power-saving state.
+ *
+ * Handling:
+ * Similarly to system reset, this uses its own stack and PACA save area,
+ * the difference is re-entrancy is allowed on the machine check stack.
+ *
+ * machine_check_early is run in real mode, and carefully decodes the
+ * machine check and tries to handle it (e.g., flush the SLB if there was an
+ * error detected there), determines if it was recoverable and logs the
+ * event.
+ *
+ * This early code does not "reconcile" irq soft-mask state like SRESET or
+ * regular interrupts do, so irqs_disabled() among other things may not work
+ * properly (irq disable/enable already doesn't work because irq tracing can
+ * not work in real mode).
+ *
+ * Then, depending on the execution context when the interrupt is taken, there
+ * are 3 main actions:
+ * - Executing in kernel mode. The event is queued with irq_work, which means
+ *   it is handled when it is next safe to do so (i.e., the kernel has enabled
+ *   interrupts), which could be immediately when the interrupt returns. This
+ *   avoids nasty issues like switching to virtual mode when the MMU is in a
+ *   bad state, or when executing OPAL code. (SRESET is exposed to such issues,
+ *   but it has different priorities). Check to see if the CPU was in power
+ *   save, and return via the wake up code if it was.
+ *
+ * - Executing in user mode. machine_check_exception is run like a normal
+ *   interrupt handler, which processes the data generated by the early handler.
+ *
+ * - Executing in guest mode. The interrupt is run with its KVM test, and
+ *   branches to KVM to deal with. KVM may queue the event for the host
+ *   to report later.
+ *
+ * This interrupt is not maskable, so if it triggers when MSR[RI] is clear,
+ * or SCRATCH0 is in use, it may cause a crash.
+ *
+ * KVM:
+ * See SRESET.
+ */
+INT_DEFINE_BEGIN(machine_check_early)
+	IVEC=0x200
+	IAREA=PACA_EXMC
+	IVIRT=0 /* no virt entry point */
+	IREALMODE_COMMON=1
+	/*
+	 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
+	 * nested machine check corrupts it. machine_check_common enables
+	 * MSR_RI.
+	 */
+	ISET_RI=0
+	ISTACK=0
+	IDAR=1
+	IDSISR=1
+	IRECONCILE=0
+	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
+INT_DEFINE_END(machine_check_early)
+
+INT_DEFINE_BEGIN(machine_check)
+	IVEC=0x200
+	IAREA=PACA_EXMC
+	IVIRT=0 /* no virt entry point */
+	ISET_RI=0
+	IDAR=1
+	IDSISR=1
+	IKVM_SKIP=1
+	IKVM_REAL=1
+INT_DEFINE_END(machine_check)

 EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
-	/* This is moved out of line as it can be patched by FW, but
-	 * some code path might still want to branch into the original
-	 * vector
-	 */
-	SET_SCRATCH0(r13)		/* save r13 */
-	EXCEPTION_PROLOG_0(PACA_EXMC)
-BEGIN_FTR_SECTION
-	b	machine_check_powernv_early
-FTR_SECTION_ELSE
-	b	machine_check_pSeries_0
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
+	GEN_INT_ENTRY machine_check_early, virt=0
 EXC_REAL_END(machine_check, 0x200, 0x100)
 EXC_VIRT_NONE(0x4200, 0x100)
-TRAMP_REAL_BEGIN(machine_check_powernv_early)
-BEGIN_FTR_SECTION
-	EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
+
+#ifdef CONFIG_PPC_PSERIES
+TRAMP_REAL_BEGIN(machine_check_fwnmi)
+	/* See comment at machine_check exception, don't turn on RI */
+	GEN_INT_ENTRY machine_check_early, virt=0
+#endif
+
+#define MACHINE_CHECK_HANDLER_WINDUP			\
+	/* Clear MSR_RI before setting SRR0 and SRR1. */\
+	li	r9,0;					\
+	mtmsrd	r9,1;		/* Clear MSR_RI */	\
+	/* Decrement paca->in_mce now RI is clear. */	\
+	lhz	r12,PACA_IN_MCE(r13);			\
+	subi	r12,r12,1;				\
+	sth	r12,PACA_IN_MCE(r13);			\
+	EXCEPTION_RESTORE_REGS
+
+EXC_COMMON_BEGIN(machine_check_early_common)
+	__GEN_REALMODE_COMMON_ENTRY machine_check_early
+
 /*
-	 * Register contents:
-	 * R13 = PACA
-	 * R9 = CR
-	 * Original R9 to R13 is saved on PACA_EXMC
-	 *
 * Switch to mc_emergency stack and handle re-entrancy (we limit
 * the nested MCE up to level 4 to avoid stack overflow).
 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
@@ -279,123 +1183,145 @@
 * the machine check is handled then the idle wakeup code is called
 * to restore state.
 */
-	mr	r11,r1			/* Save r1 */
 	lhz	r10,PACA_IN_MCE(r13)
 	cmpwi	r10,0			/* Are we in nested machine check */
-	bne	0f			/* Yes, we are. */
-	/* First machine check entry */
-	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
-0:	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
+	cmpwi	cr1,r10,MAX_MCE_DEPTH	/* Are we at maximum nesting */
 	addi	r10,r10,1		/* increment paca->in_mce */
 	sth	r10,PACA_IN_MCE(r13)
-	/* Limit nested MCE to level 4 to avoid stack overflow */
-	cmpwi	r10,MAX_MCE_DEPTH
-	bgt	2f			/* Check if we hit limit of 4 */
-	std	r11,GPR1(r1)		/* Save r1 on the stack. */
-	std	r11,0(r1)		/* make stack chain pointer */
-	mfspr	r11,SPRN_SRR0		/* Save SRR0 */
-	std	r11,_NIP(r1)
-	mfspr	r11,SPRN_SRR1		/* Save SRR1 */
-	std	r11,_MSR(r1)
-	mfspr	r11,SPRN_DAR		/* Save DAR */
-	std	r11,_DAR(r1)
-	mfspr	r11,SPRN_DSISR		/* Save DSISR */
-	std	r11,_DSISR(r1)
-	std	r9,_CCR(r1)		/* Save CR in stackframe */
-	/* Save r9 through r13 from EXMC save area to stack frame. */
-	EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
-	mfmsr	r11			/* get MSR value */
-	ori	r11,r11,MSR_ME		/* turn on ME bit */
-	ori	r11,r11,MSR_RI		/* turn on RI bit */
-	LOAD_HANDLER(r12, machine_check_handle_early)
-1:	mtspr	SPRN_SRR0,r12
-	mtspr	SPRN_SRR1,r11
-	RFI_TO_KERNEL
-	b	.	/* prevent speculative execution */
-2:
-	/* Stack overflow. Stay on emergency stack and panic.
-	 * Keep the ME bit off while panic-ing, so that if we hit
-	 * another machine check we checkstop.
-	 */
-	addi	r1,r1,INT_FRAME_SIZE	/* go back to previous stack frame */
-	ld	r11,PACAKMSR(r13)
-	LOAD_HANDLER(r12, unrecover_mce)
-	li	r10,MSR_ME
-	andc	r11,r11,r10		/* Turn off MSR_ME */
-	b	1b
-	b	.	/* prevent speculative execution */
+
+	mr	r10,r1			/* Save r1 */
+	bne	1f
+	/* First machine check entry */
+	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
+1:	/* Limit nested MCE to level 4 to avoid stack overflow */
+	bgt	cr1,unrecoverable_mce	/* Check if we hit limit of 4 */
+	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
+
+	__GEN_COMMON_BODY machine_check_early
+
+BEGIN_FTR_SECTION
+	bl	enable_machine_check
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+	li	r10,MSR_RI
+	mtmsrd	r10,1

-TRAMP_REAL_BEGIN(machine_check_pSeries)
-	.globl machine_check_fwnmi
-machine_check_fwnmi:
-	SET_SCRATCH0(r13)		/* save r13 */
-	EXCEPTION_PROLOG_0(PACA_EXMC)
-machine_check_pSeries_0:
-	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST_PR, 0x200)
 /*
-	 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
-	 * nested machine check corrupts it. machine_check_common enables
-	 * MSR_RI.
+ * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see
+ * system_reset_common)
 */
-	EXCEPTION_PROLOG_2_NORI(machine_check_common, EXC_STD)
+	li	r10,IRQS_ALL_DISABLED
+	stb	r10,PACAIRQSOFTMASK(r13)
+	lbz	r10,PACAIRQHAPPENED(r13)
+	std	r10,RESULT(r1)
+	ori	r10,r10,PACA_IRQ_HARD_DIS
+	stb	r10,PACAIRQHAPPENED(r13)

-TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	machine_check_early
+	std	r3,RESULT(r1)	/* Save result */
+	ld	r12,_MSR(r1)
+
+	/*
+	 * Restore soft mask settings.
+	 */
+	ld	r10,RESULT(r1)
+	stb	r10,PACAIRQHAPPENED(r13)
+	ld	r10,SOFTE(r1)
+	stb	r10,PACAIRQSOFTMASK(r13)
+
+#ifdef CONFIG_PPC_P7_NAP
+	/*
+	 * Check if thread was in power saving mode. We come here when any
+	 * of the following is true:
+	 * a. thread wasn't in power saving mode
+	 * b. thread was in power saving mode with no state loss,
+	 *    supervisor state loss or hypervisor state loss.
+	 *
+	 * Go back to nap/sleep/winkle mode again if (b) is true.
+	 */
+BEGIN_FTR_SECTION
+	rlwinm.	r11,r12,47-31,30,31
+	bne	machine_check_idle_common
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+	/*
+	 * Check if we are coming from guest. If yes, then run the normal
+	 * exception handler which will take the
+	 * machine_check_kvm->kvmppc_interrupt branch to deliver the MC event
+	 * to guest.
+	 */
+	lbz	r11,HSTATE_IN_GUEST(r13)
+	cmpwi	r11,0			/* Check if coming from guest */
+	bne	mce_deliver		/* continue if we are. */
+#endif
+
+	/*
+	 * Check if we are coming from userspace. If yes, then run the normal
+	 * exception handler which will deliver the MC event to this kernel.
+	 */
+	andi.	r11,r12,MSR_PR		/* See if coming from user. */
+	bne	mce_deliver		/* continue in V mode if we are. */
+
+	/*
+	 * At this point we are coming from kernel context.
+	 * Queue up the MCE event and return from the interrupt.
+	 * But before that, check if this is an un-recoverable exception.
+	 * If yes, then stay on emergency stack and panic.
+	 */
+	andi.	r11,r12,MSR_RI
+	beq	unrecoverable_mce
+
+	/*
+	 * Check if we have successfully handled/recovered from error, if not
+	 * then stay on emergency stack and panic.
+	 */
+	ld	r3,RESULT(r1)	/* Load result */
+	cmpdi	r3,0		/* see if we handled MCE successfully */
+	beq	unrecoverable_mce /* if !handled then panic */
+
+	/*
+	 * Return from MC interrupt.
+	 * Queue up the MCE event so that we can log it later, while
+	 * returning from kernel or opal call.
+	 */
+	bl	machine_check_queue_event
+	MACHINE_CHECK_HANDLER_WINDUP
+	RFI_TO_KERNEL
+
+mce_deliver:
+	/*
+	 * This is a host user or guest MCE. Restore all registers, then
+	 * run the "late" handler. For host user, this will run the
+	 * machine_check_exception handler in virtual mode like a normal
+	 * interrupt handler. For guest, this will trigger the KVM test
+	 * and branch to the KVM interrupt similarly to other interrupts.
+	 */
+BEGIN_FTR_SECTION
+	ld	r10,ORIG_GPR3(r1)
+	mtspr	SPRN_CFAR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+	MACHINE_CHECK_HANDLER_WINDUP
+	GEN_INT_ENTRY machine_check, virt=0

 EXC_COMMON_BEGIN(machine_check_common)
 /*
 * Machine check is different because we use a different
 * save area: PACA_EXMC instead of PACA_EXGEN.
 */
-	mfspr	r10,SPRN_DAR
-	std	r10,PACA_EXMC+EX_DAR(r13)
-	mfspr	r10,SPRN_DSISR
-	stw	r10,PACA_EXMC+EX_DSISR(r13)
-	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+	GEN_COMMON machine_check
+
 	FINISH_NAP
-	RECONCILE_IRQ_STATE(r10, r11)
-	ld	r3,PACA_EXMC+EX_DAR(r13)
-	lwz	r4,PACA_EXMC+EX_DSISR(r13)
 	/* Enable MSR_RI when finished with PACA_EXMC */
 	li	r10,MSR_RI
 	mtmsrd	r10,1
-	std	r3,_DAR(r1)
-	std	r4,_DSISR(r1)
-	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	machine_check_exception
-	b	ret_from_except
+	b	interrupt_return

-#define MACHINE_CHECK_HANDLER_WINDUP			\
-	/* Clear MSR_RI before setting SRR0 and SRR1. */\
-	li	r0,MSR_RI;				\
-	mfmsr	r9;		/* get MSR value */	\
-	andc	r9,r9,r0;				\
-	mtmsrd	r9,1;		/* Clear MSR_RI */	\
-	/* Move original SRR0 and SRR1 into the respective regs */ \
-	ld	r9,_MSR(r1);				\
-	mtspr	SPRN_SRR1,r9;				\
-	ld	r3,_NIP(r1);				\
-	mtspr	SPRN_SRR0,r3;				\
-	ld	r9,_CTR(r1);				\
-	mtctr	r9;					\
-	ld	r9,_XER(r1);				\
-	mtxer	r9;					\
-	ld	r9,_LINK(r1);				\
-	mtlr	r9;					\
-	REST_GPR(0, r1);				\
-	REST_8GPRS(2, r1);				\
-	REST_GPR(10, r1);				\
-	ld	r11,_CCR(r1);				\
-	mtcr	r11;					\
-	/* Decrement paca->in_mce. */			\
-	lhz	r12,PACA_IN_MCE(r13);			\
-	subi	r12,r12,1;				\
-	sth	r12,PACA_IN_MCE(r13);			\
-	REST_GPR(11, r1);				\
-	REST_2GPRS(12, r1);				\
-	/* restore original r1. */			\
-	ld	r1,GPR1(r1)
+	GEN_KVM machine_check
+

4011327 /*
....@@ -406,89 +1332,32 @@
4061332 bl machine_check_queue_event
4071333
4081334 /*
409
- * We have not used any non-volatile GPRs here, and as a rule
410
- * most exception code including machine check does not.
411
- * Therefore PACA_NAPSTATELOST does not need to be set. Idle
412
- * wakeup will restore volatile registers.
1335
+ * GPR-loss wakeups are relatively straightforward, because the
1336
+ * idle sleep code has saved all non-volatile registers on its
1337
+ * own stack, and r1 in PACAR1.
4131338 *
414
- * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
1339
+ * For no-loss wakeups the r1 and lr registers used by the
1340
+ * early machine check handler have to be restored first. r2 is
1341
+ * the kernel TOC, so no need to restore it.
4151342 *
4161343 * Then decrement MCE nesting after finishing with the stack.
4171344 */
4181345 ld r3,_MSR(r1)
1346
+ ld r4,_LINK(r1)
1347
+ ld r1,GPR1(r1)
4191348
4201349 lhz r11,PACA_IN_MCE(r13)
4211350 subi r11,r11,1
4221351 sth r11,PACA_IN_MCE(r13)
4231352
424
- /* Turn off the RI bit because SRR1 is used by idle wakeup code. */
425
- /* Recoverability could be improved by reducing the use of SRR1. */
426
- li r11,0
427
- mtmsrd r11,1
428
-
429
- b pnv_powersave_wakeup_mce
430
-#endif
431
- /*
432
- * Handle machine check early in real mode. We come here with
433
- * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
434
- */
435
-EXC_COMMON_BEGIN(machine_check_handle_early)
436
- std r0,GPR0(r1) /* Save r0 */
437
- EXCEPTION_PROLOG_COMMON_3(0x200)
438
- bl save_nvgprs
439
- addi r3,r1,STACK_FRAME_OVERHEAD
440
- bl machine_check_early
441
- std r3,RESULT(r1) /* Save result */
442
- ld r12,_MSR(r1)
443
-
444
-#ifdef CONFIG_PPC_P7_NAP
445
- /*
446
- * Check if thread was in power saving mode. We come here when any
447
- * of the following is true:
448
- * a. thread wasn't in power saving mode
449
- * b. thread was in power saving mode with no state loss,
450
- * supervisor state loss or hypervisor state loss.
451
- *
452
- * Go back to nap/sleep/winkle mode again if (b) is true.
453
- */
454
- BEGIN_FTR_SECTION
455
- rlwinm. r11,r12,47-31,30,31
456
- bne machine_check_idle_common
457
- END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
1353
+ mtlr r4
1354
+ rlwinm r10,r3,47-31,30,31
1355
+ cmpwi cr1,r10,2
1356
+ bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */
1357
+ b idle_return_gpr_loss
4581358 #endif
4591359
460
- /*
461
- * Check if we are coming from hypervisor userspace. If yes then we
462
- * continue in host kernel in V mode to deliver the MC event.
463
- */
464
- rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */
465
- beq 5f
466
- andi. r11,r12,MSR_PR /* See if coming from user. */
467
- bne 9f /* continue in V mode if we are. */
468
-
469
-5:
470
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
471
- /*
472
- * We are coming from kernel context. Check if we are coming from
473
- * guest. if yes, then we can continue. We will fall through
474
- * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
475
- */
476
- lbz r11,HSTATE_IN_GUEST(r13)
477
- cmpwi r11,0 /* Check if coming from guest */
478
- bne 9f /* continue if we are. */
479
-#endif
480
- /*
481
- * At this point we are not sure about what context we come from.
482
- * Queue up the MCE event and return from the interrupt.
483
- * But before that, check if this is an un-recoverable exception.
484
- * If yes, then stay on emergency stack and panic.
485
- */
486
- andi. r11,r12,MSR_RI
487
- bne 2f
488
-1: mfspr r11,SPRN_SRR0
489
- LOAD_HANDLER(r10,unrecover_mce)
490
- mtspr SPRN_SRR0,r10
491
- ld r10,PACAKMSR(r13)
1360
+EXC_COMMON_BEGIN(unrecoverable_mce)
4921361 /*
4931362 * We are going down. But there are chances that we might get hit by
4941363 * another MCE during panic path and we may run into unstable state
....@@ -496,335 +1365,346 @@
4961365 * when another MCE is hit during panic path, system will checkstop
4971366 * and hypervisor will get restarted cleanly by SP.
4981367 */
499
- li r3,MSR_ME
500
- andc r10,r10,r3 /* Turn off MSR_ME */
501
- mtspr SPRN_SRR1,r10
502
- RFI_TO_KERNEL
503
- b .
504
-2:
505
- /*
506
- * Check if we have successfully handled/recovered from error, if not
507
- * then stay on emergency stack and panic.
508
- */
509
- ld r3,RESULT(r1) /* Load result */
510
- cmpdi r3,0 /* see if we handled MCE successfully */
511
-
512
- beq 1b /* if !handled then panic */
513
- /*
514
- * Return from MC interrupt.
515
- * Queue up the MCE event so that we can log it later, while
516
- * returning from kernel or opal call.
517
- */
518
- bl machine_check_queue_event
519
- MACHINE_CHECK_HANDLER_WINDUP
520
- RFI_TO_USER_OR_KERNEL
521
-9:
522
- /* Deliver the machine check to host kernel in V mode. */
5231368 BEGIN_FTR_SECTION
524
- ld r10,ORIG_GPR3(r1)
525
- mtspr SPRN_CFAR,r10
526
-END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
527
- MACHINE_CHECK_HANDLER_WINDUP
528
- b machine_check_pSeries
1369
+ li r10,0 /* clear MSR_RI */
1370
+ mtmsrd r10,1
1371
+ bl disable_machine_check
1372
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
1373
+ ld r10,PACAKMSR(r13)
1374
+ li r3,MSR_ME
1375
+ andc r10,r10,r3
1376
+ mtmsrd r10
5291377
530
-EXC_COMMON_BEGIN(unrecover_mce)
1378
+ lhz r12,PACA_IN_MCE(r13)
1379
+ subi r12,r12,1
1380
+ sth r12,PACA_IN_MCE(r13)
1381
+
5311382 /* Invoke machine_check_exception to print MCE event and panic. */
5321383 addi r3,r1,STACK_FRAME_OVERHEAD
5331384 bl machine_check_exception
1385
+
5341386 /*
535
- * We will not reach here. Even if we did, there is no way out. Call
536
- * unrecoverable_exception and die.
1387
+ * We will not reach here. Even if we did, there is no way out.
1388
+ * Call unrecoverable_exception and die.
5371389 */
538
-1: addi r3,r1,STACK_FRAME_OVERHEAD
1390
+ addi r3,r1,STACK_FRAME_OVERHEAD
5391391 bl unrecoverable_exception
540
- b 1b
1392
+ b .
5411393
5421394
-EXC_REAL_OOL(data_access, 0x300, 0x80)
-EXC_VIRT(data_access, 0x4300, 0x80, 0x300)
-TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)
+/**
+ * Interrupt 0x300 - Data Storage Interrupt (DSI).
+ * This is a synchronous interrupt generated due to a data access exception,
+ * e.g., a load or store which does not have a valid page table entry with
+ * permissions. DAWR matches also fault here, as do RC updates, and minor misc
+ * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc.
+ *
+ * Handling:
+ * - Hash MMU
+ *   Go to do_hash_page first to see if the HPT can be filled from an entry in
+ *   the Linux page table. Hash faults can hit in kernel mode in a fairly
+ *   arbitrary state (e.g., interrupts disabled, locks held) when accessing
+ *   "non-bolted" regions, e.g., vmalloc space. However these should always be
+ *   backed by Linux page tables.
+ *
+ *   If none is found, do a Linux page fault. Linux page faults can happen in
+ *   kernel mode due to user copy operations of course.
+ *
+ *   KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
+ *   MMU context, which may cause a DSI in the host, which must go to the
+ *   KVM handler. MSR[IR] is not enabled, so the real-mode handler will
+ *   always be used regardless of AIL setting.
+ *
+ * - Radix MMU
+ *   The hardware loads from the Linux page table directly, so a fault goes
+ *   immediately to Linux page fault.
+ *
+ * Conditions like DAWR match are handled on the way in to Linux page fault.
+ */
+INT_DEFINE_BEGIN(data_access)
+	IVEC=0x300
+	IDAR=1
+	IDSISR=1
+	IKVM_SKIP=1
+	IKVM_REAL=1
+INT_DEFINE_END(data_access)

1432
+EXC_REAL_BEGIN(data_access, 0x300, 0x80)
1433
+ GEN_INT_ENTRY data_access, virt=0
1434
+EXC_REAL_END(data_access, 0x300, 0x80)
1435
+EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
1436
+ GEN_INT_ENTRY data_access, virt=1
1437
+EXC_VIRT_END(data_access, 0x4300, 0x80)
5471438 EXC_COMMON_BEGIN(data_access_common)
548
- /*
549
- * Here r13 points to the paca, r9 contains the saved CR,
550
- * SRR0 and SRR1 are saved in r11 and r12,
551
- * r9 - r13 are saved in paca->exgen.
552
- */
553
- mfspr r10,SPRN_DAR
554
- std r10,PACA_EXGEN+EX_DAR(r13)
555
- mfspr r10,SPRN_DSISR
556
- stw r10,PACA_EXGEN+EX_DSISR(r13)
557
- EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
558
- RECONCILE_IRQ_STATE(r10, r11)
559
- ld r12,_MSR(r1)
560
- ld r3,PACA_EXGEN+EX_DAR(r13)
561
- lwz r4,PACA_EXGEN+EX_DSISR(r13)
562
- li r5,0x300
563
- std r3,_DAR(r1)
564
- std r4,_DSISR(r1)
1439
+ GEN_COMMON data_access
1440
+ ld r4,_DAR(r1)
1441
+ ld r5,_DSISR(r1)
5651442 BEGIN_MMU_FTR_SECTION
1443
+ ld r6,_MSR(r1)
1444
+ li r3,0x300
5661445 b do_hash_page /* Try to handle as hpte fault */
5671446 MMU_FTR_SECTION_ELSE
5681447 b handle_page_fault
5691448 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
5701449
1450
+ GEN_KVM data_access
1451
+
1452
+
1453
+/**
1454
+ * Interrupt 0x380 - Data Segment Interrupt (DSLB).
1455
+ * This is a synchronous interrupt in response to an MMU fault caused by a
1456
+ * missing SLB entry for HPT, or an address outside the RPT translation range.
1457
+ *
1458
+ * Handling:
1459
+ * - HPT:
1460
+ * This refills the SLB, or reports an access fault similarly to a bad page
1461
+ * fault. When coming from user-mode, the SLB handler may access any kernel
1462
+ * data, though it may itself take a DSLB. When coming from kernel mode,
1463
+ * recursive faults must be avoided so access is restricted to the kernel
1464
+ * image text/data, kernel stack, and any data allocated below
1465
+ * ppc64_bolted_size (first segment). The kernel handler must avoid stomping
1466
+ * on user-handler data structures.
1467
+ *
1468
+ * KVM: Same as 0x300, DSLB must test for KVM guest.
1469
+ *
1470
+ * A dedicated save area EXSLB is used (XXX: but it actually need not be
1471
+ * these days, we could use EXGEN).
1472
+ */
1473
+INT_DEFINE_BEGIN(data_access_slb)
1474
+ IVEC=0x380
1475
+ IAREA=PACA_EXSLB
1476
+ IRECONCILE=0
1477
+ IDAR=1
1478
+ IKVM_SKIP=1
1479
+ IKVM_REAL=1
1480
+INT_DEFINE_END(data_access_slb)
5711481
5721482 EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
573
- SET_SCRATCH0(r13)
574
- EXCEPTION_PROLOG_0(PACA_EXSLB)
575
- b tramp_data_access_slb
1483
+ GEN_INT_ENTRY data_access_slb, virt=0
5761484 EXC_REAL_END(data_access_slb, 0x380, 0x80)
577
-
578
-TRAMP_REAL_BEGIN(tramp_data_access_slb)
579
- EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
580
- mr r12,r3 /* save r3 */
581
- mfspr r3,SPRN_DAR
582
- mfspr r11,SPRN_SRR1
583
- crset 4*cr6+eq
584
- BRANCH_TO_COMMON(r10, slb_miss_common)
585
-
5861485 EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
587
- SET_SCRATCH0(r13)
588
- EXCEPTION_PROLOG_0(PACA_EXSLB)
589
- EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
590
- mr r12,r3 /* save r3 */
591
- mfspr r3,SPRN_DAR
592
- mfspr r11,SPRN_SRR1
593
- crset 4*cr6+eq
594
- BRANCH_TO_COMMON(r10, slb_miss_common)
1486
+ GEN_INT_ENTRY data_access_slb, virt=1
5951487 EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
596
-TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)
597
-
598
-
599
-EXC_REAL_OOL(instruction_access, 0x400, 0x80)
600
-EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400)
601
-TRAMP_KVM(PACA_EXGEN, 0x400)
602
-
603
-EXC_COMMON_BEGIN(instruction_access_common)
604
- EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
605
- RECONCILE_IRQ_STATE(r10, r11)
606
- ld r12,_MSR(r1)
607
- ld r3,_NIP(r1)
608
- andis. r4,r12,DSISR_SRR1_MATCH_64S@h
609
- li r5,0x400
610
- std r3,_DAR(r1)
611
- std r4,_DSISR(r1)
1488
+EXC_COMMON_BEGIN(data_access_slb_common)
1489
+ GEN_COMMON data_access_slb
1490
+ ld r4,_DAR(r1)
1491
+ addi r3,r1,STACK_FRAME_OVERHEAD
6121492 BEGIN_MMU_FTR_SECTION
1493
+ /* HPT case, do SLB fault */
1494
+ bl do_slb_fault
1495
+ cmpdi r3,0
1496
+ bne- 1f
1497
+ b fast_interrupt_return
1498
+1: /* Error case */
1499
+MMU_FTR_SECTION_ELSE
1500
+ /* Radix case, access is outside page table range */
1501
+ li r3,-EFAULT
1502
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1503
+ std r3,RESULT(r1)
1504
+ RECONCILE_IRQ_STATE(r10, r11)
1505
+ ld r4,_DAR(r1)
1506
+ ld r5,RESULT(r1)
1507
+ addi r3,r1,STACK_FRAME_OVERHEAD
1508
+ bl do_bad_slb_fault
1509
+ b interrupt_return
1510
+
1511
+ GEN_KVM data_access_slb
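The common handler above uses a small result convention: do_slb_fault returns
0 for a successful refill and a fault code otherwise, which is parked in
RESULT(r1) before do_bad_slb_fault runs. A hedged C rendering (the stubs are
illustrative, not kernel code):

	static long hpt_slb_refill(unsigned long ea) { (void)ea; return 0; }       /* stub */
	static void bad_slb_fault(unsigned long ea, long rc) { (void)ea; (void)rc; } /* stub */

	static void dslb_model(int radix, unsigned long dar)
	{
		/* Radix has no SLB; a 0x380 there means the address is outside
		 * the translation range, reported as -EFAULT (-14). */
		long rc = radix ? -14 : hpt_slb_refill(dar);

		if (rc == 0)
			return;			/* fast_interrupt_return */
		bad_slb_fault(dar, rc);		/* rc was saved in RESULT(r1) */
	}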
1512
+
1513
+
1514
+/**
1515
+ * Interrupt 0x400 - Instruction Storage Interrupt (ISI).
1516
+ * This is a synchronous interrupt in response to an MMU fault due to an
1517
+ * instruction fetch.
1518
+ *
1519
+ * Handling:
1520
+ * Similar to DSI, though in response to fetch. The faulting address is found
1521
+ * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR).
1522
+ */
1523
+INT_DEFINE_BEGIN(instruction_access)
1524
+ IVEC=0x400
1525
+ IISIDE=1
1526
+ IDAR=1
1527
+ IDSISR=1
1528
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1529
+ IKVM_REAL=1
1530
+#endif
1531
+INT_DEFINE_END(instruction_access)
1532
+
1533
+EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
1534
+ GEN_INT_ENTRY instruction_access, virt=0
1535
+EXC_REAL_END(instruction_access, 0x400, 0x80)
1536
+EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
1537
+ GEN_INT_ENTRY instruction_access, virt=1
1538
+EXC_VIRT_END(instruction_access, 0x4400, 0x80)
1539
+EXC_COMMON_BEGIN(instruction_access_common)
1540
+ GEN_COMMON instruction_access
1541
+ ld r4,_DAR(r1)
1542
+ ld r5,_DSISR(r1)
1543
+BEGIN_MMU_FTR_SECTION
1544
+ ld r6,_MSR(r1)
1545
+ li r3,0x400
6131546 b do_hash_page /* Try to handle as hpte fault */
6141547 MMU_FTR_SECTION_ELSE
6151548 b handle_page_fault
6161549 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
6171550
1551
+ GEN_KVM instruction_access
1552
+
1553
+
1554
+/**
1555
+ * Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
1556
+ * This is a synchronous interrupt in response to an MMU fault due to an
1557
+ * instruction fetch.
1558
+ *
1559
+ * Handling:
1560
+ * Similar to DSLB, though in response to fetch. The faulting address is found
1561
+ * in SRR0 (rather than DAR).
1562
+ */
1563
+INT_DEFINE_BEGIN(instruction_access_slb)
1564
+ IVEC=0x480
1565
+ IAREA=PACA_EXSLB
1566
+ IRECONCILE=0
1567
+ IISIDE=1
1568
+ IDAR=1
1569
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1570
+ IKVM_REAL=1
1571
+#endif
1572
+INT_DEFINE_END(instruction_access_slb)
6181573
6191574 EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
620
- SET_SCRATCH0(r13)
621
- EXCEPTION_PROLOG_0(PACA_EXSLB)
622
- b tramp_instruction_access_slb
1575
+ GEN_INT_ENTRY instruction_access_slb, virt=0
6231576 EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
624
-
625
-TRAMP_REAL_BEGIN(tramp_instruction_access_slb)
626
- EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
627
- mr r12,r3 /* save r3 */
628
- mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
629
- mfspr r11,SPRN_SRR1
630
- crclr 4*cr6+eq
631
- BRANCH_TO_COMMON(r10, slb_miss_common)
632
-
6331577 EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
634
- SET_SCRATCH0(r13)
635
- EXCEPTION_PROLOG_0(PACA_EXSLB)
636
- EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
637
- mr r12,r3 /* save r3 */
638
- mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
639
- mfspr r11,SPRN_SRR1
640
- crclr 4*cr6+eq
641
- BRANCH_TO_COMMON(r10, slb_miss_common)
1578
+ GEN_INT_ENTRY instruction_access_slb, virt=1
6421579 EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
643
-TRAMP_KVM(PACA_EXSLB, 0x480)
644
-
645
-
646
-/*
647
- * This handler is used by the 0x380 and 0x480 SLB miss interrupts, as well as
648
- * the virtual mode 0x4380 and 0x4480 interrupts if AIL is enabled.
649
- */
650
-EXC_COMMON_BEGIN(slb_miss_common)
651
- /*
652
- * r13 points to the PACA, r9 contains the saved CR,
653
- * r12 contains the saved r3,
654
- * r11 contain the saved SRR1, SRR0 is still ready for return
655
- * r3 has the faulting address
656
- * r9 - r13 are saved in paca->exslb.
657
- * cr6.eq is set for a D-SLB miss, clear for a I-SLB miss
658
- * We assume we aren't going to take any exceptions during this
659
- * procedure.
660
- */
661
- mflr r10
662
- stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
663
- std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
664
-
665
- andi. r9,r11,MSR_PR // Check for exception from userspace
666
- cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later
667
-
668
- /*
669
- * Test MSR_RI before calling slb_allocate_realmode, because the
670
- * MSR in r11 gets clobbered. However we still want to allocate
671
- * SLB in case MSR_RI=0, to minimise the risk of getting stuck in
672
- * recursive SLB faults. So use cr5 for this, which is preserved.
673
- */
674
- andi. r11,r11,MSR_RI /* check for unrecoverable exception */
675
- cmpdi cr5,r11,MSR_RI
676
-
677
- crset 4*cr0+eq
678
-#ifdef CONFIG_PPC_BOOK3S_64
1580
+EXC_COMMON_BEGIN(instruction_access_slb_common)
1581
+ GEN_COMMON instruction_access_slb
1582
+ ld r4,_DAR(r1)
1583
+ addi r3,r1,STACK_FRAME_OVERHEAD
6791584 BEGIN_MMU_FTR_SECTION
680
- bl slb_allocate
681
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
682
-#endif
683
-
684
- ld r10,PACA_EXSLB+EX_LR(r13)
685
- lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
686
- mtlr r10
687
-
688
- /*
689
- * Large address, check whether we have to allocate new contexts.
690
- */
691
- beq- 8f
692
-
693
- bne- cr5,2f /* if unrecoverable exception, oops */
694
-
695
- /* All done -- return from exception. */
696
-
697
- bne cr4,1f /* returning to kernel */
698
-
699
- mtcrf 0x80,r9
700
- mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
701
- mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
702
- mtcrf 0x02,r9 /* I/D indication is in cr6 */
703
- mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
704
-
705
- RESTORE_CTR(r9, PACA_EXSLB)
706
- RESTORE_PPR_PACA(PACA_EXSLB, r9)
707
- mr r3,r12
708
- ld r9,PACA_EXSLB+EX_R9(r13)
709
- ld r10,PACA_EXSLB+EX_R10(r13)
710
- ld r11,PACA_EXSLB+EX_R11(r13)
711
- ld r12,PACA_EXSLB+EX_R12(r13)
712
- ld r13,PACA_EXSLB+EX_R13(r13)
713
- RFI_TO_USER
714
- b . /* prevent speculative execution */
715
-1:
716
- mtcrf 0x80,r9
717
- mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
718
- mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
719
- mtcrf 0x02,r9 /* I/D indication is in cr6 */
720
- mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
721
-
722
- RESTORE_CTR(r9, PACA_EXSLB)
723
- RESTORE_PPR_PACA(PACA_EXSLB, r9)
724
- mr r3,r12
725
- ld r9,PACA_EXSLB+EX_R9(r13)
726
- ld r10,PACA_EXSLB+EX_R10(r13)
727
- ld r11,PACA_EXSLB+EX_R11(r13)
728
- ld r12,PACA_EXSLB+EX_R12(r13)
729
- ld r13,PACA_EXSLB+EX_R13(r13)
730
- RFI_TO_KERNEL
731
- b . /* prevent speculative execution */
732
-
733
-
734
-2: std r3,PACA_EXSLB+EX_DAR(r13)
735
- mr r3,r12
736
- mfspr r11,SPRN_SRR0
737
- mfspr r12,SPRN_SRR1
738
- LOAD_HANDLER(r10,unrecov_slb)
739
- mtspr SPRN_SRR0,r10
740
- ld r10,PACAKMSR(r13)
741
- mtspr SPRN_SRR1,r10
742
- RFI_TO_KERNEL
743
- b .
744
-
745
-8: std r3,PACA_EXSLB+EX_DAR(r13)
746
- mr r3,r12
747
- mfspr r11,SPRN_SRR0
748
- mfspr r12,SPRN_SRR1
749
- LOAD_HANDLER(r10, large_addr_slb)
750
- mtspr SPRN_SRR0,r10
751
- ld r10,PACAKMSR(r13)
752
- mtspr SPRN_SRR1,r10
753
- RFI_TO_KERNEL
754
- b .
755
-
756
-EXC_COMMON_BEGIN(unrecov_slb)
757
- EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1585
+ /* HPT case, do SLB fault */
1586
+ bl do_slb_fault
1587
+ cmpdi r3,0
1588
+ bne- 1f
1589
+ b fast_interrupt_return
1590
+1: /* Error case */
1591
+MMU_FTR_SECTION_ELSE
1592
+ /* Radix case, access is outside page table range */
1593
+ li r3,-EFAULT
1594
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1595
+ std r3,RESULT(r1)
7581596 RECONCILE_IRQ_STATE(r10, r11)
759
- bl save_nvgprs
760
-1: addi r3,r1,STACK_FRAME_OVERHEAD
761
- bl unrecoverable_exception
762
- b 1b
1597
+ ld r4,_DAR(r1)
1598
+ ld r5,RESULT(r1)
1599
+ addi r3,r1,STACK_FRAME_OVERHEAD
1600
+ bl do_bad_slb_fault
1601
+ b interrupt_return
7631602
764
-EXC_COMMON_BEGIN(large_addr_slb)
765
- EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB)
766
- RECONCILE_IRQ_STATE(r10, r11)
767
- ld r3, PACA_EXSLB+EX_DAR(r13)
768
- std r3, _DAR(r1)
769
- beq cr6, 2f
770
- li r10, 0x481 /* fix trap number for I-SLB miss */
771
- std r10, _TRAP(r1)
772
-2: bl save_nvgprs
773
- addi r3, r1, STACK_FRAME_OVERHEAD
774
- bl slb_miss_large_addr
775
- b ret_from_except
1603
+ GEN_KVM instruction_access_slb
1604
+
1605
+
1606
+/**
1607
+ * Interrupt 0x500 - External Interrupt.
1608
+ * This is an asynchronous maskable interrupt in response to an "external
1609
+ * exception" from the interrupt controller or hypervisor (e.g., device
1610
+ * interrupt). It is maskable in hardware by clearing MSR[EE], and
1611
+ * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
1612
+ *
1613
+ * When running in HV mode, Linux sets up the LPCR[LPES] bit such that
1614
+ * interrupts are delivered with HSRR registers, guests use SRRs, which
1615
+ * requires IHSRR_IF_HVMODE.
1616
+ *
1617
+ * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that
1618
+ * external interrupts are delivered as Hypervisor Virtualization Interrupts
1619
+ * rather than External Interrupts.
1620
+ *
1621
+ * Handling:
1622
+ * This calls into Linux IRQ handler. NVGPRs are not saved to reduce overhead,
1623
+ * because registers at the time of the interrupt are not so important as it is
1624
+ * asynchronous.
1625
+ *
1626
+ * If soft masked, the masked handler will note the pending interrupt for
1627
+ * replay, and clear MSR[EE] in the interrupted context.
1628
+ */
1629
+INT_DEFINE_BEGIN(hardware_interrupt)
1630
+ IVEC=0x500
1631
+ IHSRR_IF_HVMODE=1
1632
+ IMASK=IRQS_DISABLED
1633
+ IKVM_REAL=1
1634
+ IKVM_VIRT=1
1635
+INT_DEFINE_END(hardware_interrupt)
7761636
7771637 EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
778
- .globl hardware_interrupt_hv;
779
-hardware_interrupt_hv:
780
- BEGIN_FTR_SECTION
781
- MASKABLE_EXCEPTION_HV(0x500, hardware_interrupt_common, IRQS_DISABLED)
782
- FTR_SECTION_ELSE
783
- MASKABLE_EXCEPTION(0x500, hardware_interrupt_common, IRQS_DISABLED)
784
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
1638
+ GEN_INT_ENTRY hardware_interrupt, virt=0
7851639 EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
786
-
7871640 EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
788
- .globl hardware_interrupt_relon_hv;
789
-hardware_interrupt_relon_hv:
790
- BEGIN_FTR_SECTION
791
- MASKABLE_RELON_EXCEPTION_HV(0x500, hardware_interrupt_common,
792
- IRQS_DISABLED)
793
- FTR_SECTION_ELSE
794
- __MASKABLE_RELON_EXCEPTION(0x500, hardware_interrupt_common,
795
- EXC_STD, SOFTEN_TEST_PR, IRQS_DISABLED)
796
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
1641
+ GEN_INT_ENTRY hardware_interrupt, virt=1
7971642 EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
1643
+EXC_COMMON_BEGIN(hardware_interrupt_common)
1644
+ GEN_COMMON hardware_interrupt
1645
+ FINISH_NAP
1646
+ RUNLATCH_ON
1647
+ addi r3,r1,STACK_FRAME_OVERHEAD
1648
+ bl do_IRQ
1649
+ b interrupt_return
7981650
799
-TRAMP_KVM(PACA_EXGEN, 0x500)
800
-TRAMP_KVM_HV(PACA_EXGEN, 0x500)
801
-EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)
1651
+ GEN_KVM hardware_interrupt
8021652
8031653
804
-EXC_REAL(alignment, 0x600, 0x100)
805
-EXC_VIRT(alignment, 0x4600, 0x100, 0x600)
806
-TRAMP_KVM(PACA_EXGEN, 0x600)
1654
+/**
1655
+ * Interrupt 0x600 - Alignment Interrupt
1656
+ * This is a synchronous interrupt in response to data alignment fault.
1657
+ */
1658
+INT_DEFINE_BEGIN(alignment)
1659
+ IVEC=0x600
1660
+ IDAR=1
1661
+ IDSISR=1
1662
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1663
+ IKVM_REAL=1
1664
+#endif
1665
+INT_DEFINE_END(alignment)
1666
+
1667
+EXC_REAL_BEGIN(alignment, 0x600, 0x100)
1668
+ GEN_INT_ENTRY alignment, virt=0
1669
+EXC_REAL_END(alignment, 0x600, 0x100)
1670
+EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
1671
+ GEN_INT_ENTRY alignment, virt=1
1672
+EXC_VIRT_END(alignment, 0x4600, 0x100)
8071673 EXC_COMMON_BEGIN(alignment_common)
808
- mfspr r10,SPRN_DAR
809
- std r10,PACA_EXGEN+EX_DAR(r13)
810
- mfspr r10,SPRN_DSISR
811
- stw r10,PACA_EXGEN+EX_DSISR(r13)
812
- EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
813
- ld r3,PACA_EXGEN+EX_DAR(r13)
814
- lwz r4,PACA_EXGEN+EX_DSISR(r13)
815
- std r3,_DAR(r1)
816
- std r4,_DSISR(r1)
817
- bl save_nvgprs
818
- RECONCILE_IRQ_STATE(r10, r11)
1674
+ GEN_COMMON alignment
8191675 addi r3,r1,STACK_FRAME_OVERHEAD
8201676 bl alignment_exception
821
- b ret_from_except
1677
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
1678
+ b interrupt_return
1679
+
1680
+ GEN_KVM alignment
8221681
8231682
824
-EXC_REAL(program_check, 0x700, 0x100)
825
-EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
826
-TRAMP_KVM(PACA_EXGEN, 0x700)
1683
+/**
1684
+ * Interrupt 0x700 - Program Interrupt (program check).
1685
+ * This is a synchronous interrupt in response to various instruction faults:
1686
+ * traps, privilege errors, TM errors, floating point exceptions.
1687
+ *
1688
+ * Handling:
1689
+ * This interrupt may use the "emergency stack" in some cases when taken
1690
+ * from kernel context, which complicates handling.
1691
+ */
1692
+INT_DEFINE_BEGIN(program_check)
1693
+ IVEC=0x700
1694
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1695
+ IKVM_REAL=1
1696
+#endif
1697
+INT_DEFINE_END(program_check)
1698
+
1699
+EXC_REAL_BEGIN(program_check, 0x700, 0x100)
1700
+ GEN_INT_ENTRY program_check, virt=0
1701
+EXC_REAL_END(program_check, 0x700, 0x100)
1702
+EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
1703
+ GEN_INT_ENTRY program_check, virt=1
1704
+EXC_VIRT_END(program_check, 0x4700, 0x100)
8271705 EXC_COMMON_BEGIN(program_check_common)
1706
+ __GEN_COMMON_ENTRY program_check
1707
+
8281708 /*
8291709 * It's possible to receive a TM Bad Thing type program check with
8301710 * userspace register values (in particular r1), but with SRR1 reporting
....@@ -833,39 +1713,71 @@
8331713 * we switch to the emergency stack if we're taking a TM Bad Thing from
8341714 * the kernel.
8351715 */
836
- li r10,MSR_PR /* Build a mask of MSR_PR .. */
837
- oris r10,r10,0x200000@h /* .. and SRR1_PROGTM */
838
- and r10,r10,r12 /* Mask SRR1 with that. */
839
- srdi r10,r10,8 /* Shift it so we can compare */
840
- cmpldi r10,(0x200000 >> 8) /* .. with an immediate. */
841
- bne 1f /* If != go to normal path. */
8421716
843
- /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack */
844
- andi. r10,r12,MSR_PR; /* Set CR0 correctly for label */
1717
+ andi. r10,r12,MSR_PR
1718
+ bne .Lnormal_stack /* If userspace, go normal path */
1719
+
1720
+ andis. r10,r12,(SRR1_PROGTM)@h
1721
+ bne .Lemergency_stack /* If TM, emergency */
1722
+
1723
+ cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */
1724
+ blt .Lnormal_stack /* normal path if not */
1725
+
1726
+ /* Use the emergency stack */
1727
+.Lemergency_stack:
1728
+ andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
8451729 /* 3 in EXCEPTION_PROLOG_COMMON */
8461730 mr r10,r1 /* Save r1 */
8471731 ld r1,PACAEMERGSP(r13) /* Use emergency stack */
8481732 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
849
- b 3f /* Jump into the macro !! */
850
-1: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
851
- bl save_nvgprs
852
- RECONCILE_IRQ_STATE(r10, r11)
1733
+ __ISTACK(program_check)=0
1734
+ __GEN_COMMON_BODY program_check
1735
+ b .Ldo_program_check
1736
+
1737
+.Lnormal_stack:
1738
+ __ISTACK(program_check)=1
1739
+ __GEN_COMMON_BODY program_check
1740
+
1741
+.Ldo_program_check:
8531742 addi r3,r1,STACK_FRAME_OVERHEAD
8541743 bl program_check_exception
855
- b ret_from_except
1744
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
1745
+ b interrupt_return
1746
+
1747
+ GEN_KVM program_check
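The stack-selection logic above reduces to three tests; a hedged C summary
(MSR_PR and SRR1_PROGTM follow the ISA bit positions used in the assembly,
the INT_FRAME_SIZE value is illustrative):

	#define MSR_PR		(1UL << 14)
	#define SRR1_PROGTM	0x200000UL	/* TM Bad Thing */
	#define INT_FRAME_SIZE	448L		/* illustrative */

	static int use_emergency_stack(unsigned long srr1, long r1)
	{
		if (srr1 & MSR_PR)
			return 0;	/* from userspace: normal kernel stack */
		if (srr1 & SRR1_PROGTM)
			return 1;	/* TM Bad Thing: r1 may hold a user value */
		/* Signed compare, as in the cmpdi above: a usable kernel stack
		 * pointer is far more negative than -INT_FRAME_SIZE. */
		return r1 >= -INT_FRAME_SIZE;
	}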
8561748
8571749
858
-EXC_REAL(fp_unavailable, 0x800, 0x100)
859
-EXC_VIRT(fp_unavailable, 0x4800, 0x100, 0x800)
860
-TRAMP_KVM(PACA_EXGEN, 0x800)
1750
+/*
1751
+ * Interrupt 0x800 - Floating-Point Unavailable Interrupt.
1752
+ * This is a synchronous interrupt in response to executing an fp instruction
1753
+ * with MSR[FP]=0.
1754
+ *
1755
+ * Handling:
1756
+ * This will load FP registers and enable the FP bit if coming from userspace,
1757
+ * otherwise report a bad kernel use of FP.
1758
+ */
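A hedged sketch of the lazy-FP decision just described (MSR bit values per
the ISA; the load_up stub is illustrative):

	#define MSR_FP	(1UL << 13)
	#define MSR_PR	(1UL << 14)

	struct regs_model { unsigned long msr; };

	static void load_up_fpu_model(struct regs_model *r)
	{
		/* restore the thread's FP registers ... */
		r->msr |= MSR_FP;	/* ... and return with FP enabled, so the
					   faulting instruction can be retried */
	}

	static void fp_unavailable_model(struct regs_model *r)
	{
		if (r->msr & MSR_PR)
			load_up_fpu_model(r);
		/* else: kernel used FP with MSR[FP]=0 -- treated as a bug */
	}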
1759
+INT_DEFINE_BEGIN(fp_unavailable)
1760
+ IVEC=0x800
1761
+ IRECONCILE=0
1762
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1763
+ IKVM_REAL=1
1764
+#endif
1765
+INT_DEFINE_END(fp_unavailable)
1766
+
1767
+EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100)
1768
+ GEN_INT_ENTRY fp_unavailable, virt=0
1769
+EXC_REAL_END(fp_unavailable, 0x800, 0x100)
1770
+EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100)
1771
+ GEN_INT_ENTRY fp_unavailable, virt=1
1772
+EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
8611773 EXC_COMMON_BEGIN(fp_unavailable_common)
862
- EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1774
+ GEN_COMMON fp_unavailable
8631775 bne 1f /* if from user, just load it up */
864
- bl save_nvgprs
8651776 RECONCILE_IRQ_STATE(r10, r11)
8661777 addi r3,r1,STACK_FRAME_OVERHEAD
8671778 bl kernel_fp_unavailable_exception
868
- BUG_OPCODE
1779
+0: trap
1780
+ EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
8691781 1:
8701782 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
8711783 BEGIN_FTR_SECTION
....@@ -877,80 +1789,186 @@
8771789 END_FTR_SECTION_IFSET(CPU_FTR_TM)
8781790 #endif
8791791 bl load_up_fpu
880
- b fast_exception_return
1792
+ b fast_interrupt_return
8811793 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
8821794 2: /* User process was in a transaction */
883
- bl save_nvgprs
8841795 RECONCILE_IRQ_STATE(r10, r11)
8851796 addi r3,r1,STACK_FRAME_OVERHEAD
8861797 bl fp_unavailable_tm
887
- b ret_from_except
1798
+ b interrupt_return
8881799 #endif
8891800
890
-
891
-EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
892
-EXC_VIRT_OOL_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
893
-TRAMP_KVM(PACA_EXGEN, 0x900)
894
-EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
1801
+ GEN_KVM fp_unavailable
8951802
8961803
897
-EXC_REAL_OOL_HV(hdecrementer, 0x980, 0x80)
898
-EXC_VIRT_OOL_HV(hdecrementer, 0x4980, 0x80, 0x980)
899
-TRAMP_KVM_HV(PACA_EXGEN, 0x980)
900
-EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)
1804
+/**
1805
+ * Interrupt 0x900 - Decrementer Interrupt.
1806
+ * This is an asynchronous interrupt in response to a decrementer exception
1807
+ * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing
1808
+ * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e.,
1809
+ * local_irq_disable()).
1810
+ *
1811
+ * Handling:
1812
+ * This calls into Linux timer handler. NVGPRs are not saved (see 0x500).
1813
+ *
1814
+ * If soft masked, the masked handler will note the pending interrupt for
1815
+ * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled
1816
+ * in the interrupted context.
1817
+ * If PPC_WATCHDOG is configured, the soft masked handler will actually set
1818
+ * things back up to run soft_nmi_interrupt as a regular interrupt handler
1819
+ * on the emergency stack.
1820
+ */
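A hedged model of the masked path described above (set_dec stands in for
writing the DEC SPR):

	static int dec_pending;				/* replay note */
	static void set_dec(unsigned int v) { (void)v; }  /* stub: mtspr(DEC, v) */

	static void masked_decrementer_model(void)
	{
		dec_pending = 1;	/* replayed at local_irq_enable() */
		set_dec(0x7fffffff);	/* push DEC far into the future so it
					   stops re-firing; MSR[EE] can stay
					   enabled in the interrupted context */
	}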
1821
+INT_DEFINE_BEGIN(decrementer)
1822
+ IVEC=0x900
1823
+ IMASK=IRQS_DISABLED
1824
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1825
+ IKVM_REAL=1
1826
+#endif
1827
+INT_DEFINE_END(decrementer)
1828
+
1829
+EXC_REAL_BEGIN(decrementer, 0x900, 0x80)
1830
+ GEN_INT_ENTRY decrementer, virt=0
1831
+EXC_REAL_END(decrementer, 0x900, 0x80)
1832
+EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
1833
+ GEN_INT_ENTRY decrementer, virt=1
1834
+EXC_VIRT_END(decrementer, 0x4900, 0x80)
1835
+EXC_COMMON_BEGIN(decrementer_common)
1836
+ GEN_COMMON decrementer
1837
+ FINISH_NAP
1838
+ RUNLATCH_ON
1839
+ addi r3,r1,STACK_FRAME_OVERHEAD
1840
+ bl timer_interrupt
1841
+ b interrupt_return
1842
+
1843
+ GEN_KVM decrementer
9011844
9021845
903
-EXC_REAL_MASKABLE(doorbell_super, 0xa00, 0x100, IRQS_DISABLED)
904
-EXC_VIRT_MASKABLE(doorbell_super, 0x4a00, 0x100, 0xa00, IRQS_DISABLED)
905
-TRAMP_KVM(PACA_EXGEN, 0xa00)
1846
+/**
1847
+ * Interrupt 0x980 - Hypervisor Decrementer Interrupt.
1848
+ * This is an asynchronous interrupt, similar to 0x900 but for the HDEC
1849
+ * register.
1850
+ *
1851
+ * Handling:
1852
+ * Linux does not use this outside KVM where it's used to keep a host timer
1853
+ * while the guest is given control of DEC. It should normally be caught by
1854
+ * the KVM test and routed there.
1855
+ */
1856
+INT_DEFINE_BEGIN(hdecrementer)
1857
+ IVEC=0x980
1858
+ IHSRR=1
1859
+ ISTACK=0
1860
+ IRECONCILE=0
1861
+ IKVM_REAL=1
1862
+ IKVM_VIRT=1
1863
+INT_DEFINE_END(hdecrementer)
1864
+
1865
+EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80)
1866
+ GEN_INT_ENTRY hdecrementer, virt=0
1867
+EXC_REAL_END(hdecrementer, 0x980, 0x80)
1868
+EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80)
1869
+ GEN_INT_ENTRY hdecrementer, virt=1
1870
+EXC_VIRT_END(hdecrementer, 0x4980, 0x80)
1871
+EXC_COMMON_BEGIN(hdecrementer_common)
1872
+ __GEN_COMMON_ENTRY hdecrementer
1873
+ /*
1874
+ * Hypervisor decrementer interrupts not caught by the KVM test
1875
+ * shouldn't occur but are sometimes left pending on exit from a KVM
1876
+ * guest. We don't need to do anything to clear them, as they are
1877
+ * edge-triggered.
1878
+ *
1879
+ * Be careful to avoid touching the kernel stack.
1880
+ */
1881
+ ld r10,PACA_EXGEN+EX_CTR(r13)
1882
+ mtctr r10
1883
+ mtcrf 0x80,r9
1884
+ ld r9,PACA_EXGEN+EX_R9(r13)
1885
+ ld r10,PACA_EXGEN+EX_R10(r13)
1886
+ ld r11,PACA_EXGEN+EX_R11(r13)
1887
+ ld r12,PACA_EXGEN+EX_R12(r13)
1888
+ ld r13,PACA_EXGEN+EX_R13(r13)
1889
+ HRFI_TO_KERNEL
1890
+
1891
+ GEN_KVM hdecrementer
1892
+
1893
+
1894
+/**
1895
+ * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt.
1896
+ * This is an asynchronous interrupt in response to a msgsndp doorbell.
1897
+ * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
1898
+ * IRQS_DISABLED mask (i.e., local_irq_disable()).
1899
+ *
1900
+ * Handling:
1901
+ * Guests may use this for IPIs between threads in a core if the
1902
+ * hypervisor supports it. NVGPRS are not saved (see 0x500).
1903
+ *
1904
+ * If soft masked, the masked handler will note the pending interrupt for
1905
+ * replay, leaving MSR[EE] enabled in the interrupted context because the
1906
+ * doorbells are edge triggered.
1907
+ */
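Contrast with 0x500: because the doorbell is edge-triggered, the masked path
only has to note it. A minimal sketch:

	static int dbell_pending;

	static void masked_doorbell_model(void)
	{
		dbell_pending = 1;	/* replayed at local_irq_enable(); no
					   need to clear MSR[EE], the doorbell
					   will not re-fire on its own */
	}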
1908
+INT_DEFINE_BEGIN(doorbell_super)
1909
+ IVEC=0xa00
1910
+ IMASK=IRQS_DISABLED
1911
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1912
+ IKVM_REAL=1
1913
+#endif
1914
+INT_DEFINE_END(doorbell_super)
1915
+
1916
+EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100)
1917
+ GEN_INT_ENTRY doorbell_super, virt=0
1918
+EXC_REAL_END(doorbell_super, 0xa00, 0x100)
1919
+EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
1920
+ GEN_INT_ENTRY doorbell_super, virt=1
1921
+EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
1922
+EXC_COMMON_BEGIN(doorbell_super_common)
1923
+ GEN_COMMON doorbell_super
1924
+ FINISH_NAP
1925
+ RUNLATCH_ON
1926
+ addi r3,r1,STACK_FRAME_OVERHEAD
9061927 #ifdef CONFIG_PPC_DOORBELL
907
-EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception)
1928
+ bl doorbell_exception
9081929 #else
909
-EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception)
1930
+ bl unknown_exception
9101931 #endif
1932
+ b interrupt_return
1933
+
1934
+ GEN_KVM doorbell_super
9111935
9121936
913
-EXC_REAL(trap_0b, 0xb00, 0x100)
914
-EXC_VIRT(trap_0b, 0x4b00, 0x100, 0xb00)
915
-TRAMP_KVM(PACA_EXGEN, 0xb00)
916
-EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
1937
+EXC_REAL_NONE(0xb00, 0x100)
1938
+EXC_VIRT_NONE(0x4b00, 0x100)
9171939
918
-/*
919
- * system call / hypercall (0xc00, 0x4c00)
920
- *
921
- * The system call exception is invoked with "sc 0" and does not alter HV bit.
922
- * There is support for kernel code to invoke system calls but there are no
923
- * in-tree users.
924
- *
925
- * The hypercall is invoked with "sc 1" and sets HV=1.
1940
+/**
1941
+ * Interrupt 0xc00 - System Call Interrupt (syscall, hcall).
1942
+ * This is a synchronous interrupt invoked with the "sc" instruction. The
1943
+ * system call is invoked with "sc 0" and does not alter the HV bit, so it
1944
+ * is directed to the currently running OS. The hypercall is invoked with
1945
+ * "sc 1" and it sets HV=1, so it elevates to hypervisor.
9261946 *
9271947 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
9281948 * 0x4c00 virtual mode.
9291949 *
1950
+ * Handling:
1951
+ * If the KVM test fires then it was due to a hypercall and is accordingly
1952
+ * routed to KVM. Otherwise this executes a normal Linux system call.
1953
+ *
9301954 * Call convention:
9311955 *
932
- * syscall register convention is in Documentation/powerpc/syscall64-abi.txt
933
- *
934
- * For hypercalls, the register convention is as follows:
935
- * r0 volatile
936
- * r1-2 nonvolatile
937
- * r3 volatile parameter and return value for status
938
- * r4-r10 volatile input and output value
939
- * r11 volatile hypercall number and output value
940
- * r12 volatile input and output value
941
- * r13-r31 nonvolatile
942
- * LR nonvolatile
943
- * CTR volatile
944
- * XER volatile
945
- * CR0-1 CR5-7 volatile
946
- * CR2-4 nonvolatile
947
- * Other registers nonvolatile
1956
+ * The syscall and hypercall register conventions are documented in
1957
+ * Documentation/powerpc/syscall64-abi.rst and
1958
+ * Documentation/powerpc/papr_hcalls.rst respectively.
9481959 *
9491960 * The intersection of volatile registers that don't contain possible
9501961 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
9511962 * without saving, though xer is not a good idea to use, as hardware may
9521963 * interpret some bits so it may be costly to change them.
9531964 */
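For illustration, a hedged userspace rendering of the "sc 0" convention
referenced above (per Documentation/powerpc/syscall64-abi.rst: number in r0,
arguments from r3, result in r3, error flagged in cr0.SO; builds only for
powerpc64 targets, and is not the handler itself):

	static long sc0_one_arg(long nr, long arg1)
	{
		register long r0 asm("r0") = nr;
		register long r3 asm("r3") = arg1;

		asm volatile("sc"
			     : "+r" (r3)
			     : "r" (r0)
			     : "r4", "r5", "r6", "r7", "r8", "r9", "r10",
			       "r11", "r12", "ctr", "xer", "cr0", "memory");
		return r3;	/* callers should also test cr0.SO for error */
	}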
1965
+INT_DEFINE_BEGIN(system_call)
1966
+ IVEC=0xc00
1967
+ IKVM_REAL=1
1968
+ IKVM_VIRT=1
1969
+INT_DEFINE_END(system_call)
1970
+
1971
+.macro SYSTEM_CALL virt
9541972 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
9551973 /*
9561974 * There is a little bit of juggling to get syscall and hcall
....@@ -960,98 +1978,70 @@
9601978 * Userspace syscalls have already saved the PPR, hcalls must save
9611979 * it before setting HMT_MEDIUM.
9621980 */
963
-#define SYSCALL_KVMTEST \
964
- mtctr r13; \
965
- GET_PACA(r13); \
966
- std r10,PACA_EXGEN+EX_R10(r13); \
967
- INTERRUPT_TO_KERNEL; \
968
- KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
969
- HMT_MEDIUM; \
970
- mfctr r9;
971
-
1981
+ mtctr r13
1982
+ GET_PACA(r13)
1983
+ std r10,PACA_EXGEN+EX_R10(r13)
1984
+ INTERRUPT_TO_KERNEL
1985
+ KVMTEST system_call /* uses r10, branch to system_call_kvm */
1986
+ mfctr r9
9721987 #else
973
-#define SYSCALL_KVMTEST \
974
- HMT_MEDIUM; \
975
- mr r9,r13; \
976
- GET_PACA(r13); \
977
- INTERRUPT_TO_KERNEL;
1988
+ mr r9,r13
1989
+ GET_PACA(r13)
1990
+ INTERRUPT_TO_KERNEL
9781991 #endif
979
-
980
-#define LOAD_SYSCALL_HANDLER(reg) \
981
- __LOAD_HANDLER(reg, system_call_common)
982
-
983
-/*
984
- * After SYSCALL_KVMTEST, we reach here with PACA in r13, r13 in r9,
985
- * and HMT_MEDIUM.
986
- */
987
-#define SYSCALL_REAL \
988
- mfspr r11,SPRN_SRR0 ; \
989
- mfspr r12,SPRN_SRR1 ; \
990
- LOAD_SYSCALL_HANDLER(r10) ; \
991
- mtspr SPRN_SRR0,r10 ; \
992
- ld r10,PACAKMSR(r13) ; \
993
- mtspr SPRN_SRR1,r10 ; \
994
- RFI_TO_KERNEL ; \
995
- b . ; /* prevent speculative execution */
9961992
9971993 #ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
998
-#define SYSCALL_FASTENDIAN_TEST \
999
-BEGIN_FTR_SECTION \
1000
- cmpdi r0,0x1ebe ; \
1001
- beq- 1f ; \
1002
-END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
1003
-
1004
-#define SYSCALL_FASTENDIAN \
1005
- /* Fast LE/BE switch system call */ \
1006
-1: mfspr r12,SPRN_SRR1 ; \
1007
- xori r12,r12,MSR_LE ; \
1008
- mtspr SPRN_SRR1,r12 ; \
1009
- mr r13,r9 ; \
1010
- RFI_TO_USER ; /* return to userspace */ \
1011
- b . ; /* prevent speculative execution */
1012
-#else
1013
-#define SYSCALL_FASTENDIAN_TEST
1014
-#define SYSCALL_FASTENDIAN
1015
-#endif /* CONFIG_PPC_FAST_ENDIAN_SWITCH */
1016
-
1017
-#if defined(CONFIG_RELOCATABLE)
1018
- /*
1019
- * We can't branch directly so we do it via the CTR which
1020
- * is volatile across system calls.
1021
- */
1022
-#define SYSCALL_VIRT \
1023
- LOAD_SYSCALL_HANDLER(r10) ; \
1024
- mtctr r10 ; \
1025
- mfspr r11,SPRN_SRR0 ; \
1026
- mfspr r12,SPRN_SRR1 ; \
1027
- li r10,MSR_RI ; \
1028
- mtmsrd r10,1 ; \
1029
- bctr ;
1030
-#else
1031
- /* We can branch directly */
1032
-#define SYSCALL_VIRT \
1033
- mfspr r11,SPRN_SRR0 ; \
1034
- mfspr r12,SPRN_SRR1 ; \
1035
- li r10,MSR_RI ; \
1036
- mtmsrd r10,1 ; /* Set RI (EE=0) */ \
1037
- b system_call_common ;
1994
+BEGIN_FTR_SECTION
1995
+ cmpdi r0,0x1ebe
1996
+ beq- 1f
1997
+END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
10381998 #endif
10391999
1040
-EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
1041
- SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
1042
- SYSCALL_FASTENDIAN_TEST
1043
- SYSCALL_REAL
1044
- SYSCALL_FASTENDIAN
1045
-EXC_REAL_END(system_call, 0xc00, 0x100)
2000
+ /* We reach here with PACA in r13, r13 in r9. */
2001
+ mfspr r11,SPRN_SRR0
2002
+ mfspr r12,SPRN_SRR1
10462003
2004
+ HMT_MEDIUM
2005
+
2006
+ .if ! \virt
2007
+ __LOAD_HANDLER(r10, system_call_common)
2008
+ mtspr SPRN_SRR0,r10
2009
+ ld r10,PACAKMSR(r13)
2010
+ mtspr SPRN_SRR1,r10
2011
+ RFI_TO_KERNEL
2012
+ b . /* prevent speculative execution */
2013
+ .else
2014
+ li r10,MSR_RI
2015
+ mtmsrd r10,1 /* Set RI (EE=0) */
2016
+#ifdef CONFIG_RELOCATABLE
2017
+ __LOAD_HANDLER(r10, system_call_common)
2018
+ mtctr r10
2019
+ bctr
2020
+#else
2021
+ b system_call_common
2022
+#endif
2023
+ .endif
2024
+
2025
+#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
2026
+ /* Fast LE/BE switch system call */
2027
+1: mfspr r12,SPRN_SRR1
2028
+ xori r12,r12,MSR_LE
2029
+ mtspr SPRN_SRR1,r12
2030
+ mr r13,r9
2031
+ RFI_TO_USER /* return to userspace */
2032
+ b . /* prevent speculative execution */
2033
+#endif
2034
+.endm
2035
+
2036
+EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
2037
+ SYSTEM_CALL 0
2038
+EXC_REAL_END(system_call, 0xc00, 0x100)
10472039 EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
1048
- SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
1049
- SYSCALL_FASTENDIAN_TEST
1050
- SYSCALL_VIRT
1051
- SYSCALL_FASTENDIAN
2040
+ SYSTEM_CALL 1
10522041 EXC_VIRT_END(system_call, 0x4c00, 0x100)
10532042
10542043 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2044
+TRAMP_REAL_BEGIN(system_call_kvm)
10552045 /*
10562046 * This is a hcall, so register convention is as above, with these
10572047 * differences:
....@@ -1059,134 +2049,307 @@
10592049 * ctr = orig r13
10602050 * orig r10 saved in PACA
10612051 */
1062
-TRAMP_KVM_BEGIN(do_kvm_0xc00)
10632052 /*
10642053 * Save the PPR (on systems that support it) before changing to
10652054 * HMT_MEDIUM. That allows the KVM code to save that value into the
10662055 * guest state (it is the guest's PPR value).
10672056 */
1068
- OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
2057
+BEGIN_FTR_SECTION
2058
+ mfspr r10,SPRN_PPR
2059
+ std r10,HSTATE_PPR(r13)
2060
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
10692061 HMT_MEDIUM
1070
- OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
10712062 mfctr r10
10722063 SET_SCRATCH0(r10)
1073
- std r9,PACA_EXGEN+EX_R9(r13)
1074
- mfcr r9
1075
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
2064
+ mfcr r10
2065
+ std r12,HSTATE_SCRATCH0(r13)
2066
+ sldi r12,r10,32
2067
+ ori r12,r12,0xc00
2068
+#ifdef CONFIG_RELOCATABLE
2069
+ /*
2070
+ * Requires __LOAD_FAR_HANDLER because kvmppc_interrupt lives
2071
+ * outside the head section.
2072
+ */
2073
+ __LOAD_FAR_HANDLER(r10, kvmppc_interrupt)
2074
+ mtctr r10
2075
+ ld r10,PACA_EXGEN+EX_R10(r13)
2076
+ bctr
2077
+#else
2078
+ ld r10,PACA_EXGEN+EX_R10(r13)
2079
+ b kvmppc_interrupt
2080
+#endif
10762081 #endif
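kvmppc_interrupt lives outside the head section, so its address is composed
from the 64K-aligned kernel base plus a full 32-bit offset rather than a
single 16-bit or. A hedged C model of that composition (assumes the offset is
below 2GB):

	#include <stdint.h>

	static uint64_t load_far_handler_model(uint64_t kernelbase,
					       uint64_t abs_off)
	{
		uint64_t reg = kernelbase | (abs_off & 0xffff);	/* low half  */
		reg += abs_off & 0xffff0000;			/* high half */
		return reg;	/* reaches labels more than 64K past the base */
	}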
10772082
10782083
1079
-EXC_REAL(single_step, 0xd00, 0x100)
1080
-EXC_VIRT(single_step, 0x4d00, 0x100, 0xd00)
1081
-TRAMP_KVM(PACA_EXGEN, 0xd00)
1082
-EXC_COMMON(single_step_common, 0xd00, single_step_exception)
1083
-
1084
-EXC_REAL_OOL_HV(h_data_storage, 0xe00, 0x20)
1085
-EXC_VIRT_OOL_HV(h_data_storage, 0x4e00, 0x20, 0xe00)
1086
-TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0xe00)
1087
-EXC_COMMON_BEGIN(h_data_storage_common)
1088
- mfspr r10,SPRN_HDAR
1089
- std r10,PACA_EXGEN+EX_DAR(r13)
1090
- mfspr r10,SPRN_HDSISR
1091
- stw r10,PACA_EXGEN+EX_DSISR(r13)
1092
- EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
1093
- bl save_nvgprs
1094
- RECONCILE_IRQ_STATE(r10, r11)
1095
- addi r3,r1,STACK_FRAME_OVERHEAD
1096
- bl unknown_exception
1097
- b ret_from_except
1098
-
1099
-
1100
-EXC_REAL_OOL_HV(h_instr_storage, 0xe20, 0x20)
1101
-EXC_VIRT_OOL_HV(h_instr_storage, 0x4e20, 0x20, 0xe20)
1102
-TRAMP_KVM_HV(PACA_EXGEN, 0xe20)
1103
-EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception)
1104
-
1105
-
1106
-EXC_REAL_OOL_HV(emulation_assist, 0xe40, 0x20)
1107
-EXC_VIRT_OOL_HV(emulation_assist, 0x4e40, 0x20, 0xe40)
1108
-TRAMP_KVM_HV(PACA_EXGEN, 0xe40)
1109
-EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)
1110
-
1111
-
1112
-/*
1113
- * hmi_exception trampoline is a special case. It jumps to hmi_exception_early
1114
- * first, and then eventaully from there to the trampoline to get into virtual
1115
- * mode.
2084
+/**
2085
+ * Interrupt 0xd00 - Trace Interrupt.
2086
+ * This is a synchronous interrupt in response to instruction step or
2087
+ * breakpoint faults.
11162088 */
1117
-__EXC_REAL_OOL_HV_DIRECT(hmi_exception, 0xe60, 0x20, hmi_exception_early)
1118
-__TRAMP_REAL_OOL_MASKABLE_HV(hmi_exception, 0xe60, IRQS_DISABLED)
2089
+INT_DEFINE_BEGIN(single_step)
2090
+ IVEC=0xd00
2091
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2092
+ IKVM_REAL=1
2093
+#endif
2094
+INT_DEFINE_END(single_step)
2095
+
2096
+EXC_REAL_BEGIN(single_step, 0xd00, 0x100)
2097
+ GEN_INT_ENTRY single_step, virt=0
2098
+EXC_REAL_END(single_step, 0xd00, 0x100)
2099
+EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
2100
+ GEN_INT_ENTRY single_step, virt=1
2101
+EXC_VIRT_END(single_step, 0x4d00, 0x100)
2102
+EXC_COMMON_BEGIN(single_step_common)
2103
+ GEN_COMMON single_step
2104
+ addi r3,r1,STACK_FRAME_OVERHEAD
2105
+ bl single_step_exception
2106
+ b interrupt_return
2107
+
2108
+ GEN_KVM single_step
2109
+
2110
+
2111
+/**
2112
+ * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI).
2113
+ * This is a synchronous interrupt in response to an MMU fault caused by a
2114
+ * guest data access.
2115
+ *
2116
+ * Handling:
2117
+ * This should always get routed to KVM. In radix MMU mode, this is caused
2118
+ * by a guest nested radix access that can't be performed due to the
2119
+ * partition scope page table. In hash mode, this can be caused by guests
2120
+ * running with translation disabled (virtual real mode) or with VPM enabled.
2121
+ * KVM will update the page table structures or disallow the access.
2122
+ */
2123
+INT_DEFINE_BEGIN(h_data_storage)
2124
+ IVEC=0xe00
2125
+ IHSRR=1
2126
+ IDAR=1
2127
+ IDSISR=1
2128
+ IKVM_SKIP=1
2129
+ IKVM_REAL=1
2130
+ IKVM_VIRT=1
2131
+INT_DEFINE_END(h_data_storage)
2132
+
2133
+EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20)
2134
+ GEN_INT_ENTRY h_data_storage, virt=0, ool=1
2135
+EXC_REAL_END(h_data_storage, 0xe00, 0x20)
2136
+EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
2137
+ GEN_INT_ENTRY h_data_storage, virt=1, ool=1
2138
+EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
2139
+EXC_COMMON_BEGIN(h_data_storage_common)
2140
+ GEN_COMMON h_data_storage
2141
+ addi r3,r1,STACK_FRAME_OVERHEAD
2142
+BEGIN_MMU_FTR_SECTION
2143
+ ld r4,_DAR(r1)
2144
+ li r5,SIGSEGV
2145
+ bl bad_page_fault
2146
+MMU_FTR_SECTION_ELSE
2147
+ bl unknown_exception
2148
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
2149
+ b interrupt_return
2150
+
2151
+ GEN_KVM h_data_storage
2152
+
2153
+
2154
+/**
2155
+ * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI).
2156
+ * This is a synchronous interrupt in response to an MMU fault caused by a
2157
+ * guest instruction fetch, similar to HDSI.
2158
+ */
2159
+INT_DEFINE_BEGIN(h_instr_storage)
2160
+ IVEC=0xe20
2161
+ IHSRR=1
2162
+ IKVM_REAL=1
2163
+ IKVM_VIRT=1
2164
+INT_DEFINE_END(h_instr_storage)
2165
+
2166
+EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20)
2167
+ GEN_INT_ENTRY h_instr_storage, virt=0, ool=1
2168
+EXC_REAL_END(h_instr_storage, 0xe20, 0x20)
2169
+EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
2170
+ GEN_INT_ENTRY h_instr_storage, virt=1, ool=1
2171
+EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
2172
+EXC_COMMON_BEGIN(h_instr_storage_common)
2173
+ GEN_COMMON h_instr_storage
2174
+ addi r3,r1,STACK_FRAME_OVERHEAD
2175
+ bl unknown_exception
2176
+ b interrupt_return
2177
+
2178
+ GEN_KVM h_instr_storage
2179
+
2180
+
2181
+/**
2182
+ * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt.
2183
+ */
2184
+INT_DEFINE_BEGIN(emulation_assist)
2185
+ IVEC=0xe40
2186
+ IHSRR=1
2187
+ IKVM_REAL=1
2188
+ IKVM_VIRT=1
2189
+INT_DEFINE_END(emulation_assist)
2190
+
2191
+EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
2192
+ GEN_INT_ENTRY emulation_assist, virt=0, ool=1
2193
+EXC_REAL_END(emulation_assist, 0xe40, 0x20)
2194
+EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
2195
+ GEN_INT_ENTRY emulation_assist, virt=1, ool=1
2196
+EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
2197
+EXC_COMMON_BEGIN(emulation_assist_common)
2198
+ GEN_COMMON emulation_assist
2199
+ addi r3,r1,STACK_FRAME_OVERHEAD
2200
+ bl emulation_assist_interrupt
2201
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
2202
+ b interrupt_return
2203
+
2204
+ GEN_KVM emulation_assist
2205
+
2206
+
2207
+/**
2208
+ * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
2209
+ * This is an asynchronous interrupt caused by a Hypervisor Maintenance
2210
+ * Exception. It is always taken in real mode but uses HSRR registers
2211
+ * unlike SRESET and MCE.
2212
+ *
2213
+ * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
2214
+ * with IRQS_DISABLED mask (i.e., local_irq_disable()).
2215
+ *
2216
+ * Handling:
2217
+ * This is a special case: it is handled similarly to machine checks, with an
2218
+ * initial real mode handler that is not soft-masked and attempts to fix the
2219
+ * problem, followed by a regular soft-maskable handler that reports the
2220
+ * problem.
2221
+ *
2222
+ * The emergency stack is used for the early real mode handler.
2223
+ *
2224
+ * XXX: unclear why MCE and HMI schemes could not be made common, e.g.,
2225
+ * either use soft-masking for the MCE, or use irq_work for the HMI.
2226
+ *
2227
+ * KVM:
2228
+ * Unlike MCE, this calls into KVM without calling the real mode handler
2229
+ * first.
2230
+ */
2231
+INT_DEFINE_BEGIN(hmi_exception_early)
2232
+ IVEC=0xe60
2233
+ IHSRR=1
2234
+ IREALMODE_COMMON=1
2235
+ ISTACK=0
2236
+ IRECONCILE=0
2237
+ IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
2238
+ IKVM_REAL=1
2239
+INT_DEFINE_END(hmi_exception_early)
2240
+
2241
+INT_DEFINE_BEGIN(hmi_exception)
2242
+ IVEC=0xe60
2243
+ IHSRR=1
2244
+ IMASK=IRQS_DISABLED
2245
+ IKVM_REAL=1
2246
+INT_DEFINE_END(hmi_exception)
2247
+
2248
+EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
2249
+ GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1
2250
+EXC_REAL_END(hmi_exception, 0xe60, 0x20)
11192251 EXC_VIRT_NONE(0x4e60, 0x20)
1120
-TRAMP_KVM_HV(PACA_EXGEN, 0xe60)
1121
-TRAMP_REAL_BEGIN(hmi_exception_early)
1122
- EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, 0xe60)
2252
+
2253
+EXC_COMMON_BEGIN(hmi_exception_early_common)
2254
+ __GEN_REALMODE_COMMON_ENTRY hmi_exception_early
2255
+
11232256 mr r10,r1 /* Save r1 */
11242257 ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */
11252258 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
1126
- mfspr r11,SPRN_HSRR0 /* Save HSRR0 */
1127
- mfspr r12,SPRN_HSRR1 /* Save HSRR1 */
1128
- EXCEPTION_PROLOG_COMMON_1()
1129
- EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
1130
- EXCEPTION_PROLOG_COMMON_3(0xe60)
1131
- addi r3,r1,STACK_FRAME_OVERHEAD
1132
- BRANCH_LINK_TO_FAR(DOTSYM(hmi_exception_realmode)) /* Function call ABI */
1133
- cmpdi cr0,r3,0
11342259
1135
- /* Windup the stack. */
1136
- /* Move original HSRR0 and HSRR1 into the respective regs */
1137
- ld r9,_MSR(r1)
1138
- mtspr SPRN_HSRR1,r9
1139
- ld r3,_NIP(r1)
1140
- mtspr SPRN_HSRR0,r3
1141
- ld r9,_CTR(r1)
1142
- mtctr r9
1143
- ld r9,_XER(r1)
1144
- mtxer r9
1145
- ld r9,_LINK(r1)
1146
- mtlr r9
1147
- REST_GPR(0, r1)
1148
- REST_8GPRS(2, r1)
1149
- REST_GPR(10, r1)
1150
- ld r11,_CCR(r1)
1151
- REST_2GPRS(12, r1)
2260
+ __GEN_COMMON_BODY hmi_exception_early
2261
+
2262
+ addi r3,r1,STACK_FRAME_OVERHEAD
2263
+ bl hmi_exception_realmode
2264
+ cmpdi cr0,r3,0
11522265 bne 1f
1153
- mtcr r11
1154
- REST_GPR(11, r1)
1155
- ld r1,GPR1(r1)
2266
+
2267
+ EXCEPTION_RESTORE_REGS hsrr=1
11562268 HRFI_TO_USER_OR_KERNEL
11572269
1158
-1: mtcr r11
1159
- REST_GPR(11, r1)
1160
- ld r1,GPR1(r1)
1161
-
2270
+1:
11622271 /*
11632272 * Go to virtual mode and pull the HMI event information from
11642273 * firmware.
11652274 */
1166
- .globl hmi_exception_after_realmode
1167
-hmi_exception_after_realmode:
1168
- SET_SCRATCH0(r13)
1169
- EXCEPTION_PROLOG_0(PACA_EXGEN)
1170
- b tramp_real_hmi_exception
2275
+ EXCEPTION_RESTORE_REGS hsrr=1
2276
+ GEN_INT_ENTRY hmi_exception, virt=0
2277
+
2278
+ GEN_KVM hmi_exception_early
11712279
11722280 EXC_COMMON_BEGIN(hmi_exception_common)
1173
-EXCEPTION_COMMON(PACA_EXGEN, 0xe60, hmi_exception_common, handle_hmi_exception,
1174
- ret_from_except, FINISH_NAP;ADD_NVGPRS;ADD_RECONCILE;RUNLATCH_ON)
2281
+ GEN_COMMON hmi_exception
2282
+ FINISH_NAP
2283
+ RUNLATCH_ON
2284
+ addi r3,r1,STACK_FRAME_OVERHEAD
2285
+ bl handle_hmi_exception
2286
+ b interrupt_return
11752287
1176
-EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0x20, IRQS_DISABLED)
1177
-EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x20, 0xe80, IRQS_DISABLED)
1178
-TRAMP_KVM_HV(PACA_EXGEN, 0xe80)
2288
+ GEN_KVM hmi_exception
2289
+
2290
+
2291
+/**
2292
+ * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
2293
+ * This is an asynchronous interrupt in response to a msgsnd doorbell.
2294
+ * Similar to the 0xa00 doorbell but for host rather than guest.
2295
+ */
2296
+INT_DEFINE_BEGIN(h_doorbell)
2297
+ IVEC=0xe80
2298
+ IHSRR=1
2299
+ IMASK=IRQS_DISABLED
2300
+ IKVM_REAL=1
2301
+ IKVM_VIRT=1
2302
+INT_DEFINE_END(h_doorbell)
2303
+
2304
+EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
2305
+ GEN_INT_ENTRY h_doorbell, virt=0, ool=1
2306
+EXC_REAL_END(h_doorbell, 0xe80, 0x20)
2307
+EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
2308
+ GEN_INT_ENTRY h_doorbell, virt=1, ool=1
2309
+EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
2310
+EXC_COMMON_BEGIN(h_doorbell_common)
2311
+ GEN_COMMON h_doorbell
2312
+ FINISH_NAP
2313
+ RUNLATCH_ON
2314
+ addi r3,r1,STACK_FRAME_OVERHEAD
11792315 #ifdef CONFIG_PPC_DOORBELL
1180
-EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception)
2316
+ bl doorbell_exception
11812317 #else
1182
-EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception)
2318
+ bl unknown_exception
11832319 #endif
2320
+ b interrupt_return
2321
+
2322
+ GEN_KVM h_doorbell
11842323
11852324
1186
-EXC_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0, 0x20, IRQS_DISABLED)
1187
-EXC_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0x4ea0, 0x20, 0xea0, IRQS_DISABLED)
1188
-TRAMP_KVM_HV(PACA_EXGEN, 0xea0)
1189
-EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ)
2325
+/**
2326
+ * Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
2327
+ * This is an asynchronous interrupt in response to an "external exception".
2328
+ * Similar to 0x500 but for host only.
2329
+ */
2330
+INT_DEFINE_BEGIN(h_virt_irq)
2331
+ IVEC=0xea0
2332
+ IHSRR=1
2333
+ IMASK=IRQS_DISABLED
2334
+ IKVM_REAL=1
2335
+ IKVM_VIRT=1
2336
+INT_DEFINE_END(h_virt_irq)
2337
+
2338
+EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
2339
+ GEN_INT_ENTRY h_virt_irq, virt=0, ool=1
2340
+EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
2341
+EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
2342
+ GEN_INT_ENTRY h_virt_irq, virt=1, ool=1
2343
+EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
2344
+EXC_COMMON_BEGIN(h_virt_irq_common)
2345
+ GEN_COMMON h_virt_irq
2346
+ FINISH_NAP
2347
+ RUNLATCH_ON
2348
+ addi r3,r1,STACK_FRAME_OVERHEAD
2349
+ bl do_IRQ
2350
+ b interrupt_return
2351
+
2352
+ GEN_KVM h_virt_irq
11902353
11912354
11922355 EXC_REAL_NONE(0xec0, 0x20)
....@@ -1195,17 +2358,69 @@
11952358 EXC_VIRT_NONE(0x4ee0, 0x20)
11962359
11972360
1198
-EXC_REAL_OOL_MASKABLE(performance_monitor, 0xf00, 0x20, IRQS_PMI_DISABLED)
1199
-EXC_VIRT_OOL_MASKABLE(performance_monitor, 0x4f00, 0x20, 0xf00, IRQS_PMI_DISABLED)
1200
-TRAMP_KVM(PACA_EXGEN, 0xf00)
1201
-EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)
2361
+/*
2362
+ * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU).
2363
+ * This is an asynchronous interrupt in response to a PMU exception.
2364
+ * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
2365
+ * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()).
2366
+ *
2367
+ * Handling:
2368
+ * This calls into the perf subsystem.
2369
+ *
2370
+ * Like the watchdog soft-nmi, it appears an NMI interrupt to Linux, in that it
2371
+ * runs under local_irq_disable. However it may be soft-masked in
2372
+ * powerpc-specific code.
2373
+ *
2374
+ * If soft masked, the masked handler will note the pending interrupt for
2375
+ * replay, and clear MSR[EE] in the interrupted context.
2376
+ */
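A hedged sketch of the two soft-mask levels (the values are illustrative, not
copied from the kernel headers):

	#define IRQS_DISABLED		0x01	/* set by local_irq_disable() */
	#define IRQS_PMI_DISABLED	0x02	/* set only around PMU-critical code */

	static int pmi_is_masked(unsigned char irq_soft_mask)
	{
		/* a PMI is delivered even under local_irq_disable(); only the
		 * PMI-specific bit defers it for replay */
		return irq_soft_mask & IRQS_PMI_DISABLED;
	}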
2377
+INT_DEFINE_BEGIN(performance_monitor)
2378
+ IVEC=0xf00
2379
+ IMASK=IRQS_PMI_DISABLED
2380
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2381
+ IKVM_REAL=1
2382
+#endif
2383
+INT_DEFINE_END(performance_monitor)
2384
+
2385
+EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
2386
+ GEN_INT_ENTRY performance_monitor, virt=0, ool=1
2387
+EXC_REAL_END(performance_monitor, 0xf00, 0x20)
2388
+EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
2389
+ GEN_INT_ENTRY performance_monitor, virt=1, ool=1
2390
+EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
2391
+EXC_COMMON_BEGIN(performance_monitor_common)
2392
+ GEN_COMMON performance_monitor
2393
+ FINISH_NAP
2394
+ RUNLATCH_ON
2395
+ addi r3,r1,STACK_FRAME_OVERHEAD
2396
+ bl performance_monitor_exception
2397
+ b interrupt_return
2398
+
2399
+ GEN_KVM performance_monitor
12022400
12032401
1204
-EXC_REAL_OOL(altivec_unavailable, 0xf20, 0x20)
1205
-EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x20, 0xf20)
1206
-TRAMP_KVM(PACA_EXGEN, 0xf20)
2402
+/**
2403
+ * Interrupt 0xf20 - Vector Unavailable Interrupt.
2404
+ * This is a synchronous interrupt in response to
2405
+ * executing a vector (or altivec) instruction with MSR[VEC]=0.
2406
+ * Similar to FP unavailable.
2407
+ */
2408
+INT_DEFINE_BEGIN(altivec_unavailable)
2409
+ IVEC=0xf20
2410
+ IRECONCILE=0
2411
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2412
+ IKVM_REAL=1
2413
+#endif
2414
+INT_DEFINE_END(altivec_unavailable)
2415
+
2416
+EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20)
2417
+ GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1
2418
+EXC_REAL_END(altivec_unavailable, 0xf20, 0x20)
2419
+EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20)
2420
+ GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1
2421
+EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20)
12072422 EXC_COMMON_BEGIN(altivec_unavailable_common)
1208
- EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
2423
+ GEN_COMMON altivec_unavailable
12092424 #ifdef CONFIG_ALTIVEC
12102425 BEGIN_FTR_SECTION
12112426 beq 1f
....@@ -1219,30 +2434,47 @@
12192434 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
12202435 #endif
12212436 bl load_up_altivec
1222
- b fast_exception_return
2437
+ b fast_interrupt_return
12232438 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
12242439 2: /* User process was in a transaction */
1225
- bl save_nvgprs
12262440 RECONCILE_IRQ_STATE(r10, r11)
12272441 addi r3,r1,STACK_FRAME_OVERHEAD
12282442 bl altivec_unavailable_tm
1229
- b ret_from_except
2443
+ b interrupt_return
12302444 #endif
12312445 1:
12322446 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
12332447 #endif
1234
- bl save_nvgprs
12352448 RECONCILE_IRQ_STATE(r10, r11)
12362449 addi r3,r1,STACK_FRAME_OVERHEAD
12372450 bl altivec_unavailable_exception
1238
- b ret_from_except
2451
+ b interrupt_return
2452
+
2453
+ GEN_KVM altivec_unavailable
12392454
12402455
1241
-EXC_REAL_OOL(vsx_unavailable, 0xf40, 0x20)
1242
-EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x20, 0xf40)
1243
-TRAMP_KVM(PACA_EXGEN, 0xf40)
2456
+/**
2457
+ * Interrupt 0xf40 - VSX Unavailable Interrupt.
2458
+ * This is a synchronous interrupt in response to
2459
+ * executing a VSX instruction with MSR[VSX]=0.
2460
+ * Similar to FP unavailable.
2461
+ */
2462
+INT_DEFINE_BEGIN(vsx_unavailable)
2463
+ IVEC=0xf40
2464
+ IRECONCILE=0
2465
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2466
+ IKVM_REAL=1
2467
+#endif
2468
+INT_DEFINE_END(vsx_unavailable)
2469
+
2470
+EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20)
2471
+ GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1
2472
+EXC_REAL_END(vsx_unavailable, 0xf40, 0x20)
2473
+EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20)
2474
+ GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1
2475
+EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20)
12442476 EXC_COMMON_BEGIN(vsx_unavailable_common)
1245
- EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
2477
+ GEN_COMMON vsx_unavailable
12462478 #ifdef CONFIG_VSX
12472479 BEGIN_FTR_SECTION
12482480 beq 1f
....@@ -1258,32 +2490,80 @@
12582490 b load_up_vsx
12592491 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
12602492 2: /* User process was in a transaction */
1261
- bl save_nvgprs
12622493 RECONCILE_IRQ_STATE(r10, r11)
12632494 addi r3,r1,STACK_FRAME_OVERHEAD
12642495 bl vsx_unavailable_tm
1265
- b ret_from_except
2496
+ b interrupt_return
12662497 #endif
12672498 1:
12682499 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
12692500 #endif
1270
- bl save_nvgprs
12712501 RECONCILE_IRQ_STATE(r10, r11)
12722502 addi r3,r1,STACK_FRAME_OVERHEAD
12732503 bl vsx_unavailable_exception
1274
- b ret_from_except
2504
+ b interrupt_return
2505
+
2506
+ GEN_KVM vsx_unavailable
12752507
12762508
1277
-EXC_REAL_OOL(facility_unavailable, 0xf60, 0x20)
1278
-EXC_VIRT_OOL(facility_unavailable, 0x4f60, 0x20, 0xf60)
1279
-TRAMP_KVM(PACA_EXGEN, 0xf60)
1280
-EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)
2509
+/**
2510
+ * Interrupt 0xf60 - Facility Unavailable Interrupt.
2511
+ * This is a synchronous interrupt in response to
2512
+ * executing an instruction without access to the facility that can be
2513
+ * resolved by the OS (e.g., FSCR, MSR).
2514
+ * Similar to FP unavailable.
2515
+ */
2516
+INT_DEFINE_BEGIN(facility_unavailable)
2517
+ IVEC=0xf60
2518
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2519
+ IKVM_REAL=1
2520
+#endif
2521
+INT_DEFINE_END(facility_unavailable)
2522
+
2523
+EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
2524
+ GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
2525
+EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
2526
+EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
2527
+ GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
2528
+EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
2529
+EXC_COMMON_BEGIN(facility_unavailable_common)
2530
+ GEN_COMMON facility_unavailable
2531
+ addi r3,r1,STACK_FRAME_OVERHEAD
2532
+ bl facility_unavailable_exception
2533
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
2534
+ b interrupt_return
2535
+
2536
+ GEN_KVM facility_unavailable
12812537
12822538
1283
-EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0x20)
1284
-EXC_VIRT_OOL_HV(h_facility_unavailable, 0x4f80, 0x20, 0xf80)
1285
-TRAMP_KVM_HV(PACA_EXGEN, 0xf80)
1286
-EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)
2539
+/**
2540
+ * Interrupt 0xf60 - Hypervisor Facility Unavailable Interrupt.
2541
+ * This is a synchronous interrupt in response to
2542
+ * executing an instruction without access to the facility that can only
2543
+ * be resolved in HV mode (e.g., HFSCR).
2544
+ * Similar to FP unavailable.
2545
+ */
2546
+INT_DEFINE_BEGIN(h_facility_unavailable)
2547
+ IVEC=0xf80
2548
+ IHSRR=1
2549
+ IKVM_REAL=1
2550
+ IKVM_VIRT=1
2551
+INT_DEFINE_END(h_facility_unavailable)
2552
+
2553
+EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
2554
+ GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
2555
+EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
2556
+EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
2557
+ GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
2558
+EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
2559
+EXC_COMMON_BEGIN(h_facility_unavailable_common)
2560
+ GEN_COMMON h_facility_unavailable
2561
+ addi r3,r1,STACK_FRAME_OVERHEAD
2562
+ bl facility_unavailable_exception
2563
+ REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
2564
+ b interrupt_return
2565
+
2566
+ GEN_KVM h_facility_unavailable
12872567
12882568
12892569 EXC_REAL_NONE(0xfa0, 0x20)
....@@ -1299,48 +2579,94 @@
12992579 EXC_VIRT_NONE(0x5100, 0x100)
13002580
13012581 #ifdef CONFIG_CBE_RAS
1302
-EXC_REAL_HV(cbe_system_error, 0x1200, 0x100)
2582
+INT_DEFINE_BEGIN(cbe_system_error)
2583
+ IVEC=0x1200
2584
+ IHSRR=1
2585
+ IKVM_SKIP=1
2586
+ IKVM_REAL=1
2587
+INT_DEFINE_END(cbe_system_error)
2588
+
2589
+EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
2590
+ GEN_INT_ENTRY cbe_system_error, virt=0
2591
+EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
13032592 EXC_VIRT_NONE(0x5200, 0x100)
1304
-TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1200)
1305
-EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception)
2593
+EXC_COMMON_BEGIN(cbe_system_error_common)
2594
+ GEN_COMMON cbe_system_error
2595
+ addi r3,r1,STACK_FRAME_OVERHEAD
2596
+ bl cbe_system_error_exception
2597
+ b interrupt_return
2598
+
2599
+ GEN_KVM cbe_system_error
2600
+
13062601 #else /* CONFIG_CBE_RAS */
13072602 EXC_REAL_NONE(0x1200, 0x100)
13082603 EXC_VIRT_NONE(0x5200, 0x100)
13092604 #endif
13102605
13112606
1312
-EXC_REAL(instruction_breakpoint, 0x1300, 0x100)
1313
-EXC_VIRT(instruction_breakpoint, 0x5300, 0x100, 0x1300)
1314
-TRAMP_KVM_SKIP(PACA_EXGEN, 0x1300)
1315
-EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception)
2607
+INT_DEFINE_BEGIN(instruction_breakpoint)
2608
+ IVEC=0x1300
2609
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2610
+ IKVM_SKIP=1
2611
+ IKVM_REAL=1
2612
+#endif
2613
+INT_DEFINE_END(instruction_breakpoint)
2614
+
2615
+EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100)
2616
+ GEN_INT_ENTRY instruction_breakpoint, virt=0
2617
+EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100)
2618
+EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
2619
+ GEN_INT_ENTRY instruction_breakpoint, virt=1
2620
+EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
2621
+EXC_COMMON_BEGIN(instruction_breakpoint_common)
2622
+ GEN_COMMON instruction_breakpoint
2623
+ addi r3,r1,STACK_FRAME_OVERHEAD
2624
+ bl instruction_breakpoint_exception
2625
+ b interrupt_return
2626
+
2627
+ GEN_KVM instruction_breakpoint
2628
+
13162629
13172630 EXC_REAL_NONE(0x1400, 0x100)
13182631 EXC_VIRT_NONE(0x5400, 0x100)
13192632
1320
-EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
1321
- mtspr SPRN_SPRG_HSCRATCH0,r13
1322
- EXCEPTION_PROLOG_0(PACA_EXGEN)
1323
- EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
2633
+/**
2634
+ * Interrupt 0x1500 - Soft Patch Interrupt
2635
+ *
2636
+ * Handling:
2637
+ * This is an implementation-specific interrupt which can be used for a
2638
+ * range of exceptions.
2639
+ *
2640
+ * This interrupt handler is unique in that it runs the denormal assist
2641
+ * code even for guests (and even in guest context) without going to KVM,
2642
+ * for speed. POWER9 does not raise denorm exceptions, so this special case
2643
+ * could be phased out in the future to reduce the number of special cases.
2644
+ */
2645
+INT_DEFINE_BEGIN(denorm_exception)
2646
+ IVEC=0x1500
2647
+ IHSRR=1
2648
+ IBRANCH_TO_COMMON=0
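+ /*
+ * Entry code must fall through to the denorm assist check below
+ * before branching to common, hence no automatic branch here.
+ */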
2649
+ IKVM_REAL=1
2650
+INT_DEFINE_END(denorm_exception)
13242651
2652
+EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100)
2653
+ GEN_INT_ENTRY denorm_exception, virt=0
13252654 #ifdef CONFIG_PPC_DENORMALISATION
1326
- mfspr r10,SPRN_HSRR1
1327
- andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
2655
+ andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
13282656 bne+ denorm_assist
13292657 #endif
1330
-
1331
- KVMTEST_HV(0x1500)
1332
- EXCEPTION_PROLOG_2(denorm_common, EXC_HV)
1333
-EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)
1334
-
2658
+ GEN_BRANCH_TO_COMMON denorm_exception, virt=0
2659
+EXC_REAL_END(denorm_exception, 0x1500, 0x100)
13352660 #ifdef CONFIG_PPC_DENORMALISATION
13362661 EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
1337
- b exc_real_0x1500_denorm_exception_hv
2662
+ GEN_INT_ENTRY denorm_exception, virt=1
2663
+ andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
2664
+ bne+ denorm_assist
2665
+ GEN_BRANCH_TO_COMMON denorm_exception, virt=1
13382666 EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
13392667 #else
13402668 EXC_VIRT_NONE(0x5500, 0x100)
13412669 #endif
1342
-
1343
-TRAMP_KVM_HV(PACA_EXGEN, 0x1500)
13442670
13452671 #ifdef CONFIG_PPC_DENORMALISATION
13462672 TRAMP_REAL_BEGIN(denorm_assist)
....@@ -1355,12 +2681,11 @@
13552681 mtmsrd r10
13562682 sync
13572683
1358
-#define FMR2(n) fmr (n), (n) ; fmr n+1, n+1
1359
-#define FMR4(n) FMR2(n) ; FMR2(n+2)
1360
-#define FMR8(n) FMR4(n) ; FMR4(n+4)
1361
-#define FMR16(n) FMR8(n) ; FMR8(n+8)
1362
-#define FMR32(n) FMR16(n) ; FMR16(n+16)
1363
- FMR32(0)
2684
+ .Lreg=0
2685
+ .rept 32
2686
+ fmr .Lreg,.Lreg
2687
+ .Lreg=.Lreg+1
2688
+ .endr
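+ /*
+ * The .rept above assembles to "fmr f0,f0" .. "fmr f31,f31": the
+ * register-to-itself move that performs the denormalisation, as the
+ * VSX comment further down describes.
+ */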
13642689
13652690 FTR_SECTION_ELSE
13662691 /*
....@@ -1372,12 +2697,11 @@
13722697 mtmsrd r10
13732698 sync
13742699
1375
-#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
1376
-#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
1377
-#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
1378
-#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
1379
-#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
1380
- XVCPSGNDP32(0)
2700
+ .Lreg=0
2701
+ .rept 32
2702
+ XVCPSGNDP(.Lreg,.Lreg,.Lreg)
2703
+ .Lreg=.Lreg+1
2704
+ .endr
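+ /* xvcpsgndp n,n,n copies a VSX register onto itself, the vector
+ * analogue of the fmr loop above */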
13812705
13822706 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
13832707
....@@ -1388,14 +2712,22 @@
13882712 * To denormalise we need to move a copy of the register to itself.
13892713 * For POWER8 we need to do that for all 64 VSX registers
13902714 */
1391
- XVCPSGNDP32(32)
2715
+ .Lreg=32
2716
+ .rept 32
2717
+ XVCPSGNDP(.Lreg,.Lreg,.Lreg)
2718
+ .Lreg=.Lreg+1
2719
+ .endr
2720
+
13922721 denorm_done:
13932722 mfspr r11,SPRN_HSRR0
13942723 subi r11,r11,4
13952724 mtspr SPRN_HSRR0,r11
13962725 mtcrf 0x80,r9
13972726 ld r9,PACA_EXGEN+EX_R9(r13)
1398
- RESTORE_PPR_PACA(PACA_EXGEN, r10)
2727
+BEGIN_FTR_SECTION
2728
+ ld r10,PACA_EXGEN+EX_PPR(r13)
2729
+ mtspr SPRN_PPR,r10
2730
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
13992731 BEGIN_FTR_SECTION
14002732 ld r10,PACA_EXGEN+EX_CFAR(r13)
14012733 mtspr SPRN_CFAR,r10
....@@ -1408,50 +2740,101 @@
14082740 b .
14092741 #endif
14102742
1411
-EXC_COMMON(denorm_common, 0x1500, unknown_exception)
2743
+EXC_COMMON_BEGIN(denorm_exception_common)
2744
+ GEN_COMMON denorm_exception
2745
+ addi r3,r1,STACK_FRAME_OVERHEAD
2746
+ bl unknown_exception
2747
+ b interrupt_return
2748
+
2749
+ GEN_KVM denorm_exception
14122750
14132751
14142752 #ifdef CONFIG_CBE_RAS
1415
-EXC_REAL_HV(cbe_maintenance, 0x1600, 0x100)
2753
+INT_DEFINE_BEGIN(cbe_maintenance)
2754
+ IVEC=0x1600
2755
+ IHSRR=1
2756
+ IKVM_SKIP=1
2757
+ IKVM_REAL=1
2758
+INT_DEFINE_END(cbe_maintenance)
2759
+
2760
+EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100)
2761
+ GEN_INT_ENTRY cbe_maintenance, virt=0
2762
+EXC_REAL_END(cbe_maintenance, 0x1600, 0x100)
14162763 EXC_VIRT_NONE(0x5600, 0x100)
1417
-TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1600)
1418
-EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception)
2764
+EXC_COMMON_BEGIN(cbe_maintenance_common)
2765
+ GEN_COMMON cbe_maintenance
2766
+ addi r3,r1,STACK_FRAME_OVERHEAD
2767
+ bl cbe_maintenance_exception
2768
+ b interrupt_return
2769
+
2770
+ GEN_KVM cbe_maintenance
2771
+
14192772 #else /* CONFIG_CBE_RAS */
14202773 EXC_REAL_NONE(0x1600, 0x100)
14212774 EXC_VIRT_NONE(0x5600, 0x100)
14222775 #endif
14232776
14242777
1425
-EXC_REAL(altivec_assist, 0x1700, 0x100)
1426
-EXC_VIRT(altivec_assist, 0x5700, 0x100, 0x1700)
1427
-TRAMP_KVM(PACA_EXGEN, 0x1700)
1428
-#ifdef CONFIG_ALTIVEC
1429
-EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception)
1430
-#else
1431
-EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception)
2778
+INT_DEFINE_BEGIN(altivec_assist)
2779
+ IVEC=0x1700
2780
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2781
+ IKVM_REAL=1
14322782 #endif
2783
+INT_DEFINE_END(altivec_assist)
2784
+
2785
+EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100)
2786
+ GEN_INT_ENTRY altivec_assist, virt=0
2787
+EXC_REAL_END(altivec_assist, 0x1700, 0x100)
2788
+EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100)
2789
+ GEN_INT_ENTRY altivec_assist, virt=1
2790
+EXC_VIRT_END(altivec_assist, 0x5700, 0x100)
2791
+EXC_COMMON_BEGIN(altivec_assist_common)
2792
+ GEN_COMMON altivec_assist
2793
+ addi r3,r1,STACK_FRAME_OVERHEAD
2794
+#ifdef CONFIG_ALTIVEC
2795
+ bl altivec_assist_exception
2796
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
2797
+#else
2798
+ bl unknown_exception
2799
+#endif
2800
+ b interrupt_return
2801
+
2802
+ GEN_KVM altivec_assist
14332803
14342804
14352805 #ifdef CONFIG_CBE_RAS
1436
-EXC_REAL_HV(cbe_thermal, 0x1800, 0x100)
2806
+INT_DEFINE_BEGIN(cbe_thermal)
2807
+ IVEC=0x1800
2808
+ IHSRR=1
2809
+ IKVM_SKIP=1
2810
+ IKVM_REAL=1
2811
+INT_DEFINE_END(cbe_thermal)
2812
+
2813
+EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100)
2814
+ GEN_INT_ENTRY cbe_thermal, virt=0
2815
+EXC_REAL_END(cbe_thermal, 0x1800, 0x100)
14372816 EXC_VIRT_NONE(0x5800, 0x100)
1438
-TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1800)
1439
-EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
2817
+EXC_COMMON_BEGIN(cbe_thermal_common)
2818
+ GEN_COMMON cbe_thermal
2819
+ addi r3,r1,STACK_FRAME_OVERHEAD
2820
+ bl cbe_thermal_exception
2821
+ b interrupt_return
2822
+
2823
+ GEN_KVM cbe_thermal
2824
+
14402825 #else /* CONFIG_CBE_RAS */
14412826 EXC_REAL_NONE(0x1800, 0x100)
14422827 EXC_VIRT_NONE(0x5800, 0x100)
14432828 #endif
14442829
2830
+
14452831 #ifdef CONFIG_PPC_WATCHDOG
14462832
1447
-#define MASKED_DEC_HANDLER_LABEL 3f
1448
-
1449
-#define MASKED_DEC_HANDLER(_H) \
1450
-3: /* soft-nmi */ \
1451
- std r12,PACA_EXGEN+EX_R12(r13); \
1452
- GET_SCRATCH0(r10); \
1453
- std r10,PACA_EXGEN+EX_R13(r13); \
1454
- EXCEPTION_PROLOG_2(soft_nmi_common, _H)
2833
+INT_DEFINE_BEGIN(soft_nmi)
2834
+ IVEC=0x900
2835
+ ISTACK=0
2836
+ IRECONCILE=0 /* Soft-NMI may fire under local_irq_disable */
2837
+INT_DEFINE_END(soft_nmi)
14552838
14562839 /*
14572840 * Branch to soft_nmi_interrupt using the emergency stack. The emergency
....@@ -1463,17 +2846,42 @@
14632846 * and run it entirely with interrupts hard disabled.
14642847 */
14652848 EXC_COMMON_BEGIN(soft_nmi_common)
2849
+ mfspr r11,SPRN_SRR0
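+ /* r11 must hold SRR0 for __GEN_COMMON_BODY below; reload it here
+ * because the masked handler used r11 as a scratch register */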
14662850 mr r10,r1
14672851 ld r1,PACAEMERGSP(r13)
14682852 subi r1,r1,INT_FRAME_SIZE
1469
- EXCEPTION_COMMON_NORET_STACK(PACA_EXGEN, 0x900,
1470
- system_reset, soft_nmi_interrupt,
1471
- ADD_NVGPRS;ADD_RECONCILE)
1472
- b ret_from_except
2853
+ __GEN_COMMON_BODY soft_nmi
14732854
1474
-#else /* CONFIG_PPC_WATCHDOG */
1475
-#define MASKED_DEC_HANDLER_LABEL 2f /* normal return */
1476
-#define MASKED_DEC_HANDLER(_H)
2855
+ /*
2856
+ * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see
2857
+ * system_reset_common)
2858
+ */
2859
+ li r10,IRQS_ALL_DISABLED
2860
+ stb r10,PACAIRQSOFTMASK(r13)
2861
+ lbz r10,PACAIRQHAPPENED(r13)
2862
+ std r10,RESULT(r1)
2863
+ ori r10,r10,PACA_IRQ_HARD_DIS
2864
+ stb r10,PACAIRQHAPPENED(r13)
2865
+
2866
+ addi r3,r1,STACK_FRAME_OVERHEAD
2867
+ bl soft_nmi_interrupt
2868
+
2869
+ /* Clear MSR_RI before setting SRR0 and SRR1. */
2870
+ li r9,0
2871
+ mtmsrd r9,1
2872
+
2873
+ /*
2874
+ * Restore soft mask settings.
2875
+ */
2876
+ ld r10,RESULT(r1)
2877
+ stb r10,PACAIRQHAPPENED(r13)
2878
+ ld r10,SOFTE(r1)
2879
+ stb r10,PACAIRQSOFTMASK(r13)
2880
+
2881
+ kuap_restore_amr r9, r10
2882
+ EXCEPTION_RESTORE_REGS hsrr=0
2883
+ RFI_TO_KERNEL
2884
+
14772885 #endif /* CONFIG_PPC_WATCHDOG */
14782886
14792887 /*
....@@ -1486,35 +2894,53 @@
14862894 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
14872895 * This is called with r10 containing the value to OR to the paca field.
14882896 */
1489
-#define MASKED_INTERRUPT(_H) \
1490
-masked_##_H##interrupt: \
1491
- std r11,PACA_EXGEN+EX_R11(r13); \
1492
- lbz r11,PACAIRQHAPPENED(r13); \
1493
- or r11,r11,r10; \
1494
- stb r11,PACAIRQHAPPENED(r13); \
1495
- cmpwi r10,PACA_IRQ_DEC; \
1496
- bne 1f; \
1497
- lis r10,0x7fff; \
1498
- ori r10,r10,0xffff; \
1499
- mtspr SPRN_DEC,r10; \
1500
- b MASKED_DEC_HANDLER_LABEL; \
1501
-1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK; \
1502
- beq 2f; \
1503
- mfspr r10,SPRN_##_H##SRR1; \
1504
- xori r10,r10,MSR_EE; /* clear MSR_EE */ \
1505
- mtspr SPRN_##_H##SRR1,r10; \
1506
- ori r11,r11,PACA_IRQ_HARD_DIS; \
1507
- stb r11,PACAIRQHAPPENED(r13); \
1508
-2: /* done */ \
1509
- mtcrf 0x80,r9; \
1510
- std r1,PACAR1(r13); \
1511
- ld r9,PACA_EXGEN+EX_R9(r13); \
1512
- ld r10,PACA_EXGEN+EX_R10(r13); \
1513
- ld r11,PACA_EXGEN+EX_R11(r13); \
1514
- /* returns to kernel where r13 must be set up, so don't restore it */ \
1515
- ##_H##RFI_TO_KERNEL; \
1516
- b .; \
1517
- MASKED_DEC_HANDLER(_H)
2897
+.macro MASKED_INTERRUPT hsrr=0
2898
+ .if \hsrr
2899
+masked_Hinterrupt:
2900
+ .else
2901
+masked_interrupt:
2902
+ .endif
2903
+ lbz r11,PACAIRQHAPPENED(r13)
2904
+ or r11,r11,r10
2905
+ stb r11,PACAIRQHAPPENED(r13)
2906
+ cmpwi r10,PACA_IRQ_DEC
2907
+ bne 1f
2908
+ lis r10,0x7fff
2909
+ ori r10,r10,0xffff
2910
+ mtspr SPRN_DEC,r10
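+ /* 0x7fffffff is the largest positive value, silencing DEC for now */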
2911
+#ifdef CONFIG_PPC_WATCHDOG
2912
+ b soft_nmi_common
2913
+#else
2914
+ b 2f
2915
+#endif
2916
+1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK
2917
+ beq 2f
2918
+ xori r12,r12,MSR_EE /* clear MSR_EE */
2919
+ .if \hsrr
2920
+ mtspr SPRN_HSRR1,r12
2921
+ .else
2922
+ mtspr SPRN_SRR1,r12
2923
+ .endif
2924
+ ori r11,r11,PACA_IRQ_HARD_DIS
2925
+ stb r11,PACAIRQHAPPENED(r13)
2926
+2: /* done */
2927
+ ld r10,PACA_EXGEN+EX_CTR(r13)
2928
+ mtctr r10
2929
+ mtcrf 0x80,r9
2930
+ std r1,PACAR1(r13)
2931
+ ld r9,PACA_EXGEN+EX_R9(r13)
2932
+ ld r10,PACA_EXGEN+EX_R10(r13)
2933
+ ld r11,PACA_EXGEN+EX_R11(r13)
2934
+ ld r12,PACA_EXGEN+EX_R12(r13)
2935
+ ld r13,PACA_EXGEN+EX_R13(r13)
2936
+ /* May return to masked low address where r13 is not set up */
2937
+ .if \hsrr
2938
+ HRFI_TO_KERNEL
2939
+ .else
2940
+ RFI_TO_KERNEL
2941
+ .endif
2942
+ b .
2943
+.endm
15182944
15192945 TRAMP_REAL_BEGIN(stf_barrier_fallback)
15202946 std r9,PACA_EXRFI+EX_R9(r13)
....@@ -1570,6 +2996,25 @@
15702996 ld r11,PACA_EXRFI+EX_R11(r13)
15712997 blr
15722998
2999
+/*
3000
+ * The SCV entry flush happens with interrupts enabled, so it must disable
3001
+ * them to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common). r10
3002
+ * (containing LR) does not need to be preserved here because scv entry
3003
+ * puts 0 in the pt_regs; CTR can be clobbered for the same reason.
3004
+ */
3005
+TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
3006
+ li r10,0
3007
+ mtmsrd r10,1
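+ /* mtmsrd with L=1 alters only MSR[EE] and MSR[RI]: hard-disable
+ * interrupts and clear RI in one shot */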
3008
+ lbz r10,PACAIRQHAPPENED(r13)
3009
+ ori r10,r10,PACA_IRQ_HARD_DIS
3010
+ stb r10,PACAIRQHAPPENED(r13)
3011
+ std r11,PACA_EXRFI+EX_R11(r13)
3012
+ L1D_DISPLACEMENT_FLUSH
3013
+ ld r11,PACA_EXRFI+EX_R11(r13)
3014
+ li r10,MSR_RI
3015
+ mtmsrd r10,1
3016
+ blr
3017
+
15733018 TRAMP_REAL_BEGIN(rfi_flush_fallback)
15743019 SET_SCRATCH0(r13);
15753020 GET_PACA(r13);
....@@ -1606,6 +3051,47 @@
16063051 GET_SCRATCH0(r13);
16073052 hrfid
16083053
3054
+TRAMP_REAL_BEGIN(rfscv_flush_fallback)
3055
+ /* system call volatile */
3056
+ mr r7,r13
3057
+ GET_PACA(r13);
3058
+ mr r8,r1
3059
+ ld r1,PACAKSAVE(r13)
3060
+ mfctr r9
3061
+ ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
3062
+ ld r11,PACA_L1D_FLUSH_SIZE(r13)
3063
+ srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
3064
+ mtctr r11
3065
+ DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
3066
+
3067
+ /* order ld/st prior to dcbt stop all streams with flushing */
3068
+ sync
3069
+
3070
+ /*
3071
+ * The load addresses are at staggered offsets within cachelines,
3072
+ * which suits some pipelines better (on others it should not
3073
+ * hurt).
3074
+ */
3075
+1:
3076
+ ld r11,(0x80 + 8)*0(r10)
3077
+ ld r11,(0x80 + 8)*1(r10)
3078
+ ld r11,(0x80 + 8)*2(r10)
3079
+ ld r11,(0x80 + 8)*3(r10)
3080
+ ld r11,(0x80 + 8)*4(r10)
3081
+ ld r11,(0x80 + 8)*5(r10)
3082
+ ld r11,(0x80 + 8)*6(r10)
3083
+ ld r11,(0x80 + 8)*7(r10)
3084
+ addi r10,r10,0x80*8
3085
+ bdnz 1b
3086
+
3087
+ mtctr r9
3088
+ li r9,0
3089
+ li r10,0
3090
+ li r11,0
3091
+ mr r1,r8
3092
+ mr r13,r7
3093
+ RFSCV
3094
+
16093095 USE_TEXT_SECTION()
16103096
16113097 _GLOBAL(do_uaccess_flush)
....@@ -1619,17 +3105,12 @@
16193105 _ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
16203106 EXPORT_SYMBOL(do_uaccess_flush)
16213107
1622
-/*
1623
- * Real mode exceptions actually use this too, but alternate
1624
- * instruction code patches (which end up in the common .text area)
1625
- * cannot reach these if they are put there.
1626
- */
1627
-USE_FIXED_SECTION(virt_trampolines)
1628
- MASKED_INTERRUPT()
1629
- MASKED_INTERRUPT(H)
3108
+
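+/*
+ * Instantiate both flavours of the masked-interrupt handler:
+ * masked_interrupt (SRR0/1) and masked_Hinterrupt (HSRR0/1).
+ */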
3109
+MASKED_INTERRUPT
3110
+MASKED_INTERRUPT hsrr=1
16303111
16313112 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1632
-TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
3113
+kvmppc_skip_interrupt:
16333114 /*
16343115 * Here all GPRs are unchanged from when the interrupt happened
16353116 * except for r13, which is saved in SPRG_SCRATCH0.
....@@ -1641,7 +3122,7 @@
16413122 RFI_TO_KERNEL
16423123 b .
16433124
1644
-TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
3125
+kvmppc_skip_Hinterrupt:
16453126 /*
16463127 * Here all GPRs are unchanged from when the interrupt happened
16473128 * except for r13, which is saved in SPRG_SCRATCH0.
....@@ -1653,16 +3134,6 @@
16533134 HRFI_TO_KERNEL
16543135 b .
16553136 #endif
1656
-
1657
-/*
1658
- * Ensure that any handlers that get invoked from the exception prologs
1659
- * above are below the first 64KB (0x10000) of the kernel image because
1660
- * the prologs assemble the addresses of these handlers using the
1661
- * LOAD_HANDLER macro, which uses an ori instruction.
1662
- */
1663
-
1664
-/*** Common interrupt handlers ***/
1665
-
16663137
16673138 /*
16683139 * Relocation-on interrupts: A subset of the interrupts can be delivered
....@@ -1685,6 +3156,10 @@
16853156
16863157 USE_FIXED_SECTION(virt_trampolines)
16873158 /*
3159
+ * All code below __end_interrupts is treated as soft-masked. If
3160
+ * any code runs here with MSR[EE]=1, it must then cope with a pending
3161
+ * soft interrupt being raised (i.e., by ensuring it is replayed).
3162
+ *
16883163 * The __end_interrupts marker must be past the out-of-line (OOL)
16893164 * handlers, so that they are copied to real address 0x100 when running
16903165 * a relocatable kernel. This ensures they can be reached from the short
....@@ -1697,11 +3172,20 @@
16973172 DEFINE_FIXED_SYMBOL(__end_interrupts)
16983173
16993174 #ifdef CONFIG_PPC_970_NAP
3175
+ /*
3176
+ * Called by exception entry code if _TLF_NAPPING was set, this clears
3177
+ * the NAPPING flag, and redirects the exception exit to
3178
+ * power4_idle_nap_return.
3179
+ */
3180
+ .globl power4_fixup_nap
17003181 EXC_COMMON_BEGIN(power4_fixup_nap)
17013182 andc r9,r9,r10
17023183 std r9,TI_LOCAL_FLAGS(r11)
1703
- ld r10,_LINK(r1) /* make idle task do the */
1704
- std r10,_NIP(r1) /* equivalent of a blr */
3184
+ LOAD_REG_ADDR(r10, power4_idle_nap_return)
3185
+ std r10,_NIP(r1)
3186
+ blr
3187
+
3188
+power4_idle_nap_return:
17053189 blr
17063190 #endif
17073191
....@@ -1712,6 +3196,35 @@
17123196
17133197 USE_TEXT_SECTION()
17143198
3199
+/* MSR[RI] should be clear because this uses SRR[01] */
3200
+enable_machine_check:
3201
+ mflr r0
3202
+ bcl 20,31,$+4
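+ /* bcl 20,31,$+4 branches to the next instruction, loading the
+ * current address into LR without pushing the link stack */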
3203
+0: mflr r3
3204
+ addi r3,r3,(1f - 0b)
3205
+ mtspr SPRN_SRR0,r3
3206
+ mfmsr r3
3207
+ ori r3,r3,MSR_ME
3208
+ mtspr SPRN_SRR1,r3
3209
+ RFI_TO_KERNEL
3210
+1: mtlr r0
3211
+ blr
3212
+
3213
+/* MSR[RI] should be clear because this uses SRR[01] */
3214
+disable_machine_check:
3215
+ mflr r0
3216
+ bcl 20,31,$+4
3217
+0: mflr r3
3218
+ addi r3,r3,(1f - 0b)
3219
+ mtspr SPRN_SRR0,r3
3220
+ mfmsr r3
3221
+ li r4,MSR_ME
3222
+ andc r3,r3,r4
3223
+ mtspr SPRN_SRR1,r3
3224
+ RFI_TO_KERNEL
3225
+1: mtlr r0
3226
+ blr
3227
+
17153228 /*
17163229 * Hash table stuff
17173230 */
....@@ -1720,78 +3233,80 @@
17203233 #ifdef CONFIG_PPC_BOOK3S_64
17213234 lis r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h
17223235 ori r0,r0,DSISR_BAD_FAULT_64S@l
1723
- and. r0,r4,r0 /* weird error? */
3236
+ and. r0,r5,r0 /* weird error? */
17243237 bne- handle_page_fault /* if not, try to insert a HPTE */
1725
- CURRENT_THREAD_INFO(r11, r1)
1726
- lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
1727
- andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
1728
- bne 77f /* then don't call hash_page now */
17293238
17303239 /*
1731
- * r3 contains the faulting address
1732
- * r4 msr
1733
- * r5 contains the trap number
1734
- * r6 contains dsisr
3240
+ * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
3241
+ * don't call hash_page, just fail the fault. This is required to
3242
+ * prevent re-entrancy problems in the hash code, namely perf
3243
+ * interrupts hitting while something holds H_PAGE_BUSY, and taking a
3244
+ * hash fault. See the comment in hash_preload().
3245
+ */
3246
+ ld r11, PACA_THREAD_INFO(r13)
3247
+ lwz r0,TI_PREEMPT(r11)
3248
+ andis. r0,r0,NMI_MASK@h
3249
+ bne 77f
3250
+
3251
+ /*
3252
+ * r3 contains the trap number
3253
+ * r4 contains the faulting address
3254
+ * r5 contains dsisr
3255
+ * r6 contains msr
17353256 *
17363257 * at return r3 = 0 for success, 1 for page fault, negative for error
17373258 */
1738
- mr r4,r12
1739
- ld r6,_DSISR(r1)
17403259 bl __hash_page /* build HPTE if possible */
17413260 cmpdi r3,0 /* see if __hash_page succeeded */
17423261
17433262 /* Success */
1744
- beq fast_exc_return_irq /* Return from exception on success */
3263
+ beq interrupt_return /* Return from exception on success */
17453264
17463265 /* Error */
17473266 blt- 13f
17483267
1749
- /* Reload DSISR into r4 for the DABR check below */
1750
- ld r4,_DSISR(r1)
3268
+ /* Reload DAR/DSISR into r4/r5 for the DABR check below */
3269
+ ld r4,_DAR(r1)
3270
+ ld r5,_DSISR(r1)
17513271 #endif /* CONFIG_PPC_BOOK3S_64 */
17523272
17533273 /* Here we have a page fault that hash_page can't handle. */
17543274 handle_page_fault:
1755
-11: andis. r0,r4,DSISR_DABRMATCH@h
3275
+11: andis. r0,r5,DSISR_DABRMATCH@h
17563276 bne- handle_dabr_fault
1757
- ld r4,_DAR(r1)
1758
- ld r5,_DSISR(r1)
17593277 addi r3,r1,STACK_FRAME_OVERHEAD
17603278 bl do_page_fault
17613279 cmpdi r3,0
1762
- beq+ ret_from_except_lite
1763
- bl save_nvgprs
3280
+ beq+ interrupt_return
17643281 mr r5,r3
17653282 addi r3,r1,STACK_FRAME_OVERHEAD
1766
- lwz r4,_DAR(r1)
3283
+ ld r4,_DAR(r1)
17673284 bl bad_page_fault
1768
- b ret_from_except
3285
+ b interrupt_return
17693286
17703287 /* We have a data breakpoint exception - handle it */
17713288 handle_dabr_fault:
1772
- bl save_nvgprs
17733289 ld r4,_DAR(r1)
17743290 ld r5,_DSISR(r1)
17753291 addi r3,r1,STACK_FRAME_OVERHEAD
17763292 bl do_break
17773293 /*
17783294 * do_break() may have changed the NV GPRS while handling a breakpoint.
1779
- * If so, we need to restore them with their updated values. Don't use
1780
- * ret_from_except_lite here.
3295
+ * If so, we need to restore them with their updated values.
17813296 */
1782
- b ret_from_except
3297
+ REST_NVGPRS(r1)
3298
+ b interrupt_return
17833299
17843300
17853301 #ifdef CONFIG_PPC_BOOK3S_64
17863302 /* We have a page fault that hash_page could handle but HV refused
17873303 * the PTE insertion
17883304 */
1789
-13: bl save_nvgprs
1790
- mr r5,r3
3305
+13: mr r5,r3
17913306 addi r3,r1,STACK_FRAME_OVERHEAD
17923307 ld r4,_DAR(r1)
17933308 bl low_hash_fault
1794
- b ret_from_except
3309
+ b interrupt_return
17953310 #endif
17963311
17973312 /*
....@@ -1801,136 +3316,7 @@
18013316 * were soft-disabled. We want to invoke the exception handler for
18023317 * the access, or panic if there isn't a handler.
18033318 */
1804
-77: bl save_nvgprs
1805
- mr r4,r3
1806
- addi r3,r1,STACK_FRAME_OVERHEAD
3319
+77: addi r3,r1,STACK_FRAME_OVERHEAD
18073320 li r5,SIGSEGV
18083321 bl bad_page_fault
1809
- b ret_from_except
1810
-
1811
-/*
1812
- * Here we have detected that the kernel stack pointer is bad.
1813
- * R9 contains the saved CR, r13 points to the paca,
1814
- * r10 contains the (bad) kernel stack pointer,
1815
- * r11 and r12 contain the saved SRR0 and SRR1.
1816
- * We switch to using an emergency stack, save the registers there,
1817
- * and call kernel_bad_stack(), which panics.
1818
- */
1819
-bad_stack:
1820
- ld r1,PACAEMERGSP(r13)
1821
- subi r1,r1,64+INT_FRAME_SIZE
1822
- std r9,_CCR(r1)
1823
- std r10,GPR1(r1)
1824
- std r11,_NIP(r1)
1825
- std r12,_MSR(r1)
1826
- mfspr r11,SPRN_DAR
1827
- mfspr r12,SPRN_DSISR
1828
- std r11,_DAR(r1)
1829
- std r12,_DSISR(r1)
1830
- mflr r10
1831
- mfctr r11
1832
- mfxer r12
1833
- std r10,_LINK(r1)
1834
- std r11,_CTR(r1)
1835
- std r12,_XER(r1)
1836
- SAVE_GPR(0,r1)
1837
- SAVE_GPR(2,r1)
1838
- ld r10,EX_R3(r3)
1839
- std r10,GPR3(r1)
1840
- SAVE_GPR(4,r1)
1841
- SAVE_4GPRS(5,r1)
1842
- ld r9,EX_R9(r3)
1843
- ld r10,EX_R10(r3)
1844
- SAVE_2GPRS(9,r1)
1845
- ld r9,EX_R11(r3)
1846
- ld r10,EX_R12(r3)
1847
- ld r11,EX_R13(r3)
1848
- std r9,GPR11(r1)
1849
- std r10,GPR12(r1)
1850
- std r11,GPR13(r1)
1851
-BEGIN_FTR_SECTION
1852
- ld r10,EX_CFAR(r3)
1853
- std r10,ORIG_GPR3(r1)
1854
-END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1855
- SAVE_8GPRS(14,r1)
1856
- SAVE_10GPRS(22,r1)
1857
- lhz r12,PACA_TRAP_SAVE(r13)
1858
- std r12,_TRAP(r1)
1859
- addi r11,r1,INT_FRAME_SIZE
1860
- std r11,0(r1)
1861
- li r12,0
1862
- std r12,0(r11)
1863
- ld r2,PACATOC(r13)
1864
- ld r11,exception_marker@toc(r2)
1865
- std r12,RESULT(r1)
1866
- std r11,STACK_FRAME_OVERHEAD-16(r1)
1867
-1: addi r3,r1,STACK_FRAME_OVERHEAD
1868
- bl kernel_bad_stack
1869
- b 1b
1870
-_ASM_NOKPROBE_SYMBOL(bad_stack);
1871
-
1872
-/*
1873
- * When doorbell is triggered from system reset wakeup, the message is
1874
- * not cleared, so it would fire again when EE is enabled.
1875
- *
1876
- * When coming from local_irq_enable, there may be the same problem if
1877
- * we were hard disabled.
1878
- *
1879
- * Execute msgclr to clear pending exceptions before handling it.
1880
- */
1881
-h_doorbell_common_msgclr:
1882
- LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
1883
- PPC_MSGCLR(3)
1884
- b h_doorbell_common
1885
-
1886
-doorbell_super_common_msgclr:
1887
- LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
1888
- PPC_MSGCLRP(3)
1889
- b doorbell_super_common
1890
-
1891
-/*
1892
- * Called from arch_local_irq_enable when an interrupt needs
1893
- * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
1894
- * which kind of interrupt. MSR:EE is already off. We generate a
1895
- * stackframe like if a real interrupt had happened.
1896
- *
1897
- * Note: While MSR:EE is off, we need to make sure that _MSR
1898
- * in the generated frame has EE set to 1 or the exception
1899
- * handler will not properly re-enable them.
1900
- *
1901
- * Note that we don't specify LR as the NIP (return address) for
1902
- * the interrupt because that would unbalance the return branch
1903
- * predictor.
1904
- */
1905
-_GLOBAL(__replay_interrupt)
1906
- /* We are going to jump to the exception common code which
1907
- * will retrieve various register values from the PACA which
1908
- * we don't give a damn about, so we don't bother storing them.
1909
- */
1910
- mfmsr r12
1911
- LOAD_REG_ADDR(r11, replay_interrupt_return)
1912
- mfcr r9
1913
- ori r12,r12,MSR_EE
1914
- cmpwi r3,0x900
1915
- beq decrementer_common
1916
- cmpwi r3,0x500
1917
-BEGIN_FTR_SECTION
1918
- beq h_virt_irq_common
1919
-FTR_SECTION_ELSE
1920
- beq hardware_interrupt_common
1921
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_300)
1922
- cmpwi r3,0xf00
1923
- beq performance_monitor_common
1924
-BEGIN_FTR_SECTION
1925
- cmpwi r3,0xa00
1926
- beq h_doorbell_common_msgclr
1927
- cmpwi r3,0xe60
1928
- beq hmi_exception_common
1929
-FTR_SECTION_ELSE
1930
- cmpwi r3,0xa00
1931
- beq doorbell_super_common_msgclr
1932
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
1933
-replay_interrupt_return:
1934
- blr
1935
-
1936
-_ASM_NOKPROBE_SYMBOL(__replay_interrupt)
3322
+ b interrupt_return