2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/parisc/kernel/entry.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Linux/PA-RISC Project (http://www.parisc-linux.org/)
  *
@@ -6,20 +7,6 @@
  * Copyright (C) 1999 SuSE GmbH Nuernberg
  * Copyright (C) 2000 Hewlett-Packard (John Marvin)
  * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <asm/asm-offsets.h>
@@ -32,14 +19,15 @@
 #include <asm/psw.h>
 #include <asm/cache.h>		/* for L1_CACHE_SHIFT */
 #include <asm/assembly.h>	/* for LDREG/STREG defines */
-#include <asm/pgtable.h>
 #include <asm/signal.h>
 #include <asm/unistd.h>
 #include <asm/ldcw.h>
 #include <asm/traps.h>
 #include <asm/thread_info.h>
+#include <asm/alternative.h>
 
 #include <linux/linkage.h>
+#include <linux/pgtable.h>
 
 #ifdef CONFIG_64BIT
 	.level 2.0w
@@ -47,14 +35,9 @@
 	.level 2.0
 #endif
 
-	.import		pa_tlb_lock,data
-	.macro	load_pa_tlb_lock reg
-#if __PA_LDCW_ALIGNMENT > 4
-	load32	PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
-	depi	0,31,__PA_LDCW_ALIGN_ORDER, \reg
-#else
-	load32	PA(pa_tlb_lock), \reg
-#endif
+	/* Get aligned page_table_lock address for this mm from cr28/tr4 */
+	.macro	get_ptl reg
+	mfctl	%cr28,\reg
 	.endm
 
 	/* space_to_prot macro creates a prot id from a space id */
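The new get_ptl above is one half of the switch from the single global pa_tlb_lock to each mm's own page_table_lock: the lock's pre-aligned physical address is parked in the free control register %cr28 (also written %tr4) when an address space is switched in, so the miss handlers fetch it with one mfctl instead of the load32/depi alignment dance that load_pa_tlb_lock needed. A minimal C sketch of that install step, assuming this helper shape (install_ptl_in_cr28 is a hypothetical name; the real code lives in the parisc mm-context switch path):

#include <linux/mm_types.h>	/* struct mm_struct */
#include <asm/ldcw.h>		/* __PA_LDCW_ALIGNMENT */
#include <asm/special_insns.h>	/* mtctl() */
#include <asm/page.h>		/* __pa() */

/* Hypothetical helper: make mm->page_table_lock reachable by the TLB
 * miss handlers through a single mfctl (see get_ptl above). */
static inline void install_ptl_in_cr28(struct mm_struct *mm)
{
	unsigned long addr = (unsigned long)&mm->page_table_lock;

	/* LDCW wants a 16-byte aligned word on PA 1.x, so round up
	 * once here rather than on every TLB miss. */
	addr = (addr + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1UL);

	/* Store the physical address: the handlers run with data
	 * translation off, so a virtual pointer would be useless. */
	mtctl(__pa(addr), 28);
}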
@@ -393,6 +376,7 @@
 	 */
 	.macro	space_check	spc,tmp,fault
 	mfsp	%sr7,\tmp
+	/* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
 	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
 					 * as kernel, so defeat the space
 					 * check if it is */
@@ -422,78 +406,69 @@
 # endif
 #endif
 	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
+#if CONFIG_PGTABLE_LEVELS < 3
 	copy		%r0,\pte
+#endif
 	ldw,s		\index(\pmd),\pmd
 	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
 	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
-	copy		\pmd,%r9
-	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
+	SHLREG		\pmd,PxD_VALUE_SHIFT,\pmd
 	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
 	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
 	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
-	LDREG		%r0(\pmd),\pte
-	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
 	.endm
 
-	/* Look up PTE in a 3-Level scheme.
-	 *
-	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
-	 * first pmd adjacent to the pgd. This means that we can
-	 * subtract a constant offset to get to it. The pmd and pgd
-	 * sizes are arranged so that a single pmd covers 4GB (giving
-	 * a full LP64 process access to 8TB) so our lookups are
-	 * effectively L2 for the first 4GB of the kernel (i.e. for
-	 * all ILP32 processes and all the kernel for machines with
-	 * under 4GB of memory) */
+	/* Look up PTE in a 3-Level scheme. */
 	.macro		L3_ptep pgd,pte,index,va,fault
-#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
-	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+#if CONFIG_PGTABLE_LEVELS == 3
 	copy		%r0,\pte
-	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 	ldw,s		\index(\pgd),\pgd
-	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
 	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
-	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
-	shld		\pgd,PxD_VALUE_SHIFT,\index
-	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
-	copy		\index,\pgd
-	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
-	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
+	shld		\pgd,PxD_VALUE_SHIFT,\pgd
 #endif
 	L2_ptep		\pgd,\pte,\index,\va,\fault
 	.endm
 
-	/* Acquire pa_tlb_lock lock and recheck page is still present. */
-	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
-#ifdef CONFIG_SMP
-	cmpib,COND(=),n	0,\spc,2f
-	load_pa_tlb_lock \tmp
+	/* Acquire page_table_lock and check page is present. */
+	.macro		ptl_lock	spc,ptp,pte,tmp,tmp1,fault
+#ifdef CONFIG_TLB_PTLOCK
+98:	cmpib,COND(=),n	0,\spc,2f
+	get_ptl		\tmp
 1:	LDCW		0(\tmp),\tmp1
 	cmpib,COND(=)	0,\tmp1,1b
 	nop
 	LDREG		0(\ptp),\pte
-	bb,<,n		\pte,_PAGE_PRESENT_BIT,2f
+	bb,<,n		\pte,_PAGE_PRESENT_BIT,3f
 	b		\fault
-	stw		\spc,0(\tmp)
-2:
+	stw		\spc,0(\tmp)
+99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+#endif
+2:	LDREG		0(\ptp),\pte
+	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
+3:
+	.endm
+
+	/* Release page_table_lock without reloading lock address.
+	   Note that the values in the register spc are limited to
+	   NR_SPACE_IDS (262144). Thus, the stw instruction always
+	   stores a nonzero value even when register spc is 64 bits.
+	   We use an ordered store to ensure all prior accesses are
+	   performed prior to releasing the lock. */
+	.macro		ptl_unlock0	spc,tmp
+#ifdef CONFIG_TLB_PTLOCK
+98:	or,COND(=)	%r0,\spc,%r0
+	stw,ma		\spc,0(\tmp)
+99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
 #endif
 	.endm
 
-	/* Release pa_tlb_lock lock without reloading lock address. */
-	.macro		tlb_unlock0	spc,tmp
-#ifdef CONFIG_SMP
-	or,COND(=)	%r0,\spc,%r0
-	sync
-	or,COND(=)	%r0,\spc,%r0
-	stw		\spc,0(\tmp)
-#endif
-	.endm
-
-	/* Release pa_tlb_lock lock. */
-	.macro		tlb_unlock1	spc,tmp
-#ifdef CONFIG_SMP
-	load_pa_tlb_lock \tmp
-	tlb_unlock0	\spc,\tmp
+	/* Release page_table_lock. */
+	.macro		ptl_unlock1	spc,tmp
+#ifdef CONFIG_TLB_PTLOCK
+98:	get_ptl		\tmp
+	ptl_unlock0	\spc,\tmp
+99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
 #endif
 	.endm
 
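Together, ptl_lock/ptl_unlock0/ptl_unlock1 form a classic PA-RISC ldcw spinlock around the per-mm page table lock, and the 98:/99: ALTERNATIVE(..., ALT_COND_NO_SMP, INSN_NOP) brackets let boot-time patching turn the whole locked sequence into NOPs on uniprocessor machines. Note also the delay-slot trick in ptl_lock: when the PTE turns out to be absent, the stw after "b \fault" still executes, so the lock is released on the way out. A behavioral model in C, built only from what the assembly above shows (a sketch, not kernel code):

/* LDCW atomically loads a word and clears it: reading 0 means the lock
 * is already held; storing any nonzero value releases it. */
typedef struct { volatile unsigned int word; } ptl_model_t;

static unsigned int ldcw_model(ptl_model_t *l)
{
	unsigned int old = l->word;	/* the real instruction does the     */
	l->word = 0;			/* load and clear in one atomic step */
	return old;
}

static void ptl_lock_model(ptl_model_t *l, unsigned int spc)
{
	if (spc == 0)			/* kernel space id: not locked,    */
		return;			/* mirrors the cmpib short-circuit */
	while (ldcw_model(l) == 0)
		;			/* spin until a nonzero value is read */
}

static void ptl_unlock_model(ptl_model_t *l, unsigned int spc)
{
	/* spc is below NR_SPACE_IDS and nonzero on this path, so this
	 * always stores an "unlocked" value; the asm uses stw,ma as an
	 * ordered store so prior PTE updates are visible first. */
	l->word = spc;
}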
@@ -910,19 +885,19 @@
 	 * Only do signals if we are returning to user space
 	 */
 	LDREG	PT_IASQ0(%r16), %r20
-	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
+	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
 	LDREG	PT_IASQ1(%r16), %r20
-	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
-
-	/* NOTE: We need to enable interrupts if we have to deliver
-	 * signals. We used to do this earlier but it caused kernel
-	 * stack overflows. */
-	ssm	PSW_SM_I, %r0
+	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
 
 	copy	%r0, %r25			/* long in_syscall = 0 */
 #ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29			/* Reference param save area */
 #endif
+
+	/* NOTE: We need to enable interrupts if we have to deliver
+	 * signals. We used to do this earlier but it caused kernel
+	 * stack overflows. */
+	ssm	PSW_SM_I, %r0
 
 	BL	do_notify_resume,%r2
 	copy	%r16, %r26			/* struct pt_regs *regs */
@@ -953,14 +928,14 @@
 	rfi
 	nop
 
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 # define intr_do_preempt	intr_restore
-#endif /* !CONFIG_PREEMPT */
+#endif /* !CONFIG_PREEMPTION */
 
 	.import schedule,code
 intr_do_resched:
 	/* Only call schedule on return to userspace. If we're returning
-	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
+	 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
 	 * we jump back to intr_restore.
 	 */
 	LDREG	PT_IASQ0(%r16), %r20
@@ -992,7 +967,7 @@
 	 * and preempt_count is 0. otherwise, we continue on
 	 * our merry way back to the current running task.
 	 */
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	.import preempt_schedule_irq,code
 intr_do_preempt:
 	rsm	PSW_SM_I, %r0		/* disable interrupts */
@@ -1008,11 +983,18 @@
 	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
 	nop
 
+	/* ssm PSW_SM_I done later in intr_restore */
+#ifdef CONFIG_MLONGCALLS
+	ldil	L%intr_restore, %r2
+	load32	preempt_schedule_irq, %r1
+	bv	%r0(%r1)
+	ldo	R%intr_restore(%r2), %r2
+#else
+	ldil	L%intr_restore, %r1
 	BL	preempt_schedule_irq, %r2
-	nop
-
-	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
-#endif /* CONFIG_PREEMPT */
+	ldo	R%intr_restore(%r1), %r2
+#endif
+#endif /* CONFIG_PREEMPTION */
 
 /*
  * External interrupts.
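Beyond the CONFIG_PREEMPTION rename, this hunk changes how the scheduler call returns: %r2, the return-link register, is pre-loaded with the address of intr_restore, so preempt_schedule_irq() "returns" directly into the restore path and the old nop / b,n intr_restore tail disappears. The CONFIG_MLONGCALLS arm reaches preempt_schedule_irq via load32 plus bv because a direct BL may be out of range in a large kernel. The resulting control flow, modeled as plain C (names stand in for the asm labels; illustrative only):

extern void preempt_schedule_irq(void);
extern void intr_restore_stub(void);	/* stand-in for the intr_restore label */

static void intr_do_preempt_tail(void)
{
	/* The asm installs intr_restore as the return continuation and
	 * then jumps, so these two steps execute as one hand-off. */
	preempt_schedule_irq();
	intr_restore_stub();
}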
@@ -1169,14 +1151,14 @@
 
 	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w
 
-	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
+	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1195,14 +1177,14 @@
 
 	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w
 
-	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
+	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1223,7 +1205,7 @@
 
 	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11
 
-	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
+	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
@@ -1236,7 +1218,7 @@
 
 	mtsp	t1, %sr1	/* Restore sr1 */
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1256,7 +1238,7 @@
 
 	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11
 
-	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
+	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
@@ -1269,7 +1251,7 @@
 
 	mtsp	t1, %sr1	/* Restore sr1 */
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1289,7 +1271,7 @@
 
 	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20
 
-	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
+	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
@@ -1298,7 +1280,7 @@
 
 	idtlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1317,7 +1299,7 @@
 
 	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20
 
-	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
+	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
@@ -1326,7 +1308,7 @@
 
 	idtlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1426,14 +1408,14 @@
 
 	L3_ptep		ptp,pte,t0,va,itlb_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
+	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
 
 	iitlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1450,14 +1432,14 @@
 
 	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w
 
-	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
+	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
 
 	iitlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1478,7 +1460,7 @@
 
 	L2_ptep		ptp,pte,t0,va,itlb_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
+	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
@@ -1491,7 +1473,7 @@
 
 	mtsp	t1, %sr1	/* Restore sr1 */
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1502,7 +1484,7 @@
 
 	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11
 
-	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
+	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
@@ -1515,7 +1497,7 @@
 
 	mtsp	t1, %sr1	/* Restore sr1 */
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1536,7 +1518,7 @@
 
 	L2_ptep		ptp,pte,t0,va,itlb_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
+	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
@@ -1545,7 +1527,7 @@
 
 	iitlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1556,7 +1538,7 @@
 
 	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20
 
-	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
+	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot,t1
@@ -1565,7 +1547,7 @@
 
 	iitlbt		pte,prot
 
-	tlb_unlock1	spc,t0
+	ptl_unlock1	spc,t0
 	rfir
 	nop
 
@@ -1588,14 +1570,14 @@
 
 	L3_ptep		ptp,pte,t0,va,dbit_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
+	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
 	update_dirty	ptp,pte,t1
 
 	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt		pte,prot
 
-	tlb_unlock0	spc,t0
+	ptl_unlock0	spc,t0
 	rfir
 	nop
 #else
@@ -1608,7 +1590,7 @@
 
 	L2_ptep		ptp,pte,t0,va,dbit_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
+	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
 	update_dirty	ptp,pte,t1
 
 	make_insert_tlb_11	spc,pte,prot
@@ -1621,7 +1603,7 @@
 
 	mtsp	t1, %sr1	/* Restore sr1 */
 
-	tlb_unlock0	spc,t0
+	ptl_unlock0	spc,t0
 	rfir
 	nop
 
@@ -1632,7 +1614,7 @@
 
 	L2_ptep		ptp,pte,t0,va,dbit_fault
 
-	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
+	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
 	update_dirty	ptp,pte,t1
 
 	make_insert_tlb	spc,pte,prot,t1
@@ -1641,7 +1623,7 @@
 
 	idtlbt		pte,prot
 
-	tlb_unlock0	spc,t0
+	ptl_unlock0	spc,t0
 	rfir
 	nop
 #endif
@@ -1658,7 +1640,7 @@
 
 itlb_fault:
 	b	intr_save
-	ldi	6,%r8
+	ldi	PARISC_ITLB_TRAP,%r8
 
 nadtlb_fault:
 	b	intr_save
@@ -1745,6 +1727,7 @@
 	.endm
 
 fork_like clone
+fork_like clone3
 fork_like fork
 fork_like vfork
 
@@ -2008,6 +1991,7 @@
 	 * calling mcount(), and 2 instructions for ftrace_stub(). That way we
 	 * have all on one L1 cacheline.
 	 */
+	ldi	0, %arg3
 	b	ftrace_function_trampoline
 	copy	%r3, %arg2	/* caller original %sp */
 ftrace_stub:
@@ -2025,6 +2009,168 @@
 #endif
 ENDPROC_CFI(mcount)
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#ifdef CONFIG_64BIT
+#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
+#else
+#define FTRACE_FRAME_SIZE FRAME_SIZE
+#endif
+ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
+ftrace_caller:
+	.global ftrace_caller
+
+	STREG	%r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
+	ldo	-FTRACE_FRAME_SIZE(%sp), %r3
+	STREG	%rp, -RP_OFFSET(%r3)
+
+	/* Offset 0 is already allocated for %r1 */
+	STREG	%r23, 2*REG_SZ(%r3)
+	STREG	%r24, 3*REG_SZ(%r3)
+	STREG	%r25, 4*REG_SZ(%r3)
+	STREG	%r26, 5*REG_SZ(%r3)
+	STREG	%r28, 6*REG_SZ(%r3)
+	STREG	%r29, 7*REG_SZ(%r3)
+#ifdef CONFIG_64BIT
+	STREG	%r19, 8*REG_SZ(%r3)
+	STREG	%r20, 9*REG_SZ(%r3)
+	STREG	%r21, 10*REG_SZ(%r3)
+	STREG	%r22, 11*REG_SZ(%r3)
+	STREG	%r27, 12*REG_SZ(%r3)
+	STREG	%r31, 13*REG_SZ(%r3)
+	loadgp
+	ldo	-16(%sp),%r29
+#endif
+	LDREG	0(%r3), %r25
+	copy	%rp, %r26
+	ldo	-8(%r25), %r25
+	ldi	0, %r23		/* no pt_regs */
+	b,l	ftrace_function_trampoline, %rp
+	copy	%r3, %r24
+
+	LDREG	-RP_OFFSET(%r3), %rp
+	LDREG	2*REG_SZ(%r3), %r23
+	LDREG	3*REG_SZ(%r3), %r24
+	LDREG	4*REG_SZ(%r3), %r25
+	LDREG	5*REG_SZ(%r3), %r26
+	LDREG	6*REG_SZ(%r3), %r28
+	LDREG	7*REG_SZ(%r3), %r29
+#ifdef CONFIG_64BIT
+	LDREG	8*REG_SZ(%r3), %r19
+	LDREG	9*REG_SZ(%r3), %r20
+	LDREG	10*REG_SZ(%r3), %r21
+	LDREG	11*REG_SZ(%r3), %r22
+	LDREG	12*REG_SZ(%r3), %r27
+	LDREG	13*REG_SZ(%r3), %r31
+#endif
+	LDREG	1*REG_SZ(%r3), %r3
+
+	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
+	/* Adjust return point to jump back to beginning of traced function */
+	ldo	-4(%r1), %r1
+	bv,n	(%r1)
+
+ENDPROC_CFI(ftrace_caller)
+
+#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
+ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
+	CALLS,SAVE_RP,SAVE_SP)
+ftrace_regs_caller:
+	.global ftrace_regs_caller
+
+	ldo	-FTRACE_FRAME_SIZE(%sp), %r1
+	STREG	%rp, -RP_OFFSET(%r1)
+
+	copy	%sp, %r1
+	ldo	PT_SZ_ALGN(%sp), %sp
+
+	STREG	%rp, PT_GR2(%r1)
+	STREG	%r3, PT_GR3(%r1)
+	STREG	%r4, PT_GR4(%r1)
+	STREG	%r5, PT_GR5(%r1)
+	STREG	%r6, PT_GR6(%r1)
+	STREG	%r7, PT_GR7(%r1)
+	STREG	%r8, PT_GR8(%r1)
+	STREG	%r9, PT_GR9(%r1)
+	STREG	%r10, PT_GR10(%r1)
+	STREG	%r11, PT_GR11(%r1)
+	STREG	%r12, PT_GR12(%r1)
+	STREG	%r13, PT_GR13(%r1)
+	STREG	%r14, PT_GR14(%r1)
+	STREG	%r15, PT_GR15(%r1)
+	STREG	%r16, PT_GR16(%r1)
+	STREG	%r17, PT_GR17(%r1)
+	STREG	%r18, PT_GR18(%r1)
+	STREG	%r19, PT_GR19(%r1)
+	STREG	%r20, PT_GR20(%r1)
+	STREG	%r21, PT_GR21(%r1)
+	STREG	%r22, PT_GR22(%r1)
+	STREG	%r23, PT_GR23(%r1)
+	STREG	%r24, PT_GR24(%r1)
+	STREG	%r25, PT_GR25(%r1)
+	STREG	%r26, PT_GR26(%r1)
+	STREG	%r27, PT_GR27(%r1)
+	STREG	%r28, PT_GR28(%r1)
+	STREG	%r29, PT_GR29(%r1)
+	STREG	%r30, PT_GR30(%r1)
+	STREG	%r31, PT_GR31(%r1)
+	mfctl	%cr11, %r26
+	STREG	%r26, PT_SAR(%r1)
+
+	copy	%rp, %r26
+	LDREG	-FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
+	ldo	-8(%r25), %r25
+	ldo	-FTRACE_FRAME_SIZE(%r1), %arg2
+	b,l	ftrace_function_trampoline, %rp
+	copy	%r1, %arg3 /* struct pt_regs */
+
+	ldo	-PT_SZ_ALGN(%sp), %r1
+
+	LDREG	PT_SAR(%r1), %rp
+	mtctl	%rp, %cr11
+
+	LDREG	PT_GR2(%r1), %rp
+	LDREG	PT_GR3(%r1), %r3
+	LDREG	PT_GR4(%r1), %r4
+	LDREG	PT_GR5(%r1), %r5
+	LDREG	PT_GR6(%r1), %r6
+	LDREG	PT_GR7(%r1), %r7
+	LDREG	PT_GR8(%r1), %r8
+	LDREG	PT_GR9(%r1), %r9
+	LDREG	PT_GR10(%r1),%r10
+	LDREG	PT_GR11(%r1),%r11
+	LDREG	PT_GR12(%r1),%r12
+	LDREG	PT_GR13(%r1),%r13
+	LDREG	PT_GR14(%r1),%r14
+	LDREG	PT_GR15(%r1),%r15
+	LDREG	PT_GR16(%r1),%r16
+	LDREG	PT_GR17(%r1),%r17
+	LDREG	PT_GR18(%r1),%r18
+	LDREG	PT_GR19(%r1),%r19
+	LDREG	PT_GR20(%r1),%r20
+	LDREG	PT_GR21(%r1),%r21
+	LDREG	PT_GR22(%r1),%r22
+	LDREG	PT_GR23(%r1),%r23
+	LDREG	PT_GR24(%r1),%r24
+	LDREG	PT_GR25(%r1),%r25
+	LDREG	PT_GR26(%r1),%r26
+	LDREG	PT_GR27(%r1),%r27
+	LDREG	PT_GR28(%r1),%r28
+	LDREG	PT_GR29(%r1),%r29
+	LDREG	PT_GR30(%r1),%r30
+	LDREG	PT_GR31(%r1),%r31
+
+	ldo	-PT_SZ_ALGN(%sp), %sp
+	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
+	/* Adjust return point to jump back to beginning of traced function */
+	ldo	-4(%r1), %r1
+	bv,n	(%r1)
+
+ENDPROC_CFI(ftrace_regs_caller)
+
+#endif
+#endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	.align 8
 ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
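Both trampolines funnel into the same C entry point using the parisc argument registers (%r26, %r25, %arg2, %arg3 carry arguments 0 through 3), and the earlier mcount hunk's "ldi 0, %arg3" makes the non-dynamic path pass the same "no pt_regs" marker. A hedged sketch of the prototype this register setup implies; the authoritative version lives in arch/parisc/kernel/ftrace.c and may differ in naming:

struct pt_regs;

/* Argument mapping read off the trampolines above. */
void ftrace_function_trampoline(unsigned long parent,	   /* %r26: caller's return address */
				unsigned long self_addr,   /* %r25: traced entry, after ldo -8 */
				unsigned long org_sp_gr3,  /* %arg2: trampoline frame / original %sp */
				struct pt_regs *regs);	   /* %arg3: NULL except from ftrace_regs_caller */

The closing "ldo -4(%r1), %r1; bv,n (%r1)" in both paths backs the saved %r1 up one instruction so execution resumes at the start of the traced function, as the comment in the code notes.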