/* SPDX-License-Identifier: GPL-2.0 */
/* clear_page.S: UltraSparc optimized clear page.
 *
 * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/pgtable.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/export.h>

	/* What we used to do was lock a TLB entry into a specific
	 * TLB slot, clear the page with interrupts disabled, then
	 * restore the original TLB entry.  This was great for
	 * disturbing the TLB as little as possible, but it meant
	 * we had to keep interrupts disabled for a long time.
	 *
	 * Now, we simply use the normal TLB loading mechanism,
	 * and this makes the cpu choose a slot all by itself.
	 * Then we do a normal TLB flush on exit.  We need only
	 * disable preemption during the clear.
	 */
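
	/* The clear_user_page path below, sketched in C with made-up
	 * helper names (not real kernel interfaces):
	 *
	 *	preempt_count_inc();
	 *	tte   = (dst - PAGE_OFFSET) | PAGE_KERNEL_LOCKED;
	 *	vaddr = TLBTEMP_BASE + (uaddr & PAGE_SIZE);
	 *	dtlb_load(vaddr, tte);	    // IRQs off just around this
	 *	memset(vaddr, 0, PAGE_SIZE);  // VIS block stores below
	 *	dtlb_demap(vaddr);
	 *	preempt_count_dec();
	 *
	 * _clear_page already has a kernel mapping for its argument
	 * and just runs the clear loop.
	 */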

	.text

	.globl		_clear_page
EXPORT_SYMBOL(_clear_page)
_clear_page:		/* %o0=dest */
	ba,pt		%xcc, clear_page_common
	 clr		%o4

	/* This thing is pretty important, it shows up
	 * on the profiles via do_anonymous_page().
	 */
	.align		32
	.globl		clear_user_page
EXPORT_SYMBOL(clear_user_page)
clear_user_page:	/* %o0=dest, %o1=vaddr */
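	/* Build a TTE for the destination page and map it at
	 * TLBTEMP_BASE offset by the vaddr's D-cache alias bit, so
	 * our stores hit the same cache color as the user mapping.
	 */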
	lduw		[%g6 + TI_PRE_COUNT], %o2	! save preempt count
	sethi		%hi(PAGE_OFFSET), %g2
	sethi		%hi(PAGE_SIZE), %o4

	ldx		[%g2 + %lo(PAGE_OFFSET)], %g2
	sethi		%hi(PAGE_KERNEL_LOCKED), %g3

	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
	sub		%o0, %g2, %g1		! paddr

	and		%o1, %o4, %o0		! vaddr D-cache alias bit

	or		%g1, %g3, %g1		! TTE data
	sethi		%hi(TLBTEMP_BASE), %o3

	add		%o2, 1, %o4		! bumped preempt count
	add		%o0, %o3, %o0		! TTE vaddr

	/* Disable preemption.  */
	mov		TLB_TAG_ACCESS, %g3
	stw		%o4, [%g6 + TI_PRE_COUNT]

	/* Load TLB entry with interrupts disabled; the cpu picks
	 * the replacement slot itself.
	 */
	rdpr		%pstate, %o4
	wrpr		%o4, PSTATE_IE, %pstate	! interrupts off
	stxa		%o0, [%g3] ASI_DMMU	! TTE vaddr -> TLB_TAG_ACCESS
	stxa		%g1, [%g0] ASI_DTLB_DATA_IN	! TTE data, cpu picks slot
	sethi		%hi(KERNBASE), %g1
	flush		%g1			! make the MMU writes take effect
	wrpr		%o4, 0x0, %pstate	! restore interrupt state

	mov		1, %o4			! demap on exit

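	/* Common clear loop.  %o0 is the (possibly just-mapped) dest,
	 * %o2 the saved preempt count, and %o4 non-zero when the
	 * TLBTEMP mapping must be demapped and preemption restored.
	 */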
clear_page_common:
	VISEntryHalf
	membar		#StoreLoad | #StoreStore | #LoadStore
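	/* %f0 and %f2 get zeroed, and every faddd/fmuld result below
	 * is therefore zero too: %f0-%f14 supply the 64 zero bytes
	 * each block store writes.
	 */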
	fzero		%f0
	sethi		%hi(PAGE_SIZE/64), %o1
	mov		%o0, %g1		! remember vaddr for tlbflush
	fzero		%f2
	or		%o1, %lo(PAGE_SIZE/64), %o1
	faddd		%f0, %f2, %f4
	fmuld		%f0, %f2, %f6
	faddd		%f0, %f2, %f8
	fmuld		%f0, %f2, %f10

	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
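	/* PAGE_SIZE/64 block stores, each clearing 64 bytes. */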
1:	stda		%f0, [%o0 + %g0] ASI_BLK_P	! store %f0-%f14
	subcc		%o1, 1, %o1
	bne,pt		%icc, 1b
	 add		%o0, 0x40, %o0
	membar		#Sync
	VISExitHalf

	brz,pn		%o4, out	! _clear_page: nothing to demap
	 nop

	stxa		%g0, [%g1] ASI_DMMU_DEMAP	! flush temporary TLB entry
	membar		#Sync
	stw		%o2, [%g6 + TI_PRE_COUNT]	! restore preempt count

out:	retl
	 nop