forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -2,6 +2,8 @@
 #ifndef _ASM_POWERPC_BOOK3S_64_MMU_H_
 #define _ASM_POWERPC_BOOK3S_64_MMU_H_
 
+#include <asm/page.h>
+
 #ifndef __ASSEMBLY__
 /*
  * Page size definition
@@ -23,7 +25,6 @@
 	};
 };
 extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
-
 #endif /* __ASSEMBLY__ */
 
 /* 64-bit classic hash table MMU */
@@ -66,6 +67,11 @@
 /* Base PID to allocate from */
 extern unsigned int mmu_base_pid;
 
+/*
+ * memory block size used with radix translation.
+ */
+extern unsigned long __ro_after_init radix_mem_block_size;
+
 #define PRTB_SIZE_SHIFT	(mmu_pid_bits + 4)
 #define PRTB_ENTRIES	(1ul << mmu_pid_bits)
 
@@ -80,16 +86,6 @@
 /* Maximum possible number of NPUs in a system. */
 #define NV_MAX_NPUS 8
 
-/*
- * One bit per slice. We have lower slices which cover 256MB segments
- * upto 4G range. That gets us 16 low slices. For the rest we track slices
- * in 1TB size.
- */
-struct slice_mask {
-	u64 low_slices;
-	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
-};
-
 typedef struct {
 	union {
 		/*
@@ -103,7 +99,6 @@
 		mm_context_id_t id;
 		mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
 	};
-	u16 user_psize;		/* page size index */
 
 	/* Number of bits in the mm_cpumask */
 	atomic_t active_cpus;
@@ -111,29 +106,12 @@
 	/* Number of users of the external (Nest) MMU */
 	atomic_t copros;
 
-	/* NPU NMMU context */
-	struct npu_context *npu_context;
+	/* Number of user space windows opened in process mm_context */
+	atomic_t vas_windows;
 
-#ifdef CONFIG_PPC_MM_SLICES
-	 /* SLB page size encodings*/
-	unsigned char low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
-	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
-	unsigned long slb_addr_limit;
-# ifdef CONFIG_PPC_64K_PAGES
-	struct slice_mask mask_64k;
-# endif
-	struct slice_mask mask_4k;
-# ifdef CONFIG_HUGETLB_PAGE
-	struct slice_mask mask_16m;
-	struct slice_mask mask_16g;
-# endif
-#else
-	u16 sllp;		/* SLB page size encoding */
-#endif
+	struct hash_mm_context *hash_context;
+
 	unsigned long vdso_base;
-#ifdef CONFIG_PPC_SUBPAGE_PROT
-	struct subpage_prot_table spt;
-#endif /* CONFIG_PPC_SUBPAGE_PROT */
 	/*
 	 * pagetable fragment support
 	 */
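Note: the fields removed from mm_context_t above are not deleted outright; together with the struct slice_mask definition dropped two hunks earlier, they move behind the new hash_context pointer so that radix-only configurations carry no hash-specific state. The struct itself is not part of this diff (in mainline it sits next to the hash MMU code in asm/book3s/64/mmu-hash.h); a sketch of its implied layout, inferred from the accessors added in the next hunk and the members removed here:

/*
 * Sketch only: inferred from this diff, not copied from the tree.
 * Field names match the accessors in the next hunk; array sizes follow
 * the removed mm_context_t members.
 */
struct hash_mm_context {
	u16 user_psize;		/* page size index */

	/* SLB page size encodings */
	unsigned char low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
	unsigned long slb_addr_limit;

#ifdef CONFIG_PPC_64K_PAGES
	struct slice_mask mask_64k;
#endif
	struct slice_mask mask_4k;
#ifdef CONFIG_HUGETLB_PAGE
	struct slice_mask mask_16m;
	struct slice_mask mask_16g;
#endif

#ifdef CONFIG_PPC_SUBPAGE_PROT
	/* a pointer now: mm_ctx_subpage_prot() below returns it directly */
	struct subpage_prot_table *spt;
#endif
};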
@@ -154,6 +132,60 @@
 #endif
 } mm_context_t;
 
+static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
+{
+	return ctx->hash_context->user_psize;
+}
+
+static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
+{
+	ctx->hash_context->user_psize = user_psize;
+}
+
+static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
+{
+	return ctx->hash_context->low_slices_psize;
+}
+
+static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
+{
+	return ctx->hash_context->high_slices_psize;
+}
+
+static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
+{
+	return ctx->hash_context->slb_addr_limit;
+}
+
+static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
+{
+	ctx->hash_context->slb_addr_limit = limit;
+}
+
+static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
+{
+#ifdef CONFIG_PPC_64K_PAGES
+	if (psize == MMU_PAGE_64K)
+		return &ctx->hash_context->mask_64k;
+#endif
+#ifdef CONFIG_HUGETLB_PAGE
+	if (psize == MMU_PAGE_16M)
+		return &ctx->hash_context->mask_16m;
+	if (psize == MMU_PAGE_16G)
+		return &ctx->hash_context->mask_16g;
+#endif
+	BUG_ON(psize != MMU_PAGE_4K);
+
+	return &ctx->hash_context->mask_4k;
+}
+
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
+{
+	return ctx->hash_context->spt;
+}
+#endif
+
 /*
  * The current system page and segment sizes
  */
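With the indirection in place, callers read and write hash MMU state through these helpers instead of touching mm_context_t fields directly. A minimal illustrative caller (not part of this diff; MMU_PAGE_64K is an existing page-size index):

/* Illustrative only: shows the accessor pattern this hunk introduces. */
static void example_switch_to_64k(struct mm_struct *mm)
{
	mm_context_t *ctx = &mm->context;

	if (mm_ctx_user_psize(ctx) != MMU_PAGE_64K)
		mm_ctx_set_user_psize(ctx, MMU_PAGE_64K);

	/* slb_addr_limit and the slice masks follow the same pattern. */
	mm_ctx_set_slb_addr_limit(ctx, mm_ctx_slb_addr_limit(ctx));
}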
@@ -167,10 +199,15 @@
 void mmu_early_init_devtree(void);
 void hash__early_init_devtree(void);
 void radix__early_init_devtree(void);
-extern void radix_init_native(void);
+#ifdef CONFIG_PPC_MEM_KEYS
+void pkey_early_init_devtree(void);
+#else
+static inline void pkey_early_init_devtree(void) {}
+#endif
+
 extern void hash__early_init_mmu(void);
 extern void radix__early_init_mmu(void);
-static inline void early_init_mmu(void)
+static inline void __init early_init_mmu(void)
 {
 	if (radix_enabled())
 		return radix__early_init_mmu();
@@ -187,20 +224,17 @@
 
 extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
					      phys_addr_t first_memblock_size);
-extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
-					      phys_addr_t first_memblock_size);
 static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
					      phys_addr_t first_memblock_size)
 {
-	if (early_radix_enabled())
-		return radix__setup_initial_memory_limit(first_memblock_base,
-							 first_memblock_size);
+	/*
+	 * Hash has more strict restrictions. At this point we don't
+	 * know which translations we will pick. Hence go with hash
+	 * restrictions.
+	 */
 	return hash__setup_initial_memory_limit(first_memblock_base,
						first_memblock_size);
 }
-
-extern int (*register_process_table)(unsigned long base, unsigned long page_size,
-				     unsigned long tbl_size);
 
 #ifdef CONFIG_PPC_PSERIES
 extern void radix_init_pseries(void);
@@ -208,7 +242,19 @@
 static inline void radix_init_pseries(void) { };
 #endif
 
-static inline int get_ea_context(mm_context_t *ctx, unsigned long ea)
+#ifdef CONFIG_HOTPLUG_CPU
+#define arch_clear_mm_cpumask_cpu(cpu, mm)				\
+	do {								\
+		if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {		\
+			atomic_dec(&(mm)->context.active_cpus);		\
+			cpumask_clear_cpu(cpu, mm_cpumask(mm));		\
+		}							\
+	} while (0)
+
+void cleanup_cpu_mmu_context(void);
+#endif
+
+static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
 {
 	int index = ea >> MAX_EA_BITS_PER_CONTEXT;
 
@@ -223,7 +269,7 @@
 static inline unsigned long get_user_vsid(mm_context_t *ctx,
					  unsigned long ea, int ssize)
 {
-	unsigned long context = get_ea_context(ctx, ea);
+	unsigned long context = get_user_context(ctx, ea);
 
 	return get_vsid(context, ea, ssize);
 }
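The get_ea_context() to get_user_context() rename is mechanical; get_user_vsid() above is the caller updated to match. For orientation, the index it computes selects an entry in the extended_id[] array seen earlier in mm_context_t. Assuming MAX_EA_BITS_PER_CONTEXT is 49 (the usual book3s64 hash value; it is config-dependent, so treat the numbers as illustrative), each context covers a 512 TB window of effective-address space:

/* Worked example under the assumption MAX_EA_BITS_PER_CONTEXT == 49. */
unsigned long ea_low  = 0x0000400000000000UL;	/*  64 TB */
unsigned long ea_high = 0x0002000000000000UL;	/* 512 TB */

int idx_low  = ea_low  >> 49;	/* 0 -> ctx->id */
int idx_high = ea_high >> 49;	/* 1 -> ctx->extended_id[1] */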