2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -5,31 +5,6 @@
 #include <linux/threads.h>
 #include <linux/slab.h>
 
-/*
- * Functions that deal with pagetables that could be at any level of
- * the table need to be passed an "index_size" so they know how to
- * handle allocation.  For PTE pages (which are linked to a struct
- * page for now, and drawn from the main get_free_pages() pool), the
- * allocation size will be (2^index_size * sizeof(pointer)) and
- * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
- *
- * The maximum index size needs to be big enough to allow any
- * pagetable sizes we need, but small enough to fit in the low bits of
- * any page table pointer.  In other words all pagetables, even tiny
- * ones, must be aligned to allow at least enough low 0 bits to
- * contain this value.  This value is also used as a mask, so it must
- * be one less than a power of two.
- */
-#define MAX_PGTABLE_INDEX_SIZE	0xf
-
-extern void __bad_pte(pmd_t *pmd);
-
-extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) ({				\
-			BUG_ON(!(shift));		\
-			pgtable_cache[(shift) - 1];	\
-		})
-
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
@@ -50,8 +25,6 @@
 #define __pmd_free_tlb(tlb,x,a)		do { } while (0)
 /* #define pgd_populate(mm, pmd, pte)      BUG() */
 
-#ifndef CONFIG_BOOKE
-
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
 				pte_t *pte)
 {
@@ -61,56 +34,21 @@
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pte_page)
 {
-	*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT);
-}
-
-#define pmd_pgtable(pmd) pmd_page(pmd)
-#else
-
-static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
-				pte_t *pte)
-{
-	*pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
-}
-
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
-				pgtable_t pte_page)
-{
-	*pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
-}
-
-#define pmd_pgtable(pmd) pmd_page(pmd)
-#endif
-
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
-extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
-	pgtable_page_dtor(ptepage);
-	__free_page(ptepage);
+	*pmdp = __pmd(__pa(pte_page) | _PMD_PRESENT);
 }
 
 static inline void pgtable_free(void *table, unsigned index_size)
 {
 	if (!index_size) {
-		pgtable_page_dtor(virt_to_page(table));
-		free_page((unsigned long)table);
+		pte_fragment_free((unsigned long *)table, 0);
 	} else {
 		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
 		kmem_cache_free(PGT_CACHE(index_size), table);
 	}
 }
 
-#define check_pgt_cache()	do { } while (0)
 #define get_hugepd_cache_index(x)  (x)
 
-#ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 				    void *table, int shift)
 {
@@ -127,17 +65,10 @@
 
 	pgtable_free(table, shift);
 }
-#else
-static inline void pgtable_free_tlb(struct mmu_gather *tlb,
-				    void *table, int shift)
-{
-	pgtable_free(table, shift);
-}
-#endif
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
 {
-	pgtable_free_tlb(tlb, page_address(table), 0);
+	pgtable_free_tlb(tlb, table, 0);
 }
 #endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */
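For context on the comment removed in the first hunk: the index_size convention packs a table's size class into the low bits of an aligned page-table pointer, which is why MAX_PGTABLE_INDEX_SIZE must be one less than a power of two (it doubles as a mask). Below is a minimal standalone C sketch of that encoding; it is not part of the patch, and everything in it except MAX_PGTABLE_INDEX_SIZE is a hypothetical stand-in for illustration only.

/* Userspace illustration of the "index_size in low pointer bits" scheme
 * described in the removed comment.  Not kernel code. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_PGTABLE_INDEX_SIZE 0xf	/* one less than a power of two, usable as a mask */

int main(void)
{
	unsigned int index_size = 9;	/* hypothetical size class, e.g. a PGD index size */
	size_t size = (1UL << index_size) * sizeof(void *);	/* 2^index_size * sizeof(pointer) */

	/* A table aligned to its own size leaves enough low 0 bits to hold the index. */
	void *table = aligned_alloc(size, size);
	if (!table)
		return 1;

	uintptr_t tagged = (uintptr_t)table | index_size;		/* pack index into low bits */
	unsigned int idx = tagged & MAX_PGTABLE_INDEX_SIZE;		/* recover index via the mask */
	void *ptr = (void *)(tagged & ~(uintptr_t)MAX_PGTABLE_INDEX_SIZE);	/* recover pointer */

	printf("size=%zu idx=%u ptr_ok=%d\n", size, idx, ptr == table);
	free(table);
	return 0;
}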