.. | .. |
| 1 | +/* SPDX-License-Identifier: GPL-2.0-only */ |
1 | 2 | /* |
2 | 3 | * Copyright (C) 2012 Regents of the University of California |
3 | | - * |
4 | | - * This program is free software; you can redistribute it and/or |
5 | | - * modify it under the terms of the GNU General Public License |
6 | | - * as published by the Free Software Foundation, version 2. |
7 | | - * |
8 | | - * This program is distributed in the hope that it will be useful, |
9 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
11 | | - * GNU General Public License for more details. |
12 | 4 | */ |
13 | 5 | |
14 | 6 | #ifndef _ASM_RISCV_PGTABLE_H |
15 | 7 | #define _ASM_RISCV_PGTABLE_H |
16 | 8 | |
17 | 9 | #include <linux/mmzone.h> |
| 10 | +#include <linux/sizes.h> |
18 | 11 | |
19 | 12 | #include <asm/pgtable-bits.h> |
20 | 13 | |
.. | .. |
26 | 19 | #include <asm/tlbflush.h> |
27 | 20 | #include <linux/mm_types.h> |
28 | 21 | |
| 22 | +#ifdef CONFIG_MMU |
| 23 | + |
| 24 | +#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) |
| 25 | +#define VMALLOC_END (PAGE_OFFSET - 1) |
| 26 | +#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) |
| 27 | + |
| 28 | +#define BPF_JIT_REGION_SIZE (SZ_128M) |
| 29 | +#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE) |
| 30 | +#define BPF_JIT_REGION_END (VMALLOC_END) |
| 31 | + |
| 32 | +/* |
| 33 | + * Roughly size the vmemmap space to be large enough to fit enough |
| 34 | + * struct pages to map half the virtual address space. Then |
| 35 | + * position vmemmap directly below the VMALLOC region. |
| 36 | + */ |
| 37 | +#define VMEMMAP_SHIFT \ |
| 38 | + (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT) |
| 39 | +#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT) |
| 40 | +#define VMEMMAP_END (VMALLOC_START - 1) |
| 41 | +#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE) |
| 42 | + |
| 43 | +/* |
| 44 | + * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel |
| 45 | + * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled. |
| 46 | + */ |
| 47 | +#define vmemmap ((struct page *)VMEMMAP_START) |
| 48 | + |
| 49 | +#define PCI_IO_SIZE SZ_16M |
| 50 | +#define PCI_IO_END VMEMMAP_START |
| 51 | +#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) |
| 52 | + |
| 53 | +#define FIXADDR_TOP PCI_IO_START |
| 54 | +#ifdef CONFIG_64BIT |
| 55 | +#define FIXADDR_SIZE PMD_SIZE |
| 56 | +#else |
| 57 | +#define FIXADDR_SIZE PGDIR_SIZE |
| 58 | +#endif |
| 59 | +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) |
| 60 | + |
| 61 | +#endif |
| 62 | + |
29 | 63 | #ifdef CONFIG_64BIT |
30 | 64 | #include <asm/pgtable-64.h> |
31 | 65 | #else |
32 | 66 | #include <asm/pgtable-32.h> |
33 | 67 | #endif /* CONFIG_64BIT */ |
34 | 68 | |
| 69 | +#ifdef CONFIG_MMU |
35 | 70 | /* Number of entries in the page global directory */ |
36 | 71 | #define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t)) |
37 | 72 | /* Number of entries in the page table */ |
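Note: the CONFIG_MMU block added in the hunk above anchors the kernel map at PAGE_OFFSET and stacks the vmalloc area, the BPF JIT region, the vmemmap array, the PCI I/O window and the fixmap below it, each region derived from the previous one. The sketch below recomputes that arithmetic for one assumed configuration (Sv39: PAGE_OFFSET = 0xffffffe000000000, CONFIG_VA_BITS = 39, 4 KiB pages, a 64-byte struct page, and KERN_VIRT_SIZE taken as -PAGE_OFFSET); none of these concrete values come from the patch itself, they only illustrate how the macros compose.

/*
 * Illustrative only: recomputes the layout macros above for an assumed
 * Sv39 configuration. PAGE_OFFSET, VA_BITS, STRUCT_PAGE_MAX_SHIFT and
 * KERN_VIRT_SIZE = -PAGE_OFFSET are assumptions, not values from the patch.
 */
#include <stdio.h>

#define PAGE_SHIFT              12
#define VA_BITS                 39
#define STRUCT_PAGE_MAX_SHIFT   6       /* 64-byte struct page */
#define PAGE_OFFSET             0xffffffe000000000ULL
#define KERN_VIRT_SIZE          (0ULL - PAGE_OFFSET)

int main(void)
{
        unsigned long long vmalloc_size  = KERN_VIRT_SIZE >> 1;
        unsigned long long vmalloc_start = PAGE_OFFSET - vmalloc_size;
        unsigned long long vmemmap_size  =
                1ULL << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT);
        unsigned long long vmemmap_start = vmalloc_start - vmemmap_size;
        unsigned long long pci_io_start  = vmemmap_start - (16ULL << 20);
        unsigned long long fixaddr_start = pci_io_start - (2ULL << 20);  /* PMD_SIZE */

        printf("vmalloc : %#llx - %#llx\n", vmalloc_start, PAGE_OFFSET - 1);
        printf("BPF JIT : %#llx - %#llx\n", PAGE_OFFSET - (128ULL << 20), PAGE_OFFSET - 1);
        printf("vmemmap : %#llx - %#llx\n", vmemmap_start, vmalloc_start - 1);
        printf("PCI I/O : %#llx - %#llx\n", pci_io_start, vmemmap_start - 1);
        printf("fixmap  : %#llx - %#llx\n", fixaddr_start, pci_io_start - 1);
        return 0;
}

Under those assumptions the regions come out contiguous and strictly below PAGE_OFFSET, which is the property the VMEMMAP_END/PCI_IO_END/FIXADDR_TOP chaining is designed to guarantee.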
.. | .. |
39 | 74 | |
40 | 75 | /* Number of PGD entries that a user-mode program can use */ |
41 | 76 | #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) |
42 | | -#define FIRST_USER_ADDRESS 0 |
43 | 77 | |
44 | 78 | /* Page protection bits */ |
45 | 79 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER) |
.. | .. |
65 | 99 | | _PAGE_DIRTY) |
66 | 100 | |
67 | 101 | #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) |
| 102 | +#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE) |
68 | 103 | #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC) |
| 104 | +#define PAGE_KERNEL_READ_EXEC __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \ |
| 105 | + | _PAGE_EXEC) |
| 106 | + |
| 107 | +#define PAGE_TABLE __pgprot(_PAGE_TABLE) |
| 108 | + |
| 109 | +/* |
| 110 | + * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't |
| 111 | + * change the properties of memory regions. |
| 112 | + */ |
| 113 | +#define _PAGE_IOREMAP _PAGE_KERNEL |
69 | 114 | |
70 | 115 | extern pgd_t swapper_pg_dir[]; |
71 | 116 | |
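Note: PAGE_KERNEL_READ and PAGE_KERNEL_READ_EXEC added above are derived from _PAGE_KERNEL purely by masking out the W bit (and OR-ing in X for the executable variant), so kernel mappings can be made non-writable without touching any other attribute. A minimal stand-alone sketch of that masking, using illustrative flag values in the RISC-V PTE bit positions and a stand-in for _PAGE_KERNEL rather than the real asm/pgtable-bits.h definitions:

/*
 * Sketch only: flag values assume the RISC-V PTE bit layout (R=1, W=2, X=3);
 * FAKE_PAGE_KERNEL stands in for _PAGE_KERNEL.
 */
#include <assert.h>

#define _PAGE_READ      (1 << 1)
#define _PAGE_WRITE     (1 << 2)
#define _PAGE_EXEC      (1 << 3)

#define FAKE_PAGE_KERNEL                (_PAGE_READ | _PAGE_WRITE)
#define FAKE_PAGE_KERNEL_READ           (FAKE_PAGE_KERNEL & ~_PAGE_WRITE)
#define FAKE_PAGE_KERNEL_READ_EXEC      ((FAKE_PAGE_KERNEL & ~_PAGE_WRITE) | _PAGE_EXEC)

int main(void)
{
        /* The read-only variants must not carry the W bit... */
        assert(!(FAKE_PAGE_KERNEL_READ & _PAGE_WRITE));
        assert(!(FAKE_PAGE_KERNEL_READ_EXEC & _PAGE_WRITE));
        /* ...and the exec variant must carry X. */
        assert(FAKE_PAGE_KERNEL_READ_EXEC & _PAGE_EXEC);
        return 0;
}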
.. | .. |
89 | 134 | #define __S110 PAGE_SHARED_EXEC |
90 | 135 | #define __S111 PAGE_SHARED_EXEC |
91 | 136 | |
92 | | -/* |
93 | | - * ZERO_PAGE is a global shared page that is always zero, |
94 | | - * used for zero-mapped memory areas, etc. |
95 | | - */ |
96 | | -extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; |
97 | | -#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) |
98 | | - |
99 | 137 | static inline int pmd_present(pmd_t pmd) |
100 | 138 | { |
101 | 139 | return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); |
.. | .. |
111 | 149 | return !pmd_present(pmd); |
112 | 150 | } |
113 | 151 | |
| 152 | +#define pmd_leaf pmd_leaf |
| 153 | +static inline int pmd_leaf(pmd_t pmd) |
| 154 | +{ |
| 155 | + return pmd_present(pmd) && |
| 156 | + (pmd_val(pmd) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)); |
| 157 | +} |
| 158 | + |
114 | 159 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) |
115 | 160 | { |
116 | 161 | *pmdp = pmd; |
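Note: the new pmd_leaf() encodes the RISC-V page-table rule that a valid entry with R, W and X all clear is a pointer to a next-level table, while any of those bits set makes the entry a leaf (a 2 MiB mapping at PMD level on Sv39). A stand-alone sketch of the same test, with assumed bit positions and PFN shift standing in for the asm/pgtable-bits.h values:

/*
 * Sketch: bit positions follow the RISC-V PTE format; the PFN shift of 10
 * mirrors _PAGE_PFN_SHIFT but is an assumption here, and the present check
 * is simplified (no _PAGE_PROT_NONE handling).
 */
#include <stdio.h>

#define PAGE_PRESENT  (1UL << 0)
#define PAGE_READ     (1UL << 1)
#define PAGE_WRITE    (1UL << 2)
#define PAGE_EXEC     (1UL << 3)
#define PFN_SHIFT     10

static int is_leaf(unsigned long pmd)
{
        return (pmd & PAGE_PRESENT) &&
               (pmd & (PAGE_READ | PAGE_WRITE | PAGE_EXEC));
}

int main(void)
{
        unsigned long pfn = 0x80200;
        /* R/W leaf entry: maps a huge page directly at PMD level. */
        unsigned long leaf  = (pfn << PFN_SHIFT) | PAGE_PRESENT | PAGE_READ | PAGE_WRITE;
        /* R=W=X=0: valid entry that only points at the next-level table. */
        unsigned long table = (pfn << PFN_SHIFT) | PAGE_PRESENT;

        printf("leaf:  %d\n", is_leaf(leaf));   /* prints 1 */
        printf("table: %d\n", is_leaf(table));  /* prints 0 */
        return 0;
}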
.. | .. |
121 | 166 | set_pmd(pmdp, __pmd(0)); |
122 | 167 | } |
123 | 168 | |
124 | | - |
125 | 169 | static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot) |
126 | 170 | { |
127 | 171 | return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot)); |
128 | 172 | } |
129 | 173 | |
130 | | -#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) |
131 | | - |
132 | | -/* Locate an entry in the page global directory */ |
133 | | -static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr) |
| 174 | +static inline unsigned long _pgd_pfn(pgd_t pgd) |
134 | 175 | { |
135 | | - return mm->pgd + pgd_index(addr); |
| 176 | + return pgd_val(pgd) >> _PAGE_PFN_SHIFT; |
136 | 177 | } |
137 | | -/* Locate an entry in the kernel page global directory */ |
138 | | -#define pgd_offset_k(addr) pgd_offset(&init_mm, (addr)) |
139 | 178 | |
140 | 179 | static inline struct page *pmd_page(pmd_t pmd) |
141 | 180 | { |
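Note: pfn_pgd() and the new _pgd_pfn() are inverses: the frame number sits above _PAGE_PFN_SHIFT and the attribute bits below it, so shifting right recovers the PFN (useful, for example, when early MMU setup needs the root table's PPN). A round-trip sketch under the assumption that the shift is 10, as in the RISC-V PTE format:

/*
 * Round-trip sketch for pfn_pgd()/_pgd_pfn(); the shift of 10 mirrors
 * _PAGE_PFN_SHIFT but is an assumption here.
 */
#include <assert.h>

#define PFN_SHIFT     10
#define PAGE_PRESENT  (1UL << 0)

static unsigned long mk_pgd(unsigned long pfn, unsigned long prot)
{
        return (pfn << PFN_SHIFT) | prot;        /* like pfn_pgd() */
}

static unsigned long pgd_pfn(unsigned long pgd)
{
        return pgd >> PFN_SHIFT;                 /* like _pgd_pfn() */
}

int main(void)
{
        unsigned long pfn = 0x80210;
        unsigned long pgd = mk_pgd(pfn, PAGE_PRESENT);

        assert(pgd_pfn(pgd) == pfn);             /* prot bits sit below the PFN */
        return 0;
}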
.. | .. |
161 | 200 | return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot)); |
162 | 201 | } |
163 | 202 | |
164 | | -static inline pte_t mk_pte(struct page *page, pgprot_t prot) |
165 | | -{ |
166 | | - return pfn_pte(page_to_pfn(page), prot); |
167 | | -} |
168 | | - |
169 | | -#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) |
170 | | - |
171 | | -static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr) |
172 | | -{ |
173 | | - return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr); |
174 | | -} |
175 | | - |
176 | | -#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr)) |
177 | | -#define pte_unmap(pte) ((void)(pte)) |
| 203 | +#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) |
178 | 204 | |
179 | 205 | static inline int pte_present(pte_t pte) |
180 | 206 | { |
.. | .. |
256 | 282 | static inline pte_t pte_mkspecial(pte_t pte) |
257 | 283 | { |
258 | 284 | return __pte(pte_val(pte) | _PAGE_SPECIAL); |
| 285 | +} |
| 286 | + |
| 287 | +static inline pte_t pte_mkhuge(pte_t pte) |
| 288 | +{ |
| 289 | + return pte; |
259 | 290 | } |
260 | 291 | |
261 | 292 | /* Modify page protection bits */ |
.. | .. |
400 | 431 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) |
401 | 432 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) |
402 | 433 | |
403 | | -#ifdef CONFIG_FLATMEM |
404 | | -#define kern_addr_valid(addr) (1) /* FIXME */ |
| 434 | +/* |
| 435 | + * In the RV64 Linux scheme, we give the user half of the virtual-address space |
| 436 | + * and give the kernel the other (upper) half. |
| 437 | + */ |
| 438 | +#ifdef CONFIG_64BIT |
| 439 | +#define KERN_VIRT_START (-(BIT(CONFIG_VA_BITS)) + TASK_SIZE) |
| 440 | +#else |
| 441 | +#define KERN_VIRT_START FIXADDR_START |
405 | 442 | #endif |
406 | 443 | |
407 | | -extern void paging_init(void); |
408 | | - |
409 | | -static inline void pgtable_cache_init(void) |
410 | | -{ |
411 | | - /* No page table caches to initialize */ |
412 | | -} |
413 | | - |
414 | | -#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) |
415 | | -#define VMALLOC_END (PAGE_OFFSET - 1) |
416 | | -#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) |
417 | | - |
418 | 444 | /* |
419 | | - * Task size is 0x40000000000 for RV64 or 0xb800000 for RV32. |
| 445 | + * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32. |
420 | 446 | * Note that PGDIR_SIZE must evenly divide TASK_SIZE. |
421 | 447 | */ |
422 | 448 | #ifdef CONFIG_64BIT |
423 | 449 | #define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2) |
424 | 450 | #else |
425 | | -#define TASK_SIZE VMALLOC_START |
| 451 | +#define TASK_SIZE FIXADDR_START |
426 | 452 | #endif |
427 | 453 | |
428 | | -#include <asm-generic/pgtable.h> |
| 454 | +#else /* CONFIG_MMU */ |
| 455 | + |
| 456 | +#define PAGE_SHARED __pgprot(0) |
| 457 | +#define PAGE_KERNEL __pgprot(0) |
| 458 | +#define swapper_pg_dir NULL |
| 459 | +#define TASK_SIZE 0xffffffffUL |
| 460 | +#define VMALLOC_START 0 |
| 461 | +#define VMALLOC_END TASK_SIZE |
| 462 | + |
| 463 | +static inline void __kernel_map_pages(struct page *page, int numpages, int enable) {} |
| 464 | + |
| 465 | +#endif /* !CONFIG_MMU */ |
| 466 | + |
| 467 | +#define kern_addr_valid(addr) (1) /* FIXME */ |
| 468 | + |
| 469 | +extern void *dtb_early_va; |
| 470 | +extern uintptr_t dtb_early_pa; |
| 471 | +void setup_bootmem(void); |
| 472 | +void paging_init(void); |
| 473 | +void misc_mem_init(void); |
| 474 | + |
| 475 | +#define FIRST_USER_ADDRESS 0 |
| 476 | + |
| 477 | +/* |
| 478 | + * ZERO_PAGE is a global shared page that is always zero, |
| 479 | + * used for zero-mapped memory areas, etc. |
| 480 | + */ |
| 481 | +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; |
| 482 | +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) |
429 | 483 | |
430 | 484 | #endif /* !__ASSEMBLY__ */ |
431 | 485 | |
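Note: the corrected comment values follow directly from the macros in this hunk. For RV64 with Sv39, PGDIR_SIZE = 1 GiB and PTRS_PER_PGD = 4096 / 8 = 512, so TASK_SIZE = 2^30 * 512 / 2 = 0x4000000000; KERN_VIRT_START is then -(2^39) + 0x4000000000 = 0xffffffc000000000, giving user space the lower half of the 39-bit range and the kernel the upper half. A short check of that arithmetic (the Sv39 parameters are assumptions, not part of the patch):

/*
 * Arithmetic check for the RV64/Sv39 case; PGDIR_SHIFT = 30 and VA_BITS = 39
 * are assumed Sv39 parameters, not values taken from the patch.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long pgdir_size      = 1ULL << 30;   /* 1 GiB */
        unsigned long long ptrs_per_pgd    = 512;          /* 4 KiB page / 8-byte pgd_t */
        unsigned long long task_size       = pgdir_size * ptrs_per_pgd / 2;
        unsigned long long kern_virt_start = -(1ULL << 39) + task_size;

        printf("TASK_SIZE       = %#llx\n", task_size);        /* 0x4000000000 */
        printf("KERN_VIRT_START = %#llx\n", kern_virt_start);  /* 0xffffffc000000000 */
        return 0;
}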
---|