| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * CPU-agnostic ARM page table allocator. |
|---|
| 3 | | - * |
|---|
| 4 | | - * This program is free software; you can redistribute it and/or modify |
|---|
| 5 | | - * it under the terms of the GNU General Public License version 2 as |
|---|
| 6 | | - * published by the Free Software Foundation. |
|---|
| 7 | | - * |
|---|
| 8 | | - * This program is distributed in the hope that it will be useful, |
|---|
| 9 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|---|
| 10 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|---|
| 11 | | - * GNU General Public License for more details. |
|---|
| 12 | | - * |
|---|
| 13 | | - * You should have received a copy of the GNU General Public License |
|---|
| 14 | | - * along with this program. If not, see <http://www.gnu.org/licenses/>. |
|---|
| 15 | 4 | * |
|---|
| 16 | 5 | * Copyright (C) 2014 ARM Limited |
|---|
| 17 | 6 | * |
|---|
| .. | .. |
|---|
| 21 | 10 | #define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt |
|---|
| 22 | 11 | |
|---|
| 23 | 12 | #include <linux/atomic.h> |
|---|
| 13 | +#include <linux/bitops.h> |
|---|
| 24 | 14 | #include <linux/io-pgtable.h> |
|---|
| 25 | | -#include <linux/iommu.h> |
|---|
| 26 | 15 | #include <linux/kernel.h> |
|---|
| 27 | | -#include <linux/scatterlist.h> |
|---|
| 28 | 16 | #include <linux/sizes.h> |
|---|
| 29 | 17 | #include <linux/slab.h> |
|---|
| 30 | 18 | #include <linux/types.h> |
|---|
| .. | .. |
|---|
| 32 | 20 | |
|---|
| 33 | 21 | #include <asm/barrier.h> |
|---|
| 34 | 22 | |
|---|
| 35 | | -#define ARM_LPAE_MAX_ADDR_BITS 48 |
|---|
| 23 | +#include "io-pgtable-arm.h" |
|---|
| 24 | + |
|---|
| 25 | +#define ARM_LPAE_MAX_ADDR_BITS 52 |
|---|
| 36 | 26 | #define ARM_LPAE_S2_MAX_CONCAT_PAGES 16 |
|---|
| 37 | 27 | #define ARM_LPAE_MAX_LEVELS 4 |
|---|
| 38 | 28 | |
|---|
| .. | .. |
|---|
| 44 | 34 | io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) |
|---|
| 45 | 35 | |
|---|
| 46 | 36 | /* |
|---|
| 47 | | - * For consistency with the architecture, we always consider |
|---|
| 48 | | - * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0 |
|---|
| 49 | | - */ |
|---|
| 50 | | -#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels) |
|---|
| 51 | | - |
|---|
| 52 | | -/* |
|---|
| 53 | 37 | * Calculate the right shift amount to get to the portion describing level l |
|---|
| 54 | 38 | * in a virtual address mapped by the pagetable in d. |
|---|
| 55 | 39 | */ |
|---|
| 56 | 40 | #define ARM_LPAE_LVL_SHIFT(l,d) \ |
|---|
| 57 | | - ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ |
|---|
| 58 | | - * (d)->bits_per_level) + (d)->pg_shift) |
|---|
| 41 | + (((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) + \ |
|---|
| 42 | + ilog2(sizeof(arm_lpae_iopte))) |
|---|
| 59 | 43 | |
|---|
| 60 | | -#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift) |
|---|
| 44 | +#define ARM_LPAE_GRANULE(d) \ |
|---|
| 45 | + (sizeof(arm_lpae_iopte) << (d)->bits_per_level) |
|---|
| 46 | +#define ARM_LPAE_PGD_SIZE(d) \ |
|---|
| 47 | + (sizeof(arm_lpae_iopte) << (d)->pgd_bits) |
|---|
| 61 | 48 | |
|---|
| 62 | | -#define ARM_LPAE_PAGES_PER_PGD(d) \ |
|---|
| 63 | | - DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d)) |
|---|
| 49 | +#define ARM_LPAE_PTES_PER_TABLE(d) \ |
|---|
| 50 | + (ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte))) |
|---|
| 64 | 51 | |
|---|
| 65 | 52 | /* |
|---|
| 66 | 53 | * Calculate the index at level l used to map virtual address a using the |
|---|
| 67 | 54 | * pagetable in d. |
|---|
| 68 | 55 | */ |
|---|
| 69 | 56 | #define ARM_LPAE_PGD_IDX(l,d) \ |
|---|
| 70 | | - ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) |
|---|
| 57 | + ((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0) |
|---|
| 71 | 58 | |
|---|
| 72 | | -#define ARM_LPAE_LVL_MASK(l, d) \ |
|---|
| 73 | | - ((l) == ARM_LPAE_START_LVL(d) ? (1 << (d)->pgd_bits) - 1 : \ |
|---|
| 74 | | - (1 << (d)->bits_per_level) - 1) |
|---|
| 75 | 59 | #define ARM_LPAE_LVL_IDX(a,l,d) \ |
|---|
| 76 | 60 | (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ |
|---|
| 77 | 61 | ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) |
|---|
| 78 | 62 | |
|---|
| 79 | 63 | /* Calculate the block/page mapping size at level l for pagetable in d. */ |
|---|
| 80 | | -#define ARM_LPAE_BLOCK_SIZE(l,d) \ |
|---|
| 81 | | - (1ULL << (ilog2(sizeof(arm_lpae_iopte)) + \ |
|---|
| 82 | | - ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level))) |
|---|
| 64 | +#define ARM_LPAE_BLOCK_SIZE(l,d) (1ULL << ARM_LPAE_LVL_SHIFT(l,d)) |
|---|
| 83 | 65 | |
|---|
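The reworked macros above derive the per-level shift purely from `bits_per_level` and the PTE size, with no separate `pg_shift`. As a sanity check, here is a small standalone sketch of that arithmetic for an assumed 4 KiB granule (8-byte PTEs, 9 bits per level, 4 levels); the constants are illustrative, not read from a real `io_pgtable_cfg`:

```c
/* Standalone sketch of the level-shift arithmetic above, assuming a 4 KiB
 * granule (9 bits per level, 8-byte PTEs) and a 4-level walk. Values are
 * illustrative, not taken from a real io_pgtable_cfg. */
#include <stdio.h>
#include <stdint.h>

#define BITS_PER_LEVEL	9		/* log2(4096 / sizeof(u64)) */
#define PTE_SHIFT	3		/* ilog2(sizeof(u64)) */
#define MAX_LEVELS	4

/* Mirrors ARM_LPAE_LVL_SHIFT(l,d) for this fixed configuration */
static unsigned int lvl_shift(int lvl)
{
	return (MAX_LEVELS - lvl) * BITS_PER_LEVEL + PTE_SHIFT;
}

/* Mirrors ARM_LPAE_LVL_IDX(a,l,d) with no concatenated PGD (pgd_idx == 0) */
static unsigned int lvl_idx(uint64_t iova, int lvl)
{
	return (iova >> lvl_shift(lvl)) & ((1 << BITS_PER_LEVEL) - 1);
}

int main(void)
{
	uint64_t iova = 0x0000004512345000ULL;

	for (int lvl = 0; lvl < MAX_LEVELS; lvl++)
		printf("level %d: shift %u, index %u, block size 0x%llx\n",
		       lvl, lvl_shift(lvl), lvl_idx(iova, lvl),
		       (unsigned long long)(1ULL << lvl_shift(lvl)));
	return 0;
}
```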
| 84 | 66 | /* Page table bits */ |
|---|
| 85 | 67 | #define ARM_LPAE_PTE_TYPE_SHIFT 0 |
|---|
| .. | .. |
|---|
| 89 | 71 | #define ARM_LPAE_PTE_TYPE_TABLE 3 |
|---|
| 90 | 72 | #define ARM_LPAE_PTE_TYPE_PAGE 3 |
|---|
| 91 | 73 | |
|---|
| 92 | | -#define ARM_LPAE_PTE_SH_MASK (((arm_lpae_iopte)0x3) << 8) |
|---|
| 74 | +#define ARM_LPAE_PTE_ADDR_MASK GENMASK_ULL(47,12) |
|---|
| 75 | + |
|---|
| 93 | 76 | #define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63) |
|---|
| 94 | 77 | #define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53) |
|---|
| 95 | 78 | #define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10) |
|---|
| .. | .. |
|---|
| 108 | 91 | #define ARM_LPAE_PTE_SW_SYNC (((arm_lpae_iopte)1) << 55) |
|---|
| 109 | 92 | |
|---|
| 110 | 93 | /* Stage-1 PTE */ |
|---|
| 111 | | -#define ARM_LPAE_PTE_AP_PRIV_RW (((arm_lpae_iopte)0) << 6) |
|---|
| 112 | 94 | #define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6) |
|---|
| 113 | | -#define ARM_LPAE_PTE_AP_PRIV_RO (((arm_lpae_iopte)2) << 6) |
|---|
| 114 | | -#define ARM_LPAE_PTE_AP_RO (((arm_lpae_iopte)3) << 6) |
|---|
| 115 | | -#define ARM_LPAE_PTE_ATTRINDX_MASK 0x7 |
|---|
| 95 | +#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6) |
|---|
| 116 | 96 | #define ARM_LPAE_PTE_ATTRINDX_SHIFT 2 |
|---|
| 117 | 97 | #define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11) |
|---|
| 118 | 98 | |
|---|
| .. | .. |
|---|
| 125 | 105 | #define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2) |
|---|
| 126 | 106 | |
|---|
| 127 | 107 | /* Register bits */ |
|---|
| 128 | | -#define ARM_32_LPAE_TCR_EAE (1 << 31) |
|---|
| 129 | | -#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) |
|---|
| 130 | | - |
|---|
| 131 | | -#define ARM_LPAE_TCR_EPD1 (1 << 23) |
|---|
| 132 | | - |
|---|
| 133 | | -#define ARM_LPAE_TCR_TG0_4K (0 << 14) |
|---|
| 134 | | -#define ARM_LPAE_TCR_TG0_64K (1 << 14) |
|---|
| 135 | | -#define ARM_LPAE_TCR_TG0_16K (2 << 14) |
|---|
| 136 | | - |
|---|
| 137 | | -#define ARM_LPAE_TCR_SH0_SHIFT 12 |
|---|
| 138 | | -#define ARM_LPAE_TCR_SH0_MASK 0x3 |
|---|
| 139 | | -#define ARM_LPAE_TCR_SH_NS 0 |
|---|
| 140 | | -#define ARM_LPAE_TCR_SH_OS 2 |
|---|
| 141 | | -#define ARM_LPAE_TCR_SH_IS 3 |
|---|
| 142 | | - |
|---|
| 143 | | -#define ARM_LPAE_TCR_ORGN0_SHIFT 10 |
|---|
| 144 | | -#define ARM_LPAE_TCR_IRGN0_SHIFT 8 |
|---|
| 145 | | -#define ARM_LPAE_TCR_RGN_MASK 0x3 |
|---|
| 146 | | -#define ARM_LPAE_TCR_RGN_NC 0 |
|---|
| 147 | | -#define ARM_LPAE_TCR_RGN_WBWA 1 |
|---|
| 148 | | -#define ARM_LPAE_TCR_RGN_WT 2 |
|---|
| 149 | | -#define ARM_LPAE_TCR_RGN_WB 3 |
|---|
| 150 | | - |
|---|
| 151 | | -#define ARM_LPAE_TCR_SL0_SHIFT 6 |
|---|
| 152 | | -#define ARM_LPAE_TCR_SL0_MASK 0x3 |
|---|
| 108 | +#define ARM_LPAE_VTCR_SL0_MASK 0x3 |
|---|
| 153 | 109 | |
|---|
| 154 | 110 | #define ARM_LPAE_TCR_T0SZ_SHIFT 0 |
|---|
| 155 | | -#define ARM_LPAE_TCR_SZ_MASK 0xf |
|---|
| 156 | 111 | |
|---|
| 157 | | -#define ARM_LPAE_TCR_PS_SHIFT 16 |
|---|
| 158 | | -#define ARM_LPAE_TCR_PS_MASK 0x7 |
|---|
| 112 | +#define ARM_LPAE_VTCR_PS_SHIFT 16 |
|---|
| 113 | +#define ARM_LPAE_VTCR_PS_MASK 0x7 |
|---|
| 159 | 114 | |
|---|
| 160 | | -#define ARM_LPAE_TCR_IPS_SHIFT 32 |
|---|
| 161 | | -#define ARM_LPAE_TCR_IPS_MASK 0x7 |
|---|
| 115 | +#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3) |
|---|
| 116 | +#define ARM_LPAE_MAIR_ATTR_MASK 0xff |
|---|
| 117 | +#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04ULL |
|---|
| 118 | +#define ARM_LPAE_MAIR_ATTR_NC 0x44ULL |
|---|
| 119 | +#define ARM_LPAE_MAIR_ATTR_INC_OWBRANWA 0xe4ULL |
|---|
| 120 | +#define ARM_LPAE_MAIR_ATTR_IWBRWA_OWBRANWA 0xefULL |
|---|
| 121 | +#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA 0xf4ULL |
|---|
| 122 | +#define ARM_LPAE_MAIR_ATTR_WBRWA 0xffULL |
|---|
| 123 | +#define ARM_LPAE_MAIR_ATTR_IDX_NC 0 |
|---|
| 124 | +#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1 |
|---|
| 125 | +#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2 |
|---|
| 126 | +#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE 3 |
|---|
| 127 | +#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA 4 |
|---|
| 128 | +#define ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA 5 |
|---|
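The MAIR attribute bytes and index defines above are consumed one byte per index; the packing into the MAIR register itself happens in the stage-1 allocation path, which is outside this hunk. A hedged standalone sketch of that packing, using only constants copied from the defines above:

```c
/* Standalone sketch of how the MAIR attribute bytes above are packed, one
 * byte per attribute index, into a single 64-bit MAIR value. The packing is
 * done by stage-1 setup code outside this hunk; constants mirror the defines
 * above, and only the first four indices are shown. */
#include <stdio.h>
#include <stdint.h>

#define MAIR_ATTR_SHIFT(n)	((n) << 3)
#define MAIR_ATTR_DEVICE	0x04ULL
#define MAIR_ATTR_NC		0x44ULL
#define MAIR_ATTR_INC_OWBRWA	0xf4ULL
#define MAIR_ATTR_WBRWA		0xffULL
#define MAIR_ATTR_IDX_NC	0
#define MAIR_ATTR_IDX_CACHE	1
#define MAIR_ATTR_IDX_DEV	2
#define MAIR_ATTR_IDX_INC_OCACHE 3

int main(void)
{
	uint64_t mair = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
			(MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
			(MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV)) |
			(MAIR_ATTR_INC_OWBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_INC_OCACHE));

	/* The ATTRINDX field in each PTE selects one of these bytes */
	printf("MAIR = 0x%016llx\n", (unsigned long long)mair);
	return 0;
}
```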
| 162 | 129 | |
|---|
| 163 | | -#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL |
|---|
| 164 | | -#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL |
|---|
| 165 | | -#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL |
|---|
| 166 | | -#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL |
|---|
| 167 | | -#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL |
|---|
| 168 | | -#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL |
|---|
| 130 | +#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0) |
|---|
| 131 | +#define ARM_MALI_LPAE_TTBR_READ_INNER BIT(2) |
|---|
| 132 | +#define ARM_MALI_LPAE_TTBR_SHARE_OUTER BIT(4) |
|---|
| 169 | 133 | |
|---|
| 170 | | -#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3) |
|---|
| 171 | | -#define ARM_LPAE_MAIR1_ATTR_SHIFT(n) ((n-4) << 3) |
|---|
| 172 | | -#define ARM_LPAE_MAIR_ATTR_MASK 0xff |
|---|
| 173 | | -#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04 |
|---|
| 174 | | -#define ARM_LPAE_MAIR_ATTR_NC 0x44 |
|---|
| 175 | | -#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff |
|---|
| 176 | | -#define ARM_LPAE_MAIR_ATTR_UPSTREAM 0xf4 |
|---|
| 177 | | -#define ARM_LPAE_MAIR_ATTR_LLC_NWA 0xe4 |
|---|
| 178 | | -#define ARM_LPAE_MAIR_ATTR_IDX_NC 0 |
|---|
| 179 | | -#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1 |
|---|
| 180 | | -#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2 |
|---|
| 181 | | -#define ARM_LPAE_MAIR_ATTR_IDX_UPSTREAM 3 |
|---|
| 182 | | -#define ARM_LPAE_MAIR_ATTR_IDX_LLC_NWA 0x4ULL |
|---|
| 134 | +#define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL |
|---|
| 135 | +#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL |
|---|
| 183 | 136 | |
|---|
| 184 | 137 | /* IOPTE accessors */ |
|---|
| 185 | | -#define iopte_deref(pte, d) \ |
|---|
| 186 | | - (__va(iopte_val(pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \ |
|---|
| 187 | | - & ~(ARM_LPAE_GRANULE(d) - 1ULL))) |
|---|
| 138 | +#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d)) |
|---|
| 188 | 139 | |
|---|
| 189 | 140 | #define iopte_type(pte,l) \ |
|---|
| 190 | 141 | (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK) |
|---|
| 191 | 142 | |
|---|
| 192 | 143 | #define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK) |
|---|
| 193 | 144 | |
|---|
| 194 | | -#define iopte_leaf(pte,l) \ |
|---|
| 195 | | - (l == (ARM_LPAE_MAX_LEVELS - 1) ? \ |
|---|
| 196 | | - (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \ |
|---|
| 197 | | - (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK)) |
|---|
| 198 | | - |
|---|
| 199 | | -#define iopte_to_pfn(pte, d) \ |
|---|
| 200 | | - (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift) |
|---|
| 201 | | - |
|---|
| 202 | | -#define pfn_to_iopte(pfn, d) \ |
|---|
| 203 | | - (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) |
|---|
| 204 | | - |
|---|
| 205 | 145 | struct arm_lpae_io_pgtable { |
|---|
| 206 | 146 | struct io_pgtable iop; |
|---|
| 207 | 147 | |
|---|
| 208 | | - int levels; |
|---|
| 209 | | - unsigned int pgd_bits; |
|---|
| 210 | | - size_t pgd_size; |
|---|
| 211 | | - unsigned long pg_shift; |
|---|
| 212 | | - unsigned long bits_per_level; |
|---|
| 148 | + int pgd_bits; |
|---|
| 149 | + int start_level; |
|---|
| 150 | + int bits_per_level; |
|---|
| 213 | 151 | |
|---|
| 214 | 152 | void *pgd; |
|---|
| 215 | | - void *pgd_ttbr1; |
|---|
| 216 | 153 | }; |
|---|
| 217 | 154 | |
|---|
| 218 | 155 | typedef u64 arm_lpae_iopte; |
|---|
| 219 | 156 | |
|---|
| 220 | | -/* |
|---|
| 221 | | - * We'll use some ignored bits in table entries to keep track of the number |
|---|
| 222 | | - * of page mappings beneath the table. The maximum number of entries |
|---|
| 223 | | - * beneath any table mapping in armv8 is 8192 (which is possible at the |
|---|
| 224 | | - * 2nd- and 3rd-level when using a 64K granule size). The bits at our |
|---|
| 225 | | - * disposal are: |
|---|
| 226 | | - * |
|---|
| 227 | | - * 4k granule: [54..52], [11..2] |
|---|
| 228 | | - * 64k granule: [54..52], [15..2] |
|---|
| 229 | | - * |
|---|
| 230 | | - * [54..52], [11..2] is enough bits for tracking table mappings at any |
|---|
| 231 | | - * level for any granule, so we'll use those. |
|---|
| 232 | | - */ |
|---|
| 233 | | -#define BOTTOM_IGNORED_MASK 0x3ff |
|---|
| 234 | | -#define BOTTOM_IGNORED_SHIFT 2 |
|---|
| 235 | | -#define BOTTOM_IGNORED_NUM_BITS 10 |
|---|
| 236 | | -#define TOP_IGNORED_MASK 0x7ULL |
|---|
| 237 | | -#define TOP_IGNORED_SHIFT 52 |
|---|
| 238 | | -#define IOPTE_RESERVED_MASK ((BOTTOM_IGNORED_MASK << BOTTOM_IGNORED_SHIFT) | \ |
|---|
| 239 | | - (TOP_IGNORED_MASK << TOP_IGNORED_SHIFT)) |
|---|
| 240 | | - |
|---|
| 241 | | -static arm_lpae_iopte iopte_val(arm_lpae_iopte table_pte) |
|---|
| 157 | +static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl, |
|---|
| 158 | + enum io_pgtable_fmt fmt) |
|---|
| 242 | 159 | { |
|---|
| 243 | | - return table_pte & ~IOPTE_RESERVED_MASK; |
|---|
| 160 | + if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE) |
|---|
| 161 | + return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE; |
|---|
| 162 | + |
|---|
| 163 | + return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK; |
|---|
| 244 | 164 | } |
|---|
| 245 | 165 | |
|---|
| 246 | | -static arm_lpae_iopte _iopte_bottom_ignored_val(arm_lpae_iopte table_pte) |
|---|
| 166 | +static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr, |
|---|
| 167 | + struct arm_lpae_io_pgtable *data) |
|---|
| 247 | 168 | { |
|---|
| 248 | | - return (table_pte & (BOTTOM_IGNORED_MASK << BOTTOM_IGNORED_SHIFT)) |
|---|
| 249 | | - >> BOTTOM_IGNORED_SHIFT; |
|---|
| 169 | + arm_lpae_iopte pte = paddr; |
|---|
| 170 | + |
|---|
| 171 | + /* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */ |
|---|
| 172 | + return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK; |
|---|
| 250 | 173 | } |
|---|
| 251 | 174 | |
|---|
| 252 | | -static arm_lpae_iopte _iopte_top_ignored_val(arm_lpae_iopte table_pte) |
|---|
| 175 | +static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte, |
|---|
| 176 | + struct arm_lpae_io_pgtable *data) |
|---|
| 253 | 177 | { |
|---|
| 254 | | - return (table_pte & (TOP_IGNORED_MASK << TOP_IGNORED_SHIFT)) |
|---|
| 255 | | - >> TOP_IGNORED_SHIFT; |
|---|
| 256 | | -} |
|---|
| 178 | + u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK; |
|---|
| 257 | 179 | |
|---|
| 258 | | -static int iopte_tblcnt(arm_lpae_iopte table_pte) |
|---|
| 259 | | -{ |
|---|
| 260 | | - return (_iopte_bottom_ignored_val(table_pte) | |
|---|
| 261 | | - (_iopte_top_ignored_val(table_pte) << BOTTOM_IGNORED_NUM_BITS)); |
|---|
| 262 | | -} |
|---|
| 180 | + if (ARM_LPAE_GRANULE(data) < SZ_64K) |
|---|
| 181 | + return paddr; |
|---|
| 263 | 182 | |
|---|
| 264 | | -static void iopte_tblcnt_set(arm_lpae_iopte *table_pte, int val) |
|---|
| 265 | | -{ |
|---|
| 266 | | - arm_lpae_iopte pte = iopte_val(*table_pte); |
|---|
| 267 | | - |
|---|
| 268 | | - pte |= ((val & BOTTOM_IGNORED_MASK) << BOTTOM_IGNORED_SHIFT) | |
|---|
| 269 | | - (((val & (TOP_IGNORED_MASK << BOTTOM_IGNORED_NUM_BITS)) |
|---|
| 270 | | - >> BOTTOM_IGNORED_NUM_BITS) << TOP_IGNORED_SHIFT); |
|---|
| 271 | | - *table_pte = pte; |
|---|
| 272 | | -} |
|---|
| 273 | | - |
|---|
| 274 | | -static void iopte_tblcnt_sub(arm_lpae_iopte *table_ptep, int cnt) |
|---|
| 275 | | -{ |
|---|
| 276 | | - arm_lpae_iopte current_cnt = iopte_tblcnt(*table_ptep); |
|---|
| 277 | | - |
|---|
| 278 | | - current_cnt -= cnt; |
|---|
| 279 | | - iopte_tblcnt_set(table_ptep, current_cnt); |
|---|
| 280 | | -} |
|---|
| 281 | | - |
|---|
| 282 | | -static void iopte_tblcnt_add(arm_lpae_iopte *table_ptep, int cnt) |
|---|
| 283 | | -{ |
|---|
| 284 | | - arm_lpae_iopte current_cnt = iopte_tblcnt(*table_ptep); |
|---|
| 285 | | - |
|---|
| 286 | | - current_cnt += cnt; |
|---|
| 287 | | - iopte_tblcnt_set(table_ptep, current_cnt); |
|---|
| 183 | + /* Rotate the packed high-order bits back to the top */ |
|---|
| 184 | + return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4); |
|---|
| 288 | 185 | } |
|---|
| 289 | 186 | |
|---|
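`paddr_to_iopte()`/`iopte_to_paddr()` fold PA[51:48] into PTE bits 15:12, which only works because one of the two overlapping bit ranges is always RES0. A standalone round-trip sketch of the same bit manipulation, assuming the 64 KiB-granule case and with `GENMASK_ULL(47, 12)` written out by hand:

```c
/* Minimal sketch of the 52-bit packing used by paddr_to_iopte() /
 * iopte_to_paddr() above, assuming a 64 KiB granule where PA[51:48]
 * is carried in PTE bits 15:12. Not kernel code. */
#include <assert.h>
#include <stdint.h>

#define PTE_ADDR_MASK	0x0000fffffffff000ULL	/* GENMASK_ULL(47, 12) */

static uint64_t paddr_to_iopte(uint64_t paddr)
{
	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (paddr | (paddr >> (48 - 12))) & PTE_ADDR_MASK;
}

static uint64_t iopte_to_paddr(uint64_t pte)
{
	uint64_t paddr = pte & PTE_ADDR_MASK;

	/* Rotate the packed high-order bits back to the top (64K granule) */
	return (paddr | (paddr << (48 - 12))) & (PTE_ADDR_MASK << 4);
}

int main(void)
{
	/* a 52-bit, 64 KiB-aligned physical address */
	uint64_t paddr = 0x000F000012340000ULL;

	assert(iopte_to_paddr(paddr_to_iopte(paddr)) == paddr);
	return 0;
}
```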
| 290 | 187 | static bool selftest_running = false; |
|---|
| .. | .. |
|---|
| 294 | 191 | return (dma_addr_t)virt_to_phys(pages); |
|---|
| 295 | 192 | } |
|---|
| 296 | 193 | |
|---|
| 297 | | -static inline void pgtable_dma_sync_single_for_device( |
|---|
| 298 | | - struct io_pgtable_cfg *cfg, |
|---|
| 299 | | - dma_addr_t addr, size_t size, |
|---|
| 300 | | - enum dma_data_direction dir) |
|---|
| 301 | | -{ |
|---|
| 302 | | - if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) |
|---|
| 303 | | - dma_sync_single_for_device(cfg->iommu_dev, addr, size, |
|---|
| 304 | | - dir); |
|---|
| 305 | | -} |
|---|
| 306 | | - |
|---|
| 307 | 194 | static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, |
|---|
| 308 | | - struct io_pgtable_cfg *cfg, void *cookie) |
|---|
| 195 | + struct io_pgtable_cfg *cfg) |
|---|
| 309 | 196 | { |
|---|
| 310 | 197 | struct device *dev = cfg->iommu_dev; |
|---|
| 198 | + int order = get_order(size); |
|---|
| 199 | + struct page *p; |
|---|
| 311 | 200 | dma_addr_t dma; |
|---|
| 312 | | - void *pages = io_pgtable_alloc_pages_exact(cfg, cookie, size, |
|---|
| 313 | | - gfp | __GFP_ZERO); |
|---|
| 201 | + void *pages; |
|---|
| 314 | 202 | |
|---|
| 315 | | - if (!pages) |
|---|
| 203 | + VM_BUG_ON((gfp & __GFP_HIGHMEM)); |
|---|
| 204 | + p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE, |
|---|
| 205 | + gfp | __GFP_ZERO, order); |
|---|
| 206 | + if (!p) |
|---|
| 316 | 207 | return NULL; |
|---|
| 317 | 208 | |
|---|
| 318 | | - if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { |
|---|
| 209 | + pages = page_address(p); |
|---|
| 210 | + if (!cfg->coherent_walk) { |
|---|
| 319 | 211 | dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE); |
|---|
| 320 | 212 | if (dma_mapping_error(dev, dma)) |
|---|
| 321 | 213 | goto out_free; |
|---|
| .. | .. |
|---|
| 334 | 226 | dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n"); |
|---|
| 335 | 227 | dma_unmap_single(dev, dma, size, DMA_TO_DEVICE); |
|---|
| 336 | 228 | out_free: |
|---|
| 337 | | - io_pgtable_free_pages_exact(cfg, cookie, pages, size); |
|---|
| 229 | + __free_pages(p, order); |
|---|
| 338 | 230 | return NULL; |
|---|
| 339 | 231 | } |
|---|
| 340 | 232 | |
|---|
| 341 | 233 | static void __arm_lpae_free_pages(void *pages, size_t size, |
|---|
| 342 | | - struct io_pgtable_cfg *cfg, void *cookie) |
|---|
| 234 | + struct io_pgtable_cfg *cfg) |
|---|
| 343 | 235 | { |
|---|
| 344 | | - if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) |
|---|
| 236 | + if (!cfg->coherent_walk) |
|---|
| 345 | 237 | dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), |
|---|
| 346 | 238 | size, DMA_TO_DEVICE); |
|---|
| 347 | | - io_pgtable_free_pages_exact(cfg, cookie, pages, size); |
|---|
| 239 | + free_pages((unsigned long)pages, get_order(size)); |
|---|
| 348 | 240 | } |
|---|
| 349 | 241 | |
|---|
| 350 | | -static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, |
|---|
| 242 | +static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries, |
|---|
| 351 | 243 | struct io_pgtable_cfg *cfg) |
|---|
| 352 | 244 | { |
|---|
| 353 | | - pgtable_dma_sync_single_for_device(cfg, __arm_lpae_dma_addr(ptep), |
|---|
| 354 | | - sizeof(*ptep), DMA_TO_DEVICE); |
|---|
| 245 | + dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep), |
|---|
| 246 | + sizeof(*ptep) * num_entries, DMA_TO_DEVICE); |
|---|
| 355 | 247 | } |
|---|
| 356 | 248 | |
|---|
| 357 | | -static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, |
|---|
| 358 | | - struct io_pgtable_cfg *cfg) |
|---|
| 249 | +static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg) |
|---|
| 359 | 250 | { |
|---|
| 360 | | - *ptep = pte; |
|---|
| 361 | 251 | |
|---|
| 362 | | - if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) |
|---|
| 363 | | - __arm_lpae_sync_pte(ptep, cfg); |
|---|
| 252 | + *ptep = 0; |
|---|
| 253 | + |
|---|
| 254 | + if (!cfg->coherent_walk) |
|---|
| 255 | + __arm_lpae_sync_pte(ptep, 1, cfg); |
|---|
| 364 | 256 | } |
|---|
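`__arm_lpae_sync_pte()` and `__arm_lpae_clear_pte()` encode the rule that on a non-coherent walker every CPU store to a table must be pushed out with a DMA sync, and that a batch of adjacent entries needs only one sync. A minimal sketch of that pattern; `dma_sync_range()` below is a stand-in for `dma_sync_single_for_device()`, not a real API:

```c
/* Sketch of the "write then maybe sync" pattern used by __arm_lpae_sync_pte()
 * and __arm_lpae_clear_pte(): on a non-coherent walker every CPU store to the
 * table must be made visible to the IOMMU with a DMA sync, and one sync can
 * cover a whole batch of adjacent entries. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t iopte_t;

static void dma_sync_range(const void *addr, size_t size)
{
	/* Placeholder: a real implementation would clean the CPU cache
	 * lines covering [addr, addr + size) to the point of coherency. */
	(void)addr;
	(void)size;
}

static void clear_ptes(iopte_t *ptep, int num_entries, bool coherent_walk)
{
	for (int i = 0; i < num_entries; i++)
		ptep[i] = 0;

	/* One sync covers all the entries just written */
	if (!coherent_walk)
		dma_sync_range(ptep, sizeof(*ptep) * num_entries);
}

int main(void)
{
	iopte_t table[512] = { 0 };

	clear_ptes(&table[4], 8, false);
	return 0;
}
```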
| 365 | 257 | |
|---|
| 366 | 258 | static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, |
|---|
| 367 | | - unsigned long iova, size_t size, int lvl, |
|---|
| 368 | | - arm_lpae_iopte *ptep); |
|---|
| 369 | | - |
|---|
| 259 | + struct iommu_iotlb_gather *gather, |
|---|
| 260 | + unsigned long iova, size_t size, size_t pgcount, |
|---|
| 261 | + int lvl, arm_lpae_iopte *ptep); |
|---|
| 370 | 262 | |
|---|
| 371 | 263 | static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, |
|---|
| 372 | 264 | phys_addr_t paddr, arm_lpae_iopte prot, |
|---|
| 373 | | - int lvl, arm_lpae_iopte *ptep, |
|---|
| 374 | | - bool flush) |
|---|
| 265 | + int lvl, int num_entries, arm_lpae_iopte *ptep) |
|---|
| 375 | 266 | { |
|---|
| 376 | 267 | arm_lpae_iopte pte = prot; |
|---|
| 268 | + struct io_pgtable_cfg *cfg = &data->iop.cfg; |
|---|
| 269 | + size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); |
|---|
| 270 | + int i; |
|---|
| 377 | 271 | |
|---|
| 378 | | - if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) |
|---|
| 379 | | - pte |= ARM_LPAE_PTE_NS; |
|---|
| 380 | | - |
|---|
| 381 | | - if (lvl == ARM_LPAE_MAX_LEVELS - 1) |
|---|
| 272 | + if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1) |
|---|
| 382 | 273 | pte |= ARM_LPAE_PTE_TYPE_PAGE; |
|---|
| 383 | 274 | else |
|---|
| 384 | 275 | pte |= ARM_LPAE_PTE_TYPE_BLOCK; |
|---|
| 385 | 276 | |
|---|
| 386 | | - pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_OS; |
|---|
| 387 | | - pte |= pfn_to_iopte(paddr >> data->pg_shift, data); |
|---|
| 277 | + for (i = 0; i < num_entries; i++) |
|---|
| 278 | + ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data); |
|---|
| 388 | 279 | |
|---|
| 389 | | - if (flush) |
|---|
| 390 | | - __arm_lpae_set_pte(ptep, pte, &data->iop.cfg); |
|---|
| 391 | | - else |
|---|
| 392 | | - *ptep = pte; |
|---|
| 280 | + if (!cfg->coherent_walk) |
|---|
| 281 | + __arm_lpae_sync_pte(ptep, num_entries, cfg); |
|---|
| 393 | 282 | } |
|---|
| 394 | 283 | |
|---|
| 395 | 284 | static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, |
|---|
| 396 | 285 | unsigned long iova, phys_addr_t paddr, |
|---|
| 397 | | - arm_lpae_iopte prot, int lvl, |
|---|
| 398 | | - arm_lpae_iopte *ptep, arm_lpae_iopte *prev_ptep, |
|---|
| 399 | | - bool flush) |
|---|
| 286 | + arm_lpae_iopte prot, int lvl, int num_entries, |
|---|
| 287 | + arm_lpae_iopte *ptep) |
|---|
| 400 | 288 | { |
|---|
| 401 | | - arm_lpae_iopte pte = *ptep; |
|---|
| 289 | + int i; |
|---|
| 402 | 290 | |
|---|
| 403 | | - /* We require an unmap first */ |
|---|
| 404 | | - if (pte & ARM_LPAE_PTE_VALID) { |
|---|
| 405 | | - WARN_RATELIMIT(1, "map without unmap\n"); |
|---|
| 406 | | - return -EEXIST; |
|---|
| 407 | | - } |
|---|
| 291 | + for (i = 0; i < num_entries; i++) |
|---|
| 292 | + if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) { |
|---|
| 293 | + /* We require an unmap first */ |
|---|
| 294 | + WARN_ON(!selftest_running); |
|---|
| 295 | + return -EEXIST; |
|---|
| 296 | + } else if (iopte_type(ptep[i], lvl) == ARM_LPAE_PTE_TYPE_TABLE) { |
|---|
| 297 | + /* |
|---|
| 298 | + * We need to unmap and free the old table before |
|---|
| 299 | + * overwriting it with a block entry. |
|---|
| 300 | + */ |
|---|
| 301 | + arm_lpae_iopte *tblp; |
|---|
| 302 | + size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); |
|---|
| 408 | 303 | |
|---|
| 409 | | - __arm_lpae_init_pte(data, paddr, prot, lvl, ptep, flush); |
|---|
| 304 | + tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); |
|---|
| 305 | + if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1, |
|---|
| 306 | + lvl, tblp) != sz) { |
|---|
| 307 | + WARN_ON(1); |
|---|
| 308 | + return -EINVAL; |
|---|
| 309 | + } |
|---|
| 310 | + } |
|---|
| 410 | 311 | |
|---|
| 411 | | - if (prev_ptep) |
|---|
| 412 | | - iopte_tblcnt_add(prev_ptep, 1); |
|---|
| 312 | + __arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep); |
|---|
| 413 | 313 | return 0; |
|---|
| 414 | 314 | } |
|---|
| 415 | 315 | |
|---|
| 416 | 316 | static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, |
|---|
| 417 | 317 | arm_lpae_iopte *ptep, |
|---|
| 418 | 318 | arm_lpae_iopte curr, |
|---|
| 419 | | - struct io_pgtable_cfg *cfg, |
|---|
| 420 | | - int ref_count) |
|---|
| 319 | + struct arm_lpae_io_pgtable *data) |
|---|
| 421 | 320 | { |
|---|
| 422 | 321 | arm_lpae_iopte old, new; |
|---|
| 322 | + struct io_pgtable_cfg *cfg = &data->iop.cfg; |
|---|
| 423 | 323 | |
|---|
| 424 | | - new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE; |
|---|
| 324 | + new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE; |
|---|
| 425 | 325 | if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) |
|---|
| 426 | 326 | new |= ARM_LPAE_PTE_NSTABLE; |
|---|
| 427 | | - iopte_tblcnt_set(&new, ref_count); |
|---|
| 428 | 327 | |
|---|
| 429 | 328 | /* |
|---|
| 430 | 329 | * Ensure the table itself is visible before its PTE can be. |
|---|
| .. | .. |
|---|
| 435 | 334 | |
|---|
| 436 | 335 | old = cmpxchg64_relaxed(ptep, curr, new); |
|---|
| 437 | 336 | |
|---|
| 438 | | - if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) || |
|---|
| 439 | | - (old & ARM_LPAE_PTE_SW_SYNC)) |
|---|
| 337 | + if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC)) |
|---|
| 440 | 338 | return old; |
|---|
| 441 | 339 | |
|---|
| 442 | 340 | /* Even if it's not ours, there's no point waiting; just kick it */ |
|---|
| 443 | | - __arm_lpae_sync_pte(ptep, cfg); |
|---|
| 341 | + __arm_lpae_sync_pte(ptep, 1, cfg); |
|---|
| 444 | 342 | if (old == curr) |
|---|
| 445 | 343 | WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC); |
|---|
| 446 | 344 | |
|---|
| 447 | 345 | return old; |
|---|
| 448 | 346 | } |
|---|
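`arm_lpae_install_table()` publishes a freshly allocated next-level table with a single `cmpxchg64_relaxed()`, and the loser of a concurrent race frees its copy and walks into the winner's table instead. A standalone sketch of that pattern using C11 atomics; the non-coherent `SW_SYNC` handshake is deliberately left out:

```c
/* Standalone sketch of the lock-free table installation pattern in
 * arm_lpae_install_table(): publish a new next-level table with a single
 * compare-and-swap, and let the loser of a concurrent race discard its
 * allocation and reuse the winner's table. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t iopte_t;
#define PTE_TYPE_TABLE 3ULL

static _Atomic iopte_t slot;	/* the shared PTE being populated */

static iopte_t install_table(iopte_t new_table_pte)
{
	iopte_t expected = 0;

	/* cmpxchg64_relaxed() equivalent: only install if still empty */
	if (atomic_compare_exchange_strong_explicit(&slot, &expected,
						    new_table_pte,
						    memory_order_release,
						    memory_order_relaxed))
		return 0;		/* we won: old value was 0 */

	return expected;		/* we lost: someone else's table */
}

int main(void)
{
	iopte_t *table = calloc(512, sizeof(*table));
	iopte_t mine = (uintptr_t)table | PTE_TYPE_TABLE;
	iopte_t old = install_table(mine);

	if (old) {
		/* Lost the race: free ours, reuse the existing table */
		free(table);
		printf("reusing existing table 0x%llx\n", (unsigned long long)old);
	} else {
		/* Won: our table is now live, so it must not be freed */
		printf("installed new table 0x%llx\n", (unsigned long long)mine);
	}
	return 0;
}
```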
| 449 | 347 | |
|---|
| 450 | | -struct map_state { |
|---|
| 451 | | - unsigned long iova_end; |
|---|
| 452 | | - unsigned int pgsize; |
|---|
| 453 | | - arm_lpae_iopte *pgtable; |
|---|
| 454 | | - arm_lpae_iopte *prev_pgtable; |
|---|
| 455 | | - arm_lpae_iopte *pte_start; |
|---|
| 456 | | - unsigned int num_pte; |
|---|
| 457 | | -}; |
|---|
| 458 | | -/* map state optimization works at level 3 (the 2nd-to-last level) */ |
|---|
| 459 | | -#define MAP_STATE_LVL 3 |
|---|
| 460 | | - |
|---|
| 461 | 348 | static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, |
|---|
| 462 | | - phys_addr_t paddr, size_t size, arm_lpae_iopte prot, |
|---|
| 463 | | - int lvl, arm_lpae_iopte *ptep, |
|---|
| 464 | | - arm_lpae_iopte *prev_ptep, struct map_state *ms) |
|---|
| 349 | + phys_addr_t paddr, size_t size, size_t pgcount, |
|---|
| 350 | + arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep, |
|---|
| 351 | + gfp_t gfp, size_t *mapped) |
|---|
| 465 | 352 | { |
|---|
| 466 | 353 | arm_lpae_iopte *cptep, pte; |
|---|
| 467 | 354 | size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); |
|---|
| 468 | 355 | size_t tblsz = ARM_LPAE_GRANULE(data); |
|---|
| 469 | 356 | struct io_pgtable_cfg *cfg = &data->iop.cfg; |
|---|
| 470 | | - void *cookie = data->iop.cookie; |
|---|
| 471 | | - arm_lpae_iopte *pgtable = ptep; |
|---|
| 357 | + int ret = 0, num_entries, max_entries, map_idx_start; |
|---|
| 472 | 358 | |
|---|
| 473 | 359 | /* Find our entry at the current level */ |
|---|
| 474 | | - ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); |
|---|
| 360 | + map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data); |
|---|
| 361 | + ptep += map_idx_start; |
|---|
| 475 | 362 | |
|---|
| 476 | 363 | /* If we can install a leaf entry at this level, then do so */ |
|---|
| 477 | | - if (size == block_size && (size & cfg->pgsize_bitmap)) { |
|---|
| 478 | | - if (!ms) |
|---|
| 479 | | - return arm_lpae_init_pte(data, iova, paddr, prot, lvl, |
|---|
| 480 | | - ptep, prev_ptep, true); |
|---|
| 364 | + if (size == block_size) { |
|---|
| 365 | + max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start; |
|---|
| 366 | + num_entries = min_t(int, pgcount, max_entries); |
|---|
| 367 | + ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep); |
|---|
| 368 | + if (!ret && mapped) |
|---|
| 369 | + *mapped += num_entries * size; |
|---|
| 481 | 370 | |
|---|
| 482 | | - if (lvl == MAP_STATE_LVL) { |
|---|
| 483 | | - if (ms->pgtable) |
|---|
| 484 | | - pgtable_dma_sync_single_for_device(cfg, |
|---|
| 485 | | - __arm_lpae_dma_addr(ms->pte_start), |
|---|
| 486 | | - ms->num_pte * sizeof(*ptep), |
|---|
| 487 | | - DMA_TO_DEVICE); |
|---|
| 488 | | - |
|---|
| 489 | | - ms->iova_end = round_down(iova, SZ_2M) + SZ_2M; |
|---|
| 490 | | - ms->pgtable = pgtable; |
|---|
| 491 | | - ms->prev_pgtable = prev_ptep; |
|---|
| 492 | | - ms->pgsize = size; |
|---|
| 493 | | - ms->pte_start = ptep; |
|---|
| 494 | | - ms->num_pte = 1; |
|---|
| 495 | | - } else { |
|---|
| 496 | | - /* |
|---|
| 497 | | - * We have some map state from previous page |
|---|
| 498 | | - * mappings, but we're about to set up a block |
|---|
| 499 | | - * mapping. Flush out the previous page mappings. |
|---|
| 500 | | - */ |
|---|
| 501 | | - if (ms->pgtable) |
|---|
| 502 | | - pgtable_dma_sync_single_for_device(cfg, |
|---|
| 503 | | - __arm_lpae_dma_addr(ms->pte_start), |
|---|
| 504 | | - ms->num_pte * sizeof(*ptep), |
|---|
| 505 | | - DMA_TO_DEVICE); |
|---|
| 506 | | - memset(ms, 0, sizeof(*ms)); |
|---|
| 507 | | - ms = NULL; |
|---|
| 508 | | - } |
|---|
| 509 | | - |
|---|
| 510 | | - return arm_lpae_init_pte(data, iova, paddr, prot, lvl, |
|---|
| 511 | | - ptep, prev_ptep, ms == NULL); |
|---|
| 371 | + return ret; |
|---|
| 512 | 372 | } |
|---|
| 513 | 373 | |
|---|
| 514 | 374 | /* We can't allocate tables at the final level */ |
|---|
| .. | .. |
|---|
| 518 | 378 | /* Grab a pointer to the next level */ |
|---|
| 519 | 379 | pte = READ_ONCE(*ptep); |
|---|
| 520 | 380 | if (!pte) { |
|---|
| 521 | | - cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg, cookie); |
|---|
| 381 | + cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg); |
|---|
| 522 | 382 | if (!cptep) |
|---|
| 523 | 383 | return -ENOMEM; |
|---|
| 524 | 384 | |
|---|
| 525 | | - pte = arm_lpae_install_table(cptep, ptep, 0, cfg, 0); |
|---|
| 385 | + pte = arm_lpae_install_table(cptep, ptep, 0, data); |
|---|
| 526 | 386 | if (pte) |
|---|
| 527 | | - __arm_lpae_free_pages(cptep, tblsz, cfg, cookie); |
|---|
| 528 | | - |
|---|
| 529 | | - } else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) && |
|---|
| 530 | | - !(pte & ARM_LPAE_PTE_SW_SYNC)) { |
|---|
| 531 | | - __arm_lpae_sync_pte(ptep, cfg); |
|---|
| 387 | + __arm_lpae_free_pages(cptep, tblsz, cfg); |
|---|
| 388 | + } else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) { |
|---|
| 389 | + __arm_lpae_sync_pte(ptep, 1, cfg); |
|---|
| 532 | 390 | } |
|---|
| 533 | 391 | |
|---|
| 534 | | - if (pte && !iopte_leaf(pte, lvl)) { |
|---|
| 392 | + if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) { |
|---|
| 535 | 393 | cptep = iopte_deref(pte, data); |
|---|
| 536 | 394 | } else if (pte) { |
|---|
| 537 | 395 | /* We require an unmap first */ |
|---|
| .. | .. |
|---|
| 540 | 398 | } |
|---|
| 541 | 399 | |
|---|
| 542 | 400 | /* Rinse, repeat */ |
|---|
| 543 | | - return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, |
|---|
| 544 | | - ptep, ms); |
|---|
| 401 | + return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1, |
|---|
| 402 | + cptep, gfp, mapped); |
|---|
| 545 | 403 | } |
|---|
| 546 | 404 | |
|---|
| 547 | 405 | static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, |
|---|
| .. | .. |
|---|
| 552 | 410 | if (data->iop.fmt == ARM_64_LPAE_S1 || |
|---|
| 553 | 411 | data->iop.fmt == ARM_32_LPAE_S1) { |
|---|
| 554 | 412 | pte = ARM_LPAE_PTE_nG; |
|---|
| 555 | | - |
|---|
| 556 | | - if (prot & IOMMU_WRITE) |
|---|
| 557 | | - pte |= (prot & IOMMU_PRIV) ? ARM_LPAE_PTE_AP_PRIV_RW |
|---|
| 558 | | - : ARM_LPAE_PTE_AP_UNPRIV; |
|---|
| 559 | | - else |
|---|
| 560 | | - pte |= (prot & IOMMU_PRIV) ? ARM_LPAE_PTE_AP_PRIV_RO |
|---|
| 561 | | - : ARM_LPAE_PTE_AP_RO; |
|---|
| 562 | | - |
|---|
| 413 | + if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) |
|---|
| 414 | + pte |= ARM_LPAE_PTE_AP_RDONLY; |
|---|
| 563 | 415 | if (!(prot & IOMMU_PRIV)) |
|---|
| 564 | 416 | pte |= ARM_LPAE_PTE_AP_UNPRIV; |
|---|
| 565 | | - |
|---|
| 566 | | - if (prot & IOMMU_MMIO) |
|---|
| 567 | | - pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV |
|---|
| 568 | | - << ARM_LPAE_PTE_ATTRINDX_SHIFT); |
|---|
| 569 | | - else if (prot & IOMMU_CACHE) |
|---|
| 570 | | - pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE |
|---|
| 571 | | - << ARM_LPAE_PTE_ATTRINDX_SHIFT); |
|---|
| 572 | | - else if (prot & IOMMU_USE_UPSTREAM_HINT) |
|---|
| 573 | | - pte |= (ARM_LPAE_MAIR_ATTR_IDX_UPSTREAM |
|---|
| 574 | | - << ARM_LPAE_PTE_ATTRINDX_SHIFT); |
|---|
| 575 | | - else if (prot & IOMMU_USE_LLC_NWA) |
|---|
| 576 | | - pte |= (ARM_LPAE_MAIR_ATTR_IDX_LLC_NWA |
|---|
| 577 | | - << ARM_LPAE_PTE_ATTRINDX_SHIFT); |
|---|
| 578 | 417 | } else { |
|---|
| 579 | 418 | pte = ARM_LPAE_PTE_HAP_FAULT; |
|---|
| 580 | 419 | if (prot & IOMMU_READ) |
|---|
| 581 | 420 | pte |= ARM_LPAE_PTE_HAP_READ; |
|---|
| 582 | 421 | if (prot & IOMMU_WRITE) |
|---|
| 583 | 422 | pte |= ARM_LPAE_PTE_HAP_WRITE; |
|---|
| 423 | + } |
|---|
| 424 | + |
|---|
| 425 | + /* |
|---|
| 426 | + * Note that this logic is structured to accommodate Mali LPAE |
|---|
| 427 | + * having stage-1-like attributes but stage-2-like permissions. |
|---|
| 428 | + */ |
|---|
| 429 | + if (data->iop.fmt == ARM_64_LPAE_S2 || |
|---|
| 430 | + data->iop.fmt == ARM_32_LPAE_S2) { |
|---|
| 584 | 431 | if (prot & IOMMU_MMIO) |
|---|
| 585 | 432 | pte |= ARM_LPAE_PTE_MEMATTR_DEV; |
|---|
| 586 | 433 | else if (prot & IOMMU_CACHE) |
|---|
| 587 | 434 | pte |= ARM_LPAE_PTE_MEMATTR_OIWB; |
|---|
| 588 | 435 | else |
|---|
| 589 | 436 | pte |= ARM_LPAE_PTE_MEMATTR_NC; |
|---|
| 437 | + } else { |
|---|
| 438 | + if (prot & IOMMU_MMIO) |
|---|
| 439 | + pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV |
|---|
| 440 | + << ARM_LPAE_PTE_ATTRINDX_SHIFT); |
|---|
| 441 | + else if ((prot & IOMMU_CACHE) && (prot & IOMMU_SYS_CACHE_NWA)) |
|---|
| 442 | + pte |= (ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA |
|---|
| 443 | + << ARM_LPAE_PTE_ATTRINDX_SHIFT); |
|---|
| 444 | + /* IOMMU_CACHE + IOMMU_SYS_CACHE equivalent to IOMMU_CACHE */ |
|---|
| 445 | + else if (prot & IOMMU_CACHE) |
|---|
| 446 | + pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE |
|---|
| 447 | + << ARM_LPAE_PTE_ATTRINDX_SHIFT); |
|---|
| 448 | + else if (prot & IOMMU_SYS_CACHE) |
|---|
| 449 | + pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE |
|---|
| 450 | + << ARM_LPAE_PTE_ATTRINDX_SHIFT); |
|---|
| 451 | + else if (prot & IOMMU_SYS_CACHE_NWA) |
|---|
| 452 | + pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA |
|---|
| 453 | + << ARM_LPAE_PTE_ATTRINDX_SHIFT); |
|---|
| 590 | 454 | } |
|---|
| 455 | + |
|---|
| 456 | + /* |
|---|
| 457 | + * Also Mali has its own notions of shareability wherein its Inner |
|---|
| 458 | + * domain covers the cores within the GPU, and its Outer domain is |
|---|
| 459 | + * "outside the GPU" (i.e. either the Inner or System domain in CPU |
|---|
| 460 | + * terms, depending on coherency). |
|---|
| 461 | + */ |
|---|
| 462 | + if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE) |
|---|
| 463 | + pte |= ARM_LPAE_PTE_SH_IS; |
|---|
| 464 | + else |
|---|
| 465 | + pte |= ARM_LPAE_PTE_SH_OS; |
|---|
| 591 | 466 | |
|---|
| 592 | 467 | if (prot & IOMMU_NOEXEC) |
|---|
| 593 | 468 | pte |= ARM_LPAE_PTE_XN; |
|---|
| 594 | 469 | |
|---|
| 470 | + if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) |
|---|
| 471 | + pte |= ARM_LPAE_PTE_NS; |
|---|
| 472 | + |
|---|
| 473 | + if (data->iop.fmt != ARM_MALI_LPAE) |
|---|
| 474 | + pte |= ARM_LPAE_PTE_AF; |
|---|
| 475 | + |
|---|
| 595 | 476 | return pte; |
|---|
| 596 | 477 | } |
|---|
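`arm_lpae_prot_to_pte()` now builds the read-only and unprivileged permissions additively and picks shareability from cacheability. A reduced standalone model of the stage-1 half only; the `PROT_*` flags are local stand-ins for the kernel's `IOMMU_*` flags, the `PTE_*` values mirror the defines used in this file, and the Mali, stage-2 and ATTRINDX handling is left out:

```c
/* Reduced, standalone model of the stage-1 half of arm_lpae_prot_to_pte():
 * read-only, unprivileged, shareability, AF and XN handling only. */
#include <assert.h>
#include <stdint.h>

#define PROT_READ	(1 << 0)
#define PROT_WRITE	(1 << 1)
#define PROT_CACHE	(1 << 2)
#define PROT_NOEXEC	(1 << 3)
#define PROT_PRIV	(1 << 4)

#define PTE_nG		(1ULL << 11)
#define PTE_AP_UNPRIV	(1ULL << 6)
#define PTE_AP_RDONLY	(2ULL << 6)
#define PTE_AF		(1ULL << 10)
#define PTE_XN		(3ULL << 53)
#define PTE_SH_OS	(2ULL << 8)
#define PTE_SH_IS	(3ULL << 8)

static uint64_t prot_to_pte_s1(int prot)
{
	uint64_t pte = PTE_nG;

	if (!(prot & PROT_WRITE) && (prot & PROT_READ))
		pte |= PTE_AP_RDONLY;
	if (!(prot & PROT_PRIV))
		pte |= PTE_AP_UNPRIV;

	/* Cacheable mappings are inner shareable, everything else outer */
	pte |= (prot & PROT_CACHE) ? PTE_SH_IS : PTE_SH_OS;

	if (prot & PROT_NOEXEC)
		pte |= PTE_XN;

	return pte | PTE_AF;
}

int main(void)
{
	uint64_t pte = prot_to_pte_s1(PROT_READ | PROT_CACHE);

	assert(pte & PTE_AP_RDONLY);		/* no write permission requested */
	assert(pte & PTE_AP_UNPRIV);		/* not a privileged-only mapping */
	assert((pte & PTE_SH_IS) == PTE_SH_IS);	/* cacheable => inner shareable */
	return 0;
}
```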
| 597 | 478 | |
|---|
| 598 | | -static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, |
|---|
| 599 | | - phys_addr_t paddr, size_t size, int iommu_prot) |
|---|
| 479 | +static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova, |
|---|
| 480 | + phys_addr_t paddr, size_t pgsize, size_t pgcount, |
|---|
| 481 | + int iommu_prot, gfp_t gfp, size_t *mapped) |
|---|
| 600 | 482 | { |
|---|
| 601 | 483 | struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); |
|---|
| 484 | + struct io_pgtable_cfg *cfg = &data->iop.cfg; |
|---|
| 602 | 485 | arm_lpae_iopte *ptep = data->pgd; |
|---|
| 603 | | - int ret, lvl = ARM_LPAE_START_LVL(data); |
|---|
| 486 | + int ret, lvl = data->start_level; |
|---|
| 604 | 487 | arm_lpae_iopte prot; |
|---|
| 488 | + long iaext = (s64)iova >> cfg->ias; |
|---|
| 605 | 489 | |
|---|
| 606 | 490 | /* If no access, then nothing to do */ |
|---|
| 607 | 491 | if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) |
|---|
| 608 | 492 | return 0; |
|---|
| 609 | 493 | |
|---|
| 610 | | - if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) || |
|---|
| 611 | | - paddr >= (1ULL << data->iop.cfg.oas))) |
|---|
| 494 | + if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize)) |
|---|
| 495 | + return -EINVAL; |
|---|
| 496 | + |
|---|
| 497 | + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) |
|---|
| 498 | + iaext = ~iaext; |
|---|
| 499 | + if (WARN_ON(iaext || paddr >> cfg->oas)) |
|---|
| 612 | 500 | return -ERANGE; |
|---|
| 613 | 501 | |
|---|
| 614 | 502 | prot = arm_lpae_prot_to_pte(data, iommu_prot); |
|---|
| 615 | | - ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, NULL, |
|---|
| 616 | | - NULL); |
|---|
| 503 | + ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl, |
|---|
| 504 | + ptep, gfp, mapped); |
|---|
| 617 | 505 | /* |
|---|
| 618 | 506 | * Synchronise all PTE updates for the new mapping before there's |
|---|
| 619 | 507 | * a chance for anything to kick off a table walk for the new iova. |
|---|
| .. | .. |
|---|
| 623 | 511 | return ret; |
|---|
| 624 | 512 | } |
|---|
| 625 | 513 | |
|---|
| 626 | | -static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova, |
|---|
| 627 | | - struct scatterlist *sg, unsigned int nents, |
|---|
| 628 | | - int iommu_prot, size_t *size) |
|---|
| 514 | + |
|---|
| 515 | +static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, |
|---|
| 516 | + phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp) |
|---|
| 629 | 517 | { |
|---|
| 630 | | - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); |
|---|
| 631 | | - arm_lpae_iopte *ptep = data->pgd; |
|---|
| 632 | | - int lvl = ARM_LPAE_START_LVL(data); |
|---|
| 633 | | - arm_lpae_iopte prot; |
|---|
| 634 | | - struct scatterlist *s; |
|---|
| 635 | | - size_t mapped = 0; |
|---|
| 636 | | - int i; |
|---|
| 637 | | - int ret = -EINVAL; |
|---|
| 638 | | - unsigned int min_pagesz; |
|---|
| 639 | | - struct io_pgtable_cfg *cfg = &data->iop.cfg; |
|---|
| 640 | | - struct map_state ms; |
|---|
| 641 | | - |
|---|
| 642 | | - /* If no access, then nothing to do */ |
|---|
| 643 | | - if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) |
|---|
| 644 | | - goto out_err; |
|---|
| 645 | | - |
|---|
| 646 | | - prot = arm_lpae_prot_to_pte(data, iommu_prot); |
|---|
| 647 | | - |
|---|
| 648 | | - min_pagesz = 1 << __ffs(cfg->pgsize_bitmap); |
|---|
| 649 | | - |
|---|
| 650 | | - memset(&ms, 0, sizeof(ms)); |
|---|
| 651 | | - |
|---|
| 652 | | - for_each_sg(sg, s, nents, i) { |
|---|
| 653 | | - phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset; |
|---|
| 654 | | - size_t size = s->length; |
|---|
| 655 | | - |
|---|
| 656 | | - /* |
|---|
| 657 | | - * We are mapping on IOMMU page boundaries, so offset within |
|---|
| 658 | | - * the page must be 0. However, the IOMMU may support pages |
|---|
| 659 | | - * smaller than PAGE_SIZE, so s->offset may still represent |
|---|
| 660 | | - * an offset of that boundary within the CPU page. |
|---|
| 661 | | - */ |
|---|
| 662 | | - if (!IS_ALIGNED(s->offset, min_pagesz)) |
|---|
| 663 | | - goto out_err; |
|---|
| 664 | | - |
|---|
| 665 | | - while (size) { |
|---|
| 666 | | - size_t pgsize = iommu_pgsize( |
|---|
| 667 | | - cfg->pgsize_bitmap, iova | phys, size); |
|---|
| 668 | | - |
|---|
| 669 | | - if (ms.pgtable && (iova < ms.iova_end)) { |
|---|
| 670 | | - arm_lpae_iopte *ptep = ms.pgtable + |
|---|
| 671 | | - ARM_LPAE_LVL_IDX(iova, MAP_STATE_LVL, |
|---|
| 672 | | - data); |
|---|
| 673 | | - arm_lpae_init_pte( |
|---|
| 674 | | - data, iova, phys, prot, MAP_STATE_LVL, |
|---|
| 675 | | - ptep, ms.prev_pgtable, false); |
|---|
| 676 | | - ms.num_pte++; |
|---|
| 677 | | - } else { |
|---|
| 678 | | - ret = __arm_lpae_map(data, iova, phys, pgsize, |
|---|
| 679 | | - prot, lvl, ptep, NULL, &ms); |
|---|
| 680 | | - if (ret) |
|---|
| 681 | | - goto out_err; |
|---|
| 682 | | - } |
|---|
| 683 | | - |
|---|
| 684 | | - iova += pgsize; |
|---|
| 685 | | - mapped += pgsize; |
|---|
| 686 | | - phys += pgsize; |
|---|
| 687 | | - size -= pgsize; |
|---|
| 688 | | - } |
|---|
| 689 | | - } |
|---|
| 690 | | - |
|---|
| 691 | | - if (ms.pgtable) |
|---|
| 692 | | - pgtable_dma_sync_single_for_device(cfg, |
|---|
| 693 | | - __arm_lpae_dma_addr(ms.pte_start), |
|---|
| 694 | | - ms.num_pte * sizeof(*ms.pte_start), |
|---|
| 695 | | - DMA_TO_DEVICE); |
|---|
| 696 | | - |
|---|
| 697 | | - /* |
|---|
| 698 | | - * Synchronise all PTE updates for the new mapping before there's |
|---|
| 699 | | - * a chance for anything to kick off a table walk for the new iova. |
|---|
| 700 | | - */ |
|---|
| 701 | | - wmb(); |
|---|
| 702 | | - |
|---|
| 703 | | - return mapped; |
|---|
| 704 | | - |
|---|
| 705 | | -out_err: |
|---|
| 706 | | - /* Return the size of the partial mapping so that they can be undone */ |
|---|
| 707 | | - *size = mapped; |
|---|
| 708 | | - return ret; |
|---|
| 518 | + return arm_lpae_map_pages(ops, iova, paddr, size, 1, iommu_prot, gfp, |
|---|
| 519 | + NULL); |
|---|
| 709 | 520 | } |
|---|
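`arm_lpae_map_pages()` maps `pgcount` pages per call by clamping each batch to the slots remaining in the current table and letting the caller loop on the remainder. A sketch of just that batching arithmetic, assuming a 4 KiB granule (512 PTEs per level-3 table):

```c
/* Sketch of the leaf-batching arithmetic introduced for map_pages(): at the
 * target level the code clamps pgcount to however many contiguous slots are
 * left in the current table, writes that many PTEs, and the caller comes
 * back for the remainder. Assumes a 4 KiB granule (512 PTEs per table). */
#include <stdio.h>
#include <stdint.h>

#define PTES_PER_TABLE	512
#define PAGE_SHIFT	12

static int map_batch(uint64_t iova, size_t pgcount, size_t *mapped)
{
	int idx = (iova >> PAGE_SHIFT) & (PTES_PER_TABLE - 1);
	int max_entries = PTES_PER_TABLE - idx;
	int num_entries = pgcount < (size_t)max_entries ? (int)pgcount
							: max_entries;

	/* ... here the real code writes num_entries leaf PTEs ... */
	*mapped += (size_t)num_entries << PAGE_SHIFT;
	return num_entries;
}

int main(void)
{
	uint64_t iova = 0x1ff000;	/* last slot of its level-3 table */
	size_t pgcount = 8, mapped = 0;

	while (pgcount) {
		int done = map_batch(iova, pgcount, &mapped);

		pgcount -= done;
		iova += (uint64_t)done << PAGE_SHIFT;
		printf("wrote %d PTEs, 0x%zx bytes total\n", done, mapped);
	}
	return 0;
}
```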
| 710 | 521 | |
|---|
| 711 | 522 | static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, |
|---|
| .. | .. |
|---|
| 713 | 524 | { |
|---|
| 714 | 525 | arm_lpae_iopte *start, *end; |
|---|
| 715 | 526 | unsigned long table_size; |
|---|
| 716 | | - void *cookie = data->iop.cookie; |
|---|
| 717 | 527 | |
|---|
| 718 | | - if (lvl == ARM_LPAE_START_LVL(data)) |
|---|
| 719 | | - table_size = data->pgd_size; |
|---|
| 528 | + if (lvl == data->start_level) |
|---|
| 529 | + table_size = ARM_LPAE_PGD_SIZE(data); |
|---|
| 720 | 530 | else |
|---|
| 721 | 531 | table_size = ARM_LPAE_GRANULE(data); |
|---|
| 722 | 532 | |
|---|
| .. | .. |
|---|
| 731 | 541 | while (ptep != end) { |
|---|
| 732 | 542 | arm_lpae_iopte pte = *ptep++; |
|---|
| 733 | 543 | |
|---|
| 734 | | - if (!pte || iopte_leaf(pte, lvl)) |
|---|
| 544 | + if (!pte || iopte_leaf(pte, lvl, data->iop.fmt)) |
|---|
| 735 | 545 | continue; |
|---|
| 736 | 546 | |
|---|
| 737 | 547 | __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); |
|---|
| 738 | 548 | } |
|---|
| 739 | 549 | |
|---|
| 740 | | - __arm_lpae_free_pages(start, table_size, &data->iop.cfg, cookie); |
|---|
| 550 | + __arm_lpae_free_pages(start, table_size, &data->iop.cfg); |
|---|
| 741 | 551 | } |
|---|
| 742 | 552 | |
|---|
| 743 | 553 | static void arm_lpae_free_pgtable(struct io_pgtable *iop) |
|---|
| 744 | 554 | { |
|---|
| 745 | 555 | struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); |
|---|
| 746 | 556 | |
|---|
| 747 | | - __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd); |
|---|
| 748 | | - __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), |
|---|
| 749 | | - data->pgd_ttbr1); |
|---|
| 557 | + __arm_lpae_free_pgtable(data, data->start_level, data->pgd); |
|---|
| 750 | 558 | kfree(data); |
|---|
| 751 | 559 | } |
|---|
| 752 | 560 | |
|---|
| 753 | 561 | static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, |
|---|
| 562 | + struct iommu_iotlb_gather *gather, |
|---|
| 754 | 563 | unsigned long iova, size_t size, |
|---|
| 755 | 564 | arm_lpae_iopte blk_pte, int lvl, |
|---|
| 756 | | - arm_lpae_iopte *ptep) |
|---|
| 565 | + arm_lpae_iopte *ptep, size_t pgcount) |
|---|
| 757 | 566 | { |
|---|
| 758 | 567 | struct io_pgtable_cfg *cfg = &data->iop.cfg; |
|---|
| 759 | 568 | arm_lpae_iopte pte, *tablep; |
|---|
| 760 | 569 | phys_addr_t blk_paddr; |
|---|
| 761 | 570 | size_t tablesz = ARM_LPAE_GRANULE(data); |
|---|
| 762 | 571 | size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data); |
|---|
| 763 | | - int i, unmap_idx = -1; |
|---|
| 764 | | - void *cookie = data->iop.cookie; |
|---|
| 765 | | - int child_cnt = 0; |
|---|
| 766 | | - |
|---|
| 767 | | - size = iommu_pgsize(data->iop.cfg.pgsize_bitmap, iova, size); |
|---|
| 572 | + int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data); |
|---|
| 573 | + int i, unmap_idx_start = -1, num_entries = 0, max_entries; |
|---|
| 768 | 574 | |
|---|
| 769 | 575 | if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) |
|---|
| 770 | 576 | return 0; |
|---|
| 771 | 577 | |
|---|
| 772 | | - tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, cookie); |
|---|
| 578 | + tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg); |
|---|
| 773 | 579 | if (!tablep) |
|---|
| 774 | 580 | return 0; /* Bytes unmapped */ |
|---|
| 775 | 581 | |
|---|
| 776 | | - if (size == split_sz) |
|---|
| 777 | | - unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data); |
|---|
| 778 | | - |
|---|
| 779 | | - blk_paddr = iopte_to_pfn(blk_pte, data) << data->pg_shift; |
|---|
| 780 | | - pte = iopte_prot(blk_pte); |
|---|
| 781 | | - |
|---|
| 782 | | - for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) { |
|---|
| 783 | | - /* Unmap! */ |
|---|
| 784 | | - if (i == unmap_idx) |
|---|
| 785 | | - continue; |
|---|
| 786 | | - |
|---|
| 787 | | - __arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i], |
|---|
| 788 | | - true); |
|---|
| 789 | | - child_cnt++; |
|---|
| 582 | + if (size == split_sz) { |
|---|
| 583 | + unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data); |
|---|
| 584 | + max_entries = ptes_per_table - unmap_idx_start; |
|---|
| 585 | + num_entries = min_t(int, pgcount, max_entries); |
|---|
| 790 | 586 | } |
|---|
| 791 | 587 | |
|---|
| 792 | | - pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg, child_cnt); |
|---|
| 588 | + blk_paddr = iopte_to_paddr(blk_pte, data); |
|---|
| 589 | + pte = iopte_prot(blk_pte); |
|---|
| 590 | + |
|---|
| 591 | + for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) { |
|---|
| 592 | + /* Unmap! */ |
|---|
| 593 | + if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries)) |
|---|
| 594 | + continue; |
|---|
| 595 | + |
|---|
| 596 | + __arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]); |
|---|
| 597 | + } |
|---|
| 598 | + |
|---|
| 599 | + pte = arm_lpae_install_table(tablep, ptep, blk_pte, data); |
|---|
| 793 | 600 | if (pte != blk_pte) { |
|---|
| 794 | | - __arm_lpae_free_pages(tablep, tablesz, cfg, cookie); |
|---|
| 601 | + __arm_lpae_free_pages(tablep, tablesz, cfg); |
|---|
| 795 | 602 | /* |
|---|
| 796 | 603 | * We may race against someone unmapping another part of this |
|---|
| 797 | 604 | * block, but anything else is invalid. We can't misinterpret |
|---|
| .. | .. |
|---|
| 801 | 608 | return 0; |
|---|
| 802 | 609 | |
|---|
| 803 | 610 | tablep = iopte_deref(pte, data); |
|---|
| 804 | | - } else if (unmap_idx >= 0) { |
|---|
| 805 | | - io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); |
|---|
| 806 | | - return size; |
|---|
| 611 | + } else if (unmap_idx_start >= 0) { |
|---|
| 612 | + for (i = 0; i < num_entries; i++) |
|---|
| 613 | + io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size); |
|---|
| 614 | + |
|---|
| 615 | + return num_entries * size; |
|---|
| 807 | 616 | } |
|---|
| 808 | 617 | |
|---|
| 809 | | - return __arm_lpae_unmap(data, iova, size, lvl, tablep); |
|---|
| 618 | + return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep); |
|---|
| 810 | 619 | } |
|---|
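`arm_lpae_split_blk_unmap()` replaces a block PTE with a table that re-creates page mappings for everything except the sub-range being unmapped. A standalone sketch of the index arithmetic and table fill, assuming a 2 MiB block over 4 KiB pages; the `0x3` "valid page" marker is illustrative, not the full PTE encoding:

```c
/* Standalone sketch of the block-splitting step in arm_lpae_split_blk_unmap():
 * a 2 MiB block PTE is replaced by a full level-3 table that re-creates 4 KiB
 * page mappings for everything except the sub-range being unmapped. Only the
 * index arithmetic and table fill are modelled. */
#include <assert.h>
#include <stdint.h>

#define PTES_PER_TABLE	512
#define PAGE_SHIFT	12

static int split_block(uint64_t blk_paddr, uint64_t unmap_iova,
		       int unmap_pages, uint64_t *tablep)
{
	int start = (unmap_iova >> PAGE_SHIFT) & (PTES_PER_TABLE - 1);
	int num = unmap_pages < PTES_PER_TABLE - start ? unmap_pages
						       : PTES_PER_TABLE - start;

	for (int i = 0; i < PTES_PER_TABLE; i++) {
		if (i >= start && i < start + num)
			continue;	/* leave the unmapped slots empty */
		/* page PTE: physical address plus a "valid page" marker */
		tablep[i] = (blk_paddr + ((uint64_t)i << PAGE_SHIFT)) | 0x3;
	}
	return num;
}

int main(void)
{
	static uint64_t table[PTES_PER_TABLE];
	int done = split_block(0x40000000, 0x80010000, 4, table);

	assert(done == 4);
	assert(table[16] == 0);			/* (0x80010000 >> 12) & 511 == 16 */
	assert(table[15] && table[20]);		/* neighbours stay mapped */
	return 0;
}
```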
| 811 | 620 | |
|---|
| 812 | 621 | static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, |
|---|
| 813 | | - unsigned long iova, size_t size, int lvl, |
|---|
| 814 | | - arm_lpae_iopte *ptep) |
|---|
| 622 | + struct iommu_iotlb_gather *gather, |
|---|
| 623 | + unsigned long iova, size_t size, size_t pgcount, |
|---|
| 624 | + int lvl, arm_lpae_iopte *ptep) |
|---|
| 815 | 625 | { |
|---|
| 816 | 626 | arm_lpae_iopte pte; |
|---|
| 817 | 627 | struct io_pgtable *iop = &data->iop; |
|---|
| 628 | + int i = 0, num_entries, max_entries, unmap_idx_start; |
|---|
| 818 | 629 | |
|---|
| 819 | 630 | /* Something went horribly wrong and we ran out of page table */ |
|---|
| 820 | 631 | if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) |
|---|
| 821 | 632 | return 0; |
|---|
| 822 | 633 | |
|---|
| 823 | | - ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); |
|---|
| 634 | + unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data); |
|---|
| 635 | + ptep += unmap_idx_start; |
|---|
| 824 | 636 | pte = READ_ONCE(*ptep); |
|---|
| 825 | 637 | if (WARN_ON(!pte)) |
|---|
| 826 | 638 | return 0; |
|---|
| 827 | 639 | |
|---|
| 828 | 640 | /* If the size matches this level, we're in the right place */ |
|---|
| 829 | 641 | if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) { |
|---|
| 830 | | - __arm_lpae_set_pte(ptep, 0, &iop->cfg); |
|---|
| 642 | + max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start; |
|---|
| 643 | + num_entries = min_t(int, pgcount, max_entries); |
|---|
| 831 | 644 | |
|---|
| 832 | | - if (!iopte_leaf(pte, lvl)) { |
|---|
| 833 | | - /* Also flush any partial walks */ |
|---|
| 834 | | - ptep = iopte_deref(pte, data); |
|---|
| 835 | | - __arm_lpae_free_pgtable(data, lvl + 1, ptep); |
|---|
| 645 | + while (i < num_entries) { |
|---|
| 646 | + pte = READ_ONCE(*ptep); |
|---|
| 647 | + if (WARN_ON(!pte)) |
|---|
| 648 | + break; |
|---|
| 649 | + |
|---|
| 650 | + __arm_lpae_clear_pte(ptep, &iop->cfg); |
|---|
| 651 | + |
|---|
| 652 | + if (!iopte_leaf(pte, lvl, iop->fmt)) { |
|---|
| 653 | + /* Also flush any partial walks */ |
|---|
| 654 | + io_pgtable_tlb_flush_walk(iop, iova + i * size, size, |
|---|
| 655 | + ARM_LPAE_GRANULE(data)); |
|---|
| 656 | + __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); |
|---|
| 657 | + } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) { |
|---|
| 658 | + /* |
|---|
| 659 | + * Order the PTE update against queueing the IOVA, to |
|---|
| 660 | + * guarantee that a flush callback from a different CPU |
|---|
| 661 | + * has observed it before the TLBIALL can be issued. |
|---|
| 662 | + */ |
|---|
| 663 | + smp_wmb(); |
|---|
| 664 | + } else { |
|---|
| 665 | + io_pgtable_tlb_add_page(iop, gather, iova + i * size, size); |
|---|
| 666 | + } |
|---|
| 667 | + |
|---|
| 668 | + ptep++; |
|---|
| 669 | + i++; |
|---|
| 836 | 670 | } |
|---|
| 837 | 671 | |
|---|
| 838 | | - return size; |
|---|
| 839 | | - } else if ((lvl == ARM_LPAE_MAX_LEVELS - 2) && !iopte_leaf(pte, lvl)) { |
|---|
| 840 | | - arm_lpae_iopte *table = iopte_deref(pte, data); |
|---|
| 841 | | - arm_lpae_iopte *table_base = table; |
|---|
| 842 | | - int tl_offset = ARM_LPAE_LVL_IDX(iova, lvl + 1, data); |
|---|
| 843 | | - int entry_size = ARM_LPAE_GRANULE(data); |
|---|
| 844 | | - int max_entries = ARM_LPAE_BLOCK_SIZE(lvl, data) >> |
|---|
| 845 | | - data->pg_shift; |
|---|
| 846 | | - int entries = min_t(int, size / entry_size, |
|---|
| 847 | | - max_entries - tl_offset); |
|---|
| 848 | | - int table_len = entries * sizeof(*table); |
|---|
| 849 | | - |
|---|
| 850 | | - /* |
|---|
| 851 | | - * This isn't a block mapping so it must be a table mapping |
|---|
| 852 | | - * and since it's the 2nd-to-last level the next level has |
|---|
| 853 | | - * to be all page mappings. Zero them all out in one fell |
|---|
| 854 | | - * swoop. |
|---|
| 855 | | - */ |
|---|
| 856 | | - |
|---|
| 857 | | - table += tl_offset; |
|---|
| 858 | | - |
|---|
| 859 | | - memset(table, 0, table_len); |
|---|
| 860 | | - pgtable_dma_sync_single_for_device(&iop->cfg, |
|---|
| 861 | | - __arm_lpae_dma_addr(table), |
|---|
| 862 | | - table_len, DMA_TO_DEVICE); |
|---|
| 863 | | - |
|---|
| 864 | | - iopte_tblcnt_sub(ptep, entries); |
|---|
| 865 | | - if (!iopte_tblcnt(*ptep)) { |
|---|
| 866 | | - /* no valid mappings left under this table. free it. */ |
|---|
| 867 | | - __arm_lpae_set_pte(ptep, 0, &iop->cfg); |
|---|
| 868 | | - __arm_lpae_free_pgtable(data, lvl + 1, table_base); |
|---|
| 869 | | - } |
|---|
| 870 | | - |
|---|
| 871 | | - return entries * entry_size; |
|---|
| 872 | | - } else if (iopte_leaf(pte, lvl)) { |
|---|
| 672 | + return i * size; |
|---|
| 673 | + } else if (iopte_leaf(pte, lvl, iop->fmt)) { |
|---|
| 873 | 674 | /* |
|---|
| 874 | 675 | * Insert a table at the next level to map the old region, |
|---|
| 875 | 676 | * minus the part we want to unmap |
|---|
| 876 | 677 | */ |
|---|
| 877 | | - return arm_lpae_split_blk_unmap(data, iova, size, pte, |
|---|
| 878 | | - lvl + 1, ptep); |
|---|
| 678 | + return arm_lpae_split_blk_unmap(data, gather, iova, size, pte, |
|---|
| 679 | + lvl + 1, ptep, pgcount); |
|---|
| 879 | 680 | } |
|---|
| 880 | 681 | |
|---|
| 881 | 682 | /* Keep on walkin' */ |
|---|
| 882 | 683 | ptep = iopte_deref(pte, data); |
|---|
| 883 | | - return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep); |
|---|
| 684 | + return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep); |
|---|
| 685 | +} |
|---|
| 686 | + |
|---|
| 687 | +static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova, |
|---|
| 688 | + size_t pgsize, size_t pgcount, |
|---|
| 689 | + struct iommu_iotlb_gather *gather) |
|---|
| 690 | +{ |
|---|
| 691 | + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); |
|---|
| 692 | + struct io_pgtable_cfg *cfg = &data->iop.cfg; |
|---|
| 693 | + arm_lpae_iopte *ptep = data->pgd; |
|---|
| 694 | + long iaext = (s64)iova >> cfg->ias; |
|---|
| 695 | + |
|---|
| 696 | + if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount)) |
|---|
| 697 | + return 0; |
|---|
| 698 | + |
|---|
| 699 | + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) |
|---|
| 700 | + iaext = ~iaext; |
|---|
| 701 | + if (WARN_ON(iaext)) |
|---|
| 702 | + return 0; |
|---|
| 703 | + |
|---|
| 704 | + return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount, |
|---|
| 705 | + data->start_level, ptep); |
|---|
| 884 | 706 | } |
|---|
| 885 | 707 | |
|---|
| 886 | 708 | static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, |
|---|
| 887 | | - size_t size) |
|---|
| 709 | + size_t size, struct iommu_iotlb_gather *gather) |
|---|
| 888 | 710 | { |
|---|
| 889 | | - size_t unmapped = 0; |
|---|
| 890 | | - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); |
|---|
| 891 | | - arm_lpae_iopte *ptep = data->pgd; |
|---|
| 892 | | - int lvl = ARM_LPAE_START_LVL(data); |
|---|
| 893 | | - |
|---|
| 894 | | - if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias))) |
|---|
| 895 | | - return 0; |
|---|
| 896 | | - |
|---|
| 897 | | - while (unmapped < size) { |
|---|
| 898 | | - size_t ret, size_to_unmap, remaining; |
|---|
| 899 | | - |
|---|
| 900 | | - remaining = (size - unmapped); |
|---|
| 901 | | - size_to_unmap = iommu_pgsize(data->iop.cfg.pgsize_bitmap, iova, |
|---|
| 902 | | - remaining); |
|---|
| 903 | | - size_to_unmap = size_to_unmap >= SZ_2M ? |
|---|
| 904 | | - size_to_unmap : |
|---|
| 905 | | - min_t(unsigned long, remaining, |
|---|
| 906 | | - (ALIGN(iova + 1, SZ_2M) - iova)); |
|---|
| 907 | | - ret = __arm_lpae_unmap(data, iova, size_to_unmap, lvl, ptep); |
|---|
| 908 | | - if (ret == 0) |
|---|
| 909 | | - break; |
|---|
| 910 | | - unmapped += ret; |
|---|
| 911 | | - iova += ret; |
|---|
| 912 | | - } |
|---|
| 913 | | - |
|---|
| 914 | | - if (unmapped) |
|---|
| 915 | | - io_pgtable_tlb_flush_all(&data->iop); |
|---|
| 916 | | - |
|---|
| 917 | | - return unmapped; |
|---|
| 918 | | -} |
|---|
| 919 | | - |
|---|
| 920 | | -static int arm_lpae_iova_to_pte(struct arm_lpae_io_pgtable *data, |
|---|
| 921 | | - unsigned long iova, int *plvl_ret, |
|---|
| 922 | | - arm_lpae_iopte *ptep_ret) |
|---|
| 923 | | -{ |
|---|
| 924 | | - arm_lpae_iopte pte, *ptep = data->pgd; |
|---|
| 925 | | - *plvl_ret = ARM_LPAE_START_LVL(data); |
|---|
| 926 | | - *ptep_ret = 0; |
|---|
| 927 | | - |
|---|
| 928 | | - do { |
|---|
| 929 | | - /* Valid IOPTE pointer? */ |
|---|
| 930 | | - if (!ptep) |
|---|
| 931 | | - return -EINVAL; |
|---|
| 932 | | - |
|---|
| 933 | | - /* Grab the IOPTE we're interested in */ |
|---|
| 934 | | - pte = *(ptep + ARM_LPAE_LVL_IDX(iova, *plvl_ret, data)); |
|---|
| 935 | | - |
|---|
| 936 | | - /* Valid entry? */ |
|---|
| 937 | | - if (!pte) |
|---|
| 938 | | - return -EINVAL; |
|---|
| 939 | | - |
|---|
| 940 | | - /* Leaf entry? */ |
|---|
| 941 | | - if (iopte_leaf(pte, *plvl_ret)) |
|---|
| 942 | | - goto found_translation; |
|---|
| 943 | | - |
|---|
| 944 | | - /* Take it to the next level */ |
|---|
| 945 | | - ptep = iopte_deref(pte, data); |
|---|
| 946 | | - } while (++(*plvl_ret) < ARM_LPAE_MAX_LEVELS); |
|---|
| 947 | | - |
|---|
| 948 | | - /* Ran out of page tables to walk */ |
|---|
| 949 | | - return -EINVAL; |
|---|
| 950 | | - |
|---|
| 951 | | -found_translation: |
|---|
| 952 | | - *ptep_ret = pte; |
|---|
| 953 | | - return 0; |
|---|
| 954 | | -} |
|---|
| 955 | | - |
|---|
| 956 | | -static uint64_t arm_lpae_iova_get_pte(struct io_pgtable_ops *ops, |
|---|
| 957 | | - unsigned long iova) |
|---|
| 958 | | -{ |
|---|
| 959 | | - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); |
|---|
| 960 | | - arm_lpae_iopte pte; |
|---|
| 961 | | - int lvl; |
|---|
| 962 | | - |
|---|
| 963 | | - if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte)) |
|---|
| 964 | | - return pte; |
|---|
| 965 | | - |
|---|
| 966 | | - return 0; |
|---|
| 711 | + return arm_lpae_unmap_pages(ops, iova, size, 1, gather); |
|---|
| 967 | 712 | } |
|---|
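
The hunk above folds the old open-coded unmap loop into the new page-count interface: arm_lpae_unmap() is now a thin wrapper that calls arm_lpae_unmap_pages() with pgcount = 1, and the sanity checks live in arm_lpae_unmap_pages() itself (the size must be exactly one supported page size, and the IOVA must sit in the configured input-address range). The only subtle part is the sign-extension trick behind that range check; below is a standalone sketch, not kernel code, with a made-up helper name iova_in_range(), assuming a 48-bit IAS and taking the arithmetic shift of a negative value for granted, as the kernel does.

```c
/*
 * Standalone sketch (not kernel code) of the "iaext" range check in
 * arm_lpae_unmap_pages() above, assuming a 48-bit IAS.  The arithmetic
 * (sign-extending) shift leaves 0 when every bit above the IAS is clear,
 * and all-ones when every such bit is set; the TTBR1 quirk expects the
 * latter, so it inverts the result before testing for zero.
 */
#include <stdint.h>
#include <stdio.h>

static int iova_in_range(uint64_t iova, unsigned int ias, int ttbr1)
{
	/* mirrors: long iaext = (s64)iova >> cfg->ias; */
	int64_t iaext = (int64_t)iova >> ias;

	if (ttbr1)
		iaext = ~iaext;
	return iaext == 0;
}

int main(void)
{
	printf("%d\n", iova_in_range(0x0000ffffdead0000ULL, 48, 0)); /* 1: fits in 48 bits   */
	printf("%d\n", iova_in_range(0x0001000000000000ULL, 48, 0)); /* 0: bit 48 is set     */
	printf("%d\n", iova_in_range(0xffffffffdead0000ULL, 48, 1)); /* 1: upper TTBR1 IOVA  */
	printf("%d\n", iova_in_range(0x0000ffffdead0000ULL, 48, 1)); /* 0: not sign-extended */
	return 0;
}
```
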
| 968 | 713 | |
|---|
| 969 | 714 | static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, |
|---|
| 970 | 715 | unsigned long iova) |
|---|
| 971 | 716 | { |
|---|
| 972 | 717 | struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); |
|---|
| 973 | | - arm_lpae_iopte pte; |
|---|
| 974 | | - int lvl; |
|---|
| 975 | | - phys_addr_t phys = 0; |
|---|
| 718 | + arm_lpae_iopte pte, *ptep = data->pgd; |
|---|
| 719 | + int lvl = data->start_level; |
|---|
| 976 | 720 | |
|---|
| 977 | | - if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte)) { |
|---|
| 978 | | - iova &= ((1 << ARM_LPAE_LVL_SHIFT(lvl, data)) - 1); |
|---|
| 979 | | - phys = ((phys_addr_t)iopte_to_pfn(pte, data) |
|---|
| 980 | | - << data->pg_shift) | iova; |
|---|
| 981 | | - } |
|---|
| 721 | + do { |
|---|
| 722 | + /* Valid IOPTE pointer? */ |
|---|
| 723 | + if (!ptep) |
|---|
| 724 | + return 0; |
|---|
| 982 | 725 | |
|---|
| 983 | | - return phys; |
|---|
| 984 | | -} |
|---|
| 726 | + /* Grab the IOPTE we're interested in */ |
|---|
| 727 | + ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); |
|---|
| 728 | + pte = READ_ONCE(*ptep); |
|---|
| 985 | 729 | |
|---|
| 986 | | -static bool __arm_lpae_is_iova_coherent(struct arm_lpae_io_pgtable *data, |
|---|
| 987 | | - arm_lpae_iopte *ptep) |
|---|
| 988 | | -{ |
|---|
| 989 | | - if (data->iop.fmt == ARM_64_LPAE_S1 || |
|---|
| 990 | | - data->iop.fmt == ARM_32_LPAE_S1) { |
|---|
| 991 | | - int attr_idx = (*ptep & (ARM_LPAE_PTE_ATTRINDX_MASK << |
|---|
| 992 | | - ARM_LPAE_PTE_ATTRINDX_SHIFT)) >> |
|---|
| 993 | | - ARM_LPAE_PTE_ATTRINDX_SHIFT; |
|---|
| 994 | | - if ((attr_idx == ARM_LPAE_MAIR_ATTR_IDX_CACHE) && |
|---|
| 995 | | - (((*ptep & ARM_LPAE_PTE_SH_MASK) == ARM_LPAE_PTE_SH_IS) |
|---|
| 996 | | - || |
|---|
| 997 | | - (*ptep & ARM_LPAE_PTE_SH_MASK) == ARM_LPAE_PTE_SH_OS)) |
|---|
| 998 | | - return true; |
|---|
| 999 | | - } else { |
|---|
| 1000 | | - if (*ptep & ARM_LPAE_PTE_MEMATTR_OIWB) |
|---|
| 1001 | | - return true; |
|---|
| 1002 | | - } |
|---|
| 730 | + /* Valid entry? */ |
|---|
| 731 | + if (!pte) |
|---|
| 732 | + return 0; |
|---|
| 1003 | 733 | |
|---|
| 1004 | | - return false; |
|---|
| 1005 | | -} |
|---|
| 734 | + /* Leaf entry? */ |
|---|
| 735 | + if (iopte_leaf(pte, lvl, data->iop.fmt)) |
|---|
| 736 | + goto found_translation; |
|---|
| 1006 | 737 | |
|---|
| 1007 | | -static bool arm_lpae_is_iova_coherent(struct io_pgtable_ops *ops, |
|---|
| 1008 | | - unsigned long iova) |
|---|
| 1009 | | -{ |
|---|
| 1010 | | - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); |
|---|
| 1011 | | - arm_lpae_iopte pte; |
|---|
| 1012 | | - int lvl; |
|---|
| 1013 | | - bool ret = false; |
|---|
| 738 | + /* Take it to the next level */ |
|---|
| 739 | + ptep = iopte_deref(pte, data); |
|---|
| 740 | + } while (++lvl < ARM_LPAE_MAX_LEVELS); |
|---|
| 1014 | 741 | |
|---|
| 1015 | | - if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte)) |
|---|
| 1016 | | - ret = __arm_lpae_is_iova_coherent(data, &pte); |
|---|
| 742 | + /* Ran out of page tables to walk */ |
|---|
| 743 | + return 0; |
|---|
| 1017 | 744 | |
|---|
| 1018 | | - return ret; |
|---|
| 745 | +found_translation: |
|---|
| 746 | + iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1); |
|---|
| 747 | + return iopte_to_paddr(pte, data) | iova; |
|---|
| 1019 | 748 | } |
|---|
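
The rewritten arm_lpae_iova_to_phys() above walks down until it hits a leaf and then splices the IOVA's offset inside that block onto the output address taken from the PTE. The following is a standalone sketch, not kernel code, of that final composition, assuming a 4 KiB granule and a made-up block address standing in for iopte_to_paddr(pte, data).

```c
/*
 * Standalone sketch (not kernel code) of the address composition at
 * "found_translation" above, for an assumed 4 KiB granule: a leaf at
 * level l covers 1 << (12 + (3 - l) * 9) bytes, and the IOVA bits inside
 * that block are OR'd onto the output address taken from the PTE.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t block_size(int lvl)
{
	return 1ULL << (12 + (3 - lvl) * 9);	/* 4K granule, 8-byte PTEs */
}

int main(void)
{
	uint64_t block_base = 0x80200000;	/* made-up output address of a leaf PTE */
	uint64_t iova = 0x12345678;
	int lvl = 2;				/* level-2 leaf -> 2 MiB block */

	uint64_t phys = block_base | (iova & (block_size(lvl) - 1));

	printf("block %llu KiB, phys 0x%llx\n",
	       (unsigned long long)(block_size(lvl) / 1024),
	       (unsigned long long)phys);
	assert(block_size(2) == 2ULL << 20);
	assert(phys == 0x80345678);
	return 0;
}
```
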
| 1020 | 749 | |
|---|
| 1021 | 750 | static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) |
|---|
| 1022 | 751 | { |
|---|
| 1023 | | - unsigned long granule; |
|---|
| 752 | + unsigned long granule, page_sizes; |
|---|
| 753 | + unsigned int max_addr_bits = 48; |
|---|
| 1024 | 754 | |
|---|
| 1025 | 755 | /* |
|---|
| 1026 | 756 | * We need to restrict the supported page sizes to match the |
|---|
| .. | .. |
|---|
| 1040 | 770 | |
|---|
| 1041 | 771 | switch (granule) { |
|---|
| 1042 | 772 | case SZ_4K: |
|---|
| 1043 | | - cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); |
|---|
| 773 | + page_sizes = (SZ_4K | SZ_2M | SZ_1G); |
|---|
| 1044 | 774 | break; |
|---|
| 1045 | 775 | case SZ_16K: |
|---|
| 1046 | | - cfg->pgsize_bitmap &= (SZ_16K | SZ_32M); |
|---|
| 776 | + page_sizes = (SZ_16K | SZ_32M); |
|---|
| 1047 | 777 | break; |
|---|
| 1048 | 778 | case SZ_64K: |
|---|
| 1049 | | - cfg->pgsize_bitmap &= (SZ_64K | SZ_512M); |
|---|
| 779 | + max_addr_bits = 52; |
|---|
| 780 | + page_sizes = (SZ_64K | SZ_512M); |
|---|
| 781 | + if (cfg->oas > 48) |
|---|
| 782 | + page_sizes |= 1ULL << 42; /* 4TB */ |
|---|
| 1050 | 783 | break; |
|---|
| 1051 | 784 | default: |
|---|
| 1052 | | - cfg->pgsize_bitmap = 0; |
|---|
| 785 | + page_sizes = 0; |
|---|
| 1053 | 786 | } |
|---|
| 787 | + |
|---|
| 788 | + cfg->pgsize_bitmap &= page_sizes; |
|---|
| 789 | + cfg->ias = min(cfg->ias, max_addr_bits); |
|---|
| 790 | + cfg->oas = min(cfg->oas, max_addr_bits); |
|---|
| 1054 | 791 | } |
|---|
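
The per-granule page-size bitmaps chosen above follow directly from the table geometry: with an n-byte granule and 8-byte PTEs, each level resolves log2(n) - 3 bits, so the level-2/level-1 block sizes work out to 2 MiB/1 GiB for 4 KiB, 32 MiB for 16 KiB, and 512 MiB/4 TiB for 64 KiB granules; the 4 TiB (1ULL << 42) level-1 block is only advertised when the output address space exceeds 48 bits, alongside raising the address cap to 52 bits. The switch deliberately advertises only a subset of what the arithmetic allows (for example, no level-1 size for the 16 KiB granule). Below is a standalone arithmetic sketch, not kernel code, that just prints where those numbers come from.

```c
/*
 * Standalone arithmetic sketch (not kernel code) of the block sizes behind
 * the pgsize_bitmap choices above: with an n-byte granule, each table level
 * resolves log2(n) - 3 bits, so a leaf at level l spans
 * granule << ((3 - l) * bits_per_level) bytes.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long granules[] = { 4096, 16384, 65536 };

	for (int i = 0; i < 3; i++) {
		unsigned int pg_shift = 0;

		while ((1UL << pg_shift) < granules[i])
			pg_shift++;

		unsigned int bits_per_level = pg_shift - 3;	/* 8-byte PTEs */

		/* level 3 is the page size; levels 2 and 1 are block sizes */
		for (int lvl = 3; lvl >= 1; lvl--) {
			unsigned int shift = pg_shift + (3 - lvl) * bits_per_level;

			printf("granule %3luK, level %d: %2u bits (%llu KiB)\n",
			       granules[i] >> 10, lvl, shift,
			       (1ULL << shift) >> 10);
		}
	}
	return 0;
}
```
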
| 1055 | 792 | |
|---|
| 1056 | 793 | static struct arm_lpae_io_pgtable * |
|---|
| 1057 | 794 | arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) |
|---|
| 1058 | 795 | { |
|---|
| 1059 | | - unsigned long va_bits, pgd_bits; |
|---|
| 1060 | 796 | struct arm_lpae_io_pgtable *data; |
|---|
| 797 | + int levels, va_bits, pg_shift; |
|---|
| 1061 | 798 | |
|---|
| 1062 | 799 | arm_lpae_restrict_pgsizes(cfg); |
|---|
| 1063 | 800 | |
|---|
| .. | .. |
|---|
| 1070 | 807 | if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) |
|---|
| 1071 | 808 | return NULL; |
|---|
| 1072 | 809 | |
|---|
| 1073 | | - if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) { |
|---|
| 1074 | | - dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n"); |
|---|
| 1075 | | - return NULL; |
|---|
| 1076 | | - } |
|---|
| 1077 | | - |
|---|
| 1078 | 810 | data = kmalloc(sizeof(*data), GFP_KERNEL); |
|---|
| 1079 | 811 | if (!data) |
|---|
| 1080 | 812 | return NULL; |
|---|
| 1081 | 813 | |
|---|
| 1082 | | - data->pg_shift = __ffs(cfg->pgsize_bitmap); |
|---|
| 1083 | | - data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte)); |
|---|
| 814 | + pg_shift = __ffs(cfg->pgsize_bitmap); |
|---|
| 815 | + data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte)); |
|---|
| 1084 | 816 | |
|---|
| 1085 | | - va_bits = cfg->ias - data->pg_shift; |
|---|
| 1086 | | - data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level); |
|---|
| 817 | + va_bits = cfg->ias - pg_shift; |
|---|
| 818 | + levels = DIV_ROUND_UP(va_bits, data->bits_per_level); |
|---|
| 819 | + data->start_level = ARM_LPAE_MAX_LEVELS - levels; |
|---|
| 1087 | 820 | |
|---|
| 1088 | 821 | /* Calculate the actual size of our pgd (without concatenation) */ |
|---|
| 1089 | | - pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1)); |
|---|
| 1090 | | - data->pgd_bits = pgd_bits; |
|---|
| 1091 | | - data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte))); |
|---|
| 822 | + data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1)); |
|---|
| 1092 | 823 | |
|---|
| 1093 | 824 | data->iop.ops = (struct io_pgtable_ops) { |
|---|
| 1094 | 825 | .map = arm_lpae_map, |
|---|
| 1095 | | - .map_sg = arm_lpae_map_sg, |
|---|
| 826 | + .map_pages = arm_lpae_map_pages, |
|---|
| 1096 | 827 | .unmap = arm_lpae_unmap, |
|---|
| 828 | + .unmap_pages = arm_lpae_unmap_pages, |
|---|
| 1097 | 829 | .iova_to_phys = arm_lpae_iova_to_phys, |
|---|
| 1098 | | - .is_iova_coherent = arm_lpae_is_iova_coherent, |
|---|
| 1099 | | - .iova_to_pte = arm_lpae_iova_get_pte, |
|---|
| 1100 | 830 | }; |
|---|
| 1101 | 831 | |
|---|
| 1102 | 832 | return data; |
|---|
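
For a concrete feel of the geometry computed in arm_lpae_alloc_pgtable() above, here is a standalone sketch, not kernel code, that redoes the arithmetic for an assumed 4 KiB granule and 48-bit IAS: nine bits per level, four levels, start_level 0, and a one-page (512-entry) PGD.

```c
/*
 * Standalone sketch (not kernel code) of the table-geometry arithmetic in
 * arm_lpae_alloc_pgtable() above, for an assumed 4 KiB granule / 48-bit IAS:
 * pg_shift = 12 and bits_per_level = 9 leave 36 VA bits to resolve, which
 * needs 4 levels, so start_level = 0 and the PGD holds 2^9 8-byte entries.
 */
#include <assert.h>
#include <stdio.h>

#define MAX_LEVELS	4	/* mirrors ARM_LPAE_MAX_LEVELS */

int main(void)
{
	unsigned int ias = 48, pg_shift = 12;		/* assumed configuration */
	unsigned int pte_size = 8;			/* sizeof(arm_lpae_iopte) */

	unsigned int bits_per_level = pg_shift - 3;	/* ilog2(pte_size) == 3 */
	unsigned int va_bits = ias - pg_shift;
	unsigned int levels = (va_bits + bits_per_level - 1) / bits_per_level;
	unsigned int start_level = MAX_LEVELS - levels;
	unsigned int pgd_bits = va_bits - bits_per_level * (levels - 1);

	printf("levels=%u start_level=%u pgd_bits=%u pgd_size=%u bytes\n",
	       levels, start_level, pgd_bits, pte_size << pgd_bits);
	assert(levels == 4 && start_level == 0 && pgd_bits == 9);
	return 0;
}
```
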
| .. | .. |
|---|
| 1107 | 837 | { |
|---|
| 1108 | 838 | u64 reg; |
|---|
| 1109 | 839 | struct arm_lpae_io_pgtable *data; |
|---|
| 840 | + typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr; |
|---|
| 841 | + bool tg1; |
|---|
| 1110 | 842 | |
|---|
| 1111 | | - if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
|---|
| 1112 | | - | IO_PGTABLE_QUIRK_NO_DMA |
|---|
| 1113 | | - | IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT |
|---|
| 1114 | | - | IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE |
|---|
| 1115 | | - | IO_PGTABLE_QUIRK_QCOM_USE_LLC_NWA)) |
|---|
| 843 | + if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | |
|---|
| 844 | + IO_PGTABLE_QUIRK_NON_STRICT | |
|---|
| 845 | + IO_PGTABLE_QUIRK_ARM_TTBR1)) |
|---|
| 1116 | 846 | return NULL; |
|---|
| 1117 | 847 | |
|---|
| 1118 | 848 | data = arm_lpae_alloc_pgtable(cfg); |
|---|
| .. | .. |
|---|
| 1120 | 850 | return NULL; |
|---|
| 1121 | 851 | |
|---|
| 1122 | 852 | /* TCR */ |
|---|
| 1123 | | - if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) |
|---|
| 1124 | | - reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) | |
|---|
| 1125 | | - (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | |
|---|
| 1126 | | - (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); |
|---|
| 1127 | | - else if ((cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) && |
|---|
| 1128 | | - (cfg->quirks & IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE)) |
|---|
| 1129 | | - reg = (ARM_LPAE_TCR_SH_NS << ARM_LPAE_TCR_SH0_SHIFT) | |
|---|
| 1130 | | - (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) | |
|---|
| 1131 | | - (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); |
|---|
| 1132 | | - else if (cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) |
|---|
| 1133 | | - reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) | |
|---|
| 1134 | | - (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) | |
|---|
| 1135 | | - (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); |
|---|
| 1136 | | - else if ((cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_LLC_NWA) && |
|---|
| 1137 | | - (cfg->quirks & IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE)) |
|---|
| 1138 | | - reg = (ARM_LPAE_TCR_SH_NS << ARM_LPAE_TCR_SH0_SHIFT) | |
|---|
| 1139 | | - (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) | |
|---|
| 1140 | | - (ARM_LPAE_TCR_RGN_WB << ARM_LPAE_TCR_ORGN0_SHIFT); |
|---|
| 1141 | | - else if (cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_LLC_NWA) |
|---|
| 1142 | | - reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) | |
|---|
| 1143 | | - (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) | |
|---|
| 1144 | | - (ARM_LPAE_TCR_RGN_WB << ARM_LPAE_TCR_ORGN0_SHIFT); |
|---|
| 1145 | | - else |
|---|
| 1146 | | - reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) | |
|---|
| 1147 | | - (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) | |
|---|
| 1148 | | - (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT); |
|---|
| 853 | + if (cfg->coherent_walk) { |
|---|
| 854 | + tcr->sh = ARM_LPAE_TCR_SH_IS; |
|---|
| 855 | + tcr->irgn = ARM_LPAE_TCR_RGN_WBWA; |
|---|
| 856 | + tcr->orgn = ARM_LPAE_TCR_RGN_WBWA; |
|---|
| 857 | + } else { |
|---|
| 858 | + tcr->sh = ARM_LPAE_TCR_SH_OS; |
|---|
| 859 | + tcr->irgn = ARM_LPAE_TCR_RGN_NC; |
|---|
| 860 | + tcr->orgn = ARM_LPAE_TCR_RGN_NC; |
|---|
| 861 | + } |
|---|
| 1149 | 862 | |
|---|
| 863 | + tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1; |
|---|
| 1150 | 864 | switch (ARM_LPAE_GRANULE(data)) { |
|---|
| 1151 | 865 | case SZ_4K: |
|---|
| 1152 | | - reg |= ARM_LPAE_TCR_TG0_4K; |
|---|
| 866 | + tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K; |
|---|
| 1153 | 867 | break; |
|---|
| 1154 | 868 | case SZ_16K: |
|---|
| 1155 | | - reg |= ARM_LPAE_TCR_TG0_16K; |
|---|
| 869 | + tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K; |
|---|
| 1156 | 870 | break; |
|---|
| 1157 | 871 | case SZ_64K: |
|---|
| 1158 | | - reg |= ARM_LPAE_TCR_TG0_64K; |
|---|
| 872 | + tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K; |
|---|
| 1159 | 873 | break; |
|---|
| 1160 | 874 | } |
|---|
| 1161 | 875 | |
|---|
| 1162 | 876 | switch (cfg->oas) { |
|---|
| 1163 | 877 | case 32: |
|---|
| 1164 | | - reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT); |
|---|
| 878 | + tcr->ips = ARM_LPAE_TCR_PS_32_BIT; |
|---|
| 1165 | 879 | break; |
|---|
| 1166 | 880 | case 36: |
|---|
| 1167 | | - reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT); |
|---|
| 881 | + tcr->ips = ARM_LPAE_TCR_PS_36_BIT; |
|---|
| 1168 | 882 | break; |
|---|
| 1169 | 883 | case 40: |
|---|
| 1170 | | - reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT); |
|---|
| 884 | + tcr->ips = ARM_LPAE_TCR_PS_40_BIT; |
|---|
| 1171 | 885 | break; |
|---|
| 1172 | 886 | case 42: |
|---|
| 1173 | | - reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT); |
|---|
| 887 | + tcr->ips = ARM_LPAE_TCR_PS_42_BIT; |
|---|
| 1174 | 888 | break; |
|---|
| 1175 | 889 | case 44: |
|---|
| 1176 | | - reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT); |
|---|
| 890 | + tcr->ips = ARM_LPAE_TCR_PS_44_BIT; |
|---|
| 1177 | 891 | break; |
|---|
| 1178 | 892 | case 48: |
|---|
| 1179 | | - reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT); |
|---|
| 893 | + tcr->ips = ARM_LPAE_TCR_PS_48_BIT; |
|---|
| 894 | + break; |
|---|
| 895 | + case 52: |
|---|
| 896 | + tcr->ips = ARM_LPAE_TCR_PS_52_BIT; |
|---|
| 1180 | 897 | break; |
|---|
| 1181 | 898 | default: |
|---|
| 1182 | 899 | goto out_free_data; |
|---|
| 1183 | 900 | } |
|---|
| 1184 | 901 | |
|---|
| 1185 | | - reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; |
|---|
| 1186 | | - |
|---|
| 1187 | | - /* Disable speculative walks through TTBR1 */ |
|---|
| 1188 | | - reg |= ARM_LPAE_TCR_EPD1; |
|---|
| 1189 | | - cfg->arm_lpae_s1_cfg.tcr = reg; |
|---|
| 902 | + tcr->tsz = 64ULL - cfg->ias; |
|---|
| 1190 | 903 | |
|---|
| 1191 | 904 | /* MAIRs */ |
|---|
| 1192 | 905 | reg = (ARM_LPAE_MAIR_ATTR_NC |
|---|
| .. | .. |
|---|
| 1195 | 908 | << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) | |
|---|
| 1196 | 909 | (ARM_LPAE_MAIR_ATTR_DEVICE |
|---|
| 1197 | 910 | << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) | |
|---|
| 1198 | | - (ARM_LPAE_MAIR_ATTR_UPSTREAM |
|---|
| 1199 | | - << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_UPSTREAM)); |
|---|
| 911 | + (ARM_LPAE_MAIR_ATTR_INC_OWBRWA |
|---|
| 912 | + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE)) | |
|---|
| 913 | + (ARM_LPAE_MAIR_ATTR_INC_OWBRANWA |
|---|
| 914 | + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA)) | |
|---|
| 915 | + (ARM_LPAE_MAIR_ATTR_IWBRWA_OWBRANWA |
|---|
| 916 | + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA)); |
|---|
| 1200 | 917 | |
|---|
| 1201 | | - cfg->arm_lpae_s1_cfg.mair[0] = reg; |
|---|
| 1202 | | - |
|---|
| 1203 | | - reg = ARM_LPAE_MAIR_ATTR_LLC_NWA |
|---|
| 1204 | | - << ARM_LPAE_MAIR1_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_LLC_NWA); |
|---|
| 1205 | | - |
|---|
| 1206 | | - cfg->arm_lpae_s1_cfg.mair[1] = reg; |
|---|
| 918 | + cfg->arm_lpae_s1_cfg.mair = reg; |
|---|
| 1207 | 919 | |
|---|
| 1208 | 920 | /* Looking good; allocate a pgd */ |
|---|
| 1209 | | - data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, |
|---|
| 1210 | | - cfg, cookie); |
|---|
| 921 | + data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), |
|---|
| 922 | + GFP_KERNEL, cfg); |
|---|
| 1211 | 923 | if (!data->pgd) |
|---|
| 1212 | 924 | goto out_free_data; |
|---|
| 1213 | | - |
|---|
| 1214 | | - data->pgd_ttbr1 = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, |
|---|
| 1215 | | - cfg, cookie); |
|---|
| 1216 | | - if (!data->pgd_ttbr1) |
|---|
| 1217 | | - goto out_free_pgd; |
|---|
| 1218 | 925 | |
|---|
| 1219 | 926 | /* Ensure the empty pgd is visible before any actual TTBR write */ |
|---|
| 1220 | 927 | wmb(); |
|---|
| 1221 | 928 | |
|---|
| 1222 | | - /* TTBRs */ |
|---|
| 1223 | | - cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd); |
|---|
| 1224 | | - cfg->arm_lpae_s1_cfg.ttbr[1] = virt_to_phys(data->pgd_ttbr1); |
|---|
| 929 | + /* TTBR */ |
|---|
| 930 | + cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd); |
|---|
| 1225 | 931 | return &data->iop; |
|---|
| 1226 | | - |
|---|
| 1227 | | -out_free_pgd: |
|---|
| 1228 | | - __arm_lpae_free_pages(data->pgd, data->pgd_size, cfg, cookie); |
|---|
| 1229 | 932 | |
|---|
| 1230 | 933 | out_free_data: |
|---|
| 1231 | 934 | kfree(data); |
|---|
| .. | .. |
|---|
| 1235 | 938 | static struct io_pgtable * |
|---|
| 1236 | 939 | arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) |
|---|
| 1237 | 940 | { |
|---|
| 1238 | | - u64 reg, sl; |
|---|
| 941 | + u64 sl; |
|---|
| 1239 | 942 | struct arm_lpae_io_pgtable *data; |
|---|
| 943 | + typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr; |
|---|
| 1240 | 944 | |
|---|
| 1241 | 945 | /* The NS quirk doesn't apply at stage 2 */ |
|---|
| 1242 | | - if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)) |
|---|
| 946 | + if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT)) |
|---|
| 1243 | 947 | return NULL; |
|---|
| 1244 | 948 | |
|---|
| 1245 | 949 | data = arm_lpae_alloc_pgtable(cfg); |
|---|
| .. | .. |
|---|
| 1250 | 954 | * Concatenate PGDs at level 1 if possible in order to reduce |
|---|
| 1251 | 955 | * the depth of the stage-2 walk. |
|---|
| 1252 | 956 | */ |
|---|
| 1253 | | - if (data->levels == ARM_LPAE_MAX_LEVELS) { |
|---|
| 957 | + if (data->start_level == 0) { |
|---|
| 1254 | 958 | unsigned long pgd_pages; |
|---|
| 1255 | 959 | |
|---|
| 1256 | | - pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte)); |
|---|
| 960 | + pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte); |
|---|
| 1257 | 961 | if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) { |
|---|
| 1258 | | - data->pgd_size = pgd_pages << data->pg_shift; |
|---|
| 1259 | | - data->levels--; |
|---|
| 962 | + data->pgd_bits += data->bits_per_level; |
|---|
| 963 | + data->start_level++; |
|---|
| 1260 | 964 | } |
|---|
| 1261 | 965 | } |
|---|
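
The concatenation above only triggers when the plain geometry needs all four levels (start_level == 0) and the would-be level-0 table has at most ARM_LPAE_S2_MAX_CONCAT_PAGES entries; in that case the level-1 tables are glued together into one larger PGD and the walk starts one level lower. Below is a standalone sketch, not kernel code, of the same arithmetic for an assumed 4 KiB granule and 40-bit IPA space.

```c
/*
 * Standalone sketch (not kernel code) of the stage-2 PGD concatenation
 * above, for an assumed 4 KiB granule / 40-bit IPA: the plain geometry
 * gives start_level = 0 with a 2-entry level-0 table, so the two level-1
 * tables are concatenated into one 8 KiB PGD and the walk starts at
 * level 1 instead.
 */
#include <assert.h>
#include <stdio.h>

#define MAX_LEVELS		4	/* mirrors ARM_LPAE_MAX_LEVELS */
#define S2_MAX_CONCAT_PAGES	16	/* mirrors ARM_LPAE_S2_MAX_CONCAT_PAGES */

int main(void)
{
	unsigned int ias = 40, pg_shift = 12;		/* assumed configuration */
	unsigned int bits_per_level = pg_shift - 3;
	unsigned int va_bits = ias - pg_shift;
	unsigned int levels = (va_bits + bits_per_level - 1) / bits_per_level;
	unsigned int start_level = MAX_LEVELS - levels;
	unsigned int pgd_bits = va_bits - bits_per_level * (levels - 1);

	if (start_level == 0) {
		unsigned long pgd_pages = 1UL << pgd_bits;	/* level-0 entries */

		if (pgd_pages <= S2_MAX_CONCAT_PAGES) {
			pgd_bits += bits_per_level;		/* fold level 0 in */
			start_level++;
		}
	}
	printf("start_level=%u pgd=%lu bytes (%lu concatenated tables)\n",
	       start_level, 8UL << pgd_bits, (8UL << pgd_bits) >> pg_shift);
	assert(start_level == 1 && (8UL << pgd_bits) == 2 * 4096);
	return 0;
}
```
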
| 1262 | 966 | |
|---|
| 1263 | 967 | /* VTCR */ |
|---|
| 1264 | | - reg = ARM_64_LPAE_S2_TCR_RES1 | |
|---|
| 1265 | | - (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | |
|---|
| 1266 | | - (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | |
|---|
| 1267 | | - (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); |
|---|
| 968 | + if (cfg->coherent_walk) { |
|---|
| 969 | + vtcr->sh = ARM_LPAE_TCR_SH_IS; |
|---|
| 970 | + vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA; |
|---|
| 971 | + vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA; |
|---|
| 972 | + } else { |
|---|
| 973 | + vtcr->sh = ARM_LPAE_TCR_SH_OS; |
|---|
| 974 | + vtcr->irgn = ARM_LPAE_TCR_RGN_NC; |
|---|
| 975 | + vtcr->orgn = ARM_LPAE_TCR_RGN_NC; |
|---|
| 976 | + } |
|---|
| 1268 | 977 | |
|---|
| 1269 | | - sl = ARM_LPAE_START_LVL(data); |
|---|
| 978 | + sl = data->start_level; |
|---|
| 1270 | 979 | |
|---|
| 1271 | 980 | switch (ARM_LPAE_GRANULE(data)) { |
|---|
| 1272 | 981 | case SZ_4K: |
|---|
| 1273 | | - reg |= ARM_LPAE_TCR_TG0_4K; |
|---|
| 982 | + vtcr->tg = ARM_LPAE_TCR_TG0_4K; |
|---|
| 1274 | 983 | sl++; /* SL0 format is different for 4K granule size */ |
|---|
| 1275 | 984 | break; |
|---|
| 1276 | 985 | case SZ_16K: |
|---|
| 1277 | | - reg |= ARM_LPAE_TCR_TG0_16K; |
|---|
| 986 | + vtcr->tg = ARM_LPAE_TCR_TG0_16K; |
|---|
| 1278 | 987 | break; |
|---|
| 1279 | 988 | case SZ_64K: |
|---|
| 1280 | | - reg |= ARM_LPAE_TCR_TG0_64K; |
|---|
| 989 | + vtcr->tg = ARM_LPAE_TCR_TG0_64K; |
|---|
| 1281 | 990 | break; |
|---|
| 1282 | 991 | } |
|---|
| 1283 | 992 | |
|---|
| 1284 | 993 | switch (cfg->oas) { |
|---|
| 1285 | 994 | case 32: |
|---|
| 1286 | | - reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT); |
|---|
| 995 | + vtcr->ps = ARM_LPAE_TCR_PS_32_BIT; |
|---|
| 1287 | 996 | break; |
|---|
| 1288 | 997 | case 36: |
|---|
| 1289 | | - reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT); |
|---|
| 998 | + vtcr->ps = ARM_LPAE_TCR_PS_36_BIT; |
|---|
| 1290 | 999 | break; |
|---|
| 1291 | 1000 | case 40: |
|---|
| 1292 | | - reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT); |
|---|
| 1001 | + vtcr->ps = ARM_LPAE_TCR_PS_40_BIT; |
|---|
| 1293 | 1002 | break; |
|---|
| 1294 | 1003 | case 42: |
|---|
| 1295 | | - reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT); |
|---|
| 1004 | + vtcr->ps = ARM_LPAE_TCR_PS_42_BIT; |
|---|
| 1296 | 1005 | break; |
|---|
| 1297 | 1006 | case 44: |
|---|
| 1298 | | - reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT); |
|---|
| 1007 | + vtcr->ps = ARM_LPAE_TCR_PS_44_BIT; |
|---|
| 1299 | 1008 | break; |
|---|
| 1300 | 1009 | case 48: |
|---|
| 1301 | | - reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT); |
|---|
| 1010 | + vtcr->ps = ARM_LPAE_TCR_PS_48_BIT; |
|---|
| 1011 | + break; |
|---|
| 1012 | + case 52: |
|---|
| 1013 | + vtcr->ps = ARM_LPAE_TCR_PS_52_BIT; |
|---|
| 1302 | 1014 | break; |
|---|
| 1303 | 1015 | default: |
|---|
| 1304 | 1016 | goto out_free_data; |
|---|
| 1305 | 1017 | } |
|---|
| 1306 | 1018 | |
|---|
| 1307 | | - reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; |
|---|
| 1308 | | - reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT; |
|---|
| 1309 | | - cfg->arm_lpae_s2_cfg.vtcr = reg; |
|---|
| 1019 | + vtcr->tsz = 64ULL - cfg->ias; |
|---|
| 1020 | + vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK; |
|---|
| 1310 | 1021 | |
|---|
| 1311 | 1022 | /* Allocate pgd pages */ |
|---|
| 1312 | | - data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, |
|---|
| 1313 | | - cfg, cookie); |
|---|
| 1023 | + data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), |
|---|
| 1024 | + GFP_KERNEL, cfg); |
|---|
| 1314 | 1025 | if (!data->pgd) |
|---|
| 1315 | 1026 | goto out_free_data; |
|---|
| 1316 | 1027 | |
|---|
| .. | .. |
|---|
| 1329 | 1040 | static struct io_pgtable * |
|---|
| 1330 | 1041 | arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) |
|---|
| 1331 | 1042 | { |
|---|
| 1332 | | - struct io_pgtable *iop; |
|---|
| 1333 | | - |
|---|
| 1334 | 1043 | if (cfg->ias > 32 || cfg->oas > 40) |
|---|
| 1335 | 1044 | return NULL; |
|---|
| 1336 | 1045 | |
|---|
| 1337 | 1046 | cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); |
|---|
| 1338 | | - iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie); |
|---|
| 1339 | | - if (iop) { |
|---|
| 1340 | | - cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE; |
|---|
| 1341 | | - cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff; |
|---|
| 1342 | | - } |
|---|
| 1343 | | - |
|---|
| 1344 | | - return iop; |
|---|
| 1047 | + return arm_64_lpae_alloc_pgtable_s1(cfg, cookie); |
|---|
| 1345 | 1048 | } |
|---|
| 1346 | 1049 | |
|---|
| 1347 | 1050 | static struct io_pgtable * |
|---|
| 1348 | 1051 | arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) |
|---|
| 1349 | 1052 | { |
|---|
| 1350 | | - struct io_pgtable *iop; |
|---|
| 1351 | | - |
|---|
| 1352 | 1053 | if (cfg->ias > 40 || cfg->oas > 40) |
|---|
| 1353 | 1054 | return NULL; |
|---|
| 1354 | 1055 | |
|---|
| 1355 | 1056 | cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); |
|---|
| 1356 | | - iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie); |
|---|
| 1357 | | - if (iop) |
|---|
| 1358 | | - cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff; |
|---|
| 1057 | + return arm_64_lpae_alloc_pgtable_s2(cfg, cookie); |
|---|
| 1058 | +} |
|---|
| 1359 | 1059 | |
|---|
| 1360 | | - return iop; |
|---|
| 1060 | +static struct io_pgtable * |
|---|
| 1061 | +arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) |
|---|
| 1062 | +{ |
|---|
| 1063 | + struct arm_lpae_io_pgtable *data; |
|---|
| 1064 | + |
|---|
| 1065 | + /* No quirks for Mali (hopefully) */ |
|---|
| 1066 | + if (cfg->quirks) |
|---|
| 1067 | + return NULL; |
|---|
| 1068 | + |
|---|
| 1069 | + if (cfg->ias > 48 || cfg->oas > 40) |
|---|
| 1070 | + return NULL; |
|---|
| 1071 | + |
|---|
| 1072 | + cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); |
|---|
| 1073 | + |
|---|
| 1074 | + data = arm_lpae_alloc_pgtable(cfg); |
|---|
| 1075 | + if (!data) |
|---|
| 1076 | + return NULL; |
|---|
| 1077 | + |
|---|
| 1078 | + /* Mali seems to need a full 4-level table regardless of IAS */ |
|---|
| 1079 | + if (data->start_level > 0) { |
|---|
| 1080 | + data->start_level = 0; |
|---|
| 1081 | + data->pgd_bits = 0; |
|---|
| 1082 | + } |
|---|
| 1083 | + /* |
|---|
| 1084 | + * MEMATTR: Mali has no actual notion of a non-cacheable type, so the |
|---|
| 1085 | + * best we can do is mimic the out-of-tree driver and hope that the |
|---|
| 1086 | + * "implementation-defined caching policy" is good enough. Similarly, |
|---|
| 1087 | + * we'll use it for the sake of a valid attribute for our 'device' |
|---|
| 1088 | + * index, although callers should never request that in practice. |
|---|
| 1089 | + */ |
|---|
| 1090 | + cfg->arm_mali_lpae_cfg.memattr = |
|---|
| 1091 | + (ARM_MALI_LPAE_MEMATTR_IMP_DEF |
|---|
| 1092 | + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) | |
|---|
| 1093 | + (ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC |
|---|
| 1094 | + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) | |
|---|
| 1095 | + (ARM_MALI_LPAE_MEMATTR_IMP_DEF |
|---|
| 1096 | + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)); |
|---|
| 1097 | + |
|---|
| 1098 | + data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL, |
|---|
| 1099 | + cfg); |
|---|
| 1100 | + if (!data->pgd) |
|---|
| 1101 | + goto out_free_data; |
|---|
| 1102 | + |
|---|
| 1103 | + /* Ensure the empty pgd is visible before TRANSTAB can be written */ |
|---|
| 1104 | + wmb(); |
|---|
| 1105 | + |
|---|
| 1106 | + cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) | |
|---|
| 1107 | + ARM_MALI_LPAE_TTBR_READ_INNER | |
|---|
| 1108 | + ARM_MALI_LPAE_TTBR_ADRMODE_TABLE; |
|---|
| 1109 | + if (cfg->coherent_walk) |
|---|
| 1110 | + cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER; |
|---|
| 1111 | + |
|---|
| 1112 | + return &data->iop; |
|---|
| 1113 | + |
|---|
| 1114 | +out_free_data: |
|---|
| 1115 | + kfree(data); |
|---|
| 1116 | + return NULL; |
|---|
| 1361 | 1117 | } |
|---|
| 1362 | 1118 | |
|---|
| 1363 | 1119 | struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = { |
|---|
| .. | .. |
|---|
| 1380 | 1136 | .free = arm_lpae_free_pgtable, |
|---|
| 1381 | 1137 | }; |
|---|
| 1382 | 1138 | |
|---|
| 1139 | +struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = { |
|---|
| 1140 | + .alloc = arm_mali_lpae_alloc_pgtable, |
|---|
| 1141 | + .free = arm_lpae_free_pgtable, |
|---|
| 1142 | +}; |
|---|
| 1143 | + |
|---|
| 1383 | 1144 | #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST |
|---|
| 1384 | 1145 | |
|---|
| 1385 | | -static struct io_pgtable_cfg *cfg_cookie; |
|---|
| 1146 | +static struct io_pgtable_cfg *cfg_cookie __initdata; |
|---|
| 1386 | 1147 | |
|---|
| 1387 | | -static void dummy_tlb_flush_all(void *cookie) |
|---|
| 1148 | +static void __init dummy_tlb_flush_all(void *cookie) |
|---|
| 1388 | 1149 | { |
|---|
| 1389 | 1150 | WARN_ON(cookie != cfg_cookie); |
|---|
| 1390 | 1151 | } |
|---|
| 1391 | 1152 | |
|---|
| 1392 | | -static void dummy_tlb_add_flush(unsigned long iova, size_t size, |
|---|
| 1393 | | - size_t granule, bool leaf, void *cookie) |
|---|
| 1153 | +static void __init dummy_tlb_flush(unsigned long iova, size_t size, |
|---|
| 1154 | + size_t granule, void *cookie) |
|---|
| 1394 | 1155 | { |
|---|
| 1395 | 1156 | WARN_ON(cookie != cfg_cookie); |
|---|
| 1157 | + WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); |
|---|
| 1396 | 1158 | } |
|---|
| 1397 | 1159 | |
|---|
| 1398 | | -static void dummy_tlb_sync(void *cookie) |
|---|
| 1160 | +static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather, |
|---|
| 1161 | + unsigned long iova, size_t granule, |
|---|
| 1162 | + void *cookie) |
|---|
| 1399 | 1163 | { |
|---|
| 1400 | | - WARN_ON(cookie != cfg_cookie); |
|---|
| 1164 | + dummy_tlb_flush(iova, granule, granule, cookie); |
|---|
| 1401 | 1165 | } |
|---|
| 1402 | 1166 | |
|---|
| 1403 | | -static const struct iommu_gather_ops dummy_tlb_ops __initconst = { |
|---|
| 1167 | +static const struct iommu_flush_ops dummy_tlb_ops __initconst = { |
|---|
| 1404 | 1168 | .tlb_flush_all = dummy_tlb_flush_all, |
|---|
| 1405 | | - .tlb_add_flush = dummy_tlb_add_flush, |
|---|
| 1406 | | - .tlb_sync = dummy_tlb_sync, |
|---|
| 1169 | + .tlb_flush_walk = dummy_tlb_flush, |
|---|
| 1170 | + .tlb_add_page = dummy_tlb_add_page, |
|---|
| 1407 | 1171 | }; |
|---|
| 1408 | 1172 | |
|---|
| 1409 | 1173 | static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) |
|---|
| .. | .. |
|---|
| 1413 | 1177 | |
|---|
| 1414 | 1178 | pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n", |
|---|
| 1415 | 1179 | cfg->pgsize_bitmap, cfg->ias); |
|---|
| 1416 | | - pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n", |
|---|
| 1417 | | - data->levels, data->pgd_size, data->pg_shift, |
|---|
| 1418 | | - data->bits_per_level, data->pgd); |
|---|
| 1180 | + pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n", |
|---|
| 1181 | + ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data), |
|---|
| 1182 | + ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd); |
|---|
| 1419 | 1183 | } |
|---|
| 1420 | 1184 | |
|---|
| 1421 | 1185 | #define __FAIL(ops, i) ({ \ |
|---|
| .. | .. |
|---|
| 1425 | 1189 | -EFAULT; \ |
|---|
| 1426 | 1190 | }) |
|---|
| 1427 | 1191 | |
|---|
| 1428 | | -/* |
|---|
| 1429 | | - * Returns true if there's any mapping in the given iova range in ops. |
|---|
| 1430 | | - */ |
|---|
| 1431 | | -static bool arm_lpae_range_has_mapping(struct io_pgtable_ops *ops, |
|---|
| 1432 | | - unsigned long iova_start, size_t size) |
|---|
| 1433 | | -{ |
|---|
| 1434 | | - unsigned long iova = iova_start; |
|---|
| 1435 | | - |
|---|
| 1436 | | - while (iova < (iova_start + size)) { |
|---|
| 1437 | | - if (ops->iova_to_phys(ops, iova + 42)) |
|---|
| 1438 | | - return true; |
|---|
| 1439 | | - iova += SZ_4K; |
|---|
| 1440 | | - } |
|---|
| 1441 | | - return false; |
|---|
| 1442 | | -} |
|---|
| 1443 | | - |
|---|
| 1444 | | -/* |
|---|
| 1445 | | - * Returns true if the iova range is successfully mapped to the contiguous |
|---|
| 1446 | | - * phys range in ops. |
|---|
| 1447 | | - */ |
|---|
| 1448 | | -static bool arm_lpae_range_has_specific_mapping(struct io_pgtable_ops *ops, |
|---|
| 1449 | | - const unsigned long iova_start, |
|---|
| 1450 | | - const phys_addr_t phys_start, |
|---|
| 1451 | | - const size_t size) |
|---|
| 1452 | | -{ |
|---|
| 1453 | | - unsigned long iova = iova_start; |
|---|
| 1454 | | - phys_addr_t phys = phys_start; |
|---|
| 1455 | | - |
|---|
| 1456 | | - while (iova < (iova_start + size)) { |
|---|
| 1457 | | - if (ops->iova_to_phys(ops, iova + 42) != (phys + 42)) |
|---|
| 1458 | | - return false; |
|---|
| 1459 | | - iova += SZ_4K; |
|---|
| 1460 | | - phys += SZ_4K; |
|---|
| 1461 | | - } |
|---|
| 1462 | | - return true; |
|---|
| 1463 | | -} |
|---|
| 1464 | | - |
|---|
| 1465 | 1192 | static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) |
|---|
| 1466 | 1193 | { |
|---|
| 1467 | | - static const enum io_pgtable_fmt fmts[] = { |
|---|
| 1194 | + static const enum io_pgtable_fmt fmts[] __initconst = { |
|---|
| 1468 | 1195 | ARM_64_LPAE_S1, |
|---|
| 1469 | 1196 | ARM_64_LPAE_S2, |
|---|
| 1470 | 1197 | }; |
|---|
| 1471 | 1198 | |
|---|
| 1472 | | - int i, j, k; |
|---|
| 1199 | + int i, j; |
|---|
| 1473 | 1200 | unsigned long iova; |
|---|
| 1474 | 1201 | size_t size; |
|---|
| 1475 | 1202 | struct io_pgtable_ops *ops; |
|---|
| 1203 | + |
|---|
| 1476 | 1204 | selftest_running = true; |
|---|
| 1477 | 1205 | |
|---|
| 1478 | 1206 | for (i = 0; i < ARRAY_SIZE(fmts); ++i) { |
|---|
| 1479 | | - unsigned long test_sg_sizes[] = { SZ_4K, SZ_64K, SZ_2M, |
|---|
| 1480 | | - SZ_1M * 12, SZ_1M * 20 }; |
|---|
| 1481 | | - |
|---|
| 1482 | 1207 | cfg_cookie = cfg; |
|---|
| 1483 | 1208 | ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg); |
|---|
| 1484 | 1209 | if (!ops) { |
|---|
| .. | .. |
|---|
| 1487 | 1212 | } |
|---|
| 1488 | 1213 | |
|---|
| 1489 | 1214 | /* |
|---|
| 1490 | | - * Initial sanity checks. Empty page tables shouldn't |
|---|
| 1491 | | - * provide any translations. TODO: check entire supported |
|---|
| 1492 | | - * range for these ops rather than first 2G |
|---|
| 1215 | + * Initial sanity checks. |
|---|
| 1216 | + * Empty page tables shouldn't provide any translations. |
|---|
| 1493 | 1217 | */ |
|---|
| 1494 | | - if (arm_lpae_range_has_mapping(ops, 0, SZ_2G)) |
|---|
| 1218 | + if (ops->iova_to_phys(ops, 42)) |
|---|
| 1219 | + return __FAIL(ops, i); |
|---|
| 1220 | + |
|---|
| 1221 | + if (ops->iova_to_phys(ops, SZ_1G + 42)) |
|---|
| 1222 | + return __FAIL(ops, i); |
|---|
| 1223 | + |
|---|
| 1224 | + if (ops->iova_to_phys(ops, SZ_2G + 42)) |
|---|
| 1495 | 1225 | return __FAIL(ops, i); |
|---|
| 1496 | 1226 | |
|---|
| 1497 | 1227 | /* |
|---|
| .. | .. |
|---|
| 1504 | 1234 | if (ops->map(ops, iova, iova, size, IOMMU_READ | |
|---|
| 1505 | 1235 | IOMMU_WRITE | |
|---|
| 1506 | 1236 | IOMMU_NOEXEC | |
|---|
| 1507 | | - IOMMU_CACHE)) |
|---|
| 1237 | + IOMMU_CACHE, GFP_KERNEL)) |
|---|
| 1508 | 1238 | return __FAIL(ops, i); |
|---|
| 1509 | 1239 | |
|---|
| 1510 | 1240 | /* Overlapping mappings */ |
|---|
| 1511 | 1241 | if (!ops->map(ops, iova, iova + size, size, |
|---|
| 1512 | | - IOMMU_READ | IOMMU_NOEXEC)) |
|---|
| 1242 | + IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL)) |
|---|
| 1513 | 1243 | return __FAIL(ops, i); |
|---|
| 1514 | 1244 | |
|---|
| 1515 | | - if (!arm_lpae_range_has_specific_mapping(ops, iova, |
|---|
| 1516 | | - iova, size)) |
|---|
| 1245 | + if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) |
|---|
| 1517 | 1246 | return __FAIL(ops, i); |
|---|
| 1518 | 1247 | |
|---|
| 1519 | 1248 | iova += SZ_1G; |
|---|
| .. | .. |
|---|
| 1521 | 1250 | |
|---|
| 1522 | 1251 | /* Partial unmap */ |
|---|
| 1523 | 1252 | size = 1UL << __ffs(cfg->pgsize_bitmap); |
|---|
| 1524 | | - if (ops->unmap(ops, SZ_1G + size, size) != size) |
|---|
| 1525 | | - return __FAIL(ops, i); |
|---|
| 1526 | | - |
|---|
| 1527 | | - if (arm_lpae_range_has_mapping(ops, SZ_1G + size, size)) |
|---|
| 1253 | + if (ops->unmap(ops, SZ_1G + size, size, NULL) != size) |
|---|
| 1528 | 1254 | return __FAIL(ops, i); |
|---|
| 1529 | 1255 | |
|---|
| 1530 | 1256 | /* Remap of partial unmap */ |
|---|
| 1531 | | - if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ)) |
|---|
| 1257 | + if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL)) |
|---|
| 1532 | 1258 | return __FAIL(ops, i); |
|---|
| 1533 | 1259 | |
|---|
| 1534 | | - if (!arm_lpae_range_has_specific_mapping(ops, SZ_1G + size, |
|---|
| 1535 | | - size, size)) |
|---|
| 1260 | + if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) |
|---|
| 1536 | 1261 | return __FAIL(ops, i); |
|---|
| 1537 | 1262 | |
|---|
| 1538 | 1263 | /* Full unmap */ |
|---|
| .. | .. |
|---|
| 1540 | 1265 | for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) { |
|---|
| 1541 | 1266 | size = 1UL << j; |
|---|
| 1542 | 1267 | |
|---|
| 1543 | | - if (ops->unmap(ops, iova, size) != size) |
|---|
| 1268 | + if (ops->unmap(ops, iova, size, NULL) != size) |
|---|
| 1544 | 1269 | return __FAIL(ops, i); |
|---|
| 1545 | 1270 | |
|---|
| 1546 | 1271 | if (ops->iova_to_phys(ops, iova + 42)) |
|---|
| 1547 | 1272 | return __FAIL(ops, i); |
|---|
| 1548 | 1273 | |
|---|
| 1549 | 1274 | /* Remap full block */ |
|---|
| 1550 | | - if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) |
|---|
| 1275 | + if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL)) |
|---|
| 1551 | 1276 | return __FAIL(ops, i); |
|---|
| 1552 | 1277 | |
|---|
| 1553 | 1278 | if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) |
|---|
| 1554 | | - return __FAIL(ops, i); |
|---|
| 1555 | | - |
|---|
| 1556 | | - if (ops->unmap(ops, iova, size) != size) |
|---|
| 1557 | 1279 | return __FAIL(ops, i); |
|---|
| 1558 | 1280 | |
|---|
| 1559 | 1281 | iova += SZ_1G; |
|---|
| 1560 | 1282 | } |
|---|
| 1561 | | - |
|---|
| 1562 | | - if (arm_lpae_range_has_mapping(ops, 0, SZ_2G)) |
|---|
| 1563 | | - return __FAIL(ops, i); |
|---|
| 1564 | | - |
|---|
| 1565 | | - if ((cfg->pgsize_bitmap & SZ_2M) && |
|---|
| 1566 | | - (cfg->pgsize_bitmap & SZ_4K)) { |
|---|
| 1567 | | - /* mixed block + page mappings */ |
|---|
| 1568 | | - iova = 0; |
|---|
| 1569 | | - if (ops->map(ops, iova, iova, SZ_2M, IOMMU_READ)) |
|---|
| 1570 | | - return __FAIL(ops, i); |
|---|
| 1571 | | - |
|---|
| 1572 | | - if (ops->map(ops, iova + SZ_2M, iova + SZ_2M, SZ_4K, |
|---|
| 1573 | | - IOMMU_READ)) |
|---|
| 1574 | | - return __FAIL(ops, i); |
|---|
| 1575 | | - |
|---|
| 1576 | | - if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) |
|---|
| 1577 | | - return __FAIL(ops, i); |
|---|
| 1578 | | - |
|---|
| 1579 | | - if (ops->iova_to_phys(ops, iova + SZ_2M + 42) != |
|---|
| 1580 | | - (iova + SZ_2M + 42)) |
|---|
| 1581 | | - return __FAIL(ops, i); |
|---|
| 1582 | | - |
|---|
| 1583 | | - /* unmap both mappings at once */ |
|---|
| 1584 | | - if (ops->unmap(ops, iova, SZ_2M + SZ_4K) != |
|---|
| 1585 | | - (SZ_2M + SZ_4K)) |
|---|
| 1586 | | - return __FAIL(ops, i); |
|---|
| 1587 | | - |
|---|
| 1588 | | - if (arm_lpae_range_has_mapping(ops, 0, SZ_2G)) |
|---|
| 1589 | | - return __FAIL(ops, i); |
|---|
| 1590 | | - } |
|---|
| 1591 | | - |
|---|
| 1592 | | - /* map_sg */ |
|---|
| 1593 | | - for (j = 0; j < ARRAY_SIZE(test_sg_sizes); ++j) { |
|---|
| 1594 | | - size_t mapped; |
|---|
| 1595 | | - size_t unused; |
|---|
| 1596 | | - struct page *page; |
|---|
| 1597 | | - phys_addr_t page_phys; |
|---|
| 1598 | | - struct sg_table table; |
|---|
| 1599 | | - struct scatterlist *sg; |
|---|
| 1600 | | - unsigned long total_size = test_sg_sizes[j]; |
|---|
| 1601 | | - int chunk_size = 1UL << find_first_bit( |
|---|
| 1602 | | - &cfg->pgsize_bitmap, BITS_PER_LONG); |
|---|
| 1603 | | - int nents = total_size / chunk_size; |
|---|
| 1604 | | - int ret; |
|---|
| 1605 | | - |
|---|
| 1606 | | - if (total_size < chunk_size) |
|---|
| 1607 | | - continue; |
|---|
| 1608 | | - |
|---|
| 1609 | | - page = alloc_pages(GFP_KERNEL, get_order(chunk_size)); |
|---|
| 1610 | | - if (!page) |
|---|
| 1611 | | - return -ENOMEM; |
|---|
| 1612 | | - page_phys = page_to_phys(page); |
|---|
| 1613 | | - |
|---|
| 1614 | | - iova = 0; |
|---|
| 1615 | | - ret = sg_alloc_table(&table, nents, GFP_KERNEL); |
|---|
| 1616 | | - if (ret) |
|---|
| 1617 | | - return ret; |
|---|
| 1618 | | - for_each_sg(table.sgl, sg, table.nents, k) |
|---|
| 1619 | | - sg_set_page(sg, page, chunk_size, 0); |
|---|
| 1620 | | - |
|---|
| 1621 | | - mapped = ops->map_sg(ops, iova, table.sgl, table.nents, |
|---|
| 1622 | | - IOMMU_READ | IOMMU_WRITE, &unused); |
|---|
| 1623 | | - |
|---|
| 1624 | | - if (mapped != total_size) |
|---|
| 1625 | | - return __FAIL(ops, i); |
|---|
| 1626 | | - |
|---|
| 1627 | | - if (!arm_lpae_range_has_mapping(ops, iova, total_size)) |
|---|
| 1628 | | - return __FAIL(ops, i); |
|---|
| 1629 | | - |
|---|
| 1630 | | - if (arm_lpae_range_has_mapping(ops, iova + total_size, |
|---|
| 1631 | | - SZ_2G - (iova + total_size))) |
|---|
| 1632 | | - return __FAIL(ops, i); |
|---|
| 1633 | | - |
|---|
| 1634 | | - for_each_sg(table.sgl, sg, table.nents, k) { |
|---|
| 1635 | | - dma_addr_t newphys = |
|---|
| 1636 | | - ops->iova_to_phys(ops, iova + 42); |
|---|
| 1637 | | - if (newphys != (page_phys + 42)) |
|---|
| 1638 | | - return __FAIL(ops, i); |
|---|
| 1639 | | - iova += chunk_size; |
|---|
| 1640 | | - } |
|---|
| 1641 | | - |
|---|
| 1642 | | - if (ops->unmap(ops, 0, total_size) != total_size) |
|---|
| 1643 | | - return __FAIL(ops, i); |
|---|
| 1644 | | - |
|---|
| 1645 | | - if (arm_lpae_range_has_mapping(ops, 0, SZ_2G)) |
|---|
| 1646 | | - return __FAIL(ops, i); |
|---|
| 1647 | | - |
|---|
| 1648 | | - sg_free_table(&table); |
|---|
| 1649 | | - __free_pages(page, get_order(chunk_size)); |
|---|
| 1650 | | - } |
|---|
| 1651 | | - |
|---|
| 1652 | | - if (arm_lpae_range_has_mapping(ops, 0, SZ_2G)) |
|---|
| 1653 | | - return __FAIL(ops, i); |
|---|
| 1654 | 1283 | |
|---|
| 1655 | 1284 | free_io_pgtable_ops(ops); |
|---|
| 1656 | 1285 | } |
|---|
| .. | .. |
|---|
| 1661 | 1290 | |
|---|
| 1662 | 1291 | static int __init arm_lpae_do_selftests(void) |
|---|
| 1663 | 1292 | { |
|---|
| 1664 | | - static const unsigned long pgsize[] = { |
|---|
| 1293 | + static const unsigned long pgsize[] __initconst = { |
|---|
| 1665 | 1294 | SZ_4K | SZ_2M | SZ_1G, |
|---|
| 1295 | + SZ_16K | SZ_32M, |
|---|
| 1296 | + SZ_64K | SZ_512M, |
|---|
| 1666 | 1297 | }; |
|---|
| 1667 | 1298 | |
|---|
| 1668 | | - static const unsigned int ias[] = { |
|---|
| 1299 | + static const unsigned int ias[] __initconst = { |
|---|
| 1669 | 1300 | 32, 36, 40, 42, 44, 48, |
|---|
| 1670 | 1301 | }; |
|---|
| 1671 | 1302 | |
|---|
| .. | .. |
|---|
| 1673 | 1304 | struct io_pgtable_cfg cfg = { |
|---|
| 1674 | 1305 | .tlb = &dummy_tlb_ops, |
|---|
| 1675 | 1306 | .oas = 48, |
|---|
| 1676 | | - .quirks = IO_PGTABLE_QUIRK_NO_DMA, |
|---|
| 1307 | + .coherent_walk = true, |
|---|
| 1677 | 1308 | }; |
|---|
| 1678 | 1309 | |
|---|
| 1679 | 1310 | for (i = 0; i < ARRAY_SIZE(pgsize); ++i) { |
|---|