| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * CPU-agnostic ARM page table allocator. |
|---|
| 3 | 4 | * |
|---|
| .. | .. |
|---|
| 13 | 14 | * Almost certainly never supporting: |
|---|
| 14 | 15 | * - PXN |
|---|
| 15 | 16 | * - Domains |
|---|
| 16 | | - * |
|---|
| 17 | | - * This program is free software; you can redistribute it and/or modify |
|---|
| 18 | | - * it under the terms of the GNU General Public License version 2 as |
|---|
| 19 | | - * published by the Free Software Foundation. |
|---|
| 20 | | - * |
|---|
| 21 | | - * This program is distributed in the hope that it will be useful, |
|---|
| 22 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|---|
| 23 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|---|
| 24 | | - * GNU General Public License for more details. |
|---|
| 25 | | - * |
|---|
| 26 | | - * You should have received a copy of the GNU General Public License |
|---|
| 27 | | - * along with this program. If not, see <http://www.gnu.org/licenses/>. |
|---|
| 28 | 17 | * |
|---|
| 29 | 18 | * Copyright (C) 2014-2015 ARM Limited |
|---|
| 30 | 19 | * Copyright (c) 2014-2015 MediaTek Inc. |
|---|
| .. | .. |
|---|
| 55 | 44 | |
|---|
| 56 | 45 | /* |
|---|
| 57 | 46 | * We have 32 bits total; 12 bits resolved at level 1, 8 bits at level 2, |
|---|
| 58 | | - * and 12 bits in a page. With some carefully-chosen coefficients we can |
|---|
| 59 | | - * hide the ugly inconsistencies behind these macros and at least let the |
|---|
| 60 | | - * rest of the code pretend to be somewhat sane. |
|---|
| 47 | + * and 12 bits in a page. |
|---|
| 48 | + * MediaTek extend 2 bits to reach 34bits, 14 bits at lvl1 and 8 bits at lvl2. |
|---|
| 61 | 49 | */ |
|---|
| 62 | 50 | #define ARM_V7S_ADDR_BITS 32 |
|---|
| 63 | | -#define _ARM_V7S_LVL_BITS(lvl) (16 - (lvl) * 4) |
|---|
| 64 | | -#define ARM_V7S_LVL_SHIFT(lvl) (ARM_V7S_ADDR_BITS - (4 + 8 * (lvl))) |
|---|
| 51 | +#define _ARM_V7S_LVL_BITS(lvl, cfg) ((lvl) == 1 ? ((cfg)->ias - 20) : 8) |
|---|
| 52 | +#define ARM_V7S_LVL_SHIFT(lvl) ((lvl) == 1 ? 20 : 12) |
|---|
| 65 | 53 | #define ARM_V7S_TABLE_SHIFT 10 |
|---|
| 66 | 54 | |
|---|
| 67 | | -#define ARM_V7S_PTES_PER_LVL(lvl) (1 << _ARM_V7S_LVL_BITS(lvl)) |
|---|
| 68 | | -#define ARM_V7S_TABLE_SIZE(lvl) \ |
|---|
| 69 | | - (ARM_V7S_PTES_PER_LVL(lvl) * sizeof(arm_v7s_iopte)) |
|---|
| 55 | +#define ARM_V7S_PTES_PER_LVL(lvl, cfg) (1 << _ARM_V7S_LVL_BITS(lvl, cfg)) |
|---|
| 56 | +#define ARM_V7S_TABLE_SIZE(lvl, cfg) \ |
|---|
| 57 | + (ARM_V7S_PTES_PER_LVL(lvl, cfg) * sizeof(arm_v7s_iopte)) |
|---|
| 70 | 58 | |
|---|
| 71 | 59 | #define ARM_V7S_BLOCK_SIZE(lvl) (1UL << ARM_V7S_LVL_SHIFT(lvl)) |
|---|
| 72 | 60 | #define ARM_V7S_LVL_MASK(lvl) ((u32)(~0U << ARM_V7S_LVL_SHIFT(lvl))) |
|---|
| 73 | 61 | #define ARM_V7S_TABLE_MASK ((u32)(~0U << ARM_V7S_TABLE_SHIFT)) |
|---|
| 74 | | -#define _ARM_V7S_IDX_MASK(lvl) (ARM_V7S_PTES_PER_LVL(lvl) - 1) |
|---|
| 75 | | -#define ARM_V7S_LVL_IDX(addr, lvl) ({ \ |
|---|
| 62 | +#define _ARM_V7S_IDX_MASK(lvl, cfg) (ARM_V7S_PTES_PER_LVL(lvl, cfg) - 1) |
|---|
| 63 | +#define ARM_V7S_LVL_IDX(addr, lvl, cfg) ({ \ |
|---|
| 76 | 64 | int _l = lvl; \ |
|---|
| 77 | | - ((u32)(addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l); \ |
|---|
| 65 | + ((addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l, cfg); \ |
|---|
| 78 | 66 | }) |
|---|
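The hunk above makes the level-1 geometry depend on the configured input address size: `_ARM_V7S_LVL_BITS(lvl, cfg)` now resolves `ias - 20` bits at level 1 (14 for MediaTek's 34-bit IOVA space, 12 otherwise), so the level-1 table grows from 16 KiB to 64 KiB, and `ARM_V7S_LVL_IDX()` drops the old `u32` cast so IOVA bits above 31 can reach the index. A minimal standalone sketch of that arithmetic (local macro names, not the kernel's):

```c
/* Standalone model of the level-1 sizing shown above (not kernel code). */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef uint32_t arm_v7s_iopte;

#define LVL_BITS(lvl, ias)	((lvl) == 1 ? (ias) - 20 : 8)
#define LVL_SHIFT(lvl)		((lvl) == 1 ? 20 : 12)
#define PTES_PER_LVL(lvl, ias)	(1 << LVL_BITS(lvl, ias))
#define TABLE_SIZE(lvl, ias)	(PTES_PER_LVL(lvl, ias) * sizeof(arm_v7s_iopte))
#define LVL_IDX(addr, lvl, ias)	\
	(((addr) >> LVL_SHIFT(lvl)) & (PTES_PER_LVL(lvl, ias) - 1))

int main(void)
{
	uint64_t iova = 0x3c0123000ULL;	/* a 34-bit IOVA, only legal with ias = 34 */

	/* Plain ARMv7 short-descriptor: 12 index bits -> 16 KiB level-1 table */
	printf("ias=32: l1 table %zu bytes\n", TABLE_SIZE(1, 32));
	/* MediaTek 34-bit IOVA: 14 index bits -> 64 KiB level-1 table */
	printf("ias=34: l1 table %zu bytes\n", TABLE_SIZE(1, 34));
	/* Without the old (u32) cast, IOVA bits [33:32] now reach the index */
	printf("l1 idx %llu, l2 idx %llu\n",
	       (unsigned long long)LVL_IDX(iova, 1, 34),
	       (unsigned long long)LVL_IDX(iova, 2, 34));
	return 0;
}
```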
| 79 | 67 | |
|---|
| 80 | 68 | /* |
|---|
| .. | .. |
|---|
| 123 | 111 | #define ARM_V7S_TEX_MASK 0x7 |
|---|
| 124 | 112 | #define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT) |
|---|
| 125 | 113 | |
|---|
| 126 | | -#define ARM_V7S_ATTR_MTK_4GB BIT(9) /* MTK extend it for 4GB mode */ |
|---|
| 114 | +/* MediaTek extend the bits below for PA 32bit/33bit/34bit */ |
|---|
| 115 | +#define ARM_V7S_ATTR_MTK_PA_BIT32 BIT(9) |
|---|
| 116 | +#define ARM_V7S_ATTR_MTK_PA_BIT33 BIT(4) |
|---|
| 117 | +#define ARM_V7S_ATTR_MTK_PA_BIT34 BIT(5) |
|---|
| 127 | 118 | |
|---|
| 128 | 119 | /* *well, except for TEX on level 2 large pages, of course :( */ |
|---|
| 129 | 120 | #define ARM_V7S_CONT_PAGE_TEX_SHIFT 6 |
|---|
| .. | .. |
|---|
| 158 | 149 | #define ARM_V7S_TTBR_IRGN_ATTR(attr) \ |
|---|
| 159 | 150 | ((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1)) |
|---|
| 160 | 151 | |
|---|
| 161 | | -#define ARM_V7S_TCR_PD1 BIT(5) |
|---|
| 162 | | - |
|---|
| 163 | 152 | #ifdef CONFIG_ZONE_DMA32 |
|---|
| 164 | 153 | #define ARM_V7S_TABLE_GFP_DMA GFP_DMA32 |
|---|
| 165 | 154 | #define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32 |
|---|
| .. | .. |
|---|
| 180 | 169 | spinlock_t split_lock; |
|---|
| 181 | 170 | }; |
|---|
| 182 | 171 | |
|---|
| 172 | +static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl); |
|---|
| 173 | + |
|---|
| 183 | 174 | static dma_addr_t __arm_v7s_dma_addr(void *pages) |
|---|
| 184 | 175 | { |
|---|
| 185 | 176 | return (dma_addr_t)virt_to_phys(pages); |
|---|
| 186 | 177 | } |
|---|
| 187 | 178 | |
|---|
| 188 | | -static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl) |
|---|
| 179 | +static bool arm_v7s_is_mtk_enabled(struct io_pgtable_cfg *cfg) |
|---|
| 189 | 180 | { |
|---|
| 181 | + return IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) && |
|---|
| 182 | + (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT); |
|---|
| 183 | +} |
|---|
| 184 | + |
|---|
| 185 | +static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl, |
|---|
| 186 | + struct io_pgtable_cfg *cfg) |
|---|
| 187 | +{ |
|---|
| 188 | + arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl); |
|---|
| 189 | + |
|---|
| 190 | + if (!arm_v7s_is_mtk_enabled(cfg)) |
|---|
| 191 | + return pte; |
|---|
| 192 | + |
|---|
| 193 | + if (paddr & BIT_ULL(32)) |
|---|
| 194 | + pte |= ARM_V7S_ATTR_MTK_PA_BIT32; |
|---|
| 195 | + if (paddr & BIT_ULL(33)) |
|---|
| 196 | + pte |= ARM_V7S_ATTR_MTK_PA_BIT33; |
|---|
| 197 | + if (paddr & BIT_ULL(34)) |
|---|
| 198 | + pte |= ARM_V7S_ATTR_MTK_PA_BIT34; |
|---|
| 199 | + return pte; |
|---|
| 200 | +} |
|---|
| 201 | + |
|---|
| 202 | +static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl, |
|---|
| 203 | + struct io_pgtable_cfg *cfg) |
|---|
| 204 | +{ |
|---|
| 205 | + arm_v7s_iopte mask; |
|---|
| 206 | + phys_addr_t paddr; |
|---|
| 207 | + |
|---|
| 190 | 208 | if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) |
|---|
| 191 | | - pte &= ARM_V7S_TABLE_MASK; |
|---|
| 209 | + mask = ARM_V7S_TABLE_MASK; |
|---|
| 210 | + else if (arm_v7s_pte_is_cont(pte, lvl)) |
|---|
| 211 | + mask = ARM_V7S_LVL_MASK(lvl) * ARM_V7S_CONT_PAGES; |
|---|
| 192 | 212 | else |
|---|
| 193 | | - pte &= ARM_V7S_LVL_MASK(lvl); |
|---|
| 194 | | - return phys_to_virt(pte); |
|---|
| 213 | + mask = ARM_V7S_LVL_MASK(lvl); |
|---|
| 214 | + |
|---|
| 215 | + paddr = pte & mask; |
|---|
| 216 | + if (!arm_v7s_is_mtk_enabled(cfg)) |
|---|
| 217 | + return paddr; |
|---|
| 218 | + |
|---|
| 219 | + if (pte & ARM_V7S_ATTR_MTK_PA_BIT32) |
|---|
| 220 | + paddr |= BIT_ULL(32); |
|---|
| 221 | + if (pte & ARM_V7S_ATTR_MTK_PA_BIT33) |
|---|
| 222 | + paddr |= BIT_ULL(33); |
|---|
| 223 | + if (pte & ARM_V7S_ATTR_MTK_PA_BIT34) |
|---|
| 224 | + paddr |= BIT_ULL(34); |
|---|
| 225 | + return paddr; |
|---|
| 226 | +} |
|---|
| 227 | + |
|---|
| 228 | +static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl, |
|---|
| 229 | + struct arm_v7s_io_pgtable *data) |
|---|
| 230 | +{ |
|---|
| 231 | + return phys_to_virt(iopte_to_paddr(pte, lvl, &data->iop.cfg)); |
|---|
| 195 | 232 | } |
|---|
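`paddr_to_iopte()`/`iopte_to_paddr()` let a 32-bit short-descriptor PTE carry a MediaTek physical address above 4 GB by stashing PA[32], PA[33] and PA[34] in the spare PTE bits 9, 4 and 5 defined earlier; the unpack side also has to pick the right low-bit mask depending on whether the entry is a table pointer, a contiguous run, or a plain leaf. A standalone sketch of the round trip for the simple level-1 section case:

```c
/* Standalone model of the MediaTek PA[34:32] round trip (not kernel code). */
#include <stdio.h>
#include <stdint.h>

#define MTK_PA_BIT32	(1u << 9)
#define MTK_PA_BIT33	(1u << 4)
#define MTK_PA_BIT34	(1u << 5)
#define SECTION_MASK	0xfff00000u	/* ARM_V7S_LVL_MASK(1) */

static uint32_t pack(uint64_t paddr)
{
	uint32_t pte = paddr & SECTION_MASK;

	if (paddr & (1ULL << 32)) pte |= MTK_PA_BIT32;
	if (paddr & (1ULL << 33)) pte |= MTK_PA_BIT33;
	if (paddr & (1ULL << 34)) pte |= MTK_PA_BIT34;
	return pte;
}

static uint64_t unpack(uint32_t pte)
{
	uint64_t paddr = pte & SECTION_MASK;

	if (pte & MTK_PA_BIT32) paddr |= 1ULL << 32;
	if (pte & MTK_PA_BIT33) paddr |= 1ULL << 33;
	if (pte & MTK_PA_BIT34) paddr |= 1ULL << 34;
	return paddr;
}

int main(void)
{
	uint64_t paddr = 0x5ab400000ULL;	/* section-aligned, PA[34]=1, PA[32]=1 */
	uint32_t pte = pack(paddr);

	printf("pte=0x%08x, paddr back=0x%llx\n",
	       pte, (unsigned long long)unpack(pte));
	return 0;
}
```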
| 196 | 233 | |
|---|
| 197 | 234 | static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, |
|---|
| .. | .. |
|---|
| 201 | 238 | struct device *dev = cfg->iommu_dev; |
|---|
| 202 | 239 | phys_addr_t phys; |
|---|
| 203 | 240 | dma_addr_t dma; |
|---|
| 204 | | - size_t size = ARM_V7S_TABLE_SIZE(lvl); |
|---|
| 241 | + size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg); |
|---|
| 205 | 242 | void *table = NULL; |
|---|
| 206 | 243 | |
|---|
| 207 | 244 | if (lvl == 1) |
|---|
| .. | .. |
|---|
| 209 | 246 | __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size)); |
|---|
| 210 | 247 | else if (lvl == 2) |
|---|
| 211 | 248 | table = kmem_cache_zalloc(data->l2_tables, gfp); |
|---|
| 249 | + |
|---|
| 250 | + if (!table) |
|---|
| 251 | + return NULL; |
|---|
| 252 | + |
|---|
| 212 | 253 | phys = virt_to_phys(table); |
|---|
| 213 | 254 | if (phys != (arm_v7s_iopte)phys) { |
|---|
| 214 | 255 | /* Doesn't fit in PTE */ |
|---|
| 215 | 256 | dev_err(dev, "Page table does not fit in PTE: %pa", &phys); |
|---|
| 216 | 257 | goto out_free; |
|---|
| 217 | 258 | } |
|---|
| 218 | | - if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { |
|---|
| 259 | + if (!cfg->coherent_walk) { |
|---|
| 219 | 260 | dma = dma_map_single(dev, table, size, DMA_TO_DEVICE); |
|---|
| 220 | 261 | if (dma_mapping_error(dev, dma)) |
|---|
| 221 | 262 | goto out_free; |
|---|
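Two things change in `__arm_v7s_alloc_table()`: the allocation is now checked before `virt_to_phys()` is called on it, and the DMA-maintenance decision keys off `cfg->coherent_walk` instead of the retired `IO_PGTABLE_QUIRK_NO_DMA` quirk (note the inverted sense: the quirk meant "walker is coherent, skip maintenance"). A hypothetical driver-side fragment, only to show where the flag now lives; it is a sketch, not buildable on its own, and `example_alloc_v7s` is invented here:

```c
#include <linux/io-pgtable.h>
#include <linux/sizes.h>

/*
 * Hypothetical probe-time setup: what used to be expressed by setting
 * IO_PGTABLE_QUIRK_NO_DMA is now the positive cfg->coherent_walk flag.
 */
static struct io_pgtable_ops *
example_alloc_v7s(struct device *dev, const struct iommu_flush_ops *tlb,
		  void *cookie, bool coherent)
{
	struct io_pgtable_cfg cfg = {
		.quirks		= IO_PGTABLE_QUIRK_ARM_NS,
		.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
		.ias		= 32,
		.oas		= 32,
		.coherent_walk	= coherent,	/* was: quirks |= IO_PGTABLE_QUIRK_NO_DMA */
		.tlb		= tlb,
		.iommu_dev	= dev,
	};

	return alloc_io_pgtable_ops(ARM_V7S, &cfg, cookie);
}
```

A real driver would keep `cfg` around afterwards, since `alloc_io_pgtable_ops()` fills in the TTBR and TCR values it must program into the hardware.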
| .. | .. |
|---|
| 247 | 288 | { |
|---|
| 248 | 289 | struct io_pgtable_cfg *cfg = &data->iop.cfg; |
|---|
| 249 | 290 | struct device *dev = cfg->iommu_dev; |
|---|
| 250 | | - size_t size = ARM_V7S_TABLE_SIZE(lvl); |
|---|
| 291 | + size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg); |
|---|
| 251 | 292 | |
|---|
| 252 | | - if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) |
|---|
| 293 | + if (!cfg->coherent_walk) |
|---|
| 253 | 294 | dma_unmap_single(dev, __arm_v7s_dma_addr(table), size, |
|---|
| 254 | 295 | DMA_TO_DEVICE); |
|---|
| 255 | 296 | if (lvl == 1) |
|---|
| .. | .. |
|---|
| 261 | 302 | static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries, |
|---|
| 262 | 303 | struct io_pgtable_cfg *cfg) |
|---|
| 263 | 304 | { |
|---|
| 264 | | - if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) |
|---|
| 305 | + if (cfg->coherent_walk) |
|---|
| 265 | 306 | return; |
|---|
| 266 | 307 | |
|---|
| 267 | 308 | dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep), |
|---|
| .. | .. |
|---|
| 305 | 346 | pte |= ARM_V7S_PTE_TYPE_PAGE; |
|---|
| 306 | 347 | if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)) |
|---|
| 307 | 348 | pte |= ARM_V7S_ATTR_NS_SECTION; |
|---|
| 308 | | - |
|---|
| 309 | | - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB) |
|---|
| 310 | | - pte |= ARM_V7S_ATTR_MTK_4GB; |
|---|
| 311 | 349 | |
|---|
| 312 | 350 | return pte; |
|---|
| 313 | 351 | } |
|---|
| .. | .. |
|---|
| 373 | 411 | return false; |
|---|
| 374 | 412 | } |
|---|
| 375 | 413 | |
|---|
| 376 | | -static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long, |
|---|
| 414 | +static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *, |
|---|
| 415 | + struct iommu_iotlb_gather *, unsigned long, |
|---|
| 377 | 416 | size_t, int, arm_v7s_iopte *); |
|---|
| 378 | 417 | |
|---|
| 379 | 418 | static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data, |
|---|
| .. | .. |
|---|
| 393 | 432 | arm_v7s_iopte *tblp; |
|---|
| 394 | 433 | size_t sz = ARM_V7S_BLOCK_SIZE(lvl); |
|---|
| 395 | 434 | |
|---|
| 396 | | - tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl); |
|---|
| 397 | | - if (WARN_ON(__arm_v7s_unmap(data, iova + i * sz, |
|---|
| 435 | + tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl, cfg); |
|---|
| 436 | + if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz, |
|---|
| 398 | 437 | sz, lvl, tblp) != sz)) |
|---|
| 399 | 438 | return -EINVAL; |
|---|
| 400 | 439 | } else if (ptep[i]) { |
|---|
| .. | .. |
|---|
| 407 | 446 | if (num_entries > 1) |
|---|
| 408 | 447 | pte = arm_v7s_pte_to_cont(pte, lvl); |
|---|
| 409 | 448 | |
|---|
| 410 | | - pte |= paddr & ARM_V7S_LVL_MASK(lvl); |
|---|
| 449 | + pte |= paddr_to_iopte(paddr, lvl, cfg); |
|---|
| 411 | 450 | |
|---|
| 412 | 451 | __arm_v7s_set_pte(ptep, pte, num_entries, cfg); |
|---|
| 413 | 452 | return 0; |
|---|
| .. | .. |
|---|
| 439 | 478 | |
|---|
| 440 | 479 | static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, |
|---|
| 441 | 480 | phys_addr_t paddr, size_t size, int prot, |
|---|
| 442 | | - int lvl, arm_v7s_iopte *ptep) |
|---|
| 481 | + int lvl, arm_v7s_iopte *ptep, gfp_t gfp) |
|---|
| 443 | 482 | { |
|---|
| 444 | 483 | struct io_pgtable_cfg *cfg = &data->iop.cfg; |
|---|
| 445 | 484 | arm_v7s_iopte pte, *cptep; |
|---|
| 446 | 485 | int num_entries = size >> ARM_V7S_LVL_SHIFT(lvl); |
|---|
| 447 | 486 | |
|---|
| 448 | 487 | /* Find our entry at the current level */ |
|---|
| 449 | | - ptep += ARM_V7S_LVL_IDX(iova, lvl); |
|---|
| 488 | + ptep += ARM_V7S_LVL_IDX(iova, lvl, cfg); |
|---|
| 450 | 489 | |
|---|
| 451 | 490 | /* If we can install a leaf entry at this level, then do so */ |
|---|
| 452 | 491 | if (num_entries) |
|---|
| .. | .. |
|---|
| 460 | 499 | /* Grab a pointer to the next level */ |
|---|
| 461 | 500 | pte = READ_ONCE(*ptep); |
|---|
| 462 | 501 | if (!pte) { |
|---|
| 463 | | - cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data); |
|---|
| 502 | + cptep = __arm_v7s_alloc_table(lvl + 1, gfp, data); |
|---|
| 464 | 503 | if (!cptep) |
|---|
| 465 | 504 | return -ENOMEM; |
|---|
| 466 | 505 | |
|---|
| .. | .. |
|---|
| 473 | 512 | } |
|---|
| 474 | 513 | |
|---|
| 475 | 514 | if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) { |
|---|
| 476 | | - cptep = iopte_deref(pte, lvl); |
|---|
| 515 | + cptep = iopte_deref(pte, lvl, data); |
|---|
| 477 | 516 | } else if (pte) { |
|---|
| 478 | 517 | /* We require an unmap first */ |
|---|
| 479 | 518 | WARN_ON(!selftest_running); |
|---|
| .. | .. |
|---|
| 481 | 520 | } |
|---|
| 482 | 521 | |
|---|
| 483 | 522 | /* Rinse, repeat */ |
|---|
| 484 | | - return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep); |
|---|
| 523 | + return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp); |
|---|
| 485 | 524 | } |
|---|
| 486 | 525 | |
|---|
| 487 | | -static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, |
|---|
| 488 | | - phys_addr_t paddr, size_t size, int prot) |
|---|
| 526 | +static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova, |
|---|
| 527 | + phys_addr_t paddr, size_t pgsize, size_t pgcount, |
|---|
| 528 | + int prot, gfp_t gfp, size_t *mapped) |
|---|
| 489 | 529 | { |
|---|
| 490 | 530 | struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); |
|---|
| 491 | | - struct io_pgtable *iop = &data->iop; |
|---|
| 492 | | - int ret; |
|---|
| 531 | + int ret = -EINVAL; |
|---|
| 493 | 532 | |
|---|
| 494 | 533 | /* If no access, then nothing to do */ |
|---|
| 495 | 534 | if (!(prot & (IOMMU_READ | IOMMU_WRITE))) |
|---|
| 496 | 535 | return 0; |
|---|
| 497 | 536 | |
|---|
| 498 | | - if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr))) |
|---|
| 537 | + if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) || |
|---|
| 538 | + paddr >= (1ULL << data->iop.cfg.oas))) |
|---|
| 499 | 539 | return -ERANGE; |
|---|
| 500 | 540 | |
|---|
| 501 | | - ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd); |
|---|
| 541 | + while (pgcount--) { |
|---|
| 542 | + ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd, |
|---|
| 543 | + gfp); |
|---|
| 544 | + if (ret) |
|---|
| 545 | + break; |
|---|
| 546 | + |
|---|
| 547 | + iova += pgsize; |
|---|
| 548 | + paddr += pgsize; |
|---|
| 549 | + if (mapped) |
|---|
| 550 | + *mapped += pgsize; |
|---|
| 551 | + } |
|---|
| 502 | 552 | /* |
|---|
| 503 | 553 | * Synchronise all PTE updates for the new mapping before there's |
|---|
| 504 | 554 | * a chance for anything to kick off a table walk for the new iova. |
|---|
| 505 | 555 | */ |
|---|
| 506 | | - if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) { |
|---|
| 507 | | - io_pgtable_tlb_add_flush(iop, iova, size, |
|---|
| 508 | | - ARM_V7S_BLOCK_SIZE(2), false); |
|---|
| 509 | | - io_pgtable_tlb_sync(iop); |
|---|
| 510 | | - } else { |
|---|
| 511 | | - wmb(); |
|---|
| 512 | | - } |
|---|
| 556 | + wmb(); |
|---|
| 513 | 557 | |
|---|
| 514 | 558 | return ret; |
|---|
| 559 | +} |
|---|
| 560 | + |
|---|
| 561 | +static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, |
|---|
| 562 | + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) |
|---|
| 563 | +{ |
|---|
| 564 | + return arm_v7s_map_pages(ops, iova, paddr, size, 1, prot, gfp, NULL); |
|---|
| 515 | 565 | } |
|---|
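`arm_v7s_map_pages()` maps `pgcount` equally sized pages per call, advancing `iova`/`paddr` each iteration, taking the caller's `gfp` instead of hard-coded `GFP_ATOMIC`, and recording progress in `*mapped` so a failed iteration leaves the caller enough information to unwind; the old `arm_v7s_map()` is now a one-page wrapper. A standalone sketch of that partial-progress accounting, with a fake per-page mapper that fails on the third page (purely illustrative):

```c
/* Standalone model of the map_pages() partial-progress accounting. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <errno.h>

/* Fake single-page mapper: pretend the allocator fails on the 3rd page. */
static int map_one(uint64_t iova, uint64_t paddr, size_t pgsize)
{
	static int calls;

	return (++calls == 3) ? -ENOMEM : 0;
}

static int map_pages(uint64_t iova, uint64_t paddr, size_t pgsize,
		     size_t pgcount, size_t *mapped)
{
	int ret = -EINVAL;

	while (pgcount--) {
		ret = map_one(iova, paddr, pgsize);
		if (ret)
			break;
		iova += pgsize;
		paddr += pgsize;
		if (mapped)
			*mapped += pgsize;
	}
	return ret;
}

int main(void)
{
	size_t mapped = 0;
	int ret = map_pages(0x10000000, 0x40000000, 0x1000, 8, &mapped);

	/* ret=-12 (ENOMEM), mapped=0x2000: the caller knows what to tear down */
	printf("ret=%d mapped=0x%zx\n", ret, mapped);
	return 0;
}
```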
| 516 | 566 | |
|---|
| 517 | 567 | static void arm_v7s_free_pgtable(struct io_pgtable *iop) |
|---|
| .. | .. |
|---|
| 519 | 569 | struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop); |
|---|
| 520 | 570 | int i; |
|---|
| 521 | 571 | |
|---|
| 522 | | - for (i = 0; i < ARM_V7S_PTES_PER_LVL(1); i++) { |
|---|
| 572 | + for (i = 0; i < ARM_V7S_PTES_PER_LVL(1, &data->iop.cfg); i++) { |
|---|
| 523 | 573 | arm_v7s_iopte pte = data->pgd[i]; |
|---|
| 524 | 574 | |
|---|
| 525 | 575 | if (ARM_V7S_PTE_IS_TABLE(pte, 1)) |
|---|
| 526 | | - __arm_v7s_free_table(iopte_deref(pte, 1), 2, data); |
|---|
| 576 | + __arm_v7s_free_table(iopte_deref(pte, 1, data), |
|---|
| 577 | + 2, data); |
|---|
| 527 | 578 | } |
|---|
| 528 | 579 | __arm_v7s_free_table(data->pgd, 1, data); |
|---|
| 529 | 580 | kmem_cache_destroy(data->l2_tables); |
|---|
| .. | .. |
|---|
| 552 | 603 | __arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg); |
|---|
| 553 | 604 | |
|---|
| 554 | 605 | size *= ARM_V7S_CONT_PAGES; |
|---|
| 555 | | - io_pgtable_tlb_add_flush(iop, iova, size, size, true); |
|---|
| 556 | | - io_pgtable_tlb_sync(iop); |
|---|
| 606 | + io_pgtable_tlb_flush_walk(iop, iova, size, size); |
|---|
| 557 | 607 | return pte; |
|---|
| 558 | 608 | } |
|---|
| 559 | 609 | |
|---|
| 560 | 610 | static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, |
|---|
| 611 | + struct iommu_iotlb_gather *gather, |
|---|
| 561 | 612 | unsigned long iova, size_t size, |
|---|
| 562 | 613 | arm_v7s_iopte blk_pte, |
|---|
| 563 | 614 | arm_v7s_iopte *ptep) |
|---|
| .. | .. |
|---|
| 570 | 621 | if (!tablep) |
|---|
| 571 | 622 | return 0; /* Bytes unmapped */ |
|---|
| 572 | 623 | |
|---|
| 573 | | - num_ptes = ARM_V7S_PTES_PER_LVL(2); |
|---|
| 624 | + num_ptes = ARM_V7S_PTES_PER_LVL(2, cfg); |
|---|
| 574 | 625 | num_entries = size >> ARM_V7S_LVL_SHIFT(2); |
|---|
| 575 | | - unmap_idx = ARM_V7S_LVL_IDX(iova, 2); |
|---|
| 626 | + unmap_idx = ARM_V7S_LVL_IDX(iova, 2, cfg); |
|---|
| 576 | 627 | |
|---|
| 577 | 628 | pte = arm_v7s_prot_to_pte(arm_v7s_pte_to_prot(blk_pte, 1), 2, cfg); |
|---|
| 578 | 629 | if (num_entries > 1) |
|---|
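When an unmap covers only part of a level-1 section, `arm_v7s_split_blk_unmap()` replaces the block with a freshly built level-2 table and leaves just the targeted entries invalid; the values computed above are the table population, the number of entries the request removes, and where they sit. A standalone worked example for unmapping a single 4 KiB page out of a 1 MiB section:

```c
/* Standalone model of the split_blk_unmap() index math (not kernel code). */
#include <stdio.h>
#include <stdint.h>

#define LVL2_SHIFT	12	/* ARM_V7S_LVL_SHIFT(2) */
#define LVL2_PTES	256	/* ARM_V7S_PTES_PER_LVL(2, cfg) */

int main(void)
{
	uint64_t iova = 0x12345000;	/* inside the 1 MiB section at 0x12300000 */
	uint64_t size = 0x1000;		/* unmap a single 4 KiB page */

	unsigned int num_ptes    = LVL2_PTES;
	unsigned int num_entries = size >> LVL2_SHIFT;
	unsigned int unmap_idx   = (iova >> LVL2_SHIFT) & (LVL2_PTES - 1);

	/* 256 new level-2 PTEs, 1 of them left invalid, at index 0x45 */
	printf("num_ptes=%u num_entries=%u unmap_idx=0x%x\n",
	       num_ptes, num_entries, unmap_idx);
	return 0;
}
```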
| .. | .. |
|---|
| 593 | 644 | if (!ARM_V7S_PTE_IS_TABLE(pte, 1)) |
|---|
| 594 | 645 | return 0; |
|---|
| 595 | 646 | |
|---|
| 596 | | - tablep = iopte_deref(pte, 1); |
|---|
| 597 | | - return __arm_v7s_unmap(data, iova, size, 2, tablep); |
|---|
| 647 | + tablep = iopte_deref(pte, 1, data); |
|---|
| 648 | + return __arm_v7s_unmap(data, gather, iova, size, 2, tablep); |
|---|
| 598 | 649 | } |
|---|
| 599 | 650 | |
|---|
| 600 | | - io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); |
|---|
| 651 | + io_pgtable_tlb_add_page(&data->iop, gather, iova, size); |
|---|
| 601 | 652 | return size; |
|---|
| 602 | 653 | } |
|---|
| 603 | 654 | |
|---|
| 604 | 655 | static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, |
|---|
| 656 | + struct iommu_iotlb_gather *gather, |
|---|
| 605 | 657 | unsigned long iova, size_t size, int lvl, |
|---|
| 606 | 658 | arm_v7s_iopte *ptep) |
|---|
| 607 | 659 | { |
|---|
| .. | .. |
|---|
| 613 | 665 | if (WARN_ON(lvl > 2)) |
|---|
| 614 | 666 | return 0; |
|---|
| 615 | 667 | |
|---|
| 616 | | - idx = ARM_V7S_LVL_IDX(iova, lvl); |
|---|
| 668 | + idx = ARM_V7S_LVL_IDX(iova, lvl, &iop->cfg); |
|---|
| 617 | 669 | ptep += idx; |
|---|
| 618 | 670 | do { |
|---|
| 619 | 671 | pte[i] = READ_ONCE(ptep[i]); |
|---|
| .. | .. |
|---|
| 648 | 700 | for (i = 0; i < num_entries; i++) { |
|---|
| 649 | 701 | if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) { |
|---|
| 650 | 702 | /* Also flush any partial walks */ |
|---|
| 651 | | - io_pgtable_tlb_add_flush(iop, iova, blk_size, |
|---|
| 652 | | - ARM_V7S_BLOCK_SIZE(lvl + 1), false); |
|---|
| 653 | | - io_pgtable_tlb_sync(iop); |
|---|
| 654 | | - ptep = iopte_deref(pte[i], lvl); |
|---|
| 703 | + io_pgtable_tlb_flush_walk(iop, iova, blk_size, |
|---|
| 704 | + ARM_V7S_BLOCK_SIZE(lvl + 1)); |
|---|
| 705 | + ptep = iopte_deref(pte[i], lvl, data); |
|---|
| 655 | 706 | __arm_v7s_free_table(ptep, lvl + 1, data); |
|---|
| 707 | + } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) { |
|---|
| 708 | + /* |
|---|
| 709 | + * Order the PTE update against queueing the IOVA, to |
|---|
| 710 | + * guarantee that a flush callback from a different CPU |
|---|
| 711 | + * has observed it before the TLBIALL can be issued. |
|---|
| 712 | + */ |
|---|
| 713 | + smp_wmb(); |
|---|
| 656 | 714 | } else { |
|---|
| 657 | | - io_pgtable_tlb_add_flush(iop, iova, blk_size, |
|---|
| 658 | | - blk_size, true); |
|---|
| 715 | + io_pgtable_tlb_add_page(iop, gather, iova, blk_size); |
|---|
| 659 | 716 | } |
|---|
| 660 | 717 | iova += blk_size; |
|---|
| 661 | 718 | } |
|---|
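The unmap path now uses the split TLB interface: `io_pgtable_tlb_flush_walk()` synchronously covers freed intermediate tables, leaf invalidations go through `io_pgtable_tlb_add_page()` and the caller's `iommu_iotlb_gather`, and with `IO_PGTABLE_QUIRK_NON_STRICT` leaf invalidation is deferred entirely, with only an `smp_wmb()` ordering the PTE clear against the queued IOVA. On the driver side this maps onto an `iommu_flush_ops` implementation roughly like the sketch below; the `hw_*` helpers are hypothetical stand-ins for real invalidation registers:

```c
#include <linux/io-pgtable.h>
#include <linux/iommu.h>

/* Hypothetical hardware hooks - stand-ins, not a real driver's API. */
void hw_invalidate_all(void *cookie);
void hw_invalidate_range(void *cookie, unsigned long iova, size_t size,
			 size_t granule);

static void ex_tlb_flush_all(void *cookie)
{
	hw_invalidate_all(cookie);
}

static void ex_tlb_flush_walk(unsigned long iova, size_t size,
			      size_t granule, void *cookie)
{
	/* Synchronous: covers freed intermediate tables during unmap */
	hw_invalidate_range(cookie, iova, size, granule);
}

static void ex_tlb_add_page(struct iommu_iotlb_gather *gather,
			    unsigned long iova, size_t granule, void *cookie)
{
	/*
	 * Issue (but don't wait for) a single-page invalidation; the
	 * domain's ->iotlb_sync() waits once per batch instead.
	 */
	hw_invalidate_range(cookie, iova, granule, granule);
}

static const struct iommu_flush_ops ex_tlb_ops = {
	.tlb_flush_all	= ex_tlb_flush_all,
	.tlb_flush_walk	= ex_tlb_flush_walk,
	.tlb_add_page	= ex_tlb_add_page,
};
```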
| .. | .. |
|---|
| 665 | 722 | * Insert a table at the next level to map the old region, |
|---|
| 666 | 723 | * minus the part we want to unmap |
|---|
| 667 | 724 | */ |
|---|
| 668 | | - return arm_v7s_split_blk_unmap(data, iova, size, pte[0], ptep); |
|---|
| 725 | + return arm_v7s_split_blk_unmap(data, gather, iova, size, pte[0], |
|---|
| 726 | + ptep); |
|---|
| 669 | 727 | } |
|---|
| 670 | 728 | |
|---|
| 671 | 729 | /* Keep on walkin' */ |
|---|
| 672 | | - ptep = iopte_deref(pte[0], lvl); |
|---|
| 673 | | - return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep); |
|---|
| 730 | + ptep = iopte_deref(pte[0], lvl, data); |
|---|
| 731 | + return __arm_v7s_unmap(data, gather, iova, size, lvl + 1, ptep); |
|---|
| 732 | +} |
|---|
| 733 | + |
|---|
| 734 | +static size_t arm_v7s_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova, |
|---|
| 735 | + size_t pgsize, size_t pgcount, |
|---|
| 736 | + struct iommu_iotlb_gather *gather) |
|---|
| 737 | +{ |
|---|
| 738 | + struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); |
|---|
| 739 | + size_t unmapped = 0, ret; |
|---|
| 740 | + |
|---|
| 741 | + if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias))) |
|---|
| 742 | + return 0; |
|---|
| 743 | + |
|---|
| 744 | + while (pgcount--) { |
|---|
| 745 | + ret = __arm_v7s_unmap(data, gather, iova, pgsize, 1, data->pgd); |
|---|
| 746 | + if (!ret) |
|---|
| 747 | + break; |
|---|
| 748 | + |
|---|
| 749 | + unmapped += pgsize; |
|---|
| 750 | + iova += pgsize; |
|---|
| 751 | + } |
|---|
| 752 | + |
|---|
| 753 | + return unmapped; |
|---|
| 674 | 754 | } |
|---|
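`arm_v7s_unmap_pages()` mirrors the map side: it walks `pgcount` pages, stops at the first failure and returns the bytes actually unmapped, while the legacy `arm_v7s_unmap()` becomes a wrapper. The extra `gfp` and `iommu_iotlb_gather` arguments exist so an IOMMU driver can pass the core's values straight through; hypothetical glue (the `ex_*` names are invented for illustration) might look like:

```c
#include <linux/kernel.h>
#include <linux/iommu.h>
#include <linux/io-pgtable.h>

/* Hypothetical domain wrapper - invented for illustration only. */
struct ex_domain {
	struct iommu_domain	domain;
	struct io_pgtable_ops	*pgtbl_ops;
};

#define to_ex_domain(d)	container_of(d, struct ex_domain, domain)

static int ex_map_pages(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t pgsize, size_t pgcount,
			int prot, gfp_t gfp, size_t *mapped)
{
	struct ex_domain *dom = to_ex_domain(domain);

	/* The core's gfp and mapped pointer go straight down to io-pgtable */
	return dom->pgtbl_ops->map_pages(dom->pgtbl_ops, iova, paddr, pgsize,
					 pgcount, prot, gfp, mapped);
}

static size_t ex_unmap_pages(struct iommu_domain *domain, unsigned long iova,
			     size_t pgsize, size_t pgcount,
			     struct iommu_iotlb_gather *gather)
{
	struct ex_domain *dom = to_ex_domain(domain);

	/* Likewise for the core's invalidation gather */
	return dom->pgtbl_ops->unmap_pages(dom->pgtbl_ops, iova, pgsize,
					   pgcount, gather);
}
```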
| 675 | 755 | |
|---|
| 676 | 756 | static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova, |
|---|
| 677 | | - size_t size) |
|---|
| 757 | + size_t size, struct iommu_iotlb_gather *gather) |
|---|
| 678 | 758 | { |
|---|
| 679 | | - struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); |
|---|
| 680 | | - |
|---|
| 681 | | - if (WARN_ON(upper_32_bits(iova))) |
|---|
| 682 | | - return 0; |
|---|
| 683 | | - |
|---|
| 684 | | - return __arm_v7s_unmap(data, iova, size, 1, data->pgd); |
|---|
| 759 | + return arm_v7s_unmap_pages(ops, iova, size, 1, gather); |
|---|
| 685 | 760 | } |
|---|
| 686 | 761 | |
|---|
| 687 | 762 | static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops, |
|---|
| .. | .. |
|---|
| 693 | 768 | u32 mask; |
|---|
| 694 | 769 | |
|---|
| 695 | 770 | do { |
|---|
| 696 | | - ptep += ARM_V7S_LVL_IDX(iova, ++lvl); |
|---|
| 771 | + ptep += ARM_V7S_LVL_IDX(iova, ++lvl, &data->iop.cfg); |
|---|
| 697 | 772 | pte = READ_ONCE(*ptep); |
|---|
| 698 | | - ptep = iopte_deref(pte, lvl); |
|---|
| 773 | + ptep = iopte_deref(pte, lvl, data); |
|---|
| 699 | 774 | } while (ARM_V7S_PTE_IS_TABLE(pte, lvl)); |
|---|
| 700 | 775 | |
|---|
| 701 | 776 | if (!ARM_V7S_PTE_IS_VALID(pte)) |
|---|
| .. | .. |
|---|
| 704 | 779 | mask = ARM_V7S_LVL_MASK(lvl); |
|---|
| 705 | 780 | if (arm_v7s_pte_is_cont(pte, lvl)) |
|---|
| 706 | 781 | mask *= ARM_V7S_CONT_PAGES; |
|---|
| 707 | | - return (pte & mask) | (iova & ~mask); |
|---|
| 782 | + return iopte_to_paddr(pte, lvl, &data->iop.cfg) | (iova & ~mask); |
|---|
| 708 | 783 | } |
|---|
| 709 | 784 | |
|---|
| 710 | 785 | static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, |
|---|
| .. | .. |
|---|
| 712 | 787 | { |
|---|
| 713 | 788 | struct arm_v7s_io_pgtable *data; |
|---|
| 714 | 789 | |
|---|
| 715 | | -#ifdef PHYS_OFFSET |
|---|
| 716 | | - if (upper_32_bits(PHYS_OFFSET)) |
|---|
| 790 | + if (cfg->ias > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS)) |
|---|
| 717 | 791 | return NULL; |
|---|
| 718 | | -#endif |
|---|
| 719 | | - if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS) |
|---|
| 792 | + |
|---|
| 793 | + if (cfg->oas > (arm_v7s_is_mtk_enabled(cfg) ? 35 : ARM_V7S_ADDR_BITS)) |
|---|
| 720 | 794 | return NULL; |
|---|
| 721 | 795 | |
|---|
| 722 | 796 | if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | |
|---|
| 723 | 797 | IO_PGTABLE_QUIRK_NO_PERMS | |
|---|
| 724 | | - IO_PGTABLE_QUIRK_TLBI_ON_MAP | |
|---|
| 725 | | - IO_PGTABLE_QUIRK_ARM_MTK_4GB | |
|---|
| 726 | | - IO_PGTABLE_QUIRK_NO_DMA)) |
|---|
| 798 | + IO_PGTABLE_QUIRK_ARM_MTK_EXT | |
|---|
| 799 | + IO_PGTABLE_QUIRK_NON_STRICT)) |
|---|
| 727 | 800 | return NULL; |
|---|
| 728 | 801 | |
|---|
| 729 | 802 | /* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */ |
|---|
| 730 | | - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB && |
|---|
| 803 | + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT && |
|---|
| 731 | 804 | !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS)) |
|---|
| 732 | 805 | return NULL; |
|---|
| 733 | 806 | |
|---|
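`arm_v7s_alloc_pgtable()` drops the old `PHYS_OFFSET` guard and the flat 32-bit ceiling: with `IO_PGTABLE_QUIRK_ARM_MTK_EXT` (which in turn needs `CONFIG_PHYS_ADDR_T_64BIT`) the input space may reach 34 bits and the output space 35, and the MediaTek quirk still has to be paired with `IO_PGTABLE_QUIRK_NO_PERMS`. A standalone restatement of that acceptance test, using booleans in place of the real quirk bits:

```c
/* Standalone model of the arm_v7s_alloc_pgtable() sanity checks. */
#include <stdbool.h>
#include <stdio.h>

static bool v7s_cfg_ok(unsigned int ias, unsigned int oas,
		       bool mtk_ext, bool no_perms)
{
	if (ias > (mtk_ext ? 34u : 32u))
		return false;
	if (oas > (mtk_ext ? 35u : 32u))
		return false;
	/* The MediaTek extension implies software-managed permissions */
	if (mtk_ext && !no_perms)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", v7s_cfg_ok(32, 32, false, false));	/* 1: plain v7s      */
	printf("%d\n", v7s_cfg_ok(34, 35, true,  true));	/* 1: MTK extension  */
	printf("%d\n", v7s_cfg_ok(34, 35, false, false));	/* 0: too large      */
	printf("%d\n", v7s_cfg_ok(34, 35, true,  false));	/* 0: needs NO_PERMS */
	return 0;
}
```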
| .. | .. |
|---|
| 737 | 810 | |
|---|
| 738 | 811 | spin_lock_init(&data->split_lock); |
|---|
| 739 | 812 | data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2", |
|---|
| 740 | | - ARM_V7S_TABLE_SIZE(2), |
|---|
| 741 | | - ARM_V7S_TABLE_SIZE(2), |
|---|
| 813 | + ARM_V7S_TABLE_SIZE(2, cfg), |
|---|
| 814 | + ARM_V7S_TABLE_SIZE(2, cfg), |
|---|
| 742 | 815 | ARM_V7S_TABLE_SLAB_FLAGS, NULL); |
|---|
| 743 | 816 | if (!data->l2_tables) |
|---|
| 744 | 817 | goto out_free_data; |
|---|
| 745 | 818 | |
|---|
| 746 | 819 | data->iop.ops = (struct io_pgtable_ops) { |
|---|
| 747 | 820 | .map = arm_v7s_map, |
|---|
| 821 | + .map_pages = arm_v7s_map_pages, |
|---|
| 748 | 822 | .unmap = arm_v7s_unmap, |
|---|
| 823 | + .unmap_pages = arm_v7s_unmap_pages, |
|---|
| 749 | 824 | .iova_to_phys = arm_v7s_iova_to_phys, |
|---|
| 750 | 825 | }; |
|---|
| 751 | 826 | |
|---|
| .. | .. |
|---|
| 758 | 833 | */ |
|---|
| 759 | 834 | cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M; |
|---|
| 760 | 835 | |
|---|
| 761 | | - /* TCR: T0SZ=0, disable TTBR1 */ |
|---|
| 762 | | - cfg->arm_v7s_cfg.tcr = ARM_V7S_TCR_PD1; |
|---|
| 836 | + /* TCR: T0SZ=0, EAE=0 (if applicable) */ |
|---|
| 837 | + cfg->arm_v7s_cfg.tcr = 0; |
|---|
| 763 | 838 | |
|---|
| 764 | 839 | /* |
|---|
| 765 | 840 | * TEX remap: the indices used map to the closest equivalent types |
|---|
| .. | .. |
|---|
| 782 | 857 | /* Ensure the empty pgd is visible before any actual TTBR write */ |
|---|
| 783 | 858 | wmb(); |
|---|
| 784 | 859 | |
|---|
| 785 | | - /* TTBRs */ |
|---|
| 786 | | - cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) | |
|---|
| 787 | | - ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS | |
|---|
| 788 | | - ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) | |
|---|
| 789 | | - ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA); |
|---|
| 790 | | - cfg->arm_v7s_cfg.ttbr[1] = 0; |
|---|
| 860 | + /* TTBR */ |
|---|
| 861 | + cfg->arm_v7s_cfg.ttbr = virt_to_phys(data->pgd) | ARM_V7S_TTBR_S | |
|---|
| 862 | + (cfg->coherent_walk ? (ARM_V7S_TTBR_NOS | |
|---|
| 863 | + ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) | |
|---|
| 864 | + ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) : |
|---|
| 865 | + (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) | |
|---|
| 866 | + ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC))); |
|---|
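With TTBR1 permanently disabled, `arm_v7s_cfg.ttbr` shrinks from a two-entry array to a single word, and its cacheability attributes now follow `coherent_walk`: write-back write-allocate and shareable for a coherent walker, non-cacheable otherwise. The sketch below reproduces that selection numerically; `ARM_V7S_TTBR_IRGN_ATTR()` is copied from the hunk earlier in this file, but the other bit positions and RGN encodings are my assumptions about the VMSAv7 TTBR0 layout, not values shown in this diff:

```c
/* Standalone model of the TTBR word built above (assumptions marked). */
#include <stdio.h>
#include <stdint.h>

#define TTBR_S			(1u << 1)	/* assumed: Shareable */
#define TTBR_NOS		(1u << 5)	/* assumed: Not Outer Shareable */
#define TTBR_ORGN_ATTR(attr)	(((attr) & 0x3) << 3)	/* assumed */
#define TTBR_IRGN_ATTR(attr)	((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1))
#define RGN_NC			0	/* assumed encoding */
#define RGN_WBWA		1	/* assumed encoding */

static uint32_t v7s_ttbr(uint32_t pgd_phys, int coherent_walk)
{
	return pgd_phys | TTBR_S |
	       (coherent_walk ? (TTBR_NOS |
				 TTBR_IRGN_ATTR(RGN_WBWA) |
				 TTBR_ORGN_ATTR(RGN_WBWA)) :
				(TTBR_IRGN_ATTR(RGN_NC) |
				 TTBR_ORGN_ATTR(RGN_NC)));
}

int main(void)
{
	/* e.g. a 16 KiB-aligned level-1 table at PA 0x81004000 */
	printf("coherent:     0x%08x\n", v7s_ttbr(0x81004000, 1));	/* 0x8100406a */
	printf("non-coherent: 0x%08x\n", v7s_ttbr(0x81004000, 0));	/* 0x81004002 */
	return 0;
}
```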
| 791 | 867 | return &data->iop; |
|---|
| 792 | 868 | |
|---|
| 793 | 869 | out_free_data: |
|---|
| .. | .. |
|---|
| 803 | 879 | |
|---|
| 804 | 880 | #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST |
|---|
| 805 | 881 | |
|---|
| 806 | | -static struct io_pgtable_cfg *cfg_cookie; |
|---|
| 882 | +static struct io_pgtable_cfg *cfg_cookie __initdata; |
|---|
| 807 | 883 | |
|---|
| 808 | | -static void dummy_tlb_flush_all(void *cookie) |
|---|
| 884 | +static void __init dummy_tlb_flush_all(void *cookie) |
|---|
| 809 | 885 | { |
|---|
| 810 | 886 | WARN_ON(cookie != cfg_cookie); |
|---|
| 811 | 887 | } |
|---|
| 812 | 888 | |
|---|
| 813 | | -static void dummy_tlb_add_flush(unsigned long iova, size_t size, |
|---|
| 814 | | - size_t granule, bool leaf, void *cookie) |
|---|
| 889 | +static void __init dummy_tlb_flush(unsigned long iova, size_t size, |
|---|
| 890 | + size_t granule, void *cookie) |
|---|
| 815 | 891 | { |
|---|
| 816 | 892 | WARN_ON(cookie != cfg_cookie); |
|---|
| 817 | 893 | WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); |
|---|
| 818 | 894 | } |
|---|
| 819 | 895 | |
|---|
| 820 | | -static void dummy_tlb_sync(void *cookie) |
|---|
| 896 | +static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather, |
|---|
| 897 | + unsigned long iova, size_t granule, |
|---|
| 898 | + void *cookie) |
|---|
| 821 | 899 | { |
|---|
| 822 | | - WARN_ON(cookie != cfg_cookie); |
|---|
| 900 | + dummy_tlb_flush(iova, granule, granule, cookie); |
|---|
| 823 | 901 | } |
|---|
| 824 | 902 | |
|---|
| 825 | | -static const struct iommu_gather_ops dummy_tlb_ops = { |
|---|
| 903 | +static const struct iommu_flush_ops dummy_tlb_ops __initconst = { |
|---|
| 826 | 904 | .tlb_flush_all = dummy_tlb_flush_all, |
|---|
| 827 | | - .tlb_add_flush = dummy_tlb_add_flush, |
|---|
| 828 | | - .tlb_sync = dummy_tlb_sync, |
|---|
| 905 | + .tlb_flush_walk = dummy_tlb_flush, |
|---|
| 906 | + .tlb_add_page = dummy_tlb_add_page, |
|---|
| 829 | 907 | }; |
|---|
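The selftest's dummy callbacks are rewritten for the new `iommu_flush_ops` shape (`tlb_flush_all`/`tlb_flush_walk`/`tlb_add_page`) and marked `__init`/`__initdata`/`__initconst` since they only run during boot-time selftest. For intuition about what the `iommu_iotlb_gather` argument threaded through the unmap paths is for, here is a deliberately simplified user-space model of range batching; the real structure and helpers live in `include/linux/iommu.h` and differ in detail:

```c
/*
 * Simplified model of an iommu_iotlb_gather-style batch: merge contiguous,
 * same-granule invalidations and flush once at sync time.
 */
#include <stdio.h>
#include <stddef.h>

struct gather {
	unsigned long start, end;	/* inclusive range queued so far */
	size_t pgsize;
};

static void flush(struct gather *g)
{
	if (g->pgsize)
		printf("TLBI range [0x%lx, 0x%lx] granule 0x%zx\n",
		       g->start, g->end, g->pgsize);
	g->start = ~0UL;
	g->end = 0;
	g->pgsize = 0;
}

static void add_page(struct gather *g, unsigned long iova, size_t pgsize)
{
	/* Non-mergeable request: flush what we have, then start a new batch */
	if (g->pgsize && (g->pgsize != pgsize || iova != g->end + 1))
		flush(g);
	if (!g->pgsize) {
		g->start = iova;
		g->pgsize = pgsize;
	}
	g->end = iova + pgsize - 1;
}

int main(void)
{
	struct gather g = { .start = ~0UL };

	add_page(&g, 0x10000, 0x1000);
	add_page(&g, 0x11000, 0x1000);	/* merges with the previous page */
	add_page(&g, 0x40000, 0x1000);	/* gap: forces a flush of the first batch */
	flush(&g);			/* what a final sync would trigger */
	return 0;
}
```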
| 830 | 908 | |
|---|
| 831 | 909 | #define __FAIL(ops) ({ \ |
|---|
| .. | .. |
|---|
| 841 | 919 | .tlb = &dummy_tlb_ops, |
|---|
| 842 | 920 | .oas = 32, |
|---|
| 843 | 921 | .ias = 32, |
|---|
| 844 | | - .quirks = IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA, |
|---|
| 922 | + .coherent_walk = true, |
|---|
| 923 | + .quirks = IO_PGTABLE_QUIRK_ARM_NS, |
|---|
| 845 | 924 | .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, |
|---|
| 846 | 925 | }; |
|---|
| 847 | 926 | unsigned int iova, size, iova_start; |
|---|
| .. | .. |
|---|
| 879 | 958 | if (ops->map(ops, iova, iova, size, IOMMU_READ | |
|---|
| 880 | 959 | IOMMU_WRITE | |
|---|
| 881 | 960 | IOMMU_NOEXEC | |
|---|
| 882 | | - IOMMU_CACHE)) |
|---|
| 961 | + IOMMU_CACHE, GFP_KERNEL)) |
|---|
| 883 | 962 | return __FAIL(ops); |
|---|
| 884 | 963 | |
|---|
| 885 | 964 | /* Overlapping mappings */ |
|---|
| 886 | 965 | if (!ops->map(ops, iova, iova + size, size, |
|---|
| 887 | | - IOMMU_READ | IOMMU_NOEXEC)) |
|---|
| 966 | + IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL)) |
|---|
| 888 | 967 | return __FAIL(ops); |
|---|
| 889 | 968 | |
|---|
| 890 | 969 | if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) |
|---|
| .. | .. |
|---|
| 899 | 978 | size = 1UL << __ffs(cfg.pgsize_bitmap); |
|---|
| 900 | 979 | while (i < loopnr) { |
|---|
| 901 | 980 | iova_start = i * SZ_16M; |
|---|
| 902 | | - if (ops->unmap(ops, iova_start + size, size) != size) |
|---|
| 981 | + if (ops->unmap(ops, iova_start + size, size, NULL) != size) |
|---|
| 903 | 982 | return __FAIL(ops); |
|---|
| 904 | 983 | |
|---|
| 905 | 984 | /* Remap of partial unmap */ |
|---|
| 906 | | - if (ops->map(ops, iova_start + size, size, size, IOMMU_READ)) |
|---|
| 985 | + if (ops->map(ops, iova_start + size, size, size, IOMMU_READ, GFP_KERNEL)) |
|---|
| 907 | 986 | return __FAIL(ops); |
|---|
| 908 | 987 | |
|---|
| 909 | 988 | if (ops->iova_to_phys(ops, iova_start + size + 42) |
|---|
| .. | .. |
|---|
| 917 | 996 | for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) { |
|---|
| 918 | 997 | size = 1UL << i; |
|---|
| 919 | 998 | |
|---|
| 920 | | - if (ops->unmap(ops, iova, size) != size) |
|---|
| 999 | + if (ops->unmap(ops, iova, size, NULL) != size) |
|---|
| 921 | 1000 | return __FAIL(ops); |
|---|
| 922 | 1001 | |
|---|
| 923 | 1002 | if (ops->iova_to_phys(ops, iova + 42)) |
|---|
| 924 | 1003 | return __FAIL(ops); |
|---|
| 925 | 1004 | |
|---|
| 926 | 1005 | /* Remap full block */ |
|---|
| 927 | | - if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) |
|---|
| 1006 | + if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL)) |
|---|
| 928 | 1007 | return __FAIL(ops); |
|---|
| 929 | 1008 | |
|---|
| 930 | 1009 | if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) |
|---|