+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright © 2006-2015, Intel Corporation.
  *
  * Authors: Ashok Raj <ashok.raj@intel.com>
  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  *          David Woodhouse <David.Woodhouse@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
 
 #ifndef _INTEL_IOMMU_H_
@@ ... @@
 #include <linux/iova.h>
 #include <linux/io.h>
 #include <linux/idr.h>
-#include <linux/dma_remapping.h>
 #include <linux/mmu_notifier.h>
 #include <linux/list.h>
 #include <linux/iommu.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/dmar.h>
+#include <linux/ioasid.h>
 
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 
 /*
+ * VT-d hardware uses 4KiB page size regardless of host page size.
+ */
+#define VTD_PAGE_SHIFT		(12)
+#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
+#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
+#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
+
+#define VTD_STRIDE_SHIFT	(9)
+#define VTD_STRIDE_MASK		(((u64)-1) << VTD_STRIDE_SHIFT)
+
+#define DMA_PTE_READ		BIT_ULL(0)
+#define DMA_PTE_WRITE		BIT_ULL(1)
+#define DMA_PTE_LARGE_PAGE	BIT_ULL(7)
+#define DMA_PTE_SNP		BIT_ULL(11)
+
+#define DMA_FL_PTE_PRESENT	BIT_ULL(0)
+#define DMA_FL_PTE_US		BIT_ULL(2)
+#define DMA_FL_PTE_ACCESS	BIT_ULL(5)
+#define DMA_FL_PTE_DIRTY	BIT_ULL(6)
+#define DMA_FL_PTE_XD		BIT_ULL(63)
+
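The new page-geometry and PTE-bit macros encode the fact that VT-d always translates in 4KiB pages with 9-bit table strides, regardless of the host page size. A minimal sketch of how they compose a first-level PTE; the helper and its caller are hypothetical, not part of this patch:

```c
/* Hedged sketch: build a present, user, no-execute first-level PTE. */
static inline u64 make_fl_pte(u64 paddr, bool writable)
{
	u64 pte = paddr & VTD_PAGE_MASK;	/* keep only the page frame */

	pte |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_US | DMA_FL_PTE_XD;
	if (writable)
		pte |= DMA_PTE_WRITE;		/* bit 1 is R/W in both formats */
	return pte;
}
```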
---|
+#define ADDR_WIDTH_5LEVEL	(57)
+#define ADDR_WIDTH_4LEVEL	(48)
+
+#define CONTEXT_TT_MULTI_LEVEL	0
+#define CONTEXT_TT_DEV_IOTLB	1
+#define CONTEXT_TT_PASS_THROUGH	2
+#define CONTEXT_PASIDE		BIT_ULL(3)
+
+/*
  * Intel IOMMU register specification per version 1.0 public spec.
  */
-
 #define DMAR_VER_REG	0x0	/* Arch version supported by this IOMMU */
 #define DMAR_CAP_REG	0x8	/* Hardware supported capabilities */
 #define DMAR_ECAP_REG	0x10	/* Extended capabilities supported */
@@ ... @@
 #define DMAR_PEDATA_REG	0xe4	/* Page request event interrupt data register */
 #define DMAR_PEADDR_REG	0xe8	/* Page request event interrupt addr register */
 #define DMAR_PEUADDR_REG 0xec	/* Page request event Upper address register */
+#define DMAR_MTRRCAP_REG 0x100	/* MTRR capability register */
+#define DMAR_MTRRDEF_REG 0x108	/* MTRR default type register */
+#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
+#define DMAR_MTRR_FIX16K_80000_REG 0x128
+#define DMAR_MTRR_FIX16K_A0000_REG 0x130
+#define DMAR_MTRR_FIX4K_C0000_REG 0x138
+#define DMAR_MTRR_FIX4K_C8000_REG 0x140
+#define DMAR_MTRR_FIX4K_D0000_REG 0x148
+#define DMAR_MTRR_FIX4K_D8000_REG 0x150
+#define DMAR_MTRR_FIX4K_E0000_REG 0x158
+#define DMAR_MTRR_FIX4K_E8000_REG 0x160
+#define DMAR_MTRR_FIX4K_F0000_REG 0x168
+#define DMAR_MTRR_FIX4K_F8000_REG 0x170
+#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
+#define DMAR_MTRR_PHYSMASK0_REG 0x188
+#define DMAR_MTRR_PHYSBASE1_REG 0x190
+#define DMAR_MTRR_PHYSMASK1_REG 0x198
+#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
+#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
+#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
+#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
+#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
+#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
+#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
+#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
+#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
+#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
+#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
+#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
+#define DMAR_MTRR_PHYSBASE8_REG 0x200
+#define DMAR_MTRR_PHYSMASK8_REG 0x208
+#define DMAR_MTRR_PHYSBASE9_REG 0x210
+#define DMAR_MTRR_PHYSMASK9_REG 0x218
+#define DMAR_VCCAP_REG	0xe30	/* Virtual command capability register */
+#define DMAR_VCMD_REG	0xe00	/* Virtual command register */
+#define DMAR_VCRSP_REG	0xe10	/* Virtual command response register */
 
 #define OFFSET_STRIDE	(9)
 
 #define dmar_readq(a) readq(a)
 #define dmar_writeq(a,v) writeq(v,a)
+#define dmar_readl(a) readl(a)
+#define dmar_writel(a, v) writel(v, a)
 
 #define DMAR_VER_MAJOR(v)	(((v) & 0xf0) >> 4)
 #define DMAR_VER_MINOR(v)	((v) & 0x0f)
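dmar_readl()/dmar_writel() join the existing 64-bit accessors for registers that are only 32 bits wide, such as the version register. A sketch of their intended use (the helper is hypothetical; iommu->reg is the unit's remapped register window):

```c
static void report_vtd_version(struct intel_iommu *iommu)
{
	u32 ver = dmar_readl(iommu->reg + DMAR_VER_REG);

	pr_info("VT-d version %d.%d\n",
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
```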
---|
@@ ... @@
  * Extended Capability Register
  */
 
+#define ecap_smpwc(e)		(((e) >> 48) & 0x1)
+#define ecap_flts(e)		(((e) >> 47) & 0x1)
+#define ecap_slts(e)		(((e) >> 46) & 0x1)
+#define ecap_vcs(e)		(((e) >> 44) & 0x1)
+#define ecap_smts(e)		(((e) >> 43) & 0x1)
 #define ecap_dit(e)		((e >> 41) & 0x1)
 #define ecap_pasid(e)		((e >> 40) & 0x1)
 #define ecap_pss(e)		((e >> 35) & 0x1f)
@@ ... @@
 #define ecap_dev_iotlb_support(e)	(((e) >> 2) & 0x1)
 #define ecap_max_handle_mask(e)	((e >> 20) & 0xf)
 #define ecap_sc_support(e)	((e >> 7) & 0x1) /* Snooping Control */
+
+/* Virtual command interface capability */
+#define vccap_pasid(v)		(((v) & DMA_VCS_PAS)) /* PASID allocation */
 
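The added ecap_smts()/ecap_flts()/ecap_slts()/ecap_vcs() extractors expose the scalable-mode feature bits of the extended capability register. A hedged sketch of the kind of gating they enable (hypothetical helper; the sm_supported() macro further down wraps the same SMTS bit together with the intel_iommu_sm knob):

```c
static bool can_use_first_level(struct intel_iommu *iommu)
{
	/* Scalable mode and first-level translation must both be present. */
	return ecap_smts(iommu->ecap) && ecap_flts(iommu->ecap);
}
```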
---|
 /* IOTLB_REG */
 #define DMA_TLB_FLUSH_GRANU_OFFSET	60
@@ ... @@
 
 /* DMA_RTADDR_REG */
 #define DMA_RTADDR_RTT	(((u64)1) << 11)
+#define DMA_RTADDR_SMT	(((u64)1) << 10)
 
 /* CCMD_REG */
 #define DMA_CCMD_ICC	(((u64)1) << 63)
@@ ... @@
 #define dma_frcd_type(d)	((d >> 30) & 1)
 #define dma_frcd_fault_reason(c)	(c & 0xff)
 #define dma_frcd_source_id(c)	(c & 0xffff)
+#define dma_frcd_pasid_value(c)	(((c) >> 8) & 0xfffff)
+#define dma_frcd_pasid_present(c)	(((c) >> 31) & 1)
 /* low 64 bit */
 #define dma_frcd_page_addr(d)	(d & (((u64)-1) << PAGE_SHIFT))
 
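dma_frcd_pasid_value() and dma_frcd_pasid_present() pull the new PASID fields out of the upper half of a fault record. A decoding sketch, illustrative only: dw2 and dw3 are assumed to be the 32-bit words at offsets +8 and +12 of the 128-bit primary fault record, which is how these macros appear to be applied:

```c
static void report_fault(u32 dw2, u32 dw3)
{
	pr_err("DMAR fault: source %04x reason %#x pasid %#x%s\n",
	       dma_frcd_source_id(dw2), dma_frcd_fault_reason(dw3),
	       dma_frcd_pasid_value(dw3),
	       dma_frcd_pasid_present(dw2) ? "" : " (pasid not present)");
}
```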
---|
 /* PRS_REG */
 #define DMA_PRS_PPR	((u32)1)
+#define DMA_PRS_PRO	((u32)2)
+
+#define DMA_VCS_PAS	((u64)1)
 
 #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
 do { \
@@ ... @@
 
 #define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
 #define QI_IWD_STATUS_WRITE	(((u64)1) << 5)
+#define QI_IWD_FENCE		(((u64)1) << 6)
+#define QI_IWD_PRQ_DRAIN	(((u64)1) << 7)
 
 #define QI_IOTLB_DID(did) 	(((u64)did) << 16)
 #define QI_IOTLB_DR(dr) 	(((u64)dr) << 7)
@@ ... @@
 #define QI_IOTLB_GRAN(gran) 	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
 #define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
 #define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
-#define QI_IOTLB_AM(am)		(((u8)am))
+#define QI_IOTLB_AM(am)		(((u8)am) & 0x3f)
 
 #define QI_CC_FM(fm)		(((u64)fm) << 48)
 #define QI_CC_SID(sid)		(((u64)sid) << 32)
@@ ... @@
 #define QI_PC_DID(did)		(((u64)did) << 16)
 #define QI_PC_GRAN(gran)	(((u64)gran) << 4)
 
-#define QI_PC_ALL_PASIDS	(QI_PC_TYPE | QI_PC_GRAN(0))
-#define QI_PC_PASID_SEL		(QI_PC_TYPE | QI_PC_GRAN(1))
+/* PASID cache invalidation granularity */
+#define QI_PC_ALL_PASIDS	0
+#define QI_PC_PASID_SEL		1
+#define QI_PC_GLOBAL		3
 
 #define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
-#define QI_EIOTLB_GL(gl)	(((u64)gl) << 7)
 #define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
-#define QI_EIOTLB_AM(am)	(((u64)am))
+#define QI_EIOTLB_AM(am)	(((u64)am) & 0x3f)
 #define QI_EIOTLB_PASID(pasid) 	(((u64)pasid) << 32)
 #define QI_EIOTLB_DID(did)	(((u64)did) << 16)
 #define QI_EIOTLB_GRAN(gran) 	(((u64)gran) << 4)
 
+/* QI Dev-IOTLB invalidation granularity */
+#define QI_DEV_IOTLB_GRAN_ALL		1
+#define QI_DEV_IOTLB_GRAN_PASID_SEL	0
+
 #define QI_DEV_EIOTLB_ADDR(a)	((u64)(a) & VTD_PAGE_MASK)
 #define QI_DEV_EIOTLB_SIZE	(((u64)1) << 11)
-#define QI_DEV_EIOTLB_GLOB(g)	((u64)(g) & 0x1)
 #define QI_DEV_EIOTLB_PASID(p)	((u64)((p) & 0xfffff) << 32)
 #define QI_DEV_EIOTLB_SID(sid)	((u64)((sid) & 0xffff) << 16)
 #define QI_DEV_EIOTLB_QDEP(qd)	((u64)((qd) & 0x1f) << 4)
@@ ... @@
 	((u64)((pfsid >> 4) & 0xfff) << 52))
 #define QI_DEV_EIOTLB_MAX_INVS	32
 
-#define QI_PGRP_IDX(idx)	(((u64)(idx)) << 55)
-#define QI_PGRP_PRIV(priv)	(((u64)(priv)) << 32)
-#define QI_PGRP_RESP_CODE(res)	((u64)(res))
-#define QI_PGRP_PASID(pasid)	(((u64)(pasid)) << 32)
-#define QI_PGRP_DID(did)	(((u64)(did)) << 16)
+/* Page group response descriptor QW0 */
 #define QI_PGRP_PASID_P(p)	(((u64)(p)) << 4)
+#define QI_PGRP_PDP(p)		(((u64)(p)) << 5)
+#define QI_PGRP_RESP_CODE(res)	(((u64)(res)) << 12)
+#define QI_PGRP_DID(rid)	(((u64)(rid)) << 16)
+#define QI_PGRP_PASID(pasid)	(((u64)(pasid)) << 32)
 
-#define QI_PSTRM_ADDR(addr)	(((u64)(addr)) & VTD_PAGE_MASK)
-#define QI_PSTRM_DEVFN(devfn)	(((u64)(devfn)) << 4)
-#define QI_PSTRM_RESP_CODE(res)	((u64)(res))
-#define QI_PSTRM_IDX(idx)	(((u64)(idx)) << 55)
-#define QI_PSTRM_PRIV(priv)	(((u64)(priv)) << 32)
-#define QI_PSTRM_BUS(bus)	(((u64)(bus)) << 24)
-#define QI_PSTRM_PASID(pasid)	(((u64)(pasid)) << 4)
+/* Page group response descriptor QW1 */
+#define QI_PGRP_LPIG(x)		(((u64)(x)) << 2)
+#define QI_PGRP_IDX(idx)	(((u64)(idx)) << 3)
+
 
 #define QI_RESP_SUCCESS		0x0
 #define QI_RESP_INVALID		0x1
 #define QI_RESP_FAILURE		0xf
 
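The QI_PGRP_* layout changed with the scalable-mode page-request interface: PASID, RID and the response code now live in QW0, while "last page in group" and the page-request group index moved to QW1, replacing the old QI_PSTRM_* page-stream macros entirely. A hedged sketch of filling a response descriptor (struct qi_desc is defined just below; QI_PGRP_RESP_TYPE comes from an elided part of this header):

```c
static void make_pgrp_resp(struct qi_desc *desc, u32 pasid, u16 rid,
			   u16 prg_index, bool last_page)
{
	desc->qw0 = QI_PGRP_RESP_TYPE | QI_PGRP_PASID_P(1) |
		    QI_PGRP_RESP_CODE(QI_RESP_SUCCESS) |
		    QI_PGRP_DID(rid) | QI_PGRP_PASID(pasid);
	desc->qw1 = QI_PGRP_LPIG(last_page) | QI_PGRP_IDX(prg_index);
	desc->qw2 = 0;
	desc->qw3 = 0;
}
```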
---|
-#define QI_GRAN_ALL_ALL		0
-#define QI_GRAN_NONG_ALL	1
 #define QI_GRAN_NONG_PASID	2
 #define QI_GRAN_PSI_PASID	3
 
+#define qi_shift(iommu)		(DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
+
 struct qi_desc {
-	u64 low, high;
+	u64 qw0;
+	u64 qw1;
+	u64 qw2;
+	u64 qw3;
 };
 
 struct q_inval {
 	raw_spinlock_t  q_lock;
-	struct qi_desc  *desc;          /* invalidation queue */
+	void		*desc;		/* invalidation queue */
 	int             *desc_status;   /* desc status */
 	int             free_head;      /* first free entry */
 	int             free_tail;      /* last free entry */
 	int             free_cnt;
 };
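struct qi_desc grows from two to four quadwords because scalable mode doubles the invalidation descriptor from 128 to 256 bits; correspondingly q_inval::desc becomes a raw void * and qi_shift() folds ecap_smts() into the per-entry size. A sketch of indexing the queue under that scheme (hypothetical helper; DMAR_IQ_SHIFT is defined in an elided part of this header):

```c
static inline struct qi_desc *qi_entry(struct intel_iommu *iommu, int index)
{
	/* Entry size is 1 << qi_shift(): 128 bits legacy, 256 scalable. */
	return iommu->qi->desc + ((unsigned long)index << qi_shift(iommu));
}
```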
---|
+
+struct dmar_pci_notify_info;
 
 #ifdef CONFIG_IRQ_REMAP
 /* 1MB - maximum possible interrupt remapping table size */
@@ ... @@
 	struct irte *base;
 	unsigned long *bitmap;
 };
+
+void intel_irq_remap_add_device(struct dmar_pci_notify_info *info);
+#else
+static inline void
+intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { }
 #endif
 
 struct iommu_flush {
@@ ... @@
 
 #define VTD_FLAG_TRANS_PRE_ENABLED	(1 << 0)
 #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED	(1 << 1)
+#define VTD_FLAG_SVM_CAPABLE		(1 << 2)
+
+extern int intel_iommu_sm;
+extern spinlock_t device_domain_lock;
+
+#define sm_supported(iommu)	(intel_iommu_sm && ecap_smts((iommu)->ecap))
+#define pasid_supported(iommu)	(sm_supported(iommu) &&			\
+				 ecap_pasid((iommu)->ecap))
 
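sm_supported() and pasid_supported() are the canonical predicates: scalable mode must be both enabled (intel_iommu_sm) and reported by hardware (ecap_smts) before PASID support is even considered. Roughly how intel_svm_check() is expected to use them (a hedged sketch; ecap_prs() is defined in an elided part of this header):

```c
static void svm_capable(struct intel_iommu *iommu)
{
	/* SVM needs PASIDs plus page-request support on this unit. */
	if (pasid_supported(iommu) && ecap_prs(iommu->ecap))
		iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}
```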
---|
 struct pasid_entry;
 struct pasid_state_entry;
 struct page_req_dsc;
+
+/*
+ * 0: Present
+ * 1-11: Reserved
+ * 12-63: Context Ptr (12 - (haw-1))
+ * 64-127: Reserved
+ */
+struct root_entry {
+	u64 lo;
+	u64 hi;
+};
+
+/*
+ * low 64 bits:
+ * 0: present
+ * 1: fault processing disable
+ * 2-3: translation type
+ * 12-63: address space root
+ * high 64 bits:
+ * 0-2: address width
+ * 3-6: available
+ * 8-23: domain id
+ */
+struct context_entry {
+	u64 lo;
+	u64 hi;
+};
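root_entry and context_entry move into this header as plain pairs of u64, matching the layout comments above. A few hedged accessors to show how the bit ranges map (hypothetical helpers; the real context_present() is only declared near the end of this header):

```c
static inline bool ctx_present(const struct context_entry *ctx)
{
	return ctx->lo & 1;			/* bit 0: present */
}

static inline int ctx_translation_type(const struct context_entry *ctx)
{
	return (ctx->lo >> 2) & 3;		/* CONTEXT_TT_* values */
}

static inline u16 ctx_domain_id(const struct context_entry *ctx)
{
	return (ctx->hi >> 8) & 0xffff;		/* bits 8-23 of the high word */
}
```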
---|
+
+/* si_domain contains multiple devices */
+#define DOMAIN_FLAG_STATIC_IDENTITY	BIT(0)
+
+/*
+ * When VT-d works in the scalable mode, it allows DMA translation to
+ * happen through either first level or second level page table. This
+ * bit marks that the DMA translation for the domain goes through the
+ * first level page table, otherwise, it goes through the second level.
+ */
+#define DOMAIN_FLAG_USE_FIRST_LEVEL	BIT(1)
+
+/*
+ * Domain represents a virtual machine which demands iommu nested
+ * translation mode support.
+ */
+#define DOMAIN_FLAG_NESTING_MODE	BIT(2)
 
 struct dmar_domain {
 	int	nid;			/* node id */
@@ ... @@
 					/* Domain ids per IOMMU. Use u16 since
 					 * domain ids are 16 bit wide according
 					 * to VT-d spec, section 9.3 */
+	unsigned int	auxd_refcnt;	/* Refcount of auxiliary attaching */
 
 	bool has_iotlb_device;
 	struct list_head devices;	/* all devices' list */
+	struct list_head auxd;		/* link to device's auxiliary list */
 	struct iova_domain iovad;	/* iova's that belong to this domain */
 
 	struct dma_pte	*pgd;		/* virtual address */
@@ ... @@
 					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
 	u64		max_addr;	/* maximum mapped address */
 
+	u32		default_pasid;	/*
+					 * The default pasid used for non-SVM
+					 * traffic on mediated devices.
+					 */
+
 	struct iommu_domain domain;	/* generic domain data structure for
 					   iommu core */
 };
@@ ... @@
 	u64		reg_size; /* size of hw register set */
 	u64		cap;
 	u64		ecap;
+	u64		vccap;
 	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
 	raw_spinlock_t	register_lock; /* protect register handling */
 	int		seq_id;	/* sequence id of the iommu */
@@ ... @@
 	struct iommu_flush flush;
 #endif
 #ifdef CONFIG_INTEL_IOMMU_SVM
-	/* These are large and need to be contiguous, so we allocate just
-	 * one for now. We'll maybe want to rethink that if we truly give
-	 * devices away to userspace processes (e.g. for DPDK) and don't
-	 * want to trust that userspace will use *only* the PASID it was
-	 * told to. But while it's all driver-arbitrated, we're fine. */
-	struct pasid_state_entry *pasid_state_table;
 	struct page_req_dsc *prq;
 	unsigned char prq_name[16];    /* Name for PRQ interrupt */
-	u32		pasid_max;
+	struct completion prq_complete;
+	struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
 #endif
 	struct q_inval  *qi;            /* Queued invalidation info */
 	u32 *iommu_state; /* Store iommu states between suspend and resume.*/
@@ ... @@
 	struct list_head link;	/* link to domain siblings */
 	struct list_head global; /* link to global list */
 	struct list_head table;	/* link to pasid table */
+	struct list_head auxiliary_domains; /* auxiliary domains
+					     * attached to this device
+					     */
+	u32 segment;		/* PCI segment number */
 	u8 bus;			/* PCI bus number */
 	u8 devfn;		/* PCI devfn number */
 	u16 pfsid;		/* SRIOV physical function source ID */
@@ ... @@
 	u8 pri_enabled:1;
 	u8 ats_supported:1;
 	u8 ats_enabled:1;
+	u8 auxd_enabled:1;	/* Multiple domains per device */
 	u8 ats_qdep;
 	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
 	struct intel_iommu *iommu; /* IOMMU used by this device */
@@ ... @@
 {
 	if (!ecap_coherent(iommu->ecap))
 		clflush_cache_range(addr, size);
+}
+
+/* Convert generic struct iommu_domain to private struct dmar_domain */
+static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct dmar_domain, domain);
+}
+
+/*
+ * 0: readable
+ * 1: writable
+ * 2-6: reserved
+ * 7: super page
+ * 8-10: available
+ * 11: snoop behavior
+ * 12-63: Host physical address
+ */
+struct dma_pte {
+	u64 val;
+};
+
+static inline void dma_clear_pte(struct dma_pte *pte)
+{
+	pte->val = 0;
+}
+
+static inline u64 dma_pte_addr(struct dma_pte *pte)
+{
+#ifdef CONFIG_64BIT
+	return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
+#else
+	/* Must have a full atomic 64-bit read */
+	return __cmpxchg64(&pte->val, 0ULL, 0ULL) &
+			VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
+#endif
+}
+
+static inline bool dma_pte_present(struct dma_pte *pte)
+{
+	return (pte->val & 3) != 0;
+}
+
+static inline bool dma_pte_superpage(struct dma_pte *pte)
+{
+	return (pte->val & DMA_PTE_LARGE_PAGE);
+}
+
+static inline int first_pte_in_page(struct dma_pte *pte)
+{
+	return !((unsigned long)pte & ~VTD_PAGE_MASK);
 }
 
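The dma_pte helpers give everything needed for a software walk: presence, large-page termination, and the next-level address with the XD bit masked off. One level of such a walk might look like this (a sketch; the stride arithmetic mirrors VTD_STRIDE_SHIFT, and the phys_to_virt() use is illustrative):

```c
static struct dma_pte *walk_one_level(struct dma_pte *parent,
				      unsigned long pfn, int level)
{
	unsigned int offset = (pfn >> ((level - 1) * VTD_STRIDE_SHIFT)) &
			      ((1 << VTD_STRIDE_SHIFT) - 1);
	struct dma_pte *pte = &parent[offset];

	if (!dma_pte_present(pte) || dma_pte_superpage(pte))
		return NULL;	/* hole, or translation ends in a superpage */

	return phys_to_virt(dma_pte_addr(pte));
}
```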
---|
 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
@@ ... @@
 			  unsigned int size_order, u64 type);
 extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 			u16 qdep, u64 addr, unsigned mask);
-extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+		     unsigned long npages, bool ih);
+
+void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+			      u32 pasid, u16 qdep, u64 addr,
+			      unsigned int size_order);
+void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
+			  u32 pasid);
+
+int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+		   unsigned int count, unsigned long options);
+/*
+ * Options used in qi_submit_sync:
+ * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
+ */
+#define QI_OPT_WAIT_DRAIN	BIT(0)
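qi_submit_sync() now takes the IOMMU first and can batch count descriptors at once, with optional flags such as QI_OPT_WAIT_DRAIN. Submitting a single PASID-cache invalidation, roughly what qi_flush_pasid_cache() does internally (a sketch; QI_PC_TYPE and QI_PC_PASID come from elided parts of this header):

```c
static void flush_one_pasid(struct intel_iommu *iommu, u16 did, u32 pasid)
{
	struct qi_desc desc = {};

	desc.qw0 = QI_PC_TYPE | QI_PC_DID(did) |
		   QI_PC_GRAN(QI_PC_PASID_SEL) | QI_PC_PASID(pasid);
	qi_submit_sync(iommu, &desc, 1, 0);
}
```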
---|
 
 extern int dmar_ir_support(void);
 
-struct dmar_domain *get_valid_domain_for_dev(struct device *dev);
 void *alloc_pgtable_page(int node);
 void free_pgtable_page(void *vaddr);
 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
 int for_each_device_domain(int (*fn)(struct device_domain_info *info,
 				     void *data), void *data);
+void iommu_flush_write_buffer(struct intel_iommu *iommu);
+int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
+struct dmar_domain *find_domain(struct device *dev);
+struct device_domain_info *get_domain_info(struct device *dev);
+struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
-int intel_svm_init(struct intel_iommu *iommu);
-int intel_svm_exit(struct intel_iommu *iommu);
+extern void intel_svm_check(struct intel_iommu *iommu);
 extern int intel_svm_enable_prq(struct intel_iommu *iommu);
 extern int intel_svm_finish_prq(struct intel_iommu *iommu);
+int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
+			  struct iommu_gpasid_bind_data *data);
+int intel_svm_unbind_gpasid(struct device *dev, u32 pasid);
+struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
+				 void *drvdata);
+void intel_svm_unbind(struct iommu_sva *handle);
+u32 intel_svm_get_pasid(struct iommu_sva *handle);
+int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
+			    struct iommu_page_response *msg);
 
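intel_svm_bind()/intel_svm_unbind() back the generic SVA API: bind a process address space to a device, read back the PASID to program into the device, and unbind when done. A hedged usage sketch, assuming a process context (drivers normally reach this backend through iommu_sva_bind_device()):

```c
static int svm_demo(struct device *dev)
{
	struct iommu_sva *handle = intel_svm_bind(dev, current->mm, NULL);

	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Program intel_svm_get_pasid(handle) into the device, run DMA... */
	intel_svm_unbind(handle);
	return 0;
}
```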
---|
 struct svm_dev_ops;
 
@@ ... @@
 	struct list_head list;
 	struct rcu_head rcu;
 	struct device *dev;
+	struct intel_iommu *iommu;
 	struct svm_dev_ops *ops;
+	struct iommu_sva sva;
+	u32 pasid;
 	int users;
 	u16 did;
 	u16 dev_iotlb:1;
@@ ... @@
 struct intel_svm {
 	struct mmu_notifier notifier;
 	struct mm_struct *mm;
-	struct intel_iommu *iommu;
-	int flags;
-	int pasid;
+
+	unsigned int flags;
+	u32 pasid;
+	int gpasid; /* Guest PASID, used when it differs from the host PASID */
 	struct list_head devs;
 	struct list_head list;
 };
-
-extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
-extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
+#else
+static inline void intel_svm_check(struct intel_iommu *iommu) {}
 #endif
 
+#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
+void intel_iommu_debugfs_init(void);
+#else
+static inline void intel_iommu_debugfs_init(void) {}
+#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
+
 extern const struct attribute_group *intel_iommu_groups[];
+bool context_present(struct context_entry *context);
+struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
+					 u8 devfn, int alloc);
+
+#ifdef CONFIG_INTEL_IOMMU
+extern int iommu_calculate_agaw(struct intel_iommu *iommu);
+extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
+extern int dmar_disabled;
+extern int intel_iommu_enabled;
+extern int intel_iommu_gfx_mapped;
+#else
+static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+	return 0;
+}
+static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+	return 0;
+}
+#define dmar_disabled	(1)
+#define intel_iommu_enabled (0)
+#endif
 
 #endif