@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
 #ifndef __NVKM_MMU_H__
 #define __NVKM_MMU_H__
 #include <core/subdev.h>
@@ -17,6 +17,7 @@
 	bool part:1; /* Region was split from an allocated region by map(). */
 	bool user:1; /* Region user-allocated. */
 	bool busy:1; /* Region busy (for temporarily preventing user access). */
+	bool mapped:1; /* Region contains valid pages. */
 	struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
 	struct nvkm_tags *tags; /* Compression tag reference. */
 };
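For context on the `mapped:1` addition: it joins three existing 1-bit flags in `struct nvkm_vma`, so it packs into the same bit-field storage rather than widening the structure. A minimal standalone sketch of that pattern (plain C, not nouveau code; the struct and variable names are made up):

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Standalone illustration, not nouveau code: a 1-bit "mapped" flag added
 * next to other 1-bit flags shares their storage unit, so the struct does
 * not grow, and the flag is toggled like any other member.
 */
struct vma_flags {
	bool part:1;
	bool user:1;
	bool busy:1;
	bool mapped:1;	/* new flag: region contains valid pages */
};

int main(void)
{
	struct vma_flags f = { .user = true };

	f.mapped = true;	/* pages installed by a map operation */
	f.mapped = false;	/* cleared again when the region is unmapped */
	printf("size = %zu bytes\n", sizeof(f));
	printf("user=%d mapped=%d\n", f.user, f.mapped);
	return 0;
}
```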
@@ -44,6 +45,8 @@
 
 	dma_addr_t null;
 	void *nullp;
+
+	bool replay;
 };
 
 int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
@@ -63,6 +66,7 @@
 	struct nvkm_mm_node *mem;
 	struct scatterlist *sgl;
 	dma_addr_t *dma;
+	u64 *pfn;
 	u64 off;
 
 	const struct nvkm_vmm_page *page;
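The map-arguments structure gains a `u64 *pfn` array alongside the existing `mem`/`sgl`/`dma` members, i.e. one more alternative way of describing the pages to map. A rough self-contained sketch of that "exactly one backing description is filled in" pattern (names hypothetical, not nouveau's API):

```c
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/*
 * Self-contained sketch, all names hypothetical (not nouveau's API): the
 * mapping arguments carry exactly one backing description; a pfn array is
 * simply one more alternative next to a dma-address array.
 */
#define PAGE_SHIFT 12

struct map_args {
	const uint64_t *dma;	/* bus addresses, one per page, or NULL */
	const uint64_t *pfn;	/* page-frame numbers, one per page, or NULL */
	size_t npages;
};

static void map_pages(const struct map_args *a)
{
	for (size_t i = 0; i < a->npages; i++) {
		/* use whichever representation the caller filled in */
		uint64_t addr = a->dma ? a->dma[i]
				       : a->pfn[i] << PAGE_SHIFT;
		printf("page %zu -> 0x%llx\n", i, (unsigned long long)addr);
	}
}

int main(void)
{
	uint64_t pfns[] = { 0x1000, 0x1001, 0x1002 };
	struct map_args a = { .pfn = pfns, .npages = 3 };

	map_pages(&a);
	return 0;
}
```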
@@ -130,4 +134,5 @@
 int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gv100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int tu102_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 #endif
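All of the per-generation constructors declared here share one signature, so Turing support at this level is just the extra `tu102_mmu_new` declaration; the matching device-table entry lives elsewhere. A standalone sketch of that chipset-to-constructor dispatch pattern (stub types and illustrative chipset ids, not nouveau's actual device table):

```c
#include <stdio.h>
#include <stddef.h>

/*
 * Standalone sketch with stub types, not nouveau's device table: every
 * per-generation constructor shares one signature, so wiring up a new
 * generation is one more table entry pointing at its constructor.
 */
struct nvkm_device;
struct nvkm_mmu;

typedef int (*mmu_ctor)(struct nvkm_device *, int, struct nvkm_mmu **);

static int gv100_mmu_new(struct nvkm_device *d, int i, struct nvkm_mmu **m)
{
	(void)d; (void)i; (void)m;
	return puts("gv100 mmu") >= 0 ? 0 : -1;
}

static int tu102_mmu_new(struct nvkm_device *d, int i, struct nvkm_mmu **m)
{
	(void)d; (void)i; (void)m;
	return puts("tu102 mmu") >= 0 ? 0 : -1;
}

static const struct {
	int chipset;
	mmu_ctor ctor;
} mmu_table[] = {
	{ 0x140, gv100_mmu_new },	/* Volta */
	{ 0x162, tu102_mmu_new },	/* Turing, id illustrative */
};

int main(void)
{
	int chipset = 0x162;

	for (size_t i = 0; i < sizeof(mmu_table) / sizeof(mmu_table[0]); i++) {
		if (mmu_table[i].chipset == chipset)
			return mmu_table[i].ctor(NULL, 0, NULL);
	}
	return 1;
}
```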