.. | .. |
---|
25 | 25 | #include <linux/mm.h> |
---|
26 | 26 | #include <linux/io-mapping.h> |
---|
27 | 27 | |
---|
28 | | -#include <asm/pgtable.h> |
---|
29 | 28 | |
---|
30 | 29 | #include "i915_drv.h" |
---|
31 | 30 | |
---|
.. | .. |
---|
33 | 32 | struct mm_struct *mm; |
---|
34 | 33 | unsigned long pfn; |
---|
35 | 34 | pgprot_t prot; |
---|
| 35 | + |
---|
| 36 | + struct sgt_iter sgt; |
---|
| 37 | + resource_size_t iobase; |
---|
36 | 38 | }; |
---|
37 | 39 | |
---|
38 | | -static int remap_pfn(pte_t *pte, pgtable_t token, |
---|
39 | | - unsigned long addr, void *data) |
---|
| 40 | +static int remap_pfn(pte_t *pte, unsigned long addr, void *data) |
---|
40 | 41 | { |
---|
41 | 42 | struct remap_pfn *r = data; |
---|
42 | 43 | |
---|
43 | 44 | /* Special PTE are not associated with any struct page */ |
---|
44 | 45 | set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot))); |
---|
45 | 46 | r->pfn++; |
---|
| 47 | + |
---|
| 48 | + return 0; |
---|
| 49 | +} |
---|
| 50 | + |
---|
| 51 | +#define use_dma(io) ((io) != -1) |
---|
| 52 | + |
---|
| 53 | +static inline unsigned long sgt_pfn(const struct remap_pfn *r) |
---|
| 54 | +{ |
---|
| 55 | + if (use_dma(r->iobase)) |
---|
| 56 | + return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT; |
---|
| 57 | + else |
---|
| 58 | + return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT); |
---|
| 59 | +} |
---|
| 60 | + |
---|
/*
 * apply_to_page_range() callback: install one special PTE per page,
 * walking the scatterlist through the embedded sgt_iter as it goes.
 */
static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	/* pfn == 0 presumably means the iterator is exhausted, i.e. the sg
	 * list is shorter than the range being remapped — TODO confirm
	 * against __sgt_iter()'s end-of-list behaviour.
	 */
	if (GEM_WARN_ON(!r->sgt.pfn))
		return -EINVAL;

	/* Special PTE are not associated with any struct page */
	set_pte_at(r->mm, addr, pte,
		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
	r->pfn++; /* track insertions in case we need to unwind later */

	/* Step one page forward; once this sg entry is consumed, restart
	 * the iterator on the next entry (dma vs pfn mode preserved).
	 */
	r->sgt.curr += PAGE_SIZE;
	if (r->sgt.curr >= r->sgt.max)
		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));

	return 0;
}
---|
.. | .. |
---|
64 | 94 | struct remap_pfn r; |
---|
65 | 95 | int err; |
---|
66 | 96 | |
---|
67 | | - GEM_BUG_ON((vma->vm_flags & |
---|
68 | | - (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)) != |
---|
69 | | - (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)); |
---|
| 97 | +#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP) |
---|
| 98 | + GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS); |
---|
70 | 99 | |
---|
71 | 100 | /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ |
---|
72 | 101 | r.mm = vma->vm_mm; |
---|
.. | .. |
---|
82 | 111 | |
---|
83 | 112 | return 0; |
---|
84 | 113 | } |
---|
/**
 * remap_io_sg - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @size: size of map area
 * @sgl: Start sg entry
 * @iobase: Use stored dma address offset by this address or pfn if -1
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_sg(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl, resource_size_t iobase)
{
	/* r.pfn starts at 0 and counts installed PTEs (see remap_sg), so it
	 * doubles as the unwind length on failure below.
	 */
	struct remap_pfn r = {
		.mm = vma->vm_mm,
		.prot = vma->vm_page_prot,
		.sgt = __sgt_iter(sgl, use_dma(iobase)),
		.iobase = iobase,
	};
	int err;

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/* NOTE(review): flush_cache_range() takes (vma, start, end); passing
	 * @size as the third argument only matches when addr == 0. Harmless
	 * where the call is a no-op (x86), but worth confirming upstream.
	 */
	if (!use_dma(iobase))
		flush_cache_range(vma, addr, size);

	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
	if (unlikely(err)) {
		/* Tear down the PTEs we managed to install before failing. */
		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
		return err;
	}

	return 0;
}
---|