@@ -42,8 +42,6 @@
 #include <linux/uaccess.h>
 #include <linux/mem_encrypt.h>
 
-#define TTM_BO_VM_NUM_PREFAULT 16
-
 static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 				       struct vm_fault *vmf)
 {
@@ -60,18 +58,19 @@
 		goto out_clear;
 
 	/*
-	 * If possible, avoid waiting for GPU with mmap_sem
-	 * held.
+	 * If possible, avoid waiting for GPU with mmap_lock
+	 * held. We only do this if the fault allows retry and this
+	 * is the first attempt.
 	 */
-	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+	if (fault_flag_allow_retry_first(vmf->flags)) {
 		ret = VM_FAULT_RETRY;
 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
 			goto out_unlock;
 
 		ttm_bo_get(bo);
-		up_read(&vmf->vma->vm_mm->mmap_sem);
+		mmap_read_unlock(vmf->vma->vm_mm);
 		(void) dma_fence_wait(bo->moving, true);
-		ttm_bo_unreserve(bo);
+		dma_resv_unlock(bo->base.resv);
 		ttm_bo_put(bo);
 		goto out_unlock;
 	}
@@ -102,15 +101,175 @@
 	if (bdev->driver->io_mem_pfn)
 		return bdev->driver->io_mem_pfn(bo, page_offset);
 
-	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
-		+ page_offset;
+	return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset;
 }
 
-static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
+/**
+ * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
+ * @bo: The buffer object
+ * @vmf: The fault structure handed to the callback
+ *
+ * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be dropped
+ * during long waits, and after the wait the callback will be restarted. This
+ * is to allow other threads using the same virtual memory space concurrent
+ * access to map() and unmap() completely unrelated buffer objects. TTM buffer
+ * object reservations sometimes wait for GPU and should therefore be
+ * considered long waits. This function reserves the buffer object interruptibly,
+ * taking this into account. Starvation is avoided by the vm system not
+ * allowing too many repeated restarts.
+ * This function is intended to be used in customized fault() and _mkwrite()
+ * handlers.
+ *
+ * Return:
+ *    0 on success and the bo was reserved.
+ *    VM_FAULT_RETRY if blocking wait.
+ *    VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
+ */
+vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
+			     struct vm_fault *vmf)
+{
+	/*
+	 * Work around locking order reversal in fault / nopfn
+	 * between mmap_lock and bo_reserve: Perform a trylock operation
+	 * for reserve, and if it fails, retry the fault after waiting
+	 * for the buffer to become unreserved.
+	 */
+	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
+		/*
+		 * If the fault allows retry and this is the first
+		 * fault attempt, we try to release the mmap_lock
+		 * before waiting.
+		 */
+		if (fault_flag_allow_retry_first(vmf->flags)) {
+			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+				ttm_bo_get(bo);
+				mmap_read_unlock(vmf->vma->vm_mm);
+				if (!dma_resv_lock_interruptible(bo->base.resv,
+								 NULL))
+					dma_resv_unlock(bo->base.resv);
+				ttm_bo_put(bo);
+			}
+
+			return VM_FAULT_RETRY;
+		}
+
+		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
+			return VM_FAULT_NOPAGE;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ttm_bo_vm_reserve);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/**
+ * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults
+ * @vmf: Fault data
+ * @bo: The buffer object
+ * @page_offset: Page offset from bo start
+ * @fault_page_size: The size of the fault in pages.
+ * @pgprot: The page protections.
+ * Does additional checking whether it's possible to insert a PUD or PMD
+ * pfn and performs the insertion.
+ *
+ * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if
+ * a huge fault was not possible, or on insertion error.
+ */
+static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
+					struct ttm_buffer_object *bo,
+					pgoff_t page_offset,
+					pgoff_t fault_page_size,
+					pgprot_t pgprot)
+{
+	pgoff_t i;
+	vm_fault_t ret;
+	unsigned long pfn;
+	pfn_t pfnt;
+	struct ttm_tt *ttm = bo->ttm;
+	bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+	/* Fault should not cross bo boundary. */
+	page_offset &= ~(fault_page_size - 1);
+	if (page_offset + fault_page_size > bo->num_pages)
+		goto out_fallback;
+
+	if (bo->mem.bus.is_iomem)
+		pfn = ttm_bo_io_mem_pfn(bo, page_offset);
+	else
+		pfn = page_to_pfn(ttm->pages[page_offset]);
+
+	/* pfn must be fault_page_size aligned. */
+	if ((pfn & (fault_page_size - 1)) != 0)
+		goto out_fallback;
+
+	/* Check that memory is contiguous. */
+	if (!bo->mem.bus.is_iomem) {
+		for (i = 1; i < fault_page_size; ++i) {
+			if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
+				goto out_fallback;
+		}
+	} else if (bo->bdev->driver->io_mem_pfn) {
+		for (i = 1; i < fault_page_size; ++i) {
+			if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
+				goto out_fallback;
+		}
+	}
+
+	pfnt = __pfn_to_pfn_t(pfn, PFN_DEV);
+	if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT))
+		ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write);
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+	else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT))
+		ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write);
+#endif
+	else
+		WARN_ON_ONCE(ret = VM_FAULT_FALLBACK);
+
+	if (ret != VM_FAULT_NOPAGE)
+		goto out_fallback;
+
+	return VM_FAULT_NOPAGE;
+out_fallback:
+	count_vm_event(THP_FAULT_FALLBACK);
+	return VM_FAULT_FALLBACK;
+}
+#else
+static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
+					struct ttm_buffer_object *bo,
+					pgoff_t page_offset,
+					pgoff_t fault_page_size,
+					pgprot_t pgprot)
+{
+	return VM_FAULT_FALLBACK;
+}
+#endif
+
+/**
+ * ttm_bo_vm_fault_reserved - TTM fault helper
+ * @vmf: The struct vm_fault given as argument to the fault callback
+ * @prot: The page protection to be used for this memory area.
+ * @num_prefault: Maximum number of prefault pages. The caller may want to
+ * specify this based on madvise settings and the size of the GPU object
+ * backed by the memory.
+ * @fault_page_size: The size of the fault in pages.
+ *
+ * This function inserts one or more page table entries pointing to the
+ * memory backing the buffer object, and then returns a return code
+ * instructing the caller to retry the page access.
+ *
+ * Return:
+ *    VM_FAULT_NOPAGE on success or pending signal
+ *    VM_FAULT_SIGBUS on unspecified error
+ *    VM_FAULT_OOM on out-of-memory
+ *    VM_FAULT_RETRY if retryable wait
+ */
+vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
+				    pgprot_t prot,
+				    pgoff_t num_prefault,
+				    pgoff_t fault_page_size)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
-	    vma->vm_private_data;
+	struct ttm_buffer_object *bo = vma->vm_private_data;
 	struct ttm_bo_device *bdev = bo->bdev;
 	unsigned long page_offset;
 	unsigned long page_last;
@@ -118,65 +277,37 @@
 	struct ttm_tt *ttm = NULL;
 	struct page *page;
 	int err;
-	int i;
+	pgoff_t i;
 	vm_fault_t ret = VM_FAULT_NOPAGE;
 	unsigned long address = vmf->address;
-	struct ttm_mem_type_manager *man =
-		&bdev->man[bo->mem.mem_type];
-	struct vm_area_struct cvma;
-
-	/*
-	 * Work around locking order reversal in fault / nopfn
-	 * between mmap_sem and bo_reserve: Perform a trylock operation
-	 * for reserve, and if it fails, retry the fault after waiting
-	 * for the buffer to become unreserved.
-	 */
-	err = ttm_bo_reserve(bo, true, true, NULL);
-	if (unlikely(err != 0)) {
-		if (err != -EBUSY)
-			return VM_FAULT_NOPAGE;
-
-		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
-			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
-				ttm_bo_get(bo);
-				up_read(&vmf->vma->vm_mm->mmap_sem);
-				(void) ttm_bo_wait_unreserved(bo);
-				ttm_bo_put(bo);
-			}
-
-			return VM_FAULT_RETRY;
-		}
-
-		/*
-		 * If we'd want to change locking order to
-		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
-		 * instead of retrying the fault...
-		 */
-		return VM_FAULT_NOPAGE;
-	}
 
 	/*
 	 * Refuse to fault imported pages. This should be handled
 	 * (if at all) by redirecting mmap to the exporter.
 	 */
-	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_unlock;
-	}
+	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
+		return VM_FAULT_SIGBUS;
 
 	if (bdev->driver->fault_reserve_notify) {
+		struct dma_fence *moving = dma_fence_get(bo->moving);
+
 		err = bdev->driver->fault_reserve_notify(bo);
 		switch (err) {
 		case 0:
 			break;
 		case -EBUSY:
 		case -ERESTARTSYS:
-			ret = VM_FAULT_NOPAGE;
-			goto out_unlock;
+			dma_fence_put(moving);
+			return VM_FAULT_NOPAGE;
 		default:
-			ret = VM_FAULT_SIGBUS;
-			goto out_unlock;
+			dma_fence_put(moving);
+			return VM_FAULT_SIGBUS;
 		}
+
+		if (bo->moving != moving) {
+			ttm_bo_move_to_lru_tail_unlocked(bo);
+		}
+		dma_fence_put(moving);
 	}
 
 	/*
@@ -184,49 +315,23 @@
 	 * move.
 	 */
 	ret = ttm_bo_vm_fault_idle(bo, vmf);
-	if (unlikely(ret != 0)) {
-		if (ret == VM_FAULT_RETRY &&
-		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
-			/* The BO has already been unreserved. */
-			return ret;
-		}
+	if (unlikely(ret != 0))
+		return ret;
 
-		goto out_unlock;
-	}
-
-	err = ttm_mem_io_lock(man, true);
-	if (unlikely(err != 0)) {
-		ret = VM_FAULT_NOPAGE;
-		goto out_unlock;
-	}
-	err = ttm_mem_io_reserve_vm(bo);
-	if (unlikely(err != 0)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_io_unlock;
-	}
+	err = ttm_mem_io_reserve(bdev, &bo->mem);
+	if (unlikely(err != 0))
+		return VM_FAULT_SIGBUS;
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
 	page_last = vma_pages(vma) + vma->vm_pgoff -
-		drm_vma_node_start(&bo->vma_node);
+		drm_vma_node_start(&bo->base.vma_node);
 
-	if (unlikely(page_offset >= bo->num_pages)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_io_unlock;
-	}
+	if (unlikely(page_offset >= bo->num_pages))
+		return VM_FAULT_SIGBUS;
 
-	/*
-	 * Make a local vma copy to modify the page_prot member
-	 * and vm_flags if necessary. The vma parameter is protected
-	 * by mmap_sem in write mode.
-	 */
-	cvma = *vma;
-	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
-
-	if (bo->mem.bus.is_iomem) {
-		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
-						cvma.vm_page_prot);
-	} else {
+	prot = ttm_io_prot(bo->mem.placement, prot);
+	if (!bo->mem.bus.is_iomem) {
 		struct ttm_operation_ctx ctx = {
 			.interruptible = false,
 			.no_wait_gpu = false,
@@ -235,48 +340,56 @@
 		};
 
 		ttm = bo->ttm;
-		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
-						cvma.vm_page_prot);
-
-		/* Allocate all page at once, most common usage */
-		if (ttm_tt_populate(ttm, &ctx)) {
-			ret = VM_FAULT_OOM;
-			goto out_io_unlock;
-		}
+		if (ttm_tt_populate(bdev, bo->ttm, &ctx))
+			return VM_FAULT_OOM;
+	} else {
+		/* Iomem should not be marked encrypted */
+		prot = pgprot_decrypted(prot);
 	}
+
+	/* We don't prefault on huge faults. Yet. */
+	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1)
+		return ttm_bo_vm_insert_huge(vmf, bo, page_offset,
+					     fault_page_size, prot);
 
 	/*
 	 * Speculatively prefault a number of pages. Only error on
 	 * first page.
 	 */
-	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
+	for (i = 0; i < num_prefault; ++i) {
 		if (bo->mem.bus.is_iomem) {
-			/* Iomem should not be marked encrypted */
-			cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
 			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
 		} else {
 			page = ttm->pages[page_offset];
 			if (unlikely(!page && i == 0)) {
-				ret = VM_FAULT_OOM;
-				goto out_io_unlock;
+				return VM_FAULT_OOM;
 			} else if (unlikely(!page)) {
 				break;
 			}
-			page->index = drm_vma_node_start(&bo->vma_node) +
+			page->index = drm_vma_node_start(&bo->base.vma_node) +
 					page_offset;
 			pfn = page_to_pfn(page);
 		}
 
+		/*
+		 * Note that the value of @prot at this point may differ from
+		 * the value of @vma->vm_page_prot in the caching- and
+		 * encryption bits. This is because the exact location of the
+		 * data may not be known at mmap() time and may also change
+		 * at arbitrary times while the data is mmap'ed.
+		 * See vmf_insert_mixed_prot() for a discussion.
+		 */
 		if (vma->vm_flags & VM_MIXEDMAP)
-			ret = vmf_insert_mixed(&cvma, address,
-					__pfn_to_pfn_t(pfn, PFN_DEV));
+			ret = vmf_insert_mixed_prot(vma, address,
+						    __pfn_to_pfn_t(pfn, PFN_DEV),
+						    prot);
 		else
-			ret = vmf_insert_pfn(&cvma, address, pfn);
+			ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
 
 		/* Never error on prefaulted PTEs */
 		if (unlikely((ret & VM_FAULT_ERROR))) {
 			if (i == 0)
-				goto out_io_unlock;
+				return VM_FAULT_NOPAGE;
 			else
 				break;
 		}
@@ -285,31 +398,50 @@
 		if (unlikely(++page_offset >= page_last))
 			break;
 	}
-	ret = VM_FAULT_NOPAGE;
-out_io_unlock:
-	ttm_mem_io_unlock(man);
-out_unlock:
-	ttm_bo_unreserve(bo);
 	return ret;
 }
+EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
 
-static void ttm_bo_vm_open(struct vm_area_struct *vma)
+vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 {
-	struct ttm_buffer_object *bo =
-	    (struct ttm_buffer_object *)vma->vm_private_data;
+	struct vm_area_struct *vma = vmf->vma;
+	pgprot_t prot;
+	struct ttm_buffer_object *bo = vma->vm_private_data;
+	vm_fault_t ret;
+
+	ret = ttm_bo_vm_reserve(bo, vmf);
+	if (ret)
+		return ret;
+
+	prot = vma->vm_page_prot;
+	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+		return ret;
+
+	dma_resv_unlock(bo->base.resv);
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_vm_fault);
+
+void ttm_bo_vm_open(struct vm_area_struct *vma)
+{
+	struct ttm_buffer_object *bo = vma->vm_private_data;
 
 	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
 
 	ttm_bo_get(bo);
 }
+EXPORT_SYMBOL(ttm_bo_vm_open);
 
-static void ttm_bo_vm_close(struct vm_area_struct *vma)
+void ttm_bo_vm_close(struct vm_area_struct *vma)
 {
-	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	struct ttm_buffer_object *bo = vma->vm_private_data;
 
 	ttm_bo_put(bo);
 	vma->vm_private_data = NULL;
 }
+EXPORT_SYMBOL(ttm_bo_vm_close);
 
 static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
 				 unsigned long offset,
@@ -350,11 +482,13 @@
 	return len;
 }
 
-static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
-			    void *buf, int len, int write)
+int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+		     void *buf, int len, int write)
 {
-	unsigned long offset = (addr) - vma->vm_start;
 	struct ttm_buffer_object *bo = vma->vm_private_data;
+	unsigned long offset = (addr) - vma->vm_start +
+		((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
+		 << PAGE_SHIFT);
 	int ret;
 
 	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
@@ -366,12 +500,7 @@
 
 	switch (bo->mem.mem_type) {
 	case TTM_PL_SYSTEM:
-		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
-			ret = ttm_tt_swapin(bo->ttm);
-			if (unlikely(ret != 0))
-				return ret;
-		}
-		/* fall through */
+		fallthrough;
 	case TTM_PL_TT:
 		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
 		break;
@@ -387,12 +516,13 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(ttm_bo_vm_access);
 
 static const struct vm_operations_struct ttm_bo_vm_ops = {
 	.fault = ttm_bo_vm_fault,
 	.open = ttm_bo_vm_open,
 	.close = ttm_bo_vm_close,
-	.access = ttm_bo_vm_access
+	.access = ttm_bo_vm_access,
 };
 
 static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
@@ -402,16 +532,16 @@
 	struct drm_vma_offset_node *node;
 	struct ttm_buffer_object *bo = NULL;
 
-	drm_vma_offset_lock_lookup(&bdev->vma_manager);
+	drm_vma_offset_lock_lookup(bdev->vma_manager);
 
-	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+	node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
 	if (likely(node)) {
-		bo = container_of(node, struct ttm_buffer_object, vma_node);
-		if (!kref_get_unless_zero(&bo->kref))
-			bo = NULL;
+		bo = container_of(node, struct ttm_buffer_object,
+				  base.vma_node);
+		bo = ttm_bo_get_unless_zero(bo);
 	}
 
-	drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+	drm_vma_offset_unlock_lookup(bdev->vma_manager);
 
 	if (!bo)
 		pr_err("Could not find buffer object to map\n");
@@ -419,26 +549,8 @@
 	return bo;
 }
 
-int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
-		struct ttm_bo_device *bdev)
+static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
 {
-	struct ttm_bo_driver *driver;
-	struct ttm_buffer_object *bo;
-	int ret;
-
-	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
-	if (unlikely(!bo))
-		return -EINVAL;
-
-	driver = bo->bdev->driver;
-	if (unlikely(!driver->verify_access)) {
-		ret = -EPERM;
-		goto out_unref;
-	}
-	ret = driver->verify_access(bo, filp);
-	if (unlikely(ret != 0))
-		goto out_unref;
-
 	vma->vm_ops = &ttm_bo_vm_ops;
 
 	/*
@@ -457,6 +569,32 @@
 	 */
 	vma->vm_flags |= VM_MIXEDMAP;
 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+}
+
+int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+		struct ttm_bo_device *bdev)
+{
+	struct ttm_bo_driver *driver;
+	struct ttm_buffer_object *bo;
+	int ret;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
+		return -EINVAL;
+
+	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
+	if (unlikely(!bo))
+		return -EINVAL;
+
+	driver = bo->bdev->driver;
+	if (unlikely(!driver->verify_access)) {
+		ret = -EPERM;
+		goto out_unref;
+	}
+	ret = driver->verify_access(bo, filp);
+	if (unlikely(ret != 0))
+		goto out_unref;
+
+	ttm_bo_mmap_vma_setup(bo, vma);
 	return 0;
 out_unref:
 	ttm_bo_put(bo);
@@ -464,17 +602,10 @@
 }
 EXPORT_SYMBOL(ttm_bo_mmap);
 
-int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 {
-	if (vma->vm_pgoff != 0)
-		return -EACCES;
-
 	ttm_bo_get(bo);
-
-	vma->vm_ops = &ttm_bo_vm_ops;
-	vma->vm_private_data = bo;
-	vma->vm_flags |= VM_MIXEDMAP;
-	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+	ttm_bo_mmap_vma_setup(bo, vma);
 	return 0;
 }
-EXPORT_SYMBOL(ttm_fbdev_mmap);
+EXPORT_SYMBOL(ttm_bo_mmap_obj);
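Usage note: with ttm_bo_vm_reserve() and ttm_bo_vm_fault_reserved() exported (their declarations live in include/drm/ttm/ttm_bo_api.h), a driver can build a customized fault() handler instead of pointing .fault at ttm_bo_vm_fault(). Below is a minimal sketch that mirrors the call sequence of ttm_bo_vm_fault() in this patch; the foo_* names are hypothetical, the prefault count of 16 is only an illustration, and fault_page_size == 1 requests ordinary (non-huge) PTEs.

/*
 * Sketch of a driver-private fault handler built on the helpers this
 * patch exports. The foo_* names are hypothetical; the call sequence
 * mirrors ttm_bo_vm_fault() above.
 */
static vm_fault_t foo_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	vm_fault_t ret;

	/* Trylock the reservation, possibly dropping the mmap_lock. */
	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/*
	 * Insert the PTEs. A driver could derive num_prefault from
	 * madvise state or from the size of the GPU object.
	 */
	ret = ttm_bo_vm_fault_reserved(vmf, vma->vm_page_prot, 16, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret; /* The helpers already dropped the reservation. */

	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct foo_bo_vm_ops = {
	.fault = foo_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};

This reserve/fault_reserved split is what lets a driver supply its own page protection, prefault count, or a huge fault_page_size; the vmwgfx huge-page work that motivated this series builds on the same pattern.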
---|