.. | ..
---|
41 | 41 | #include <linux/file.h> |
---|
42 | 42 | #include <linux/module.h> |
---|
43 | 43 | #include <linux/atomic.h> |
---|
44 | | -#include <linux/reservation.h> |
---|
| 44 | +#include <linux/dma-resv.h> |
---|
45 | 45 | |
---|
46 | 46 | static void ttm_bo_global_kobj_release(struct kobject *kobj); |
---|
| 47 | + |
---|
| 48 | +/** |
---|
| 49 | + * ttm_global_mutex - protecting the global BO state |
---|
| 50 | + */ |
---|
| 51 | +DEFINE_MUTEX(ttm_global_mutex); |
---|
| 52 | +unsigned ttm_bo_glob_use_count; |
---|
| 53 | +struct ttm_bo_global ttm_bo_glob; |
---|
| 54 | +EXPORT_SYMBOL(ttm_bo_glob); |
---|
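For context: the hunk above turns the per-device BO globals into a single shared instance, guarded by ttm_global_mutex and reference-counted through ttm_bo_glob_use_count. A minimal sketch of the intended get/put pattern (the helper names here are illustrative assumptions, not symbols added by this patch):

	static int example_bo_global_get(void)
	{
		mutex_lock(&ttm_global_mutex);
		if (ttm_bo_glob_use_count++ == 0) {
			/* first user: set up ttm_bo_glob (LRU lock, swap LRUs, ...) */
			spin_lock_init(&ttm_bo_glob.lru_lock);
		}
		mutex_unlock(&ttm_global_mutex);
		return 0;
	}

	static void example_bo_global_put(void)
	{
		mutex_lock(&ttm_global_mutex);
		if (--ttm_bo_glob_use_count == 0) {
			/* last user: tear ttm_bo_glob down again */
		}
		mutex_unlock(&ttm_global_mutex);
	}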
47 | 55 | |
---|
48 | 56 | static struct attribute ttm_bo_count = { |
---|
49 | 57 | .name = "bo_count", |
---|
.. | .. |
---|
56 | 64 | kfree(bo); |
---|
57 | 65 | } |
---|
58 | 66 | |
---|
59 | | -static inline int ttm_mem_type_from_place(const struct ttm_place *place, |
---|
60 | | - uint32_t *mem_type) |
---|
61 | | -{ |
---|
62 | | - int pos; |
---|
63 | | - |
---|
64 | | - pos = ffs(place->flags & TTM_PL_MASK_MEM); |
---|
65 | | - if (unlikely(!pos)) |
---|
66 | | - return -EINVAL; |
---|
67 | | - |
---|
68 | | - *mem_type = pos - 1; |
---|
69 | | - return 0; |
---|
70 | | -} |
---|
71 | | - |
---|
72 | | -static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) |
---|
73 | | -{ |
---|
74 | | - struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
---|
75 | | - struct drm_printer p = drm_debug_printer(TTM_PFX); |
---|
76 | | - |
---|
77 | | - pr_err(" has_type: %d\n", man->has_type); |
---|
78 | | - pr_err(" use_type: %d\n", man->use_type); |
---|
79 | | - pr_err(" flags: 0x%08X\n", man->flags); |
---|
80 | | - pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset); |
---|
81 | | - pr_err(" size: %llu\n", man->size); |
---|
82 | | - pr_err(" available_caching: 0x%08X\n", man->available_caching); |
---|
83 | | - pr_err(" default_caching: 0x%08X\n", man->default_caching); |
---|
84 | | - if (mem_type != TTM_PL_SYSTEM) |
---|
85 | | - (*man->func->debug)(man, &p); |
---|
86 | | -} |
---|
87 | | - |
---|
88 | 67 | static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, |
---|
89 | 68 | struct ttm_placement *placement) |
---|
90 | 69 | { |
---|
91 | | - int i, ret, mem_type; |
---|
| 70 | + struct drm_printer p = drm_debug_printer(TTM_PFX); |
---|
| 71 | + struct ttm_resource_manager *man; |
---|
| 72 | + int i, mem_type; |
---|
92 | 73 | |
---|
93 | | - pr_err("No space for %p (%lu pages, %luK, %luM)\n", |
---|
94 | | - bo, bo->mem.num_pages, bo->mem.size >> 10, |
---|
95 | | - bo->mem.size >> 20); |
---|
| 74 | + drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n", |
---|
| 75 | + bo, bo->mem.num_pages, bo->mem.size >> 10, |
---|
| 76 | + bo->mem.size >> 20); |
---|
96 | 77 | for (i = 0; i < placement->num_placement; i++) { |
---|
97 | | - ret = ttm_mem_type_from_place(&placement->placement[i], |
---|
98 | | - &mem_type); |
---|
99 | | - if (ret) |
---|
100 | | - return; |
---|
101 | | - pr_err(" placement[%d]=0x%08X (%d)\n", |
---|
102 | | - i, placement->placement[i].flags, mem_type); |
---|
103 | | - ttm_mem_type_debug(bo->bdev, mem_type); |
---|
| 78 | + mem_type = placement->placement[i].mem_type; |
---|
| 79 | + drm_printf(&p, " placement[%d]=0x%08X (%d)\n", |
---|
| 80 | + i, placement->placement[i].flags, mem_type); |
---|
| 81 | + man = ttm_manager_type(bo->bdev, mem_type); |
---|
| 82 | + ttm_resource_manager_debug(man, &p); |
---|
104 | 83 | } |
---|
105 | 84 | } |
---|
106 | 85 | |
---|
.. | .. |
---|
130 | 109 | .default_attrs = ttm_bo_global_attrs |
---|
131 | 110 | }; |
---|
132 | 111 | |
---|
133 | | - |
---|
134 | | -static inline uint32_t ttm_bo_type_flags(unsigned type) |
---|
135 | | -{ |
---|
136 | | - return 1 << (type); |
---|
137 | | -} |
---|
138 | | - |
---|
139 | | -static void ttm_bo_release_list(struct kref *list_kref) |
---|
140 | | -{ |
---|
141 | | - struct ttm_buffer_object *bo = |
---|
142 | | - container_of(list_kref, struct ttm_buffer_object, list_kref); |
---|
143 | | - struct ttm_bo_device *bdev = bo->bdev; |
---|
144 | | - size_t acc_size = bo->acc_size; |
---|
145 | | - |
---|
146 | | - BUG_ON(kref_read(&bo->list_kref)); |
---|
147 | | - BUG_ON(kref_read(&bo->kref)); |
---|
148 | | - BUG_ON(atomic_read(&bo->cpu_writers)); |
---|
149 | | - BUG_ON(bo->mem.mm_node != NULL); |
---|
150 | | - BUG_ON(!list_empty(&bo->lru)); |
---|
151 | | - BUG_ON(!list_empty(&bo->ddestroy)); |
---|
152 | | - ttm_tt_destroy(bo->ttm); |
---|
153 | | - atomic_dec(&bo->bdev->glob->bo_count); |
---|
154 | | - dma_fence_put(bo->moving); |
---|
155 | | - reservation_object_fini(&bo->ttm_resv); |
---|
156 | | - mutex_destroy(&bo->wu_mutex); |
---|
157 | | - bo->destroy(bo); |
---|
158 | | - ttm_mem_global_free(bdev->glob->mem_glob, acc_size); |
---|
159 | | -} |
---|
160 | | - |
---|
161 | | -void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) |
---|
| 112 | +static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, |
---|
| 113 | + struct ttm_resource *mem) |
---|
162 | 114 | { |
---|
163 | 115 | struct ttm_bo_device *bdev = bo->bdev; |
---|
164 | | - struct ttm_mem_type_manager *man; |
---|
| 116 | + struct ttm_resource_manager *man; |
---|
165 | 117 | |
---|
166 | | - reservation_object_assert_held(bo->resv); |
---|
| 118 | + if (!list_empty(&bo->lru)) |
---|
| 119 | + return; |
---|
167 | 120 | |
---|
168 | | - if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { |
---|
169 | | - BUG_ON(!list_empty(&bo->lru)); |
---|
| 121 | + if (mem->placement & TTM_PL_FLAG_NO_EVICT) |
---|
| 122 | + return; |
---|
170 | 123 | |
---|
171 | | - man = &bdev->man[bo->mem.mem_type]; |
---|
172 | | - list_add_tail(&bo->lru, &man->lru[bo->priority]); |
---|
173 | | - kref_get(&bo->list_kref); |
---|
| 124 | + man = ttm_manager_type(bdev, mem->mem_type); |
---|
| 125 | + list_add_tail(&bo->lru, &man->lru[bo->priority]); |
---|
174 | 126 | |
---|
175 | | - if (bo->ttm && !(bo->ttm->page_flags & |
---|
176 | | - (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) { |
---|
177 | | - list_add_tail(&bo->swap, |
---|
178 | | - &bdev->glob->swap_lru[bo->priority]); |
---|
179 | | - kref_get(&bo->list_kref); |
---|
180 | | - } |
---|
| 127 | + if (man->use_tt && bo->ttm && |
---|
| 128 | + !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG | |
---|
| 129 | + TTM_PAGE_FLAG_SWAPPED))) { |
---|
| 130 | + list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]); |
---|
181 | 131 | } |
---|
182 | 132 | } |
---|
183 | | -EXPORT_SYMBOL(ttm_bo_add_to_lru); |
---|
184 | 133 | |
---|
185 | | -static void ttm_bo_ref_bug(struct kref *list_kref) |
---|
| 134 | +static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) |
---|
186 | 135 | { |
---|
187 | | - BUG(); |
---|
188 | | -} |
---|
| 136 | + struct ttm_bo_device *bdev = bo->bdev; |
---|
| 137 | + bool notify = false; |
---|
189 | 138 | |
---|
190 | | -void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) |
---|
191 | | -{ |
---|
192 | 139 | if (!list_empty(&bo->swap)) { |
---|
193 | 140 | list_del_init(&bo->swap); |
---|
194 | | - kref_put(&bo->list_kref, ttm_bo_ref_bug); |
---|
| 141 | + notify = true; |
---|
195 | 142 | } |
---|
196 | 143 | if (!list_empty(&bo->lru)) { |
---|
197 | 144 | list_del_init(&bo->lru); |
---|
198 | | - kref_put(&bo->list_kref, ttm_bo_ref_bug); |
---|
| 145 | + notify = true; |
---|
199 | 146 | } |
---|
200 | 147 | |
---|
201 | | - /* |
---|
202 | | - * TODO: Add a driver hook to delete from |
---|
203 | | - * driver-specific LRU's here. |
---|
204 | | - */ |
---|
| 148 | + if (notify && bdev->driver->del_from_lru_notify) |
---|
| 149 | + bdev->driver->del_from_lru_notify(bo); |
---|
205 | 150 | } |
---|
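The removed TODO above is resolved by the new del_from_lru_notify driver callback. A sketch of a driver implementing it (the driver-side bookkeeping is an assumption); note that every call site of ttm_bo_del_from_lru() in this file holds the LRU spinlock, so the hook must not sleep:

	static void example_del_from_lru_notify(struct ttm_buffer_object *bo)
	{
		/* e.g. unlink the BO from a driver-private LRU mirror;
		 * called under ttm_bo_glob.lru_lock, so no sleeping here.
		 */
	}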
206 | 151 | |
---|
207 | | -void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo) |
---|
| 152 | +static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos, |
---|
| 153 | + struct ttm_buffer_object *bo) |
---|
208 | 154 | { |
---|
209 | | - struct ttm_bo_global *glob = bo->bdev->glob; |
---|
210 | | - |
---|
211 | | - spin_lock(&glob->lru_lock); |
---|
212 | | - ttm_bo_del_from_lru(bo); |
---|
213 | | - spin_unlock(&glob->lru_lock); |
---|
| 155 | + if (!pos->first) |
---|
| 156 | + pos->first = bo; |
---|
| 157 | + pos->last = bo; |
---|
214 | 158 | } |
---|
215 | | -EXPORT_SYMBOL(ttm_bo_del_sub_from_lru); |
---|
216 | 159 | |
---|
217 | | -void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo) |
---|
| 160 | +void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, |
---|
| 161 | + struct ttm_lru_bulk_move *bulk) |
---|
218 | 162 | { |
---|
219 | | - reservation_object_assert_held(bo->resv); |
---|
| 163 | + dma_resv_assert_held(bo->base.resv); |
---|
220 | 164 | |
---|
221 | 165 | ttm_bo_del_from_lru(bo); |
---|
222 | | - ttm_bo_add_to_lru(bo); |
---|
| 166 | + ttm_bo_add_mem_to_lru(bo, &bo->mem); |
---|
| 167 | + |
---|
| 168 | + if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { |
---|
| 169 | + switch (bo->mem.mem_type) { |
---|
| 170 | + case TTM_PL_TT: |
---|
| 171 | + ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo); |
---|
| 172 | + break; |
---|
| 173 | + |
---|
| 174 | + case TTM_PL_VRAM: |
---|
| 175 | + ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo); |
---|
| 176 | + break; |
---|
| 177 | + } |
---|
| 178 | + if (bo->ttm && !(bo->ttm->page_flags & |
---|
| 179 | + (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) |
---|
| 180 | + ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo); |
---|
| 181 | + } |
---|
223 | 182 | } |
---|
224 | 183 | EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); |
---|
225 | 184 | |
---|
| 185 | +void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) |
---|
| 186 | +{ |
---|
| 187 | + unsigned i; |
---|
| 188 | + |
---|
| 189 | + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { |
---|
| 190 | + struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i]; |
---|
| 191 | + struct ttm_resource_manager *man; |
---|
| 192 | + |
---|
| 193 | + if (!pos->first) |
---|
| 194 | + continue; |
---|
| 195 | + |
---|
| 196 | + dma_resv_assert_held(pos->first->base.resv); |
---|
| 197 | + dma_resv_assert_held(pos->last->base.resv); |
---|
| 198 | + |
---|
| 199 | + man = ttm_manager_type(pos->first->bdev, TTM_PL_TT); |
---|
| 200 | + list_bulk_move_tail(&man->lru[i], &pos->first->lru, |
---|
| 201 | + &pos->last->lru); |
---|
| 202 | + } |
---|
| 203 | + |
---|
| 204 | + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { |
---|
| 205 | + struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i]; |
---|
| 206 | + struct ttm_resource_manager *man; |
---|
| 207 | + |
---|
| 208 | + if (!pos->first) |
---|
| 209 | + continue; |
---|
| 210 | + |
---|
| 211 | + dma_resv_assert_held(pos->first->base.resv); |
---|
| 212 | + dma_resv_assert_held(pos->last->base.resv); |
---|
| 213 | + |
---|
| 214 | + man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM); |
---|
| 215 | + list_bulk_move_tail(&man->lru[i], &pos->first->lru, |
---|
| 216 | + &pos->last->lru); |
---|
| 217 | + } |
---|
| 218 | + |
---|
| 219 | + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { |
---|
| 220 | + struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i]; |
---|
| 221 | + struct list_head *lru; |
---|
| 222 | + |
---|
| 223 | + if (!pos->first) |
---|
| 224 | + continue; |
---|
| 225 | + |
---|
| 226 | + dma_resv_assert_held(pos->first->base.resv); |
---|
| 227 | + dma_resv_assert_held(pos->last->base.resv); |
---|
| 228 | + |
---|
| 229 | + lru = &ttm_bo_glob.swap_lru[i]; |
---|
| 230 | + list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap); |
---|
| 231 | + } |
---|
| 232 | +} |
---|
| 233 | +EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail); |
---|
| 234 | + |
---|
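A hedged sketch of how a driver would drive the new bulk-move API (the bos[]/num_bos bookkeeping is hypothetical). Each BO's reservation must be held, matching the dma_resv_assert_held() checks above (in practice the BOs typically share one resv object, e.g. a VM's), and the LRU spinlock must be held across both calls:

	struct ttm_lru_bulk_move bulk;
	unsigned i;

	memset(&bulk, 0, sizeof(bulk));
	spin_lock(&ttm_bo_glob.lru_lock);
	for (i = 0; i < num_bos; ++i)
		ttm_bo_move_to_lru_tail(bos[i], &bulk);
	ttm_bo_bulk_move_lru_tail(&bulk);
	spin_unlock(&ttm_bo_glob.lru_lock);

The payoff is that N buffers are re-queued with a handful of list_bulk_move_tail() splices per priority instead of N individual list operations.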
226 | 235 | static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, |
---|
227 | | - struct ttm_mem_reg *mem, bool evict, |
---|
| 236 | + struct ttm_resource *mem, bool evict, |
---|
228 | 237 | struct ttm_operation_ctx *ctx) |
---|
229 | 238 | { |
---|
230 | 239 | struct ttm_bo_device *bdev = bo->bdev; |
---|
231 | | - bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); |
---|
232 | | - bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem); |
---|
233 | | - struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type]; |
---|
234 | | - struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type]; |
---|
235 | | - int ret = 0; |
---|
| 240 | + struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type); |
---|
| 241 | + struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type); |
---|
| 242 | + int ret; |
---|
236 | 243 | |
---|
237 | | - if (old_is_pci || new_is_pci || |
---|
238 | | - ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) { |
---|
239 | | - ret = ttm_mem_io_lock(old_man, true); |
---|
240 | | - if (unlikely(ret != 0)) |
---|
241 | | - goto out_err; |
---|
242 | | - ttm_bo_unmap_virtual_locked(bo); |
---|
243 | | - ttm_mem_io_unlock(old_man); |
---|
244 | | - } |
---|
| 244 | + ttm_bo_unmap_virtual(bo); |
---|
245 | 245 | |
---|
246 | 246 | /* |
---|
247 | 247 | * Create and bind a ttm if required. |
---|
248 | 248 | */ |
---|
249 | 249 | |
---|
250 | | - if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { |
---|
251 | | - if (bo->ttm == NULL) { |
---|
252 | | - bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); |
---|
253 | | - ret = ttm_tt_create(bo, zero); |
---|
254 | | - if (ret) |
---|
255 | | - goto out_err; |
---|
256 | | - } |
---|
| 250 | + if (new_man->use_tt) { |
---|
| 251 | + /* Zero init the new TTM structure if the old location should |
---|
| 252 | + * have used one as well. |
---|
| 253 | + */ |
---|
| 254 | + ret = ttm_tt_create(bo, old_man->use_tt); |
---|
| 255 | + if (ret) |
---|
| 256 | + goto out_err; |
---|
257 | 257 | |
---|
258 | 258 | ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); |
---|
259 | 259 | if (ret) |
---|
260 | 260 | goto out_err; |
---|
261 | 261 | |
---|
262 | 262 | if (mem->mem_type != TTM_PL_SYSTEM) { |
---|
263 | | - ret = ttm_tt_bind(bo->ttm, mem, ctx); |
---|
| 263 | + ret = ttm_tt_populate(bdev, bo->ttm, ctx); |
---|
| 264 | + if (ret) |
---|
| 265 | + goto out_err; |
---|
| 266 | + |
---|
| 267 | + ret = ttm_bo_tt_bind(bo, mem); |
---|
264 | 268 | if (ret) |
---|
265 | 269 | goto out_err; |
---|
266 | 270 | } |
---|
.. | .. |
---|
269 | 273 | if (bdev->driver->move_notify) |
---|
270 | 274 | bdev->driver->move_notify(bo, evict, mem); |
---|
271 | 275 | bo->mem = *mem; |
---|
272 | | - mem->mm_node = NULL; |
---|
273 | 276 | goto moved; |
---|
274 | 277 | } |
---|
275 | 278 | } |
---|
.. | .. |
---|
277 | 280 | if (bdev->driver->move_notify) |
---|
278 | 281 | bdev->driver->move_notify(bo, evict, mem); |
---|
279 | 282 | |
---|
280 | | - if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && |
---|
281 | | - !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) |
---|
| 283 | + if (old_man->use_tt && new_man->use_tt) |
---|
282 | 284 | ret = ttm_bo_move_ttm(bo, ctx, mem); |
---|
283 | 285 | else if (bdev->driver->move) |
---|
284 | 286 | ret = bdev->driver->move(bo, evict, ctx, mem); |
---|
.. | .. |
---|
296 | 298 | } |
---|
297 | 299 | |
---|
298 | 300 | moved: |
---|
299 | | - if (bo->evicted) { |
---|
300 | | - if (bdev->driver->invalidate_caches) { |
---|
301 | | - ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); |
---|
302 | | - if (ret) |
---|
303 | | - pr_err("Can not flush read caches\n"); |
---|
304 | | - } |
---|
305 | | - bo->evicted = false; |
---|
306 | | - } |
---|
307 | | - |
---|
308 | | - if (bo->mem.mm_node) |
---|
309 | | - bo->offset = (bo->mem.start << PAGE_SHIFT) + |
---|
310 | | - bdev->man[bo->mem.mem_type].gpu_offset; |
---|
311 | | - else |
---|
312 | | - bo->offset = 0; |
---|
313 | | - |
---|
314 | 301 | ctx->bytes_moved += bo->num_pages << PAGE_SHIFT; |
---|
315 | 302 | return 0; |
---|
316 | 303 | |
---|
317 | 304 | out_err: |
---|
318 | | - new_man = &bdev->man[bo->mem.mem_type]; |
---|
319 | | - if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) { |
---|
320 | | - ttm_tt_destroy(bo->ttm); |
---|
321 | | - bo->ttm = NULL; |
---|
322 | | - } |
---|
| 305 | + new_man = ttm_manager_type(bdev, bo->mem.mem_type); |
---|
| 306 | + if (!new_man->use_tt) |
---|
| 307 | + ttm_bo_tt_destroy(bo); |
---|
323 | 308 | |
---|
324 | 309 | return ret; |
---|
325 | 310 | } |
---|
.. | .. |
---|
337 | 322 | if (bo->bdev->driver->move_notify) |
---|
338 | 323 | bo->bdev->driver->move_notify(bo, false, NULL); |
---|
339 | 324 | |
---|
340 | | - ttm_tt_destroy(bo->ttm); |
---|
341 | | - bo->ttm = NULL; |
---|
342 | | - ttm_bo_mem_put(bo, &bo->mem); |
---|
| 325 | + ttm_bo_tt_destroy(bo); |
---|
| 326 | + ttm_resource_free(bo, &bo->mem); |
---|
343 | 327 | } |
---|
344 | 328 | |
---|
345 | 329 | static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo) |
---|
346 | 330 | { |
---|
347 | 331 | int r; |
---|
348 | 332 | |
---|
349 | | - if (bo->resv == &bo->ttm_resv) |
---|
| 333 | + if (bo->base.resv == &bo->base._resv) |
---|
350 | 334 | return 0; |
---|
351 | 335 | |
---|
352 | | - BUG_ON(!reservation_object_trylock(&bo->ttm_resv)); |
---|
| 336 | + BUG_ON(!dma_resv_trylock(&bo->base._resv)); |
---|
353 | 337 | |
---|
354 | | - r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv); |
---|
| 338 | + r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv); |
---|
| 339 | + dma_resv_unlock(&bo->base._resv); |
---|
355 | 340 | if (r) |
---|
356 | | - reservation_object_unlock(&bo->ttm_resv); |
---|
| 341 | + return r; |
---|
| 342 | + |
---|
| 343 | + if (bo->type != ttm_bo_type_sg) { |
---|
| 344 | + /* This works because the BO is about to be destroyed and nobody |
---|
| 345 | + * references it anymore. The only tricky case is the trylock on |
---|
| 346 | + * the resv object while holding the lru_lock. |
---|
| 347 | + */ |
---|
| 348 | + spin_lock(&ttm_bo_glob.lru_lock); |
---|
| 349 | + bo->base.resv = &bo->base._resv; |
---|
| 350 | + spin_unlock(&ttm_bo_glob.lru_lock); |
---|
| 351 | + } |
---|
357 | 352 | |
---|
358 | 353 | return r; |
---|
359 | 354 | } |
---|
360 | 355 | |
---|
361 | 356 | static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) |
---|
362 | 357 | { |
---|
363 | | - struct reservation_object_list *fobj; |
---|
| 358 | + struct dma_resv *resv = &bo->base._resv; |
---|
| 359 | + struct dma_resv_list *fobj; |
---|
364 | 360 | struct dma_fence *fence; |
---|
365 | 361 | int i; |
---|
366 | 362 | |
---|
367 | | - fobj = reservation_object_get_list(&bo->ttm_resv); |
---|
368 | | - fence = reservation_object_get_excl(&bo->ttm_resv); |
---|
| 363 | + rcu_read_lock(); |
---|
| 364 | + fobj = rcu_dereference(resv->fence); |
---|
| 365 | + fence = rcu_dereference(resv->fence_excl); |
---|
369 | 366 | if (fence && !fence->ops->signaled) |
---|
370 | 367 | dma_fence_enable_sw_signaling(fence); |
---|
371 | 368 | |
---|
372 | 369 | for (i = 0; fobj && i < fobj->shared_count; ++i) { |
---|
373 | | - fence = rcu_dereference_protected(fobj->shared[i], |
---|
374 | | - reservation_object_held(bo->resv)); |
---|
| 370 | + fence = rcu_dereference(fobj->shared[i]); |
---|
375 | 371 | |
---|
376 | 372 | if (!fence->ops->signaled) |
---|
377 | 373 | dma_fence_enable_sw_signaling(fence); |
---|
378 | 374 | } |
---|
379 | | -} |
---|
380 | | - |
---|
381 | | -static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) |
---|
382 | | -{ |
---|
383 | | - struct ttm_bo_device *bdev = bo->bdev; |
---|
384 | | - struct ttm_bo_global *glob = bdev->glob; |
---|
385 | | - int ret; |
---|
386 | | - |
---|
387 | | - ret = ttm_bo_individualize_resv(bo); |
---|
388 | | - if (ret) { |
---|
389 | | - /* Last resort, if we fail to allocate memory for the |
---|
390 | | - * fences block for the BO to become idle |
---|
391 | | - */ |
---|
392 | | - reservation_object_wait_timeout_rcu(bo->resv, true, false, |
---|
393 | | - 30 * HZ); |
---|
394 | | - spin_lock(&glob->lru_lock); |
---|
395 | | - goto error; |
---|
396 | | - } |
---|
397 | | - |
---|
398 | | - spin_lock(&glob->lru_lock); |
---|
399 | | - ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY; |
---|
400 | | - if (!ret) { |
---|
401 | | - if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) { |
---|
402 | | - ttm_bo_del_from_lru(bo); |
---|
403 | | - spin_unlock(&glob->lru_lock); |
---|
404 | | - if (bo->resv != &bo->ttm_resv) |
---|
405 | | - reservation_object_unlock(&bo->ttm_resv); |
---|
406 | | - |
---|
407 | | - ttm_bo_cleanup_memtype_use(bo); |
---|
408 | | - reservation_object_unlock(bo->resv); |
---|
409 | | - return; |
---|
410 | | - } |
---|
411 | | - |
---|
412 | | - ttm_bo_flush_all_fences(bo); |
---|
413 | | - |
---|
414 | | - /* |
---|
415 | | - * Make NO_EVICT bos immediately available to |
---|
416 | | - * shrinkers, now that they are queued for |
---|
417 | | - * destruction. |
---|
418 | | - */ |
---|
419 | | - if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { |
---|
420 | | - bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT; |
---|
421 | | - ttm_bo_add_to_lru(bo); |
---|
422 | | - } |
---|
423 | | - |
---|
424 | | - reservation_object_unlock(bo->resv); |
---|
425 | | - } |
---|
426 | | - if (bo->resv != &bo->ttm_resv) |
---|
427 | | - reservation_object_unlock(&bo->ttm_resv); |
---|
428 | | - |
---|
429 | | -error: |
---|
430 | | - kref_get(&bo->list_kref); |
---|
431 | | - list_add_tail(&bo->ddestroy, &bdev->ddestroy); |
---|
432 | | - spin_unlock(&glob->lru_lock); |
---|
433 | | - |
---|
434 | | - schedule_delayed_work(&bdev->wq, |
---|
435 | | - ((HZ / 100) < 1) ? 1 : HZ / 100); |
---|
| 375 | + rcu_read_unlock(); |
---|
436 | 376 | } |
---|
437 | 377 | |
---|
438 | 378 | /** |
---|
439 | 379 | * function ttm_bo_cleanup_refs |
---|
440 | | - * If bo idle, remove from delayed- and lru lists, and unref. |
---|
441 | | - * If not idle, do nothing. |
---|
| 380 | + * If bo idle, remove from lru lists, and unref. |
---|
| 381 | + * If not idle, block if possible. |
---|
442 | 382 | * |
---|
443 | 383 | * Must be called with lru_lock and reservation held, this function |
---|
444 | 384 | * will drop the lru lock and optionally the reservation lock before returning. |
---|
.. | .. |
---|
452 | 392 | bool interruptible, bool no_wait_gpu, |
---|
453 | 393 | bool unlock_resv) |
---|
454 | 394 | { |
---|
455 | | - struct ttm_bo_global *glob = bo->bdev->glob; |
---|
456 | | - struct reservation_object *resv; |
---|
| 395 | + struct dma_resv *resv = &bo->base._resv; |
---|
457 | 396 | int ret; |
---|
458 | 397 | |
---|
459 | | - if (unlikely(list_empty(&bo->ddestroy))) |
---|
460 | | - resv = bo->resv; |
---|
461 | | - else |
---|
462 | | - resv = &bo->ttm_resv; |
---|
463 | | - |
---|
464 | | - if (reservation_object_test_signaled_rcu(resv, true)) |
---|
| 398 | + if (dma_resv_test_signaled_rcu(resv, true)) |
---|
465 | 399 | ret = 0; |
---|
466 | 400 | else |
---|
467 | 401 | ret = -EBUSY; |
---|
.. | .. |
---|
470 | 404 | long lret; |
---|
471 | 405 | |
---|
472 | 406 | if (unlock_resv) |
---|
473 | | - reservation_object_unlock(bo->resv); |
---|
474 | | - spin_unlock(&glob->lru_lock); |
---|
| 407 | + dma_resv_unlock(bo->base.resv); |
---|
| 408 | + spin_unlock(&ttm_bo_glob.lru_lock); |
---|
475 | 409 | |
---|
476 | | - lret = reservation_object_wait_timeout_rcu(resv, true, |
---|
477 | | - interruptible, |
---|
478 | | - 30 * HZ); |
---|
| 410 | + lret = dma_resv_wait_timeout_rcu(resv, true, interruptible, |
---|
| 411 | + 30 * HZ); |
---|
479 | 412 | |
---|
480 | 413 | if (lret < 0) |
---|
481 | 414 | return lret; |
---|
482 | 415 | else if (lret == 0) |
---|
483 | 416 | return -EBUSY; |
---|
484 | 417 | |
---|
485 | | - spin_lock(&glob->lru_lock); |
---|
486 | | - if (unlock_resv && !reservation_object_trylock(bo->resv)) { |
---|
| 418 | + spin_lock(&ttm_bo_glob.lru_lock); |
---|
| 419 | + if (unlock_resv && !dma_resv_trylock(bo->base.resv)) { |
---|
487 | 420 | /* |
---|
488 | 421 | * We raced and lost; someone else holds the reservation now, |
---|
489 | 422 | * and is probably busy in ttm_bo_cleanup_memtype_use. |
---|
.. | .. |
---|
492 | 425 | * delayed destruction would succeed, so just return success |
---|
493 | 426 | * here. |
---|
494 | 427 | */ |
---|
495 | | - spin_unlock(&glob->lru_lock); |
---|
| 428 | + spin_unlock(&ttm_bo_glob.lru_lock); |
---|
496 | 429 | return 0; |
---|
497 | 430 | } |
---|
498 | 431 | ret = 0; |
---|
.. | .. |
---|
500 | 433 | |
---|
501 | 434 | if (ret || unlikely(list_empty(&bo->ddestroy))) { |
---|
502 | 435 | if (unlock_resv) |
---|
503 | | - reservation_object_unlock(bo->resv); |
---|
504 | | - spin_unlock(&glob->lru_lock); |
---|
| 436 | + dma_resv_unlock(bo->base.resv); |
---|
| 437 | + spin_unlock(&ttm_bo_glob.lru_lock); |
---|
505 | 438 | return ret; |
---|
506 | 439 | } |
---|
507 | 440 | |
---|
508 | 441 | ttm_bo_del_from_lru(bo); |
---|
509 | 442 | list_del_init(&bo->ddestroy); |
---|
510 | | - kref_put(&bo->list_kref, ttm_bo_ref_bug); |
---|
511 | | - |
---|
512 | | - spin_unlock(&glob->lru_lock); |
---|
| 443 | + spin_unlock(&ttm_bo_glob.lru_lock); |
---|
513 | 444 | ttm_bo_cleanup_memtype_use(bo); |
---|
514 | 445 | |
---|
515 | 446 | if (unlock_resv) |
---|
516 | | - reservation_object_unlock(bo->resv); |
---|
| 447 | + dma_resv_unlock(bo->base.resv); |
---|
| 448 | + |
---|
| 449 | + ttm_bo_put(bo); |
---|
517 | 450 | |
---|
518 | 451 | return 0; |
---|
519 | 452 | } |
---|
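For reference, the locking contract as a caller now sees it: enter with the LRU lock, the reservation and a private reference; ttm_bo_cleanup_refs() drops the LRU lock, optionally the reservation, and (on success) the resurrected list reference via the new ttm_bo_put() above. A sketch of a call site (cf. ttm_bo_delayed_delete() below):

	spin_lock(&ttm_bo_glob.lru_lock);
	if (!ttm_bo_get_unless_zero(bo)) {	/* take our own reference */
		spin_unlock(&ttm_bo_glob.lru_lock);
		return;
	}
	if (dma_resv_trylock(bo->base.resv))
		ttm_bo_cleanup_refs(bo, false, false, true); /* drops both locks */
	else
		spin_unlock(&ttm_bo_glob.lru_lock);
	ttm_bo_put(bo);				/* balances get_unless_zero */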
.. | .. |
---|
524 | 457 | */ |
---|
525 | 458 | static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) |
---|
526 | 459 | { |
---|
527 | | - struct ttm_bo_global *glob = bdev->glob; |
---|
| 460 | + struct ttm_bo_global *glob = &ttm_bo_glob; |
---|
528 | 461 | struct list_head removed; |
---|
529 | 462 | bool empty; |
---|
530 | 463 | |
---|
.. | .. |
---|
536 | 469 | |
---|
537 | 470 | bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object, |
---|
538 | 471 | ddestroy); |
---|
539 | | - kref_get(&bo->list_kref); |
---|
540 | 472 | list_move_tail(&bo->ddestroy, &removed); |
---|
| 473 | + if (!ttm_bo_get_unless_zero(bo)) |
---|
| 474 | + continue; |
---|
541 | 475 | |
---|
542 | | - if (remove_all || bo->resv != &bo->ttm_resv) { |
---|
| 476 | + if (remove_all || bo->base.resv != &bo->base._resv) { |
---|
543 | 477 | spin_unlock(&glob->lru_lock); |
---|
544 | | - reservation_object_lock(bo->resv, NULL); |
---|
| 478 | + dma_resv_lock(bo->base.resv, NULL); |
---|
545 | 479 | |
---|
546 | 480 | spin_lock(&glob->lru_lock); |
---|
547 | 481 | ttm_bo_cleanup_refs(bo, false, !remove_all, true); |
---|
548 | 482 | |
---|
549 | | - } else if (reservation_object_trylock(bo->resv)) { |
---|
| 483 | + } else if (dma_resv_trylock(bo->base.resv)) { |
---|
550 | 484 | ttm_bo_cleanup_refs(bo, false, !remove_all, true); |
---|
551 | 485 | } else { |
---|
552 | 486 | spin_unlock(&glob->lru_lock); |
---|
553 | 487 | } |
---|
554 | 488 | |
---|
555 | | - kref_put(&bo->list_kref, ttm_bo_release_list); |
---|
| 489 | + ttm_bo_put(bo); |
---|
556 | 490 | spin_lock(&glob->lru_lock); |
---|
557 | 491 | } |
---|
558 | 492 | list_splice_tail(&removed, &bdev->ddestroy); |
---|
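The schedule_delayed_work() producers elsewhere in this patch pair with a handler that re-runs this function until the ddestroy list drains; essentially what the existing ttm_bo_delayed_workqueue() does (reproduced as a sketch for context, not part of this diff):

	static void example_delayed_workqueue(struct work_struct *work)
	{
		struct ttm_bo_device *bdev =
			container_of(work, struct ttm_bo_device, wq.work);

		/* ttm_bo_delayed_delete() returns true once the list is empty */
		if (!ttm_bo_delayed_delete(bdev, false))
			schedule_delayed_work(&bdev->wq,
					      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}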
.. | .. |
---|
577 | 511 | struct ttm_buffer_object *bo = |
---|
578 | 512 | container_of(kref, struct ttm_buffer_object, kref); |
---|
579 | 513 | struct ttm_bo_device *bdev = bo->bdev; |
---|
580 | | - struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; |
---|
| 514 | + size_t acc_size = bo->acc_size; |
---|
| 515 | + int ret; |
---|
581 | 516 | |
---|
582 | | - drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node); |
---|
583 | | - ttm_mem_io_lock(man, false); |
---|
584 | | - ttm_mem_io_free_vm(bo); |
---|
585 | | - ttm_mem_io_unlock(man); |
---|
586 | | - ttm_bo_cleanup_refs_or_queue(bo); |
---|
587 | | - kref_put(&bo->list_kref, ttm_bo_release_list); |
---|
| 517 | + if (!bo->deleted) { |
---|
| 518 | + ret = ttm_bo_individualize_resv(bo); |
---|
| 519 | + if (ret) { |
---|
| 520 | + /* Last resort, if we fail to allocate memory for the |
---|
| 521 | + * fences block for the BO to become idle |
---|
| 522 | + */ |
---|
| 523 | + dma_resv_wait_timeout_rcu(bo->base.resv, true, false, |
---|
| 524 | + 30 * HZ); |
---|
| 525 | + } |
---|
| 526 | + |
---|
| 527 | + if (bo->bdev->driver->release_notify) |
---|
| 528 | + bo->bdev->driver->release_notify(bo); |
---|
| 529 | + |
---|
| 530 | + drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node); |
---|
| 531 | + ttm_mem_io_free(bdev, &bo->mem); |
---|
| 532 | + } |
---|
| 533 | + |
---|
| 534 | + if (!dma_resv_test_signaled_rcu(bo->base.resv, true) || |
---|
| 535 | + !dma_resv_trylock(bo->base.resv)) { |
---|
| 536 | + /* The BO is not idle, resurrect it for delayed destroy */ |
---|
| 537 | + ttm_bo_flush_all_fences(bo); |
---|
| 538 | + bo->deleted = true; |
---|
| 539 | + |
---|
| 540 | + spin_lock(&ttm_bo_glob.lru_lock); |
---|
| 541 | + |
---|
| 542 | + /* |
---|
| 543 | + * Make NO_EVICT bos immediately available to |
---|
| 544 | + * shrinkers, now that they are queued for |
---|
| 545 | + * destruction. |
---|
| 546 | + */ |
---|
| 547 | + if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { |
---|
| 548 | + bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT; |
---|
| 549 | + ttm_bo_del_from_lru(bo); |
---|
| 550 | + ttm_bo_add_mem_to_lru(bo, &bo->mem); |
---|
| 551 | + } |
---|
| 552 | + |
---|
| 553 | + kref_init(&bo->kref); |
---|
| 554 | + list_add_tail(&bo->ddestroy, &bdev->ddestroy); |
---|
| 555 | + spin_unlock(&ttm_bo_glob.lru_lock); |
---|
| 556 | + |
---|
| 557 | + schedule_delayed_work(&bdev->wq, |
---|
| 558 | + ((HZ / 100) < 1) ? 1 : HZ / 100); |
---|
| 559 | + return; |
---|
| 560 | + } |
---|
| 561 | + |
---|
| 562 | + spin_lock(&ttm_bo_glob.lru_lock); |
---|
| 563 | + ttm_bo_del_from_lru(bo); |
---|
| 564 | + list_del(&bo->ddestroy); |
---|
| 565 | + spin_unlock(&ttm_bo_glob.lru_lock); |
---|
| 566 | + |
---|
| 567 | + ttm_bo_cleanup_memtype_use(bo); |
---|
| 568 | + dma_resv_unlock(bo->base.resv); |
---|
| 569 | + |
---|
| 570 | + atomic_dec(&ttm_bo_glob.bo_count); |
---|
| 571 | + dma_fence_put(bo->moving); |
---|
| 572 | + if (!ttm_bo_uses_embedded_gem_object(bo)) |
---|
| 573 | + dma_resv_fini(&bo->base._resv); |
---|
| 574 | + bo->destroy(bo); |
---|
| 575 | + ttm_mem_global_free(&ttm_mem_glob, acc_size); |
---|
588 | 576 | } |
---|
589 | 577 | |
---|
590 | 578 | void ttm_bo_put(struct ttm_buffer_object *bo) |
---|
.. | .. |
---|
592 | 580 | kref_put(&bo->kref, ttm_bo_release); |
---|
593 | 581 | } |
---|
594 | 582 | EXPORT_SYMBOL(ttm_bo_put); |
---|
595 | | - |
---|
596 | | -void ttm_bo_unref(struct ttm_buffer_object **p_bo) |
---|
597 | | -{ |
---|
598 | | - struct ttm_buffer_object *bo = *p_bo; |
---|
599 | | - |
---|
600 | | - *p_bo = NULL; |
---|
601 | | - ttm_bo_put(bo); |
---|
602 | | -} |
---|
603 | | -EXPORT_SYMBOL(ttm_bo_unref); |
---|
604 | 583 | |
---|
605 | 584 | int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev) |
---|
606 | 585 | { |
---|
.. | .. |
---|
620 | 599 | struct ttm_operation_ctx *ctx) |
---|
621 | 600 | { |
---|
622 | 601 | struct ttm_bo_device *bdev = bo->bdev; |
---|
623 | | - struct ttm_mem_reg evict_mem; |
---|
| 602 | + struct ttm_resource evict_mem; |
---|
624 | 603 | struct ttm_placement placement; |
---|
625 | 604 | int ret = 0; |
---|
626 | 605 | |
---|
627 | | - reservation_object_assert_held(bo->resv); |
---|
| 606 | + dma_resv_assert_held(bo->base.resv); |
---|
628 | 607 | |
---|
629 | 608 | placement.num_placement = 0; |
---|
630 | 609 | placement.num_busy_placement = 0; |
---|
631 | 610 | bdev->driver->evict_flags(bo, &placement); |
---|
632 | 611 | |
---|
633 | 612 | if (!placement.num_placement && !placement.num_busy_placement) { |
---|
634 | | - ret = ttm_bo_pipeline_gutting(bo); |
---|
635 | | - if (ret) |
---|
636 | | - return ret; |
---|
| 613 | + ttm_bo_wait(bo, false, false); |
---|
637 | 614 | |
---|
| 615 | + ttm_bo_cleanup_memtype_use(bo); |
---|
638 | 616 | return ttm_tt_create(bo, false); |
---|
639 | 617 | } |
---|
640 | 618 | |
---|
641 | 619 | evict_mem = bo->mem; |
---|
642 | 620 | evict_mem.mm_node = NULL; |
---|
643 | | - evict_mem.bus.io_reserved_vm = false; |
---|
644 | | - evict_mem.bus.io_reserved_count = 0; |
---|
| 621 | + evict_mem.bus.offset = 0; |
---|
| 622 | + evict_mem.bus.addr = NULL; |
---|
645 | 623 | |
---|
646 | 624 | ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx); |
---|
647 | 625 | if (ret) { |
---|
.. | .. |
---|
657 | 635 | if (unlikely(ret)) { |
---|
658 | 636 | if (ret != -ERESTARTSYS) |
---|
659 | 637 | pr_err("Buffer eviction failed\n"); |
---|
660 | | - ttm_bo_mem_put(bo, &evict_mem); |
---|
661 | | - goto out; |
---|
| 638 | + ttm_resource_free(bo, &evict_mem); |
---|
662 | 639 | } |
---|
663 | | - bo->evicted = true; |
---|
664 | 640 | out: |
---|
665 | 641 | return ret; |
---|
666 | 642 | } |
---|
.. | .. |
---|
690 | 666 | * b. Otherwise, trylock it. |
---|
691 | 667 | */ |
---|
692 | 668 | static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo, |
---|
693 | | - struct ttm_operation_ctx *ctx, bool *locked) |
---|
| 669 | + struct ttm_operation_ctx *ctx, bool *locked, bool *busy) |
---|
694 | 670 | { |
---|
695 | 671 | bool ret = false; |
---|
696 | 672 | |
---|
697 | | - *locked = false; |
---|
698 | | - if (bo->resv == ctx->resv) { |
---|
699 | | - reservation_object_assert_held(bo->resv); |
---|
700 | | - if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT |
---|
701 | | - || !list_empty(&bo->ddestroy)) |
---|
| 673 | + if (bo->base.resv == ctx->resv) { |
---|
| 674 | + dma_resv_assert_held(bo->base.resv); |
---|
| 675 | + if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT) |
---|
702 | 676 | ret = true; |
---|
| 677 | + *locked = false; |
---|
| 678 | + if (busy) |
---|
| 679 | + *busy = false; |
---|
703 | 680 | } else { |
---|
704 | | - *locked = reservation_object_trylock(bo->resv); |
---|
705 | | - ret = *locked; |
---|
| 681 | + ret = dma_resv_trylock(bo->base.resv); |
---|
| 682 | + *locked = ret; |
---|
| 683 | + if (busy) |
---|
| 684 | + *busy = !ret; |
---|
706 | 685 | } |
---|
707 | 686 | |
---|
708 | 687 | return ret; |
---|
709 | 688 | } |
---|
710 | 689 | |
---|
711 | | -static int ttm_mem_evict_first(struct ttm_bo_device *bdev, |
---|
712 | | - uint32_t mem_type, |
---|
713 | | - const struct ttm_place *place, |
---|
714 | | - struct ttm_operation_ctx *ctx) |
---|
| 690 | +/** |
---|
| 691 | + * ttm_mem_evict_wait_busy - wait for a busy BO to become available |
---|
| 692 | + * |
---|
| 693 | + * @busy_bo: BO which couldn't be locked with trylock |
---|
| 694 | + * @ctx: operation context |
---|
| 695 | + * @ticket: acquire ticket |
---|
| 696 | + * |
---|
| 697 | + * Try to lock a busy buffer object to avoid failing eviction. |
---|
| 698 | + */ |
---|
| 699 | +static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo, |
---|
| 700 | + struct ttm_operation_ctx *ctx, |
---|
| 701 | + struct ww_acquire_ctx *ticket) |
---|
715 | 702 | { |
---|
716 | | - struct ttm_bo_global *glob = bdev->glob; |
---|
717 | | - struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
---|
718 | | - struct ttm_buffer_object *bo = NULL; |
---|
| 703 | + int r; |
---|
| 704 | + |
---|
| 705 | + if (!busy_bo || !ticket) |
---|
| 706 | + return -EBUSY; |
---|
| 707 | + |
---|
| 708 | + if (ctx->interruptible) |
---|
| 709 | + r = dma_resv_lock_interruptible(busy_bo->base.resv, |
---|
| 710 | + ticket); |
---|
| 711 | + else |
---|
| 712 | + r = dma_resv_lock(busy_bo->base.resv, ticket); |
---|
| 713 | + |
---|
| 714 | + /* |
---|
| 715 | + * TODO: It would be better to keep the BO locked until allocation is at |
---|
| 716 | + * least tried one more time, but that would mean a much larger rework |
---|
| 717 | + * of TTM. |
---|
| 718 | + */ |
---|
| 719 | + if (!r) |
---|
| 720 | + dma_resv_unlock(busy_bo->base.resv); |
---|
| 721 | + |
---|
| 722 | + return r == -EDEADLK ? -EBUSY : r; |
---|
| 723 | +} |
---|
| 724 | + |
---|
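Design note: mapping -EDEADLK to -EBUSY opts out of the usual ww-acquire backoff dance, per the TODO above. For contrast, the standard pattern this deliberately avoids looks roughly like (sketch; lock bookkeeping and error handling elided):

	ret = dma_resv_lock_interruptible(resv, ticket);
	if (ret == -EDEADLK) {
		/* back off: release all other resv locks held under
		 * 'ticket', then take this one in slow-path mode */
		dma_resv_lock_slow(resv, ticket);
		ret = 0;
	}

Doing the full backoff here would require unwinding much more TTM state, hence the simpler -EBUSY retry.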
| 725 | +int ttm_mem_evict_first(struct ttm_bo_device *bdev, |
---|
| 726 | + struct ttm_resource_manager *man, |
---|
| 727 | + const struct ttm_place *place, |
---|
| 728 | + struct ttm_operation_ctx *ctx, |
---|
| 729 | + struct ww_acquire_ctx *ticket) |
---|
| 730 | +{ |
---|
| 731 | + struct ttm_buffer_object *bo = NULL, *busy_bo = NULL; |
---|
719 | 732 | bool locked = false; |
---|
720 | 733 | unsigned i; |
---|
721 | 734 | int ret; |
---|
722 | 735 | |
---|
723 | | - spin_lock(&glob->lru_lock); |
---|
| 736 | + spin_lock(&ttm_bo_glob.lru_lock); |
---|
724 | 737 | for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { |
---|
725 | 738 | list_for_each_entry(bo, &man->lru[i], lru) { |
---|
726 | | - if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) |
---|
| 739 | + bool busy; |
---|
| 740 | + |
---|
| 741 | + if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, |
---|
| 742 | + &busy)) { |
---|
| 743 | + if (busy && !busy_bo && ticket != |
---|
| 744 | + dma_resv_locking_ctx(bo->base.resv)) |
---|
| 745 | + busy_bo = bo; |
---|
727 | 746 | continue; |
---|
| 747 | + } |
---|
728 | 748 | |
---|
729 | 749 | if (place && !bdev->driver->eviction_valuable(bo, |
---|
730 | 750 | place)) { |
---|
731 | 751 | if (locked) |
---|
732 | | - reservation_object_unlock(bo->resv); |
---|
| 752 | + dma_resv_unlock(bo->base.resv); |
---|
| 753 | + continue; |
---|
| 754 | + } |
---|
| 755 | + if (!ttm_bo_get_unless_zero(bo)) { |
---|
| 756 | + if (locked) |
---|
| 757 | + dma_resv_unlock(bo->base.resv); |
---|
733 | 758 | continue; |
---|
734 | 759 | } |
---|
735 | 760 | break; |
---|
.. | .. |
---|
743 | 768 | } |
---|
744 | 769 | |
---|
745 | 770 | if (!bo) { |
---|
746 | | - spin_unlock(&glob->lru_lock); |
---|
747 | | - return -EBUSY; |
---|
748 | | - } |
---|
749 | | - |
---|
750 | | - kref_get(&bo->list_kref); |
---|
751 | | - |
---|
752 | | - if (!list_empty(&bo->ddestroy)) { |
---|
753 | | - ret = ttm_bo_cleanup_refs(bo, ctx->interruptible, |
---|
754 | | - ctx->no_wait_gpu, locked); |
---|
755 | | - kref_put(&bo->list_kref, ttm_bo_release_list); |
---|
| 771 | + if (busy_bo && !ttm_bo_get_unless_zero(busy_bo)) |
---|
| 772 | + busy_bo = NULL; |
---|
| 773 | + spin_unlock(&ttm_bo_glob.lru_lock); |
---|
| 774 | + ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket); |
---|
| 775 | + if (busy_bo) |
---|
| 776 | + ttm_bo_put(busy_bo); |
---|
756 | 777 | return ret; |
---|
757 | 778 | } |
---|
758 | 779 | |
---|
759 | | - ttm_bo_del_from_lru(bo); |
---|
760 | | - spin_unlock(&glob->lru_lock); |
---|
761 | | - |
---|
762 | | - ret = ttm_bo_evict(bo, ctx); |
---|
763 | | - if (locked) { |
---|
764 | | - ttm_bo_unreserve(bo); |
---|
765 | | - } else { |
---|
766 | | - spin_lock(&glob->lru_lock); |
---|
767 | | - ttm_bo_add_to_lru(bo); |
---|
768 | | - spin_unlock(&glob->lru_lock); |
---|
| 780 | + if (bo->deleted) { |
---|
| 781 | + ret = ttm_bo_cleanup_refs(bo, ctx->interruptible, |
---|
| 782 | + ctx->no_wait_gpu, locked); |
---|
| 783 | + ttm_bo_put(bo); |
---|
| 784 | + return ret; |
---|
769 | 785 | } |
---|
770 | 786 | |
---|
771 | | - kref_put(&bo->list_kref, ttm_bo_release_list); |
---|
| 787 | + spin_unlock(&ttm_bo_glob.lru_lock); |
---|
| 788 | + |
---|
| 789 | + ret = ttm_bo_evict(bo, ctx); |
---|
| 790 | + if (locked) |
---|
| 791 | + ttm_bo_unreserve(bo); |
---|
| 792 | + else |
---|
| 793 | + ttm_bo_move_to_lru_tail_unlocked(bo); |
---|
| 794 | + |
---|
| 795 | + ttm_bo_put(bo); |
---|
772 | 796 | return ret; |
---|
773 | 797 | } |
---|
774 | | - |
---|
775 | | -void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) |
---|
776 | | -{ |
---|
777 | | - struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; |
---|
778 | | - |
---|
779 | | - if (mem->mm_node) |
---|
780 | | - (*man->func->put_node)(man, mem); |
---|
781 | | -} |
---|
782 | | -EXPORT_SYMBOL(ttm_bo_mem_put); |
---|
783 | 798 | |
---|
784 | 799 | /** |
---|
785 | 800 | * Add the last move fence to the BO and reserve a new shared slot. |
---|
786 | 801 | */ |
---|
787 | 802 | static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, |
---|
788 | | - struct ttm_mem_type_manager *man, |
---|
789 | | - struct ttm_mem_reg *mem) |
---|
| 803 | + struct ttm_resource_manager *man, |
---|
| 804 | + struct ttm_resource *mem, |
---|
| 805 | + bool no_wait_gpu) |
---|
790 | 806 | { |
---|
791 | 807 | struct dma_fence *fence; |
---|
792 | 808 | int ret; |
---|
.. | .. |
---|
795 | 811 | fence = dma_fence_get(man->move); |
---|
796 | 812 | spin_unlock(&man->move_lock); |
---|
797 | 813 | |
---|
798 | | - if (fence) { |
---|
799 | | - reservation_object_add_shared_fence(bo->resv, fence); |
---|
| 814 | + if (!fence) |
---|
| 815 | + return 0; |
---|
800 | 816 | |
---|
801 | | - ret = reservation_object_reserve_shared(bo->resv); |
---|
802 | | - if (unlikely(ret)) |
---|
803 | | - return ret; |
---|
804 | | - |
---|
805 | | - dma_fence_put(bo->moving); |
---|
806 | | - bo->moving = fence; |
---|
| 817 | + if (no_wait_gpu) { |
---|
| 818 | + dma_fence_put(fence); |
---|
| 819 | + return -EBUSY; |
---|
807 | 820 | } |
---|
808 | 821 | |
---|
| 822 | + dma_resv_add_shared_fence(bo->base.resv, fence); |
---|
| 823 | + |
---|
| 824 | + ret = dma_resv_reserve_shared(bo->base.resv, 1); |
---|
| 825 | + if (unlikely(ret)) { |
---|
| 826 | + dma_fence_put(fence); |
---|
| 827 | + return ret; |
---|
| 828 | + } |
---|
| 829 | + |
---|
| 830 | + dma_fence_put(bo->moving); |
---|
| 831 | + bo->moving = fence; |
---|
809 | 832 | return 0; |
---|
810 | 833 | } |
---|
811 | 834 | |
---|
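The consumer side above pairs with a producer that publishes the last pipelined move per manager; roughly what ttm_bo_move_accel_cleanup() does when it updates man->move (sketch for context, not part of this diff):

	spin_lock(&man->move_lock);
	if (!man->move || dma_fence_is_later(fence, man->move)) {
		dma_fence_put(man->move);
		man->move = dma_fence_get(fence);
	}
	spin_unlock(&man->move_lock);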
.. | .. |
---|
814 | 837 | * space, or we've evicted everything and there isn't enough space. |
---|
815 | 838 | */ |
---|
816 | 839 | static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, |
---|
817 | | - uint32_t mem_type, |
---|
818 | | - const struct ttm_place *place, |
---|
819 | | - struct ttm_mem_reg *mem, |
---|
820 | | - struct ttm_operation_ctx *ctx) |
---|
| 840 | + const struct ttm_place *place, |
---|
| 841 | + struct ttm_resource *mem, |
---|
| 842 | + struct ttm_operation_ctx *ctx) |
---|
821 | 843 | { |
---|
822 | 844 | struct ttm_bo_device *bdev = bo->bdev; |
---|
823 | | - struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
---|
| 845 | + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); |
---|
| 846 | + struct ww_acquire_ctx *ticket; |
---|
824 | 847 | int ret; |
---|
825 | 848 | |
---|
| 849 | + ticket = dma_resv_locking_ctx(bo->base.resv); |
---|
826 | 850 | do { |
---|
827 | | - ret = (*man->func->get_node)(man, bo, place, mem); |
---|
828 | | - if (unlikely(ret != 0)) |
---|
829 | | - return ret; |
---|
830 | | - if (mem->mm_node) |
---|
| 851 | + ret = ttm_resource_alloc(bo, place, mem); |
---|
| 852 | + if (likely(!ret)) |
---|
831 | 853 | break; |
---|
832 | | - ret = ttm_mem_evict_first(bdev, mem_type, place, ctx); |
---|
| 854 | + if (unlikely(ret != -ENOSPC)) |
---|
| 855 | + return ret; |
---|
| 856 | + ret = ttm_mem_evict_first(bdev, man, place, ctx, |
---|
| 857 | + ticket); |
---|
833 | 858 | if (unlikely(ret != 0)) |
---|
834 | 859 | return ret; |
---|
835 | 860 | } while (1); |
---|
836 | | - mem->mem_type = mem_type; |
---|
837 | | - return ttm_bo_add_move_fence(bo, man, mem); |
---|
| 861 | + |
---|
| 862 | + return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); |
---|
838 | 863 | } |
---|
839 | 864 | |
---|
840 | | -static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, |
---|
| 865 | +static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man, |
---|
841 | 866 | uint32_t cur_placement, |
---|
842 | 867 | uint32_t proposed_placement) |
---|
843 | 868 | { |
---|
.. | .. |
---|
850 | 875 | |
---|
851 | 876 | if ((cur_placement & caching) != 0) |
---|
852 | 877 | result |= (cur_placement & caching); |
---|
853 | | - else if ((man->default_caching & caching) != 0) |
---|
854 | | - result |= man->default_caching; |
---|
855 | 878 | else if ((TTM_PL_FLAG_CACHED & caching) != 0) |
---|
856 | 879 | result |= TTM_PL_FLAG_CACHED; |
---|
857 | 880 | else if ((TTM_PL_FLAG_WC & caching) != 0) |
---|
.. | .. |
---|
862 | 885 | return result; |
---|
863 | 886 | } |
---|
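A worked example of the caching selection: a write-combined buffer moving to a place that allows both cached and WC mappings keeps its WC caching, because the first branch above matches before the CACHED/WC fallbacks are reached:

	uint32_t cur = TTM_PL_FLAG_WC;
	uint32_t proposed = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_WC;
	/* (cur & proposed & TTM_PL_MASK_CACHING) != 0, so the result
	 * keeps TTM_PL_FLAG_WC rather than falling through to CACHED */
	uint32_t res = ttm_bo_select_caching(man, cur, proposed);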
864 | 887 | |
---|
865 | | -static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, |
---|
866 | | - uint32_t mem_type, |
---|
867 | | - const struct ttm_place *place, |
---|
868 | | - uint32_t *masked_placement) |
---|
| 888 | +/** |
---|
| 889 | + * ttm_bo_mem_placement - check if placement is compatible |
---|
| 890 | + * @bo: BO to find memory for |
---|
| 891 | + * @place: where to search |
---|
| 892 | + * @mem: the memory object to fill in |
---|
| 893 | + * @ctx: operation context |
---|
| 894 | + * |
---|
| 895 | + * Check if placement is compatible and fill in mem structure. |
---|
| 896 | + * Returns 0 when the placement can be used, -EBUSY if it won't |
---|
| 897 | + * work, or a negative error code otherwise. |
---|
| 898 | + */ |
---|
| 899 | +static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, |
---|
| 900 | + const struct ttm_place *place, |
---|
| 901 | + struct ttm_resource *mem, |
---|
| 902 | + struct ttm_operation_ctx *ctx) |
---|
869 | 903 | { |
---|
870 | | - uint32_t cur_flags = ttm_bo_type_flags(mem_type); |
---|
| 904 | + struct ttm_bo_device *bdev = bo->bdev; |
---|
| 905 | + struct ttm_resource_manager *man; |
---|
| 906 | + uint32_t cur_flags = 0; |
---|
871 | 907 | |
---|
872 | | - if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0) |
---|
873 | | - return false; |
---|
| 908 | + man = ttm_manager_type(bdev, place->mem_type); |
---|
| 909 | + if (!man || !ttm_resource_manager_used(man)) |
---|
| 910 | + return -EBUSY; |
---|
874 | 911 | |
---|
875 | | - if ((place->flags & man->available_caching) == 0) |
---|
876 | | - return false; |
---|
| 912 | + cur_flags = ttm_bo_select_caching(man, bo->mem.placement, |
---|
| 913 | + place->flags); |
---|
| 914 | + cur_flags |= place->flags & ~TTM_PL_MASK_CACHING; |
---|
877 | 915 | |
---|
878 | | - cur_flags |= (place->flags & man->available_caching); |
---|
| 916 | + mem->mem_type = place->mem_type; |
---|
| 917 | + mem->placement = cur_flags; |
---|
879 | 918 | |
---|
880 | | - *masked_placement = cur_flags; |
---|
881 | | - return true; |
---|
| 919 | + spin_lock(&ttm_bo_glob.lru_lock); |
---|
| 920 | + ttm_bo_del_from_lru(bo); |
---|
| 921 | + ttm_bo_add_mem_to_lru(bo, mem); |
---|
| 922 | + spin_unlock(&ttm_bo_glob.lru_lock); |
---|
| 923 | + |
---|
| 924 | + return 0; |
---|
882 | 925 | } |
---|
883 | 926 | |
---|
884 | 927 | /** |
---|
.. | .. |
---|
891 | 934 | */ |
---|
892 | 935 | int ttm_bo_mem_space(struct ttm_buffer_object *bo, |
---|
893 | 936 | struct ttm_placement *placement, |
---|
894 | | - struct ttm_mem_reg *mem, |
---|
| 937 | + struct ttm_resource *mem, |
---|
895 | 938 | struct ttm_operation_ctx *ctx) |
---|
896 | 939 | { |
---|
897 | 940 | struct ttm_bo_device *bdev = bo->bdev; |
---|
898 | | - struct ttm_mem_type_manager *man; |
---|
899 | | - uint32_t mem_type = TTM_PL_SYSTEM; |
---|
900 | | - uint32_t cur_flags = 0; |
---|
901 | 941 | bool type_found = false; |
---|
902 | | - bool type_ok = false; |
---|
903 | | - bool has_erestartsys = false; |
---|
904 | 942 | int i, ret; |
---|
905 | 943 | |
---|
906 | | - ret = reservation_object_reserve_shared(bo->resv); |
---|
| 944 | + ret = dma_resv_reserve_shared(bo->base.resv, 1); |
---|
907 | 945 | if (unlikely(ret)) |
---|
908 | 946 | return ret; |
---|
909 | 947 | |
---|
910 | | - mem->mm_node = NULL; |
---|
911 | 948 | for (i = 0; i < placement->num_placement; ++i) { |
---|
912 | 949 | const struct ttm_place *place = &placement->placement[i]; |
---|
| 950 | + struct ttm_resource_manager *man; |
---|
913 | 951 | |
---|
914 | | - ret = ttm_mem_type_from_place(place, &mem_type); |
---|
| 952 | + ret = ttm_bo_mem_placement(bo, place, mem, ctx); |
---|
915 | 953 | if (ret) |
---|
916 | | - return ret; |
---|
917 | | - man = &bdev->man[mem_type]; |
---|
918 | | - if (!man->has_type || !man->use_type) |
---|
919 | | - continue; |
---|
920 | | - |
---|
921 | | - type_ok = ttm_bo_mt_compatible(man, mem_type, place, |
---|
922 | | - &cur_flags); |
---|
923 | | - |
---|
924 | | - if (!type_ok) |
---|
925 | 954 | continue; |
---|
926 | 955 | |
---|
927 | 956 | type_found = true; |
---|
928 | | - cur_flags = ttm_bo_select_caching(man, bo->mem.placement, |
---|
929 | | - cur_flags); |
---|
930 | | - /* |
---|
931 | | - * Use the access and other non-mapping-related flag bits from |
---|
932 | | - * the memory placement flags to the current flags |
---|
933 | | - */ |
---|
934 | | - ttm_flag_masked(&cur_flags, place->flags, |
---|
935 | | - ~TTM_PL_MASK_MEMTYPE); |
---|
936 | | - |
---|
937 | | - if (mem_type == TTM_PL_SYSTEM) |
---|
938 | | - break; |
---|
939 | | - |
---|
940 | | - ret = (*man->func->get_node)(man, bo, place, mem); |
---|
| 957 | + ret = ttm_resource_alloc(bo, place, mem); |
---|
| 958 | + if (ret == -ENOSPC) |
---|
| 959 | + continue; |
---|
941 | 960 | if (unlikely(ret)) |
---|
942 | | - return ret; |
---|
| 961 | + goto error; |
---|
943 | 962 | |
---|
944 | | - if (mem->mm_node) { |
---|
945 | | - ret = ttm_bo_add_move_fence(bo, man, mem); |
---|
946 | | - if (unlikely(ret)) { |
---|
947 | | - (*man->func->put_node)(man, mem); |
---|
948 | | - return ret; |
---|
949 | | - } |
---|
950 | | - break; |
---|
| 963 | + man = ttm_manager_type(bdev, mem->mem_type); |
---|
| 964 | + ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); |
---|
| 965 | + if (unlikely(ret)) { |
---|
| 966 | + ttm_resource_free(bo, mem); |
---|
| 967 | + if (ret == -EBUSY) |
---|
| 968 | + continue; |
---|
| 969 | + |
---|
| 970 | + goto error; |
---|
951 | 971 | } |
---|
952 | | - } |
---|
953 | | - |
---|
954 | | - if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) { |
---|
955 | | - mem->mem_type = mem_type; |
---|
956 | | - mem->placement = cur_flags; |
---|
957 | 972 | return 0; |
---|
958 | 973 | } |
---|
959 | 974 | |
---|
960 | 975 | for (i = 0; i < placement->num_busy_placement; ++i) { |
---|
961 | 976 | const struct ttm_place *place = &placement->busy_placement[i]; |
---|
962 | 977 | |
---|
963 | | - ret = ttm_mem_type_from_place(place, &mem_type); |
---|
| 978 | + ret = ttm_bo_mem_placement(bo, place, mem, ctx); |
---|
964 | 979 | if (ret) |
---|
965 | | - return ret; |
---|
966 | | - man = &bdev->man[mem_type]; |
---|
967 | | - if (!man->has_type || !man->use_type) |
---|
968 | | - continue; |
---|
969 | | - if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) |
---|
970 | 980 | continue; |
---|
971 | 981 | |
---|
972 | 982 | type_found = true; |
---|
973 | | - cur_flags = ttm_bo_select_caching(man, bo->mem.placement, |
---|
974 | | - cur_flags); |
---|
975 | | - /* |
---|
976 | | - * Use the access and other non-mapping-related flag bits from |
---|
977 | | - * the memory placement flags to the current flags |
---|
978 | | - */ |
---|
979 | | - ttm_flag_masked(&cur_flags, place->flags, |
---|
980 | | - ~TTM_PL_MASK_MEMTYPE); |
---|
981 | | - |
---|
982 | | - if (mem_type == TTM_PL_SYSTEM) { |
---|
983 | | - mem->mem_type = mem_type; |
---|
984 | | - mem->placement = cur_flags; |
---|
985 | | - mem->mm_node = NULL; |
---|
| 983 | + ret = ttm_bo_mem_force_space(bo, place, mem, ctx); |
---|
| 984 | + if (likely(!ret)) |
---|
986 | 985 | return 0; |
---|
987 | | - } |
---|
988 | 986 | |
---|
989 | | - ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx); |
---|
990 | | - if (ret == 0 && mem->mm_node) { |
---|
991 | | - mem->placement = cur_flags; |
---|
992 | | - return 0; |
---|
993 | | - } |
---|
994 | | - if (ret == -ERESTARTSYS) |
---|
995 | | - has_erestartsys = true; |
---|
| 987 | + if (ret && ret != -EBUSY) |
---|
| 988 | + goto error; |
---|
996 | 989 | } |
---|
997 | 990 | |
---|
| 991 | + ret = -ENOMEM; |
---|
998 | 992 | if (!type_found) { |
---|
999 | 993 | pr_err(TTM_PFX "No compatible memory type found\n"); |
---|
1000 | | - return -EINVAL; |
---|
| 994 | + ret = -EINVAL; |
---|
1001 | 995 | } |
---|
1002 | 996 | |
---|
1003 | | - return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM; |
---|
| 997 | +error: |
---|
| 998 | + if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) { |
---|
| 999 | + ttm_bo_move_to_lru_tail_unlocked(bo); |
---|
| 1000 | + } |
---|
| 1001 | + |
---|
| 1002 | + return ret; |
---|
1004 | 1003 | } |
---|
1005 | 1004 | EXPORT_SYMBOL(ttm_bo_mem_space); |
---|
1006 | 1005 | |
---|
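A hedged sketch of a caller-side placement that exercises the two-pass contract above: the first loop tries `placement` without eviction, the busy loop then forces space via ttm_bo_mem_force_space(). Field values are illustrative:

	struct ttm_place places[] = {
		{ .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_VRAM,
		  .flags = TTM_PL_FLAG_WC },
		{ .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_TT,
		  .flags = TTM_PL_FLAG_CACHED },
	};
	struct ttm_placement placement = {
		.num_placement = 1,
		.placement = &places[0],	/* prefer VRAM */
		.num_busy_placement = 2,
		.busy_placement = places,	/* fall back to GTT under pressure */
	};

	ret = ttm_bo_mem_space(bo, &placement, &mem, &ctx);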
.. | .. |
---|
1009 | 1008 | struct ttm_operation_ctx *ctx) |
---|
1010 | 1009 | { |
---|
1011 | 1010 | int ret = 0; |
---|
1012 | | - struct ttm_mem_reg mem; |
---|
| 1011 | + struct ttm_resource mem; |
---|
1013 | 1012 | |
---|
1014 | | - reservation_object_assert_held(bo->resv); |
---|
| 1013 | + dma_resv_assert_held(bo->base.resv); |
---|
1015 | 1014 | |
---|
1016 | 1015 | mem.num_pages = bo->num_pages; |
---|
1017 | 1016 | mem.size = mem.num_pages << PAGE_SHIFT; |
---|
1018 | 1017 | mem.page_alignment = bo->mem.page_alignment; |
---|
1019 | | - mem.bus.io_reserved_vm = false; |
---|
1020 | | - mem.bus.io_reserved_count = 0; |
---|
| 1018 | + mem.bus.offset = 0; |
---|
| 1019 | + mem.bus.addr = NULL; |
---|
| 1020 | + mem.mm_node = NULL; |
---|
| 1021 | + |
---|
1021 | 1022 | /* |
---|
1022 | 1023 | * Determine where to move the buffer. |
---|
1023 | 1024 | */ |
---|
.. | .. |
---|
1026 | 1027 | goto out_unlock; |
---|
1027 | 1028 | ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx); |
---|
1028 | 1029 | out_unlock: |
---|
1029 | | - if (ret && mem.mm_node) |
---|
1030 | | - ttm_bo_mem_put(bo, &mem); |
---|
| 1030 | + if (ret) |
---|
| 1031 | + ttm_resource_free(bo, &mem); |
---|
1031 | 1032 | return ret; |
---|
1032 | 1033 | } |
---|
1033 | 1034 | |
---|
1034 | 1035 | static bool ttm_bo_places_compat(const struct ttm_place *places, |
---|
1035 | 1036 | unsigned num_placement, |
---|
1036 | | - struct ttm_mem_reg *mem, |
---|
| 1037 | + struct ttm_resource *mem, |
---|
1037 | 1038 | uint32_t *new_flags) |
---|
1038 | 1039 | { |
---|
1039 | 1040 | unsigned i; |
---|
.. | .. |
---|
1041 | 1042 | for (i = 0; i < num_placement; i++) { |
---|
1042 | 1043 | const struct ttm_place *heap = &places[i]; |
---|
1043 | 1044 | |
---|
1044 | | - if (mem->mm_node && (mem->start < heap->fpfn || |
---|
| 1045 | + if ((mem->start < heap->fpfn || |
---|
1045 | 1046 | (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn))) |
---|
1046 | 1047 | continue; |
---|
1047 | 1048 | |
---|
1048 | 1049 | *new_flags = heap->flags; |
---|
1049 | 1050 | if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && |
---|
1050 | | - (*new_flags & mem->placement & TTM_PL_MASK_MEM) && |
---|
| 1051 | + (mem->mem_type == heap->mem_type) && |
---|
1051 | 1052 | (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) || |
---|
1052 | 1053 | (mem->placement & TTM_PL_FLAG_CONTIGUOUS))) |
---|
1053 | 1054 | return true; |
---|
.. | .. |
---|
1056 | 1057 | } |
---|
1057 | 1058 | |
---|
1058 | 1059 | bool ttm_bo_mem_compat(struct ttm_placement *placement, |
---|
1059 | | - struct ttm_mem_reg *mem, |
---|
| 1060 | + struct ttm_resource *mem, |
---|
1060 | 1061 | uint32_t *new_flags) |
---|
1061 | 1062 | { |
---|
1062 | 1063 | if (ttm_bo_places_compat(placement->placement, placement->num_placement, |
---|
.. | .. |
---|
1081 | 1082 | int ret; |
---|
1082 | 1083 | uint32_t new_flags; |
---|
1083 | 1084 | |
---|
1084 | | - reservation_object_assert_held(bo->resv); |
---|
| 1085 | + dma_resv_assert_held(bo->base.resv); |
---|
| 1086 | + |
---|
| 1087 | + /* |
---|
| 1088 | + * Remove the backing store if no placement is given. |
---|
| 1089 | + */ |
---|
| 1090 | + if (!placement->num_placement && !placement->num_busy_placement) { |
---|
| 1091 | + ret = ttm_bo_pipeline_gutting(bo); |
---|
| 1092 | + if (ret) |
---|
| 1093 | + return ret; |
---|
| 1094 | + |
---|
| 1095 | + return ttm_tt_create(bo, false); |
---|
| 1096 | + } |
---|
| 1097 | + |
---|
1085 | 1098 | /* |
---|
1086 | 1099 | * Check whether we need to move buffer. |
---|
1087 | 1100 | */ |
---|
.. | .. |
---|
1090 | 1103 | if (ret) |
---|
1091 | 1104 | return ret; |
---|
1092 | 1105 | } else { |
---|
1093 | | - /* |
---|
1094 | | - * Use the access and other non-mapping-related flag bits from |
---|
1095 | | - * the compatible memory placement flags to the active flags |
---|
1096 | | - */ |
---|
1097 | | - ttm_flag_masked(&bo->mem.placement, new_flags, |
---|
1098 | | - ~TTM_PL_MASK_MEMTYPE); |
---|
| 1106 | + bo->mem.placement &= TTM_PL_MASK_CACHING; |
---|
| 1107 | + bo->mem.placement |= new_flags & ~TTM_PL_MASK_CACHING; |
---|
1099 | 1108 | } |
---|
1100 | 1109 | /* |
---|
1101 | 1110 | * We might need to add a TTM. |
---|
1102 | 1111 | */ |
---|
1103 | | - if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { |
---|
| 1112 | + if (bo->mem.mem_type == TTM_PL_SYSTEM) { |
---|
1104 | 1113 | ret = ttm_tt_create(bo, true); |
---|
1105 | 1114 | if (ret) |
---|
1106 | 1115 | return ret; |
---|
.. | .. |
---|
1118 | 1127 | struct ttm_operation_ctx *ctx, |
---|
1119 | 1128 | size_t acc_size, |
---|
1120 | 1129 | struct sg_table *sg, |
---|
1121 | | - struct reservation_object *resv, |
---|
| 1130 | + struct dma_resv *resv, |
---|
1122 | 1131 | void (*destroy) (struct ttm_buffer_object *)) |
---|
1123 | 1132 | { |
---|
| 1133 | + struct ttm_mem_global *mem_glob = &ttm_mem_glob; |
---|
1124 | 1134 | int ret = 0; |
---|
1125 | 1135 | unsigned long num_pages; |
---|
1126 | | - struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; |
---|
1127 | 1136 | bool locked; |
---|
1128 | 1137 | |
---|
1129 | 1138 | ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx); |
---|
.. | .. |
---|
1149 | 1158 | bo->destroy = destroy ? destroy : ttm_bo_default_destroy; |
---|
1150 | 1159 | |
---|
1151 | 1160 | kref_init(&bo->kref); |
---|
1152 | | - kref_init(&bo->list_kref); |
---|
1153 | | - atomic_set(&bo->cpu_writers, 0); |
---|
1154 | 1161 | INIT_LIST_HEAD(&bo->lru); |
---|
1155 | 1162 | INIT_LIST_HEAD(&bo->ddestroy); |
---|
1156 | 1163 | INIT_LIST_HEAD(&bo->swap); |
---|
1157 | | - INIT_LIST_HEAD(&bo->io_reserve_lru); |
---|
1158 | | - mutex_init(&bo->wu_mutex); |
---|
1159 | 1164 | bo->bdev = bdev; |
---|
1160 | 1165 | bo->type = type; |
---|
1161 | 1166 | bo->num_pages = num_pages; |
---|
.. | .. |
---|
1164 | 1169 | bo->mem.num_pages = bo->num_pages; |
---|
1165 | 1170 | bo->mem.mm_node = NULL; |
---|
1166 | 1171 | bo->mem.page_alignment = page_alignment; |
---|
1167 | | - bo->mem.bus.io_reserved_vm = false; |
---|
1168 | | - bo->mem.bus.io_reserved_count = 0; |
---|
| 1172 | + bo->mem.bus.offset = 0; |
---|
| 1173 | + bo->mem.bus.addr = NULL; |
---|
1169 | 1174 | bo->moving = NULL; |
---|
1170 | | - bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); |
---|
| 1175 | + bo->mem.placement = TTM_PL_FLAG_CACHED; |
---|
1171 | 1176 | bo->acc_size = acc_size; |
---|
1172 | 1177 | bo->sg = sg; |
---|
1173 | 1178 | if (resv) { |
---|
1174 | | - bo->resv = resv; |
---|
1175 | | - reservation_object_assert_held(bo->resv); |
---|
| 1179 | + bo->base.resv = resv; |
---|
| 1180 | + dma_resv_assert_held(bo->base.resv); |
---|
1176 | 1181 | } else { |
---|
1177 | | - bo->resv = &bo->ttm_resv; |
---|
| 1182 | + bo->base.resv = &bo->base._resv; |
---|
1178 | 1183 | } |
---|
1179 | | - reservation_object_init(&bo->ttm_resv); |
---|
1180 | | - atomic_inc(&bo->bdev->glob->bo_count); |
---|
1181 | | - drm_vma_node_reset(&bo->vma_node); |
---|
| 1184 | + if (!ttm_bo_uses_embedded_gem_object(bo)) { |
---|
| 1185 | + /* |
---|
| 1186 | + * bo.gem is not initialized, so we have to set up the |
---|
| 1187 | + * struct elements we want to use regardless. |
---|
| 1188 | + */ |
---|
| 1189 | + dma_resv_init(&bo->base._resv); |
---|
| 1190 | + drm_vma_node_reset(&bo->base.vma_node); |
---|
| 1191 | + } |
---|
| 1192 | + atomic_inc(&ttm_bo_glob.bo_count); |
---|
1182 | 1193 | |
---|
1183 | 1194 | /* |
---|
1184 | 1195 | * For ttm_bo_type_device buffers, allocate |
---|
.. | .. |
---|
1186 | 1197 | */ |
---|
1187 | 1198 | if (bo->type == ttm_bo_type_device || |
---|
1188 | 1199 | bo->type == ttm_bo_type_sg) |
---|
1189 | | - ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node, |
---|
| 1200 | + ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node, |
---|
1190 | 1201 | bo->mem.num_pages); |
---|
1191 | 1202 | |
---|
1192 | 1203 | /* passed reservation objects should already be locked, |
---|
1193 | 1204 | * since otherwise lockdep will be angered in radeon. |
---|
1194 | 1205 | */ |
---|
1195 | 1206 | if (!resv) { |
---|
1196 | | - locked = reservation_object_trylock(bo->resv); |
---|
| 1207 | + locked = dma_resv_trylock(bo->base.resv); |
---|
1197 | 1208 | WARN_ON(!locked); |
---|
1198 | 1209 | } |
---|
1199 | 1210 | |
---|
.. | .. |
---|
1208 | 1219 | return ret; |
---|
1209 | 1220 | } |
---|
1210 | 1221 | |
---|
1211 | | - if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { |
---|
1212 | | - spin_lock(&bdev->glob->lru_lock); |
---|
1213 | | - ttm_bo_add_to_lru(bo); |
---|
1214 | | - spin_unlock(&bdev->glob->lru_lock); |
---|
1215 | | - } |
---|
| 1222 | + ttm_bo_move_to_lru_tail_unlocked(bo); |
---|
1216 | 1223 | |
---|
1217 | 1224 | return ret; |
---|
1218 | 1225 | } |
---|
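
The reservation hand-off in ttm_bo_init_reserved() is asymmetric: an externally supplied dma_resv must already be held (checked via dma_resv_assert_held(), since lockdep would otherwise trip in radeon), while with resv == NULL the BO's embedded base._resv is trylocked, so the object comes back reserved either way. A sketch of the two call patterns with the actual init call elided; my_resv_rules() is hypothetical:

```c
/* Hypothetical illustration of the reservation contract around
 * ttm_bo_init_reserved(). */
static void my_resv_rules(struct ttm_buffer_object *own_bo,
			  struct dma_resv *shared_resv)
{
	/* External resv: lock it around the init call yourself. */
	if (dma_resv_lock(shared_resv, NULL) == 0) {
		/* ... ttm_bo_init_reserved(..., resv = shared_resv, ...) ... */
		dma_resv_unlock(shared_resv);
	}

	/* resv == NULL: the BO uses bo->base._resv and is returned
	 * reserved, so the caller unreserves when it is done. */
	/* ... ttm_bo_init_reserved(..., resv = NULL, ...) ... */
	ttm_bo_unreserve(own_bo);
}
```
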
.. | .. |
---|
1227 | 1234 | bool interruptible, |
---|
1228 | 1235 | size_t acc_size, |
---|
1229 | 1236 | struct sg_table *sg, |
---|
1230 | | - struct reservation_object *resv, |
---|
| 1237 | + struct dma_resv *resv, |
---|
1231 | 1238 | void (*destroy) (struct ttm_buffer_object *)) |
---|
1232 | 1239 | { |
---|
1233 | 1240 | struct ttm_operation_ctx ctx = { interruptible, false }; |
---|
.. | .. |
---|
1246 | 1253 | } |
---|
1247 | 1254 | EXPORT_SYMBOL(ttm_bo_init); |
---|
1248 | 1255 | |
---|
1249 | | -size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, |
---|
1250 | | - unsigned long bo_size, |
---|
1251 | | - unsigned struct_size) |
---|
| 1256 | +static size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, |
---|
| 1257 | + unsigned long bo_size, |
---|
| 1258 | + unsigned struct_size) |
---|
1252 | 1259 | { |
---|
1253 | 1260 | unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; |
---|
1254 | 1261 | size_t size = 0; |
---|
.. | .. |
---|
1258 | 1265 | size += ttm_round_pot(sizeof(struct ttm_tt)); |
---|
1259 | 1266 | return size; |
---|
1260 | 1267 | } |
---|
1261 | | -EXPORT_SYMBOL(ttm_bo_acc_size); |
---|
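
ttm_bo_acc_size() loses its EXPORT_SYMBOL and becomes static here, so drivers that embed a ttm_buffer_object account for it via ttm_bo_dma_acc_size(), which stays exported just below. A sketch under that assumption; struct my_bo and my_bo_acc_size() are hypothetical:

```c
/* Hypothetical embedding driver computing the memory-accounting size
 * it must pass to ttm_bo_init(). */
struct my_bo {
	struct ttm_buffer_object tbo;	/* must come first for container_of */
	/* driver-private state ... */
};

static size_t my_bo_acc_size(struct ttm_bo_device *bdev, unsigned long size)
{
	return ttm_bo_dma_acc_size(bdev, size, sizeof(struct my_bo));
}
```
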
1262 | 1268 | |
---|
1263 | 1269 | size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, |
---|
1264 | 1270 | unsigned long bo_size, |
---|
.. | .. |
---|
1301 | 1307 | } |
---|
1302 | 1308 | EXPORT_SYMBOL(ttm_bo_create); |
---|
1303 | 1309 | |
---|
1304 | | -static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, |
---|
1305 | | - unsigned mem_type) |
---|
1306 | | -{ |
---|
1307 | | - struct ttm_operation_ctx ctx = { |
---|
1308 | | - .interruptible = false, |
---|
1309 | | - .no_wait_gpu = false, |
---|
1310 | | - .flags = TTM_OPT_FLAG_FORCE_ALLOC |
---|
1311 | | - }; |
---|
1312 | | - struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
---|
1313 | | - struct ttm_bo_global *glob = bdev->glob; |
---|
1314 | | - struct dma_fence *fence; |
---|
1315 | | - int ret; |
---|
1316 | | - unsigned i; |
---|
1317 | | - |
---|
1318 | | - /* |
---|
1319 | | - * Can't use standard list traversal since we're unlocking. |
---|
1320 | | - */ |
---|
1321 | | - |
---|
1322 | | - spin_lock(&glob->lru_lock); |
---|
1323 | | - for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { |
---|
1324 | | - while (!list_empty(&man->lru[i])) { |
---|
1325 | | - spin_unlock(&glob->lru_lock); |
---|
1326 | | - ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx); |
---|
1327 | | - if (ret) |
---|
1328 | | - return ret; |
---|
1329 | | - spin_lock(&glob->lru_lock); |
---|
1330 | | - } |
---|
1331 | | - } |
---|
1332 | | - spin_unlock(&glob->lru_lock); |
---|
1333 | | - |
---|
1334 | | - spin_lock(&man->move_lock); |
---|
1335 | | - fence = dma_fence_get(man->move); |
---|
1336 | | - spin_unlock(&man->move_lock); |
---|
1337 | | - |
---|
1338 | | - if (fence) { |
---|
1339 | | - ret = dma_fence_wait(fence, false); |
---|
1340 | | - dma_fence_put(fence); |
---|
1341 | | - if (ret) |
---|
1342 | | - return ret; |
---|
1343 | | - } |
---|
1344 | | - |
---|
1345 | | - return 0; |
---|
1346 | | -} |
---|
1347 | | - |
---|
1348 | | -int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) |
---|
1349 | | -{ |
---|
1350 | | - struct ttm_mem_type_manager *man; |
---|
1351 | | - int ret = -EINVAL; |
---|
1352 | | - |
---|
1353 | | - if (mem_type >= TTM_NUM_MEM_TYPES) { |
---|
1354 | | - pr_err("Illegal memory type %d\n", mem_type); |
---|
1355 | | - return ret; |
---|
1356 | | - } |
---|
1357 | | - man = &bdev->man[mem_type]; |
---|
1358 | | - |
---|
1359 | | - if (!man->has_type) { |
---|
1360 | | - pr_err("Trying to take down uninitialized memory manager type %u\n", |
---|
1361 | | - mem_type); |
---|
1362 | | - return ret; |
---|
1363 | | - } |
---|
1364 | | - |
---|
1365 | | - man->use_type = false; |
---|
1366 | | - man->has_type = false; |
---|
1367 | | - |
---|
1368 | | - ret = 0; |
---|
1369 | | - if (mem_type > 0) { |
---|
1370 | | - ret = ttm_bo_force_list_clean(bdev, mem_type); |
---|
1371 | | - if (ret) { |
---|
1372 | | - pr_err("Cleanup eviction failed\n"); |
---|
1373 | | - return ret; |
---|
1374 | | - } |
---|
1375 | | - |
---|
1376 | | - ret = (*man->func->takedown)(man); |
---|
1377 | | - } |
---|
1378 | | - |
---|
1379 | | - dma_fence_put(man->move); |
---|
1380 | | - man->move = NULL; |
---|
1381 | | - |
---|
1382 | | - return ret; |
---|
1383 | | -} |
---|
1384 | | -EXPORT_SYMBOL(ttm_bo_clean_mm); |
---|
1385 | | - |
---|
1386 | 1310 | int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) |
---|
1387 | 1311 | { |
---|
1388 | | - struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
---|
| 1312 | + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type); |
---|
1389 | 1313 | |
---|
1390 | 1314 | if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { |
---|
1391 | 1315 | pr_err("Illegal memory manager memory type %u\n", mem_type); |
---|
1392 | 1316 | return -EINVAL; |
---|
1393 | 1317 | } |
---|
1394 | 1318 | |
---|
1395 | | - if (!man->has_type) { |
---|
| 1319 | + if (!man) { |
---|
1396 | 1320 | pr_err("Memory type %u has not been initialized\n", mem_type); |
---|
1397 | 1321 | return 0; |
---|
1398 | 1322 | } |
---|
1399 | 1323 | |
---|
1400 | | - return ttm_bo_force_list_clean(bdev, mem_type); |
---|
| 1324 | + return ttm_resource_manager_force_list_clean(bdev, man); |
---|
1401 | 1325 | } |
---|
1402 | 1326 | EXPORT_SYMBOL(ttm_bo_evict_mm); |
---|
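
ttm_bo_evict_mm() now resolves the manager via ttm_manager_type() and hands the heavy lifting to ttm_resource_manager_force_list_clean(). The classic caller is a driver suspend path that empties device-local memory; a sketch, with my_dev_suspend() hypothetical:

```c
/* Hypothetical suspend helper: move every buffer out of VRAM so the
 * device can power down; the caller decides how to handle failure. */
static int my_dev_suspend(struct ttm_bo_device *bdev)
{
	return ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
}
```
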
1403 | | - |
---|
1404 | | -int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, |
---|
1405 | | - unsigned long p_size) |
---|
1406 | | -{ |
---|
1407 | | - int ret; |
---|
1408 | | - struct ttm_mem_type_manager *man; |
---|
1409 | | - unsigned i; |
---|
1410 | | - |
---|
1411 | | - BUG_ON(type >= TTM_NUM_MEM_TYPES); |
---|
1412 | | - man = &bdev->man[type]; |
---|
1413 | | - BUG_ON(man->has_type); |
---|
1414 | | - man->io_reserve_fastpath = true; |
---|
1415 | | - man->use_io_reserve_lru = false; |
---|
1416 | | - mutex_init(&man->io_reserve_mutex); |
---|
1417 | | - spin_lock_init(&man->move_lock); |
---|
1418 | | - INIT_LIST_HEAD(&man->io_reserve_lru); |
---|
1419 | | - |
---|
1420 | | - ret = bdev->driver->init_mem_type(bdev, type, man); |
---|
1421 | | - if (ret) |
---|
1422 | | - return ret; |
---|
1423 | | - man->bdev = bdev; |
---|
1424 | | - |
---|
1425 | | - if (type != TTM_PL_SYSTEM) { |
---|
1426 | | - ret = (*man->func->init)(man, p_size); |
---|
1427 | | - if (ret) |
---|
1428 | | - return ret; |
---|
1429 | | - } |
---|
1430 | | - man->has_type = true; |
---|
1431 | | - man->use_type = true; |
---|
1432 | | - man->size = p_size; |
---|
1433 | | - |
---|
1434 | | - for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) |
---|
1435 | | - INIT_LIST_HEAD(&man->lru[i]); |
---|
1436 | | - man->move = NULL; |
---|
1437 | | - |
---|
1438 | | - return 0; |
---|
1439 | | -} |
---|
1440 | | -EXPORT_SYMBOL(ttm_bo_init_mm); |
---|
1441 | 1327 | |
---|
1442 | 1328 | static void ttm_bo_global_kobj_release(struct kobject *kobj) |
---|
1443 | 1329 | { |
---|
.. | .. |
---|
1447 | 1333 | __free_page(glob->dummy_read_page); |
---|
1448 | 1334 | } |
---|
1449 | 1335 | |
---|
1450 | | -void ttm_bo_global_release(struct drm_global_reference *ref) |
---|
| 1336 | +static void ttm_bo_global_release(void) |
---|
1451 | 1337 | { |
---|
1452 | | - struct ttm_bo_global *glob = ref->object; |
---|
| 1338 | + struct ttm_bo_global *glob = &ttm_bo_glob; |
---|
| 1339 | + |
---|
| 1340 | + mutex_lock(&ttm_global_mutex); |
---|
| 1341 | + if (--ttm_bo_glob_use_count > 0) |
---|
| 1342 | + goto out; |
---|
1453 | 1343 | |
---|
1454 | 1344 | kobject_del(&glob->kobj); |
---|
1455 | 1345 | kobject_put(&glob->kobj); |
---|
| 1346 | + ttm_mem_global_release(&ttm_mem_glob); |
---|
| 1347 | + memset(glob, 0, sizeof(*glob)); |
---|
| 1348 | +out: |
---|
| 1349 | + mutex_unlock(&ttm_global_mutex); |
---|
1456 | 1350 | } |
---|
1457 | | -EXPORT_SYMBOL(ttm_bo_global_release); |
---|
1458 | 1351 | |
---|
1459 | | -int ttm_bo_global_init(struct drm_global_reference *ref) |
---|
| 1352 | +static int ttm_bo_global_init(void) |
---|
1460 | 1353 | { |
---|
1461 | | - struct ttm_bo_global_ref *bo_ref = |
---|
1462 | | - container_of(ref, struct ttm_bo_global_ref, ref); |
---|
1463 | | - struct ttm_bo_global *glob = ref->object; |
---|
1464 | | - int ret; |
---|
| 1354 | + struct ttm_bo_global *glob = &ttm_bo_glob; |
---|
| 1355 | + int ret = 0; |
---|
1465 | 1356 | unsigned i; |
---|
1466 | 1357 | |
---|
1467 | | - mutex_init(&glob->device_list_mutex); |
---|
| 1358 | + mutex_lock(&ttm_global_mutex); |
---|
| 1359 | + if (++ttm_bo_glob_use_count > 1) |
---|
| 1360 | + goto out; |
---|
| 1361 | + |
---|
| 1362 | + ret = ttm_mem_global_init(&ttm_mem_glob); |
---|
| 1363 | + if (ret) |
---|
| 1364 | + goto out; |
---|
| 1365 | + |
---|
1468 | 1366 | spin_lock_init(&glob->lru_lock); |
---|
1469 | | - glob->mem_glob = bo_ref->mem_glob; |
---|
1470 | | - glob->mem_glob->bo_glob = glob; |
---|
1471 | 1367 | glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); |
---|
1472 | 1368 | |
---|
1473 | 1369 | if (unlikely(glob->dummy_read_page == NULL)) { |
---|
1474 | 1370 | ret = -ENOMEM; |
---|
1475 | | - goto out_no_drp; |
---|
| 1371 | + goto out; |
---|
1476 | 1372 | } |
---|
1477 | 1373 | |
---|
1478 | 1374 | for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) |
---|
.. | .. |
---|
1484 | 1380 | &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects"); |
---|
1485 | 1381 | if (unlikely(ret != 0)) |
---|
1486 | 1382 | kobject_put(&glob->kobj); |
---|
1487 | | - return ret; |
---|
1488 | | -out_no_drp: |
---|
1489 | | - kfree(glob); |
---|
| 1383 | +out: |
---|
| 1384 | + mutex_unlock(&ttm_global_mutex); |
---|
1490 | 1385 | return ret; |
---|
1491 | 1386 | } |
---|
1492 | | -EXPORT_SYMBOL(ttm_bo_global_init); |
---|
1493 | | - |
---|
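
ttm_bo_global_init()/ttm_bo_global_release() stop being exported and instead form a refcounted singleton guarded by ttm_global_mutex, acquired and dropped from ttm_bo_device_init()/ttm_bo_device_release() below, so the drm_global_reference plumbing in drivers goes away. A generic sketch of that pattern (all names hypothetical):

```c
/* Hypothetical refcounted singleton: first user initializes the
 * shared state, last user tears it down, all under one mutex. */
static DEFINE_MUTEX(my_global_mutex);
static unsigned int my_global_use_count;

static int my_global_get(void)
{
	int ret = 0;

	mutex_lock(&my_global_mutex);
	if (++my_global_use_count == 1) {
		/* first user: initialize the shared state here */
	}
	mutex_unlock(&my_global_mutex);
	return ret;
}

static void my_global_put(void)
{
	mutex_lock(&my_global_mutex);
	if (--my_global_use_count == 0) {
		/* last user: tear the shared state down here */
	}
	mutex_unlock(&my_global_mutex);
}
```
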
1494 | 1387 | |
---|
1495 | 1388 | int ttm_bo_device_release(struct ttm_bo_device *bdev) |
---|
1496 | 1389 | { |
---|
| 1390 | + struct ttm_bo_global *glob = &ttm_bo_glob; |
---|
1497 | 1391 | int ret = 0; |
---|
1498 | | - unsigned i = TTM_NUM_MEM_TYPES; |
---|
1499 | | - struct ttm_mem_type_manager *man; |
---|
1500 | | - struct ttm_bo_global *glob = bdev->glob; |
---|
| 1392 | + unsigned i; |
---|
| 1393 | + struct ttm_resource_manager *man; |
---|
1501 | 1394 | |
---|
1502 | | - while (i--) { |
---|
1503 | | - man = &bdev->man[i]; |
---|
1504 | | - if (man->has_type) { |
---|
1505 | | - man->use_type = false; |
---|
1506 | | - if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) { |
---|
1507 | | - ret = -EBUSY; |
---|
1508 | | - pr_err("DRM memory manager type %d is not clean\n", |
---|
1509 | | - i); |
---|
1510 | | - } |
---|
1511 | | - man->has_type = false; |
---|
1512 | | - } |
---|
1513 | | - } |
---|
| 1395 | + man = ttm_manager_type(bdev, TTM_PL_SYSTEM); |
---|
| 1396 | + ttm_resource_manager_set_used(man, false); |
---|
| 1397 | + ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL); |
---|
1514 | 1398 | |
---|
1515 | | - mutex_lock(&glob->device_list_mutex); |
---|
| 1399 | + mutex_lock(&ttm_global_mutex); |
---|
1516 | 1400 | list_del(&bdev->device_list); |
---|
1517 | | - mutex_unlock(&glob->device_list_mutex); |
---|
| 1401 | + mutex_unlock(&ttm_global_mutex); |
---|
1518 | 1402 | |
---|
1519 | 1403 | cancel_delayed_work_sync(&bdev->wq); |
---|
1520 | 1404 | |
---|
.. | .. |
---|
1523 | 1407 | |
---|
1524 | 1408 | spin_lock(&glob->lru_lock); |
---|
1525 | 1409 | for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) |
---|
1526 | | - if (list_empty(&bdev->man[0].lru[0])) |
---|
| 1410 | + if (list_empty(&man->lru[0])) |
---|
1527 | 1411 | pr_debug("Swap list %d was clean\n", i); |
---|
1528 | 1412 | spin_unlock(&glob->lru_lock); |
---|
1529 | 1413 | |
---|
1530 | | - drm_vma_offset_manager_destroy(&bdev->vma_manager); |
---|
| 1414 | + if (!ret) |
---|
| 1415 | + ttm_bo_global_release(); |
---|
1531 | 1416 | |
---|
1532 | 1417 | return ret; |
---|
1533 | 1418 | } |
---|
1534 | 1419 | EXPORT_SYMBOL(ttm_bo_device_release); |
---|
1535 | 1420 | |
---|
1536 | | -int ttm_bo_device_init(struct ttm_bo_device *bdev, |
---|
1537 | | - struct ttm_bo_global *glob, |
---|
1538 | | - struct ttm_bo_driver *driver, |
---|
1539 | | - struct address_space *mapping, |
---|
1540 | | - uint64_t file_page_offset, |
---|
1541 | | - bool need_dma32) |
---|
| 1421 | +static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) |
---|
1542 | 1422 | { |
---|
1543 | | - int ret = -EINVAL; |
---|
1544 | | - |
---|
1545 | | - bdev->driver = driver; |
---|
1546 | | - |
---|
1547 | | - memset(bdev->man, 0, sizeof(bdev->man)); |
---|
| 1423 | + struct ttm_resource_manager *man = &bdev->sysman; |
---|
1548 | 1424 | |
---|
1549 | 1425 | /* |
---|
1550 | 1426 | * Initialize the system memory buffer type. |
---|
1551 | 1427 | * Other types need to be driver / IOCTL initialized. |
---|
1552 | 1428 | */ |
---|
1553 | | - ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0); |
---|
1554 | | - if (unlikely(ret != 0)) |
---|
1555 | | - goto out_no_sys; |
---|
| 1429 | + man->use_tt = true; |
---|
1556 | 1430 | |
---|
1557 | | - drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset, |
---|
1558 | | - 0x10000000); |
---|
| 1431 | + ttm_resource_manager_init(man, 0); |
---|
| 1432 | + ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man); |
---|
| 1433 | + ttm_resource_manager_set_used(man, true); |
---|
| 1434 | +} |
---|
| 1435 | + |
---|
| 1436 | +int ttm_bo_device_init(struct ttm_bo_device *bdev, |
---|
| 1437 | + struct ttm_bo_driver *driver, |
---|
| 1438 | + struct address_space *mapping, |
---|
| 1439 | + struct drm_vma_offset_manager *vma_manager, |
---|
| 1440 | + bool need_dma32) |
---|
| 1441 | +{ |
---|
| 1442 | + struct ttm_bo_global *glob = &ttm_bo_glob; |
---|
| 1443 | + int ret; |
---|
| 1444 | + |
---|
| 1445 | + if (WARN_ON(vma_manager == NULL)) |
---|
| 1446 | + return -EINVAL; |
---|
| 1447 | + |
---|
| 1448 | + ret = ttm_bo_global_init(); |
---|
| 1449 | + if (ret) |
---|
| 1450 | + return ret; |
---|
| 1451 | + |
---|
| 1452 | + bdev->driver = driver; |
---|
| 1453 | + |
---|
| 1454 | + ttm_bo_init_sysman(bdev); |
---|
| 1455 | + |
---|
| 1456 | + bdev->vma_manager = vma_manager; |
---|
1559 | 1457 | INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); |
---|
1560 | 1458 | INIT_LIST_HEAD(&bdev->ddestroy); |
---|
1561 | 1459 | bdev->dev_mapping = mapping; |
---|
1562 | | - bdev->glob = glob; |
---|
1563 | 1460 | bdev->need_dma32 = need_dma32; |
---|
1564 | | - mutex_lock(&glob->device_list_mutex); |
---|
| 1461 | + mutex_lock(&ttm_global_mutex); |
---|
1565 | 1462 | list_add_tail(&bdev->device_list, &glob->device_list); |
---|
1566 | | - mutex_unlock(&glob->device_list_mutex); |
---|
| 1463 | + mutex_unlock(&ttm_global_mutex); |
---|
1567 | 1464 | |
---|
1568 | 1465 | return 0; |
---|
1569 | | -out_no_sys: |
---|
1570 | | - return ret; |
---|
1571 | 1466 | } |
---|
1572 | 1467 | EXPORT_SYMBOL(ttm_bo_device_init); |
---|
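
Two things change at the ttm_bo_device_init() boundary: the global-state argument disappears (the refcounted ttm_bo_global_init() is called internally), and the drm_vma_offset_manager is passed in, and owned, by the driver rather than living inside the device. A sketch of the updated call site, assuming the stock DRM_FILE_PAGE_OFFSET_* constants; struct my_dev, my_bo_driver and my_dev_init() are hypothetical:

```c
static struct ttm_bo_driver my_bo_driver;	/* hypothetical, filled in elsewhere */

struct my_dev {
	struct ttm_bo_device bdev;
	struct drm_vma_offset_manager vma_manager;
};

static int my_dev_init(struct my_dev *mdev, struct address_space *mapping)
{
	/* The driver owns the offset manager now and must clean it up
	 * itself; ttm_bo_device_release() no longer destroys it. */
	drm_vma_offset_manager_init(&mdev->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);
	return ttm_bo_device_init(&mdev->bdev, &my_bo_driver, mapping,
				  &mdev->vma_manager, false);
}
```
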
1573 | 1468 | |
---|
.. | .. |
---|
1575 | 1470 | * buffer object vm functions. |
---|
1576 | 1471 | */ |
---|
1577 | 1472 | |
---|
1578 | | -bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
---|
1579 | | -{ |
---|
1580 | | - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; |
---|
1581 | | - |
---|
1582 | | - if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) { |
---|
1583 | | - if (mem->mem_type == TTM_PL_SYSTEM) |
---|
1584 | | - return false; |
---|
1585 | | - |
---|
1586 | | - if (man->flags & TTM_MEMTYPE_FLAG_CMA) |
---|
1587 | | - return false; |
---|
1588 | | - |
---|
1589 | | - if (mem->placement & TTM_PL_FLAG_CACHED) |
---|
1590 | | - return false; |
---|
1591 | | - } |
---|
1592 | | - return true; |
---|
1593 | | -} |
---|
1594 | | - |
---|
1595 | | -void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) |
---|
1596 | | -{ |
---|
1597 | | - struct ttm_bo_device *bdev = bo->bdev; |
---|
1598 | | - |
---|
1599 | | - drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping); |
---|
1600 | | - ttm_mem_io_free_vm(bo); |
---|
1601 | | -} |
---|
1602 | | - |
---|
1603 | 1473 | void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) |
---|
1604 | 1474 | { |
---|
1605 | 1475 | struct ttm_bo_device *bdev = bo->bdev; |
---|
1606 | | - struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; |
---|
1607 | 1476 | |
---|
1608 | | - ttm_mem_io_lock(man, false); |
---|
1609 | | - ttm_bo_unmap_virtual_locked(bo); |
---|
1610 | | - ttm_mem_io_unlock(man); |
---|
| 1477 | + drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); |
---|
| 1478 | + ttm_mem_io_free(bdev, &bo->mem); |
---|
1611 | 1479 | } |
---|
1612 | | - |
---|
1613 | | - |
---|
1614 | 1480 | EXPORT_SYMBOL(ttm_bo_unmap_virtual); |
---|
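
ttm_bo_unmap_virtual() loses the io_reserve locking dance: it now simply shoots down any CPU mappings through drm_vma_node_unmap() and frees the I/O region. Drivers call it whenever the backing pages are about to change under userspace; a sketch against the move_notify() callback of this era, with my_move_notify() hypothetical:

```c
/* Hypothetical driver callback: invalidate userspace CPU mappings
 * before the buffer changes physical location. */
static void my_move_notify(struct ttm_buffer_object *bo, bool evict,
			   struct ttm_resource *new_mem)
{
	ttm_bo_unmap_virtual(bo);
	/* driver-side tracking of new_mem would go here */
}
```
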
1615 | 1481 | |
---|
1616 | 1482 | int ttm_bo_wait(struct ttm_buffer_object *bo, |
---|
.. | .. |
---|
1619 | 1485 | long timeout = 15 * HZ; |
---|
1620 | 1486 | |
---|
1621 | 1487 | if (no_wait) { |
---|
1622 | | - if (reservation_object_test_signaled_rcu(bo->resv, true)) |
---|
| 1488 | + if (dma_resv_test_signaled_rcu(bo->base.resv, true)) |
---|
1623 | 1489 | return 0; |
---|
1624 | 1490 | else |
---|
1625 | 1491 | return -EBUSY; |
---|
1626 | 1492 | } |
---|
1627 | 1493 | |
---|
1628 | | - timeout = reservation_object_wait_timeout_rcu(bo->resv, true, |
---|
| 1494 | + timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true, |
---|
1629 | 1495 | interruptible, timeout); |
---|
1630 | 1496 | if (timeout < 0) |
---|
1631 | 1497 | return timeout; |
---|
.. | .. |
---|
1633 | 1499 | if (timeout == 0) |
---|
1634 | 1500 | return -EBUSY; |
---|
1635 | 1501 | |
---|
1636 | | - reservation_object_add_excl_fence(bo->resv, NULL); |
---|
| 1502 | + dma_resv_add_excl_fence(bo->base.resv, NULL); |
---|
1637 | 1503 | return 0; |
---|
1638 | 1504 | } |
---|
1639 | 1505 | EXPORT_SYMBOL(ttm_bo_wait); |
---|
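
ttm_bo_wait() is now a thin wrapper over the dma_resv fence machinery: with no_wait it only polls dma_resv_test_signaled_rcu(), otherwise it blocks in dma_resv_wait_timeout_rcu() for up to the internal 15 s timeout; both pass wait_all = true, so shared fences are included. A sketch of the two modes; my_bo_idle() is hypothetical:

```c
/* Hypothetical helper: cheap poll first, then interruptible wait. */
static int my_bo_idle(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_wait(bo, false, true);	/* poll: -EBUSY if busy */
	if (ret != -EBUSY)
		return ret;

	return ttm_bo_wait(bo, true, false);	/* block, interruptible */
}
```
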
1640 | | - |
---|
1641 | | -int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) |
---|
1642 | | -{ |
---|
1643 | | - int ret = 0; |
---|
1644 | | - |
---|
1645 | | - /* |
---|
1646 | | - * Using ttm_bo_reserve makes sure the lru lists are updated. |
---|
1647 | | - */ |
---|
1648 | | - |
---|
1649 | | - ret = ttm_bo_reserve(bo, true, no_wait, NULL); |
---|
1650 | | - if (unlikely(ret != 0)) |
---|
1651 | | - return ret; |
---|
1652 | | - ret = ttm_bo_wait(bo, true, no_wait); |
---|
1653 | | - if (likely(ret == 0)) |
---|
1654 | | - atomic_inc(&bo->cpu_writers); |
---|
1655 | | - ttm_bo_unreserve(bo); |
---|
1656 | | - return ret; |
---|
1657 | | -} |
---|
1658 | | -EXPORT_SYMBOL(ttm_bo_synccpu_write_grab); |
---|
1659 | | - |
---|
1660 | | -void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) |
---|
1661 | | -{ |
---|
1662 | | - atomic_dec(&bo->cpu_writers); |
---|
1663 | | -} |
---|
1664 | | -EXPORT_SYMBOL(ttm_bo_synccpu_write_release); |
---|
1665 | 1506 | |
---|
1666 | 1507 | /** |
---|
1667 | 1508 | * A buffer object shrink method that tries to swap out the first |
---|
.. | .. |
---|
1677 | 1518 | spin_lock(&glob->lru_lock); |
---|
1678 | 1519 | for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { |
---|
1679 | 1520 | list_for_each_entry(bo, &glob->swap_lru[i], swap) { |
---|
1680 | | - if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) { |
---|
1681 | | - ret = 0; |
---|
1682 | | - break; |
---|
| 1521 | + if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, |
---|
| 1522 | + NULL)) |
---|
| 1523 | + continue; |
---|
| 1524 | + |
---|
| 1525 | + if (!ttm_bo_get_unless_zero(bo)) { |
---|
| 1526 | + if (locked) |
---|
| 1527 | + dma_resv_unlock(bo->base.resv); |
---|
| 1528 | + continue; |
---|
1683 | 1529 | } |
---|
| 1530 | + |
---|
| 1531 | + ret = 0; |
---|
| 1532 | + break; |
---|
1684 | 1533 | } |
---|
1685 | 1534 | if (!ret) |
---|
1686 | 1535 | break; |
---|
.. | .. |
---|
1691 | 1540 | return ret; |
---|
1692 | 1541 | } |
---|
1693 | 1542 | |
---|
1694 | | - kref_get(&bo->list_kref); |
---|
1695 | | - |
---|
1696 | | - if (!list_empty(&bo->ddestroy)) { |
---|
| 1543 | + if (bo->deleted) { |
---|
1697 | 1544 | ret = ttm_bo_cleanup_refs(bo, false, false, locked); |
---|
1698 | | - kref_put(&bo->list_kref, ttm_bo_release_list); |
---|
| 1545 | + ttm_bo_put(bo); |
---|
1699 | 1546 | return ret; |
---|
1700 | 1547 | } |
---|
1701 | 1548 | |
---|
.. | .. |
---|
1709 | 1556 | if (bo->mem.mem_type != TTM_PL_SYSTEM || |
---|
1710 | 1557 | bo->ttm->caching_state != tt_cached) { |
---|
1711 | 1558 | struct ttm_operation_ctx ctx = { false, false }; |
---|
1712 | | - struct ttm_mem_reg evict_mem; |
---|
| 1559 | + struct ttm_resource evict_mem; |
---|
1713 | 1560 | |
---|
1714 | 1561 | evict_mem = bo->mem; |
---|
1715 | 1562 | evict_mem.mm_node = NULL; |
---|
1716 | | - evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; |
---|
| 1563 | + evict_mem.placement = TTM_PL_FLAG_CACHED; |
---|
1717 | 1564 | evict_mem.mem_type = TTM_PL_SYSTEM; |
---|
1718 | 1565 | |
---|
1719 | 1566 | ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx); |
---|
.. | .. |
---|
1739 | 1586 | if (bo->bdev->driver->swap_notify) |
---|
1740 | 1587 | bo->bdev->driver->swap_notify(bo); |
---|
1741 | 1588 | |
---|
1742 | | - ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage); |
---|
| 1589 | + ret = ttm_tt_swapout(bo->bdev, bo->ttm, bo->persistent_swap_storage); |
---|
1743 | 1590 | out: |
---|
1744 | 1591 | |
---|
1745 | 1592 | /** |
---|
.. | .. |
---|
1748 | 1595 | * already swapped buffer. |
---|
1749 | 1596 | */ |
---|
1750 | 1597 | if (locked) |
---|
1751 | | - reservation_object_unlock(bo->resv); |
---|
1752 | | - kref_put(&bo->list_kref, ttm_bo_release_list); |
---|
| 1598 | + dma_resv_unlock(bo->base.resv); |
---|
| 1599 | + ttm_bo_put(bo); |
---|
1753 | 1600 | return ret; |
---|
1754 | 1601 | } |
---|
1755 | 1602 | EXPORT_SYMBOL(ttm_bo_swapout); |
---|
1756 | 1603 | |
---|
1757 | | -void ttm_bo_swapout_all(struct ttm_bo_device *bdev) |
---|
| 1604 | +void ttm_bo_swapout_all(void) |
---|
1758 | 1605 | { |
---|
1759 | 1606 | struct ttm_operation_ctx ctx = { |
---|
1760 | 1607 | .interruptible = false, |
---|
1761 | 1608 | .no_wait_gpu = false |
---|
1762 | 1609 | }; |
---|
1763 | 1610 | |
---|
1764 | | - while (ttm_bo_swapout(bdev->glob, &ctx) == 0) |
---|
1765 | | - ; |
---|
| 1611 | + while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0); |
---|
1766 | 1612 | } |
---|
1767 | 1613 | EXPORT_SYMBOL(ttm_bo_swapout_all); |
---|
1768 | 1614 | |
---|
1769 | | -/** |
---|
1770 | | - * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become |
---|
1771 | | - * unreserved |
---|
1772 | | - * |
---|
1773 | | - * @bo: Pointer to buffer |
---|
1774 | | - */ |
---|
1775 | | -int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo) |
---|
| 1615 | +void ttm_bo_tt_destroy(struct ttm_buffer_object *bo) |
---|
1776 | 1616 | { |
---|
1777 | | - int ret; |
---|
| 1617 | + if (bo->ttm == NULL) |
---|
| 1618 | + return; |
---|
1778 | 1619 | |
---|
1779 | | - /* |
---|
1780 | | - * In the absense of a wait_unlocked API, |
---|
1781 | | - * Use the bo::wu_mutex to avoid triggering livelocks due to |
---|
1782 | | - * concurrent use of this function. Note that this use of |
---|
1783 | | - * bo::wu_mutex can go away if we change locking order to |
---|
1784 | | - * mmap_sem -> bo::reserve. |
---|
1785 | | - */ |
---|
1786 | | - ret = mutex_lock_interruptible(&bo->wu_mutex); |
---|
1787 | | - if (unlikely(ret != 0)) |
---|
1788 | | - return -ERESTARTSYS; |
---|
1789 | | - if (!ww_mutex_is_locked(&bo->resv->lock)) |
---|
1790 | | - goto out_unlock; |
---|
1791 | | - ret = reservation_object_lock_interruptible(bo->resv, NULL); |
---|
1792 | | - if (ret == -EINTR) |
---|
1793 | | - ret = -ERESTARTSYS; |
---|
1794 | | - if (unlikely(ret != 0)) |
---|
1795 | | - goto out_unlock; |
---|
1796 | | - reservation_object_unlock(bo->resv); |
---|
| 1620 | + ttm_tt_destroy(bo->bdev, bo->ttm); |
---|
| 1621 | + bo->ttm = NULL; |
---|
| 1622 | +} |
---|
1797 | 1623 | |
---|
1798 | | -out_unlock: |
---|
1799 | | - mutex_unlock(&bo->wu_mutex); |
---|
1800 | | - return ret; |
---|
| 1624 | +int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem) |
---|
| 1625 | +{ |
---|
| 1626 | + return bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem); |
---|
| 1627 | +} |
---|
| 1628 | + |
---|
| 1629 | +void ttm_bo_tt_unbind(struct ttm_buffer_object *bo) |
---|
| 1630 | +{ |
---|
| 1631 | + bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm); |
---|
1801 | 1632 | } |
---|
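
The new ttm_bo_tt_bind()/ttm_bo_tt_unbind() helpers forward to ttm_tt_bind/ttm_tt_unbind hooks in struct ttm_bo_driver, moving GART-style binding out of the old per-TTM backend. A sketch of the driver side, with my_ttm_bind()/my_ttm_unbind() hypothetical:

```c
/* Hypothetical driver hooks behind the new helpers. */
static int my_ttm_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
		       struct ttm_resource *mem)
{
	/* program the GPU's page tables / GART aperture for "mem" */
	return 0;
}

static void my_ttm_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	/* tear the mapping down again */
}

static struct ttm_bo_driver my_bo_driver = {
	/* ... other callbacks ... */
	.ttm_tt_bind = my_ttm_bind,
	.ttm_tt_unbind = my_ttm_unbind,
};
```
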