| old | new | code |
|---|---|---|
| .. | .. | |
| 39 | 39 | list_for_each_entry_continue_reverse(entry, list, head) { |
| 40 | 40 | struct ttm_buffer_object *bo = entry->bo; |
| 41 | 41 | |
| 42 | | - reservation_object_unlock(bo->resv); |
| 43 | | - } |
| 44 | | -} |
| 45 | | - |
| 46 | | -static void ttm_eu_del_from_lru_locked(struct list_head *list) |
| 47 | | -{ |
| 48 | | - struct ttm_validate_buffer *entry; |
| 49 | | - |
| 50 | | - list_for_each_entry(entry, list, head) { |
| 51 | | - struct ttm_buffer_object *bo = entry->bo; |
| 52 | | - ttm_bo_del_from_lru(bo); |
| | 42 | + dma_resv_unlock(bo->base.resv); |
| 53 | 43 | } |
| 54 | 44 | } |
| 55 | 45 | |
| .. | .. | |
| 57 | 47 | struct list_head *list) |
| 58 | 48 | { |
| 59 | 49 | struct ttm_validate_buffer *entry; |
| 60 | | - struct ttm_bo_global *glob; |
| 61 | 50 | |
| 62 | 51 | if (list_empty(list)) |
| 63 | 52 | return; |
| 64 | 53 | |
| 65 | | - entry = list_first_entry(list, struct ttm_validate_buffer, head); |
| 66 | | - glob = entry->bo->bdev->glob; |
| 67 | | - |
| 68 | | - spin_lock(&glob->lru_lock); |
| | 54 | + spin_lock(&ttm_bo_glob.lru_lock); |
| 69 | 55 | list_for_each_entry(entry, list, head) { |
| 70 | 56 | struct ttm_buffer_object *bo = entry->bo; |
| 71 | 57 | |
| 72 | | - ttm_bo_add_to_lru(bo); |
| 73 | | - reservation_object_unlock(bo->resv); |
| | 58 | + ttm_bo_move_to_lru_tail(bo, NULL); |
| | 59 | + dma_resv_unlock(bo->base.resv); |
| 74 | 60 | } |
| 75 | | - spin_unlock(&glob->lru_lock); |
| | 61 | + spin_unlock(&ttm_bo_glob.lru_lock); |
| 76 | 62 | |
| 77 | 63 | if (ticket) |
| 78 | 64 | ww_acquire_fini(ticket); |
| .. | .. | |
| 95 | 81 | struct list_head *list, bool intr, |
| 96 | 82 | struct list_head *dups) |
| 97 | 83 | { |
| 98 | | - struct ttm_bo_global *glob; |
| 99 | 84 | struct ttm_validate_buffer *entry; |
| 100 | 85 | int ret; |
| 101 | 86 | |
| 102 | 87 | if (list_empty(list)) |
| 103 | 88 | return 0; |
| 104 | | - |
| 105 | | - entry = list_first_entry(list, struct ttm_validate_buffer, head); |
| 106 | | - glob = entry->bo->bdev->glob; |
| 107 | 89 | |
| 108 | 90 | if (ticket) |
| 109 | 91 | ww_acquire_init(ticket, &reservation_ww_class); |
| .. | .. | |
| 111 | 93 | list_for_each_entry(entry, list, head) { |
| 112 | 94 | struct ttm_buffer_object *bo = entry->bo; |
| 113 | 95 | |
| 114 | | - ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); |
| 115 | | - if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) { |
| 116 | | - reservation_object_unlock(bo->resv); |
| 117 | | - |
| 118 | | - ret = -EBUSY; |
| 119 | | - |
| 120 | | - } else if (ret == -EALREADY && dups) { |
| | 96 | + ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); |
| | 97 | + if (ret == -EALREADY && dups) { |
| 121 | 98 | struct ttm_validate_buffer *safe = entry; |
| 122 | 99 | entry = list_prev_entry(entry, head); |
| 123 | 100 | list_del(&safe->head); |
| .. | .. | |
| 126 | 103 | } |
| 127 | 104 | |
| 128 | 105 | if (!ret) { |
| 129 | | - if (!entry->shared) |
| | 106 | + if (!entry->num_shared) |
| 130 | 107 | continue; |
| 131 | 108 | |
| 132 | | - ret = reservation_object_reserve_shared(bo->resv); |
| | 109 | + ret = dma_resv_reserve_shared(bo->base.resv, |
| | 110 | + entry->num_shared); |
| 133 | 111 | if (!ret) |
| 134 | 112 | continue; |
| 135 | 113 | } |
| .. | .. | |
| 141 | 119 | ttm_eu_backoff_reservation_reverse(list, entry); |
| 142 | 120 | |
| 143 | 121 | if (ret == -EDEADLK) { |
| 144 | | - if (intr) { |
| 145 | | - ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, |
| 146 | | - ticket); |
| 147 | | - } else { |
| 148 | | - ww_mutex_lock_slow(&bo->resv->lock, ticket); |
| 149 | | - ret = 0; |
| 150 | | - } |
| | 122 | + ret = ttm_bo_reserve_slowpath(bo, intr, ticket); |
| 151 | 123 | } |
| 152 | 124 | |
| 153 | | - if (!ret && entry->shared) |
| 154 | | - ret = reservation_object_reserve_shared(bo->resv); |
| | 125 | + if (!ret && entry->num_shared) |
| | 126 | + ret = dma_resv_reserve_shared(bo->base.resv, |
| | 127 | + entry->num_shared); |
| 155 | 128 | |
| 156 | 129 | if (unlikely(ret != 0)) { |
| 157 | | - if (ret == -EINTR) |
| 158 | | - ret = -ERESTARTSYS; |
| 159 | 130 | if (ticket) { |
| 160 | 131 | ww_acquire_done(ticket); |
| 161 | 132 | ww_acquire_fini(ticket); |
| .. | .. | |
| 170 | 141 | list_add(&entry->head, list); |
| 171 | 142 | } |
| 172 | 143 | |
| 173 | | - if (ticket) |
| 174 | | - ww_acquire_done(ticket); |
| 175 | | - spin_lock(&glob->lru_lock); |
| 176 | | - ttm_eu_del_from_lru_locked(list); |
| 177 | | - spin_unlock(&glob->lru_lock); |
| 178 | 144 | return 0; |
| 179 | 145 | } |
| 180 | 146 | EXPORT_SYMBOL(ttm_eu_reserve_buffers); |
| .. | .. | |
| 184 | 150 | struct dma_fence *fence) |
| 185 | 151 | { |
| 186 | 152 | struct ttm_validate_buffer *entry; |
| 187 | | - struct ttm_buffer_object *bo; |
| 188 | | - struct ttm_bo_global *glob; |
| 189 | | - struct ttm_bo_device *bdev; |
| 190 | | - struct ttm_bo_driver *driver; |
| 191 | 153 | |
| 192 | 154 | if (list_empty(list)) |
| 193 | 155 | return; |
| 194 | 156 | |
| 195 | | - bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo; |
| 196 | | - bdev = bo->bdev; |
| 197 | | - driver = bdev->driver; |
| 198 | | - glob = bo->bdev->glob; |
| 199 | | - |
| 200 | | - spin_lock(&glob->lru_lock); |
| 201 | | - |
| | 157 | + spin_lock(&ttm_bo_glob.lru_lock); |
| 202 | 158 | list_for_each_entry(entry, list, head) { |
| 203 | | - bo = entry->bo; |
| 204 | | - if (entry->shared) |
| 205 | | - reservation_object_add_shared_fence(bo->resv, fence); |
| | 159 | + struct ttm_buffer_object *bo = entry->bo; |
| | 160 | + |
| | 161 | + if (entry->num_shared) |
| | 162 | + dma_resv_add_shared_fence(bo->base.resv, fence); |
| 206 | 163 | else |
| 207 | | - reservation_object_add_excl_fence(bo->resv, fence); |
| 208 | | - ttm_bo_add_to_lru(bo); |
| 209 | | - reservation_object_unlock(bo->resv); |
| | 164 | + dma_resv_add_excl_fence(bo->base.resv, fence); |
| | 165 | + ttm_bo_move_to_lru_tail(bo, NULL); |
| | 166 | + dma_resv_unlock(bo->base.resv); |
| 210 | 167 | } |
| 211 | | - spin_unlock(&glob->lru_lock); |
| | 168 | + spin_unlock(&ttm_bo_glob.lru_lock); |
| 212 | 169 | if (ticket) |
| 213 | 170 | ww_acquire_fini(ticket); |
| 214 | 171 | } |

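For orientation, here is a minimal sketch of how a driver typically drives these helpers against the post-change signatures visible in this diff. The function `submit_one_bo` and the `bo`/`fence` arguments are hypothetical placeholders, not part of the patch:

```c
#include <linux/dma-fence.h>
#include <linux/list.h>
#include <linux/ww_mutex.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_execbuf_util.h>

/* Hypothetical driver-side usage sketch, not taken from this patch. */
static int submit_one_bo(struct ttm_buffer_object *bo, struct dma_fence *fence)
{
	/* num_shared = 1 asks the reserve step for one shared fence slot. */
	struct ttm_validate_buffer val = { .bo = bo, .num_shared = 1 };
	struct ww_acquire_ctx ticket;
	LIST_HEAD(objs);
	int ret;

	list_add(&val.head, &objs);

	/* Takes every dma_resv lock on the list, interruptibly, via the
	 * ww-mutex acquire context so multi-BO deadlocks are resolved. */
	ret = ttm_eu_reserve_buffers(&ticket, &objs, true, NULL);
	if (ret)
		return ret;

	/* ... validate placement and queue the work that signals @fence ... */

	/* Adds @fence (shared here, since num_shared is set), moves each BO
	 * to its LRU tail and drops the reservation locks again. */
	ttm_eu_fence_buffer_objects(&ticket, &objs, fence);
	return 0;
}
```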