| .. | .. |
| 8 | 8 | #undef CONFIG_DMABUF_CACHE |
| 9 | 9 | #include <linux/dma-buf-cache.h> |
| 10 | 10 | |
| 11 | +/* NOTE: dma-buf-cache APIs are not irq safe, please DO NOT run in irq context !! */ |
| 12 | + |
| 11 | 13 | struct dma_buf_cache_list { |
| 12 | 14 | struct list_head head; |
| 13 | | - struct mutex lock; |
| 14 | 15 | }; |
| 15 | 16 | |
| 16 | 17 | struct dma_buf_cache { |
| .. | .. |
| 25 | 26 | struct dma_buf_cache_list *data; |
| 26 | 27 | struct dma_buf_cache *cache, *tmp; |
| 27 | 28 | |
| 29 | + mutex_lock(&dmabuf->cache_lock); |
| 30 | + |
| 28 | 31 | data = dmabuf->dtor_data; |
| 29 | 32 | |
| 30 | | - mutex_lock(&data->lock); |
| 31 | 33 | list_for_each_entry_safe(cache, tmp, &data->head, list) { |
| 32 | 34 | if (!IS_ERR_OR_NULL(cache->sg_table)) |
| 33 | 35 | dma_buf_unmap_attachment(cache->attach, |
| .. | .. |
| 37 | 39 | dma_buf_detach(dmabuf, cache->attach); |
| 38 | 40 | list_del(&cache->list); |
| 39 | 41 | kfree(cache); |
| 40 | | - |
| 41 | 42 | } |
| 42 | | - mutex_unlock(&data->lock); |
| 43 | + |
| 44 | + mutex_unlock(&dmabuf->cache_lock); |
| 43 | 45 | |
| 44 | 46 | kfree(data); |
| 45 | 47 | return 0; |
| .. | .. |
| 57 | 59 | |
| 58 | 60 | data = dmabuf->dtor_data; |
| 59 | 61 | |
| 60 | | - mutex_lock(&data->lock); |
| 61 | 62 | list_for_each_entry(cache, &data->head, list) { |
| 62 | | - if (cache->attach == attach) { |
| 63 | | - mutex_unlock(&data->lock); |
| 63 | + if (cache->attach == attach) |
| 64 | 64 | return cache; |
| 65 | | - } |
| 66 | 65 | } |
| 67 | | - mutex_unlock(&data->lock); |
| 68 | 66 | |
| 69 | 67 | return NULL; |
| 70 | 68 | } |
| .. | .. |
| 74 | 72 | { |
| 75 | 73 | struct dma_buf_cache *cache; |
| 76 | 74 | |
| 75 | + mutex_lock(&dmabuf->cache_lock); |
| 76 | + |
| 77 | 77 | cache = dma_buf_cache_get_cache(attach); |
| 78 | 78 | if (!cache) |
| 79 | 79 | dma_buf_detach(dmabuf, attach); |
| 80 | + |
| 81 | + mutex_unlock(&dmabuf->cache_lock); |
| 80 | 82 | } |
| 81 | 83 | EXPORT_SYMBOL(dma_buf_cache_detach); |
| 82 | 84 | |
| .. | .. |
| 87 | 89 | struct dma_buf_cache_list *data; |
| 88 | 90 | struct dma_buf_cache *cache; |
| 89 | 91 | |
| 92 | + mutex_lock(&dmabuf->cache_lock); |
| 93 | + |
| 90 | 94 | if (!dmabuf->dtor) { |
| 91 | 95 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
| 92 | | - if (!data) |
| 93 | | - return ERR_PTR(-ENOMEM); |
| 94 | | - |
| 95 | | - mutex_init(&data->lock); |
| 96 | + if (!data) { |
| 97 | + attach = ERR_PTR(-ENOMEM); |
| 98 | + goto err_data; |
| 99 | + } |
| 96 | 100 | INIT_LIST_HEAD(&data->head); |
| 97 | | - |
| 98 | 101 | dma_buf_set_destructor(dmabuf, dma_buf_cache_destructor, data); |
| 99 | 102 | } |
| 100 | 103 | |
| 101 | | - if (dmabuf->dtor && dmabuf->dtor != dma_buf_cache_destructor) |
| 102 | | - return dma_buf_attach(dmabuf, dev); |
| 104 | + if (dmabuf->dtor && dmabuf->dtor != dma_buf_cache_destructor) { |
| 105 | + attach = dma_buf_attach(dmabuf, dev); |
| 106 | + goto attach_done; |
| 107 | + } |
| 103 | 108 | |
| 104 | 109 | data = dmabuf->dtor_data; |
| 105 | 110 | |
| 106 | | - mutex_lock(&data->lock); |
| 107 | 111 | list_for_each_entry(cache, &data->head, list) { |
| 108 | 112 | if (cache->attach->dev == dev) { |
| 109 | 113 | /* Already attached */ |
| 110 | | - mutex_unlock(&data->lock); |
| 111 | | - return cache->attach; |
| 114 | + attach = cache->attach; |
| 115 | + goto attach_done; |
| 112 | 116 | } |
| 113 | 117 | } |
| 114 | | - mutex_unlock(&data->lock); |
| 115 | 118 | |
| 116 | 119 | cache = kzalloc(sizeof(*cache), GFP_KERNEL); |
| 117 | | - if (!cache) |
| 118 | | - return ERR_PTR(-ENOMEM); |
| 119 | | - |
| 120 | + if (!cache) { |
| 121 | + attach = ERR_PTR(-ENOMEM); |
| 122 | + goto err_cache; |
| 123 | + } |
| 120 | 124 | /* Cache attachment */ |
| 121 | 125 | attach = dma_buf_attach(dmabuf, dev); |
| 122 | | - if (IS_ERR_OR_NULL(attach)) { |
| 123 | | - kfree(cache); |
| 124 | | - return attach; |
| 125 | | - } |
| 126 | + if (IS_ERR_OR_NULL(attach)) |
| 127 | + goto err_attach; |
| 126 | 128 | |
| 127 | 129 | cache->attach = attach; |
| 128 | | - mutex_lock(&data->lock); |
| 129 | 130 | list_add(&cache->list, &data->head); |
| 130 | | - mutex_unlock(&data->lock); |
| 131 | 131 | |
| 132 | | - return cache->attach; |
| 132 | +attach_done: |
| 133 | + mutex_unlock(&dmabuf->cache_lock); |
| 134 | + return attach; |
| 135 | + |
| 136 | +err_attach: |
| 137 | + kfree(cache); |
| 138 | +err_cache: |
| 139 | + kfree(data); |
| 140 | + dma_buf_set_destructor(dmabuf, NULL, NULL); |
| 141 | +err_data: |
| 142 | + mutex_unlock(&dmabuf->cache_lock); |
| 143 | + return attach; |
| 133 | 144 | } |
| 134 | 145 | EXPORT_SYMBOL(dma_buf_cache_attach); |
| 135 | 146 | |
| .. | .. |
| 137 | 148 | struct sg_table *sg_table, |
| 138 | 149 | enum dma_data_direction direction) |
| 139 | 150 | { |
| 151 | + struct dma_buf *dmabuf = attach->dmabuf; |
| 140 | 152 | struct dma_buf_cache *cache; |
| 153 | + |
| 154 | + mutex_lock(&dmabuf->cache_lock); |
| 141 | 155 | |
| 142 | 156 | cache = dma_buf_cache_get_cache(attach); |
| 143 | 157 | if (!cache) |
| 144 | 158 | dma_buf_unmap_attachment(attach, sg_table, direction); |
| 159 | + |
| 160 | + mutex_unlock(&dmabuf->cache_lock); |
| 145 | 161 | } |
| 146 | 162 | EXPORT_SYMBOL(dma_buf_cache_unmap_attachment); |
| 147 | 163 | |
| 148 | 164 | struct sg_table *dma_buf_cache_map_attachment(struct dma_buf_attachment *attach, |
| 149 | 165 | enum dma_data_direction direction) |
| 150 | 166 | { |
| 167 | + struct dma_buf *dmabuf = attach->dmabuf; |
| 151 | 168 | struct dma_buf_cache *cache; |
| 169 | + struct sg_table *sg_table; |
| 170 | + |
| 171 | + mutex_lock(&dmabuf->cache_lock); |
| 152 | 172 | |
| 153 | 173 | cache = dma_buf_cache_get_cache(attach); |
| 154 | | - if (!cache) |
| 155 | | - return dma_buf_map_attachment(attach, direction); |
| 156 | | - |
| 174 | + if (!cache) { |
| 175 | + sg_table = dma_buf_map_attachment(attach, direction); |
| 176 | + goto map_done; |
| 177 | + } |
| 157 | 178 | if (cache->sg_table) { |
| 158 | 179 | /* Already mapped */ |
| 159 | | - if (cache->direction == direction) |
| 160 | | - return cache->sg_table; |
| 161 | | - |
| 180 | + if (cache->direction == direction) { |
| 181 | + sg_table = cache->sg_table; |
| 182 | + goto map_done; |
| 183 | + } |
| 162 | 184 | /* Different directions */ |
| 163 | 185 | dma_buf_unmap_attachment(attach, cache->sg_table, |
| 164 | 186 | cache->direction); |
| 165 | | - |
| 166 | 187 | } |
| 167 | 188 | |
| 168 | 189 | /* Cache map */ |
| 169 | | - cache->sg_table = dma_buf_map_attachment(attach, direction); |
| 190 | + sg_table = dma_buf_map_attachment(attach, direction); |
| 191 | + cache->sg_table = sg_table; |
| 170 | 192 | cache->direction = direction; |
| 171 | 193 | |
| 172 | | - return cache->sg_table; |
| 194 | +map_done: |
| 195 | + mutex_unlock(&dmabuf->cache_lock); |
| 196 | + return sg_table; |
| 173 | 197 | } |
| 174 | 198 | EXPORT_SYMBOL(dma_buf_cache_map_attachment); |
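
For context, the entry points touched by this change (dma_buf_cache_attach, dma_buf_cache_map_attachment, dma_buf_cache_unmap_attachment, dma_buf_cache_detach) are meant to be called from a driver's importer path. The sketch below is a minimal, hypothetical caller and not part of this patch: example_map, its device pointer, and its dma-buf argument are placeholders supplied by the surrounding driver. It only illustrates the call order and the rule stated in the added NOTE: every call now takes the dmabuf->cache_lock mutex, so all of them must run in process context, never in IRQ context.

```c
/*
 * Hypothetical importer sketch for the dma-buf-cache API.
 * All calls below take dmabuf->cache_lock (a mutex), so they must be
 * made from process context, never from IRQ context.
 */
#include <linux/dma-buf.h>
#include <linux/dma-buf-cache.h>
#include <linux/dma-direction.h>
#include <linux/err.h>

static int example_map(struct device *dev, struct dma_buf *dmabuf,
		       struct sg_table **out_sgt)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	/* Attachments are cached per device: a repeat call for the same
	 * device returns the attachment created the first time. */
	attach = dma_buf_cache_attach(dmabuf, dev);
	if (IS_ERR_OR_NULL(attach))
		return attach ? PTR_ERR(attach) : -ENOMEM;

	/* Mappings are cached as well: mapping again with the same
	 * direction returns the existing sg_table. */
	sgt = dma_buf_cache_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		/* Only detaches if the attachment was not cached. */
		dma_buf_cache_detach(dmabuf, attach);
		return sgt ? PTR_ERR(sgt) : -ENOMEM;
	}

	*out_sgt = sgt;
	return 0;
}
```

On the success path the cached attachment and mapping are intentionally left in place; they are released later by dma_buf_cache_destructor(), which this patch registers via dma_buf_set_destructor(), when the dma-buf itself is finally freed.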