2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/dma-buf/dma-buf-cache.c
@@ -8,9 +8,10 @@
 #undef CONFIG_DMABUF_CACHE
 #include <linux/dma-buf-cache.h>
 
+/* NOTE: dma-buf-cache APIs are not IRQ safe; do NOT call them from IRQ context! */
+
 struct dma_buf_cache_list {
        struct list_head head;
-       struct mutex lock;
 };
 
 struct dma_buf_cache {
@@ -25,9 +26,10 @@
        struct dma_buf_cache_list *data;
        struct dma_buf_cache *cache, *tmp;
 
+       mutex_lock(&dmabuf->cache_lock);
+
        data = dmabuf->dtor_data;
 
-       mutex_lock(&data->lock);
        list_for_each_entry_safe(cache, tmp, &data->head, list) {
                if (!IS_ERR_OR_NULL(cache->sg_table))
                        dma_buf_unmap_attachment(cache->attach,
@@ -37,9 +39,9 @@
                dma_buf_detach(dmabuf, cache->attach);
                list_del(&cache->list);
                kfree(cache);
-
        }
-       mutex_unlock(&data->lock);
+
+       mutex_unlock(&dmabuf->cache_lock);
 
        kfree(data);
        return 0;
@@ -57,14 +59,10 @@
 
        data = dmabuf->dtor_data;
 
-       mutex_lock(&data->lock);
        list_for_each_entry(cache, &data->head, list) {
-               if (cache->attach == attach) {
-                       mutex_unlock(&data->lock);
+               if (cache->attach == attach)
                        return cache;
-               }
        }
-       mutex_unlock(&data->lock);
 
        return NULL;
 }
@@ -74,9 +72,13 @@
 {
        struct dma_buf_cache *cache;
 
+       mutex_lock(&dmabuf->cache_lock);
+
        cache = dma_buf_cache_get_cache(attach);
        if (!cache)
                dma_buf_detach(dmabuf, attach);
+
+       mutex_unlock(&dmabuf->cache_lock);
 }
 EXPORT_SYMBOL(dma_buf_cache_detach);
 
@@ -87,49 +89,58 @@
        struct dma_buf_cache_list *data;
        struct dma_buf_cache *cache;
 
+       mutex_lock(&dmabuf->cache_lock);
+
        if (!dmabuf->dtor) {
                data = kzalloc(sizeof(*data), GFP_KERNEL);
-               if (!data)
-                       return ERR_PTR(-ENOMEM);
-
-               mutex_init(&data->lock);
+               if (!data) {
+                       attach = ERR_PTR(-ENOMEM);
+                       goto err_data;
+               }
                INIT_LIST_HEAD(&data->head);
-
                dma_buf_set_destructor(dmabuf, dma_buf_cache_destructor, data);
        }
 
-       if (dmabuf->dtor && dmabuf->dtor != dma_buf_cache_destructor)
-               return dma_buf_attach(dmabuf, dev);
+       if (dmabuf->dtor && dmabuf->dtor != dma_buf_cache_destructor) {
+               attach = dma_buf_attach(dmabuf, dev);
+               goto attach_done;
+       }
 
        data = dmabuf->dtor_data;
 
-       mutex_lock(&data->lock);
        list_for_each_entry(cache, &data->head, list) {
                if (cache->attach->dev == dev) {
                        /* Already attached */
-                       mutex_unlock(&data->lock);
-                       return cache->attach;
+                       attach = cache->attach;
+                       goto attach_done;
                }
        }
-       mutex_unlock(&data->lock);
 
        cache = kzalloc(sizeof(*cache), GFP_KERNEL);
-       if (!cache)
-               return ERR_PTR(-ENOMEM);
-
+       if (!cache) {
+               attach = ERR_PTR(-ENOMEM);
+               goto err_cache;
+       }
        /* Cache attachment */
        attach = dma_buf_attach(dmabuf, dev);
-       if (IS_ERR_OR_NULL(attach)) {
-               kfree(cache);
-               return attach;
-       }
+       if (IS_ERR_OR_NULL(attach))
+               goto err_attach;
 
        cache->attach = attach;
-       mutex_lock(&data->lock);
        list_add(&cache->list, &data->head);
-       mutex_unlock(&data->lock);
 
-       return cache->attach;
+attach_done:
+       mutex_unlock(&dmabuf->cache_lock);
+       return attach;
+
+err_attach:
+       kfree(cache);
+err_cache:
+       kfree(data);
+       dma_buf_set_destructor(dmabuf, NULL, NULL);
+err_data:
+       mutex_unlock(&dmabuf->cache_lock);
+       return attach;
 }
 EXPORT_SYMBOL(dma_buf_cache_attach);
 
@@ -137,38 +148,51 @@
                                    struct sg_table *sg_table,
                                    enum dma_data_direction direction)
 {
+       struct dma_buf *dmabuf = attach->dmabuf;
        struct dma_buf_cache *cache;
+
+       mutex_lock(&dmabuf->cache_lock);
 
        cache = dma_buf_cache_get_cache(attach);
        if (!cache)
                dma_buf_unmap_attachment(attach, sg_table, direction);
+
+       mutex_unlock(&dmabuf->cache_lock);
 }
 EXPORT_SYMBOL(dma_buf_cache_unmap_attachment);
 
 struct sg_table *dma_buf_cache_map_attachment(struct dma_buf_attachment *attach,
                                               enum dma_data_direction direction)
 {
+       struct dma_buf *dmabuf = attach->dmabuf;
        struct dma_buf_cache *cache;
+       struct sg_table *sg_table;
+
+       mutex_lock(&dmabuf->cache_lock);
 
        cache = dma_buf_cache_get_cache(attach);
-       if (!cache)
-               return dma_buf_map_attachment(attach, direction);
-
+       if (!cache) {
+               sg_table = dma_buf_map_attachment(attach, direction);
+               goto map_done;
+       }
        if (cache->sg_table) {
                /* Already mapped */
-               if (cache->direction == direction)
-                       return cache->sg_table;
-
+               if (cache->direction == direction) {
+                       sg_table = cache->sg_table;
+                       goto map_done;
+               }
                /* Different directions */
                dma_buf_unmap_attachment(attach, cache->sg_table,
                                         cache->direction);
-
        }
 
        /* Cache map */
-       cache->sg_table = dma_buf_map_attachment(attach, direction);
+       sg_table = dma_buf_map_attachment(attach, direction);
+       cache->sg_table = sg_table;
        cache->direction = direction;
 
-       return cache->sg_table;
+map_done:
+       mutex_unlock(&dmabuf->cache_lock);
+       return sg_table;
 }
 EXPORT_SYMBOL(dma_buf_cache_map_attachment);
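
The patch replaces the per-list data->lock with a single per-buffer dmabuf->cache_lock held for the full duration of each operation; that is what makes the new goto-based unwind in dma_buf_cache_attach() safe to run in one critical section, and it is why the added NOTE forbids IRQ context (a mutex may sleep). The cache_lock field itself is not declared in this file; a companion change to struct dma_buf is assumed. A minimal sketch of that assumption (the helper name is hypothetical; cache_lock is the field this diff dereferences):

#include <linux/dma-buf.h>
#include <linux/mutex.h>

/*
 * Hypothetical sketch, not part of this diff: struct dma_buf is assumed
 * to carry a "struct mutex cache_lock" member, initialized once at
 * export time, before the first dma_buf_cache_* call.
 */
static inline void example_dma_buf_cache_lock_init(struct dma_buf *dmabuf)
{
	mutex_init(&dmabuf->cache_lock);
}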
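
For context, a caller-side sketch of how the cached wrappers fit together (hypothetical driver code, not part of this patch; signatures inferred from the diff). The first call per device attaches and maps; repeat calls return the cached attachment and sg_table, and dma_buf_cache_detach()/dma_buf_cache_unmap_attachment() are effectively no-ops for cached entries, since the destructor installed above tears everything down when the dma_buf is released. Process context only.

#include <linux/dma-buf.h>
#include <linux/dma-buf-cache.h>
#include <linux/err.h>

static int example_import(struct device *dev, struct dma_buf *dmabuf,
			  struct sg_table **sgt_out)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	/* First call attaches; later calls for the same dev reuse the cache. */
	attach = dma_buf_cache_attach(dmabuf, dev);
	if (IS_ERR_OR_NULL(attach))
		return attach ? PTR_ERR(attach) : -ENOMEM;

	/* First call maps; later calls with the same direction return the
	 * cached sg_table. */
	sgt = dma_buf_cache_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		dma_buf_cache_detach(dmabuf, attach);
		return sgt ? PTR_ERR(sgt) : -ENOMEM;
	}

	*sgt_out = sgt;
	return 0;
}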