hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/iommu/iova.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright © 2006-2009, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
  *
  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  */
@@ -56,6 +44,7 @@
         iovad->granule = granule;
         iovad->start_pfn = start_pfn;
         iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
+        iovad->max32_alloc_size = iovad->dma_32bit_pfn;
         iovad->flush_cb = NULL;
         iovad->fq = NULL;
         iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
@@ -153,6 +142,9 @@
              free->pfn_lo >= cached_iova->pfn_lo))
                 iovad->cached32_node = rb_next(&free->node);
 
+        if (free->pfn_lo < iovad->dma_32bit_pfn)
+                iovad->max32_alloc_size = iovad->dma_32bit_pfn;
+
         cached_iova = rb_entry(iovad->cached_node, struct iova, node);
         if (free->pfn_lo >= cached_iova->pfn_lo)
                 iovad->cached_node = rb_next(&free->node);
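Together with the initialisation hunk above and the allocator hunks below, this teaches the domain to remember, in max32_alloc_size, the size of the last request that failed below dma_32bit_pfn: a later request of that size or larger can then fail immediately instead of re-walking the rbtree, and freeing anything below the 32-bit boundary resets the hint. The following is a minimal userspace model of that heuristic, with made-up toy_* names and the actual tree walk stubbed out:

/*
 * Minimal userspace model of the max32_alloc_size fast-fail heuristic
 * in the hunks above (assumption: the real allocator walks an rbtree;
 * here the search itself is a stub that always fails).
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_iova_domain {
        unsigned long dma_32bit_pfn;    /* top of the 32-bit IOVA window */
        unsigned long max32_alloc_size; /* smallest size known to fail   */
};

/* Stand-in for the rbtree walk; pretend no fitting gap exists. */
static bool toy_rbtree_alloc(struct toy_iova_domain *d, unsigned long size)
{
        (void)d;
        printf("  (walking the tree for %lu pfns ... no gap found)\n", size);
        return false;
}

static int toy_alloc(struct toy_iova_domain *d, unsigned long size,
                     unsigned long limit_pfn)
{
        /* Fast path: a same-or-larger request already failed below 4 GiB. */
        if (limit_pfn <= d->dma_32bit_pfn && size >= d->max32_alloc_size)
                return -1;

        if (!toy_rbtree_alloc(d, size)) {
                d->max32_alloc_size = size;     /* remember the failure */
                return -1;
        }
        return 0;
}

/* Freeing anything below the 32-bit boundary invalidates the hint. */
static void toy_free(struct toy_iova_domain *d, unsigned long pfn_lo)
{
        if (pfn_lo < d->dma_32bit_pfn)
                d->max32_alloc_size = d->dma_32bit_pfn;
}

int main(void)
{
        struct toy_iova_domain d = {
                .dma_32bit_pfn = 1UL << 20,     /* 4 GiB with a 4 KiB granule */
                .max32_alloc_size = 1UL << 20,
        };

        printf("first try:  %d\n", toy_alloc(&d, 256, d.dma_32bit_pfn)); /* walks, fails */
        printf("second try: %d\n", toy_alloc(&d, 512, d.dma_32bit_pfn)); /* fails fast   */
        toy_free(&d, 0x1000);                                            /* resets hint  */
        printf("after free: %d\n", toy_alloc(&d, 512, d.dma_32bit_pfn)); /* walks again  */
        return 0;
}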
@@ -186,19 +178,19 @@
         rb_insert_color(&iova->node, root);
 }
 
-#ifdef CONFIG_ARM64_DMA_IOMMU_ALIGNMENT
-static unsigned long limit_align(struct iova_domain *iovad,
-                                 unsigned long shift)
+#ifdef CONFIG_IOMMU_LIMIT_IOVA_ALIGNMENT
+static unsigned long limit_align_shift(struct iova_domain *iovad,
+                                       unsigned long shift)
 {
-        unsigned long max;
+        unsigned long max_align_shift;
 
-        max = CONFIG_ARM64_DMA_IOMMU_ALIGNMENT + PAGE_SHIFT
+        max_align_shift = CONFIG_IOMMU_IOVA_ALIGNMENT + PAGE_SHIFT
                         - iova_shift(iovad);
-        return min(shift, max);
+        return min_t(unsigned long, max_align_shift, shift);
 }
 #else
-static unsigned long limit_align(struct iova_domain *iovad,
-                                 unsigned long shift)
+static unsigned long limit_align_shift(struct iova_domain *iovad,
+                                       unsigned long shift)
 {
         return shift;
 }
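The renamed helper caps the natural size-alignment shift: a size-aligned allocation of 2^n pages would normally demand 2^n-page alignment, and the Kconfig cap keeps very large buffers from forcing equally large alignment and fragmenting the space. A small sketch of the arithmetic, assuming a 4 KiB granule and a hypothetical cap of 4 standing in for CONFIG_IOMMU_IOVA_ALIGNMENT:

/*
 * Sketch of the capped size-alignment shift. TOY_* values are made up;
 * fls_long() here is a plain reimplementation of the kernel helper.
 */
#include <stdio.h>

#define TOY_PAGE_SHIFT          12
#define TOY_IOVA_SHIFT          12      /* iova_shift() for a 4 KiB granule */
#define TOY_IOVA_ALIGNMENT      4       /* hypothetical Kconfig value */

static unsigned long fls_long(unsigned long x)
{
        unsigned long r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

static unsigned long limit_align_shift_model(unsigned long shift)
{
        unsigned long max_align_shift =
                TOY_IOVA_ALIGNMENT + TOY_PAGE_SHIFT - TOY_IOVA_SHIFT;

        return shift < max_align_shift ? shift : max_align_shift;
}

int main(void)
{
        unsigned long size = 1024;      /* IOVA pages, i.e. a 4 MiB mapping */
        unsigned long align_mask = ~0UL;

        /* Uncapped, the natural alignment would be 2^10 pages ... */
        printf("natural shift: %lu\n", fls_long(size - 1));
        /* ... but the cap limits it to 2^4 pages. */
        align_mask <<= limit_align_shift_model(fls_long(size - 1));
        printf("capped mask:   0x%lx\n", align_mask);
        return 0;
}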
@@ -211,27 +203,42 @@
         struct rb_node *curr, *prev;
         struct iova *curr_iova;
         unsigned long flags;
-        unsigned long new_pfn;
+        unsigned long new_pfn, low_pfn_new;
         unsigned long align_mask = ~0UL;
+        unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
 
         if (size_aligned)
-                align_mask <<= limit_align(iovad, fls_long(size - 1));
+                align_mask <<= limit_align_shift(iovad, fls_long(size - 1));
 
         /* Walk the tree backwards */
         spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+        if (limit_pfn <= iovad->dma_32bit_pfn &&
+                        size >= iovad->max32_alloc_size)
+                goto iova32_full;
+
         curr = __get_cached_rbnode(iovad, limit_pfn);
         curr_iova = rb_entry(curr, struct iova, node);
+        low_pfn_new = curr_iova->pfn_hi;
+
+retry:
         do {
-                limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
-                new_pfn = (limit_pfn - size) & align_mask;
+                high_pfn = min(high_pfn, curr_iova->pfn_lo);
+                new_pfn = (high_pfn - size) & align_mask;
                 prev = curr;
                 curr = rb_prev(curr);
                 curr_iova = rb_entry(curr, struct iova, node);
-        } while (curr && new_pfn <= curr_iova->pfn_hi);
+        } while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
 
-        if (limit_pfn < size || new_pfn < iovad->start_pfn) {
-                spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-                return -ENOMEM;
+        if (high_pfn < size || new_pfn < low_pfn) {
+                if (low_pfn == iovad->start_pfn && low_pfn_new < limit_pfn) {
+                        high_pfn = limit_pfn;
+                        low_pfn = low_pfn_new + 1;
+                        curr = &iovad->anchor.node;
+                        curr_iova = rb_entry(curr, struct iova, node);
+                        goto retry;
+                }
+                iovad->max32_alloc_size = size;
+                goto iova32_full;
         }
 
         /* pfn_lo will point to size aligned address if size_aligned is set */
@@ -243,14 +250,17 @@
         __cached_rbnode_insert_update(iovad, new);
 
         spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
-
         return 0;
+
+iova32_full:
+        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+        return -ENOMEM;
 }
 
 static int __alloc_and_insert_iova_best_fit(struct iova_domain *iovad,
-                        unsigned long size, unsigned long limit_pfn,
-                        struct iova *new, bool size_aligned)
+                                            unsigned long size,
+                                            unsigned long limit_pfn,
+                                            struct iova *new, bool size_aligned)
 {
         struct rb_node *curr, *prev;
         struct iova *curr_iova, *prev_iova;
@@ -261,7 +271,7 @@
         unsigned long gap, candidate_gap = ~0UL;
 
         if (size_aligned)
-                align_mask <<= limit_align(iovad, fls_long(size - 1));
+                align_mask <<= limit_align_shift(iovad, fls_long(size - 1));
 
         /* Walk the tree backwards */
         spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
@@ -317,7 +327,7 @@
 
 struct iova *alloc_iova_mem(void)
 {
-        return kmem_cache_zalloc(iova_cache, GFP_ATOMIC);
+        return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
 }
 EXPORT_SYMBOL(alloc_iova_mem);
 
@@ -337,7 +347,7 @@
                         SLAB_HWCACHE_ALIGN, NULL);
                 if (!iova_cache) {
                         mutex_unlock(&iova_cache_mutex);
-                        printk(KERN_ERR "Couldn't create iova cache\n");
+                        pr_err("Couldn't create iova cache\n");
                         return -ENOMEM;
                 }
         }
@@ -520,6 +530,7 @@
                 flush_rcache = false;
                 for_each_online_cpu(cpu)
                         free_cpu_cached_iovas(cpu, iovad);
+                free_global_cached_iovas(iovad);
                 goto retry;
         }
 
@@ -668,7 +679,7 @@
 
         /* Avoid false sharing as much as possible. */
         if (!atomic_read(&iovad->fq_timer_on) &&
-            !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
+            !atomic_xchg(&iovad->fq_timer_on, 1))
                 mod_timer(&iovad->fq_timer,
                           jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
 }
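The flag here only ever races on the 0 -> 1 transition (the flush-queue timer handler is what clears it again), so an unconditional atomic exchange is sufficient and typically cheaper than a compare-and-swap, while the preceding plain read keeps the already-armed fast path from writing the cache line at all. A userspace rendering of the same pattern with C11 atomics, with made-up names:

/* Sketch of the read-then-xchg arming pattern; not the kernel API. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int timer_armed;

/* Returns true if this caller is the one that should arm the timer. */
static bool arm_timer_once(void)
{
        /* Cheap shared read keeps the common case read-only. */
        if (atomic_load(&timer_armed))
                return false;

        /* Whoever swaps the 0 out first wins; everyone else sees 1. */
        return atomic_exchange(&timer_armed, 1) == 0;
}

static void timer_fired(void)
{
        atomic_store(&timer_armed, 0);  /* mirrors the timer callback */
}

int main(void)
{
        printf("first caller arms:  %d\n", arm_timer_once());
        printf("second caller arms: %d\n", arm_timer_once());
        timer_fired();
        printf("after expiry arms:  %d\n", arm_timer_once());
        return 0;
}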
@@ -807,8 +818,8 @@
 
                 new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
                 if (!new_iova)
-                        printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
-                                iova->pfn_lo, iova->pfn_lo);
+                        pr_err("Reserve iova range %lx@%lx failed\n",
+                               iova->pfn_lo, iova->pfn_lo);
         }
         spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
@@ -1135,5 +1146,27 @@
         }
 }
 
+/*
+ * free all the IOVA ranges of global cache
+ */
+void free_global_cached_iovas(struct iova_domain *iovad)
+{
+        struct iova_rcache *rcache;
+        unsigned long flags;
+        int i, j;
+
+        for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+                rcache = &iovad->rcaches[i];
+                spin_lock_irqsave(&rcache->lock, flags);
+                for (j = 0; j < rcache->depot_size; ++j) {
+                        iova_magazine_free_pfns(rcache->depot[j], iovad);
+                        iova_magazine_free(rcache->depot[j]);
+                        rcache->depot[j] = NULL;
+                }
+                rcache->depot_size = 0;
+                spin_unlock_irqrestore(&rcache->lock, flags);
+        }
+}
+
 MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
 MODULE_LICENSE("GPL");
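free_global_cached_iovas() complements the per-CPU flush on the allocation-failure retry path (the flush/retry hunk further up): once the per-CPU magazines are drained, the magazines parked in each range cache's global depot are returned to the tree as well, so the retried allocation sees every cached pfn. A toy userspace version of that depot drain, with pthread mutexes standing in for the spinlocks and made-up sizes:

/* Illustrative depot drain; toy_* structures are not the kernel's. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_RANGE_CACHE_MAX_SIZE        6
#define TOY_DEPOT_SIZE                  32

struct toy_magazine { int nr_pfns; };

struct toy_rcache {
        pthread_mutex_t lock;
        unsigned long depot_size;
        struct toy_magazine *depot[TOY_DEPOT_SIZE];
};

static void toy_magazine_free_pfns(struct toy_magazine *mag)
{
        printf("returned %d cached pfns to the tree\n", mag->nr_pfns);
}

static void toy_free_global_cached(struct toy_rcache *rcaches)
{
        int i, j;

        for (i = 0; i < TOY_RANGE_CACHE_MAX_SIZE; i++) {
                struct toy_rcache *rcache = &rcaches[i];

                pthread_mutex_lock(&rcache->lock);
                for (j = 0; j < rcache->depot_size; j++) {
                        toy_magazine_free_pfns(rcache->depot[j]);
                        free(rcache->depot[j]);
                        rcache->depot[j] = NULL;
                }
                rcache->depot_size = 0;
                pthread_mutex_unlock(&rcache->lock);
        }
}

int main(void)
{
        static struct toy_rcache rcaches[TOY_RANGE_CACHE_MAX_SIZE];
        struct toy_magazine *mag = malloc(sizeof(*mag));
        int i;

        if (!mag)
                return 1;
        for (i = 0; i < TOY_RANGE_CACHE_MAX_SIZE; i++)
                pthread_mutex_init(&rcaches[i].lock, NULL);

        mag->nr_pfns = 128;
        rcaches[0].depot[0] = mag;
        rcaches[0].depot_size = 1;

        toy_free_global_cached(rcaches);
        return 0;
}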