From 244b2c5ca8b14627e4a17755e5922221e121c771 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 09 Oct 2024 06:15:07 +0000
Subject: [PATCH] iommu/iova: improve IOVA allocation and caching
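
Improve the IOVA allocator in drivers/iommu/iova.c:

- Replace the GPL licence boilerplate with an SPDX identifier.
- Track max32_alloc_size so that allocations below the 32-bit
  boundary fail fast once a request of that size has already
  failed, and reset it when a range below the boundary is freed.
- When the search below the cached node fails, retry once from
  the anchor to cover the range the first pass skipped, instead
  of failing outright.
- Free the global cached ranges (new free_global_cached_iovas())
  in addition to the per-CPU caches before the final allocation
  retry.
- Rename limit_align() to limit_align_shift() and switch it to
  CONFIG_IOMMU_LIMIT_IOVA_ALIGNMENT/CONFIG_IOMMU_IOVA_ALIGNMENT.
- Allocate iova entries with __GFP_NOWARN, arm the flush-queue
  timer with atomic_xchg() instead of atomic_cmpxchg(), and
  convert printk(KERN_ERR) calls to pr_err().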
---
kernel/drivers/iommu/iova.c | 111 ++++++++++++++++++++++++++++++++++++-------------------
1 file changed, 72 insertions(+), 39 deletions(-)
diff --git a/kernel/drivers/iommu/iova.c b/kernel/drivers/iommu/iova.c
index b537ebd..95c9f79 100644
--- a/kernel/drivers/iommu/iova.c
+++ b/kernel/drivers/iommu/iova.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2006-2009, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
*/
@@ -56,6 +44,7 @@
iovad->granule = granule;
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
+ iovad->max32_alloc_size = iovad->dma_32bit_pfn;
iovad->flush_cb = NULL;
iovad->fq = NULL;
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
@@ -153,6 +142,9 @@
free->pfn_lo >= cached_iova->pfn_lo))
iovad->cached32_node = rb_next(&free->node);
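+	/* Freeing a range below 4 GiB may reopen space for 32-bit allocations */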
+ if (free->pfn_lo < iovad->dma_32bit_pfn)
+ iovad->max32_alloc_size = iovad->dma_32bit_pfn;
+
cached_iova = rb_entry(iovad->cached_node, struct iova, node);
if (free->pfn_lo >= cached_iova->pfn_lo)
iovad->cached_node = rb_next(&free->node);
@@ -186,19 +178,19 @@
rb_insert_color(&iova->node, root);
}
-#ifdef CONFIG_ARM64_DMA_IOMMU_ALIGNMENT
-static unsigned long limit_align(struct iova_domain *iovad,
- unsigned long shift)
+#ifdef CONFIG_IOMMU_LIMIT_IOVA_ALIGNMENT
+static unsigned long limit_align_shift(struct iova_domain *iovad,
+ unsigned long shift)
{
- unsigned long max;
+ unsigned long max_align_shift;
- max = CONFIG_ARM64_DMA_IOMMU_ALIGNMENT + PAGE_SHIFT
+ max_align_shift = CONFIG_IOMMU_IOVA_ALIGNMENT + PAGE_SHIFT
- iova_shift(iovad);
- return min(shift, max);
+ return min_t(unsigned long, max_align_shift, shift);
}
#else
-static unsigned long limit_align(struct iova_domain *iovad,
- unsigned long shift)
+static unsigned long limit_align_shift(struct iova_domain *iovad,
+ unsigned long shift)
{
return shift;
}
@@ -211,27 +203,42 @@
struct rb_node *curr, *prev;
struct iova *curr_iova;
unsigned long flags;
- unsigned long new_pfn;
+ unsigned long new_pfn, low_pfn_new;
unsigned long align_mask = ~0UL;
+ unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
if (size_aligned)
- align_mask <<= limit_align(iovad, fls_long(size - 1));
+ align_mask <<= limit_align_shift(iovad, fls_long(size - 1));
/* Walk the tree backwards */
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
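+	/*
+	 * Fail fast below the 32-bit boundary: a previous allocation of
+	 * max32_alloc_size already failed there, so any request at least
+	 * that large cannot succeed either.
+	 */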
+ if (limit_pfn <= iovad->dma_32bit_pfn &&
+ size >= iovad->max32_alloc_size)
+ goto iova32_full;
+
curr = __get_cached_rbnode(iovad, limit_pfn);
curr_iova = rb_entry(curr, struct iova, node);
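+	/* Remember the cached node's upper bound for a possible retry */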
+ low_pfn_new = curr_iova->pfn_hi;
+
+retry:
do {
- limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
- new_pfn = (limit_pfn - size) & align_mask;
+ high_pfn = min(high_pfn, curr_iova->pfn_lo);
+ new_pfn = (high_pfn - size) & align_mask;
prev = curr;
curr = rb_prev(curr);
curr_iova = rb_entry(curr, struct iova, node);
- } while (curr && new_pfn <= curr_iova->pfn_hi);
+ } while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
- if (limit_pfn < size || new_pfn < iovad->start_pfn) {
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- return -ENOMEM;
+ if (high_pfn < size || new_pfn < low_pfn) {
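+		/*
+		 * The search below the cached node failed; retry once from
+		 * the anchor to cover the range above the cached node that
+		 * the first pass skipped.
+		 */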
+ if (low_pfn == iovad->start_pfn && low_pfn_new < limit_pfn) {
+ high_pfn = limit_pfn;
+ low_pfn = low_pfn_new + 1;
+ curr = &iovad->anchor.node;
+ curr_iova = rb_entry(curr, struct iova, node);
+ goto retry;
+ }
+ iovad->max32_alloc_size = size;
+ goto iova32_full;
}
/* pfn_lo will point to size aligned address if size_aligned is set */
@@ -243,14 +250,17 @@
__cached_rbnode_insert_update(iovad, new);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
-
return 0;
+
+iova32_full:
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ return -ENOMEM;
}
static int __alloc_and_insert_iova_best_fit(struct iova_domain *iovad,
- unsigned long size, unsigned long limit_pfn,
- struct iova *new, bool size_aligned)
+ unsigned long size,
+ unsigned long limit_pfn,
+ struct iova *new, bool size_aligned)
{
struct rb_node *curr, *prev;
struct iova *curr_iova, *prev_iova;
@@ -261,7 +271,7 @@
unsigned long gap, candidate_gap = ~0UL;
if (size_aligned)
- align_mask <<= limit_align(iovad, fls_long(size - 1));
+ align_mask <<= limit_align_shift(iovad, fls_long(size - 1));
/* Walk the tree backwards */
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
@@ -317,7 +327,7 @@
struct iova *alloc_iova_mem(void)
{
- return kmem_cache_zalloc(iova_cache, GFP_ATOMIC);
+ return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
}
EXPORT_SYMBOL(alloc_iova_mem);
@@ -337,7 +347,7 @@
SLAB_HWCACHE_ALIGN, NULL);
if (!iova_cache) {
mutex_unlock(&iova_cache_mutex);
- printk(KERN_ERR "Couldn't create iova cache\n");
+ pr_err("Couldn't create iova cache\n");
return -ENOMEM;
}
}
@@ -520,6 +530,7 @@
flush_rcache = false;
for_each_online_cpu(cpu)
free_cpu_cached_iovas(cpu, iovad);
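+		/* also drain the global depot, not just the per-CPU magazines */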
+ free_global_cached_iovas(iovad);
goto retry;
}
@@ -668,7 +679,7 @@
/* Avoid false sharing as much as possible. */
if (!atomic_read(&iovad->fq_timer_on) &&
- !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
+ !atomic_xchg(&iovad->fq_timer_on, 1))
mod_timer(&iovad->fq_timer,
jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
@@ -807,8 +818,8 @@
new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
if (!new_iova)
- printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
- iova->pfn_lo, iova->pfn_lo);
+ pr_err("Reserve iova range %lx@%lx failed\n",
+ iova->pfn_lo, iova->pfn_lo);
}
spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
@@ -1135,5 +1146,27 @@
}
}
+/*
+ * Free all the IOVA ranges in the global cache
+ */
+void free_global_cached_iovas(struct iova_domain *iovad)
+{
+ struct iova_rcache *rcache;
+ unsigned long flags;
+ int i, j;
+
+ for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ rcache = &iovad->rcaches[i];
+ spin_lock_irqsave(&rcache->lock, flags);
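+		/* Return each depot magazine's PFNs to the rbtree, then free it */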
+ for (j = 0; j < rcache->depot_size; ++j) {
+ iova_magazine_free_pfns(rcache->depot[j], iovad);
+ iova_magazine_free(rcache->depot[j]);
+ rcache->depot[j] = NULL;
+ }
+ rcache->depot_size = 0;
+ spin_unlock_irqrestore(&rcache->lock, flags);
+ }
+}
+
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");
--
Gitblit v1.6.2