@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright © 2006-2009, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
  *
  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  */
@@ -56,6 +44,7 @@
 	iovad->granule = granule;
 	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
+	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
 	iovad->flush_cb = NULL;
 	iovad->fq = NULL;
 	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
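
This hunk and the next (together with the fast-fail check further down in `__alloc_and_insert_iova_range()`) add the 32-bit allocation shortcut to the kernel's drivers/iommu/iova.c: `max32_alloc_size` remembers the size of the last allocation that failed below `dma_32bit_pfn`, so equal-or-larger requests can bail out without walking the rbtree. The field itself is declared in the iova header, which this diff does not show; a minimal sketch of what the code here assumes, with unrelated members elided:

```c
/* Sketch only: the real declaration lives in include/linux/iova.h and is
 * not part of this diff; unrelated members are omitted. */
struct iova_domain {
	unsigned long	start_pfn;	  /* lower pfn limit of the domain */
	unsigned long	dma_32bit_pfn;	  /* first pfn above the 32-bit range */
	unsigned long	max32_alloc_size; /* size of last failed 32-bit alloc */
};
```

Initialising the field to `dma_32bit_pfn` effectively disables the shortcut: only a request spanning the entire 32-bit pfn range would trip it, and such a request could never succeed anyway.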
@@ -153,6 +142,9 @@
 		    free->pfn_lo >= cached_iova->pfn_lo))
 		iovad->cached32_node = rb_next(&free->node);
 
+	if (free->pfn_lo < iovad->dma_32bit_pfn)
+		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
+
 	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
 	if (free->pfn_lo >= cached_iova->pfn_lo)
 		iovad->cached_node = rb_next(&free->node);
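
This is the other half of the contract: whenever an IOVA below `dma_32bit_pfn` is freed back to the tree, the recorded failure size is stale, so it is widened back to the "nothing known to fail" value. A compressed model of the handshake; the helper names are invented purely for illustration:

```c
/* Illustrative model of the max32_alloc_size protocol, not kernel code. */
static bool alloc32_may_fit(struct iova_domain *iovad, unsigned long size)
{
	return size < iovad->max32_alloc_size;		/* fast-fail gate */
}

static void record_alloc32_failure(struct iova_domain *iovad,
				   unsigned long size)
{
	iovad->max32_alloc_size = size;			/* failed rbtree walk */
}

static void forget_alloc32_failures(struct iova_domain *iovad)
{
	iovad->max32_alloc_size = iovad->dma_32bit_pfn;	/* free below 4 GiB */
}
```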
@@ -186,19 +178,19 @@
 	rb_insert_color(&iova->node, root);
 }
 
-#ifdef CONFIG_ARM64_DMA_IOMMU_ALIGNMENT
-static unsigned long limit_align(struct iova_domain *iovad,
-				 unsigned long shift)
+#ifdef CONFIG_IOMMU_LIMIT_IOVA_ALIGNMENT
+static unsigned long limit_align_shift(struct iova_domain *iovad,
+				       unsigned long shift)
 {
-	unsigned long max;
+	unsigned long max_align_shift;
 
-	max = CONFIG_ARM64_DMA_IOMMU_ALIGNMENT + PAGE_SHIFT
+	max_align_shift = CONFIG_IOMMU_IOVA_ALIGNMENT + PAGE_SHIFT
 			- iova_shift(iovad);
-	return min(shift, max);
+	return min_t(unsigned long, max_align_shift, shift);
 }
 #else
-static unsigned long limit_align(struct iova_domain *iovad,
-				 unsigned long shift)
+static unsigned long limit_align_shift(struct iova_domain *iovad,
+				       unsigned long shift)
 {
 	return shift;
 }
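
`limit_align_shift()` is `limit_align()` renamed to say what it returns: the alignment *shift* for a size-aligned allocation, i.e. the order of the request capped at `CONFIG_IOMMU_IOVA_ALIGNMENT` (counted in IOVA pages above `PAGE_SHIFT`). Worked numbers, assuming a 4 KiB granule (`iova_shift() == PAGE_SHIFT == 12`) and an assumed `CONFIG_IOMMU_IOVA_ALIGNMENT=4`:

```c
/* Illustrative arithmetic only; the config value is an assumption. */
unsigned long size  = 512;			/* pages, i.e. a 2 MiB request */
unsigned long shift = fls_long(size - 1);	/* 9: natural order-512 alignment */
unsigned long capped = min_t(unsigned long,
			     4 + 12 - 12,	/* CONFIG + PAGE_SHIFT - iova_shift */
			     shift);		/* -> 4 */
unsigned long align_mask = ~0UL << capped;	/* align to 16 pages, not 512 */
```

Capping the shift trades natural alignment for less fragmentation of a crowded IOVA space; the switch to `min_t()` just pins both operands to `unsigned long` explicitly.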
@@ -211,27 +203,42 @@
 	struct rb_node *curr, *prev;
 	struct iova *curr_iova;
 	unsigned long flags;
-	unsigned long new_pfn;
+	unsigned long new_pfn, low_pfn_new;
 	unsigned long align_mask = ~0UL;
+	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
 
 	if (size_aligned)
-		align_mask <<= limit_align(iovad, fls_long(size - 1));
+		align_mask <<= limit_align_shift(iovad, fls_long(size - 1));
 
 	/* Walk the tree backwards */
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	if (limit_pfn <= iovad->dma_32bit_pfn &&
+	    size >= iovad->max32_alloc_size)
+		goto iova32_full;
+
 	curr = __get_cached_rbnode(iovad, limit_pfn);
 	curr_iova = rb_entry(curr, struct iova, node);
+	low_pfn_new = curr_iova->pfn_hi;
+
+retry:
 	do {
-		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
-		new_pfn = (limit_pfn - size) & align_mask;
+		high_pfn = min(high_pfn, curr_iova->pfn_lo);
+		new_pfn = (high_pfn - size) & align_mask;
 		prev = curr;
 		curr = rb_prev(curr);
 		curr_iova = rb_entry(curr, struct iova, node);
-	} while (curr && new_pfn <= curr_iova->pfn_hi);
+	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
 
-	if (limit_pfn < size || new_pfn < iovad->start_pfn) {
-		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-		return -ENOMEM;
+	if (high_pfn < size || new_pfn < low_pfn) {
+		if (low_pfn == iovad->start_pfn && low_pfn_new < limit_pfn) {
+			high_pfn = limit_pfn;
+			low_pfn = low_pfn_new + 1;
+			curr = &iovad->anchor.node;
+			curr_iova = rb_entry(curr, struct iova, node);
+			goto retry;
+		}
+		iovad->max32_alloc_size = size;
+		goto iova32_full;
 	}
 
 	/* pfn_lo will point to size aligned address if size_aligned is set */
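
The rewritten walk turns allocation into two passes. Pass one starts at the cached node and walks down toward `start_pfn`, as before, but now also stops once candidates drop below `low_pfn`. If it fails, and only if it was a first pass that began at a cached node short of `limit_pfn` (`low_pfn == iovad->start_pfn && low_pfn_new < limit_pfn`), the search restarts from the anchor at the very top and walks down to just above where pass one began, so the region the cached-node shortcut skipped is still considered before the space is declared full. On genuine failure the failing `size` is recorded in `max32_alloc_size`, arming the fast-fail check at the top. Schematically:

```c
/*
 * Ranges covered by the two passes (illustration, not kernel code):
 *
 *   start_pfn           cached node (pfn_hi == low_pfn_new)      limit_pfn
 *       |<---------- pass 1 ----------|                              |
 *       |                             |<---------- pass 2 ---------->|
 *
 * low_pfn_new snapshots the cached node's pfn_hi before pass 1, so pass 2
 * lowers low_pfn only to low_pfn_new + 1 and every gap is visited once.
 */
```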
@@ -243,14 +250,17 @@
 	__cached_rbnode_insert_update(iovad, new);
 
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
-
 	return 0;
+
+iova32_full:
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	return -ENOMEM;
 }
 
 static int __alloc_and_insert_iova_best_fit(struct iova_domain *iovad,
-		unsigned long size, unsigned long limit_pfn,
-		struct iova *new, bool size_aligned)
+					    unsigned long size,
+					    unsigned long limit_pfn,
+					    struct iova *new, bool size_aligned)
 {
 	struct rb_node *curr, *prev;
 	struct iova *curr_iova, *prev_iova;
@@ -261,7 +271,7 @@
 	unsigned long gap, candidate_gap = ~0UL;
 
 	if (size_aligned)
-		align_mask <<= limit_align(iovad, fls_long(size - 1));
+		align_mask <<= limit_align_shift(iovad, fls_long(size - 1));
 
 	/* Walk the tree backwards */
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
@@ -317,7 +327,7 @@
 
 struct iova *alloc_iova_mem(void)
 {
-	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC);
+	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
 }
 EXPORT_SYMBOL(alloc_iova_mem);
 
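
`alloc_iova_mem()` allocates with `GFP_ATOMIC`, which cannot reclaim and is expected to fail under pressure; since callers check for `NULL` and unwind, `__GFP_NOWARN` suppresses the noisy page-allocation-failure splat. The typical caller shape, for illustration:

```c
/* Illustrative caller pattern; alloc_iova() in this file handles NULL
 * this way rather than warning. */
struct iova *new_iova = alloc_iova_mem();

if (!new_iova)
	return NULL;	/* propagate the failure quietly */
```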
@@ -337,7 +347,7 @@
 					SLAB_HWCACHE_ALIGN, NULL);
 		if (!iova_cache) {
 			mutex_unlock(&iova_cache_mutex);
-			printk(KERN_ERR "Couldn't create iova cache\n");
+			pr_err("Couldn't create iova cache\n");
 			return -ENOMEM;
 		}
 	}
@@ -520,6 +530,7 @@
 		flush_rcache = false;
 		for_each_online_cpu(cpu)
 			free_cpu_cached_iovas(cpu, iovad);
+		free_global_cached_iovas(iovad);
 		goto retry;
 	}
 
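
When an rcached allocation fails, the retry path used to drain only the per-CPU magazines; it now also empties the global depot via `free_global_cached_iovas()` (added at the end of this diff), so every cached range is returned to the rbtree before the retry. A condensed sketch of the surrounding loop, assuming the rest of `alloc_iova_fast()` matches mainline:

```c
/* Condensed context, assumed from mainline; only the marked line is new. */
retry:
	new_iova = alloc_iova(iovad, size, limit_pfn + 1, true);
	if (!new_iova) {
		unsigned int cpu;

		if (!flush_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flush_rcache = false;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		free_global_cached_iovas(iovad);	/* new in this diff */
		goto retry;
	}
```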
@@ -668,7 +679,7 @@
 
 	/* Avoid false sharing as much as possible. */
 	if (!atomic_read(&iovad->fq_timer_on) &&
-	    !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
+	    !atomic_xchg(&iovad->fq_timer_on, 1))
 		mod_timer(&iovad->fq_timer,
 			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
 }
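
The timer-arming swap keeps the arm-once semantics: both primitives return the previous value, so only the CPU that observes 0 calls `mod_timer()`. The difference is cost, since `atomic_xchg()` is an unconditional store with no compare, and the preceding `atomic_read()` already filters the common already-armed case. In miniature:

```c
/* Equivalence of the two forms at this call site (illustration):
 *
 *   old = atomic_cmpxchg(&fq_timer_on, 0, 1);	// store 1 only if old == 0
 *   old = atomic_xchg(&fq_timer_on, 1);	// always store 1
 *
 * Either way the timer is armed iff old was 0, and the flag ends up 1.
 */
```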
@@ -807,8 +818,8 @@
 
 		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
 		if (!new_iova)
-			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
-				iova->pfn_lo, iova->pfn_lo);
+			pr_err("Reserve iova range %lx@%lx failed\n",
+			       iova->pfn_lo, iova->pfn_lo);
 	}
 	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
@@ -1135,5 +1146,27 @@
 	}
 }
 
+/*
+ * free all the IOVA ranges of global cache
+ */
+void free_global_cached_iovas(struct iova_domain *iovad)
+{
+	struct iova_rcache *rcache;
+	unsigned long flags;
+	int i, j;
+
+	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+		rcache = &iovad->rcaches[i];
+		spin_lock_irqsave(&rcache->lock, flags);
+		for (j = 0; j < rcache->depot_size; ++j) {
+			iova_magazine_free_pfns(rcache->depot[j], iovad);
+			iova_magazine_free(rcache->depot[j]);
+			rcache->depot[j] = NULL;
+		}
+		rcache->depot_size = 0;
+		spin_unlock_irqrestore(&rcache->lock, flags);
+	}
+}
+
 MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
 MODULE_LICENSE("GPL");
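
`free_global_cached_iovas()` frees only the depot magazines of each rcache, taking the per-rcache `rcache->lock` with interrupts disabled, the same discipline the insert and remove paths use; per-CPU magazines are drained separately by `free_cpu_cached_iovas()`, which the retry hunk above already calls. For out-of-file callers a declaration presumably accompanies this change in the iova header; a sketch of the assumed prototype:

```c
/* Assumed companion declaration; the header hunk is not shown in this diff. */
void free_global_cached_iovas(struct iova_domain *iovad);
```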