+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <jroedel@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */

 #define pr_fmt(fmt)	"iommu: " fmt

 #include <linux/device.h>
 #include <linux/kernel.h>
+#include <linux/bits.h>
 #include <linux/bug.h>
 #include <linux/types.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/iommu.h>
...
 #include <linux/pci.h>
 #include <linux/bitops.h>
 #include <linux/property.h>
+#include <linux/fsl/mc.h>
+#include <linux/module.h>
 #include <trace/events/iommu.h>

 static struct kset *iommu_group_kset;
 static DEFINE_IDA(iommu_group_ida);
-#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
-static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
-#else
-static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
-#endif

-struct iommu_callback_data {
-	const struct iommu_ops *ops;
-};
+static unsigned int iommu_def_domain_type __read_mostly;
+static bool iommu_dma_strict __read_mostly = true;
+static u32 iommu_cmd_line __read_mostly;

 struct iommu_group {
 	struct kobject kobj;
...
 	int id;
 	struct iommu_domain *default_domain;
 	struct iommu_domain *domain;
+	struct list_head entry;
 };

 struct group_device {
...
 };

 static const char * const iommu_group_resv_type_string[] = {
-	[IOMMU_RESV_DIRECT]	= "direct",
-	[IOMMU_RESV_RESERVED]	= "reserved",
-	[IOMMU_RESV_MSI]	= "msi",
-	[IOMMU_RESV_SW_MSI]	= "msi",
+	[IOMMU_RESV_DIRECT]			= "direct",
+	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
+	[IOMMU_RESV_RESERVED]			= "reserved",
+	[IOMMU_RESV_MSI]			= "msi",
+	[IOMMU_RESV_SW_MSI]			= "msi",
 };
+
+#define IOMMU_CMD_LINE_DMA_API		BIT(0)
+
+static void iommu_set_cmd_line_dma_api(void)
+{
+	iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
+}
+
+static bool iommu_cmd_line_dma_api(void)
+{
+	return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
+}
+
+static int iommu_alloc_default_domain(struct iommu_group *group,
+				      struct device *dev);
+static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
+						 unsigned type);
+static int __iommu_attach_device(struct iommu_domain *domain,
+				 struct device *dev);
+static int __iommu_attach_group(struct iommu_domain *domain,
+				struct iommu_group *group);
+static void __iommu_detach_group(struct iommu_domain *domain,
+				 struct iommu_group *group);
+static int iommu_create_device_direct_mappings(struct iommu_group *group,
+					       struct device *dev);
+static struct iommu_group *iommu_group_get_for_dev(struct device *dev);

 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
 struct iommu_group_attribute iommu_group_attr_##_name =	\
...
 static LIST_HEAD(iommu_device_list);
 static DEFINE_SPINLOCK(iommu_device_lock);

+/*
+ * Use a function instead of an array here because the domain-type is a
+ * bit-field, so an array would waste memory.
+ */
+static const char *iommu_domain_type_str(unsigned int t)
+{
+	switch (t) {
+	case IOMMU_DOMAIN_BLOCKED:
+		return "Blocked";
+	case IOMMU_DOMAIN_IDENTITY:
+		return "Passthrough";
+	case IOMMU_DOMAIN_UNMANAGED:
+		return "Unmanaged";
+	case IOMMU_DOMAIN_DMA:
+		return "Translated";
+	default:
+		return "Unknown";
+	}
+}
+
+static int __init iommu_subsys_init(void)
+{
+	bool cmd_line = iommu_cmd_line_dma_api();
+
+	if (!cmd_line) {
+		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
+			iommu_set_default_passthrough(false);
+		else
+			iommu_set_default_translated(false);
+
+		if (iommu_default_passthrough() && mem_encrypt_active()) {
+			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
+			iommu_set_default_translated(false);
+		}
+	}
+
+	pr_info("Default domain type: %s %s\n",
+		iommu_domain_type_str(iommu_def_domain_type),
+		cmd_line ? "(set via kernel command line)" : "");
+
+	return 0;
+}
+subsys_initcall(iommu_subsys_init);
+
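With iommu_subsys_init() in place, every boot now logs the chosen default domain type. Reading the pr_fmt/pr_info pair above, a kernel defaulting to translated DMA would print a line of the form (illustrative rendering of the format string, not captured from a real boot):

	iommu: Default domain type: Translated

while booting with iommu.passthrough=1 would yield "iommu: Default domain type: Passthrough (set via kernel command line)".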
---|
 int iommu_device_register(struct iommu_device *iommu)
 {
 	spin_lock(&iommu_device_lock);
 	list_add_tail(&iommu->list, &iommu_device_list);
 	spin_unlock(&iommu_device_lock);
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_device_register);
...
 }
 EXPORT_SYMBOL_GPL(iommu_device_unregister);

-static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
-						 unsigned type);
-static int __iommu_attach_device(struct iommu_domain *domain,
-				 struct device *dev);
-static int __iommu_attach_group(struct iommu_domain *domain,
-				struct iommu_group *group);
-static void __iommu_detach_group(struct iommu_domain *domain,
-				 struct iommu_group *group);
+static struct dev_iommu *dev_iommu_get(struct device *dev)
+{
+	struct dev_iommu *param = dev->iommu;
+
+	if (param)
+		return param;
+
+	param = kzalloc(sizeof(*param), GFP_KERNEL);
+	if (!param)
+		return NULL;
+
+	mutex_init(&param->lock);
+	dev->iommu = param;
+	return param;
+}
+
+static void dev_iommu_free(struct device *dev)
+{
+	struct dev_iommu *param = dev->iommu;
+
+	dev->iommu = NULL;
+	if (param->fwspec) {
+		fwnode_handle_put(param->fwspec->iommu_fwnode);
+		kfree(param->fwspec);
+	}
+	kfree(param);
+}
+
+static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
+{
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+	struct iommu_device *iommu_dev;
+	struct iommu_group *group;
+	int ret;
+
+	if (!ops)
+		return -ENODEV;
+
+	if (!dev_iommu_get(dev))
+		return -ENOMEM;
+
+	if (!try_module_get(ops->owner)) {
+		ret = -EINVAL;
+		goto err_free;
+	}
+
+	iommu_dev = ops->probe_device(dev);
+	if (IS_ERR(iommu_dev)) {
+		ret = PTR_ERR(iommu_dev);
+		goto out_module_put;
+	}
+
+	dev->iommu->iommu_dev = iommu_dev;
+
+	group = iommu_group_get_for_dev(dev);
+	if (IS_ERR(group)) {
+		ret = PTR_ERR(group);
+		goto out_release;
+	}
+	iommu_group_put(group);
+
+	if (group_list && !group->default_domain && list_empty(&group->entry))
+		list_add_tail(&group->entry, group_list);
+
+	iommu_device_link(iommu_dev, dev);
+
+	return 0;
+
+out_release:
+	ops->release_device(dev);
+
+out_module_put:
+	module_put(ops->owner);
+
+err_free:
+	dev_iommu_free(dev);
+
+	return ret;
+}
+
+int iommu_probe_device(struct device *dev)
+{
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+	struct iommu_group *group;
+	int ret;
+
+	ret = __iommu_probe_device(dev, NULL);
+	if (ret)
+		goto err_out;
+
+	group = iommu_group_get(dev);
+	if (!group)
+		goto err_release;
+
+	/*
+	 * Try to allocate a default domain - needs support from the
+	 * IOMMU driver. There are still some drivers which don't
+	 * support default domains, so the return value is not yet
+	 * checked.
+	 */
+	mutex_lock(&group->mutex);
+	iommu_alloc_default_domain(group, dev);
+
+	if (group->default_domain) {
+		ret = __iommu_attach_device(group->default_domain, dev);
+		if (ret) {
+			mutex_unlock(&group->mutex);
+			iommu_group_put(group);
+			goto err_release;
+		}
+	}
+
+	iommu_create_device_direct_mappings(group, dev);
+
+	mutex_unlock(&group->mutex);
+	iommu_group_put(group);
+
+	if (ops->probe_finalize)
+		ops->probe_finalize(dev);
+
+	return 0;
+
+err_release:
+	iommu_release_device(dev);
+
+err_out:
+	return ret;
+
+}
+
+void iommu_release_device(struct device *dev)
+{
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+	if (!dev->iommu)
+		return;
+
+	iommu_device_unlink(dev->iommu->iommu_dev, dev);
+
+	ops->release_device(dev);
+
+	iommu_group_remove_device(dev);
+	module_put(ops->owner);
+	dev_iommu_free(dev);
+}
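The probe path above defines the contract a driver now implements: probe_device() returns the device's struct iommu_device (or an ERR_PTR), release_device() undoes it, and the optional probe_finalize() runs after the default domain has been attached. A minimal, hypothetical driver skeleton satisfying that contract might look like this (all my_* names are invented for illustration; only the ops members are from the code above):

	static struct iommu_device *my_iommu_probe_device(struct device *dev)
	{
		struct my_iommu *iommu = my_iommu_lookup(dev);	/* hypothetical lookup */

		if (!iommu)
			return ERR_PTR(-ENODEV);	/* not translated by this IOMMU */

		return &iommu->iommu;		/* embedded struct iommu_device */
	}

	static void my_iommu_release_device(struct device *dev)
	{
		/* undo whatever probe_device() set up for @dev */
	}

	static void my_iommu_probe_finalize(struct device *dev)
	{
		/* runs after the default domain is attached, e.g. set up DMA ops */
	}

	static const struct iommu_ops my_iommu_ops = {
		.probe_device	= my_iommu_probe_device,
		.release_device	= my_iommu_release_device,
		.probe_finalize	= my_iommu_probe_finalize,
		.owner		= THIS_MODULE,
		/* ... domain_alloc, attach_dev, map, unmap, etc. ... */
	};

Note the .owner field: it is what the try_module_get(ops->owner) in __iommu_probe_device() pins while a device is bound.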
---|

 static int __init iommu_set_def_domain_type(char *str)
 {
...
 	if (ret)
 		return ret;

-	iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
+	if (pt)
+		iommu_set_default_passthrough(true);
+	else
+		iommu_set_default_translated(true);
+
 	return 0;
 }
 early_param("iommu.passthrough", iommu_set_def_domain_type);
+
+static int __init iommu_dma_setup(char *str)
+{
+	return kstrtobool(str, &iommu_dma_strict);
+}
+early_param("iommu.strict", iommu_dma_setup);
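iommu.strict visibly parses with kstrtobool(), so the usual boolean spellings apply (the parsing of iommu.passthrough is elided above but takes a boolean as well). As an illustrative command line: iommu.passthrough=1 selects an identity default domain, while iommu.strict=0 clears iommu_dma_strict so that iommu_group_alloc_default_domain() further down enables the DMA flush queue (lazy IOTLB invalidation) on newly allocated default domains.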
---|

 static ssize_t iommu_group_attr_show(struct kobject *kobj,
 				     struct attribute *__attr, char *buf)
...
  * @new: new region to insert
  * @regions: list of regions
  *
- * The new element is sorted by address with respect to the other
- * regions of the same type. In case it overlaps with another
- * region of the same type, regions are merged. In case it
- * overlaps with another region of different type, regions are
- * not merged.
+ * Elements are sorted by start address and overlapping segments
+ * of the same type are merged.
  */
 static int iommu_insert_resv_region(struct iommu_resv_region *new,
 				    struct list_head *regions)
 {
-	struct iommu_resv_region *region;
-	phys_addr_t start = new->start;
-	phys_addr_t end = new->start + new->length - 1;
-	struct list_head *pos = regions->next;
+	struct iommu_resv_region *iter, *tmp, *nr, *top;
+	LIST_HEAD(stack);

-	while (pos != regions) {
-		struct iommu_resv_region *entry =
-			list_entry(pos, struct iommu_resv_region, list);
-		phys_addr_t a = entry->start;
-		phys_addr_t b = entry->start + entry->length - 1;
-		int type = entry->type;
-
-		if (end < a) {
-			goto insert;
-		} else if (start > b) {
-			pos = pos->next;
-		} else if ((start >= a) && (end <= b)) {
-			if (new->type == type)
-				return 0;
-			else
-				pos = pos->next;
-		} else {
-			if (new->type == type) {
-				phys_addr_t new_start = min(a, start);
-				phys_addr_t new_end = max(b, end);
-				int ret;
-
-				list_del(&entry->list);
-				entry->start = new_start;
-				entry->length = new_end - new_start + 1;
-				ret = iommu_insert_resv_region(entry, regions);
-				kfree(entry);
-				return ret;
-			} else {
-				pos = pos->next;
-			}
-		}
-	}
-insert:
-	region = iommu_alloc_resv_region(new->start, new->length,
-					 new->prot, new->type);
-	if (!region)
+	nr = iommu_alloc_resv_region(new->start, new->length,
+				     new->prot, new->type);
+	if (!nr)
 		return -ENOMEM;

-	list_add_tail(&region->list, pos);
+	/* First add the new element based on start address sorting */
+	list_for_each_entry(iter, regions, list) {
+		if (nr->start < iter->start ||
+		    (nr->start == iter->start && nr->type <= iter->type))
+			break;
+	}
+	list_add_tail(&nr->list, &iter->list);
+
+	/* Merge overlapping segments of type nr->type in @regions, if any */
+	list_for_each_entry_safe(iter, tmp, regions, list) {
+		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
+
+		/* no merge needed on elements of different types than @new */
+		if (iter->type != new->type) {
+			list_move_tail(&iter->list, &stack);
+			continue;
+		}
+
+		/* look for the last stack element of same type as @iter */
+		list_for_each_entry_reverse(top, &stack, list)
+			if (top->type == iter->type)
+				goto check_overlap;
+
+		list_move_tail(&iter->list, &stack);
+		continue;
+
+check_overlap:
+		top_end = top->start + top->length - 1;
+
+		if (iter->start > top_end + 1) {
+			list_move_tail(&iter->list, &stack);
+		} else {
+			top->length = max(top_end, iter_end) - top->start + 1;
+			list_del(&iter->list);
+			kfree(iter);
+		}
+	}
+	list_splice(&stack, regions);
 	return 0;
 }

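The rewritten helper is easier to reason about with a concrete sequence (hypothetical addresses, all of the same IOMMU_RESV_DIRECT type). Inserting [0x1000, 0x1fff] and then [0x3000, 0x3fff] keeps two sorted entries, since 0x3000 > 0x1fff + 1. Inserting [0x1800, 0x30ff] afterwards bridges the gap: the merge pass first folds it into the first entry (giving [0x1000, 0x30ff]), then folds the third entry in as well, leaving a single [0x1000, 0x3fff] region of length 0x3000. Regions of a different type that interleave with these are moved to the temporary stack untouched, which is why only same-type segments ever merge.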
---|
...
 	group->kobj.kset = iommu_group_kset;
 	mutex_init(&group->mutex);
 	INIT_LIST_HEAD(&group->devices);
+	INIT_LIST_HEAD(&group->entry);
 	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

 	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
...
 }
 EXPORT_SYMBOL_GPL(iommu_group_set_name);

-static int iommu_group_create_direct_mappings(struct iommu_group *group,
-					      struct device *dev)
+static int iommu_create_device_direct_mappings(struct iommu_group *group,
+					       struct device *dev)
 {
 	struct iommu_domain *domain = group->default_domain;
 	struct iommu_resv_region *entry;
...
 		start = ALIGN(entry->start, pg_size);
 		end = ALIGN(entry->start + entry->length, pg_size);

-		if (entry->type != IOMMU_RESV_DIRECT)
+		if (entry->type != IOMMU_RESV_DIRECT &&
+		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
 			continue;

 		for (addr = start; addr < end; addr += pg_size) {
...

 	}

-	iommu_flush_tlb_all(domain);
+	iommu_flush_iotlb_all(domain);

 out:
 	iommu_put_resv_regions(dev, &mappings);

 	return ret;
+}
+
+static bool iommu_is_attach_deferred(struct iommu_domain *domain,
+				     struct device *dev)
+{
+	if (domain->ops->is_attach_deferred)
+		return domain->ops->is_attach_deferred(domain, dev);
+
+	return false;
 }

 /**
...

 	dev->iommu_group = group;

-	iommu_group_create_direct_mappings(group, dev);
-
 	mutex_lock(&group->mutex);
 	list_add_tail(&device->list, &group->devices);
-	if (group->domain)
+	if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
 		ret = __iommu_attach_device(group->domain, dev);
 	mutex_unlock(&group->mutex);
 	if (ret)
...

 	trace_add_device_to_group(group->id, dev);

-	pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
+	dev_info(dev, "Adding to iommu group %d\n", group->id);

 	return 0;

...
 	sysfs_remove_link(&dev->kobj, "iommu_group");
 err_free_device:
 	kfree(device);
-	pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
+	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_group_add_device);
...
 	struct iommu_group *group = dev->iommu_group;
 	struct group_device *tmp_device, *device = NULL;

-	pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);
+	if (!group)
+		return;
+
+	dev_info(dev, "Removing from iommu group %d\n", group->id);

 	/* Pre-notify listeners that a device is being removed. */
 	blocking_notifier_call_chain(&group->notifier,
...
 	return blocking_notifier_chain_unregister(&group->notifier, nb);
 }
 EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
+
+/**
+ * iommu_register_device_fault_handler() - Register a device fault handler
+ * @dev: the device
+ * @handler: the fault handler
+ * @data: private data passed as argument to the handler
+ *
+ * When an IOMMU fault event is received, this handler gets called with the
+ * fault event and data as argument. The handler should return 0 on success. If
+ * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
+ * complete the fault by calling iommu_page_response() with one of the following
+ * response code:
+ * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
+ * - IOMMU_PAGE_RESP_INVALID: terminate the fault
+ * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
+ *   page faults if possible.
+ *
+ * Return 0 if the fault handler was installed successfully, or an error.
+ */
+int iommu_register_device_fault_handler(struct device *dev,
+					iommu_dev_fault_handler_t handler,
+					void *data)
+{
+	struct dev_iommu *param = dev->iommu;
+	int ret = 0;
+
+	if (!param)
+		return -EINVAL;
+
+	mutex_lock(&param->lock);
+	/* Only allow one fault handler registered for each device */
+	if (param->fault_param) {
+		ret = -EBUSY;
+		goto done_unlock;
+	}
+
+	get_device(dev);
+	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
+	if (!param->fault_param) {
+		put_device(dev);
+		ret = -ENOMEM;
+		goto done_unlock;
+	}
+	param->fault_param->handler = handler;
+	param->fault_param->data = data;
+	mutex_init(&param->fault_param->lock);
+	INIT_LIST_HEAD(&param->fault_param->faults);
+
+done_unlock:
+	mutex_unlock(&param->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
+
+/**
+ * iommu_unregister_device_fault_handler() - Unregister the device fault handler
+ * @dev: the device
+ *
+ * Remove the device fault handler installed with
+ * iommu_register_device_fault_handler().
+ *
+ * Return 0 on success, or an error.
+ */
+int iommu_unregister_device_fault_handler(struct device *dev)
+{
+	struct dev_iommu *param = dev->iommu;
+	int ret = 0;
+
+	if (!param)
+		return -EINVAL;
+
+	mutex_lock(&param->lock);
+
+	if (!param->fault_param)
+		goto unlock;
+
+	/* we cannot unregister handler if there are pending faults */
+	if (!list_empty(&param->fault_param->faults)) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	kfree(param->fault_param);
+	param->fault_param = NULL;
+	put_device(dev);
+unlock:
+	mutex_unlock(&param->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
+
+/**
+ * iommu_report_device_fault() - Report fault event to device driver
+ * @dev: the device
+ * @evt: fault event data
+ *
+ * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
+ * handler. When this function fails and the fault is recoverable, it is the
+ * caller's responsibility to complete the fault.
+ *
+ * Return 0 on success, or an error.
+ */
+int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
+{
+	struct dev_iommu *param = dev->iommu;
+	struct iommu_fault_event *evt_pending = NULL;
+	struct iommu_fault_param *fparam;
+	int ret = 0;
+
+	if (!param || !evt)
+		return -EINVAL;
+
+	/* we only report device fault if there is a handler registered */
+	mutex_lock(&param->lock);
+	fparam = param->fault_param;
+	if (!fparam || !fparam->handler) {
+		ret = -EINVAL;
+		goto done_unlock;
+	}
+
+	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
+	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
+		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
+				      GFP_KERNEL);
+		if (!evt_pending) {
+			ret = -ENOMEM;
+			goto done_unlock;
+		}
+		mutex_lock(&fparam->lock);
+		list_add_tail(&evt_pending->list, &fparam->faults);
+		mutex_unlock(&fparam->lock);
+	}
+
+	ret = fparam->handler(&evt->fault, fparam->data);
+	if (ret && evt_pending) {
+		mutex_lock(&fparam->lock);
+		list_del(&evt_pending->list);
+		mutex_unlock(&fparam->lock);
+		kfree(evt_pending);
+	}
+done_unlock:
+	mutex_unlock(&param->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_report_device_fault);
+
+int iommu_page_response(struct device *dev,
+			struct iommu_page_response *msg)
+{
+	bool needs_pasid;
+	int ret = -EINVAL;
+	struct iommu_fault_event *evt;
+	struct iommu_fault_page_request *prm;
+	struct dev_iommu *param = dev->iommu;
+	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
+	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+
+	if (!domain || !domain->ops->page_response)
+		return -ENODEV;
+
+	if (!param || !param->fault_param)
+		return -EINVAL;
+
+	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
+	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
+		return -EINVAL;
+
+	/* Only send response if there is a fault report pending */
+	mutex_lock(&param->fault_param->lock);
+	if (list_empty(&param->fault_param->faults)) {
+		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
+		goto done_unlock;
+	}
+	/*
+	 * Check if we have a matching page request pending to respond,
+	 * otherwise return -EINVAL
+	 */
+	list_for_each_entry(evt, &param->fault_param->faults, list) {
+		prm = &evt->fault.prm;
+		if (prm->grpid != msg->grpid)
+			continue;
+
+		/*
+		 * If the PASID is required, the corresponding request is
+		 * matched using the group ID, the PASID valid bit and the PASID
+		 * value. Otherwise only the group ID matches request and
+		 * response.
+		 */
+		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
+		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
+			continue;
+
+		if (!needs_pasid && has_pasid) {
+			/* No big deal, just clear it. */
+			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
+			msg->pasid = 0;
+		}
+
+		ret = domain->ops->page_response(dev, evt, msg);
+		list_del(&evt->list);
+		kfree(evt);
+		break;
+	}
+
+done_unlock:
+	mutex_unlock(&param->fault_param->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_page_response);
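For orientation, a minimal consumer of this fault-reporting API might look as follows. Everything besides the iommu_* calls and UAPI constants is hypothetical (the handler name, the choice to complete synchronously); a real consumer would usually resolve the fault asynchronously and respond later:

	/* Hypothetical consumer: succeed every recoverable page request. */
	static int my_fault_handler(struct iommu_fault *fault, void *data)
	{
		struct device *dev = data;
		struct iommu_page_response resp = {
			.version = IOMMU_PAGE_RESP_VERSION_1,
			.grpid	 = fault->prm.grpid,
			.code	 = IOMMU_PAGE_RESP_SUCCESS,
		};

		if (fault->type != IOMMU_FAULT_PAGE_REQ)
			return -EOPNOTSUPP;	/* unrecoverable; nothing to complete */

		if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
			resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
			resp.pasid = fault->prm.pasid;
		}

		/* ... resolve the faulting access here, then complete it: */
		return iommu_page_response(dev, &resp);
	}

	/* registration, e.g. from the consumer's probe path: */
	ret = iommu_register_device_fault_handler(dev, my_fault_handler, dev);

Note that, per iommu_report_device_fault() above, only page requests carrying IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE are queued as pending, so only those can be completed through iommu_page_response().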
---|

 /**
  * iommu_group_id - Return ID for a group
...
 }
 EXPORT_SYMBOL_GPL(pci_device_group);

+/* Get the IOMMU group for device on fsl-mc bus */
+struct iommu_group *fsl_mc_device_group(struct device *dev)
+{
+	struct device *cont_dev = fsl_mc_cont_dev(dev);
+	struct iommu_group *group;
+
+	group = iommu_group_get(cont_dev);
+	if (!group)
+		group = iommu_group_alloc();
+	return group;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_device_group);
+
+static int iommu_get_def_domain_type(struct device *dev)
+{
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+	unsigned int type = 0;
+
+	if (ops->def_domain_type)
+		type = ops->def_domain_type(dev);
+
+	return (type == 0) ? iommu_def_domain_type : type;
+}
+
+static int iommu_group_alloc_default_domain(struct bus_type *bus,
+					    struct iommu_group *group,
+					    unsigned int type)
+{
+	struct iommu_domain *dom;
+
+	dom = __iommu_domain_alloc(bus, type);
+	if (!dom && type != IOMMU_DOMAIN_DMA) {
+		dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
+		if (dom)
+			pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
+				type, group->name);
+	}
+
+	if (!dom)
+		return -ENOMEM;
+
+	group->default_domain = dom;
+	if (!group->domain)
+		group->domain = dom;
+
+	if (!iommu_dma_strict) {
+		int attr = 1;
+		iommu_domain_set_attr(dom,
+				      DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
+				      &attr);
+	}
+
+	return 0;
+}
+
+static int iommu_alloc_default_domain(struct iommu_group *group,
+				      struct device *dev)
+{
+	unsigned int type;
+
+	if (group->default_domain)
+		return 0;
+
+	type = iommu_get_def_domain_type(dev);
+
+	return iommu_group_alloc_default_domain(dev->bus, group, type);
+}
+
 /**
  * iommu_group_get_for_dev - Find or create the IOMMU group for a device
  * @dev: target device
...
  * to the returned IOMMU group, which will already include the provided
  * device. The reference should be released with iommu_group_put().
  */
-struct iommu_group *iommu_group_get_for_dev(struct device *dev)
+static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 {
 	const struct iommu_ops *ops = dev->bus->iommu_ops;
 	struct iommu_group *group;
...
 	if (IS_ERR(group))
 		return group;

-	/*
-	 * Try to allocate a default domain - needs support from the
-	 * IOMMU driver.
-	 */
-	if (!group->default_domain) {
-		struct iommu_domain *dom;
-
-		dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
-		if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
-			dev_warn(dev,
-				 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
-				 iommu_def_domain_type);
-			dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
-		}
-
-		group->default_domain = dom;
-		if (!group->domain)
-			group->domain = dom;
-	}
-
 	ret = iommu_group_add_device(group, dev);
-	if (ret) {
-		iommu_group_put(group);
-		return ERR_PTR(ret);
-	}
+	if (ret)
+		goto out_put_group;

 	return group;
+
+out_put_group:
+	iommu_group_put(group);
+
+	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(iommu_group_get_for_dev);

 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
 {
 	return group->default_domain;
 }
-EXPORT_SYMBOL_GPL(iommu_group_default_domain);

-static int add_iommu_group(struct device *dev, void *data)
+static int probe_iommu_group(struct device *dev, void *data)
 {
-	struct iommu_callback_data *cb = data;
-	const struct iommu_ops *ops = cb->ops;
+	struct list_head *group_list = data;
+	struct iommu_group *group;
 	int ret;

-	if (!ops->add_device)
+	/* Device is probed already if in a group */
+	group = iommu_group_get(dev);
+	if (group) {
+		iommu_group_put(group);
 		return 0;
+	}

-	WARN_ON(dev->iommu_group);
-
-	ret = ops->add_device(dev);
-
-	/*
-	 * We ignore -ENODEV errors for now, as they just mean that the
-	 * device is not translated by an IOMMU. We still care about
-	 * other errors and fail to initialize when they happen.
-	 */
+	ret = __iommu_probe_device(dev, group_list);
 	if (ret == -ENODEV)
 		ret = 0;

...

 static int remove_iommu_group(struct device *dev, void *data)
 {
-	struct iommu_callback_data *cb = data;
-	const struct iommu_ops *ops = cb->ops;
-
-	if (ops->remove_device && dev->iommu_group)
-		ops->remove_device(dev);
+	iommu_release_device(dev);

 	return 0;
 }
...
 static int iommu_bus_notifier(struct notifier_block *nb,
 			      unsigned long action, void *data)
 {
-	struct device *dev = data;
-	const struct iommu_ops *ops = dev->bus->iommu_ops;
-	struct iommu_group *group;
 	unsigned long group_action = 0;
+	struct device *dev = data;
+	struct iommu_group *group;

 	/*
 	 * ADD/DEL call into iommu driver ops if provided, which may
 	 * result in ADD/DEL notifiers to group->notifier
 	 */
 	if (action == BUS_NOTIFY_ADD_DEVICE) {
-		if (ops->add_device) {
-			int ret;
+		int ret;

-			ret = ops->add_device(dev);
-			return (ret) ? NOTIFY_DONE : NOTIFY_OK;
-		}
+		ret = iommu_probe_device(dev);
+		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
 	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
-		if (ops->remove_device && dev->iommu_group) {
-			ops->remove_device(dev);
-			return 0;
-		}
+		iommu_release_device(dev);
+		return NOTIFY_OK;
 	}

 	/*
...
 	return 0;
 }

+struct __group_domain_type {
+	struct device *dev;
+	unsigned int type;
+};
+
+static int probe_get_default_domain_type(struct device *dev, void *data)
+{
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+	struct __group_domain_type *gtype = data;
+	unsigned int type = 0;
+
+	if (ops->def_domain_type)
+		type = ops->def_domain_type(dev);
+
+	if (type) {
+		if (gtype->type && gtype->type != type) {
+			dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
+				 iommu_domain_type_str(type),
+				 dev_name(gtype->dev),
+				 iommu_domain_type_str(gtype->type));
+			gtype->type = 0;
+		}
+
+		if (!gtype->dev) {
+			gtype->dev = dev;
+			gtype->type = type;
+		}
+	}
+
+	return 0;
+}
+
+static void probe_alloc_default_domain(struct bus_type *bus,
+				       struct iommu_group *group)
+{
+	struct __group_domain_type gtype;
+
+	memset(&gtype, 0, sizeof(gtype));
+
+	/* Ask for default domain requirements of all devices in the group */
+	__iommu_group_for_each_dev(group, &gtype,
+				   probe_get_default_domain_type);
+
+	if (!gtype.type)
+		gtype.type = iommu_def_domain_type;
+
+	iommu_group_alloc_default_domain(bus, group, gtype.type);
+
+}
+
+static int iommu_group_do_dma_attach(struct device *dev, void *data)
+{
+	struct iommu_domain *domain = data;
+	int ret = 0;
+
+	if (!iommu_is_attach_deferred(domain, dev))
+		ret = __iommu_attach_device(domain, dev);
+
+	return ret;
+}
+
+static int __iommu_group_dma_attach(struct iommu_group *group)
+{
+	return __iommu_group_for_each_dev(group, group->default_domain,
+					  iommu_group_do_dma_attach);
+}
+
+static int iommu_group_do_probe_finalize(struct device *dev, void *data)
+{
+	struct iommu_domain *domain = data;
+
+	if (domain->ops->probe_finalize)
+		domain->ops->probe_finalize(dev);
+
+	return 0;
+}
+
+static void __iommu_group_dma_finalize(struct iommu_group *group)
+{
+	__iommu_group_for_each_dev(group, group->default_domain,
+				   iommu_group_do_probe_finalize);
+}
+
+static int iommu_do_create_direct_mappings(struct device *dev, void *data)
+{
+	struct iommu_group *group = data;
+
+	iommu_create_device_direct_mappings(group, dev);
+
+	return 0;
+}
+
+static int iommu_group_create_direct_mappings(struct iommu_group *group)
+{
+	return __iommu_group_for_each_dev(group, group,
+					  iommu_do_create_direct_mappings);
+}
+
+int bus_iommu_probe(struct bus_type *bus)
+{
+	struct iommu_group *group, *next;
+	LIST_HEAD(group_list);
+	int ret;
+
+	/*
+	 * This code-path does not allocate the default domain when
+	 * creating the iommu group, so do it after the groups are
+	 * created.
+	 */
+	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
+	if (ret)
+		return ret;
+
+	list_for_each_entry_safe(group, next, &group_list, entry) {
+		/* Remove item from the list */
+		list_del_init(&group->entry);
+
+		mutex_lock(&group->mutex);
+
+		/* Try to allocate default domain */
+		probe_alloc_default_domain(bus, group);
+
+		if (!group->default_domain) {
+			mutex_unlock(&group->mutex);
+			continue;
+		}
+
+		iommu_group_create_direct_mappings(group);
+
+		ret = __iommu_group_dma_attach(group);
+
+		mutex_unlock(&group->mutex);
+
+		if (ret)
+			break;
+
+		__iommu_group_dma_finalize(group);
+	}
+
+	return ret;
+}
+
 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
 {
-	int err;
 	struct notifier_block *nb;
-	struct iommu_callback_data cb = {
-		.ops = ops,
-	};
+	int err;

 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
 	if (!nb)
...
 	if (err)
 		goto out_free;

-	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
+	err = bus_iommu_probe(bus);
 	if (err)
 		goto out_err;
+

 	return 0;

 out_err:
 	/* Clean up */
-	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
+	bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
 	bus_unregister_notifier(bus, nb);

 out_free:
...
 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
 {
 	int err;
+
+	if (ops == NULL) {
+		bus->iommu_ops = NULL;
+		return 0;
+	}

 	if (bus->iommu_ops != NULL)
 		return -EBUSY;
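One behavioural addition worth calling out: ops == NULL is now accepted and simply clears bus->iommu_ops. A hypothetical modular driver (names invented for illustration) can therefore pair setup and teardown:

	static int __init my_iommu_init(void)
	{
		return bus_set_iommu(&platform_bus_type, &my_iommu_ops);
	}

	static void __exit my_iommu_exit(void)
	{
		/* detach from the bus; supported via the NULL case above */
		bus_set_iommu(&platform_bus_type, NULL);
	}

Together with the try_module_get(ops->owner) taken in __iommu_probe_device(), this is what lets IOMMU drivers be built and unloaded as modules.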
---|
...
 	domain->type = type;
 	/* Assume all sizes by default; the driver may override this later */
 	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
-	domain->is_debug_domain = false;
-	memset(domain->name, 0, IOMMU_DOMAIN_NAME_LEN);

 	return domain;
 }
...
 {
 	int ret;

-	/* Hack for disable iommu */
-	if (!domain) {
-		ret = dev->bus->iommu_ops->attach_dev(domain, dev);
-		return ret;
-	}
-
-	if ((domain->ops->is_attach_deferred != NULL) &&
-	    domain->ops->is_attach_deferred(domain, dev))
-		return 0;
-
 	if (unlikely(domain->ops->attach_dev == NULL))
 		return -ENODEV;

 	ret = domain->ops->attach_dev(domain, dev);
-	if (!ret) {
+	if (!ret)
 		trace_attach_device_to_domain(dev);
-
-		if (!strnlen(domain->name, IOMMU_DOMAIN_NAME_LEN)) {
-			strlcpy(domain->name, dev_name(dev),
-				IOMMU_DOMAIN_NAME_LEN);
-		}
-	}
 	return ret;
 }

...
 }
 EXPORT_SYMBOL_GPL(iommu_attach_device);

+/*
+ * Check flags and other user provided data for valid combinations. We also
+ * make sure no reserved fields or unused flags are set. This is to ensure
+ * not breaking userspace in the future when these fields or flags are used.
+ */
+static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
+{
+	u32 mask;
+	int i;
+
+	if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
+		return -EINVAL;
+
+	mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
+	if (info->cache & ~mask)
+		return -EINVAL;
+
+	if (info->granularity >= IOMMU_INV_GRANU_NR)
+		return -EINVAL;
+
+	switch (info->granularity) {
+	case IOMMU_INV_GRANU_ADDR:
+		if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
+			return -EINVAL;
+
+		mask = IOMMU_INV_ADDR_FLAGS_PASID |
+		       IOMMU_INV_ADDR_FLAGS_ARCHID |
+		       IOMMU_INV_ADDR_FLAGS_LEAF;
+
+		if (info->granu.addr_info.flags & ~mask)
+			return -EINVAL;
+		break;
+	case IOMMU_INV_GRANU_PASID:
+		mask = IOMMU_INV_PASID_FLAGS_PASID |
+		       IOMMU_INV_PASID_FLAGS_ARCHID;
+		if (info->granu.pasid_info.flags & ~mask)
+			return -EINVAL;
+
+		break;
+	case IOMMU_INV_GRANU_DOMAIN:
+		if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Check reserved padding fields */
+	for (i = 0; i < sizeof(info->padding); i++) {
+		if (info->padding[i])
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
+				void __user *uinfo)
+{
+	struct iommu_cache_invalidate_info inv_info = { 0 };
+	u32 minsz;
+	int ret;
+
+	if (unlikely(!domain->ops->cache_invalidate))
+		return -ENODEV;
+
+	/*
+	 * No new spaces can be added before the variable sized union, the
+	 * minimum size is the offset to the union.
+	 */
+	minsz = offsetof(struct iommu_cache_invalidate_info, granu);
+
+	/* Copy minsz from user to get flags and argsz */
+	if (copy_from_user(&inv_info, uinfo, minsz))
+		return -EFAULT;
+
+	/* Fields before the variable size union are mandatory */
+	if (inv_info.argsz < minsz)
+		return -EINVAL;
+
+	/* PASID and address granu require additional info beyond minsz */
+	if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
+	    inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
+		return -EINVAL;
+
+	if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
+	    inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
+		return -EINVAL;
+
+	/*
+	 * User might be using a newer UAPI header which has a larger data
+	 * size, we shall support the existing flags within the current
+	 * size. Copy the remaining user data _after_ minsz but not more
+	 * than the current kernel supported size.
+	 */
+	if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
+			   min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
+		return -EFAULT;
+
+	/* Now the argsz is validated, check the content */
+	ret = iommu_check_cache_invl_data(&inv_info);
+	if (ret)
+		return ret;
+
+	return domain->ops->cache_invalidate(domain, dev, &inv_info);
+}
+EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);
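To make the argsz handshake concrete, here is how a caller (for instance a VFIO-style mediator forwarding a guest invalidation) might populate the structure this helper copies in. The surrounding variables are hypothetical; the fields and constants are the ones validated above:

	struct iommu_cache_invalidate_info inv_info = { 0 };

	inv_info.version     = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1;
	inv_info.cache	     = IOMMU_CACHE_INV_TYPE_PASID;
	inv_info.granularity = IOMMU_INV_GRANU_PASID;
	inv_info.granu.pasid_info.flags = IOMMU_INV_PASID_FLAGS_PASID;
	inv_info.granu.pasid_info.pasid = pasid;	/* supplied by the caller */
	/* argsz must reach the end of the union member actually used */
	inv_info.argsz = offsetofend(struct iommu_cache_invalidate_info,
				     granu.pasid_info);

Anything shorter than the offset of the union is rejected with -EINVAL, the padding bytes must be zero-filled, and a larger argsz from a newer userspace header is tolerated but truncated to what this kernel knows about.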
---|
| 2087 | + |
---|
| 2088 | +static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data) |
---|
| 2089 | +{ |
---|
| 2090 | + u64 mask; |
---|
| 2091 | + int i; |
---|
| 2092 | + |
---|
| 2093 | + if (data->version != IOMMU_GPASID_BIND_VERSION_1) |
---|
| 2094 | + return -EINVAL; |
---|
| 2095 | + |
---|
| 2096 | + /* Check the range of supported formats */ |
---|
| 2097 | + if (data->format >= IOMMU_PASID_FORMAT_LAST) |
---|
| 2098 | + return -EINVAL; |
---|
| 2099 | + |
---|
| 2100 | + /* Check all flags */ |
---|
| 2101 | + mask = IOMMU_SVA_GPASID_VAL; |
---|
| 2102 | + if (data->flags & ~mask) |
---|
| 2103 | + return -EINVAL; |
---|
| 2104 | + |
---|
| 2105 | + /* Check reserved padding fields */ |
---|
| 2106 | + for (i = 0; i < sizeof(data->padding); i++) { |
---|
| 2107 | + if (data->padding[i]) |
---|
| 2108 | + return -EINVAL; |
---|
| 2109 | + } |
---|
| 2110 | + |
---|
| 2111 | + return 0; |
---|
| 2112 | +} |
---|
| 2113 | + |
---|
| 2114 | +static int iommu_sva_prepare_bind_data(void __user *udata, |
---|
| 2115 | + struct iommu_gpasid_bind_data *data) |
---|
| 2116 | +{ |
---|
| 2117 | + u32 minsz; |
---|
| 2118 | + |
---|
| 2119 | + /* |
---|
| 2120 | + * No new spaces can be added before the variable sized union, the |
---|
| 2121 | + * minimum size is the offset to the union. |
---|
| 2122 | + */ |
---|
| 2123 | + minsz = offsetof(struct iommu_gpasid_bind_data, vendor); |
---|
| 2124 | + |
---|
| 2125 | + /* Copy minsz from user to get flags and argsz */ |
---|
| 2126 | + if (copy_from_user(data, udata, minsz)) |
---|
| 2127 | + return -EFAULT; |
---|
| 2128 | + |
---|
| 2129 | + /* Fields before the variable size union are mandatory */ |
---|
| 2130 | + if (data->argsz < minsz) |
---|
| 2131 | + return -EINVAL; |
---|
| 2132 | + /* |
---|
| 2133 | + * User might be using a newer UAPI header, we shall let IOMMU vendor |
---|
| 2134 | + * driver decide on what size it needs. Since the guest PASID bind data |
---|
| 2135 | + * can be vendor specific, larger argsz could be the result of extension |
---|
| 2136 | + * for one vendor but it should not affect another vendor. |
---|
| 2137 | + * Copy the remaining user data _after_ minsz |
---|
| 2138 | + */ |
---|
| 2139 | + if (copy_from_user((void *)data + minsz, udata + minsz, |
---|
| 2140 | + min_t(u32, data->argsz, sizeof(*data)) - minsz)) |
---|
| 2141 | + return -EFAULT; |
---|
| 2142 | + |
---|
| 2143 | + return iommu_check_bind_data(data); |
---|
| 2144 | +} |
---|
| 2145 | + |
---|
| 2146 | +int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev, |
---|
| 2147 | + void __user *udata) |
---|
| 2148 | +{ |
---|
| 2149 | + struct iommu_gpasid_bind_data data = { 0 }; |
---|
| 2150 | + int ret; |
---|
| 2151 | + |
---|
| 2152 | + if (unlikely(!domain->ops->sva_bind_gpasid)) |
---|
| 2153 | + return -ENODEV; |
---|
| 2154 | + |
---|
| 2155 | + ret = iommu_sva_prepare_bind_data(udata, &data); |
---|
| 2156 | + if (ret) |
---|
| 2157 | + return ret; |
---|
| 2158 | + |
---|
| 2159 | + return domain->ops->sva_bind_gpasid(domain, dev, &data); |
---|
| 2160 | +} |
---|
| 2161 | +EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid); |
---|
| 2162 | + |
---|
| 2163 | +int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, |
---|
| 2164 | + ioasid_t pasid) |
---|
| 2165 | +{ |
---|
| 2166 | + if (unlikely(!domain->ops->sva_unbind_gpasid)) |
---|
| 2167 | + return -ENODEV; |
---|
| 2168 | + |
---|
| 2169 | + return domain->ops->sva_unbind_gpasid(dev, pasid); |
---|
| 2170 | +} |
---|
| 2171 | +EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid); |
---|
| 2172 | + |
---|
| 2173 | +int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, |
---|
| 2174 | + void __user *udata) |
---|
| 2175 | +{ |
---|
| 2176 | + struct iommu_gpasid_bind_data data = { 0 }; |
---|
| 2177 | + int ret; |
---|
| 2178 | + |
---|
| 2179 | + if (unlikely(!domain->ops->sva_unbind_gpasid))
---|
| 2180 | + return -ENODEV; |
---|
| 2181 | + |
---|
| 2182 | + ret = iommu_sva_prepare_bind_data(udata, &data); |
---|
| 2183 | + if (ret) |
---|
| 2184 | + return ret; |
---|
| 2185 | + |
---|
| 2186 | + return iommu_sva_unbind_gpasid(domain, dev, data.hpasid); |
---|
| 2187 | +} |
---|
| 2188 | +EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid); |
---|
| 2189 | + |
---|
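
The three UAPI entry points above deliberately take the raw __user pointer, so that a caller such as an ioctl handler stays trivial: all argsz/version/flags/padding validation happens inside the helpers. A minimal sketch, assuming a hypothetical handler (example_gpasid_ioctl is not code from this file) with "arg" taken straight from the ioctl:

	static long example_gpasid_ioctl(struct iommu_domain *domain,
					 struct device *dev,
					 unsigned long arg, bool bind)
	{
		void __user *uptr = (void __user *)arg;

		/* argsz/version/flags/padding checks happen inside the helpers */
		if (bind)
			return iommu_uapi_sva_bind_gpasid(domain, dev, uptr);
		return iommu_uapi_sva_unbind_gpasid(domain, dev, uptr);
	}
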
1391 | 2190 | static void __iommu_detach_device(struct iommu_domain *domain, |
---|
1392 | 2191 | struct device *dev) |
---|
1393 | 2192 | { |
---|
1394 | | - if ((domain->ops->is_attach_deferred != NULL) && |
---|
1395 | | - domain->ops->is_attach_deferred(domain, dev)) |
---|
| 2193 | + if (iommu_is_attach_deferred(domain, dev)) |
---|
1396 | 2194 | return; |
---|
1397 | 2195 | |
---|
1398 | 2196 | if (unlikely(domain->ops->detach_dev == NULL)) |
---|
.. | .. |
---|
1443 | 2241 | EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); |
---|
1444 | 2242 | |
---|
1445 | 2243 | /* |
---|
1446 | | - * IOMMU groups are really the natrual working unit of the IOMMU, but |
---|
| 2244 | + * Fast path for IOMMU_DOMAIN_DMA implementations, which already guarantee
---|
| 2245 | + * that the group and its default domain are valid and correct.
---|
| 2246 | + */ |
---|
| 2247 | +struct iommu_domain *iommu_get_dma_domain(struct device *dev) |
---|
| 2248 | +{ |
---|
| 2249 | + return dev->iommu_group->default_domain; |
---|
| 2250 | +} |
---|
| 2251 | + |
---|
| 2252 | +/* |
---|
| 2253 | + * IOMMU groups are really the natural working unit of the IOMMU, but |
---|
1447 | 2254 | * the IOMMU API works on domains and devices. Bridge that gap by |
---|
1448 | 2255 | * iterating over the devices in a group. Ideally we'd have a single |
---|
1449 | 2256 | * device which represents the requestor ID of the group, but we also |
---|
.. | .. |
---|
1463 | 2270 | struct iommu_group *group) |
---|
1464 | 2271 | { |
---|
1465 | 2272 | int ret; |
---|
| 2273 | + |
---|
| 2274 | + if (group->default_domain && group->domain != group->default_domain) |
---|
| 2275 | + return -EBUSY; |
---|
1466 | 2276 | |
---|
1467 | 2277 | ret = __iommu_group_for_each_dev(group, domain, |
---|
1468 | 2278 | iommu_group_do_attach_device); |
---|
.. | .. |
---|
1493 | 2303 | return 0; |
---|
1494 | 2304 | } |
---|
1495 | 2305 | |
---|
1496 | | -/* |
---|
1497 | | - * Although upstream implements detaching the default_domain as a noop, |
---|
1498 | | - * the "SID switch" secure usecase require complete removal of SIDS/SMRS |
---|
1499 | | - * from HLOS iommu registers. |
---|
1500 | | - */ |
---|
1501 | 2306 | static void __iommu_detach_group(struct iommu_domain *domain, |
---|
1502 | 2307 | struct iommu_group *group) |
---|
1503 | 2308 | { |
---|
1504 | | - __iommu_group_for_each_dev(group, domain, |
---|
| 2309 | + int ret; |
---|
| 2310 | + |
---|
| 2311 | + if (!group->default_domain) { |
---|
| 2312 | + __iommu_group_for_each_dev(group, domain, |
---|
1505 | 2313 | iommu_group_do_detach_device); |
---|
1506 | | - group->domain = NULL; |
---|
1507 | | - return; |
---|
| 2314 | + group->domain = NULL; |
---|
| 2315 | + return; |
---|
| 2316 | + } |
---|
| 2317 | + |
---|
| 2318 | + if (group->domain == group->default_domain) |
---|
| 2319 | + return; |
---|
| 2320 | + |
---|
| 2321 | + /* Detach by re-attaching to the default domain */ |
---|
| 2322 | + ret = __iommu_group_for_each_dev(group, group->default_domain, |
---|
| 2323 | + iommu_group_do_attach_device); |
---|
| 2324 | + if (WARN_ON(ret != 0))
---|
| 2325 | + return;
---|
| 2326 | +
---|
| 2327 | + group->domain = group->default_domain;
---|
1508 | 2328 | } |
---|
1509 | 2329 | |
---|
1510 | 2330 | void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) |
---|
.. | .. |
---|
1524 | 2344 | } |
---|
1525 | 2345 | EXPORT_SYMBOL_GPL(iommu_iova_to_phys); |
---|
1526 | 2346 | |
---|
1527 | | -phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain, |
---|
1528 | | - dma_addr_t iova) |
---|
| 2347 | +static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, |
---|
| 2348 | + phys_addr_t paddr, size_t size, size_t *count) |
---|
1529 | 2349 | { |
---|
1530 | | - if (unlikely(domain->ops->iova_to_phys_hard == NULL)) |
---|
1531 | | - return 0; |
---|
| 2350 | + unsigned int pgsize_idx, pgsize_idx_next; |
---|
| 2351 | + unsigned long pgsizes; |
---|
| 2352 | + size_t offset, pgsize, pgsize_next; |
---|
| 2353 | + unsigned long addr_merge = paddr | iova; |
---|
1532 | 2354 | |
---|
1533 | | - return domain->ops->iova_to_phys_hard(domain, iova); |
---|
1534 | | -} |
---|
| 2355 | + /* Page sizes supported by the hardware and small enough for @size */ |
---|
| 2356 | + pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); |
---|
1535 | 2357 | |
---|
1536 | | -uint64_t iommu_iova_to_pte(struct iommu_domain *domain, |
---|
1537 | | - dma_addr_t iova) |
---|
1538 | | -{ |
---|
1539 | | - if (unlikely(domain->ops->iova_to_pte == NULL)) |
---|
1540 | | - return 0; |
---|
| 2358 | + /* Constrain the page sizes further based on the maximum alignment */ |
---|
| 2359 | + if (likely(addr_merge)) |
---|
| 2360 | + pgsizes &= GENMASK(__ffs(addr_merge), 0); |
---|
1541 | 2361 | |
---|
1542 | | - return domain->ops->iova_to_pte(domain, iova); |
---|
1543 | | -} |
---|
| 2362 | + /* Make sure we have at least one suitable page size */ |
---|
| 2363 | + BUG_ON(!pgsizes); |
---|
1544 | 2364 | |
---|
1545 | | -bool iommu_is_iova_coherent(struct iommu_domain *domain, dma_addr_t iova) |
---|
1546 | | -{ |
---|
1547 | | - if (unlikely(domain->ops->is_iova_coherent == NULL)) |
---|
1548 | | - return 0; |
---|
| 2365 | + /* Pick the biggest page size remaining */ |
---|
| 2366 | + pgsize_idx = __fls(pgsizes); |
---|
| 2367 | + pgsize = BIT(pgsize_idx); |
---|
| 2368 | + if (!count) |
---|
| 2369 | + return pgsize; |
---|
1549 | 2370 | |
---|
1550 | | - return domain->ops->is_iova_coherent(domain, iova); |
---|
1551 | | -} |
---|
1552 | 2371 | |
---|
1553 | | -size_t iommu_pgsize(unsigned long pgsize_bitmap, |
---|
1554 | | - unsigned long addr_merge, size_t size) |
---|
1555 | | -{ |
---|
1556 | | - unsigned int pgsize_idx; |
---|
1557 | | - size_t pgsize; |
---|
| 2372 | + /* Find the next biggest supported page size, if it exists */
---|
| 2373 | + pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); |
---|
| 2374 | + if (!pgsizes) |
---|
| 2375 | + goto out_set_count; |
---|
1558 | 2376 | |
---|
1559 | | - /* Max page size that still fits into 'size' */ |
---|
1560 | | - pgsize_idx = __fls(size); |
---|
| 2377 | + pgsize_idx_next = __ffs(pgsizes); |
---|
| 2378 | + pgsize_next = BIT(pgsize_idx_next); |
---|
1561 | 2379 | |
---|
1562 | | - /* need to consider alignment requirements ? */ |
---|
1563 | | - if (likely(addr_merge)) { |
---|
1564 | | - /* Max page size allowed by address */ |
---|
1565 | | - unsigned int align_pgsize_idx = __ffs(addr_merge); |
---|
1566 | | - pgsize_idx = min(pgsize_idx, align_pgsize_idx); |
---|
1567 | | - } |
---|
| 2380 | + /* |
---|
| 2381 | + * There's no point trying a bigger page size unless the virtual |
---|
| 2382 | + * and physical addresses are similarly offset within the larger page. |
---|
| 2383 | + */ |
---|
| 2384 | + if ((iova ^ paddr) & (pgsize_next - 1)) |
---|
| 2385 | + goto out_set_count; |
---|
1568 | 2386 | |
---|
1569 | | - /* build a mask of acceptable page sizes */ |
---|
1570 | | - pgsize = (1UL << (pgsize_idx + 1)) - 1; |
---|
| 2387 | + /* Calculate the offset to the next page size alignment boundary */ |
---|
| 2388 | + offset = pgsize_next - (addr_merge & (pgsize_next - 1)); |
---|
1571 | 2389 | |
---|
1572 | | - /* throw away page sizes not supported by the hardware */ |
---|
1573 | | - pgsize &= pgsize_bitmap; |
---|
| 2390 | + /* |
---|
| 2391 | + * If size is big enough to accommodate the larger page, reduce |
---|
| 2392 | + * the number of smaller pages. |
---|
| 2393 | + */ |
---|
| 2394 | + if (offset + pgsize_next <= size) |
---|
| 2395 | + size = offset; |
---|
1574 | 2396 | |
---|
1575 | | - /* make sure we're still sane */ |
---|
1576 | | - if (!pgsize) { |
---|
1577 | | - pr_err("invalid pgsize/addr/size! 0x%lx 0x%lx 0x%zx\n", |
---|
1578 | | - pgsize_bitmap, addr_merge, size); |
---|
1579 | | - BUG(); |
---|
1580 | | - } |
---|
1581 | | - |
---|
1582 | | - /* pick the biggest page */ |
---|
1583 | | - pgsize_idx = __fls(pgsize); |
---|
1584 | | - pgsize = 1UL << pgsize_idx; |
---|
1585 | | - |
---|
| 2397 | +out_set_count: |
---|
| 2398 | + *count = size >> pgsize_idx; |
---|
1586 | 2399 | return pgsize; |
---|
1587 | 2400 | } |
---|
1588 | 2401 | |
---|
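
To make the mask arithmetic in iommu_pgsize() concrete, here is a standalone userspace replication of the selection step. It is a sketch assuming 64-bit longs and a hardware bitmap of 4KiB|2MiB|1GiB, and it omits the pgsize_next refinement, which would not change this example because iova and paddr differ modulo 1GiB:

	#include <stdio.h>

	/* Assumes 64-bit unsigned long, like the kernel's GENMASK() on LP64 */
	#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))

	static unsigned int fls_(unsigned long x) { return 63 - __builtin_clzl(x); }
	static unsigned int ffs_(unsigned long x) { return __builtin_ctzl(x); }

	int main(void)
	{
		/* Hardware supports 4KiB, 2MiB and 1GiB pages */
		unsigned long bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);
		unsigned long iova = 0x200000, paddr = 0x400000, size = 0x400000;
		unsigned long addr_merge = paddr | iova;
		unsigned long pgsizes;
		unsigned int idx;

		/* Page sizes supported by the hardware and small enough for size */
		pgsizes = bitmap & GENMASK(fls_(size), 0);	/* 4KiB and 2MiB */

		/* Constrain further based on the combined address alignment */
		if (addr_merge)
			pgsizes &= GENMASK(ffs_(addr_merge), 0);	/* still 4KiB|2MiB */

		idx = fls_(pgsizes);	/* pick the biggest: bit 21 */
		printf("pgsize = 0x%lx, count = %lu\n", 1UL << idx, size >> idx);
		/* prints: pgsize = 0x200000, count = 2 -- two 2MiB pages cover 4MiB */
		return 0;
	}
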
1589 | | -int iommu_map(struct iommu_domain *domain, unsigned long iova, |
---|
1590 | | - phys_addr_t paddr, size_t size, int prot) |
---|
| 2402 | +static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, |
---|
| 2403 | + phys_addr_t paddr, size_t size, int prot, |
---|
| 2404 | + gfp_t gfp, size_t *mapped) |
---|
1591 | 2405 | { |
---|
| 2406 | + const struct iommu_ops *ops = domain->ops; |
---|
| 2407 | + size_t pgsize, count; |
---|
| 2408 | + int ret; |
---|
| 2409 | + |
---|
| 2410 | + pgsize = iommu_pgsize(domain, iova, paddr, size, &count); |
---|
| 2411 | + |
---|
| 2412 | + pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", |
---|
| 2413 | + iova, &paddr, pgsize, count); |
---|
| 2414 | + |
---|
| 2415 | + if (ops->map_pages) { |
---|
| 2416 | + ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, |
---|
| 2417 | + gfp, mapped); |
---|
| 2418 | + } else { |
---|
| 2419 | + ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); |
---|
| 2420 | + *mapped = ret ? 0 : pgsize; |
---|
| 2421 | + } |
---|
| 2422 | + |
---|
| 2423 | + return ret; |
---|
| 2424 | +} |
---|
| 2425 | + |
---|
| 2426 | +static int __iommu_map(struct iommu_domain *domain, unsigned long iova, |
---|
| 2427 | + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) |
---|
| 2428 | +{ |
---|
| 2429 | + const struct iommu_ops *ops = domain->ops; |
---|
1592 | 2430 | unsigned long orig_iova = iova; |
---|
1593 | 2431 | unsigned int min_pagesz; |
---|
1594 | 2432 | size_t orig_size = size; |
---|
1595 | 2433 | phys_addr_t orig_paddr = paddr; |
---|
1596 | 2434 | int ret = 0; |
---|
1597 | 2435 | |
---|
1598 | | - if (unlikely(domain->ops->map == NULL || |
---|
| 2436 | + if (unlikely(!(ops->map || ops->map_pages) || |
---|
1599 | 2437 | domain->pgsize_bitmap == 0UL)) |
---|
1600 | 2438 | return -ENODEV; |
---|
1601 | 2439 | |
---|
.. | .. |
---|
1619 | 2457 | pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); |
---|
1620 | 2458 | |
---|
1621 | 2459 | while (size) { |
---|
1622 | | - size_t pgsize = iommu_pgsize(domain->pgsize_bitmap, |
---|
1623 | | - iova | paddr, size); |
---|
| 2460 | + size_t mapped = 0; |
---|
1624 | 2461 | |
---|
1625 | | - pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", |
---|
1626 | | - iova, &paddr, pgsize); |
---|
| 2462 | + ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp, |
---|
| 2463 | + &mapped); |
---|
| 2464 | + /* |
---|
| 2465 | + * Some pages may have been mapped, even if an error occurred, |
---|
| 2466 | + * so we should account for those so they can be unmapped. |
---|
| 2467 | + */ |
---|
| 2468 | + size -= mapped; |
---|
1627 | 2469 | |
---|
1628 | | - ret = domain->ops->map(domain, iova, paddr, pgsize, prot); |
---|
1629 | 2470 | if (ret) |
---|
1630 | 2471 | break; |
---|
1631 | 2472 | |
---|
1632 | | - iova += pgsize; |
---|
1633 | | - paddr += pgsize; |
---|
1634 | | - size -= pgsize; |
---|
| 2473 | + iova += mapped; |
---|
| 2474 | + paddr += mapped; |
---|
1635 | 2475 | } |
---|
1636 | 2476 | |
---|
1637 | 2477 | /* unroll mapping in case something went wrong */ |
---|
1638 | 2478 | if (ret) |
---|
1639 | 2479 | iommu_unmap(domain, orig_iova, orig_size - size); |
---|
1640 | 2480 | else |
---|
1641 | | - trace_map(domain, orig_iova, orig_paddr, orig_size, prot); |
---|
| 2481 | + trace_map(orig_iova, orig_paddr, orig_size); |
---|
1642 | 2482 | |
---|
1643 | 2483 | return ret; |
---|
1644 | 2484 | } |
---|
| 2485 | + |
---|
| 2486 | +static int _iommu_map(struct iommu_domain *domain, unsigned long iova, |
---|
| 2487 | + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) |
---|
| 2488 | +{ |
---|
| 2489 | + const struct iommu_ops *ops = domain->ops; |
---|
| 2490 | + int ret; |
---|
| 2491 | + |
---|
| 2492 | + ret = __iommu_map(domain, iova, paddr, size, prot, gfp); |
---|
| 2493 | + if (ret == 0 && ops->iotlb_sync_map) |
---|
| 2494 | + ops->iotlb_sync_map(domain, iova, size); |
---|
| 2495 | + |
---|
| 2496 | + return ret; |
---|
| 2497 | +} |
---|
| 2498 | + |
---|
| 2499 | +int iommu_map(struct iommu_domain *domain, unsigned long iova, |
---|
| 2500 | + phys_addr_t paddr, size_t size, int prot) |
---|
| 2501 | +{ |
---|
| 2502 | + might_sleep(); |
---|
| 2503 | + return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL); |
---|
| 2504 | +} |
---|
1645 | 2505 | EXPORT_SYMBOL_GPL(iommu_map); |
---|
| 2506 | + |
---|
| 2507 | +int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, |
---|
| 2508 | + phys_addr_t paddr, size_t size, int prot) |
---|
| 2509 | +{ |
---|
| 2510 | + return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC); |
---|
| 2511 | +} |
---|
| 2512 | +EXPORT_SYMBOL_GPL(iommu_map_atomic); |
---|
| 2513 | + |
---|
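
A minimal end-to-end sketch of the mapping API as exported above, assuming a device sitting behind an IOMMU and a caller-owned page; iommu_map() may sleep (hence the might_sleep() above), while iommu_map_atomic() exists for contexts that cannot. The example_* name is an assumption, not code from this file:

	static int example_map_lifecycle(struct device *dev, struct page *pg,
					 unsigned long iova)
	{
		struct iommu_domain *domain;
		int ret;

		domain = iommu_domain_alloc(dev->bus);
		if (!domain)
			return -ENOMEM;

		ret = iommu_attach_device(domain, dev);
		if (ret)
			goto out_free;

		ret = iommu_map(domain, iova, page_to_phys(pg), PAGE_SIZE,
				IOMMU_READ | IOMMU_WRITE);
		if (!ret) {
			/* ... device DMA to "iova" is now legal ... */
			iommu_unmap(domain, iova, PAGE_SIZE);
		}

		iommu_detach_device(domain, dev);
	out_free:
		iommu_domain_free(domain);
		return ret;
	}
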
| 2514 | +static size_t __iommu_unmap_pages(struct iommu_domain *domain, |
---|
| 2515 | + unsigned long iova, size_t size, |
---|
| 2516 | + struct iommu_iotlb_gather *iotlb_gather) |
---|
| 2517 | +{ |
---|
| 2518 | + const struct iommu_ops *ops = domain->ops; |
---|
| 2519 | + size_t pgsize, count; |
---|
| 2520 | + |
---|
| 2521 | + pgsize = iommu_pgsize(domain, iova, iova, size, &count); |
---|
| 2522 | + return ops->unmap_pages ? |
---|
| 2523 | + ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) : |
---|
| 2524 | + ops->unmap(domain, iova, pgsize, iotlb_gather); |
---|
| 2525 | +} |
---|
1646 | 2526 | |
---|
1647 | 2527 | static size_t __iommu_unmap(struct iommu_domain *domain, |
---|
1648 | 2528 | unsigned long iova, size_t size, |
---|
1649 | | - bool sync) |
---|
| 2529 | + struct iommu_iotlb_gather *iotlb_gather) |
---|
1650 | 2530 | { |
---|
1651 | 2531 | const struct iommu_ops *ops = domain->ops; |
---|
1652 | 2532 | size_t unmapped_page, unmapped = 0; |
---|
1653 | 2533 | unsigned long orig_iova = iova; |
---|
1654 | 2534 | unsigned int min_pagesz; |
---|
1655 | 2535 | |
---|
1656 | | - if (unlikely(ops->unmap == NULL || |
---|
| 2536 | + if (unlikely(!(ops->unmap || ops->unmap_pages) || |
---|
1657 | 2537 | domain->pgsize_bitmap == 0UL)) |
---|
1658 | 2538 | return 0; |
---|
1659 | 2539 | |
---|
.. | .. |
---|
1681 | 2561 | * or we hit an area that isn't mapped. |
---|
1682 | 2562 | */ |
---|
1683 | 2563 | while (unmapped < size) { |
---|
1684 | | - size_t pgsize = iommu_pgsize(domain->pgsize_bitmap, iova, size - unmapped); |
---|
1685 | | - |
---|
1686 | | - unmapped_page = ops->unmap(domain, iova, pgsize); |
---|
| 2564 | + unmapped_page = __iommu_unmap_pages(domain, iova, |
---|
| 2565 | + size - unmapped, |
---|
| 2566 | + iotlb_gather); |
---|
1687 | 2567 | if (!unmapped_page) |
---|
1688 | 2568 | break; |
---|
1689 | | - |
---|
1690 | | - if (sync && ops->iotlb_range_add) |
---|
1691 | | - ops->iotlb_range_add(domain, iova, pgsize); |
---|
1692 | 2569 | |
---|
1693 | 2570 | pr_debug("unmapped: iova 0x%lx size 0x%zx\n", |
---|
1694 | 2571 | iova, unmapped_page); |
---|
.. | .. |
---|
1697 | 2574 | unmapped += unmapped_page; |
---|
1698 | 2575 | } |
---|
1699 | 2576 | |
---|
1700 | | - if (sync && ops->iotlb_sync) |
---|
1701 | | - ops->iotlb_sync(domain); |
---|
1702 | | - |
---|
1703 | | - trace_unmap(domain, orig_iova, size, unmapped); |
---|
| 2577 | + trace_unmap(orig_iova, size, unmapped); |
---|
1704 | 2578 | return unmapped; |
---|
1705 | 2579 | } |
---|
1706 | 2580 | |
---|
1707 | 2581 | size_t iommu_unmap(struct iommu_domain *domain, |
---|
1708 | 2582 | unsigned long iova, size_t size) |
---|
1709 | 2583 | { |
---|
1710 | | - return __iommu_unmap(domain, iova, size, true); |
---|
| 2584 | + struct iommu_iotlb_gather iotlb_gather; |
---|
| 2585 | + size_t ret; |
---|
| 2586 | + |
---|
| 2587 | + iommu_iotlb_gather_init(&iotlb_gather); |
---|
| 2588 | + ret = __iommu_unmap(domain, iova, size, &iotlb_gather); |
---|
| 2589 | + iommu_iotlb_sync(domain, &iotlb_gather); |
---|
| 2590 | + |
---|
| 2591 | + return ret; |
---|
1711 | 2592 | } |
---|
1712 | 2593 | EXPORT_SYMBOL_GPL(iommu_unmap); |
---|
1713 | 2594 | |
---|
1714 | 2595 | size_t iommu_unmap_fast(struct iommu_domain *domain, |
---|
1715 | | - unsigned long iova, size_t size) |
---|
| 2596 | + unsigned long iova, size_t size, |
---|
| 2597 | + struct iommu_iotlb_gather *iotlb_gather) |
---|
1716 | 2598 | { |
---|
1717 | | - return __iommu_unmap(domain, iova, size, false); |
---|
| 2599 | + return __iommu_unmap(domain, iova, size, iotlb_gather); |
---|
1718 | 2600 | } |
---|
1719 | 2601 | EXPORT_SYMBOL_GPL(iommu_unmap_fast); |
---|
1720 | 2602 | |
---|
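
The split between iommu_unmap() and iommu_unmap_fast() exists so that callers can amortise TLB invalidation across many unmaps. A sketch of the batching pattern, with the iova/size pairs assumed to come from the caller:

	static void example_unmap_batch(struct iommu_domain *domain,
					unsigned long *iovas, size_t *sizes,
					unsigned int n)
	{
		struct iommu_iotlb_gather gather;
		unsigned int i;

		iommu_iotlb_gather_init(&gather);
		for (i = 0; i < n; i++)
			iommu_unmap_fast(domain, iovas[i], sizes[i], &gather);

		/* One sync flushes everything accumulated above */
		iommu_iotlb_sync(domain, &gather);
	}
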
1721 | | -size_t iommu_map_sg(struct iommu_domain *domain, |
---|
1722 | | - unsigned long iova, struct scatterlist *sg, |
---|
1723 | | - unsigned int nents, int prot) |
---|
| 2603 | +static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, |
---|
| 2604 | + struct scatterlist *sg, unsigned int nents, int prot, |
---|
| 2605 | + gfp_t gfp) |
---|
1724 | 2606 | { |
---|
1725 | | - size_t mapped; |
---|
1726 | | - |
---|
1727 | | - mapped = domain->ops->map_sg(domain, iova, sg, nents, prot); |
---|
1728 | | - trace_map_sg(domain, iova, mapped, prot); |
---|
1729 | | - return mapped; |
---|
1730 | | -} |
---|
1731 | | -EXPORT_SYMBOL(iommu_map_sg); |
---|
1732 | | - |
---|
1733 | | -size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, |
---|
1734 | | - struct scatterlist *sg, unsigned int nents, int prot) |
---|
1735 | | -{ |
---|
1736 | | - struct scatterlist *s; |
---|
1737 | | - size_t mapped = 0; |
---|
1738 | | - unsigned int i, min_pagesz; |
---|
| 2607 | + const struct iommu_ops *ops = domain->ops; |
---|
| 2608 | + size_t len = 0, mapped = 0; |
---|
| 2609 | + phys_addr_t start; |
---|
| 2610 | + unsigned int i = 0; |
---|
1739 | 2611 | int ret; |
---|
1740 | 2612 | |
---|
1741 | | - if (unlikely(domain->pgsize_bitmap == 0UL)) |
---|
1742 | | - return 0; |
---|
| 2613 | + if (ops->map_sg) { |
---|
| 2614 | + ret = ops->map_sg(domain, iova, sg, nents, prot, gfp, &mapped); |
---|
1743 | 2615 | |
---|
1744 | | - min_pagesz = 1 << __ffs(domain->pgsize_bitmap); |
---|
| 2616 | + if (ops->iotlb_sync_map) |
---|
| 2617 | + ops->iotlb_sync_map(domain, iova, mapped); |
---|
1745 | 2618 | |
---|
1746 | | - for_each_sg(sg, s, nents, i) { |
---|
1747 | | - phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset; |
---|
1748 | | - |
---|
1749 | | - /* |
---|
1750 | | - * We are mapping on IOMMU page boundaries, so offset within |
---|
1751 | | - * the page must be 0. However, the IOMMU may support pages |
---|
1752 | | - * smaller than PAGE_SIZE, so s->offset may still represent |
---|
1753 | | - * an offset of that boundary within the CPU page. |
---|
1754 | | - */ |
---|
1755 | | - if (!IS_ALIGNED(s->offset, min_pagesz)) |
---|
1756 | | - goto out_err; |
---|
1757 | | - |
---|
1758 | | - ret = iommu_map(domain, iova + mapped, phys, s->length, prot); |
---|
1759 | 2619 | if (ret) |
---|
1760 | 2620 | goto out_err; |
---|
1761 | 2621 | |
---|
1762 | | - mapped += s->length; |
---|
| 2622 | + return mapped; |
---|
1763 | 2623 | } |
---|
1764 | 2624 | |
---|
| 2625 | + while (i <= nents) { |
---|
| 2626 | + phys_addr_t s_phys = sg_phys(sg); |
---|
| 2627 | + |
---|
| 2628 | + if (len && s_phys != start + len) { |
---|
| 2629 | + ret = __iommu_map(domain, iova + mapped, start, |
---|
| 2630 | + len, prot, gfp); |
---|
| 2631 | + |
---|
| 2632 | + if (ret) |
---|
| 2633 | + goto out_err; |
---|
| 2634 | + |
---|
| 2635 | + mapped += len; |
---|
| 2636 | + len = 0; |
---|
| 2637 | + } |
---|
| 2638 | + |
---|
| 2639 | + if (len) { |
---|
| 2640 | + len += sg->length; |
---|
| 2641 | + } else { |
---|
| 2642 | + len = sg->length; |
---|
| 2643 | + start = s_phys; |
---|
| 2644 | + } |
---|
| 2645 | + |
---|
| 2646 | + if (++i < nents) |
---|
| 2647 | + sg = sg_next(sg); |
---|
| 2648 | + } |
---|
| 2649 | + |
---|
| 2650 | + if (ops->iotlb_sync_map) |
---|
| 2651 | + ops->iotlb_sync_map(domain, iova, mapped); |
---|
1765 | 2652 | return mapped; |
---|
1766 | 2653 | |
---|
1767 | 2654 | out_err: |
---|
.. | .. |
---|
1771 | 2658 | return 0; |
---|
1772 | 2659 | |
---|
1773 | 2660 | } |
---|
1774 | | -EXPORT_SYMBOL_GPL(default_iommu_map_sg); |
---|
| 2661 | + |
---|
| 2662 | +size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, |
---|
| 2663 | + struct scatterlist *sg, unsigned int nents, int prot) |
---|
| 2664 | +{ |
---|
| 2665 | + might_sleep(); |
---|
| 2666 | + return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL); |
---|
| 2667 | +} |
---|
| 2668 | +EXPORT_SYMBOL_GPL(iommu_map_sg); |
---|
| 2669 | + |
---|
| 2670 | +size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, |
---|
| 2671 | + struct scatterlist *sg, unsigned int nents, int prot) |
---|
| 2672 | +{ |
---|
| 2673 | + return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); |
---|
| 2674 | +} |
---|
| 2675 | +EXPORT_SYMBOL_GPL(iommu_map_sg_atomic); |
---|
1775 | 2676 | |
---|
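
A short usage sketch for the scatterlist path, assuming a caller-built sg_table; on failure __iommu_map_sg() above has already undone any partial mappings, so the wrapper simply returns 0:

	static int example_map_sgtable(struct iommu_domain *domain,
				       unsigned long iova, struct sg_table *sgt)
	{
		size_t mapped;

		mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
				      IOMMU_READ | IOMMU_WRITE);

		/* Returns bytes mapped; 0 means failure after rollback */
		return mapped ? 0 : -ENOMEM;
	}
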
1776 | 2677 | int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, |
---|
1777 | 2678 | phys_addr_t paddr, u64 size, int prot) |
---|
.. | .. |
---|
1835 | 2736 | } |
---|
1836 | 2737 | EXPORT_SYMBOL_GPL(report_iommu_fault); |
---|
1837 | 2738 | |
---|
1838 | | -struct dentry *iommu_debugfs_top; |
---|
1839 | | -EXPORT_SYMBOL_GPL(iommu_debugfs_top); |
---|
1840 | | - |
---|
1841 | 2739 | static int __init iommu_init(void) |
---|
1842 | 2740 | { |
---|
1843 | 2741 | iommu_group_kset = kset_create_and_add("iommu_groups", |
---|
.. | .. |
---|
1856 | 2754 | struct iommu_domain_geometry *geometry; |
---|
1857 | 2755 | bool *paging; |
---|
1858 | 2756 | int ret = 0; |
---|
1859 | | - u32 *count; |
---|
1860 | 2757 | |
---|
1861 | 2758 | switch (attr) { |
---|
1862 | 2759 | case DOMAIN_ATTR_GEOMETRY: |
---|
.. | .. |
---|
1867 | 2764 | case DOMAIN_ATTR_PAGING: |
---|
1868 | 2765 | paging = data; |
---|
1869 | 2766 | *paging = (domain->pgsize_bitmap != 0UL); |
---|
1870 | | - break; |
---|
1871 | | - case DOMAIN_ATTR_WINDOWS: |
---|
1872 | | - count = data; |
---|
1873 | | - |
---|
1874 | | - if (domain->ops->domain_get_windows != NULL) |
---|
1875 | | - *count = domain->ops->domain_get_windows(domain); |
---|
1876 | | - else |
---|
1877 | | - ret = -ENODEV; |
---|
1878 | | - |
---|
1879 | 2767 | break; |
---|
1880 | 2768 | default: |
---|
1881 | 2769 | if (!domain->ops->domain_get_attr) |
---|
.. | .. |
---|
1892 | 2780 | enum iommu_attr attr, void *data) |
---|
1893 | 2781 | { |
---|
1894 | 2782 | int ret = 0; |
---|
1895 | | - u32 *count; |
---|
1896 | 2783 | |
---|
1897 | 2784 | switch (attr) { |
---|
1898 | | - case DOMAIN_ATTR_WINDOWS: |
---|
1899 | | - count = data; |
---|
1900 | | - |
---|
1901 | | - if (domain->ops->domain_set_windows != NULL) |
---|
1902 | | - ret = domain->ops->domain_set_windows(domain, *count); |
---|
1903 | | - else |
---|
1904 | | - ret = -ENODEV; |
---|
1905 | | - |
---|
1906 | | - break; |
---|
1907 | 2785 | default: |
---|
1908 | 2786 | if (domain->ops->domain_set_attr == NULL) |
---|
1909 | 2787 | return -EINVAL; |
---|
.. | .. |
---|
1932 | 2810 | } |
---|
1933 | 2811 | |
---|
1934 | 2812 | /** |
---|
1935 | | - * iommu_trigger_fault() - trigger an IOMMU fault |
---|
1936 | | - * @domain: iommu domain |
---|
| 2813 | + * generic_iommu_put_resv_regions - Reserved region driver helper |
---|
| 2814 | + * @dev: device for which to free reserved regions |
---|
| 2815 | + * @list: reserved region list for device |
---|
1937 | 2816 | * |
---|
1938 | | - * Triggers a fault on the device to which this domain is attached. |
---|
1939 | | - * |
---|
1940 | | - * This function should only be used for debugging purposes, for obvious |
---|
1941 | | - * reasons. |
---|
| 2817 | + * IOMMU drivers can use this to implement their .put_resv_regions() callback |
---|
| 2818 | + * for simple reservations. Memory allocated for each reserved region will be |
---|
| 2819 | + * freed. If an IOMMU driver allocates additional resources per region, it
---|
| 2820 | + * must implement a custom callback.
---|
1942 | 2821 | */ |
---|
1943 | | -void iommu_trigger_fault(struct iommu_domain *domain, unsigned long flags) |
---|
| 2822 | +void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list) |
---|
1944 | 2823 | { |
---|
1945 | | - if (domain->ops->trigger_fault) |
---|
1946 | | - domain->ops->trigger_fault(domain, flags); |
---|
| 2824 | + struct iommu_resv_region *entry, *next; |
---|
| 2825 | + |
---|
| 2826 | + list_for_each_entry_safe(entry, next, list, list) |
---|
| 2827 | + kfree(entry); |
---|
1947 | 2828 | } |
---|
| 2829 | +EXPORT_SYMBOL(generic_iommu_put_resv_regions); |
---|
1948 | 2830 | |
---|
1949 | 2831 | struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, |
---|
1950 | 2832 | size_t length, int prot, |
---|
.. | .. |
---|
1965 | 2847 | } |
---|
1966 | 2848 | EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); |
---|
1967 | 2849 | |
---|
1968 | | -/* Request that a device is direct mapped by the IOMMU */ |
---|
1969 | | -int iommu_request_dm_for_dev(struct device *dev) |
---|
| 2850 | +void iommu_set_default_passthrough(bool cmd_line) |
---|
1970 | 2851 | { |
---|
1971 | | - struct iommu_domain *dm_domain; |
---|
1972 | | - struct iommu_group *group; |
---|
1973 | | - int ret; |
---|
| 2852 | + if (cmd_line) |
---|
| 2853 | + iommu_set_cmd_line_dma_api(); |
---|
1974 | 2854 | |
---|
1975 | | - /* Device must already be in a group before calling this function */ |
---|
1976 | | - group = iommu_group_get(dev); |
---|
1977 | | - if (!group) |
---|
1978 | | - return -EINVAL; |
---|
1979 | | - |
---|
1980 | | - mutex_lock(&group->mutex); |
---|
1981 | | - |
---|
1982 | | - /* Check if the default domain is already direct mapped */ |
---|
1983 | | - ret = 0; |
---|
1984 | | - if (group->default_domain && |
---|
1985 | | - group->default_domain->type == IOMMU_DOMAIN_IDENTITY) |
---|
1986 | | - goto out; |
---|
1987 | | - |
---|
1988 | | - /* Don't change mappings of existing devices */ |
---|
1989 | | - ret = -EBUSY; |
---|
1990 | | - if (iommu_group_device_count(group) != 1) |
---|
1991 | | - goto out; |
---|
1992 | | - |
---|
1993 | | - /* Allocate a direct mapped domain */ |
---|
1994 | | - ret = -ENOMEM; |
---|
1995 | | - dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY); |
---|
1996 | | - if (!dm_domain) |
---|
1997 | | - goto out; |
---|
1998 | | - |
---|
1999 | | - /* Attach the device to the domain */ |
---|
2000 | | - ret = __iommu_attach_group(dm_domain, group); |
---|
2001 | | - if (ret) { |
---|
2002 | | - iommu_domain_free(dm_domain); |
---|
2003 | | - goto out; |
---|
2004 | | - } |
---|
2005 | | - |
---|
2006 | | - /* Make the direct mapped domain the default for this group */ |
---|
2007 | | - if (group->default_domain) |
---|
2008 | | - iommu_domain_free(group->default_domain); |
---|
2009 | | - group->default_domain = dm_domain; |
---|
2010 | | - |
---|
2011 | | - pr_info("Using direct mapping for device %s\n", dev_name(dev)); |
---|
2012 | | - |
---|
2013 | | - ret = 0; |
---|
2014 | | -out: |
---|
2015 | | - mutex_unlock(&group->mutex); |
---|
2016 | | - iommu_group_put(group); |
---|
2017 | | - |
---|
2018 | | - return ret; |
---|
| 2855 | + iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; |
---|
2019 | 2856 | } |
---|
| 2857 | + |
---|
| 2858 | +void iommu_set_default_translated(bool cmd_line) |
---|
| 2859 | +{ |
---|
| 2860 | + if (cmd_line) |
---|
| 2861 | + iommu_set_cmd_line_dma_api(); |
---|
| 2862 | + |
---|
| 2863 | + iommu_def_domain_type = IOMMU_DOMAIN_DMA; |
---|
| 2864 | +} |
---|
| 2865 | + |
---|
| 2866 | +bool iommu_default_passthrough(void) |
---|
| 2867 | +{ |
---|
| 2868 | + return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; |
---|
| 2869 | +} |
---|
| 2870 | +EXPORT_SYMBOL_GPL(iommu_default_passthrough); |
---|
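
A hedged sketch of a typical caller, modelled on the arch command-line handlers (the parameter name and function here are assumptions): arch or firmware code picks the default domain type before IOMMU drivers probe, and the cmd_line flag records that the choice came from the user rather than from Kconfig:

	static int __init example_iommu_setup(char *str)
	{
		if (!strcmp(str, "pt"))
			iommu_set_default_passthrough(true);
		else if (!strcmp(str, "nopt"))
			iommu_set_default_translated(true);
		return 0;
	}
	early_param("example_iommu", example_iommu_setup);
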
2020 | 2871 | |
---|
2021 | 2872 | const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) |
---|
2022 | 2873 | { |
---|
.. | .. |
---|
2036 | 2887 | int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, |
---|
2037 | 2888 | const struct iommu_ops *ops) |
---|
2038 | 2889 | { |
---|
2039 | | - struct iommu_fwspec *fwspec = dev->iommu_fwspec; |
---|
| 2890 | + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
---|
2040 | 2891 | |
---|
2041 | 2892 | if (fwspec) |
---|
2042 | 2893 | return ops == fwspec->ops ? 0 : -EINVAL; |
---|
2043 | 2894 | |
---|
2044 | | - fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL); |
---|
| 2895 | + if (!dev_iommu_get(dev)) |
---|
| 2896 | + return -ENOMEM; |
---|
| 2897 | + |
---|
| 2898 | + /* Preallocate for the overwhelmingly common case of 1 ID */ |
---|
| 2899 | + fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); |
---|
2045 | 2900 | if (!fwspec) |
---|
2046 | 2901 | return -ENOMEM; |
---|
2047 | 2902 | |
---|
2048 | 2903 | of_node_get(to_of_node(iommu_fwnode)); |
---|
2049 | 2904 | fwspec->iommu_fwnode = iommu_fwnode; |
---|
2050 | 2905 | fwspec->ops = ops; |
---|
2051 | | - dev->iommu_fwspec = fwspec; |
---|
| 2906 | + dev_iommu_fwspec_set(dev, fwspec); |
---|
2052 | 2907 | return 0; |
---|
2053 | 2908 | } |
---|
2054 | 2909 | EXPORT_SYMBOL_GPL(iommu_fwspec_init); |
---|
2055 | 2910 | |
---|
2056 | 2911 | void iommu_fwspec_free(struct device *dev) |
---|
2057 | 2912 | { |
---|
2058 | | - struct iommu_fwspec *fwspec = dev->iommu_fwspec; |
---|
| 2913 | + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
---|
2059 | 2914 | |
---|
2060 | 2915 | if (fwspec) { |
---|
2061 | 2916 | fwnode_handle_put(fwspec->iommu_fwnode); |
---|
2062 | 2917 | kfree(fwspec); |
---|
2063 | | - dev->iommu_fwspec = NULL; |
---|
| 2918 | + dev_iommu_fwspec_set(dev, NULL); |
---|
2064 | 2919 | } |
---|
2065 | 2920 | } |
---|
2066 | 2921 | EXPORT_SYMBOL_GPL(iommu_fwspec_free); |
---|
2067 | 2922 | |
---|
2068 | 2923 | int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) |
---|
2069 | 2924 | { |
---|
2070 | | - struct iommu_fwspec *fwspec = dev->iommu_fwspec; |
---|
2071 | | - size_t size; |
---|
2072 | | - int i; |
---|
| 2925 | + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
---|
| 2926 | + int i, new_num; |
---|
2073 | 2927 | |
---|
2074 | 2928 | if (!fwspec) |
---|
2075 | 2929 | return -EINVAL; |
---|
2076 | 2930 | |
---|
2077 | | - size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]); |
---|
2078 | | - if (size > sizeof(*fwspec)) { |
---|
2079 | | - fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL); |
---|
| 2931 | + new_num = fwspec->num_ids + num_ids; |
---|
| 2932 | + if (new_num > 1) { |
---|
| 2933 | + fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), |
---|
| 2934 | + GFP_KERNEL); |
---|
2080 | 2935 | if (!fwspec) |
---|
2081 | 2936 | return -ENOMEM; |
---|
2082 | 2937 | |
---|
2083 | | - dev->iommu_fwspec = fwspec; |
---|
| 2938 | + dev_iommu_fwspec_set(dev, fwspec); |
---|
2084 | 2939 | } |
---|
2085 | 2940 | |
---|
2086 | 2941 | for (i = 0; i < num_ids; i++) |
---|
2087 | 2942 | fwspec->ids[fwspec->num_ids + i] = ids[i]; |
---|
2088 | 2943 | |
---|
2089 | | - fwspec->num_ids += num_ids; |
---|
| 2944 | + fwspec->num_ids = new_num; |
---|
2090 | 2945 | return 0; |
---|
2091 | 2946 | } |
---|
2092 | 2947 | EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); |
---|
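
How firmware-parsing code typically drives these two calls (a sketch modelled on the OF/ACPI callers; the fwnode and stream-ID arguments are assumptions): one iommu_fwspec_init() per device, then IDs appended as they are discovered. The single-ID preallocation above means the common case never hits the krealloc() path:

	static int example_register_fwspec(struct device *dev,
					   struct fwnode_handle *fwnode,
					   const struct iommu_ops *ops, u32 sid)
	{
		int ret;

		ret = iommu_fwspec_init(dev, fwnode, ops);
		if (ret)
			return ret;

		/* Append one stream ID; more calls may follow for aliases */
		return iommu_fwspec_add_ids(dev, &sid, 1);
	}
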
| 2948 | + |
---|
| 2949 | +/* |
---|
| 2950 | + * Per device IOMMU features. |
---|
| 2951 | + */ |
---|
| 2952 | +bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat) |
---|
| 2953 | +{ |
---|
| 2954 | + const struct iommu_ops *ops = dev->bus->iommu_ops; |
---|
| 2955 | + |
---|
| 2956 | + if (ops && ops->dev_has_feat) |
---|
| 2957 | + return ops->dev_has_feat(dev, feat); |
---|
| 2958 | + |
---|
| 2959 | + return false; |
---|
| 2960 | +} |
---|
| 2961 | +EXPORT_SYMBOL_GPL(iommu_dev_has_feature); |
---|
| 2962 | + |
---|
| 2963 | +int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) |
---|
| 2964 | +{ |
---|
| 2965 | + if (dev->iommu && dev->iommu->iommu_dev) { |
---|
| 2966 | + const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; |
---|
| 2967 | + |
---|
| 2968 | + if (ops->dev_enable_feat) |
---|
| 2969 | + return ops->dev_enable_feat(dev, feat); |
---|
| 2970 | + } |
---|
| 2971 | + |
---|
| 2972 | + return -ENODEV; |
---|
| 2973 | +} |
---|
| 2974 | +EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); |
---|
| 2975 | + |
---|
| 2976 | +/* |
---|
| 2977 | + * Device drivers should do the necessary cleanup before calling this.
---|
| 2978 | + * For example, before disabling the aux-domain feature, the device driver |
---|
| 2979 | + * should detach all aux-domains. Otherwise, this will return -EBUSY. |
---|
| 2980 | + */ |
---|
| 2981 | +int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) |
---|
| 2982 | +{ |
---|
| 2983 | + if (dev->iommu && dev->iommu->iommu_dev) { |
---|
| 2984 | + const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; |
---|
| 2985 | + |
---|
| 2986 | + if (ops->dev_disable_feat) |
---|
| 2987 | + return ops->dev_disable_feat(dev, feat); |
---|
| 2988 | + } |
---|
| 2989 | + |
---|
| 2990 | + return -EBUSY; |
---|
| 2991 | +} |
---|
| 2992 | +EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); |
---|
| 2993 | + |
---|
| 2994 | +bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) |
---|
| 2995 | +{ |
---|
| 2996 | + if (dev->iommu && dev->iommu->iommu_dev) { |
---|
| 2997 | + const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; |
---|
| 2998 | + |
---|
| 2999 | + if (ops->dev_feat_enabled) |
---|
| 3000 | + return ops->dev_feat_enabled(dev, feat); |
---|
| 3001 | + } |
---|
| 3002 | + |
---|
| 3003 | + return false; |
---|
| 3004 | +} |
---|
| 3005 | +EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled); |
---|
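
A sketch of the intended probe-time pattern for the feature API above ("feat" stands for whichever iommu_dev_features bit the driver cares about; the function name is an assumption): enable the feature, then verify it is reported as enabled before relying on it:

	static int example_setup_feature(struct device *dev,
					 enum iommu_dev_features feat)
	{
		int ret;

		ret = iommu_dev_enable_feature(dev, feat);
		if (ret)
			return ret;	/* unsupported, or the driver refused */

		/* Confirm the feature is now reported as enabled */
		if (!iommu_dev_feature_enabled(dev, feat))
			return -ENODEV;

		return 0;
	}
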
| 3006 | + |
---|
| 3007 | +/* |
---|
| 3008 | + * Aux-domain specific attach/detach. |
---|
| 3009 | + * |
---|
| 3010 | + * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns |
---|
| 3011 | + * true. Also, as long as domains are attached to a device through this |
---|
| 3012 | + * interface, any tries to call iommu_attach_device() should fail |
---|
| 3013 | + * (iommu_detach_device() can't fail, so we fail when trying to re-attach). |
---|
| 3014 | + * This should make us safe against a device being attached to a guest as a |
---|
| 3015 | + * whole while there are still pasid users on it (aux and sva). |
---|
| 3016 | + */ |
---|
| 3017 | +int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) |
---|
| 3018 | +{ |
---|
| 3019 | + int ret = -ENODEV; |
---|
| 3020 | + |
---|
| 3021 | + if (domain->ops->aux_attach_dev) |
---|
| 3022 | + ret = domain->ops->aux_attach_dev(domain, dev); |
---|
| 3023 | + |
---|
| 3024 | + if (!ret) |
---|
| 3025 | + trace_attach_device_to_domain(dev); |
---|
| 3026 | + |
---|
| 3027 | + return ret; |
---|
| 3028 | +} |
---|
| 3029 | +EXPORT_SYMBOL_GPL(iommu_aux_attach_device); |
---|
| 3030 | + |
---|
| 3031 | +void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) |
---|
| 3032 | +{ |
---|
| 3033 | + if (domain->ops->aux_detach_dev) { |
---|
| 3034 | + domain->ops->aux_detach_dev(domain, dev); |
---|
| 3035 | + trace_detach_device_from_domain(dev); |
---|
| 3036 | + } |
---|
| 3037 | +} |
---|
| 3038 | +EXPORT_SYMBOL_GPL(iommu_aux_detach_device); |
---|
| 3039 | + |
---|
| 3040 | +int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) |
---|
| 3041 | +{ |
---|
| 3042 | + int ret = -ENODEV; |
---|
| 3043 | + |
---|
| 3044 | + if (domain->ops->aux_get_pasid) |
---|
| 3045 | + ret = domain->ops->aux_get_pasid(domain, dev); |
---|
| 3046 | + |
---|
| 3047 | + return ret; |
---|
| 3048 | +} |
---|
| 3049 | +EXPORT_SYMBOL_GPL(iommu_aux_get_pasid); |
---|
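
A sketch of the aux-domain lifecycle the comment above describes (the function name is an assumption): enable the feature, attach a private domain, and retrieve the PASID with which the hardware will tag the domain's transactions:

	static int example_aux_lifecycle(struct iommu_domain *domain,
					 struct device *dev)
	{
		int ret, pasid;

		ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
		if (ret)
			return ret;

		ret = iommu_aux_attach_device(domain, dev);
		if (ret)
			goto out_disable;

		pasid = iommu_aux_get_pasid(domain, dev);
		if (pasid < 0) {
			ret = pasid;
			goto out_detach;
		}

		/* ... program "pasid" into the device's PASID table entry ... */
		return 0;

	out_detach:
		iommu_aux_detach_device(domain, dev);
	out_disable:
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_AUX);
		return ret;
	}
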
| 3050 | + |
---|
| 3051 | +/** |
---|
| 3052 | + * iommu_sva_bind_device() - Bind a process address space to a device |
---|
| 3053 | + * @dev: the device |
---|
| 3054 | + * @mm: the mm to bind, caller must hold a reference to it
---|
| | + * @drvdata: opaque data pointer to pass to the IOMMU driver's bind callback
---|
| 3055 | + * |
---|
| 3056 | + * Create a bond between device and address space, allowing the device to access |
---|
| 3057 | + * the mm using the returned PASID. If a bond already exists between @dev and
---|
| 3058 | + * @mm, it is returned and an additional reference is taken. Caller must call |
---|
| 3059 | + * iommu_sva_unbind_device() to release each reference. |
---|
| 3060 | + * |
---|
| 3061 | + * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to |
---|
| 3062 | + * initialize the required SVA features. |
---|
| 3063 | + * |
---|
| 3064 | + * On error, returns an ERR_PTR value. |
---|
| 3065 | + */ |
---|
| 3066 | +struct iommu_sva * |
---|
| 3067 | +iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) |
---|
| 3068 | +{ |
---|
| 3069 | + struct iommu_group *group; |
---|
| 3070 | + struct iommu_sva *handle = ERR_PTR(-EINVAL); |
---|
| 3071 | + const struct iommu_ops *ops = dev->bus->iommu_ops; |
---|
| 3072 | + |
---|
| 3073 | + if (!ops || !ops->sva_bind) |
---|
| 3074 | + return ERR_PTR(-ENODEV); |
---|
| 3075 | + |
---|
| 3076 | + group = iommu_group_get(dev); |
---|
| 3077 | + if (!group) |
---|
| 3078 | + return ERR_PTR(-ENODEV); |
---|
| 3079 | + |
---|
| 3080 | + /* Ensure device count and domain don't change while we're binding */ |
---|
| 3081 | + mutex_lock(&group->mutex); |
---|
| 3082 | + |
---|
| 3083 | + /* |
---|
| 3084 | + * To keep things simple, SVA currently doesn't support IOMMU groups |
---|
| 3085 | + * with more than one device. Existing SVA-capable systems are not |
---|
| 3086 | + * affected by the problems that required IOMMU groups (lack of ACS |
---|
| 3087 | + * isolation, device ID aliasing and other hardware issues). |
---|
| 3088 | + */ |
---|
| 3089 | + if (iommu_group_device_count(group) != 1) |
---|
| 3090 | + goto out_unlock; |
---|
| 3091 | + |
---|
| 3092 | + handle = ops->sva_bind(dev, mm, drvdata); |
---|
| 3093 | + |
---|
| 3094 | +out_unlock: |
---|
| 3095 | + mutex_unlock(&group->mutex); |
---|
| 3096 | + iommu_group_put(group); |
---|
| 3097 | + |
---|
| 3098 | + return handle; |
---|
| 3099 | +} |
---|
| 3100 | +EXPORT_SYMBOL_GPL(iommu_sva_bind_device); |
---|
| 3101 | + |
---|
| 3102 | +/** |
---|
| 3103 | + * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device |
---|
| 3104 | + * @handle: the handle returned by iommu_sva_bind_device() |
---|
| 3105 | + * |
---|
| 3106 | + * Put reference to a bond between device and address space. The device should |
---|
| 3107 | + * not be issuing any more transactions for this PASID. All outstanding page
---|
| 3108 | + * requests for this PASID must have been flushed to the IOMMU.
---|
| 3111 | + */ |
---|
| 3112 | +void iommu_sva_unbind_device(struct iommu_sva *handle) |
---|
| 3113 | +{ |
---|
| 3114 | + struct iommu_group *group; |
---|
| 3115 | + struct device *dev = handle->dev; |
---|
| 3116 | + const struct iommu_ops *ops = dev->bus->iommu_ops; |
---|
| 3117 | + |
---|
| 3118 | + if (!ops || !ops->sva_unbind) |
---|
| 3119 | + return; |
---|
| 3120 | + |
---|
| 3121 | + group = iommu_group_get(dev); |
---|
| 3122 | + if (!group) |
---|
| 3123 | + return; |
---|
| 3124 | + |
---|
| 3125 | + mutex_lock(&group->mutex); |
---|
| 3126 | + ops->sva_unbind(handle); |
---|
| 3127 | + mutex_unlock(&group->mutex); |
---|
| 3128 | + |
---|
| 3129 | + iommu_group_put(group); |
---|
| 3130 | +} |
---|
| 3131 | +EXPORT_SYMBOL_GPL(iommu_sva_unbind_device); |
---|
| 3132 | + |
---|
| 3133 | +u32 iommu_sva_get_pasid(struct iommu_sva *handle) |
---|
| 3134 | +{ |
---|
| 3135 | + const struct iommu_ops *ops = handle->dev->bus->iommu_ops; |
---|
| 3136 | + |
---|
| 3137 | + if (!ops || !ops->sva_get_pasid) |
---|
| 3138 | + return IOMMU_PASID_INVALID; |
---|
| 3139 | + |
---|
| 3140 | + return ops->sva_get_pasid(handle); |
---|
| 3141 | +} |
---|
| 3142 | +EXPORT_SYMBOL_GPL(iommu_sva_get_pasid); |
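
An end-to-end sketch of the SVA flow exported above, from the driver side (the function name is an assumption, and the usual linux/iommu.h context is assumed): enable the feature, bind current->mm, hand the PASID to the hardware, then tear everything down in reverse order:

	static int example_sva_flow(struct device *dev)
	{
		struct iommu_sva *handle;
		u32 pasid;
		int ret;

		ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
		if (ret)
			return ret;

		handle = iommu_sva_bind_device(dev, current->mm, NULL);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out_disable;
		}

		pasid = iommu_sva_get_pasid(handle);
		if (pasid == IOMMU_PASID_INVALID) {
			ret = -ENODEV;
		} else {
			/* ... tag DMA with "pasid"; it now walks current->mm ... */
			ret = 0;
		}

		iommu_sva_unbind_device(handle);
	out_disable:
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
		return ret;
	}
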
---|