 /*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
  * Copyright(c) 2016 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
...
 #include <linux/rculist.h>
 #include <linux/mmu_notifier.h>
 #include <linux/interval_tree_generic.h>
+#include <linux/sched/mm.h>
 
 #include "mmu_rb.h"
 #include "trace.h"
 
-struct mmu_rb_handler {
-	struct mmu_notifier mn;
-	struct rb_root_cached root;
-	void *ops_arg;
-	spinlock_t lock; /* protect the RB tree */
-	struct mmu_rb_ops *ops;
-	struct mm_struct *mm;
-	struct list_head lru_list;
-	struct work_struct del_work;
-	struct list_head del_list;
-	struct workqueue_struct *wq;
-};
-
 static unsigned long mmu_node_start(struct mmu_rb_node *);
 static unsigned long mmu_node_last(struct mmu_rb_node *);
 static int mmu_notifier_range_start(struct mmu_notifier *,
-				    struct mm_struct *,
-				    unsigned long, unsigned long, bool);
+				    const struct mmu_notifier_range *);
 static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
 					   unsigned long, unsigned long);
 static void do_remove(struct mmu_rb_handler *handler,
...
 static void handle_remove(struct work_struct *work);
 
 static const struct mmu_notifier_ops mn_opts = {
-	.flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
 	.invalidate_range_start = mmu_notifier_range_start,
 };
 
...
 	return PAGE_ALIGN(node->addr + node->len) - 1;
 }
 
-int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
+int hfi1_mmu_rb_register(void *ops_arg,
 			 struct mmu_rb_ops *ops,
 			 struct workqueue_struct *wq,
 			 struct mmu_rb_handler **handler)
 {
-	struct mmu_rb_handler *handlr;
+	struct mmu_rb_handler *h;
 	int ret;
 
-	handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
-	if (!handlr)
+	h = kzalloc(sizeof(*h), GFP_KERNEL);
+	if (!h)
 		return -ENOMEM;
 
-	handlr->root = RB_ROOT_CACHED;
-	handlr->ops = ops;
-	handlr->ops_arg = ops_arg;
-	INIT_HLIST_NODE(&handlr->mn.hlist);
-	spin_lock_init(&handlr->lock);
-	handlr->mn.ops = &mn_opts;
-	handlr->mm = mm;
-	INIT_WORK(&handlr->del_work, handle_remove);
-	INIT_LIST_HEAD(&handlr->del_list);
-	INIT_LIST_HEAD(&handlr->lru_list);
-	handlr->wq = wq;
+	h->root = RB_ROOT_CACHED;
+	h->ops = ops;
+	h->ops_arg = ops_arg;
+	INIT_HLIST_NODE(&h->mn.hlist);
+	spin_lock_init(&h->lock);
+	h->mn.ops = &mn_opts;
+	INIT_WORK(&h->del_work, handle_remove);
+	INIT_LIST_HEAD(&h->del_list);
+	INIT_LIST_HEAD(&h->lru_list);
+	h->wq = wq;
 
-	ret = mmu_notifier_register(&handlr->mn, handlr->mm);
+	ret = mmu_notifier_register(&h->mn, current->mm);
 	if (ret) {
-		kfree(handlr);
+		kfree(h);
 		return ret;
 	}
 
-	*handler = handlr;
+	*handler = h;
 	return 0;
 }
 
...
 	unsigned long flags;
 	struct list_head del_list;
 
+	/* Prevent freeing of mm until we are completely finished. */
+	mmgrab(handler->mn.mm);
+
 	/* Unregister first so we don't get any more notifications. */
-	mmu_notifier_unregister(&handler->mn, handler->mm);
+	mmu_notifier_unregister(&handler->mn, handler->mn.mm);
 
 	/*
 	 * Make sure the wq delete handler is finished running. It will not
...
 
 	do_remove(handler, &del_list);
 
+	/* Now the mm may be freed. */
+	mmdrop(handler->mn.mm);
+
 	kfree(handler);
 }
 
...
 	int ret = 0;
 
 	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
+
+	if (current->mm != handler->mn.mm)
+		return -EPERM;
+
 	spin_lock_irqsave(&handler->lock, flags);
 	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
 	if (node) {
...
 		__mmu_int_rb_remove(mnode, &handler->root);
 		list_del(&mnode->list); /* remove from LRU list */
 	}
+	mnode->handler = handler;
 unlock:
 	spin_unlock_irqrestore(&handler->lock, flags);
 	return ret;
...
 	unsigned long flags;
 	bool ret = false;
 
+	if (current->mm != handler->mn.mm)
+		return ret;
+
 	spin_lock_irqsave(&handler->lock, flags);
 	node = __mmu_rb_search(handler, addr, len);
 	if (node) {
...
 	struct list_head del_list;
 	unsigned long flags;
 	bool stop = false;
+
+	if (current->mm != handler->mn.mm)
+		return;
 
 	INIT_LIST_HEAD(&del_list);
 
...
 {
 	unsigned long flags;
 
+	if (current->mm != handler->mn.mm)
+		return;
+
 	/* Validity of handler and node pointers has been checked by caller. */
 	trace_hfi1_mmu_rb_remove(node->addr, node->len);
 	spin_lock_irqsave(&handler->lock, flags);
...
 }
 
 static int mmu_notifier_range_start(struct mmu_notifier *mn,
-				    struct mm_struct *mm,
-				    unsigned long start,
-				    unsigned long end,
-				    bool blockable)
+				    const struct mmu_notifier_range *range)
 {
 	struct mmu_rb_handler *handler =
 		container_of(mn, struct mmu_rb_handler, mn);
...
 	bool added = false;
 
 	spin_lock_irqsave(&handler->lock, flags);
-	for (node = __mmu_int_rb_iter_first(root, start, end - 1);
+	for (node = __mmu_int_rb_iter_first(root, range->start, range->end-1);
 	     node; node = ptr) {
 		/* Guard against node removal. */
-		ptr = __mmu_int_rb_iter_next(node, start, end - 1);
+		ptr = __mmu_int_rb_iter_next(node, range->start,
+					     range->end - 1);
 		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
 		if (handler->ops->invalidate(handler->ops_arg, node)) {
 			__mmu_int_rb_remove(node, root);
...
 
 /*
  * Work queue function to remove all nodes that have been queued up to
- * be removed. The key feature is that mm->mmap_sem is not being held
+ * be removed. The key feature is that mm->mmap_lock is not being held
  * and the remove callback can sleep while taking it, if needed.
  */
 static void handle_remove(struct work_struct *work)
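
The recurring guard and the mmgrab()/mmdrop() pair above form one pattern: the handler no longer carries its own mm pointer, but relies on the mm that mmu_notifier_register() records in the notifier, rejects API calls arriving from any other process, and pins the mm_struct across unregister so handler->mn.mm cannot be freed underneath the teardown path. Below is a minimal, self-contained sketch of that pattern; the demo_* names are hypothetical illustrations, while mmgrab(), mmdrop(), current->mm, mmu_notifier_register()/unregister(), and the mn.mm field are the actual kernel interfaces the patch uses.

/*
 * Hypothetical sketch of the mm-lifetime pattern in this patch;
 * the demo_* names are illustrative only and not part of hfi1.
 */
#include <linux/mmu_notifier.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

struct demo_handler {
	struct mmu_notifier mn;	/* the core records the owning mm in mn.mm */
};

static const struct mmu_notifier_ops demo_mn_ops;	/* callbacks elided */

static int demo_register(struct demo_handler **out)
{
	struct demo_handler *h = kzalloc(sizeof(*h), GFP_KERNEL);
	int ret;

	if (!h)
		return -ENOMEM;
	h->mn.ops = &demo_mn_ops;
	/* Register against the caller's mm, exactly as the patch does. */
	ret = mmu_notifier_register(&h->mn, current->mm);
	if (ret) {
		kfree(h);
		return ret;
	}
	*out = h;
	return 0;
}

static int demo_op(struct demo_handler *h)
{
	/* Reject calls from any process other than the registrant. */
	if (current->mm != h->mn.mm)
		return -EPERM;
	return 0;
}

static void demo_unregister(struct demo_handler *h)
{
	/* Pin the mm_struct so h->mn.mm stays valid through teardown... */
	mmgrab(h->mn.mm);
	mmu_notifier_unregister(&h->mn, h->mn.mm);
	/* ...and release it once no notification can arrive. */
	mmdrop(h->mn.mm);
	kfree(h);
}

Note that mmgrab() takes a reference on mm_count, which keeps the mm_struct allocation alive without pinning the address space itself (that would be mmget()), so the unregister path stays safe even after the owning process has exited.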
---|