@@ -9,7 +9,6 @@
 #include <linux/blkdev.h>
 #include <linux/slab.h>
 #include <linux/sched/task.h>
-#include <linux/delay.h>
 
 #include "blk.h"
 
@@ -29,7 +28,6 @@
         BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
         atomic_long_inc(&ioc->refcount);
 }
-EXPORT_SYMBOL(get_io_context);
 
 static void icq_free_icq_rcu(struct rcu_head *head)
 {
@@ -49,10 +47,8 @@
         if (icq->flags & ICQ_EXITED)
                 return;
 
-        if (et->uses_mq && et->ops.mq.exit_icq)
-                et->ops.mq.exit_icq(icq);
-        else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
-                et->ops.sq.elevator_exit_icq_fn(icq);
+        if (et->ops.exit_icq)
+                et->ops.exit_icq(icq);
 
         icq->flags |= ICQ_EXITED;
 }
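
The hunk above is where the diff retires the split elevator tables: the blk-mq (`ops.mq`) and legacy single-queue (`ops.sq`) variants collapse into one unified `ops` structure, so `ioc_exit_icq()` no longer needs a `uses_mq` branch. As a hedged sketch of the provider side (modeled on how an icq-aware scheduler such as BFQ fills in the unified table; unrelated fields are elided):

/*
 * Sketch of how an icq-aware blk-mq scheduler registers the hooks that
 * ioc_exit_icq()/ioc_create_icq() reach through et->ops; modeled on BFQ,
 * with the dispatch/insert/merge hooks omitted.
 */
static struct elevator_type iosched_bfq_mq = {
        .ops = {
                .init_icq       = bfq_init_icq,  /* reached via et->ops.init_icq */
                .exit_icq       = bfq_exit_icq,  /* reached via et->ops.exit_icq */
                /* ... other scheduler hooks elided ... */
        },
        .icq_size       = sizeof(struct bfq_io_cq),
        .icq_align      = __alignof__(struct bfq_io_cq),
        .elevator_name  = "bfq",
        .elevator_owner = THIS_MODULE,
};
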
@@ -100,32 +96,38 @@
 {
         struct io_context *ioc = container_of(work, struct io_context,
                                               release_work);
-        unsigned long flags;
-
-        /*
-         * Exiting icq may call into put_io_context() through elevator
-         * which will trigger lockdep warning. The ioc's are guaranteed to
-         * be different, use a different locking subclass here. Use
-         * irqsave variant as there's no spin_lock_irq_nested().
-         */
-        spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+        spin_lock_irq(&ioc->lock);
 
         while (!hlist_empty(&ioc->icq_list)) {
                 struct io_cq *icq = hlist_entry(ioc->icq_list.first,
                                                 struct io_cq, ioc_node);
                 struct request_queue *q = icq->q;
 
-                if (spin_trylock(q->queue_lock)) {
+                if (spin_trylock(&q->queue_lock)) {
                         ioc_destroy_icq(icq);
-                        spin_unlock(q->queue_lock);
+                        spin_unlock(&q->queue_lock);
                 } else {
-                        spin_unlock_irqrestore(&ioc->lock, flags);
-                        cpu_chill();
-                        spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+                        /* Make sure q and icq cannot be freed. */
+                        rcu_read_lock();
+
+                        /* Re-acquire the locks in the correct order. */
+                        spin_unlock(&ioc->lock);
+                        spin_lock(&q->queue_lock);
+                        spin_lock(&ioc->lock);
+
+                        /*
+                         * The icq may have been destroyed when the ioc lock
+                         * was released.
+                         */
+                        if (!(icq->flags & ICQ_DESTROYED))
+                                ioc_destroy_icq(icq);
+
+                        spin_unlock(&q->queue_lock);
+                        rcu_read_unlock();
                 }
         }
 
-        spin_unlock_irqrestore(&ioc->lock, flags);
+        spin_unlock_irq(&ioc->lock);
 
         kmem_cache_free(iocontext_cachep, ioc);
 }
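
This rewrite of ioc_release_fn() drops the RT-specific `cpu_chill()` retry in favor of a deterministic slow path: when the trylock fails, drop `ioc->lock`, take `q->queue_lock` then `ioc->lock` in the canonical order, and recheck `ICQ_DESTROYED`, since the icq may have been torn down while `ioc->lock` was released; the RCU read-side section keeps `q` and the icq from being freed across that window. Below is a minimal standalone sketch of the same pattern in userspace C with pthread spinlocks (all names illustrative; the RCU pinning has no direct userspace equivalent here and is only noted in a comment):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
        bool destroyed;                  /* plays the role of ICQ_DESTROYED */
};

static pthread_spinlock_t lock_a;        /* plays the role of ioc->lock */
static pthread_spinlock_t lock_b;        /* plays the role of q->queue_lock */

static void destroy_obj(struct obj *o)
{
        /* Caller holds both locks, as ioc_destroy_icq() requires. */
        o->destroyed = true;
}

static void release_one(struct obj *o)
{
        pthread_spin_lock(&lock_a);

        if (pthread_spin_trylock(&lock_b) == 0) {
                /* Fast path: got B while already holding A. */
                destroy_obj(o);
                pthread_spin_unlock(&lock_b);
        } else {
                /*
                 * Slow path: re-acquire in the canonical order B, then A.
                 * (The kernel additionally holds rcu_read_lock() here so
                 * the objects cannot be freed while A is dropped.)
                 */
                pthread_spin_unlock(&lock_a);
                pthread_spin_lock(&lock_b);
                pthread_spin_lock(&lock_a);

                /* The object may have been destroyed while A was dropped. */
                if (!o->destroyed)
                        destroy_obj(o);

                pthread_spin_unlock(&lock_b);
        }

        pthread_spin_unlock(&lock_a);
}

int main(void)
{
        struct obj o = { .destroyed = false };

        pthread_spin_init(&lock_a, PTHREAD_PROCESS_PRIVATE);
        pthread_spin_init(&lock_b, PTHREAD_PROCESS_PRIVATE);
        release_one(&o);
        printf("destroyed=%d\n", o.destroyed);
        return 0;
}
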
@@ -164,7 +166,6 @@
         if (free_ioc)
                 kmem_cache_free(iocontext_cachep, ioc);
 }
-EXPORT_SYMBOL(put_io_context);
 
 /**
  * put_io_context_active - put active reference on ioc
@@ -175,8 +176,6 @@
  */
 void put_io_context_active(struct io_context *ioc)
 {
-        struct elevator_type *et;
-        unsigned long flags;
         struct io_cq *icq;
 
         if (!atomic_dec_and_test(&ioc->active_ref)) {
@@ -184,32 +183,14 @@
                 return;
         }
 
-        /*
-         * Need ioc lock to walk icq_list and q lock to exit icq. Perform
-         * reverse double locking. Read comment in ioc_release_fn() for
-         * explanation on the nested locking annotation.
-         */
-retry:
-        spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+        spin_lock_irq(&ioc->lock);
         hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
                 if (icq->flags & ICQ_EXITED)
                         continue;
 
-                et = icq->q->elevator->type;
-                if (et->uses_mq) {
-                        ioc_exit_icq(icq);
-                } else {
-                        if (spin_trylock(icq->q->queue_lock)) {
-                                ioc_exit_icq(icq);
-                                spin_unlock(icq->q->queue_lock);
-                        } else {
-                                spin_unlock_irqrestore(&ioc->lock, flags);
-                                cpu_chill();
-                                goto retry;
-                        }
-                }
+                ioc_exit_icq(icq);
         }
-        spin_unlock_irqrestore(&ioc->lock, flags);
+        spin_unlock_irq(&ioc->lock);
 
         put_io_context(ioc);
 }
@@ -235,7 +216,7 @@
         rcu_read_lock();
         while (!list_empty(icq_list)) {
                 struct io_cq *icq = list_entry(icq_list->next,
-                                               struct io_cq, q_node);
+                                                struct io_cq, q_node);
                 struct io_context *ioc = icq->ioc;
 
                 spin_lock_irqsave(&ioc->lock, flags);
@@ -259,16 +240,11 @@
 {
         LIST_HEAD(icq_list);
 
-        spin_lock_irq(q->queue_lock);
+        spin_lock_irq(&q->queue_lock);
         list_splice_init(&q->icq_list, &icq_list);
+        spin_unlock_irq(&q->queue_lock);
 
-        if (q->mq_ops) {
-                spin_unlock_irq(q->queue_lock);
-                __ioc_clear_queue(&icq_list);
-        } else {
-                __ioc_clear_queue(&icq_list);
-                spin_unlock_irq(q->queue_lock);
-        }
+        __ioc_clear_queue(&icq_list);
 }
 
 int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
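
A second theme running through these hunks is mechanical: every `spin_lock_irq(q->queue_lock)` becomes `spin_lock_irq(&q->queue_lock)` (likewise the unlocks and `lockdep_assert_held()`), because `queue_lock` is now a `spinlock_t` embedded in `struct request_queue` rather than a pointer that a legacy driver could aim at its own lock. A minimal before/after sketch (the `_old`/`_new` names are stand-ins for this illustration only; the real structure in `blkdev.h` has many more fields):

#include <linux/spinlock.h>

struct request_queue_old {
        spinlock_t *queue_lock;          /* pointer; callers wrote q->queue_lock */
};

struct request_queue_new {
        spinlock_t queue_lock;           /* embedded; callers write &q->queue_lock */
};
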
@@ -344,7 +320,6 @@
 
         return NULL;
 }
-EXPORT_SYMBOL(get_task_io_context);
 
 /**
  * ioc_lookup_icq - lookup io_cq from ioc
349 | 324 | /** |
---|
350 | 325 | * ioc_lookup_icq - lookup io_cq from ioc |
---|
.. | .. |
---|
358 | 333 | { |
---|
359 | 334 | struct io_cq *icq; |
---|
360 | 335 | |
---|
361 | | - lockdep_assert_held(q->queue_lock); |
---|
| 336 | + lockdep_assert_held(&q->queue_lock); |
---|
362 | 337 | |
---|
363 | 338 | /* |
---|
364 | 339 | * icq's are indexed from @ioc using radix tree and hint pointer, |
---|
@@ -417,16 +392,14 @@
         INIT_HLIST_NODE(&icq->ioc_node);
 
         /* lock both q and ioc and try to link @icq */
-        spin_lock_irq(q->queue_lock);
+        spin_lock_irq(&q->queue_lock);
         spin_lock(&ioc->lock);
 
         if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
                 hlist_add_head(&icq->ioc_node, &ioc->icq_list);
                 list_add(&icq->q_node, &q->icq_list);
-                if (et->uses_mq && et->ops.mq.init_icq)
-                        et->ops.mq.init_icq(icq);
-                else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
-                        et->ops.sq.elevator_init_icq_fn(icq);
+                if (et->ops.init_icq)
+                        et->ops.init_icq(icq);
         } else {
                 kmem_cache_free(et->icq_cache, icq);
                 icq = ioc_lookup_icq(ioc, q);
@@ -435,7 +408,7 @@
         }
 
         spin_unlock(&ioc->lock);
-        spin_unlock_irq(q->queue_lock);
+        spin_unlock_irq(&q->queue_lock);
         radix_tree_preload_end();
         return icq;
 }
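
ioc_create_icq(), which these last hunks touch, follows an optimistic insert protocol: allocate the icq before taking any lock, attempt the radix-tree insert while holding both `q->queue_lock` and `ioc->lock`, and on collision free the local copy and fall back to `ioc_lookup_icq()` to return whichever icq won the race. A standalone userspace sketch of that protocol, with a flat array standing in for the radix tree and all names hypothetical:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 64

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *table[NR_SLOTS];            /* slot index plays the role of q->id */

static void *create_entry(unsigned int id)
{
        void *entry;

        if (id >= NR_SLOTS)
                return NULL;

        entry = malloc(32);              /* allocate outside the lock */
        if (!entry)
                return NULL;

        pthread_mutex_lock(&table_lock);
        if (!table[id]) {
                table[id] = entry;       /* we won the race: link our copy */
        } else {
                free(entry);             /* lost the race: reuse the winner */
                entry = table[id];
        }
        pthread_mutex_unlock(&table_lock);

        return entry;
}

int main(void)
{
        void *a = create_entry(7);
        void *b = create_entry(7);       /* finds the entry created above */

        printf("same entry: %s\n", a == b ? "yes" : "no");
        return 0;
}
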