|  .. |  .. |
|  28 |  28 | BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
|  29 |  29 | atomic_long_inc(&ioc->refcount);
|  30 |  30 | }
|  31 |     | -EXPORT_SYMBOL(get_io_context);
|  32 |  31 |
|  33 |  32 | static void icq_free_icq_rcu(struct rcu_head *head)
|  34 |  33 | {
|  .. |  .. |
|  48 |  47 | if (icq->flags & ICQ_EXITED)
|  49 |  48 | return;
|  50 |  49 |
|  51 |     | - if (et->uses_mq && et->ops.mq.exit_icq)
|  52 |     | - et->ops.mq.exit_icq(icq);
|  53 |     | - else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
|  54 |     | - et->ops.sq.elevator_exit_icq_fn(icq);
|     |  50 | + if (et->ops.exit_icq)
|     |  51 | + et->ops.exit_icq(icq);
|  55 |  52 |
|  56 |  53 | icq->flags |= ICQ_EXITED;
|  57 |  54 | }
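
The mq/sq branching above collapses into a single optional callback: with the legacy path gone, the elevator exposes one ops table and the caller only needs a NULL check before invoking the hook. Below is a small, self-contained sketch of that optional-hook pattern in plain C; the names are illustrative stand-ins, not the kernel's elevator API.

```c
/*
 * Illustrative only: an ops table with optional hooks that the caller
 * NULL-checks before invoking, mirroring how the unified
 * et->ops.init_icq / et->ops.exit_icq callbacks are used. These names
 * are stand-ins, not the kernel's elevator API.
 */
#include <stdio.h>

struct icq_stub {
	int id;
};

struct sched_ops {
	void (*init_icq)(struct icq_stub *icq);	/* may be NULL */
	void (*exit_icq)(struct icq_stub *icq);	/* may be NULL */
};

static void demo_exit_icq(struct icq_stub *icq)
{
	printf("exit icq %d\n", icq->id);
}

/* this scheduler only implements exit_icq; init_icq is left unset */
static const struct sched_ops demo_ops = {
	.exit_icq = demo_exit_icq,
};

int main(void)
{
	struct icq_stub icq = { .id = 1 };

	/* invoke each hook only if the scheduler provides it */
	if (demo_ops.init_icq)
		demo_ops.init_icq(&icq);
	if (demo_ops.exit_icq)
		demo_ops.exit_icq(&icq);

	return 0;
}
```
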
|  .. |  .. |
|  99 |  96 | {
| 100 |  97 | struct io_context *ioc = container_of(work, struct io_context,
| 101 |  98 | release_work);
| 102 |     | - unsigned long flags;
| 103 |     | -
| 104 |     | - /*
| 105 |     | - * Exiting icq may call into put_io_context() through elevator
| 106 |     | - * which will trigger lockdep warning. The ioc's are guaranteed to
| 107 |     | - * be different, use a different locking subclass here. Use
| 108 |     | - * irqsave variant as there's no spin_lock_irq_nested().
| 109 |     | - */
| 110 |     | - spin_lock_irqsave_nested(&ioc->lock, flags, 1);
|     |  99 | + spin_lock_irq(&ioc->lock);
| 111 | 100 |
| 112 | 101 | while (!hlist_empty(&ioc->icq_list)) {
| 113 | 102 | struct io_cq *icq = hlist_entry(ioc->icq_list.first,
| 114 | 103 | struct io_cq, ioc_node);
| 115 | 104 | struct request_queue *q = icq->q;
| 116 | 105 |
| 117 |     | - if (spin_trylock(q->queue_lock)) {
|     | 106 | + if (spin_trylock(&q->queue_lock)) {
| 118 | 107 | ioc_destroy_icq(icq);
| 119 |     | - spin_unlock(q->queue_lock);
|     | 108 | + spin_unlock(&q->queue_lock);
| 120 | 109 | } else {
| 121 |     | - spin_unlock_irqrestore(&ioc->lock, flags);
| 122 |     | - cpu_relax();
| 123 |     | - spin_lock_irqsave_nested(&ioc->lock, flags, 1);
|     | 110 | + /* Make sure q and icq cannot be freed. */
|     | 111 | + rcu_read_lock();
|     | 112 | +
|     | 113 | + /* Re-acquire the locks in the correct order. */
|     | 114 | + spin_unlock(&ioc->lock);
|     | 115 | + spin_lock(&q->queue_lock);
|     | 116 | + spin_lock(&ioc->lock);
|     | 117 | +
|     | 118 | + /*
|     | 119 | + * The icq may have been destroyed when the ioc lock
|     | 120 | + * was released.
|     | 121 | + */
|     | 122 | + if (!(icq->flags & ICQ_DESTROYED))
|     | 123 | + ioc_destroy_icq(icq);
|     | 124 | +
|     | 125 | + spin_unlock(&q->queue_lock);
|     | 126 | + rcu_read_unlock();
| 124 | 127 | }
| 125 | 128 | }
| 126 | 129 |
| 127 |     | - spin_unlock_irqrestore(&ioc->lock, flags);
|     | 130 | + spin_unlock_irq(&ioc->lock);
| 128 | 131 |
| 129 | 132 | kmem_cache_free(iocontext_cachep, ioc);
| 130 | 133 | }
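
The new else-branch above replaces the old irqsave/cpu_relax() retry loop with the standard escape from a lock-ordering inversion: drop the outer ioc->lock, take the two locks in their canonical order (queue_lock first, then ioc->lock), and recheck ICQ_DESTROYED, because another path may have torn the icq down while ioc->lock was released; rcu_read_lock() keeps q and the icq from being freed across that window. Below is a minimal userspace sketch of the same pattern, with pthread mutexes standing in for the spinlocks and a single entry instead of icq_list; all names are illustrative, not kernel APIs.

```c
/*
 * Userspace sketch of the unlock/reorder/revalidate pattern above.
 * pthread mutexes stand in for ioc->lock and q->queue_lock, a single
 * entry stands in for icq_list, and a bool stands in for ICQ_DESTROYED.
 * Nothing is freed here, so the kernel's rcu_read_lock() protection is
 * not needed in this toy. Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t queue_lock;
};

struct icq {
	struct queue *q;
	bool destroyed;			/* ICQ_DESTROYED stand-in */
};

struct ioc {
	pthread_mutex_t lock;
	struct icq *icq;		/* single entry instead of icq_list */
};

/* called with both ioc->lock and q->queue_lock held */
static void destroy_icq(struct icq *icq)
{
	icq->destroyed = true;
	printf("icq destroyed\n");
}

static void release_ioc(struct ioc *ioc)
{
	pthread_mutex_lock(&ioc->lock);

	while (ioc->icq) {
		struct icq *icq = ioc->icq;
		struct queue *q = icq->q;

		if (pthread_mutex_trylock(&q->queue_lock) == 0) {
			/* fast path: inner lock acquired out of order */
			destroy_icq(icq);
			pthread_mutex_unlock(&q->queue_lock);
		} else {
			/*
			 * Slow path: drop the outer lock, take both locks
			 * in the canonical order (queue_lock, then
			 * ioc->lock), then recheck, because the icq may
			 * have been destroyed while ioc->lock was released.
			 */
			pthread_mutex_unlock(&ioc->lock);
			pthread_mutex_lock(&q->queue_lock);
			pthread_mutex_lock(&ioc->lock);

			if (!icq->destroyed)
				destroy_icq(icq);

			pthread_mutex_unlock(&q->queue_lock);
		}

		ioc->icq = NULL;	/* drop the entry we just handled */
	}

	pthread_mutex_unlock(&ioc->lock);
}

int main(void)
{
	struct queue q = { .queue_lock = PTHREAD_MUTEX_INITIALIZER };
	struct icq icq = { .q = &q, .destroyed = false };
	struct ioc ioc = { .lock = PTHREAD_MUTEX_INITIALIZER, .icq = &icq };

	release_ioc(&ioc);
	return 0;
}
```

The trylock fast path avoids the unlock/relock dance entirely when the queue lock is uncontended; only the contended case pays for the reordering and the revalidation.
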
|  .. |  .. |
| 163 | 166 | if (free_ioc)
| 164 | 167 | kmem_cache_free(iocontext_cachep, ioc);
| 165 | 168 | }
| 166 |     | -EXPORT_SYMBOL(put_io_context);
| 167 | 169 |
| 168 | 170 | /**
| 169 | 171 | * put_io_context_active - put active reference on ioc
|  .. |  .. |
| 174 | 176 | */
| 175 | 177 | void put_io_context_active(struct io_context *ioc)
| 176 | 178 | {
| 177 |     | - struct elevator_type *et;
| 178 |     | - unsigned long flags;
| 179 | 179 | struct io_cq *icq;
| 180 | 180 |
| 181 | 181 | if (!atomic_dec_and_test(&ioc->active_ref)) {
|  .. |  .. |
| 183 | 183 | return;
| 184 | 184 | }
| 185 | 185 |
| 186 |     | - /*
| 187 |     | - * Need ioc lock to walk icq_list and q lock to exit icq. Perform
| 188 |     | - * reverse double locking. Read comment in ioc_release_fn() for
| 189 |     | - * explanation on the nested locking annotation.
| 190 |     | - */
| 191 |     | -retry:
| 192 |     | - spin_lock_irqsave_nested(&ioc->lock, flags, 1);
|     | 186 | + spin_lock_irq(&ioc->lock);
| 193 | 187 | hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
| 194 | 188 | if (icq->flags & ICQ_EXITED)
| 195 | 189 | continue;
| 196 | 190 |
| 197 |     | - et = icq->q->elevator->type;
| 198 |     | - if (et->uses_mq) {
| 199 |     | - ioc_exit_icq(icq);
| 200 |     | - } else {
| 201 |     | - if (spin_trylock(icq->q->queue_lock)) {
| 202 |     | - ioc_exit_icq(icq);
| 203 |     | - spin_unlock(icq->q->queue_lock);
| 204 |     | - } else {
| 205 |     | - spin_unlock_irqrestore(&ioc->lock, flags);
| 206 |     | - cpu_relax();
| 207 |     | - goto retry;
| 208 |     | - }
| 209 |     | - }
|     | 191 | + ioc_exit_icq(icq);
| 210 | 192 | }
| 211 |     | - spin_unlock_irqrestore(&ioc->lock, flags);
|     | 193 | + spin_unlock_irq(&ioc->lock);
| 212 | 194 |
| 213 | 195 | put_io_context(ioc);
| 214 | 196 | }
|  .. |  .. |
| 234 | 216 | rcu_read_lock();
| 235 | 217 | while (!list_empty(icq_list)) {
| 236 | 218 | struct io_cq *icq = list_entry(icq_list->next,
| 237 |     | - struct io_cq, q_node);
|     | 219 | + struct io_cq, q_node);
| 238 | 220 | struct io_context *ioc = icq->ioc;
| 239 | 221 |
| 240 | 222 | spin_lock_irqsave(&ioc->lock, flags);
|  .. |  .. |
| 258 | 240 | {
| 259 | 241 | LIST_HEAD(icq_list);
| 260 | 242 |
| 261 |     | - spin_lock_irq(q->queue_lock);
|     | 243 | + spin_lock_irq(&q->queue_lock);
| 262 | 244 | list_splice_init(&q->icq_list, &icq_list);
|     | 245 | + spin_unlock_irq(&q->queue_lock);
| 263 | 246 |
| 264 |     | - if (q->mq_ops) {
| 265 |     | - spin_unlock_irq(q->queue_lock);
| 266 |     | - __ioc_clear_queue(&icq_list);
| 267 |     | - } else {
| 268 |     | - __ioc_clear_queue(&icq_list);
| 269 |     | - spin_unlock_irq(q->queue_lock);
| 270 |     | - }
|     | 247 | + __ioc_clear_queue(&icq_list);
| 271 | 248 | }
| 272 | 249 |
| 273 | 250 | int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
|  .. |  .. |
| 343 | 320 |
| 344 | 321 | return NULL;
| 345 | 322 | }
| 346 |     | -EXPORT_SYMBOL(get_task_io_context);
| 347 | 323 |
| 348 | 324 | /**
| 349 | 325 | * ioc_lookup_icq - lookup io_cq from ioc
|  .. |  .. |
| 357 | 333 | {
| 358 | 334 | struct io_cq *icq;
| 359 | 335 |
| 360 |     | - lockdep_assert_held(q->queue_lock);
|     | 336 | + lockdep_assert_held(&q->queue_lock);
| 361 | 337 |
| 362 | 338 | /*
| 363 | 339 | * icq's are indexed from @ioc using radix tree and hint pointer,
|  .. |  .. |
| 416 | 392 | INIT_HLIST_NODE(&icq->ioc_node);
| 417 | 393 |
| 418 | 394 | /* lock both q and ioc and try to link @icq */
| 419 |     | - spin_lock_irq(q->queue_lock);
|     | 395 | + spin_lock_irq(&q->queue_lock);
| 420 | 396 | spin_lock(&ioc->lock);
| 421 | 397 |
| 422 | 398 | if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
| 423 | 399 | hlist_add_head(&icq->ioc_node, &ioc->icq_list);
| 424 | 400 | list_add(&icq->q_node, &q->icq_list);
| 425 |     | - if (et->uses_mq && et->ops.mq.init_icq)
| 426 |     | - et->ops.mq.init_icq(icq);
| 427 |     | - else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
| 428 |     | - et->ops.sq.elevator_init_icq_fn(icq);
|     | 401 | + if (et->ops.init_icq)
|     | 402 | + et->ops.init_icq(icq);
| 429 | 403 | } else {
| 430 | 404 | kmem_cache_free(et->icq_cache, icq);
| 431 | 405 | icq = ioc_lookup_icq(ioc, q);
|  .. |  .. |
| 434 | 408 | }
| 435 | 409 | }
| 436 | 410 | spin_unlock(&ioc->lock);
| 437 |     | - spin_unlock_irq(q->queue_lock);
|     | 411 | + spin_unlock_irq(&q->queue_lock);
| 438 | 412 | radix_tree_preload_end();
| 439 | 413 | return icq;
| 440 | 414 | }
|---|