2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/block/blk-ioc.c
@@ -28,7 +28,6 @@
         BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
         atomic_long_inc(&ioc->refcount);
 }
-EXPORT_SYMBOL(get_io_context);
 
 static void icq_free_icq_rcu(struct rcu_head *head)
 {
@@ -48,10 +47,8 @@
         if (icq->flags & ICQ_EXITED)
                 return;
 
-        if (et->uses_mq && et->ops.mq.exit_icq)
-                et->ops.mq.exit_icq(icq);
-        else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
-                et->ops.sq.elevator_exit_icq_fn(icq);
+        if (et->ops.exit_icq)
+                et->ops.exit_icq(icq);
 
         icq->flags |= ICQ_EXITED;
 }
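
The hunk above drops the legacy-path dispatch (et->uses_mq / et->ops.sq) and keeps a single optional et->ops.exit_icq hook. As a rough illustration only, here is a minimal userspace sketch of that optional-callback pattern; the struct and function names are made up for the example and are not kernel API:

#include <stdio.h>

struct icq_like { int exited; };

struct sched_ops {
        void (*exit_icq)(struct icq_like *icq);   /* hook may be NULL */
};

static void demo_exit_icq(struct icq_like *icq)
{
        (void)icq;
        printf("exit hook ran\n");
}

static void exit_one(const struct sched_ops *ops, struct icq_like *icq)
{
        if (icq->exited)
                return;
        if (ops->exit_icq)              /* call the hook only if it is provided */
                ops->exit_icq(icq);
        icq->exited = 1;
}

int main(void)
{
        struct sched_ops with_hook = { .exit_icq = demo_exit_icq };
        struct sched_ops without_hook = { .exit_icq = NULL };
        struct icq_like a = { 0 }, b = { 0 };

        exit_one(&with_hook, &a);       /* runs the hook, then marks exited */
        exit_one(&without_hook, &b);    /* just marks exited */
        return 0;
}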
@@ -99,32 +96,38 @@
 {
         struct io_context *ioc = container_of(work, struct io_context,
                                               release_work);
-        unsigned long flags;
-
-        /*
-         * Exiting icq may call into put_io_context() through elevator
-         * which will trigger lockdep warning. The ioc's are guaranteed to
-         * be different, use a different locking subclass here. Use
-         * irqsave variant as there's no spin_lock_irq_nested().
-         */
-        spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+        spin_lock_irq(&ioc->lock);
 
         while (!hlist_empty(&ioc->icq_list)) {
                 struct io_cq *icq = hlist_entry(ioc->icq_list.first,
                                                 struct io_cq, ioc_node);
                 struct request_queue *q = icq->q;
 
-                if (spin_trylock(q->queue_lock)) {
+                if (spin_trylock(&q->queue_lock)) {
                         ioc_destroy_icq(icq);
-                        spin_unlock(q->queue_lock);
+                        spin_unlock(&q->queue_lock);
                 } else {
-                        spin_unlock_irqrestore(&ioc->lock, flags);
-                        cpu_relax();
-                        spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+                        /* Make sure q and icq cannot be freed. */
+                        rcu_read_lock();
+
+                        /* Re-acquire the locks in the correct order. */
+                        spin_unlock(&ioc->lock);
+                        spin_lock(&q->queue_lock);
+                        spin_lock(&ioc->lock);
+
+                        /*
+                         * The icq may have been destroyed when the ioc lock
+                         * was released.
+                         */
+                        if (!(icq->flags & ICQ_DESTROYED))
+                                ioc_destroy_icq(icq);
+
+                        spin_unlock(&q->queue_lock);
+                        rcu_read_unlock();
                 }
         }
 
-        spin_unlock_irqrestore(&ioc->lock, flags);
+        spin_unlock_irq(&ioc->lock);
 
         kmem_cache_free(iocontext_cachep, ioc);
 }
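
The new else branch above replaces the unlock/cpu_relax/retry loop: it pins the icq and queue with RCU, re-takes the two locks in the canonical order (queue_lock first, then ioc->lock), and re-checks ICQ_DESTROYED because the icq may have been torn down while ioc->lock was dropped. Below is a minimal userspace sketch of that shape using pthread mutexes; RCU is only represented by comments, and all names are illustrative rather than kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER; /* canonical first */
static pthread_mutex_t ioc_lock   = PTHREAD_MUTEX_INITIALIZER; /* canonical second */

struct icq_like {
        bool destroyed;
};

static void destroy_icq(struct icq_like *icq)
{
        /* caller holds both locks, mirroring ioc_destroy_icq() */
        icq->destroyed = true;
}

static void release_one(struct icq_like *icq)
{
        pthread_mutex_lock(&ioc_lock);

        if (pthread_mutex_trylock(&queue_lock) == 0) {
                /* fast path: both locks held, destroy directly */
                destroy_icq(icq);
                pthread_mutex_unlock(&queue_lock);
        } else {
                /* in the kernel, rcu_read_lock() pins icq/q memory here */

                /* re-acquire in the canonical order: queue_lock, then ioc_lock */
                pthread_mutex_unlock(&ioc_lock);
                pthread_mutex_lock(&queue_lock);
                pthread_mutex_lock(&ioc_lock);

                /* someone else may have destroyed it while ioc_lock was dropped */
                if (!icq->destroyed)
                        destroy_icq(icq);

                pthread_mutex_unlock(&queue_lock);
                /* in the kernel, rcu_read_unlock() would go here */
        }

        pthread_mutex_unlock(&ioc_lock);
        printf("icq destroyed\n");
}

int main(void)
{
        struct icq_like icq = { .destroyed = false };
        release_one(&icq);
        return 0;
}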
@@ -163,7 +166,6 @@
         if (free_ioc)
                 kmem_cache_free(iocontext_cachep, ioc);
 }
-EXPORT_SYMBOL(put_io_context);
 
 /**
  * put_io_context_active - put active reference on ioc
@@ -174,8 +176,6 @@
  */
 void put_io_context_active(struct io_context *ioc)
 {
-        struct elevator_type *et;
-        unsigned long flags;
         struct io_cq *icq;
 
         if (!atomic_dec_and_test(&ioc->active_ref)) {
@@ -183,32 +183,14 @@
                 return;
         }
 
-        /*
-         * Need ioc lock to walk icq_list and q lock to exit icq. Perform
-         * reverse double locking. Read comment in ioc_release_fn() for
-         * explanation on the nested locking annotation.
-         */
-retry:
-        spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+        spin_lock_irq(&ioc->lock);
         hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
                 if (icq->flags & ICQ_EXITED)
                         continue;
 
-                et = icq->q->elevator->type;
-                if (et->uses_mq) {
-                        ioc_exit_icq(icq);
-                } else {
-                        if (spin_trylock(icq->q->queue_lock)) {
-                                ioc_exit_icq(icq);
-                                spin_unlock(icq->q->queue_lock);
-                        } else {
-                                spin_unlock_irqrestore(&ioc->lock, flags);
-                                cpu_relax();
-                                goto retry;
-                        }
-                }
+                ioc_exit_icq(icq);
         }
-        spin_unlock_irqrestore(&ioc->lock, flags);
+        spin_unlock_irq(&ioc->lock);
 
         put_io_context(ioc);
 }
@@ -234,7 +216,7 @@
         rcu_read_lock();
         while (!list_empty(icq_list)) {
                 struct io_cq *icq = list_entry(icq_list->next,
-                                               struct io_cq, q_node);
+                                                struct io_cq, q_node);
                 struct io_context *ioc = icq->ioc;
 
                 spin_lock_irqsave(&ioc->lock, flags);
258240 {
259241 LIST_HEAD(icq_list);
260242
261
- spin_lock_irq(q->queue_lock);
243
+ spin_lock_irq(&q->queue_lock);
262244 list_splice_init(&q->icq_list, &icq_list);
245
+ spin_unlock_irq(&q->queue_lock);
263246
264
- if (q->mq_ops) {
265
- spin_unlock_irq(q->queue_lock);
266
- __ioc_clear_queue(&icq_list);
267
- } else {
268
- __ioc_clear_queue(&icq_list);
269
- spin_unlock_irq(q->queue_lock);
270
- }
247
+ __ioc_clear_queue(&icq_list);
271248 }
272249
273250 int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
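
With the legacy path gone, ioc_clear_queue() above always splices q->icq_list onto a private list under &q->queue_lock and processes it after dropping the lock. A minimal userspace sketch of that splice-under-lock, process-unlocked shape (illustrative names only, not kernel API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        struct node *next;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *queue_list;          /* protected by queue_lock */

static void clear_queue(void)
{
        struct node *private_list;

        /* splice: detach the whole list in O(1) while holding the lock */
        pthread_mutex_lock(&queue_lock);
        private_list = queue_list;
        queue_list = NULL;
        pthread_mutex_unlock(&queue_lock);

        /* process the detached entries without holding queue_lock */
        while (private_list) {
                struct node *n = private_list;
                private_list = n->next;
                printf("clearing node %d\n", n->id);
                free(n);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->id = i;
                n->next = queue_list;
                queue_list = n;
        }
        clear_queue();
        return 0;
}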
@@ -343,7 +320,6 @@
 
         return NULL;
 }
-EXPORT_SYMBOL(get_task_io_context);
 
 /**
  * ioc_lookup_icq - lookup io_cq from ioc
@@ -357,7 +333,7 @@
 {
         struct io_cq *icq;
 
-        lockdep_assert_held(q->queue_lock);
+        lockdep_assert_held(&q->queue_lock);
 
         /*
          * icq's are indexed from @ioc using radix tree and hint pointer,
@@ -416,16 +392,14 @@
         INIT_HLIST_NODE(&icq->ioc_node);
 
         /* lock both q and ioc and try to link @icq */
-        spin_lock_irq(q->queue_lock);
+        spin_lock_irq(&q->queue_lock);
         spin_lock(&ioc->lock);
 
         if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
                 hlist_add_head(&icq->ioc_node, &ioc->icq_list);
                 list_add(&icq->q_node, &q->icq_list);
-                if (et->uses_mq && et->ops.mq.init_icq)
-                        et->ops.mq.init_icq(icq);
-                else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
-                        et->ops.sq.elevator_init_icq_fn(icq);
+                if (et->ops.init_icq)
+                        et->ops.init_icq(icq);
         } else {
                 kmem_cache_free(et->icq_cache, icq);
                 icq = ioc_lookup_icq(ioc, q);
@@ -434,7 +408,7 @@
         }
 
         spin_unlock(&ioc->lock);
-        spin_unlock_irq(q->queue_lock);
+        spin_unlock_irq(&q->queue_lock);
         radix_tree_preload_end();
         return icq;
 }
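
For context on the surrounding function: ioc_create_icq() allocates the icq first, links it into the radix tree under the locks, and on an insertion collision frees its own copy and reuses the entry that won (the else branch in the hunk above). A minimal userspace sketch of that allocate-then-link-or-lookup shape, with a plain array standing in for the radix tree (illustrative names only, not kernel API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_QUEUES 8

struct icq_like { int q_id; };

static pthread_mutex_t ioc_lock = PTHREAD_MUTEX_INITIALIZER;
static struct icq_like *icq_table[MAX_QUEUES];   /* protected by ioc_lock */

static struct icq_like *create_icq(int q_id)
{
        /* allocate optimistically, outside the lock */
        struct icq_like *icq = calloc(1, sizeof(*icq));
        if (!icq)
                return NULL;
        icq->q_id = q_id;

        pthread_mutex_lock(&ioc_lock);
        if (!icq_table[q_id]) {
                /* no collision: link our new entry */
                icq_table[q_id] = icq;
        } else {
                /* someone else linked one first: drop ours, use theirs */
                free(icq);
                icq = icq_table[q_id];
        }
        pthread_mutex_unlock(&ioc_lock);

        return icq;
}

int main(void)
{
        struct icq_like *a = create_icq(3);
        struct icq_like *b = create_icq(3);   /* hits the collision path */
        printf("same entry: %s\n", a == b ? "yes" : "no");
        return 0;
}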