2024-10-16 50a212ec906f7524620675f0c57357691c26c81f
kernel/block/blk-mq-sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/backing-dev.h>
@@ -16,6 +17,18 @@
 
 static void blk_mq_sysfs_release(struct kobject *kobj)
 {
+	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);
+
+	free_percpu(ctxs->queue_ctx);
+	kfree(ctxs);
+}
+
+static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
+{
+	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);
+
+	/* ctx->ctxs won't be released until all ctx are freed */
+	kobject_put(&ctx->ctxs->kobj);
 }
 
 static void blk_mq_hw_sysfs_release(struct kobject *kobj)
@@ -59,10 +72,8 @@
 	if (!entry->show)
 		return -EIO;
 
-	res = -ENOENT;
 	mutex_lock(&q->sysfs_lock);
-	if (!blk_queue_dying(q))
-		res = entry->show(ctx, page);
+	res = entry->show(ctx, page);
 	mutex_unlock(&q->sysfs_lock);
 	return res;
 }
@@ -82,10 +93,8 @@
 	if (!entry->store)
 		return -EIO;
 
-	res = -ENOENT;
 	mutex_lock(&q->sysfs_lock);
-	if (!blk_queue_dying(q))
-		res = entry->store(ctx, page, length);
+	res = entry->store(ctx, page, length);
 	mutex_unlock(&q->sysfs_lock);
 	return res;
 }
@@ -105,10 +114,8 @@
 	if (!entry->show)
 		return -EIO;
 
-	res = -ENOENT;
 	mutex_lock(&q->sysfs_lock);
-	if (!blk_queue_dying(q))
-		res = entry->show(hctx, page);
+	res = entry->show(hctx, page);
 	mutex_unlock(&q->sysfs_lock);
 	return res;
 }
@@ -129,10 +136,8 @@
 	if (!entry->store)
 		return -EIO;
 
-	res = -ENOENT;
 	mutex_lock(&q->sysfs_lock);
-	if (!blk_queue_dying(q))
-		res = entry->store(hctx, page, length);
+	res = entry->store(hctx, page, length);
 	mutex_unlock(&q->sysfs_lock);
 	return res;
 }
@@ -172,10 +177,6 @@
 	return pos + ret;
 }
 
-static struct attribute *default_ctx_attrs[] = {
-	NULL,
-};
-
 static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
 	.attr = {.name = "nr_tags", .mode = 0444 },
 	.show = blk_mq_hw_sysfs_nr_tags_show,
@@ -195,6 +196,7 @@
 	&blk_mq_hw_sysfs_cpus.attr,
 	NULL,
 };
+ATTRIBUTE_GROUPS(default_hw_ctx);
 
 static const struct sysfs_ops blk_mq_sysfs_ops = {
 	.show = blk_mq_sysfs_show,
@@ -213,13 +215,12 @@
 
 static struct kobj_type blk_mq_ctx_ktype = {
 	.sysfs_ops = &blk_mq_sysfs_ops,
-	.default_attrs = default_ctx_attrs,
-	.release = blk_mq_sysfs_release,
+	.release = blk_mq_ctx_sysfs_release,
 };
 
 static struct kobj_type blk_mq_hw_ktype = {
 	.sysfs_ops = &blk_mq_hw_sysfs_ops,
-	.default_attrs = default_hw_ctx_attrs,
+	.default_groups = default_hw_ctx_groups,
 	.release = blk_mq_hw_sysfs_release,
 };
 
@@ -241,21 +242,28 @@
 {
 	struct request_queue *q = hctx->queue;
 	struct blk_mq_ctx *ctx;
-	int i, ret;
+	int i, j, ret;
 
 	if (!hctx->nr_ctx)
 		return 0;
 
-	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
+	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
 	if (ret)
 		return ret;
 
 	hctx_for_each_ctx(hctx, ctx, i) {
 		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
 		if (ret)
-			break;
+			goto out;
 	}
 
+	return 0;
+out:
+	hctx_for_each_ctx(hctx, ctx, j) {
+		if (j < i)
+			kobject_del(&ctx->kobj);
+	}
+	kobject_del(&hctx->kobj);
 	return ret;
 }
 
@@ -264,13 +272,13 @@
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
-	lockdep_assert_held(&q->sysfs_lock);
+	lockdep_assert_held(&q->sysfs_dir_lock);
 
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_unregister_hctx(hctx);
 
-	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
-	kobject_del(&q->mq_kobj);
+	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
+	kobject_del(q->mq_kobj);
 	kobject_put(&dev->kobj);
 
 	q->mq_sysfs_init_done = false;
@@ -290,7 +298,7 @@
 		ctx = per_cpu_ptr(q->queue_ctx, cpu);
 		kobject_put(&ctx->kobj);
 	}
-	kobject_put(&q->mq_kobj);
+	kobject_put(q->mq_kobj);
 }
 
 void blk_mq_sysfs_init(struct request_queue *q)
@@ -298,10 +306,12 @@
 	struct blk_mq_ctx *ctx;
 	int cpu;
 
-	kobject_init(&q->mq_kobj, &blk_mq_ktype);
+	kobject_init(q->mq_kobj, &blk_mq_ktype);
 
 	for_each_possible_cpu(cpu) {
 		ctx = per_cpu_ptr(q->queue_ctx, cpu);
+
+		kobject_get(q->mq_kobj);
 		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
 	}
 }
@@ -312,13 +322,13 @@
 	int ret, i;
 
 	WARN_ON_ONCE(!q->kobj.parent);
-	lockdep_assert_held(&q->sysfs_lock);
+	lockdep_assert_held(&q->sysfs_dir_lock);
 
-	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
+	ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
 	if (ret < 0)
 		goto out;
 
-	kobject_uevent(&q->mq_kobj, KOBJ_ADD);
+	kobject_uevent(q->mq_kobj, KOBJ_ADD);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_register_hctx(hctx);
@@ -335,30 +345,18 @@
 	while (--i >= 0)
 		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);
 
-	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
-	kobject_del(&q->mq_kobj);
+	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
+	kobject_del(q->mq_kobj);
 	kobject_put(&dev->kobj);
 	return ret;
 }
-
-int blk_mq_register_dev(struct device *dev, struct request_queue *q)
-{
-	int ret;
-
-	mutex_lock(&q->sysfs_lock);
-	ret = __blk_mq_register_dev(dev, q);
-	mutex_unlock(&q->sysfs_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(blk_mq_register_dev);
 
 void blk_mq_sysfs_unregister(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
-	mutex_lock(&q->sysfs_lock);
+	mutex_lock(&q->sysfs_dir_lock);
 	if (!q->mq_sysfs_init_done)
 		goto unlock;
 
@@ -366,7 +364,7 @@
 		blk_mq_unregister_hctx(hctx);
 
 unlock:
-	mutex_unlock(&q->sysfs_lock);
+	mutex_unlock(&q->sysfs_dir_lock);
 }
 
 int blk_mq_sysfs_register(struct request_queue *q)
@@ -374,7 +372,7 @@
 	struct blk_mq_hw_ctx *hctx;
 	int i, ret = 0;
 
-	mutex_lock(&q->sysfs_lock);
+	mutex_lock(&q->sysfs_dir_lock);
 	if (!q->mq_sysfs_init_done)
 		goto unlock;
 
@@ -385,7 +383,7 @@
 	}
 
 unlock:
-	mutex_unlock(&q->sysfs_lock);
+	mutex_unlock(&q->sysfs_dir_lock);
 
 	return ret;
 }
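Illustrative note (not part of the patch): the lifetime change above is that blk_mq_sysfs_init() now takes one kobject_get(q->mq_kobj) per possible CPU, and each per-CPU ctx kobject returns that reference from blk_mq_ctx_sysfs_release(), so the shared blk_mq_ctxs object and its percpu queue_ctx storage are freed only after the last ctx kobject is gone. The sketch below shows the same "child kobject pins its parent" pattern in isolation; the demo_parent/demo_child names and the module scaffolding are assumptions for illustration, not code from the kernel tree.

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: demo_parent/demo_child are made-up names for illustration. */
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo_parent {
	struct kobject kobj;
	int shared_state;
};

struct demo_child {
	struct kobject kobj;
	struct demo_parent *parent;
};

/* Runs only when the parent's refcount hits zero, i.e. after every
 * child has been released and has dropped the reference it held. */
static void demo_parent_release(struct kobject *kobj)
{
	struct demo_parent *p = container_of(kobj, struct demo_parent, kobj);

	kfree(p);
}

/* Each child drops its pin on the parent from its own release handler,
 * the same shape as blk_mq_ctx_sysfs_release() in the diff above. */
static void demo_child_release(struct kobject *kobj)
{
	struct demo_child *c = container_of(kobj, struct demo_child, kobj);

	kobject_put(&c->parent->kobj);
	kfree(c);
}

static struct kobj_type demo_parent_ktype = {
	.release = demo_parent_release,
};

static struct kobj_type demo_child_ktype = {
	.release = demo_child_release,
};

static struct demo_parent *parent;
static struct demo_child *child;

static int __init demo_init(void)
{
	parent = kzalloc(sizeof(*parent), GFP_KERNEL);
	if (!parent)
		return -ENOMEM;
	kobject_init(&parent->kobj, &demo_parent_ktype);

	child = kzalloc(sizeof(*child), GFP_KERNEL);
	if (!child) {
		kobject_put(&parent->kobj);
		return -ENOMEM;
	}
	/* Pin the parent for as long as this child exists. */
	child->parent = parent;
	kobject_get(&parent->kobj);
	kobject_init(&child->kobj, &demo_child_ktype);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Dropping the child's last reference also drops its pin on the
	 * parent; the initial parent reference then goes last. */
	kobject_put(&child->kobj);
	kobject_put(&parent->kobj);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

This also explains why the error path added to blk_mq_register_hctx() only deletes the ctx kobjects from sysfs (kobject_del) rather than putting them: their references, including the pin on the shared mq kobject, are still owned by the queue and are released later through blk_mq_sysfs_deinit().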