.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
---|
1 | 2 | #include <linux/kernel.h> |
---|
2 | 3 | #include <linux/module.h> |
---|
3 | 4 | #include <linux/backing-dev.h> |
---|
.. | .. |
---|
16 | 17 | |
---|
/*
 * Release the per-queue blk_mq_ctxs container.
 *
 * Runs when the last reference to ctxs->kobj is dropped.  Each per-cpu
 * ctx kobject holds one reference on this kobject (dropped in
 * blk_mq_ctx_sysfs_release()), so by the time we get here every per-cpu
 * software context kobject is already gone and it is safe to free the
 * percpu area they live in.
 */
static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	/* free the per-cpu software contexts, then the container itself */
	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}
---|
| 25 | + |
---|
/*
 * Release callback for a per-cpu software context kobject.
 *
 * The ctx itself is percpu memory owned by the blk_mq_ctxs container, so
 * nothing is freed here; we only drop the reference this ctx holds on the
 * container's kobject (taken in blk_mq_sysfs_init()).
 */
static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}
---|
20 | 33 | |
---|
21 | 34 | static void blk_mq_hw_sysfs_release(struct kobject *kobj) |
---|
.. | .. |
---|
59 | 72 | if (!entry->show) |
---|
60 | 73 | return -EIO; |
---|
61 | 74 | |
---|
62 | | - res = -ENOENT; |
---|
63 | 75 | mutex_lock(&q->sysfs_lock); |
---|
64 | | - if (!blk_queue_dying(q)) |
---|
65 | | - res = entry->show(ctx, page); |
---|
| 76 | + res = entry->show(ctx, page); |
---|
66 | 77 | mutex_unlock(&q->sysfs_lock); |
---|
67 | 78 | return res; |
---|
68 | 79 | } |
---|
.. | .. |
---|
82 | 93 | if (!entry->store) |
---|
83 | 94 | return -EIO; |
---|
84 | 95 | |
---|
85 | | - res = -ENOENT; |
---|
86 | 96 | mutex_lock(&q->sysfs_lock); |
---|
87 | | - if (!blk_queue_dying(q)) |
---|
88 | | - res = entry->store(ctx, page, length); |
---|
| 97 | + res = entry->store(ctx, page, length); |
---|
89 | 98 | mutex_unlock(&q->sysfs_lock); |
---|
90 | 99 | return res; |
---|
91 | 100 | } |
---|
.. | .. |
---|
105 | 114 | if (!entry->show) |
---|
106 | 115 | return -EIO; |
---|
107 | 116 | |
---|
108 | | - res = -ENOENT; |
---|
109 | 117 | mutex_lock(&q->sysfs_lock); |
---|
110 | | - if (!blk_queue_dying(q)) |
---|
111 | | - res = entry->show(hctx, page); |
---|
| 118 | + res = entry->show(hctx, page); |
---|
112 | 119 | mutex_unlock(&q->sysfs_lock); |
---|
113 | 120 | return res; |
---|
114 | 121 | } |
---|
.. | .. |
---|
129 | 136 | if (!entry->store) |
---|
130 | 137 | return -EIO; |
---|
131 | 138 | |
---|
132 | | - res = -ENOENT; |
---|
133 | 139 | mutex_lock(&q->sysfs_lock); |
---|
134 | | - if (!blk_queue_dying(q)) |
---|
135 | | - res = entry->store(hctx, page, length); |
---|
| 140 | + res = entry->store(hctx, page, length); |
---|
136 | 141 | mutex_unlock(&q->sysfs_lock); |
---|
137 | 142 | return res; |
---|
138 | 143 | } |
---|
.. | .. |
---|
172 | 177 | return pos + ret; |
---|
173 | 178 | } |
---|
174 | 179 | |
---|
175 | | -static struct attribute *default_ctx_attrs[] = { |
---|
176 | | - NULL, |
---|
177 | | -}; |
---|
178 | | - |
---|
179 | 180 | static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = { |
---|
180 | 181 | .attr = {.name = "nr_tags", .mode = 0444 }, |
---|
181 | 182 | .show = blk_mq_hw_sysfs_nr_tags_show, |
---|
.. | .. |
---|
195 | 196 | &blk_mq_hw_sysfs_cpus.attr, |
---|
196 | 197 | NULL, |
---|
197 | 198 | }; |
---|
| 199 | +ATTRIBUTE_GROUPS(default_hw_ctx); |
---|
198 | 200 | |
---|
199 | 201 | static const struct sysfs_ops blk_mq_sysfs_ops = { |
---|
200 | 202 | .show = blk_mq_sysfs_show, |
---|
.. | .. |
---|
213 | 215 | |
---|
/* kobj_type for the per-cpu software context kobjects (the cpuN dirs) */
static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	/* drops the ctx's reference on its blk_mq_ctxs container */
	.release	= blk_mq_ctx_sysfs_release,
};
---|
219 | 220 | |
---|
/* kobj_type for hardware queue kobjects (the mq/N dirs) */
static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	/* attribute groups generated by ATTRIBUTE_GROUPS(default_hw_ctx) */
	.default_groups = default_hw_ctx_groups,
	.release	= blk_mq_hw_sysfs_release,
};
---|
225 | 226 | |
---|
.. | .. |
---|
241 | 242 | { |
---|
242 | 243 | struct request_queue *q = hctx->queue; |
---|
243 | 244 | struct blk_mq_ctx *ctx; |
---|
244 | | - int i, ret; |
---|
| 245 | + int i, j, ret; |
---|
245 | 246 | |
---|
246 | 247 | if (!hctx->nr_ctx) |
---|
247 | 248 | return 0; |
---|
248 | 249 | |
---|
249 | | - ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); |
---|
| 250 | + ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num); |
---|
250 | 251 | if (ret) |
---|
251 | 252 | return ret; |
---|
252 | 253 | |
---|
253 | 254 | hctx_for_each_ctx(hctx, ctx, i) { |
---|
254 | 255 | ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); |
---|
255 | 256 | if (ret) |
---|
256 | | - break; |
---|
| 257 | + goto out; |
---|
257 | 258 | } |
---|
258 | 259 | |
---|
| 260 | + return 0; |
---|
| 261 | +out: |
---|
| 262 | + hctx_for_each_ctx(hctx, ctx, j) { |
---|
| 263 | + if (j < i) |
---|
| 264 | + kobject_del(&ctx->kobj); |
---|
| 265 | + } |
---|
| 266 | + kobject_del(&hctx->kobj); |
---|
259 | 267 | return ret; |
---|
260 | 268 | } |
---|
261 | 269 | |
---|
.. | .. |
---|
264 | 272 | struct blk_mq_hw_ctx *hctx; |
---|
265 | 273 | int i; |
---|
266 | 274 | |
---|
267 | | - lockdep_assert_held(&q->sysfs_lock); |
---|
| 275 | + lockdep_assert_held(&q->sysfs_dir_lock); |
---|
268 | 276 | |
---|
269 | 277 | queue_for_each_hw_ctx(q, hctx, i) |
---|
270 | 278 | blk_mq_unregister_hctx(hctx); |
---|
271 | 279 | |
---|
272 | | - kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); |
---|
273 | | - kobject_del(&q->mq_kobj); |
---|
| 280 | + kobject_uevent(q->mq_kobj, KOBJ_REMOVE); |
---|
| 281 | + kobject_del(q->mq_kobj); |
---|
274 | 282 | kobject_put(&dev->kobj); |
---|
275 | 283 | |
---|
276 | 284 | q->mq_sysfs_init_done = false; |
---|
.. | .. |
---|
290 | 298 | ctx = per_cpu_ptr(q->queue_ctx, cpu); |
---|
291 | 299 | kobject_put(&ctx->kobj); |
---|
292 | 300 | } |
---|
293 | | - kobject_put(&q->mq_kobj); |
---|
| 301 | + kobject_put(q->mq_kobj); |
---|
294 | 302 | } |
---|
295 | 303 | |
---|
/*
 * Initialize (but do not register) the sysfs kobjects backing a queue's
 * blk-mq state: the shared "mq" kobject plus one kobject per possible
 * CPU's software context.  Registration with sysfs happens later through
 * kobject_add() in the register path.
 */
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		/*
		 * Each ctx kobject pins the shared mq kobject; the matching
		 * kobject_put() is in blk_mq_ctx_sysfs_release(), so the
		 * container stays alive until every per-cpu ctx is released.
		 * NOTE(review): presumably q->mq_kobj points at
		 * &ctx->ctxs->kobj — confirm against the allocation site.
		 */
		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}
---|
.. | .. |
---|
312 | 322 | int ret, i; |
---|
313 | 323 | |
---|
314 | 324 | WARN_ON_ONCE(!q->kobj.parent); |
---|
315 | | - lockdep_assert_held(&q->sysfs_lock); |
---|
| 325 | + lockdep_assert_held(&q->sysfs_dir_lock); |
---|
316 | 326 | |
---|
317 | | - ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); |
---|
| 327 | + ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); |
---|
318 | 328 | if (ret < 0) |
---|
319 | 329 | goto out; |
---|
320 | 330 | |
---|
321 | | - kobject_uevent(&q->mq_kobj, KOBJ_ADD); |
---|
| 331 | + kobject_uevent(q->mq_kobj, KOBJ_ADD); |
---|
322 | 332 | |
---|
323 | 333 | queue_for_each_hw_ctx(q, hctx, i) { |
---|
324 | 334 | ret = blk_mq_register_hctx(hctx); |
---|
.. | .. |
---|
335 | 345 | while (--i >= 0) |
---|
336 | 346 | blk_mq_unregister_hctx(q->queue_hw_ctx[i]); |
---|
337 | 347 | |
---|
338 | | - kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); |
---|
339 | | - kobject_del(&q->mq_kobj); |
---|
| 348 | + kobject_uevent(q->mq_kobj, KOBJ_REMOVE); |
---|
| 349 | + kobject_del(q->mq_kobj); |
---|
340 | 350 | kobject_put(&dev->kobj); |
---|
341 | 351 | return ret; |
---|
342 | 352 | } |
---|
343 | | - |
---|
344 | | -int blk_mq_register_dev(struct device *dev, struct request_queue *q) |
---|
345 | | -{ |
---|
346 | | - int ret; |
---|
347 | | - |
---|
348 | | - mutex_lock(&q->sysfs_lock); |
---|
349 | | - ret = __blk_mq_register_dev(dev, q); |
---|
350 | | - mutex_unlock(&q->sysfs_lock); |
---|
351 | | - |
---|
352 | | - return ret; |
---|
353 | | -} |
---|
354 | | -EXPORT_SYMBOL_GPL(blk_mq_register_dev); |
---|
355 | 353 | |
---|
356 | 354 | void blk_mq_sysfs_unregister(struct request_queue *q) |
---|
357 | 355 | { |
---|
358 | 356 | struct blk_mq_hw_ctx *hctx; |
---|
359 | 357 | int i; |
---|
360 | 358 | |
---|
361 | | - mutex_lock(&q->sysfs_lock); |
---|
| 359 | + mutex_lock(&q->sysfs_dir_lock); |
---|
362 | 360 | if (!q->mq_sysfs_init_done) |
---|
363 | 361 | goto unlock; |
---|
364 | 362 | |
---|
.. | .. |
---|
366 | 364 | blk_mq_unregister_hctx(hctx); |
---|
367 | 365 | |
---|
368 | 366 | unlock: |
---|
369 | | - mutex_unlock(&q->sysfs_lock); |
---|
| 367 | + mutex_unlock(&q->sysfs_dir_lock); |
---|
370 | 368 | } |
---|
371 | 369 | |
---|
372 | 370 | int blk_mq_sysfs_register(struct request_queue *q) |
---|
.. | .. |
---|
374 | 372 | struct blk_mq_hw_ctx *hctx; |
---|
375 | 373 | int i, ret = 0; |
---|
376 | 374 | |
---|
377 | | - mutex_lock(&q->sysfs_lock); |
---|
| 375 | + mutex_lock(&q->sysfs_dir_lock); |
---|
378 | 376 | if (!q->mq_sysfs_init_done) |
---|
379 | 377 | goto unlock; |
---|
380 | 378 | |
---|
.. | .. |
---|
385 | 383 | } |
---|
386 | 384 | |
---|
387 | 385 | unlock: |
---|
388 | | - mutex_unlock(&q->sysfs_lock); |
---|
| 386 | + mutex_unlock(&q->sysfs_dir_lock); |
---|
389 | 387 | |
---|
390 | 388 | return ret; |
---|
391 | 389 | } |
---|