2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/kernel/trace/blktrace.c
@@ -67,23 +67,22 @@
  * Send out a notify message.
  */
 static void trace_note(struct blk_trace *bt, pid_t pid, int action,
-		       const void *data, size_t len,
-		       union kernfs_node_id *cgid)
+		       const void *data, size_t len, u64 cgid)
 {
 	struct blk_io_trace *t;
 	struct ring_buffer_event *event = NULL;
-	struct ring_buffer *buffer = NULL;
-	int pc = 0;
+	struct trace_buffer *buffer = NULL;
+	unsigned int trace_ctx = 0;
 	int cpu = smp_processor_id();
 	bool blk_tracer = blk_tracer_enabled;
-	ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
+	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
 
 	if (blk_tracer) {
-		buffer = blk_tr->trace_buffer.buffer;
-		pc = preempt_count();
+		buffer = blk_tr->array_buffer.buffer;
+		trace_ctx = tracing_gen_ctx_flags(0);
 		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 						  sizeof(*t) + len + cgid_len,
-						  0, pc);
+						  trace_ctx);
 		if (!event)
 			return;
 		t = ring_buffer_event_data(event);
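A note on the context word (my summary, not part of the patch): tracing_gen_ctx_flags() packs the given irqflags together with the current preemption count into a single unsigned int, so trace_buffer_lock_reserve() and trace_buffer_unlock_commit() now take one trace_ctx argument instead of the old (irq_flags, preempt_count) pair; passing 0 mirrors the hard-coded zero irq_flags the old code used:

	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx_flags(0);	/* 0 irqflags + preempt count */
	event = trace_buffer_lock_reserve(buffer, TRACE_BLK, size, trace_ctx);
	/* ... fill in the event payload ... */
	trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);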
@@ -103,12 +102,12 @@
 		t->pid = pid;
 		t->cpu = cpu;
 		t->pdu_len = len + cgid_len;
-		if (cgid)
-			memcpy((void *)t + sizeof(*t), cgid, cgid_len);
+		if (cgid_len)
+			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
 		memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
 
 		if (blk_tracer)
-			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
+			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
 	}
 }
 
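For reference, the memcpy arithmetic above lays a notify record out as follows (my reading of this hunk: with the union replaced by a plain u64 the id is copied by value, and a cgid of 0 doubles as "no cgroup", hence the switch from "if (cgid)" to "if (cgid_len)"):

	struct blk_io_trace hdr;	/* hdr.pdu_len = len + cgid_len */
	u64 cgid;			/* only when cgid != 0 (cgid_len == 8) */
	char data[len];			/* notify payload, e.g. message text */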
@@ -125,7 +124,7 @@
 	spin_lock_irqsave(&running_trace_lock, flags);
 	list_for_each_entry(bt, &running_trace_list, running_list) {
 		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
-			   sizeof(tsk->comm), NULL);
+			   sizeof(tsk->comm), 0);
 	}
 	spin_unlock_irqrestore(&running_trace_lock, flags);
 }
@@ -142,7 +141,7 @@
 	words[1] = now.tv_nsec;
 
 	local_irq_save(flags);
-	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), NULL);
+	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
 	local_irq_restore(flags);
 }
 
@@ -174,10 +173,10 @@
 	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
 		blkcg = NULL;
 #ifdef CONFIG_BLK_CGROUP
-	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n,
-		   blkcg ? cgroup_get_kernfs_id(blkcg->css.cgroup) : NULL);
+	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n,
+		   blkcg ? cgroup_id(blkcg->css.cgroup) : 1);
 #else
-	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, NULL);
+	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, 0);
 #endif
 	local_irq_restore(flags);
 }
@@ -215,18 +214,19 @@
  */
 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 			    int op, int op_flags, u32 what, int error, int pdu_len,
-			    void *pdu_data, union kernfs_node_id *cgid)
+			    void *pdu_data, u64 cgid)
 {
 	struct task_struct *tsk = current;
 	struct ring_buffer_event *event = NULL;
-	struct ring_buffer *buffer = NULL;
+	struct trace_buffer *buffer = NULL;
 	struct blk_io_trace *t;
 	unsigned long flags = 0;
 	unsigned long *sequence;
+	unsigned int trace_ctx = 0;
 	pid_t pid;
-	int cpu, pc = 0;
+	int cpu;
 	bool blk_tracer = blk_tracer_enabled;
-	ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
+	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
 
 	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
 		return;
@@ -252,11 +252,11 @@
 	if (blk_tracer) {
 		tracing_record_cmdline(current);
 
-		buffer = blk_tr->trace_buffer.buffer;
-		pc = preempt_count();
+		buffer = blk_tr->array_buffer.buffer;
+		trace_ctx = tracing_gen_ctx_flags(0);
 		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 						  sizeof(*t) + pdu_len + cgid_len,
-						  0, pc);
+						  trace_ctx);
 		if (!event)
 			return;
 		t = ring_buffer_event_data(event);
@@ -297,12 +297,12 @@
 		t->pdu_len = pdu_len + cgid_len;
 
 		if (cgid_len)
-			memcpy((void *)t + sizeof(*t), cgid, cgid_len);
+			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
 		if (pdu_len)
 			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
 
 		if (blk_tracer) {
-			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
+			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
 			return;
 		}
 	}
@@ -348,7 +348,8 @@
 {
 	struct blk_trace *bt;
 
-	bt = xchg(&q->blk_trace, NULL);
+	bt = rcu_replace_pointer(q->blk_trace, NULL,
+				 lockdep_is_held(&q->debugfs_mutex));
 	if (!bt)
 		return -EINVAL;
 
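Sketch of the lifetime rule this hunk leans on (assumption: the cleanup path matches what the rest of this file does): the pointer is detached under q->debugfs_mutex, and the struct may only be freed after an RCU grace period, because the tracepoint handlers still dereference q->blk_trace under rcu_read_lock():

	mutex_lock(&q->debugfs_mutex);
	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	mutex_unlock(&q->debugfs_mutex);
	if (bt) {
		synchronize_rcu();	/* wait out rcu_read_lock() readers */
		blk_trace_free(bt);	/* blktrace's own free helper */
	}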
@@ -362,9 +363,9 @@
 {
 	int ret;
 
-	mutex_lock(&q->blk_trace_mutex);
+	mutex_lock(&q->debugfs_mutex);
 	ret = __blk_trace_remove(q);
-	mutex_unlock(&q->blk_trace_mutex);
+	mutex_unlock(&q->debugfs_mutex);
 
 	return ret;
 }
@@ -483,11 +484,10 @@
 	struct dentry *dir = NULL;
 	int ret;
 
+	lockdep_assert_held(&q->debugfs_mutex);
+
 	if (!buts->buf_size || !buts->buf_nr)
 		return -EINVAL;
-
-	if (!blk_debugfs_root)
-		return -ENOENT;
 
 	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
 	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
@@ -502,7 +502,8 @@
 	 * bdev can be NULL, as with scsi-generic, this is a helpful as
 	 * we can be.
 	 */
-	if (q->blk_trace) {
+	if (rcu_dereference_protected(q->blk_trace,
+				      lockdep_is_held(&q->debugfs_mutex))) {
 		pr_warn("Concurrent blktraces are not allowed on %s\n",
 			buts->name);
 		return -EBUSY;
@@ -521,21 +522,16 @@
 	if (!bt->msg_data)
 		goto err;
 
-#ifdef CONFIG_BLK_DEBUG_FS
 	/*
-	 * When tracing whole make_request drivers (multiqueue) block devices,
-	 * reuse the existing debugfs directory created by the block layer on
-	 * init. For request-based block devices, all partitions block devices,
+	 * When tracing the whole disk reuse the existing debugfs directory
+	 * created by the block layer on init. For partitions block devices,
 	 * and scsi-generic block devices we create a temporary new debugfs
 	 * directory that will be removed once the trace ends.
 	 */
-	if (q->mq_ops && bdev && bdev == bdev->bd_contains)
+	if (bdev && !bdev_is_partition(bdev))
 		dir = q->debugfs_dir;
 	else
-#endif
 		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
-	if (!dir)
-		goto err;
 
 	/*
 	 * As blktrace relies on debugfs for its interface the debugfs directory
@@ -556,12 +552,8 @@
 	ret = -EIO;
 	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
 					       &blk_dropped_fops);
-	if (!bt->dropped_file)
-		goto err;
 
 	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
-	if (!bt->msg_file)
-		goto err;
 
 	bt->rchan = relay_open("trace", dir, buts->buf_size,
 			       buts->buf_nr, &blk_relay_callbacks, bt);
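The dropped NULL checks follow the debugfs convention that callers need not inspect return values (my reading, not stated in the patch): debugfs_create_file() returns an ERR_PTR on failure and later debugfs calls tolerate it, which also explains the removed "!blk_debugfs_root -> -ENOENT" bail-out earlier in the function:

	/* no error check: an ERR_PTR result is safe to store and to hand
	 * back into subsequent debugfs calls, which simply ignore it */
	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);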
@@ -583,10 +575,7 @@
 	bt->pid = buts->pid;
 	bt->trace_state = Blktrace_setup;
 
-	ret = -EBUSY;
-	if (cmpxchg(&q->blk_trace, NULL, bt))
-		goto err;
-
+	rcu_assign_pointer(q->blk_trace, bt);
 	get_probe_ref();
 
 	ret = 0;
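The cmpxchg-based claim-or-fail is gone because the concurrent-setup check moved under q->debugfs_mutex (see the rcu_dereference_protected() hunk above), so plain RCU publication is enough. Writer/reader pairing in sketch form, with the reader side as in the tracepoint handlers further down:

	/* writer, q->debugfs_mutex held */
	rcu_assign_pointer(q->blk_trace, bt);

	/* reader, any context */
	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt)
		__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0, NULL, 0);
	rcu_read_unlock();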
@@ -623,9 +612,9 @@
 {
 	int ret;
 
-	mutex_lock(&q->blk_trace_mutex);
+	mutex_lock(&q->debugfs_mutex);
 	ret = __blk_trace_setup(q, name, dev, bdev, arg);
-	mutex_unlock(&q->blk_trace_mutex);
+	mutex_unlock(&q->debugfs_mutex);
 
 	return ret;
 }
@@ -671,7 +660,7 @@
 	struct blk_trace *bt;
 
 	bt = rcu_dereference_protected(q->blk_trace,
-				       lockdep_is_held(&q->blk_trace_mutex));
+				       lockdep_is_held(&q->debugfs_mutex));
 	if (bt == NULL)
 		return -EINVAL;
 
@@ -711,9 +700,9 @@
 {
 	int ret;
 
-	mutex_lock(&q->blk_trace_mutex);
+	mutex_lock(&q->debugfs_mutex);
 	ret = __blk_trace_startstop(q, start);
-	mutex_unlock(&q->blk_trace_mutex);
+	mutex_unlock(&q->debugfs_mutex);
 
 	return ret;
 }
@@ -742,7 +731,7 @@
 	if (!q)
 		return -ENXIO;
 
-	mutex_lock(&q->blk_trace_mutex);
+	mutex_lock(&q->debugfs_mutex);
 
 	switch (cmd) {
 	case BLKTRACESETUP:
@@ -757,6 +746,7 @@
 #endif
 	case BLKTRACESTART:
 		start = 1;
+		fallthrough;
 	case BLKTRACESTOP:
 		ret = __blk_trace_startstop(q, start);
 		break;
@@ -768,7 +758,7 @@
 		break;
 	}
 
-	mutex_unlock(&q->blk_trace_mutex);
+	mutex_unlock(&q->debugfs_mutex);
 	return ret;
 }
 
774764
....@@ -779,44 +769,42 @@
779769 **/
780770 void blk_trace_shutdown(struct request_queue *q)
781771 {
782
- mutex_lock(&q->blk_trace_mutex);
772
+ mutex_lock(&q->debugfs_mutex);
783773 if (rcu_dereference_protected(q->blk_trace,
784
- lockdep_is_held(&q->blk_trace_mutex))) {
774
+ lockdep_is_held(&q->debugfs_mutex))) {
785775 __blk_trace_startstop(q, 0);
786776 __blk_trace_remove(q);
787777 }
788778
789
- mutex_unlock(&q->blk_trace_mutex);
779
+ mutex_unlock(&q->debugfs_mutex);
790780 }
791781
792782 #ifdef CONFIG_BLK_CGROUP
793
-static union kernfs_node_id *
794
-blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
783
+static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
795784 {
796785 struct blk_trace *bt;
797786
798787 /* We don't use the 'bt' value here except as an optimization... */
799788 bt = rcu_dereference_protected(q->blk_trace, 1);
800789 if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
801
- return NULL;
790
+ return 0;
802791
803
- if (!bio->bi_css)
804
- return NULL;
805
- return cgroup_get_kernfs_id(bio->bi_css->cgroup);
792
+ if (!bio->bi_blkg)
793
+ return 0;
794
+ return cgroup_id(bio_blkcg(bio)->css.cgroup);
806795 }
807796 #else
808
-static union kernfs_node_id *
809
-blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
797
+static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
810798 {
811
- return NULL;
799
+ return 0;
812800 }
813801 #endif
814802
815
-static union kernfs_node_id *
803
+static u64
816804 blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
817805 {
818806 if (!rq->bio)
819
- return NULL;
807
+ return 0;
820808 /* Use the first bio */
821809 return blk_trace_bio_get_cgid(q, rq->bio);
822810 }
@@ -838,8 +826,7 @@
  *
  **/
 static void blk_add_trace_rq(struct request *rq, int error,
-			     unsigned int nr_bytes, u32 what,
-			     union kernfs_node_id *cgid)
+			     unsigned int nr_bytes, u32 what, u64 cgid)
 {
 	struct blk_trace *bt;
 
@@ -871,6 +858,13 @@
 				   struct request_queue *q, struct request *rq)
 {
 	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
+			 blk_trace_request_get_cgid(q, rq));
+}
+
+static void blk_add_trace_rq_merge(void *ignore,
+				   struct request_queue *q, struct request *rq)
+{
+	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
 			 blk_trace_request_get_cgid(q, rq));
 }
 
@@ -925,10 +919,10 @@
 }
 
 static void blk_add_trace_bio_complete(void *ignore,
-				       struct request_queue *q, struct bio *bio,
-				       int error)
+				       struct request_queue *q, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
+	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
+			  blk_status_to_errno(bio->bi_status));
 }
 
 static void blk_add_trace_bio_backmerge(void *ignore,
@@ -966,7 +960,7 @@
 		bt = rcu_dereference(q->blk_trace);
 		if (bt)
 			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
-					NULL, NULL);
+					NULL, 0);
 		rcu_read_unlock();
 	}
 }
@@ -985,7 +979,7 @@
 		bt = rcu_dereference(q->blk_trace);
 		if (bt)
 			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
-					0, 0, NULL, NULL);
+					0, 0, NULL, 0);
 		rcu_read_unlock();
 	}
 }
@@ -997,7 +991,7 @@
 	rcu_read_lock();
 	bt = rcu_dereference(q->blk_trace);
 	if (bt)
-		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL);
+		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
 	rcu_read_unlock();
 }
 
@@ -1017,7 +1011,7 @@
 		else
 			what = BLK_TA_UNPLUG_TIMER;
 
-		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL);
+		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
 	}
 	rcu_read_unlock();
 }
@@ -1158,6 +1152,8 @@
 	WARN_ON(ret);
 	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
 	WARN_ON(ret);
+	ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
+	WARN_ON(ret);
 	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
 	WARN_ON(ret);
 	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
@@ -1204,6 +1200,7 @@
 	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
 	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
 	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
+	unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
 	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
 	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
 
@@ -1256,19 +1253,17 @@
 
 static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
 {
-	return (void *)(te_blk_io_trace(ent) + 1) +
-		(has_cg ? sizeof(union kernfs_node_id) : 0);
+	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
 }
 
-static inline const void *cgid_start(const struct trace_entry *ent)
+static inline u64 t_cgid(const struct trace_entry *ent)
 {
-	return (void *)(te_blk_io_trace(ent) + 1);
+	return *(u64 *)(te_blk_io_trace(ent) + 1);
 }
 
 static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
 {
-	return te_blk_io_trace(ent)->pdu_len -
-		(has_cg ? sizeof(union kernfs_node_id) : 0);
+	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
 }
 
 static inline u32 t_action(const struct trace_entry *ent)
@@ -1330,7 +1325,7 @@
 
 	fill_rwbs(rwbs, t);
 	if (has_cg) {
-		const union kernfs_node_id *id = cgid_start(iter->ent);
+		u64 id = t_cgid(iter->ent);
 
 		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
 			char blkcg_name_buf[NAME_MAX + 1] = "<...>";
@@ -1340,11 +1335,25 @@
 			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
 				 MAJOR(t->device), MINOR(t->device),
 				 blkcg_name_buf, act, rwbs);
-		} else
+		} else {
+			/*
+			 * The cgid portion used to be "INO,GEN". Userland
+			 * builds a FILEID_INO32_GEN fid out of them and
+			 * opens the cgroup using open_by_handle_at(2).
+			 * While 32bit ino setups are still the same, 64bit
+			 * ones now use the 64bit ino as the whole ID and
+			 * no longer use generation.
+			 *
+			 * Regardless of the content, always output
+			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
+			 * be mapped back to @id on both 64 and 32bit ino
+			 * setups. See __kernfs_fh_to_dentry().
+			 */
 			trace_seq_printf(&iter->seq,
-				 "%3d,%-3d %x,%-x %2s %3s ",
+				 "%3d,%-3d %llx,%-llx %2s %3s ",
 				 MAJOR(t->device), MINOR(t->device),
-				 id->ino, id->generation, act, rwbs);
+				 id & U32_MAX, id >> 32, act, rwbs);
+		}
 	} else
 		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
 				MAJOR(t->device), MINOR(t->device), act, rwbs);
16571666 {
16581667 struct blk_trace *bt;
16591668
1660
- bt = xchg(&q->blk_trace, NULL);
1669
+ bt = rcu_replace_pointer(q->blk_trace, NULL,
1670
+ lockdep_is_held(&q->debugfs_mutex));
16611671 if (bt == NULL)
16621672 return -EINVAL;
16631673
@@ -1697,10 +1707,7 @@
 
 	blk_trace_setup_lba(bt, bdev);
 
-	ret = -EBUSY;
-	if (cmpxchg(&q->blk_trace, NULL, bt))
-		goto free_bt;
-
+	rcu_assign_pointer(q->blk_trace, bt);
 	get_probe_ref();
 	return 0;
 
@@ -1829,13 +1836,11 @@
 					 struct device_attribute *attr,
 					 char *buf)
 {
-	struct hd_struct *p = dev_to_part(dev);
+	struct block_device *bdev = bdget_part(dev_to_part(dev));
 	struct request_queue *q;
-	struct block_device *bdev;
 	struct blk_trace *bt;
 	ssize_t ret = -ENXIO;
 
-	bdev = bdget(part_devt(p));
 	if (bdev == NULL)
 		goto out;
 
@@ -1843,10 +1848,10 @@
 	if (q == NULL)
 		goto out_bdput;
 
-	mutex_lock(&q->blk_trace_mutex);
+	mutex_lock(&q->debugfs_mutex);
 
 	bt = rcu_dereference_protected(q->blk_trace,
-				       lockdep_is_held(&q->blk_trace_mutex));
+				       lockdep_is_held(&q->debugfs_mutex));
 	if (attr == &dev_attr_enable) {
 		ret = sprintf(buf, "%u\n", !!bt);
 		goto out_unlock_bdev;
@@ -1864,7 +1869,7 @@
 		ret = sprintf(buf, "%llu\n", bt->end_lba);
 
 out_unlock_bdev:
-	mutex_unlock(&q->blk_trace_mutex);
+	mutex_unlock(&q->debugfs_mutex);
 out_bdput:
 	bdput(bdev);
 out:
@@ -1877,7 +1882,6 @@
 {
 	struct block_device *bdev;
 	struct request_queue *q;
-	struct hd_struct *p;
 	struct blk_trace *bt;
 	u64 value;
 	ssize_t ret = -EINVAL;
@@ -1897,9 +1901,7 @@
 		goto out;
 
 	ret = -ENXIO;
-
-	p = dev_to_part(dev);
-	bdev = bdget(part_devt(p));
+	bdev = bdget_part(dev_to_part(dev));
 	if (bdev == NULL)
 		goto out;
 
@@ -1907,10 +1909,10 @@
 	if (q == NULL)
 		goto out_bdput;
 
-	mutex_lock(&q->blk_trace_mutex);
+	mutex_lock(&q->debugfs_mutex);
 
 	bt = rcu_dereference_protected(q->blk_trace,
-				       lockdep_is_held(&q->blk_trace_mutex));
+				       lockdep_is_held(&q->debugfs_mutex));
 	if (attr == &dev_attr_enable) {
 		if (!!value == !!bt) {
 			ret = 0;
@@ -1927,7 +1929,7 @@
 	if (bt == NULL) {
 		ret = blk_trace_setup_queue(q, bdev);
 		bt = rcu_dereference_protected(q->blk_trace,
-				lockdep_is_held(&q->blk_trace_mutex));
+				lockdep_is_held(&q->debugfs_mutex));
 	}
 
 	if (ret == 0) {
@@ -1942,7 +1944,7 @@
 	}
 
 out_unlock_bdev:
-	mutex_unlock(&q->blk_trace_mutex);
+	mutex_unlock(&q->debugfs_mutex);
 out_bdput:
 	bdput(bdev);
 out: