2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/kernel/trace/blktrace.c
@@ -67,19 +67,18 @@
  * Send out a notify message.
  */
 static void trace_note(struct blk_trace *bt, pid_t pid, int action,
-                       const void *data, size_t len,
-                       union kernfs_node_id *cgid)
+                       const void *data, size_t len, u64 cgid)
 {
         struct blk_io_trace *t;
         struct ring_buffer_event *event = NULL;
-        struct ring_buffer *buffer = NULL;
+        struct trace_buffer *buffer = NULL;
         int pc = 0;
         int cpu = smp_processor_id();
         bool blk_tracer = blk_tracer_enabled;
-        ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
+        ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

         if (blk_tracer) {
-                buffer = blk_tr->trace_buffer.buffer;
+                buffer = blk_tr->array_buffer.buffer;
                 pc = preempt_count();
                 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
                                                   sizeof(*t) + len + cgid_len,
@@ -103,8 +102,8 @@
         t->pid = pid;
         t->cpu = cpu;
         t->pdu_len = len + cgid_len;
-        if (cgid)
-                memcpy((void *)t + sizeof(*t), cgid, cgid_len);
+        if (cgid_len)
+                memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
         memcpy((void *) t + sizeof(*t) + cgid_len, data, len);

         if (blk_tracer)
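With cgid now a plain u64 carried by value, the record layout after struct blk_io_trace becomes: an optional 8-byte cgid, then the len-byte payload, with pdu_len covering both. A consumer-side sketch of walking that layout (the struct mirrors include/uapi/linux/blktrace_api.h; the cgroup-flag bit value is an assumption quoted from the uapi header, not from these hunks):

#include <stdint.h>
#include <string.h>

/* Mirrors struct blk_io_trace from include/uapi/linux/blktrace_api.h. */
struct blk_io_trace {
        uint32_t magic;         /* MAGIC << 8 | version */
        uint32_t sequence;      /* event number */
        uint64_t time;          /* in nanoseconds */
        uint64_t sector;        /* disk offset */
        uint32_t bytes;         /* transfer length */
        uint32_t action;        /* what happened (incl. notify/cgroup bits) */
        uint32_t pid;           /* who did it */
        uint32_t device;        /* device identifier */
        uint32_t cpu;           /* on what cpu did it happen */
        uint16_t error;         /* completion error */
        uint16_t pdu_len;       /* length of data after this trace */
        /* with this patch: a u64 cgid here when flagged, then the payload */
};

#define BLK_CGROUP_FLAG (1U << 8)       /* assumed: __BLK_TA_CGROUP/__BLK_TN_CGROUP */

/* Return the payload start and extract the cgid (0 if the record has none). */
static const void *record_payload(const struct blk_io_trace *t, uint64_t *cgid)
{
        const char *pdu = (const char *)(t + 1);

        *cgid = 0;
        if (t->action & BLK_CGROUP_FLAG) {
                memcpy(cgid, pdu, sizeof(*cgid));       /* may be unaligned */
                pdu += sizeof(*cgid);
        }
        return pdu;     /* t->pdu_len minus the cgid bytes remain */
}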
@@ -125,7 +124,7 @@
         spin_lock_irqsave(&running_trace_lock, flags);
         list_for_each_entry(bt, &running_trace_list, running_list) {
                 trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
                            sizeof(tsk->comm), 0);
         }
         spin_unlock_irqrestore(&running_trace_lock, flags);
 }
@@ -142,7 +141,7 @@
         words[1] = now.tv_nsec;

         local_irq_save(flags);
-        trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), NULL);
+        trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
         local_irq_restore(flags);
 }

@@ -174,10 +173,10 @@
         if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
                 blkcg = NULL;
 #ifdef CONFIG_BLK_CGROUP
-        trace_note(bt, 0, BLK_TN_MESSAGE, buf, n,
-                   blkcg ? cgroup_get_kernfs_id(blkcg->css.cgroup) : NULL);
+        trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n,
+                   blkcg ? cgroup_id(blkcg->css.cgroup) : 1);
 #else
-        trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, NULL);
+        trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, 0);
 #endif
         local_irq_restore(flags);
 }
@@ -215,18 +214,18 @@
  */
 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                             int op, int op_flags, u32 what, int error, int pdu_len,
-                            void *pdu_data, union kernfs_node_id *cgid)
+                            void *pdu_data, u64 cgid)
 {
         struct task_struct *tsk = current;
         struct ring_buffer_event *event = NULL;
-        struct ring_buffer *buffer = NULL;
+        struct trace_buffer *buffer = NULL;
         struct blk_io_trace *t;
         unsigned long flags = 0;
         unsigned long *sequence;
         pid_t pid;
         int cpu, pc = 0;
         bool blk_tracer = blk_tracer_enabled;
-        ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
+        ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

         if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
                 return;
@@ -252,7 +251,7 @@
         if (blk_tracer) {
                 tracing_record_cmdline(current);

-                buffer = blk_tr->trace_buffer.buffer;
+                buffer = blk_tr->array_buffer.buffer;
                 pc = preempt_count();
                 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
                                                   sizeof(*t) + pdu_len + cgid_len,
@@ -297,7 +296,7 @@
         t->pdu_len = pdu_len + cgid_len;

         if (cgid_len)
-                memcpy((void *)t + sizeof(*t), cgid, cgid_len);
+                memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
         if (pdu_len)
                 memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);

@@ -348,7 +347,8 @@
 {
         struct blk_trace *bt;

-        bt = xchg(&q->blk_trace, NULL);
+        bt = rcu_replace_pointer(q->blk_trace, NULL,
+                                 lockdep_is_held(&q->debugfs_mutex));
         if (!bt)
                 return -EINVAL;

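The xchg()/cmpxchg() pairs are replaced throughout by the standard RCU publish/retract pattern, serialized by q->debugfs_mutex, which is exactly what the lockdep_is_held() expressions document. A condensed sketch of that pattern with illustrative names (my_obj, my_lock and use() are placeholders, not blktrace code):

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct my_obj;                          /* placeholder payload type */
void use(struct my_obj *obj);           /* placeholder reader-side work */

static struct my_obj __rcu *global_ptr;
static DEFINE_MUTEX(my_lock);

/* Writer: publish under the mutex; pairs with readers' rcu_dereference(). */
static void publish(struct my_obj *obj)
{
        mutex_lock(&my_lock);
        rcu_assign_pointer(global_ptr, obj);
        mutex_unlock(&my_lock);
}

/* Writer: retract; lockdep_is_held() documents and verifies the lock rule. */
static struct my_obj *retract(void)
{
        struct my_obj *old;

        mutex_lock(&my_lock);
        old = rcu_replace_pointer(global_ptr, NULL, lockdep_is_held(&my_lock));
        mutex_unlock(&my_lock);
        if (old)
                synchronize_rcu();      /* wait out readers before freeing */
        return old;
}

/* Reader: the pointer must not escape the read-side critical section. */
static void reader(void)
{
        struct my_obj *obj;

        rcu_read_lock();
        obj = rcu_dereference(global_ptr);
        if (obj)
                use(obj);
        rcu_read_unlock();
}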
@@ -362,9 +362,9 @@
 {
         int ret;

-        mutex_lock(&q->blk_trace_mutex);
+        mutex_lock(&q->debugfs_mutex);
         ret = __blk_trace_remove(q);
-        mutex_unlock(&q->blk_trace_mutex);
+        mutex_unlock(&q->debugfs_mutex);

         return ret;
 }
@@ -483,11 +483,10 @@
         struct dentry *dir = NULL;
         int ret;

+        lockdep_assert_held(&q->debugfs_mutex);
+
         if (!buts->buf_size || !buts->buf_nr)
                 return -EINVAL;
-
-        if (!blk_debugfs_root)
-                return -ENOENT;

         strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
         buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
@@ -502,7 +501,8 @@
          * bdev can be NULL, as with scsi-generic; this is as helpful as
          * we can be.
          */
-        if (q->blk_trace) {
+        if (rcu_dereference_protected(q->blk_trace,
+                                      lockdep_is_held(&q->debugfs_mutex))) {
                 pr_warn("Concurrent blktraces are not allowed on %s\n",
                         buts->name);
                 return -EBUSY;
@@ -521,21 +521,16 @@
         if (!bt->msg_data)
                 goto err;

-#ifdef CONFIG_BLK_DEBUG_FS
         /*
-         * When tracing whole make_request drivers (multiqueue) block devices,
-         * reuse the existing debugfs directory created by the block layer on
-         * init. For request-based block devices, all partitions block devices,
+         * When tracing the whole disk, reuse the existing debugfs directory
+         * created by the block layer on init. For partition block devices
          * and scsi-generic block devices we create a temporary new debugfs
         * directory that will be removed once the trace ends.
         */
-        if (q->mq_ops && bdev && bdev == bdev->bd_contains)
+        if (bdev && !bdev_is_partition(bdev))
                 dir = q->debugfs_dir;
         else
-#endif
                 bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
-        if (!dir)
-                goto err;

         /*
          * As blktrace relies on debugfs for its interface the debugfs directory
@@ -556,12 +551,8 @@
         ret = -EIO;
         bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
                                                &blk_dropped_fops);
-        if (!bt->dropped_file)
-                goto err;

         bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
-        if (!bt->msg_file)
-                goto err;

         bt->rchan = relay_open("trace", dir, buts->buf_size,
                                buts->buf_nr, &blk_relay_callbacks, bt);
@@ -583,10 +574,7 @@
         bt->pid = buts->pid;
         bt->trace_state = Blktrace_setup;

-        ret = -EBUSY;
-        if (cmpxchg(&q->blk_trace, NULL, bt))
-                goto err;
-
+        rcu_assign_pointer(q->blk_trace, bt);
         get_probe_ref();

         ret = 0;
@@ -623,9 +611,9 @@
 {
         int ret;

-        mutex_lock(&q->blk_trace_mutex);
+        mutex_lock(&q->debugfs_mutex);
         ret = __blk_trace_setup(q, name, dev, bdev, arg);
-        mutex_unlock(&q->blk_trace_mutex);
+        mutex_unlock(&q->debugfs_mutex);

         return ret;
 }
@@ -671,7 +659,7 @@
         struct blk_trace *bt;

         bt = rcu_dereference_protected(q->blk_trace,
-                                       lockdep_is_held(&q->blk_trace_mutex));
+                                       lockdep_is_held(&q->debugfs_mutex));
         if (bt == NULL)
                 return -EINVAL;

@@ -711,9 +699,9 @@
 {
         int ret;

-        mutex_lock(&q->blk_trace_mutex);
+        mutex_lock(&q->debugfs_mutex);
         ret = __blk_trace_startstop(q, start);
-        mutex_unlock(&q->blk_trace_mutex);
+        mutex_unlock(&q->debugfs_mutex);

         return ret;
 }
@@ -742,7 +730,7 @@
         if (!q)
                 return -ENXIO;

-        mutex_lock(&q->blk_trace_mutex);
+        mutex_lock(&q->debugfs_mutex);

         switch (cmd) {
         case BLKTRACESETUP:
@@ -757,6 +745,7 @@
 #endif
         case BLKTRACESTART:
                 start = 1;
+                fallthrough;
         case BLKTRACESTOP:
                 ret = __blk_trace_startstop(q, start);
                 break;
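The bare fall-through from BLKTRACESTART to BLKTRACESTOP now carries an explicit fallthrough; statement, which keeps -Wimplicit-fallthrough quiet while preserving the shared handling. A standalone sketch of what the pseudo-keyword expands to (the kernel's real definition lives in include/linux/compiler_attributes.h, keyed off compiler support):

/* Illustration only: the kernel defines this centrally. */
#if defined(__GNUC__) && __GNUC__ >= 7
#define fallthrough     __attribute__((__fallthrough__))
#else
#define fallthrough     do {} while (0) /* fallback: empty statement */
#endif

static int startstop(unsigned int cmd)
{
        int start = 0;

        switch (cmd) {
        case 1:                         /* think BLKTRACESTART */
                start = 1;
                fallthrough;            /* deliberate: the STOP arm handles both */
        case 2:                         /* think BLKTRACESTOP */
                return start;
        default:
                return -1;
        }
}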
@@ -768,7 +757,7 @@
                 break;
         }

-        mutex_unlock(&q->blk_trace_mutex);
+        mutex_unlock(&q->debugfs_mutex);
         return ret;
 }

@@ -779,44 +768,42 @@
  **/
 void blk_trace_shutdown(struct request_queue *q)
 {
-        mutex_lock(&q->blk_trace_mutex);
+        mutex_lock(&q->debugfs_mutex);
         if (rcu_dereference_protected(q->blk_trace,
-                                      lockdep_is_held(&q->blk_trace_mutex))) {
+                                      lockdep_is_held(&q->debugfs_mutex))) {
                 __blk_trace_startstop(q, 0);
                 __blk_trace_remove(q);
         }

-        mutex_unlock(&q->blk_trace_mutex);
+        mutex_unlock(&q->debugfs_mutex);
 }

 #ifdef CONFIG_BLK_CGROUP
-static union kernfs_node_id *
-blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
+static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 {
         struct blk_trace *bt;

         /* We don't use the 'bt' value here except as an optimization... */
         bt = rcu_dereference_protected(q->blk_trace, 1);
         if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
-                return NULL;
+                return 0;

-        if (!bio->bi_css)
-                return NULL;
-        return cgroup_get_kernfs_id(bio->bi_css->cgroup);
+        if (!bio->bi_blkg)
+                return 0;
+        return cgroup_id(bio_blkcg(bio)->css.cgroup);
 }
 #else
-static union kernfs_node_id *
-blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
+static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 {
-        return NULL;
+        return 0;
 }
 #endif

-static union kernfs_node_id *
+static u64
 blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
 {
         if (!rq->bio)
-                return NULL;
+                return 0;
         /* Use the first bio */
         return blk_trace_bio_get_cgid(q, rq->bio);
 }
@@ -838,8 +825,7 @@
  *
  **/
 static void blk_add_trace_rq(struct request *rq, int error,
-                             unsigned int nr_bytes, u32 what,
-                             union kernfs_node_id *cgid)
+                             unsigned int nr_bytes, u32 what, u64 cgid)
 {
         struct blk_trace *bt;

@@ -871,6 +857,13 @@
                                    struct request_queue *q, struct request *rq)
 {
         blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
+                         blk_trace_request_get_cgid(q, rq));
+}
+
+static void blk_add_trace_rq_merge(void *ignore,
+                                   struct request_queue *q, struct request *rq)
+{
+        blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
                          blk_trace_request_get_cgid(q, rq));
 }

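The new probe presumes a block_rq_merge tracepoint sharing the existing block_rq event class; its declaration in include/trace/events/block.h would look roughly like this (a sketch, not the verbatim header):

/* Sketch of the tracepoint the new probe attaches to; block_rq is the
 * event class already used by block_rq_insert/issue/requeue. */
DEFINE_EVENT(block_rq, block_rq_merge,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq)
);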
@@ -925,10 +918,10 @@
 }

 static void blk_add_trace_bio_complete(void *ignore,
-                                       struct request_queue *q, struct bio *bio,
-                                       int error)
+                                       struct request_queue *q, struct bio *bio)
 {
-        blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
+        blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
+                          blk_status_to_errno(bio->bi_status));
 }

 static void blk_add_trace_bio_backmerge(void *ignore,
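Since the tracepoint no longer passes an errno in, the probe derives one itself from bio->bi_status via blk_status_to_errno(). A condensed illustration of that blk_status_t-to-errno mapping (the authoritative table is blk_errors[] in block/blk-core.c; only a few entries shown):

#include <linux/blk_types.h>
#include <linux/errno.h>

/* Condensed illustration, not the real lookup table. */
static inline int blk_status_to_errno_sketch(blk_status_t status)
{
        switch (status) {
        case BLK_STS_OK:        return 0;
        case BLK_STS_NOTSUPP:   return -EOPNOTSUPP;
        case BLK_STS_TIMEOUT:   return -ETIMEDOUT;
        case BLK_STS_NOSPC:     return -ENOSPC;
        case BLK_STS_MEDIUM:    return -ENODATA;
        default:                return -EIO;    /* BLK_STS_IOERR et al. */
        }
}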
@@ -966,7 +959,7 @@
                 bt = rcu_dereference(q->blk_trace);
                 if (bt)
                         __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
-                                        NULL, NULL);
+                                        NULL, 0);
                 rcu_read_unlock();
         }
 }
@@ -985,7 +978,7 @@
                 bt = rcu_dereference(q->blk_trace);
                 if (bt)
                         __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
-                                        0, 0, NULL, NULL);
+                                        0, 0, NULL, 0);
                 rcu_read_unlock();
         }
 }
@@ -997,7 +990,7 @@
         rcu_read_lock();
         bt = rcu_dereference(q->blk_trace);
         if (bt)
-                __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL);
+                __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
         rcu_read_unlock();
 }

@@ -1017,7 +1010,7 @@
                 else
                         what = BLK_TA_UNPLUG_TIMER;

-                __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL);
+                __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
         }
         rcu_read_unlock();
 }
@@ -1158,6 +1151,8 @@
         WARN_ON(ret);
         ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
         WARN_ON(ret);
+        ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
+        WARN_ON(ret);
         ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
         WARN_ON(ret);
         ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
@@ -1204,6 +1199,7 @@
         unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
         unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
         unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
+        unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
         unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
         unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);

@@ -1256,19 +1252,17 @@

 static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
 {
-        return (void *)(te_blk_io_trace(ent) + 1) +
-                (has_cg ? sizeof(union kernfs_node_id) : 0);
+        return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
 }

-static inline const void *cgid_start(const struct trace_entry *ent)
+static inline u64 t_cgid(const struct trace_entry *ent)
 {
-        return (void *)(te_blk_io_trace(ent) + 1);
+        return *(u64 *)(te_blk_io_trace(ent) + 1);
 }

 static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
 {
-        return te_blk_io_trace(ent)->pdu_len -
-                (has_cg ? sizeof(union kernfs_node_id) : 0);
+        return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
 }

 static inline u32 t_action(const struct trace_entry *ent)
@@ -1330,7 +1324,7 @@

         fill_rwbs(rwbs, t);
         if (has_cg) {
-                const union kernfs_node_id *id = cgid_start(iter->ent);
+                u64 id = t_cgid(iter->ent);

                 if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
                         char blkcg_name_buf[NAME_MAX + 1] = "<...>";
@@ -1340,11 +1334,25 @@
                         trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
                                  MAJOR(t->device), MINOR(t->device),
                                  blkcg_name_buf, act, rwbs);
-                } else
+                } else {
+                        /*
+                         * The cgid portion used to be "INO,GEN". Userland
+                         * builds a FILEID_INO32_GEN fid out of them and
+                         * opens the cgroup using open_by_handle_at(2).
+                         * While 32bit ino setups are still the same, 64bit
+                         * ones now use the 64bit ino as the whole ID and
+                         * no longer use generation.
+                         *
+                         * Regardless of the content, always output
+                         * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
+                         * be mapped back to @id on both 64 and 32bit ino
+                         * setups. See __kernfs_fh_to_dentry().
+                         */
                         trace_seq_printf(&iter->seq,
-                                 "%3d,%-3d %x,%-x %2s %3s ",
+                                 "%3d,%-3d %llx,%-llx %2s %3s ",
                                  MAJOR(t->device), MINOR(t->device),
-                                 id->ino, id->generation, act, rwbs);
+                                 id & U32_MAX, id >> 32, act, rwbs);
+                }
         } else
                 trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
                          MAJOR(t->device), MINOR(t->device), act, rwbs);
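The LOW32,HIGH32 output keeps the userland contract stable: the consumer re-packs the two hex words into a FILEID_INO32_GEN file handle and resolves it with open_by_handle_at(2) against a cgroup2 mount. A user-space sketch of that reconstruction (mount-fd acquisition and error handling trimmed; needs CAP_DAC_READ_SEARCH; the handle layout and type value 2 for FILEID_INO32_GEN are quoted from linux/exportfs.h):

#define _GNU_SOURCE             /* open_by_handle_at(), struct file_handle */
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/*
 * Rebuild a cgroup fd from the "LOW32,HIGH32" words printed above; the
 * (ino32, gen32) layout matches what __kernfs_fh_to_dentry() accepts.
 * mount_fd must refer to a cgroup2 mount.
 */
static int open_cgroup_by_id(int mount_fd, uint32_t low, uint32_t high)
{
        uint32_t words[2] = { low, high };      /* ino word, gen/high word */
        struct file_handle *fh;
        int fd;

        fh = malloc(sizeof(*fh) + sizeof(words));
        if (!fh)
                return -1;
        fh->handle_bytes = sizeof(words);
        fh->handle_type = 2;                    /* FILEID_INO32_GEN */
        memcpy(fh->f_handle, words, sizeof(words));

        fd = open_by_handle_at(mount_fd, fh, O_RDONLY);
        free(fh);
        return fd;                              /* -1 with errno on failure */
}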
@@ -1594,7 +1602,8 @@

 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
 {
-        if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
+        if ((iter->ent->type != TRACE_BLK) ||
+            !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
                 return TRACE_TYPE_UNHANDLED;

         return print_one_line(iter, true);
@@ -1657,7 +1666,8 @@
 {
         struct blk_trace *bt;

-        bt = xchg(&q->blk_trace, NULL);
+        bt = rcu_replace_pointer(q->blk_trace, NULL,
+                                 lockdep_is_held(&q->debugfs_mutex));
         if (bt == NULL)
                 return -EINVAL;

@@ -1697,10 +1707,7 @@

         blk_trace_setup_lba(bt, bdev);

-        ret = -EBUSY;
-        if (cmpxchg(&q->blk_trace, NULL, bt))
-                goto free_bt;
-
+        rcu_assign_pointer(q->blk_trace, bt);
         get_probe_ref();
         return 0;

@@ -1829,13 +1836,11 @@
                                          struct device_attribute *attr,
                                          char *buf)
 {
-        struct hd_struct *p = dev_to_part(dev);
+        struct block_device *bdev = bdget_part(dev_to_part(dev));
         struct request_queue *q;
-        struct block_device *bdev;
         struct blk_trace *bt;
         ssize_t ret = -ENXIO;

-        bdev = bdget(part_devt(p));
         if (bdev == NULL)
                 goto out;

@@ -1843,10 +1848,10 @@
         if (q == NULL)
                 goto out_bdput;

-        mutex_lock(&q->blk_trace_mutex);
+        mutex_lock(&q->debugfs_mutex);

         bt = rcu_dereference_protected(q->blk_trace,
-                                       lockdep_is_held(&q->blk_trace_mutex));
+                                       lockdep_is_held(&q->debugfs_mutex));
         if (attr == &dev_attr_enable) {
                 ret = sprintf(buf, "%u\n", !!bt);
                 goto out_unlock_bdev;
@@ -1864,7 +1869,7 @@
                 ret = sprintf(buf, "%llu\n", bt->end_lba);

 out_unlock_bdev:
-        mutex_unlock(&q->blk_trace_mutex);
+        mutex_unlock(&q->debugfs_mutex);
 out_bdput:
         bdput(bdev);
 out:
@@ -1877,7 +1882,6 @@
 {
         struct block_device *bdev;
         struct request_queue *q;
-        struct hd_struct *p;
         struct blk_trace *bt;
         u64 value;
         ssize_t ret = -EINVAL;
@@ -1897,9 +1901,7 @@
                 goto out;

         ret = -ENXIO;
-
-        p = dev_to_part(dev);
-        bdev = bdget(part_devt(p));
+        bdev = bdget_part(dev_to_part(dev));
         if (bdev == NULL)
                 goto out;

@@ -1907,10 +1909,10 @@
         if (q == NULL)
                 goto out_bdput;

-        mutex_lock(&q->blk_trace_mutex);
+        mutex_lock(&q->debugfs_mutex);

         bt = rcu_dereference_protected(q->blk_trace,
-                                       lockdep_is_held(&q->blk_trace_mutex));
+                                       lockdep_is_held(&q->debugfs_mutex));
         if (attr == &dev_attr_enable) {
                 if (!!value == !!bt) {
                         ret = 0;
@@ -1927,7 +1929,7 @@
         if (bt == NULL) {
                 ret = blk_trace_setup_queue(q, bdev);
                 bt = rcu_dereference_protected(q->blk_trace,
                                                lockdep_is_held(&q->debugfs_mutex));
         }

         if (ret == 0) {
@@ -1942,7 +1944,7 @@
         }

 out_unlock_bdev:
-        mutex_unlock(&q->blk_trace_mutex);
+        mutex_unlock(&q->debugfs_mutex);
 out_bdput:
         bdput(bdev);
 out:
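These sysfs attributes are the non-ioctl path into blktrace: writing to /sys/block/<disk>/trace/enable lands in sysfs_blk_trace_attr_store() above. A minimal user-space sketch ("sda" is a placeholder device; needs root):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Equivalent of: echo 1 > /sys/block/sda/trace/enable */
int main(void)
{
        int fd = open("/sys/block/sda/trace/enable", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "1", 1) != 1)
                perror("write");
        close(fd);
        return 0;
}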