2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/mm/backing-dev.c

--- a/kernel/mm/backing-dev.c
+++ b/kernel/mm/backing-dev.c
@@ -1,5 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 
 #include <linux/wait.h>
+#include <linux/rbtree.h>
 #include <linux/backing-dev.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
@@ -12,20 +14,19 @@
 #include <linux/device.h>
 #include <trace/events/writeback.h>
 
-struct backing_dev_info noop_backing_dev_info = {
-	.name		= "noop",
-	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
+struct backing_dev_info noop_backing_dev_info;
 EXPORT_SYMBOL_GPL(noop_backing_dev_info);
 
 static struct class *bdi_class;
 static const char *bdi_unknown_name = "(unknown)";
 
 /*
- * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
- * locking.
+ * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
+ * reader side locking.
  */
 DEFINE_SPINLOCK(bdi_lock);
+static u64 bdi_id_cursor;
+static struct rb_root bdi_tree = RB_ROOT;
 LIST_HEAD(bdi_list);
 
 /* bdi_wq serves all asynchronous writeback tasks */
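
For illustration only (not part of the patch): the updated comment states the locking rules the rest of the file relies on. Updates to bdi_tree and bdi_list happen under bdi_lock, while bdi_list readers may rely on RCU. A minimal sketch of both sides, with hypothetical helper names:

/* Writer side: take bdi_lock for any bdi_tree or bdi_list update. */
static void example_publish_bdi(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);
}

/* Reader side: bdi_list can be walked under RCU without bdi_lock. */
static void example_walk_bdi_list(void)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		/* inspect bdi; must not sleep inside the RCU section */
	}
	rcu_read_unlock();
}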
@@ -103,39 +104,25 @@
 }
 DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);
 
-static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
+static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 {
-	if (!bdi_debug_root)
-		return -ENOMEM;
-
 	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
-	if (!bdi->debug_dir)
-		return -ENOMEM;
 
-	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
-					       bdi, &bdi_debug_stats_fops);
-	if (!bdi->debug_stats) {
-		debugfs_remove(bdi->debug_dir);
-		bdi->debug_dir = NULL;
-		return -ENOMEM;
-	}
-
-	return 0;
+	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
+			    &bdi_debug_stats_fops);
 }
 
 static void bdi_debug_unregister(struct backing_dev_info *bdi)
 {
-	debugfs_remove(bdi->debug_stats);
-	debugfs_remove(bdi->debug_dir);
+	debugfs_remove_recursive(bdi->debug_dir);
 }
 #else
 static inline void bdi_debug_init(void)
 {
 }
-static inline int bdi_debug_register(struct backing_dev_info *bdi,
+static inline void bdi_debug_register(struct backing_dev_info *bdi,
 				      const char *name)
 {
-	return 0;
 }
 static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
 {
@@ -215,10 +202,9 @@
 					  struct device_attribute *attr,
 					  char *page)
 {
-	struct backing_dev_info *bdi = dev_get_drvdata(dev);
-
-	return snprintf(page, PAGE_SIZE-1, "%d\n",
-			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
+	dev_warn_once(dev,
+		"the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
+	return snprintf(page, PAGE_SIZE-1, "%d\n", 0);
 }
 static DEVICE_ATTR_RO(stable_pages_required);
 
@@ -292,7 +278,7 @@
 #define INIT_BW		(100 << (20 - PAGE_SHIFT))
 
 static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
-		   int blkcg_id, gfp_t gfp)
+		   gfp_t gfp)
 {
 	int i, err;
 
@@ -319,15 +305,9 @@
 	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
 	wb->dirty_sleep = jiffies;
 
-	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
-	if (!wb->congested) {
-		err = -ENOMEM;
-		goto out_put_bdi;
-	}
-
 	err = fprop_local_init_percpu(&wb->completions, gfp);
 	if (err)
-		goto out_put_cong;
+		goto out_put_bdi;
 
 	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
 		err = percpu_counter_init(&wb->stat[i], 0, gfp);
@@ -341,8 +321,6 @@
 	while (i--)
 		percpu_counter_destroy(&wb->stat[i]);
 	fprop_local_destroy_percpu(&wb->completions);
-out_put_cong:
-	wb_congested_put(wb->congested);
 out_put_bdi:
 	if (wb != &bdi->wb)
 		bdi_put(bdi);
@@ -385,7 +363,6 @@
 		percpu_counter_destroy(&wb->stat[i]);
 
 	fprop_local_destroy_percpu(&wb->completions);
-	wb_congested_put(wb->congested);
 	if (wb != &wb->bdi->wb)
 		bdi_put(wb->bdi);
 }
@@ -395,97 +372,19 @@
 #include <linux/memcontrol.h>
 
 /*
- * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
- * blkcg->cgwb_list, and memcg->cgwb_list. bdi->cgwb_tree is also RCU
- * protected.
+ * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, and memcg->cgwb_list.
+ * bdi->cgwb_tree is also RCU protected.
  */
 static DEFINE_SPINLOCK(cgwb_lock);
 static struct workqueue_struct *cgwb_release_wq;
 
-/**
- * wb_congested_get_create - get or create a wb_congested
- * @bdi: associated bdi
- * @blkcg_id: ID of the associated blkcg
- * @gfp: allocation mask
- *
- * Look up the wb_congested for @blkcg_id on @bdi. If missing, create one.
- * The returned wb_congested has its reference count incremented. Returns
- * NULL on failure.
- */
-struct bdi_writeback_congested *
-wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
+static void cgwb_free_rcu(struct rcu_head *rcu_head)
 {
-	struct bdi_writeback_congested *new_congested = NULL, *congested;
-	struct rb_node **node, *parent;
-	unsigned long flags;
-retry:
-	spin_lock_irqsave(&cgwb_lock, flags);
+	struct bdi_writeback *wb = container_of(rcu_head,
+			struct bdi_writeback, rcu);
 
-	node = &bdi->cgwb_congested_tree.rb_node;
-	parent = NULL;
-
-	while (*node != NULL) {
-		parent = *node;
-		congested = rb_entry(parent, struct bdi_writeback_congested,
-				     rb_node);
-		if (congested->blkcg_id < blkcg_id)
-			node = &parent->rb_left;
-		else if (congested->blkcg_id > blkcg_id)
-			node = &parent->rb_right;
-		else
-			goto found;
-	}
-
-	if (new_congested) {
-		/* !found and storage for new one already allocated, insert */
-		congested = new_congested;
-		rb_link_node(&congested->rb_node, parent, node);
-		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
-		spin_unlock_irqrestore(&cgwb_lock, flags);
-		return congested;
-	}
-
-	spin_unlock_irqrestore(&cgwb_lock, flags);
-
-	/* allocate storage for new one and retry */
-	new_congested = kzalloc(sizeof(*new_congested), gfp);
-	if (!new_congested)
-		return NULL;
-
-	refcount_set(&new_congested->refcnt, 1);
-	new_congested->__bdi = bdi;
-	new_congested->blkcg_id = blkcg_id;
-	goto retry;
-
-found:
-	refcount_inc(&congested->refcnt);
-	spin_unlock_irqrestore(&cgwb_lock, flags);
-	kfree(new_congested);
-	return congested;
-}
-
-/**
- * wb_congested_put - put a wb_congested
- * @congested: wb_congested to put
- *
- * Put @congested and destroy it if the refcnt reaches zero.
- */
-void wb_congested_put(struct bdi_writeback_congested *congested)
-{
-	unsigned long flags;
-
-	if (!refcount_dec_and_lock_irqsave(&congested->refcnt, &cgwb_lock, &flags))
-		return;
-
-	/* bdi might already have been destroyed leaving @congested unlinked */
-	if (congested->__bdi) {
-		rb_erase(&congested->rb_node,
-			 &congested->__bdi->cgwb_congested_tree);
-		congested->__bdi = NULL;
-	}
-
-	spin_unlock_irqrestore(&cgwb_lock, flags);
-	kfree(congested);
+	percpu_ref_exit(&wb->refcnt);
+	kfree(wb);
 }
 
 static void cgwb_release_workfn(struct work_struct *work)
@@ -501,13 +400,12 @@
 	css_put(wb->blkcg_css);
 	mutex_unlock(&wb->bdi->cgwb_release_mutex);
 
-	/* triggers blkg destruction if cgwb_refcnt becomes zero */
-	blkcg_cgwb_put(blkcg);
+	/* triggers blkg destruction if no online users left */
+	blkcg_unpin_online(blkcg);
 
 	fprop_local_destroy_percpu(&wb->memcg_completions);
-	percpu_ref_exit(&wb->refcnt);
 	wb_exit(wb);
-	kfree_rcu(wb, rcu);
+	call_rcu(&wb->rcu, cgwb_free_rcu);
 }
 
 static void cgwb_release(struct percpu_ref *refcnt)
@@ -569,7 +467,7 @@
 		goto out_put;
 	}
 
-	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
+	ret = wb_init(wb, bdi, gfp);
 	if (ret)
 		goto err_free;
 
@@ -602,7 +500,7 @@
 				list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
 				list_add(&wb->memcg_node, memcg_cgwb_list);
 				list_add(&wb->blkcg_node, blkcg_cgwb_list);
-				blkcg_cgwb_get(blkcg);
+				blkcg_pin_online(blkcg);
 				css_get(memcg_css);
 				css_get(blkcg_css);
 			}
@@ -629,13 +527,12 @@
 }
 
 /**
- * wb_get_create - get wb for a given memcg, create if necessary
+ * wb_get_lookup - get wb for a given memcg
  * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
- * @gfp: allocation mask to use
  *
- * Try to get the wb for @memcg_css on @bdi. If it doesn't exist, try to
- * create one. The returned wb has its refcount incremented.
+ * Try to get the wb for @memcg_css on @bdi. The returned wb has its
+ * refcount incremented.
  *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation. IOW, rcu_read_lock() protection on
@@ -652,6 +549,39 @@
 * each lookup. On mismatch, the existing wb is discarded and a new one is
 * created.
  */
+struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
+				    struct cgroup_subsys_state *memcg_css)
+{
+	struct bdi_writeback *wb;
+
+	if (!memcg_css->parent)
+		return &bdi->wb;
+
+	rcu_read_lock();
+	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
+	if (wb) {
+		struct cgroup_subsys_state *blkcg_css;
+
+		/* see whether the blkcg association has changed */
+		blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
+		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
+			wb = NULL;
+		css_put(blkcg_css);
+	}
+	rcu_read_unlock();
+
+	return wb;
+}
+
+/**
+ * wb_get_create - get wb for a given memcg, create if necessary
+ * @bdi: target bdi
+ * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
+ * @gfp: allocation mask to use
+ *
+ * Try to get the wb for @memcg_css on @bdi. If it doesn't exist, try to
+ * create one. See wb_get_lookup() for more details.
+ */
 struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
 				    struct cgroup_subsys_state *memcg_css,
 				    gfp_t gfp)
@@ -664,20 +594,7 @@
 		return &bdi->wb;
 
 	do {
-		rcu_read_lock();
-		wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
-		if (wb) {
-			struct cgroup_subsys_state *blkcg_css;
-
-			/* see whether the blkcg association has changed */
-			blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
-						     &io_cgrp_subsys);
-			if (unlikely(wb->blkcg_css != blkcg_css ||
-				     !wb_tryget(wb)))
-				wb = NULL;
-			css_put(blkcg_css);
-		}
-		rcu_read_unlock();
+		wb = wb_get_lookup(bdi, memcg_css);
 	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));
 
 	return wb;
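
For illustration only (not part of the patch): wb_get_create() now just loops over the new wb_get_lookup() plus cgwb_create(). A rough sketch of a lookup-only caller, assuming wb_put() is the matching release as elsewhere in the writeback code; the function name is hypothetical:

/* Hypothetical caller that only wants an already existing wb. */
static void example_lookup_wb(struct backing_dev_info *bdi,
			      struct cgroup_subsys_state *memcg_css)
{
	struct bdi_writeback *wb;

	/* memcg_css must carry a positive refcount here */
	wb = wb_get_lookup(bdi, memcg_css);
	if (!wb)
		return;		/* none yet; wb_get_create() would make one */

	/* ... use wb ... */

	wb_put(wb);		/* release the reference from the lookup */
}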
@@ -688,11 +605,10 @@
 	int ret;
 
 	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
-	bdi->cgwb_congested_tree = RB_ROOT;
 	mutex_init(&bdi->cgwb_release_mutex);
 	init_rwsem(&bdi->wb_switch_rwsem);
 
-	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
+	ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
 	if (!ret) {
 		bdi->wb.memcg_css = &root_mem_cgroup->css;
 		bdi->wb.blkcg_css = blkcg_root_css;
@@ -761,21 +677,6 @@
 	spin_unlock_irq(&cgwb_lock);
 }
 
-static void cgwb_bdi_exit(struct backing_dev_info *bdi)
-{
-	struct rb_node *rbn;
-
-	spin_lock_irq(&cgwb_lock);
-	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
-		struct bdi_writeback_congested *congested =
-			rb_entry(rbn, struct bdi_writeback_congested, rb_node);
-
-		rb_erase(rbn, &bdi->cgwb_congested_tree);
-		congested->__bdi = NULL;	/* mark @congested unlinked */
-	}
-	spin_unlock_irq(&cgwb_lock);
-}
-
 static void cgwb_bdi_register(struct backing_dev_info *bdi)
 {
 	spin_lock_irq(&cgwb_lock);
@@ -802,28 +703,10 @@
 
 static int cgwb_bdi_init(struct backing_dev_info *bdi)
 {
-	int err;
-
-	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
-	if (!bdi->wb_congested)
-		return -ENOMEM;
-
-	refcount_set(&bdi->wb_congested->refcnt, 1);
-
-	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
-	if (err) {
-		wb_congested_put(bdi->wb_congested);
-		return err;
-	}
-	return 0;
+	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
 }
 
 static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }
-
-static void cgwb_bdi_exit(struct backing_dev_info *bdi)
-{
-	wb_congested_put(bdi->wb_congested);
-}
 
 static void cgwb_bdi_register(struct backing_dev_info *bdi)
 {
@@ -856,12 +739,11 @@
 	return ret;
 }
 
-struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
+struct backing_dev_info *bdi_alloc(int node_id)
 {
 	struct backing_dev_info *bdi;
 
-	bdi = kmalloc_node(sizeof(struct backing_dev_info),
-			   gfp_mask | __GFP_ZERO, node_id);
+	bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
 	if (!bdi)
 		return NULL;
 
@@ -869,18 +751,71 @@
 		kfree(bdi);
 		return NULL;
 	}
+	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
+	bdi->ra_pages = VM_READAHEAD_PAGES;
+	bdi->io_pages = VM_READAHEAD_PAGES;
 	return bdi;
 }
-EXPORT_SYMBOL(bdi_alloc_node);
+EXPORT_SYMBOL(bdi_alloc);
+
+static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
+{
+	struct rb_node **p = &bdi_tree.rb_node;
+	struct rb_node *parent = NULL;
+	struct backing_dev_info *bdi;
+
+	lockdep_assert_held(&bdi_lock);
+
+	while (*p) {
+		parent = *p;
+		bdi = rb_entry(parent, struct backing_dev_info, rb_node);
+
+		if (bdi->id > id)
+			p = &(*p)->rb_left;
+		else if (bdi->id < id)
+			p = &(*p)->rb_right;
+		else
+			break;
+	}
+
+	if (parentp)
+		*parentp = parent;
+	return p;
+}
+
+/**
+ * bdi_get_by_id - lookup and get bdi from its id
+ * @id: bdi id to lookup
+ *
+ * Find bdi matching @id and get it. Returns NULL if the matching bdi
+ * doesn't exist or is already unregistered.
+ */
+struct backing_dev_info *bdi_get_by_id(u64 id)
+{
+	struct backing_dev_info *bdi = NULL;
+	struct rb_node **p;
+
+	spin_lock_bh(&bdi_lock);
+	p = bdi_lookup_rb_node(id, NULL);
+	if (*p) {
+		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
+		bdi_get(bdi);
+	}
+	spin_unlock_bh(&bdi_lock);
+
+	return bdi;
+}
 
 int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
 {
 	struct device *dev;
+	struct rb_node *parent, **p;
 
 	if (bdi->dev)	/* The driver needs to use separate queues per device */
 		return 0;
 
-	dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
+	vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
+	dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
 	if (IS_ERR(dev))
 		return PTR_ERR(dev);
 
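
For illustration only (not part of the patch): bdi_get_by_id() hands back a reference that the caller owns. A minimal sketch of the expected get/put pairing, using the existing bdi_put() and a hypothetical caller:

/* Hypothetical consumer of the new id based lookup. */
static void example_find_bdi(u64 id)
{
	struct backing_dev_info *bdi;

	bdi = bdi_get_by_id(id);
	if (!bdi)
		return;		/* never registered or already unregistered */

	/* ... use bdi ... */

	bdi_put(bdi);		/* drop the reference taken by bdi_get_by_id() */
}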
@@ -891,13 +826,20 @@
 	set_bit(WB_registered, &bdi->wb.state);
 
 	spin_lock_bh(&bdi_lock);
+
+	bdi->id = ++bdi_id_cursor;
+
+	p = bdi_lookup_rb_node(bdi->id, &parent);
+	rb_link_node(&bdi->rb_node, parent, p);
+	rb_insert_color(&bdi->rb_node, &bdi_tree);
+
 	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
+
 	spin_unlock_bh(&bdi_lock);
 
 	trace_writeback_bdi_register(bdi);
 	return 0;
 }
-EXPORT_SYMBOL(bdi_register_va);
 
 int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
 {
@@ -911,20 +853,12 @@
 }
 EXPORT_SYMBOL(bdi_register);
 
-int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
+void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
 {
-	int rc;
-
-	rc = bdi_register(bdi, "%u:%u", MAJOR(owner->devt), MINOR(owner->devt));
-	if (rc)
-		return rc;
-	/* Leaking owner reference... */
-	WARN_ON(bdi->owner);
+	WARN_ON_ONCE(bdi->owner);
 	bdi->owner = owner;
 	get_device(owner);
-	return 0;
 }
-EXPORT_SYMBOL(bdi_register_owner);
 
 /*
  * Remove bdi from bdi_list, and ensure that it is no longer visible
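
For illustration only (not part of the patch): with bdi_alloc_node() and bdi_register_owner() gone, a driver would allocate, register and hand over ownership roughly as sketched below; the helper is hypothetical and error handling is trimmed:

/* Hypothetical driver-side setup; error handling trimmed for brevity. */
static struct backing_dev_info *example_setup_bdi(struct device *owner,
						  unsigned int major,
						  unsigned int minor)
{
	struct backing_dev_info *bdi;

	bdi = bdi_alloc(NUMA_NO_NODE);		/* zeroed, GFP_KERNEL */
	if (!bdi)
		return NULL;

	if (bdi_register(bdi, "%u:%u", major, minor)) {
		bdi_put(bdi);
		return NULL;
	}
	bdi_set_owner(bdi, owner);	/* replaces the bdi_register_owner() call */
	return bdi;
}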
@@ -932,6 +866,7 @@
 static void bdi_remove_from_list(struct backing_dev_info *bdi)
 {
 	spin_lock_bh(&bdi_lock);
+	rb_erase(&bdi->rb_node, &bdi_tree);
 	list_del_rcu(&bdi->bdi_list);
 	spin_unlock_bh(&bdi_lock);
 
@@ -973,7 +908,6 @@
 	bdi_unregister(bdi);
 	WARN_ON_ONCE(bdi->dev);
 	wb_exit(&bdi->wb);
-	cgwb_bdi_exit(bdi);
 	kfree(bdi);
 }
 
@@ -987,7 +921,7 @@
 {
 	if (!bdi || !bdi->dev)
 		return bdi_unknown_name;
-	return dev_name(bdi->dev);
+	return bdi->dev_name;
 }
 EXPORT_SYMBOL_GPL(bdi_dev_name);
 
@@ -997,29 +931,29 @@
 };
 static atomic_t nr_wb_congested[2];
 
-void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
+void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
 	wait_queue_head_t *wqh = &congestion_wqh[sync];
 	enum wb_congested_state bit;
 
 	bit = sync ? WB_sync_congested : WB_async_congested;
-	if (test_and_clear_bit(bit, &congested->state))
+	if (test_and_clear_bit(bit, &bdi->wb.congested))
 		atomic_dec(&nr_wb_congested[sync]);
 	smp_mb__after_atomic();
 	if (waitqueue_active(wqh))
 		wake_up(wqh);
 }
-EXPORT_SYMBOL(clear_wb_congested);
+EXPORT_SYMBOL(clear_bdi_congested);
 
-void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
+void set_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
 	enum wb_congested_state bit;
 
 	bit = sync ? WB_sync_congested : WB_async_congested;
-	if (!test_and_set_bit(bit, &congested->state))
+	if (!test_and_set_bit(bit, &bdi->wb.congested))
 		atomic_inc(&nr_wb_congested[sync]);
 }
-EXPORT_SYMBOL(set_wb_congested);
+EXPORT_SYMBOL(set_bdi_congested);
 
 /**
  * congestion_wait - wait for a backing_dev to become uncongested
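
For illustration only (not part of the patch): set_bdi_congested() and clear_bdi_congested() now take the backing_dev_info directly and operate on bdi->wb.congested. A rough sketch of a caller toggling both directions, assuming the BLK_RW_SYNC/BLK_RW_ASYNC constants from backing-dev-defs.h; how the caller obtains the bdi is driver specific:

/* Hypothetical caller flipping congestion state on its own bdi. */
static void example_set_congestion(struct backing_dev_info *bdi, bool busy)
{
	if (busy) {
		set_bdi_congested(bdi, BLK_RW_SYNC);
		set_bdi_congested(bdi, BLK_RW_ASYNC);
	} else {
		clear_bdi_congested(bdi, BLK_RW_SYNC);
		clear_bdi_congested(bdi, BLK_RW_ASYNC);
	}
}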