2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/fs/quota/dquot.c
@@ -225,12 +225,21 @@
 
 /*
  * Dquot List Management:
- * The quota code uses four lists for dquot management: the inuse_list,
- * free_dquots, dqi_dirty_list, and dquot_hash[] array. A single dquot
- * structure may be on some of those lists, depending on its current state.
+ * The quota code uses five lists for dquot management: the inuse_list,
+ * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
+ * A single dquot structure may be on some of those lists, depending on
+ * its current state.
  *
  * All dquots are placed to the end of inuse_list when first created, and this
  * list is used for invalidate operation, which must look at every dquot.
+ *
+ * When the last reference of a dquot will be dropped, the dquot will be
+ * added to releasing_dquots. We'd then queue work item which would call
+ * synchronize_srcu() and after that perform the final cleanup of all the
+ * dquots on the list. Both releasing_dquots and free_dquots use the
+ * dq_free list_head in the dquot struct. When a dquot is removed from
+ * releasing_dquots, a reference count is always subtracted, and if
+ * dq_count == 0 at that point, the dquot will be added to the free_dquots.
  *
  * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
  * and this list is searched whenever we need an available dquot. Dquots are
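The comment block above describes the new deferred-release scheme: a dquot whose last reference is dropped is parked on releasing_dquots, and a queued work item calls synchronize_srcu() before doing the final write-back and release. The SRCU grace period matters because the charging and uncharging paths dereference the i_dquot() pointer array under dquot_srcu without taking dquot references; the reader side looks roughly like the following simplified sketch of the pattern already used elsewhere in dquot.c, shown here only for orientation:

	int index;
	struct dquot **dquots;

	index = srcu_read_lock(&dquot_srcu);
	dquots = i_dquot(inode);
	/*
	 * dquots[cnt] may be dereferenced here without an extra reference;
	 * the worker's synchronize_srcu() guarantees no such reader is still
	 * running once the dquot is finally released.
	 */
	srcu_read_unlock(&dquot_srcu, index);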
@@ -250,6 +259,7 @@
 
 static LIST_HEAD(inuse_list);
 static LIST_HEAD(free_dquots);
+static LIST_HEAD(releasing_dquots);
 static unsigned int dq_hash_bits, dq_hash_mask;
 static struct hlist_head *dquot_hash;
 
@@ -259,6 +269,9 @@
 static qsize_t inode_get_rsv_space(struct inode *inode);
 static qsize_t __inode_get_rsv_space(struct inode *inode);
 static int __dquot_initialize(struct inode *inode, int type);
+
+static void quota_release_workfn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);
 
 static inline unsigned int
 hashfn(const struct super_block *sb, struct kqid qid)
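The forward declaration and DECLARE_DELAYED_WORK() above set up the statically initialized work item used by the rest of the patch: dqput() queues quota_release_work on system_unbound_wq and the invalidate path flushes it (both hunks appear further down). As a reminder of how that workqueue API fits together, a minimal generic sketch; the example_* names are made up and not part of this patch:

	#include <linux/workqueue.h>

	static void example_workfn(struct work_struct *work);
	static DECLARE_DELAYED_WORK(example_work, example_workfn);

	/* Runs later, in process context, on a workqueue worker thread. */
	static void example_workfn(struct work_struct *work)
	{
	}

	static void example_producer(void)
	{
		/* Schedule the handler to run after at least one jiffy. */
		queue_delayed_work(system_unbound_wq, &example_work, 1);
	}

	static void example_waiter(void)
	{
		/* Wait until any queued or running instance has completed. */
		flush_delayed_work(&example_work);
	}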
@@ -307,12 +320,18 @@
 	dqstats_inc(DQST_FREE_DQUOTS);
 }
 
+static inline void put_releasing_dquots(struct dquot *dquot)
+{
+	list_add_tail(&dquot->dq_free, &releasing_dquots);
+}
+
 static inline void remove_free_dquot(struct dquot *dquot)
 {
 	if (list_empty(&dquot->dq_free))
 		return;
 	list_del_init(&dquot->dq_free);
-	dqstats_dec(DQST_FREE_DQUOTS);
+	if (!atomic_read(&dquot->dq_count))
+		dqstats_dec(DQST_FREE_DQUOTS);
 }
 
 static inline void put_inuse(struct dquot *dquot)
@@ -338,6 +357,11 @@
 	mutex_unlock(&dquot->dq_lock);
 }
 
+static inline int dquot_active(struct dquot *dquot)
+{
+	return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+}
+
 static inline int dquot_dirty(struct dquot *dquot)
 {
 	return test_bit(DQ_MOD_B, &dquot->dq_flags);
@@ -353,14 +377,14 @@
 {
 	int ret = 1;
 
-	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
+	if (!dquot_active(dquot))
 		return 0;
 
 	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
 		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);
 
 	/* If quota is dirty already, we don't have to acquire dq_list_lock */
-	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
+	if (dquot_dirty(dquot))
 		return 1;
 
 	spin_lock(&dq_list_lock);
@@ -442,7 +466,7 @@
 	smp_mb__before_atomic();
 	set_bit(DQ_READ_B, &dquot->dq_flags);
 	/* Instantiate dquot if needed */
-	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
+	if (!dquot_active(dquot) && !dquot->dq_off) {
 		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
 		/* Write the info if needed */
 		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
@@ -484,7 +508,7 @@
 		goto out_lock;
 	/* Inactive dquot can be only if there was error during read/init
 	 * => we have better not writing it */
-	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
+	if (dquot_active(dquot))
 		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
 	else
 		ret = -EIO;
@@ -549,6 +573,8 @@
 	struct dquot *dquot, *tmp;
 
 restart:
+	flush_delayed_work(&quota_release_work);
+
 	spin_lock(&dq_list_lock);
 	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
 		if (dquot->dq_sb != sb)
@@ -557,7 +583,13 @@
 			continue;
 		/* Wait for dquot users */
 		if (atomic_read(&dquot->dq_count)) {
-			dqgrab(dquot);
+			/* dquot in releasing_dquots, flush and retry */
+			if (!list_empty(&dquot->dq_free)) {
+				spin_unlock(&dq_list_lock);
+				goto restart;
+			}
+
+			atomic_inc(&dquot->dq_count);
 			spin_unlock(&dq_list_lock);
 			/*
 			 * Once dqput() wakes us up, we know it's time to free
@@ -599,7 +631,7 @@
 
 	spin_lock(&dq_list_lock);
 	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
-		if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
+		if (!dquot_active(dquot))
 			continue;
 		if (dquot->dq_sb != sb)
 			continue;
@@ -614,7 +646,7 @@
 		 * outstanding call and recheck the DQ_ACTIVE_B after that.
 		 */
 		wait_on_dquot(dquot);
-		if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+		if (dquot_active(dquot)) {
 			ret = fn(dquot, priv);
 			if (ret < 0)
 				goto out;
@@ -629,6 +661,18 @@
 	return ret;
 }
 EXPORT_SYMBOL(dquot_scan_active);
+
+static inline int dquot_write_dquot(struct dquot *dquot)
+{
+	int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
+	if (ret < 0) {
+		quota_error(dquot->dq_sb, "Can't write quota structure "
+			    "(error %d). Quota may get out of sync!", ret);
+		/* Clear dirty bit anyway to avoid infinite loop. */
+		clear_dquot_dirty(dquot);
+	}
+	return ret;
+}
 
 /* Write all dquot structures to quota files */
 int dquot_writeback_dquots(struct super_block *sb, int type)
@@ -653,23 +697,16 @@
 			dquot = list_first_entry(&dirty, struct dquot,
 						 dq_dirty);
 
-			WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
+			WARN_ON(!dquot_active(dquot));
 
 			/* Now we have active dquot from which someone is
 			 * holding reference so we can safely just increase
 			 * use count */
 			dqgrab(dquot);
 			spin_unlock(&dq_list_lock);
-			err = sb->dq_op->write_dquot(dquot);
-			if (err) {
-				/*
-				 * Clear dirty bit anyway to avoid infinite
-				 * loop here.
-				 */
-				clear_dquot_dirty(dquot);
-				if (!ret)
-					ret = err;
-			}
+			err = dquot_write_dquot(dquot);
+			if (err && !ret)
+				ret = err;
 			dqput(dquot);
 			spin_lock(&dq_list_lock);
 		}
@@ -763,12 +800,53 @@
 };
 
 /*
+ * Safely release dquot and put reference to dquot.
+ */
+static void quota_release_workfn(struct work_struct *work)
+{
+	struct dquot *dquot;
+	struct list_head rls_head;
+
+	spin_lock(&dq_list_lock);
+	/* Exchange the list head to avoid livelock. */
+	list_replace_init(&releasing_dquots, &rls_head);
+	spin_unlock(&dq_list_lock);
+
+restart:
+	synchronize_srcu(&dquot_srcu);
+	spin_lock(&dq_list_lock);
+	while (!list_empty(&rls_head)) {
+		dquot = list_first_entry(&rls_head, struct dquot, dq_free);
+		/* Dquot got used again? */
+		if (atomic_read(&dquot->dq_count) > 1) {
+			remove_free_dquot(dquot);
+			atomic_dec(&dquot->dq_count);
+			continue;
+		}
+		if (dquot_dirty(dquot)) {
+			spin_unlock(&dq_list_lock);
+			/* Commit dquot before releasing */
+			dquot_write_dquot(dquot);
+			goto restart;
+		}
+		if (dquot_active(dquot)) {
+			spin_unlock(&dq_list_lock);
+			dquot->dq_sb->dq_op->release_dquot(dquot);
+			goto restart;
+		}
+		/* Dquot is inactive and clean, now move it to free list */
+		remove_free_dquot(dquot);
+		atomic_dec(&dquot->dq_count);
+		put_dquot_last(dquot);
+	}
+	spin_unlock(&dq_list_lock);
+}
+
+/*
  * Put reference to dquot
  */
 void dqput(struct dquot *dquot)
 {
-	int ret;
-
 	if (!dquot)
 		return;
 #ifdef CONFIG_QUOTA_DEBUG
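Two details of quota_release_workfn() above are easy to miss. First, the list head is swapped out with list_replace_init() under dq_list_lock, so dquots queued by concurrent dqput() calls land on the now-empty global releasing_dquots and are left for the next work run; the worker only walks its private snapshot and cannot be livelocked by a steady stream of producers. Second, synchronize_srcu() runs before any dquot from the snapshot is touched, and the restart label runs it again whenever the lock had to be dropped for write-back or ->release_dquot(). The splice pattern in generic form, an illustrative sketch in which struct item, pending and handle_item() are made-up names:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct item {
		struct list_head node;
		/* payload */
	};

	static LIST_HEAD(pending);	/* filled by producers under 'lock' */
	static DEFINE_SPINLOCK(lock);

	static void handle_item(struct item *it);

	static void drain_pending(void)
	{
		LIST_HEAD(todo);
		struct item *it;

		spin_lock(&lock);
		list_replace_init(&pending, &todo);	/* steal the whole list */
		spin_unlock(&lock);

		/*
		 * New entries keep accumulating on 'pending' and are handled
		 * by the next invocation, so this loop always terminates.
		 */
		while (!list_empty(&todo)) {
			it = list_first_entry(&todo, struct item, node);
			list_del_init(&it->node);
			handle_item(it);
		}
	}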
@@ -780,7 +858,7 @@
 	}
 #endif
 	dqstats_inc(DQST_DROPS);
-we_slept:
+
 	spin_lock(&dq_list_lock);
 	if (atomic_read(&dquot->dq_count) > 1) {
 		/* We have more than one user... nothing to do */
@@ -792,35 +870,15 @@
 		spin_unlock(&dq_list_lock);
 		return;
 	}
+
 	/* Need to release dquot? */
-	if (dquot_dirty(dquot)) {
-		spin_unlock(&dq_list_lock);
-		/* Commit dquot before releasing */
-		ret = dquot->dq_sb->dq_op->write_dquot(dquot);
-		if (ret < 0) {
-			quota_error(dquot->dq_sb, "Can't write quota structure"
-				    " (error %d). Quota may get out of sync!",
-				    ret);
-			/*
-			 * We clear dirty bit anyway, so that we avoid
-			 * infinite loop here
-			 */
-			clear_dquot_dirty(dquot);
-		}
-		goto we_slept;
-	}
-	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
-		spin_unlock(&dq_list_lock);
-		dquot->dq_sb->dq_op->release_dquot(dquot);
-		goto we_slept;
-	}
-	atomic_dec(&dquot->dq_count);
 #ifdef CONFIG_QUOTA_DEBUG
 	/* sanity check */
 	BUG_ON(!list_empty(&dquot->dq_free));
 #endif
-	put_dquot_last(dquot);
+	put_releasing_dquots(dquot);
 	spin_unlock(&dq_list_lock);
+	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
 }
 EXPORT_SYMBOL(dqput);
 
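With the dqput() hunks above, dropping the last reference no longer sleeps in dqput() itself: the dquot keeps dq_count at 1, is parked on releasing_dquots via put_releasing_dquots(), and quota_release_work is queued on system_unbound_wq; the dirty write-back and ->release_dquot() calls that used to run here now happen in quota_release_workfn(), after an SRCU grace period. Nothing changes for callers; the usual reference pattern still looks like the rough sketch below, where example_use_dquot() is a made-up caller and error handling is abbreviated:

	static int example_use_dquot(struct super_block *sb, struct kqid qid)
	{
		struct dquot *dquot;

		dquot = dqget(sb, qid);		/* takes a reference, may sleep */
		if (IS_ERR(dquot))
			return PTR_ERR(dquot);

		/* ... read or update the dquot here ... */

		dqput(dquot);	/*
				 * If this was the last reference, the dquot is
				 * parked on releasing_dquots and cleaned up
				 * later by quota_release_workfn().
				 */
		return 0;
	}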
@@ -910,7 +968,7 @@
 	 * already finished or it will be canceled due to dq_count > 1 test */
 	wait_on_dquot(dquot);
 	/* Read the dquot / allocate space in quota file */
-	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+	if (!dquot_active(dquot)) {
 		int err;
 
 		err = sb->dq_op->acquire_dquot(dquot);
@@ -1427,7 +1485,7 @@
 	return QUOTA_NL_NOWARN;
 }
 
-static int dquot_active(const struct inode *inode)
+static int inode_quota_active(const struct inode *inode)
 {
 	struct super_block *sb = inode->i_sb;
 
@@ -1450,7 +1508,7 @@
 	qsize_t rsv;
 	int ret = 0;
 
-	if (!dquot_active(inode))
+	if (!inode_quota_active(inode))
 		return 0;
 
 	dquots = i_dquot(inode);
@@ -1558,7 +1616,7 @@
 	struct dquot **dquots;
 	int i;
 
-	if (!dquot_active(inode))
+	if (!inode_quota_active(inode))
 		return false;
 
 	dquots = i_dquot(inode);
@@ -1669,7 +1727,7 @@
 	int reserve = flags & DQUOT_SPACE_RESERVE;
 	struct dquot **dquots;
 
-	if (!dquot_active(inode)) {
+	if (!inode_quota_active(inode)) {
 		if (reserve) {
 			spin_lock(&inode->i_lock);
 			*inode_reserved_space(inode) += number;
@@ -1739,7 +1797,7 @@
 	struct dquot_warn warn[MAXQUOTAS];
 	struct dquot * const *dquots;
 
-	if (!dquot_active(inode))
+	if (!inode_quota_active(inode))
 		return 0;
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 		warn[cnt].w_type = QUOTA_NL_NOWARN;
@@ -1782,7 +1840,7 @@
 	struct dquot **dquots;
 	int cnt, index;
 
-	if (!dquot_active(inode)) {
+	if (!inode_quota_active(inode)) {
 		spin_lock(&inode->i_lock);
 		*inode_reserved_space(inode) -= number;
 		__inode_add_bytes(inode, number);
@@ -1824,7 +1882,7 @@
 	struct dquot **dquots;
 	int cnt, index;
 
-	if (!dquot_active(inode)) {
+	if (!inode_quota_active(inode)) {
 		spin_lock(&inode->i_lock);
 		*inode_reserved_space(inode) += number;
 		__inode_sub_bytes(inode, number);
@@ -1868,7 +1926,7 @@
 	struct dquot **dquots;
 	int reserve = flags & DQUOT_SPACE_RESERVE, index;
 
-	if (!dquot_active(inode)) {
+	if (!inode_quota_active(inode)) {
 		if (reserve) {
 			spin_lock(&inode->i_lock);
 			*inode_reserved_space(inode) -= number;
@@ -1923,7 +1981,7 @@
 	struct dquot * const *dquots;
 	int index;
 
-	if (!dquot_active(inode))
+	if (!inode_quota_active(inode))
 		return;
 
 	dquots = i_dquot(inode);
@@ -2094,7 +2152,7 @@
 	struct super_block *sb = inode->i_sb;
 	int ret;
 
-	if (!dquot_active(inode))
+	if (!inode_quota_active(inode))
 		return 0;
 
 	if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)){
@@ -2319,6 +2377,8 @@
 	struct super_block *sb = inode->i_sb;
 	struct quota_info *dqopt = sb_dqopt(sb);
 
+	if (is_bad_inode(inode))
+		return -EUCLEAN;
 	if (!S_ISREG(inode->i_mode))
 		return -EACCES;
 	if (IS_RDONLY(inode))
@@ -2413,7 +2473,8 @@
 
 	error = add_dquot_ref(sb, type);
 	if (error)
-		dquot_disable(sb, type, flags);
+		dquot_disable(sb, type,
+			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 
 	return error;
 out_fmt: