.. | .. |
---|
225 | 225 | |
---|
226 | 226 | /* |
---|
227 | 227 | * Dquot List Management: |
---|
228 | | - * The quota code uses four lists for dquot management: the inuse_list, |
---|
229 | | - * free_dquots, dqi_dirty_list, and dquot_hash[] array. A single dquot |
---|
230 | | - * structure may be on some of those lists, depending on its current state. |
---|
| 228 | + * The quota code uses five lists for dquot management: the inuse_list, |
---|
| 229 | + * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array. |
---|
| 230 | + * A single dquot structure may be on some of those lists, depending on |
---|
| 231 | + * its current state. |
---|
231 | 232 | * |
---|
232 | 233 | * All dquots are placed to the end of inuse_list when first created, and this |
---|
233 | 234 | * list is used for invalidate operation, which must look at every dquot. |
---|
| 235 | + * |
---|
| 236 | + * When the last reference of a dquot will be dropped, the dquot will be |
---|
| 237 | + * added to releasing_dquots. We'd then queue work item which would call |
---|
| 238 | + * synchronize_srcu() and after that perform the final cleanup of all the |
---|
| 239 | + * dquots on the list. Both releasing_dquots and free_dquots use the |
---|
| 240 | + * dq_free list_head in the dquot struct. When a dquot is removed from |
---|
| 241 | + * releasing_dquots, a reference count is always subtracted, and if |
---|
| 242 | + * dq_count == 0 at that point, the dquot will be added to the free_dquots. |
---|
234 | 243 | * |
---|
235 | 244 | * Unused dquots (dq_count == 0) are added to the free_dquots list when freed, |
---|
236 | 245 | * and this list is searched whenever we need an available dquot. Dquots are |
---|
.. | .. |
---|
250 | 259 | |
---|
251 | 260 | static LIST_HEAD(inuse_list); |
---|
252 | 261 | static LIST_HEAD(free_dquots); |
---|
| 262 | +static LIST_HEAD(releasing_dquots); |
---|
253 | 263 | static unsigned int dq_hash_bits, dq_hash_mask; |
---|
254 | 264 | static struct hlist_head *dquot_hash; |
---|
255 | 265 | |
---|
.. | .. |
---|
259 | 269 | static qsize_t inode_get_rsv_space(struct inode *inode); |
---|
260 | 270 | static qsize_t __inode_get_rsv_space(struct inode *inode); |
---|
261 | 271 | static int __dquot_initialize(struct inode *inode, int type); |
---|
| 272 | + |
---|
| 273 | +static void quota_release_workfn(struct work_struct *work); |
---|
| 274 | +static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn); |
---|
262 | 275 | |
---|
263 | 276 | static inline unsigned int |
---|
264 | 277 | hashfn(const struct super_block *sb, struct kqid qid) |
---|
.. | .. |
---|
307 | 320 | dqstats_inc(DQST_FREE_DQUOTS); |
---|
308 | 321 | } |
---|
309 | 322 | |
---|
| 323 | +static inline void put_releasing_dquots(struct dquot *dquot) |
---|
| 324 | +{ |
---|
| 325 | + list_add_tail(&dquot->dq_free, &releasing_dquots); |
---|
| 326 | +} |
---|
| 327 | + |
---|
310 | 328 | static inline void remove_free_dquot(struct dquot *dquot) |
---|
311 | 329 | { |
---|
312 | 330 | if (list_empty(&dquot->dq_free)) |
---|
313 | 331 | return; |
---|
314 | 332 | list_del_init(&dquot->dq_free); |
---|
315 | | - dqstats_dec(DQST_FREE_DQUOTS); |
---|
| 333 | + if (!atomic_read(&dquot->dq_count)) |
---|
| 334 | + dqstats_dec(DQST_FREE_DQUOTS); |
---|
316 | 335 | } |
---|
317 | 336 | |
---|
318 | 337 | static inline void put_inuse(struct dquot *dquot) |
---|
.. | .. |
---|
338 | 357 | mutex_unlock(&dquot->dq_lock); |
---|
339 | 358 | } |
---|
340 | 359 | |
---|
| 360 | +static inline int dquot_active(struct dquot *dquot) |
---|
| 361 | +{ |
---|
| 362 | + return test_bit(DQ_ACTIVE_B, &dquot->dq_flags); |
---|
| 363 | +} |
---|
| 364 | + |
---|
341 | 365 | static inline int dquot_dirty(struct dquot *dquot) |
---|
342 | 366 | { |
---|
343 | 367 | return test_bit(DQ_MOD_B, &dquot->dq_flags); |
---|
.. | .. |
---|
353 | 377 | { |
---|
354 | 378 | int ret = 1; |
---|
355 | 379 | |
---|
356 | | - if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) |
---|
| 380 | + if (!dquot_active(dquot)) |
---|
357 | 381 | return 0; |
---|
358 | 382 | |
---|
359 | 383 | if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY) |
---|
360 | 384 | return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags); |
---|
361 | 385 | |
---|
362 | 386 | /* If quota is dirty already, we don't have to acquire dq_list_lock */ |
---|
363 | | - if (test_bit(DQ_MOD_B, &dquot->dq_flags)) |
---|
| 387 | + if (dquot_dirty(dquot)) |
---|
364 | 388 | return 1; |
---|
365 | 389 | |
---|
366 | 390 | spin_lock(&dq_list_lock); |
---|
.. | .. |
---|
442 | 466 | smp_mb__before_atomic(); |
---|
443 | 467 | set_bit(DQ_READ_B, &dquot->dq_flags); |
---|
444 | 468 | /* Instantiate dquot if needed */ |
---|
445 | | - if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) { |
---|
| 469 | + if (!dquot_active(dquot) && !dquot->dq_off) { |
---|
446 | 470 | ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot); |
---|
447 | 471 | /* Write the info if needed */ |
---|
448 | 472 | if (info_dirty(&dqopt->info[dquot->dq_id.type])) { |
---|
.. | .. |
---|
484 | 508 | goto out_lock; |
---|
485 | 509 | /* Inactive dquot can be only if there was error during read/init |
---|
486 | 510 | * => we have better not writing it */ |
---|
487 | | - if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) |
---|
| 511 | + if (dquot_active(dquot)) |
---|
488 | 512 | ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot); |
---|
489 | 513 | else |
---|
490 | 514 | ret = -EIO; |
---|
.. | .. |
---|
549 | 573 | struct dquot *dquot, *tmp; |
---|
550 | 574 | |
---|
551 | 575 | restart: |
---|
| 576 | + flush_delayed_work(&quota_release_work); |
---|
| 577 | + |
---|
552 | 578 | spin_lock(&dq_list_lock); |
---|
553 | 579 | list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { |
---|
554 | 580 | if (dquot->dq_sb != sb) |
---|
.. | .. |
---|
557 | 583 | continue; |
---|
558 | 584 | /* Wait for dquot users */ |
---|
559 | 585 | if (atomic_read(&dquot->dq_count)) { |
---|
560 | | - dqgrab(dquot); |
---|
| 586 | + /* dquot in releasing_dquots, flush and retry */ |
---|
| 587 | + if (!list_empty(&dquot->dq_free)) { |
---|
| 588 | + spin_unlock(&dq_list_lock); |
---|
| 589 | + goto restart; |
---|
| 590 | + } |
---|
| 591 | + |
---|
| 592 | + atomic_inc(&dquot->dq_count); |
---|
561 | 593 | spin_unlock(&dq_list_lock); |
---|
562 | 594 | /* |
---|
563 | 595 | * Once dqput() wakes us up, we know it's time to free |
---|
.. | .. |
---|
599 | 631 | |
---|
600 | 632 | spin_lock(&dq_list_lock); |
---|
601 | 633 | list_for_each_entry(dquot, &inuse_list, dq_inuse) { |
---|
602 | | - if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) |
---|
| 634 | + if (!dquot_active(dquot)) |
---|
603 | 635 | continue; |
---|
604 | 636 | if (dquot->dq_sb != sb) |
---|
605 | 637 | continue; |
---|
.. | .. |
---|
614 | 646 | * outstanding call and recheck the DQ_ACTIVE_B after that. |
---|
615 | 647 | */ |
---|
616 | 648 | wait_on_dquot(dquot); |
---|
617 | | - if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { |
---|
| 649 | + if (dquot_active(dquot)) { |
---|
618 | 650 | ret = fn(dquot, priv); |
---|
619 | 651 | if (ret < 0) |
---|
620 | 652 | goto out; |
---|
.. | .. |
---|
629 | 661 | return ret; |
---|
630 | 662 | } |
---|
631 | 663 | EXPORT_SYMBOL(dquot_scan_active); |
---|
| 664 | + |
---|
| 665 | +static inline int dquot_write_dquot(struct dquot *dquot) |
---|
| 666 | +{ |
---|
| 667 | + int ret = dquot->dq_sb->dq_op->write_dquot(dquot); |
---|
| 668 | + if (ret < 0) { |
---|
| 669 | + quota_error(dquot->dq_sb, "Can't write quota structure " |
---|
| 670 | + "(error %d). Quota may get out of sync!", ret); |
---|
| 671 | + /* Clear dirty bit anyway to avoid infinite loop. */ |
---|
| 672 | + clear_dquot_dirty(dquot); |
---|
| 673 | + } |
---|
| 674 | + return ret; |
---|
| 675 | +} |
---|
632 | 676 | |
---|
633 | 677 | /* Write all dquot structures to quota files */ |
---|
634 | 678 | int dquot_writeback_dquots(struct super_block *sb, int type) |
---|
.. | .. |
---|
653 | 697 | dquot = list_first_entry(&dirty, struct dquot, |
---|
654 | 698 | dq_dirty); |
---|
655 | 699 | |
---|
656 | | - WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)); |
---|
| 700 | + WARN_ON(!dquot_active(dquot)); |
---|
657 | 701 | |
---|
658 | 702 | /* Now we have active dquot from which someone is |
---|
659 | 703 | * holding reference so we can safely just increase |
---|
660 | 704 | * use count */ |
---|
661 | 705 | dqgrab(dquot); |
---|
662 | 706 | spin_unlock(&dq_list_lock); |
---|
663 | | - err = sb->dq_op->write_dquot(dquot); |
---|
664 | | - if (err) { |
---|
665 | | - /* |
---|
666 | | - * Clear dirty bit anyway to avoid infinite |
---|
667 | | - * loop here. |
---|
668 | | - */ |
---|
669 | | - clear_dquot_dirty(dquot); |
---|
670 | | - if (!ret) |
---|
671 | | - ret = err; |
---|
672 | | - } |
---|
| 707 | + err = dquot_write_dquot(dquot); |
---|
| 708 | + if (err && !ret) |
---|
| 709 | + ret = err; |
---|
673 | 710 | dqput(dquot); |
---|
674 | 711 | spin_lock(&dq_list_lock); |
---|
675 | 712 | } |
---|
.. | .. |
---|
763 | 800 | }; |
---|
764 | 801 | |
---|
765 | 802 | /* |
---|
| 803 | + * Safely release dquot and put reference to dquot. |
---|
| 804 | + */ |
---|
| 805 | +static void quota_release_workfn(struct work_struct *work) |
---|
| 806 | +{ |
---|
| 807 | + struct dquot *dquot; |
---|
| 808 | + struct list_head rls_head; |
---|
| 809 | + |
---|
| 810 | + spin_lock(&dq_list_lock); |
---|
| 811 | + /* Exchange the list head to avoid livelock. */ |
---|
| 812 | + list_replace_init(&releasing_dquots, &rls_head); |
---|
| 813 | + spin_unlock(&dq_list_lock); |
---|
| 814 | + |
---|
| 815 | +restart: |
---|
| 816 | + synchronize_srcu(&dquot_srcu); |
---|
| 817 | + spin_lock(&dq_list_lock); |
---|
| 818 | + while (!list_empty(&rls_head)) { |
---|
| 819 | + dquot = list_first_entry(&rls_head, struct dquot, dq_free); |
---|
| 820 | + /* Dquot got used again? */ |
---|
| 821 | + if (atomic_read(&dquot->dq_count) > 1) { |
---|
| 822 | + remove_free_dquot(dquot); |
---|
| 823 | + atomic_dec(&dquot->dq_count); |
---|
| 824 | + continue; |
---|
| 825 | + } |
---|
| 826 | + if (dquot_dirty(dquot)) { |
---|
| 827 | + spin_unlock(&dq_list_lock); |
---|
| 828 | + /* Commit dquot before releasing */ |
---|
| 829 | + dquot_write_dquot(dquot); |
---|
| 830 | + goto restart; |
---|
| 831 | + } |
---|
| 832 | + if (dquot_active(dquot)) { |
---|
| 833 | + spin_unlock(&dq_list_lock); |
---|
| 834 | + dquot->dq_sb->dq_op->release_dquot(dquot); |
---|
| 835 | + goto restart; |
---|
| 836 | + } |
---|
| 837 | + /* Dquot is inactive and clean, now move it to free list */ |
---|
| 838 | + remove_free_dquot(dquot); |
---|
| 839 | + atomic_dec(&dquot->dq_count); |
---|
| 840 | + put_dquot_last(dquot); |
---|
| 841 | + } |
---|
| 842 | + spin_unlock(&dq_list_lock); |
---|
| 843 | +} |
---|
| 844 | + |
---|
| 845 | +/* |
---|
766 | 846 | * Put reference to dquot |
---|
767 | 847 | */ |
---|
768 | 848 | void dqput(struct dquot *dquot) |
---|
769 | 849 | { |
---|
770 | | - int ret; |
---|
771 | | - |
---|
772 | 850 | if (!dquot) |
---|
773 | 851 | return; |
---|
774 | 852 | #ifdef CONFIG_QUOTA_DEBUG |
---|
.. | .. |
---|
780 | 858 | } |
---|
781 | 859 | #endif |
---|
782 | 860 | dqstats_inc(DQST_DROPS); |
---|
783 | | -we_slept: |
---|
| 861 | + |
---|
784 | 862 | spin_lock(&dq_list_lock); |
---|
785 | 863 | if (atomic_read(&dquot->dq_count) > 1) { |
---|
786 | 864 | /* We have more than one user... nothing to do */ |
---|
.. | .. |
---|
792 | 870 | spin_unlock(&dq_list_lock); |
---|
793 | 871 | return; |
---|
794 | 872 | } |
---|
| 873 | + |
---|
795 | 874 | /* Need to release dquot? */ |
---|
796 | | - if (dquot_dirty(dquot)) { |
---|
797 | | - spin_unlock(&dq_list_lock); |
---|
798 | | - /* Commit dquot before releasing */ |
---|
799 | | - ret = dquot->dq_sb->dq_op->write_dquot(dquot); |
---|
800 | | - if (ret < 0) { |
---|
801 | | - quota_error(dquot->dq_sb, "Can't write quota structure" |
---|
802 | | - " (error %d). Quota may get out of sync!", |
---|
803 | | - ret); |
---|
804 | | - /* |
---|
805 | | - * We clear dirty bit anyway, so that we avoid |
---|
806 | | - * infinite loop here |
---|
807 | | - */ |
---|
808 | | - clear_dquot_dirty(dquot); |
---|
809 | | - } |
---|
810 | | - goto we_slept; |
---|
811 | | - } |
---|
812 | | - if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { |
---|
813 | | - spin_unlock(&dq_list_lock); |
---|
814 | | - dquot->dq_sb->dq_op->release_dquot(dquot); |
---|
815 | | - goto we_slept; |
---|
816 | | - } |
---|
817 | | - atomic_dec(&dquot->dq_count); |
---|
818 | 875 | #ifdef CONFIG_QUOTA_DEBUG |
---|
819 | 876 | /* sanity check */ |
---|
820 | 877 | BUG_ON(!list_empty(&dquot->dq_free)); |
---|
821 | 878 | #endif |
---|
822 | | - put_dquot_last(dquot); |
---|
| 879 | + put_releasing_dquots(dquot); |
---|
823 | 880 | spin_unlock(&dq_list_lock); |
---|
| 881 | + queue_delayed_work(system_unbound_wq, &quota_release_work, 1); |
---|
824 | 882 | } |
---|
825 | 883 | EXPORT_SYMBOL(dqput); |
---|
826 | 884 | |
---|
.. | .. |
---|
910 | 968 | * already finished or it will be canceled due to dq_count > 1 test */ |
---|
911 | 969 | wait_on_dquot(dquot); |
---|
912 | 970 | /* Read the dquot / allocate space in quota file */ |
---|
913 | | - if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { |
---|
| 971 | + if (!dquot_active(dquot)) { |
---|
914 | 972 | int err; |
---|
915 | 973 | |
---|
916 | 974 | err = sb->dq_op->acquire_dquot(dquot); |
---|
.. | .. |
---|
1427 | 1485 | return QUOTA_NL_NOWARN; |
---|
1428 | 1486 | } |
---|
1429 | 1487 | |
---|
1430 | | -static int dquot_active(const struct inode *inode) |
---|
| 1488 | +static int inode_quota_active(const struct inode *inode) |
---|
1431 | 1489 | { |
---|
1432 | 1490 | struct super_block *sb = inode->i_sb; |
---|
1433 | 1491 | |
---|
.. | .. |
---|
1450 | 1508 | qsize_t rsv; |
---|
1451 | 1509 | int ret = 0; |
---|
1452 | 1510 | |
---|
1453 | | - if (!dquot_active(inode)) |
---|
| 1511 | + if (!inode_quota_active(inode)) |
---|
1454 | 1512 | return 0; |
---|
1455 | 1513 | |
---|
1456 | 1514 | dquots = i_dquot(inode); |
---|
.. | .. |
---|
1558 | 1616 | struct dquot **dquots; |
---|
1559 | 1617 | int i; |
---|
1560 | 1618 | |
---|
1561 | | - if (!dquot_active(inode)) |
---|
| 1619 | + if (!inode_quota_active(inode)) |
---|
1562 | 1620 | return false; |
---|
1563 | 1621 | |
---|
1564 | 1622 | dquots = i_dquot(inode); |
---|
.. | .. |
---|
1669 | 1727 | int reserve = flags & DQUOT_SPACE_RESERVE; |
---|
1670 | 1728 | struct dquot **dquots; |
---|
1671 | 1729 | |
---|
1672 | | - if (!dquot_active(inode)) { |
---|
| 1730 | + if (!inode_quota_active(inode)) { |
---|
1673 | 1731 | if (reserve) { |
---|
1674 | 1732 | spin_lock(&inode->i_lock); |
---|
1675 | 1733 | *inode_reserved_space(inode) += number; |
---|
.. | .. |
---|
1739 | 1797 | struct dquot_warn warn[MAXQUOTAS]; |
---|
1740 | 1798 | struct dquot * const *dquots; |
---|
1741 | 1799 | |
---|
1742 | | - if (!dquot_active(inode)) |
---|
| 1800 | + if (!inode_quota_active(inode)) |
---|
1743 | 1801 | return 0; |
---|
1744 | 1802 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) |
---|
1745 | 1803 | warn[cnt].w_type = QUOTA_NL_NOWARN; |
---|
.. | .. |
---|
1782 | 1840 | struct dquot **dquots; |
---|
1783 | 1841 | int cnt, index; |
---|
1784 | 1842 | |
---|
1785 | | - if (!dquot_active(inode)) { |
---|
| 1843 | + if (!inode_quota_active(inode)) { |
---|
1786 | 1844 | spin_lock(&inode->i_lock); |
---|
1787 | 1845 | *inode_reserved_space(inode) -= number; |
---|
1788 | 1846 | __inode_add_bytes(inode, number); |
---|
.. | .. |
---|
1824 | 1882 | struct dquot **dquots; |
---|
1825 | 1883 | int cnt, index; |
---|
1826 | 1884 | |
---|
1827 | | - if (!dquot_active(inode)) { |
---|
| 1885 | + if (!inode_quota_active(inode)) { |
---|
1828 | 1886 | spin_lock(&inode->i_lock); |
---|
1829 | 1887 | *inode_reserved_space(inode) += number; |
---|
1830 | 1888 | __inode_sub_bytes(inode, number); |
---|
.. | .. |
---|
1868 | 1926 | struct dquot **dquots; |
---|
1869 | 1927 | int reserve = flags & DQUOT_SPACE_RESERVE, index; |
---|
1870 | 1928 | |
---|
1871 | | - if (!dquot_active(inode)) { |
---|
| 1929 | + if (!inode_quota_active(inode)) { |
---|
1872 | 1930 | if (reserve) { |
---|
1873 | 1931 | spin_lock(&inode->i_lock); |
---|
1874 | 1932 | *inode_reserved_space(inode) -= number; |
---|
.. | .. |
---|
1923 | 1981 | struct dquot * const *dquots; |
---|
1924 | 1982 | int index; |
---|
1925 | 1983 | |
---|
1926 | | - if (!dquot_active(inode)) |
---|
| 1984 | + if (!inode_quota_active(inode)) |
---|
1927 | 1985 | return; |
---|
1928 | 1986 | |
---|
1929 | 1987 | dquots = i_dquot(inode); |
---|
.. | .. |
---|
2094 | 2152 | struct super_block *sb = inode->i_sb; |
---|
2095 | 2153 | int ret; |
---|
2096 | 2154 | |
---|
2097 | | - if (!dquot_active(inode)) |
---|
| 2155 | + if (!inode_quota_active(inode)) |
---|
2098 | 2156 | return 0; |
---|
2099 | 2157 | |
---|
2100 | 2158 | if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)){ |
---|
.. | .. |
---|
2319 | 2377 | struct super_block *sb = inode->i_sb; |
---|
2320 | 2378 | struct quota_info *dqopt = sb_dqopt(sb); |
---|
2321 | 2379 | |
---|
| 2380 | + if (is_bad_inode(inode)) |
---|
| 2381 | + return -EUCLEAN; |
---|
2322 | 2382 | if (!S_ISREG(inode->i_mode)) |
---|
2323 | 2383 | return -EACCES; |
---|
2324 | 2384 | if (IS_RDONLY(inode)) |
---|
.. | .. |
---|
2413 | 2473 | |
---|
2414 | 2474 | error = add_dquot_ref(sb, type); |
---|
2415 | 2475 | if (error) |
---|
2416 | | - dquot_disable(sb, type, flags); |
---|
| 2476 | + dquot_disable(sb, type, |
---|
| 2477 | + DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); |
---|
2417 | 2478 | |
---|
2418 | 2479 | return error; |
---|
2419 | 2480 | out_fmt: |
---|