@@ -1202,11 +1202,22 @@
 	int ret = 0;
 
 	/*
-	 * We need to have subvol_sem write locked, to prevent races between
-	 * concurrent tasks trying to disable quotas, because we will unlock
-	 * and relock qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
+	 * We need to have subvol_sem write locked to prevent races with
+	 * snapshot creation.
 	 */
 	lockdep_assert_held_write(&fs_info->subvol_sem);
+
+	/*
+	 * Lock the cleaner mutex to prevent races with concurrent relocation,
+	 * because relocation may be building backrefs for blocks of the quota
+	 * root while we are deleting the root. This is like dropping fs roots
+	 * of deleted snapshots/subvolumes, we need the same protection.
+	 *
+	 * This also prevents races between concurrent tasks trying to disable
+	 * quotas, because we will unlock and relock qgroup_ioctl_lock across
+	 * BTRFS_FS_QUOTA_ENABLED changes.
+	 */
+	mutex_lock(&fs_info->cleaner_mutex);
 
 	mutex_lock(&fs_info->qgroup_ioctl_lock);
 	if (!fs_info->quota_root)
@@ -1270,7 +1281,9 @@
 		goto out;
 	}
 
+	spin_lock(&fs_info->trans_lock);
 	list_del(&quota_root->dirty_list);
+	spin_unlock(&fs_info->trans_lock);
 
 	btrfs_tree_lock(quota_root->node);
 	btrfs_clean_tree_block(quota_root->node);
@@ -1285,6 +1298,7 @@
 		btrfs_end_transaction(trans);
 	else if (trans)
 		ret = btrfs_end_transaction(trans);
+	mutex_unlock(&fs_info->cleaner_mutex);
 
 	return ret;
 }
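
Taken together, the hunks above make btrfs_quota_disable() take the cleaner mutex for the whole teardown, remove the quota root from the dirty list under fs_info->trans_lock, and release the cleaner mutex only after the transaction has ended. A minimal sketch of the resulting ordering, based only on what the hunks show (error paths and the temporary unlock/relock of qgroup_ioctl_lock omitted):

	mutex_lock(&fs_info->cleaner_mutex);	/* new: excludes relocation and concurrent disablers */
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	/* ... quota root is looked up, deleted and freed ... */
	spin_lock(&fs_info->trans_lock);
	list_del(&quota_root->dirty_list);	/* now done under trans_lock */
	spin_unlock(&fs_info->trans_lock);
	/* ... transaction is ended ... */
	mutex_unlock(&fs_info->cleaner_mutex);	/* new: dropped only after the transaction ends */
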
@@ -2762,12 +2776,21 @@
 }
 
 /*
- * called from commit_transaction. Writes all changed qgroups to disk.
+ * Writes all changed qgroups to disk.
+ * Called by the transaction commit path and the qgroup assign ioctl.
  */
 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
 {
 	struct btrfs_fs_info *fs_info = trans->fs_info;
 	int ret = 0;
+
+	/*
+	 * In case we are called from the qgroup assign ioctl, assert that we
+	 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
+	 * disable operation (ioctl) and access a freed quota root.
+	 */
+	if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
+		lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
 
 	if (!fs_info->quota_root)
 		return ret;
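
The assertion added above encodes the calling convention: outside the transaction commit path (TRANS_STATE_COMMIT_DOING), a caller such as the qgroup assign ioctl must hold qgroup_ioctl_lock so that a concurrent quota disable cannot free the quota root underneath it. An illustrative caller sketch (not part of the patch; names as used in the hunk):

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	/* ... modify qgroup relations ... */
	ret = btrfs_run_qgroups(trans);		/* lockdep assertion above is satisfied */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
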
@@ -3296,6 +3319,7 @@
 	int err = -ENOMEM;
 	int ret = 0;
 	bool stopped = false;
+	bool did_leaf_rescans = false;
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -3316,6 +3340,7 @@
 		}
 
 		err = qgroup_rescan_leaf(trans, path);
+		did_leaf_rescans = true;
 
 		if (err > 0)
 			btrfs_commit_transaction(trans);
@@ -3336,16 +3361,23 @@
 	mutex_unlock(&fs_info->qgroup_rescan_lock);
 
 	/*
-	 * only update status, since the previous part has already updated the
-	 * qgroup info.
+	 * Only update status, since the previous part has already updated the
+	 * qgroup info, and only if we did any actual work. This also prevents
+	 * race with a concurrent quota disable, which has already set
+	 * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
+	 * btrfs_quota_disable().
 	 */
-	trans = btrfs_start_transaction(fs_info->quota_root, 1);
-	if (IS_ERR(trans)) {
-		err = PTR_ERR(trans);
+	if (did_leaf_rescans) {
+		trans = btrfs_start_transaction(fs_info->quota_root, 1);
+		if (IS_ERR(trans)) {
+			err = PTR_ERR(trans);
+			trans = NULL;
+			btrfs_err(fs_info,
+				  "fail to start transaction for status update: %d",
+				  err);
+		}
+	} else {
 		trans = NULL;
-		btrfs_err(fs_info,
-			  "fail to start transaction for status update: %d",
-			  err);
 	}
 
 	mutex_lock(&fs_info->qgroup_rescan_lock);
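
The comment above hints at the failure mode; my reading of it, as an illustrative timeline rather than text from the patch: if the worker never called qgroup_rescan_leaf(), a concurrent quota disable may already have cleared BTRFS_FS_QUOTA_ENABLED and set fs_info->quota_root to NULL, so starting a transaction on that root for the status update would dereference a NULL pointer. The did_leaf_rescans check skips the status update in that case:

	/*
	 * rescan worker                         quota disable (ioctl)
	 * -------------                         ---------------------
	 * does no qgroup_rescan_leaf() work
	 *                                       clears BTRFS_FS_QUOTA_ENABLED
	 *                                       sets fs_info->quota_root = NULL
	 * btrfs_start_transaction(
	 *     fs_info->quota_root, 1)           <-- NULL root without the
	 *                                           did_leaf_rescans check
	 */
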
@@ -4356,4 +4388,5 @@
 		ulist_free(entry->old_roots);
 		kfree(entry);
 	}
+	*root = RB_ROOT;
 }