@@ -285,10 +285,10 @@
 	ino_t ino;
 
 	if (!(sb->s_flags & SB_KERNMOUNT)) {
-		raw_spin_lock(&sbinfo->stat_lock);
+		spin_lock(&sbinfo->stat_lock);
 		if (sbinfo->max_inodes) {
 			if (!sbinfo->free_inodes) {
-				raw_spin_unlock(&sbinfo->stat_lock);
+				spin_unlock(&sbinfo->stat_lock);
 				return -ENOSPC;
 			}
 			sbinfo->free_inodes--;
@@ -311,7 +311,7 @@
 			}
 			*inop = ino;
 		}
-		raw_spin_unlock(&sbinfo->stat_lock);
+		spin_unlock(&sbinfo->stat_lock);
 	} else if (inop) {
 		/*
 		 * __shmem_file_setup, one of our callers, is lock-free: it
@@ -326,14 +326,13 @@
 		 * to worry about things like glibc compatibility.
 		 */
 		ino_t *next_ino;
-
 		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
 		ino = *next_ino;
 		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
-			raw_spin_lock(&sbinfo->stat_lock);
+			spin_lock(&sbinfo->stat_lock);
 			ino = sbinfo->next_ino;
 			sbinfo->next_ino += SHMEM_INO_BATCH;
-			raw_spin_unlock(&sbinfo->stat_lock);
+			spin_unlock(&sbinfo->stat_lock);
 			if (unlikely(is_zero_ino(ino)))
 				ino++;
 		}
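
The branch above hands out inode numbers from a per-CPU batch (sbinfo->ino_batch) and takes stat_lock only when the batch is exhausted, i.e. once every SHMEM_INO_BATCH allocations, which is what lets the __shmem_file_setup() path mentioned in the comment stay effectively lock-free. Below is a minimal userspace sketch of that batching pattern, with a pthread mutex standing in for stat_lock and thread-local storage standing in for the per-CPU slot; BATCH, next_id and alloc_id() are illustrative names, not kernel interfaces.

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH 128			/* stands in for SHMEM_INO_BATCH */

static pthread_mutex_t stat_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t next_id = 1;		/* shared counter, like sbinfo->next_ino */
static __thread uint64_t batch_next;	/* per-thread, like the per-cpu ino_batch slot */

/* Allocate a unique ID, taking stat_lock only on a batch boundary. */
static uint64_t alloc_id(void)
{
	uint64_t id = batch_next;

	if (id % BATCH == 0) {		/* local batch exhausted (or never filled) */
		pthread_mutex_lock(&stat_lock);
		id = next_id;
		next_id += BATCH;
		pthread_mutex_unlock(&stat_lock);
		if (id == 0)		/* skip 0, like is_zero_ino() */
			id++;
	}
	batch_next = id + 1;
	return id;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("allocated id %llu\n", (unsigned long long)alloc_id());
	return 0;
}
```

As in the kernel code, only an allocation that lands on a batch boundary pays for the shared lock; every other allocation is a plain increment of local state.
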
@@ -349,9 +348,9 @@
 {
 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 	if (sbinfo->max_inodes) {
-		raw_spin_lock(&sbinfo->stat_lock);
+		spin_lock(&sbinfo->stat_lock);
 		sbinfo->free_inodes++;
-		raw_spin_unlock(&sbinfo->stat_lock);
+		spin_unlock(&sbinfo->stat_lock);
 	}
 }
 
@@ -1493,10 +1492,10 @@
 {
 	struct mempolicy *mpol = NULL;
 	if (sbinfo->mpol) {
-		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
+		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
 		mpol = sbinfo->mpol;
 		mpol_get(mpol);
-		raw_spin_unlock(&sbinfo->stat_lock);
+		spin_unlock(&sbinfo->stat_lock);
 	}
 	return mpol;
 }
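
The "prevent replace/use races" comment marks the usual pattern for handing out a reference to an object that a concurrent remount may swap out: the pointer is loaded and its refcount bumped inside the same critical section the updater uses, so the object cannot be freed between the load and the get. Here is a self-contained userspace sketch of that reader side, again with a pthread mutex in place of stat_lock; struct policy, policy_get() and get_policy() are illustrative stand-ins, not the kernel's mempolicy API.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct policy {				/* stand-in for struct mempolicy */
	atomic_int refcount;
	int mode;
};

static pthread_mutex_t stat_lock = PTHREAD_MUTEX_INITIALIZER;
static struct policy *current_policy;	/* plays the role of sbinfo->mpol */

static void policy_get(struct policy *p)
{
	if (p)				/* tolerate NULL so callers need not re-check under the lock */
		atomic_fetch_add(&p->refcount, 1);
}

static void policy_put(struct policy *p)
{
	if (p && atomic_fetch_sub(&p->refcount, 1) == 1)
		free(p);
}

/* Reader, shaped like shmem_get_sbmpol(): take the reference under the lock. */
static struct policy *get_policy(void)
{
	struct policy *p = NULL;

	if (current_policy) {
		pthread_mutex_lock(&stat_lock);	/* prevent replace/use races */
		p = current_policy;
		policy_get(p);
		pthread_mutex_unlock(&stat_lock);
	}
	return p;
}

int main(void)
{
	struct policy *p;

	current_policy = calloc(1, sizeof(*current_policy));
	atomic_store(&current_policy->refcount, 1);

	p = get_policy();		/* caller now owns one reference */
	policy_put(p);			/* ...and drops it when done */
	policy_put(current_policy);
	current_policy = NULL;
	return 0;
}
```
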
@@ -3563,10 +3562,9 @@
 	struct shmem_options *ctx = fc->fs_private;
 	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
 	unsigned long inodes;
-	struct mempolicy *mpol = NULL;
 	const char *err;
 
-	raw_spin_lock(&sbinfo->stat_lock);
+	spin_lock(&sbinfo->stat_lock);
 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
 	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
 		if (!sbinfo->max_blocks) {
@@ -3611,15 +3609,14 @@
 	 * Preserve previous mempolicy unless mpol remount option was specified.
 	 */
 	if (ctx->mpol) {
-		mpol = sbinfo->mpol;
+		mpol_put(sbinfo->mpol);
 		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
 		ctx->mpol = NULL;
 	}
-	raw_spin_unlock(&sbinfo->stat_lock);
-	mpol_put(mpol);
+	spin_unlock(&sbinfo->stat_lock);
 	return 0;
 out:
-	raw_spin_unlock(&sbinfo->stat_lock);
+	spin_unlock(&sbinfo->stat_lock);
 	return invalfc(fc, "%s", err);
 }
 
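
On the updater side, the hunk above now drops the old policy with mpol_put() while still holding stat_lock and installs the new one in its place, rather than stashing the old pointer in a local variable and putting it only after the unlock; the raw_spin_lock variant had to defer the put, presumably because freeing memory while holding a raw spinlock is not allowed on PREEMPT_RT. Below is a matching writer-side sketch for the reader example shown earlier; struct policy, policy_put() and replace_policy() are the same illustrative names, not kernel interfaces.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct policy {				/* stand-in for struct mempolicy */
	atomic_int refcount;
	int mode;
};

static pthread_mutex_t stat_lock = PTHREAD_MUTEX_INITIALIZER;
static struct policy *current_policy;	/* plays the role of sbinfo->mpol */

static void policy_put(struct policy *p)
{
	if (p && atomic_fetch_sub(&p->refcount, 1) == 1)
		free(p);
}

/*
 * Writer, shaped like the remount path above: drop the old policy and
 * install the new one inside the same critical section the readers use.
 * The caller's reference to newp is transferred to current_policy.
 */
static void replace_policy(struct policy *newp)
{
	pthread_mutex_lock(&stat_lock);
	policy_put(current_policy);	/* old policy dropped under the lock */
	current_policy = newp;		/* transfers the initial ref */
	pthread_mutex_unlock(&stat_lock);
}

int main(void)
{
	struct policy *p = calloc(1, sizeof(*p));

	atomic_store(&p->refcount, 1);
	replace_policy(p);		/* current_policy was NULL, nothing to put */
	replace_policy(NULL);		/* drops and frees p */
	return 0;
}
```

A reader that already took its reference under the lock keeps a valid object even if replace_policy() concurrently drops the last shared reference.
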
@@ -3736,7 +3733,7 @@
 	sbinfo->mpol = ctx->mpol;
 	ctx->mpol = NULL;
 
-	raw_spin_lock_init(&sbinfo->stat_lock);
+	spin_lock_init(&sbinfo->stat_lock);
 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
 		goto failed;
 	spin_lock_init(&sbinfo->shrinklist_lock);