@@ -49,7 +49,7 @@
  *
  * bch_bucket_alloc() allocates a single bucket from a specific cache.
  *
- * bch_bucket_alloc_set() allocates one or more buckets from different caches
+ * bch_bucket_alloc_set() allocates one bucket from different caches
  * out of a cache set.
  *
  * free_some_buckets() drives all the processes described above. It's called
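A minimal caller sketch (not part of the patch, assuming the bcache headers; the helper name and the RESERVE_NONE watermark are illustrative) shows what the reworded comment means in practice: bch_bucket_alloc_set() no longer takes a count, and a successful call fills the key with exactly one pointer.

/* Hypothetical caller of the post-patch bch_bucket_alloc_set(). */
static int example_alloc_one_bucket(struct cache_set *c, bool wait)
{
	BKEY_PADDED(key) k;

	/* No 'n' argument any more: one bucket, one pointer. */
	if (bch_bucket_alloc_set(c, RESERVE_NONE, &k.key, wait))
		return -1;	/* allocation failed or IO is disabled */

	BUG_ON(KEY_PTRS(&k.key) != 1);

	/* A real caller would use the bucket and later release it. */
	return 0;
}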
@@ -87,8 +87,7 @@
 {
	struct cache *ca;
	struct bucket *b;
-	unsigned int next = c->nbuckets * c->sb.bucket_size / 1024;
-	unsigned int i;
+	unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
	int r;
 
	atomic_sub(sectors, &c->rescale);
@@ -104,14 +103,14 @@
 
	c->min_prio = USHRT_MAX;
 
-	for_each_cache(ca, c, i)
-		for_each_bucket(b, ca)
-			if (b->prio &&
-			    b->prio != BTREE_PRIO &&
-			    !atomic_read(&b->pin)) {
-				b->prio--;
-				c->min_prio = min(c->min_prio, b->prio);
-			}
+	ca = c->cache;
+	for_each_bucket(b, ca)
+		if (b->prio &&
+		    b->prio != BTREE_PRIO &&
+		    !atomic_read(&b->pin)) {
+			b->prio--;
+			c->min_prio = min(c->min_prio, b->prio);
+		}
 
	mutex_unlock(&c->bucket_lock);
 }
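For context (not introduced by this patch): with a single cache per cache set, the old for_each_cache() iteration collapses to a direct c->cache dereference, while for_each_bucket() itself is unchanged. Roughly, the existing helper in bcache.h walks the cache's bucket array as sketched below (shown only to make the hunk easier to read).

/* Approximate shape of the existing for_each_bucket() macro. */
#define for_each_bucket(b, ca)						\
	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)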
@@ -362,7 +361,7 @@
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
-		if (CACHE_SYNC(&ca->set->sb)) {
+		if (CACHE_SYNC(&ca->sb)) {
			/*
			 * This could deadlock if an allocation with a btree
			 * node locked ever blocked - having the btree node
@@ -488,34 +487,29 @@
 }
 
 int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-			   struct bkey *k, int n, bool wait)
+			   struct bkey *k, bool wait)
 {
-	int i;
+	struct cache *ca;
+	long b;
 
	/* No allocation if CACHE_SET_IO_DISABLE bit is set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return -1;
 
	lockdep_assert_held(&c->bucket_lock);
-	BUG_ON(!n || n > c->caches_loaded || n > 8);
 
	bkey_init(k);
 
-	/* sort by free space/prio of oldest data in caches */
+	ca = c->cache;
+	b = bch_bucket_alloc(ca, reserve, wait);
+	if (b == -1)
+		goto err;
 
-	for (i = 0; i < n; i++) {
-		struct cache *ca = c->cache_by_alloc[i];
-		long b = bch_bucket_alloc(ca, reserve, wait);
+	k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
+			     bucket_to_sector(c, b),
+			     ca->sb.nr_this_dev);
 
-		if (b == -1)
-			goto err;
-
-		k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
-				     bucket_to_sector(c, b),
-				     ca->sb.nr_this_dev);
-
-		SET_KEY_PTRS(k, i + 1);
-	}
+	SET_KEY_PTRS(k, 1);
 
	return 0;
 err:
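Illustration only (the helper name is hypothetical; PTR_DEV(), PTR_OFFSET() and PTR_GEN() are the existing accessors for the fields packed by MAKE_PTR()): after this change a key filled in by __bch_bucket_alloc_set() always carries a single pointer, in slot 0.

/* Hypothetical debug helper: dump the one pointer set up above. */
static void example_dump_alloc_key(struct bkey *k)
{
	/* SET_KEY_PTRS(k, 1) means slot 0 is the only valid pointer. */
	pr_info("bucket key: dev %llu offset %llu gen %llu\n",
		(unsigned long long)PTR_DEV(k, 0),
		(unsigned long long)PTR_OFFSET(k, 0),
		(unsigned long long)PTR_GEN(k, 0));
}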
@@ -525,12 +519,12 @@
 }
 
 int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-			 struct bkey *k, int n, bool wait)
+			 struct bkey *k, bool wait)
 {
	int ret;
 
	mutex_lock(&c->bucket_lock);
-	ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
+	ret = __bch_bucket_alloc_set(c, reserve, k, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
 }
@@ -589,7 +583,7 @@
				       struct open_bucket, list);
 found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
-		ret->sectors_free = c->sb.bucket_size;
+		ret->sectors_free = c->cache->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}
@@ -638,7 +632,7 @@
 
	spin_unlock(&c->data_bucket_lock);
 
-	if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
+	if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
		return false;
 
	spin_lock(&c->data_bucket_lock);
@@ -683,7 +677,7 @@
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}
 
-	if (b->sectors_free < c->sb.block_size)
+	if (b->sectors_free < c->cache->sb.block_size)
		b->sectors_free = 0;
 
	/*