struct mb_cache;

/*
 * Cache entry flags: bit numbers for struct mb_cache_entry::e_flags.
 * Use with set_bit()/test_bit()-style accessors (hence the _B suffix).
 */
enum {
	MBE_REFERENCED_B = 0,	/* entry was recently looked up */
	MBE_REUSABLE_B		/* entry may be returned for reuse by lookups */
};
13 | 19 | struct mb_cache_entry { |
---|
14 | 20 | /* List of entries in cache - protected by cache->c_list_lock */ |
---|
15 | 21 | struct list_head e_list; |
---|
16 | | - /* Hash table list - protected by hash chain bitlock */ |
---|
| 22 | + /* |
---|
| 23 | + * Hash table list - protected by hash chain bitlock. The entry is |
---|
| 24 | + * guaranteed to be hashed while e_refcnt > 0. |
---|
| 25 | + */ |
---|
17 | 26 | struct hlist_bl_node e_hash_list; |
---|
| 27 | + /* |
---|
| 28 | + * Entry refcount. Once it reaches zero, entry is unhashed and freed. |
---|
| 29 | + * While refcount > 0, the entry is guaranteed to stay in the hash and |
---|
| 30 | + * e.g. mb_cache_entry_try_delete() will fail. |
---|
| 31 | + */ |
---|
18 | 32 | atomic_t e_refcnt; |
---|
19 | 33 | /* Key in hash - stable during lifetime of the entry */ |
---|
20 | 34 | u32 e_key; |
---|
21 | | - u32 e_referenced:1; |
---|
22 | | - u32 e_reusable:1; |
---|
| 35 | + unsigned long e_flags; |
---|
23 | 36 | /* User provided value - stable during lifetime of the entry */ |
---|
24 | 37 | u64 e_value; |
---|
25 | 38 | }; |
---|
.. | .. |
---|
29 | 42 | |
---|
30 | 43 | int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, |
---|
31 | 44 | u64 value, bool reusable); |
---|
32 | | -void __mb_cache_entry_free(struct mb_cache_entry *entry); |
---|
33 | | -static inline int mb_cache_entry_put(struct mb_cache *cache, |
---|
34 | | - struct mb_cache_entry *entry) |
---|
| 45 | +void __mb_cache_entry_free(struct mb_cache *cache, |
---|
| 46 | + struct mb_cache_entry *entry); |
---|
| 47 | +void mb_cache_entry_wait_unused(struct mb_cache_entry *entry); |
---|
| 48 | +static inline void mb_cache_entry_put(struct mb_cache *cache, |
---|
| 49 | + struct mb_cache_entry *entry) |
---|
35 | 50 | { |
---|
36 | | - if (!atomic_dec_and_test(&entry->e_refcnt)) |
---|
37 | | - return 0; |
---|
38 | | - __mb_cache_entry_free(entry); |
---|
39 | | - return 1; |
---|
| 51 | + unsigned int cnt = atomic_dec_return(&entry->e_refcnt); |
---|
| 52 | + |
---|
| 53 | + if (cnt > 0) { |
---|
| 54 | + if (cnt <= 2) |
---|
| 55 | + wake_up_var(&entry->e_refcnt); |
---|
| 56 | + return; |
---|
| 57 | + } |
---|
| 58 | + __mb_cache_entry_free(cache, entry); |
---|
40 | 59 | } |
---|
41 | 60 | |
---|
| 61 | +struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache, |
---|
| 62 | + u32 key, u64 value); |
---|
42 | 63 | void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value); |
---|
43 | 64 | struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key, |
---|
44 | 65 | u64 value); |
---|