| .. | .. |
|---|
| 1 | 1 | // SPDX-License-Identifier: GPL-2.0 |
|---|
| 2 | 2 | /* |
|---|
| 3 | | - * Variant of atomic_t specialized for reference counts. |
|---|
| 4 | | - * |
|---|
| 5 | | - * The interface matches the atomic_t interface (to aid in porting) but only |
|---|
| 6 | | - * provides the few functions one should use for reference counting. |
|---|
| 7 | | - * |
|---|
| 8 | | - * It differs in that the counter saturates at UINT_MAX and will not move once |
|---|
| 9 | | - * there. This avoids wrapping the counter and causing 'spurious' |
|---|
| 10 | | - * use-after-free issues. |
|---|
| 11 | | - * |
|---|
| 12 | | - * Memory ordering rules are slightly relaxed wrt regular atomic_t functions |
|---|
| 13 | | - * and provide only what is strictly required for refcounts. |
|---|
| 14 | | - * |
|---|
| 15 | | - * The increments are fully relaxed; these will not provide ordering. The |
|---|
| 16 | | - * rationale is that whatever is used to obtain the object we're increasing the |
|---|
| 17 | | - * reference count on will provide the ordering. For locked data structures, |
|---|
| 18 | | - * it's the lock acquire, for RCU/lockless data structures it's the dependent |
|---|
| 19 | | - * load. |
|---|
| 20 | | - * |
|---|
| 21 | | - * Do note that inc_not_zero() provides a control dependency which will order |
|---|
| 22 | | - * future stores against the inc, this ensures we'll never modify the object |
|---|
| 23 | | - * if we did not in fact acquire a reference. |
|---|
| 24 | | - * |
|---|
| 25 | | - * The decrements will provide release order, such that all the prior loads and |
|---|
| 26 | | - * stores will be issued before, it also provides a control dependency, which |
|---|
| 27 | | - * will order us against the subsequent free(). |
|---|
| 28 | | - * |
|---|
| 29 | | - * The control dependency is against the load of the cmpxchg (ll/sc) that |
|---|
| 30 | | - * succeeded. This means the stores aren't fully ordered, but this is fine |
|---|
| 31 | | - * because the 1->0 transition indicates no concurrency. |
|---|
| 32 | | - * |
|---|
| 33 | | - * Note that the allocator is responsible for ordering things between free() |
|---|
| 34 | | - * and alloc(). |
|---|
| 35 | | - * |
|---|
| 3 | + * Out-of-line refcount functions. |
|---|
| 36 | 4 | */ |
|---|
| 37 | 5 | |
|---|
| 38 | 6 | #include <linux/mutex.h> |
|---|
| .. | .. |
|---|
| 40 | 8 | #include <linux/spinlock.h> |
|---|
| 41 | 9 | #include <linux/bug.h> |
|---|
| 42 | 10 | |
|---|
| 43 | | -/** |
|---|
| 44 | | - * refcount_add_not_zero_checked - add a value to a refcount unless it is 0 |
|---|
| 45 | | - * @i: the value to add to the refcount |
|---|
| 46 | | - * @r: the refcount |
|---|
| 47 | | - * |
|---|
| 48 | | - * Will saturate at UINT_MAX and WARN. |
|---|
| 49 | | - * |
|---|
| 50 | | - * Provides no memory ordering, it is assumed the caller has guaranteed the |
|---|
| 51 | | - * object memory to be stable (RCU, etc.). It does provide a control dependency |
|---|
| 52 | | - * and thereby orders future stores. See the comment on top. |
|---|
| 53 | | - * |
|---|
| 54 | | - * Use of this function is not recommended for the normal reference counting |
|---|
| 55 | | - * use case in which references are taken and released one at a time. In these |
|---|
| 56 | | - * cases, refcount_inc(), or one of its variants, should instead be used to |
|---|
| 57 | | - * increment a reference count. |
|---|
| 58 | | - * |
|---|
| 59 | | - * Return: false if the passed refcount is 0, true otherwise |
|---|
| 60 | | - */ |
|---|
| 61 | | -bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r) |
|---|
| 11 | +#define REFCOUNT_WARN(str) WARN_ONCE(1, "refcount_t: " str ".\n") |
|---|
| 12 | + |
|---|
| 13 | +void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t) |
|---|
| 62 | 14 | { |
|---|
| 63 | | - unsigned int new, val = atomic_read(&r->refs); |
|---|
| 15 | + refcount_set(r, REFCOUNT_SATURATED); |
|---|
| 64 | 16 | |
|---|
| 65 | | - do { |
|---|
| 66 | | - if (!val) |
|---|
| 67 | | - return false; |
|---|
| 68 | | - |
|---|
| 69 | | - if (unlikely(val == UINT_MAX)) |
|---|
| 70 | | - return true; |
|---|
| 71 | | - |
|---|
| 72 | | - new = val + i; |
|---|
| 73 | | - if (new < val) |
|---|
| 74 | | - new = UINT_MAX; |
|---|
| 75 | | - |
|---|
| 76 | | - } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new)); |
|---|
| 77 | | - |
|---|
| 78 | | - WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); |
|---|
| 79 | | - |
|---|
| 80 | | - return true; |
|---|
| 17 | + switch (t) { |
|---|
| 18 | + case REFCOUNT_ADD_NOT_ZERO_OVF: |
|---|
| 19 | + REFCOUNT_WARN("saturated; leaking memory"); |
|---|
| 20 | + break; |
|---|
| 21 | + case REFCOUNT_ADD_OVF: |
|---|
| 22 | + REFCOUNT_WARN("saturated; leaking memory"); |
|---|
| 23 | + break; |
|---|
| 24 | + case REFCOUNT_ADD_UAF: |
|---|
| 25 | + REFCOUNT_WARN("addition on 0; use-after-free"); |
|---|
| 26 | + break; |
|---|
| 27 | + case REFCOUNT_SUB_UAF: |
|---|
| 28 | + REFCOUNT_WARN("underflow; use-after-free"); |
|---|
| 29 | + break; |
|---|
| 30 | + case REFCOUNT_DEC_LEAK: |
|---|
| 31 | + REFCOUNT_WARN("decrement hit 0; leaking memory"); |
|---|
| 32 | + break; |
|---|
| 33 | + default: |
|---|
| 34 | + REFCOUNT_WARN("unknown saturation event!?"); |
|---|
| 35 | + } |
|---|
| 81 | 36 | } |
|---|
| 82 | | -EXPORT_SYMBOL(refcount_add_not_zero_checked); |
|---|
| 83 | | - |
|---|
| 84 | | -/** |
|---|
| 85 | | - * refcount_add_checked - add a value to a refcount |
|---|
| 86 | | - * @i: the value to add to the refcount |
|---|
| 87 | | - * @r: the refcount |
|---|
| 88 | | - * |
|---|
| 89 | | - * Similar to atomic_add(), but will saturate at UINT_MAX and WARN. |
|---|
| 90 | | - * |
|---|
| 91 | | - * Provides no memory ordering, it is assumed the caller has guaranteed the |
|---|
| 92 | | - * object memory to be stable (RCU, etc.). It does provide a control dependency |
|---|
| 93 | | - * and thereby orders future stores. See the comment on top. |
|---|
| 94 | | - * |
|---|
| 95 | | - * Use of this function is not recommended for the normal reference counting |
|---|
| 96 | | - * use case in which references are taken and released one at a time. In these |
|---|
| 97 | | - * cases, refcount_inc(), or one of its variants, should instead be used to |
|---|
| 98 | | - * increment a reference count. |
|---|
| 99 | | - */ |
|---|
| 100 | | -void refcount_add_checked(unsigned int i, refcount_t *r) |
|---|
| 101 | | -{ |
|---|
| 102 | | - WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n"); |
|---|
| 103 | | -} |
|---|
| 104 | | -EXPORT_SYMBOL(refcount_add_checked); |
|---|
| 105 | | - |
|---|
| 106 | | -/** |
|---|
| 107 | | - * refcount_inc_not_zero_checked - increment a refcount unless it is 0 |
|---|
| 108 | | - * @r: the refcount to increment |
|---|
| 109 | | - * |
|---|
| 110 | | - * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN. |
|---|
| 111 | | - * |
|---|
| 112 | | - * Provides no memory ordering, it is assumed the caller has guaranteed the |
|---|
| 113 | | - * object memory to be stable (RCU, etc.). It does provide a control dependency |
|---|
| 114 | | - * and thereby orders future stores. See the comment on top. |
|---|
| 115 | | - * |
|---|
| 116 | | - * Return: true if the increment was successful, false otherwise |
|---|
| 117 | | - */ |
|---|
| 118 | | -bool refcount_inc_not_zero_checked(refcount_t *r) |
|---|
| 119 | | -{ |
|---|
| 120 | | - unsigned int new, val = atomic_read(&r->refs); |
|---|
| 121 | | - |
|---|
| 122 | | - do { |
|---|
| 123 | | - new = val + 1; |
|---|
| 124 | | - |
|---|
| 125 | | - if (!val) |
|---|
| 126 | | - return false; |
|---|
| 127 | | - |
|---|
| 128 | | - if (unlikely(!new)) |
|---|
| 129 | | - return true; |
|---|
| 130 | | - |
|---|
| 131 | | - } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new)); |
|---|
| 132 | | - |
|---|
| 133 | | - WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); |
|---|
| 134 | | - |
|---|
| 135 | | - return true; |
|---|
| 136 | | -} |
|---|
| 137 | | -EXPORT_SYMBOL(refcount_inc_not_zero_checked); |
|---|
| 138 | | - |
|---|
| 139 | | -/** |
|---|
| 140 | | - * refcount_inc_checked - increment a refcount |
|---|
| 141 | | - * @r: the refcount to increment |
|---|
| 142 | | - * |
|---|
| 143 | | - * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN. |
|---|
| 144 | | - * |
|---|
| 145 | | - * Provides no memory ordering, it is assumed the caller already has a |
|---|
| 146 | | - * reference on the object. |
|---|
| 147 | | - * |
|---|
| 148 | | - * Will WARN if the refcount is 0, as this represents a possible use-after-free |
|---|
| 149 | | - * condition. |
|---|
| 150 | | - */ |
|---|
| 151 | | -void refcount_inc_checked(refcount_t *r) |
|---|
| 152 | | -{ |
|---|
| 153 | | - WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n"); |
|---|
| 154 | | -} |
|---|
| 155 | | -EXPORT_SYMBOL(refcount_inc_checked); |
|---|
| 156 | | - |
|---|
| 157 | | -/** |
|---|
| 158 | | - * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0 |
|---|
| 159 | | - * @i: amount to subtract from the refcount |
|---|
| 160 | | - * @r: the refcount |
|---|
| 161 | | - * |
|---|
| 162 | | - * Similar to atomic_dec_and_test(), but it will WARN, return false and |
|---|
| 163 | | - * ultimately leak on underflow and will fail to decrement when saturated |
|---|
| 164 | | - * at UINT_MAX. |
|---|
| 165 | | - * |
|---|
| 166 | | - * Provides release memory ordering, such that prior loads and stores are done |
|---|
| 167 | | - * before, and provides a control dependency such that free() must come after. |
|---|
| 168 | | - * See the comment on top. |
|---|
| 169 | | - * |
|---|
| 170 | | - * Use of this function is not recommended for the normal reference counting |
|---|
| 171 | | - * use case in which references are taken and released one at a time. In these |
|---|
| 172 | | - * cases, refcount_dec(), or one of its variants, should instead be used to |
|---|
| 173 | | - * decrement a reference count. |
|---|
| 174 | | - * |
|---|
| 175 | | - * Return: true if the resulting refcount is 0, false otherwise |
|---|
| 176 | | - */ |
|---|
| 177 | | -bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r) |
|---|
| 178 | | -{ |
|---|
| 179 | | - unsigned int new, val = atomic_read(&r->refs); |
|---|
| 180 | | - |
|---|
| 181 | | - do { |
|---|
| 182 | | - if (unlikely(val == UINT_MAX)) |
|---|
| 183 | | - return false; |
|---|
| 184 | | - |
|---|
| 185 | | - new = val - i; |
|---|
| 186 | | - if (new > val) { |
|---|
| 187 | | - WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n"); |
|---|
| 188 | | - return false; |
|---|
| 189 | | - } |
|---|
| 190 | | - |
|---|
| 191 | | - } while (!atomic_try_cmpxchg_release(&r->refs, &val, new)); |
|---|
| 192 | | - |
|---|
| 193 | | - return !new; |
|---|
| 194 | | -} |
|---|
| 195 | | -EXPORT_SYMBOL(refcount_sub_and_test_checked); |
|---|
| 196 | | - |
|---|
| 197 | | -/** |
|---|
| 198 | | - * refcount_dec_and_test_checked - decrement a refcount and test if it is 0 |
|---|
| 199 | | - * @r: the refcount |
|---|
| 200 | | - * |
|---|
| 201 | | - * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to |
|---|
| 202 | | - * decrement when saturated at UINT_MAX. |
|---|
| 203 | | - * |
|---|
| 204 | | - * Provides release memory ordering, such that prior loads and stores are done |
|---|
| 205 | | - * before, and provides a control dependency such that free() must come after. |
|---|
| 206 | | - * See the comment on top. |
|---|
| 207 | | - * |
|---|
| 208 | | - * Return: true if the resulting refcount is 0, false otherwise |
|---|
| 209 | | - */ |
|---|
| 210 | | -bool refcount_dec_and_test_checked(refcount_t *r) |
|---|
| 211 | | -{ |
|---|
| 212 | | - return refcount_sub_and_test_checked(1, r); |
|---|
| 213 | | -} |
|---|
| 214 | | -EXPORT_SYMBOL(refcount_dec_and_test_checked); |
|---|
| 215 | | - |
|---|
| 216 | | -/** |
|---|
| 217 | | - * refcount_dec_checked - decrement a refcount |
|---|
| 218 | | - * @r: the refcount |
|---|
| 219 | | - * |
|---|
| 220 | | - * Similar to atomic_dec(), it will WARN on underflow and fail to decrement |
|---|
| 221 | | - * when saturated at UINT_MAX. |
|---|
| 222 | | - * |
|---|
| 223 | | - * Provides release memory ordering, such that prior loads and stores are done |
|---|
| 224 | | - * before. |
|---|
| 225 | | - */ |
|---|
| 226 | | -void refcount_dec_checked(refcount_t *r) |
|---|
| 227 | | -{ |
|---|
| 228 | | - WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n"); |
|---|
| 229 | | -} |
|---|
| 230 | | -EXPORT_SYMBOL(refcount_dec_checked); |
|---|
| 37 | +EXPORT_SYMBOL(refcount_warn_saturate); |
|---|
| 231 | 38 | |
|---|
| 232 | 39 | /** |
|---|
| 233 | 40 | * refcount_dec_if_one - decrement a refcount if it is 1 |
|---|
| .. | .. |
|---|
| 269 | 76 | unsigned int new, val = atomic_read(&r->refs); |
|---|
| 270 | 77 | |
|---|
| 271 | 78 | do { |
|---|
| 272 | | - if (unlikely(val == UINT_MAX)) |
|---|
| 79 | + if (unlikely(val == REFCOUNT_SATURATED)) |
|---|
| 273 | 80 | return true; |
|---|
| 274 | 81 | |
|---|
| 275 | 82 | if (val == 1) |
|---|
| .. | .. |
|---|
| 294 | 101 | * @lock: the mutex to be locked |
|---|
| 295 | 102 | * |
|---|
| 296 | 103 | * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail |
|---|
| 297 | | - * to decrement when saturated at UINT_MAX. |
|---|
| 104 | + * to decrement when saturated at REFCOUNT_SATURATED. |
|---|
| 298 | 105 | * |
|---|
| 299 | 106 | * Provides release memory ordering, such that prior loads and stores are done |
|---|
| 300 | 107 | * before, and provides a control dependency such that free() must come after. |
|---|
| .. | .. |
|---|
| 325 | 132 | * @lock: the spinlock to be locked |
|---|
| 326 | 133 | * |
|---|
| 327 | 134 | * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to |
|---|
| 328 | | - * decrement when saturated at UINT_MAX. |
|---|
| 135 | + * decrement when saturated at REFCOUNT_SATURATED. |
|---|
| 329 | 136 | * |
|---|
| 330 | 137 | * Provides release memory ordering, such that prior loads and stores are done |
|---|
| 331 | 138 | * before, and provides a control dependency such that free() must come after. |
|---|