| .. | .. |
|---|
| 24 | 24 | #include <linux/list_nulls.h> |
|---|
| 25 | 25 | #include <linux/workqueue.h> |
|---|
| 26 | 26 | #include <linux/rculist.h> |
|---|
| 27 | +#include <linux/bit_spinlock.h> |
|---|
| 27 | 28 | |
|---|
| 28 | 29 | #include <linux/rhashtable-types.h> |
|---|
| 29 | 30 | /* |
|---|
| 31 | + * Objects in an rhashtable have an embedded struct rhash_head |
|---|
| 32 | + * which is linked into a hash chain from the hash table - or one |
|---|
| 33 | + * of two or more hash tables when the rhashtable is being resized. |
|---|
| 30 | 34 | * The end of the chain is marked with a special nulls marker which has |
|---|
| 31 | | - * the least significant bit set. |
|---|
| 35 | + * the least significant bit set but otherwise stores the address of |
|---|
| 36 | + * the hash bucket. This allows us to be sure we've found the end |
|---|
| 37 | + * of the right list. |
|---|
| 38 | + * The value stored in the hash bucket uses BIT(0) as a lock bit. |
|---|
| 39 | + * This bit must be atomically set before any changes are made to |
|---|
| 40 | + * the chain. To avoid dereferencing this pointer without clearing |
|---|
| 41 | + * the bit first, we use an opaque 'struct rhash_lock_head *' for the |
|---|
| 42 | + * pointer stored in the bucket. This struct needs to be defined so |
|---|
| 43 | + * that rcu_dereference() works on it, but it has no content so a |
|---|
| 44 | + * cast is needed for it to be useful. This ensures it isn't |
|---|
| 45 | + * used by mistake without clearing the lock bit first. |
|---|
| 32 | 46 | */ |
|---|
| 47 | +struct rhash_lock_head {}; |
|---|
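
To make the encoding above concrete, here is a minimal standalone sketch (userspace, with a local `BIT()` macro; the struct and function are hypothetical, not part of this header) of how one word can carry both the chain head and the lock state:

```c
#include <stdio.h>

#define BIT(n) (1UL << (n))

struct rhash_head { struct rhash_head *next; };

int main(void)
{
	struct rhash_head obj = { .next = NULL };

	/* Real pointers are at least 2-byte aligned, so BIT(0) is free
	 * to act as the lock bit without corrupting the address. */
	unsigned long bucket = (unsigned long)&obj | BIT(0);

	printf("locked:     %lu\n", bucket & BIT(0));
	printf("chain head: %p\n", (void *)(bucket & ~BIT(0)));
	return 0;
}
```
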
| 33 | 48 | |
|---|
| 34 | 49 | /* Maximum chain length before rehash |
|---|
| 35 | 50 | * |
|---|
| .. | .. |
|---|
| 52 | 67 | * @nest: Number of bits of first-level nested table. |
|---|
| 53 | 68 | * @rehash: Current bucket being rehashed |
|---|
| 54 | 69 | * @hash_rnd: Random seed to fold into hash |
|---|
| 55 | | - * @locks_mask: Mask to apply before accessing locks[] |
|---|
| 56 | | - * @locks: Array of spinlocks protecting individual buckets |
|---|
| 57 | 70 | * @walkers: List of active walkers |
|---|
| 58 | 71 | * @rcu: RCU structure for freeing the table |
|---|
| 59 | 72 | * @future_tbl: Table under construction during rehashing |
|---|
| .. | .. |
|---|
| 63 | 76 | struct bucket_table { |
|---|
| 64 | 77 | unsigned int size; |
|---|
| 65 | 78 | unsigned int nest; |
|---|
| 66 | | - unsigned int rehash; |
|---|
| 67 | 79 | u32 hash_rnd; |
|---|
| 68 | | - unsigned int locks_mask; |
|---|
| 69 | | - spinlock_t *locks; |
|---|
| 70 | 80 | struct list_head walkers; |
|---|
| 71 | 81 | struct rcu_head rcu; |
|---|
| 72 | 82 | |
|---|
| 73 | 83 | struct bucket_table __rcu *future_tbl; |
|---|
| 74 | 84 | |
|---|
| 75 | | - struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; |
|---|
| 85 | + struct lockdep_map dep_map; |
|---|
| 86 | + |
|---|
| 87 | + struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp; |
|---|
| 76 | 88 | }; |
|---|
| 77 | 89 | |
|---|
| 90 | +/* |
|---|
| 91 | + * NULLS_MARKER() expects a hash value with the low |
|---|
| 92 | + * bits most likely to be significant, and it discards |
|---|
| 93 | + * the msb. |
|---|
| 94 | + * We give it an address, in which the bottom bit is |
|---|
| 95 | + * always 0, and the msb might be significant. |
|---|
| 96 | + * So we shift the address down one bit to align with |
|---|
| 97 | + * expectations and avoid losing a significant bit. |
|---|
| 98 | + * |
|---|
| 99 | + * We never store the NULLS_MARKER in the hash table |
|---|
| 100 | + * itself as we need the lsb for locking. |
|---|
| 101 | + * Instead we store NULL. |
|---|
| 102 | + */ |
|---|
| 103 | +#define RHT_NULLS_MARKER(ptr) \ |
|---|
| 104 | + ((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1)) |
|---|
| 78 | 105 | #define INIT_RHT_NULLS_HEAD(ptr) \ |
|---|
| 79 | | - ((ptr) = (typeof(ptr)) NULLS_MARKER(0)) |
|---|
| 106 | + ((ptr) = NULL) |
|---|
| 80 | 107 | |
|---|
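
A small standalone check of the arithmetic above (assuming `NULLS_MARKER()` as defined in include/linux/list_nulls.h): because a bucket address always has bit 0 clear, shifting it down and back up reproduces the address exactly, so the marker is simply the bucket address with the nulls bit set:

```c
#include <stdio.h>

/* As in include/linux/list_nulls.h: the low bit flags a nulls value,
 * the payload sits in the remaining bits, shifted up by one. */
#define NULLS_MARKER(value) (1UL | (((unsigned long)(value)) << 1))

/* As above: shift the address down first so its msb is not lost. */
#define RHT_NULLS_MARKER(ptr) \
	((void *)NULLS_MARKER(((unsigned long)(ptr)) >> 1))

int main(void)
{
	static unsigned long bucket;	/* stand-in for &tbl->buckets[hash] */
	unsigned long marker = (unsigned long)RHT_NULLS_MARKER(&bucket);

	printf("bucket %p marker %#lx\n", (void *)&bucket, marker);
	printf("nulls bit set: %lu\n", marker & 1UL);
	printf("address preserved: %d\n",
	       (marker & ~1UL) == (unsigned long)&bucket);
	return 0;
}
```
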
| 81 | 108 | static inline bool rht_is_a_nulls(const struct rhash_head *ptr) |
|---|
| 82 | 109 | { |
|---|
| .. | .. |
|---|
| 196 | 223 | return atomic_read(&ht->nelems) >= ht->max_elems; |
|---|
| 197 | 224 | } |
|---|
| 198 | 225 | |
|---|
| 199 | | -/* The bucket lock is selected based on the hash and protects mutations |
|---|
| 200 | | - * on a group of hash buckets. |
|---|
| 201 | | - * |
|---|
| 202 | | - * A maximum of tbl->size/2 bucket locks is allocated. This ensures that |
|---|
| 203 | | - * a single lock always covers both buckets which may both contains |
|---|
| 204 | | - * entries which link to the same bucket of the old table during resizing. |
|---|
| 205 | | - * This allows to simplify the locking as locking the bucket in both |
|---|
| 206 | | - * tables during resize always guarantee protection. |
|---|
| 207 | | - * |
|---|
| 208 | | - * IMPORTANT: When holding the bucket lock of both the old and new table |
|---|
| 209 | | - * during expansions and shrinking, the old bucket lock must always be |
|---|
| 210 | | - * acquired first. |
|---|
| 211 | | - */ |
|---|
| 212 | | -static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl, |
|---|
| 213 | | - unsigned int hash) |
|---|
| 214 | | -{ |
|---|
| 215 | | - return &tbl->locks[hash & tbl->locks_mask]; |
|---|
| 216 | | -} |
|---|
| 217 | | - |
|---|
| 218 | 226 | #ifdef CONFIG_PROVE_LOCKING |
|---|
| 219 | 227 | int lockdep_rht_mutex_is_held(struct rhashtable *ht); |
|---|
| 220 | 228 | int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash); |
|---|
| .. | .. |
|---|
| 253 | 261 | void *arg); |
|---|
| 254 | 262 | void rhashtable_destroy(struct rhashtable *ht); |
|---|
| 255 | 263 | |
|---|
| 256 | | -struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, |
|---|
| 257 | | - unsigned int hash); |
|---|
| 258 | | -struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, |
|---|
| 259 | | - struct bucket_table *tbl, |
|---|
| 260 | | - unsigned int hash); |
|---|
| 264 | +struct rhash_lock_head __rcu **rht_bucket_nested( |
|---|
| 265 | + const struct bucket_table *tbl, unsigned int hash); |
|---|
| 266 | +struct rhash_lock_head __rcu **__rht_bucket_nested( |
|---|
| 267 | + const struct bucket_table *tbl, unsigned int hash); |
|---|
| 268 | +struct rhash_lock_head __rcu **rht_bucket_nested_insert( |
|---|
| 269 | + struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash); |
|---|
| 261 | 270 | |
|---|
| 262 | 271 | #define rht_dereference(p, ht) \ |
|---|
| 263 | 272 | rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) |
|---|
| .. | .. |
|---|
| 274 | 283 | #define rht_entry(tpos, pos, member) \ |
|---|
| 275 | 284 | ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) |
|---|
| 276 | 285 | |
|---|
| 277 | | -static inline struct rhash_head __rcu *const *rht_bucket( |
|---|
| 286 | +static inline struct rhash_lock_head __rcu *const *rht_bucket( |
|---|
| 278 | 287 | const struct bucket_table *tbl, unsigned int hash) |
|---|
| 279 | 288 | { |
|---|
| 280 | 289 | return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : |
|---|
| 281 | 290 | &tbl->buckets[hash]; |
|---|
| 282 | 291 | } |
|---|
| 283 | 292 | |
|---|
| 284 | | -static inline struct rhash_head __rcu **rht_bucket_var( |
|---|
| 293 | +static inline struct rhash_lock_head __rcu **rht_bucket_var( |
|---|
| 285 | 294 | struct bucket_table *tbl, unsigned int hash) |
|---|
| 286 | 295 | { |
|---|
| 287 | | - return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : |
|---|
| 296 | + return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) : |
|---|
| 288 | 297 | &tbl->buckets[hash]; |
|---|
| 289 | 298 | } |
|---|
| 290 | 299 | |
|---|
| 291 | | -static inline struct rhash_head __rcu **rht_bucket_insert( |
|---|
| 300 | +static inline struct rhash_lock_head __rcu **rht_bucket_insert( |
|---|
| 292 | 301 | struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) |
|---|
| 293 | 302 | { |
|---|
| 294 | 303 | return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) : |
|---|
| 295 | 304 | &tbl->buckets[hash]; |
|---|
| 296 | 305 | } |
|---|
| 297 | 306 | |
|---|
| 307 | +/* |
|---|
| 308 | + * We lock a bucket by setting BIT(0) in the pointer - this is always |
|---|
| 309 | + * zero in real pointers. The NULLS mark is never stored in the bucket, |
|---|
| 310 | + * rather we store NULL if the bucket is empty. |
|---|
| 311 | + * bit_spin_locks do not handle contention well, but the whole point |
|---|
| 312 | + * of the hashtable design is to achieve minimum per-bucket contention. |
|---|
| 313 | + * A nested hash table might not have a bucket pointer. In that case |
|---|
| 314 | + * we cannot get a lock. For remove and replace the bucket must then |
|---|
| 315 | + * be empty, so no locking is needed. |
|---|
| 316 | + * For insert we allocate the bucket if this is the last bucket_table, |
|---|
| 317 | + * and then take the lock. |
|---|
| 318 | + * Sometimes we unlock a bucket by writing a new pointer there. In that |
|---|
| 319 | + * case we don't need to unlock, but we do need to reset state such as |
|---|
| 320 | + * local_bh. For that we have rht_assign_unlock(). As rcu_assign_pointer() |
|---|
| 321 | + * provides the same release semantics as bit_spin_unlock(), |
|---|
| 322 | + * this is safe. |
|---|
| 323 | + * When we write to a bucket without unlocking, we use rht_assign_locked(). |
|---|
| 324 | + */ |
|---|
| 325 | + |
|---|
| 326 | +static inline void rht_lock(struct bucket_table *tbl, |
|---|
| 327 | + struct rhash_lock_head __rcu **bkt) |
|---|
| 328 | +{ |
|---|
| 329 | + local_bh_disable(); |
|---|
| 330 | + bit_spin_lock(0, (unsigned long *)bkt); |
|---|
| 331 | + lock_map_acquire(&tbl->dep_map); |
|---|
| 332 | +} |
|---|
| 333 | + |
|---|
| 334 | +static inline void rht_lock_nested(struct bucket_table *tbl, |
|---|
| 335 | + struct rhash_lock_head __rcu **bucket, |
|---|
| 336 | + unsigned int subclass) |
|---|
| 337 | +{ |
|---|
| 338 | + local_bh_disable(); |
|---|
| 339 | + bit_spin_lock(0, (unsigned long *)bucket); |
|---|
| 340 | + lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_); |
|---|
| 341 | +} |
|---|
| 342 | + |
|---|
| 343 | +static inline void rht_unlock(struct bucket_table *tbl, |
|---|
| 344 | + struct rhash_lock_head __rcu **bkt) |
|---|
| 345 | +{ |
|---|
| 346 | + lock_map_release(&tbl->dep_map); |
|---|
| 347 | + bit_spin_unlock(0, (unsigned long *)bkt); |
|---|
| 348 | + local_bh_enable(); |
|---|
| 349 | +} |
|---|
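
For readers unfamiliar with `bit_spin_lock()`, a rough userspace analogue (GCC atomic builtins standing in for the kernel primitive; the real `rht_lock()` additionally disables softirqs and records the acquisition with lockdep):

```c
#define BIT(n) (1UL << (n))

/* Userspace sketch of bit_spin_lock(0, bkt): spin until this thread is
 * the one that transitions BIT(0) from clear to set. */
static void demo_bucket_lock(unsigned long *bkt)
{
	while (__atomic_fetch_or(bkt, BIT(0), __ATOMIC_ACQUIRE) & BIT(0))
		/* spin: another thread holds the bucket */;
}

/* Sketch of bit_spin_unlock(0, bkt): clear the bit with release
 * semantics so chain updates are visible before the lock drops. */
static void demo_bucket_unlock(unsigned long *bkt)
{
	__atomic_fetch_and(bkt, ~BIT(0), __ATOMIC_RELEASE);
}
```
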
| 350 | + |
|---|
| 351 | +static inline struct rhash_head *__rht_ptr( |
|---|
| 352 | + struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt) |
|---|
| 353 | +{ |
|---|
| 354 | + return (struct rhash_head *) |
|---|
| 355 | + ((unsigned long)p & ~BIT(0) ?: |
|---|
| 356 | + (unsigned long)RHT_NULLS_MARKER(bkt)); |
|---|
| 357 | +} |
|---|
| 358 | + |
|---|
| 359 | +/* |
|---|
| 360 | + * Where 'bkt' is a bucket and might be locked: |
|---|
| 361 | + * rht_ptr_rcu() dereferences that pointer and clears the lock bit. |
|---|
| 362 | + * rht_ptr() dereferences in a context where the bucket is locked. |
|---|
| 363 | + * rht_ptr_exclusive() dereferences in a context where exclusive |
|---|
| 364 | + * access is guaranteed, such as when destroying the table. |
|---|
| 365 | + */ |
|---|
| 366 | +static inline struct rhash_head *rht_ptr_rcu( |
|---|
| 367 | + struct rhash_lock_head __rcu *const *bkt) |
|---|
| 368 | +{ |
|---|
| 369 | + return __rht_ptr(rcu_dereference(*bkt), bkt); |
|---|
| 370 | +} |
|---|
| 371 | + |
|---|
| 372 | +static inline struct rhash_head *rht_ptr( |
|---|
| 373 | + struct rhash_lock_head __rcu *const *bkt, |
|---|
| 374 | + struct bucket_table *tbl, |
|---|
| 375 | + unsigned int hash) |
|---|
| 376 | +{ |
|---|
| 377 | + return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt); |
|---|
| 378 | +} |
|---|
| 379 | + |
|---|
| 380 | +static inline struct rhash_head *rht_ptr_exclusive( |
|---|
| 381 | + struct rhash_lock_head __rcu *const *bkt) |
|---|
| 382 | +{ |
|---|
| 383 | + return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt); |
|---|
| 384 | +} |
|---|
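
An illustrative trace of `__rht_ptr()` on an empty bucket (a hypothetical userspace mirror without the RCU annotations): an empty bucket stores NULL, and the fallback substitutes the bucket's own nulls marker, so `rht_is_a_nulls()` terminates the walk on the correct list:

```c
#include <stdio.h>

#define BIT(n) (1UL << (n))
#define NULLS_MARKER(value) (1UL | (((unsigned long)(value)) << 1))
#define RHT_NULLS_MARKER(ptr) \
	((void *)NULLS_MARKER(((unsigned long)(ptr)) >> 1))

/* Mirror of __rht_ptr(): mask off the lock bit; if nothing is left
 * the bucket is empty, so report its own nulls marker instead. */
static void *demo_rht_ptr(void *p, void *bkt)
{
	unsigned long v = (unsigned long)p & ~BIT(0);

	return (void *)(v ? v : (unsigned long)RHT_NULLS_MARKER(bkt));
}

int main(void)
{
	static void *bucket;	/* empty bucket: holds NULL */
	unsigned long head = (unsigned long)demo_rht_ptr(bucket, &bucket);

	/* Low bit set => nulls => a chain walk ends immediately. */
	printf("empty bucket yields nulls: %lu\n", head & 1UL);
	return 0;
}
```
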
| 385 | + |
|---|
| 386 | +static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt, |
|---|
| 387 | + struct rhash_head *obj) |
|---|
| 388 | +{ |
|---|
| 389 | + if (rht_is_a_nulls(obj)) |
|---|
| 390 | + obj = NULL; |
|---|
| 391 | + rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0))); |
|---|
| 392 | +} |
|---|
| 393 | + |
|---|
| 394 | +static inline void rht_assign_unlock(struct bucket_table *tbl, |
|---|
| 395 | + struct rhash_lock_head __rcu **bkt, |
|---|
| 396 | + struct rhash_head *obj) |
|---|
| 397 | +{ |
|---|
| 398 | + if (rht_is_a_nulls(obj)) |
|---|
| 399 | + obj = NULL; |
|---|
| 400 | + lock_map_release(&tbl->dep_map); |
|---|
| 401 | + rcu_assign_pointer(*bkt, (void *)obj); |
|---|
| 402 | + preempt_enable(); |
|---|
| 403 | + __release(bitlock); |
|---|
| 404 | + local_bh_enable(); |
|---|
| 405 | +} |
|---|
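
Putting the helpers together, a hedged sketch of the calling pattern the fast paths below follow (the wrapper function is hypothetical; the helpers are the ones defined above):

```c
/* Hypothetical: push obj onto the head of a bucket's chain.
 * Callers hold rcu_read_lock(), as the fast paths do. */
static inline void example_bucket_push(struct bucket_table *tbl,
				       struct rhash_lock_head __rcu **bkt,
				       unsigned int hash,
				       struct rhash_head *obj)
{
	rht_lock(tbl, bkt);		/* sets BIT(0), disables BH */
	RCU_INIT_POINTER(obj->next, rht_ptr(bkt, tbl, hash));
	/* The store of the new head doubles as the unlock:
	 * rcu_assign_pointer() has release semantics. */
	rht_assign_unlock(tbl, bkt, obj);
}
```
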
| 406 | + |
|---|
| 298 | 407 | /** |
|---|
| 299 | | - * rht_for_each_continue - continue iterating over hash chain |
|---|
| 408 | + * rht_for_each_from - iterate over hash chain from given head |
|---|
| 300 | 409 | * @pos: the &struct rhash_head to use as a loop cursor. |
|---|
| 301 | | - * @head: the previous &struct rhash_head to continue from |
|---|
| 410 | + * @head: the &struct rhash_head to start from |
|---|
| 302 | 411 | * @tbl: the &struct bucket_table |
|---|
| 303 | 412 | * @hash: the hash value / bucket index |
|---|
| 304 | 413 | */ |
|---|
| 305 | | -#define rht_for_each_continue(pos, head, tbl, hash) \ |
|---|
| 306 | | - for (pos = rht_dereference_bucket(head, tbl, hash); \ |
|---|
| 307 | | - !rht_is_a_nulls(pos); \ |
|---|
| 414 | +#define rht_for_each_from(pos, head, tbl, hash) \ |
|---|
| 415 | + for (pos = head; \ |
|---|
| 416 | + !rht_is_a_nulls(pos); \ |
|---|
| 308 | 417 | pos = rht_dereference_bucket((pos)->next, tbl, hash)) |
|---|
| 309 | 418 | |
|---|
| 310 | 419 | /** |
|---|
| .. | .. |
|---|
| 314 | 423 | * @hash: the hash value / bucket index |
|---|
| 315 | 424 | */ |
|---|
| 316 | 425 | #define rht_for_each(pos, tbl, hash) \ |
|---|
| 317 | | - rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash) |
|---|
| 426 | + rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash), \ |
|---|
| 427 | + tbl, hash) |
|---|
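
A hedged usage sketch for this iterator (the helper is hypothetical; the non-RCU iterators require the bucket lock or ht->mutex to be held):

```c
/* Hypothetical: count the entries on one bucket's chain.
 * Caller must hold the bucket lock or ht->mutex. */
static unsigned int example_chain_len(struct bucket_table *tbl,
				      unsigned int hash)
{
	struct rhash_head *pos;
	unsigned int n = 0;

	rht_for_each(pos, tbl, hash)
		n++;
	return n;
}
```
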
| 318 | 428 | |
|---|
| 319 | 429 | /** |
|---|
| 320 | | - * rht_for_each_entry_continue - continue iterating over hash chain |
|---|
| 430 | + * rht_for_each_entry_from - iterate over hash chain from given head |
|---|
| 321 | 431 | * @tpos: the type * to use as a loop cursor. |
|---|
| 322 | 432 | * @pos: the &struct rhash_head to use as a loop cursor. |
|---|
| 323 | | - * @head: the previous &struct rhash_head to continue from |
|---|
| 433 | + * @head: the &struct rhash_head to start from |
|---|
| 324 | 434 | * @tbl: the &struct bucket_table |
|---|
| 325 | 435 | * @hash: the hash value / bucket index |
|---|
| 326 | 436 | * @member: name of the &struct rhash_head within the hashable struct. |
|---|
| 327 | 437 | */ |
|---|
| 328 | | -#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \ |
|---|
| 329 | | - for (pos = rht_dereference_bucket(head, tbl, hash); \ |
|---|
| 438 | +#define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \ |
|---|
| 439 | + for (pos = head; \ |
|---|
| 330 | 440 | (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ |
|---|
| 331 | 441 | pos = rht_dereference_bucket((pos)->next, tbl, hash)) |
|---|
| 332 | 442 | |
|---|
| .. | .. |
|---|
| 339 | 449 | * @member: name of the &struct rhash_head within the hashable struct. |
|---|
| 340 | 450 | */ |
|---|
| 341 | 451 | #define rht_for_each_entry(tpos, pos, tbl, hash, member) \ |
|---|
| 342 | | - rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \ |
|---|
| 343 | | - tbl, hash, member) |
|---|
| 452 | + rht_for_each_entry_from(tpos, pos, \ |
|---|
| 453 | + rht_ptr(rht_bucket(tbl, hash), tbl, hash), \ |
|---|
| 454 | + tbl, hash, member) |
|---|
| 344 | 455 | |
|---|
| 345 | 456 | /** |
|---|
| 346 | 457 | * rht_for_each_entry_safe - safely iterate over hash chain of given type |
|---|
| .. | .. |
|---|
| 355 | 466 | * remove the loop cursor from the list. |
|---|
| 356 | 467 | */ |
|---|
| 357 | 468 | #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ |
|---|
| 358 | | - for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \ |
|---|
| 469 | + for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash), \ |
|---|
| 359 | 470 | next = !rht_is_a_nulls(pos) ? \ |
|---|
| 360 | 471 | rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ |
|---|
| 361 | 472 | (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ |
|---|
| .. | .. |
|---|
| 364 | 475 | rht_dereference_bucket(pos->next, tbl, hash) : NULL) |
|---|
| 365 | 476 | |
|---|
| 366 | 477 | /** |
|---|
| 367 | | - * rht_for_each_rcu_continue - continue iterating over rcu hash chain |
|---|
| 478 | + * rht_for_each_rcu_from - iterate over rcu hash chain from given head |
|---|
| 368 | 479 | * @pos: the &struct rhash_head to use as a loop cursor. |
|---|
| 369 | | - * @head: the previous &struct rhash_head to continue from |
|---|
| 480 | + * @head: the &struct rhash_head to start from |
|---|
| 370 | 481 | * @tbl: the &struct bucket_table |
|---|
| 371 | 482 | * @hash: the hash value / bucket index |
|---|
| 372 | 483 | * |
|---|
| .. | .. |
|---|
| 374 | 485 | * the _rcu mutation primitives such as rhashtable_insert() as long as the |
|---|
| 375 | 486 | * traversal is guarded by rcu_read_lock(). |
|---|
| 376 | 487 | */ |
|---|
| 377 | | -#define rht_for_each_rcu_continue(pos, head, tbl, hash) \ |
|---|
| 488 | +#define rht_for_each_rcu_from(pos, head, tbl, hash) \ |
|---|
| 378 | 489 | for (({barrier(); }), \ |
|---|
| 379 | | - pos = rht_dereference_bucket_rcu(head, tbl, hash); \ |
|---|
| 490 | + pos = head; \ |
|---|
| 380 | 491 | !rht_is_a_nulls(pos); \ |
|---|
| 381 | 492 | pos = rcu_dereference_raw(pos->next)) |
|---|
| 382 | 493 | |
|---|
| .. | .. |
|---|
| 390 | 501 | * the _rcu mutation primitives such as rhashtable_insert() as long as the |
|---|
| 391 | 502 | * traversal is guarded by rcu_read_lock(). |
|---|
| 392 | 503 | */ |
|---|
| 393 | | -#define rht_for_each_rcu(pos, tbl, hash) \ |
|---|
| 394 | | - rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash) |
|---|
| 504 | +#define rht_for_each_rcu(pos, tbl, hash) \ |
|---|
| 505 | + for (({barrier(); }), \ |
|---|
| 506 | + pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \ |
|---|
| 507 | + !rht_is_a_nulls(pos); \ |
|---|
| 508 | + pos = rcu_dereference_raw(pos->next)) |
|---|
| 395 | 509 | |
|---|
| 396 | 510 | /** |
|---|
| 397 | | - * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain |
|---|
| 511 | + * rht_for_each_entry_rcu_from - iterate over rcu hash chain from given head |
|---|
| 398 | 512 | * @tpos: the type * to use as a loop cursor. |
|---|
| 399 | 513 | * @pos: the &struct rhash_head to use as a loop cursor. |
|---|
| 400 | | - * @head: the previous &struct rhash_head to continue from |
|---|
| 514 | + * @head: the &struct rhash_head to start from |
|---|
| 401 | 515 | * @tbl: the &struct bucket_table |
|---|
| 402 | 516 | * @hash: the hash value / bucket index |
|---|
| 403 | 517 | * @member: name of the &struct rhash_head within the hashable struct. |
|---|
| .. | .. |
|---|
| 406 | 520 | * the _rcu mutation primitives such as rhashtable_insert() as long as the |
|---|
| 407 | 521 | * traversal is guarded by rcu_read_lock(). |
|---|
| 408 | 522 | */ |
|---|
| 409 | | -#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \ |
|---|
| 523 | +#define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \ |
|---|
| 410 | 524 | for (({barrier(); }), \ |
|---|
| 411 | | - pos = rht_dereference_bucket_rcu(head, tbl, hash); \ |
|---|
| 525 | + pos = head; \ |
|---|
| 412 | 526 | (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ |
|---|
| 413 | 527 | pos = rht_dereference_bucket_rcu(pos->next, tbl, hash)) |
|---|
| 414 | 528 | |
|---|
| .. | .. |
|---|
| 425 | 539 | * traversal is guarded by rcu_read_lock(). |
|---|
| 426 | 540 | */ |
|---|
| 427 | 541 | #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ |
|---|
| 428 | | - rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \ |
|---|
| 429 | | - tbl, hash, member) |
|---|
| 542 | + rht_for_each_entry_rcu_from(tpos, pos, \ |
|---|
| 543 | + rht_ptr_rcu(rht_bucket(tbl, hash)), \ |
|---|
| 544 | + tbl, hash, member) |
|---|
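
A usage sketch for the RCU entry iterator (the object type and helper are hypothetical):

```c
/* Hypothetical object embedding a rhash_head. */
struct example_obj {
	u32 key;
	struct rhash_head node;
};

/* Walk one bucket under rcu_read_lock(); safe alongside concurrent
 * inserts because every step uses an RCU dereference. */
static struct example_obj *example_scan(struct bucket_table *tbl,
					unsigned int hash, u32 key)
{
	struct example_obj *obj;
	struct rhash_head *pos;

	rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
		if (obj->key == key)
			return obj;
	return NULL;
}
```
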
| 430 | 545 | |
|---|
| 431 | 546 | /** |
|---|
| 432 | 547 | * rhl_for_each_rcu - iterate over rcu hash table list |
|---|
| .. | .. |
|---|
| 471 | 586 | .ht = ht, |
|---|
| 472 | 587 | .key = key, |
|---|
| 473 | 588 | }; |
|---|
| 589 | + struct rhash_lock_head __rcu *const *bkt; |
|---|
| 474 | 590 | struct bucket_table *tbl; |
|---|
| 475 | 591 | struct rhash_head *he; |
|---|
| 476 | 592 | unsigned int hash; |
|---|
| .. | .. |
|---|
| 478 | 594 | tbl = rht_dereference_rcu(ht->tbl, ht); |
|---|
| 479 | 595 | restart: |
|---|
| 480 | 596 | hash = rht_key_hashfn(ht, tbl, key, params); |
|---|
| 481 | | - rht_for_each_rcu(he, tbl, hash) { |
|---|
| 482 | | - if (params.obj_cmpfn ? |
|---|
| 483 | | - params.obj_cmpfn(&arg, rht_obj(ht, he)) : |
|---|
| 484 | | - rhashtable_compare(&arg, rht_obj(ht, he))) |
|---|
| 485 | | - continue; |
|---|
| 486 | | - return he; |
|---|
| 487 | | - } |
|---|
| 597 | + bkt = rht_bucket(tbl, hash); |
|---|
| 598 | + do { |
|---|
| 599 | + rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) { |
|---|
| 600 | + if (params.obj_cmpfn ? |
|---|
| 601 | + params.obj_cmpfn(&arg, rht_obj(ht, he)) : |
|---|
| 602 | + rhashtable_compare(&arg, rht_obj(ht, he))) |
|---|
| 603 | + continue; |
|---|
| 604 | + return he; |
|---|
| 605 | + } |
|---|
| 606 | + /* An object might have been moved to a different hash chain |
|---|
| 607 | + * while we walk along it - better check and retry. |
|---|
| 608 | + */ |
|---|
| 609 | + } while (he != RHT_NULLS_MARKER(bkt)); |
|---|
| 488 | 610 | |
|---|
| 489 | 611 | /* Ensure we see any new tables. */ |
|---|
| 490 | 612 | smp_rmb(); |
|---|
| .. | .. |
|---|
| 580 | 702 | .ht = ht, |
|---|
| 581 | 703 | .key = key, |
|---|
| 582 | 704 | }; |
|---|
| 705 | + struct rhash_lock_head __rcu **bkt; |
|---|
| 583 | 706 | struct rhash_head __rcu **pprev; |
|---|
| 584 | 707 | struct bucket_table *tbl; |
|---|
| 585 | 708 | struct rhash_head *head; |
|---|
| 586 | | - spinlock_t *lock; |
|---|
| 587 | 709 | unsigned int hash; |
|---|
| 588 | 710 | int elasticity; |
|---|
| 589 | 711 | void *data; |
|---|
| .. | .. |
|---|
| 592 | 714 | |
|---|
| 593 | 715 | tbl = rht_dereference_rcu(ht->tbl, ht); |
|---|
| 594 | 716 | hash = rht_head_hashfn(ht, tbl, obj, params); |
|---|
| 595 | | - lock = rht_bucket_lock(tbl, hash); |
|---|
| 596 | | - spin_lock_bh(lock); |
|---|
| 717 | + elasticity = RHT_ELASTICITY; |
|---|
| 718 | + bkt = rht_bucket_insert(ht, tbl, hash); |
|---|
| 719 | + data = ERR_PTR(-ENOMEM); |
|---|
| 720 | + if (!bkt) |
|---|
| 721 | + goto out; |
|---|
| 722 | + pprev = NULL; |
|---|
| 723 | + rht_lock(tbl, bkt); |
|---|
| 597 | 724 | |
|---|
| 598 | 725 | if (unlikely(rcu_access_pointer(tbl->future_tbl))) { |
|---|
| 599 | 726 | slow_path: |
|---|
| 600 | | - spin_unlock_bh(lock); |
|---|
| 727 | + rht_unlock(tbl, bkt); |
|---|
| 601 | 728 | rcu_read_unlock(); |
|---|
| 602 | 729 | return rhashtable_insert_slow(ht, key, obj); |
|---|
| 603 | 730 | } |
|---|
| 604 | 731 | |
|---|
| 605 | | - elasticity = RHT_ELASTICITY; |
|---|
| 606 | | - pprev = rht_bucket_insert(ht, tbl, hash); |
|---|
| 607 | | - data = ERR_PTR(-ENOMEM); |
|---|
| 608 | | - if (!pprev) |
|---|
| 609 | | - goto out; |
|---|
| 610 | | - |
|---|
| 611 | | - rht_for_each_continue(head, *pprev, tbl, hash) { |
|---|
| 732 | + rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) { |
|---|
| 612 | 733 | struct rhlist_head *plist; |
|---|
| 613 | 734 | struct rhlist_head *list; |
|---|
| 614 | 735 | |
|---|
| .. | .. |
|---|
| 624 | 745 | data = rht_obj(ht, head); |
|---|
| 625 | 746 | |
|---|
| 626 | 747 | if (!rhlist) |
|---|
| 627 | | - goto out; |
|---|
| 748 | + goto out_unlock; |
|---|
| 628 | 749 | |
|---|
| 629 | 750 | |
|---|
| 630 | 751 | list = container_of(obj, struct rhlist_head, rhead); |
|---|
| .. | .. |
|---|
| 633 | 754 | RCU_INIT_POINTER(list->next, plist); |
|---|
| 634 | 755 | head = rht_dereference_bucket(head->next, tbl, hash); |
|---|
| 635 | 756 | RCU_INIT_POINTER(list->rhead.next, head); |
|---|
| 636 | | - rcu_assign_pointer(*pprev, obj); |
|---|
| 637 | | - |
|---|
| 638 | | - goto good; |
|---|
| 757 | + if (pprev) { |
|---|
| 758 | + rcu_assign_pointer(*pprev, obj); |
|---|
| 759 | + rht_unlock(tbl, bkt); |
|---|
| 760 | + } else |
|---|
| 761 | + rht_assign_unlock(tbl, bkt, obj); |
|---|
| 762 | + data = NULL; |
|---|
| 763 | + goto out; |
|---|
| 639 | 764 | } |
|---|
| 640 | 765 | |
|---|
| 641 | 766 | if (elasticity <= 0) |
|---|
| .. | .. |
|---|
| 643 | 768 | |
|---|
| 644 | 769 | data = ERR_PTR(-E2BIG); |
|---|
| 645 | 770 | if (unlikely(rht_grow_above_max(ht, tbl))) |
|---|
| 646 | | - goto out; |
|---|
| 771 | + goto out_unlock; |
|---|
| 647 | 772 | |
|---|
| 648 | 773 | if (unlikely(rht_grow_above_100(ht, tbl))) |
|---|
| 649 | 774 | goto slow_path; |
|---|
| 650 | 775 | |
|---|
| 651 | | - head = rht_dereference_bucket(*pprev, tbl, hash); |
|---|
| 776 | + /* Inserting at head of list makes unlocking free. */ |
|---|
| 777 | + head = rht_ptr(bkt, tbl, hash); |
|---|
| 652 | 778 | |
|---|
| 653 | 779 | RCU_INIT_POINTER(obj->next, head); |
|---|
| 654 | 780 | if (rhlist) { |
|---|
| .. | .. |
|---|
| 658 | 784 | RCU_INIT_POINTER(list->next, NULL); |
|---|
| 659 | 785 | } |
|---|
| 660 | 786 | |
|---|
| 661 | | - rcu_assign_pointer(*pprev, obj); |
|---|
| 662 | | - |
|---|
| 663 | 787 | atomic_inc(&ht->nelems); |
|---|
| 788 | + rht_assign_unlock(tbl, bkt, obj); |
|---|
| 789 | + |
|---|
| 664 | 790 | if (rht_grow_above_75(ht, tbl)) |
|---|
| 665 | 791 | schedule_work(&ht->run_work); |
|---|
| 666 | 792 | |
|---|
| 667 | | -good: |
|---|
| 668 | 793 | data = NULL; |
|---|
| 669 | | - |
|---|
| 670 | 794 | out: |
|---|
| 671 | | - spin_unlock_bh(lock); |
|---|
| 672 | 795 | rcu_read_unlock(); |
|---|
| 673 | 796 | |
|---|
| 674 | 797 | return data; |
|---|
| 798 | + |
|---|
| 799 | +out_unlock: |
|---|
| 800 | + rht_unlock(tbl, bkt); |
|---|
| 801 | + goto out; |
|---|
| 675 | 802 | } |
|---|
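
For context, a hedged sketch of how a user drives this insert path through the public wrapper (object type and params are hypothetical; `rhashtable_insert_fast()` is the header's documented entry point):

```c
/* Hypothetical rhashtable user. */
struct example_obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params example_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct example_obj, key),
	.head_offset	= offsetof(struct example_obj, node),
	.automatic_shrinking = true,
};

static int example_insert(struct rhashtable *ht, struct example_obj *obj)
{
	/* Safe from atomic context; takes only the per-bucket bit lock. */
	return rhashtable_insert_fast(ht, &obj->node, example_params);
}
```
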
| 676 | 803 | |
|---|
| 677 | 804 | /** |
|---|
| .. | .. |
|---|
| 680 | 807 | * @obj: pointer to hash head inside object |
|---|
| 681 | 808 | * @params: hash table parameters |
|---|
| 682 | 809 | * |
|---|
| 683 | | - * Will take a per bucket spinlock to protect against mutual mutations |
|---|
| 810 | + * Will take the per bucket bitlock to protect against mutual mutations |
|---|
| 684 | 811 | * on the same bucket. Multiple insertions may occur in parallel unless |
|---|
| 685 | | - * they map to the same bucket lock. |
|---|
| 812 | + * they map to the same bucket. |
|---|
| 686 | 813 | * |
|---|
| 687 | 814 | * It is safe to call this function from atomic context. |
|---|
| 688 | 815 | * |
|---|
| .. | .. |
|---|
| 709 | 836 | * @list: pointer to hash list head inside object |
|---|
| 710 | 837 | * @params: hash table parameters |
|---|
| 711 | 838 | * |
|---|
| 712 | | - * Will take a per bucket spinlock to protect against mutual mutations |
|---|
| 839 | + * Will take the per bucket bitlock to protect against mutual mutations |
|---|
| 713 | 840 | * on the same bucket. Multiple insertions may occur in parallel unless |
|---|
| 714 | | - * they map to the same bucket lock. |
|---|
| 841 | + * they map to the same bucket. |
|---|
| 715 | 842 | * |
|---|
| 716 | 843 | * It is safe to call this function from atomic context. |
|---|
| 717 | 844 | * |
|---|
| .. | .. |
|---|
| 732 | 859 | * @list: pointer to hash list head inside object |
|---|
| 733 | 860 | * @params: hash table parameters |
|---|
| 734 | 861 | * |
|---|
| 735 | | - * Will take a per bucket spinlock to protect against mutual mutations |
|---|
| 862 | + * Will take the per bucket bitlock to protect against mutual mutations |
|---|
| 736 | 863 | * on the same bucket. Multiple insertions may occur in parallel unless |
|---|
| 737 | | - * they map to the same bucket lock. |
|---|
| 864 | + * they map to the same bucket. |
|---|
| 738 | 865 | * |
|---|
| 739 | 866 | * It is safe to call this function from atomic context. |
|---|
| 740 | 867 | * |
|---|
| .. | .. |
|---|
| 757 | 884 | * @ht: hash table |
|---|
| 758 | 885 | * @obj: pointer to hash head inside object |
|---|
| 759 | 886 | * @params: hash table parameters |
|---|
| 760 | | - * |
|---|
| 761 | | - * Locks down the bucket chain in both the old and new table if a resize |
|---|
| 762 | | - * is in progress to ensure that writers can't remove from the old table |
|---|
| 763 | | - * and can't insert to the new table during the atomic operation of search |
|---|
| 764 | | - * and insertion. Searches for duplicates in both the old and new table if |
|---|
| 765 | | - * a resize is in progress. |
|---|
| 766 | 887 | * |
|---|
| 767 | 888 | * This lookup function may only be used for fixed key hash table (key_len |
|---|
| 768 | 889 | * parameter set). It will BUG() if used inappropriately. |
|---|
| .. | .. |
|---|
| 819 | 940 | * @obj: pointer to hash head inside object |
|---|
| 820 | 941 | * @params: hash table parameters |
|---|
| 821 | 942 | * |
|---|
| 822 | | - * Locks down the bucket chain in both the old and new table if a resize |
|---|
| 823 | | - * is in progress to ensure that writers can't remove from the old table |
|---|
| 824 | | - * and can't insert to the new table during the atomic operation of search |
|---|
| 825 | | - * and insertion. Searches for duplicates in both the old and new table if |
|---|
| 826 | | - * a resize is in progress. |
|---|
| 827 | | - * |
|---|
| 828 | 943 | * Lookups may occur in parallel with hashtable mutations and resizing. |
|---|
| 829 | 944 | * |
|---|
| 830 | 945 | * Will trigger an automatic deferred table resizing if residency in the |
|---|
| .. | .. |
|---|
| 850 | 965 | /** |
|---|
| 851 | 966 | * rhashtable_lookup_get_insert_key - lookup and insert object into hash table |
|---|
| 852 | 967 | * @ht: hash table |
|---|
| 968 | + * @key: key to use for lookup and insertion |
|---|
| 853 | 969 | * @obj: pointer to hash head inside object |
|---|
| 854 | 970 | * @params: hash table parameters |
|---|
| 855 | | - * @data: pointer to element data already in hashes |
|---|
| 856 | 971 | * |
|---|
| 857 | 972 | * Just like rhashtable_lookup_insert_key(), but this function returns the |
|---|
| 858 | 973 | * object if it exists, NULL if it does not and the insertion was successful, |
|---|
| .. | .. |
|---|
| 873 | 988 | struct rhash_head *obj, const struct rhashtable_params params, |
|---|
| 874 | 989 | bool rhlist) |
|---|
| 875 | 990 | { |
|---|
| 991 | + struct rhash_lock_head __rcu **bkt; |
|---|
| 876 | 992 | struct rhash_head __rcu **pprev; |
|---|
| 877 | 993 | struct rhash_head *he; |
|---|
| 878 | | - spinlock_t * lock; |
|---|
| 879 | 994 | unsigned int hash; |
|---|
| 880 | 995 | int err = -ENOENT; |
|---|
| 881 | 996 | |
|---|
| 882 | 997 | hash = rht_head_hashfn(ht, tbl, obj, params); |
|---|
| 883 | | - lock = rht_bucket_lock(tbl, hash); |
|---|
| 998 | + bkt = rht_bucket_var(tbl, hash); |
|---|
| 999 | + if (!bkt) |
|---|
| 1000 | + return -ENOENT; |
|---|
| 1001 | + pprev = NULL; |
|---|
| 1002 | + rht_lock(tbl, bkt); |
|---|
| 884 | 1003 | |
|---|
| 885 | | - spin_lock_bh(lock); |
|---|
| 886 | | - |
|---|
| 887 | | - pprev = rht_bucket_var(tbl, hash); |
|---|
| 888 | | - rht_for_each_continue(he, *pprev, tbl, hash) { |
|---|
| 1004 | + rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) { |
|---|
| 889 | 1005 | struct rhlist_head *list; |
|---|
| 890 | 1006 | |
|---|
| 891 | 1007 | list = container_of(he, struct rhlist_head, rhead); |
|---|
| .. | .. |
|---|
| 925 | 1041 | } |
|---|
| 926 | 1042 | } |
|---|
| 927 | 1043 | |
|---|
| 928 | | - rcu_assign_pointer(*pprev, obj); |
|---|
| 929 | | - break; |
|---|
| 1044 | + if (pprev) { |
|---|
| 1045 | + rcu_assign_pointer(*pprev, obj); |
|---|
| 1046 | + rht_unlock(tbl, bkt); |
|---|
| 1047 | + } else { |
|---|
| 1048 | + rht_assign_unlock(tbl, bkt, obj); |
|---|
| 1049 | + } |
|---|
| 1050 | + goto unlocked; |
|---|
| 930 | 1051 | } |
|---|
| 931 | 1052 | |
|---|
| 932 | | - spin_unlock_bh(lock); |
|---|
| 933 | | - |
|---|
| 1053 | + rht_unlock(tbl, bkt); |
|---|
| 1054 | +unlocked: |
|---|
| 934 | 1055 | if (err > 0) { |
|---|
| 935 | 1056 | atomic_dec(&ht->nelems); |
|---|
| 936 | 1057 | if (unlikely(ht->p.automatic_shrinking && |
|---|
| .. | .. |
|---|
| 1019 | 1140 | struct rhash_head *obj_old, struct rhash_head *obj_new, |
|---|
| 1020 | 1141 | const struct rhashtable_params params) |
|---|
| 1021 | 1142 | { |
|---|
| 1143 | + struct rhash_lock_head __rcu **bkt; |
|---|
| 1022 | 1144 | struct rhash_head __rcu **pprev; |
|---|
| 1023 | 1145 | struct rhash_head *he; |
|---|
| 1024 | | - spinlock_t *lock; |
|---|
| 1025 | 1146 | unsigned int hash; |
|---|
| 1026 | 1147 | int err = -ENOENT; |
|---|
| 1027 | 1148 | |
|---|
| .. | .. |
|---|
| 1032 | 1153 | if (hash != rht_head_hashfn(ht, tbl, obj_new, params)) |
|---|
| 1033 | 1154 | return -EINVAL; |
|---|
| 1034 | 1155 | |
|---|
| 1035 | | - lock = rht_bucket_lock(tbl, hash); |
|---|
| 1156 | + bkt = rht_bucket_var(tbl, hash); |
|---|
| 1157 | + if (!bkt) |
|---|
| 1158 | + return -ENOENT; |
|---|
| 1036 | 1159 | |
|---|
| 1037 | | - spin_lock_bh(lock); |
|---|
| 1160 | + pprev = NULL; |
|---|
| 1161 | + rht_lock(tbl, bkt); |
|---|
| 1038 | 1162 | |
|---|
| 1039 | | - pprev = rht_bucket_var(tbl, hash); |
|---|
| 1040 | | - rht_for_each_continue(he, *pprev, tbl, hash) { |
|---|
| 1163 | + rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) { |
|---|
| 1041 | 1164 | if (he != obj_old) { |
|---|
| 1042 | 1165 | pprev = &he->next; |
|---|
| 1043 | 1166 | continue; |
|---|
| 1044 | 1167 | } |
|---|
| 1045 | 1168 | |
|---|
| 1046 | 1169 | rcu_assign_pointer(obj_new->next, obj_old->next); |
|---|
| 1047 | | - rcu_assign_pointer(*pprev, obj_new); |
|---|
| 1170 | + if (pprev) { |
|---|
| 1171 | + rcu_assign_pointer(*pprev, obj_new); |
|---|
| 1172 | + rht_unlock(tbl, bkt); |
|---|
| 1173 | + } else { |
|---|
| 1174 | + rht_assign_unlock(tbl, bkt, obj_new); |
|---|
| 1175 | + } |
|---|
| 1048 | 1176 | err = 0; |
|---|
| 1049 | | - break; |
|---|
| 1177 | + goto unlocked; |
|---|
| 1050 | 1178 | } |
|---|
| 1051 | 1179 | |
|---|
| 1052 | | - spin_unlock_bh(lock); |
|---|
| 1180 | + rht_unlock(tbl, bkt); |
|---|
| 1053 | 1181 | |
|---|
| 1182 | +unlocked: |
|---|
| 1054 | 1183 | return err; |
|---|
| 1055 | 1184 | } |
|---|
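
The `pprev` convention used by the remove and replace paths above deserves a standalone sketch (hypothetical, simplified, no RCU or locking annotations): while `pprev` is still NULL the cursor is at the bucket head, so the final store must go through the bucket word itself, which also releases the bit lock; an interior relink leaves the bucket word alone and needs an explicit unlock.

```c
/* Simplified unlink skeleton mirroring the pprev logic above
 * (hypothetical types; caller holds the bucket bit lock). */
struct node { struct node *next; };

static void demo_unlink(unsigned long *bkt, struct node *head,
			struct node *victim)
{
	struct node **pprev = NULL;
	struct node *he;

	for (he = head; he; pprev = &he->next, he = he->next) {
		if (he != victim)
			continue;
		if (pprev)		/* interior: plain relink; the
					 * bucket word keeps its lock bit */
			*pprev = he->next;
		else			/* head: storing a clean pointer
					 * drops BIT(0), i.e. unlocks */
			*bkt = (unsigned long)he->next;
		return;
	}
}
```
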
| 1056 | 1185 | |
|---|
| .. | .. |
|---|
| 1093 | 1222 | rcu_read_unlock(); |
|---|
| 1094 | 1223 | |
|---|
| 1095 | 1224 | return err; |
|---|
| 1096 | | -} |
|---|
| 1097 | | - |
|---|
| 1098 | | -/* Obsolete function, do not use in new code. */ |
|---|
| 1099 | | -static inline int rhashtable_walk_init(struct rhashtable *ht, |
|---|
| 1100 | | - struct rhashtable_iter *iter, gfp_t gfp) |
|---|
| 1101 | | -{ |
|---|
| 1102 | | - rhashtable_walk_enter(ht, iter); |
|---|
| 1103 | | - return 0; |
|---|
| 1104 | 1225 | } |
|---|
| 1105 | 1226 | |
|---|
| 1106 | 1227 | /** |
|---|