@@ -307,10 +307,10 @@
 	__seqprop_case((s),	mutex,		prop),			\
 	__seqprop_case((s),	ww_mutex,	prop))
 
-#define __seqcount_ptr(s)		__seqprop(s, ptr)
-#define __seqcount_sequence(s)		__seqprop(s, sequence)
-#define __seqcount_lock_preemptible(s)	__seqprop(s, preemptible)
-#define __seqcount_assert_lock_held(s)	__seqprop(s, assert)
+#define seqprop_ptr(s)			__seqprop(s, ptr)
+#define seqprop_sequence(s)		__seqprop(s, sequence)
+#define seqprop_preemptible(s)		__seqprop(s, preemptible)
+#define seqprop_assert(s)		__seqprop(s, assert)
 
 /**
  * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
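The seqprop_*() wrappers above all funnel into __seqprop(), which dispatches on the static type of the seqcount via _Generic, as the mutex/ww_mutex __seqprop_case() entries in the surrounding context suggest. A minimal userspace sketch of that dispatch pattern, with simplified stand-in types (this is not the kernel's exact macro):

#include <stdio.h>

typedef struct { unsigned sequence; } seqcount_t;
typedef struct { seqcount_t seqcount; } seqcount_spinlock_t;	/* lock pointer elided */

static unsigned plain_sequence(const seqcount_t *s)
{
	return s->sequence;
}

static unsigned spinlock_sequence(const seqcount_spinlock_t *s)
{
	return s->seqcount.sequence;
}

/* Dispatch on the type of *(s), like the kernel's __seqprop(s, sequence). */
#define seqprop_sequence(s)						\
	_Generic(*(s),							\
		 seqcount_t:		plain_sequence,			\
		 seqcount_spinlock_t:	spinlock_sequence)(s)

int main(void)
{
	seqcount_t sc = { .sequence = 2 };
	seqcount_spinlock_t scl = { .seqcount = { .sequence = 4 } };

	printf("%u %u\n", seqprop_sequence(&sc), seqprop_sequence(&scl));
	return 0;
}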
@@ -328,13 +328,13 @@
  */
 #define __read_seqcount_begin(s)					\
 ({									\
-	unsigned seq;							\
+	unsigned __seq;							\
 									\
-	while ((seq = __seqcount_sequence(s)) & 1)			\
+	while ((__seq = seqprop_sequence(s)) & 1)			\
 		cpu_relax();						\
 									\
 	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
-	seq;								\
+	__seq;								\
 })
 
 /**
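Two things happen in this hunk: the generic seqprop_sequence() accessor replaces __seqcount_sequence(), and the local is renamed seq -> __seq so that macros which expand __read_seqcount_begin() inside their own statement expression (raw_read_seqcount_begin() below uses _seq) do not shadow one another's locals. For a plain seqcount_t, the loop is roughly equivalent to this open-coded helper (illustrative only; my_read_begin is a made-up name):

/* Spin until no writer holds the count odd, then return the even
 * snapshot. Note: unlike raw_read_seqcount_begin(), no smp_rmb() is
 * issued here; the caller must supply the read barrier. */
static inline unsigned my_read_begin(const seqcount_t *s)
{
	unsigned seq;

	while ((seq = READ_ONCE(s->sequence)) & 1)
		cpu_relax();

	return seq;
}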
@@ -345,10 +345,10 @@
  */
 #define raw_read_seqcount_begin(s)					\
 ({									\
-	unsigned seq = __read_seqcount_begin(s);			\
+	unsigned _seq = __read_seqcount_begin(s);			\
 									\
 	smp_rmb();							\
-	seq;								\
+	_seq;								\
 })
 
 /**
@@ -359,7 +359,7 @@
  */
 #define read_seqcount_begin(s)						\
 ({									\
-	seqcount_lockdep_reader_access(__seqcount_ptr(s));		\
+	seqcount_lockdep_reader_access(seqprop_ptr(s));			\
 	raw_read_seqcount_begin(s);					\
 })
 
@@ -376,11 +376,11 @@
  */
 #define raw_read_seqcount(s)						\
 ({									\
-	unsigned seq = __seqcount_sequence(s);				\
+	unsigned __seq = seqprop_sequence(s);				\
 									\
 	smp_rmb();							\
 	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
-	seq;								\
+	__seq;								\
 })
 
 /**
@@ -425,9 +425,9 @@
  * Return: true if a read section retry is required, else false
  */
 #define __read_seqcount_retry(s, start)					\
-	__read_seqcount_t_retry(__seqcount_ptr(s), start)
+	do___read_seqcount_retry(seqprop_ptr(s), start)
 
-static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	kcsan_atomic_next(0);
 	return unlikely(READ_ONCE(s->sequence) != start);
@@ -445,12 +445,12 @@
  * Return: true if a read section retry is required, else false
  */
 #define read_seqcount_retry(s, start)					\
-	read_seqcount_t_retry(__seqcount_ptr(s), start)
+	do_read_seqcount_retry(seqprop_ptr(s), start)
 
-static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	smp_rmb();
-	return __read_seqcount_t_retry(s, start);
+	return do___read_seqcount_retry(s, start);
 }
 
 /**
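Putting the begin/retry halves together, the canonical lockless read side looks like the sketch below (stats_seq and stats_val are hypothetical names; a plain seqcount_t writer must be serialized externally):

static seqcount_t stats_seq = SEQCNT_ZERO(stats_seq);
static u64 stats_val;

static u64 stats_read(void)
{
	unsigned seq;
	u64 val;

	do {
		/* Snapshot an even sequence, then copy the data. */
		seq = read_seqcount_begin(&stats_seq);
		val = stats_val;
		/* Retry if a writer was active or raced with the copy. */
	} while (read_seqcount_retry(&stats_seq, seq));

	return val;
}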
@@ -459,13 +459,13 @@
  */
 #define raw_write_seqcount_begin(s)					\
 do {									\
-	if (__seqcount_lock_preemptible(s))				\
+	if (seqprop_preemptible(s))					\
 		preempt_disable();					\
 									\
-	raw_write_seqcount_t_begin(__seqcount_ptr(s));			\
+	do_raw_write_seqcount_begin(seqprop_ptr(s));			\
 } while (0)
 
-static inline void raw_write_seqcount_t_begin(seqcount_t *s)
+static inline void do_raw_write_seqcount_begin(seqcount_t *s)
 {
 	kcsan_nestable_atomic_begin();
 	s->sequence++;
@@ -478,13 +478,13 @@
  */
 #define raw_write_seqcount_end(s)					\
 do {									\
-	raw_write_seqcount_t_end(__seqcount_ptr(s));			\
+	do_raw_write_seqcount_end(seqprop_ptr(s));			\
 									\
-	if (__seqcount_lock_preemptible(s))				\
+	if (seqprop_preemptible(s))					\
 		preempt_enable();					\
 } while (0)
 
-static inline void raw_write_seqcount_t_end(seqcount_t *s)
+static inline void do_raw_write_seqcount_end(seqcount_t *s)
 {
 	smp_wmb();
 	s->sequence++;
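A matching write side for the hypothetical stats_seq above, using the raw_*() variants since a plain seqcount_t has no associated lock for seqprop_assert() to check (sketch only; the caller must serialize writers and keep the write section non-preemptible):

static void stats_update(u64 v)	/* caller serializes writers */
{
	raw_write_seqcount_begin(&stats_seq);	/* sequence becomes odd */
	stats_val = v;
	raw_write_seqcount_end(&stats_seq);	/* sequence even again */
}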
@@ -501,18 +501,18 @@
  */
 #define write_seqcount_begin_nested(s, subclass)			\
 do {									\
-	__seqcount_assert_lock_held(s);					\
+	seqprop_assert(s);						\
 									\
-	if (__seqcount_lock_preemptible(s))				\
+	if (seqprop_preemptible(s))					\
 		preempt_disable();					\
 									\
-	write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass);	\
+	do_write_seqcount_begin_nested(seqprop_ptr(s), subclass);	\
 } while (0)
 
-static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
+static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
 {
-	raw_write_seqcount_t_begin(s);
 	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+	do_raw_write_seqcount_begin(s);
 }
 
 /**
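Besides the rename, this hunk reorders do_write_seqcount_begin_nested(): the lockdep acquire is now recorded before the count is made odd, so a lockdep report about bad nesting can fire before readers are left spinning on an odd sequence (that rationale is inferred, not stated in the hunk). A hypothetical use of the subclass argument:

/* Hypothetical: when one write section must open inside another that
 * lockdep considers the same class, the inner one passes a subclass to
 * document the nesting as intentional. Plain seqcount_t writers must
 * run with preemption disabled. */
static seqcount_t outer_seq = SEQCNT_ZERO(outer_seq);
static seqcount_t inner_seq = SEQCNT_ZERO(inner_seq);

static void update_both(void)	/* writers serialized by the caller */
{
	preempt_disable();
	write_seqcount_begin(&outer_seq);
	write_seqcount_begin_nested(&inner_seq, SINGLE_DEPTH_NESTING);
	/* ... update data covered by both counts ... */
	write_seqcount_end(&inner_seq);
	write_seqcount_end(&outer_seq);
	preempt_enable();
}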
@@ -528,17 +528,17 @@
  */
 #define write_seqcount_begin(s)						\
 do {									\
-	__seqcount_assert_lock_held(s);					\
+	seqprop_assert(s);						\
 									\
-	if (__seqcount_lock_preemptible(s))				\
+	if (seqprop_preemptible(s))					\
 		preempt_disable();					\
 									\
-	write_seqcount_t_begin(__seqcount_ptr(s));			\
+	do_write_seqcount_begin(seqprop_ptr(s));			\
 } while (0)
 
-static inline void write_seqcount_t_begin(seqcount_t *s)
+static inline void do_write_seqcount_begin(seqcount_t *s)
 {
-	write_seqcount_t_begin_nested(s, 0);
+	do_write_seqcount_begin_nested(s, 0);
 }
 
 /**
@@ -549,16 +549,16 @@
  */
 #define write_seqcount_end(s)						\
 do {									\
-	write_seqcount_t_end(__seqcount_ptr(s));			\
+	do_write_seqcount_end(seqprop_ptr(s));				\
 									\
-	if (__seqcount_lock_preemptible(s))				\
+	if (seqprop_preemptible(s))					\
 		preempt_enable();					\
 } while (0)
 
-static inline void write_seqcount_t_end(seqcount_t *s)
+static inline void do_write_seqcount_end(seqcount_t *s)
 {
 	seqcount_release(&s->dep_map, _RET_IP_);
-	raw_write_seqcount_t_end(s);
+	do_raw_write_seqcount_end(s);
 }
 
 /**
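With a locked seqcount variant, write_seqcount_begin()/write_seqcount_end() give the full checking shown above: seqprop_assert() verifies the associated lock is held, and seqprop_preemptible() disables preemption across the section when the lock itself does not. A sketch with hypothetical names:

static DEFINE_SPINLOCK(pair_lock);
static seqcount_spinlock_t pair_seq = SEQCNT_SPINLOCK_ZERO(pair_seq, &pair_lock);
static u64 pair_a, pair_b;

static void pair_update(u64 a, u64 b)
{
	spin_lock(&pair_lock);		/* satisfies seqprop_assert() */
	write_seqcount_begin(&pair_seq);
	pair_a = a;
	pair_b = b;
	write_seqcount_end(&pair_seq);
	spin_unlock(&pair_lock);
}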
@@ -603,9 +603,9 @@
  * }
  */
 #define raw_write_seqcount_barrier(s)					\
-	raw_write_seqcount_t_barrier(__seqcount_ptr(s))
+	do_raw_write_seqcount_barrier(seqprop_ptr(s))
 
-static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
+static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
 {
 	kcsan_nestable_atomic_begin();
 	s->sequence++;
@@ -623,9 +623,9 @@
  * will complete successfully and see data older than this.
  */
 #define write_seqcount_invalidate(s)					\
-	write_seqcount_t_invalidate(__seqcount_ptr(s))
+	do_write_seqcount_invalidate(seqprop_ptr(s))
 
-static inline void write_seqcount_t_invalidate(seqcount_t *s)
+static inline void do_write_seqcount_invalidate(seqcount_t *s)
 {
 	smp_wmb();
 	kcsan_nestable_atomic_begin();
@@ -862,9 +862,9 @@
 }
 
 /*
- * For all seqlock_t write side functions, use write_seqcount_*t*_begin()
- * instead of the generic write_seqcount_begin(). This way, no redundant
- * lockdep_assert_held() checks are added.
+ * For all seqlock_t write side functions, use the internal
+ * do_write_seqcount_begin() instead of the generic write_seqcount_begin().
+ * This way, no redundant lockdep_assert_held() checks are added.
  */
 
 /**
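In other words, write_seqlock() below takes sl->lock itself, so asserting that the very same lock is held would be pure overhead; the internal do_write_seqcount_begin() skips the assertion but keeps the lockdep acquire of the seqcount. Typical usage (hypothetical names):

static DEFINE_SEQLOCK(foo_seqlock);
static u64 foo_value;

static void foo_update(u64 v)
{
	write_seqlock(&foo_seqlock);	/* spin_lock + open write section */
	foo_value = v;
	write_sequnlock(&foo_seqlock);	/* close section + spin_unlock */
}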
@@ -883,7 +883,7 @@
 static inline void write_seqlock(seqlock_t *sl)
 {
 	spin_lock(&sl->lock);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
 
 /**
@@ -895,7 +895,7 @@
  */
 static inline void write_sequnlock(seqlock_t *sl)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock(&sl->lock);
 }
 
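The matching lockless read side for a seqlock_t uses read_seqbegin()/read_seqretry(), which wrap the seqcount primitives renamed above (sketch, reusing the hypothetical foo_seqlock):

static u64 foo_read(void)
{
	unsigned seq;
	u64 val;

	do {
		seq = read_seqbegin(&foo_seqlock);
		val = foo_value;
	} while (read_seqretry(&foo_seqlock, seq));

	return val;
}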
@@ -909,7 +909,7 @@
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
 	spin_lock_bh(&sl->lock);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
 
 /**
@@ -922,7 +922,7 @@
  */
 static inline void write_sequnlock_bh(seqlock_t *sl)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_bh(&sl->lock);
 }
 
@@ -936,7 +936,7 @@
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
 	spin_lock_irq(&sl->lock);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
 
 /**
@@ -948,7 +948,7 @@
  */
 static inline void write_sequnlock_irq(seqlock_t *sl)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_irq(&sl->lock);
 }
 
@@ -957,7 +957,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&sl->lock, flags);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 	return flags;
 }
 
@@ -986,7 +986,7 @@
 static inline void
 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
 