2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/include/linux/seqlock.h
@@ -307,10 +307,10 @@
 		__seqprop_case((s),	mutex,		prop),		\
 		__seqprop_case((s),	ww_mutex,	prop))
 
-#define __seqcount_ptr(s)		__seqprop(s, ptr)
-#define __seqcount_sequence(s)		__seqprop(s, sequence)
-#define __seqcount_lock_preemptible(s)	__seqprop(s, preemptible)
-#define __seqcount_assert_lock_held(s)	__seqprop(s, assert)
+#define seqprop_ptr(s)			__seqprop(s, ptr)
+#define seqprop_sequence(s)		__seqprop(s, sequence)
+#define seqprop_preemptible(s)		__seqprop(s, preemptible)
+#define seqprop_assert(s)		__seqprop(s, assert)
 
 /**
  * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
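The seqprop_*() accessors above are built on __seqprop(), which dispatches on
the static type of the seqcount via C11 _Generic (visible in the
__seqprop_case() context lines). A minimal standalone sketch of that dispatch
idea, not the kernel's actual macro, with made-up struct and function names:

	#include <stdio.h>

	struct seqcount        { unsigned sequence; };
	struct seqcount_locked { struct seqcount seqcount; /* + lock ptr */ };

	static unsigned seq_plain(struct seqcount *s)         { return s->sequence; }
	static unsigned seq_locked(struct seqcount_locked *s) { return s->seqcount.sequence; }

	/* Pick the accessor matching the argument's type at compile time. */
	#define seqprop_sequence(s)				\
		_Generic(*(s),					\
			 struct seqcount:        seq_plain,	\
			 struct seqcount_locked: seq_locked)(s)

	int main(void)
	{
		struct seqcount a        = { .sequence = 2 };
		struct seqcount_locked b = { .seqcount = { .sequence = 4 } };

		printf("%u %u\n", seqprop_sequence(&a), seqprop_sequence(&b));
		return 0;
	}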
@@ -328,13 +328,13 @@
  */
 #define __read_seqcount_begin(s)					\
 ({									\
-	unsigned seq;							\
+	unsigned __seq;							\
 									\
-	while ((seq = __seqcount_sequence(s)) & 1)			\
+	while ((__seq = seqprop_sequence(s)) & 1)			\
 		cpu_relax();						\
 									\
 	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
-	seq;								\
+	__seq;								\
 })
 
 /**
@@ -345,10 +345,10 @@
  */
 #define raw_read_seqcount_begin(s)					\
 ({									\
-	unsigned seq = __read_seqcount_begin(s);			\
+	unsigned _seq = __read_seqcount_begin(s);			\
 									\
 	smp_rmb();							\
-	seq;								\
+	_seq;								\
 })
 
 /**
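Worth noting: raw_read_seqcount_begin() nests __read_seqcount_begin() inside
its own statement expression, which is why the two hunks above pick distinct
local names (_seq vs. __seq) instead of a plain seq; with identical names the
inner declaration would shadow the outer one. Hand-expanding the new
raw_read_seqcount_begin(s) roughly gives:

	({
		unsigned _seq = ({
			unsigned __seq;
			while ((__seq = seqprop_sequence(s)) & 1)
				cpu_relax();
			kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
			__seq;
		});
		smp_rmb();
		_seq;
	})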
@@ -359,7 +359,7 @@
  */
 #define read_seqcount_begin(s)						\
 ({									\
-	seqcount_lockdep_reader_access(__seqcount_ptr(s));		\
+	seqcount_lockdep_reader_access(seqprop_ptr(s));			\
 	raw_read_seqcount_begin(s);					\
 })
 
@@ -376,11 +376,11 @@
  */
 #define raw_read_seqcount(s)						\
 ({									\
-	unsigned seq = __seqcount_sequence(s);				\
+	unsigned __seq = seqprop_sequence(s);				\
 									\
 	smp_rmb();							\
 	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
-	seq;								\
+	__seq;								\
 })
 
 /**
@@ -425,9 +425,9 @@
  * Return: true if a read section retry is required, else false
  */
 #define __read_seqcount_retry(s, start)					\
-	__read_seqcount_t_retry(__seqcount_ptr(s), start)
+	do___read_seqcount_retry(seqprop_ptr(s), start)
 
-static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	kcsan_atomic_next(0);
 	return unlikely(READ_ONCE(s->sequence) != start);
@@ -445,12 +445,12 @@
  * Return: true if a read section retry is required, else false
  */
 #define read_seqcount_retry(s, start)					\
-	read_seqcount_t_retry(__seqcount_ptr(s), start)
+	do_read_seqcount_retry(seqprop_ptr(s), start)
 
-static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	smp_rmb();
-	return __read_seqcount_t_retry(s, start);
+	return do___read_seqcount_retry(s, start);
 }
 
 /**
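Taken together, the begin/retry pair implements the classic lockless reader
loop. A hypothetical kernel-style sketch (foo_seq and the foo_* fields are
made-up names) of how a caller uses these macros:

	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
	static u64 foo_a, foo_b;

	/* Retry until both fields are read from one consistent generation. */
	static u64 foo_read_sum(void)
	{
		unsigned int seq;
		u64 a, b;

		do {
			seq = read_seqcount_begin(&foo_seq);
			a = foo_a;
			b = foo_b;
		} while (read_seqcount_retry(&foo_seq, seq));

		return a + b;
	}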
@@ -459,13 +459,13 @@
  */
 #define raw_write_seqcount_begin(s)					\
 do {									\
-	if (__seqcount_lock_preemptible(s))				\
+	if (seqprop_preemptible(s))					\
 		preempt_disable();					\
 									\
-	raw_write_seqcount_t_begin(__seqcount_ptr(s));			\
+	do_raw_write_seqcount_begin(seqprop_ptr(s));			\
 } while (0)
 
-static inline void raw_write_seqcount_t_begin(seqcount_t *s)
+static inline void do_raw_write_seqcount_begin(seqcount_t *s)
 {
 	kcsan_nestable_atomic_begin();
 	s->sequence++;
@@ -478,13 +478,13 @@
  */
 #define raw_write_seqcount_end(s)					\
 do {									\
-	raw_write_seqcount_t_end(__seqcount_ptr(s));			\
+	do_raw_write_seqcount_end(seqprop_ptr(s));			\
 									\
-	if (__seqcount_lock_preemptible(s))				\
+	if (seqprop_preemptible(s))					\
 		preempt_enable();					\
 } while (0)
 
-static inline void raw_write_seqcount_t_end(seqcount_t *s)
+static inline void do_raw_write_seqcount_end(seqcount_t *s)
 {
 	smp_wmb();
 	s->sequence++;
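For the raw_*() write side, the caller is responsible for serializing
writers. A hypothetical sketch pairing with the reader above (foo_lock is a
made-up spinlock; holding it also keeps the write section non-preemptible):

	static DEFINE_SPINLOCK(foo_lock);

	static void foo_update(u64 a, u64 b)
	{
		spin_lock(&foo_lock);
		raw_write_seqcount_begin(&foo_seq);
		foo_a = a;
		foo_b = b;
		raw_write_seqcount_end(&foo_seq);
		spin_unlock(&foo_lock);
	}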
@@ -501,18 +501,18 @@
  */
 #define write_seqcount_begin_nested(s, subclass)			\
 do {									\
-	__seqcount_assert_lock_held(s);					\
+	seqprop_assert(s);						\
 									\
-	if (__seqcount_lock_preemptible(s))				\
+	if (seqprop_preemptible(s))					\
 		preempt_disable();					\
 									\
-	write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass);	\
+	do_write_seqcount_begin_nested(seqprop_ptr(s), subclass);	\
 } while (0)
 
-static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
+static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
 {
-	raw_write_seqcount_t_begin(s);
 	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+	do_raw_write_seqcount_begin(s);
 }
 
 /**
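The _nested variant exists for the rare case where two seqcounts of the same
lockdep class are write-locked at once. A hypothetical sketch (struct pair
and pair_swap() are made up; SINGLE_DEPTH_NESTING comes from lockdep):

	struct pair {
		spinlock_t		lock;
		seqcount_spinlock_t	seq;
		u64			val;
	};

	static void pair_swap(struct pair *a, struct pair *b)
	{
		u64 tmp;

		spin_lock(&a->lock);
		spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
		write_seqcount_begin(&a->seq);
		write_seqcount_begin_nested(&b->seq, SINGLE_DEPTH_NESTING);

		tmp = a->val;
		a->val = b->val;
		b->val = tmp;

		write_seqcount_end(&b->seq);
		write_seqcount_end(&a->seq);
		spin_unlock(&b->lock);
		spin_unlock(&a->lock);
	}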
@@ -528,17 +528,17 @@
  */
 #define write_seqcount_begin(s)						\
 do {									\
-	__seqcount_assert_lock_held(s);					\
+	seqprop_assert(s);						\
 									\
-	if (__seqcount_lock_preemptible(s))				\
+	if (seqprop_preemptible(s))					\
 		preempt_disable();					\
 									\
-	write_seqcount_t_begin(__seqcount_ptr(s));			\
+	do_write_seqcount_begin(seqprop_ptr(s));			\
 } while (0)
 
-static inline void write_seqcount_t_begin(seqcount_t *s)
+static inline void do_write_seqcount_begin(seqcount_t *s)
 {
-	write_seqcount_t_begin_nested(s, 0);
+	do_write_seqcount_begin_nested(s, 0);
 }
 
 /**
@@ -549,16 +549,16 @@
  */
 #define write_seqcount_end(s)						\
 do {									\
-	write_seqcount_t_end(__seqcount_ptr(s));			\
+	do_write_seqcount_end(seqprop_ptr(s));				\
 									\
-	if (__seqcount_lock_preemptible(s))				\
+	if (seqprop_preemptible(s))					\
 		preempt_enable();					\
 } while (0)
 
-static inline void write_seqcount_t_end(seqcount_t *s)
+static inline void do_write_seqcount_end(seqcount_t *s)
 {
 	seqcount_release(&s->dep_map, _RET_IP_);
-	raw_write_seqcount_t_end(s);
+	do_raw_write_seqcount_end(s);
 }
 
 /**
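With an associated lock type such as seqcount_spinlock_t, seqprop_assert()
lets write_seqcount_begin() verify the lock is held, and seqprop_preemptible()
handles preemption for the preemptible lock types. A hypothetical sketch
(the bar_* names are made up):

	static DEFINE_SPINLOCK(bar_lock);
	static seqcount_spinlock_t bar_seq =
		SEQCNT_SPINLOCK_ZERO(bar_seq, &bar_lock);
	static unsigned long bar_val;

	static void bar_set(unsigned long v)
	{
		spin_lock(&bar_lock);
		write_seqcount_begin(&bar_seq);	/* lockdep-asserts bar_lock */
		bar_val = v;
		write_seqcount_end(&bar_seq);
		spin_unlock(&bar_lock);
	}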
@@ -603,9 +603,9 @@
  * }
  */
 #define raw_write_seqcount_barrier(s)					\
-	raw_write_seqcount_t_barrier(__seqcount_ptr(s))
+	do_raw_write_seqcount_barrier(seqprop_ptr(s))
 
-static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
+static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
 {
 	kcsan_nestable_atomic_begin();
 	s->sequence++;
@@ -623,9 +623,9 @@
  * will complete successfully and see data older than this.
  */
 #define write_seqcount_invalidate(s)					\
-	write_seqcount_t_invalidate(__seqcount_ptr(s))
+	do_write_seqcount_invalidate(seqprop_ptr(s))
 
-static inline void write_seqcount_t_invalidate(seqcount_t *s)
+static inline void do_write_seqcount_invalidate(seqcount_t *s)
 {
 	smp_wmb();
 	kcsan_nestable_atomic_begin();
@@ -862,9 +862,9 @@
 }
 
 /*
- * For all seqlock_t write side functions, use write_seqcount_*t*_begin()
- * instead of the generic write_seqcount_begin(). This way, no redundant
- * lockdep_assert_held() checks are added.
+ * For all seqlock_t write side functions, use the internal
+ * do_write_seqcount_begin() instead of generic write_seqcount_begin().
+ * This way, no redundant lockdep_assert_held() checks are added.
  */
 
 /**
@@ -883,7 +883,7 @@
 static inline void write_seqlock(seqlock_t *sl)
 {
 	spin_lock(&sl->lock);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
 
 /**
@@ -895,7 +895,7 @@
  */
 static inline void write_sequnlock(seqlock_t *sl)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock(&sl->lock);
 }
 
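For seqlock_t itself the lock and the count travel together, which is why
these functions can call do_write_seqcount_begin() directly. A hypothetical
usage sketch (the baz_* names are made up):

	static DEFINE_SEQLOCK(baz_lock);
	static u64 baz_val;

	static void baz_set(u64 v)
	{
		write_seqlock(&baz_lock);
		baz_val = v;
		write_sequnlock(&baz_lock);
	}

	static u64 baz_get(void)
	{
		unsigned int seq;
		u64 v;

		do {
			seq = read_seqbegin(&baz_lock);
			v = baz_val;
		} while (read_seqretry(&baz_lock, seq));

		return v;
	}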
@@ -909,7 +909,7 @@
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
 	spin_lock_bh(&sl->lock);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
 
 /**
@@ -922,7 +922,7 @@
  */
 static inline void write_sequnlock_bh(seqlock_t *sl)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_bh(&sl->lock);
 }
 
@@ -936,7 +936,7 @@
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
 	spin_lock_irq(&sl->lock);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
 
 /**
@@ -948,7 +948,7 @@
  */
 static inline void write_sequnlock_irq(seqlock_t *sl)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_irq(&sl->lock);
 }
 
@@ -957,7 +957,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&sl->lock, flags);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 	return flags;
 }
 
@@ -986,7 +986,7 @@
 static inline void
 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
 
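The irqsave flavor follows the same pattern; the public
write_seqlock_irqsave() macro takes flags as an lvalue argument rather than
by pointer. A hypothetical sketch reusing the made-up baz_lock above:

	static void baz_set_any_context(u64 v)
	{
		unsigned long flags;

		write_seqlock_irqsave(&baz_lock, flags);
		baz_val = v;
		write_sequnlock_irqrestore(&baz_lock, flags);
	}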