hc
2023-11-07 f45e756958099c35d6afb746df1d40a1c6302cfc
kernel/include/linux/seqlock.h
....@@ -221,18 +221,28 @@
221221 return __read_seqcount_retry(s, start);
222222 }
223223
224
-
225
-
226
-static inline void raw_write_seqcount_begin(seqcount_t *s)
224
+static inline void __raw_write_seqcount_begin(seqcount_t *s)
227225 {
228226 s->sequence++;
229227 smp_wmb();
230228 }
231229
232
-static inline void raw_write_seqcount_end(seqcount_t *s)
230
+static inline void raw_write_seqcount_begin(seqcount_t *s)
231
+{
232
+ preempt_disable_rt();
233
+ __raw_write_seqcount_begin(s);
234
+}
235
+
236
+static inline void __raw_write_seqcount_end(seqcount_t *s)
233237 {
234238 smp_wmb();
235239 s->sequence++;
240
+}
241
+
242
+static inline void raw_write_seqcount_end(seqcount_t *s)
243
+{
244
+ __raw_write_seqcount_end(s);
245
+ preempt_enable_rt();
236246 }
237247
238248 /**
....@@ -435,10 +445,33 @@
435445 /*
436446 * Read side functions for starting and finalizing a read side section.
437447 */
448
+#ifndef CONFIG_PREEMPT_RT_FULL
438449 static inline unsigned read_seqbegin(const seqlock_t *sl)
439450 {
440451 return read_seqcount_begin(&sl->seqcount);
441452 }
453
+#else
454
+/*
455
+ * Starvation safe read side for RT
456
+ */
457
+static inline unsigned read_seqbegin(seqlock_t *sl)
458
+{
459
+ unsigned ret;
460
+
461
+repeat:
462
+ ret = READ_ONCE(sl->seqcount.sequence);
463
+ if (unlikely(ret & 1)) {
464
+ /*
465
+	 * Take the lock and let the writer proceed (i.e. possibly
466
+ * boost it), otherwise we could loop here forever.
467
+ */
468
+ spin_unlock_wait(&sl->lock);
469
+ goto repeat;
470
+ }
471
+ smp_rmb();
472
+ return ret;
473
+}
474
+#endif
442475
443476 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
444477 {
....@@ -453,36 +486,45 @@
453486 static inline void write_seqlock(seqlock_t *sl)
454487 {
455488 spin_lock(&sl->lock);
456
- write_seqcount_begin(&sl->seqcount);
489
+ __raw_write_seqcount_begin(&sl->seqcount);
490
+}
491
+
492
+static inline int try_write_seqlock(seqlock_t *sl)
493
+{
494
+ if (spin_trylock(&sl->lock)) {
495
+ __raw_write_seqcount_begin(&sl->seqcount);
496
+ return 1;
497
+ }
498
+ return 0;
457499 }
458500
459501 static inline void write_sequnlock(seqlock_t *sl)
460502 {
461
- write_seqcount_end(&sl->seqcount);
503
+ __raw_write_seqcount_end(&sl->seqcount);
462504 spin_unlock(&sl->lock);
463505 }
464506
465507 static inline void write_seqlock_bh(seqlock_t *sl)
466508 {
467509 spin_lock_bh(&sl->lock);
468
- write_seqcount_begin(&sl->seqcount);
510
+ __raw_write_seqcount_begin(&sl->seqcount);
469511 }
470512
471513 static inline void write_sequnlock_bh(seqlock_t *sl)
472514 {
473
- write_seqcount_end(&sl->seqcount);
515
+ __raw_write_seqcount_end(&sl->seqcount);
474516 spin_unlock_bh(&sl->lock);
475517 }
476518
477519 static inline void write_seqlock_irq(seqlock_t *sl)
478520 {
479521 spin_lock_irq(&sl->lock);
480
- write_seqcount_begin(&sl->seqcount);
522
+ __raw_write_seqcount_begin(&sl->seqcount);
481523 }
482524
483525 static inline void write_sequnlock_irq(seqlock_t *sl)
484526 {
485
- write_seqcount_end(&sl->seqcount);
527
+ __raw_write_seqcount_end(&sl->seqcount);
486528 spin_unlock_irq(&sl->lock);
487529 }
488530
....@@ -491,7 +533,7 @@
491533 unsigned long flags;
492534
493535 spin_lock_irqsave(&sl->lock, flags);
494
- write_seqcount_begin(&sl->seqcount);
536
+ __raw_write_seqcount_begin(&sl->seqcount);
495537 return flags;
496538 }
497539
....@@ -501,7 +543,7 @@
501543 static inline void
502544 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
503545 {
504
- write_seqcount_end(&sl->seqcount);
546
+ __raw_write_seqcount_end(&sl->seqcount);
505547 spin_unlock_irqrestore(&sl->lock, flags);
506548 }
507549