.. | .. |
---|
221 | 221 | return __read_seqcount_retry(s, start); |
---|
222 | 222 | } |
---|
223 | 223 | |
---|
224 | | - |
---|
225 | | - |
---|
/*
 * __raw_write_seqcount_begin() - open a seqcount write section, with no
 * preemption or lockdep handling at all.
 *
 * Bumps the sequence to an odd value so that concurrent readers
 * (which retry while "sequence & 1") know a write is in progress, then
 * issues smp_wmb() so the odd sequence is visible before any of the
 * writer's subsequent data stores.  The caller must already provide
 * writer serialization (e.g. hold the seqlock's spinlock).
 */
static inline void __raw_write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}
---|
231 | 229 | |
---|
/*
 * raw_write_seqcount_begin() - open a seqcount write section.
 *
 * Disables preemption via preempt_disable_rt() before opening the
 * section (presumably a no-op on !PREEMPT_RT configs — verify against
 * the preempt_disable_rt() definition), so that an RT reader cannot
 * preempt the writer mid-update and spin forever on an odd sequence.
 * Must be paired with raw_write_seqcount_end().
 */
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
	preempt_disable_rt();
	__raw_write_seqcount_begin(s);
}
---|
| 235 | + |
---|
/*
 * __raw_write_seqcount_end() - close a seqcount write section, with no
 * preemption handling.
 *
 * smp_wmb() orders all of the writer's data stores before the final
 * sequence increment (back to an even value), so a reader that sees
 * the even sequence is guaranteed to see the completed update.
 * Mirror of __raw_write_seqcount_begin(); the barrier/increment order
 * is deliberately reversed relative to it.
 */
static inline void __raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}
---|
| 241 | + |
---|
/*
 * raw_write_seqcount_end() - close a seqcount write section.
 *
 * Closes the section first, then re-enables preemption with
 * preempt_enable_rt(); inverse of raw_write_seqcount_begin(), in the
 * opposite order.  The preemption state must not be restored before
 * the sequence returns to an even value, or RT readers could spin on
 * a preempted writer.
 */
static inline void raw_write_seqcount_end(seqcount_t *s)
{
	__raw_write_seqcount_end(s);
	preempt_enable_rt();
}
---|
237 | 247 | |
---|
238 | 248 | /** |
---|
.. | .. |
---|
435 | 445 | /* |
---|
436 | 446 | * Read side functions for starting and finalizing a read side section. |
---|
437 | 447 | */ |
---|
| 448 | +#ifndef CONFIG_PREEMPT_RT_FULL |
---|
438 | 449 | static inline unsigned read_seqbegin(const seqlock_t *sl) |
---|
439 | 450 | { |
---|
440 | 451 | return read_seqcount_begin(&sl->seqcount); |
---|
441 | 452 | } |
---|
| 453 | +#else |
---|
| 454 | +/* |
---|
| 455 | + * Starvation safe read side for RT |
---|
| 456 | + */ |
---|
| 457 | +static inline unsigned read_seqbegin(seqlock_t *sl) |
---|
| 458 | +{ |
---|
| 459 | + unsigned ret; |
---|
| 460 | + |
---|
| 461 | +repeat: |
---|
| 462 | + ret = READ_ONCE(sl->seqcount.sequence); |
---|
| 463 | + if (unlikely(ret & 1)) { |
---|
| 464 | + /* |
---|
| 465 | + * Take the lock and let the writer proceed (i.e. evtl |
---|
| 466 | + * boost it), otherwise we could loop here forever. |
---|
| 467 | + */ |
---|
| 468 | + spin_unlock_wait(&sl->lock); |
---|
| 469 | + goto repeat; |
---|
| 470 | + } |
---|
| 471 | + smp_rmb(); |
---|
| 472 | + return ret; |
---|
| 473 | +} |
---|
| 474 | +#endif |
---|
442 | 475 | |
---|
443 | 476 | static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) |
---|
444 | 477 | { |
---|
.. | .. |
---|
/*
 * write_seqlock() - acquire the seqlock_t write side.
 *
 * Takes the spinlock first (serializing against other writers and,
 * on RT, making the holder PI-boostable), then opens the seqcount
 * section with the __raw variant — the lock already provides the
 * required exclusion, so no separate preemption disabling is done.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	__raw_write_seqcount_begin(&sl->seqcount);
}
---|
| 491 | + |
---|
| 492 | +static inline int try_write_seqlock(seqlock_t *sl) |
---|
| 493 | +{ |
---|
| 494 | + if (spin_trylock(&sl->lock)) { |
---|
| 495 | + __raw_write_seqcount_begin(&sl->seqcount); |
---|
| 496 | + return 1; |
---|
| 497 | + } |
---|
| 498 | + return 0; |
---|
457 | 499 | } |
---|
458 | 500 | |
---|
/*
 * write_sequnlock() - release the seqlock_t write side.
 *
 * Closes the seqcount section (returning the sequence to an even
 * value) before dropping the spinlock — strict inverse order of
 * write_seqlock().
 */
static inline void write_sequnlock(seqlock_t *sl)
{
	__raw_write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}
---|
464 | 506 | |
---|
/*
 * write_seqlock_bh() - acquire the seqlock_t write side, disabling
 * softirqs.
 *
 * Same shape as write_seqlock(), but uses spin_lock_bh() so bottom
 * halves on this CPU cannot interleave with the write section.
 */
static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	__raw_write_seqcount_begin(&sl->seqcount);
}
---|
470 | 512 | |
---|
/*
 * write_sequnlock_bh() - release the seqlock_t write side and
 * re-enable softirqs.  Counterpart of write_seqlock_bh(); section is
 * closed before the lock is dropped.
 */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
	__raw_write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}
---|
476 | 518 | |
---|
/*
 * write_seqlock_irq() - acquire the seqlock_t write side, disabling
 * hard interrupts via spin_lock_irq() before opening the seqcount
 * section.
 */
static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	__raw_write_seqcount_begin(&sl->seqcount);
}
---|
482 | 524 | |
---|
/*
 * write_sequnlock_irq() - release the seqlock_t write side and
 * re-enable hard interrupts.  Counterpart of write_seqlock_irq();
 * section is closed before the lock is dropped.
 */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
	__raw_write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}
---|
488 | 530 | |
---|
.. | .. |
---|
491 | 533 | unsigned long flags; |
---|
492 | 534 | |
---|
493 | 535 | spin_lock_irqsave(&sl->lock, flags); |
---|
494 | | - write_seqcount_begin(&sl->seqcount); |
---|
| 536 | + __raw_write_seqcount_begin(&sl->seqcount); |
---|
495 | 537 | return flags; |
---|
496 | 538 | } |
---|
497 | 539 | |
---|
.. | .. |
---|
/*
 * write_sequnlock_irqrestore() - release the seqlock_t write side and
 * restore the interrupt state saved by write_seqlock_irqsave().
 * The seqcount section is closed before the lock is dropped and
 * @flags is restored.
 */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	__raw_write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
---|
507 | 549 | |
---|