```diff
@@ -42,6 +42,7 @@
 #include <linux/lockdep.h>
 #include <asm/processor.h>
 #include <linux/cpumask.h>
+#include <linux/rcu_assign_pointer.h>
 
 #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
 #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
```
```diff
@@ -55,7 +56,11 @@
 #define call_rcu call_rcu_sched
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define call_rcu_bh call_rcu
+#else
 void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
+#endif
 void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
 void synchronize_sched(void);
 void rcu_barrier_tasks(void);
```
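On CONFIG_PREEMPT_RT_FULL the hunk above turns call_rcu_bh() into plain call_rcu(); the rcu_read_lock_bh() hunks further down implement BH read-side critical sections with rcu_read_lock() there, so the BH grace period becomes the normal one. A minimal caller-side sketch of how call_rcu_bh() is typically used; `struct conn_entry`, `conn_free_rcu()` and `conn_unlink()` are invented names for illustration, not part of the patch:

```c
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustration only: hypothetical RCU-managed object. */
struct conn_entry {
	int		id;
	struct rcu_head	rcu;	/* storage for the deferred-free callback */
};

static void conn_free_rcu(struct rcu_head *head)
{
	/* Runs after the grace period; no BH reader can still see the entry. */
	kfree(container_of(head, struct conn_entry, rcu));
}

static void conn_unlink(struct conn_entry *e)
{
	/* ... unlink e from the RCU-protected structure first ... */

	/*
	 * Defer the free until BH readers are done.  With
	 * CONFIG_PREEMPT_RT_FULL this is literally call_rcu(), per the
	 * #define above, because BH readers are ordinary RCU readers there.
	 */
	call_rcu_bh(&e->rcu, conn_free_rcu);
}
```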
```diff
@@ -73,6 +78,11 @@
  * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
  */
 #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+#ifndef CONFIG_PREEMPT_RT_FULL
+#define sched_rcu_preempt_depth() rcu_preempt_depth()
+#else
+static inline int sched_rcu_preempt_depth(void) { return 0; }
+#endif
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
```
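The hunk above gives the scheduler its own view of the RCU read-side nesting depth: with CONFIG_PREEMPT_RT_FULL it is pinned to 0, presumably so that sleep-in-atomic style diagnostics do not fire inside RT's preemptible RCU read-side critical sections, while non-RT preemptible RCU still reports the real depth. (The next hunk maps the helper to rcu_preempt_depth(), which in that branch appears to be the stub returning 0 shown in the context lines.) A hedged sketch of the kind of check such a helper could feed; `rt_might_sleep_check()` is a hypothetical name, not taken from the patch:

```c
#include <linux/bug.h>
#include <linux/rcupdate.h>

/*
 * Hypothetical debug helper, for illustration only.  A "you are about to
 * sleep" check that consults sched_rcu_preempt_depth() stays quiet on
 * PREEMPT_RT_FULL (the depth is reported as 0 there), while it still fires
 * on a CONFIG_PREEMPT_RCU kernel where sleeping under rcu_read_lock() is
 * a bug.
 */
static inline void rt_might_sleep_check(void)
{
	WARN_ONCE(sched_rcu_preempt_depth() > 0,
		  "sleeping inside an RCU read-side critical section\n");
}
```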
```diff
@@ -95,6 +105,8 @@
 {
 	return 0;
 }
+
+#define sched_rcu_preempt_depth() rcu_preempt_depth()
 
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
```
```diff
@@ -253,7 +265,14 @@
 extern struct lockdep_map rcu_callback_map;
 int debug_lockdep_rcu_enabled(void);
 int rcu_read_lock_held(void);
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int rcu_read_lock_bh_held(void)
+{
+	return rcu_read_lock_held();
+}
+#else
 int rcu_read_lock_bh_held(void);
+#endif
 int rcu_read_lock_sched_held(void);
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
```
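Because RT implements BH read-side critical sections with rcu_read_lock() (see the rcu_read_lock_bh() hunk further down), the lockdep helper is redirected to rcu_read_lock_held() so existing assertions keep passing. A sketch of the usual consumer of this helper; `devmap` and `struct dev_cfg` are invented for the example:

```c
#include <linux/rcupdate.h>

/* Illustration only: a hypothetical RCU-protected pointer. */
struct dev_cfg {
	unsigned int mtu;
};

static struct dev_cfg __rcu *devmap;

static unsigned int dev_cfg_mtu(void)
{
	struct dev_cfg *cfg;

	/*
	 * The condition documents (and, with CONFIG_PROVE_RCU, verifies)
	 * that the caller is inside a BH read-side critical section.  On
	 * PREEMPT_RT_FULL the inline above makes that the same question as
	 * "is a normal RCU read-side critical section held?".
	 */
	cfg = rcu_dereference_check(devmap, rcu_read_lock_bh_held());
	return cfg ? cfg->mtu : 0;
}
```

rcu_dereference_bh() folds the same rcu_read_lock_bh_held() condition into its own lockdep check, so callers of that helper are covered as well.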
```diff
@@ -360,54 +379,6 @@
 	/* Dependency order vs. p above. */ \
 	typeof(p) ________p1 = READ_ONCE(p); \
 	((typeof(*p) __force __kernel *)(________p1)); \
-})
-
-/**
- * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
- * @v: The value to statically initialize with.
- */
-#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
-
-/**
- * rcu_assign_pointer() - assign to RCU-protected pointer
- * @p: pointer to assign to
- * @v: value to assign (publish)
- *
- * Assigns the specified value to the specified RCU-protected
- * pointer, ensuring that any concurrent RCU readers will see
- * any prior initialization.
- *
- * Inserts memory barriers on architectures that require them
- * (which is most of them), and also prevents the compiler from
- * reordering the code that initializes the structure after the pointer
- * assignment. More importantly, this call documents which pointers
- * will be dereferenced by RCU read-side code.
- *
- * In some special cases, you may use RCU_INIT_POINTER() instead
- * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
- * to the fact that it does not constrain either the CPU or the compiler.
- * That said, using RCU_INIT_POINTER() when you should have used
- * rcu_assign_pointer() is a very bad thing that results in
- * impossible-to-diagnose memory corruption. So please be careful.
- * See the RCU_INIT_POINTER() comment header for details.
- *
- * Note that rcu_assign_pointer() evaluates each of its arguments only
- * once, appearances notwithstanding. One of the "extra" evaluations
- * is in typeof() and the other visible only to sparse (__CHECKER__),
- * neither of which actually execute the argument. As with most cpp
- * macros, this execute-arguments-only-once property is important, so
- * please be careful when making changes to rcu_assign_pointer() and the
- * other macros that it invokes.
- */
-#define rcu_assign_pointer(p, v) \
-({ \
-	uintptr_t _r_a_p__v = (uintptr_t)(v); \
-	\
-	if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
-		WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
-	else \
-		smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
-	_r_a_p__v; \
 })
 
 /**
```
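This hunk is a move rather than a behavioral change: RCU_INITIALIZER() and rcu_assign_pointer() leave this file, and the first hunk adds #include <linux/rcu_assign_pointer.h>, so the definitions presumably now live in that new header (which is not part of this excerpt). The semantics spelled out in the removed kernel-doc stay the same; a minimal updater-side sketch, with `struct cfg`, `cur_cfg` and `cfg_update()` as made-up names and a single, serialized updater assumed:

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustration only: a hypothetical RCU-protected configuration. */
struct cfg {
	int level;
};

static struct cfg default_cfg = { .level = 0 };

/* RCU_INITIALIZER() statically initializes an __rcu global, as its doc says. */
static struct cfg __rcu *cur_cfg = RCU_INITIALIZER(&default_cfg);

static void cfg_update(int level)
{
	struct cfg *new_cfg, *old_cfg;

	new_cfg = kmalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg)
		return;
	new_cfg->level = level;

	/* The "1" asserts update-side exclusion; this sketch assumes one updater. */
	old_cfg = rcu_dereference_protected(cur_cfg, 1);

	/*
	 * Publish: the smp_store_release() inside rcu_assign_pointer()
	 * orders the initialization above before the pointer becomes
	 * visible to readers doing rcu_dereference().
	 */
	rcu_assign_pointer(cur_cfg, new_cfg);

	/* Wait for pre-existing readers, then reclaim the old copy. */
	synchronize_rcu();
	if (old_cfg != &default_cfg)
		kfree(old_cfg);
}
```

As the removed comment warns, RCU_INIT_POINTER() remains the cheaper alternative only in the narrow cases its own documentation allows, for example NULL assignment or pointers not yet reachable by readers.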
```diff
@@ -701,10 +672,14 @@
 static inline void rcu_read_lock_bh(void)
 {
 	local_bh_disable();
+#ifdef CONFIG_PREEMPT_RT_FULL
+	rcu_read_lock();
+#else
 	__acquire(RCU_BH);
 	rcu_lock_acquire(&rcu_bh_lock_map);
 	RCU_LOCKDEP_WARN(!rcu_is_watching(),
 			 "rcu_read_lock_bh() used illegally while idle");
+#endif
 }
 
 /*
```
```diff
@@ -714,10 +689,14 @@
  */
 static inline void rcu_read_unlock_bh(void)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	rcu_read_unlock();
+#else
 	RCU_LOCKDEP_WARN(!rcu_is_watching(),
 			 "rcu_read_unlock_bh() used illegally while idle");
 	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);
+#endif
 	local_bh_enable();
 }
 
```
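The last two hunks make a BH read-side critical section on CONFIG_PREEMPT_RT_FULL consist of local_bh_disable() plus an ordinary rcu_read_lock() section instead of the lockdep-annotated RCU_BH context; together with the call_rcu_bh mapping in the earlier hunk, the BH flavor effectively folds into normal RCU on RT. Callers are meant to be unaffected, as in this reader sketch (hypothetical names again, mirroring the earlier dev_cfg example):

```c
#include <linux/rcupdate.h>

/* Same hypothetical structure as in the earlier sketch. */
struct dev_cfg {
	unsigned int mtu;
};

static struct dev_cfg __rcu *devmap;

/* A reader that might run in softirq-heavy paths; names are invented. */
static unsigned int dev_cfg_mtu_bh(void)
{
	struct dev_cfg *cfg;
	unsigned int mtu = 0;

	rcu_read_lock_bh();	/* on RT_FULL: local_bh_disable() + rcu_read_lock() */
	cfg = rcu_dereference_bh(devmap);
	if (cfg)
		mtu = cfg->mtu;
	rcu_read_unlock_bh();	/* on RT_FULL: rcu_read_unlock() + local_bh_enable() */

	return mtu;
}
```

Note that the #endif placement keeps local_bh_disable()/local_bh_enable() outside the conditional, so BH remains disabled across the section in both configurations.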