| Old | New | Code |
|---|---|---|
| .. | .. | |
| | 1 | +/* SPDX-License-Identifier: GPL-2.0+ */ |
| 1 | 2 | /* |
| 2 | 3 | * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. |
| 3 | 4 | * |
| 4 | | - * This program is free software; you can redistribute it and/or modify |
| 5 | | - * it under the terms of the GNU General Public License as published by |
| 6 | | - * the Free Software Foundation; either version 2 of the License, or |
| 7 | | - * (at your option) any later version. |
| 8 | | - * |
| 9 | | - * This program is distributed in the hope that it will be useful, |
| 10 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | | - * GNU General Public License for more details. |
| 13 | | - * |
| 14 | | - * You should have received a copy of the GNU General Public License |
| 15 | | - * along with this program; if not, you can access it online at |
| 16 | | - * http://www.gnu.org/licenses/gpl-2.0.html. |
| 17 | | - * |
| 18 | 5 | * Copyright IBM Corporation, 2008 |
| 19 | 6 | * |
| 20 | | - * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> |
| | 7 | + * Author: Paul E. McKenney <paulmck@linux.ibm.com> |
| 21 | 8 | * |
| 22 | 9 | * For detailed explanation of Read-Copy Update mechanism see - |
| 23 | 10 | * Documentation/RCU |
| .. | .. | |
| 25 | 12 | #ifndef __LINUX_TINY_H |
| 26 | 13 | #define __LINUX_TINY_H |
| 27 | 14 | |
| 28 | | -#include <linux/ktime.h> |
| 29 | | - |
| 30 | | -struct rcu_dynticks; |
| 31 | | -static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp) |
| 32 | | -{ |
| 33 | | - return 0; |
| 34 | | -} |
| | 15 | +#include <asm/param.h> /* for HZ */ |
| 35 | 16 | |
| 36 | 17 | /* Never flag non-existent other CPUs! */ |
| 37 | 18 | static inline bool rcu_eqs_special_set(int cpu) { return false; } |
| .. | .. | |
| 46 | 27 | might_sleep(); |
| 47 | 28 | } |
| 48 | 29 | |
| 49 | | -static inline unsigned long get_state_synchronize_sched(void) |
| 50 | | -{ |
| 51 | | - return 0; |
| 52 | | -} |
| 53 | | - |
| 54 | | -static inline void cond_synchronize_sched(unsigned long oldstate) |
| 55 | | -{ |
| 56 | | - might_sleep(); |
| 57 | | -} |
| 58 | | - |
| 59 | | -extern void rcu_barrier_bh(void); |
| 60 | | -extern void rcu_barrier_sched(void); |
| | 30 | +extern void rcu_barrier(void); |
| 61 | 31 | |
| 62 | 32 | static inline void synchronize_rcu_expedited(void) |
| 63 | 33 | { |
| 64 | | - synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ |
| | 34 | + synchronize_rcu(); |
| 65 | 35 | } |
| 66 | 36 | |
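With only one CPU there is nothing to expedite, so the hunk above maps `synchronize_rcu_expedited()` straight onto `synchronize_rcu()`. It may help to see why even the ordinary grace period is nearly free here; the following is a simplified sketch, not the literal `kernel/rcu/tiny.c` implementation (which also carries lockdep assertions):

```c
#include <linux/kernel.h>	/* might_sleep() */

/*
 * Sketch only: on one non-preemptible CPU, merely reaching a context
 * where sleeping is legal proves that no RCU read-side critical
 * section is running, so every pre-existing reader has finished.
 */
void synchronize_rcu(void)
{
	might_sleep();	/* reaching here is itself the quiescent state */
}
```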
| Old | New | Code |
|---|---|---|
| 67 | | -static inline void rcu_barrier(void) |
| | 37 | +/* |
| | 38 | + * Add one more declaration of kvfree() here. It is |
| | 39 | + * not so straight forward to just include <linux/mm.h> |
| | 40 | + * where it is defined due to getting many compile |
| | 41 | + * errors caused by that include. |
| | 42 | + */ |
| | 43 | +extern void kvfree(const void *addr); |
| | 44 | + |
| | 45 | +static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) |
| 68 | 46 | { |
| 69 | | - rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */ |
| | 47 | + if (head) { |
| | 48 | + call_rcu(head, func); |
| | 49 | + return; |
| | 50 | + } |
| | 51 | + |
| | 52 | + // kvfree_rcu(one_arg) call. |
| | 53 | + might_sleep(); |
| | 54 | + synchronize_rcu(); |
| | 55 | + kvfree((void *) func); |
| 70 | 56 | } |
| 71 | 57 | |
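The two branches of `kvfree_call_rcu()` above back the two forms of the `kvfree_rcu()` wrapper macro: with a non-NULL `head`, the object's embedded `rcu_head` is queued through `call_rcu()` without sleeping; with `head == NULL`, `func` actually carries the object pointer itself, so Tiny RCU simply waits out a grace period and frees the object directly. A hedged usage sketch follows; `struct widget` and both helper functions are illustrative, not from this patch, and note that later kernels spell the single-argument form `kvfree_rcu_mightsleep()`:

```c
#include <linux/rcupdate.h>	/* kvfree_rcu(), struct rcu_head */

struct widget {
	int payload;
	struct rcu_head rh;	/* needed only by the two-argument form */
};

static void widget_free_atomic(struct widget *w)
{
	kvfree_rcu(w, rh);	/* queues w->rh via call_rcu(); never sleeps */
}

static void widget_free_sleepable(struct widget *w)
{
	kvfree_rcu(w);		/* head == NULL path: synchronize_rcu() + kvfree() */
}
```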
| Old | New | Code |
|---|---|---|
| 72 | | -static inline void synchronize_rcu_bh(void) |
| 73 | | -{ |
| 74 | | - synchronize_sched(); |
| 75 | | -} |
| | 58 | +void rcu_qs(void); |
| 76 | 59 | |
| 77 | | -static inline void synchronize_rcu_bh_expedited(void) |
| | 60 | +static inline void rcu_softirq_qs(void) |
| 78 | 61 | { |
| 79 | | - synchronize_sched(); |
| 80 | | -} |
| 81 | | - |
| 82 | | -static inline void synchronize_sched_expedited(void) |
| 83 | | -{ |
| 84 | | - synchronize_sched(); |
| 85 | | -} |
| 86 | | - |
| 87 | | -static inline void kfree_call_rcu(struct rcu_head *head, |
| 88 | | - rcu_callback_t func) |
| 89 | | -{ |
| 90 | | - call_rcu(head, func); |
| | 62 | + rcu_qs(); |
| 91 | 63 | } |
| 92 | 64 | |
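The deletions above (`synchronize_rcu_bh()`, `synchronize_sched()`, their expedited variants, and `kfree_call_rcu()`) follow from the RCU flavor consolidation of the v4.20/v5.0 era: a single grace period now waits for readers under `rcu_read_lock()`, `local_bh_disable()`, and `preempt_disable()` alike. A hedged updater sketch under that model (`gp`, `gp_lock`, and `struct obj` are illustrative names, not from this patch):

```c
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct obj {
	int data;
};

static struct obj __rcu *gp;
static DEFINE_SPINLOCK(gp_lock);

static void obj_update(struct obj *newp)
{
	struct obj *old;

	spin_lock(&gp_lock);
	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
	rcu_assign_pointer(gp, newp);
	spin_unlock(&gp_lock);

	/* Before consolidation this had to match the reader flavor
	 * (synchronize_rcu_bh(), synchronize_sched(), ...); now one
	 * call covers them all. */
	synchronize_rcu();
	kfree(old);
}
```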
| Old | New | Code |
|---|---|---|
| 93 | 65 | #define rcu_note_context_switch(preempt) \ |
| 94 | 66 | do { \ |
| 95 | | - rcu_sched_qs(); \ |
| 96 | | - rcu_tasks_qs(current); \ |
| | 67 | + rcu_qs(); \ |
| | 68 | + rcu_tasks_qs(current, (preempt)); \ |
| 97 | 69 | } while (0) |
| 98 | 70 | |
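`rcu_note_context_switch()` now reports the quiescent state through the consolidated `rcu_qs()` and passes `preempt` through to `rcu_tasks_qs()`. Conceptually, reporting a quiescent state on a uniprocessor lets Tiny RCU promote every previously queued callback at once; the sketch below is illustrative only, and `promote_pending_callbacks_to_done()` is a hypothetical stand-in for the list splicing the real `rcu_qs()` in `kernel/rcu/tiny.c` performs:

```c
#include <linux/interrupt.h>	/* raise_softirq_irqoff(), RCU_SOFTIRQ */
#include <linux/irqflags.h>	/* local_irq_save()/local_irq_restore() */

/* Hypothetical helper standing in for the real callback-list splice. */
static void promote_pending_callbacks_to_done(void) { /* ... */ }

/* Illustrative sketch, not the kernel's rcu_qs(). */
static void tiny_rcu_report_qs_sketch(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/* With one CPU past a quiescent state, all callbacks queued
	 * before this point are now safe to invoke. */
	promote_pending_callbacks_to_done();
	raise_softirq_irqoff(RCU_SOFTIRQ);	/* run them from softirq */
	local_irq_restore(flags);
}
```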
| Old | New | Code |
|---|---|---|
| 99 | 71 | static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) |
| .. | .. | |
| 108 | 80 | */ |
| 109 | 81 | static inline void rcu_virt_note_context_switch(int cpu) { } |
| 110 | 82 | static inline void rcu_cpu_stall_reset(void) { } |
| | 83 | +static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; } |
| 111 | 84 | static inline void rcu_idle_enter(void) { } |
| 112 | 85 | static inline void rcu_idle_exit(void) { } |
| 113 | 86 | static inline void rcu_irq_enter(void) { } |
| 114 | 87 | static inline void rcu_irq_exit_irqson(void) { } |
| 115 | 88 | static inline void rcu_irq_enter_irqson(void) { } |
| 116 | 89 | static inline void rcu_irq_exit(void) { } |
| | 90 | +static inline void rcu_irq_exit_preempt(void) { } |
| | 91 | +static inline void rcu_irq_exit_check_preempt(void) { } |
| 117 | 92 | static inline void exit_rcu(void) { } |
| | 93 | +static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t) |
| | 94 | +{ |
| | 95 | + return false; |
| | 96 | +} |
| | 97 | +static inline void rcu_preempt_deferred_qs(struct task_struct *t) { } |
| 118 | 98 | #ifdef CONFIG_SRCU |
| 119 | 99 | void rcu_scheduler_starting(void); |
| 120 | 100 | #else /* #ifndef CONFIG_SRCU */ |
| 121 | 101 | static inline void rcu_scheduler_starting(void) { } |
| 122 | 102 | #endif /* #else #ifndef CONFIG_SRCU */ |
| 123 | 103 | static inline void rcu_end_inkernel_boot(void) { } |
| | 104 | +static inline bool rcu_inkernel_boot_has_ended(void) { return true; } |
| 124 | 105 | static inline bool rcu_is_watching(void) { return true; } |
| | 106 | +static inline void rcu_momentary_dyntick_idle(void) { } |
| | 107 | +static inline void kfree_rcu_scheduler_running(void) { } |
| | 108 | +static inline bool rcu_gp_might_be_stalled(void) { return false; } |
| 125 | 109 | |
| 126 | 110 | /* Avoid RCU read-side critical sections leaking across. */ |
| 127 | 111 | static inline void rcu_all_qs(void) { barrier(); } |
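One note on the `rcu_jiffies_till_stall_check()` stub added above: Tree RCU derives this interval from `CONFIG_RCU_CPU_STALL_TIMEOUT`, while Tiny RCU hard-codes the same default. The return value is in jiffies, so `21 * HZ` is 21 seconds at any tick rate; a trivial illustrative conversion (not from the patch):

```c
#include <asm/param.h>	/* HZ, as in the hunk near the top */

/* Illustrative only: the hard-coded stall-check interval. */
static unsigned long stall_jiffies = 21 * HZ;		/* in jiffies */
static unsigned int stall_seconds = (21 * HZ) / HZ;	/* == 21 */
```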
|---|