.. | .. |
---|
846 | 846 | rcu_read_lock(); |
---|
847 | 847 | unlock_timer(timer, *flags); |
---|
848 | 848 | |
---|
| 849 | + /* |
---|
| 850 | + * kc->timer_wait_running() might drop RCU lock. So @timer |
---|
| 851 | + * cannot be touched anymore after the function returns! |
---|
| 852 | + */ |
---|
849 | 853 | if (!WARN_ON_ONCE(!kc->timer_wait_running)) |
---|
850 | 854 | kc->timer_wait_running(timer); |
---|
851 | 855 | |
---|
.. | .. |
---|
1033 | 1037 | } |
---|
1034 | 1038 | |
---|
1035 | 1039 | /* |
---|
1036 | | - * return timer owned by the process, used by exit_itimers |
---|
| 1040 | + * Delete a timer if it is armed, remove it from the hash and schedule it |
---|
| 1041 | + * for RCU freeing. |
---|
1037 | 1042 | */ |
---|
1038 | 1043 | static void itimer_delete(struct k_itimer *timer) |
---|
1039 | 1044 | { |
---|
1040 | | -retry_delete: |
---|
1041 | | - spin_lock_irq(&timer->it_lock); |
---|
| 1045 | + unsigned long flags; |
---|
1042 | 1046 | |
---|
| 1047 | + /* |
---|
| 1048 | + * irqsave is required to make timer_wait_running() work. |
---|
| 1049 | + */ |
---|
| 1050 | + spin_lock_irqsave(&timer->it_lock, flags); |
---|
| 1051 | + |
---|
| 1052 | +retry_delete: |
---|
| 1053 | + /* |
---|
| 1054 | + * Even if the timer is no longer accessible from other tasks |
---|
| 1055 | + * it still might be armed and queued in the underlying timer |
---|
| 1056 | + * mechanism. Worse, that timer mechanism might run the expiry |
---|
| 1057 | + * function concurrently. |
---|
| 1058 | + */ |
---|
1043 | 1059 | if (timer_delete_hook(timer) == TIMER_RETRY) { |
---|
1044 | | - spin_unlock_irq(&timer->it_lock); |
---|
| 1060 | + /* |
---|
| 1061 | + * The timer is being expired concurrently; prevent livelocks |
---|
| 1062 | + * and pointless spinning on RT. |
---|
| 1063 | + * |
---|
| 1064 | + * timer_wait_running() drops timer::it_lock, which opens |
---|
| 1065 | + * the possibility for another task to delete the timer. |
---|
| 1066 | + * |
---|
| 1067 | + * That's not possible here because this is invoked from |
---|
| 1068 | + * do_exit() only for the last thread of the thread group. |
---|
| 1069 | + * So no other task can access and delete that timer. |
---|
| 1070 | + */ |
---|
| 1071 | + if (WARN_ON_ONCE(timer_wait_running(timer, &flags) != timer)) |
---|
| 1072 | + return; |
---|
| 1073 | + |
---|
1045 | 1074 | goto retry_delete; |
---|
1046 | 1075 | } |
---|
1047 | 1076 | list_del(&timer->list); |
---|
1048 | 1077 | |
---|
1049 | | - spin_unlock_irq(&timer->it_lock); |
---|
| 1078 | + spin_unlock_irqrestore(&timer->it_lock, flags); |
---|
1050 | 1079 | release_posix_timer(timer, IT_ID_SET); |
---|
1051 | 1080 | } |
---|
1052 | 1081 | |
---|
1053 | 1082 | /* |
---|
1054 | | - * This is called by do_exit or de_thread, only when nobody else can |
---|
1055 | | - * modify the signal->posix_timers list. Yet we need sighand->siglock |
---|
1056 | | - * to prevent the race with /proc/pid/timers. |
---|
| 1083 | + * Invoked from do_exit() when the last thread of a thread group exits. |
---|
| 1084 | + * At that point no other task can access the timers of the dying |
---|
| 1085 | + * task anymore. |
---|
1057 | 1086 | */ |
---|
1058 | 1087 | void exit_itimers(struct task_struct *tsk) |
---|
1059 | 1088 | { |
---|
.. | .. |
---|
1063 | 1092 | if (list_empty(&tsk->signal->posix_timers)) |
---|
1064 | 1093 | return; |
---|
1065 | 1094 | |
---|
| 1095 | + /* Protect against concurrent read via /proc/$PID/timers */ |
---|
1066 | 1096 | spin_lock_irq(&tsk->sighand->siglock); |
---|
1067 | 1097 | list_replace_init(&tsk->signal->posix_timers, &timers); |
---|
1068 | 1098 | spin_unlock_irq(&tsk->sighand->siglock); |
---|
1069 | 1099 | |
---|
| 1100 | + /* The timers are no longer accessible via tsk::signal */ |
---|
1070 | 1101 | while (!list_empty(&timers)) { |
---|
1071 | 1102 | tmr = list_first_entry(&timers, struct k_itimer, list); |
---|
1072 | 1103 | itimer_delete(tmr); |
---|
.. | .. |
---|
1270 | 1301 | return -EINVAL; |
---|
1271 | 1302 | if (flags & TIMER_ABSTIME) |
---|
1272 | 1303 | rmtp = NULL; |
---|
| 1304 | + current->restart_block.fn = do_no_restart_syscall; |
---|
1273 | 1305 | current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE; |
---|
1274 | 1306 | current->restart_block.nanosleep.rmtp = rmtp; |
---|
1275 | 1307 | |
---|
.. | .. |
---|
1297 | 1329 | return -EINVAL; |
---|
1298 | 1330 | if (flags & TIMER_ABSTIME) |
---|
1299 | 1331 | rmtp = NULL; |
---|
| 1332 | + current->restart_block.fn = do_no_restart_syscall; |
---|
1300 | 1333 | current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; |
---|
1301 | 1334 | current->restart_block.nanosleep.compat_rmtp = rmtp; |
---|
1302 | 1335 | |
---|