.. | .. |
---|
782 | 782 | return expires; |
---|
783 | 783 | |
---|
784 | 784 | ctmr->firing = 1; |
---|
| 785 | + /* See posix_cpu_timer_wait_running() */ |
---|
| 786 | + rcu_assign_pointer(ctmr->handling, current); |
---|
785 | 787 | cpu_timer_dequeue(ctmr); |
---|
786 | 788 | list_add_tail(&ctmr->elist, firing); |
---|
787 | 789 | } |
---|
.. | .. |
---|
1097 | 1099 | #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK |
---|
/*
 * Process expired CPU timers for @current from task_work context.
 *
 * The expiry runs under cw->mutex so that posix_cpu_timer_wait_running()
 * can synchronize against an in-progress expiry: a task which failed to
 * cancel a firing timer simply acquires and releases this mutex to wait
 * until the handler below has completed.
 */
static void posix_cpu_timers_work(struct callback_head *work)
{
	struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);

	mutex_lock(&cw->mutex);
	handle_posix_cpu_timers(current);
	mutex_unlock(&cw->mutex);
}
---|
| 1108 | + |
---|
/*
 * Invoked from the posix-timer core when a cancel operation failed because
 * the timer is marked firing. The caller holds rcu_read_lock(), which
 * protects the timer and the task which is expiring it from being freed.
 *
 * Waits for the expiry to complete by taking and immediately dropping the
 * expiry mutex of the task recorded in timr->it.cpu.handling. Returns with
 * rcu_read_lock() held again so the callsite's RCU section is balanced.
 */
static void posix_cpu_timer_wait_running(struct k_itimer *timr)
{
	struct task_struct *tsk = rcu_dereference(timr->it.cpu.handling);

	/* Has the handling task completed expiry already? */
	if (!tsk)
		return;

	/* Ensure that the task cannot go away */
	get_task_struct(tsk);
	/* Now drop the RCU protection so the mutex can be locked */
	rcu_read_unlock();
	/* Wait on the expiry mutex */
	mutex_lock(&tsk->posix_cputimers_work.mutex);
	/* Release it immediately again. */
	mutex_unlock(&tsk->posix_cputimers_work.mutex);
	/* Drop the task reference. */
	put_task_struct(tsk);
	/* Relock RCU so the callsite is balanced */
	rcu_read_lock();
}
---|
| 1135 | + |
---|
/*
 * Nanosleep variant of the firing-timer wait: called with timr->it_lock
 * held. The lock must be dropped across the wait so the expiry side can
 * make progress; @timr lives on the caller's stack, so it stays valid
 * while the lock is released.
 */
static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
{
	/* Ensure that timr->it.cpu.handling task cannot go away */
	rcu_read_lock();
	spin_unlock_irq(&timr->it_lock);
	posix_cpu_timer_wait_running(timr);
	rcu_read_unlock();
	/* @timr is on stack and is valid */
	spin_lock_irq(&timr->it_lock);
}
---|
1102 | 1146 | |
---|
1103 | 1147 | /* |
---|
.. | .. |
---|
1113 | 1157 | sizeof(p->posix_cputimers_work.work)); |
---|
1114 | 1158 | init_task_work(&p->posix_cputimers_work.work, |
---|
1115 | 1159 | posix_cpu_timers_work); |
---|
| 1160 | + mutex_init(&p->posix_cputimers_work.mutex); |
---|
1116 | 1161 | p->posix_cputimers_work.scheduled = false; |
---|
1117 | 1162 | } |
---|
1118 | 1163 | |
---|
.. | .. |
---|
1189 | 1234 | lockdep_posixtimer_enter(); |
---|
1190 | 1235 | handle_posix_cpu_timers(tsk); |
---|
1191 | 1236 | lockdep_posixtimer_exit(); |
---|
| 1237 | +} |
---|
| 1238 | + |
---|
/*
 * Variant for the !CONFIG_POSIX_CPU_TIMERS_TASK_WORK case: there is no
 * expiry mutex to wait on here, so just back off briefly and let the
 * caller's retry loop poll until the timer is no longer firing.
 */
static void posix_cpu_timer_wait_running(struct k_itimer *timr)
{
	cpu_relax();
}
---|
| 1243 | + |
---|
/*
 * Nanosleep variant for the !CONFIG_POSIX_CPU_TIMERS_TASK_WORK case:
 * drop it_lock so the expiring side is not blocked on it, back off,
 * and retake the lock for the caller's retry.
 */
static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
{
	spin_unlock_irq(&timr->it_lock);
	cpu_relax();
	spin_lock_irq(&timr->it_lock);
}
---|
1193 | 1250 | |
---|
1194 | 1251 | static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk) |
---|
.. | .. |
---|
1299 | 1356 | */ |
---|
1300 | 1357 | if (likely(cpu_firing >= 0)) |
---|
1301 | 1358 | cpu_timer_fire(timer); |
---|
| 1359 | + /* See posix_cpu_timer_wait_running() */ |
---|
| 1360 | + rcu_assign_pointer(timer->it.cpu.handling, NULL); |
---|
1302 | 1361 | spin_unlock(&timer->it_lock); |
---|
1303 | 1362 | } |
---|
1304 | 1363 | } |
---|
.. | .. |
---|
1434 | 1493 | expires = cpu_timer_getexpires(&timer.it.cpu); |
---|
1435 | 1494 | error = posix_cpu_timer_set(&timer, 0, &zero_it, &it); |
---|
1436 | 1495 | if (!error) { |
---|
1437 | | - /* |
---|
1438 | | - * Timer is now unarmed, deletion can not fail. |
---|
1439 | | - */ |
---|
| 1496 | + /* Timer is now unarmed, deletion can not fail. */ |
---|
1440 | 1497 | posix_cpu_timer_del(&timer); |
---|
| 1498 | + } else { |
---|
| 1499 | + while (error == TIMER_RETRY) { |
---|
| 1500 | + posix_cpu_timer_wait_running_nsleep(&timer); |
---|
| 1501 | + error = posix_cpu_timer_del(&timer); |
---|
| 1502 | + } |
---|
1441 | 1503 | } |
---|
1442 | | - spin_unlock_irq(&timer.it_lock); |
---|
1443 | 1504 | |
---|
1444 | | - while (error == TIMER_RETRY) { |
---|
1445 | | - /* |
---|
1446 | | - * We need to handle case when timer was or is in the |
---|
1447 | | - * middle of firing. In other cases we already freed |
---|
1448 | | - * resources. |
---|
1449 | | - */ |
---|
1450 | | - spin_lock_irq(&timer.it_lock); |
---|
1451 | | - error = posix_cpu_timer_del(&timer); |
---|
1452 | | - spin_unlock_irq(&timer.it_lock); |
---|
1453 | | - } |
---|
| 1505 | + spin_unlock_irq(&timer.it_lock); |
---|
1454 | 1506 | |
---|
1455 | 1507 | if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) { |
---|
1456 | 1508 | /* |
---|
.. | .. |
---|
1560 | 1612 | .timer_del = posix_cpu_timer_del, |
---|
1561 | 1613 | .timer_get = posix_cpu_timer_get, |
---|
1562 | 1614 | .timer_rearm = posix_cpu_timer_rearm, |
---|
| 1615 | + .timer_wait_running = posix_cpu_timer_wait_running, |
---|
1563 | 1616 | }; |
---|
1564 | 1617 | |
---|
1565 | 1618 | const struct k_clock clock_process = { |
---|