.. | ..
3 | 3 |  * Implement CPU time clocks for the POSIX clock interface.
4 | 4 |  */
5 | 5 |
  | 6 | +#include <uapi/linux/sched/types.h>
6 | 7 | #include <linux/sched/signal.h>
7 | 8 | #include <linux/sched/cputime.h>
  | 9 | +#include <linux/sched/rt.h>
8 | 10 | #include <linux/posix-timers.h>
9 | 11 | #include <linux/errno.h>
10 | 12 | #include <linux/math64.h>
.. | ..
15 | 17 | #include <linux/workqueue.h>
16 | 18 | #include <linux/compat.h>
17 | 19 | #include <linux/sched/deadline.h>
  | 20 | +#include <linux/smpboot.h>
18 | 21 |
19 | 22 | #include "posix-timers.h"
20 | 23 |
.. | ..
789 | 792 |                         return t->expires;
790 | 793 |
791 | 794 |                 t->firing = 1;
    | 795 | +               t->firing_cpu = smp_processor_id();
792 | 796 |                 list_move_tail(&t->entry, firing);
793 | 797 |         }
794 | 798 |
.. | ..
1135 | 1139 |         return 0;
1136 | 1140 | }
1137 | 1141 |
| 1142 | +static DEFINE_PER_CPU(spinlock_t, cpu_timer_expiry_lock) = __SPIN_LOCK_UNLOCKED(cpu_timer_expiry_lock);
| 1143 | +
| 1144 | +void cpu_timers_grab_expiry_lock(struct k_itimer *timer)
| 1145 | +{
| 1146 | +        int cpu = timer->it.cpu.firing_cpu;
| 1147 | +
| 1148 | +        if (cpu >= 0) {
| 1149 | +                spinlock_t *expiry_lock = per_cpu_ptr(&cpu_timer_expiry_lock, cpu);
| 1150 | +
| 1151 | +                spin_lock_irq(expiry_lock);
| 1152 | +                spin_unlock_irq(expiry_lock);
| 1153 | +        }
| 1154 | +}
| 1155 | +
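The empty lock/unlock pair above is a completion handshake, not mutual exclusion: the expiry pass on the CPU recorded in `firing_cpu` holds `cpu_timer_expiry_lock` for the whole firing walk, so a deleter that acquires and immediately releases that lock is guaranteed any in-flight pass has finished before it tears the timer down. A minimal userspace sketch of the same pattern, with a pthread mutex standing in for the per-CPU spinlock (all names here are illustrative, not from the kernel):

```c
/* Minimal sketch of the expiry-lock handshake; build with
 * "cc demo.c -lpthread". Names are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t expiry_lock = PTHREAD_MUTEX_INITIALIZER;
static int timer_data = 42;

/* Expiry side: holds the lock across the whole firing pass. */
static void *expiry_pass(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&expiry_lock);
	printf("firing: data=%d\n", timer_data); /* timer still in use */
	usleep(10000);                           /* pass still running */
	pthread_mutex_unlock(&expiry_lock);
	return NULL;
}

/* Deleter side: an empty lock/unlock critical section. Once this
 * returns, any firing pass that was in flight has completed, so
 * tearing the timer down is safe.
 */
static void wait_for_expiry_pass(void)
{
	pthread_mutex_lock(&expiry_lock);
	pthread_mutex_unlock(&expiry_lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, expiry_pass, NULL);
	usleep(1000);           /* let the firing pass begin */
	wait_for_expiry_pass(); /* blocks until the pass is done */
	timer_data = 0;         /* safe teardown point */
	pthread_join(t, NULL);
	return 0;
}
```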
1138 | 1156 | /*
1139 | 1157 |  * This is called from the timer interrupt handler. The irq handler has
1140 | 1158 |  * already updated our counts. We need to check if any timers fire now.
1141 | 1159 |  * Interrupts are disabled.
1142 | 1160 |  */
1143 |      | -void run_posix_cpu_timers(struct task_struct *tsk)
| 1161 | +static void __run_posix_cpu_timers(struct task_struct *tsk)
1144 | 1162 | {
1145 | 1163 |         LIST_HEAD(firing);
1146 | 1164 |         struct k_itimer *timer, *next;
1147 | 1165 |         unsigned long flags;
1148 |      | -
1149 |      | -        lockdep_assert_irqs_disabled();
| 1166 | +        spinlock_t *expiry_lock;
1150 | 1167 |
1151 | 1168 |         /*
1152 | 1169 |          * The fast path checks that there are no expired thread or thread
.. | ..
1155 | 1172 |         if (!fastpath_timer_check(tsk))
1156 | 1173 |                 return;
1157 | 1174 |
1158 |      | -        if (!lock_task_sighand(tsk, &flags))
| 1175 | +        expiry_lock = this_cpu_ptr(&cpu_timer_expiry_lock);
| 1176 | +        spin_lock(expiry_lock);
| 1177 | +
| 1178 | +        if (!lock_task_sighand(tsk, &flags)) {
| 1179 | +                spin_unlock(expiry_lock);
1159 | 1180 |                 return;
| 1181 | +        }
1160 | 1182 |         /*
1161 | 1183 |          * Here we take off tsk->signal->cpu_timers[N] and
1162 | 1184 |          * tsk->cpu_timers[N] all the timers that are firing, and
.. | ..
1189 | 1211 |                 list_del_init(&timer->it.cpu.entry);
1190 | 1212 |                 cpu_firing = timer->it.cpu.firing;
1191 | 1213 |                 timer->it.cpu.firing = 0;
| 1214 | +                timer->it.cpu.firing_cpu = -1;
1192 | 1215 |                 /*
1193 | 1216 |                  * The firing flag is -1 if we collided with a reset
1194 | 1217 |                  * of the timer, which already reported this
.. | ..
1198 | 1221 |                         cpu_timer_fire(timer);
1199 | 1222 |                 spin_unlock(&timer->it_lock);
1200 | 1223 |         }
| 1224 | +        spin_unlock(expiry_lock);
1201 | 1225 | }
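The rework keeps the existing two-phase shape of the firing pass: expired timers are first moved onto a private `firing` list under the heavier locks, then fired one by one under only their own `it_lock`, with the new per-CPU `expiry_lock` bracketing the whole pass. A compact userspace sketch of the collect-then-fire discipline (illustrative names; a plain mutex and an array stand in for the kernel's locks and lists):

```c
/* Sketch of a two-phase firing pass: collect expired timers onto
 * a private list under the shared lock, then fire them with the
 * lock dropped. All names are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

#define NTIMERS 4

struct demo_timer {
	int armed;    /* protected by timers_lock */
	long expires;
};

static pthread_mutex_t timers_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_timer timers[NTIMERS] = {
	{ 1, 10 }, { 1, 30 }, { 0, 0 }, { 1, 20 },
};

static void firing_pass(long now)
{
	struct demo_timer *firing[NTIMERS];
	int i, n = 0;

	/* Phase 1: under the lock, move expired timers to a local list. */
	pthread_mutex_lock(&timers_lock);
	for (i = 0; i < NTIMERS; i++) {
		if (timers[i].armed && timers[i].expires <= now) {
			timers[i].armed = 0;
			firing[n++] = &timers[i];
		}
	}
	pthread_mutex_unlock(&timers_lock);

	/* Phase 2: fire each collected timer without the shared lock,
	 * so delivery work cannot delay unrelated timer updates.
	 */
	for (i = 0; i < n; i++)
		printf("firing timer, expiry %ld\n", firing[i]->expires);
}

int main(void)
{
	firing_pass(25); /* fires the timers expiring at 10 and 20 */
	return 0;
}
```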
| 1226 | +
| 1227 | +#ifdef CONFIG_PREEMPT_RT_BASE
| 1228 | +#include <linux/kthread.h>
| 1229 | +#include <linux/cpu.h>
| 1230 | +DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
| 1231 | +DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
| 1232 | +DEFINE_PER_CPU(bool, posix_timer_th_active);
| 1233 | +
| 1234 | +static void posix_cpu_kthread_fn(unsigned int cpu)
| 1235 | +{
| 1236 | +        struct task_struct *tsk = NULL;
| 1237 | +        struct task_struct *next = NULL;
| 1238 | +
| 1239 | +        BUG_ON(per_cpu(posix_timer_task, cpu) != current);
| 1240 | +
| 1241 | +        /* grab task list */
| 1242 | +        raw_local_irq_disable();
| 1243 | +        tsk = per_cpu(posix_timer_tasklist, cpu);
| 1244 | +        per_cpu(posix_timer_tasklist, cpu) = NULL;
| 1245 | +        raw_local_irq_enable();
| 1246 | +
| 1247 | +        /* it's possible the list is empty, just return */
| 1248 | +        if (!tsk)
| 1249 | +                return;
| 1250 | +
| 1251 | +        /* Process task list */
| 1252 | +        while (1) {
| 1253 | +                /* save next */
| 1254 | +                next = tsk->posix_timer_list;
| 1255 | +
| 1256 | +                /* run the task's timers, clear its pointer and
| 1257 | +                 * drop the reference
| 1258 | +                 */
| 1259 | +                __run_posix_cpu_timers(tsk);
| 1260 | +                tsk->posix_timer_list = NULL;
| 1261 | +                put_task_struct(tsk);
| 1262 | +
| 1263 | +                /* check if this is the last on the list */
| 1264 | +                if (next == tsk)
| 1265 | +                        break;
| 1266 | +                tsk = next;
| 1267 | +        }
| 1268 | +}
| 1269 | +
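The drain loop above walks a singly linked list whose tail points at itself: a NULL `posix_timer_list` means "not queued", so the terminator has to be something other than NULL. A small userspace sketch of that push/drain discipline (hypothetical names, not kernel code):

```c
/* Sketch of a self-terminating singly linked list: the last node
 * points at itself, so "queued" is simply node->next != NULL.
 * All names here are hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

struct node {
	int id;
	struct node *next; /* NULL = not queued; self = list tail */
};

static struct node *list_head;

static void push(struct node *n)
{
	if (n->next)                         /* already queued */
		return;
	n->next = list_head ? list_head : n; /* self-terminate if first */
	list_head = n;
}

static void drain(void)
{
	struct node *n = list_head;

	list_head = NULL;
	if (!n)
		return;
	for (;;) {
		struct node *next = n->next; /* save before clearing */

		printf("processing node %d\n", n->id);
		n->next = NULL;              /* mark unqueued again */
		if (next == n)               /* self-pointer: last node */
			break;
		n = next;
	}
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };

	push(&a);
	push(&b);
	push(&a); /* no-op: already queued */
	drain();  /* prints 2 then 1 (LIFO, like the patch) */
	return 0;
}
```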
| 1270 | +static inline int __fastpath_timer_check(struct task_struct *tsk)
| 1271 | +{
| 1272 | +        /* tsk == current, ensure it is safe to use ->signal/sighand */
| 1273 | +        if (unlikely(tsk->exit_state))
| 1274 | +                return 0;
| 1275 | +
| 1276 | +        if (!task_cputime_zero(&tsk->cputime_expires))
| 1277 | +                return 1;
| 1278 | +
| 1279 | +        if (!task_cputime_zero(&tsk->signal->cputime_expires))
| 1280 | +                return 1;
| 1281 | +
| 1282 | +        return 0;
| 1283 | +}
| 1284 | +
| 1285 | +void run_posix_cpu_timers(struct task_struct *tsk)
| 1286 | +{
| 1287 | +        unsigned int cpu = smp_processor_id();
| 1288 | +        struct task_struct *tasklist;
| 1289 | +
| 1290 | +        BUG_ON(!irqs_disabled());
| 1291 | +
| 1292 | +        if (!per_cpu(posix_timer_th_active, cpu))
| 1293 | +                return;
| 1294 | +
| 1295 | +        /* get per-cpu references */
| 1296 | +        tasklist = per_cpu(posix_timer_tasklist, cpu);
| 1297 | +
| 1298 | +        /* check to see if we're already queued */
| 1299 | +        if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
| 1300 | +                get_task_struct(tsk);
| 1301 | +                if (tasklist) {
| 1302 | +                        tsk->posix_timer_list = tasklist;
| 1303 | +                } else {
| 1304 | +                        /*
| 1305 | +                         * The list is terminated by a self-pointing
| 1306 | +                         * task_struct
| 1307 | +                         */
| 1308 | +                        tsk->posix_timer_list = tsk;
| 1309 | +                }
| 1310 | +                per_cpu(posix_timer_tasklist, cpu) = tsk;
| 1311 | +
| 1312 | +                wake_up_process(per_cpu(posix_timer_task, cpu));
| 1313 | +        }
| 1314 | +}
| 1315 | +
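Note the `get_task_struct()` when a task is queued and the matching `put_task_struct()` in the kthread's drain loop: the reference pins the `task_struct` while it sits on the per-CPU list, so the task cannot be freed between enqueue and processing. A hedged userspace analog of that handoff, using a C11 atomic refcount (all names invented for illustration):

```c
/* Sketch of a get/put reference handoff across a queue:
 * the enqueuer takes a reference so the object survives until
 * the consumer drops it. Compile with -std=c11.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refs;
	int payload;
};

static struct obj *obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refs, 1);
	return o;
}

static void obj_put(struct obj *o)
{
	/* fetch_sub returns the old value; 1 means last reference */
	if (atomic_fetch_sub(&o->refs, 1) == 1) {
		printf("freeing payload %d\n", o->payload);
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refs, 1); /* creator's reference */
	o->payload = 7;

	obj_get(o); /* "enqueue": pin the object on the list */
	obj_put(o); /* creator drops its own reference */
	/* still alive here thanks to the queue's reference */
	printf("payload while queued: %d\n", o->payload);
	obj_put(o); /* "drain": last put frees it */
	return 0;
}
```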
| 1316 | +static int posix_cpu_kthread_should_run(unsigned int cpu)
| 1317 | +{
| 1318 | +        return __this_cpu_read(posix_timer_tasklist) != NULL;
| 1319 | +}
| 1320 | +
| 1321 | +static void posix_cpu_kthread_park(unsigned int cpu)
| 1322 | +{
| 1323 | +        this_cpu_write(posix_timer_th_active, false);
| 1324 | +}
| 1325 | +
| 1326 | +static void posix_cpu_kthread_unpark(unsigned int cpu)
| 1327 | +{
| 1328 | +        this_cpu_write(posix_timer_th_active, true);
| 1329 | +}
| 1330 | +
| 1331 | +static void posix_cpu_kthread_setup(unsigned int cpu)
| 1332 | +{
| 1333 | +        struct sched_param sp;
| 1334 | +
| 1335 | +        sp.sched_priority = MAX_RT_PRIO - 1;
| 1336 | +        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
| 1337 | +        posix_cpu_kthread_unpark(cpu);
| 1338 | +}
| 1339 | +
| 1340 | +static struct smp_hotplug_thread posix_cpu_thread = {
| 1341 | +        .store                  = &posix_timer_task,
| 1342 | +        .thread_should_run      = posix_cpu_kthread_should_run,
| 1343 | +        .thread_fn              = posix_cpu_kthread_fn,
| 1344 | +        .thread_comm            = "posixcputmr/%u",
| 1345 | +        .setup                  = posix_cpu_kthread_setup,
| 1346 | +        .park                   = posix_cpu_kthread_park,
| 1347 | +        .unpark                 = posix_cpu_kthread_unpark,
| 1348 | +};
| 1349 | +
| 1350 | +static int __init posix_cpu_thread_init(void)
| 1351 | +{
| 1352 | +        /* Start one thread per CPU, beginning with the boot CPU. */
| 1353 | +        unsigned long cpu;
| 1354 | +        int ret;
| 1355 | +
| 1356 | +        /* init the per-cpu posix_timer_tasklist pointers */
| 1357 | +        for_each_possible_cpu(cpu)
| 1358 | +                per_cpu(posix_timer_tasklist, cpu) = NULL;
| 1359 | +
| 1360 | +        ret = smpboot_register_percpu_thread(&posix_cpu_thread);
| 1361 | +        WARN_ON(ret);
| 1362 | +
| 1363 | +        return 0;
| 1364 | +}
| 1365 | +early_initcall(posix_cpu_thread_init);
| 1366 | +#else /* CONFIG_PREEMPT_RT_BASE */
| 1367 | +void run_posix_cpu_timers(struct task_struct *tsk)
| 1368 | +{
| 1369 | +        lockdep_assert_irqs_disabled();
| 1370 | +        __run_posix_cpu_timers(tsk);
| 1371 | +}
| 1372 | +#endif /* CONFIG_PREEMPT_RT_BASE */
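For reference, the smpboot contract this block relies on, reduced to a skeleton with hypothetical `example_*` names: `thread_should_run()` is polled by the hotplug-thread core with preemption disabled and must not sleep; when it returns true, `thread_fn()` is invoked with preemption enabled; `setup`, `park`, and `unpark` are optional lifecycle hooks, which the patch uses to raise the thread to FIFO priority and to gate queueing via `posix_timer_th_active`.

```c
/* Skeleton of the smpboot per-CPU thread pattern used above.
 * All "example" names are hypothetical; only the smpboot API
 * calls and struct fields are real.
 */
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, example_thread);
static DEFINE_PER_CPU(unsigned long, example_pending);

/* Polled with preemption disabled; must not sleep. */
static int example_should_run(unsigned int cpu)
{
	return __this_cpu_read(example_pending) != 0;
}

/* Runs in the per-CPU kthread with preemption enabled. */
static void example_fn(unsigned int cpu)
{
	__this_cpu_write(example_pending, 0);
	/* ... deferred per-CPU work goes here ... */
}

static struct smp_hotplug_thread example_threads = {
	.store			= &example_thread,
	.thread_should_run	= example_should_run,
	.thread_fn		= example_fn,
	.thread_comm		= "example/%u",
};

static int __init example_init(void)
{
	/* Creates one kthread per online CPU and follows CPUs
	 * through hotplug (park/unpark) automatically.
	 */
	return smpboot_register_percpu_thread(&example_threads);
}
early_initcall(example_init);
```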
1202 | 1373 |
1203 | 1374 | /*
1204 | 1375 |  * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
.. | ..
1318 | 1489 |         spin_unlock_irq(&timer.it_lock);
1319 | 1490 |
1320 | 1491 |         while (error == TIMER_RETRY) {
| 1492 | +
| 1493 | +                cpu_timers_grab_expiry_lock(&timer);
1321 | 1494 |                 /*
1322 | 1495 |                  * We need to handle case when timer was or is in the
1323 | 1496 |                  * middle of firing. In other cases we already freed
---|