.. | .. |
---|
45 | 45 | * Number of tasks to iterate in a single balance run. |
---|
46 | 46 | * Limited because this is done with IRQs disabled. |
---|
47 | 47 | */ |
---|
| 48 | +#ifdef CONFIG_PREEMPT_RT_FULL |
---|
| 49 | +const_debug unsigned int sysctl_sched_nr_migrate = 8; |
---|
| 50 | +#else |
---|
48 | 51 | const_debug unsigned int sysctl_sched_nr_migrate = 32; |
---|
| 52 | +#endif |
---|
49 | 53 | |
---|
50 | 54 | /* |
---|
51 | 55 | * period over which we measure -rt task CPU usage in us. |
---|
.. | .. |
---|
317 | 321 | rq->hrtick_csd.info = rq; |
---|
318 | 322 | #endif |
---|
319 | 323 | |
---|
320 | | - hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
---|
| 324 | + hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); |
---|
321 | 325 | rq->hrtick_timer.function = hrtick; |
---|
322 | 326 | } |
---|
323 | 327 | #else /* CONFIG_SCHED_HRTICK */ |
---|
.. | .. |
---|
399 | 403 | #endif |
---|
400 | 404 | #endif |
---|
401 | 405 | |
---|
402 | | -void wake_q_add(struct wake_q_head *head, struct task_struct *task) |
---|
| 406 | +void __wake_q_add(struct wake_q_head *head, struct task_struct *task, |
---|
| 407 | + bool sleeper) |
---|
403 | 408 | { |
---|
404 | | - struct wake_q_node *node = &task->wake_q; |
---|
| 409 | + struct wake_q_node *node; |
---|
| 410 | + |
---|
| 411 | + if (sleeper) |
---|
| 412 | + node = &task->wake_q_sleeper; |
---|
| 413 | + else |
---|
| 414 | + node = &task->wake_q; |
---|
405 | 415 | |
---|
406 | 416 | /* |
---|
407 | 417 | * Atomically grab the task, if ->wake_q is !nil already it means |
---|
.. | .. |
---|
429 | 439 | static int |
---|
430 | 440 | try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags, |
---|
431 | 441 | int sibling_count_hint); |
---|
432 | | - |
---|
433 | | -void wake_up_q(struct wake_q_head *head) |
---|
| 442 | +void __wake_up_q(struct wake_q_head *head, bool sleeper) |
---|
434 | 443 | { |
---|
435 | 444 | struct wake_q_node *node = head->first; |
---|
436 | 445 | |
---|
437 | 446 | while (node != WAKE_Q_TAIL) { |
---|
438 | 447 | struct task_struct *task; |
---|
439 | 448 | |
---|
440 | | - task = container_of(node, struct task_struct, wake_q); |
---|
| 449 | + if (sleeper) |
---|
| 450 | + task = container_of(node, struct task_struct, wake_q_sleeper); |
---|
| 451 | + else |
---|
| 452 | + task = container_of(node, struct task_struct, wake_q); |
---|
441 | 453 | BUG_ON(!task); |
---|
442 | 454 | /* Task can safely be re-inserted now: */ |
---|
443 | 455 | node = node->next; |
---|
444 | | - task->wake_q.next = NULL; |
---|
445 | | - |
---|
| 456 | + if (sleeper) |
---|
| 457 | + task->wake_q_sleeper.next = NULL; |
---|
| 458 | + else |
---|
| 459 | + task->wake_q.next = NULL; |
---|
446 | 460 | /* |
---|
447 | | - * try_to_wake_up() executes a full barrier, which pairs with |
---|
| 461 | + * wake_up_process() executes a full barrier, which pairs with |
---|
448 | 462 | * the queueing in wake_q_add() so as not to miss wakeups. |
---|
449 | 463 | */ |
---|
450 | | - try_to_wake_up(task, TASK_NORMAL, 0, head->count); |
---|
| 464 | + if (sleeper) |
---|
| 465 | + wake_up_lock_sleeper(task); |
---|
| 466 | + else |
---|
| 467 | + wake_up_process(task); |
---|
| 468 | + |
---|
451 | 469 | put_task_struct(task); |
---|
452 | 470 | } |
---|
453 | 471 | } |
---|
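
The two hunks above fold the "sleeper" distinction into __wake_q_add()/__wake_up_q(). Callers are expected to keep using thin named wrappers; a minimal sketch of what those presumably look like in the wake_q header, with the sleeper variants being what the rtmutex "sleeping spinlock" code calls:

```c
/* Sketch of the expected wrappers around the __wake_q_add()/__wake_up_q()
 * helpers introduced above; assumed to live next to struct wake_q_head. */
static inline void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	__wake_q_add(head, task, false);
}

static inline void wake_q_add_sleeper(struct wake_q_head *head,
				      struct task_struct *task)
{
	__wake_q_add(head, task, true);
}

static inline void wake_up_q(struct wake_q_head *head)
{
	__wake_up_q(head, false);
}

static inline void wake_up_q_sleeper(struct wake_q_head *head)
{
	__wake_up_q(head, true);
}
```
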
.. | .. |
---|
486 | 504 | else |
---|
487 | 505 | trace_sched_wake_idle_without_ipi(cpu); |
---|
488 | 506 | } |
---|
| 507 | + |
---|
| 508 | +#ifdef CONFIG_PREEMPT_LAZY |
---|
| 509 | + |
---|
| 510 | +static int tsk_is_polling(struct task_struct *p) |
---|
| 511 | +{ |
---|
| 512 | +#ifdef TIF_POLLING_NRFLAG |
---|
| 513 | + return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); |
---|
| 514 | +#else |
---|
| 515 | + return 0; |
---|
| 516 | +#endif |
---|
| 517 | +} |
---|
| 518 | + |
---|
| 519 | +void resched_curr_lazy(struct rq *rq) |
---|
| 520 | +{ |
---|
| 521 | + struct task_struct *curr = rq->curr; |
---|
| 522 | + int cpu; |
---|
| 523 | + |
---|
| 524 | + if (!sched_feat(PREEMPT_LAZY)) { |
---|
| 525 | + resched_curr(rq); |
---|
| 526 | + return; |
---|
| 527 | + } |
---|
| 528 | + |
---|
| 529 | + lockdep_assert_held(&rq->lock); |
---|
| 530 | + |
---|
| 531 | + if (test_tsk_need_resched(curr)) |
---|
| 532 | + return; |
---|
| 533 | + |
---|
| 534 | + if (test_tsk_need_resched_lazy(curr)) |
---|
| 535 | + return; |
---|
| 536 | + |
---|
| 537 | + set_tsk_need_resched_lazy(curr); |
---|
| 538 | + |
---|
| 539 | + cpu = cpu_of(rq); |
---|
| 540 | + if (cpu == smp_processor_id()) |
---|
| 541 | + return; |
---|
| 542 | + |
---|
| 543 | + /* NEED_RESCHED_LAZY must be visible before we test polling */ |
---|
| 544 | + smp_mb(); |
---|
| 545 | + if (!tsk_is_polling(curr)) |
---|
| 546 | + smp_send_reschedule(cpu); |
---|
| 547 | +} |
---|
| 548 | +#endif |
---|
489 | 549 | |
---|
490 | 550 | void resched_cpu(int cpu) |
---|
491 | 551 | { |
---|
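
resched_curr_lazy() depends on a second, "lazy" reschedule bit next to TIF_NEED_RESCHED. The set/clear/test helpers used above are not part of this hunk; a hedged sketch of what they amount to, assuming a TIF_NEED_RESCHED_LAZY thread flag as in the -rt series:

```c
/* Assumed helpers mirroring the stock need_resched ones, but operating on a
 * separate TIF_NEED_RESCHED_LAZY flag (the flag name is the -rt convention). */
static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED_LAZY);
}

static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED_LAZY);
}

static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED_LAZY));
}
```

The lazy bit only delays preemption of non-RT tasks; a real TIF_NEED_RESCHED set by an RT task still preempts immediately, which is also why resched_curr_lazy() falls back to resched_curr() when the PREEMPT_LAZY scheduler feature is off.
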
.. | .. |
---|
1481 | 1541 | */ |
---|
1482 | 1542 | static inline bool is_cpu_allowed(struct task_struct *p, int cpu) |
---|
1483 | 1543 | { |
---|
1484 | | - if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) |
---|
| 1544 | + if (!cpumask_test_cpu(cpu, p->cpus_ptr)) |
---|
1485 | 1545 | return false; |
---|
1486 | 1546 | |
---|
1487 | | - if (is_per_cpu_kthread(p)) |
---|
| 1547 | + if (is_per_cpu_kthread(p) || __migrate_disabled(p)) |
---|
1488 | 1548 | return cpu_online(cpu); |
---|
1489 | 1549 | |
---|
1490 | 1550 | return cpu_active(cpu); |
---|
.. | .. |
---|
1533 | 1593 | struct migration_arg { |
---|
1534 | 1594 | struct task_struct *task; |
---|
1535 | 1595 | int dest_cpu; |
---|
| 1596 | + bool done; |
---|
1536 | 1597 | }; |
---|
1537 | 1598 | |
---|
1538 | 1599 | /* |
---|
.. | .. |
---|
1568 | 1629 | struct task_struct *p = arg->task; |
---|
1569 | 1630 | struct rq *rq = this_rq(); |
---|
1570 | 1631 | struct rq_flags rf; |
---|
| 1632 | + int dest_cpu = arg->dest_cpu; |
---|
| 1633 | + |
---|
| 1634 | + /* We don't look at arg after this point. */ |
---|
| 1635 | + smp_mb(); |
---|
| 1636 | + arg->done = true; |
---|
1571 | 1637 | |
---|
1572 | 1638 | /* |
---|
1573 | 1639 | * The original target CPU might have gone down and we might |
---|
.. | .. |
---|
1576 | 1642 | local_irq_disable(); |
---|
1577 | 1643 | /* |
---|
1578 | 1644 | * We need to explicitly wake pending tasks before running |
---|
1579 | | - * __migrate_task() such that we will not miss enforcing cpus_allowed |
---|
| 1645 | + * __migrate_task() such that we will not miss enforcing cpus_ptr |
---|
1580 | 1646 | * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. |
---|
1581 | 1647 | */ |
---|
1582 | 1648 | sched_ttwu_pending(); |
---|
.. | .. |
---|
1590 | 1656 | */ |
---|
1591 | 1657 | if (task_rq(p) == rq) { |
---|
1592 | 1658 | if (task_on_rq_queued(p)) |
---|
1593 | | - rq = __migrate_task(rq, &rf, p, arg->dest_cpu); |
---|
| 1659 | + rq = __migrate_task(rq, &rf, p, dest_cpu); |
---|
1594 | 1660 | else |
---|
1595 | | - p->wake_cpu = arg->dest_cpu; |
---|
| 1661 | + p->wake_cpu = dest_cpu; |
---|
1596 | 1662 | } |
---|
1597 | 1663 | rq_unlock(rq, &rf); |
---|
1598 | 1664 | raw_spin_unlock(&p->pi_lock); |
---|
.. | .. |
---|
1607 | 1673 | */ |
---|
1608 | 1674 | void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) |
---|
1609 | 1675 | { |
---|
1610 | | - cpumask_copy(&p->cpus_allowed, new_mask); |
---|
1611 | | - p->nr_cpus_allowed = cpumask_weight(new_mask); |
---|
| 1676 | + cpumask_copy(&p->cpus_mask, new_mask); |
---|
| 1677 | + if (p->cpus_ptr == &p->cpus_mask) |
---|
| 1678 | + p->nr_cpus_allowed = cpumask_weight(new_mask); |
---|
1612 | 1679 | } |
---|
| 1680 | + |
---|
| 1681 | +#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) |
---|
| 1682 | +int __migrate_disabled(struct task_struct *p) |
---|
| 1683 | +{ |
---|
| 1684 | + return p->migrate_disable; |
---|
| 1685 | +} |
---|
| 1686 | +EXPORT_SYMBOL_GPL(__migrate_disabled); |
---|
| 1687 | +#endif |
---|
1613 | 1688 | |
---|
1614 | 1689 | void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) |
---|
1615 | 1690 | { |
---|
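
From here on the series distinguishes the affinity mask a task was given from the mask the scheduler actually consults: set_cpus_allowed_common() writes cpus_mask, while placement decisions read through cpus_ptr, which migrate_disable() can temporarily redirect to the current CPU. A rough sketch of the two task_struct members as implied by this diff (layout illustrative, not copied from the header):

```c
struct task_struct {
	/* ... */
	const cpumask_t		*cpus_ptr;	/* what the scheduler dereferences */
	cpumask_t		 cpus_mask;	/* affinity set via sched_setaffinity() etc. */
	int			 nr_cpus_allowed;
	/* ... */
};
```

Normally cpus_ptr points at &cpus_mask; only a migrate-disabled task sees the two diverge, which is what several checks in the following hunks test for.
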
.. | .. |
---|
1677 | 1752 | goto out; |
---|
1678 | 1753 | } |
---|
1679 | 1754 | |
---|
1680 | | - if (cpumask_equal(&p->cpus_allowed, new_mask)) |
---|
| 1755 | + if (cpumask_equal(&p->cpus_mask, new_mask)) |
---|
1681 | 1756 | goto out; |
---|
1682 | 1757 | |
---|
1683 | 1758 | dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); |
---|
.. | .. |
---|
1699 | 1774 | } |
---|
1700 | 1775 | |
---|
1701 | 1776 | /* Can the task run on the task's current CPU? If so, we're done */ |
---|
1702 | | - if (cpumask_test_cpu(task_cpu(p), new_mask)) |
---|
| 1777 | + if (cpumask_test_cpu(task_cpu(p), new_mask) || |
---|
| 1778 | + p->cpus_ptr != &p->cpus_mask) |
---|
1703 | 1779 | goto out; |
---|
1704 | 1780 | |
---|
1705 | 1781 | if (task_running(rq, p) || p->state == TASK_WAKING) { |
---|
.. | .. |
---|
1840 | 1916 | if (task_cpu(arg->src_task) != arg->src_cpu) |
---|
1841 | 1917 | goto unlock; |
---|
1842 | 1918 | |
---|
1843 | | - if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed)) |
---|
| 1919 | + if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) |
---|
1844 | 1920 | goto unlock; |
---|
1845 | 1921 | |
---|
1846 | | - if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed)) |
---|
| 1922 | + if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) |
---|
1847 | 1923 | goto unlock; |
---|
1848 | 1924 | |
---|
1849 | 1925 | __migrate_swap_task(arg->src_task, arg->dst_cpu); |
---|
.. | .. |
---|
1885 | 1961 | if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) |
---|
1886 | 1962 | goto out; |
---|
1887 | 1963 | |
---|
1888 | | - if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed)) |
---|
| 1964 | + if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) |
---|
1889 | 1965 | goto out; |
---|
1890 | 1966 | |
---|
1891 | | - if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed)) |
---|
| 1967 | + if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) |
---|
1892 | 1968 | goto out; |
---|
1893 | 1969 | |
---|
1894 | 1970 | trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); |
---|
.. | .. |
---|
1898 | 1974 | return ret; |
---|
1899 | 1975 | } |
---|
1900 | 1976 | #endif /* CONFIG_NUMA_BALANCING */ |
---|
| 1977 | + |
---|
| 1978 | +static bool check_task_state(struct task_struct *p, long match_state) |
---|
| 1979 | +{ |
---|
| 1980 | + bool match = false; |
---|
| 1981 | + |
---|
| 1982 | + raw_spin_lock_irq(&p->pi_lock); |
---|
| 1983 | + if (p->state == match_state || p->saved_state == match_state) |
---|
| 1984 | + match = true; |
---|
| 1985 | + raw_spin_unlock_irq(&p->pi_lock); |
---|
| 1986 | + |
---|
| 1987 | + return match; |
---|
| 1988 | +} |
---|
1901 | 1989 | |
---|
1902 | 1990 | /* |
---|
1903 | 1991 | * wait_task_inactive - wait for a thread to unschedule. |
---|
.. | .. |
---|
1943 | 2031 | * is actually now running somewhere else! |
---|
1944 | 2032 | */ |
---|
1945 | 2033 | while (task_running(rq, p)) { |
---|
1946 | | - if (match_state && unlikely(p->state != match_state)) |
---|
| 2034 | + if (match_state && !check_task_state(p, match_state)) |
---|
1947 | 2035 | return 0; |
---|
1948 | 2036 | cpu_relax(); |
---|
1949 | 2037 | } |
---|
.. | .. |
---|
1958 | 2046 | running = task_running(rq, p); |
---|
1959 | 2047 | queued = task_on_rq_queued(p); |
---|
1960 | 2048 | ncsw = 0; |
---|
1961 | | - if (!match_state || p->state == match_state) |
---|
| 2049 | + if (!match_state || p->state == match_state || |
---|
| 2050 | + p->saved_state == match_state) |
---|
1962 | 2051 | ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ |
---|
1963 | 2052 | task_rq_unlock(rq, p, &rf); |
---|
1964 | 2053 | |
---|
.. | .. |
---|
2033 | 2122 | EXPORT_SYMBOL_GPL(kick_process); |
---|
2034 | 2123 | |
---|
2035 | 2124 | /* |
---|
2036 | | - * ->cpus_allowed is protected by both rq->lock and p->pi_lock |
---|
| 2125 | + * ->cpus_ptr is protected by both rq->lock and p->pi_lock |
---|
2037 | 2126 | * |
---|
2038 | 2127 | * A few notes on cpu_active vs cpu_online: |
---|
2039 | 2128 | * |
---|
.. | .. |
---|
2073 | 2162 | for_each_cpu(dest_cpu, nodemask) { |
---|
2074 | 2163 | if (!cpu_active(dest_cpu)) |
---|
2075 | 2164 | continue; |
---|
2076 | | - if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
---|
| 2165 | + if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) |
---|
2077 | 2166 | return dest_cpu; |
---|
2078 | 2167 | } |
---|
2079 | 2168 | } |
---|
2080 | 2169 | |
---|
2081 | 2170 | for (;;) { |
---|
2082 | 2171 | /* Any allowed, online CPU? */ |
---|
2083 | | - for_each_cpu(dest_cpu, &p->cpus_allowed) { |
---|
| 2172 | + for_each_cpu(dest_cpu, p->cpus_ptr) { |
---|
2084 | 2173 | if (!is_cpu_allowed(p, dest_cpu)) |
---|
2085 | 2174 | continue; |
---|
2086 | 2175 | |
---|
.. | .. |
---|
2124 | 2213 | } |
---|
2125 | 2214 | |
---|
2126 | 2215 | /* |
---|
2127 | | - * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable. |
---|
| 2216 | + * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. |
---|
2128 | 2217 | */ |
---|
2129 | 2218 | static inline |
---|
2130 | 2219 | int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags, |
---|
.. | .. |
---|
2136 | 2225 | cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags, |
---|
2137 | 2226 | sibling_count_hint); |
---|
2138 | 2227 | else |
---|
2139 | | - cpu = cpumask_any(&p->cpus_allowed); |
---|
| 2228 | + cpu = cpumask_any(p->cpus_ptr); |
---|
2140 | 2229 | |
---|
2141 | 2230 | /* |
---|
2142 | 2231 | * In order not to call set_task_cpu() on a blocking task we need |
---|
2143 | | - * to rely on ttwu() to place the task on a valid ->cpus_allowed |
---|
| 2232 | + * to rely on ttwu() to place the task on a valid ->cpus_ptr |
---|
2144 | 2233 | * CPU. |
---|
2145 | 2234 | * |
---|
2146 | 2235 | * Since this is common to all placement strategies, this lives here. |
---|
.. | .. |
---|
2243 | 2332 | { |
---|
2244 | 2333 | activate_task(rq, p, en_flags); |
---|
2245 | 2334 | p->on_rq = TASK_ON_RQ_QUEUED; |
---|
2246 | | - |
---|
2247 | | - /* If a worker is waking up, notify the workqueue: */ |
---|
2248 | | - if (p->flags & PF_WQ_WORKER) |
---|
2249 | | - wq_worker_waking_up(p, cpu_of(rq)); |
---|
2250 | 2335 | } |
---|
2251 | 2336 | |
---|
2252 | 2337 | /* |
---|
.. | .. |
---|
2571 | 2656 | */ |
---|
2572 | 2657 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
---|
2573 | 2658 | smp_mb__after_spinlock(); |
---|
2574 | | - if (!(p->state & state)) |
---|
| 2659 | + if (!(p->state & state)) { |
---|
| 2660 | + /* |
---|
| 2661 | + * The task might be running due to a spinlock sleeper |
---|
| 2662 | + * wakeup. Check the saved state and set it to running |
---|
| 2663 | + * if the wakeup condition is true. |
---|
| 2664 | + */ |
---|
| 2665 | + if (!(wake_flags & WF_LOCK_SLEEPER)) { |
---|
| 2666 | + if (p->saved_state & state) { |
---|
| 2667 | + p->saved_state = TASK_RUNNING; |
---|
| 2668 | + success = 1; |
---|
| 2669 | + } |
---|
| 2670 | + } |
---|
2575 | 2671 | goto out; |
---|
| 2672 | + } |
---|
| 2673 | + |
---|
| 2674 | + /* |
---|
| 2675 | + * If this is a regular wakeup, then we can unconditionally |
---|
| 2676 | + * clear the saved state of a "lock sleeper". |
---|
| 2677 | + */ |
---|
| 2678 | + if (!(wake_flags & WF_LOCK_SLEEPER)) |
---|
| 2679 | + p->saved_state = TASK_RUNNING; |
---|
2576 | 2680 | |
---|
2577 | 2681 | trace_sched_waking(p); |
---|
2578 | 2682 | |
---|
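
The saved_state handling in try_to_wake_up() above pairs with the rtmutex-based "sleeping spinlock" slow path, which parks the caller's real sleep state before blocking so that a concurrent regular wakeup is not lost. A heavily simplified sketch of that counterpart (illustrative only, not a verbatim copy of the rtmutex code):

```c
/* Hedged sketch of the lock-sleeper side; names and locking are simplified. */
static void sleeping_lock_block_sketch(void)
{
	unsigned long flags;

	/* Stash the caller's real sleep state before blocking on the lock. */
	raw_spin_lock_irqsave(&current->pi_lock, flags);
	current->saved_state = current->state;
	current->state = TASK_UNINTERRUPTIBLE;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	schedule();	/* the lock owner wakes us via wake_up_lock_sleeper() */

	/*
	 * Restore the stashed state, unless a regular wakeup already promoted
	 * saved_state to TASK_RUNNING as in the hunk above.
	 */
	raw_spin_lock_irqsave(&current->pi_lock, flags);
	current->state = current->saved_state;
	current->saved_state = TASK_RUNNING;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
}
```
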
.. | .. |
---|
2672 | 2776 | } |
---|
2673 | 2777 | |
---|
2674 | 2778 | /** |
---|
2675 | | - * try_to_wake_up_local - try to wake up a local task with rq lock held |
---|
2676 | | - * @p: the thread to be awakened |
---|
2677 | | - * @rf: request-queue flags for pinning |
---|
2678 | | - * |
---|
2679 | | - * Put @p on the run-queue if it's not already there. The caller must |
---|
2680 | | - * ensure that this_rq() is locked, @p is bound to this_rq() and not |
---|
2681 | | - * the current task. |
---|
2682 | | - */ |
---|
2683 | | -static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf) |
---|
2684 | | -{ |
---|
2685 | | - struct rq *rq = task_rq(p); |
---|
2686 | | - |
---|
2687 | | - if (WARN_ON_ONCE(rq != this_rq()) || |
---|
2688 | | - WARN_ON_ONCE(p == current)) |
---|
2689 | | - return; |
---|
2690 | | - |
---|
2691 | | - lockdep_assert_held(&rq->lock); |
---|
2692 | | - |
---|
2693 | | - if (!raw_spin_trylock(&p->pi_lock)) { |
---|
2694 | | - /* |
---|
2695 | | - * This is OK, because current is on_cpu, which avoids it being |
---|
2696 | | - * picked for load-balance and preemption/IRQs are still |
---|
2697 | | - * disabled avoiding further scheduler activity on it and we've |
---|
2698 | | - * not yet picked a replacement task. |
---|
2699 | | - */ |
---|
2700 | | - rq_unlock(rq, rf); |
---|
2701 | | - raw_spin_lock(&p->pi_lock); |
---|
2702 | | - rq_relock(rq, rf); |
---|
2703 | | - } |
---|
2704 | | - |
---|
2705 | | - if (!(p->state & TASK_NORMAL)) |
---|
2706 | | - goto out; |
---|
2707 | | - |
---|
2708 | | - trace_sched_waking(p); |
---|
2709 | | - |
---|
2710 | | - if (!task_on_rq_queued(p)) { |
---|
2711 | | - if (p->in_iowait) { |
---|
2712 | | - delayacct_blkio_end(p); |
---|
2713 | | - atomic_dec(&rq->nr_iowait); |
---|
2714 | | - } |
---|
2715 | | - ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK); |
---|
2716 | | - } |
---|
2717 | | - |
---|
2718 | | - ttwu_do_wakeup(rq, p, 0, rf); |
---|
2719 | | - ttwu_stat(p, smp_processor_id(), 0); |
---|
2720 | | -out: |
---|
2721 | | - raw_spin_unlock(&p->pi_lock); |
---|
2722 | | -} |
---|
2723 | | - |
---|
2724 | | -/** |
---|
2725 | 2779 | * wake_up_process - Wake up a specific process |
---|
2726 | 2780 | * @p: The process to be woken up. |
---|
2727 | 2781 | * |
---|
.. | .. |
---|
2737 | 2791 | return try_to_wake_up(p, TASK_NORMAL, 0, 1); |
---|
2738 | 2792 | } |
---|
2739 | 2793 | EXPORT_SYMBOL(wake_up_process); |
---|
| 2794 | + |
---|
| 2795 | +/** |
---|
| 2796 | + * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock" |
---|
| 2797 | + * @p: The process to be woken up. |
---|
| 2798 | + * |
---|
| 2799 | + * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate |
---|
| 2800 | + * the nature of the wakeup. |
---|
| 2801 | + */ |
---|
| 2802 | +int wake_up_lock_sleeper(struct task_struct *p) |
---|
| 2803 | +{ |
---|
| 2804 | + return try_to_wake_up(p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER, 1); |
---|
| 2805 | +} |
---|
2740 | 2806 | |
---|
2741 | 2807 | int wake_up_state(struct task_struct *p, unsigned int state) |
---|
2742 | 2808 | { |
---|
.. | .. |
---|
2978 | 3044 | p->on_cpu = 0; |
---|
2979 | 3045 | #endif |
---|
2980 | 3046 | init_task_preempt_count(p); |
---|
| 3047 | +#ifdef CONFIG_HAVE_PREEMPT_LAZY |
---|
| 3048 | + task_thread_info(p)->preempt_lazy_count = 0; |
---|
| 3049 | +#endif |
---|
2981 | 3050 | #ifdef CONFIG_SMP |
---|
2982 | 3051 | plist_node_init(&p->pushable_tasks, MAX_PRIO); |
---|
2983 | 3052 | RB_CLEAR_NODE(&p->pushable_dl_tasks); |
---|
.. | .. |
---|
3018 | 3087 | #ifdef CONFIG_SMP |
---|
3019 | 3088 | /* |
---|
3020 | 3089 | * Fork balancing, do it here and not earlier because: |
---|
3021 | | - * - cpus_allowed can change in the fork path |
---|
| 3090 | + * - cpus_ptr can change in the fork path |
---|
3022 | 3091 | * - any previously selected CPU might disappear through hotplug |
---|
3023 | 3092 | * |
---|
3024 | 3093 | * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, |
---|
.. | .. |
---|
3307 | 3376 | * provided by mmdrop(), |
---|
3308 | 3377 | * - a sync_core for SYNC_CORE. |
---|
3309 | 3378 | */ |
---|
| 3379 | + /* |
---|
| 3380 | + * We use mmdrop_delayed() here so we don't have to do the |
---|
| 3381 | + * full __mmdrop() when we are the last user. |
---|
| 3382 | + */ |
---|
3310 | 3383 | if (mm) { |
---|
3311 | 3384 | membarrier_mm_sync_core_before_usermode(mm); |
---|
3312 | | - mmdrop(mm); |
---|
| 3385 | + mmdrop_delayed(mm); |
---|
3313 | 3386 | } |
---|
3314 | 3387 | if (unlikely(prev_state == TASK_DEAD)) { |
---|
3315 | 3388 | if (prev->sched_class->task_dead) |
---|
3316 | 3389 | prev->sched_class->task_dead(prev); |
---|
3317 | | - |
---|
3318 | | - /* |
---|
3319 | | - * Remove function-return probe instances associated with this |
---|
3320 | | - * task and put them back on the free list. |
---|
3321 | | - */ |
---|
3322 | | - kprobe_flush_task(prev); |
---|
3323 | | - |
---|
3324 | | - /* Task is done with its stack. */ |
---|
3325 | | - put_task_stack(prev); |
---|
3326 | 3390 | |
---|
3327 | 3391 | put_task_struct(prev); |
---|
3328 | 3392 | } |
---|
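
finish_task_switch() runs with preemption disabled, so the final mm free is deferred rather than done inline. A hedged sketch of what mmdrop_delayed() could look like, assuming an RCU-deferred free; the delayed_drop field name and the exact condition are illustrative, not quoted from the tree:

```c
/* Illustrative sketch only: defer the expensive __mmdrop() out of the
 * context-switch path by freeing the mm from an RCU callback instead. */
static void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

	__mmdrop(mm);
}

static inline void mmdrop_delayed(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
```
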
.. | .. |
---|
4001 | 4065 | BUG(); |
---|
4002 | 4066 | } |
---|
4003 | 4067 | |
---|
| 4068 | +static void migrate_disabled_sched(struct task_struct *p); |
---|
| 4069 | + |
---|
4004 | 4070 | /* |
---|
4005 | 4071 | * __schedule() is the main scheduler function. |
---|
4006 | 4072 | * |
---|
.. | .. |
---|
4071 | 4137 | rq_lock(rq, &rf); |
---|
4072 | 4138 | smp_mb__after_spinlock(); |
---|
4073 | 4139 | |
---|
| 4140 | + if (__migrate_disabled(prev)) |
---|
| 4141 | + migrate_disabled_sched(prev); |
---|
| 4142 | + |
---|
4074 | 4143 | /* Promote REQ to ACT */ |
---|
4075 | 4144 | rq->clock_update_flags <<= 1; |
---|
4076 | 4145 | update_rq_clock(rq); |
---|
.. | .. |
---|
4087 | 4156 | atomic_inc(&rq->nr_iowait); |
---|
4088 | 4157 | delayacct_blkio_start(); |
---|
4089 | 4158 | } |
---|
4090 | | - |
---|
4091 | | - /* |
---|
4092 | | - * If a worker went to sleep, notify and ask workqueue |
---|
4093 | | - * whether it wants to wake up a task to maintain |
---|
4094 | | - * concurrency. |
---|
4095 | | - */ |
---|
4096 | | - if (prev->flags & PF_WQ_WORKER) { |
---|
4097 | | - struct task_struct *to_wakeup; |
---|
4098 | | - |
---|
4099 | | - to_wakeup = wq_worker_sleeping(prev); |
---|
4100 | | - if (to_wakeup) |
---|
4101 | | - try_to_wake_up_local(to_wakeup, &rf); |
---|
4102 | | - } |
---|
4103 | 4159 | } |
---|
4104 | 4160 | switch_count = &prev->nvcsw; |
---|
4105 | 4161 | } |
---|
4106 | 4162 | |
---|
4107 | 4163 | next = pick_next_task(rq, prev, &rf); |
---|
4108 | 4164 | clear_tsk_need_resched(prev); |
---|
| 4165 | + clear_tsk_need_resched_lazy(prev); |
---|
4109 | 4166 | clear_preempt_need_resched(); |
---|
4110 | 4167 | |
---|
4111 | 4168 | if (likely(prev != next)) { |
---|
.. | .. |
---|
4157 | 4214 | |
---|
4158 | 4215 | static inline void sched_submit_work(struct task_struct *tsk) |
---|
4159 | 4216 | { |
---|
4160 | | - if (!tsk->state || tsk_is_pi_blocked(tsk)) |
---|
| 4217 | + if (!tsk->state) |
---|
4161 | 4218 | return; |
---|
| 4219 | + |
---|
| 4220 | + /* |
---|
| 4221 | + * If a worker went to sleep, notify and ask workqueue whether |
---|
| 4222 | + * it wants to wake up a task to maintain concurrency. |
---|
| 4223 | + * As this function is called inside the schedule() context, |
---|
| 4224 | + * we disable preemption to avoid it calling schedule() again |
---|
| 4225 | + * in the possible wakeup of a kworker. |
---|
| 4226 | + */ |
---|
| 4227 | + if (tsk->flags & PF_WQ_WORKER) { |
---|
| 4228 | + preempt_disable(); |
---|
| 4229 | + wq_worker_sleeping(tsk); |
---|
| 4230 | + preempt_enable_no_resched(); |
---|
| 4231 | + } |
---|
| 4232 | + |
---|
| 4233 | + if (tsk_is_pi_blocked(tsk)) |
---|
| 4234 | + return; |
---|
| 4235 | + |
---|
4162 | 4236 | /* |
---|
4163 | 4237 | * If we are going to sleep and we have plugged IO queued, |
---|
4164 | 4238 | * make sure to submit it to avoid deadlocks. |
---|
4165 | 4239 | */ |
---|
4166 | 4240 | if (blk_needs_flush_plug(tsk)) |
---|
4167 | 4241 | blk_schedule_flush_plug(tsk); |
---|
| 4242 | +} |
---|
| 4243 | + |
---|
| 4244 | +static void sched_update_worker(struct task_struct *tsk) |
---|
| 4245 | +{ |
---|
| 4246 | + if (tsk->flags & PF_WQ_WORKER) |
---|
| 4247 | + wq_worker_running(tsk); |
---|
4168 | 4248 | } |
---|
4169 | 4249 | |
---|
4170 | 4250 | asmlinkage __visible void __sched schedule(void) |
---|
.. | .. |
---|
4177 | 4257 | __schedule(false); |
---|
4178 | 4258 | sched_preempt_enable_no_resched(); |
---|
4179 | 4259 | } while (need_resched()); |
---|
| 4260 | + sched_update_worker(tsk); |
---|
4180 | 4261 | } |
---|
4181 | 4262 | EXPORT_SYMBOL(schedule); |
---|
4182 | 4263 | |
---|
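
With try_to_wake_up_local() removed, the workqueue bookkeeping is driven from sched_submit_work() and sched_update_worker() instead. The hooks only need the plain signatures implied by the calls above (sketched below; where exactly they are declared is not shown in this diff). The important change is that wq_worker_sleeping() no longer hands back a task to wake while the rq lock is held; any wakeup it triggers goes through the normal try_to_wake_up() path.

```c
/* Prototypes as implied by the calls in sched_submit_work()/sched_update_worker(). */
void wq_worker_running(struct task_struct *task);
void wq_worker_sleeping(struct task_struct *task);
```
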
.. | .. |
---|
4265 | 4346 | } while (need_resched()); |
---|
4266 | 4347 | } |
---|
4267 | 4348 | |
---|
| 4349 | +#ifdef CONFIG_PREEMPT_LAZY |
---|
| 4350 | +/* |
---|
| 4351 | + * If TIF_NEED_RESCHED is set then we allow being scheduled away, since it is |
---|
| 4352 | + * set by an RT task. Otherwise we try to avoid being scheduled out as long as |
---|
| 4353 | + * the preempt_lazy_count counter is > 0. |
---|
| 4354 | + */ |
---|
| 4355 | +static __always_inline int preemptible_lazy(void) |
---|
| 4356 | +{ |
---|
| 4357 | + if (test_thread_flag(TIF_NEED_RESCHED)) |
---|
| 4358 | + return 1; |
---|
| 4359 | + if (current_thread_info()->preempt_lazy_count) |
---|
| 4360 | + return 0; |
---|
| 4361 | + return 1; |
---|
| 4362 | +} |
---|
| 4363 | + |
---|
| 4364 | +#else |
---|
| 4365 | + |
---|
| 4366 | +static inline int preemptible_lazy(void) |
---|
| 4367 | +{ |
---|
| 4368 | + return 1; |
---|
| 4369 | +} |
---|
| 4370 | + |
---|
| 4371 | +#endif |
---|
| 4372 | + |
---|
4268 | 4373 | #ifdef CONFIG_PREEMPT |
---|
4269 | 4374 | /* |
---|
4270 | 4375 | * this is the entry point to schedule() from in-kernel preemption |
---|
.. | .. |
---|
4279 | 4384 | */ |
---|
4280 | 4385 | if (likely(!preemptible())) |
---|
4281 | 4386 | return; |
---|
4282 | | - |
---|
| 4387 | + if (!preemptible_lazy()) |
---|
| 4388 | + return; |
---|
4283 | 4389 | preempt_schedule_common(); |
---|
4284 | 4390 | } |
---|
4285 | 4391 | NOKPROBE_SYMBOL(preempt_schedule); |
---|
.. | .. |
---|
4304 | 4410 | enum ctx_state prev_ctx; |
---|
4305 | 4411 | |
---|
4306 | 4412 | if (likely(!preemptible())) |
---|
| 4413 | + return; |
---|
| 4414 | + |
---|
| 4415 | + if (!preemptible_lazy()) |
---|
4307 | 4416 | return; |
---|
4308 | 4417 | |
---|
4309 | 4418 | do { |
---|
.. | .. |
---|
4951 | 5060 | * the entire root_domain to become SCHED_DEADLINE. We |
---|
4952 | 5061 | * will also fail if there's no bandwidth available. |
---|
4953 | 5062 | */ |
---|
4954 | | - if (!cpumask_subset(span, &p->cpus_allowed) || |
---|
| 5063 | + if (!cpumask_subset(span, p->cpus_ptr) || |
---|
4955 | 5064 | rq->rd->dl_bw.bw == 0) { |
---|
4956 | 5065 | task_rq_unlock(rq, p, &rf); |
---|
4957 | 5066 | return -EPERM; |
---|
.. | .. |
---|
5569 | 5678 | goto out_unlock; |
---|
5570 | 5679 | |
---|
5571 | 5680 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
---|
5572 | | - cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); |
---|
| 5681 | + cpumask_and(mask, &p->cpus_mask, cpu_active_mask); |
---|
5573 | 5682 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
---|
5574 | 5683 | |
---|
5575 | 5684 | out_unlock: |
---|
.. | .. |
---|
6106 | 6215 | |
---|
6107 | 6216 | /* Set the preempt count _outside_ the spinlocks! */ |
---|
6108 | 6217 | init_idle_preempt_count(idle, cpu); |
---|
6109 | | - |
---|
| 6218 | +#ifdef CONFIG_HAVE_PREEMPT_LAZY |
---|
| 6219 | + task_thread_info(idle)->preempt_lazy_count = 0; |
---|
| 6220 | +#endif |
---|
6110 | 6221 | /* |
---|
6111 | 6222 | * The idle tasks have their own, simple scheduling class: |
---|
6112 | 6223 | */ |
---|
.. | .. |
---|
6145 | 6256 | * allowed nodes is unnecessary. Thus, cpusets are not |
---|
6146 | 6257 | * applicable for such threads. This prevents checking for |
---|
6147 | 6258 | * success of set_cpus_allowed_ptr() on all attached tasks |
---|
6148 | | - * before cpus_allowed may be changed. |
---|
| 6259 | + * before cpus_mask may be changed. |
---|
6149 | 6260 | */ |
---|
6150 | 6261 | if (p->flags & PF_NO_SETAFFINITY) { |
---|
6151 | 6262 | ret = -EINVAL; |
---|
.. | .. |
---|
6172 | 6283 | if (curr_cpu == target_cpu) |
---|
6173 | 6284 | return 0; |
---|
6174 | 6285 | |
---|
6175 | | - if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed)) |
---|
| 6286 | + if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) |
---|
6176 | 6287 | return -EINVAL; |
---|
6177 | 6288 | |
---|
6178 | 6289 | /* TODO: This is not properly updating schedstats */ |
---|
.. | .. |
---|
6211 | 6322 | #endif /* CONFIG_NUMA_BALANCING */ |
---|
6212 | 6323 | |
---|
6213 | 6324 | #ifdef CONFIG_HOTPLUG_CPU |
---|
| 6325 | + |
---|
6214 | 6326 | /* |
---|
6215 | 6327 | * Ensure that the idle task is using init_mm right before its CPU goes |
---|
6216 | 6328 | * offline. |
---|
.. | .. |
---|
6310 | 6422 | BUG_ON(!next); |
---|
6311 | 6423 | put_prev_task(rq, next); |
---|
6312 | 6424 | |
---|
| 6425 | + WARN_ON_ONCE(__migrate_disabled(next)); |
---|
| 6426 | + |
---|
6313 | 6427 | /* |
---|
6314 | | - * Rules for changing task_struct::cpus_allowed are holding |
---|
| 6428 | + * Rules for changing task_struct::cpus_mask are holding |
---|
6315 | 6429 | * both pi_lock and rq->lock, such that holding either |
---|
6316 | 6430 | * stabilizes the mask. |
---|
6317 | 6431 | * |
---|
.. | .. |
---|
6777 | 6891 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
---|
6778 | 6892 | static inline int preempt_count_equals(int preempt_offset) |
---|
6779 | 6893 | { |
---|
6780 | | - int nested = preempt_count() + rcu_preempt_depth(); |
---|
| 6894 | + int nested = preempt_count() + sched_rcu_preempt_depth(); |
---|
6781 | 6895 | |
---|
6782 | 6896 | return (nested == preempt_offset); |
---|
6783 | 6897 | } |
---|
.. | .. |
---|
8014 | 8128 | }; |
---|
8015 | 8129 | |
---|
8016 | 8130 | #undef CREATE_TRACE_POINTS |
---|
| 8131 | + |
---|
| 8132 | +#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) |
---|
| 8133 | + |
---|
| 8134 | +static inline void |
---|
| 8135 | +update_nr_migratory(struct task_struct *p, long delta) |
---|
| 8136 | +{ |
---|
| 8137 | + if (unlikely((p->sched_class == &rt_sched_class || |
---|
| 8138 | + p->sched_class == &dl_sched_class) && |
---|
| 8139 | + p->nr_cpus_allowed > 1)) { |
---|
| 8140 | + if (p->sched_class == &rt_sched_class) |
---|
| 8141 | + task_rq(p)->rt.rt_nr_migratory += delta; |
---|
| 8142 | + else |
---|
| 8143 | + task_rq(p)->dl.dl_nr_migratory += delta; |
---|
| 8144 | + } |
---|
| 8145 | +} |
---|
| 8146 | + |
---|
| 8147 | +static inline void |
---|
| 8148 | +migrate_disable_update_cpus_allowed(struct task_struct *p) |
---|
| 8149 | +{ |
---|
| 8150 | + p->cpus_ptr = cpumask_of(smp_processor_id()); |
---|
| 8151 | + update_nr_migratory(p, -1); |
---|
| 8152 | + p->nr_cpus_allowed = 1; |
---|
| 8153 | +} |
---|
| 8154 | + |
---|
| 8155 | +static inline void |
---|
| 8156 | +migrate_enable_update_cpus_allowed(struct task_struct *p) |
---|
| 8157 | +{ |
---|
| 8158 | + struct rq *rq; |
---|
| 8159 | + struct rq_flags rf; |
---|
| 8160 | + |
---|
| 8161 | + rq = task_rq_lock(p, &rf); |
---|
| 8162 | + p->cpus_ptr = &p->cpus_mask; |
---|
| 8163 | + p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask); |
---|
| 8164 | + update_nr_migratory(p, 1); |
---|
| 8165 | + task_rq_unlock(rq, p, &rf); |
---|
| 8166 | +} |
---|
| 8167 | + |
---|
| 8168 | +void migrate_disable(void) |
---|
| 8169 | +{ |
---|
| 8170 | + preempt_disable(); |
---|
| 8171 | + |
---|
| 8172 | + if (++current->migrate_disable == 1) { |
---|
| 8173 | + this_rq()->nr_pinned++; |
---|
| 8174 | + preempt_lazy_disable(); |
---|
| 8175 | +#ifdef CONFIG_SCHED_DEBUG |
---|
| 8176 | + WARN_ON_ONCE(current->pinned_on_cpu >= 0); |
---|
| 8177 | + current->pinned_on_cpu = smp_processor_id(); |
---|
| 8178 | +#endif |
---|
| 8179 | + } |
---|
| 8180 | + |
---|
| 8181 | + preempt_enable(); |
---|
| 8182 | +} |
---|
| 8183 | +EXPORT_SYMBOL(migrate_disable); |
---|
| 8184 | + |
---|
| 8185 | +static void migrate_disabled_sched(struct task_struct *p) |
---|
| 8186 | +{ |
---|
| 8187 | + if (p->migrate_disable_scheduled) |
---|
| 8188 | + return; |
---|
| 8189 | + |
---|
| 8190 | + migrate_disable_update_cpus_allowed(p); |
---|
| 8191 | + p->migrate_disable_scheduled = 1; |
---|
| 8192 | +} |
---|
| 8193 | + |
---|
| 8194 | +static DEFINE_PER_CPU(struct cpu_stop_work, migrate_work); |
---|
| 8195 | +static DEFINE_PER_CPU(struct migration_arg, migrate_arg); |
---|
| 8196 | + |
---|
| 8197 | +void migrate_enable(void) |
---|
| 8198 | +{ |
---|
| 8199 | + struct task_struct *p = current; |
---|
| 8200 | + struct rq *rq = this_rq(); |
---|
| 8201 | + int cpu = task_cpu(p); |
---|
| 8202 | + |
---|
| 8203 | + WARN_ON_ONCE(p->migrate_disable <= 0); |
---|
| 8204 | + if (p->migrate_disable > 1) { |
---|
| 8205 | + p->migrate_disable--; |
---|
| 8206 | + return; |
---|
| 8207 | + } |
---|
| 8208 | + |
---|
| 8209 | + preempt_disable(); |
---|
| 8210 | + |
---|
| 8211 | +#ifdef CONFIG_SCHED_DEBUG |
---|
| 8212 | + WARN_ON_ONCE(current->pinned_on_cpu != cpu); |
---|
| 8213 | + current->pinned_on_cpu = -1; |
---|
| 8214 | +#endif |
---|
| 8215 | + |
---|
| 8216 | + WARN_ON_ONCE(rq->nr_pinned < 1); |
---|
| 8217 | + |
---|
| 8218 | + p->migrate_disable = 0; |
---|
| 8219 | + rq->nr_pinned--; |
---|
| 8220 | +#ifdef CONFIG_HOTPLUG_CPU |
---|
| 8221 | + if (rq->nr_pinned == 0 && unlikely(!cpu_active(cpu)) && |
---|
| 8222 | + takedown_cpu_task) |
---|
| 8223 | + wake_up_process(takedown_cpu_task); |
---|
| 8224 | +#endif |
---|
| 8225 | + |
---|
| 8226 | + if (!p->migrate_disable_scheduled) |
---|
| 8227 | + goto out; |
---|
| 8228 | + |
---|
| 8229 | + p->migrate_disable_scheduled = 0; |
---|
| 8230 | + |
---|
| 8231 | + migrate_enable_update_cpus_allowed(p); |
---|
| 8232 | + |
---|
| 8233 | + WARN_ON(smp_processor_id() != cpu); |
---|
| 8234 | + if (!is_cpu_allowed(p, cpu)) { |
---|
| 8235 | + struct migration_arg __percpu *arg; |
---|
| 8236 | + struct cpu_stop_work __percpu *work; |
---|
| 8237 | + struct rq_flags rf; |
---|
| 8238 | + |
---|
| 8239 | + work = this_cpu_ptr(&migrate_work); |
---|
| 8240 | + arg = this_cpu_ptr(&migrate_arg); |
---|
| 8241 | + WARN_ON_ONCE(!arg->done && !work->disabled && work->arg); |
---|
| 8242 | + |
---|
| 8243 | + arg->task = p; |
---|
| 8244 | + arg->done = false; |
---|
| 8245 | + |
---|
| 8246 | + rq = task_rq_lock(p, &rf); |
---|
| 8247 | + update_rq_clock(rq); |
---|
| 8248 | + arg->dest_cpu = select_fallback_rq(cpu, p); |
---|
| 8249 | + task_rq_unlock(rq, p, &rf); |
---|
| 8250 | + |
---|
| 8251 | + stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, |
---|
| 8252 | + arg, work); |
---|
| 8253 | + tlb_migrate_finish(p->mm); |
---|
| 8254 | + } |
---|
| 8255 | + |
---|
| 8256 | +out: |
---|
| 8257 | + preempt_lazy_enable(); |
---|
| 8258 | + preempt_enable(); |
---|
| 8259 | +} |
---|
| 8260 | +EXPORT_SYMBOL(migrate_enable); |
---|
| 8261 | + |
---|
| 8262 | +int cpu_nr_pinned(int cpu) |
---|
| 8263 | +{ |
---|
| 8264 | + struct rq *rq = cpu_rq(cpu); |
---|
| 8265 | + |
---|
| 8266 | + return rq->nr_pinned; |
---|
| 8267 | +} |
---|
| 8268 | + |
---|
| 8269 | +#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) |
---|
| 8270 | +static void migrate_disabled_sched(struct task_struct *p) |
---|
| 8271 | +{ |
---|
| 8272 | +} |
---|
| 8273 | + |
---|
| 8274 | +void migrate_disable(void) |
---|
| 8275 | +{ |
---|
| 8276 | +#ifdef CONFIG_SCHED_DEBUG |
---|
| 8277 | + current->migrate_disable++; |
---|
| 8278 | +#endif |
---|
| 8279 | + barrier(); |
---|
| 8280 | +} |
---|
| 8281 | +EXPORT_SYMBOL(migrate_disable); |
---|
| 8282 | + |
---|
| 8283 | +void migrate_enable(void) |
---|
| 8284 | +{ |
---|
| 8285 | +#ifdef CONFIG_SCHED_DEBUG |
---|
| 8286 | + struct task_struct *p = current; |
---|
| 8287 | + |
---|
| 8288 | + WARN_ON_ONCE(p->migrate_disable <= 0); |
---|
| 8289 | + p->migrate_disable--; |
---|
| 8290 | +#endif |
---|
| 8291 | + barrier(); |
---|
| 8292 | +} |
---|
| 8293 | +EXPORT_SYMBOL(migrate_enable); |
---|
| 8294 | +#else |
---|
| 8295 | +static void migrate_disabled_sched(struct task_struct *p) |
---|
| 8296 | +{ |
---|
| 8297 | +} |
---|
| 8298 | +#endif |
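
For reference, the usual calling pattern for the primitives added above: migrate_disable() pins the task to its current CPU while leaving it preemptible, and migrate_enable() may immediately push it away again via the stopper if the affinity changed in between. The per-CPU variable below is purely illustrative.

```c
/* demo_counter is a hypothetical per-CPU variable, used only to illustrate
 * the migrate_disable()/migrate_enable() bracket. */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void touch_this_cpu_counter(void)
{
	migrate_disable();		/* stay on this CPU, but remain preemptible */
	this_cpu_inc(demo_counter);
	migrate_enable();		/* may trigger a deferred migration (see above) */
}
```
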
---|