From e3e12f52b214121840b44c91de5b3e5af5d3eb84 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 06 Nov 2023 03:04:41 +0000
Subject: [PATCH] rk3568 rt init

---
 kernel/kernel/futex.c | 126 ++++++++++++++++++++++++++++++++----------
 1 file changed, 96 insertions(+), 30 deletions(-)

diff --git a/kernel/kernel/futex.c b/kernel/kernel/futex.c
index 3c67da9..81090f7 100644
--- a/kernel/kernel/futex.c
+++ b/kernel/kernel/futex.c
@@ -962,7 +962,9 @@
 		if (head->next != next) {
 			/* retain curr->pi_lock for the loop invariant */
 			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+			raw_spin_unlock_irq(&curr->pi_lock);
 			spin_unlock(&hb->lock);
+			raw_spin_lock_irq(&curr->pi_lock);
 			put_pi_state(pi_state);
 			continue;
 		}
@@ -1573,6 +1575,7 @@
 	struct task_struct *new_owner;
 	bool postunlock = false;
 	DEFINE_WAKE_Q(wake_q);
+	DEFINE_WAKE_Q(wake_sleeper_q);
 	int ret = 0;
 
 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
@@ -1622,14 +1625,15 @@
 	 * not fail.
 	 */
 	pi_state_update_owner(pi_state, new_owner);
-	postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+	postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
+					     &wake_sleeper_q);
 
 out_unlock:
 	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 
 	if (postunlock)
-		rt_mutex_postunlock(&wake_q);
+		rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
 
 	return ret;
 }
@@ -2253,6 +2257,16 @@
 			requeue_pi_wake_futex(this, &key2, hb2);
 			drop_count++;
 			continue;
+		} else if (ret == -EAGAIN) {
+			/*
+			 * Waiter was woken by timeout or
+			 * signal and has set pi_blocked_on to
+			 * PI_WAKEUP_INPROGRESS before we
+			 * tried to enqueue it on the rtmutex.
+			 */
+			this->pi_state = NULL;
+			put_pi_state(pi_state);
+			continue;
 		} else if (ret) {
 			/*
 			 * rt_mutex_start_proxy_lock() detected a
@@ -2816,10 +2830,9 @@
 	if (abs_time) {
 		to = &timeout;
 
-		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
-				      CLOCK_REALTIME : CLOCK_MONOTONIC,
-				      HRTIMER_MODE_ABS);
-		hrtimer_init_sleeper(to, current);
+		hrtimer_init_sleeper_on_stack(to, (flags & FLAGS_CLOCKRT) ?
+					      CLOCK_REALTIME : CLOCK_MONOTONIC,
+					      HRTIMER_MODE_ABS, current);
 		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
 					     current->timer_slack_ns);
 	}
@@ -2917,9 +2930,8 @@
 
 	if (time) {
 		to = &timeout;
-		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
-				      HRTIMER_MODE_ABS);
-		hrtimer_init_sleeper(to, current);
+		hrtimer_init_sleeper_on_stack(to, CLOCK_REALTIME,
+					      HRTIMER_MODE_ABS, current);
 		hrtimer_set_expires(&to->timer, *time);
 	}
 
@@ -2982,7 +2994,7 @@
 		goto no_block;
 	}
 
-	rt_mutex_init_waiter(&rt_waiter);
+	rt_mutex_init_waiter(&rt_waiter, false);
 
 	/*
 	 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
@@ -2998,6 +3010,14 @@
 	 * before __rt_mutex_start_proxy_lock() is done.
 	 */
 	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
+	/*
+	 * the migrate_disable() here disables migration in the in_atomic() fast
+	 * path which is enabled again in the following spin_unlock(). We have
+	 * one migrate_disable() pending in the slow-path which is reversed
+	 * after the raw_spin_unlock_irq() where we leave the atomic context.
+	 */
+	migrate_disable();
+
 	spin_unlock(q.lock_ptr);
 	/*
 	 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
@@ -3006,6 +3026,7 @@
 	 */
 	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
 	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
+	migrate_enable();
 
 	if (ret) {
 		if (ret == 1)
@@ -3140,10 +3161,19 @@
 		 * rt_waiter. Also see the WARN in wake_futex_pi().
 		 */
 		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+		/*
+		 * Magic trickery for now to make the RT migrate disable
+		 * logic happy. The following spin_unlock() happens with
+		 * interrupts disabled so the internal migrate_enable()
+		 * won't undo the migrate_disable() which was issued when
+		 * locking hb->lock.
+		 */
+		migrate_disable();
 		spin_unlock(&hb->lock);
 
 		/* drops pi_state->pi_mutex.wait_lock */
 		ret = wake_futex_pi(uaddr, uval, pi_state);
+		migrate_enable();
 
 		put_pi_state(pi_state);
@@ -3314,7 +3344,7 @@
 {
 	struct hrtimer_sleeper timeout, *to = NULL;
 	struct rt_mutex_waiter rt_waiter;
-	struct futex_hash_bucket *hb;
+	struct futex_hash_bucket *hb, *hb2;
 	union futex_key key2 = FUTEX_KEY_INIT;
 	struct futex_q q = futex_q_init;
 	int res, ret;
@@ -3330,10 +3360,9 @@
 
 	if (abs_time) {
 		to = &timeout;
-		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
-				      CLOCK_REALTIME : CLOCK_MONOTONIC,
-				      HRTIMER_MODE_ABS);
-		hrtimer_init_sleeper(to, current);
+		hrtimer_init_sleeper_on_stack(to, (flags & FLAGS_CLOCKRT) ?
+					      CLOCK_REALTIME : CLOCK_MONOTONIC,
+					      HRTIMER_MODE_ABS, current);
 		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
 					     current->timer_slack_ns);
 	}
@@ -3342,7 +3371,7 @@
 	 * The waiter is allocated on our stack, manipulated by the requeue
 	 * code while we sleep on uaddr.
 	 */
-	rt_mutex_init_waiter(&rt_waiter);
+	rt_mutex_init_waiter(&rt_waiter, false);
 
 	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
 	if (unlikely(ret != 0))
@@ -3373,20 +3402,55 @@
 	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
 	futex_wait_queue_me(hb, &q, to);
 
-	spin_lock(&hb->lock);
-	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
-	spin_unlock(&hb->lock);
-	if (ret)
-		goto out_put_keys;
+	/*
+	 * On RT we must avoid races with requeue and trying to block
+	 * on two mutexes (hb->lock and uaddr2's rtmutex) by
+	 * serializing access to pi_blocked_on with pi_lock.
+	 */
+	raw_spin_lock_irq(&current->pi_lock);
+	if (current->pi_blocked_on) {
+		/*
+		 * We have been requeued or are in the process of
+		 * being requeued.
+		 */
+		raw_spin_unlock_irq(&current->pi_lock);
+	} else {
+		/*
+		 * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
+		 * prevents a concurrent requeue from moving us to the
+		 * uaddr2 rtmutex. After that we can safely acquire
+		 * (and possibly block on) hb->lock.
+		 */
+		current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
+		raw_spin_unlock_irq(&current->pi_lock);
+
+		spin_lock(&hb->lock);
+
+		/*
+		 * Clean up pi_blocked_on. We might leak it otherwise
+		 * when we succeeded with the hb->lock in the fast
+		 * path.
+		 */
+		raw_spin_lock_irq(&current->pi_lock);
+		current->pi_blocked_on = NULL;
+		raw_spin_unlock_irq(&current->pi_lock);
+
+		ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+		spin_unlock(&hb->lock);
+		if (ret)
+			goto out_put_keys;
+	}
 
 	/*
-	 * In order for us to be here, we know our q.key == key2, and since
-	 * we took the hb->lock above, we also know that futex_requeue() has
-	 * completed and we no longer have to concern ourselves with a wakeup
-	 * race with the atomic proxy lock acquisition by the requeue code. The
-	 * futex_requeue dropped our key1 reference and incremented our key2
-	 * reference count.
+	 * In order to be here, we have either been requeued, are in
+	 * the process of being requeued, or requeue successfully
+	 * acquired uaddr2 on our behalf. If pi_blocked_on was
+	 * non-null above, we may be racing with a requeue. Do not
+	 * rely on q->lock_ptr to be hb2->lock until after blocking on
+	 * hb->lock or hb2->lock. The futex_requeue dropped our key1
+	 * reference and incremented our key2 reference count.
 	 */
+	hb2 = hash_futex(&key2);
 
 	/* Check if the requeue code acquired the second futex for us. */
 	if (!q.rt_waiter) {
@@ -3395,14 +3459,15 @@
 		 * did a lock-steal - fix up the PI-state in that case.
 		 */
 		if (q.pi_state && (q.pi_state->owner != current)) {
-			spin_lock(q.lock_ptr);
+			spin_lock(&hb2->lock);
+			BUG_ON(&hb2->lock != q.lock_ptr);
 			ret = fixup_pi_state_owner(uaddr2, &q, current);
 			/*
 			 * Drop the reference to the pi state which
 			 * the requeue_pi() code acquired for us.
 			 */
 			put_pi_state(q.pi_state);
-			spin_unlock(q.lock_ptr);
+			spin_unlock(&hb2->lock);
 			/*
 			 * Adjust the return value. It's either -EFAULT or
 			 * success (1) but the caller expects 0 for success.
@@ -3421,7 +3486,8 @@
 		pi_mutex = &q.pi_state->pi_mutex;
 		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
 
-		spin_lock(q.lock_ptr);
+		spin_lock(&hb2->lock);
+		BUG_ON(&hb2->lock != q.lock_ptr);
 		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
 			ret = 0;
--
Gitblit v1.6.2
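
Note on the PI_WAKEUP_INPROGRESS hunks above: on PREEMPT_RT a waiter on
uaddr that wakes early (timeout or signal) must not block on hb->lock
while futex_requeue() is concurrently enqueueing it on uaddr2's rtmutex,
because a task would then be blocked on two locks at once. The waiter
therefore publishes PI_WAKEUP_INPROGRESS in pi_blocked_on under pi_lock
before taking hb->lock, and the requeue side backs off with -EAGAIN when
it sees the marker. The stand-alone userspace sketch below models only
this handshake, under stated assumptions: the file name pi_wakeup_model.c
and the WAKEUP_INPROGRESS sentinel are illustrative, a pthread mutex
stands in for the raw pi_lock, and the kernel's hash-bucket queueing and
rtmutex proxy locking are deliberately omitted.

/* pi_wakeup_model.c - build with: gcc -O2 -pthread pi_wakeup_model.c */
#include <pthread.h>
#include <stdio.h>

/* Illustrative sentinel standing in for the kernel's PI_WAKEUP_INPROGRESS. */
#define WAKEUP_INPROGRESS ((void *)1)

static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER; /* models raw pi_lock */
static void *pi_blocked_on;                  /* models task_struct::pi_blocked_on */

/* Waiter side: mirrors the futex_wait_requeue_pi() hunk. */
static void *waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&pi_lock);
	if (pi_blocked_on) {
		/* Already requeued (or mid-requeue): leave hb->lock alone. */
		pthread_mutex_unlock(&pi_lock);
		puts("waiter: requeue won the race");
	} else {
		/* Publish the marker so a concurrent requeue backs off. */
		pi_blocked_on = WAKEUP_INPROGRESS;
		pthread_mutex_unlock(&pi_lock);

		/* ...now safe to block on hb->lock and handle the early wakeup... */

		pthread_mutex_lock(&pi_lock);
		pi_blocked_on = NULL; /* clean up, as the patch does */
		pthread_mutex_unlock(&pi_lock);
		puts("waiter: handled early wakeup itself");
	}
	return NULL;
}

/* Requeue side: mirrors the new ret == -EAGAIN branch in futex_requeue(). */
static void *requeuer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&pi_lock);
	if (pi_blocked_on == WAKEUP_INPROGRESS) {
		/* Waiter woke first: skip it, as the -EAGAIN case does. */
		pthread_mutex_unlock(&pi_lock);
		puts("requeuer: waiter woke first, skipping (-EAGAIN)");
	} else {
		pi_blocked_on = &pi_blocked_on; /* "enqueued on uaddr2's rtmutex" */
		pthread_mutex_unlock(&pi_lock);
		puts("requeuer: moved waiter to uaddr2");
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, waiter, NULL);
	pthread_create(&b, NULL, requeuer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Whichever side takes pi_lock first wins; exactly one of them proceeds
against the waiter at a time, which is the property the kernel hunks
enforce. (The model omits the waiter dequeueing itself from the hash
bucket, which in the kernel is what hb->lock serializes.)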