```diff
@@ -17,7 +17,7 @@
 
 bool pv_is_native_spin_unlock(void)
 {
-	return pv_lock_ops.queued_spin_unlock.func ==
+	return pv_ops.lock.queued_spin_unlock.func ==
 		__raw_callee_save___native_queued_spin_unlock;
 }
 
@@ -29,17 +29,6 @@
 
 bool pv_is_native_vcpu_is_preempted(void)
 {
-	return pv_lock_ops.vcpu_is_preempted.func ==
+	return pv_ops.lock.vcpu_is_preempted.func ==
 		__raw_callee_save___native_vcpu_is_preempted;
 }
-
-struct pv_lock_ops pv_lock_ops = {
-#ifdef CONFIG_SMP
-	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
-	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
-	.wait = paravirt_nop,
-	.kick = paravirt_nop,
-	.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
-#endif /* SMP */
-};
-EXPORT_SYMBOL(pv_lock_ops);
```
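
The change is mechanical: the standalone `struct pv_lock_ops pv_lock_ops` instance (and its export) goes away, and the lock function pointers are instead reached through the `lock` sub-structure of the single `pv_ops` template; the removed native defaults are presumably re-established in the unified `pv_ops` initializer elsewhere in this series. Below is a minimal, self-contained sketch of the pattern, not the kernel's actual definitions: the `lock_ops`/`ops` types and native helpers here are illustrative stand-ins, and the real kernel wraps these pointers in `paravirt_callee_save` structs, which is why the functions in the diff compare against the `.func` member rather than the pointer itself.

```c
/*
 * Simplified sketch of the pattern this diff adopts: all paravirt
 * function pointers live in one top-level pv_ops structure, grouped
 * into sub-structures such as .lock. Types and helper names are
 * illustrative, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

struct lock_ops {
	void (*queued_spin_unlock)(int *lock);
	bool (*vcpu_is_preempted)(long cpu);
};

struct ops {
	struct lock_ops lock;	/* other groups (cpu, irq, mmu, ...) omitted */
};

static void native_queued_spin_unlock(int *lock) { *lock = 0; }
static bool native_vcpu_is_preempted(long cpu) { (void)cpu; return false; }

/* Defaults point at the native implementations; a hypervisor guest
 * would patch these pointers at boot. */
static struct ops pv_ops = {
	.lock = {
		.queued_spin_unlock = native_queued_spin_unlock,
		.vcpu_is_preempted  = native_vcpu_is_preempted,
	},
};

/* Mirrors pv_is_native_spin_unlock(): "native" simply means the
 * pointer was never patched away from the default. */
static bool is_native_spin_unlock(void)
{
	return pv_ops.lock.queued_spin_unlock == native_queued_spin_unlock;
}

int main(void)
{
	printf("native unlock: %d\n", is_native_spin_unlock());
	return 0;
}
```

One consequence of the consolidation is visible right in this hunk: callers no longer need a separately exported `pv_lock_ops` symbol, since every group hangs off the one `pv_ops` instance.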