| old | new | code |
|---|---|---|
| .. | .. | |
| 728 | 728 | bri 6f; |
| 729 | 729 | /* MS: Return to kernel state. */ |
| 730 | 730 | 2: |
| 731 | | -#ifdef CONFIG_PREEMPT |
| | 731 | +#ifdef CONFIG_PREEMPTION |
| 732 | 732 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; |
| 733 | 733 | /* MS: get preempt_count from thread info */ |
| 734 | 734 | lwi r5, r11, TI_PREEMPT_COUNT; |
| .. | .. | |
| 738 | 738 | andi r5, r5, _TIF_NEED_RESCHED; |
| 739 | 739 | beqi r5, restore /* if zero jump over */ |
| 740 | 740 | |
| 741 | | -preempt: |
| 742 | 741 | /* interrupts are off that's why I am calling preempt_chedule_irq */ |
| 743 | 742 | bralid r15, preempt_schedule_irq |
| 744 | 743 | nop |
| 745 | | - lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
| 746 | | - lwi r5, r11, TI_FLAGS; /* get flags in thread info */ |
| 747 | | - andi r5, r5, _TIF_NEED_RESCHED; |
| 748 | | - bnei r5, preempt /* if non zero jump to resched */ |
| 749 | 744 | restore: |
| 750 | 745 | #endif |
| 751 | 746 | VM_OFF /* MS: turn off MMU */ |
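This hunk switches the guard from `CONFIG_PREEMPT` to `CONFIG_PREEMPTION` and drops the hand-rolled `preempt:` loop that re-read `TI_FLAGS` and branched back after the call. The re-test is redundant because the generic `preempt_schedule_irq()` already loops on the scheduler side until `need_resched()` clears. A minimal C sketch of that behaviour follows; the helper name is hypothetical and the loop shape mirrors the generic implementation in kernel/sched/core.c, not the MicroBlaze assembly itself:

```c
/*
 * Sketch only: roughly what the generic preempt_schedule_irq() does.
 * The real function lives in kernel/sched/core.c and is what the
 * assembly above calls via "bralid r15, preempt_schedule_irq".
 */
static void preempt_schedule_irq_sketch(void)
{
	do {
		preempt_disable();                 /* block nested preemption      */
		local_irq_enable();                /* schedule with IRQs enabled   */
		__schedule(true);                  /* preempt path; exact argument
		                                      varies by kernel version     */
		local_irq_disable();               /* back to the IRQs-off state   */
		sched_preempt_enable_no_resched(); /* drop the preempt count       */
	} while (need_resched());                  /* scheduler re-tests itself    */
}
```

Because this loop already re-checks `need_resched()`, the assembly caller can fall straight through to `restore:` once the call returns.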
| old | new | code |
|---|---|---|
| .. | .. | |
| 963 | 958 | nop |
| 964 | 959 | |
| 965 | 960 | ENTRY(_reset) |
| | 961 | + VM_OFF |
| 966 | 962 | brai 0; /* Jump to reset vector */ |
| 967 | 963 | |
| 968 | 964 | /* These are compiled and loaded into high memory, then |
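In this second hunk, `_reset` now executes `VM_OFF` (the same "turn off MMU" macro used on the restore path above) before `brai 0;`, so the jump to the reset vector at physical address 0 is taken with the MMU already disabled rather than in virtual mode.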