@@ ... @@
 #define LOADB(reg, addr, handler) EXC(lb, LD_INSN, reg, addr, handler)
 #define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)

-#define _PREF(hint, addr, type) \
+#ifdef CONFIG_CPU_HAS_PREFETCH
+# define _PREF(hint, addr, type) \
 .if \mode == LEGACY_MODE; \
- PREF(hint, addr); \
+ kernel_pref(hint, addr); \
 .else; \
 .if ((\from == USEROP) && (type == SRC_PREFETCH)) || \
 ((\to == USEROP) && (type == DST_PREFETCH)); \
@@ ... @@
 * used later on. Therefore use $v1. \
 */ \
 .set at=v1; \
- PREFE(hint, addr); \
+ user_pref(hint, addr); \
 .set noat; \
 .else; \
- PREF(hint, addr); \
+ kernel_pref(hint, addr); \
 .endif; \
 .endif
+#else
+# define _PREF(hint, addr, type)
+#endif

 #define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
 #define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)
@@ ... @@
 and t0, src, ADDRMASK
 PREFS( 0, 2*32(src) )
 PREFD( 1, 2*32(dst) )
-#ifndef CONFIG_CPU_MIPSR6
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
 bnez t1, .Ldst_unaligned\@
 nop
 bnez t0, .Lsrc_unaligned_dst_aligned\@
-#else
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
 or t0, t0, t1
 bnez t0, .Lcopy_unaligned_bytes\@
-#endif
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
 /*
 * use delay slot for fall-through
 * src and dst are aligned; need to compute rem
@@ ... @@
 bne rem, len, 1b
 .set noreorder

-#ifndef CONFIG_CPU_MIPSR6
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
 /*
 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
 * A loop would do only a byte at a time with possible branch
@@ ... @@
 bne len, rem, 1b
 .set noreorder

-#endif /* !CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_NO_LOAD_STORE_LR */
 .Lcopy_bytes_checklen\@:
 beqz len, .Ldone\@
 nop
@@ ... @@
 jr ra
 nop

-#ifdef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_NO_LOAD_STORE_LR
 .Lcopy_unaligned_bytes\@:
 1:
 COPY_BYTE(0)
@@ ... @@
 ADD src, src, 8
 b 1b
 ADD dst, dst, 8
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
 .if __memcpy == 1
 END(memcpy)
 .set __memcpy, 0
@@ ... @@
 nop
 .endm

+#ifndef CONFIG_HAVE_PLAT_MEMCPY
 .align 5
 LEAF(memmove)
 EXPORT_SYMBOL(memmove)
@@ ... @@
 /* Legacy Mode, user <-> user */
 __BUILD_COPY_USER LEGACY_MODE USEROP USEROP

+#endif
+
 #ifdef CONFIG_EVA

 /*
---|