| .. | .. |
|---|
| 24 | 24 | #include <asm/cacheflush.h> |
|---|
| 25 | 25 | #include <asm/tlbflush.h> |
|---|
| 26 | 26 | #include <asm/page.h> |
|---|
| 27 | | -#include <asm/pgalloc.h> |
|---|
| 28 | 27 | #include <asm/processor.h> |
|---|
| 29 | 28 | #include <asm/sections.h> |
|---|
| 30 | 29 | #include <asm/shmparam.h> |
|---|
| 31 | 30 | |
|---|
| 32 | | -int split_tlb __read_mostly; |
|---|
| 33 | | -int dcache_stride __read_mostly; |
|---|
| 34 | | -int icache_stride __read_mostly; |
|---|
| 31 | +int split_tlb __ro_after_init; |
|---|
| 32 | +int dcache_stride __ro_after_init; |
|---|
| 33 | +int icache_stride __ro_after_init; |
|---|
| 35 | 34 | EXPORT_SYMBOL(dcache_stride); |
|---|
| 36 | 35 | |
|---|
| 37 | 36 | void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); |
|---|
| 38 | 37 | EXPORT_SYMBOL(flush_dcache_page_asm); |
|---|
| 38 | +void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); |
|---|
| 39 | 39 | void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr); |
|---|
| 40 | 40 | |
|---|
| 41 | 41 | |
|---|
| 42 | | -/* On some machines (e.g. ones with the Merced bus), there can be |
|---|
| 42 | +/* On some machines (i.e., ones with the Merced bus), there can be |
|---|
| 43 | 43 | * only a single PxTLB broadcast at a time; this must be guaranteed |
|---|
| 44 | | - * by software. We put a spinlock around all TLB flushes to |
|---|
| 45 | | - * ensure this. |
|---|
| 44 | + * by software. We need a spinlock around all TLB flushes to ensure |
|---|
| 45 | + * this. |
|---|
| 46 | 46 | */ |
|---|
| 47 | | -DEFINE_SPINLOCK(pa_tlb_lock); |
|---|
| 47 | +DEFINE_SPINLOCK(pa_tlb_flush_lock); |
|---|
| 48 | 48 | |
|---|
| 49 | | -struct pdc_cache_info cache_info __read_mostly; |
|---|
| 49 | +/* Swapper page setup lock. */ |
|---|
| 50 | +DEFINE_SPINLOCK(pa_swapper_pg_lock); |
|---|
| 51 | + |
|---|
| 52 | +#if defined(CONFIG_64BIT) && defined(CONFIG_SMP) |
|---|
| 53 | +int pa_serialize_tlb_flushes __ro_after_init; |
|---|
| 54 | +#endif |
|---|
| 55 | + |
|---|
| 56 | +struct pdc_cache_info cache_info __ro_after_init; |
|---|
| 50 | 57 | #ifndef CONFIG_PA20 |
|---|
| 51 | | -static struct pdc_btlb_info btlb_info __read_mostly; |
|---|
| 58 | +static struct pdc_btlb_info btlb_info __ro_after_init; |
|---|
| 52 | 59 | #endif |
|---|
| 53 | 60 | |
|---|
| 54 | 61 | #ifdef CONFIG_SMP |
|---|
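For context, the renamed lock and the new `pa_serialize_tlb_flushes` flag are consumed by the TLB purge helpers in `asm/tlbflush.h`, which are not part of this hunk. A minimal sketch of how that plausibly looks (an approximation, not the verbatim header; configurations where the `int` above is not built are assumed to fall back to a constant 1):

```c
/*
 * Sketch of purge_tlb_start()/purge_tlb_end(): only machines that
 * really need a single outstanding PxTLB broadcast take the global
 * spinlock; everyone else just disables local interrupts.
 */
#define purge_tlb_start(flags)	do {					\
		if (pa_serialize_tlb_flushes)				\
			spin_lock_irqsave(&pa_tlb_flush_lock, flags);	\
		else							\
			local_irq_save(flags);				\
	} while (0)

#define purge_tlb_end(flags)	do {					\
		if (pa_serialize_tlb_flushes)				\
			spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
		else							\
			local_irq_restore(flags);			\
	} while (0)
```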
| .. | .. |
|---|
| 76 | 83 | #define pfn_va(pfn) __va(PFN_PHYS(pfn)) |
|---|
| 77 | 84 | |
|---|
| 78 | 85 | void |
|---|
| 79 | | -update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) |
|---|
| 86 | +__update_cache(pte_t pte) |
|---|
| 80 | 87 | { |
|---|
| 81 | | - unsigned long pfn = pte_pfn(*ptep); |
|---|
| 88 | + unsigned long pfn = pte_pfn(pte); |
|---|
| 82 | 89 | struct page *page; |
|---|
| 83 | 90 | |
|---|
| 84 | 91 | /* We don't have pte special. As a result, we can be called with |
|---|
| .. | .. |
|---|
| 303 | 310 | preempt_enable(); |
|---|
| 304 | 311 | } |
|---|
| 305 | 312 | |
|---|
| 313 | +static inline void |
|---|
| 314 | +__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, |
|---|
| 315 | + unsigned long physaddr) |
|---|
| 316 | +{ |
|---|
| 317 | + preempt_disable(); |
|---|
| 318 | + purge_dcache_page_asm(physaddr, vmaddr); |
|---|
| 319 | + if (vma->vm_flags & VM_EXEC) |
|---|
| 320 | + flush_icache_page_asm(physaddr, vmaddr); |
|---|
| 321 | + preempt_enable(); |
|---|
| 322 | +} |
|---|
| 323 | + |
|---|
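The new `purge_dcache_page_asm()` / `__purge_cache_page()` pair mirrors the existing flush helpers; the difference is which PA-RISC cache operation the assembly loops in `arch/parisc/kernel/pacache.S` issue. A brief summary of the instruction semantics, paraphrased from the architecture documentation (the loop bodies themselves are not shown in this hunk):

```c
/*
 * PA-RISC data cache maintenance, in brief:
 *
 *   fdc  ("flush data cache")  - write a dirty line back to memory,
 *                                then invalidate it; used by
 *                                flush_dcache_page_asm().
 *   pdc  ("purge data cache")  - invalidate the line without writing
 *                                it back; used by purge_dcache_page_asm().
 *
 * Purging is the cheaper operation, but it discards dirty data, so the
 * callers added later in this file only use __purge_cache_page() when
 * the owning mm no longer has a context.
 */
```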
| 306 | 324 | void flush_dcache_page(struct page *page) |
|---|
| 307 | 325 | { |
|---|
| 308 | 326 | struct address_space *mapping = page_mapping_file(page); |
|---|
| 309 | 327 | struct vm_area_struct *mpnt; |
|---|
| 310 | 328 | unsigned long offset; |
|---|
| 311 | 329 | unsigned long addr, old_addr = 0; |
|---|
| 330 | + unsigned long flags; |
|---|
| 312 | 331 | pgoff_t pgoff; |
|---|
| 313 | 332 | |
|---|
| 314 | 333 | if (mapping && !mapping_mapped(mapping)) { |
|---|
| .. | .. |
|---|
| 328 | 347 | * declared as MAP_PRIVATE or MAP_SHARED), so we only need |
|---|
| 329 | 348 | * to flush one address here for them all to become coherent */ |
|---|
| 330 | 349 | |
|---|
| 331 | | - flush_dcache_mmap_lock(mapping); |
|---|
| 350 | + flush_dcache_mmap_lock_irqsave(mapping, flags); |
|---|
| 332 | 351 | vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { |
|---|
| 333 | 352 | offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; |
|---|
| 334 | 353 | addr = mpnt->vm_start + offset; |
|---|
| .. | .. |
|---|
| 346 | 365 | if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1)) |
|---|
| 347 | 366 | != (addr & (SHM_COLOUR - 1))) { |
|---|
| 348 | 367 | __flush_cache_page(mpnt, addr, page_to_phys(page)); |
|---|
| 349 | | - if (old_addr) |
|---|
| 368 | + if (parisc_requires_coherency() && old_addr) |
|---|
| 350 | 369 | printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file); |
|---|
| 351 | 370 | old_addr = addr; |
|---|
| 352 | 371 | } |
|---|
| 353 | 372 | } |
|---|
| 354 | | - flush_dcache_mmap_unlock(mapping); |
|---|
| 373 | + flush_dcache_mmap_unlock_irqrestore(mapping, flags); |
|---|
| 355 | 374 | } |
|---|
| 356 | 375 | EXPORT_SYMBOL(flush_dcache_page); |
|---|
| 357 | 376 | |
|---|
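`flush_dcache_page()` now saves and restores the interrupt state around the i_mmap walk instead of unconditionally re-enabling interrupts on unlock, presumably so the function stays safe when entered with interrupts already disabled. A plausible shape for the new wrappers, assuming they follow the existing `flush_dcache_mmap_lock()` pattern in `asm/cacheflush.h` (the header is not part of this hunk, so the exact definitions may differ):

```c
/* Sketch only: assumes the wrappers are built on the i_pages xarray
 * lock, as flush_dcache_mmap_lock()/unlock() already are. */
#define flush_dcache_mmap_lock_irqsave(mapping, flags)		\
		xa_lock_irqsave(&(mapping)->i_pages, flags)
#define flush_dcache_mmap_unlock_irqrestore(mapping, flags)	\
		xa_unlock_irqrestore(&(mapping)->i_pages, flags)
```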
| .. | .. |
|---|
| 362 | 381 | EXPORT_SYMBOL(flush_kernel_icache_range_asm); |
|---|
| 363 | 382 | |
|---|
| 364 | 383 | #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ |
|---|
| 365 | | -static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD; |
|---|
| 384 | +static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD; |
|---|
| 366 | 385 | |
|---|
| 367 | | -#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */ |
|---|
| 368 | | -static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD; |
|---|
| 386 | +#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */ |
|---|
| 387 | +static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL; |
|---|
| 369 | 388 | |
|---|
| 370 | 389 | void __init parisc_setup_cache_timing(void) |
|---|
| 371 | 390 | { |
|---|
| 372 | 391 | unsigned long rangetime, alltime; |
|---|
| 373 | | - unsigned long size, start; |
|---|
| 392 | + unsigned long size; |
|---|
| 374 | 393 | unsigned long threshold; |
|---|
| 375 | 394 | |
|---|
| 376 | 395 | alltime = mfctl(16); |
|---|
| .. | .. |
|---|
| 404 | 423 | goto set_tlb_threshold; |
|---|
| 405 | 424 | } |
|---|
| 406 | 425 | |
|---|
| 426 | + size = (unsigned long)_end - (unsigned long)_text; |
|---|
| 427 | + rangetime = mfctl(16); |
|---|
| 428 | + flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end); |
|---|
| 429 | + rangetime = mfctl(16) - rangetime; |
|---|
| 430 | + |
|---|
| 407 | 431 | alltime = mfctl(16); |
|---|
| 408 | 432 | flush_tlb_all(); |
|---|
| 409 | 433 | alltime = mfctl(16) - alltime; |
|---|
| 410 | 434 | |
|---|
| 411 | | - size = 0; |
|---|
| 412 | | - start = (unsigned long) _text; |
|---|
| 413 | | - rangetime = mfctl(16); |
|---|
| 414 | | - while (start < (unsigned long) _end) { |
|---|
| 415 | | - flush_tlb_kernel_range(start, start + PAGE_SIZE); |
|---|
| 416 | | - start += PAGE_SIZE; |
|---|
| 417 | | - size += PAGE_SIZE; |
|---|
| 418 | | - } |
|---|
| 419 | | - rangetime = mfctl(16) - rangetime; |
|---|
| 420 | | - |
|---|
| 421 | | - printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n", |
|---|
| 435 | + printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n", |
|---|
| 422 | 436 | alltime, size, rangetime); |
|---|
| 423 | 437 | |
|---|
| 424 | | - threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime); |
|---|
| 438 | + threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime); |
|---|
| 439 | + printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n", |
|---|
| 440 | + threshold/1024); |
|---|
| 425 | 441 | |
|---|
| 426 | 442 | set_tlb_threshold: |
|---|
| 427 | | - if (threshold) |
|---|
| 443 | + if (threshold > FLUSH_TLB_THRESHOLD) |
|---|
| 428 | 444 | parisc_tlb_flush_threshold = threshold; |
|---|
| 445 | + else |
|---|
| 446 | + parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD; |
|---|
| 447 | + |
|---|
| 429 | 448 | printk(KERN_INFO "TLB flush threshold set to %lu KiB\n", |
|---|
| 430 | 449 | parisc_tlb_flush_threshold/1024); |
|---|
| 431 | 450 | } |
|---|
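To make the new sizing concrete, here is the arithmetic with purely hypothetical cycle counts (none of these numbers come from the patch); it only illustrates how the calculated threshold and the 16 KiB floor interact:

```c
/*
 * Hypothetical example of the calculation above:
 *
 *   size      = _end - _text              = 16 MiB
 *   rangetime = flush_tlb_kernel_range()  = 800000 cycles
 *   alltime   = flush_tlb_all()           =  50000 cycles
 *   num_online_cpus()                     = 4
 *
 *   threshold = PAGE_ALIGN(4 * 16 MiB * 50000 / 800000)
 *             = PAGE_ALIGN(4 MiB)
 *             = 4 MiB
 *
 * 4 MiB > FLUSH_TLB_THRESHOLD (16 KiB), so parisc_tlb_flush_threshold
 * becomes 4 MiB; had the result come out below 16 KiB, the floor
 * would have been used instead.
 */
```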
| .. | .. |
|---|
| 477 | 496 | /* Purge TLB entries for small ranges using the pdtlb and |
|---|
| 478 | 497 | pitlb instructions. These instructions execute locally |
|---|
| 479 | 498 | but cause a purge request to be broadcast to other TLBs. */ |
|---|
| 480 | | - if (likely(!split_tlb)) { |
|---|
| 481 | | - while (start < end) { |
|---|
| 482 | | - purge_tlb_start(flags); |
|---|
| 483 | | - mtsp(sid, 1); |
|---|
| 484 | | - pdtlb(start); |
|---|
| 485 | | - purge_tlb_end(flags); |
|---|
| 486 | | - start += PAGE_SIZE; |
|---|
| 487 | | - } |
|---|
| 488 | | - return 0; |
|---|
| 489 | | - } |
|---|
| 490 | | - |
|---|
| 491 | | - /* split TLB case */ |
|---|
| 492 | 499 | while (start < end) { |
|---|
| 493 | 500 | purge_tlb_start(flags); |
|---|
| 494 | 501 | mtsp(sid, 1); |
|---|
| .. | .. |
|---|
| 525 | 532 | pte_t *ptep = NULL; |
|---|
| 526 | 533 | |
|---|
| 527 | 534 | if (!pgd_none(*pgd)) { |
|---|
| 528 | | - pud_t *pud = pud_offset(pgd, addr); |
|---|
| 529 | | - if (!pud_none(*pud)) { |
|---|
| 530 | | - pmd_t *pmd = pmd_offset(pud, addr); |
|---|
| 531 | | - if (!pmd_none(*pmd)) |
|---|
| 532 | | - ptep = pte_offset_map(pmd, addr); |
|---|
| 535 | + p4d_t *p4d = p4d_offset(pgd, addr); |
|---|
| 536 | + if (!p4d_none(*p4d)) { |
|---|
| 537 | + pud_t *pud = pud_offset(p4d, addr); |
|---|
| 538 | + if (!pud_none(*pud)) { |
|---|
| 539 | + pmd_t *pmd = pmd_offset(pud, addr); |
|---|
| 540 | + if (!pmd_none(*pmd)) |
|---|
| 541 | + ptep = pte_offset_map(pmd, addr); |
|---|
| 542 | + } |
|---|
| 533 | 543 | } |
|---|
| 534 | 544 | } |
|---|
| 535 | 545 | return ptep; |
|---|
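The rewritten walk simply inserts the p4d step that the five-level page-table conversion added to the generic MM API. On configurations without a real p4d level (parisc included), the extra step is folded away at compile time; roughly, the generic `pgtable-nop4d.h` fallback reduces `p4d_offset()` to an identity cast, so the deeper nesting costs nothing at run time:

```c
/* Approximate shape of the folded-p4d fallback in
 * include/asm-generic/pgtable-nop4d.h (not part of this patch). */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd;	/* the "p4d" is just the pgd entry itself */
}
```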
| .. | .. |
|---|
| 573 | 583 | pfn = pte_pfn(*ptep); |
|---|
| 574 | 584 | if (!pfn_valid(pfn)) |
|---|
| 575 | 585 | continue; |
|---|
| 576 | | - if (unlikely(mm->context)) |
|---|
| 586 | + if (unlikely(mm->context)) { |
|---|
| 577 | 587 | flush_tlb_page(vma, addr); |
|---|
| 578 | | - __flush_cache_page(vma, addr, PFN_PHYS(pfn)); |
|---|
| 588 | + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); |
|---|
| 589 | + } else { |
|---|
| 590 | + __purge_cache_page(vma, addr, PFN_PHYS(pfn)); |
|---|
| 591 | + } |
|---|
| 579 | 592 | } |
|---|
| 580 | 593 | } |
|---|
| 581 | 594 | } |
|---|
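The same flush-or-purge split introduced in the hunk above recurs in the two hunks below. Condensed, with the page-table walk, the pfn check and the per-call-site likely()/unlikely() hints stripped out, the pattern is:

```c
if (mm->context) {
	/* The mm still has a space id: drop the stale TLB entry and
	 * write the data back before invalidating the cache lines. */
	flush_tlb_page(vma, addr);
	__flush_cache_page(vma, addr, PFN_PHYS(pfn));
} else {
	/* No context any more: the lines only need to be invalidated,
	 * so the cheaper purge suffices and no TLB flush is issued. */
	__purge_cache_page(vma, addr, PFN_PHYS(pfn));
}
```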
| .. | .. |
|---|
| 610 | 623 | continue; |
|---|
| 611 | 624 | pfn = pte_pfn(*ptep); |
|---|
| 612 | 625 | if (pfn_valid(pfn)) { |
|---|
| 613 | | - if (unlikely(vma->vm_mm->context)) |
|---|
| 626 | + if (unlikely(vma->vm_mm->context)) { |
|---|
| 614 | 627 | flush_tlb_page(vma, addr); |
|---|
| 615 | | - __flush_cache_page(vma, addr, PFN_PHYS(pfn)); |
|---|
| 628 | + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); |
|---|
| 629 | + } else { |
|---|
| 630 | + __purge_cache_page(vma, addr, PFN_PHYS(pfn)); |
|---|
| 631 | + } |
|---|
| 616 | 632 | } |
|---|
| 617 | 633 | } |
|---|
| 618 | 634 | } |
|---|
| .. | .. |
|---|
| 621 | 637 | flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) |
|---|
| 622 | 638 | { |
|---|
| 623 | 639 | if (pfn_valid(pfn)) { |
|---|
| 624 | | - if (likely(vma->vm_mm->context)) |
|---|
| 640 | + if (likely(vma->vm_mm->context)) { |
|---|
| 625 | 641 | flush_tlb_page(vma, vmaddr); |
|---|
| 626 | | - __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); |
|---|
| 642 | + __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); |
|---|
| 643 | + } else { |
|---|
| 644 | + __purge_cache_page(vma, vmaddr, PFN_PHYS(pfn)); |
|---|
| 645 | + } |
|---|
| 627 | 646 | } |
|---|
| 628 | 647 | } |
|---|
| 629 | 648 | |
|---|