| .. | .. | 
|---|
| 11 | 11 |   | 
|---|
| 12 | 12 |  #include <linux/seq_file.h> | 
|---|
| 13 | 13 |  #include <linux/spinlock.h> | 
|---|
| 14 |  | -#include <linux/bootmem.h>  | 
|---|
 | 14 | +#include <linux/memblock.h>  | 
|---|
| 15 | 15 |  #include <linux/pagemap.h> | 
|---|
| 16 | 16 |  #include <linux/vmalloc.h> | 
|---|
| 17 | 17 |  #include <linux/kdebug.h> | 
|---|
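The include swap above tracks the removal of the bootmem allocator: every `__alloc_bootmem()` call in this file becomes `memblock_alloc()` in the hunks below. Unlike `__alloc_bootmem()`, which panicked internally when it could not satisfy a request, `memblock_alloc()` returns zeroed memory or NULL, so each call site gains an explicit check. A minimal sketch of the conversion pattern, illustrative only and not taken from any particular call site:

```c
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>

/* Illustrative only: the shape of a bootmem -> memblock conversion.
 * memblock_alloc() already zeroes the region and may return NULL, so an
 * early-boot caller checks the result and panics itself.
 */
static void * __init early_buf_alloc(unsigned long size, unsigned long align)
{
	void *buf;

	/* old: buf = __alloc_bootmem(size, align, 0UL); */
	buf = memblock_alloc(size, align);
	if (!buf)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, align);
	return buf;
}
```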
| .. | .. | 
|---|
| 136 | 136 |   | 
|---|
| 137 | 137 |  void pmd_set(pmd_t *pmdp, pte_t *ptep) | 
|---|
| 138 | 138 |  { | 
|---|
| 139 |  | -	unsigned long ptp;	/* Physical address, shifted right by 4 */  | 
|---|
| 140 |  | -	int i;  | 
|---|
| 141 |  | -  | 
|---|
| 142 |  | -	ptp = __nocache_pa(ptep) >> 4;  | 
|---|
| 143 |  | -	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {  | 
|---|
| 144 |  | -		set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));  | 
|---|
| 145 |  | -		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);  | 
|---|
| 146 |  | -	}  | 
|---|
| 147 |  | -}  | 
|---|
| 148 |  | -  | 
|---|
| 149 |  | -void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)  | 
|---|
| 150 |  | -{  | 
|---|
| 151 |  | -	unsigned long ptp;	/* Physical address, shifted right by 4 */  | 
|---|
| 152 |  | -	int i;  | 
|---|
| 153 |  | -  | 
|---|
| 154 |  | -	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */  | 
|---|
| 155 |  | -	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {  | 
|---|
| 156 |  | -		set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));  | 
|---|
| 157 |  | -		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);  | 
|---|
| 158 |  | -	}  | 
|---|
| 159 |  | -}  | 
|---|
| 160 |  | -  | 
|---|
| 161 |  | -/* Find an entry in the third-level page table.. */  | 
|---|
| 162 |  | -pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)  | 
|---|
| 163 |  | -{  | 
|---|
| 164 |  | -	void *pte;  | 
|---|
| 165 |  | -  | 
|---|
| 166 |  | -	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);  | 
|---|
| 167 |  | -	return (pte_t *) pte +  | 
|---|
| 168 |  | -	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));  | 
|---|
 | 139 | +	unsigned long ptp = __nocache_pa(ptep) >> 4;  | 
|---|
 | 140 | +	set_pte((pte_t *)&pmd_val(*pmdp), __pte(SRMMU_ET_PTD | ptp));  | 
|---|
| 169 | 141 |  } | 
|---|
| 170 | 142 |   | 
|---|
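This hunk is the visible edge of the sparc32 page-table restructuring: the old software layout packed sixteen hardware level-2 descriptors into one `pmd_t` (`pmdv[16]`), so `pmd_set()`, `pmd_populate()` and a file-local `pte_offset_kernel()` all had to fan a page-sized PTE cluster out across those slots. With the software layout now matching the hardware (one descriptor per pmd entry, one 64-entry PTE table per descriptor), `pmd_set()` collapses to a single `set_pte()` of `SRMMU_ET_PTD | (pa >> 4)`, and `pmd_populate()`/`pte_offset_kernel()` no longer need file-local definitions. A standalone sketch of what the geometry change means for coverage, with the table sizes assumed for illustration rather than taken from the headers:

```c
/* Standalone sketch of the layout change.  The entry counts below are
 * assumptions for illustration: SRMMU hardware uses 4 KiB pages with
 * 64-entry level-2 and level-3 tables; the old software pmd_t grouped 16
 * hardware level-2 descriptors (pmdv[16]).
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PTRS_PER_PTE	64		/* hardware level-3 table */
#define OLD_PTDS_PER_PMD 16		/* pmdv[16] in the old pmd_t */

int main(void)
{
	unsigned long pte_span = (unsigned long)PTRS_PER_PTE << PAGE_SHIFT;
	unsigned long new_pmd_span = pte_span;			/* one PTD per pmd */
	unsigned long old_pmd_span = pte_span * OLD_PTDS_PER_PMD;

	printf("one PTE table maps      %lu KiB\n", pte_span >> 10);
	printf("new pmd entry maps      %lu KiB\n", new_pmd_span >> 10);
	printf("old pmd entry mapped    %lu KiB\n", old_pmd_span >> 10);
	return 0;
}
```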
| 171 | 143 |  /* | 
|---|
| .. | .. | 
|---|
| 175 | 147 |   */ | 
|---|
| 176 | 148 |  static void *__srmmu_get_nocache(int size, int align) | 
|---|
| 177 | 149 |  { | 
|---|
| 178 |  | -	int offset;  | 
|---|
 | 150 | +	int offset, minsz = 1 << SRMMU_NOCACHE_BITMAP_SHIFT;  | 
|---|
| 179 | 151 |  	unsigned long addr; | 
|---|
| 180 | 152 |   | 
|---|
| 181 |  | -	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {  | 
|---|
 | 153 | +	if (size < minsz) {  | 
|---|
| 182 | 154 |  		printk(KERN_ERR "Size 0x%x too small for nocache request\n", | 
|---|
| 183 | 155 |  		       size); | 
|---|
| 184 |  | -		size = SRMMU_NOCACHE_BITMAP_SHIFT;  | 
|---|
 | 156 | +		size = minsz;  | 
|---|
| 185 | 157 |  	} | 
|---|
| 186 |  | -	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {  | 
|---|
| 187 |  | -		printk(KERN_ERR "Size 0x%x unaligned int nocache request\n",  | 
|---|
 | 158 | +	if (size & (minsz - 1)) {  | 
|---|
 | 159 | +		printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",  | 
|---|
| 188 | 160 |  		       size); | 
|---|
| 189 |  | -		size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;  | 
|---|
 | 161 | +		size += minsz - 1;  | 
|---|
| 190 | 162 |  	} | 
|---|
| 191 | 163 |  	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX); | 
|---|
| 192 | 164 |   | 
|---|
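The nocache allocator hands out memory in granules of `1 << SRMMU_NOCACHE_BITMAP_SHIFT` bytes, one bitmap bit per granule. The old checks compared and masked `size` against the shift count itself rather than the granule size, so undersized or misaligned requests slipped through unmodified; the new `minsz` local fixes both tests and also the message text ("unaligned in" rather than "unaligned int"). A standalone sketch of the corrected check, with the shift value assumed for illustration:

```c
/* Standalone sketch of the fixed size check: requests are measured in
 * granules of (1 << bitmap_shift) bytes, so both the minimum-size and the
 * alignment tests must use the granule size, not the shift count.  The
 * shift value of 8 (256-byte granules) is an assumption for illustration.
 */
#include <stdio.h>

#define BITMAP_SHIFT	8

static int check_nocache_size(int size)
{
	int minsz = 1 << BITMAP_SHIFT;

	if (size < minsz) {
		fprintf(stderr, "Size 0x%x too small for nocache request\n", size);
		size = minsz;
	}
	if (size & (minsz - 1)) {
		fprintf(stderr, "Size 0x%x unaligned in nocache request\n", size);
		size += minsz - 1;
	}
	return size;
}

int main(void)
{
	printf("0x40  -> 0x%x\n", check_nocache_size(0x40));	/* bumped to one granule */
	printf("0x140 -> 0x%x\n", check_nocache_size(0x140));	/* padded by minsz - 1 */
	return 0;
}
```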
| .. | .. | 
|---|
| 296 | 268 |  	void *srmmu_nocache_bitmap; | 
|---|
| 297 | 269 |  	unsigned int bitmap_bits; | 
|---|
| 298 | 270 |  	pgd_t *pgd; | 
|---|
 | 271 | +	p4d_t *p4d;  | 
|---|
 | 272 | +	pud_t *pud;  | 
|---|
| 299 | 273 |  	pmd_t *pmd; | 
|---|
| 300 | 274 |  	pte_t *pte; | 
|---|
| 301 | 275 |  	unsigned long paddr, vaddr; | 
|---|
| .. | .. | 
|---|
| 303 | 277 |   | 
|---|
| 304 | 278 |  	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT; | 
|---|
| 305 | 279 |   | 
|---|
| 306 |  | -	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,  | 
|---|
| 307 |  | -		SRMMU_NOCACHE_ALIGN_MAX, 0UL);  | 
|---|
 | 280 | +	srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size,  | 
|---|
 | 281 | +					    SRMMU_NOCACHE_ALIGN_MAX);  | 
|---|
 | 282 | +	if (!srmmu_nocache_pool)  | 
|---|
 | 283 | +		panic("%s: Failed to allocate %lu bytes align=0x%x\n",  | 
|---|
 | 284 | +		      __func__, srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX);  | 
|---|
| 308 | 285 |  	memset(srmmu_nocache_pool, 0, srmmu_nocache_size); | 
|---|
| 309 | 286 |   | 
|---|
| 310 | 287 |  	srmmu_nocache_bitmap = | 
|---|
| 311 |  | -		__alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),  | 
|---|
| 312 |  | -				SMP_CACHE_BYTES, 0UL);  | 
|---|
 | 288 | +		memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long),  | 
|---|
 | 289 | +			       SMP_CACHE_BYTES);  | 
|---|
 | 290 | +	if (!srmmu_nocache_bitmap)  | 
|---|
 | 291 | +		panic("%s: Failed to allocate %zu bytes\n", __func__,  | 
|---|
 | 292 | +		      BITS_TO_LONGS(bitmap_bits) * sizeof(long));  | 
|---|
| 313 | 293 |  	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); | 
|---|
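As above, the pool and bitmap allocations move to `memblock_alloc()` with explicit panics on failure. The bitmap is sized at one bit per nocache granule, rounded up to whole `long`s before `bit_map_init()`. A standalone sketch of that sizing arithmetic, with the pool size and granule assumed for illustration:

```c
/* Standalone sketch of the nocache bitmap sizing: one bit per
 * (1 << SRMMU_NOCACHE_BITMAP_SHIFT)-byte granule, rounded up to whole longs.
 * The 256-byte granule and 1 MiB pool size are assumptions for illustration.
 */
#include <stdio.h>

#define BITMAP_SHIFT		8			/* 256-byte granules */
#define NOCACHE_SIZE		(1024UL * 1024)		/* 1 MiB pool */
#define BITS_PER_LONG_		(8 * sizeof(long))
#define BITS_TO_LONGS_(n)	(((n) + BITS_PER_LONG_ - 1) / BITS_PER_LONG_)

int main(void)
{
	unsigned long bitmap_bits = NOCACHE_SIZE >> BITMAP_SHIFT;
	unsigned long bitmap_bytes = BITS_TO_LONGS_(bitmap_bits) * sizeof(long);

	printf("%lu bits, %lu bytes of bitmap\n", bitmap_bits, bitmap_bytes);
	return 0;
}
```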
| 314 | 294 |   | 
|---|
| 315 | 295 |  	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); | 
|---|
| .. | .. | 
|---|
| 323 | 303 |   | 
|---|
| 324 | 304 |  	while (vaddr < srmmu_nocache_end) { | 
|---|
| 325 | 305 |  		pgd = pgd_offset_k(vaddr); | 
|---|
| 326 |  | -		pmd = pmd_offset(__nocache_fix(pgd), vaddr);  | 
|---|
 | 306 | +		p4d = p4d_offset(pgd, vaddr);  | 
|---|
 | 307 | +		pud = pud_offset(p4d, vaddr);  | 
|---|
 | 308 | +		pmd = pmd_offset(__nocache_fix(pud), vaddr);  | 
|---|
| 327 | 309 |  		pte = pte_offset_kernel(__nocache_fix(pmd), vaddr); | 
|---|
| 328 | 310 |   | 
|---|
| 329 | 311 |  		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV); | 
|---|
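The loop that maps the nocache pool now walks all five generic page-table levels. sparc32 still has only three levels of tables, so the added `p4d_offset()`/`pud_offset()` steps are folded no-ops that pass the pgd entry through; the walk compiles to the same accesses as before while matching the generic API. The same pattern repeats in the ioremap, skeleton-allocation, PROM-inheritance and PKMAP hunks below. An illustrative helper (not from the patch) showing the canonical walk:

```c
/* Illustrative helper, not part of the patch: the canonical five-level walk
 * used throughout the file after this change.  On sparc32 the p4d and pud
 * levels are folded, so p4d_offset()/pud_offset() simply reinterpret the pgd
 * entry and the walk still touches only pgd, pmd and pte tables.
 */
static pte_t *walk_kernel_pte(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);

	return pte_offset_kernel(pmd, vaddr);
}
```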
| .. | .. | 
|---|
| 364 | 346 |   * Alignments up to the page size are the same for physical and virtual | 
|---|
| 365 | 347 |   * addresses of the nocache area. | 
|---|
| 366 | 348 |   */ | 
|---|
| 367 |  | -pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)  | 
|---|
 | 349 | +pgtable_t pte_alloc_one(struct mm_struct *mm)  | 
|---|
| 368 | 350 |  { | 
|---|
| 369 |  | -	unsigned long pte;  | 
|---|
 | 351 | +	pte_t *ptep;  | 
|---|
| 370 | 352 |  	struct page *page; | 
|---|
| 371 | 353 |   | 
|---|
| 372 |  | -	if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)  | 
|---|
 | 354 | +	if ((ptep = pte_alloc_one_kernel(mm)) == 0)  | 
|---|
| 373 | 355 |  		return NULL; | 
|---|
| 374 |  | -	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);  | 
|---|
| 375 |  | -	if (!pgtable_page_ctor(page)) {  | 
|---|
| 376 |  | -		__free_page(page);  | 
|---|
| 377 |  | -		return NULL;  | 
|---|
 | 356 | +	page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);  | 
|---|
 | 357 | +	spin_lock(&mm->page_table_lock);  | 
|---|
 | 358 | +	if (page_ref_inc_return(page) == 2 && !pgtable_pte_page_ctor(page)) {  | 
|---|
 | 359 | +		page_ref_dec(page);  | 
|---|
 | 360 | +		ptep = NULL;  | 
|---|
| 378 | 361 |  	} | 
|---|
| 379 |  | -	return page;  | 
|---|
 | 362 | +	spin_unlock(&mm->page_table_lock);  | 
|---|
 | 363 | +  | 
|---|
 | 364 | +	return ptep;  | 
|---|
| 380 | 365 |  } | 
|---|
| 381 | 366 |   | 
|---|
| 382 |  | -void pte_free(struct mm_struct *mm, pgtable_t pte)  | 
|---|
 | 367 | +void pte_free(struct mm_struct *mm, pgtable_t ptep)  | 
|---|
| 383 | 368 |  { | 
|---|
| 384 |  | -	unsigned long p;  | 
|---|
 | 369 | +	struct page *page;  | 
|---|
| 385 | 370 |   | 
|---|
| 386 |  | -	pgtable_page_dtor(pte);  | 
|---|
| 387 |  | -	p = (unsigned long)page_address(pte);	/* Cached address (for test) */  | 
|---|
| 388 |  | -	if (p == 0)  | 
|---|
| 389 |  | -		BUG();  | 
|---|
| 390 |  | -	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */  | 
|---|
 | 371 | +	page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);  | 
|---|
 | 372 | +	spin_lock(&mm->page_table_lock);  | 
|---|
 | 373 | +	if (page_ref_dec_return(page) == 1)  | 
|---|
 | 374 | +		pgtable_pte_page_dtor(page);  | 
|---|
 | 375 | +	spin_unlock(&mm->page_table_lock);  | 
|---|
| 391 | 376 |   | 
|---|
| 392 |  | -	/* free non cached virtual address*/  | 
|---|
| 393 |  | -	srmmu_free_nocache(__nocache_va(p), PTE_SIZE);  | 
|---|
 | 377 | +	srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE);  | 
|---|
| 394 | 378 |  } | 
|---|
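`pgtable_t` is now a `pte_t *` rather than a `struct page *`, because PTE tables are allocated at their hardware size (`SRMMU_PTE_TABLE_SIZE`) from the nocache pool and several of them share one page. The page constructor/destructor therefore must run only for the first table carved out of a page and the last one returned, which is what the `page_ref` counting under `mm->page_table_lock` implements: the page starts with one reference, so `page_ref_inc_return() == 2` marks the first table in a page and `page_ref_dec_return() == 1` the last. (`pgtable_page_ctor()`/`pgtable_page_dtor()` becoming `pgtable_pte_page_ctor()`/`_dtor()` is a separate, tree-wide rename.) A standalone sketch of the sharing rule:

```c
/* Standalone sketch of the sharing rule: with several small PTE tables carved
 * out of one page, the page "constructor" runs only when the first table is
 * handed out and the "destructor" only when the last one is returned.  The
 * count starts at 1 (the page itself), mirroring the page_ref_inc_return()
 * and page_ref_dec_return() tests in the patch.
 */
#include <assert.h>
#include <stdio.h>

static int page_ref = 1;	/* page exists, no PTE tables handed out yet */

static void alloc_table(void)
{
	if (++page_ref == 2)
		printf("first table in page: run pgtable ctor\n");
}

static void free_table(void)
{
	if (--page_ref == 1)
		printf("last table in page: run pgtable dtor\n");
}

int main(void)
{
	alloc_table();	/* ctor */
	alloc_table();	/* shared, no ctor */
	free_table();	/* still shared, no dtor */
	free_table();	/* dtor */
	assert(page_ref == 1);
	return 0;
}
```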
| 395 | 379 |   | 
|---|
| 396 | 380 |  /* context handling - a dynamically sized pool is used */ | 
|---|
| .. | .. | 
|---|
| 467 | 451 |  	unsigned long size; | 
|---|
| 468 | 452 |   | 
|---|
| 469 | 453 |  	size = numctx * sizeof(struct ctx_list); | 
|---|
| 470 |  | -	ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);  | 
|---|
 | 454 | +	ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES);  | 
|---|
 | 455 | +	if (!ctx_list_pool)  | 
|---|
 | 456 | +		panic("%s: Failed to allocate %lu bytes\n", __func__, size);  | 
|---|
| 471 | 457 |   | 
|---|
| 472 | 458 |  	for (ctx = 0; ctx < numctx; ctx++) { | 
|---|
| 473 | 459 |  		struct ctx_list *clist; | 
|---|
| .. | .. | 
|---|
| 508 | 494 |  				   unsigned long virt_addr, int bus_type) | 
|---|
| 509 | 495 |  { | 
|---|
| 510 | 496 |  	pgd_t *pgdp; | 
|---|
 | 497 | +	p4d_t *p4dp;  | 
|---|
 | 498 | +	pud_t *pudp;  | 
|---|
| 511 | 499 |  	pmd_t *pmdp; | 
|---|
| 512 | 500 |  	pte_t *ptep; | 
|---|
| 513 | 501 |  	unsigned long tmp; | 
|---|
| 514 | 502 |   | 
|---|
| 515 | 503 |  	physaddr &= PAGE_MASK; | 
|---|
| 516 | 504 |  	pgdp = pgd_offset_k(virt_addr); | 
|---|
| 517 |  | -	pmdp = pmd_offset(pgdp, virt_addr);  | 
|---|
 | 505 | +	p4dp = p4d_offset(pgdp, virt_addr);  | 
|---|
 | 506 | +	pudp = pud_offset(p4dp, virt_addr);  | 
|---|
 | 507 | +	pmdp = pmd_offset(pudp, virt_addr);  | 
|---|
| 518 | 508 |  	ptep = pte_offset_kernel(pmdp, virt_addr); | 
|---|
| 519 | 509 |  	tmp = (physaddr >> 4) | SRMMU_ET_PTE; | 
|---|
| 520 | 510 |   | 
|---|
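The unchanged line building `tmp` is worth a note, since the same `>> 4` packing appears in `pmd_set()` and `srmmu_nocache_init()` above: SRMMU entries store a physical address shifted right by four bits, which is how a 32-bit PTE or PTD can reference a 36-bit physical (or bus) address for I/O mappings; the low bits carry the entry type and access field. A standalone sketch of the packing, with the constant values assumed for illustration:

```c
/* Standalone sketch of SRMMU entry packing: both PTDs and PTEs store a
 * physical address shifted right by 4, so a 32-bit entry can reference a
 * 36-bit physical or bus address.  The type values (PTD = 1, PTE = 2) and
 * the access field are assumptions for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define ET_PTE		0x2u
#define ACC_PRIV	(0x7u << 2)	/* assumed: supervisor-only access */

static uint32_t make_pte(uint64_t paddr, uint32_t acc)
{
	return (uint32_t)(paddr >> 4) | acc | ET_PTE;
}

static uint64_t pte_to_pa(uint32_t pte)
{
	return ((uint64_t)(pte >> 8)) << 12;	/* PPN field starts at bit 8 */
}

int main(void)
{
	uint64_t io_pa = 0xf12345000ULL;	/* 36-bit bus address, page aligned */
	uint32_t pte = make_pte(io_pa, ACC_PRIV);

	printf("pte=0x%08x -> pa=0x%llx\n", pte,
	       (unsigned long long)pte_to_pa(pte));
	return 0;
}
```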
| .. | .. | 
|---|
| 543 | 533 |  static inline void srmmu_unmapioaddr(unsigned long virt_addr) | 
|---|
| 544 | 534 |  { | 
|---|
| 545 | 535 |  	pgd_t *pgdp; | 
|---|
 | 536 | +	p4d_t *p4dp;  | 
|---|
 | 537 | +	pud_t *pudp;  | 
|---|
| 546 | 538 |  	pmd_t *pmdp; | 
|---|
| 547 | 539 |  	pte_t *ptep; | 
|---|
| 548 | 540 |   | 
|---|
 | 541 | +  | 
|---|
| 549 | 542 |  	pgdp = pgd_offset_k(virt_addr); | 
|---|
| 550 |  | -	pmdp = pmd_offset(pgdp, virt_addr);  | 
|---|
 | 543 | +	p4dp = p4d_offset(pgdp, virt_addr);  | 
|---|
 | 544 | +	pudp = pud_offset(p4dp, virt_addr);  | 
|---|
 | 545 | +	pmdp = pmd_offset(pudp, virt_addr);  | 
|---|
| 551 | 546 |  	ptep = pte_offset_kernel(pmdp, virt_addr); | 
|---|
| 552 | 547 |   | 
|---|
| 553 | 548 |  	/* No need to flush uncacheable page. */ | 
|---|
| .. | .. | 
|---|
| 685 | 680 |  							unsigned long end) | 
|---|
| 686 | 681 |  { | 
|---|
| 687 | 682 |  	pgd_t *pgdp; | 
|---|
 | 683 | +	p4d_t *p4dp;  | 
|---|
 | 684 | +	pud_t *pudp;  | 
|---|
| 688 | 685 |  	pmd_t *pmdp; | 
|---|
| 689 | 686 |  	pte_t *ptep; | 
|---|
| 690 | 687 |   | 
|---|
| 691 | 688 |  	while (start < end) { | 
|---|
| 692 | 689 |  		pgdp = pgd_offset_k(start); | 
|---|
| 693 |  | -		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {  | 
|---|
 | 690 | +		p4dp = p4d_offset(pgdp, start);  | 
|---|
 | 691 | +		pudp = pud_offset(p4dp, start);  | 
|---|
 | 692 | +		if (pud_none(*(pud_t *)__nocache_fix(pudp))) {  | 
|---|
| 694 | 693 |  			pmdp = __srmmu_get_nocache( | 
|---|
| 695 | 694 |  			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); | 
|---|
| 696 | 695 |  			if (pmdp == NULL) | 
|---|
| 697 | 696 |  				early_pgtable_allocfail("pmd"); | 
|---|
| 698 | 697 |  			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); | 
|---|
| 699 |  | -			pgd_set(__nocache_fix(pgdp), pmdp);  | 
|---|
 | 698 | +			pud_set(__nocache_fix(pudp), pmdp);  | 
|---|
| 700 | 699 |  		} | 
|---|
| 701 |  | -		pmdp = pmd_offset(__nocache_fix(pgdp), start);  | 
|---|
 | 700 | +		pmdp = pmd_offset(__nocache_fix(pudp), start);  | 
|---|
| 702 | 701 |  		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { | 
|---|
| 703 | 702 |  			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE); | 
|---|
| 704 | 703 |  			if (ptep == NULL) | 
|---|
| .. | .. | 
|---|
| 716 | 715 |  						  unsigned long end) | 
|---|
| 717 | 716 |  { | 
|---|
| 718 | 717 |  	pgd_t *pgdp; | 
|---|
 | 718 | +	p4d_t *p4dp;  | 
|---|
 | 719 | +	pud_t *pudp;  | 
|---|
| 719 | 720 |  	pmd_t *pmdp; | 
|---|
| 720 | 721 |  	pte_t *ptep; | 
|---|
| 721 | 722 |   | 
|---|
| 722 | 723 |  	while (start < end) { | 
|---|
| 723 | 724 |  		pgdp = pgd_offset_k(start); | 
|---|
| 724 |  | -		if (pgd_none(*pgdp)) {  | 
|---|
 | 725 | +		p4dp = p4d_offset(pgdp, start);  | 
|---|
 | 726 | +		pudp = pud_offset(p4dp, start);  | 
|---|
 | 727 | +		if (pud_none(*pudp)) {  | 
|---|
| 725 | 728 |  			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); | 
|---|
| 726 | 729 |  			if (pmdp == NULL) | 
|---|
| 727 | 730 |  				early_pgtable_allocfail("pmd"); | 
|---|
| 728 | 731 |  			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE); | 
|---|
| 729 |  | -			pgd_set(pgdp, pmdp);  | 
|---|
 | 732 | +			pud_set((pud_t *)pgdp, pmdp);  | 
|---|
| 730 | 733 |  		} | 
|---|
| 731 |  | -		pmdp = pmd_offset(pgdp, start);  | 
|---|
 | 734 | +		pmdp = pmd_offset(pudp, start);  | 
|---|
| 732 | 735 |  		if (srmmu_pmd_none(*pmdp)) { | 
|---|
| 733 | 736 |  			ptep = __srmmu_get_nocache(PTE_SIZE, | 
|---|
| 734 | 737 |  							     PTE_SIZE); | 
|---|
| .. | .. | 
|---|
| 771 | 774 |  	unsigned long probed; | 
|---|
| 772 | 775 |  	unsigned long addr; | 
|---|
| 773 | 776 |  	pgd_t *pgdp; | 
|---|
 | 777 | +	p4d_t *p4dp;  | 
|---|
 | 778 | +	pud_t *pudp;  | 
|---|
| 774 | 779 |  	pmd_t *pmdp; | 
|---|
| 775 | 780 |  	pte_t *ptep; | 
|---|
| 776 | 781 |  	int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */ | 
|---|
| .. | .. | 
|---|
| 791 | 796 |  		what = 0; | 
|---|
| 792 | 797 |  		addr = start - PAGE_SIZE; | 
|---|
| 793 | 798 |   | 
|---|
| 794 |  | -		if (!(start & ~(SRMMU_REAL_PMD_MASK))) {  | 
|---|
| 795 |  | -			if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed)  | 
|---|
 | 799 | +		if (!(start & ~(PMD_MASK))) {  | 
|---|
 | 800 | +			if (srmmu_probe(addr + PMD_SIZE) == probed)  | 
|---|
| 796 | 801 |  				what = 1; | 
|---|
| 797 | 802 |  		} | 
|---|
| 798 | 803 |   | 
|---|
| 799 |  | -		if (!(start & ~(SRMMU_PGDIR_MASK))) {  | 
|---|
| 800 |  | -			if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed)  | 
|---|
 | 804 | +		if (!(start & ~(PGDIR_MASK))) {  | 
|---|
 | 805 | +			if (srmmu_probe(addr + PGDIR_SIZE) == probed)  | 
|---|
| 801 | 806 |  				what = 2; | 
|---|
| 802 | 807 |  		} | 
|---|
| 803 | 808 |   | 
|---|
| 804 | 809 |  		pgdp = pgd_offset_k(start); | 
|---|
 | 810 | +		p4dp = p4d_offset(pgdp, start);  | 
|---|
 | 811 | +		pudp = pud_offset(p4dp, start);  | 
|---|
| 805 | 812 |  		if (what == 2) { | 
|---|
| 806 | 813 |  			*(pgd_t *)__nocache_fix(pgdp) = __pgd(probed); | 
|---|
| 807 |  | -			start += SRMMU_PGDIR_SIZE;  | 
|---|
 | 814 | +			start += PGDIR_SIZE;  | 
|---|
| 808 | 815 |  			continue; | 
|---|
| 809 | 816 |  		} | 
|---|
| 810 |  | -		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {  | 
|---|
 | 817 | +		if (pud_none(*(pud_t *)__nocache_fix(pudp))) {  | 
|---|
| 811 | 818 |  			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, | 
|---|
| 812 | 819 |  						   SRMMU_PMD_TABLE_SIZE); | 
|---|
| 813 | 820 |  			if (pmdp == NULL) | 
|---|
| 814 | 821 |  				early_pgtable_allocfail("pmd"); | 
|---|
| 815 | 822 |  			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); | 
|---|
| 816 |  | -			pgd_set(__nocache_fix(pgdp), pmdp);  | 
|---|
 | 823 | +			pud_set(__nocache_fix(pudp), pmdp);  | 
|---|
| 817 | 824 |  		} | 
|---|
| 818 | 825 |  		pmdp = pmd_offset(__nocache_fix(pgdp), start); | 
|---|
 | 826 | +		if (what == 1) {  | 
|---|
 | 827 | +			*(pmd_t *)__nocache_fix(pmdp) = __pmd(probed);  | 
|---|
 | 828 | +			start += PMD_SIZE;  | 
|---|
 | 829 | +			continue;  | 
|---|
 | 830 | +		}  | 
|---|
| 819 | 831 |  		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { | 
|---|
| 820 | 832 |  			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE); | 
|---|
| 821 | 833 |  			if (ptep == NULL) | 
|---|
| 822 | 834 |  				early_pgtable_allocfail("pte"); | 
|---|
| 823 | 835 |  			memset(__nocache_fix(ptep), 0, PTE_SIZE); | 
|---|
| 824 | 836 |  			pmd_set(__nocache_fix(pmdp), ptep); | 
|---|
| 825 |  | -		}  | 
|---|
| 826 |  | -		if (what == 1) {  | 
|---|
| 827 |  | -			/* We bend the rule where all 16 PTPs in a pmd_t point  | 
|---|
| 828 |  | -			 * inside the same PTE page, and we leak a perfectly  | 
|---|
| 829 |  | -			 * good hardware PTE piece. Alternatives seem worse.  | 
|---|
| 830 |  | -			 */  | 
|---|
| 831 |  | -			unsigned int x;	/* Index of HW PMD in soft cluster */  | 
|---|
| 832 |  | -			unsigned long *val;  | 
|---|
| 833 |  | -			x = (start >> PMD_SHIFT) & 15;  | 
|---|
| 834 |  | -			val = &pmdp->pmdv[x];  | 
|---|
| 835 |  | -			*(unsigned long *)__nocache_fix(val) = probed;  | 
|---|
| 836 |  | -			start += SRMMU_REAL_PMD_SIZE;  | 
|---|
| 837 |  | -			continue;  | 
|---|
| 838 | 837 |  		} | 
|---|
| 839 | 838 |  		ptep = pte_offset_kernel(__nocache_fix(pmdp), start); | 
|---|
| 840 | 839 |  		*(pte_t *)__nocache_fix(ptep) = __pte(probed); | 
|---|
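`srmmu_inherit_prom_mappings()` copies the firmware's translations into the kernel tables at the level the PROM used: if a whole pgd-sized or pmd-sized region probes to the same entry, the probed value is installed directly as a large leaf mapping. Two things change here. First, the masks and step sizes become the generic `PMD_MASK`/`PMD_SIZE`/`PGDIR_MASK`/`PGDIR_SIZE`, which is possible now that the software and hardware layouts agree. Second, the `what == 1` case becomes a single `__pmd(probed)` store early in the loop; the removed code had to pick one of the sixteen `pmdv[]` slots and, as its own comment admitted, leak a perfectly good hardware PTE piece. A standalone sketch of the level-selection test, with the shift values assumed to match the usual SRMMU geometry:

```c
/* Standalone sketch of the level-selection test: a range inherited from the
 * PROM is installed as a pgd- or pmd-level leaf only if its start is aligned
 * to that level and the MMU probe returns the same entry at the end of the
 * region (abstracted here as booleans).  Shift values of 18 and 24 are
 * assumptions for illustration.
 */
#include <stdio.h>

#define PMD_SHIFT	18
#define PGDIR_SHIFT	24
#define PMD_MASK	(~((1UL << PMD_SHIFT) - 1))
#define PGDIR_MASK	(~((1UL << PGDIR_SHIFT) - 1))

static int map_level(unsigned long start, int same_pmd_probe, int same_pgd_probe)
{
	int what = 0;				/* 0 = page-level pte */

	if (!(start & ~PMD_MASK) && same_pmd_probe)
		what = 1;			/* whole pmd-sized region */
	if (!(start & ~PGDIR_MASK) && same_pgd_probe)
		what = 2;			/* whole pgd-sized region */
	return what;
}

int main(void)
{
	printf("%d\n", map_level(0xf0000000UL, 1, 1));	/* 2: pgd-aligned  */
	printf("%d\n", map_level(0xf0040000UL, 1, 0));	/* 1: pmd-aligned  */
	printf("%d\n", map_level(0xf0041000UL, 0, 0));	/* 0: page mapping */
	return 0;
}
```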
| .. | .. | 
|---|
| 857 | 856 |  /* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */ | 
|---|
| 858 | 857 |  static unsigned long __init map_spbank(unsigned long vbase, int sp_entry) | 
|---|
| 859 | 858 |  { | 
|---|
| 860 |  | -	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);  | 
|---|
| 861 |  | -	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);  | 
|---|
| 862 |  | -	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);  | 
|---|
 | 859 | +	unsigned long pstart = (sp_banks[sp_entry].base_addr & PGDIR_MASK);  | 
|---|
 | 860 | +	unsigned long vstart = (vbase & PGDIR_MASK);  | 
|---|
 | 861 | +	unsigned long vend = PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);  | 
|---|
| 863 | 862 |  	/* Map "low" memory only */ | 
|---|
| 864 | 863 |  	const unsigned long min_vaddr = PAGE_OFFSET; | 
|---|
| 865 | 864 |  	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM; | 
|---|
| .. | .. | 
|---|
| 872 | 871 |   | 
|---|
| 873 | 872 |  	while (vstart < vend) { | 
|---|
| 874 | 873 |  		do_large_mapping(vstart, pstart); | 
|---|
| 875 |  | -		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;  | 
|---|
 | 874 | +		vstart += PGDIR_SIZE; pstart += PGDIR_SIZE;  | 
|---|
| 876 | 875 |  	} | 
|---|
| 877 | 876 |  	return vstart; | 
|---|
| 878 | 877 |  } | 
|---|
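`map_spbank()` covers each physical memory bank with pgd-sized large mappings, so only the macro spellings change here (`SRMMU_PGDIR_*` to the generic `PGDIR_*`); the underlying sizes are the same once the layouts agree. A standalone sketch of the bounds arithmetic, with the pgd shift assumed for illustration:

```c
/* Standalone sketch of map_spbank()'s bounds arithmetic: the bank is mapped
 * in whole PGDIR-sized steps, so the start is rounded down with PGDIR_MASK
 * and the end rounded up with PGDIR_ALIGN.  The 24-bit shift (16 MiB steps)
 * is an assumption for illustration.
 */
#include <stdio.h>

#define PGDIR_SHIFT	24
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
#define PGDIR_ALIGN(a)	(((a) + PGDIR_SIZE - 1) & PGDIR_MASK)

int main(void)
{
	unsigned long vbase = 0xf0000000UL;
	unsigned long num_bytes = 0x01800000UL;		/* 24 MiB bank */
	unsigned long vstart = vbase & PGDIR_MASK;
	unsigned long vend = PGDIR_ALIGN(vbase + num_bytes);

	printf("map 0x%lx..0x%lx (%lu large mappings)\n",
	       vstart, vend, (vend - vstart) >> PGDIR_SHIFT);
	return 0;
}
```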
| .. | .. | 
|---|
| 898 | 897 |  	phandle cpunode; | 
|---|
| 899 | 898 |  	char node_str[128]; | 
|---|
| 900 | 899 |  	pgd_t *pgd; | 
|---|
 | 900 | +	p4d_t *p4d;  | 
|---|
 | 901 | +	pud_t *pud;  | 
|---|
| 901 | 902 |  	pmd_t *pmd; | 
|---|
| 902 | 903 |  	pte_t *pte; | 
|---|
| 903 | 904 |  	unsigned long pages_avail; | 
|---|
| .. | .. | 
|---|
| 959 | 960 |  	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END); | 
|---|
| 960 | 961 |   | 
|---|
| 961 | 962 |  	pgd = pgd_offset_k(PKMAP_BASE); | 
|---|
| 962 |  | -	pmd = pmd_offset(pgd, PKMAP_BASE);  | 
|---|
 | 963 | +	p4d = p4d_offset(pgd, PKMAP_BASE);  | 
|---|
 | 964 | +	pud = pud_offset(p4d, PKMAP_BASE);  | 
|---|
 | 965 | +	pmd = pmd_offset(pud, PKMAP_BASE);  | 
|---|
| 963 | 966 |  	pte = pte_offset_kernel(pmd, PKMAP_BASE); | 
|---|
| 964 | 967 |  	pkmap_page_table = pte; | 
|---|
| 965 | 968 |   | 
|---|
| .. | .. | 
|---|
| 971 | 974 |  	kmap_init(); | 
|---|
| 972 | 975 |   | 
|---|
| 973 | 976 |  	{ | 
|---|
| 974 |  | -		unsigned long zones_size[MAX_NR_ZONES];  | 
|---|
| 975 |  | -		unsigned long zholes_size[MAX_NR_ZONES];  | 
|---|
| 976 |  | -		unsigned long npages;  | 
|---|
| 977 |  | -		int znum;  | 
|---|
 | 977 | +		unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };  | 
|---|
| 978 | 978 |   | 
|---|
| 979 |  | -		for (znum = 0; znum < MAX_NR_ZONES; znum++)  | 
|---|
| 980 |  | -			zones_size[znum] = zholes_size[znum] = 0;  | 
|---|
 | 979 | +		max_zone_pfn[ZONE_DMA] = max_low_pfn;  | 
|---|
 | 980 | +		max_zone_pfn[ZONE_NORMAL] = max_low_pfn;  | 
|---|
 | 981 | +		max_zone_pfn[ZONE_HIGHMEM] = highend_pfn;  | 
|---|
| 981 | 982 |   | 
|---|
| 982 |  | -		npages = max_low_pfn - pfn_base;  | 
|---|
| 983 |  | -  | 
|---|
| 984 |  | -		zones_size[ZONE_DMA] = npages;  | 
|---|
| 985 |  | -		zholes_size[ZONE_DMA] = npages - pages_avail;  | 
|---|
| 986 |  | -  | 
|---|
| 987 |  | -		npages = highend_pfn - max_low_pfn;  | 
|---|
| 988 |  | -		zones_size[ZONE_HIGHMEM] = npages;  | 
|---|
| 989 |  | -		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();  | 
|---|
| 990 |  | -  | 
|---|
| 991 |  | -		free_area_init_node(0, zones_size, pfn_base, zholes_size);  | 
|---|
 | 983 | +		free_area_init(max_zone_pfn);  | 
|---|
| 992 | 984 |  	} | 
|---|
| 993 | 985 |  } | 
|---|
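Zone setup moves from `free_area_init_node()`, which took per-zone sizes and hole sizes computed by hand from `pages_avail` and `calc_highpages()`, to `free_area_init()`, which only needs each zone's upper PFN bound and derives spans and holes from the memblock map itself. A standalone sketch of the new convention, with the PFN values made up for illustration:

```c
/* Standalone sketch of the max_zone_pfn convention: zones are described only
 * by their upper PFN bound, and each zone's span is the gap down to the
 * previous bound (holes are discovered from the memory map, not passed in).
 * All PFN values here are made up for illustration.
 */
#include <stdio.h>

enum { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

static const char *names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };

int main(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
	unsigned long max_low_pfn = 0x40000;	/* assumed: 1 GiB of low memory */
	unsigned long highend_pfn = 0x60000;	/* assumed: plus 512 MiB highmem */
	unsigned long prev = 0;
	int i;

	max_zone_pfn[ZONE_DMA] = max_low_pfn;
	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;	/* empty zone: same bound */
	max_zone_pfn[ZONE_HIGHMEM] = highend_pfn;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		printf("%-8s %lu pages\n", names[i], max_zone_pfn[i] - prev);
		prev = max_zone_pfn[i];
	}
	return 0;
}
```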
| 994 | 986 |   | 
|---|
| .. | .. | 
|---|
| 1828 | 1820 |  		&smp_cachetlb_ops; | 
|---|
| 1829 | 1821 |  #endif | 
|---|
| 1830 | 1822 |   | 
|---|
| 1831 |  | -	if (sparc_cpu_model == sun4d)  | 
|---|
| 1832 |  | -		ld_mmu_iounit();  | 
|---|
| 1833 |  | -	else  | 
|---|
 | 1823 | +	if (sparc_cpu_model != sun4d)  | 
|---|
| 1834 | 1824 |  		ld_mmu_iommu(); | 
|---|
| 1835 | 1825 |  #ifdef CONFIG_SMP | 
|---|
| 1836 | 1826 |  	if (sparc_cpu_model == sun4d) | 
|---|