| Old | New | Code |
|---|---|---|
| .. | .. | |
| 11 | 11 | #include <linux/sched.h> |
| 12 | 12 | #include <linux/string.h> |
| 13 | 13 | #include <linux/init.h> |
| 14 | | -#include <linux/bootmem.h> |
| | 14 | +#include <linux/memblock.h> |
| 15 | 15 | #include <linux/mm.h> |
| 16 | 16 | #include <linux/hugetlb.h> |
| 17 | 17 | #include <linux/initrd.h> |
| .. | .. | |
| 25 | 25 | #include <linux/sort.h> |
| 26 | 26 | #include <linux/ioport.h> |
| 27 | 27 | #include <linux/percpu.h> |
| 28 | | -#include <linux/memblock.h> |
| 29 | 28 | #include <linux/mmzone.h> |
| 30 | 29 | #include <linux/gfp.h> |
| 31 | 30 | |
| 32 | 31 | #include <asm/head.h> |
| 33 | 32 | #include <asm/page.h> |
| 34 | 33 | #include <asm/pgalloc.h> |
| 35 | | -#include <asm/pgtable.h> |
| 36 | 34 | #include <asm/oplib.h> |
| 37 | 35 | #include <asm/iommu.h> |
| 38 | 36 | #include <asm/io.h> |
| .. | .. | |
| 326 | 324 | } |
| 327 | 325 | |
| 328 | 326 | #ifdef CONFIG_HUGETLB_PAGE |
| 329 | | -static void __init add_huge_page_size(unsigned long size) |
| 330 | | -{ |
| 331 | | - unsigned int order; |
| 332 | | - |
| 333 | | - if (size_to_hstate(size)) |
| 334 | | - return; |
| 335 | | - |
| 336 | | - order = ilog2(size) - PAGE_SHIFT; |
| 337 | | - hugetlb_add_hstate(order); |
| 338 | | -} |
| 339 | | - |
| 340 | 327 | static int __init hugetlbpage_init(void) |
| 341 | 328 | { |
| 342 | | - add_huge_page_size(1UL << HPAGE_64K_SHIFT); |
| 343 | | - add_huge_page_size(1UL << HPAGE_SHIFT); |
| 344 | | - add_huge_page_size(1UL << HPAGE_256MB_SHIFT); |
| 345 | | - add_huge_page_size(1UL << HPAGE_2GB_SHIFT); |
| | 329 | + hugetlb_add_hstate(HPAGE_64K_SHIFT - PAGE_SHIFT); |
| | 330 | + hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT); |
| | 331 | + hugetlb_add_hstate(HPAGE_256MB_SHIFT - PAGE_SHIFT); |
| | 332 | + hugetlb_add_hstate(HPAGE_2GB_SHIFT - PAGE_SHIFT); |
| 346 | 333 | |
| 347 | 334 | return 0; |
| 348 | 335 | } |
| .. | .. | |
| 361 | 348 | __asm__ __volatile__("flush %0" : : "r" (addr)); |
| 362 | 349 | } |
| 363 | 350 | |
| 364 | | -static int __init setup_hugepagesz(char *string) |
| | 351 | +bool __init arch_hugetlb_valid_size(unsigned long size) |
| 365 | 352 | { |
| 366 | | - unsigned long long hugepage_size; |
| 367 | | - unsigned int hugepage_shift; |
| | 353 | + unsigned int hugepage_shift = ilog2(size); |
| 368 | 354 | unsigned short hv_pgsz_idx; |
| 369 | 355 | unsigned int hv_pgsz_mask; |
| 370 | | - int rc = 0; |
| 371 | | - |
| 372 | | - hugepage_size = memparse(string, &string); |
| 373 | | - hugepage_shift = ilog2(hugepage_size); |
| 374 | 356 | |
| 375 | 357 | switch (hugepage_shift) { |
| 376 | 358 | case HPAGE_16GB_SHIFT: |
| .. | .. | |
| 398 | 380 | hv_pgsz_mask = 0; |
| 399 | 381 | } |
| 400 | 382 | |
| 401 | | - if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) { |
| 402 | | - hugetlb_bad_size(); |
| 403 | | - pr_err("hugepagesz=%llu not supported by MMU.\n", |
| 404 | | - hugepage_size); |
| 405 | | - goto out; |
| 406 | | - } |
| | 383 | + if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) |
| | 384 | + return false; |
| 407 | 385 | |
| 408 | | - add_huge_page_size(hugepage_size); |
| 409 | | - rc = 1; |
| 410 | | - |
| 411 | | -out: |
| 412 | | - return rc; |
| | 386 | + return true; |
| 413 | 387 | } |
| 414 | | -__setup("hugepagesz=", setup_hugepagesz); |
| 415 | 388 | #endif /* CONFIG_HUGETLB_PAGE */ |
| 416 | 389 | |
| 417 | 390 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) |
| .. | .. | |
| 530 | 503 | if (kaddr >= PAGE_OFFSET) |
| 531 | 504 | paddr = kaddr & mask; |
| 532 | 505 | else { |
| 533 | | - pgd_t *pgdp = pgd_offset_k(kaddr); |
| 534 | | - pud_t *pudp = pud_offset(pgdp, kaddr); |
| 535 | | - pmd_t *pmdp = pmd_offset(pudp, kaddr); |
| 536 | | - pte_t *ptep = pte_offset_kernel(pmdp, kaddr); |
| | 506 | + pte_t *ptep = virt_to_kpte(kaddr); |
| 537 | 507 | |
| 538 | 508 | paddr = pte_val(*ptep) & mask; |
| 539 | 509 | } |
| .. | .. | |
| 977 | 947 | { |
| 978 | 948 | int prev_nid, new_nid; |
| 979 | 949 | |
| 980 | | - prev_nid = -1; |
| | 950 | + prev_nid = NUMA_NO_NODE; |
| 981 | 951 | for ( ; start < end; start += PAGE_SIZE) { |
| 982 | 952 | for (new_nid = 0; new_nid < num_node_masks; new_nid++) { |
| 983 | 953 | struct node_mem_mask *p = &node_masks[new_nid]; |
| 984 | 954 | |
| 985 | 955 | if ((start & p->mask) == p->match) { |
| 986 | | - if (prev_nid == -1) |
| | 956 | + if (prev_nid == NUMA_NO_NODE) |
| 987 | 957 | prev_nid = new_nid; |
| 988 | 958 | break; |
| 989 | 959 | } |
| .. | .. | |
| 1090 | 1060 | struct pglist_data *p; |
| 1091 | 1061 | unsigned long start_pfn, end_pfn; |
| 1092 | 1062 | #ifdef CONFIG_NEED_MULTIPLE_NODES |
| 1093 | | - unsigned long paddr; |
| 1094 | 1063 | |
| 1095 | | - paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid); |
| 1096 | | - if (!paddr) { |
| | 1064 | + NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data), |
| | 1065 | + SMP_CACHE_BYTES, nid); |
| | 1066 | + if (!NODE_DATA(nid)) { |
| 1097 | 1067 | prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); |
| 1098 | 1068 | prom_halt(); |
| 1099 | 1069 | } |
| 1100 | | - NODE_DATA(nid) = __va(paddr); |
| 1101 | | - memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); |
| 1102 | 1070 | |
| 1103 | 1071 | NODE_DATA(nid)->node_id = nid; |
| 1104 | 1072 | #endif |
| .. | .. | |
| 1208 | 1176 | md = mdesc_grab(); |
| 1209 | 1177 | |
| 1210 | 1178 | count = 0; |
| 1211 | | - nid = -1; |
| | 1179 | + nid = NUMA_NO_NODE; |
| 1212 | 1180 | mdesc_for_each_node_by_name(md, grp, "group") { |
| 1213 | 1181 | if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) { |
| 1214 | 1182 | nid = count; |
| .. | .. | |
| 1224 | 1192 | |
| 1225 | 1193 | static void __init add_node_ranges(void) |
| 1226 | 1194 | { |
| 1227 | | - struct memblock_region *reg; |
| | 1195 | + phys_addr_t start, end; |
| 1228 | 1196 | unsigned long prev_max; |
| | 1197 | + u64 i; |
| 1229 | 1198 | |
| 1230 | 1199 | memblock_resized: |
| 1231 | 1200 | prev_max = memblock.memory.max; |
| 1232 | 1201 | |
| 1233 | | - for_each_memblock(memory, reg) { |
| 1234 | | - unsigned long size = reg->size; |
| 1235 | | - unsigned long start, end; |
| 1236 | | - |
| 1237 | | - start = reg->base; |
| 1238 | | - end = start + size; |
| | 1202 | + for_each_mem_range(i, &start, &end) { |
| 1239 | 1203 | while (start < end) { |
| 1240 | 1204 | unsigned long this_end; |
| 1241 | 1205 | int nid; |
| .. | .. | |
| 1243 | 1207 | this_end = memblock_nid_range(start, end, &nid); |
| 1244 | 1208 | |
| 1245 | 1209 | numadbg("Setting memblock NUMA node nid[%d] " |
| 1246 | | - "start[%lx] end[%lx]\n", |
| | 1210 | + "start[%llx] end[%lx]\n", |
| 1247 | 1211 | nid, start, this_end); |
| 1248 | 1212 | |
| 1249 | 1213 | memblock_set_node(start, this_end - start, |
| .. | .. | |
| 1266 | 1230 | if (!count) |
| 1267 | 1231 | return -ENOENT; |
| 1268 | 1232 | |
| 1269 | | - paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup), |
| 1270 | | - SMP_CACHE_BYTES); |
| | 1233 | + paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mlgroup), |
| | 1234 | + SMP_CACHE_BYTES); |
| 1271 | 1235 | if (!paddr) |
| 1272 | 1236 | return -ENOMEM; |
| 1273 | 1237 | |
| .. | .. | |
| 1307 | 1271 | if (!count) |
| 1308 | 1272 | return -ENOENT; |
| 1309 | 1273 | |
| 1310 | | - paddr = memblock_alloc(count * sizeof(struct mdesc_mblock), |
| 1311 | | - SMP_CACHE_BYTES); |
| | 1274 | + paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mblock), |
| | 1275 | + SMP_CACHE_BYTES); |
| 1312 | 1276 | if (!paddr) |
| 1313 | 1277 | return -ENOMEM; |
| 1314 | 1278 | |
| .. | .. | |
| 1642 | 1606 | |
| 1643 | 1607 | /* XXX cpu notifier XXX */ |
| 1644 | 1608 | |
| 1645 | | - sparse_memory_present_with_active_regions(MAX_NUMNODES); |
| 1646 | 1609 | sparse_init(); |
| 1647 | 1610 | |
| 1648 | 1611 | return end_pfn; |
| .. | .. | |
| 1656 | 1619 | bool kern_addr_valid(unsigned long addr) |
| 1657 | 1620 | { |
| 1658 | 1621 | pgd_t *pgd; |
| | 1622 | + p4d_t *p4d; |
| 1659 | 1623 | pud_t *pud; |
| 1660 | 1624 | pmd_t *pmd; |
| 1661 | 1625 | pte_t *pte; |
| .. | .. | |
| 1675 | 1639 | |
| 1676 | 1640 | pgd = pgd_offset_k(addr); |
| 1677 | 1641 | if (pgd_none(*pgd)) |
| 1678 | | - return 0; |
| | 1642 | + return false; |
| 1679 | 1643 | |
| 1680 | | - pud = pud_offset(pgd, addr); |
| | 1644 | + p4d = p4d_offset(pgd, addr); |
| | 1645 | + if (p4d_none(*p4d)) |
| | 1646 | + return false; |
| | 1647 | + |
| | 1648 | + pud = pud_offset(p4d, addr); |
| 1681 | 1649 | if (pud_none(*pud)) |
| 1682 | | - return 0; |
| | 1650 | + return false; |
| 1683 | 1651 | |
| 1684 | 1652 | if (pud_large(*pud)) |
| 1685 | 1653 | return pfn_valid(pud_pfn(*pud)); |
| 1686 | 1654 | |
| 1687 | 1655 | pmd = pmd_offset(pud, addr); |
| 1688 | 1656 | if (pmd_none(*pmd)) |
| 1689 | | - return 0; |
| | 1657 | + return false; |
| 1690 | 1658 | |
| 1691 | 1659 | if (pmd_large(*pmd)) |
| 1692 | 1660 | return pfn_valid(pmd_pfn(*pmd)); |
| 1693 | 1661 | |
| 1694 | 1662 | pte = pte_offset_kernel(pmd, addr); |
| 1695 | 1663 | if (pte_none(*pte)) |
| 1696 | | - return 0; |
| | 1664 | + return false; |
| 1697 | 1665 | |
| 1698 | 1666 | return pfn_valid(pte_pfn(*pte)); |
| 1699 | 1667 | } |
| .. | .. | |
| 1803 | 1771 | while (vstart < vend) { |
| 1804 | 1772 | unsigned long this_end, paddr = __pa(vstart); |
| 1805 | 1773 | pgd_t *pgd = pgd_offset_k(vstart); |
| | 1774 | + p4d_t *p4d; |
| 1806 | 1775 | pud_t *pud; |
| 1807 | 1776 | pmd_t *pmd; |
| 1808 | 1777 | pte_t *pte; |
| .. | .. | |
| 1810 | 1779 | if (pgd_none(*pgd)) { |
| 1811 | 1780 | pud_t *new; |
| 1812 | 1781 | |
| 1813 | | - new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); |
| | 1782 | + new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, |
| | 1783 | + PAGE_SIZE); |
| | 1784 | + if (!new) |
| | 1785 | + goto err_alloc; |
| 1814 | 1786 | alloc_bytes += PAGE_SIZE; |
| 1815 | 1787 | pgd_populate(&init_mm, pgd, new); |
| 1816 | 1788 | } |
| 1817 | | - pud = pud_offset(pgd, vstart); |
| | 1789 | + |
| | 1790 | + p4d = p4d_offset(pgd, vstart); |
| | 1791 | + if (p4d_none(*p4d)) { |
| | 1792 | + pud_t *new; |
| | 1793 | + |
| | 1794 | + new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, |
| | 1795 | + PAGE_SIZE); |
| | 1796 | + if (!new) |
| | 1797 | + goto err_alloc; |
| | 1798 | + alloc_bytes += PAGE_SIZE; |
| | 1799 | + p4d_populate(&init_mm, p4d, new); |
| | 1800 | + } |
| | 1801 | + |
| | 1802 | + pud = pud_offset(p4d, vstart); |
| 1818 | 1803 | if (pud_none(*pud)) { |
| 1819 | 1804 | pmd_t *new; |
| 1820 | 1805 | |
| .. | .. | |
| 1822 | 1807 | vstart = kernel_map_hugepud(vstart, vend, pud); |
| 1823 | 1808 | continue; |
| 1824 | 1809 | } |
| 1825 | | - new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); |
| | 1810 | + new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, |
| | 1811 | + PAGE_SIZE); |
| | 1812 | + if (!new) |
| | 1813 | + goto err_alloc; |
| 1826 | 1814 | alloc_bytes += PAGE_SIZE; |
| 1827 | 1815 | pud_populate(&init_mm, pud, new); |
| 1828 | 1816 | } |
| .. | .. | |
| 1835 | 1823 | vstart = kernel_map_hugepmd(vstart, vend, pmd); |
| 1836 | 1824 | continue; |
| 1837 | 1825 | } |
| 1838 | | - new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); |
| | 1826 | + new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, |
| | 1827 | + PAGE_SIZE); |
| | 1828 | + if (!new) |
| | 1829 | + goto err_alloc; |
| 1839 | 1830 | alloc_bytes += PAGE_SIZE; |
| 1840 | 1831 | pmd_populate_kernel(&init_mm, pmd, new); |
| 1841 | 1832 | } |
| .. | .. | |
| 1855 | 1846 | } |
| 1856 | 1847 | |
| 1857 | 1848 | return alloc_bytes; |
| | 1849 | + |
| | 1850 | +err_alloc: |
| | 1851 | + panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n", |
| | 1852 | + __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); |
| | 1853 | + return -ENOMEM; |
| 1858 | 1854 | } |
| 1859 | 1855 | |
| 1860 | 1856 | static void __init flush_all_kernel_tsbs(void) |
| .. | .. | |
| 2258 | 2254 | static void sun4u_pgprot_init(void); |
| 2259 | 2255 | static void sun4v_pgprot_init(void); |
| 2260 | 2256 | |
| 2261 | | -static phys_addr_t __init available_memory(void) |
| 2262 | | -{ |
| 2263 | | - phys_addr_t available = 0ULL; |
| 2264 | | - phys_addr_t pa_start, pa_end; |
| 2265 | | - u64 i; |
| 2266 | | - |
| 2267 | | - for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start, |
| 2268 | | - &pa_end, NULL) |
| 2269 | | - available = available + (pa_end - pa_start); |
| 2270 | | - |
| 2271 | | - return available; |
| 2272 | | -} |
| 2273 | | - |
| 2274 | 2257 | #define _PAGE_CACHE_4U (_PAGE_CP_4U \| _PAGE_CV_4U) |
| 2275 | 2258 | #define _PAGE_CACHE_4V (_PAGE_CP_4V \| _PAGE_CV_4V) |
| 2276 | 2259 | #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U \| _PAGE_WRITE_4U \| _PAGE_W_4U) |
| .. | .. | |
| 2284 | 2267 | */ |
| 2285 | 2268 | static void __init reduce_memory(phys_addr_t limit_ram) |
| 2286 | 2269 | { |
| 2287 | | - phys_addr_t avail_ram = available_memory(); |
| 2288 | | - phys_addr_t pa_start, pa_end; |
| 2289 | | - u64 i; |
| 2290 | | - |
| 2291 | | - if (limit_ram >= avail_ram) |
| 2292 | | - return; |
| 2293 | | - |
| 2294 | | - for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start, |
| 2295 | | - &pa_end, NULL) { |
| 2296 | | - phys_addr_t region_size = pa_end - pa_start; |
| 2297 | | - phys_addr_t clip_start = pa_start; |
| 2298 | | - |
| 2299 | | - avail_ram = avail_ram - region_size; |
| 2300 | | - /* Are we consuming too much? */ |
| 2301 | | - if (avail_ram < limit_ram) { |
| 2302 | | - phys_addr_t give_back = limit_ram - avail_ram; |
| 2303 | | - |
| 2304 | | - region_size = region_size - give_back; |
| 2305 | | - clip_start = clip_start + give_back; |
| 2306 | | - } |
| 2307 | | - |
| 2308 | | - memblock_remove(clip_start, region_size); |
| 2309 | | - |
| 2310 | | - if (avail_ram <= limit_ram) |
| 2311 | | - break; |
| 2312 | | - i = 0UL; |
| 2313 | | - } |
| | 2270 | + limit_ram += memblock_reserved_size(); |
| | 2271 | + memblock_enforce_memory_limit(limit_ram); |
| 2314 | 2272 | } |
| 2315 | 2273 | |
| 2316 | 2274 | void __init paging_init(void) |
| .. | .. | |
| 2495 | 2453 | |
| 2496 | 2454 | max_zone_pfns[ZONE_NORMAL] = end_pfn; |
| 2497 | 2455 | |
| 2498 | | - free_area_init_nodes(max_zone_pfns); |
| | 2456 | + free_area_init(max_zone_pfns); |
| 2499 | 2457 | } |
| 2500 | 2458 | |
| 2501 | 2459 | printk("Booting Linux...\n"); |
| .. | .. | |
| 2541 | 2499 | { |
| 2542 | 2500 | high_memory = __va(last_valid_pfn << PAGE_SHIFT); |
| 2543 | 2501 | |
| 2544 | | - free_all_bootmem(); |
| | 2502 | + memblock_free_all(); |
| 2545 | 2503 | |
| 2546 | 2504 | /* |
| 2547 | 2505 | * Must be done after boot memory is put on freelist, because here we |
| 2548 | 2506 | * might set fields in deferred struct pages that have not yet been |
| 2549 | | - * initialized, and free_all_bootmem() initializes all the reserved |
| | 2507 | + * initialized, and memblock_free_all() initializes all the reserved |
| 2550 | 2508 | * deferred pages for us. |
| 2551 | 2509 | */ |
| 2552 | 2510 | register_page_bootmem_info(); |
| .. | .. | |
| 2599 | 2557 | } |
| 2600 | 2558 | } |
| 2601 | 2559 | |
| 2602 | | -#ifdef CONFIG_BLK_DEV_INITRD |
| 2603 | | -void free_initrd_mem(unsigned long start, unsigned long end) |
| 2604 | | -{ |
| 2605 | | - free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, |
| 2606 | | - "initrd"); |
| 2607 | | -} |
| 2608 | | -#endif |
| 2609 | | - |
| 2610 | 2560 | pgprot_t PAGE_KERNEL __read_mostly; |
| 2611 | 2561 | EXPORT_SYMBOL(PAGE_KERNEL); |
| 2612 | 2562 | |
| .. | .. | |
| 2647 | 2597 | for (; vstart < vend; vstart += PMD_SIZE) { |
| 2648 | 2598 | pgd_t *pgd = vmemmap_pgd_populate(vstart, node); |
| 2649 | 2599 | unsigned long pte; |
| | 2600 | + p4d_t *p4d; |
| 2650 | 2601 | pud_t *pud; |
| 2651 | 2602 | pmd_t *pmd; |
| 2652 | 2603 | |
| 2653 | 2604 | if (!pgd) |
| 2654 | 2605 | return -ENOMEM; |
| 2655 | 2606 | |
| 2656 | | - pud = vmemmap_pud_populate(pgd, vstart, node); |
| | 2607 | + p4d = vmemmap_p4d_populate(pgd, vstart, node); |
| | 2608 | + if (!p4d) |
| | 2609 | + return -ENOMEM; |
| | 2610 | + |
| | 2611 | + pud = vmemmap_pud_populate(p4d, vstart, node); |
| 2657 | 2612 | if (!pud) |
| 2658 | 2613 | return -ENOMEM; |
| 2659 | 2614 | |
| .. | .. | |
| 2922 | 2877 | : : "r" (pstate)); |
| 2923 | 2878 | } |
| 2924 | 2879 | |
| 2925 | | -pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
| 2926 | | - unsigned long address) |
| | 2880 | +pte_t *pte_alloc_one_kernel(struct mm_struct *mm) |
| 2927 | 2881 | { |
| 2928 | 2882 | struct page *page = alloc_page(GFP_KERNEL \| __GFP_ZERO); |
| 2929 | 2883 | pte_t *pte = NULL; |
| .. | .. | |
| 2934 | 2888 | return pte; |
| 2935 | 2889 | } |
| 2936 | 2890 | |
| 2937 | | -pgtable_t pte_alloc_one(struct mm_struct *mm, |
| 2938 | | - unsigned long address) |
| | 2891 | +pgtable_t pte_alloc_one(struct mm_struct *mm) |
| 2939 | 2892 | { |
| 2940 | 2893 | struct page *page = alloc_page(GFP_KERNEL \| __GFP_ZERO); |
| 2941 | 2894 | if (!page) |
| 2942 | 2895 | return NULL; |
| 2943 | | - if (!pgtable_page_ctor(page)) { |
| 2944 | | - free_unref_page(page); |
| | 2896 | + if (!pgtable_pte_page_ctor(page)) { |
| | 2897 | + __free_page(page); |
| 2945 | 2898 | return NULL; |
| 2946 | 2899 | } |
| 2947 | 2900 | return (pte_t *) page_address(page); |
| .. | .. | |
| 2956 | 2909 | { |
| 2957 | 2910 | struct page *page = virt_to_page(pte); |
| 2958 | 2911 | |
| 2959 | | - pgtable_page_dtor(page); |
| | 2912 | + pgtable_pte_page_dtor(page); |
| 2960 | 2913 | __free_page(page); |
| 2961 | 2914 | } |
| 2962 | 2915 | |
|---|