@@ -61,7 +61,6 @@
 #include <linux/hugetlb.h>
 #include <linux/sched/rt.h>
 #include <linux/sched/mm.h>
-#include <linux/local_lock.h>
 #include <linux/page_owner.h>
 #include <linux/page_pinner.h>
 #include <linux/kthread.h>
@@ -385,13 +384,6 @@
 EXPORT_SYMBOL(nr_node_ids);
 EXPORT_SYMBOL(nr_online_nodes);
 #endif
-
-struct pa_lock {
-        local_lock_t l;
-};
-static DEFINE_PER_CPU(struct pa_lock, pa_lock) = {
-        .l = INIT_LOCAL_LOCK(l),
-};
 
 int page_group_by_mobility_disabled __read_mostly;
 
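The pa_lock deleted above is the PREEMPT_RT local_lock pattern: on non-RT builds local_lock_irqsave() compiles down to plain local_irq_save(), so reverting to the raw IRQ primitives is behavior-neutral there, while on PREEMPT_RT the local_lock is a per-CPU sleeping lock that keeps the section preemptible. A minimal sketch of the pattern being removed (kernel C, illustrative "example_*" names, not buildable outside a kernel tree):

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* One lock instance per CPU, guarding that CPU's private data. */
struct example_pcpu_lock {
        local_lock_t l;
};
static DEFINE_PER_CPU(struct example_pcpu_lock, example_pcpu_lock) = {
        .l = INIT_LOCAL_LOCK(l),
};

static void example_critical_section(void)
{
        unsigned long flags;

        /* !RT: IRQs off; RT: per-CPU rtmutex, IRQs stay enabled */
        local_lock_irqsave(&example_pcpu_lock.l, flags);
        /* this CPU's data can be touched safely here on both RT and !RT */
        local_unlock_irqrestore(&example_pcpu_lock.l, flags);
}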
@@ -1430,7 +1422,7 @@
 }
 
 /*
- * Frees a number of pages which have been collected from the pcp lists.
+ * Frees a number of pages from the PCP lists
  * Assumes all pages on list are in same zone, and of same order.
  * count is the number of pages to free.
  *
@@ -1440,56 +1432,15 @@
  * And clear the zone's pages_scanned counter, to hold off the "all pages are
  * pinned" detection logic.
  */
-static void free_pcppages_bulk(struct zone *zone, struct list_head *head,
-                               bool zone_retry)
-{
-        bool isolated_pageblocks;
-        struct page *page, *tmp;
-        unsigned long flags;
-
-        spin_lock_irqsave(&zone->lock, flags);
-        isolated_pageblocks = has_isolate_pageblock(zone);
-
-        /*
-         * Use safe version since after __free_one_page(),
-         * page->lru.next will not point to original list.
-         */
-        list_for_each_entry_safe(page, tmp, head, lru) {
-                int mt = get_pcppage_migratetype(page);
-
-                if (page_zone(page) != zone) {
-                        /*
-                         * free_unref_page_list() sorts pages by zone. If we end
-                         * up with pages from a different NUMA nodes belonging
-                         * to the same ZONE index then we need to redo with the
-                         * correct ZONE pointer. Skip the page for now, redo it
-                         * on the next iteration.
-                         */
-                        WARN_ON_ONCE(zone_retry == false);
-                        if (zone_retry)
-                                continue;
-                }
-
-                /* MIGRATE_ISOLATE page should not go to pcplists */
-                VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
-                /* Pageblock could have been isolated meanwhile */
-                if (unlikely(isolated_pageblocks))
-                        mt = get_pageblock_migratetype(page);
-
-                list_del(&page->lru);
-                __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
-                trace_mm_page_pcpu_drain(page, 0, mt);
-        }
-        spin_unlock_irqrestore(&zone->lock, flags);
-}
-
-static void isolate_pcp_pages(int count, struct per_cpu_pages *pcp,
-                              struct list_head *dst)
+static void free_pcppages_bulk(struct zone *zone, int count,
+                               struct per_cpu_pages *pcp)
 {
         int migratetype = 0;
         int batch_free = 0;
         int prefetch_nr = 0;
-        struct page *page;
+        bool isolated_pageblocks;
+        struct page *page, *tmp;
+        LIST_HEAD(head);
 
         /*
          * Ensure proper count is passed which otherwise would stuck in the
@@ -1526,7 +1477,7 @@
                 if (bulkfree_pcp_prepare(page))
                         continue;
 
-                list_add_tail(&page->lru, dst);
+                list_add_tail(&page->lru, &head);
 
                 /*
                  * We are going to put the page back to the global
@@ -1541,6 +1492,26 @@
                                 prefetch_buddy(page);
                 } while (--count && --batch_free && !list_empty(list));
         }
+
+        spin_lock(&zone->lock);
+        isolated_pageblocks = has_isolate_pageblock(zone);
+
+        /*
+         * Use safe version since after __free_one_page(),
+         * page->lru.next will not point to original list.
+         */
+        list_for_each_entry_safe(page, tmp, &head, lru) {
+                int mt = get_pcppage_migratetype(page);
+                /* MIGRATE_ISOLATE page should not go to pcplists */
+                VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+                /* Pageblock could have been isolated meanwhile */
+                if (unlikely(isolated_pageblocks))
+                        mt = get_pageblock_migratetype(page);
+
+                __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
+                trace_mm_page_pcpu_drain(page, 0, mt);
+        }
+        spin_unlock(&zone->lock);
 }
 
 static void free_one_page(struct zone *zone,
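With the revert, free_pcppages_bulk() again does both phases itself: the loop above detaches up to count pages from the per-CPU lists onto a local head list, and the loop added here merges them into the buddy freelists under zone->lock. The deleted split into isolate_pcp_pages() plus a separate free call existed so RT could take zone->lock with interrupts still enabled. A runnable userspace analogue of the collect-then-merge shape (illustrative names, not the kernel's; compile with cc -pthread):

#include <pthread.h>
#include <stdio.h>

#define CACHE_MAX 8

struct pool {
        pthread_mutex_t lock;
        int merged;                     /* stand-in for the buddy free lists */
};

struct cpu_cache {
        int items[CACHE_MAX];
        int count;
};

static void bulk_free(struct pool *pool, struct cpu_cache *cache, int count)
{
        int batch[CACHE_MAX], n = 0;

        /* phase 1: detach up to count items from the per-CPU cache
         * (in the kernel this runs with IRQs off) */
        while (cache->count && n < count)
                batch[n++] = cache->items[--cache->count];

        /* phase 2: merge the batch into the shared pool under its own lock */
        pthread_mutex_lock(&pool->lock);
        while (n)
                pool->merged += batch[--n];
        pthread_mutex_unlock(&pool->lock);
}

int main(void)
{
        struct pool pool = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct cpu_cache cache = { {1, 2, 3, 4, 5}, 5 };

        bulk_free(&pool, &cache, 3);
        printf("merged %d\n", pool.merged);
        return 0;
}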
@@ -1648,11 +1619,11 @@
                 return;
 
         migratetype = get_pfnblock_migratetype(page, pfn);
-        local_lock_irqsave(&pa_lock.l, flags);
+        local_irq_save(flags);
         __count_vm_events(PGFREE, 1 << order);
         free_one_page(page_zone(page), page, pfn, order, migratetype,
                       fpi_flags);
-        local_unlock_irqrestore(&pa_lock.l, flags);
+        local_irq_restore(flags);
 }
 
 void __free_pages_core(struct page *page, unsigned int order)
@@ -3105,18 +3076,13 @@
 {
         unsigned long flags;
         int to_drain, batch;
-        LIST_HEAD(dst);
 
-        local_lock_irqsave(&pa_lock.l, flags);
+        local_irq_save(flags);
         batch = READ_ONCE(pcp->batch);
         to_drain = min(pcp->count, batch);
         if (to_drain > 0)
-                isolate_pcp_pages(to_drain, pcp, &dst);
-
-        local_unlock_irqrestore(&pa_lock.l, flags);
-
-        if (to_drain > 0)
-                free_pcppages_bulk(zone, &dst, false);
+                free_pcppages_bulk(zone, to_drain, pcp);
+        local_irq_restore(flags);
 }
 #endif
 
@@ -3132,21 +3098,14 @@
         unsigned long flags;
         struct per_cpu_pageset *pset;
         struct per_cpu_pages *pcp;
-        LIST_HEAD(dst);
-        int count;
 
-        local_lock_irqsave(&pa_lock.l, flags);
+        local_irq_save(flags);
         pset = per_cpu_ptr(zone->pageset, cpu);
 
         pcp = &pset->pcp;
-        count = pcp->count;
-        if (count)
-                isolate_pcp_pages(count, pcp, &dst);
-
-        local_unlock_irqrestore(&pa_lock.l, flags);
-
-        if (count)
-                free_pcppages_bulk(zone, &dst, false);
+        if (pcp->count)
+                free_pcppages_bulk(zone, pcp->count, pcp);
+        local_irq_restore(flags);
 }
 
 /*
@@ -3194,9 +3153,9 @@
          * cpu which is allright but we also have to make sure to not move to
          * a different one.
          */
-        migrate_disable();
+        preempt_disable();
         drain_local_pages(drain->zone);
-        migrate_enable();
+        preempt_enable();
 }
 
 /*
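drain_local_pages() operates on the current CPU's pcp lists, so the caller must not migrate while it runs. migrate_disable(), the RT primitive that pins a task to its CPU while leaving it preemptible, goes back to preempt_disable(), which pins the task and disables preemption; on non-RT kernels that is the conventional idiom. A kernel-C sketch of the idiom, assuming a hypothetical per-CPU counter:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, example_counter);    /* hypothetical */

static void touch_this_cpu_state(void)
{
        preempt_disable();              /* stay on this CPU, no preemption */
        __this_cpu_inc(example_counter);
        preempt_enable();
}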
@@ -3345,8 +3304,7 @@
         return true;
 }
 
-static void free_unref_page_commit(struct page *page, unsigned long pfn,
-                                   struct list_head *dst)
+static void free_unref_page_commit(struct page *page, unsigned long pfn)
 {
         struct zone *zone = page_zone(page);
         struct per_cpu_pages *pcp;
@@ -3380,8 +3338,7 @@
         pcp->count++;
         if (pcp->count >= pcp->high) {
                 unsigned long batch = READ_ONCE(pcp->batch);
-
-                isolate_pcp_pages(batch, pcp, dst);
+                free_pcppages_bulk(zone, batch, pcp);
         }
 }
 
@@ -3392,17 +3349,13 @@
 {
         unsigned long flags;
         unsigned long pfn = page_to_pfn(page);
-        struct zone *zone = page_zone(page);
-        LIST_HEAD(dst);
 
         if (!free_unref_page_prepare(page, pfn))
                 return;
 
-        local_lock_irqsave(&pa_lock.l, flags);
-        free_unref_page_commit(page, pfn, &dst);
-        local_unlock_irqrestore(&pa_lock.l, flags);
-        if (!list_empty(&dst))
-                free_pcppages_bulk(zone, &dst, false);
+        local_irq_save(flags);
+        free_unref_page_commit(page, pfn);
+        local_irq_restore(flags);
 }
 
 /*
@@ -3413,11 +3366,6 @@
         struct page *page, *next;
         unsigned long flags, pfn;
         int batch_count = 0;
-        struct list_head dsts[__MAX_NR_ZONES];
-        int i;
-
-        for (i = 0; i < __MAX_NR_ZONES; i++)
-                INIT_LIST_HEAD(&dsts[i]);
 
         /* Prepare pages for freeing */
         list_for_each_entry_safe(page, next, list, lru) {
@@ -3427,42 +3375,25 @@
                 set_page_private(page, pfn);
         }
 
-        local_lock_irqsave(&pa_lock.l, flags);
+        local_irq_save(flags);
         list_for_each_entry_safe(page, next, list, lru) {
                 unsigned long pfn = page_private(page);
-                enum zone_type type;
 
                 set_page_private(page, 0);
                 trace_mm_page_free_batched(page);
-                type = page_zonenum(page);
-                free_unref_page_commit(page, pfn, &dsts[type]);
+                free_unref_page_commit(page, pfn);
 
                 /*
                  * Guard against excessive IRQ disabled times when we get
                  * a large list of pages to free.
                  */
                 if (++batch_count == SWAP_CLUSTER_MAX) {
-                        local_unlock_irqrestore(&pa_lock.l, flags);
+                        local_irq_restore(flags);
                         batch_count = 0;
-                        local_lock_irqsave(&pa_lock.l, flags);
+                        local_irq_save(flags);
                 }
         }
-        local_unlock_irqrestore(&pa_lock.l, flags);
-
-        for (i = 0; i < __MAX_NR_ZONES; ) {
-                struct page *page;
-                struct zone *zone;
-
-                if (list_empty(&dsts[i])) {
-                        i++;
-                        continue;
-                }
-
-                page = list_first_entry(&dsts[i], struct page, lru);
-                zone = page_zone(page);
-
-                free_pcppages_bulk(zone, &dsts[i], true);
-        }
+        local_irq_restore(flags);
 }
 
 /*
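The surviving batch_count/SWAP_CLUSTER_MAX guard bounds how long interrupts stay disabled while a long list is freed: the IRQ-off window is dropped and re-entered every SWAP_CLUSTER_MAX pages. A runnable userspace analogue of the same drop-and-retake pattern (illustrative only; compile with cc -pthread):

#include <pthread.h>
#include <stdio.h>

#define BATCH 32

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void drain(int *queue, int len)
{
        int batch_count = 0;

        pthread_mutex_lock(&big_lock);
        for (int i = 0; i < len; i++) {
                queue[i] = 0;           /* "free" one item */
                /* drop and retake the lock periodically so other
                 * contenders (IRQs, in the kernel case) get a window */
                if (++batch_count == BATCH) {
                        pthread_mutex_unlock(&big_lock);
                        batch_count = 0;
                        pthread_mutex_lock(&big_lock);
                }
        }
        pthread_mutex_unlock(&big_lock);
}

int main(void)
{
        int queue[100];

        for (int i = 0; i < 100; i++)
                queue[i] = i;
        drain(queue, 100);
        printf("drained, queue[99]=%d\n", queue[99]);
        return 0;
}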
@@ -3629,7 +3560,7 @@
         struct page *page;
         unsigned long flags;
 
-        local_lock_irqsave(&pa_lock.l, flags);
+        local_irq_save(flags);
         pcp = &this_cpu_ptr(zone->pageset)->pcp;
         page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp,
                                  gfp_flags);
@@ -3637,7 +3568,7 @@
                 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
                 zone_statistics(preferred_zone, zone);
         }
-        local_unlock_irqrestore(&pa_lock.l, flags);
+        local_irq_restore(flags);
         return page;
 }
 
@@ -3664,8 +3595,7 @@
          * allocate greater than order-1 page units with __GFP_NOFAIL.
          */
         WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
-        local_lock_irqsave(&pa_lock.l, flags);
-        spin_lock(&zone->lock);
+        spin_lock_irqsave(&zone->lock, flags);
 
         do {
                 page = NULL;
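With pa_lock gone, rmqueue() folds IRQ-disabling and zone->lock acquisition into a single spin_lock_irqsave(); the removed two-step nesting let PREEMPT_RT keep interrupts enabled while holding only the per-CPU lock. Both shapes side by side as a kernel-C sketch (hypothetical "my_*" lock names):

#include <linux/spinlock.h>
#include <linux/local_lock.h>
#include <linux/percpu.h>

static DEFINE_SPINLOCK(my_zone_lock);                          /* hypothetical */
static DEFINE_PER_CPU(local_lock_t, my_local) = INIT_LOCAL_LOCK(my_local);

static void fused_form(void)           /* what the revert switches to */
{
        unsigned long flags;

        spin_lock_irqsave(&my_zone_lock, flags);        /* IRQs off + lock */
        /* allocate from the shared structure */
        spin_unlock_irqrestore(&my_zone_lock, flags);
}

static void split_form(void)           /* the removed RT-friendly nesting */
{
        unsigned long flags;

        local_lock_irqsave(&my_local, flags);   /* per-CPU lock first */
        spin_lock(&my_zone_lock);               /* then the shared lock */
        /* allocate from the shared structure */
        spin_unlock(&my_zone_lock);
        local_unlock_irqrestore(&my_local, flags);
}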
@@ -3700,7 +3630,7 @@
         zone_statistics(preferred_zone, zone);
         trace_android_vh_rmqueue(preferred_zone, zone, order,
                         gfp_flags, alloc_flags, migratetype);
-        local_unlock_irqrestore(&pa_lock.l, flags);
+        local_irq_restore(flags);
 
 out:
         /* Separate test+clear to avoid unnecessary atomics */
@@ -3713,7 +3643,7 @@
         return page;
 
 failed:
-        local_unlock_irqrestore(&pa_lock.l, flags);
+        local_irq_restore(flags);
         return NULL;
 }
 
@@ -9141,7 +9071,7 @@
         struct per_cpu_pageset *pset;
 
         /* avoid races with drain_pages() */
-        local_lock_irqsave(&pa_lock.l, flags);
+        local_irq_save(flags);
         if (zone->pageset != &boot_pageset) {
                 for_each_online_cpu(cpu) {
                         pset = per_cpu_ptr(zone->pageset, cpu);
@@ -9150,7 +9080,7 @@
                 free_percpu(zone->pageset);
                 zone->pageset = &boot_pageset;
         }
-        local_unlock_irqrestore(&pa_lock.l, flags);
+        local_irq_restore(flags);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE