  .. |   .. |
  61 |   61 | #include <linux/hugetlb.h>
  62 |   62 | #include <linux/sched/rt.h>
  63 |   63 | #include <linux/sched/mm.h>
  64 |      | -#include <linux/local_lock.h>
  65 |   64 | #include <linux/page_owner.h>
  66 |   65 | #include <linux/page_pinner.h>
  67 |   66 | #include <linux/kthread.h>
  .. |   .. |
 385 |  384 | EXPORT_SYMBOL(nr_node_ids);
 386 |  385 | EXPORT_SYMBOL(nr_online_nodes);
 387 |  386 | #endif
 388 |      | -
 389 |      | -struct pa_lock {
 390 |      | -        local_lock_t l;
 391 |      | -};
 392 |      | -static DEFINE_PER_CPU(struct pa_lock, pa_lock) = {
 393 |      | -        .l = INIT_LOCAL_LOCK(l),
 394 |      | -};
 395 |  387 |
 396 |  388 | int page_group_by_mobility_disabled __read_mostly;
 397 |  389 |
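This hunk is the core of the change: the per-CPU page-allocator state loses its `local_lock_t` wrapper and goes back to being guarded by plain interrupt disabling, so every `local_lock_irqsave(&pa_lock.l, ...)` site below collapses into a bare `local_irq_save()`. A minimal standalone sketch of the two patterns (`my_pcp` and the `counter_inc_*` functions are made-up names, not from this patch):

```c
#include <linux/local_lock.h>
#include <linux/percpu.h>

struct my_pcp {
        local_lock_t lock;      /* a real, preemptible lock on PREEMPT_RT */
        int count;
};
static DEFINE_PER_CPU(struct my_pcp, my_pcp) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

static void counter_inc_locallock(void)
{
        unsigned long flags;

        /* names the protected scope and is visible to lockdep */
        local_lock_irqsave(&my_pcp.lock, flags);
        this_cpu_inc(my_pcp.count);
        local_unlock_irqrestore(&my_pcp.lock, flags);
}

static void counter_inc_irqsave(void)
{
        unsigned long flags;

        /* what this patch restores: a raw IRQ-off section */
        local_irq_save(flags);
        this_cpu_inc(my_pcp.count);
        local_irq_restore(flags);
}
```

On non-RT kernels the two compile to nearly the same code; the revert mainly trades the PREEMPT_RT-friendly annotation for the older, simpler model.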
  .. |   .. |
1430 | 1422 | }
1431 | 1423 |
1432 | 1424 | /*
1433 |      | - * Frees a number of pages which have been collected from the pcp lists.
     | 1425 | + * Frees a number of pages from the PCP lists
1434 | 1426 |  * Assumes all pages on list are in same zone, and of same order.
1435 | 1427 |  * count is the number of pages to free.
1436 | 1428 |  *
  .. |   .. |
1440 | 1432 |  * And clear the zone's pages_scanned counter, to hold off the "all pages are
1441 | 1433 |  * pinned" detection logic.
1442 | 1434 |  */
1443 |      | -static void free_pcppages_bulk(struct zone *zone, struct list_head *head,
1444 |      | -                               bool zone_retry)
1445 |      | -{
1446 |      | -        bool isolated_pageblocks;
1447 |      | -        struct page *page, *tmp;
1448 |      | -        unsigned long flags;
1449 |      | -
1450 |      | -        spin_lock_irqsave(&zone->lock, flags);
1451 |      | -        isolated_pageblocks = has_isolate_pageblock(zone);
1452 |      | -
1453 |      | -        /*
1454 |      | -         * Use safe version since after __free_one_page(),
1455 |      | -         * page->lru.next will not point to original list.
1456 |      | -         */
1457 |      | -        list_for_each_entry_safe(page, tmp, head, lru) {
1458 |      | -                int mt = get_pcppage_migratetype(page);
1459 |      | -
1460 |      | -                if (page_zone(page) != zone) {
1461 |      | -                        /*
1462 |      | -                         * free_unref_page_list() sorts pages by zone. If we end
1463 |      | -                         * up with pages from a different NUMA nodes belonging
1464 |      | -                         * to the same ZONE index then we need to redo with the
1465 |      | -                         * correct ZONE pointer. Skip the page for now, redo it
1466 |      | -                         * on the next iteration.
1467 |      | -                         */
1468 |      | -                        WARN_ON_ONCE(zone_retry == false);
1469 |      | -                        if (zone_retry)
1470 |      | -                                continue;
1471 |      | -                }
1472 |      | -
1473 |      | -                /* MIGRATE_ISOLATE page should not go to pcplists */
1474 |      | -                VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1475 |      | -                /* Pageblock could have been isolated meanwhile */
1476 |      | -                if (unlikely(isolated_pageblocks))
1477 |      | -                        mt = get_pageblock_migratetype(page);
1478 |      | -
1479 |      | -                list_del(&page->lru);
1480 |      | -                __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
1481 |      | -                trace_mm_page_pcpu_drain(page, 0, mt);
1482 |      | -        }
1483 |      | -        spin_unlock_irqrestore(&zone->lock, flags);
1484 |      | -}
1485 |      | -
1486 |      | -static void isolate_pcp_pages(int count, struct per_cpu_pages *pcp,
1487 |      | -                              struct list_head *dst)
     | 1435 | +static void free_pcppages_bulk(struct zone *zone, int count,
     | 1436 | +                               struct per_cpu_pages *pcp)
1488 | 1437 | {
1489 | 1438 |         int migratetype = 0;
1490 | 1439 |         int batch_free = 0;
1491 | 1440 |         int prefetch_nr = 0;
1492 |      | -        struct page *page;
     | 1441 | +        bool isolated_pageblocks;
     | 1442 | +        struct page *page, *tmp;
     | 1443 | +        LIST_HEAD(head);
1493 | 1444 |
1494 | 1445 |         /*
1495 | 1446 |          * Ensure proper count is passed which otherwise would stuck in the
  .. |   .. |
1526 | 1477 |                         if (bulkfree_pcp_prepare(page))
1527 | 1478 |                                 continue;
1528 | 1479 |
1529 |      | -                        list_add_tail(&page->lru, dst);
     | 1480 | +                        list_add_tail(&page->lru, &head);
1530 | 1481 |
1531 | 1482 |                         /*
1532 | 1483 |                          * We are going to put the page back to the global
  .. |   .. |
1541 | 1492 |                                 prefetch_buddy(page);
1542 | 1493 |                 } while (--count && --batch_free && !list_empty(list));
1543 | 1494 |         }
     | 1495 | +
     | 1496 | +        spin_lock(&zone->lock);
     | 1497 | +        isolated_pageblocks = has_isolate_pageblock(zone);
     | 1498 | +
     | 1499 | +        /*
     | 1500 | +         * Use safe version since after __free_one_page(),
     | 1501 | +         * page->lru.next will not point to original list.
     | 1502 | +         */
     | 1503 | +        list_for_each_entry_safe(page, tmp, &head, lru) {
     | 1504 | +                int mt = get_pcppage_migratetype(page);
     | 1505 | +                /* MIGRATE_ISOLATE page should not go to pcplists */
     | 1506 | +                VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
     | 1507 | +                /* Pageblock could have been isolated meanwhile */
     | 1508 | +                if (unlikely(isolated_pageblocks))
     | 1509 | +                        mt = get_pageblock_migratetype(page);
     | 1510 | +
     | 1511 | +                __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
     | 1512 | +                trace_mm_page_pcpu_drain(page, 0, mt);
     | 1513 | +        }
     | 1514 | +        spin_unlock(&zone->lock);
1544 | 1515 | }
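After this rework, collection and freeing live in one function again: pages are detached from the per-migratetype PCP lists first, then handed to the buddy allocator in a single `zone->lock` round trip. A simplified sketch of that two-phase shape (the `*_sketch` types and helper are invented for illustration; the real code additionally weights `batch_free` per migratetype and prefetches buddy entries):

```c
#include <linux/list.h>
#include <linux/spinlock.h>

struct page_sketch {
        struct list_head lru;           /* hypothetical mini struct page */
};

struct pcp_sketch {
        int count;
        struct list_head lists[3];      /* one list per migratetype */
};

static void buddy_merge_sketch(struct page_sketch *page)
{
        /* stand-in for __free_one_page(): merge into the buddy freelists */
}

/* Precondition, as in the kernel: count <= pcp->count. */
static void bulk_free_sketch(spinlock_t *zone_lock, int count,
                             struct pcp_sketch *pcp)
{
        struct page_sketch *page, *tmp;
        int mt = 0;
        LIST_HEAD(head);

        /* Phase 1: IRQs already off; detach pages round-robin over the lists */
        while (count) {
                struct list_head *list;

                do {
                        mt = (mt + 1) % 3;
                        list = &pcp->lists[mt];
                } while (list_empty(list));

                page = list_last_entry(list, struct page_sketch, lru);
                list_del(&page->lru);
                pcp->count--;
                list_add_tail(&page->lru, &head);
                count--;
        }

        /* Phase 2: one zone-lock round trip for the whole batch */
        spin_lock(zone_lock);           /* plain spin_lock(): IRQs still off */
        list_for_each_entry_safe(page, tmp, &head, lru)
                buddy_merge_sketch(page);
        spin_unlock(zone_lock);
}
```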
1545 | 1516 |
1546 | 1517 | static void free_one_page(struct zone *zone,
  .. |   .. |
1643 | 1614 |         unsigned long flags;
1644 | 1615 |         int migratetype;
1645 | 1616 |         unsigned long pfn = page_to_pfn(page);
     | 1617 | +        bool skip_free_unref_page = false;
1646 | 1618 |
1647 | 1619 |         if (!free_pages_prepare(page, order, true, fpi_flags))
1648 | 1620 |                 return;
1649 | 1621 |
1650 | 1622 |         migratetype = get_pfnblock_migratetype(page, pfn);
1651 |      | -        local_lock_irqsave(&pa_lock.l, flags);
     | 1623 | +        trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page);
     | 1624 | +        if (skip_free_unref_page)
     | 1625 | +                return;
     | 1626 | +
     | 1627 | +        local_irq_save(flags);
1652 | 1628 |         __count_vm_events(PGFREE, 1 << order);
1653 | 1629 |         free_one_page(page_zone(page), page, pfn, order, migratetype,
1654 | 1630 |                       fpi_flags);
1655 |      | -        local_unlock_irqrestore(&pa_lock.l, flags);
     | 1631 | +        local_irq_restore(flags);
1656 | 1632 | }
1657 | 1633 |
1658 | 1634 | void __free_pages_core(struct page *page, unsigned int order)
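`trace_android_vh_free_unref_page_bypass()` is an Android vendor hook: a restricted tracepoint a vendor module can attach a probe to, and the probe may set `*skip` to take over freeing entirely. A hypothetical probe, with the signature inferred from the call site above (both helpers are invented for illustration):

```c
#include <linux/mm.h>

static bool page_in_my_pool(struct page *page);                 /* hypothetical */
static void my_pool_recycle(struct page *page, int order);      /* hypothetical */

/* Hypothetical probe; tracepoint probes receive their priv data first. */
static void my_free_bypass(void *unused, struct page *page, int order,
                           int migratetype, bool *skip)
{
        if (page_in_my_pool(page)) {
                my_pool_recycle(page, order);
                *skip = true;   /* allocator skips its own free path */
        }
}

/*
 * A vendor module would attach it with something like:
 *   register_trace_android_vh_free_unref_page_bypass(my_free_bypass, NULL);
 */
```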
  .. |   .. |
2826 | 2802 |         struct page *page;
2827 | 2803 |         int order;
2828 | 2804 |         bool ret;
     | 2805 | +        bool skip_unreserve_highatomic = false;
2829 | 2806 |
2830 | 2807 |         for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
2831 | 2808 |                                         ac->nodemask) {
  .. |   .. |
2835 | 2812 |                  */
2836 | 2813 |                 if (!force && zone->nr_reserved_highatomic <=
2837 | 2814 |                                         pageblock_nr_pages)
     | 2815 | +                        continue;
     | 2816 | +
     | 2817 | +                trace_android_vh_unreserve_highatomic_bypass(force, zone,
     | 2818 | +                                &skip_unreserve_highatomic);
     | 2819 | +                if (skip_unreserve_highatomic)
2838 | 2820 |                         continue;
2839 | 2821 |
2840 | 2822 |                 spin_lock_irqsave(&zone->lock, flags);
  .. |   .. |
3082 | 3064 |         struct list_head *list = &pcp->lists[migratetype];
3083 | 3065 |
3084 | 3066 |         if (list_empty(list)) {
     | 3067 | +                trace_android_vh_rmqueue_bulk_bypass(order, pcp, migratetype, list);
     | 3068 | +                if (!list_empty(list))
     | 3069 | +                        return list;
     | 3070 | +
3085 | 3071 |                 pcp->count += rmqueue_bulk(zone, order,
3086 | 3072 |                                 pcp->batch, list,
3087 | 3073 |                                 migratetype, alloc_flags);
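The new `trace_android_vh_rmqueue_bulk_bypass()` gives a vendor probe one chance to refill the empty PCP list before the allocator falls back to `rmqueue_bulk()`; if the probe populated the list, it is returned as-is. A hypothetical probe (the reserve helper is invented, and the signature is inferred from the call site):

```c
#include <linux/mm.h>
#include <linux/mmzone.h>

static struct page *my_reserve_pop(unsigned int order, int migratetype); /* hypothetical */

static void my_rmqueue_bulk_bypass(void *unused, unsigned int order,
                                   struct per_cpu_pages *pcp, int migratetype,
                                   struct list_head *list)
{
        struct page *page;

        /* refill from a vendor-managed reserve instead of the buddy lists */
        while (pcp->count < pcp->batch &&
               (page = my_reserve_pop(order, migratetype))) {
                list_add(&page->lru, list);
                pcp->count++;
        }
}
```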
  .. |   .. |
3105 | 3091 | {
3106 | 3092 |         unsigned long flags;
3107 | 3093 |         int to_drain, batch;
3108 |      | -        LIST_HEAD(dst);
3109 | 3094 |
3110 |      | -        local_lock_irqsave(&pa_lock.l, flags);
     | 3095 | +        local_irq_save(flags);
3111 | 3096 |         batch = READ_ONCE(pcp->batch);
3112 | 3097 |         to_drain = min(pcp->count, batch);
3113 | 3098 |         if (to_drain > 0)
3114 |      | -                isolate_pcp_pages(to_drain, pcp, &dst);
3115 |      | -
3116 |      | -        local_unlock_irqrestore(&pa_lock.l, flags);
3117 |      | -
3118 |      | -        if (to_drain > 0)
3119 |      | -                free_pcppages_bulk(zone, &dst, false);
     | 3099 | +                free_pcppages_bulk(zone, to_drain, pcp);
     | 3100 | +        local_irq_restore(flags);
3120 | 3101 | }
3121 | 3102 | #endif
3122 | 3103 |
  .. |   .. |
3132 | 3113 |         unsigned long flags;
3133 | 3114 |         struct per_cpu_pageset *pset;
3134 | 3115 |         struct per_cpu_pages *pcp;
3135 |      | -        LIST_HEAD(dst);
3136 |      | -        int count;
3137 | 3116 |
3138 |      | -        local_lock_irqsave(&pa_lock.l, flags);
     | 3117 | +        local_irq_save(flags);
3139 | 3118 |         pset = per_cpu_ptr(zone->pageset, cpu);
3140 | 3119 |
3141 | 3120 |         pcp = &pset->pcp;
3142 |      | -        count = pcp->count;
3143 |      | -        if (count)
3144 |      | -                isolate_pcp_pages(count, pcp, &dst);
3145 |      | -
3146 |      | -        local_unlock_irqrestore(&pa_lock.l, flags);
3147 |      | -
3148 |      | -        if (count)
3149 |      | -                free_pcppages_bulk(zone, &dst, false);
     | 3121 | +        if (pcp->count)
     | 3122 | +                free_pcppages_bulk(zone, pcp->count, pcp);
     | 3123 | +        local_irq_restore(flags);
3150 | 3124 | }
3151 | 3125 |
3152 | 3126 | /*
  .. |   .. |
3194 | 3168 |          * cpu which is allright but we also have to make sure to not move to
3195 | 3169 |          * a different one.
3196 | 3170 |          */
3197 |      | -        migrate_disable();
     | 3171 | +        preempt_disable();
3198 | 3172 |         drain_local_pages(drain->zone);
3199 |      | -        migrate_enable();
     | 3173 | +        preempt_enable();
3200 | 3174 | }
3201 | 3175 |
3202 | 3176 | /*
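`migrate_disable()` pins the task to the current CPU while leaving it preemptible, which the `local_lock` model needed on PREEMPT_RT; with the revert to `local_irq_save()`-style protection, the cheaper `preempt_disable()` is sufficient again. A minimal sketch of what the pinning is for (the per-CPU variable is hypothetical):

```c
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, demo_counter);       /* hypothetical per-CPU variable */

static void touch_this_cpu(void)
{
        /*
         * preempt_disable() both pins the task to this CPU and forbids
         * preemption, matching the local_irq_save() model this patch
         * restores; migrate_disable() would only pin, leaving the
         * section preemptible as the local_lock-based code allowed.
         */
        preempt_disable();
        __this_cpu_inc(demo_counter);
        preempt_enable();
}
```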
  .. |   .. |
3345 | 3319 |         return true;
3346 | 3320 | }
3347 | 3321 |
3348 |      | -static void free_unref_page_commit(struct page *page, unsigned long pfn,
3349 |      | -                                   struct list_head *dst)
     | 3322 | +static void free_unref_page_commit(struct page *page, unsigned long pfn)
3350 | 3323 | {
3351 | 3324 |         struct zone *zone = page_zone(page);
3352 | 3325 |         struct per_cpu_pages *pcp;
  .. |   .. |
3380 | 3353 |         pcp->count++;
3381 | 3354 |         if (pcp->count >= pcp->high) {
3382 | 3355 |                 unsigned long batch = READ_ONCE(pcp->batch);
3383 |      | -
3384 |      | -                isolate_pcp_pages(batch, pcp, dst);
     | 3356 | +                free_pcppages_bulk(zone, batch, pcp);
3385 | 3357 |         }
3386 | 3358 | }
3387 | 3359 |
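Because `free_unref_page_commit()` now calls `free_pcppages_bulk()` directly, the zone lock is always taken with interrupts already disabled, which is why the reworked `free_pcppages_bulk()` above uses plain `spin_lock()` rather than `spin_lock_irqsave()`. A simplified, hypothetical caller showing the nesting (not the kernel function):

```c
/* Simplified sketch of the lock nesting after this change. */
static void free_to_pcp_sketch(struct zone *zone, struct per_cpu_pages *pcp,
                               struct page *page, int migratetype)
{
        unsigned long flags;

        local_irq_save(flags);          /* protects the per-CPU lists */
        list_add(&page->lru, &pcp->lists[migratetype]);
        pcp->count++;
        if (pcp->count >= pcp->high)
                /* takes zone->lock with plain spin_lock(): IRQs already off */
                free_pcppages_bulk(zone, READ_ONCE(pcp->batch), pcp);
        local_irq_restore(flags);
}
```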
  .. |   .. |
3392 | 3364 | {
3393 | 3365 |         unsigned long flags;
3394 | 3366 |         unsigned long pfn = page_to_pfn(page);
3395 |      | -        struct zone *zone = page_zone(page);
3396 |      | -        LIST_HEAD(dst);
     | 3367 | +        int migratetype;
     | 3368 | +        bool skip_free_unref_page = false;
3397 | 3369 |
3398 | 3370 |         if (!free_unref_page_prepare(page, pfn))
3399 | 3371 |                 return;
3400 | 3372 |
3401 |      | -        local_lock_irqsave(&pa_lock.l, flags);
3402 |      | -        free_unref_page_commit(page, pfn, &dst);
3403 |      | -        local_unlock_irqrestore(&pa_lock.l, flags);
3404 |      | -        if (!list_empty(&dst))
3405 |      | -                free_pcppages_bulk(zone, &dst, false);
     | 3373 | +        migratetype = get_pfnblock_migratetype(page, pfn);
     | 3374 | +        trace_android_vh_free_unref_page_bypass(page, 0, migratetype, &skip_free_unref_page);
     | 3375 | +        if (skip_free_unref_page)
     | 3376 | +                return;
     | 3377 | +
     | 3378 | +        local_irq_save(flags);
     | 3379 | +        free_unref_page_commit(page, pfn);
     | 3380 | +        local_irq_restore(flags);
3406 | 3381 | }
3407 | 3382 |
3408 | 3383 | /*
  .. |   .. |
3413 | 3388 |         struct page *page, *next;
3414 | 3389 |         unsigned long flags, pfn;
3415 | 3390 |         int batch_count = 0;
3416 |      | -        struct list_head dsts[__MAX_NR_ZONES];
3417 |      | -        int i;
3418 |      | -
3419 |      | -        for (i = 0; i < __MAX_NR_ZONES; i++)
3420 |      | -                INIT_LIST_HEAD(&dsts[i]);
3421 | 3391 |
3422 | 3392 |         /* Prepare pages for freeing */
3423 | 3393 |         list_for_each_entry_safe(page, next, list, lru) {
  .. |   .. |
3427 | 3397 |                 set_page_private(page, pfn);
3428 | 3398 |         }
3429 | 3399 |
3430 |      | -        local_lock_irqsave(&pa_lock.l, flags);
     | 3400 | +        local_irq_save(flags);
3431 | 3401 |         list_for_each_entry_safe(page, next, list, lru) {
3432 | 3402 |                 unsigned long pfn = page_private(page);
3433 |      | -                enum zone_type type;
3434 | 3403 |
3435 | 3404 |                 set_page_private(page, 0);
3436 | 3405 |                 trace_mm_page_free_batched(page);
3437 |      | -                type = page_zonenum(page);
3438 |      | -                free_unref_page_commit(page, pfn, &dsts[type]);
     | 3406 | +                free_unref_page_commit(page, pfn);
3439 | 3407 |
3440 | 3408 |                 /*
3441 | 3409 |                  * Guard against excessive IRQ disabled times when we get
3442 | 3410 |                  * a large list of pages to free.
3443 | 3411 |                  */
3444 | 3412 |                 if (++batch_count == SWAP_CLUSTER_MAX) {
3445 |      | -                        local_unlock_irqrestore(&pa_lock.l, flags);
     | 3413 | +                        local_irq_restore(flags);
3446 | 3414 |                         batch_count = 0;
3447 |      | -                        local_lock_irqsave(&pa_lock.l, flags);
     | 3415 | +                        local_irq_save(flags);
3448 | 3416 |                 }
3449 | 3417 |         }
3450 |      | -        local_unlock_irqrestore(&pa_lock.l, flags);
3451 |      | -
3452 |      | -        for (i = 0; i < __MAX_NR_ZONES; ) {
3453 |      | -                struct page *page;
3454 |      | -                struct zone *zone;
3455 |      | -
3456 |      | -                if (list_empty(&dsts[i])) {
3457 |      | -                        i++;
3458 |      | -                        continue;
3459 |      | -                }
3460 |      | -
3461 |      | -                page = list_first_entry(&dsts[i], struct page, lru);
3462 |      | -                zone = page_zone(page);
3463 |      | -
3464 |      | -                free_pcppages_bulk(zone, &dsts[i], true);
3465 |      | -        }
     | 3418 | +        local_irq_restore(flags);
3466 | 3419 | }
3467 | 3420 |
3468 | 3421 | /*
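The `SWAP_CLUSTER_MAX` dance retained above bounds interrupt-off latency: after every 32 pages the loop briefly restores IRQs so pending interrupts can run, then disables them again. The same pattern in a standalone sketch (the item type and per-item work are hypothetical):

```c
#include <linux/irqflags.h>
#include <linux/list.h>

#define DEMO_BATCH 32                   /* stand-in for SWAP_CLUSTER_MAX */

struct demo_item {
        struct list_head lru;           /* hypothetical list node */
};

static void process_one(struct demo_item *item)
{
        /* hypothetical per-item work that must run with IRQs disabled */
}

static void process_many(struct list_head *items)
{
        struct demo_item *item, *next;
        unsigned long flags;
        int batch_count = 0;

        local_irq_save(flags);
        list_for_each_entry_safe(item, next, items, lru) {
                process_one(item);

                /* briefly re-enable IRQs so pending interrupts can run */
                if (++batch_count == DEMO_BATCH) {
                        local_irq_restore(flags);
                        batch_count = 0;
                        local_irq_save(flags);
                }
        }
        local_irq_restore(flags);
}
```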
  .. |   .. |
3629 | 3582 |         struct page *page;
3630 | 3583 |         unsigned long flags;
3631 | 3584 |
3632 |      | -        local_lock_irqsave(&pa_lock.l, flags);
     | 3585 | +        local_irq_save(flags);
3633 | 3586 |         pcp = &this_cpu_ptr(zone->pageset)->pcp;
3634 | 3587 |         page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp,
3635 | 3588 |                                  gfp_flags);
  .. |   .. |
3637 | 3590 |                 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
3638 | 3591 |                 zone_statistics(preferred_zone, zone);
3639 | 3592 |         }
3640 |      | -        local_unlock_irqrestore(&pa_lock.l, flags);
     | 3593 | +        local_irq_restore(flags);
3641 | 3594 |         return page;
3642 | 3595 | }
3643 | 3596 |
  .. |   .. |
3664 | 3617 |          * allocate greater than order-1 page units with __GFP_NOFAIL.
3665 | 3618 |          */
3666 | 3619 |         WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3667 |      | -        local_lock_irqsave(&pa_lock.l, flags);
3668 |      | -        spin_lock(&zone->lock);
     | 3620 | +        spin_lock_irqsave(&zone->lock, flags);
3669 | 3621 |
3670 | 3622 |         do {
3671 | 3623 |                 page = NULL;
  .. |   .. |
3700 | 3652 |         zone_statistics(preferred_zone, zone);
3701 | 3653 |         trace_android_vh_rmqueue(preferred_zone, zone, order,
3702 | 3654 |                         gfp_flags, alloc_flags, migratetype);
3703 |      | -        local_unlock_irqrestore(&pa_lock.l, flags);
     | 3655 | +        local_irq_restore(flags);
3704 | 3656 |
3705 | 3657 | out:
3706 | 3658 |         /* Separate test+clear to avoid unnecessary atomics */
  .. |   .. |
3713 | 3665 |         return page;
3714 | 3666 |
3715 | 3667 | failed:
3716 |      | -        local_unlock_irqrestore(&pa_lock.l, flags);
     | 3668 | +        local_irq_restore(flags);
3717 | 3669 |         return NULL;
3718 | 3670 | }
3719 | 3671 |
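Note the asymmetry this hunk creates in `rmqueue()`: the lock is acquired with `spin_lock_irqsave()`, but the success path (elided above) drops `zone->lock` alone and keeps IRQs off for the statistics updates, restoring them only at the end. A simplified, hypothetical shape of the function (the two helpers are invented):

```c
#include <linux/mm.h>
#include <linux/spinlock.h>

static struct page *take_from_buddy(struct zone *zone);        /* hypothetical */
static void update_stats(struct zone *zone);                   /* hypothetical */

static struct page *rmqueue_sketch(struct zone *zone)
{
        unsigned long flags;
        struct page *page;

        spin_lock_irqsave(&zone->lock, flags);
        page = take_from_buddy(zone);   /* buddy work needs zone->lock */
        spin_unlock(&zone->lock);       /* drop the lock, keep IRQs off */
        update_stats(zone);             /* per-CPU stats still need IRQs off */
        local_irq_restore(flags);       /* only now restore the IRQ state */
        return page;
}
```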
  .. |   .. |
4898 | 4850 |         unsigned int zonelist_iter_cookie;
4899 | 4851 |         int reserve_flags;
4900 | 4852 |         unsigned long vh_record;
     | 4853 | +        bool should_alloc_retry = false;
4901 | 4854 |
4902 | 4855 |         trace_android_vh_alloc_pages_slowpath_begin(gfp_mask, order, &vh_record);
4903 | 4856 |         /*
  .. |   .. |
5037 | 4990 |
5038 | 4991 |         if (page)
5039 | 4992 |                 goto got_pg;
     | 4993 | +
     | 4994 | +        trace_android_vh_should_alloc_pages_retry(gfp_mask, order,
     | 4995 | +                &alloc_flags, ac->migratetype, ac->preferred_zoneref->zone,
     | 4996 | +                &page, &should_alloc_retry);
     | 4997 | +        if (should_alloc_retry)
     | 4998 | +                goto retry;
5040 | 4999 |
5041 | 5000 |         /* Try direct reclaim and then allocating */
5042 | 5001 |         page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
  .. |   .. |
5310 | 5269 |
5311 | 5270 | void __free_pages(struct page *page, unsigned int order)
5312 | 5271 | {
     | 5272 | +        /* get PageHead before we drop reference */
     | 5273 | +        int head = PageHead(page);
     | 5274 | +
5313 | 5275 |         trace_android_vh_free_pages(page, order);
5314 | 5276 |         if (put_page_testzero(page))
5315 | 5277 |                 free_the_page(page, order);
5316 |      | -        else if (!PageHead(page))
     | 5278 | +        else if (!head)
5317 | 5279 |                 while (order-- > 0)
5318 | 5280 |                         free_the_page(page + (1 << order), order);
5319 | 5281 | }
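The `PageHead()` test is hoisted above `put_page_testzero()` because once our reference is dropped, another task may free the page and it can be reallocated, possibly as a compound page, so its flags are no longer stable. Roughly the window being closed (a timeline sketch, not runnable logic):

```c
/*
 * CPU A: __free_pages(page, order)      CPU B
 * ---------------------------------     -------------------------------
 * put_page_testzero() -> false          put_page_testzero() -> true
 *                                       frees the page; it is then
 *                                       reallocated as a compound page
 * PageHead(page) now reads true, so
 * the tail subpages are never freed -> leak
 *
 * Sampling PageHead() while we still hold our reference makes the
 * branch decision stable regardless of what CPU B does afterwards.
 */
```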
  .. |   .. |
6228 | 6190 |         int nid;
6229 | 6191 |         int __maybe_unused cpu;
6230 | 6192 |         pg_data_t *self = data;
     | 6193 | +        unsigned long flags;
6231 | 6194 |
     | 6195 | +        /*
     | 6196 | +         * Explicitly disable this CPU's interrupts before taking seqlock
     | 6197 | +         * to prevent any IRQ handler from calling into the page allocator
     | 6198 | +         * (e.g. GFP_ATOMIC) that could hit zonelist_iter_begin and livelock.
     | 6199 | +         */
     | 6200 | +        local_irq_save(flags);
     | 6201 | +        /*
     | 6202 | +         * Explicitly disable this CPU's synchronous printk() before taking
     | 6203 | +         * seqlock to prevent any printk() from trying to hold port->lock, for
     | 6204 | +         * tty_insert_flip_string_and_push_buffer() on other CPU might be
     | 6205 | +         * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
     | 6206 | +         */
     | 6207 | +        printk_deferred_enter();
6232 | 6208 |         write_seqlock(&zonelist_update_seq);
6233 | 6209 |
6234 | 6210 | #ifdef CONFIG_NUMA
  .. |   .. |
6263 | 6239 |         }
6264 | 6240 |
6265 | 6241 |         write_sequnlock(&zonelist_update_seq);
     | 6242 | +        printk_deferred_exit();
     | 6243 | +        local_irq_restore(flags);
6266 | 6244 | }
6267 | 6245 |
6268 | 6246 | static noinline void __init
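This mirrors the upstream deadlock fix for `zonelist_update_seq`: a reader spinning in `zonelist_iter_begin()` on this CPU (an interrupt doing a `GFP_ATOMIC` allocation), or a synchronous `printk()` reaching for a tty `port->lock` held by a CPU that is itself allocating, could otherwise wedge against the write side. The safe ordering as a standalone sketch (`demo_update_seq` stands in for the real seqlock):

```c
#include <linux/printk.h>
#include <linux/seqlock.h>

static DEFINE_SEQLOCK(demo_update_seq); /* stand-in for zonelist_update_seq */

static void demo_writer(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* 1: no GFP_ATOMIC readers from IRQs here */
        printk_deferred_enter();        /* 2: no sync printk -> no tty port->lock */
        write_seqlock(&demo_update_seq);

        /* ... rebuild the data that readers sample under read_seqbegin() ... */

        write_sequnlock(&demo_update_seq);
        printk_deferred_exit();         /* release in exact reverse order */
        local_irq_restore(flags);
}
```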
  .. |   .. |
6682 | 6660 | static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
6683 | 6661 |                 unsigned long batch)
6684 | 6662 | {
     | 6663 | +        trace_android_vh_pageset_update(&high, &batch);
6685 | 6664 |         /* start with a fail safe value for batch */
6686 | 6665 |         pcp->batch = 1;
6687 | 6666 |         smp_wmb();
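The `pcp->batch = 1; smp_wmb();` prologue starts a publication protocol: a racing free path may observe `high` and `batch` from different generations, so `batch` is parked at a fail-safe value while `high` changes. The tail of the function is elided above; this reconstruction of the usual store-ordering pattern is an assumption:

```c
/* Assumed reconstruction of the elided tail of pageset_update(). */
static void pageset_update_sketch(struct per_cpu_pages *pcp,
                                  unsigned long high, unsigned long batch)
{
        pcp->batch = 1;         /* fail-safe: a racing free flushes one page at most */
        smp_wmb();              /* publish batch = 1 before the new high */
        pcp->high = high;
        smp_wmb();              /* publish the new high before the real batch */
        pcp->batch = batch;
}
```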
  .. |   .. |
9141 | 9120 |         struct per_cpu_pageset *pset;
9142 | 9121 |
9143 | 9122 |         /* avoid races with drain_pages() */
9144 |      | -        local_lock_irqsave(&pa_lock.l, flags);
     | 9123 | +        local_irq_save(flags);
9145 | 9124 |         if (zone->pageset != &boot_pageset) {
9146 | 9125 |                 for_each_online_cpu(cpu) {
9147 | 9126 |                         pset = per_cpu_ptr(zone->pageset, cpu);
  .. |   .. |
9150 | 9129 |                 free_percpu(zone->pageset);
9151 | 9130 |                 zone->pageset = &boot_pageset;
9152 | 9131 |         }
9153 |      | -        local_unlock_irqrestore(&pa_lock.l, flags);
     | 9132 | +        local_irq_restore(flags);
9154 | 9133 | }
9155 | 9134 |
9156 | 9135 | #ifdef CONFIG_MEMORY_HOTREMOVE