From 244b2c5ca8b14627e4a17755e5922221e121c771 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 09 Oct 2024 06:15:07 +0000
Subject: [PATCH] mm/swap: convert per-CPU LRU pagevec caches to local_lock, add lazyfree-movetail and lru_cache_disable support
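Summary, inferred from the diff below:

 - Replace the standalone per-CPU pagevecs and the RT-specific
   rotate_lock/swapvec_lock (DEFINE_LOCAL_IRQ_LOCK) with two grouped
   per-CPU structures, lru_rotate and lru_pvecs, each guarded by a
   local_lock_t.
 - Add an lru_deactivate pagevec with deactivate_page(), and an
   lru_lazyfree_movetail pagevec with mark_page_lazyfree_movetail().
 - Add lru_cache_disable()/lru_cache_enable(), the
   pagevec_add_and_need_flush() helper, and a generation-based
   __lru_add_drain_all(force_all_cpus) that also drains the
   buffer_head LRUs.
 - Switch from zone->lru_lock to pgdat->lru_lock, replace
   update_page_reclaim_stat() with lru_note_cost()/lru_note_cost_page(),
   and count VM and memcg events in units of thp_nr_pages().
 - Assorted API updates: SPDX tag, xa_is_value()/xa_mark_t,
   totalram_pages(), destroy_compound_page(), page_is_devmap_managed(),
   and the Android vendor hook in mark_page_accessed().

The locking conversion repeated throughout the hunks follows this shape
(identifiers taken from the diff; "..." marks omitted statements):

    /* before: per-CPU pagevec behind an RT local-irq lock */
    struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
    ...
    put_locked_var(swapvec_lock, lru_add_pvec);

    /* after: local_lock_t guarding a field of the grouped lru_pvecs */
    local_lock(&lru_pvecs.lock);
    pvec = this_cpu_ptr(&lru_pvecs.lru_add);
    ...
    local_unlock(&lru_pvecs.lock);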
---
kernel/mm/swap.c | 669 +++++++++++++++++++++++++++++++++++++++----------------
1 file changed, 469 insertions(+), 200 deletions(-)
diff --git a/kernel/mm/swap.c b/kernel/mm/swap.c
index 3885645..16b2bc4 100644
--- a/kernel/mm/swap.c
+++ b/kernel/mm/swap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/mm/swap.c
*
@@ -7,7 +8,7 @@
/*
* This file contains the default values for the operation of the
* Linux VM subsystem. Fine-tuning documentation can be found in
- * Documentation/sysctl/vm.txt.
+ * Documentation/admin-guide/sysctl/vm.rst.
* Started 18.12.91
* Swap aging added 23.2.95, Stephen Tweedie.
* Buffermem limits added 12.3.98, Rik van Riel.
@@ -29,13 +30,13 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
-#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
-#include <linux/locallock.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
+#include <linux/local_lock.h>
+#include <linux/buffer_head.h>
#include "internal.h"
@@ -45,15 +46,33 @@
/* How many pages do we try to swap or page in/out together? */
int page_cluster;
-static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
-static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
+/* Protecting only lru_rotate.pvec which requires disabling interrupts */
+struct lru_rotate {
+ local_lock_t lock;
+ struct pagevec pvec;
+};
+static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
+ .lock = INIT_LOCAL_LOCK(lock),
+};
+
+/*
+ * The following struct pagevec are grouped together because they are protected
+ * by disabling preemption (and interrupts remain enabled).
+ */
+struct lru_pvecs {
+ local_lock_t lock;
+ struct pagevec lru_add;
+ struct pagevec lru_deactivate_file;
+ struct pagevec lru_deactivate;
+ struct pagevec lru_lazyfree;
+ struct pagevec lru_lazyfree_movetail;
#ifdef CONFIG_SMP
-static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
+ struct pagevec activate_page;
#endif
-static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
-DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
+};
+static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
+ .lock = INIT_LOCAL_LOCK(lock),
+};
/*
* This path almost never happens for VM activity - pages are normally
@@ -62,31 +81,29 @@
static void __page_cache_release(struct page *page)
{
if (PageLRU(page)) {
- struct zone *zone = page_zone(page);
+ pg_data_t *pgdat = page_pgdat(page);
struct lruvec *lruvec;
unsigned long flags;
- spin_lock_irqsave(zone_lru_lock(zone), flags);
- lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+ spin_lock_irqsave(&pgdat->lru_lock, flags);
+ lruvec = mem_cgroup_page_lruvec(page, pgdat);
VM_BUG_ON_PAGE(!PageLRU(page), page);
__ClearPageLRU(page);
del_page_from_lru_list(page, lruvec, page_off_lru(page));
- spin_unlock_irqrestore(zone_lru_lock(zone), flags);
+ spin_unlock_irqrestore(&pgdat->lru_lock, flags);
}
__ClearPageWaiters(page);
- mem_cgroup_uncharge(page);
}
static void __put_single_page(struct page *page)
{
__page_cache_release(page);
+ mem_cgroup_uncharge(page);
free_unref_page(page);
}
static void __put_compound_page(struct page *page)
{
- compound_page_dtor *dtor;
-
/*
* __page_cache_release() is supposed to be called for thp, not for
* hugetlb. This is because hugetlb page does never have PageLRU set
@@ -95,8 +112,7 @@
*/
if (!PageHuge(page))
__page_cache_release(page);
- dtor = get_compound_page_dtor(page);
- (*dtor)(page);
+ destroy_compound_page(page);
}
void __put_page(struct page *page)
@@ -130,7 +146,7 @@
while (!list_empty(pages)) {
struct page *victim;
- victim = list_entry(pages->prev, struct page, lru);
+ victim = lru_to_page(pages);
list_del(&victim->lru);
put_page(victim);
}
@@ -227,7 +243,7 @@
del_page_from_lru_list(page, lruvec, page_lru(page));
ClearPageActive(page);
add_page_to_lru_list_tail(page, lruvec, page_lru(page));
- (*pgmoved)++;
+ (*pgmoved) += thp_nr_pages(page);
}
}
@@ -243,6 +259,18 @@
__count_vm_events(PGROTATED, pgmoved);
}
+/* return true if pagevec needs to drain */
+static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
+{
+ bool ret = false;
+
+ if (!pagevec_add(pvec, page) || PageCompound(page) ||
+ lru_cache_disabled())
+ ret = true;
+
+ return ret;
+}
+
/*
* Writeback is about to end against a page which has been marked for immediate
* reclaim. If it still appears to be reclaimable, move it to the tail of the
@@ -256,30 +284,57 @@
unsigned long flags;
get_page(page);
- local_lock_irqsave(rotate_lock, flags);
- pvec = this_cpu_ptr(&lru_rotate_pvecs);
- if (!pagevec_add(pvec, page) || PageCompound(page))
+ local_lock_irqsave(&lru_rotate.lock, flags);
+ pvec = this_cpu_ptr(&lru_rotate.pvec);
+ if (pagevec_add_and_need_flush(pvec, page))
pagevec_move_tail(pvec);
- local_unlock_irqrestore(rotate_lock, flags);
+ local_unlock_irqrestore(&lru_rotate.lock, flags);
}
}
-static void update_page_reclaim_stat(struct lruvec *lruvec,
- int file, int rotated)
+void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
{
- struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
+ do {
+ unsigned long lrusize;
- reclaim_stat->recent_scanned[file]++;
- if (rotated)
- reclaim_stat->recent_rotated[file]++;
+ /* Record cost event */
+ if (file)
+ lruvec->file_cost += nr_pages;
+ else
+ lruvec->anon_cost += nr_pages;
+
+ /*
+ * Decay previous events
+ *
+ * Because workloads change over time (and to avoid
+ * overflow) we keep these statistics as a floating
+ * average, which ends up weighing recent refaults
+ * more than old ones.
+ */
+ lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
+ lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
+ lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
+ lruvec_page_state(lruvec, NR_ACTIVE_FILE);
+
+ if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
+ lruvec->file_cost /= 2;
+ lruvec->anon_cost /= 2;
+ }
+ } while ((lruvec = parent_lruvec(lruvec)));
+}
+
+void lru_note_cost_page(struct page *page)
+{
+ lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
+ page_is_file_lru(page), thp_nr_pages(page));
}
static void __activate_page(struct page *page, struct lruvec *lruvec,
void *arg)
{
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
- int file = page_is_file_cache(page);
int lru = page_lru_base_type(page);
+ int nr_pages = thp_nr_pages(page);
del_page_from_lru_list(page, lruvec, lru);
SetPageActive(page);
@@ -287,15 +342,16 @@
add_page_to_lru_list(page, lruvec, lru);
trace_mm_lru_activate(page);
- __count_vm_event(PGACTIVATE);
- update_page_reclaim_stat(lruvec, file, 1);
+ __count_vm_events(PGACTIVATE, nr_pages);
+ __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
+ nr_pages);
}
}
#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
- struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
+ struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);
if (pagevec_count(pvec))
pagevec_lru_move_fn(pvec, __activate_page, NULL);
@@ -303,20 +359,21 @@
static bool need_activate_page_drain(int cpu)
{
- return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
+ return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
}
-void activate_page(struct page *page)
+static void activate_page(struct page *page)
{
page = compound_head(page);
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
- struct pagevec *pvec = &get_locked_var(swapvec_lock,
- activate_page_pvecs);
+ struct pagevec *pvec;
+ local_lock(&lru_pvecs.lock);
+ pvec = this_cpu_ptr(&lru_pvecs.activate_page);
get_page(page);
- if (!pagevec_add(pvec, page) || PageCompound(page))
+ if (pagevec_add_and_need_flush(pvec, page))
pagevec_lru_move_fn(pvec, __activate_page, NULL);
- put_locked_var(swapvec_lock, activate_page_pvecs);
+ local_unlock(&lru_pvecs.lock);
}
}
@@ -325,21 +382,24 @@
{
}
-void activate_page(struct page *page)
+static void activate_page(struct page *page)
{
- struct zone *zone = page_zone(page);
+ pg_data_t *pgdat = page_pgdat(page);
page = compound_head(page);
- spin_lock_irq(zone_lru_lock(zone));
- __activate_page(page, mem_cgroup_page_lruvec(page, zone->zone_pgdat), NULL);
- spin_unlock_irq(zone_lru_lock(zone));
+ spin_lock_irq(&pgdat->lru_lock);
+ __activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
+ spin_unlock_irq(&pgdat->lru_lock);
}
#endif
static void __lru_cache_activate_page(struct page *page)
{
- struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
+ struct pagevec *pvec;
int i;
+
+ local_lock(&lru_pvecs.lock);
+ pvec = this_cpu_ptr(&lru_pvecs.lru_add);
/*
* Search backwards on the optimistic assumption that the page being
@@ -360,7 +420,7 @@
}
}
- put_locked_var(swapvec_lock, lru_add_pvec);
+ local_unlock(&lru_pvecs.lock);
}
/*
@@ -376,12 +436,20 @@
void mark_page_accessed(struct page *page)
{
page = compound_head(page);
- if (!PageActive(page) && !PageUnevictable(page) &&
- PageReferenced(page)) {
+ trace_android_vh_mark_page_accessed(page);
+ if (!PageReferenced(page)) {
+ SetPageReferenced(page);
+ } else if (PageUnevictable(page)) {
+ /*
+ * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
+ * this list is never rotated or maintained, so marking an
+ * unevictable page accessed has no effect.
+ */
+ } else if (!PageActive(page)) {
/*
* If the page is on the LRU, queue it for activation via
- * activate_page_pvecs. Otherwise, assume the page is on a
+ * lru_pvecs.activate_page. Otherwise, assume the page is on a
* pagevec, mark it active and it'll be moved to the active
* LRU on the next drain.
*/
@@ -390,44 +458,12 @@
else
__lru_cache_activate_page(page);
ClearPageReferenced(page);
- if (page_is_file_cache(page))
- workingset_activation(page);
- } else if (!PageReferenced(page)) {
- SetPageReferenced(page);
+ workingset_activation(page);
}
if (page_is_idle(page))
clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);
-
-static void __lru_cache_add(struct page *page)
-{
- struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
-
- get_page(page);
- if (!pagevec_add(pvec, page) || PageCompound(page))
- __pagevec_lru_add(pvec);
- put_locked_var(swapvec_lock, lru_add_pvec);
-}
-
-/**
- * lru_cache_add_anon - add a page to the page lists
- * @page: the page to add
- */
-void lru_cache_add_anon(struct page *page)
-{
- if (PageActive(page))
- ClearPageActive(page);
- __lru_cache_add(page);
-}
-
-void lru_cache_add_file(struct page *page)
-{
- if (PageActive(page))
- ClearPageActive(page);
- __lru_cache_add(page);
-}
-EXPORT_SYMBOL(lru_cache_add_file);
/**
* lru_cache_add - add a page to a page list
@@ -440,37 +476,45 @@
*/
void lru_cache_add(struct page *page)
{
+ struct pagevec *pvec;
+
VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
VM_BUG_ON_PAGE(PageLRU(page), page);
- __lru_cache_add(page);
+
+ get_page(page);
+ local_lock(&lru_pvecs.lock);
+ pvec = this_cpu_ptr(&lru_pvecs.lru_add);
+ if (pagevec_add_and_need_flush(pvec, page))
+ __pagevec_lru_add(pvec);
+ local_unlock(&lru_pvecs.lock);
}
+EXPORT_SYMBOL(lru_cache_add);
/**
- * lru_cache_add_active_or_unevictable
+ * lru_cache_add_inactive_or_unevictable
* @page: the page to be added to LRU
* @vma: vma in which page is mapped for determining reclaimability
*
- * Place @page on the active or unevictable LRU list, depending on its
- * evictability. Note that if the page is not evictable, it goes
- * directly back onto it's zone's unevictable list, it does NOT use a
- * per cpu pagevec.
+ * Place @page on the inactive or unevictable LRU list, depending on its
+ * evictability.
*/
-void lru_cache_add_active_or_unevictable(struct page *page,
- struct vm_area_struct *vma)
+void __lru_cache_add_inactive_or_unevictable(struct page *page,
+ unsigned long vma_flags)
{
+ bool unevictable;
+
VM_BUG_ON_PAGE(PageLRU(page), page);
- if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
- SetPageActive(page);
- else if (!TestSetPageMlocked(page)) {
+ unevictable = (vma_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
+ if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
+ int nr_pages = thp_nr_pages(page);
/*
* We use the irq-unsafe __mod_zone_page_stat because this
* counter is not modified from interrupt context, and the pte
* lock is held(spinlock), which implies preemption disabled.
*/
- __mod_zone_page_state(page_zone(page), NR_MLOCK,
- hpage_nr_pages(page));
- count_vm_event(UNEVICTABLE_PGMLOCKED);
+ __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+ count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
}
lru_cache_add(page);
}
@@ -499,8 +543,9 @@
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
void *arg)
{
- int lru, file;
+ int lru;
bool active;
+ int nr_pages = thp_nr_pages(page);
if (!PageLRU(page))
return;
@@ -513,13 +558,11 @@
return;
active = PageActive(page);
- file = page_is_file_cache(page);
lru = page_lru_base_type(page);
del_page_from_lru_list(page, lruvec, lru + active);
ClearPageActive(page);
ClearPageReferenced(page);
- add_page_to_lru_list(page, lruvec, lru);
if (PageWriteback(page) || PageDirty(page)) {
/*
@@ -527,21 +570,41 @@
* It can make readahead confusing. But race window
* is _really_ small and it's non-critical problem.
*/
+ add_page_to_lru_list(page, lruvec, lru);
SetPageReclaim(page);
} else {
/*
 * The page's writeback has ended, so move the page to the
 * tail of the inactive list.
*/
- list_move_tail(&page->lru, &lruvec->lists[lru]);
- __count_vm_event(PGROTATED);
+ add_page_to_lru_list_tail(page, lruvec, lru);
+ __count_vm_events(PGROTATED, nr_pages);
}
- if (active)
- __count_vm_event(PGDEACTIVATE);
- update_page_reclaim_stat(lruvec, file, 0);
+ if (active) {
+ __count_vm_events(PGDEACTIVATE, nr_pages);
+ __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
+ nr_pages);
+ }
}
+static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+ void *arg)
+{
+ if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
+ int lru = page_lru_base_type(page);
+ int nr_pages = thp_nr_pages(page);
+
+ del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
+ ClearPageActive(page);
+ ClearPageReferenced(page);
+ add_page_to_lru_list(page, lruvec, lru);
+
+ __count_vm_events(PGDEACTIVATE, nr_pages);
+ __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
+ nr_pages);
+ }
+}
static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
void *arg)
@@ -549,22 +612,43 @@
if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
!PageSwapCache(page) && !PageUnevictable(page)) {
bool active = PageActive(page);
+ int nr_pages = thp_nr_pages(page);
del_page_from_lru_list(page, lruvec,
LRU_INACTIVE_ANON + active);
ClearPageActive(page);
ClearPageReferenced(page);
/*
- * lazyfree pages are clean anonymous pages. They have
- * SwapBacked flag cleared to distinguish normal anonymous
- * pages
+ * Lazyfree pages are clean anonymous pages. They have
+ * PG_swapbacked flag cleared, to distinguish them from normal
+ * anonymous pages
*/
ClearPageSwapBacked(page);
add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
- __count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
- count_memcg_page_event(page, PGLAZYFREE);
- update_page_reclaim_stat(lruvec, 1, 0);
+ __count_vm_events(PGLAZYFREE, nr_pages);
+ __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
+ nr_pages);
+ }
+}
+
+static void lru_lazyfree_movetail_fn(struct page *page, struct lruvec *lruvec,
+ void *arg)
+{
+ bool *add_to_tail = (bool *)arg;
+
+ if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) &&
+ !PageSwapCache(page)) {
+ bool active = PageActive(page);
+
+ del_page_from_lru_list(page, lruvec,
+ LRU_INACTIVE_ANON + active);
+ ClearPageActive(page);
+ ClearPageReferenced(page);
+ if (add_to_tail && *add_to_tail)
+ add_page_to_lru_list_tail(page, lruvec, LRU_INACTIVE_FILE);
+ else
+ add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
}
}
@@ -575,34 +659,37 @@
*/
void lru_add_drain_cpu(int cpu)
{
- struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
+ struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);
if (pagevec_count(pvec))
__pagevec_lru_add(pvec);
- pvec = &per_cpu(lru_rotate_pvecs, cpu);
- if (pagevec_count(pvec)) {
+ pvec = &per_cpu(lru_rotate.pvec, cpu);
+ /* Disabling interrupts below acts as a compiler barrier. */
+ if (data_race(pagevec_count(pvec))) {
unsigned long flags;
/* No harm done if a racing interrupt already did this */
-#ifdef CONFIG_PREEMPT_RT_BASE
- local_lock_irqsave_on(rotate_lock, flags, cpu);
+ local_lock_irqsave(&lru_rotate.lock, flags);
pagevec_move_tail(pvec);
- local_unlock_irqrestore_on(rotate_lock, flags, cpu);
-#else
- local_lock_irqsave(rotate_lock, flags);
- pagevec_move_tail(pvec);
- local_unlock_irqrestore(rotate_lock, flags);
-#endif
+ local_unlock_irqrestore(&lru_rotate.lock, flags);
}
- pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
+ pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
if (pagevec_count(pvec))
pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
- pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
+ pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
+ if (pagevec_count(pvec))
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+
+ pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
if (pagevec_count(pvec))
pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
+
+ pvec = &per_cpu(lru_pvecs.lru_lazyfree_movetail, cpu);
+ if (pagevec_count(pvec))
+ pagevec_lru_move_fn(pvec, lru_lazyfree_movetail_fn, NULL);
activate_page_drain(cpu);
}
@@ -625,12 +712,36 @@
return;
if (likely(get_page_unless_zero(page))) {
- struct pagevec *pvec = &get_locked_var(swapvec_lock,
- lru_deactivate_file_pvecs);
+ struct pagevec *pvec;
- if (!pagevec_add(pvec, page) || PageCompound(page))
+ local_lock(&lru_pvecs.lock);
+ pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
+
+ if (pagevec_add_and_need_flush(pvec, page))
pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
- put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
+ local_unlock(&lru_pvecs.lock);
+ }
+}
+
+/*
+ * deactivate_page - deactivate a page
+ * @page: page to deactivate
+ *
+ * deactivate_page() moves @page to the inactive list if @page was on the active
+ * list and was not an unevictable page. This is done to accelerate the reclaim
+ * of @page.
+ */
+void deactivate_page(struct page *page)
+{
+ if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
+ struct pagevec *pvec;
+
+ local_lock(&lru_pvecs.lock);
+ pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
+ get_page(page);
+ if (pagevec_add_and_need_flush(pvec, page))
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+ local_unlock(&lru_pvecs.lock);
}
}
@@ -645,50 +756,77 @@
{
if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
!PageSwapCache(page) && !PageUnevictable(page)) {
- struct pagevec *pvec = &get_locked_var(swapvec_lock,
- lru_lazyfree_pvecs);
+ struct pagevec *pvec;
+ local_lock(&lru_pvecs.lock);
+ pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
get_page(page);
- if (!pagevec_add(pvec, page) || PageCompound(page))
+ if (pagevec_add_and_need_flush(pvec, page))
pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
- put_locked_var(swapvec_lock, lru_lazyfree_pvecs);
+ local_unlock(&lru_pvecs.lock);
+ }
+}
+
+/**
+ * mark_page_lazyfree_movetail - make a swapbacked page lazyfree
+ * @page: page to deactivate
+ * @tail: if true, add @page to the tail of the inactive file list
+ *
+ * mark_page_lazyfree_movetail() moves @page to the inactive file list
+ * (to its tail when @tail is set) to accelerate its reclaim.
+ */
+void mark_page_lazyfree_movetail(struct page *page, bool tail)
+{
+ if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) &&
+ !PageSwapCache(page)) {
+ struct pagevec *pvec;
+
+ local_lock(&lru_pvecs.lock);
+ pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree_movetail);
+ get_page(page);
+ if (pagevec_add_and_need_flush(pvec, page))
+ pagevec_lru_move_fn(pvec,
+ lru_lazyfree_movetail_fn, &tail);
+ local_unlock(&lru_pvecs.lock);
}
}
void lru_add_drain(void)
{
- lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
- local_unlock_cpu(swapvec_lock);
+ local_lock(&lru_pvecs.lock);
+ lru_add_drain_cpu(smp_processor_id());
+ local_unlock(&lru_pvecs.lock);
+}
+
+/*
+ * In the SMP case this is called from per-CPU workqueue context, so
+ * lru_add_drain_cpu() and invalidate_bh_lrus_cpu() run on the same
+ * CPU. In the !SMP case there is only one CPU and the locks disable
+ * preemption, so this is not a problem either.
+ */
+static void lru_add_and_bh_lrus_drain(void)
+{
+ local_lock(&lru_pvecs.lock);
+ lru_add_drain_cpu(smp_processor_id());
+ local_unlock(&lru_pvecs.lock);
+ invalidate_bh_lrus_cpu();
+}
+
+void lru_add_drain_cpu_zone(struct zone *zone)
+{
+ local_lock(&lru_pvecs.lock);
+ lru_add_drain_cpu(smp_processor_id());
+ drain_local_pages(zone);
+ local_unlock(&lru_pvecs.lock);
}
#ifdef CONFIG_SMP
-
-#ifdef CONFIG_PREEMPT_RT_BASE
-static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
-{
- local_lock_on(swapvec_lock, cpu);
- lru_add_drain_cpu(cpu);
- local_unlock_on(swapvec_lock, cpu);
-}
-
-#else
static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
- lru_add_drain();
+ lru_add_and_bh_lrus_drain();
}
-
-static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
-{
- struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
-
- INIT_WORK(work, lru_add_drain_per_cpu);
- queue_work_on(cpu, mm_percpu_wq, work);
- cpumask_set_cpu(cpu, has_work);
-}
-#endif
/*
* Doesn't need any cpu hotplug locking because we do rely on per-cpu
@@ -697,11 +835,22 @@
* Calling this function with cpu hotplug locks held can actually lead
* to obscure indirect dependencies via WQ context.
*/
-void lru_add_drain_all(void)
+inline void __lru_add_drain_all(bool force_all_cpus)
{
- static DEFINE_MUTEX(lock);
+ /*
+ * lru_drain_gen - Global pages generation number
+ *
+ * (A) Definition: global lru_drain_gen = x implies that all generations
+ * 0 < n <= x are already *scheduled* for draining.
+ *
+ * This is an optimization for the highly-contended use case where a
+ * user space workload keeps constantly generating a flow of pages for
+ * each CPU.
+ */
+ static unsigned int lru_drain_gen;
static struct cpumask has_work;
- int cpu;
+ static DEFINE_MUTEX(lock);
+ unsigned cpu, this_gen;
/*
* Make sure nobody triggers this path before mm_percpu_wq is fully
@@ -710,32 +859,135 @@
if (WARN_ON(!mm_percpu_wq))
return;
+ /*
+ * Guarantee pagevec counter stores visible by this CPU are visible to
+ * other CPUs before loading the current drain generation.
+ */
+ smp_mb();
+
+ /*
+ * (B) Locally cache global LRU draining generation number
+ *
+ * The read barrier ensures that the counter is loaded before the mutex
+ * is taken. It pairs with smp_mb() inside the mutex critical section
+ * at (D).
+ */
+ this_gen = smp_load_acquire(&lru_drain_gen);
+
mutex_lock(&lock);
+
+ /*
+ * (C) Exit the draining operation if a newer generation, from another
+ * lru_add_drain_all(), was already scheduled for draining. Check (A).
+ */
+ if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
+ goto done;
+
+ /*
+ * (D) Increment global generation number
+ *
+ * Pairs with smp_load_acquire() at (B), outside of the critical
+ * section. Use a full memory barrier to guarantee that the new global
+ * drain generation number is stored before loading pagevec counters.
+ *
+ * This pairing must be done here, before the for_each_online_cpu loop
+ * below which drains the page vectors.
+ *
+ * Let x, y, and z represent some system CPU numbers, where x < y < z.
+ * Assume CPU #z is in the middle of the for_each_online_cpu loop
+ * below and has already reached CPU #y's per-cpu data. CPU #x comes
+ * along, adds some pages to its per-cpu vectors, then calls
+ * lru_add_drain_all().
+ *
+ * If the paired barrier is done at any later step, e.g. after the
+ * loop, CPU #x will just exit at (C) and miss flushing out all of its
+ * added pages.
+ */
+ WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
+ smp_mb();
+
cpumask_clear(&has_work);
-
for_each_online_cpu(cpu) {
+ struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
- if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
- pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
- pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
- pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
- need_activate_page_drain(cpu))
- remote_lru_add_drain(cpu, &has_work);
+ if (force_all_cpus ||
+ pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
+ data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
+ pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
+ pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
+ pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
+ pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree_movetail, cpu)) ||
+ need_activate_page_drain(cpu) ||
+ has_bh_in_lru(cpu, NULL)) {
+ INIT_WORK(work, lru_add_drain_per_cpu);
+ queue_work_on(cpu, mm_percpu_wq, work);
+ __cpumask_set_cpu(cpu, &has_work);
+ }
}
-#ifndef CONFIG_PREEMPT_RT_BASE
for_each_cpu(cpu, &has_work)
flush_work(&per_cpu(lru_add_drain_work, cpu));
-#endif
+done:
mutex_unlock(&lock);
+}
+
+void lru_add_drain_all(void)
+{
+ __lru_add_drain_all(false);
}
#else
void lru_add_drain_all(void)
{
lru_add_drain();
}
+#endif /* CONFIG_SMP */
+
+static atomic_t lru_disable_count = ATOMIC_INIT(0);
+
+bool lru_cache_disabled(void)
+{
+ return atomic_read(&lru_disable_count) != 0;
+}
+
+void lru_cache_enable(void)
+{
+ atomic_dec(&lru_disable_count);
+}
+EXPORT_SYMBOL_GPL(lru_cache_enable);
+
+/*
+ * lru_cache_disable() needs to be called before we start compiling
+ * a list of pages to be migrated using isolate_lru_page().
+ * It drains the pages on the LRU caches and then disables them on all
+ * CPUs until lru_cache_enable() is called.
+ *
+ * Must be paired with a call to lru_cache_enable().
+ */
+void lru_cache_disable(void)
+{
+ /*
+ * If someone has already disabled the LRU cache, just return after
+ * incrementing lru_disable_count.
+ */
+ if (atomic_inc_not_zero(&lru_disable_count))
+ return;
+#ifdef CONFIG_SMP
+ /*
+ * lru_add_drain_all() in force mode will schedule draining on all
+ * online CPUs, so any call to lru_cache_disabled() wrapped in
+ * local_lock or with preemption disabled is ordered by that. The
+ * atomic operation does not need stronger ordering because that is
+ * enforced by the scheduling guarantees.
+ */
+ __lru_add_drain_all(true);
+#else
+ lru_add_and_bh_lrus_drain();
#endif
+ atomic_inc(&lru_disable_count);
+}
+EXPORT_SYMBOL_GPL(lru_cache_disable);
/**
* release_pages - batched put_page()
@@ -751,8 +1003,8 @@
LIST_HEAD(pages_to_free);
struct pglist_data *locked_pgdat = NULL;
struct lruvec *lruvec;
- unsigned long uninitialized_var(flags);
- unsigned int uninitialized_var(lock_batch);
+ unsigned long flags;
+ unsigned int lock_batch;
for (i = 0; i < nr; i++) {
struct page *page = pages[i];
@@ -767,6 +1019,7 @@
locked_pgdat = NULL;
}
+ page = compound_head(page);
if (is_huge_zero_page(page))
continue;
@@ -778,15 +1031,16 @@
}
/*
* ZONE_DEVICE pages that return 'false' from
- * put_devmap_managed_page() do not require special
+ * page_is_devmap_managed() do not require special
* processing, and instead, expect a call to
* put_page_testzero().
*/
- if (put_devmap_managed_page(page))
+ if (page_is_devmap_managed(page)) {
+ put_devmap_managed_page(page);
continue;
+ }
}
- page = compound_head(page);
if (!put_page_testzero(page))
continue;
@@ -817,8 +1071,6 @@
del_page_from_lru_list(page, lruvec, page_off_lru(page));
}
- /* Clear Active bit in case of parallel mark_page_accessed */
- __ClearPageActive(page);
__ClearPageWaiters(page);
list_add(&page->lru, &pages_to_free);
@@ -857,13 +1109,10 @@
void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec, struct list_head *list)
{
- const int file = 0;
-
VM_BUG_ON_PAGE(!PageHead(page), page);
VM_BUG_ON_PAGE(PageCompound(page_tail), page);
VM_BUG_ON_PAGE(PageLRU(page_tail), page);
- VM_BUG_ON(NR_CPUS != 1 &&
- !spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock));
+ lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
if (!list)
SetPageLRU(page_tail);
@@ -875,21 +1124,16 @@
get_page(page_tail);
list_add_tail(&page_tail->lru, list);
} else {
- struct list_head *list_head;
/*
* Head page has not yet been counted, as an hpage,
* so we must account for each subpage individually.
*
- * Use the standard add function to put page_tail on the list,
- * but then correct its position so they all end up in order.
+ * Put page_tail on the list at the correct position
+ * so they all end up in order.
*/
- add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
- list_head = page_tail->lru.prev;
- list_move_tail(&page_tail->lru, list_head);
+ add_page_to_lru_list_tail(page_tail, lruvec,
+ page_lru(page_tail));
}
-
- if (!PageUnevictable(page))
- update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -898,13 +1142,13 @@
{
enum lru_list lru;
int was_unevictable = TestClearPageUnevictable(page);
+ int nr_pages = thp_nr_pages(page);
VM_BUG_ON_PAGE(PageLRU(page), page);
- SetPageLRU(page);
/*
* Page becomes evictable in two ways:
- * 1) Within LRU lock [munlock_vma_pages() and __munlock_pagevec()].
+ * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
* 2) Before acquiring LRU lock to put the page to correct LRU and then
* a) do PageLRU check with lock [check_move_unevictable_pages]
* b) do PageLRU check before lock [clear_page_mlock]
@@ -928,20 +1172,19 @@
* looking at the same page) and the evictable page will be stranded
* in an unevictable LRU.
*/
- smp_mb();
+ SetPageLRU(page);
+ smp_mb__after_atomic();
if (page_evictable(page)) {
lru = page_lru(page);
- update_page_reclaim_stat(lruvec, page_is_file_cache(page),
- PageActive(page));
if (was_unevictable)
- count_vm_event(UNEVICTABLE_PGRESCUED);
+ __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
} else {
lru = LRU_UNEVICTABLE;
ClearPageActive(page);
SetPageUnevictable(page);
if (!was_unevictable)
- count_vm_event(UNEVICTABLE_PGCULLED);
+ __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
}
add_page_to_lru_list(page, lruvec, lru);
@@ -956,7 +1199,6 @@
{
pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
-EXPORT_SYMBOL(__pagevec_lru_add);
/**
* pagevec_lookup_entries - gang pagecache lookup
@@ -974,6 +1216,10 @@
* The search returns a group of mapping-contiguous entries with
* ascending indexes. There may be holes in the indices due to
* not-present entries.
+ *
+ * Only one subpage of a Transparent Huge Page is returned in one call:
+ * allowing truncate_inode_pages_range() to evict the whole THP without
+ * cycling through a pagevec of extra references.
*
* pagevec_lookup_entries() returns the number of entries which were
* found.
@@ -1003,7 +1249,7 @@
for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
- if (!radix_tree_exceptional_entry(page))
+ if (!xa_is_value(page))
pvec->pages[j++] = page;
}
pvec->nr = j;
@@ -1040,7 +1286,7 @@
unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, pgoff_t end,
- int tag)
+ xa_mark_t tag)
{
pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
PAGEVEC_SIZE, pvec->pages);
@@ -1050,7 +1296,7 @@
unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, pgoff_t end,
- int tag, unsigned max_pages)
+ xa_mark_t tag, unsigned max_pages)
{
pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
@@ -1062,7 +1308,7 @@
*/
void __init swap_setup(void)
{
- unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
+ unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
/* Use a smaller cluster for small-memory machines */
if (megs < 16)
@@ -1074,3 +1320,26 @@
* _really_ don't want to cluster much more
*/
}
+
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+void put_devmap_managed_page(struct page *page)
+{
+ int count;
+
+ if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
+ return;
+
+ count = page_ref_dec_return(page);
+
+ /*
+ * devmap page refcounts are 1-based, rather than 0-based: if
+ * refcount is 1, then the page is free and the refcount is
+ * stable because nobody holds a reference on the page.
+ */
+ if (count == 1)
+ free_devmap_managed_page(page);
+ else if (!count)
+ __put_page(page);
+}
+EXPORT_SYMBOL(put_devmap_managed_page);
+#endif
--
Gitblit v1.6.2