From ea08eeccae9297f7aabd2ef7f0c2517ac4549acc Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:18:26 +0000
Subject: [PATCH] write in 30M
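
Convert the swap cache from the radix tree API to the XArray, keep
shadow (workingset) entries in the swap cache when anonymous pages are
reclaimed so refaults can be detected at swapin, and pin the swap
device with get_swap_device()/put_swap_device() instead of RCU so that
swapoff cannot free swapper_spaces under a reader. VMA bounds used by
readahead are read with READ_ONCE() for the speculative page fault
path. (Body reconstructed from the diff; the original commit message
had none.)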
---
kernel/mm/swap_state.c | 392 +++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 247 insertions(+), 145 deletions(-)
diff --git a/kernel/mm/swap_state.c b/kernel/mm/swap_state.c
index b99e8fd..a8a98bf 100644
--- a/kernel/mm/swap_state.c
+++ b/kernel/mm/swap_state.c
@@ -21,8 +21,7 @@
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
-
-#include <asm/pgtable.h>
+#include <linux/shmem_fs.h>
#include "internal.h"
/*
@@ -59,8 +58,8 @@
#define GET_SWAP_RA_VAL(vma) \
(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
-#define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)
-#define ADD_CACHE_INFO(x, nr) do { swap_cache_info.x += (nr); } while (0)
+#define INC_CACHE_INFO(x) data_race(swap_cache_info.x++)
+#define ADD_CACHE_INFO(x, nr) data_race(swap_cache_info.x += (nr))
static struct {
unsigned long add_total;
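
Note: the data_race() change above marks these statistics updates as
intentionally racy for KCSAN; data_race() only annotates the access, it
does not serialize it. A minimal userspace analog (not kernel code)
would reach for a relaxed atomic counter instead:

    #include <stdatomic.h>

    static _Atomic unsigned long add_total_stat;

    static inline void inc_add_total(unsigned long nr)
    {
            /* Best-effort statistic: no ordering or locking needed. */
            atomic_fetch_add_explicit(&add_total_stat, nr,
                                      memory_order_relaxed);
    }
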
@@ -74,25 +73,27 @@
unsigned int i, j, nr;
unsigned long ret = 0;
struct address_space *spaces;
+ struct swap_info_struct *si;
- rcu_read_lock();
for (i = 0; i < MAX_SWAPFILES; i++) {
- /*
- * The corresponding entries in nr_swapper_spaces and
- * swapper_spaces will be reused only after at least
- * one grace period. So it is impossible for them
- * belongs to different usage.
- */
- nr = nr_swapper_spaces[i];
- spaces = rcu_dereference(swapper_spaces[i]);
- if (!nr || !spaces)
+ swp_entry_t entry = swp_entry(i, 1);
+
+ /* Avoid a get_swap_device() warning for a bad swap entry */
+ if (!swp_swap_info(entry))
continue;
+ /* Prevent swapoff from freeing swapper_spaces */
+ si = get_swap_device(entry);
+ if (!si)
+ continue;
+ nr = nr_swapper_spaces[i];
+ spaces = swapper_spaces[i];
for (j = 0; j < nr; j++)
ret += spaces[j].nrpages;
+ put_swap_device(si);
}
- rcu_read_unlock();
return ret;
}
+EXPORT_SYMBOL_GPL(total_swapcache_pages);
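
Note: the RCU read-side section is replaced above by pinning the
device. A minimal userspace sketch of the tryget idiom (hypothetical
struct, not the real swap_info_struct; looking up the pointer itself
still needs RCU or a registry lock, which the kernel provides):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct dev {
            _Atomic long users;     /* readers currently pinning the device */
            _Atomic bool dying;     /* teardown has begun */
    };

    static struct dev *dev_tryget(struct dev *d)
    {
            atomic_fetch_add(&d->users, 1);
            if (atomic_load(&d->dying)) {
                    atomic_fetch_sub(&d->users, 1); /* raced with teardown */
                    return NULL;
            }
            return d;
    }

    static void dev_put(struct dev *d)
    {
            atomic_fetch_sub(&d->users, 1);
    }

    static void dev_kill(struct dev *d)
    {
            atomic_store(&d->dying, true);
            while (atomic_load(&d->users) > 0)
                    ;       /* the kernel sleeps here, and frees only after */
    }
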
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
@@ -107,15 +108,32 @@
printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
+void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+ struct address_space *address_space = swap_address_space(entry);
+ pgoff_t idx = swp_offset(entry);
+ struct page *page;
+
+ page = find_get_entry(address_space, idx);
+ if (xa_is_value(page))
+ return page;
+ if (page)
+ put_page(page);
+ return NULL;
+}
+
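
Note: the shadow returned here is an XArray "value" entry, a small
integer tagged in the low bit of a pointer-sized slot so it can coexist
with page pointers. The encoding is essentially (mirroring
xa_mk_value() and friends in <linux/xarray.h>):

    static inline void *xa_mk_value(unsigned long v)
    {
            return (void *)((v << 1) | 1);  /* tag bit 0 */
    }

    static inline bool xa_is_value(const void *entry)
    {
            return (unsigned long)entry & 1;
    }

    static inline unsigned long xa_to_value(const void *entry)
    {
            return (unsigned long)entry >> 1;
    }
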
/*
- * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
+ * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
* but sets SwapCache flag and private instead of mapping and index.
*/
-int __add_to_swap_cache(struct page *page, swp_entry_t entry)
+int add_to_swap_cache(struct page *page, swp_entry_t entry,
+ gfp_t gfp, void **shadowp)
{
- int error, i, nr = hpage_nr_pages(page);
- struct address_space *address_space;
+ struct address_space *address_space = swap_address_space(entry);
pgoff_t idx = swp_offset(entry);
+ XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
+ unsigned long i, nr = thp_nr_pages(page);
+ void *old;
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageSwapCache(page), page);
@@ -124,75 +142,66 @@
page_ref_add(page, nr);
SetPageSwapCache(page);
- address_space = swap_address_space(entry);
- xa_lock_irq(&address_space->i_pages);
- for (i = 0; i < nr; i++) {
- set_page_private(page + i, entry.val + i);
- error = radix_tree_insert(&address_space->i_pages,
- idx + i, page + i);
- if (unlikely(error))
- break;
- }
- if (likely(!error)) {
+ do {
+ unsigned long nr_shadows = 0;
+
+ xas_lock_irq(&xas);
+ xas_create_range(&xas);
+ if (xas_error(&xas))
+ goto unlock;
+ for (i = 0; i < nr; i++) {
+ VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
+ old = xas_load(&xas);
+ if (xa_is_value(old)) {
+ nr_shadows++;
+ if (shadowp)
+ *shadowp = old;
+ }
+ set_page_private(page + i, entry.val + i);
+ xas_store(&xas, page);
+ xas_next(&xas);
+ }
+ address_space->nrexceptional -= nr_shadows;
address_space->nrpages += nr;
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
ADD_CACHE_INFO(add_total, nr);
- } else {
- /*
- * Only the context which have set SWAP_HAS_CACHE flag
- * would call add_to_swap_cache().
- * So add_to_swap_cache() doesn't returns -EEXIST.
- */
- VM_BUG_ON(error == -EEXIST);
- set_page_private(page + i, 0UL);
- while (i--) {
- radix_tree_delete(&address_space->i_pages, idx + i);
- set_page_private(page + i, 0UL);
- }
- ClearPageSwapCache(page);
- page_ref_sub(page, nr);
- }
- xa_unlock_irq(&address_space->i_pages);
+unlock:
+ xas_unlock_irq(&xas);
+ } while (xas_nomem(&xas, gfp));
- return error;
-}
+ if (!xas_error(&xas))
+ return 0;
-
-int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
-{
- int error;
-
- error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
- if (!error) {
- error = __add_to_swap_cache(page, entry);
- radix_tree_preload_end();
- }
- return error;
+ ClearPageSwapCache(page);
+ page_ref_sub(page, nr);
+ return xas_error(&xas);
}
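
Note: the do/while above is the standard XArray insertion shape: try
the store under the lock, and if internal node allocation failed,
xas_nomem() allocates with the caller's gfp mask outside the lock and
returns true so the loop retries. Reduced to its skeleton (same calls
as above, not standalone code):

    do {
            xas_lock_irq(&xas);
            /* stores may fail internally with -ENOMEM */
            xas_unlock_irq(&xas);
    } while (xas_nomem(&xas, gfp));     /* true: memory ready, retry */

    return xas_error(&xas);             /* 0 or the sticky error */
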
/*
* This must be called only on pages that have
* been verified to be in the swap cache.
*/
-void __delete_from_swap_cache(struct page *page)
+void __delete_from_swap_cache(struct page *page,
+ swp_entry_t entry, void *shadow)
{
- struct address_space *address_space;
- int i, nr = hpage_nr_pages(page);
- swp_entry_t entry;
- pgoff_t idx;
+ struct address_space *address_space = swap_address_space(entry);
+ int i, nr = thp_nr_pages(page);
+ pgoff_t idx = swp_offset(entry);
+ XA_STATE(xas, &address_space->i_pages, idx);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageSwapCache(page), page);
VM_BUG_ON_PAGE(PageWriteback(page), page);
- entry.val = page_private(page);
- address_space = swap_address_space(entry);
- idx = swp_offset(entry);
for (i = 0; i < nr; i++) {
- radix_tree_delete(&address_space->i_pages, idx + i);
+ void *entry = xas_store(&xas, shadow);
+ VM_BUG_ON_PAGE(entry != page, entry);
set_page_private(page + i, 0);
+ xas_next(&xas);
}
ClearPageSwapCache(page);
+ if (shadow)
+ address_space->nrexceptional += nr;
address_space->nrpages -= nr;
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
ADD_CACHE_INFO(del_total, nr);
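
Note: storing a non-NULL shadow on deletion is what enables workingset
detection for anonymous memory: reclaim records an eviction cookie in
the emptied slot, and __read_swap_cache_async() below hands it to
workingset_refault() at swapin. Hypothetical call-site sketch (the
real eviction side lives in vmscan.c):

    void *shadow = workingset_eviction(page, target_memcg);

    __delete_from_swap_cache(page, entry, shadow);
    /* ... later, at swapin ... */
    if (shadow)
            workingset_refault(page, shadow);
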
@@ -218,7 +227,7 @@
return 0;
/*
- * Radix-tree node allocations from PF_MEMALLOC contexts could
+ * XArray node allocations from PF_MEMALLOC contexts could
* completely exhaust the page allocator. __GFP_NOMEMALLOC
* stops emergency reserves from being allocated.
*
@@ -229,8 +238,7 @@
* Add it to the swap cache.
*/
err = add_to_swap_cache(page, entry,
- __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
- /* -ENOMEM radix-tree allocation failure */
+ __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
if (err)
/*
* add_to_swap_cache() doesn't return -EEXIST, so we can safely
@@ -239,7 +247,7 @@
goto fail;
/*
* Normally the page will be dirtied in unmap because its pte should be
- * dirty. A special case is MADV_FREE page. The page'e pte could have
+ * dirty. A special case is MADV_FREE page. The page's pte could have
* dirty bit cleared but the page's SwapBacked bit is still set because
* clearing the dirty bit and SwapBacked bit has no lock protected. For
* such page, unmap will not set dirty bit for it, so page reclaim will
@@ -264,18 +272,46 @@
*/
void delete_from_swap_cache(struct page *page)
{
- swp_entry_t entry;
- struct address_space *address_space;
+ swp_entry_t entry = { .val = page_private(page) };
+ struct address_space *address_space = swap_address_space(entry);
- entry.val = page_private(page);
-
- address_space = swap_address_space(entry);
xa_lock_irq(&address_space->i_pages);
- __delete_from_swap_cache(page);
+ __delete_from_swap_cache(page, entry, NULL);
xa_unlock_irq(&address_space->i_pages);
put_swap_page(page, entry);
- page_ref_sub(page, hpage_nr_pages(page));
+ page_ref_sub(page, thp_nr_pages(page));
+}
+
+void clear_shadow_from_swap_cache(int type, unsigned long begin,
+ unsigned long end)
+{
+ unsigned long curr = begin;
+ void *old;
+
+ for (;;) {
+ unsigned long nr_shadows = 0;
+ swp_entry_t entry = swp_entry(type, curr);
+ struct address_space *address_space = swap_address_space(entry);
+ XA_STATE(xas, &address_space->i_pages, curr);
+
+ xa_lock_irq(&address_space->i_pages);
+ xas_for_each(&xas, old, end) {
+ if (!xa_is_value(old))
+ continue;
+ xas_store(&xas, NULL);
+ nr_shadows++;
+ }
+ address_space->nrexceptional -= nr_shadows;
+ xa_unlock_irq(&address_space->i_pages);
+
+ /* search the next swap address space until we reach end */
+ curr >>= SWAP_ADDRESS_SPACE_SHIFT;
+ curr++;
+ curr <<= SWAP_ADDRESS_SPACE_SHIFT;
+ if (curr > end)
+ break;
+ }
}
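
Note: the shift/increment/shift sequence rounds curr up to the first
offset of the next per-64MB swap address space (SWAP_ADDRESS_SPACE_SHIFT
is 14, so each space covers 16384 slots). For example, curr = 20000
gives 20000 >> 14 = 1, then 2 << 14 = 32768, so the next iteration
starts at the second space.
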
/*
@@ -335,8 +371,13 @@
unsigned long addr)
{
struct page *page;
+ struct swap_info_struct *si;
+ si = get_swap_device(entry);
+ if (!si)
+ return NULL;
page = find_get_page(swap_address_space(entry), swp_offset(entry));
+ put_swap_device(si);
INC_CACHE_INFO(find_total);
if (page) {
@@ -375,24 +416,64 @@
return page;
}
+/**
+ * find_get_incore_page - Find and get a page from the page or swap caches.
+ * @mapping: The address_space to search.
+ * @index: The page cache index.
+ *
+ * This differs from find_get_page() in that it will also look for the
+ * page in the swap cache.
+ *
+ * Return: The found page or %NULL.
+ */
+struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
+{
+ swp_entry_t swp;
+ struct swap_info_struct *si;
+ struct page *page = find_get_entry(mapping, index);
+
+ if (!page)
+ return page;
+ if (!xa_is_value(page))
+ return find_subpage(page, index);
+ if (!shmem_mapping(mapping))
+ return NULL;
+
+ swp = radix_to_swp_entry(page);
+ /* Prevent swapoff from happening to us */
+ si = get_swap_device(swp);
+ if (!si)
+ return NULL;
+ page = find_get_page(swap_address_space(swp), swp_offset(swp));
+ put_swap_device(si);
+ return page;
+}
+
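
Note: a hypothetical caller, in the style of mincore(), that treats a
shmem page as resident whether it sits in the page cache or still in
the swap cache:

    static bool page_is_incore(struct address_space *mapping, pgoff_t index)
    {
            struct page *page = find_get_incore_page(mapping, index);

            if (!page)
                    return false;
            put_page(page); /* find_get_incore_page() took a reference */
            return true;
    }
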
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr,
bool *new_page_allocated)
{
- struct page *found_page, *new_page = NULL;
- struct address_space *swapper_space = swap_address_space(entry);
- int err;
+ struct swap_info_struct *si;
+ struct page *page;
+ void *shadow = NULL;
+
*new_page_allocated = false;
- do {
+ for (;;) {
+ int err;
/*
* First check the swap cache. Since this is normally
* called after lookup_swap_cache() failed, re-calling
* that would confuse statistics.
*/
- found_page = find_get_page(swapper_space, swp_offset(entry));
- if (found_page)
- break;
+ si = get_swap_device(entry);
+ if (!si)
+ return NULL;
+ page = find_get_page(swap_address_space(entry),
+ swp_offset(entry));
+ put_swap_device(si);
+ if (page)
+ return page;
/*
* Just skip read ahead for unused swap slot.
@@ -403,69 +484,69 @@
* else swap_off will be aborted if we return NULL.
*/
if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
- break;
+ return NULL;
/*
- * Get a new page to read into from swap.
+ * Get a new page to read into from swap. Allocate it now,
+ * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
+ * cause any racers to loop around until we add it to cache.
*/
- if (!new_page) {
- new_page = alloc_page_vma(gfp_mask, vma, addr);
- if (!new_page)
- break; /* Out of memory */
- }
-
- /*
- * call radix_tree_preload() while we can wait.
- */
- err = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
- if (err)
- break;
+ page = alloc_page_vma(gfp_mask, vma, addr);
+ if (!page)
+ return NULL;
/*
* Swap entry may have been freed since our caller observed it.
*/
err = swapcache_prepare(entry);
- if (err == -EEXIST) {
- radix_tree_preload_end();
- /*
- * We might race against get_swap_page() and stumble
- * across a SWAP_HAS_CACHE swap_map entry whose page
- * has not been brought into the swapcache yet.
- */
- cond_resched();
- continue;
- }
- if (err) { /* swp entry is obsolete ? */
- radix_tree_preload_end();
+ if (!err)
break;
- }
- /* May fail (-ENOMEM) if radix-tree node allocation failed. */
- __SetPageLocked(new_page);
- __SetPageSwapBacked(new_page);
- err = __add_to_swap_cache(new_page, entry);
- if (likely(!err)) {
- radix_tree_preload_end();
- /*
- * Initiate read into locked page and return.
- */
- SetPageWorkingset(new_page);
- lru_cache_add_anon(new_page);
- *new_page_allocated = true;
- return new_page;
- }
- radix_tree_preload_end();
- __ClearPageLocked(new_page);
+ put_page(page);
+ if (err != -EEXIST)
+ return NULL;
+
/*
- * add_to_swap_cache() doesn't return -EEXIST, so we can safely
- * clear SWAP_HAS_CACHE flag.
+ * We might race against __delete_from_swap_cache(), and
+ * stumble across a swap_map entry whose SWAP_HAS_CACHE
+ * has not yet been cleared. Or race against another
+ * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
+ * in swap_map, but not yet added its page to swap cache.
*/
- put_swap_page(new_page, entry);
- } while (err != -ENOMEM);
+ schedule_timeout_uninterruptible(1);
+ }
- if (new_page)
- put_page(new_page);
- return found_page;
+ /*
+ * The swap entry is ours to swap in. Prepare the new page.
+ */
+
+ __SetPageLocked(page);
+ __SetPageSwapBacked(page);
+
+ /* May fail (-ENOMEM) if XArray node allocation failed. */
+ if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
+ put_swap_page(page, entry);
+ goto fail_unlock;
+ }
+
+ if (mem_cgroup_charge(page, NULL, gfp_mask)) {
+ delete_from_swap_cache(page);
+ goto fail_unlock;
+ }
+
+ if (shadow)
+ workingset_refault(page, shadow);
+
+ /* Caller will initiate read into locked page */
+ SetPageWorkingset(page);
+ lru_cache_add(page);
+ *new_page_allocated = true;
+ return page;
+
+fail_unlock:
+ unlock_page(page);
+ put_page(page);
+ return NULL;
}
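
Note: swapcache_prepare() returning -EEXIST means another task has set
SWAP_HAS_CACHE but has not yet published its page, so the code now
sleeps a tick and retries instead of the old preload/cond_resched()
dance. A userspace analog of this claim-or-back-off shape (hypothetical
single slot, C11 atomics):

    #include <stdatomic.h>
    #include <time.h>

    static _Atomic int slot_claimed;    /* like SWAP_HAS_CACHE */
    static void *_Atomic slot_page;     /* like the swap cache slot */

    /* Returns the published page, or NULL if we won the claim and
     * must populate the slot ourselves. */
    static void *claim_or_wait(void)
    {
            for (;;) {
                    void *page = atomic_load(&slot_page);
                    if (page)
                            return page;        /* other side won */

                    int expected = 0;
                    if (atomic_compare_exchange_strong(&slot_claimed,
                                                       &expected, 1))
                            return NULL;        /* we own the swapin */

                    /* claimed but not yet published: brief back-off */
                    nanosleep(&(struct timespec){ .tv_nsec = 1000000 },
                              NULL);
            }
    }
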
/*
@@ -565,6 +646,10 @@
* the readahead.
*
* Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL.
+ * This is needed to ensure the VMA will not be freed behind our back. In the case
+ * of the speculative page fault handler, this cannot happen, even if we don't
+ * hold the mmap_sem. Callees are assumed to take care of reading VMA's fields
+ * using READ_ONCE() to read consistent values.
*/
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_fault *vmf)
@@ -583,6 +668,13 @@
mask = swapin_nr_pages(offset) - 1;
if (!mask)
goto skip;
+
+ /* Test swap type to make sure the dereference is safe */
+ if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) {
+ struct inode *inode = si->swap_file->f_mapping->host;
+ if (inode_read_congested(inode))
+ goto skip;
+ }
do_poll = false;
/* Read a page_cluster sized and aligned cluster around offset. */
@@ -628,27 +720,23 @@
return -ENOMEM;
for (i = 0; i < nr; i++) {
space = spaces + i;
- INIT_RADIX_TREE(&space->i_pages, GFP_ATOMIC|__GFP_NOWARN);
+ xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
atomic_set(&space->i_mmap_writable, 0);
space->a_ops = &swap_aops;
/* swap cache doesn't use writeback related tags */
mapping_set_no_writeback_tags(space);
}
nr_swapper_spaces[type] = nr;
- rcu_assign_pointer(swapper_spaces[type], spaces);
+ swapper_spaces[type] = spaces;
return 0;
}
void exit_swap_address_space(unsigned int type)
{
- struct address_space *spaces;
-
- spaces = swapper_spaces[type];
+ kvfree(swapper_spaces[type]);
nr_swapper_spaces[type] = 0;
- rcu_assign_pointer(swapper_spaces[type], NULL);
- synchronize_rcu();
- kvfree(spaces);
+ swapper_spaces[type] = NULL;
}
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
@@ -658,9 +746,9 @@
unsigned long *start,
unsigned long *end)
{
- *start = max3(lpfn, PFN_DOWN(vma->vm_start),
+ *start = max3(lpfn, PFN_DOWN(READ_ONCE(vma->vm_start)),
PFN_DOWN(faddr & PMD_MASK));
- *end = min3(rpfn, PFN_DOWN(vma->vm_end),
+ *end = min3(rpfn, PFN_DOWN(READ_ONCE(vma->vm_end)),
PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}
@@ -732,6 +820,20 @@
pte_unmap(orig_pte);
}
+/**
+ * swap_vma_readahead - swap in pages in hope we need them soon
+ * @fentry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
+ * @vmf: fault information
+ *
+ * Returns the struct page for entry and addr, after queueing swapin.
+ *
+ * Primitive swap readahead code. We simply read in a few pages whose
+ * virtual addresses are around the fault address in the same vma.
+ *
+ * Caller must hold read mmap_lock if vmf->vma is not NULL.
+ *
+ */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
--
Gitblit v1.6.2