From 244b2c5ca8b14627e4a17755e5922221e121c771 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 09 Oct 2024 06:15:07 +0000
Subject: [PATCH] mm/truncate: convert to XArray and add invalidate_mapping_pagevec()
---
kernel/mm/truncate.c | 104 ++++++++++++++++++++++++++++++++++-----------------
1 file changed, 69 insertions(+), 35 deletions(-)
diff --git a/kernel/mm/truncate.c b/kernel/mm/truncate.c
index 43c73db..8914ca4 100644
--- a/kernel/mm/truncate.c
+++ b/kernel/mm/truncate.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* mm/truncate.c - code for taking down pages from address_spaces
*
@@ -33,15 +34,12 @@
static inline void __clear_shadow_entry(struct address_space *mapping,
pgoff_t index, void *entry)
{
- struct radix_tree_node *node;
- void **slot;
+ XA_STATE(xas, &mapping->i_pages, index);
- if (!__radix_tree_lookup(&mapping->i_pages, index, &node, &slot))
+ xas_set_update(&xas, workingset_update_node);
+ if (xas_load(&xas) != entry)
return;
- if (*slot != entry)
- return;
- __radix_tree_replace(&mapping->i_pages, node, slot, NULL,
- workingset_update_node);
+ xas_store(&xas, NULL);
mapping->nrexceptional--;
}
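
Note on the conversion above: the old code walked the radix-tree internals in
three steps (lookup, compare, replace), while the XArray advanced API folds
this into an on-stack cursor (XA_STATE); xas_set_update() re-attaches the
workingset_update_node callback that __radix_tree_replace() used to take as an
argument. A minimal sketch of the same compare-and-erase shape, assuming the
caller already holds the xa_lock (as clear_shadow_entry() does via the i_pages
lock); erase_if_matches() is a hypothetical name, not part of this patch:

	/* Hypothetical illustration only; not part of the patch. */
	static void erase_if_matches(struct xarray *xa, unsigned long index,
				     void *entry)
	{
		XA_STATE(xas, xa, index);	/* on-stack cursor at @index */

		if (xas_load(&xas) != entry)	/* raced with a replacement: bail */
			return;
		xas_store(&xas, NULL);		/* erase; storing NULL cannot allocate */
	}
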
@@ -70,7 +68,7 @@
return;
for (j = 0; j < pagevec_count(pvec); j++)
- if (radix_tree_exceptional_entry(pvec->pages[j]))
+ if (xa_is_value(pvec->pages[j]))
break;
if (j == pagevec_count(pvec))
@@ -85,7 +83,7 @@
struct page *page = pvec->pages[i];
pgoff_t index = indices[i];
- if (!radix_tree_exceptional_entry(page)) {
+ if (!xa_is_value(page)) {
pvec->pages[j++] = page;
continue;
}
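
Every radix_tree_exceptional_entry() test in this file becomes xa_is_value():
the XArray stores non-page entries (shadow, swap and DAX entries) as tagged
"value" entries with bit 0 set, so they can never alias a struct page pointer.
A small sketch of the encoding, using only helpers from <linux/xarray.h>;
xa_value_demo() is an illustrative name, not part of this patch:

	static void xa_value_demo(void)
	{
		void *v = xa_mk_value(42);	/* encoded as (42UL << 1) | 1 */

		WARN_ON(!xa_is_value(v));	/* bit 0 set => value entry */
		WARN_ON(xa_to_value(v) != 42);	/* decodes back to 42 */
	}
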
@@ -170,7 +168,7 @@
* becomes orphaned. It will be left on the LRU and may even be mapped into
* user pagetables if we're racing with filemap_fault().
*
- * We need to bale out if page->mapping is no longer equal to the original
+ * We need to bail out if page->mapping is no longer equal to the original
* mapping. This happens a) when the VM reclaimed the page while we waited on
* its lock, b) when a concurrent invalidate_mapping_pages got there first and
* c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
@@ -181,7 +179,7 @@
unmap_mapping_page(page);
if (page_has_private(page))
- do_invalidatepage(page, 0, PAGE_SIZE);
+ do_invalidatepage(page, 0, thp_size(page));
/*
* Some filesystems seem to re-dirty the page even after
@@ -344,7 +342,7 @@
if (index >= end)
break;
- if (radix_tree_exceptional_entry(page))
+ if (xa_is_value(page))
continue;
if (!trylock_page(page))
@@ -439,7 +437,7 @@
break;
}
- if (radix_tree_exceptional_entry(page))
+ if (xa_is_value(page))
continue;
lock_page(page);
@@ -527,21 +525,8 @@
}
EXPORT_SYMBOL(truncate_inode_pages_final);
-/**
- * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
- * @mapping: the address_space which holds the pages to invalidate
- * @start: the offset 'from' which to invalidate
- * @end: the offset 'to' which to invalidate (inclusive)
- *
- * This function only removes the unlocked pages, if you want to
- * remove all the pages of one inode, you must call truncate_inode_pages.
- *
- * invalidate_mapping_pages() will not block on IO activity. It will not
- * invalidate pages which are dirty, locked, under writeback or mapped into
- * pagetables.
- */
-unsigned long invalidate_mapping_pages(struct address_space *mapping,
- pgoff_t start, pgoff_t end)
+static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
+ pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
{
pgoff_t indices[PAGEVEC_SIZE];
struct pagevec pvec;
@@ -562,7 +547,7 @@
if (index > end)
break;
- if (radix_tree_exceptional_entry(page)) {
+ if (xa_is_value(page)) {
invalidate_exceptional_entry(mapping, index,
page);
continue;
@@ -589,6 +574,16 @@
unlock_page(page);
continue;
}
+
+ /* Take a pin outside pagevec */
+ get_page(page);
+
+ /*
+ * Drop extra pins before trying to invalidate
+ * the huge page.
+ */
+ pagevec_remove_exceptionals(&pvec);
+ pagevec_release(&pvec);
}
ret = invalidate_inode_page(page);
@@ -597,8 +592,15 @@
* Invalidation is a hint that the page is no longer
* of interest and try to speed up its reclaim.
*/
- if (!ret)
+ if (!ret) {
deactivate_file_page(page);
+ /* It is likely on the pagevec of a remote CPU */
+ if (nr_pagevec)
+ (*nr_pagevec)++;
+ }
+
+ if (PageTransHuge(page))
+ put_page(page);
count += ret;
}
pagevec_remove_exceptionals(&pvec);
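
The pinning added above is what lets invalidate_inode_page() succeed on a
transparent huge page: that function bails out when a page has unexpected
extra references, and pages held in the local pagevec carry exactly such
references. The shape of the sequence, condensed from the hunks above (control
flow only, not a literal excerpt):

	if (PageTransHuge(page)) {
		/* Take our own pin, then drop the pagevec's extra pins. */
		get_page(page);
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
	}

	ret = invalidate_inode_page(page);	/* refcount check can now pass */

	if (PageTransHuge(page))
		put_page(page);			/* drop our private pin again */
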
@@ -608,7 +610,39 @@
}
return count;
}
+
+/**
+ * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
+ * @mapping: the address_space which holds the pages to invalidate
+ * @start: the offset 'from' which to invalidate
+ * @end: the offset 'to' which to invalidate (inclusive)
+ *
+ * This function only removes the unlocked pages; if you want to
+ * remove all the pages of one inode, you must call truncate_inode_pages.
+ *
+ * invalidate_mapping_pages() will not block on IO activity. It will not
+ * invalidate pages which are dirty, locked, under writeback or mapped into
+ * pagetables.
+ *
+ * Return: the number of pages that were invalidated
+ */
+unsigned long invalidate_mapping_pages(struct address_space *mapping,
+ pgoff_t start, pgoff_t end)
+{
+ return __invalidate_mapping_pages(mapping, start, end, NULL);
+}
EXPORT_SYMBOL(invalidate_mapping_pages);
+
+/**
+ * This helper is similar to invalidate_mapping_pages(), except that it
+ * accounts for pages that are likely on a pagevec and counts them in
+ * @nr_pagevec, which will be used by the caller.
+ */
+void invalidate_mapping_pagevec(struct address_space *mapping,
+ pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
+{
+ __invalidate_mapping_pages(mapping, start, end, nr_pagevec);
+}
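
The split into __invalidate_mapping_pages() exists so a caller can learn how
many pages failed to invalidate merely because they sat on some CPU's LRU
pagevec, and pay for a drain only when that actually happened. A hedged sketch
of the intended call pattern, modelled on a POSIX_FADV_DONTNEED-style caller
(mapping, start_index and end_index are illustrative names):

	unsigned long nr_pagevec = 0;

	/* Cheap first pass; counts pages that look pagevec-pinned. */
	invalidate_mapping_pagevec(mapping, start_index, end_index, &nr_pagevec);
	if (nr_pagevec) {
		lru_add_drain_all();	/* flush every CPU's pagevecs... */
		/* ...then retry the pages that were skipped. */
		invalidate_mapping_pages(mapping, start_index, end_index);
	}
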
/*
* This is like invalidate_complete_page(), except it ignores the page's
@@ -664,7 +698,7 @@
* Any pages which are found to be mapped into pagetables are unmapped prior to
* invalidation.
*
- * Returns -EBUSY if any pages could not be invalidated.
+ * Return: -EBUSY if any pages could not be invalidated.
*/
int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end)
@@ -693,7 +727,7 @@
if (index > end)
break;
- if (radix_tree_exceptional_entry(page)) {
+ if (xa_is_value(page)) {
if (!invalidate_exceptional_entry2(mapping,
index, page))
ret = -EBUSY;
@@ -737,10 +771,10 @@
index++;
}
/*
- * For DAX we invalidate page tables after invalidating radix tree. We
+ * For DAX we invalidate page tables after invalidating page cache. We
* could invalidate page tables while invalidating each entry however
* that would be expensive. And doing range unmapping before doesn't
- * work as we have no cheap way to find whether radix tree entry didn't
+ * work as we have no cheap way to find whether page cache entry didn't
* get remapped later.
*/
if (dax_mapping(mapping)) {
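
The reworded comment captures an ordering constraint, not just the rename:
page-cache entries must be invalidated before the page tables are swept,
because unmapping first would leave no cheap way to notice an entry that got
faulted back in meanwhile. The call that follows sits outside this hunk; on a
5.x-era truncate.c it is assumed to be a single range unmap, roughly:

	/* Assumption about the body elided by the hunk boundary: */
	unmap_mapping_pages(mapping, start, end - start + 1, false);
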
@@ -759,7 +793,7 @@
* Any pages which are found to be mapped into pagetables are unmapped prior to
* invalidation.
*
- * Returns -EBUSY if any pages could not be invalidated.
+ * Return: -EBUSY if any pages could not be invalidated.
*/
int invalidate_inode_pages2(struct address_space *mapping)
{
--
Gitblit v1.6.2