From 244b2c5ca8b14627e4a17755e5922221e121c771 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 09 Oct 2024 06:15:07 +0000
Subject: [PATCH] net: page_pool: add DMA-sync-for-device and in-flight page accounting APIs
---
kernel/include/net/page_pool.h | 123 +++++++++++++++++++++++++++++++++++------
1 file changed, 105 insertions(+), 18 deletions(-)
diff --git a/kernel/include/net/page_pool.h b/kernel/include/net/page_pool.h
index 694d055..6c46995 100644
--- a/kernel/include/net/page_pool.h
+++ b/kernel/include/net/page_pool.h
@@ -16,14 +16,16 @@
* page_pool_alloc_pages() call. Drivers should likely use
* page_pool_dev_alloc_pages() replacing dev_alloc_pages().
*
- * If page_pool handles DMA mapping (use page->private), then API user
- * is responsible for invoking page_pool_put_page() once. In-case of
- * elevated refcnt, the DMA state is released, assuming other users of
- * the page will eventually call put_page().
+ * The API keeps track of in-flight pages, in order to let API users
+ * know when it is safe to deallocate a page_pool object. Thus, API
+ * users must call page_pool_release_page() when a page "leaves" the
+ * page_pool, or call page_pool_put_page() where appropriate, to
+ * maintain correct accounting.
*
- * If no DMA mapping is done, then it can act as shim-layer that
- * fall-through to alloc_page. As no state is kept on the page, the
- * regular put_page() call is sufficient.
+ * API users must call page_pool_put_page() only once per page: it
+ * will either recycle the page or, in case of an elevated refcnt,
+ * release the DMA mapping and in-flight state accounting. We
+ * hope to lift this requirement in the future.
*/
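
For illustration, a minimal RX refill/return sketch of the contract above, assuming a hypothetical driver queue struct (rxq) whose pool was created elsewhere; page_pool_put_full_page() and page_pool_release_page() are the helpers introduced further down in this patch:

	struct page *page;

	page = page_pool_dev_alloc_pages(rxq->pool);
	if (!page)
		return -ENOMEM;
	/* ... hand the buffer to HW; later, on completion, either ... */

	/* (a) keep the page under pool accounting and recycle it: */
	page_pool_put_full_page(rxq->pool, page, false);

	/* (b) or let the page "leave" the pool (e.g. into an SKB);
	 * drop its DMA mapping and in-flight accounting first:
	 */
	page_pool_release_page(rxq->pool, page);
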
#ifndef _NET_PAGE_POOL_H
#define _NET_PAGE_POOL_H
@@ -31,9 +33,20 @@
#include <linux/mm.h> /* Needed by ptr_ring */
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>
+#include <linux/android_kabi.h>
-#define PP_FLAG_DMA_MAP 1 /* Should page_pool do the DMA map/unmap */
-#define PP_FLAG_ALL PP_FLAG_DMA_MAP
+#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
+ * map/unmap
+ */
+#define PP_FLAG_DMA_SYNC_DEV BIT(1) /* If set all pages that the driver gets
+ * from page_pool will be
+ * DMA-synced-for-device according to
+ * the length provided by the device
+ * driver.
+ * Please note DMA-sync-for-CPU is still
+ * the device driver's responsibility.
+ */
+#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
/*
* Fast allocation side cache array/stack
@@ -63,11 +76,19 @@
int nid; /* NUMA node id to allocate pages from */
struct device *dev; /* device, for DMA pre-mapping purposes */
enum dma_data_direction dma_dir; /* DMA mapping direction */
+ unsigned int max_len; /* max DMA sync memory size */
+ unsigned int offset; /* DMA addr offset */
};
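
A hedged sketch of filling these parameters for a pool that both maps and syncs for device; .flags, .order and .pool_size live in the part of struct page_pool_params not shown in this hunk, and the dev pointer and size values are illustrative:

	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 256,
		.nid		= dev_to_node(dev),
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,	/* sync at most one page */
		.offset		= 0,		/* no headroom before data */
	};
	struct page_pool *pool = page_pool_create(&pp_params);

	if (IS_ERR(pool))
		return PTR_ERR(pool);
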
struct page_pool {
- struct rcu_head rcu;
struct page_pool_params p;
+
+ struct delayed_work release_dw;
+ void (*disconnect)(void *);
+ unsigned long defer_start;
+ unsigned long defer_warn;
+
+ u32 pages_state_hold_cnt;
/*
* Data structure for allocation side
@@ -96,6 +117,18 @@
* TODO: Implement bulk return pages into this structure.
*/
struct ptr_ring ring;
+
+ atomic_t pages_state_release_cnt;
+
+ /* A page_pool is strictly tied to a single RX-queue being
+ * protected by NAPI, due to the above pp_alloc_cache. This
+ * refcnt exists to simplify drivers' error handling.
+ */
+ refcount_t user_cnt;
+
+ u64 destroy_cnt;
+
+ ANDROID_KABI_RESERVE(1);
};
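
The hold/release counter pair above implements the in-flight tracking promised in the file-head comment: pages_state_hold_cnt advances as pages are handed out, pages_state_release_cnt as they come back. A sketch of the derived quantity (the real computation lives in page_pool.c; the function name here is illustrative):

	static inline u32 page_pool_inflight_sketch(struct page_pool *pool)
	{
		/* Unsigned wrap-around keeps this correct even after
		 * the counters overflow, as long as their distance
		 * stays small.
		 */
		return pool->pages_state_hold_cnt -
		       atomic_read(&pool->pages_state_release_cnt);
	}
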
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
@@ -107,29 +140,71 @@
return page_pool_alloc_pages(pool, gfp);
}
+/* Get the stored DMA direction. A driver might cache this locally and
+ * avoid the extra cache-line access into page_pool on the fast path.
+ */
+static
+inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
+{
+ return pool->p.dma_dir;
+}
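
As the comment suggests, a driver can sample the direction once at ring setup and keep it in its own hot-path structure; a sketch, with rxq, dma_addr and pkt_len being hypothetical driver state:

	/* setup path */
	rxq->dma_dir = page_pool_get_dma_dir(rxq->pool);

	/* hot path: sync for CPU without touching struct page_pool */
	dma_sync_single_for_cpu(rxq->dev, dma_addr, pkt_len, rxq->dma_dir);
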
+
struct page_pool *page_pool_create(const struct page_pool_params *params);
+#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
+void page_pool_release_page(struct page_pool *pool, struct page *page);
+#else
+static inline void page_pool_destroy(struct page_pool *pool)
+{
+}
-/* Never call this directly, use helpers below */
-void __page_pool_put_page(struct page_pool *pool,
- struct page *page, bool allow_direct);
+static inline void page_pool_use_xdp_mem(struct page_pool *pool,
+ void (*disconnect)(void *))
+{
+}
+static inline void page_pool_release_page(struct page_pool *pool,
+ struct page *page)
+{
+}
+#endif
-static inline void page_pool_put_page(struct page_pool *pool,
- struct page *page, bool allow_direct)
+void page_pool_put_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size, bool allow_direct);
+
+/* Same as above, but tries to sync the entire area pool->p.max_len */
+static inline void page_pool_put_full_page(struct page_pool *pool,
+ struct page *page, bool allow_direct)
{
/* When page_pool isn't compiled-in, net/core/xdp.c doesn't allow
 * registering MEM_TYPE_PAGE_POOL; this #ifdef shields the linker.
 */
#ifdef CONFIG_PAGE_POOL
- __page_pool_put_page(pool, page, allow_direct);
+ page_pool_put_page(pool, page, -1, allow_direct);
#endif
}
-/* Very limited use-cases allow recycle direct */
+
+/* Same as above, but the caller must guarantee a safe context, e.g. NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
struct page *page)
{
- __page_pool_put_page(pool, page, true);
+ page_pool_put_full_page(pool, page, true);
+}
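
The three return helpers differ only in how much is DMA-synced and what context they require; a sketch of an XDP verdict handler in a hypothetical driver's NAPI poll loop:

	switch (act) {
	case XDP_DROP:
		/* NAPI context is guaranteed here, so recycle straight
		 * into the lockless cache; syncs pool->p.max_len bytes.
		 */
		page_pool_recycle_direct(rxq->pool, page);
		break;
	default:
		/* Only the region that was dirtied needs re-syncing
		 * for device; pass the frame length explicitly.
		 */
		page_pool_put_page(rxq->pool, page, pkt_len, false);
		break;
	}
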
+
+static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
+{
+ dma_addr_t ret = page->dma_addr[0];
+
+ /* "<< 16 << 16" rather than "<< 32" avoids a shift-count
+ * warning when dma_addr_t is only 32 bits wide and this
+ * branch is dead code.
+ */
+ if (sizeof(dma_addr_t) > sizeof(unsigned long))
+ ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
+ return ret;
+}
+
+static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+ page->dma_addr[0] = addr;
+ if (sizeof(dma_addr_t) > sizeof(unsigned long))
+ page->dma_addr[1] = upper_32_bits(addr);
}
static inline bool is_page_pool_compiled_in(void)
@@ -141,4 +216,16 @@
#endif
}
+static inline bool page_pool_put(struct page_pool *pool)
+{
+ return refcount_dec_and_test(&pool->user_cnt);
+}
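
user_cnt is what lets both the driver and the XDP memory-model registration hold the pool, so teardown can be unconditional. A sketch of the error-handling pattern this enables, assuming a hypothetical rxq with an xdp_rxq_info member:

	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
					 MEM_TYPE_PAGE_POOL, pool);
	if (err) {
		/* Safe even if registration took a reference:
		 * page_pool_destroy() frees only once user_cnt
		 * drops to zero.
		 */
		page_pool_destroy(pool);
		return err;
	}
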
+
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid);
+static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
+{
+ if (unlikely(pool->p.nid != new_nid))
+ page_pool_update_nid(pool, new_nid);
+}
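
A sketch of the intended call site: the top of a driver's NAPI poll, so that recycled pages follow the CPU/node now servicing the queue (the mydrv_* names are hypothetical):

	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
	{
		struct mydrv_rxq *rxq = container_of(napi, struct mydrv_rxq, napi);

		/* Cheap compare in the common case; the update path
		 * runs only when the queue moved to another NUMA node.
		 */
		page_pool_nid_changed(rxq->pool, numa_mem_id());

		/* ... process up to budget packets ... */
		return 0;
	}
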
#endif /* _NET_PAGE_POOL_H */
--
Gitblit v1.6.2