2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/include/net/page_pool.h
@@ -16,14 +16,16 @@
  * page_pool_alloc_pages() call. Drivers should likely use
  * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
  *
- * If page_pool handles DMA mapping (use page->private), then API user
- * is responsible for invoking page_pool_put_page() once. In-case of
- * elevated refcnt, the DMA state is released, assuming other users of
- * the page will eventually call put_page().
+ * The API keeps track of in-flight pages, in order to let API users know
+ * when it is safe to deallocate the page_pool object. Thus, API users
+ * must make sure to call page_pool_release_page() when a page is
+ * "leaving" the page_pool, or call page_pool_put_page() where
+ * appropriate, in order to maintain correct accounting.
  *
- * If no DMA mapping is done, then it can act as shim-layer that
- * fall-through to alloc_page. As no state is kept on the page, the
- * regular put_page() call is sufficient.
+ * API users must only call page_pool_put_page() once per page, as it
+ * will either recycle the page, or in case of an elevated refcnt, it
+ * will release the DMA mapping and in-flight state accounting. We
+ * hope to lift this requirement in the future.
  */
 #ifndef _NET_PAGE_POOL_H
 #define _NET_PAGE_POOL_H
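To make the accounting rules above concrete: a minimal sketch of a driver RX path, assuming the usual driver includes (<linux/netdevice.h> plus this header). The mydrv_* names are hypothetical glue invented for the example; only the page_pool_*() calls and napi_gro_receive() are real kernel API.

struct sk_buff *mydrv_build_skb(struct page *page, unsigned int len); /* hypothetical */

static void mydrv_rx_one(struct page_pool *pool, struct napi_struct *napi,
			 struct page *page, unsigned int len)
{
	/* mydrv_build_skb() stands in for whatever the driver uses to
	 * wrap the page data in an skb.
	 */
	struct sk_buff *skb = mydrv_build_skb(page, len);

	if (unlikely(!skb)) {
		/* Dropping: return the page exactly once.
		 * dma_sync_size == -1 asks for a sync of the full
		 * pool->p.max_len area; allow_direct is true because
		 * this runs in NAPI context.
		 */
		page_pool_put_page(pool, page, -1, true);
		return;
	}

	/* The page "leaves" the page_pool together with the skb, so
	 * release the DMA mapping and in-flight accounting first.
	 */
	page_pool_release_page(pool, page);
	napi_gro_receive(napi, skb);
}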
@@ -31,9 +33,20 @@
 #include <linux/mm.h> /* Needed by ptr_ring */
 #include <linux/ptr_ring.h>
 #include <linux/dma-direction.h>
+#include <linux/android_kabi.h>

-#define PP_FLAG_DMA_MAP		1 /* Should page_pool do the DMA map/unmap */
-#define PP_FLAG_ALL		PP_FLAG_DMA_MAP
+#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
+					* map/unmap
+					*/
+#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
+					* from page_pool will be
+					* DMA-synced-for-device according to
+					* the length provided by the device
+					* driver.
+					* Please note DMA-sync-for-CPU is still
+					* device driver responsibility
+					*/
+#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)

 /*
  * Fast allocation side cache array/stack
@@ -63,11 +76,19 @@
 	int		nid;  /* Numa node id to allocate from pages from */
 	struct device	*dev; /* device, for DMA pre-mapping purposes */
 	enum dma_data_direction dma_dir; /* DMA mapping direction */
+	unsigned int	max_len; /* max DMA sync memory size */
+	unsigned int	offset;  /* DMA addr offset */
 };

 struct page_pool {
-	struct rcu_head rcu;
 	struct page_pool_params p;
+
+	struct delayed_work release_dw;
+	void (*disconnect)(void *);
+	unsigned long defer_start;
+	unsigned long defer_warn;
+
+	u32 pages_state_hold_cnt;

 	/*
 	 * Data structure for allocation side
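As a rough illustration of how the new parameters fit together, the sketch below creates a pool that both DMA-maps pages and syncs them for the device on recycle. The mydrv_create_pool() wrapper and its arguments are made up; only struct page_pool_params, the PP_FLAG_* bits and page_pool_create() come from this header.

static struct page_pool *mydrv_create_pool(struct device *dev, int nid,
					   unsigned int pool_size,
					   unsigned int headroom)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,			/* one page per RX buffer */
		.pool_size	= pool_size,		/* typically the RX ring size */
		.nid		= nid,			/* node the RX queue runs on */
		.dev		= dev,			/* used for DMA map/sync */
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= headroom,		/* where packet data starts */
		.max_len	= PAGE_SIZE - headroom,	/* largest area synced for device */
	};

	/* Returns an ERR_PTR() on failure. */
	return page_pool_create(&pp_params);
}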
@@ -96,6 +117,18 @@
 	 * TODO: Implement bulk return pages into this structure.
 	 */
 	struct ptr_ring ring;
+
+	atomic_t pages_state_release_cnt;
+
+	/* A page_pool is strictly tied to a single RX-queue being
+	 * protected by NAPI, due to the above pp_alloc_cache. This
+	 * refcnt serves the purpose of simplifying drivers' error handling.
+	 */
+	refcount_t user_cnt;
+
+	u64 destroy_cnt;
+
+	ANDROID_KABI_RESERVE(1);
 };

 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
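A short sketch of the allocation side: refilling one RX descriptor from the pool. struct mydrv_rx_desc and the refill helper are invented for illustration; page_pool_dev_alloc_pages() and page_pool_get_dma_addr() (defined further down in this header) are the real API.

struct mydrv_rx_desc {			/* hypothetical HW descriptor mirror */
	dma_addr_t	addr;
	struct page	*page;
};

static int mydrv_refill_one(struct page_pool *pool, struct mydrv_rx_desc *desc)
{
	struct page *page;

	/* page_pool_dev_alloc_pages() uses GFP_ATOMIC | __GFP_NOWARN,
	 * so it is safe to call from the NAPI poll loop.
	 */
	page = page_pool_dev_alloc_pages(pool);
	if (unlikely(!page))
		return -ENOMEM;

	/* With PP_FLAG_DMA_MAP the pool already mapped the page and
	 * stored the address in struct page; just add the data offset.
	 */
	desc->addr = page_pool_get_dma_addr(page) + pool->p.offset;
	desc->page = page;
	return 0;
}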
@@ -107,29 +140,71 @@
 	return page_pool_alloc_pages(pool, gfp);
 }

+/* Get the stored DMA direction. A driver might decide to treat this locally and
+ * avoid the extra cache line from page_pool to determine the direction.
+ */
+static
+inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
+{
+	return pool->p.dma_dir;
+}
+
 struct page_pool *page_pool_create(const struct page_pool_params *params);

+#ifdef CONFIG_PAGE_POOL
 void page_pool_destroy(struct page_pool *pool);
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
+void page_pool_release_page(struct page_pool *pool, struct page *page);
+#else
+static inline void page_pool_destroy(struct page_pool *pool)
+{
+}

-/* Never call this directly, use helpers below */
-void __page_pool_put_page(struct page_pool *pool,
-			  struct page *page, bool allow_direct);
+static inline void page_pool_use_xdp_mem(struct page_pool *pool,
+					 void (*disconnect)(void *))
+{
+}
+static inline void page_pool_release_page(struct page_pool *pool,
+					  struct page *page)
+{
+}
+#endif

-static inline void page_pool_put_page(struct page_pool *pool,
-				      struct page *page, bool allow_direct)
+void page_pool_put_page(struct page_pool *pool, struct page *page,
+			unsigned int dma_sync_size, bool allow_direct);
+
+/* Same as above but will try to sync the entire area pool->max_len */
+static inline void page_pool_put_full_page(struct page_pool *pool,
+					   struct page *page, bool allow_direct)
 {
 	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
 	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
 	 */
 #ifdef CONFIG_PAGE_POOL
-	__page_pool_put_page(pool, page, allow_direct);
+	page_pool_put_page(pool, page, -1, allow_direct);
 #endif
 }
-/* Very limited use-cases allow recycle direct */
+
+/* Same as above but the caller must guarantee safe context, e.g. NAPI */
 static inline void page_pool_recycle_direct(struct page_pool *pool,
 					    struct page *page)
 {
-	__page_pool_put_page(pool, page, true);
+	page_pool_put_full_page(pool, page, true);
+}
+
+static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
+{
+	dma_addr_t ret = page->dma_addr[0];
+	if (sizeof(dma_addr_t) > sizeof(unsigned long))
+		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
+	return ret;
+}
+
+static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+	page->dma_addr[0] = addr;
+	if (sizeof(dma_addr_t) > sizeof(unsigned long))
+		page->dma_addr[1] = upper_32_bits(addr);
 }

 static inline bool is_page_pool_compiled_in(void)
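The put/recycle helpers above differ mainly in how much of the page gets re-synced for the device. A hedged sketch, assuming an XDP_DROP-style path running in NAPI context (the mydrv_ names are illustrative):

static void mydrv_xdp_drop(struct page_pool *pool, struct page *page,
			   unsigned int frame_len)
{
	/* Only frame_len bytes were touched, so only that much needs a
	 * DMA sync for the device before the page is reused;
	 * allow_direct is true because this runs under NAPI.
	 */
	page_pool_put_page(pool, page, frame_len, true);
}

static void mydrv_xdp_drop_full(struct page_pool *pool, struct page *page)
{
	/* Equivalent, but syncs up to pool->p.max_len. */
	page_pool_recycle_direct(pool, page);
}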
@@ -141,4 +216,16 @@
 #endif
 }

+static inline bool page_pool_put(struct page_pool *pool)
+{
+	return refcount_dec_and_test(&pool->user_cnt);
+}
+
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid);
+static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
+{
+	if (unlikely(pool->p.nid != new_nid))
+		page_pool_update_nid(pool, new_nid);
+}
 #endif /* _NET_PAGE_POOL_H */
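Finally, page_pool_nid_changed() is meant to be cheap enough to call on every NAPI poll, so allocations follow the CPU a queue has migrated to. A sketch assuming a hypothetical per-queue struct; mydrv_rxq and mydrv_process_rx() are not real symbols, while page_pool_nid_changed(), numa_mem_id() and napi_complete_done() are.

struct mydrv_rxq {			/* hypothetical per-RX-queue state */
	struct napi_struct	napi;
	struct page_pool	*page_pool;
};

static int mydrv_process_rx(struct mydrv_rxq *rxq, int budget); /* hypothetical RX loop */

static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_rxq *rxq = container_of(napi, struct mydrv_rxq, napi);
	int done;

	/* Cheap comparison; only takes the page_pool_update_nid() slow
	 * path when this queue's NAPI moved to another NUMA node.
	 */
	page_pool_nid_changed(rxq->page_pool, numa_mem_id());

	done = mydrv_process_rx(rxq, budget);
	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}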