@@ -16,14 +16,16 @@
 * page_pool_alloc_pages() call. Drivers should likely use
 * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
 *
- * If page_pool handles DMA mapping (use page->private), then API user
- * is responsible for invoking page_pool_put_page() once. In-case of
- * elevated refcnt, the DMA state is released, assuming other users of
- * the page will eventually call put_page().
+ * The API keeps track of in-flight pages, in order to let API users
+ * know when it is safe to deallocate a page_pool object. Thus, API
+ * users must make sure to call page_pool_release_page() when a page
+ * is "leaving" the page_pool, or call page_pool_put_page() where
+ * appropriate, in order to maintain correct accounting.
 *
- * If no DMA mapping is done, then it can act as shim-layer that
- * fall-through to alloc_page. As no state is kept on the page, the
- * regular put_page() call is sufficient.
+ * An API user must call page_pool_put_page() only once on a page, as
+ * it will either recycle the page, or, in case of an elevated refcnt,
+ * release the DMA mapping and in-flight state accounting. We hope to
+ * lift this requirement in the future.
 */
#ifndef _NET_PAGE_POOL_H
#define _NET_PAGE_POOL_H
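To make the accounting rules in the comment above concrete, here is a rough usage sketch. It is not part of the patch: the mydrv_* names and the mydrv_rxq struct are hypothetical, and only the page_pool_* helpers and the generic sk_buff/NAPI calls are real kernel APIs.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>

struct mydrv_rxq {			/* hypothetical per-RX-queue driver state */
	struct napi_struct napi;
	struct page_pool *page_pool;
	unsigned int rx_offset;		/* where packet data starts in the page */
};

static void mydrv_rx_frame(struct mydrv_rxq *rxq, struct page *page,
			   unsigned int len)
{
	struct sk_buff *skb = napi_alloc_skb(&rxq->napi, 0);

	if (unlikely(!skb)) {
		/* Drop path: hand the page back to the pool exactly once. */
		page_pool_recycle_direct(rxq->page_pool, page);
		return;
	}

	/* The page is "leaving" the pool for the network stack: release
	 * the DMA mapping and in-flight accounting now, and let the
	 * stack's eventual put_page() free the page as usual.
	 * (Header pull / eth_type_trans etc. are omitted; this sketch
	 * only shows the page accounting.)
	 */
	page_pool_release_page(rxq->page_pool, page);
	skb_add_rx_frag(skb, 0, page, rxq->rx_offset, len, PAGE_SIZE);
	napi_gro_receive(&rxq->napi, skb);
}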
@@ -31,9 +33,20 @@
#include <linux/mm.h> /* Needed by ptr_ring */
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>
+#include <linux/android_kabi.h>

-#define PP_FLAG_DMA_MAP 1 /* Should page_pool do the DMA map/unmap */
-#define PP_FLAG_ALL PP_FLAG_DMA_MAP
+#define PP_FLAG_DMA_MAP		BIT(0)	/* Should page_pool do the DMA
+					 * map/unmap
+					 */
+#define PP_FLAG_DMA_SYNC_DEV	BIT(1)	/* If set, all pages that the driver gets
+					 * from page_pool will be
+					 * DMA-synced-for-device according to
+					 * the length provided by the device
+					 * driver.
+					 * Please note DMA-sync-for-CPU is still
+					 * the device driver's responsibility.
+					 */
+#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)

/*
 * Fast allocation side cache array/stack
@@ -63,11 +76,19 @@
	int nid; /* Numa node id to allocate from pages from */
	struct device *dev; /* device, for DMA pre-mapping purposes */
	enum dma_data_direction dma_dir; /* DMA mapping direction */
+	unsigned int max_len; /* max DMA sync memory size */
+	unsigned int offset;  /* DMA addr offset */
};

struct page_pool {
-	struct rcu_head rcu;
	struct page_pool_params p;
+
+	struct delayed_work release_dw;
+	void (*disconnect)(void *);
+	unsigned long defer_start;
+	unsigned long defer_warn;
+
+	u32 pages_state_hold_cnt;

	/*
	 * Data structure for allocation side
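As an illustration of how the two new fields pair with PP_FLAG_DMA_SYNC_DEV, here is a sketch that is not part of the patch: the mydrv_ name and the 256-entry pool size are invented, and flags/order/pool_size are fields of struct page_pool_params that sit above the lines shown in this hunk.

#include <linux/bpf.h>		/* XDP_PACKET_HEADROOM */
#include <net/page_pool.h>

static struct page_pool *mydrv_create_rx_pool(struct device *dma_dev)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,			/* one page per buffer */
		.pool_size	= 256,			/* arbitrary example size */
		.nid		= NUMA_NO_NODE,
		.dev		= dma_dev,		/* device doing the DMA mapping */
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= XDP_PACKET_HEADROOM,	/* DMA addr offset: start of packet data */
		.max_len	= PAGE_SIZE - XDP_PACKET_HEADROOM, /* upper bound to sync-for-device */
	};

	/* page_pool_create() returns an ERR_PTR() on failure. */
	return page_pool_create(&pp_params);
}

With this setup the core only syncs-for-device up to max_len bytes starting at offset when a page is recycled, and a driver that knows the exact received length can narrow that further via the dma_sync_size argument of page_pool_put_page() shown later in this diff.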
@@ -96,6 +117,18 @@
	 * TODO: Implement bulk return pages into this structure.
	 */
	struct ptr_ring ring;
+
+	atomic_t pages_state_release_cnt;
+
+	/* A page_pool is strictly tied to a single RX-queue being
+	 * protected by NAPI, due to the above pp_alloc_cache. This
+	 * refcnt's purpose is to simplify drivers' error handling.
+	 */
+	refcount_t user_cnt;
+
+	u64 destroy_cnt;
+
+	ANDROID_KABI_RESERVE(1);
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
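Purely as a reading aid for the two counters above (the real logic lives in net/core/page_pool.c, not in this header, and the helper name here is invented), the hold/release pair implements the in-flight accounting promised in the top comment roughly like this:

static inline u32 example_pp_inflight(struct page_pool *pool)
{
	u32 hold = READ_ONCE(pool->pages_state_hold_cnt);		/* pages handed out */
	u32 released = atomic_read(&pool->pages_state_release_cnt);	/* pages returned */

	/* Only when this difference reaches zero can the deferred
	 * release work (release_dw) finally free the pool.
	 */
	return hold - released;
}

user_cnt, by contrast, counts API users of the pool object itself, and is what the page_pool_put() helper at the end of this diff decrements.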
@@ -107,29 +140,71 @@
	return page_pool_alloc_pages(pool, gfp);
}

+/* Get the stored DMA direction. A driver might decide to handle this locally
+ * and avoid the extra cache line from page_pool to determine the direction.
+ */
+static
+inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
+{
+	return pool->p.dma_dir;
+}
+
struct page_pool *page_pool_create(const struct page_pool_params *params);

+#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
+void page_pool_release_page(struct page_pool *pool, struct page *page);
+#else
+static inline void page_pool_destroy(struct page_pool *pool)
+{
+}

-/* Never call this directly, use helpers below */
-void __page_pool_put_page(struct page_pool *pool,
-			  struct page *page, bool allow_direct);
+static inline void page_pool_use_xdp_mem(struct page_pool *pool,
+					 void (*disconnect)(void *))
+{
+}
+static inline void page_pool_release_page(struct page_pool *pool,
+					  struct page *page)
+{
+}
+#endif

-static inline void page_pool_put_page(struct page_pool *pool,
-				      struct page *page, bool allow_direct)
+void page_pool_put_page(struct page_pool *pool, struct page *page,
+			unsigned int dma_sync_size, bool allow_direct);
+
+/* Same as above but will try to sync the entire area pool->p.max_len */
+static inline void page_pool_put_full_page(struct page_pool *pool,
+					   struct page *page, bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
	 */
#ifdef CONFIG_PAGE_POOL
-	__page_pool_put_page(pool, page, allow_direct);
+	page_pool_put_page(pool, page, -1, allow_direct);
#endif
}
-/* Very limited use-cases allow recycle direct */
+
+/* Same as above but the caller must guarantee safe context, e.g. NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
-	__page_pool_put_page(pool, page, true);
+	page_pool_put_full_page(pool, page, true);
+}
+
+static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
+{
+	dma_addr_t ret = page->dma_addr[0];
+	if (sizeof(dma_addr_t) > sizeof(unsigned long))
+		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
+	return ret;
+}
+
+static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+	page->dma_addr[0] = addr;
+	if (sizeof(dma_addr_t) > sizeof(unsigned long))
+		page->dma_addr[1] = upper_32_bits(addr);
}

static inline bool is_page_pool_compiled_in(void)
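A short sketch of when each of the three return helpers above fits, reusing the hypothetical mydrv_rxq from the earlier sketch; the exact_len flag is likewise invented, and the calls assume they run from the RX NAPI poll so allow_direct=true is legal:

static void mydrv_drop_rx_page(struct mydrv_rxq *rxq, struct page *page,
			       unsigned int len, bool exact_len)
{
	if (exact_len)
		/* The device wrote only 'len' bytes, so only that much
		 * needs the DMA-sync-for-device before the page is reused.
		 */
		page_pool_put_page(rxq->page_pool, page, len, true);
	else
		/* Length unknown: sync up to pool->p.max_len, which is
		 * also what page_pool_recycle_direct() boils down to.
		 */
		page_pool_put_full_page(rxq->page_pool, page, true);
}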
@@ -141,4 +216,16 @@
#endif
}

+static inline bool page_pool_put(struct page_pool *pool)
+{
+	return refcount_dec_and_test(&pool->user_cnt);
+}
+
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid);
+static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
+{
+	if (unlikely(pool->p.nid != new_nid))
+		page_pool_update_nid(pool, new_nid);
+}
#endif /* _NET_PAGE_POOL_H */
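Finally, a sketch of the NUMA helper in use; mydrv_napi_poll and mydrv_clean_rx are hypothetical (the latter only declared here), while page_pool_nid_changed(), numa_mem_id() and the NAPI calls are real:

int mydrv_clean_rx(struct mydrv_rxq *rxq, int budget);	/* hypothetical RX loop */

static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_rxq *rxq = container_of(napi, struct mydrv_rxq, napi);
	int done;

	/* NAPI is exactly the "safe context" the comment above requires. */
	page_pool_nid_changed(rxq->page_pool, numa_mem_id());

	done = mydrv_clean_rx(rxq, budget);
	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}

On teardown, once the RX ring is disabled and any driver-held pages have been returned or released, the driver simply calls page_pool_destroy(); the hold/release accounting and the release_dw delayed work above let the pool defer (and, via defer_warn, warn about) the final free until all in-flight pages have come back.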