@@ -15,88 +15,106 @@
 
 struct net_device;
 struct xsk_queue;
-
-struct xdp_umem_props {
-	u64 chunk_mask;
-	u64 size;
-};
-
-struct xdp_umem_page {
-	void *addr;
-	dma_addr_t dma;
-};
+struct xdp_buff;
 
 struct xdp_umem {
-	struct xsk_queue *fq;
-	struct xsk_queue *cq;
-	struct xdp_umem_page *pages;
-	struct xdp_umem_props props;
+	void *addrs;
+	u64 size;
 	u32 headroom;
-	u32 chunk_size_nohr;
-	struct user_struct *user;
-	unsigned long address;
-	refcount_t users;
-	struct work_struct work;
-	struct page **pgs;
+	u32 chunk_size;
+	u32 chunks;
 	u32 npgs;
-	struct net_device *dev;
-	u16 queue_id;
+	struct user_struct *user;
+	refcount_t users;
+	u8 flags;
 	bool zc;
-	spinlock_t xsk_list_lock;
-	struct list_head xsk_list;
+	struct page **pgs;
+	int id;
+	struct list_head xsk_dma_list;
+	struct work_struct work;
+};
+
+struct xsk_map {
+	struct bpf_map map;
+	spinlock_t lock; /* Synchronize map updates */
+	struct xdp_sock *xsk_map[];
 };
 
 struct xdp_sock {
 	/* struct sock must be the first member of struct xdp_sock */
 	struct sock sk;
-	struct xsk_queue *rx;
+	struct xsk_queue *rx ____cacheline_aligned_in_smp;
 	struct net_device *dev;
 	struct xdp_umem *umem;
 	struct list_head flush_node;
+	struct xsk_buff_pool *pool;
 	u16 queue_id;
-	struct xsk_queue *tx ____cacheline_aligned_in_smp;
-	struct list_head list;
 	bool zc;
+	enum {
+		XSK_READY = 0,
+		XSK_BOUND,
+		XSK_UNBOUND,
+	} state;
+
+	struct xsk_queue *tx ____cacheline_aligned_in_smp;
+	struct list_head tx_list;
+	/* Protects generic receive. */
+	spinlock_t rx_lock;
+
+	/* Statistics */
+	u64 rx_dropped;
+	u64 rx_queue_full;
+
+	struct list_head map_list;
+	/* Protects map_list */
+	spinlock_t map_list_lock;
 	/* Protects multiple processes in the control path */
 	struct mutex mutex;
-	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
-	 * in the SKB destructor callback.
-	 */
-	spinlock_t tx_completion_lock;
-	u64 rx_dropped;
+	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
+	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
 };
 
-struct xdp_buff;
 #ifdef CONFIG_XDP_SOCKETS
+
 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-void xsk_flush(struct xdp_sock *xs);
-bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
-/* Used from netdev driver */
-u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
-void xsk_umem_discard_addr(struct xdp_umem *umem);
-void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
-bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
-void xsk_umem_consume_tx_done(struct xdp_umem *umem);
+int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
+void __xsk_map_flush(void);
+
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+						     u32 key)
+{
+	struct xsk_map *m = container_of(map, struct xsk_map, map);
+	struct xdp_sock *xs;
+
+	if (key >= map->max_entries)
+		return NULL;
+
+	xs = READ_ONCE(m->xsk_map[key]);
+	return xs;
+}
+
 #else
+
 static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
 	return -ENOTSUPP;
}
 
-static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
-static inline void xsk_flush(struct xdp_sock *xs)
+static inline void __xsk_map_flush(void)
 {
 }
 
-static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+						     u32 key)
 {
-	return false;
+	return NULL;
 }
+
 #endif /* CONFIG_XDP_SOCKETS */
 
 #endif /* _LINUX_XDP_SOCK_H */
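
For context, the new __xsk_map_lookup_elem()/__xsk_map_redirect()/__xsk_map_flush() trio replaces the old xsk_rcv()/xsk_flush() entry points: the XDP redirect path looks the socket up directly in the XSKMAP and flushes once per receive batch. Below is a minimal sketch of that pattern, assuming kernel context; only the declarations from this header are real, and the example_deliver_to_xsk() wrapper and its error handling are hypothetical illustration, not code from this change.

/* Illustrative only: a hypothetical caller of the new XSKMAP helpers,
 * following the lookup -> redirect -> flush pattern this header exposes.
 */
static int example_deliver_to_xsk(struct bpf_map *map, u32 queue_index,
				  struct xdp_buff *xdp)
{
	struct xdp_sock *xs;

	/* O(1) array lookup; NULL for an out-of-range or empty slot */
	xs = __xsk_map_lookup_elem(map, queue_index);
	if (!xs)
		return -EINVAL;

	/* Hand the frame to the socket's Rx path */
	return __xsk_map_redirect(xs, xdp);
}

/* After a batch of frames has been redirected (e.g. at the end of a
 * NAPI poll), the flush would be issued once for all touched sockets:
 *
 *	__xsk_map_flush();
 */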