..
 #ifndef _ICE_TXRX_H_
 #define _ICE_TXRX_H_

+#include "ice_type.h"
+
 #define ICE_DFLT_IRQ_WORK 256
+#define ICE_RXBUF_3072 3072
 #define ICE_RXBUF_2048 2048
+#define ICE_RXBUF_1536 1536
 #define ICE_MAX_CHAINED_RX_BUFS 5
 #define ICE_MAX_BUF_TXD 8
 #define ICE_MIN_TX_LEN 17
..
 #define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
 #define ICE_MAX_TXQ_PER_TXQG 128

-/* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+/* Attempt to maximize the headroom available for incoming frames. We use a 2K
+ * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
+ * This leaves us with 512 bytes of room. From that we need to deduct the
+ * space needed for the shared info and the padding needed to IP align the
+ * frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ *       up negative. In these cases we should fall back to the legacy
+ *       receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define ICE_2K_TOO_SMALL_WITH_PADDING \
+	((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
+	 SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
+
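To make the threshold concrete, here is the arithmetic behind ICE_2K_TOO_SMALL_WITH_PADDING for a common configuration; the cache-line size and skb_shared_info overhead below are typical x86_64 values and are illustrative, not taken from the patch:

    /* Assuming L1_CACHE_BYTES == 64 (so NET_SKB_PAD == 64) and
     * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) of roughly 320 bytes:
     *
     *   NET_SKB_PAD + ICE_RXBUF_1536      = 64 + 1536  = 1600
     *   SKB_WITH_OVERHEAD(ICE_RXBUF_2048) = 2048 - 320 = 1728
     *
     * 1600 <= 1728, so the macro is false and a padded 1536-byte frame
     * still fits in a 2K buffer. With 256-byte cache lines NET_SKB_PAD
     * and the aligned shared info both grow, the comparison flips, and
     * the driver falls back to the legacy receive path.
     */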
+/**
+ * ice_compute_pad - compute the padding
+ * @rx_buf_len: buffer length
+ *
+ * Figure out the size of half page based on given buffer length and
+ * then subtract the skb_shared_info followed by subtraction of the
+ * actual buffer length; this in turn results in the actual space that
+ * is left for padding usage
+ */
+static inline int ice_compute_pad(int rx_buf_len)
+{
+	int half_page_size;
+
+	half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+	return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
+}
+
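As a worked example of the helper above, assuming 4 KB pages and roughly 320 bytes of aligned skb_shared_info overhead (illustrative values only):

    /* rx_buf_len == ICE_RXBUF_1536:
     *   half_page_size = ALIGN(1536, 4096 / 2) = 2048
     *   pad = SKB_WITH_OVERHEAD(2048) - 1536
     *       = (2048 - ~320) - 1536, about 192 bytes left for headroom
     */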
+/**
+ * ice_skb_pad - determine the padding that we can supply
+ *
+ * Figure out the right Rx buffer size and based on that calculate the
+ * padding
+ */
+static inline int ice_skb_pad(void)
+{
+	int rx_buf_len;
+
+	/* If a 2K buffer cannot handle a standard Ethernet frame then
+	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
+	 *
+	 * For a 3K buffer we need to add enough padding to allow for
+	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
+	 * cache-line alignment.
+	 */
+	if (ICE_2K_TOO_SMALL_WITH_PADDING)
+		rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+	else
+		rx_buf_len = ICE_RXBUF_1536;
+
+	/* if needed make room for NET_IP_ALIGN */
+	rx_buf_len -= NET_IP_ALIGN;
+
+	return ice_compute_pad(rx_buf_len);
+}
+
+#define ICE_SKB_PAD ice_skb_pad()
+#else
+#define ICE_2K_TOO_SMALL_WITH_PADDING false
+#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
+
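ICE_SKB_PAD is the headroom reserved ahead of the frame on the build_skb receive path; a minimal sketch of how it would typically be consumed (ice_rx_offset_example() is a hypothetical helper for illustration, not part of this patch):

    /* Hypothetical illustration: reserve headroom only when the ring
     * builds skbs directly around the Rx page.
     */
    static inline unsigned int ice_rx_offset_example(struct ice_ring *rx_ring)
    {
    	return ice_ring_uses_build_skb(rx_ring) ? ICE_SKB_PAD : 0;
    }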
+/* We are assuming that the cache line is always 64 Bytes here for ice.
+ * In order to make sure that is a correct assumption there is a check in probe
+ * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
+ * size is 128 bytes. We do it this way because we do not want to read the
+ * GLPCI_CNF2 register or a variable containing the value on every pass through
+ * the Tx path.
+ */
+#define ICE_CACHE_LINE_BYTES 64
+#define ICE_DESCS_PER_CACHE_LINE (ICE_CACHE_LINE_BYTES / \
+				  sizeof(struct ice_tx_desc))
+#define ICE_DESCS_FOR_CTX_DESC 1
+#define ICE_DESCS_FOR_SKB_DATA_PTR 1
+/* Tx descriptors needed, worst case */
+#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
+		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
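For a sense of scale, struct ice_tx_desc is a 16-byte descriptor and MAX_SKB_FRAGS is 17 on a default kernel configuration, so the worst case works out as follows (illustrative arithmetic):

    /* ICE_DESCS_PER_CACHE_LINE = 64 / 16 = 4
     * DESC_NEEDED = 17 (frags) + 1 (context descriptor)
     *             + 4 (cache-line slack) + 1 (skb->data pointer) = 23
     */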
 #define ICE_DESC_UNUSED(R) \
-	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
-	(R)->next_to_clean - (R)->next_to_use - 1)
+	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	      (R)->next_to_clean - (R)->next_to_use - 1)
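The macro counts free descriptors across ring wrap-around while always keeping one slot unused; a quick numeric example (values chosen for illustration):

    /* count = 512, next_to_use = 10, next_to_clean = 5:
     *   next_to_clean > next_to_use is false, so start from count:
     *   512 + 5 - 10 - 1 = 506 descriptors still available
     */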

 #define ICE_TX_FLAGS_TSO BIT(0)
 #define ICE_TX_FLAGS_HW_VLAN BIT(1)
 #define ICE_TX_FLAGS_SW_VLAN BIT(2)
+/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be
+ * freed instead of returned like skb packets.
+ */
+#define ICE_TX_FLAGS_DUMMY_PKT BIT(3)
+#define ICE_TX_FLAGS_IPV4 BIT(5)
+#define ICE_TX_FLAGS_IPV6 BIT(6)
+#define ICE_TX_FLAGS_TUNNEL BIT(7)
 #define ICE_TX_FLAGS_VLAN_M 0xffff0000
+#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
+#define ICE_TX_FLAGS_VLAN_PR_S 29
 #define ICE_TX_FLAGS_VLAN_S 16
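The upper 16 bits of tx_flags carry the VLAN tag, and the top three of those bits double as the priority field; a minimal sketch of how the masks and shifts combine (illustrative, not code from the patch):

    u16 vlan_tag  = (tx_flags & ICE_TX_FLAGS_VLAN_M) >> ICE_TX_FLAGS_VLAN_S;
    u8  vlan_prio = (tx_flags & ICE_TX_FLAGS_VLAN_PR_M) >> ICE_TX_FLAGS_VLAN_PR_S;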
+
+#define ICE_XDP_PASS 0
+#define ICE_XDP_CONSUMED BIT(0)
+#define ICE_XDP_TX BIT(1)
+#define ICE_XDP_REDIR BIT(2)
+
+#define ICE_RX_DMA_ATTR \
+	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+#define ICE_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
+#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)

 struct ice_tx_buf {
 	struct ice_tx_desc *next_to_watch;
-	struct sk_buff *skb;
+	union {
+		struct sk_buff *skb;
+		void *raw_buf; /* used for XDP */
+	};
 	unsigned int bytecount;
 	unsigned short gso_segs;
 	u32 tx_flags;
-	DEFINE_DMA_UNMAP_ADDR(dma);
 	DEFINE_DMA_UNMAP_LEN(len);
+	DEFINE_DMA_UNMAP_ADDR(dma);
 };

 struct ice_tx_offload_params {
-	u8 header_len;
+	u64 cd_qw1;
+	struct ice_ring *tx_ring;
 	u32 td_cmd;
 	u32 td_offset;
 	u32 td_l2tag1;
-	u16 cd_l2tag2;
 	u32 cd_tunnel_params;
-	u64 cd_qw1;
-	struct ice_ring *tx_ring;
+	u16 cd_l2tag2;
+	u8 header_len;
 };

 struct ice_rx_buf {
-	struct sk_buff *skb;
-	dma_addr_t dma;
-	struct page *page;
-	unsigned int page_offset;
+	union {
+		struct {
+			struct sk_buff *skb;
+			dma_addr_t dma;
+			struct page *page;
+			unsigned int page_offset;
+			u16 pagecnt_bias;
+		};
+		struct {
+			struct xdp_buff *xdp;
+		};
+	};
 };

 struct ice_q_stats {
..
 	u64 restart_q;
 	u64 tx_busy;
 	u64 tx_linearize;
+	int prev_pkt; /* negative if no pending Tx descriptors */
 };

 struct ice_rxq_stats {
 	u64 non_eop_descs;
 	u64 alloc_page_failed;
 	u64 alloc_buf_failed;
-	u64 page_reuse_count;
+	u64 gro_dropped; /* GRO returned dropped */
 };

 /* this enum matches hardware bits and is meant to be used by DYN_CTLN
..
 /* indices into GLINT_ITR registers */
 #define ICE_RX_ITR ICE_IDX_ITR0
 #define ICE_TX_ITR ICE_IDX_ITR1
-#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
-#define ICE_ITR_8K 0x003E
+#define ICE_ITR_8K 124
+#define ICE_ITR_20K 50
+#define ICE_ITR_MAX 8160
+#define ICE_DFLT_TX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC)
+#define ICE_DFLT_RX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC)
+#define ICE_ITR_DYNAMIC 0x8000 /* used as flag for itr_setting */
+#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
+#define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC)
+#define ICE_ITR_GRAN_S 1 /* ITR granularity is always 2us */
+#define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S)
+#define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
+#define ITR_REG_ALIGN(setting) ((setting) & ICE_ITR_MASK)
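An itr_setting keeps the user-visible microsecond value in the low bits and the dynamic flag in bit 15; because the hardware granularity is 2 us, the value is aligned and shifted before being programmed. A small sketch of how these macros interact (the actual register-write step is assumed and not shown in this hunk):

    u16 setting = ICE_DFLT_RX_ITR;          /* 50 us with the dynamic flag set */
    bool dynamic = ITR_IS_DYNAMIC(setting); /* true */
    u16 usecs = ITR_TO_REG(setting);        /* 50 */
    u16 reg_val = ITR_REG_ALIGN(usecs) >> ICE_ITR_GRAN_S; /* 50 / 2 = 25 */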

-/* apply ITR HW granularity translation to program the HW registers */
-#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))
+#define ICE_ITR_ADAPTIVE_MIN_INC 0x0002
+#define ICE_ITR_ADAPTIVE_MIN_USECS 0x0002
+#define ICE_ITR_ADAPTIVE_MAX_USECS 0x00FA
+#define ICE_ITR_ADAPTIVE_LATENCY 0x8000
+#define ICE_ITR_ADAPTIVE_BULK 0x0000
+
+#define ICE_DFLT_INTRL 0
+#define ICE_MAX_INTRL 236
+
+#define ICE_WB_ON_ITR_USECS 2
+#define ICE_IN_WB_ON_ITR_MODE 255
+/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
+ * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
+ * set the write-back latency to the usecs passed in.
+ */
+#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx) \
+	((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
+	  GLINT_DYN_CTL_INTERVAL_M) | \
+	 (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
+	  GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
+	 GLINT_DYN_CTL_WB_ON_ITR_M)
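The composed value is meant to be written to the vector's GLINT_DYN_CTL register when entering write-back-on-ITR mode; a hedged sketch of that use, assuming the driver's wr32() register helper plus a struct ice_hw and struct ice_q_vector from the caller (none of which appear in this hunk):

    /* Sketch: have HW write back completed descriptors every
     * ICE_WB_ON_ITR_USECS without raising an interrupt.
     */
    wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
         ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS, ICE_RX_ITR));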

 /* Legacy or Advanced Mode Queue */
 #define ICE_TX_ADVANCED 0
..

 /* descriptor ring, associated with a VSI */
 struct ice_ring {
+	/* CL1 - 1st cacheline starts here */
 	struct ice_ring *next; /* pointer to next ring in q_vector */
 	void *desc; /* Descriptor ring memory */
 	struct device *dev; /* Used for DMA mapping */
..
 	struct ice_tx_buf *tx_buf;
 	struct ice_rx_buf *rx_buf;
 	};
+	/* CL2 - 2nd cacheline starts here */
 	u16 q_index; /* Queue number of ring */
-	u32 txq_teid; /* Added Tx queue TEID */
+	u16 q_handle; /* Queue handle per TC */

-	/* high bit set means dynamic, use accessor routines to read/write.
-	 * hardware supports 2us/1us resolution for the ITR registers.
-	 * these values always store the USER setting, and must be converted
-	 * before programming to a register.
-	 */
-	u16 rx_itr_setting;
-	u16 tx_itr_setting;
+	u8 ring_active:1; /* is ring online or not */

 	u16 count; /* Number of descriptors */
 	u16 reg_idx; /* HW register index of the ring */
..
 	/* used in interrupt processing */
 	u16 next_to_use;
 	u16 next_to_clean;
-
-	u8 ring_active; /* is ring online or not */
+	u16 next_to_alloc;

 	/* stats structs */
 	struct ice_q_stats stats;
..
 	struct ice_rxq_stats rx_stats;
 	};

-	unsigned int size; /* length of descriptor ring in bytes */
-	dma_addr_t dma; /* physical address of ring */
 	struct rcu_head rcu; /* to avoid race on free */
-	u16 next_to_alloc;
+	struct bpf_prog *xdp_prog;
+	struct xsk_buff_pool *xsk_pool;
+	/* CL3 - 3rd cacheline starts here */
+	struct xdp_rxq_info xdp_rxq;
+	/* CLX - the below items are only accessed infrequently and should be
+	 * in their own cache line if possible
+	 */
+#define ICE_TX_FLAGS_RING_XDP BIT(0)
+#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+	u8 flags;
+	dma_addr_t dma; /* physical address of ring */
+	unsigned int size; /* length of descriptor ring in bytes */
+	u32 txq_teid; /* Added Tx queue TEID */
+	u16 rx_buf_len;
+	u8 dcb_tc; /* Traffic class of ring */
 } ____cacheline_internodealigned_in_smp;

-enum ice_latency_range {
-	ICE_LOWEST_LATENCY = 0,
-	ICE_LOW_LATENCY = 1,
-	ICE_BULK_LATENCY = 2,
-	ICE_ULTRA_LATENCY = 3,
-};
+static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
+{
+	return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
+}
+
+static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring)
+{
+	ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
+}
+
+static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring)
+{
+	ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
+}
+
+static inline bool ice_ring_is_xdp(struct ice_ring *ring)
+{
+	return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
+}

 struct ice_ring_container {
-	/* array of pointers to rings */
+	/* head of linked-list of rings */
 	struct ice_ring *ring;
+	unsigned long next_update; /* jiffies value of next queue update */
 	unsigned int total_bytes; /* total bytes processed this int */
 	unsigned int total_pkts; /* total packets processed this int */
-	enum ice_latency_range latency_range;
-	u16 itr;
+	u16 itr_idx; /* index in the interrupt vector */
+	u16 target_itr; /* value in usecs divided by the hw->itr_gran */
+	u16 current_itr; /* value in usecs divided by the hw->itr_gran */
+	/* high bit set means dynamic ITR, rest is used to store user
+	 * readable ITR value in usecs and must be converted before programming
+	 * to a register.
+	 */
+	u16 itr_setting;
+};
+
+struct ice_coalesce_stored {
+	u16 itr_tx;
+	u16 itr_rx;
+	u8 intrl;
+	u8 tx_valid;
+	u8 rx_valid;
 };

 /* iterator for handling rings in ring container */
 #define ice_for_each_ring(pos, head) \
 	for (pos = (head).ring; pos; pos = pos->next)
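Since the container now holds the head of a singly linked list rather than an array, the iterator simply walks the ->next chain; usage looks roughly like this sketch (q_vector is assumed to be the driver's struct ice_q_vector, whose tx member is an ice_ring_container; neither appears in this hunk):

    struct ice_ring *ring;
    unsigned int nr_rings = 0;

    /* Sketch: visit every Tx ring attached to a queue vector */
    ice_for_each_ring(ring, q_vector->tx)
    	nr_rings++;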
+
+static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring->rx_buf_len > (PAGE_SIZE / 2))
+		return 1;
+#endif
+	return 0;
+}
+
+#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
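On 4 KB-page systems a 3072-byte Rx buffer no longer fits in half a page, so such rings move to order-1 (8 KB) pages while 2 KB buffers stay on order-0 pages; the page allocation presumably keys off this helper, along the lines of the sketch below (illustrative only; dev_alloc_pages() is the generic kernel helper, not something added by this patch):

    /* Sketch: allocate an Rx page sized to the ring's buffer length */
    struct page *page = dev_alloc_pages(ice_rx_pg_order(rx_ring));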
+
+union ice_32b_rx_flex_desc;

 bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
..
 void ice_free_tx_ring(struct ice_ring *tx_ring);
 void ice_free_rx_ring(struct ice_ring *rx_ring);
 int ice_napi_poll(struct napi_struct *napi, int budget);
-
+int
+ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
+		   u8 *raw_packet);
+int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget);
+void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring);
 #endif /* _ICE_TXRX_H_ */