2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
--- a/kernel/drivers/net/ethernet/intel/igb/igb.h
+++ b/kernel/drivers/net/ethernet/intel/igb/igb.h
@@ -19,6 +19,8 @@
 #include <linux/pci.h>
 #include <linux/mdio.h>
 
+#include <net/xdp.h>
+
 struct igb_adapter;
 
 #define E1000_PCS_CFG_IGN_SD	1
@@ -32,11 +34,11 @@
 /* TX/RX descriptor defines */
 #define IGB_DEFAULT_TXD		256
 #define IGB_DEFAULT_TX_WORK	128
-#define IGB_MIN_TXD		80
+#define IGB_MIN_TXD		64
 #define IGB_MAX_TXD		4096
 
 #define IGB_DEFAULT_RXD		256
-#define IGB_MIN_RXD		80
+#define IGB_MIN_RXD		64
 #define IGB_MAX_RXD		4096
 
 #define IGB_DEFAULT_ITR		3 /* dynamic */
@@ -78,6 +80,12 @@
 #define IGB_I210_RX_LATENCY_10		20662
 #define IGB_I210_RX_LATENCY_100	2213
 #define IGB_I210_RX_LATENCY_1000	448
+
+/* XDP */
+#define IGB_XDP_PASS		0
+#define IGB_XDP_CONSUMED	BIT(0)
+#define IGB_XDP_TX		BIT(1)
+#define IGB_XDP_REDIR		BIT(2)
 
 struct vf_data_storage {
 	unsigned char vf_mac_addresses[ETH_ALEN];
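
Review note: IGB_XDP_PASS is deliberately 0 while the other verdicts are distinct bits, so the RX cleanup loop can OR together every action taken during one NAPI poll and do the expensive flush work once at the end. A minimal sketch of that accumulation pattern; the per-frame helper name below is hypothetical (the real logic lives in igb_clean_rx_irq()/igb_run_xdp() in igb_main.c):

	unsigned int xdp_xmit = 0;	/* OR of IGB_XDP_* verdicts this poll */

	/* per received frame: */
	verdict = run_xdp_program(rx_ring, frame);	/* hypothetical helper */
	if (verdict != IGB_XDP_PASS)
		xdp_xmit |= verdict;		/* defer flush work to end of poll */

	/* once per poll, after the loop: */
	if (xdp_xmit & IGB_XDP_REDIR)
		xdp_do_flush();			/* flush queued redirects once */
	if (xdp_xmit & IGB_XDP_TX)
		igb_xdp_ring_update_tail(tx_ring);	/* one tail bump, not per frame */
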
@@ -130,19 +138,66 @@
 /* this is the size past which hardware will drop packets when setting LPE=0 */
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 
+#define IGB_ETH_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
 /* Supported Rx Buffer Sizes */
 #define IGB_RXBUFFER_256	256
+#define IGB_RXBUFFER_1536	1536
 #define IGB_RXBUFFER_2048	2048
 #define IGB_RXBUFFER_3072	3072
 #define IGB_RX_HDR_LEN		IGB_RXBUFFER_256
 #define IGB_TS_HDR_LEN		16
 
-#define IGB_SKB_PAD		(NET_SKB_PAD + NET_IP_ALIGN)
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ *	 up negative. In these cases we should fall back to the 3K
+ *	 buffers.
+ */
 #if (PAGE_SIZE < 8192)
-#define IGB_MAX_FRAME_BUILD_SKB \
-	(SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN)
+#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN)
+#define IGB_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048))
+
+static inline int igb_compute_pad(int rx_buf_len)
+{
+	int page_size, pad_size;
+
+	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+	return pad_size;
+}
+
+static inline int igb_skb_pad(void)
+{
+	int rx_buf_len;
+
+	/* If a 2K buffer cannot handle a standard Ethernet frame then
+	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
+	 *
+	 * For a 3K buffer we need to add enough padding to allow for
+	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
+	 * cache-line alignment.
+	 */
+	if (IGB_2K_TOO_SMALL_WITH_PADDING)
+		rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+	else
+		rx_buf_len = IGB_RXBUFFER_1536;
+
+	/* if needed make room for NET_IP_ALIGN */
+	rx_buf_len -= NET_IP_ALIGN;
+
+	return igb_compute_pad(rx_buf_len);
+}
+
+#define IGB_SKB_PAD	igb_skb_pad()
 #else
-#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN)
+#define IGB_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)
 #endif
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
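
Review note: the 2K-buffer arithmetic is easiest to check with concrete numbers. The standalone sketch below mirrors the macros under assumed x86-64 defaults: PAGE_SIZE 4096, NET_SKB_PAD 64, NET_IP_ALIGN 0, and a 320-byte SKB_DATA_ALIGN'd skb_shared_info; all four are arch- and config-dependent assumptions, not guarantees. Under them, IGB_2K_TOO_SMALL_WITH_PADDING is false (1616 <= 1728) and IGB_SKB_PAD evaluates to 192 bytes of headroom; with 256-byte cache lines the padding and shared-info overhead grow until the comparison flips, which is the 3K fallback the comment above describes.

#include <stdio.h>

#define PAGE_SIZE		4096	/* assumption: x86-64 default */
#define NET_SKB_PAD		64	/* assumption: 64-byte cache lines */
#define NET_IP_ALIGN		0	/* assumption: x86 keeps this 0 */
#define SHINFO_OVERHEAD		320	/* assumed SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
#define IGB_TS_HDR_LEN		16
#define IGB_RXBUFFER_1536	1536
#define IGB_RXBUFFER_2048	2048
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define SKB_WITH_OVERHEAD(x)	((x) - SHINFO_OVERHEAD)

int main(void)
{
	int too_small = (NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) >
			SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048);	/* 1616 > 1728 -> 0 */
	int rx_buf_len = IGB_RXBUFFER_1536 - NET_IP_ALIGN;	/* 1536 */
	int page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);	/* 2048 */

	printf("2K too small: %d, IGB_SKB_PAD: %d\n", too_small,
	       SKB_WITH_OVERHEAD(page_size) - rx_buf_len);	/* prints 0, 192 */
	return 0;
}
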
@@ -194,13 +249,25 @@
 #define IGB_SFF_ADDRESSING_MODE		0x4
 #define IGB_SFF_8472_UNSUP		0x00
 
+/* TX resources are shared between XDP and netstack
+ * and we need to tag the buffer type to distinguish them
+ */
+enum igb_tx_buf_type {
+	IGB_TYPE_SKB = 0,
+	IGB_TYPE_XDP,
+};
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer
  */
 struct igb_tx_buffer {
 	union e1000_adv_tx_desc *next_to_watch;
 	unsigned long time_stamp;
-	struct sk_buff *skb;
+	enum igb_tx_buf_type type;
+	union {
+		struct sk_buff *skb;
+		struct xdp_frame *xdpf;
+	};
 	unsigned int bytecount;
 	u16 gso_segs;
 	__be16 protocol;
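
Review note: with skb and xdpf now sharing storage in a union, every path that retires a TX buffer must consult type before dereferencing either pointer; the TX completion path in particular has to pick the right free routine. A minimal sketch of that dispatch, assuming a helper along the lines of the driver's existing unmap/free routine (the body here is illustrative, not the literal igb_main.c code):

	static void igb_free_tx_buffer(struct igb_tx_buffer *tx_buffer)
	{
		if (tx_buffer->type == IGB_TYPE_SKB)
			dev_kfree_skb_any(tx_buffer->skb);
		else	/* IGB_TYPE_XDP */
			xdp_return_frame(tx_buffer->xdpf);

		/* DMA unmap of the buffer is unchanged and omitted here */
	}
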
@@ -248,6 +315,7 @@
 struct igb_ring {
 	struct igb_q_vector *q_vector;	/* backlink to q_vector */
 	struct net_device *netdev;	/* back pointer to net_device */
+	struct bpf_prog *xdp_prog;
 	struct device *dev;		/* device pointer for dma mapping */
 	union {				/* array of buffer info structs */
 		struct igb_tx_buffer *tx_buffer_info;
@@ -288,6 +356,7 @@
 			struct u64_stats_sync rx_syncp;
 		};
 	};
+	struct xdp_rxq_info xdp_rxq;
 } ____cacheline_internodealigned_in_smp;
 
 struct igb_q_vector {
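
Review note: the new xdp_rxq_info member implies a registration step in RX ring setup and a matching unregister on teardown. A hedged sketch of that pairing; note that xdp_rxq_info_reg() gained an extra napi_id argument in later kernels, so the exact signature depends on this tree:

	/* during igb_setup_rx_resources(), roughly: */
	err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
			       rx_ring->queue_index);
	if (err < 0)
		return err;

	/* and in igb_free_rx_resources(): */
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
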
@@ -306,7 +375,7 @@
 	char name[IFNAMSIZ + 9];
 
 	/* for dynamic allocation of rings associated with this q_vector */
-	struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
+	struct igb_ring ring[] ____cacheline_internodealigned_in_smp;
 };
 
 enum e1000_ring_flags_t {
@@ -339,7 +408,7 @@
 		return IGB_RXBUFFER_3072;
 
 	if (ring_uses_build_skb(ring))
-		return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN;
+		return IGB_MAX_FRAME_BUILD_SKB;
 #endif
 	return IGB_RXBUFFER_2048;
 }
@@ -467,6 +536,7 @@
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 
 	struct net_device *netdev;
+	struct bpf_prog *xdp_prog;
 
 	unsigned long state;
 	unsigned int flags;
@@ -594,6 +664,8 @@
 	struct igb_mac_addr *mac_table;
 	struct vf_mac_filter vf_macs;
 	struct vf_mac_filter *vf_mac_list;
+	/* lock for VF resources */
+	spinlock_t vfs_lock;
 };
 
 /* flags controlling PTP/1588 function */
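
Review note: a bare spinlock_t field needs an explicit spin_lock_init() before first use (in the probe path, ahead of any VF activity), and the point of this lock is to serialize VF teardown against the VF mailbox/reset handling. A hedged sketch of the intended usage; which fields the critical section actually covers is an assumption here:

	unsigned long flags;

	spin_lock_init(&adapter->vfs_lock);	/* once, during probe */

	/* when tearing down SR-IOV state, so the mailbox task cannot
	 * race with the VF data going away:
	 */
	spin_lock_irqsave(&adapter->vfs_lock, flags);
	adapter->vfs_allocated_count = 0;	/* example protected state */
	spin_unlock_irqrestore(&adapter->vfs_lock, flags);
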
@@ -642,8 +714,10 @@
 };
 
 extern char igb_driver_name[];
-extern char igb_driver_version[];
 
+int igb_xmit_xdp_ring(struct igb_adapter *adapter,
+		      struct igb_ring *ring,
+		      struct xdp_frame *xdpf);
 int igb_open(struct net_device *netdev);
 int igb_close(struct net_device *netdev);
 int igb_up(struct igb_adapter *);
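
Review note: exposing igb_xmit_xdp_ring() in the header lets the local XDP_TX path and the ndo_xdp_xmit callback share one enqueue routine. A rough sketch of the XDP_TX side, assuming the core helper xdp_convert_buff_to_frame() (named convert_to_xdp_frame() in older trees) and assuming the function reports IGB_XDP_TX or IGB_XDP_CONSUMED:

	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return IGB_XDP_CONSUMED;

	/* queue onto the current CPU's XDP TX ring */
	return igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
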
@@ -661,6 +735,7 @@
 void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
 void igb_setup_tctl(struct igb_adapter *);
 void igb_setup_rctl(struct igb_adapter *);
+void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
 void igb_alloc_rx_buffers(struct igb_ring *, u16);
 void igb_update_stats(struct igb_adapter *);
@@ -675,8 +750,8 @@
 void igb_ptp_rx_hang(struct igb_adapter *adapter);
 void igb_ptp_tx_hang(struct igb_adapter *adapter);
 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-			 struct sk_buff *skb);
+int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+			struct sk_buff *skb);
 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
 void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
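
Review note: the void-to-int change suggests igb_ptp_rx_pktstamp() now reports how many bytes of inline timestamp header it consumed, so the RX path can trim them from the frame before XDP sees it. A hedged sketch of the expected call-site adjustment (pktbuf, pkt_offset and size are local RX-path variables assumed here):

	/* pull the RX packet timestamp if present and deduct its header */
	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
		int ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
						     pktbuf, skb);

		pkt_offset += ts_hdr_len;
		size -= ts_hdr_len;
	}
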