| .. | .. |
|---|
| 16 | 16 | #define XDP_SHARED_UMEM (1 << 0) |
|---|
| 17 | 17 | #define XDP_COPY (1 << 1) /* Force copy-mode */ |
|---|
| 18 | 18 | #define XDP_ZEROCOPY (1 << 2) /* Force zero-copy mode */ |
|---|
| 19 | +/* If this option is set, the driver might go to sleep and in that case |
|---|
| 20 | + * the XDP_RING_NEED_WAKEUP flag in the fill and/or Tx rings will be |
|---|
| 21 | + * set. If it is set, the application needs to explicitly wake up the |
|---|
| 22 | + * driver with a poll() (Rx and Tx) or sendto() (Tx only). If you are |
|---|
| 23 | + * running the driver and the application on the same core, you should |
|---|
| 24 | + * use this option so that the kernel will yield to the user space |
|---|
| 25 | + * application. |
|---|
| 26 | + */ |
|---|
| 27 | +#define XDP_USE_NEED_WAKEUP (1 << 3) |
|---|
| 28 | + |
|---|
| 29 | +/* Flags for the flags field of struct xdp_umem_reg */ |
|---|
| 30 | +#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0) |
|---|
| 19 | 31 | |
|---|
| 20 | 32 | struct sockaddr_xdp { |
|---|
| 21 | 33 | __u16 sxdp_family; |
|---|
| .. | .. |
|---|
| 25 | 37 | __u32 sxdp_shared_umem_fd; |
|---|
| 26 | 38 | }; |
|---|
| 27 | 39 | |
|---|
| 40 | +/* XDP_RING flags */ |
|---|
| 41 | +#define XDP_RING_NEED_WAKEUP (1 << 0) |
|---|
| 42 | + |
|---|
| 28 | 43 | struct xdp_ring_offset { |
|---|
| 29 | 44 | __u64 producer; |
|---|
| 30 | 45 | __u64 consumer; |
|---|
| 31 | 46 | __u64 desc; |
|---|
| 47 | + __u64 flags; |
|---|
| 32 | 48 | }; |
|---|
| 33 | 49 | |
|---|
| 34 | 50 | struct xdp_mmap_offsets { |
|---|
| .. | .. |
|---|
| 46 | 62 | #define XDP_UMEM_FILL_RING 5 |
|---|
| 47 | 63 | #define XDP_UMEM_COMPLETION_RING 6 |
|---|
| 48 | 64 | #define XDP_STATISTICS 7 |
|---|
| 65 | +#define XDP_OPTIONS 8 |
|---|
| 49 | 66 | |
|---|
| 50 | 67 | struct xdp_umem_reg { |
|---|
| 51 | 68 | __u64 addr; /* Start of packet data area */ |
|---|
| 52 | 69 | __u64 len; /* Length of packet data area */ |
|---|
| 53 | 70 | __u32 chunk_size; |
|---|
| 54 | 71 | __u32 headroom; |
|---|
| 72 | + __u32 flags; |
|---|
| 55 | 73 | }; |
|---|
| 56 | 74 | |
|---|
| 57 | 75 | struct xdp_statistics { |
|---|
| 58 | | - __u64 rx_dropped; /* Dropped for reasons other than invalid desc */ |
|---|
| 76 | + __u64 rx_dropped; /* Dropped for other reasons */ |
|---|
| 59 | 77 | __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */ |
|---|
| 60 | 78 | __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */ |
|---|
| 79 | + __u64 rx_ring_full; /* Dropped due to rx ring being full */ |
|---|
| 80 | + __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */ |
|---|
| 81 | + __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */ |
|---|
| 61 | 82 | }; |
|---|
| 83 | + |
|---|
| 84 | +struct xdp_options { |
|---|
| 85 | + __u32 flags; |
|---|
| 86 | +}; |
|---|
| 87 | + |
|---|
| 88 | +/* Flags for the flags field of struct xdp_options */ |
|---|
| 89 | +#define XDP_OPTIONS_ZEROCOPY (1 << 0) |
|---|
| 62 | 90 | |
|---|
| 63 | 91 | /* Pgoff for mmaping the rings */ |
|---|
| 64 | 92 | #define XDP_PGOFF_RX_RING 0 |
|---|
| .. | .. |
|---|
| 66 | 94 | #define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL |
|---|
| 67 | 95 | #define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL |
|---|
| 68 | 96 | |
|---|
| 97 | +/* Masks for unaligned chunks mode */ |
|---|
| 98 | +#define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48 |
|---|
| 99 | +#define XSK_UNALIGNED_BUF_ADDR_MASK \ |
|---|
| 100 | + ((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1) |
|---|
| 101 | + |
|---|
| 69 | 102 | /* Rx/Tx descriptor */ |
|---|
| 70 | 103 | struct xdp_desc { |
|---|
| 71 | 104 | __u64 addr; |
|---|