2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/include/uapi/linux/pkt_sched.h
@@ -2,6 +2,7 @@
 #ifndef __LINUX_PKT_SCHED_H
 #define __LINUX_PKT_SCHED_H
 
+#include <linux/const.h>
 #include <linux/types.h>
 
 /* Logical priority bands not depending on specific packet scheduler.
@@ -255,6 +256,9 @@
 	TCA_RED_PARMS,
 	TCA_RED_STAB,
 	TCA_RED_MAX_P,
+	TCA_RED_FLAGS,		/* bitfield32 */
+	TCA_RED_EARLY_DROP_BLOCK, /* u32 */
+	TCA_RED_MARK_BLOCK,	/* u32 */
 	__TCA_RED_MAX,
 };
 
@@ -267,11 +271,27 @@
 	unsigned char	Wlog;		/* log(W) */
 	unsigned char	Plog;		/* log(P_max/(qth_max-qth_min)) */
 	unsigned char	Scell_log;	/* cell size for idle damping */
+
+	/* This field can be used for flags that a RED-like qdisc has
+	 * historically supported. E.g. when configuring RED, it can be used for
+	 * ECN, HARDDROP and ADAPTATIVE. For SFQ it can be used for ECN,
+	 * HARDDROP. Etc. Because this field has not been validated, and is
+	 * copied back on dump, any bits besides those to which a given qdisc
+	 * has assigned a historical meaning need to be considered for free use
+	 * by userspace tools.
+	 *
+	 * Any further flags need to be passed differently, e.g. through an
+	 * attribute (such as TCA_RED_FLAGS above). Such attribute should allow
+	 * passing both recent and historic flags in one value.
+	 */
 	unsigned char	flags;
#define TC_RED_ECN		1
#define TC_RED_HARDDROP		2
#define TC_RED_ADAPTATIVE	4
+#define TC_RED_NODROP		8
 };
+
+#define TC_RED_HISTORIC_FLAGS (TC_RED_ECN | TC_RED_HARDDROP | TC_RED_ADAPTATIVE)
 
 struct tc_red_xstats {
 	__u32	early;		/* Early drops */
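
The comment block added above is the key constraint: the legacy flags byte in struct tc_red_qopt can only carry the historic TC_RED_ECN/TC_RED_HARDDROP/TC_RED_ADAPTATIVE bits, so newer bits such as TC_RED_NODROP have to travel in the TCA_RED_FLAGS attribute as a bitfield32. A minimal C sketch of the payload a userspace tool might place in that attribute (the helper name is hypothetical; struct nla_bitfield32 comes from <linux/netlink.h>):

#include <linux/netlink.h>	/* struct nla_bitfield32 */
#include <linux/pkt_sched.h>	/* TC_RED_* flag bits */

/* Hypothetical helper: build the value/selector pair carried by a
 * TCA_RED_FLAGS attribute.  Only bits set in the selector are meaningful,
 * so historic and newer flags can be passed together in one attribute. */
static struct nla_bitfield32 red_flags_payload(void)
{
	struct nla_bitfield32 bf = {
		.value    = TC_RED_ECN | TC_RED_NODROP,
		.selector = TC_RED_ECN | TC_RED_NODROP,
	};

	return bf;
}
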
@@ -291,10 +311,37 @@
 	TCA_GRED_DPS,
 	TCA_GRED_MAX_P,
 	TCA_GRED_LIMIT,
+	TCA_GRED_VQ_LIST,	/* nested TCA_GRED_VQ_ENTRY */
 	__TCA_GRED_MAX,
 };
 
 #define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
+
+enum {
+	TCA_GRED_VQ_ENTRY_UNSPEC,
+	TCA_GRED_VQ_ENTRY,	/* nested TCA_GRED_VQ_* */
+	__TCA_GRED_VQ_ENTRY_MAX,
+};
+#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
+
+enum {
+	TCA_GRED_VQ_UNSPEC,
+	TCA_GRED_VQ_PAD,
+	TCA_GRED_VQ_DP,			/* u32 */
+	TCA_GRED_VQ_STAT_BYTES,		/* u64 */
+	TCA_GRED_VQ_STAT_PACKETS,	/* u32 */
+	TCA_GRED_VQ_STAT_BACKLOG,	/* u32 */
+	TCA_GRED_VQ_STAT_PROB_DROP,	/* u32 */
+	TCA_GRED_VQ_STAT_PROB_MARK,	/* u32 */
+	TCA_GRED_VQ_STAT_FORCED_DROP,	/* u32 */
+	TCA_GRED_VQ_STAT_FORCED_MARK,	/* u32 */
+	TCA_GRED_VQ_STAT_PDROP,		/* u32 */
+	TCA_GRED_VQ_STAT_OTHER,		/* u32 */
+	TCA_GRED_VQ_FLAGS,		/* u32 */
+	__TCA_GRED_VQ_MAX
+};
+
+#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
 
 struct tc_gred_qopt {
 	__u32	limit;		/* HARD maximal queue length (bytes) */
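
For orientation, the new TCA_GRED_VQ_LIST dump is a two-level nest: the list carries one TCA_GRED_VQ_ENTRY per virtual queue, and each entry carries the TCA_GRED_VQ_* attributes declared above. A sketch of how a userspace consumer might walk such a dump, assuming the libmnl attribute helpers (the function itself is made up for illustration):

#include <stdio.h>
#include <stdint.h>
#include <libmnl/libmnl.h>
#include <linux/pkt_sched.h>

/* Walk a TCA_GRED_VQ_LIST attribute: one nested TCA_GRED_VQ_ENTRY per
 * virtual queue, each holding TCA_GRED_VQ_DP plus stat counters. */
static void gred_vq_list_walk(const struct nlattr *vq_list)
{
	const struct nlattr *entry;

	mnl_attr_for_each_nested(entry, vq_list) {
		const struct nlattr *attr;
		uint32_t dp = 0;
		uint64_t bytes = 0;

		if (mnl_attr_get_type(entry) != TCA_GRED_VQ_ENTRY)
			continue;

		mnl_attr_for_each_nested(attr, entry) {
			switch (mnl_attr_get_type(attr)) {
			case TCA_GRED_VQ_DP:
				dp = mnl_attr_get_u32(attr);
				break;
			case TCA_GRED_VQ_STAT_BYTES:
				bytes = mnl_attr_get_u64(attr);
				break;
			}
		}

		printf("VQ %u: %llu bytes\n", dp, (unsigned long long)bytes);
	}
}
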
@@ -395,9 +442,9 @@
 struct tc_htb_xstats {
 	__u32 lends;
 	__u32 borrows;
-	__u32 giants;	/* too big packets (rate will not be accurate) */
-	__u32 tokens;
-	__u32 ctokens;
+	__u32 giants;	/* unused since 'Make HTB scheduler work with TSO.' */
+	__s32 tokens;
+	__s32 ctokens;
 };
 
 /* HFSC section */
@@ -866,6 +913,14 @@
 
 	TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
 
+	TCA_FQ_CE_THRESHOLD,	/* DCTCP-like CE-marking threshold */
+
+	TCA_FQ_TIMER_SLACK,	/* timer slack */
+
+	TCA_FQ_HORIZON,		/* time horizon in us */
+
+	TCA_FQ_HORIZON_DROP,	/* drop packets beyond horizon, or cap their EDT */
+
 	__TCA_FQ_MAX
 };
 
@@ -884,6 +939,9 @@
 	__u32	inactive_flows;
 	__u32	throttled_flows;
 	__u32	unthrottle_latency_ns;
+	__u64	ce_mark;	/* packets above ce_threshold */
+	__u64	horizon_drops;
+	__u64	horizon_caps;
 };
 
 /* Heavy-Hitter Filter */
@@ -921,19 +979,56 @@
 	TCA_PIE_BETA,
 	TCA_PIE_ECN,
 	TCA_PIE_BYTEMODE,
+	TCA_PIE_DQ_RATE_ESTIMATOR,
 	__TCA_PIE_MAX
 };
 #define TCA_PIE_MAX (__TCA_PIE_MAX - 1)
 
 struct tc_pie_xstats {
-	__u32 prob;		/* current probability */
-	__u32 delay;		/* current delay in ms */
-	__u32 avg_dq_rate;	/* current average dq_rate in bits/pie_time */
-	__u32 packets_in;	/* total number of packets enqueued */
-	__u32 dropped;		/* packets dropped due to pie_action */
-	__u32 overlimit;	/* dropped due to lack of space in queue */
-	__u32 maxq;		/* maximum queue size */
-	__u32 ecn_mark;		/* packets marked with ecn*/
+	__u64 prob;			/* current probability */
+	__u32 delay;			/* current delay in ms */
+	__u32 avg_dq_rate;		/* current average dq_rate in
+					 * bits/pie_time
+					 */
+	__u32 dq_rate_estimating;	/* is avg_dq_rate being calculated? */
+	__u32 packets_in;		/* total number of packets enqueued */
+	__u32 dropped;			/* packets dropped due to pie_action */
+	__u32 overlimit;		/* dropped due to lack of space
+					 * in queue
+					 */
+	__u32 maxq;			/* maximum queue size */
+	__u32 ecn_mark;			/* packets marked with ecn*/
+};
+
+/* FQ PIE */
+enum {
+	TCA_FQ_PIE_UNSPEC,
+	TCA_FQ_PIE_LIMIT,
+	TCA_FQ_PIE_FLOWS,
+	TCA_FQ_PIE_TARGET,
+	TCA_FQ_PIE_TUPDATE,
+	TCA_FQ_PIE_ALPHA,
+	TCA_FQ_PIE_BETA,
+	TCA_FQ_PIE_QUANTUM,
+	TCA_FQ_PIE_MEMORY_LIMIT,
+	TCA_FQ_PIE_ECN_PROB,
+	TCA_FQ_PIE_ECN,
+	TCA_FQ_PIE_BYTEMODE,
+	TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
+	__TCA_FQ_PIE_MAX
+};
+#define TCA_FQ_PIE_MAX (__TCA_FQ_PIE_MAX - 1)
+
+struct tc_fq_pie_xstats {
+	__u32 packets_in;	/* total number of packets enqueued */
+	__u32 dropped;		/* packets dropped due to fq_pie_action */
+	__u32 overlimit;	/* dropped due to lack of space in queue */
+	__u32 overmemory;	/* dropped due to lack of memory in queue */
+	__u32 ecn_mark;		/* packets marked with ecn */
+	__u32 new_flow_count;	/* count of new flows created by packets */
+	__u32 new_flows_len;	/* count of flows in new list */
+	__u32 old_flows_len;	/* count of flows in old list */
+	__u32 memory_usage;	/* total memory across all queues */
 };
 
 /* CBS */
@@ -960,8 +1055,9 @@
 	__s32 delta;
 	__s32 clockid;
 	__u32 flags;
-#define TC_ETF_DEADLINE_MODE_ON	BIT(0)
-#define TC_ETF_OFFLOAD_ON	BIT(1)
+#define TC_ETF_DEADLINE_MODE_ON	_BITUL(0)
+#define TC_ETF_OFFLOAD_ON	_BITUL(1)
+#define TC_ETF_SKIP_SOCK_CHECK	_BITUL(2)
 };
 
 enum {
@@ -993,6 +1089,7 @@
 	TCA_CAKE_INGRESS,
 	TCA_CAKE_ACK_FILTER,
 	TCA_CAKE_SPLIT_GSO,
+	TCA_CAKE_FWMARK,
 	__TCA_CAKE_MAX
 };
 #define TCA_CAKE_MAX	(__TCA_CAKE_MAX - 1)
@@ -1086,4 +1183,85 @@
 	CAKE_ATM_MAX
 };
 
+
+/* TAPRIO */
+enum {
+	TC_TAPRIO_CMD_SET_GATES = 0x00,
+	TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
+	TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
+};
+
+enum {
+	TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
+	TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
+	TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
+	TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
+	TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
+	__TCA_TAPRIO_SCHED_ENTRY_MAX,
+};
+#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
+
+/* The format for schedule entry list is:
+ * [TCA_TAPRIO_SCHED_ENTRY_LIST]
+ *   [TCA_TAPRIO_SCHED_ENTRY]
+ *     [TCA_TAPRIO_SCHED_ENTRY_CMD]
+ *     [TCA_TAPRIO_SCHED_ENTRY_GATES]
+ *     [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
+ */
+enum {
+	TCA_TAPRIO_SCHED_UNSPEC,
+	TCA_TAPRIO_SCHED_ENTRY,
+	__TCA_TAPRIO_SCHED_MAX,
+};
+
+#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
+
+/* The format for the admin sched (dump only):
+ * [TCA_TAPRIO_SCHED_ADMIN_SCHED]
+ *   [TCA_TAPRIO_ATTR_SCHED_BASE_TIME]
+ *   [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]
+ *     [TCA_TAPRIO_ATTR_SCHED_ENTRY]
+ *       [TCA_TAPRIO_ATTR_SCHED_ENTRY_CMD]
+ *       [TCA_TAPRIO_ATTR_SCHED_ENTRY_GATES]
+ *       [TCA_TAPRIO_ATTR_SCHED_ENTRY_INTERVAL]
+ */
+
+#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST	_BITUL(0)
+#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD	_BITUL(1)
+
+enum {
+	TCA_TAPRIO_ATTR_UNSPEC,
+	TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
+	TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
+	TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
+	TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
+	TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
+	TCA_TAPRIO_PAD,
+	TCA_TAPRIO_ATTR_ADMIN_SCHED, /* The admin sched, only used in dump */
+	TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, /* s64 */
+	TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */
+	TCA_TAPRIO_ATTR_FLAGS, /* u32 */
+	TCA_TAPRIO_ATTR_TXTIME_DELAY, /* u32 */
+	__TCA_TAPRIO_ATTR_MAX,
+};
+
+#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
+
+/* ETS */
+
+#define TCQ_ETS_MAX_BANDS 16
+
+enum {
+	TCA_ETS_UNSPEC,
+	TCA_ETS_NBANDS,		/* u8 */
+	TCA_ETS_NSTRICT,	/* u8 */
+	TCA_ETS_QUANTA,		/* nested TCA_ETS_QUANTA_BAND */
+	TCA_ETS_QUANTA_BAND,	/* u32 */
+	TCA_ETS_PRIOMAP,	/* nested TCA_ETS_PRIOMAP_BAND */
+	TCA_ETS_PRIOMAP_BAND,	/* u8 */
+	__TCA_ETS_MAX,
+};
+
+#define TCA_ETS_MAX (__TCA_ETS_MAX - 1)
+
 #endif
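
The nesting documented in the TAPRIO comments above maps directly onto netlink attribute nests. As a sketch (assuming the libmnl helpers and an nlmsghdr that is already being prepared for the taprio qdisc; the function is illustrative, not taken from an existing tool), one schedule entry could be appended like this:

#include <stdint.h>
#include <libmnl/libmnl.h>
#include <linux/pkt_sched.h>

/* Illustrative only: append a one-entry schedule list, following the
 * nesting from the header comment:
 *   ENTRY_LIST -> ENTRY -> { CMD, GATE_MASK, INTERVAL }. */
static void taprio_put_entry(struct nlmsghdr *nlh, uint32_t gate_mask,
			     uint32_t interval)
{
	struct nlattr *list, *entry;

	list = mnl_attr_nest_start(nlh, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	entry = mnl_attr_nest_start(nlh, TCA_TAPRIO_SCHED_ENTRY);

	mnl_attr_put_u8(nlh, TCA_TAPRIO_SCHED_ENTRY_CMD,
			TC_TAPRIO_CMD_SET_GATES);
	mnl_attr_put_u32(nlh, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, gate_mask);
	mnl_attr_put_u32(nlh, TCA_TAPRIO_SCHED_ENTRY_INTERVAL, interval);

	mnl_attr_nest_end(nlh, entry);
	mnl_attr_nest_end(nlh, list);
}

A real configuration message would carry this alongside other attributes from the TCA_TAPRIO_ATTR_* set above, such as TCA_TAPRIO_ATTR_PRIOMAP, TCA_TAPRIO_ATTR_SCHED_BASE_TIME and TCA_TAPRIO_ATTR_SCHED_CLOCKID.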