2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5

--- a/kernel/net/sched/sch_etf.c
+++ b/kernel/net/sched/sch_etf.c
@@ -22,15 +22,17 @@
 
 #define DEADLINE_MODE_IS_ON(x) ((x)->flags & TC_ETF_DEADLINE_MODE_ON)
 #define OFFLOAD_IS_ON(x) ((x)->flags & TC_ETF_OFFLOAD_ON)
+#define SKIP_SOCK_CHECK_IS_SET(x) ((x)->flags & TC_ETF_SKIP_SOCK_CHECK)
 
 struct etf_sched_data {
         bool offload;
         bool deadline_mode;
+        bool skip_sock_check;
         int clockid;
         int queue;
         s32 delta; /* in ns */
         ktime_t last; /* The txtime of the last skb sent to the netdevice. */
-        struct rb_root head;
+        struct rb_root_cached head;
         struct qdisc_watchdog watchdog;
         ktime_t (*get_time)(void);
 };
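The move from struct rb_root to struct rb_root_cached is what the rest of this patch builds on: the cached variant carries a pointer to the leftmost (earliest-txtime) node next to the root, so peeking at the next packet to send no longer walks down the tree. A minimal sketch of the pattern, using an illustrative holder type rather than the ETF code itself:

    #include <linux/rbtree.h>

    /* Hypothetical container using the cached-rbtree API. */
    struct holder {
            struct rb_root_cached head;
    };

    static void holder_init(struct holder *h)
    {
            h->head = RB_ROOT_CACHED;       /* empty tree, no cached leftmost */
    }

    /* O(1): returns the cached leftmost node, NULL if the tree is empty. */
    static struct rb_node *holder_peek(struct holder *h)
    {
            return rb_first_cached(&h->head);
    }

The cost moves to insertion and erasure, which must keep the cache coherent; that is what rb_insert_color_cached() and rb_erase_cached() do in the hunks below.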
@@ -77,6 +79,9 @@
         struct sock *sk = nskb->sk;
         ktime_t now;
 
+        if (q->skip_sock_check)
+                goto skip;
+
         if (!sk || !sk_fullsock(sk))
                 return false;
 
@@ -92,6 +97,7 @@
         if (sk->sk_txtime_deadline_mode != q->deadline_mode)
                 return false;
 
+skip:
         now = q->get_time();
         if (ktime_before(txtime, now) || ktime_before(txtime, q->last))
                 return false;
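Taken together, the two hunks above add a bypass for the per-socket checks (full socket, SO_TXTIME enabled, matching clockid and deadline mode) while still enforcing that the requested txtime is neither in the past nor behind the last transmitted packet. Condensed shape of is_packet_valid() after the change (a sketch, with the elided checks summarized in a comment, not verbatim code):

    static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb)
    {
            struct etf_sched_data *q = qdisc_priv(sch);
            ktime_t txtime = nskb->tstamp;
            struct sock *sk = nskb->sk;
            ktime_t now;

            if (q->skip_sock_check)
                    goto skip;

            if (!sk || !sk_fullsock(sk))
                    return false;

            /* ... SO_TXTIME, sk_clockid and sk_txtime_deadline_mode checks ... */

    skip:
            now = q->get_time();
            if (ktime_before(txtime, now) || ktime_before(txtime, q->last))
                    return false;

            return true;
    }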
@@ -104,7 +110,7 @@
         struct etf_sched_data *q = qdisc_priv(sch);
         struct rb_node *p;
 
-        p = rb_first(&q->head);
+        p = rb_first_cached(&q->head);
         if (!p)
                 return NULL;
 
@@ -117,8 +123,10 @@
         struct sk_buff *skb = etf_peek_timesortedlist(sch);
         ktime_t next;
 
-        if (!skb)
+        if (!skb) {
+                qdisc_watchdog_cancel(&q->watchdog);
                 return;
+        }
 
         next = ktime_sub_ns(skb->tstamp, q->delta);
         qdisc_watchdog_schedule_ns(&q->watchdog, ktime_to_ns(next));
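Before this change an armed watchdog stayed armed even after the queue drained; cancelling it when peek returns NULL avoids a stale hrtimer firing for nothing. For context, the qdisc watchdog is an hrtimer bound to the qdisc. A sketch of its lifecycle (the example_* names are illustrative; the qdisc_watchdog_* calls are the real API):

    #include <net/pkt_sched.h>

    static int example_init(struct Qdisc *sch, struct nlattr *opt,
                            struct netlink_ext_ack *extack)
    {
            struct etf_sched_data *q = qdisc_priv(sch);

            qdisc_watchdog_init(&q->watchdog, sch); /* bind hrtimer to qdisc */
            return 0;
    }

    static void example_rearm(struct Qdisc *sch, ktime_t txtime, s32 delta)
    {
            struct etf_sched_data *q = qdisc_priv(sch);

            /* Fire 'delta' ns before txtime so dequeue runs in time. */
            qdisc_watchdog_schedule_ns(&q->watchdog,
                                       ktime_to_ns(ktime_sub_ns(txtime, delta)));
    }

    static void example_idle(struct Qdisc *sch)
    {
            struct etf_sched_data *q = qdisc_priv(sch);

            qdisc_watchdog_cancel(&q->watchdog);    /* queue empty: disarm */
    }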
@@ -155,8 +163,9 @@
                                       struct sk_buff **to_free)
 {
         struct etf_sched_data *q = qdisc_priv(sch);
-        struct rb_node **p = &q->head.rb_node, *parent = NULL;
+        struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL;
         ktime_t txtime = nskb->tstamp;
+        bool leftmost = true;
 
         if (!is_packet_valid(sch, nskb)) {
                 report_sock_error(nskb, EINVAL,
@@ -169,13 +178,15 @@
 
                 parent = *p;
                 skb = rb_to_skb(parent);
-                if (ktime_after(txtime, skb->tstamp))
+                if (ktime_compare(txtime, skb->tstamp) >= 0) {
                         p = &parent->rb_right;
-                else
+                        leftmost = false;
+                } else {
                         p = &parent->rb_left;
+                }
         }
         rb_link_node(&nskb->rbnode, parent, p);
-        rb_insert_color(&nskb->rbnode, &q->head);
+        rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost);
 
         qdisc_qstats_backlog_inc(sch, nskb);
         sch->q.qlen++;
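Two behavioral details hide in this hunk. First, the descent test changes from the strict ktime_after() to ktime_compare() >= 0, so a new packet whose txtime ties an already-queued one goes right and therefore dequeues after it; with the old test the tie went left and the newcomer jumped the queue. Second, leftmost starts true and is cleared the first time the walk goes right, so it ends up true exactly when the new node becomes the tree minimum, which is the hint rb_insert_color_cached() needs to maintain its cached pointer. A hypothetical predicate spelling out the tie rule:

    #include <linux/ktime.h>

    /* Ties on txtime must descend right so that equal-txtime packets
     * keep their enqueue (FIFO) order. */
    static bool descend_right(ktime_t new_txtime, ktime_t queued_txtime)
    {
            return ktime_compare(new_txtime, queued_txtime) >= 0;
    }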
@@ -186,12 +197,42 @@
         return NET_XMIT_SUCCESS;
 }
 
-static void timesortedlist_erase(struct Qdisc *sch, struct sk_buff *skb,
-                                 bool drop)
+static void timesortedlist_drop(struct Qdisc *sch, struct sk_buff *skb,
+                                ktime_t now)
+{
+        struct etf_sched_data *q = qdisc_priv(sch);
+        struct sk_buff *to_free = NULL;
+        struct sk_buff *tmp = NULL;
+
+        skb_rbtree_walk_from_safe(skb, tmp) {
+                if (ktime_after(skb->tstamp, now))
+                        break;
+
+                rb_erase_cached(&skb->rbnode, &q->head);
+
+                /* The rbnode field in the skb re-uses these fields, now that
+                 * we are done with the rbnode, reset them.
+                 */
+                skb->next = NULL;
+                skb->prev = NULL;
+                skb->dev = qdisc_dev(sch);
+
+                report_sock_error(skb, ECANCELED, SO_EE_CODE_TXTIME_MISSED);
+
+                qdisc_qstats_backlog_dec(sch, skb);
+                qdisc_drop(skb, sch, &to_free);
+                qdisc_qstats_overlimit(sch);
+                sch->q.qlen--;
+        }
+
+        kfree_skb_list(to_free);
+}
+
+static void timesortedlist_remove(struct Qdisc *sch, struct sk_buff *skb)
 {
         struct etf_sched_data *q = qdisc_priv(sch);
 
-        rb_erase(&skb->rbnode, &q->head);
+        rb_erase_cached(&skb->rbnode, &q->head);
 
         /* The rbnode field in the skb re-uses these fields, now that
          * we are done with the rbnode, reset them.
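The new timesortedlist_drop() purges every expired packet in one pass: skb_rbtree_walk_from_safe() iterates in txtime order starting from the given skb (safe against erasure of the current node), and the walk stops at the first packet whose txtime is still in the future. The next/prev/dev resets matter because the rbtree linkage shares storage with those fields in struct sk_buff, roughly like this (abridged from include/linux/skbuff.h; layout hedged, not verbatim):

    struct sk_buff {
            union {
                    struct {
                            struct sk_buff          *next;
                            struct sk_buff          *prev;
                            union {
                                    struct net_device *dev;
                                    unsigned long   dev_scratch;
                            };
                    };
                    struct rb_node  rbnode; /* used by netem, TCP, ETF, ... */
            };
            /* ... */
    };

Leaving rbtree pointer residue in next/prev would confuse any list-based code the skb reaches afterwards, hence the explicit reset once the skb leaves the tree.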
@@ -202,19 +243,9 @@
 
         qdisc_qstats_backlog_dec(sch, skb);
 
-        if (drop) {
-                struct sk_buff *to_free = NULL;
+        qdisc_bstats_update(sch, skb);
 
-                report_sock_error(skb, ECANCELED, SO_EE_CODE_TXTIME_MISSED);
-
-                qdisc_drop(skb, sch, &to_free);
-                kfree_skb_list(to_free);
-                qdisc_qstats_overlimit(sch);
-        } else {
-                qdisc_bstats_update(sch, skb);
-
-                q->last = skb->tstamp;
-        }
+        q->last = skb->tstamp;
 
         sch->q.qlen--;
 }
@@ -233,7 +264,7 @@
 
         /* Drop if packet has expired while in queue. */
         if (ktime_before(skb->tstamp, now)) {
-                timesortedlist_erase(sch, skb, true);
+                timesortedlist_drop(sch, skb, now);
                 skb = NULL;
                 goto out;
         }
@@ -242,7 +273,7 @@
          * txtime from deadline to (now + delta).
          */
         if (q->deadline_mode) {
-                timesortedlist_erase(sch, skb, false);
+                timesortedlist_remove(sch, skb);
                 skb->tstamp = now;
                 goto out;
         }
@@ -251,7 +282,7 @@
 
         /* Dequeue only if now is within the [txtime - delta, txtime] range. */
         if (ktime_after(now, next))
-                timesortedlist_erase(sch, skb, false);
+                timesortedlist_remove(sch, skb);
         else
                 skb = NULL;
 
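With next = txtime - delta, the skb is released only inside the window [txtime - delta, txtime]: the expiry check earlier in the function has already dropped anything with txtime < now, and this check holds the packet back until now passes txtime - delta. An illustrative helper making the window explicit (not part of the patch):

    #include <linux/ktime.h>

    /* True when 'now' falls inside [txtime - delta, txtime]. */
    static bool in_release_window(ktime_t now, ktime_t txtime, s32 delta)
    {
            return ktime_after(now, ktime_sub_ns(txtime, delta)) &&
                   !ktime_before(txtime, now);
    }

For example, with delta = 300000 (a 300 us fudge factor), a packet stamped for time T becomes eligible at T - 300 us and is dropped once now passes T.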
@@ -327,7 +358,8 @@
                 return -EINVAL;
         }
 
-        err = nla_parse_nested(tb, TCA_ETF_MAX, opt, etf_policy, extack);
+        err = nla_parse_nested_deprecated(tb, TCA_ETF_MAX, opt, etf_policy,
+                                          extack);
         if (err < 0)
                 return err;
 
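This is part of the tree-wide strict netlink validation rework: nla_parse_nested() became strict, and callers that must keep accepting old-style attributes (no NLA_F_NESTED flag on the nest, liberal validation) were renamed to nla_parse_nested_deprecated(). Its dump-side twin is nla_nest_start_noflag(), which a hunk further down adopts. A sketch of the pairing, assuming illustrative EX_MAX and ex_policy definitions:

    #include <net/netlink.h>

    static int ex_parse(struct nlattr *opt, struct nlattr **tb,
                        struct netlink_ext_ack *extack)
    {
            /* Liberal parsing; the nest may lack NLA_F_NESTED. */
            return nla_parse_nested_deprecated(tb, EX_MAX, opt, ex_policy,
                                               extack);
    }

    static int ex_dump(struct sk_buff *skb)
    {
            /* Emit the nest without NLA_F_NESTED, as old userspace expects. */
            struct nlattr *nest = nla_nest_start_noflag(skb, TCA_OPTIONS);

            if (!nest)
                    return -EMSGSIZE;
            /* ... nla_put() the attributes ... */
            return nla_nest_end(skb, nest);
    }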
@@ -360,6 +392,7 @@
         q->clockid = qopt->clockid;
         q->offload = OFFLOAD_IS_ON(qopt);
         q->deadline_mode = DEADLINE_MODE_IS_ON(qopt);
+        q->skip_sock_check = SKIP_SOCK_CHECK_IS_SET(qopt);
 
         switch (q->clockid) {
         case CLOCK_REALTIME:
@@ -387,14 +420,14 @@
 static void timesortedlist_clear(struct Qdisc *sch)
 {
         struct etf_sched_data *q = qdisc_priv(sch);
-        struct rb_node *p = rb_first(&q->head);
+        struct rb_node *p = rb_first_cached(&q->head);
 
         while (p) {
                 struct sk_buff *skb = rb_to_skb(p);
 
                 p = rb_next(p);
 
-                rb_erase(&skb->rbnode, &q->head);
+                rb_erase_cached(&skb->rbnode, &q->head);
                 rtnl_kfree_skbs(skb, skb);
                 sch->q.qlen--;
         }
@@ -411,9 +444,6 @@
         /* No matter which mode we are on, it's safe to clear both lists. */
         timesortedlist_clear(sch);
         __qdisc_reset_queue(&sch->q);
-
-        sch->qstats.backlog = 0;
-        sch->q.qlen = 0;
 
         q->last = 0;
 }
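The two zeroing statements could go because the generic core already does this: qdisc_reset() clears qlen and backlog itself after invoking the qdisc's ->reset() hook, so repeating it here was redundant. Abridged shape of the generic path (from net/sched/sch_generic.c; hedged, not verbatim):

    void qdisc_reset(struct Qdisc *qdisc)
    {
            const struct Qdisc_ops *ops = qdisc->ops;

            if (ops->reset)
                    ops->reset(qdisc);      /* e.g. etf_reset() above */

            /* ... purge gso_skb and skb_bad_txq ... */

            qdisc->q.qlen = 0;
            qdisc->qstats.backlog = 0;
    }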
@@ -436,7 +466,7 @@
         struct tc_etf_qopt opt = { };
         struct nlattr *nest;
 
-        nest = nla_nest_start(skb, TCA_OPTIONS);
+        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
         if (!nest)
                 goto nla_put_failure;
 
@@ -448,6 +478,9 @@
         if (q->deadline_mode)
                 opt.flags |= TC_ETF_DEADLINE_MODE_ON;
 
+        if (q->skip_sock_check)
+                opt.flags |= TC_ETF_SKIP_SOCK_CHECK;
+
         if (nla_put(skb, TCA_ETF_PARMS, sizeof(opt), &opt))
                 goto nla_put_failure;
 
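Round trip: the flag parsed from tc_etf_qopt on the init path is reported back here so a dump reflects it. From userspace the whole feature is driven by the flags word of TCA_ETF_PARMS; a sketch of the payload a configurator would build (iproute2's tc exposes this as the etf 'skip_sock_check' keyword; the delta and clockid values are illustrative):

    #include <linux/pkt_sched.h>
    #include <time.h>

    static const struct tc_etf_qopt opt = {
            .delta          = 300000,               /* ns */
            .clockid        = CLOCK_TAI,
            .flags          = TC_ETF_SKIP_SOCK_CHECK,
    };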