--- a/net/sched/sch_etf.c
+++ b/net/sched/sch_etf.c
@@ -22,15 +22,17 @@
 
 #define DEADLINE_MODE_IS_ON(x) ((x)->flags & TC_ETF_DEADLINE_MODE_ON)
 #define OFFLOAD_IS_ON(x) ((x)->flags & TC_ETF_OFFLOAD_ON)
+#define SKIP_SOCK_CHECK_IS_SET(x) ((x)->flags & TC_ETF_SKIP_SOCK_CHECK)
 
 struct etf_sched_data {
         bool offload;
         bool deadline_mode;
+        bool skip_sock_check;
         int clockid;
         int queue;
         s32 delta; /* in ns */
         ktime_t last; /* The txtime of the last skb sent to the netdevice. */
-        struct rb_root head;
+        struct rb_root_cached head;
         struct qdisc_watchdog watchdog;
         ktime_t (*get_time)(void);
 };
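The queue's rbtree switches from struct rb_root to struct rb_root_cached, which additionally caches a pointer to the leftmost node, i.e. the packet with the earliest txtime. A minimal sketch of the cached-rbtree API adopted here (the helper names are illustrative, not part of the patch):

#include <linux/rbtree.h>

/* A cached rbtree tracks its leftmost node, so the minimum element is
 * found with a pointer read instead of an O(log n) descent. */
static struct rb_root_cached root = RB_ROOT_CACHED;

static struct rb_node *peek_min(void)
{
        return rb_first_cached(&root);
}

static void insert_node(struct rb_node *node, struct rb_node *parent,
                        struct rb_node **link, bool leftmost)
{
        rb_link_node(node, parent, link);
        /* 'leftmost' must be true iff only left branches were taken while
         * descending; it tells the rbtree code whether the cached minimum
         * needs updating. */
        rb_insert_color_cached(node, &root, leftmost);
}

This pays off for ETF because every dequeue attempt begins by peeking at the earliest-txtime packet.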
@@ -77,6 +79,9 @@
         struct sock *sk = nskb->sk;
         ktime_t now;
 
+        if (q->skip_sock_check)
+                goto skip;
+
         if (!sk || !sk_fullsock(sk))
                 return false;
 
@@ -92,6 +97,7 @@
         if (sk->sk_txtime_deadline_mode != q->deadline_mode)
                 return false;
 
+skip:
         now = q->get_time();
         if (ktime_before(txtime, now) || ktime_before(txtime, q->last))
                 return false;
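With skip_sock_check set, validation jumps straight to the txtime sanity checks, so a packet no longer has to originate from a local socket that opted in to SO_TXTIME; that is what makes ETF usable for forwarded traffic, which carries no local socket. For reference, this is the per-socket opt-in that the check normally enforces; a minimal userspace sketch, assuming current kernel headers (the helper name is illustrative):

#include <linux/net_tstamp.h>
#include <sys/socket.h>
#include <time.h>

/* Opt a socket in to SO_TXTIME; is_packet_valid() normally matches the
 * clockid and deadline-mode settings here against the qdisc's own. */
static int enable_so_txtime(int fd)
{
        struct sock_txtime cfg = {
                .clockid = CLOCK_TAI,
                .flags   = SOF_TXTIME_REPORT_ERRORS,
        };

        return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
}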
@@ -104,7 +110,7 @@
         struct etf_sched_data *q = qdisc_priv(sch);
         struct rb_node *p;
 
-        p = rb_first(&q->head);
+        p = rb_first_cached(&q->head);
         if (!p)
                 return NULL;
 
@@ -117,8 +123,10 @@
         struct sk_buff *skb = etf_peek_timesortedlist(sch);
         ktime_t next;
 
-        if (!skb)
+        if (!skb) {
+                qdisc_watchdog_cancel(&q->watchdog);
                 return;
+        }
 
         next = ktime_sub_ns(skb->tstamp, q->delta);
         qdisc_watchdog_schedule_ns(&q->watchdog, ktime_to_ns(next));
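When the tree is empty there is no next txtime to wait for, so the watchdog is now cancelled rather than left armed: a timer set for a packet that has since been sent or dropped would otherwise still fire and wake the qdisc for nothing.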
@@ -155,8 +163,9 @@
                                       struct sk_buff **to_free)
 {
         struct etf_sched_data *q = qdisc_priv(sch);
-        struct rb_node **p = &q->head.rb_node, *parent = NULL;
+        struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL;
         ktime_t txtime = nskb->tstamp;
+        bool leftmost = true;
 
         if (!is_packet_valid(sch, nskb)) {
                 report_sock_error(nskb, EINVAL,
@@ -169,13 +178,15 @@
 
                 parent = *p;
                 skb = rb_to_skb(parent);
-                if (ktime_after(txtime, skb->tstamp))
+                if (ktime_compare(txtime, skb->tstamp) >= 0) {
                         p = &parent->rb_right;
-                else
+                        leftmost = false;
+                } else {
                         p = &parent->rb_left;
+                }
         }
         rb_link_node(&nskb->rbnode, parent, p);
-        rb_insert_color(&nskb->rbnode, &q->head);
+        rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost);
 
         qdisc_qstats_backlog_inc(sch, nskb);
         sch->q.qlen++;
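Two details in the insertion loop: the comparison changes from ktime_after() (strictly greater) to ktime_compare() >= 0, so a packet whose txtime ties an already-queued one descends right and keeps FIFO order among equal txtimes; and leftmost is cleared on the first right turn, so rb_insert_color_cached() only updates the cached minimum when the new packet really is the earliest.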
@@ -186,12 +197,42 @@
         return NET_XMIT_SUCCESS;
 }
 
-static void timesortedlist_erase(struct Qdisc *sch, struct sk_buff *skb,
-                                 bool drop)
+static void timesortedlist_drop(struct Qdisc *sch, struct sk_buff *skb,
+                                ktime_t now)
+{
+        struct etf_sched_data *q = qdisc_priv(sch);
+        struct sk_buff *to_free = NULL;
+        struct sk_buff *tmp = NULL;
+
+        skb_rbtree_walk_from_safe(skb, tmp) {
+                if (ktime_after(skb->tstamp, now))
+                        break;
+
+                rb_erase_cached(&skb->rbnode, &q->head);
+
+                /* The rbnode field in the skb re-uses these fields, now that
+                 * we are done with the rbnode, reset them.
+                 */
+                skb->next = NULL;
+                skb->prev = NULL;
+                skb->dev = qdisc_dev(sch);
+
+                report_sock_error(skb, ECANCELED, SO_EE_CODE_TXTIME_MISSED);
+
+                qdisc_qstats_backlog_dec(sch, skb);
+                qdisc_drop(skb, sch, &to_free);
+                qdisc_qstats_overlimit(sch);
+                sch->q.qlen--;
+        }
+
+        kfree_skb_list(to_free);
+}
+
+static void timesortedlist_remove(struct Qdisc *sch, struct sk_buff *skb)
 {
         struct etf_sched_data *q = qdisc_priv(sch);
 
-        rb_erase(&skb->rbnode, &q->head);
+        rb_erase_cached(&skb->rbnode, &q->head);
 
         /* The rbnode field in the skb re-uses these fields, now that
          * we are done with the rbnode, reset them.
@@ -202,19 +243,9 @@
 
         qdisc_qstats_backlog_dec(sch, skb);
 
-        if (drop) {
-                struct sk_buff *to_free = NULL;
+        qdisc_bstats_update(sch, skb);
 
-                report_sock_error(skb, ECANCELED, SO_EE_CODE_TXTIME_MISSED);
-
-                qdisc_drop(skb, sch, &to_free);
-                kfree_skb_list(to_free);
-                qdisc_qstats_overlimit(sch);
-        } else {
-                qdisc_bstats_update(sch, skb);
-
-                q->last = skb->tstamp;
-        }
+        q->last = skb->tstamp;
 
         sch->q.qlen--;
 }
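The old timesortedlist_erase(sch, skb, drop) is split into two single-purpose helpers, and the drop path changes behavior: skb_rbtree_walk_from_safe() walks forward from the first expired packet and purges every packet whose txtime has already passed in one pass, rather than one per dequeue attempt, telling each victim's socket why via the error queue. A userspace sketch of reading those reports, assuming the socket opted in with SOF_TXTIME_REPORT_ERRORS (the helper name is illustrative):

#include <linux/errqueue.h>
#include <stdio.h>
#include <sys/socket.h>

/* Drain one report from the socket error queue; ECANCELED with
 * SO_EE_CODE_TXTIME_MISSED corresponds to the drop path above. */
static void read_txtime_error(int fd)
{
        char ctrl[CMSG_SPACE(sizeof(struct sock_extended_err))];
        char data[256];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
                .msg_iov        = &iov,
                .msg_iovlen     = 1,
                .msg_control    = ctrl,
                .msg_controllen = sizeof(ctrl),
        };
        struct cmsghdr *cm;

        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
                return;

        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                struct sock_extended_err *ee = (void *)CMSG_DATA(cm);

                if (ee->ee_origin == SO_EE_ORIGIN_TXTIME &&
                    ee->ee_code == SO_EE_CODE_TXTIME_MISSED)
                        fprintf(stderr, "txtime missed, packet dropped\n");
        }
}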
@@ -233,7 +264,7 @@
 
         /* Drop if packet has expired while in queue. */
         if (ktime_before(skb->tstamp, now)) {
-                timesortedlist_erase(sch, skb, true);
+                timesortedlist_drop(sch, skb, now);
                 skb = NULL;
                 goto out;
         }
@@ -242,7 +273,7 @@
          * txtime from deadline to (now + delta).
          */
         if (q->deadline_mode) {
-                timesortedlist_erase(sch, skb, false);
+                timesortedlist_remove(sch, skb);
                 skb->tstamp = now;
                 goto out;
         }
@@ -251,7 +282,7 @@
 
         /* Dequeue only if now is within the [txtime - delta, txtime] range. */
         if (ktime_after(now, next))
-                timesortedlist_erase(sch, skb, false);
+                timesortedlist_remove(sch, skb);
         else
                 skb = NULL;
 
@@ -327,7 +358,8 @@
                 return -EINVAL;
         }
 
-        err = nla_parse_nested(tb, TCA_ETF_MAX, opt, etf_policy, extack);
+        err = nla_parse_nested_deprecated(tb, TCA_ETF_MAX, opt, etf_policy,
+                                          extack);
         if (err < 0)
                 return err;
 
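The parser rename comes from the treewide netlink strict-validation split: plain nla_parse_nested() now enforces the NLA_F_NESTED flag and strict attribute checking, so established interfaces such as this one keep their historical, more liberal parsing by calling the _deprecated variant. No uapi change is implied.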
@@ -360,6 +392,7 @@
         q->clockid = qopt->clockid;
         q->offload = OFFLOAD_IS_ON(qopt);
         q->deadline_mode = DEADLINE_MODE_IS_ON(qopt);
+        q->skip_sock_check = SKIP_SOCK_CHECK_IS_SET(qopt);
 
         switch (q->clockid) {
         case CLOCK_REALTIME:
@@ -387,14 +420,14 @@
 static void timesortedlist_clear(struct Qdisc *sch)
 {
         struct etf_sched_data *q = qdisc_priv(sch);
-        struct rb_node *p = rb_first(&q->head);
+        struct rb_node *p = rb_first_cached(&q->head);
 
         while (p) {
                 struct sk_buff *skb = rb_to_skb(p);
 
                 p = rb_next(p);
 
-                rb_erase(&skb->rbnode, &q->head);
+                rb_erase_cached(&skb->rbnode, &q->head);
                 rtnl_kfree_skbs(skb, skb);
                 sch->q.qlen--;
         }
@@ -411,9 +444,6 @@
         /* No matter which mode we are on, it's safe to clear both lists. */
         timesortedlist_clear(sch);
         __qdisc_reset_queue(&sch->q);
-
-        sch->qstats.backlog = 0;
-        sch->q.qlen = 0;
 
         q->last = 0;
 }
@@ -436,7 +466,7 @@
         struct tc_etf_qopt opt = { };
         struct nlattr *nest;
 
-        nest = nla_nest_start(skb, TCA_OPTIONS);
+        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
         if (!nest)
                 goto nla_put_failure;
 
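nla_nest_start_noflag() is the dump-side counterpart of the same netlink split: it behaves like the old nla_nest_start(), opening the nest without setting NLA_F_NESTED, which matches what the _deprecated parser above accepts.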
@@ -448,6 +478,9 @@
         if (q->deadline_mode)
                 opt.flags |= TC_ETF_DEADLINE_MODE_ON;
 
+        if (q->skip_sock_check)
+                opt.flags |= TC_ETF_SKIP_SOCK_CHECK;
+
         if (nla_put(skb, TCA_ETF_PARMS, sizeof(opt), &opt))
                 goto nla_put_failure;
 
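With the parse, dump, and validation changes in place, the new flag works end to end. Assuming an iproute2 that knows the option, configuration looks something like "tc qdisc replace dev eth0 parent 100:1 etf clockid CLOCK_TAI delta 300000 skip_sock_check" (device, handle, and delta values are illustrative), and the flag is then reported back by "tc qdisc show".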
---|