2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/net/sched/sch_cake.c
@@ -138,8 +138,8 @@
 struct cake_host {
	u32 srchost_tag;
	u32 dsthost_tag;
-	u16 srchost_refcnt;
-	u16 dsthost_refcnt;
+	u16 srchost_bulk_flow_count;
+	u16 dsthost_bulk_flow_count;
 };
 
 struct cake_heap_entry {
@@ -173,8 +173,7 @@
	u64	tin_rate_bps;
	u16	tin_rate_shft;
 
-	u16	tin_quantum_prio;
-	u16	tin_quantum_band;
+	u16	tin_quantum;
	s32	tin_deficit;
	u32	tin_backlog;
	u32	tin_dropped;
@@ -210,6 +209,9 @@
	u8	flow_mode;
	u8	ack_filter;
	u8	atm_mode;
+
+	u32	fwmark_mask;
+	u16	fwmark_shft;
 
	/* time_next = time_this + ((len * rate_ns) >> rate_shft) */
	u16	rate_shft;
@@ -310,8 +312,8 @@
 };
 
 static const u8 diffserv8[] = {
-	2, 5, 1, 2, 4, 2, 2, 2,
-	0, 2, 1, 2, 1, 2, 1, 2,
+	2, 0, 1, 2, 4, 2, 2, 2,
+	1, 2, 1, 2, 1, 2, 1, 2,
	5, 2, 4, 2, 4, 2, 4, 2,
	3, 2, 3, 2, 3, 2, 3, 2,
	6, 2, 3, 2, 3, 2, 3, 2,
@@ -321,7 +323,7 @@
 };
 
 static const u8 diffserv4[] = {
-	0, 2, 0, 0, 2, 0, 0, 0,
+	0, 1, 0, 0, 2, 0, 0, 0,
	1, 0, 0, 0, 0, 0, 0, 0,
	2, 0, 2, 0, 2, 0, 2, 0,
	2, 0, 2, 0, 2, 0, 2, 0,
@@ -332,7 +334,7 @@
 };
 
 static const u8 diffserv3[] = {
-	0, 0, 0, 0, 2, 0, 0, 0,
+	0, 1, 0, 0, 2, 0, 0, 0,
	1, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
@@ -582,26 +584,48 @@
	return drop;
 }
 
-static void cake_update_flowkeys(struct flow_keys *keys,
+static bool cake_update_flowkeys(struct flow_keys *keys,
				 const struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
	struct nf_conntrack_tuple tuple = {};
-	bool rev = !skb->_nfct;
+	bool rev = !skb->_nfct, upd = false;
+	__be32 ip;
 
	if (skb_protocol(skb, true) != htons(ETH_P_IP))
-		return;
+		return false;
 
	if (!nf_ct_get_tuple_skb(&tuple, skb))
-		return;
+		return false;
 
-	keys->addrs.v4addrs.src = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
-	keys->addrs.v4addrs.dst = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
+	ip = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
+	if (ip != keys->addrs.v4addrs.src) {
+		keys->addrs.v4addrs.src = ip;
+		upd = true;
+	}
+	ip = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
+	if (ip != keys->addrs.v4addrs.dst) {
+		keys->addrs.v4addrs.dst = ip;
+		upd = true;
+	}
 
	if (keys->ports.ports) {
-		keys->ports.src = rev ? tuple.dst.u.all : tuple.src.u.all;
-		keys->ports.dst = rev ? tuple.src.u.all : tuple.dst.u.all;
+		__be16 port;
+
+		port = rev ? tuple.dst.u.all : tuple.src.u.all;
+		if (port != keys->ports.src) {
+			keys->ports.src = port;
+			upd = true;
+		}
+		port = rev ? tuple.src.u.all : tuple.dst.u.all;
+		if (port != keys->ports.dst) {
+			keys->ports.dst = port;
+			upd = true;
+		}
	}
+	return upd;
+#else
+	return false;
 #endif
 }
 
@@ -622,23 +646,36 @@
 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
		     int flow_mode, u16 flow_override, u16 host_override)
 {
+	bool hash_flows = (!flow_override && !!(flow_mode & CAKE_FLOW_FLOWS));
+	bool hash_hosts = (!host_override && !!(flow_mode & CAKE_FLOW_HOSTS));
+	bool nat_enabled = !!(flow_mode & CAKE_FLOW_NAT_FLAG);
	u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
	u16 reduced_hash, srchost_idx, dsthost_idx;
	struct flow_keys keys, host_keys;
+	bool use_skbhash = skb->l4_hash;
 
	if (unlikely(flow_mode == CAKE_FLOW_NONE))
		return 0;
 
-	/* If both overrides are set we can skip packet dissection entirely */
-	if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) &&
-	    (host_override || !(flow_mode & CAKE_FLOW_HOSTS)))
+	/* If both overrides are set, or we can use the SKB hash and nat mode is
+	 * disabled, we can skip packet dissection entirely. If nat mode is
+	 * enabled there's another check below after doing the conntrack lookup.
+	 */
+	if ((!hash_flows || (use_skbhash && !nat_enabled)) && !hash_hosts)
		goto skip_hash;
 
	skb_flow_dissect_flow_keys(skb, &keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
-	if (flow_mode & CAKE_FLOW_NAT_FLAG)
-		cake_update_flowkeys(&keys, skb);
+	/* Don't use the SKB hash if we change the lookup keys from conntrack */
+	if (nat_enabled && cake_update_flowkeys(&keys, skb))
+		use_skbhash = false;
+
+	/* If we can still use the SKB hash and don't need the host hash, we can
+	 * skip the rest of the hashing procedure
+	 */
+	if (use_skbhash && !hash_hosts)
+		goto skip_hash;
 
	/* flow_hash_from_keys() sorts the addresses by value, so we have
	 * to preserve their order in a separate data structure to treat
@@ -677,12 +714,14 @@
	/* This *must* be after the above switch, since as a
	 * side-effect it sorts the src and dst addresses.
	 */
-	if (flow_mode & CAKE_FLOW_FLOWS)
+	if (hash_flows && !use_skbhash)
		flow_hash = flow_hash_from_keys(&keys);
 
 skip_hash:
	if (flow_override)
		flow_hash = flow_override - 1;
+	else if (use_skbhash && (flow_mode & CAKE_FLOW_FLOWS))
+		flow_hash = skb->hash;
	if (host_override) {
		dsthost_hash = host_override - 1;
		srchost_hash = host_override - 1;
@@ -746,8 +785,10 @@
		 * queue, accept the collision, update the host tags.
		 */
		q->way_collisions++;
-		q->hosts[q->flows[reduced_hash].srchost].srchost_refcnt--;
-		q->hosts[q->flows[reduced_hash].dsthost].dsthost_refcnt--;
+		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
+			q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
+			q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+		}
		allocate_src = cake_dsrc(flow_mode);
		allocate_dst = cake_ddst(flow_mode);
 found:
@@ -767,13 +808,14 @@
			}
			for (i = 0; i < CAKE_SET_WAYS;
			     i++, k = (k + 1) % CAKE_SET_WAYS) {
-				if (!q->hosts[outer_hash + k].srchost_refcnt)
+				if (!q->hosts[outer_hash + k].srchost_bulk_flow_count)
					break;
			}
			q->hosts[outer_hash + k].srchost_tag = srchost_hash;
 found_src:
			srchost_idx = outer_hash + k;
-			q->hosts[srchost_idx].srchost_refcnt++;
+			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+				q->hosts[srchost_idx].srchost_bulk_flow_count++;
			q->flows[reduced_hash].srchost = srchost_idx;
		}
 
@@ -789,13 +831,14 @@
			}
			for (i = 0; i < CAKE_SET_WAYS;
			     i++, k = (k + 1) % CAKE_SET_WAYS) {
-				if (!q->hosts[outer_hash + k].dsthost_refcnt)
+				if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count)
					break;
			}
			q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
 found_dst:
			dsthost_idx = outer_hash + k;
-			q->hosts[dsthost_idx].dsthost_refcnt++;
+			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+				q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
			q->flows[reduced_hash].dsthost = dsthost_idx;
		}
	}
@@ -812,7 +855,7 @@
 
	if (skb) {
		flow->head = skb->next;
-		skb->next = NULL;
+		skb_mark_not_on_list(skb);
	}
 
	return skb;
@@ -1256,7 +1299,7 @@
	else
		flow->head = elig_ack->next;
 
-	elig_ack->next = NULL;
+	skb_mark_not_on_list(elig_ack);
 
	return elig_ack;
 }
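
Both hunks above swap an open-coded `skb->next = NULL` for the named helper. For reference, skb_mark_not_on_list() (from include/linux/skbuff.h upstream) is simply:

	static inline void skb_mark_not_on_list(struct sk_buff *skb)
	{
		skb->next = NULL;
	}

so behaviour is unchanged; the helper just makes list-unlinking sites explicit and greppable.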
@@ -1572,7 +1615,7 @@
			     struct sk_buff *skb)
 {
	struct cake_sched_data *q = qdisc_priv(sch);
-	u32 tin;
+	u32 tin, mark;
	bool wash;
	u8 dscp;
 
@@ -1580,12 +1623,16 @@
	 * using firewall marks or skb->priority. Call DSCP parsing early if
	 * wash is enabled, otherwise defer to below to skip unneeded parsing.
	 */
+	mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
	wash = !!(q->rate_flags & CAKE_FLAG_WASH);
	if (wash)
		dscp = cake_handle_diffserv(skb, wash);
 
	if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
		tin = 0;
+
+	else if (mark && mark <= q->tin_cnt)
+		tin = q->tin_order[mark - 1];
 
	else if (TC_H_MAJ(skb->priority) == sch->handle &&
		 TC_H_MIN(skb->priority) > 0 &&
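
The firewall-mark lookup added in this hunk is compact enough to miss: the mask selects a bit-field of skb->mark, the shift (derived from __ffs() at configuration time, in a later hunk) right-aligns it, and the resulting 1-based value indexes tin_order[]. A minimal standalone sketch of the same arithmetic (a hypothetical helper; the mask value is only an example):

	/* With fwmark_mask = 0x0000ff00, fwmark_shft = __ffs(mask) = 8, so
	 * skb->mark = 0x0300 yields mark = 3 and selects tin_order[2];
	 * mark == 0 or mark > tin_cnt falls through to the DSCP/priority
	 * based selection.
	 */
	static int fwmark_to_tin(u32 skb_mark, u32 fwmark_mask,
				 u16 fwmark_shft, u16 tin_cnt)
	{
		u32 mark = (skb_mark & fwmark_mask) >> fwmark_shft;

		return (mark && mark <= tin_cnt) ? (int)mark - 1 : -1;
	}

Here -1 stands in for "no usable mark, take the normal selection path".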
@@ -1627,7 +1674,7 @@
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-		/* fall through */
+		fallthrough;
	case TC_ACT_SHOT:
		return 0;
	}
@@ -1649,7 +1696,7 @@
 {
	struct cake_sched_data *q = qdisc_priv(sch);
	int len = qdisc_pkt_len(skb);
-	int uninitialized_var(ret);
+	int ret;
	struct sk_buff *ack = NULL;
	ktime_t now = ktime_get();
	struct cake_tin_data *b;
@@ -1700,9 +1747,8 @@
		if (IS_ERR_OR_NULL(segs))
			return qdisc_drop(skb, sch, to_free);
 
-		while (segs) {
-			nskb = segs->next;
-			segs->next = NULL;
+		skb_list_walk_safe(segs, segs, nskb) {
+			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			cobalt_set_enqueue_time(segs, now);
			get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
@@ -1714,7 +1760,6 @@
			slen += segs->len;
			q->buffer_used += segs->truesize;
			b->packets++;
-			segs = nskb;
		}
 
		/* stats */
@@ -1821,20 +1866,30 @@
		b->sparse_flow_count++;
 
		if (cake_dsrc(q->flow_mode))
-			host_load = max(host_load, srchost->srchost_refcnt);
+			host_load = max(host_load, srchost->srchost_bulk_flow_count);
 
		if (cake_ddst(q->flow_mode))
-			host_load = max(host_load, dsthost->dsthost_refcnt);
+			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
 
		flow->deficit = (b->flow_quantum *
				 quantum_div[host_load]) >> 16;
	} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
+		struct cake_host *srchost = &b->hosts[flow->srchost];
+		struct cake_host *dsthost = &b->hosts[flow->dsthost];
+
		/* this flow was empty, accounted as a sparse flow, but actually
		 * in the bulk rotation.
		 */
		flow->set = CAKE_SET_BULK;
		b->sparse_flow_count--;
		b->bulk_flow_count++;
+
+		if (cake_dsrc(q->flow_mode))
+			srchost->srchost_bulk_flow_count++;
+
+		if (cake_ddst(q->flow_mode))
+			dsthost->dsthost_bulk_flow_count++;
+
	}
 
	if (q->buffer_used > q->buffer_max_used)
@@ -1926,7 +1981,7 @@
	while (b->tin_deficit < 0 ||
	       !(b->sparse_flow_count + b->bulk_flow_count)) {
		if (b->tin_deficit <= 0)
-			b->tin_deficit += b->tin_quantum_band;
+			b->tin_deficit += b->tin_quantum;
		if (b->sparse_flow_count + b->bulk_flow_count)
			empty = false;
 
@@ -2002,23 +2057,8 @@
	dsthost = &b->hosts[flow->dsthost];
	host_load = 1;
 
-	if (cake_dsrc(q->flow_mode))
-		host_load = max(host_load, srchost->srchost_refcnt);
-
-	if (cake_ddst(q->flow_mode))
-		host_load = max(host_load, dsthost->dsthost_refcnt);
-
-	WARN_ON(host_load > CAKE_QUEUES);
-
	/* flow isolation (DRR++) */
	if (flow->deficit <= 0) {
-		/* The shifted prandom_u32() is a way to apply dithering to
-		 * avoid accumulating roundoff errors
-		 */
-		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
-				  (prandom_u32() >> 16)) >> 16;
-		list_move_tail(&flow->flowchain, &b->old_flows);
-
		/* Keep all flows with deficits out of the sparse and decaying
		 * rotations. No non-empty flow can go into the decaying
		 * rotation, so they can't get deficits
@@ -2027,6 +2067,13 @@
			if (flow->head) {
				b->sparse_flow_count--;
				b->bulk_flow_count++;
+
+				if (cake_dsrc(q->flow_mode))
+					srchost->srchost_bulk_flow_count++;
+
+				if (cake_ddst(q->flow_mode))
+					dsthost->dsthost_bulk_flow_count++;
+
				flow->set = CAKE_SET_BULK;
			} else {
				/* we've moved it to the bulk rotation for
@@ -2036,6 +2083,22 @@
				flow->set = CAKE_SET_SPARSE_WAIT;
			}
		}
+
+		if (cake_dsrc(q->flow_mode))
+			host_load = max(host_load, srchost->srchost_bulk_flow_count);
+
+		if (cake_ddst(q->flow_mode))
+			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
+
+		WARN_ON(host_load > CAKE_QUEUES);
+
+		/* The shifted prandom_u32() is a way to apply dithering to
+		 * avoid accumulating roundoff errors
+		 */
+		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
+				  (prandom_u32() >> 16)) >> 16;
+		list_move_tail(&flow->flowchain, &b->old_flows);
+
		goto retry;
	}
 
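
The deficit refill moved to the bottom of this hunk is 16.16 fixed-point arithmetic: quantum_div[i] is initialised elsewhere in sch_cake.c to 65535 / i, so the increment approximates flow_quantum / host_load, and the uniform 16-bit value from prandom_u32() >> 16 dithers the final shift so that truncation error averages out instead of accumulating. Relocating it below the sparse-to-bulk promotion means host_load is computed from the updated bulk flow counts. A userspace sketch of the same computation (illustrative only; rand() stands in for the kernel PRNG):

	#include <stdint.h>
	#include <stdlib.h>

	/* Increment ~= flow_quantum / host_load; the dither added before
	 * the >> 16 rounds up with probability equal to the discarded
	 * fractional part, rather than always truncating.
	 */
	static int32_t deficit_increment(uint16_t flow_quantum, uint16_t host_load)
	{
		uint32_t quantum_div = 65535 / host_load; /* reciprocal in 16.16 */
		uint32_t dither = (uint32_t)rand() & 0xffff;

		return ((uint32_t)flow_quantum * quantum_div + dither) >> 16;
	}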
@@ -2056,6 +2119,13 @@
				       &b->decaying_flows);
			if (flow->set == CAKE_SET_BULK) {
				b->bulk_flow_count--;
+
+				if (cake_dsrc(q->flow_mode))
+					srchost->srchost_bulk_flow_count--;
+
+				if (cake_ddst(q->flow_mode))
+					dsthost->dsthost_bulk_flow_count--;
+
				b->decaying_flow_count++;
			} else if (flow->set == CAKE_SET_SPARSE ||
				   flow->set == CAKE_SET_SPARSE_WAIT) {
@@ -2069,14 +2139,19 @@
			if (flow->set == CAKE_SET_SPARSE ||
			    flow->set == CAKE_SET_SPARSE_WAIT)
				b->sparse_flow_count--;
-			else if (flow->set == CAKE_SET_BULK)
+			else if (flow->set == CAKE_SET_BULK) {
				b->bulk_flow_count--;
-			else
+
+				if (cake_dsrc(q->flow_mode))
+					srchost->srchost_bulk_flow_count--;
+
+				if (cake_ddst(q->flow_mode))
+					dsthost->dsthost_bulk_flow_count--;
+
+			} else
				b->decaying_flow_count--;
 
			flow->set = CAKE_SET_NONE;
-			srchost->srchost_refcnt--;
-			dsthost->dsthost_refcnt--;
		}
		goto begin;
	}
@@ -2149,7 +2224,11 @@
 
 static void cake_reset(struct Qdisc *sch)
 {
+	struct cake_sched_data *q = qdisc_priv(sch);
	u32 c;
+
+	if (!q->tins)
+		return;
 
	for (c = 0; c < CAKE_MAX_TINS; c++)
		cake_clear_tin(sch, c);
@@ -2171,6 +2250,8 @@
	[TCA_CAKE_MPU]		 = { .type = NLA_U32 },
	[TCA_CAKE_INGRESS]	 = { .type = NLA_U32 },
	[TCA_CAKE_ACK_FILTER]	 = { .type = NLA_U32 },
+	[TCA_CAKE_SPLIT_GSO]	 = { .type = NLA_U32 },
+	[TCA_CAKE_FWMARK]	 = { .type = NLA_U32 },
 };
 
 static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
@@ -2226,8 +2307,7 @@
 
	cake_set_rate(b, rate, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));
-	b->tin_quantum_band = 65535;
-	b->tin_quantum_prio = 65535;
+	b->tin_quantum = 65535;
 
	return 0;
 }
@@ -2238,8 +2318,7 @@
	struct cake_sched_data *q = qdisc_priv(sch);
	u32 mtu = psched_mtu(qdisc_dev(sch));
	u64 rate = q->rate_bps;
-	u32 quantum1 = 256;
-	u32 quantum2 = 256;
+	u32 quantum = 256;
	u32 i;
 
	q->tin_cnt = 8;
@@ -2252,18 +2331,14 @@
		cake_set_rate(b, rate, mtu, us_to_ns(q->target),
			      us_to_ns(q->interval));
 
-		b->tin_quantum_prio = max_t(u16, 1U, quantum1);
-		b->tin_quantum_band = max_t(u16, 1U, quantum2);
+		b->tin_quantum = max_t(u16, 1U, quantum);
 
		/* calculate next class's parameters */
		rate *= 7;
		rate >>= 3;
 
-		quantum1 *= 3;
-		quantum1 >>= 1;
-
-		quantum2 *= 7;
-		quantum2 >>= 3;
+		quantum *= 7;
+		quantum >>= 3;
	}
 
	return 0;
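
With the separate priority quantum gone, the single remaining quantum follows the same 7/8 geometric series as the tin rate (rate *= 7; rate >>= 3 above), so each tin's DRR weight stays proportional to its rate share. A quick standalone check of the series the loop produces, clamped to a minimum of 1 as max_t() does:

	#include <stdio.h>

	int main(void)
	{
		unsigned int quantum = 256;
		int i;

		/* prints 256, 224, 196, 171, 149, 130, 113, 98 for 8 tins */
		for (i = 0; i < 8; i++) {
			printf("tin %d: quantum %u\n", i, quantum ? quantum : 1);
			quantum *= 7;	/* same 7/8 step as the rate */
			quantum >>= 3;
		}
		return 0;
	}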
@@ -2332,8 +2407,7 @@
	struct cake_sched_data *q = qdisc_priv(sch);
	u32 mtu = psched_mtu(qdisc_dev(sch));
	u64 rate = q->rate_bps;
-	u32 quantum1 = 256;
-	u32 quantum2 = 256;
+	u32 quantum = 256;
	u32 i;
 
	q->tin_cnt = 8;
@@ -2349,18 +2423,14 @@
		cake_set_rate(b, rate, mtu, us_to_ns(q->target),
			      us_to_ns(q->interval));
 
-		b->tin_quantum_prio = max_t(u16, 1U, quantum1);
-		b->tin_quantum_band = max_t(u16, 1U, quantum2);
+		b->tin_quantum = max_t(u16, 1U, quantum);
 
		/* calculate next class's parameters */
		rate *= 7;
		rate >>= 3;
 
-		quantum1 *= 3;
-		quantum1 >>= 1;
-
-		quantum2 *= 7;
-		quantum2 >>= 3;
+		quantum *= 7;
+		quantum >>= 3;
	}
 
	return 0;
@@ -2399,17 +2469,11 @@
	cake_set_rate(&q->tins[3], rate >> 2, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));
 
-	/* priority weights */
-	q->tins[0].tin_quantum_prio = quantum;
-	q->tins[1].tin_quantum_prio = quantum >> 4;
-	q->tins[2].tin_quantum_prio = quantum << 2;
-	q->tins[3].tin_quantum_prio = quantum << 4;
-
	/* bandwidth-sharing weights */
-	q->tins[0].tin_quantum_band = quantum;
-	q->tins[1].tin_quantum_band = quantum >> 4;
-	q->tins[2].tin_quantum_band = quantum >> 1;
-	q->tins[3].tin_quantum_band = quantum >> 2;
+	q->tins[0].tin_quantum = quantum;
+	q->tins[1].tin_quantum = quantum >> 4;
+	q->tins[2].tin_quantum = quantum >> 1;
+	q->tins[3].tin_quantum = quantum >> 2;
 
	return 0;
 }
@@ -2440,15 +2504,10 @@
	cake_set_rate(&q->tins[2], rate >> 2, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));
 
-	/* priority weights */
-	q->tins[0].tin_quantum_prio = quantum;
-	q->tins[1].tin_quantum_prio = quantum >> 4;
-	q->tins[2].tin_quantum_prio = quantum << 4;
-
	/* bandwidth-sharing weights */
-	q->tins[0].tin_quantum_band = quantum;
-	q->tins[1].tin_quantum_band = quantum >> 4;
-	q->tins[2].tin_quantum_band = quantum >> 2;
+	q->tins[0].tin_quantum = quantum;
+	q->tins[1].tin_quantum = quantum >> 4;
+	q->tins[2].tin_quantum = quantum >> 2;
 
	return 0;
 }
@@ -2517,7 +2576,8 @@
	if (!opt)
		return -EINVAL;
 
-	err = nla_parse_nested(tb, TCA_CAKE_MAX, opt, cake_policy, extack);
+	err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy,
+					  extack);
	if (err < 0)
		return err;
 
@@ -2617,6 +2677,11 @@
		q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
	}
 
+	if (tb[TCA_CAKE_FWMARK]) {
+		q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
+		q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
+	}
+
	if (q->tins) {
		sch_tree_lock(sch);
		cake_reconfigure(sch);
@@ -2712,7 +2777,7 @@
	struct cake_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
 
-	opts = nla_nest_start(skb, TCA_OPTIONS);
+	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_put_failure;
 
@@ -2772,6 +2837,9 @@
		       !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
		goto nla_put_failure;
 
+	if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
+		goto nla_put_failure;
+
	return nla_nest_end(skb, opts);
 
 nla_put_failure:
@@ -2780,7 +2848,7 @@
 
 static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 {
-	struct nlattr *stats = nla_nest_start(d->skb, TCA_STATS_APP);
+	struct nlattr *stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
	struct cake_sched_data *q = qdisc_priv(sch);
	struct nlattr *tstats, *ts;
	int i;
@@ -2810,7 +2878,7 @@
 #undef PUT_STAT_U32
 #undef PUT_STAT_U64
 
-	tstats = nla_nest_start(d->skb, TCA_CAKE_STATS_TIN_STATS);
+	tstats = nla_nest_start_noflag(d->skb, TCA_CAKE_STATS_TIN_STATS);
	if (!tstats)
		goto nla_put_failure;
 
@@ -2827,7 +2895,7 @@
	for (i = 0; i < q->tin_cnt; i++) {
		struct cake_tin_data *b = &q->tins[q->tin_order[i]];
 
-		ts = nla_nest_start(d->skb, i + 1);
+		ts = nla_nest_start_noflag(d->skb, i + 1);
		if (!ts)
			goto nla_put_failure;
 
@@ -2947,7 +3015,7 @@
	if (flow) {
		ktime_t now = ktime_get();
 
-		stats = nla_nest_start(d->skb, TCA_STATS_APP);
+		stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
		if (!stats)
			return -1;
 