@@ -138,8 +138,8 @@
 struct cake_host {
	u32 srchost_tag;
	u32 dsthost_tag;
-	u16 srchost_refcnt;
-	u16 dsthost_refcnt;
+	u16 srchost_bulk_flow_count;
+	u16 dsthost_bulk_flow_count;
 };

 struct cake_heap_entry {
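Note: this is more than a rename. The per-host counters now track only flows
in the bulk rotation, so a host's fairness "load" is no longer inflated by
idle or sparse flows. A minimal sketch of how such a counter feeds a host's
per-flow share (quantum_div[] in the source is a reciprocal table, 65535 /
load, used to avoid a division in the fast path; this standalone version
inlines it):

	#include <stdint.h>

	/* Mirrors flow->deficit = (b->flow_quantum *
	 * quantum_div[host_load]) >> 16 from the dequeue path below.
	 */
	static uint32_t per_host_quantum(uint32_t flow_quantum,
					 uint16_t bulk_flow_count)
	{
		uint32_t host_load = bulk_flow_count ? bulk_flow_count : 1;

		return (flow_quantum * (65535 / host_load)) >> 16;
	}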
@@ -173,8 +173,7 @@
	u64 tin_rate_bps;
	u16 tin_rate_shft;

-	u16 tin_quantum_prio;
-	u16 tin_quantum_band;
+	u16 tin_quantum;
	s32 tin_deficit;
	u32 tin_backlog;
	u32 tin_dropped;
@@ -210,6 +209,9 @@
	u8 flow_mode;
	u8 ack_filter;
	u8 atm_mode;
+
+	u32 fwmark_mask;
+	u16 fwmark_shft;

	/* time_next = time_this + ((len * rate_ns) >> rate_shft) */
	u16 rate_shft;
@@ -310,8 +312,8 @@
 };

 static const u8 diffserv8[] = {
-	2, 5, 1, 2, 4, 2, 2, 2,
-	0, 2, 1, 2, 1, 2, 1, 2,
+	2, 0, 1, 2, 4, 2, 2, 2,
+	1, 2, 1, 2, 1, 2, 1, 2,
	5, 2, 4, 2, 4, 2, 4, 2,
	3, 2, 3, 2, 3, 2, 3, 2,
	6, 2, 3, 2, 3, 2, 3, 2,
@@ -321,7 +323,7 @@
 };

 static const u8 diffserv4[] = {
-	0, 2, 0, 0, 2, 0, 0, 0,
+	0, 1, 0, 0, 2, 0, 0, 0,
	1, 0, 0, 0, 0, 0, 0, 0,
	2, 0, 2, 0, 2, 0, 2, 0,
	2, 0, 2, 0, 2, 0, 2, 0,
@@ -332,7 +334,7 @@
 };

 static const u8 diffserv3[] = {
-	0, 0, 0, 0, 2, 0, 0, 0,
+	0, 1, 0, 0, 2, 0, 0, 0,
	1, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
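Note: each table is indexed by the 6-bit DSCP value and returns a tin number.
The edits give DSCP 1, the RFC 8622 Lower Effort (LE) PHB, a mapping into the
background tin (in diffserv8 this also moves CS1 from tin 0 up to tin 1 so LE
can take the lowest tin). A lookup sketch with an assumed helper name; the
real qdisc points q->tin_index at the chosen table when it is configured:

	#include <stdint.h>

	/* DSCP occupies the top six bits of the IPv4 TOS / IPv6
	 * traffic-class octet, so it is already a 0..63 index here.
	 */
	static uint8_t tin_for_dscp(const uint8_t table[64], uint8_t dscp)
	{
		return table[dscp & 0x3f];
	}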
@@ -582,26 +584,48 @@
	return drop;
 }

-static void cake_update_flowkeys(struct flow_keys *keys,
+static bool cake_update_flowkeys(struct flow_keys *keys,
				 const struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
	struct nf_conntrack_tuple tuple = {};
-	bool rev = !skb->_nfct;
+	bool rev = !skb->_nfct, upd = false;
+	__be32 ip;

	if (skb_protocol(skb, true) != htons(ETH_P_IP))
-		return;
+		return false;

	if (!nf_ct_get_tuple_skb(&tuple, skb))
-		return;
+		return false;

-	keys->addrs.v4addrs.src = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
-	keys->addrs.v4addrs.dst = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
+	ip = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
+	if (ip != keys->addrs.v4addrs.src) {
+		keys->addrs.v4addrs.src = ip;
+		upd = true;
+	}
+	ip = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
+	if (ip != keys->addrs.v4addrs.dst) {
+		keys->addrs.v4addrs.dst = ip;
+		upd = true;
+	}

	if (keys->ports.ports) {
-		keys->ports.src = rev ? tuple.dst.u.all : tuple.src.u.all;
-		keys->ports.dst = rev ? tuple.src.u.all : tuple.dst.u.all;
+		__be16 port;
+
+		port = rev ? tuple.dst.u.all : tuple.src.u.all;
+		if (port != keys->ports.src) {
+			keys->ports.src = port;
+			upd = true;
+		}
+		port = rev ? tuple.src.u.all : tuple.dst.u.all;
+		if (port != keys->ports.dst) {
+			keys->ports.dst = port;
+			upd = true;
+		}
	}
+	return upd;
+#else
+	return false;
 #endif
 }

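Note: returning bool turns the conntrack rewrite into an "update and report"
helper: the caller (cake_hash(), below) only has to abandon a precomputed
skb->hash when the NAT lookup actually changed a key. The pattern in
isolation, as a sketch:

	#include <stdbool.h>
	#include <stdint.h>

	/* Rewrite a key only when it differs, and report whether any
	 * hash cached over the keys is now stale.
	 */
	static bool update_key(uint32_t *key, uint32_t nat_val)
	{
		if (*key == nat_val)
			return false;	/* unchanged; cached hash stays valid */
		*key = nat_val;
		return true;		/* changed; caller must rehash */
	}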
@@ -622,23 +646,36 @@
 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
		     int flow_mode, u16 flow_override, u16 host_override)
 {
+	bool hash_flows = (!flow_override && !!(flow_mode & CAKE_FLOW_FLOWS));
+	bool hash_hosts = (!host_override && !!(flow_mode & CAKE_FLOW_HOSTS));
+	bool nat_enabled = !!(flow_mode & CAKE_FLOW_NAT_FLAG);
	u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
	u16 reduced_hash, srchost_idx, dsthost_idx;
	struct flow_keys keys, host_keys;
+	bool use_skbhash = skb->l4_hash;

	if (unlikely(flow_mode == CAKE_FLOW_NONE))
		return 0;

-	/* If both overrides are set we can skip packet dissection entirely */
-	if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) &&
-	    (host_override || !(flow_mode & CAKE_FLOW_HOSTS)))
+	/* If both overrides are set, or we can use the SKB hash and nat mode is
+	 * disabled, we can skip packet dissection entirely. If nat mode is
+	 * enabled there's another check below after doing the conntrack lookup.
+	 */
+	if ((!hash_flows || (use_skbhash && !nat_enabled)) && !hash_hosts)
		goto skip_hash;

	skb_flow_dissect_flow_keys(skb, &keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

-	if (flow_mode & CAKE_FLOW_NAT_FLAG)
-		cake_update_flowkeys(&keys, skb);
+	/* Don't use the SKB hash if we change the lookup keys from conntrack */
+	if (nat_enabled && cake_update_flowkeys(&keys, skb))
+		use_skbhash = false;
+
+	/* If we can still use the SKB hash and don't need the host hash, we can
+	 * skip the rest of the hashing procedure
+	 */
+	if (use_skbhash && !hash_hosts)
+		goto skip_hash;

	/* flow_hash_from_keys() sorts the addresses by value, so we have
	 * to preserve their order in a separate data structure to treat
@@ -677,12 +714,14 @@
	/* This *must* be after the above switch, since as a
	 * side-effect it sorts the src and dst addresses.
	 */
-	if (flow_mode & CAKE_FLOW_FLOWS)
+	if (hash_flows && !use_skbhash)
		flow_hash = flow_hash_from_keys(&keys);

 skip_hash:
	if (flow_override)
		flow_hash = flow_override - 1;
+	else if (use_skbhash && (flow_mode & CAKE_FLOW_FLOWS))
+		flow_hash = skb->hash;
	if (host_override) {
		dsthost_hash = host_override - 1;
		srchost_hash = host_override - 1;
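Note: taken together, the two early exits mean full packet dissection now
happens only when it is strictly needed. The entry predicate, extracted as a
sketch:

	#include <stdbool.h>

	/* Mirrors the first "goto skip_hash" condition added above */
	static bool can_skip_dissection(bool hash_flows, bool hash_hosts,
					bool use_skbhash, bool nat_enabled)
	{
		return (!hash_flows || (use_skbhash && !nat_enabled)) &&
		       !hash_hosts;
	}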
@@ -746,8 +785,10 @@
		 * queue, accept the collision, update the host tags.
		 */
		q->way_collisions++;
-		q->hosts[q->flows[reduced_hash].srchost].srchost_refcnt--;
-		q->hosts[q->flows[reduced_hash].dsthost].dsthost_refcnt--;
+		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
+			q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
+			q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+		}
		allocate_src = cake_dsrc(flow_mode);
		allocate_dst = cake_ddst(flow_mode);
 found:
@@ -767,13 +808,14 @@
		}
		for (i = 0; i < CAKE_SET_WAYS;
		     i++, k = (k + 1) % CAKE_SET_WAYS) {
-			if (!q->hosts[outer_hash + k].srchost_refcnt)
+			if (!q->hosts[outer_hash + k].srchost_bulk_flow_count)
				break;
		}
		q->hosts[outer_hash + k].srchost_tag = srchost_hash;
 found_src:
		srchost_idx = outer_hash + k;
-		q->hosts[srchost_idx].srchost_refcnt++;
+		if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+			q->hosts[srchost_idx].srchost_bulk_flow_count++;
		q->flows[reduced_hash].srchost = srchost_idx;
	}

@@ -789,13 +831,14 @@
		}
		for (i = 0; i < CAKE_SET_WAYS;
		     i++, k = (k + 1) % CAKE_SET_WAYS) {
-			if (!q->hosts[outer_hash + k].dsthost_refcnt)
+			if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count)
				break;
		}
		q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
 found_dst:
		dsthost_idx = outer_hash + k;
-		q->hosts[dsthost_idx].dsthost_refcnt++;
+		if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+			q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
		q->flows[reduced_hash].dsthost = dsthost_idx;
	}
 }
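Note: the host tables are set-associative like the flow table: the host hash
picks a set of CAKE_SET_WAYS adjacent entries, and the loops above probe the
ways first for a matching tag and then for a free way, where "free" now means
"no bulk flows" rather than "no flows at all". A standalone sketch of that
probe:

	#include <stdint.h>

	#define SET_WAYS 8	/* CAKE_SET_WAYS in the source */

	/* Returns the chosen way within one set. */
	static unsigned int probe_set(const uint32_t *tags,
				      const uint16_t *bulk_counts,
				      unsigned int outer, uint32_t want_tag)
	{
		unsigned int k;

		for (k = 0; k < SET_WAYS; k++)	/* first pass: tag match */
			if (tags[outer + k] == want_tag)
				return k;
		for (k = 0; k < SET_WAYS; k++)	/* second pass: free way */
			if (!bulk_counts[outer + k])
				return k;
		return 0;	/* every way busy: accept a collision */
	}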
@@ -812,7 +855,7 @@

	if (skb) {
		flow->head = skb->next;
-		skb->next = NULL;
+		skb_mark_not_on_list(skb);
	}

	return skb;
@@ -1256,7 +1299,7 @@
	else
		flow->head = elig_ack->next;

-	elig_ack->next = NULL;
+	skb_mark_not_on_list(elig_ack);

	return elig_ack;
 }
@@ -1572,7 +1615,7 @@
				     struct sk_buff *skb)
 {
	struct cake_sched_data *q = qdisc_priv(sch);
-	u32 tin;
+	u32 tin, mark;
	bool wash;
	u8 dscp;

@@ -1580,12 +1623,16 @@
	 * using firewall marks or skb->priority. Call DSCP parsing early if
	 * wash is enabled, otherwise defer to below to skip unneeded parsing.
	 */
+	mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
	wash = !!(q->rate_flags & CAKE_FLAG_WASH);
	if (wash)
		dscp = cake_handle_diffserv(skb, wash);

	if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
		tin = 0;
+
+	else if (mark && mark <= q->tin_cnt)
+		tin = q->tin_order[mark - 1];

	else if (TC_H_MAJ(skb->priority) == sch->handle &&
		 TC_H_MIN(skb->priority) > 0 &&
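Note: a non-zero extracted mark wins over both skb->priority and DSCP. A
self-contained sketch of the mask-and-shift, where lowest_set_bit() stands in
for the kernel's __ffs() and tin_order[] is CAKE's mapping from 1-based
positions in the tin ordering onto internal tin indices:

	#include <stdint.h>

	static unsigned int lowest_set_bit(uint32_t v)
	{
		unsigned int i = 0;

		while (v > 1 && !(v & 1)) {
			v >>= 1;
			i++;
		}
		return i;
	}

	/* e.g. mask 0x0f00 gives shift 8, so marks 0x100..0xf00
	 * select tins 1..15 (capped by tin_cnt).
	 */
	static int tin_from_fwmark(uint32_t skb_mark, uint32_t fwmark_mask,
				   const uint8_t *tin_order, uint32_t tin_cnt)
	{
		uint32_t shft = fwmark_mask ? lowest_set_bit(fwmark_mask) : 0;
		uint32_t mark = (skb_mark & fwmark_mask) >> shft;

		if (mark && mark <= tin_cnt)
			return tin_order[mark - 1];
		return -1;	/* fall back to priority/DSCP selection */
	}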
@@ -1627,7 +1674,7 @@
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-		/* fall through */
+		fallthrough;
	case TC_ACT_SHOT:
		return 0;
	}
@@ -1649,7 +1696,7 @@
 {
	struct cake_sched_data *q = qdisc_priv(sch);
	int len = qdisc_pkt_len(skb);
-	int uninitialized_var(ret);
+	int ret;
	struct sk_buff *ack = NULL;
	ktime_t now = ktime_get();
	struct cake_tin_data *b;
@@ -1700,9 +1747,8 @@
		if (IS_ERR_OR_NULL(segs))
			return qdisc_drop(skb, sch, to_free);

-		while (segs) {
-			nskb = segs->next;
-			segs->next = NULL;
+		skb_list_walk_safe(segs, segs, nskb) {
+			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			cobalt_set_enqueue_time(segs, now);
			get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
@@ -1714,7 +1760,6 @@
			slen += segs->len;
			q->buffer_used += segs->truesize;
			b->packets++;
-			segs = nskb;
		}

		/* stats */
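Note: skb_list_walk_safe() caches the next pointer before the loop body runs,
which is what makes the explicit nskb bookkeeping and the trailing
"segs = nskb" step unnecessary. Its shape is roughly the following
(simplified from linux/skbuff.h; the real macro may differ in detail):

	#define skb_list_walk_safe(first, skb, next_skb)              \
		for ((skb) = (first),                                 \
		     (next_skb) = (skb) ? (skb)->next : NULL;         \
		     (skb);                                           \
		     (skb) = (next_skb),                              \
		     (next_skb) = (skb) ? (skb)->next : NULL)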
@@ -1821,20 +1866,30 @@
		b->sparse_flow_count++;

		if (cake_dsrc(q->flow_mode))
-			host_load = max(host_load, srchost->srchost_refcnt);
+			host_load = max(host_load, srchost->srchost_bulk_flow_count);

		if (cake_ddst(q->flow_mode))
-			host_load = max(host_load, dsthost->dsthost_refcnt);
+			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);

		flow->deficit = (b->flow_quantum *
				 quantum_div[host_load]) >> 16;
	} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
+		struct cake_host *srchost = &b->hosts[flow->srchost];
+		struct cake_host *dsthost = &b->hosts[flow->dsthost];
+
		/* this flow was empty, accounted as a sparse flow, but actually
		 * in the bulk rotation.
		 */
		flow->set = CAKE_SET_BULK;
		b->sparse_flow_count--;
		b->bulk_flow_count++;
+
+		if (cake_dsrc(q->flow_mode))
+			srchost->srchost_bulk_flow_count++;
+
+		if (cake_ddst(q->flow_mode))
+			dsthost->dsthost_bulk_flow_count++;
+
	}

	if (q->buffer_used > q->buffer_max_used)
@@ -1926,7 +1981,7 @@
	while (b->tin_deficit < 0 ||
	       !(b->sparse_flow_count + b->bulk_flow_count)) {
		if (b->tin_deficit <= 0)
-			b->tin_deficit += b->tin_quantum_band;
+			b->tin_deficit += b->tin_quantum;
		if (b->sparse_flow_count + b->bulk_flow_count)
			empty = false;

@@ -2002,23 +2057,8 @@
	dsthost = &b->hosts[flow->dsthost];
	host_load = 1;

-	if (cake_dsrc(q->flow_mode))
-		host_load = max(host_load, srchost->srchost_refcnt);
-
-	if (cake_ddst(q->flow_mode))
-		host_load = max(host_load, dsthost->dsthost_refcnt);
-
-	WARN_ON(host_load > CAKE_QUEUES);
-
	/* flow isolation (DRR++) */
	if (flow->deficit <= 0) {
-		/* The shifted prandom_u32() is a way to apply dithering to
-		 * avoid accumulating roundoff errors
-		 */
-		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
-				  (prandom_u32() >> 16)) >> 16;
-		list_move_tail(&flow->flowchain, &b->old_flows);
-
		/* Keep all flows with deficits out of the sparse and decaying
		 * rotations. No non-empty flow can go into the decaying
		 * rotation, so they can't get deficits
@@ -2027,6 +2067,13 @@
		if (flow->head) {
			b->sparse_flow_count--;
			b->bulk_flow_count++;
+
+			if (cake_dsrc(q->flow_mode))
+				srchost->srchost_bulk_flow_count++;
+
+			if (cake_ddst(q->flow_mode))
+				dsthost->dsthost_bulk_flow_count++;
+
			flow->set = CAKE_SET_BULK;
		} else {
			/* we've moved it to the bulk rotation for
@@ -2036,6 +2083,22 @@
			flow->set = CAKE_SET_SPARSE_WAIT;
		}
	}
+
+	if (cake_dsrc(q->flow_mode))
+		host_load = max(host_load, srchost->srchost_bulk_flow_count);
+
+	if (cake_ddst(q->flow_mode))
+		host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
+
+	WARN_ON(host_load > CAKE_QUEUES);
+
+	/* The shifted prandom_u32() is a way to apply dithering to
+	 * avoid accumulating roundoff errors
+	 */
+	flow->deficit += (b->flow_quantum * quantum_div[host_load] +
+			  (prandom_u32() >> 16)) >> 16;
+	list_move_tail(&flow->flowchain, &b->old_flows);
+
	goto retry;
 }

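Note: the deficit update moves below the sparse-to-bulk transition so that
host_load is computed from bulk counts that are already up to date. The
arithmetic itself is unchanged; as a standalone sketch, with rand() standing
in for prandom_u32():

	#include <stdint.h>
	#include <stdlib.h>

	/* quantum_div[load] in the source is 65535 / load, so the
	 * multiply-and-shift approximates flow_quantum / host_load;
	 * the 16 random bits dither away the rounding-down bias.
	 */
	static int32_t add_deficit(int32_t deficit, uint32_t flow_quantum,
				   uint32_t host_load)
	{
		uint32_t recip = 65535 / (host_load ? host_load : 1);
		uint32_t dither = (uint32_t)rand() & 0xffff;

		return deficit +
		       (int32_t)((flow_quantum * recip + dither) >> 16);
	}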
@@ -2056,6 +2119,13 @@
					       &b->decaying_flows);
			if (flow->set == CAKE_SET_BULK) {
				b->bulk_flow_count--;
+
+				if (cake_dsrc(q->flow_mode))
+					srchost->srchost_bulk_flow_count--;
+
+				if (cake_ddst(q->flow_mode))
+					dsthost->dsthost_bulk_flow_count--;
+
				b->decaying_flow_count++;
			} else if (flow->set == CAKE_SET_SPARSE ||
				   flow->set == CAKE_SET_SPARSE_WAIT) {
@@ -2069,14 +2139,19 @@
			if (flow->set == CAKE_SET_SPARSE ||
			    flow->set == CAKE_SET_SPARSE_WAIT)
				b->sparse_flow_count--;
-			else if (flow->set == CAKE_SET_BULK)
+			else if (flow->set == CAKE_SET_BULK) {
				b->bulk_flow_count--;
-			else
+
+				if (cake_dsrc(q->flow_mode))
+					srchost->srchost_bulk_flow_count--;
+
+				if (cake_ddst(q->flow_mode))
+					dsthost->dsthost_bulk_flow_count--;
+
+			} else
				b->decaying_flow_count--;

			flow->set = CAKE_SET_NONE;
-			srchost->srchost_refcnt--;
-			dsthost->dsthost_refcnt--;
		}
		goto begin;
	}
@@ -2149,7 +2224,11 @@

 static void cake_reset(struct Qdisc *sch)
 {
+	struct cake_sched_data *q = qdisc_priv(sch);
	u32 c;
+
+	if (!q->tins)
+		return;

	for (c = 0; c < CAKE_MAX_TINS; c++)
		cake_clear_tin(sch, c);
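Note: the NULL check makes cake_reset() safe to call before (or after a
failed) cake_init(), when the tin array has not been allocated yet;
cake_clear_tin() would otherwise dereference q->tins through a NULL pointer.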
@@ -2171,6 +2250,8 @@
	[TCA_CAKE_MPU]		 = { .type = NLA_U32 },
	[TCA_CAKE_INGRESS]	 = { .type = NLA_U32 },
	[TCA_CAKE_ACK_FILTER]	 = { .type = NLA_U32 },
+	[TCA_CAKE_SPLIT_GSO]	 = { .type = NLA_U32 },
+	[TCA_CAKE_FWMARK]	 = { .type = NLA_U32 },
 };

 static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
@@ -2226,8 +2307,7 @@

	cake_set_rate(b, rate, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));
-	b->tin_quantum_band = 65535;
-	b->tin_quantum_prio = 65535;
+	b->tin_quantum = 65535;

	return 0;
 }
@@ -2238,8 +2318,7 @@
	struct cake_sched_data *q = qdisc_priv(sch);
	u32 mtu = psched_mtu(qdisc_dev(sch));
	u64 rate = q->rate_bps;
-	u32 quantum1 = 256;
-	u32 quantum2 = 256;
+	u32 quantum = 256;
	u32 i;

	q->tin_cnt = 8;
@@ -2252,18 +2331,14 @@
		cake_set_rate(b, rate, mtu, us_to_ns(q->target),
			      us_to_ns(q->interval));

-		b->tin_quantum_prio = max_t(u16, 1U, quantum1);
-		b->tin_quantum_band = max_t(u16, 1U, quantum2);
+		b->tin_quantum = max_t(u16, 1U, quantum);

		/* calculate next class's parameters */
		rate *= 7;
		rate >>= 3;

-		quantum1 *= 3;
-		quantum1 >>= 1;
-
-		quantum2 *= 7;
-		quantum2 >>= 3;
+		quantum *= 7;
+		quantum >>= 3;
	}

	return 0;
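Note: with the priority quantum gone, each of the eight tins keeps a single
bandwidth-sharing quantum that decays geometrically, each tin receiving 7/8
of the previous one, matching the rate ladder. A quick demonstration of the
resulting values:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t quantum = 256;
		int i;

		for (i = 0; i < 8; i++) {
			printf("tin %d: quantum %u\n", i, quantum);
			quantum *= 7;	/* same update as the loop above */
			quantum >>= 3;
		}
		return 0;	/* prints 256, 224, 196, 171, 149, 130, 113, 98 */
	}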
@@ -2332,8 +2407,7 @@
	struct cake_sched_data *q = qdisc_priv(sch);
	u32 mtu = psched_mtu(qdisc_dev(sch));
	u64 rate = q->rate_bps;
-	u32 quantum1 = 256;
-	u32 quantum2 = 256;
+	u32 quantum = 256;
	u32 i;

	q->tin_cnt = 8;
@@ -2349,18 +2423,14 @@
		cake_set_rate(b, rate, mtu, us_to_ns(q->target),
			      us_to_ns(q->interval));

-		b->tin_quantum_prio = max_t(u16, 1U, quantum1);
-		b->tin_quantum_band = max_t(u16, 1U, quantum2);
+		b->tin_quantum = max_t(u16, 1U, quantum);

		/* calculate next class's parameters */
		rate *= 7;
		rate >>= 3;

-		quantum1 *= 3;
-		quantum1 >>= 1;
-
-		quantum2 *= 7;
-		quantum2 >>= 3;
+		quantum *= 7;
+		quantum >>= 3;
	}

	return 0;
@@ -2399,17 +2469,11 @@
	cake_set_rate(&q->tins[3], rate >> 2, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));

-	/* priority weights */
-	q->tins[0].tin_quantum_prio = quantum;
-	q->tins[1].tin_quantum_prio = quantum >> 4;
-	q->tins[2].tin_quantum_prio = quantum << 2;
-	q->tins[3].tin_quantum_prio = quantum << 4;
-
	/* bandwidth-sharing weights */
-	q->tins[0].tin_quantum_band = quantum;
-	q->tins[1].tin_quantum_band = quantum >> 4;
-	q->tins[2].tin_quantum_band = quantum >> 1;
-	q->tins[3].tin_quantum_band = quantum >> 2;
+	q->tins[0].tin_quantum = quantum;
+	q->tins[1].tin_quantum = quantum >> 4;
+	q->tins[2].tin_quantum = quantum >> 1;
+	q->tins[3].tin_quantum = quantum >> 2;

	return 0;
 }
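Note: only the bandwidth-sharing weights survive. Relative to Best Effort
(tin 0), Bulk (tin 1) keeps a 1/16 share, Video (tin 2) 1/2 and Voice (tin 3)
1/4; the separate priority weights, including the "quantum << 2" and
"quantum << 4" boosts, are dropped along with the tin_quantum_prio field
itself.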
@@ -2440,15 +2504,10 @@
	cake_set_rate(&q->tins[2], rate >> 2, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));

-	/* priority weights */
-	q->tins[0].tin_quantum_prio = quantum;
-	q->tins[1].tin_quantum_prio = quantum >> 4;
-	q->tins[2].tin_quantum_prio = quantum << 4;
-
	/* bandwidth-sharing weights */
-	q->tins[0].tin_quantum_band = quantum;
-	q->tins[1].tin_quantum_band = quantum >> 4;
-	q->tins[2].tin_quantum_band = quantum >> 2;
+	q->tins[0].tin_quantum = quantum;
+	q->tins[1].tin_quantum = quantum >> 4;
+	q->tins[2].tin_quantum = quantum >> 2;

	return 0;
 }
@@ -2517,7 +2576,8 @@
	if (!opt)
		return -EINVAL;

-	err = nla_parse_nested(tb, TCA_CAKE_MAX, opt, cake_policy, extack);
+	err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy,
+					  extack);
	if (err < 0)
		return err;

@@ -2617,6 +2677,11 @@
		q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
	}

+	if (tb[TCA_CAKE_FWMARK]) {
+		q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
+		q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
+	}
+
	if (q->tins) {
		sch_tree_lock(sch);
		cake_reconfigure(sch);
@@ -2712,7 +2777,7 @@
	struct cake_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

-	opts = nla_nest_start(skb, TCA_OPTIONS);
+	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_put_failure;

@@ -2772,6 +2837,9 @@
			!!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
		goto nla_put_failure;

+	if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
+		goto nla_put_failure;
+
	return nla_nest_end(skb, opts);

 nla_put_failure:
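Note: dumping TCA_CAKE_FWMARK unconditionally keeps the netlink round-trip
symmetric with the parse branch added above; a mask of 0 (the default) means
the feature is effectively disabled. With a new enough iproute2 this surfaces
as the "fwmark" option of tc-cake(8), e.g.
"tc qdisc replace dev eth0 root cake diffserv3 fwmark 0x3".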
@@ -2780,7 +2848,7 @@

 static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 {
-	struct nlattr *stats = nla_nest_start(d->skb, TCA_STATS_APP);
+	struct nlattr *stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
	struct cake_sched_data *q = qdisc_priv(sch);
	struct nlattr *tstats, *ts;
	int i;
@@ -2810,7 +2878,7 @@
 #undef PUT_STAT_U32
 #undef PUT_STAT_U64

-	tstats = nla_nest_start(d->skb, TCA_CAKE_STATS_TIN_STATS);
+	tstats = nla_nest_start_noflag(d->skb, TCA_CAKE_STATS_TIN_STATS);
	if (!tstats)
		goto nla_put_failure;

@@ -2827,7 +2895,7 @@
	for (i = 0; i < q->tin_cnt; i++) {
		struct cake_tin_data *b = &q->tins[q->tin_order[i]];

-		ts = nla_nest_start(d->skb, i + 1);
+		ts = nla_nest_start_noflag(d->skb, i + 1);
		if (!ts)
			goto nla_put_failure;

@@ -2947,7 +3015,7 @@
	if (flow) {
		ktime_t now = ktime_get();

-		stats = nla_nest_start(d->skb, TCA_STATS_APP);
+		stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
		if (!stats)
			return -1;

---|