From 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 22 Oct 2024 10:36:11 +0000
Subject: [PATCH] Switch the 4G dial-up to QMI mode; quectel-CM must be run in
 the background on the system

---
 kernel/net/sched/sch_cake.c | 274 ++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 171 insertions(+), 103 deletions(-)

diff --git a/kernel/net/sched/sch_cake.c b/kernel/net/sched/sch_cake.c
index 18c207b..5dc7a3c 100644
--- a/kernel/net/sched/sch_cake.c
+++ b/kernel/net/sched/sch_cake.c
@@ -138,8 +138,8 @@ struct cake_host {
 	u32 srchost_tag;
 	u32 dsthost_tag;
-	u16 srchost_refcnt;
-	u16 dsthost_refcnt;
+	u16 srchost_bulk_flow_count;
+	u16 dsthost_bulk_flow_count;
 };
 
 struct cake_heap_entry {
@@ -173,8 +173,7 @@
 	u64 tin_rate_bps;
 	u16 tin_rate_shft;
 
-	u16 tin_quantum_prio;
-	u16 tin_quantum_band;
+	u16 tin_quantum;
 	s32 tin_deficit;
 	u32 tin_backlog;
 	u32 tin_dropped;
@@ -210,6 +209,9 @@
 	u8 flow_mode;
 	u8 ack_filter;
 	u8 atm_mode;
+
+	u32 fwmark_mask;
+	u16 fwmark_shft;
 
 	/* time_next = time_this + ((len * rate_ns) >> rate_shft) */
 	u16 rate_shft;
@@ -310,8 +312,8 @@
 };
 
 static const u8 diffserv8[] = {
-	2, 5, 1, 2, 4, 2, 2, 2,
-	0, 2, 1, 2, 1, 2, 1, 2,
+	2, 0, 1, 2, 4, 2, 2, 2,
+	1, 2, 1, 2, 1, 2, 1, 2,
 	5, 2, 4, 2, 4, 2, 4, 2,
 	3, 2, 3, 2, 3, 2, 3, 2,
 	6, 2, 3, 2, 3, 2, 3, 2,
@@ -321,7 +323,7 @@
 };
 
 static const u8 diffserv4[] = {
-	0, 2, 0, 0, 2, 0, 0, 0,
+	0, 1, 0, 0, 2, 0, 0, 0,
 	1, 0, 0, 0, 0, 0, 0, 0,
 	2, 0, 2, 0, 2, 0, 2, 0,
 	2, 0, 2, 0, 2, 0, 2, 0,
@@ -332,7 +334,7 @@
 };
 
 static const u8 diffserv3[] = {
-	0, 0, 0, 0, 2, 0, 0, 0,
+	0, 1, 0, 0, 2, 0, 0, 0,
 	1, 0, 0, 0, 0, 0, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0,
@@ -582,26 +584,48 @@
 	return drop;
 }
 
-static void cake_update_flowkeys(struct flow_keys *keys,
+static bool cake_update_flowkeys(struct flow_keys *keys,
 				 const struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	struct nf_conntrack_tuple tuple = {};
-	bool rev = !skb->_nfct;
+	bool rev = !skb->_nfct, upd = false;
+	__be32 ip;
 
 	if (skb_protocol(skb, true) != htons(ETH_P_IP))
-		return;
+		return false;
 
 	if (!nf_ct_get_tuple_skb(&tuple, skb))
-		return;
+		return false;
 
-	keys->addrs.v4addrs.src = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
-	keys->addrs.v4addrs.dst = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
+	ip = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
+	if (ip != keys->addrs.v4addrs.src) {
+		keys->addrs.v4addrs.src = ip;
+		upd = true;
+	}
+	ip = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
+	if (ip != keys->addrs.v4addrs.dst) {
+		keys->addrs.v4addrs.dst = ip;
+		upd = true;
+	}
 
 	if (keys->ports.ports) {
-		keys->ports.src = rev ? tuple.dst.u.all : tuple.src.u.all;
-		keys->ports.dst = rev ? tuple.src.u.all : tuple.dst.u.all;
+		__be16 port;
+
+		port = rev ? tuple.dst.u.all : tuple.src.u.all;
+		if (port != keys->ports.src) {
+			keys->ports.src = port;
+			upd = true;
+		}
+		port = rev ? tuple.src.u.all : tuple.dst.u.all;
+		if (port != keys->ports.dst) {
+			keys->ports.dst = port;
+			upd = true;
+		}
 	}
+
+	return upd;
+#else
+	return false;
 #endif
 }
 
@@ -622,23 +646,36 @@
 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 		     int flow_mode, u16 flow_override, u16 host_override)
 {
+	bool hash_flows = (!flow_override && !!(flow_mode & CAKE_FLOW_FLOWS));
+	bool hash_hosts = (!host_override && !!(flow_mode & CAKE_FLOW_HOSTS));
+	bool nat_enabled = !!(flow_mode & CAKE_FLOW_NAT_FLAG);
 	u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
 	u16 reduced_hash, srchost_idx, dsthost_idx;
 	struct flow_keys keys, host_keys;
+	bool use_skbhash = skb->l4_hash;
 
 	if (unlikely(flow_mode == CAKE_FLOW_NONE))
 		return 0;
 
-	/* If both overrides are set we can skip packet dissection entirely */
-	if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) &&
-	    (host_override || !(flow_mode & CAKE_FLOW_HOSTS)))
+	/* If both overrides are set, or we can use the SKB hash and nat mode is
+	 * disabled, we can skip packet dissection entirely. If nat mode is
+	 * enabled there's another check below after doing the conntrack lookup.
+	 */
+	if ((!hash_flows || (use_skbhash && !nat_enabled)) && !hash_hosts)
 		goto skip_hash;
 
 	skb_flow_dissect_flow_keys(skb, &keys,
 				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
-	if (flow_mode & CAKE_FLOW_NAT_FLAG)
-		cake_update_flowkeys(&keys, skb);
+	/* Don't use the SKB hash if we change the lookup keys from conntrack */
+	if (nat_enabled && cake_update_flowkeys(&keys, skb))
+		use_skbhash = false;
+
+	/* If we can still use the SKB hash and don't need the host hash, we can
+	 * skip the rest of the hashing procedure
+	 */
+	if (use_skbhash && !hash_hosts)
+		goto skip_hash;
 
 	/* flow_hash_from_keys() sorts the addresses by value, so we have
 	 * to preserve their order in a separate data structure to treat
@@ -677,12 +714,14 @@
 	/* This *must* be after the above switch, since as a
 	 * side-effect it sorts the src and dst addresses.
 	 */
-	if (flow_mode & CAKE_FLOW_FLOWS)
+	if (hash_flows && !use_skbhash)
 		flow_hash = flow_hash_from_keys(&keys);
 
 skip_hash:
 	if (flow_override)
 		flow_hash = flow_override - 1;
+	else if (use_skbhash && (flow_mode & CAKE_FLOW_FLOWS))
+		flow_hash = skb->hash;
 	if (host_override) {
 		dsthost_hash = host_override - 1;
 		srchost_hash = host_override - 1;
@@ -746,8 +785,10 @@
 		 * queue, accept the collision, update the host tags.
*/ q->way_collisions++; - q->hosts[q->flows[reduced_hash].srchost].srchost_refcnt--; - q->hosts[q->flows[reduced_hash].dsthost].dsthost_refcnt--; + if (q->flows[outer_hash + k].set == CAKE_SET_BULK) { + q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--; + q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--; + } allocate_src = cake_dsrc(flow_mode); allocate_dst = cake_ddst(flow_mode); found: @@ -767,13 +808,14 @@ } for (i = 0; i < CAKE_SET_WAYS; i++, k = (k + 1) % CAKE_SET_WAYS) { - if (!q->hosts[outer_hash + k].srchost_refcnt) + if (!q->hosts[outer_hash + k].srchost_bulk_flow_count) break; } q->hosts[outer_hash + k].srchost_tag = srchost_hash; found_src: srchost_idx = outer_hash + k; - q->hosts[srchost_idx].srchost_refcnt++; + if (q->flows[reduced_hash].set == CAKE_SET_BULK) + q->hosts[srchost_idx].srchost_bulk_flow_count++; q->flows[reduced_hash].srchost = srchost_idx; } @@ -789,13 +831,14 @@ } for (i = 0; i < CAKE_SET_WAYS; i++, k = (k + 1) % CAKE_SET_WAYS) { - if (!q->hosts[outer_hash + k].dsthost_refcnt) + if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count) break; } q->hosts[outer_hash + k].dsthost_tag = dsthost_hash; found_dst: dsthost_idx = outer_hash + k; - q->hosts[dsthost_idx].dsthost_refcnt++; + if (q->flows[reduced_hash].set == CAKE_SET_BULK) + q->hosts[dsthost_idx].dsthost_bulk_flow_count++; q->flows[reduced_hash].dsthost = dsthost_idx; } } @@ -812,7 +855,7 @@ if (skb) { flow->head = skb->next; - skb->next = NULL; + skb_mark_not_on_list(skb); } return skb; @@ -1256,7 +1299,7 @@ else flow->head = elig_ack->next; - elig_ack->next = NULL; + skb_mark_not_on_list(elig_ack); return elig_ack; } @@ -1572,7 +1615,7 @@ struct sk_buff *skb) { struct cake_sched_data *q = qdisc_priv(sch); - u32 tin; + u32 tin, mark; bool wash; u8 dscp; @@ -1580,12 +1623,16 @@ * using firewall marks or skb->priority. Call DSCP parsing early if * wash is enabled, otherwise defer to below to skip unneeded parsing. 
*/ + mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft; wash = !!(q->rate_flags & CAKE_FLAG_WASH); if (wash) dscp = cake_handle_diffserv(skb, wash); if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT) tin = 0; + + else if (mark && mark <= q->tin_cnt) + tin = q->tin_order[mark - 1]; else if (TC_H_MAJ(skb->priority) == sch->handle && TC_H_MIN(skb->priority) > 0 && @@ -1627,7 +1674,7 @@ case TC_ACT_QUEUED: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - /* fall through */ + fallthrough; case TC_ACT_SHOT: return 0; } @@ -1649,7 +1696,7 @@ { struct cake_sched_data *q = qdisc_priv(sch); int len = qdisc_pkt_len(skb); - int uninitialized_var(ret); + int ret; struct sk_buff *ack = NULL; ktime_t now = ktime_get(); struct cake_tin_data *b; @@ -1700,9 +1747,8 @@ if (IS_ERR_OR_NULL(segs)) return qdisc_drop(skb, sch, to_free); - while (segs) { - nskb = segs->next; - segs->next = NULL; + skb_list_walk_safe(segs, segs, nskb) { + skb_mark_not_on_list(segs); qdisc_skb_cb(segs)->pkt_len = segs->len; cobalt_set_enqueue_time(segs, now); get_cobalt_cb(segs)->adjusted_len = cake_overhead(q, @@ -1714,7 +1760,6 @@ slen += segs->len; q->buffer_used += segs->truesize; b->packets++; - segs = nskb; } /* stats */ @@ -1821,20 +1866,30 @@ b->sparse_flow_count++; if (cake_dsrc(q->flow_mode)) - host_load = max(host_load, srchost->srchost_refcnt); + host_load = max(host_load, srchost->srchost_bulk_flow_count); if (cake_ddst(q->flow_mode)) - host_load = max(host_load, dsthost->dsthost_refcnt); + host_load = max(host_load, dsthost->dsthost_bulk_flow_count); flow->deficit = (b->flow_quantum * quantum_div[host_load]) >> 16; } else if (flow->set == CAKE_SET_SPARSE_WAIT) { + struct cake_host *srchost = &b->hosts[flow->srchost]; + struct cake_host *dsthost = &b->hosts[flow->dsthost]; + /* this flow was empty, accounted as a sparse flow, but actually * in the bulk rotation. */ flow->set = CAKE_SET_BULK; b->sparse_flow_count--; b->bulk_flow_count++; + + if (cake_dsrc(q->flow_mode)) + srchost->srchost_bulk_flow_count++; + + if (cake_ddst(q->flow_mode)) + dsthost->dsthost_bulk_flow_count++; + } if (q->buffer_used > q->buffer_max_used) @@ -1926,7 +1981,7 @@ while (b->tin_deficit < 0 || !(b->sparse_flow_count + b->bulk_flow_count)) { if (b->tin_deficit <= 0) - b->tin_deficit += b->tin_quantum_band; + b->tin_deficit += b->tin_quantum; if (b->sparse_flow_count + b->bulk_flow_count) empty = false; @@ -2002,23 +2057,8 @@ dsthost = &b->hosts[flow->dsthost]; host_load = 1; - if (cake_dsrc(q->flow_mode)) - host_load = max(host_load, srchost->srchost_refcnt); - - if (cake_ddst(q->flow_mode)) - host_load = max(host_load, dsthost->dsthost_refcnt); - - WARN_ON(host_load > CAKE_QUEUES); - /* flow isolation (DRR++) */ if (flow->deficit <= 0) { - /* The shifted prandom_u32() is a way to apply dithering to - * avoid accumulating roundoff errors - */ - flow->deficit += (b->flow_quantum * quantum_div[host_load] + - (prandom_u32() >> 16)) >> 16; - list_move_tail(&flow->flowchain, &b->old_flows); - /* Keep all flows with deficits out of the sparse and decaying * rotations. 
No non-empty flow can go into the decaying * rotation, so they can't get deficits @@ -2027,6 +2067,13 @@ if (flow->head) { b->sparse_flow_count--; b->bulk_flow_count++; + + if (cake_dsrc(q->flow_mode)) + srchost->srchost_bulk_flow_count++; + + if (cake_ddst(q->flow_mode)) + dsthost->dsthost_bulk_flow_count++; + flow->set = CAKE_SET_BULK; } else { /* we've moved it to the bulk rotation for @@ -2036,6 +2083,22 @@ flow->set = CAKE_SET_SPARSE_WAIT; } } + + if (cake_dsrc(q->flow_mode)) + host_load = max(host_load, srchost->srchost_bulk_flow_count); + + if (cake_ddst(q->flow_mode)) + host_load = max(host_load, dsthost->dsthost_bulk_flow_count); + + WARN_ON(host_load > CAKE_QUEUES); + + /* The shifted prandom_u32() is a way to apply dithering to + * avoid accumulating roundoff errors + */ + flow->deficit += (b->flow_quantum * quantum_div[host_load] + + (prandom_u32() >> 16)) >> 16; + list_move_tail(&flow->flowchain, &b->old_flows); + goto retry; } @@ -2056,6 +2119,13 @@ &b->decaying_flows); if (flow->set == CAKE_SET_BULK) { b->bulk_flow_count--; + + if (cake_dsrc(q->flow_mode)) + srchost->srchost_bulk_flow_count--; + + if (cake_ddst(q->flow_mode)) + dsthost->dsthost_bulk_flow_count--; + b->decaying_flow_count++; } else if (flow->set == CAKE_SET_SPARSE || flow->set == CAKE_SET_SPARSE_WAIT) { @@ -2069,14 +2139,19 @@ if (flow->set == CAKE_SET_SPARSE || flow->set == CAKE_SET_SPARSE_WAIT) b->sparse_flow_count--; - else if (flow->set == CAKE_SET_BULK) + else if (flow->set == CAKE_SET_BULK) { b->bulk_flow_count--; - else + + if (cake_dsrc(q->flow_mode)) + srchost->srchost_bulk_flow_count--; + + if (cake_ddst(q->flow_mode)) + dsthost->dsthost_bulk_flow_count--; + + } else b->decaying_flow_count--; flow->set = CAKE_SET_NONE; - srchost->srchost_refcnt--; - dsthost->dsthost_refcnt--; } goto begin; } @@ -2149,7 +2224,11 @@ static void cake_reset(struct Qdisc *sch) { + struct cake_sched_data *q = qdisc_priv(sch); u32 c; + + if (!q->tins) + return; for (c = 0; c < CAKE_MAX_TINS; c++) cake_clear_tin(sch, c); @@ -2171,6 +2250,8 @@ [TCA_CAKE_MPU] = { .type = NLA_U32 }, [TCA_CAKE_INGRESS] = { .type = NLA_U32 }, [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 }, + [TCA_CAKE_SPLIT_GSO] = { .type = NLA_U32 }, + [TCA_CAKE_FWMARK] = { .type = NLA_U32 }, }; static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu, @@ -2226,8 +2307,7 @@ cake_set_rate(b, rate, mtu, us_to_ns(q->target), us_to_ns(q->interval)); - b->tin_quantum_band = 65535; - b->tin_quantum_prio = 65535; + b->tin_quantum = 65535; return 0; } @@ -2238,8 +2318,7 @@ struct cake_sched_data *q = qdisc_priv(sch); u32 mtu = psched_mtu(qdisc_dev(sch)); u64 rate = q->rate_bps; - u32 quantum1 = 256; - u32 quantum2 = 256; + u32 quantum = 256; u32 i; q->tin_cnt = 8; @@ -2252,18 +2331,14 @@ cake_set_rate(b, rate, mtu, us_to_ns(q->target), us_to_ns(q->interval)); - b->tin_quantum_prio = max_t(u16, 1U, quantum1); - b->tin_quantum_band = max_t(u16, 1U, quantum2); + b->tin_quantum = max_t(u16, 1U, quantum); /* calculate next class's parameters */ rate *= 7; rate >>= 3; - quantum1 *= 3; - quantum1 >>= 1; - - quantum2 *= 7; - quantum2 >>= 3; + quantum *= 7; + quantum >>= 3; } return 0; @@ -2332,8 +2407,7 @@ struct cake_sched_data *q = qdisc_priv(sch); u32 mtu = psched_mtu(qdisc_dev(sch)); u64 rate = q->rate_bps; - u32 quantum1 = 256; - u32 quantum2 = 256; + u32 quantum = 256; u32 i; q->tin_cnt = 8; @@ -2349,18 +2423,14 @@ cake_set_rate(b, rate, mtu, us_to_ns(q->target), us_to_ns(q->interval)); - b->tin_quantum_prio = max_t(u16, 1U, quantum1); - b->tin_quantum_band = 
+		b->tin_quantum = max_t(u16, 1U, quantum);
 
 		/* calculate next class's parameters */
 		rate  *= 7;
 		rate >>= 3;
 
-		quantum1  *= 3;
-		quantum1 >>= 1;
-
-		quantum2  *= 7;
-		quantum2 >>= 3;
+		quantum  *= 7;
+		quantum >>= 3;
 	}
 
 	return 0;
@@ -2399,17 +2469,11 @@
 	cake_set_rate(&q->tins[3], rate >> 2, mtu,
 		      us_to_ns(q->target), us_to_ns(q->interval));
 
-	/* priority weights */
-	q->tins[0].tin_quantum_prio = quantum;
-	q->tins[1].tin_quantum_prio = quantum >> 4;
-	q->tins[2].tin_quantum_prio = quantum << 2;
-	q->tins[3].tin_quantum_prio = quantum << 4;
-
 	/* bandwidth-sharing weights */
-	q->tins[0].tin_quantum_band = quantum;
-	q->tins[1].tin_quantum_band = quantum >> 4;
-	q->tins[2].tin_quantum_band = quantum >> 1;
-	q->tins[3].tin_quantum_band = quantum >> 2;
+	q->tins[0].tin_quantum = quantum;
+	q->tins[1].tin_quantum = quantum >> 4;
+	q->tins[2].tin_quantum = quantum >> 1;
+	q->tins[3].tin_quantum = quantum >> 2;
 
 	return 0;
 }
@@ -2440,15 +2504,10 @@
 	cake_set_rate(&q->tins[2], rate >> 2, mtu,
 		      us_to_ns(q->target), us_to_ns(q->interval));
 
-	/* priority weights */
-	q->tins[0].tin_quantum_prio = quantum;
-	q->tins[1].tin_quantum_prio = quantum >> 4;
-	q->tins[2].tin_quantum_prio = quantum << 4;
-
 	/* bandwidth-sharing weights */
-	q->tins[0].tin_quantum_band = quantum;
-	q->tins[1].tin_quantum_band = quantum >> 4;
-	q->tins[2].tin_quantum_band = quantum >> 2;
+	q->tins[0].tin_quantum = quantum;
+	q->tins[1].tin_quantum = quantum >> 4;
+	q->tins[2].tin_quantum = quantum >> 2;
 
 	return 0;
 }
@@ -2517,7 +2576,8 @@
 	if (!opt)
 		return -EINVAL;
 
-	err = nla_parse_nested(tb, TCA_CAKE_MAX, opt, cake_policy, extack);
+	err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy,
+					  extack);
 	if (err < 0)
 		return err;
 
@@ -2617,6 +2677,11 @@
 		q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
 	}
 
+	if (tb[TCA_CAKE_FWMARK]) {
+		q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
+		q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
+	}
+
 	if (q->tins) {
 		sch_tree_lock(sch);
 		cake_reconfigure(sch);
@@ -2712,7 +2777,7 @@
 	struct cake_sched_data *q = qdisc_priv(sch);
 	struct nlattr *opts;
 
-	opts = nla_nest_start(skb, TCA_OPTIONS);
+	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
 	if (!opts)
 		goto nla_put_failure;
 
@@ -2772,6 +2837,9 @@
 		       !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
 		goto nla_put_failure;
 
+	if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
+		goto nla_put_failure;
+
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
@@ -2780,7 +2848,7 @@
 
 static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 {
-	struct nlattr *stats = nla_nest_start(d->skb, TCA_STATS_APP);
+	struct nlattr *stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
 	struct cake_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tstats, *ts;
 	int i;
@@ -2810,7 +2878,7 @@
 #undef PUT_STAT_U32
 #undef PUT_STAT_U64
 
-	tstats = nla_nest_start(d->skb, TCA_CAKE_STATS_TIN_STATS);
+	tstats = nla_nest_start_noflag(d->skb, TCA_CAKE_STATS_TIN_STATS);
 	if (!tstats)
 		goto nla_put_failure;
 
@@ -2827,7 +2895,7 @@
 	for (i = 0; i < q->tin_cnt; i++) {
 		struct cake_tin_data *b = &q->tins[q->tin_order[i]];
 
-		ts = nla_nest_start(d->skb, i + 1);
+		ts = nla_nest_start_noflag(d->skb, i + 1);
 		if (!ts)
 			goto nla_put_failure;
 
@@ -2947,7 +3015,7 @@
 	if (flow) {
 		ktime_t now = ktime_get();
 
-		stats = nla_nest_start(d->skb, TCA_STATS_APP);
+		stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
 		if (!stats)
 			return -1;
-- 
Gitblit v1.6.2