From: Amaury Denoyelle Date: Thu, 23 Jan 2025 09:47:57 +0000 (+0100) Subject: MINOR: quic: rename min/max fields for congestion window algo X-Git-Tag: v3.0.12~130 X-Git-Url: http://git.haproxy.org/?a=commitdiff_plain;h=2f13b9061bf5ed8d8084e7b4049b6492e13d27c7;p=haproxy-3.0.git MINOR: quic: rename min/max fields for congestion window algo There was some possible confusion between fields related to congestion window size min and max limit which cannot be exceeded, and the maximum value previously reached by the window. Fix this by adopting a new naming scheme. Enforced limits are now renamed limit_max/limit_min, while the previously reached max value is renamed cwnd_last_max. This should be backported up to 3.1. (cherry picked from commit 2eb1b0cd96f663b9260ee48921612566417b3b8d) Signed-off-by: Willy Tarreau (cherry picked from commit 13c3baf545c93994688c3abd4895e6aede05211a) [ad: pick to ease next backport] Signed-off-by: Amaury Denoyelle --- diff --git a/include/haproxy/quic_cc-t.h b/include/haproxy/quic_cc-t.h index 99e8f0c..c35ea07 100644 --- a/include/haproxy/quic_cc-t.h +++ b/include/haproxy/quic_cc-t.h @@ -101,11 +101,11 @@ struct quic_cc_path { /* Congestion window. */ uint64_t cwnd; /* The current maximum congestion window value reached. */ - uint64_t mcwnd; - /* The maximum congestion window value which can be reached. */ - uint64_t max_cwnd; - /* Minimum congestion window. */ - uint64_t min_cwnd; + uint64_t cwnd_last_max; + /* Max limit on congestion window size. */ + uint64_t limit_max; + /* Min limit on congestion window size. */ + uint64_t limit_min; /* Prepared data to be sent (in bytes). */ uint64_t prep_in_flight; /* Outstanding data (in bytes). 
*/ diff --git a/include/haproxy/quic_cc.h b/include/haproxy/quic_cc.h index 4e21ddf..eb3d784 100644 --- a/include/haproxy/quic_cc.h +++ b/include/haproxy/quic_cc.h @@ -86,9 +86,9 @@ static inline void quic_cc_path_init(struct quic_cc_path *path, int ipv4, unsign quic_loss_init(&path->loss); path->mtu = max_dgram_sz; path->cwnd = QUIC_MIN(10 * max_dgram_sz, QUIC_MAX(max_dgram_sz << 1, 14720U)); - path->mcwnd = path->cwnd; - path->max_cwnd = max_cwnd; - path->min_cwnd = max_dgram_sz << 1; + path->cwnd_last_max = path->cwnd; + path->limit_max = max_cwnd; + path->limit_min = max_dgram_sz << 1; path->prep_in_flight = 0; path->in_flight = 0; path->ifae_pkts = 0; diff --git a/src/quic_cc_cubic.c b/src/quic_cc_cubic.c index 3140cba..2ea64b9 100644 --- a/src/quic_cc_cubic.c +++ b/src/quic_cc_cubic.c @@ -379,8 +379,8 @@ static inline void quic_cubic_update(struct quic_cc *cc, uint32_t acked) if (quic_cwnd_may_increase(path)) { path->cwnd += inc; - path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd); - path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd); + path->cwnd = QUIC_MIN(path->limit_max, path->cwnd); + path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max); } leave: TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc); @@ -428,7 +428,7 @@ static void quic_enter_recovery(struct quic_cc *cc) } c->ssthresh = (CUBIC_BETA_SCALED * path->cwnd) >> CUBIC_SCALE_FACTOR_SHIFT; - path->cwnd = QUIC_MAX(c->ssthresh, (uint32_t)path->min_cwnd); + path->cwnd = QUIC_MAX(c->ssthresh, (uint32_t)path->limit_min); c->state = QUIC_CC_ST_RP; TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc, NULL, cc); } @@ -452,7 +452,7 @@ static void quic_cc_cubic_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev) if (quic_cwnd_may_increase(path)) { path->cwnd += acked; - path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd); + path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max); } quic_cc_hystart_track_min_rtt(cc, h, path->loss.latest_rtt); if (ev->ack.pn >= h->wnd_end) @@ -466,13 +466,13 @@ static void 
quic_cc_cubic_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev) else if (path->cwnd < QUIC_CC_INFINITE_SSTHESH - ev->ack.acked) { if (quic_cwnd_may_increase(path)) { path->cwnd += ev->ack.acked; - path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd); + path->cwnd = QUIC_MIN(path->limit_max, path->cwnd); } } /* Exit to congestion avoidance if slow start threshold is reached. */ if (path->cwnd >= c->ssthresh) c->state = QUIC_CC_ST_CA; - path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd); + path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max); break; case QUIC_CC_EVT_LOSS: @@ -532,7 +532,7 @@ static void quic_cc_cubic_cs_cb(struct quic_cc *cc, struct quic_cc_event *ev) if (quic_cwnd_may_increase(path)) { path->cwnd += acked; - path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd); + path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max); } quic_cc_hystart_track_min_rtt(cc, h, path->loss.latest_rtt); if (quic_cc_hystart_may_reenter_ss(h)) { @@ -642,10 +642,10 @@ static void quic_cc_cubic_state_trace(struct buffer *buf, const struct quic_cc * struct cubic *c = quic_cc_priv(cc); path = container_of(cc, struct quic_cc_path, cc); - chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%d rpst=%dms", + chunk_appendf(buf, " state=%s cwnd=%llu cwnd_last_max=%llu ssthresh=%d rpst=%dms", quic_cc_state_str(c->state), (unsigned long long)path->cwnd, - (unsigned long long)path->mcwnd, + (unsigned long long)path->cwnd_last_max, (int)c->ssthresh, !tick_isset(c->recovery_start_time) ? -1 : TICKS_TO_MS(tick_remain(c->recovery_start_time, now_ms))); diff --git a/src/quic_cc_newreno.c b/src/quic_cc_newreno.c index 72ee3e4..ced1c31 100644 --- a/src/quic_cc_newreno.c +++ b/src/quic_cc_newreno.c @@ -55,7 +55,7 @@ static void quic_cc_nr_slow_start(struct quic_cc *cc) struct nr *nr = quic_cc_priv(cc); path = container_of(cc, struct quic_cc_path, cc); - path->cwnd = path->min_cwnd; + path->cwnd = path->limit_min; /* Re-entering slow start state. 
*/ nr->state = QUIC_CC_ST_SS; /* Recovery start time reset */ @@ -71,7 +71,7 @@ static void quic_cc_nr_enter_recovery(struct quic_cc *cc) path = container_of(cc, struct quic_cc_path, cc); nr->recovery_start_time = now_ms; nr->ssthresh = path->cwnd >> 1; - path->cwnd = QUIC_MAX(nr->ssthresh, (uint32_t)path->min_cwnd); + path->cwnd = QUIC_MAX(nr->ssthresh, (uint32_t)path->limit_min); nr->state = QUIC_CC_ST_RP; } @@ -88,8 +88,8 @@ static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev) case QUIC_CC_EVT_ACK: if (quic_cwnd_may_increase(path)) { path->cwnd += ev->ack.acked; - path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd); - path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd); + path->cwnd = QUIC_MIN(path->limit_max, path->cwnd); + path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max); } /* Exit to congestion avoidance if slow start threshold is reached. */ if (path->cwnd > nr->ssthresh) @@ -128,8 +128,8 @@ static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev) nr->remain_acked = acked % path->cwnd; if (quic_cwnd_may_increase(path)) { path->cwnd += acked / path->cwnd; - path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd); - path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd); + path->cwnd = QUIC_MIN(path->limit_max, path->cwnd); + path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max); } break; } @@ -190,10 +190,10 @@ static void quic_cc_nr_state_trace(struct buffer *buf, const struct quic_cc *cc) struct nr *nr = quic_cc_priv(cc); path = container_of(cc, struct quic_cc_path, cc); - chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%ld rpst=%dms pktloss=%llu", + chunk_appendf(buf, " state=%s cwnd=%llu cwnd_last_max=%llu ssthresh=%ld rpst=%dms pktloss=%llu", quic_cc_state_str(nr->state), (unsigned long long)path->cwnd, - (unsigned long long)path->mcwnd, + (unsigned long long)path->cwnd_last_max, (long)nr->ssthresh, !tick_isset(nr->recovery_start_time) ? 
-1 : TICKS_TO_MS(tick_remain(nr->recovery_start_time, now_ms)), diff --git a/src/quic_cc_nocc.c b/src/quic_cc_nocc.c index 6e5cff9..7302aad 100644 --- a/src/quic_cc_nocc.c +++ b/src/quic_cc_nocc.c @@ -14,7 +14,7 @@ static int quic_cc_nocc_init(struct quic_cc *cc) struct quic_cc_path *path; path = container_of(cc, struct quic_cc_path, cc); - path->cwnd = path->max_cwnd; + path->cwnd = path->limit_max; return 1; } diff --git a/src/quic_cli.c b/src/quic_cli.c index 76ff104..03002a9 100644 --- a/src/quic_cli.c +++ b/src/quic_cli.c @@ -307,10 +307,10 @@ static void dump_quic_full(struct show_quic_ctx *ctx, struct quic_conn *qc) qc->path->cc.algo->state_cli(&trash, qc->path); chunk_appendf(&trash, " srtt=%-4u rttvar=%-4u rttmin=%-4u ptoc=%-4u cwnd=%-6llu" - " mcwnd=%-6llu sentpkts=%-6llu lostpkts=%-6llu reorderedpkts=%-6llu\n", + " cwnd_last_max=%-6llu sentpkts=%-6llu lostpkts=%-6llu reorderedpkts=%-6llu\n", qc->path->loss.srtt, qc->path->loss.rtt_var, qc->path->loss.rtt_min, qc->path->loss.pto_count, (ullong)qc->path->cwnd, - (ullong)qc->path->mcwnd, (ullong)qc->cntrs.sent_pkt, (ullong)qc->path->loss.nb_lost_pkt, (ullong)qc->path->loss.nb_reordered_pkt); + (ullong)qc->path->cwnd_last_max, (ullong)qc->cntrs.sent_pkt, (ullong)qc->path->loss.nb_lost_pkt, (ullong)qc->path->loss.nb_reordered_pkt); } if (qc->cntrs.dropped_pkt) {