BUG/MINOR: quic: ensure cwnd limits are always enforced
authorAmaury Denoyelle <adenoyelle@haproxy.com>
Mon, 20 Jan 2025 15:24:21 +0000 (16:24 +0100)
committerChristopher Faulet <cfaulet@haproxy.com>
Mon, 2 Jun 2025 15:31:52 +0000 (17:31 +0200)
The congestion window is limited by minimal and maximum values which can
never be exceeded. The min value is hardcoded to 2 datagrams as recommended
by the specification. The max value is specified via haproxy configuration.

These values must be respected each time the congestion window size is
adjusted. However, on some rare occasions, the limits were not always
enforced. Fix this by implementing wrappers to set or increment the
congestion window. These functions ensure the limits are always applied
after the operation.

Additionally, the wrappers also ensure that if the window reached a new
maximum value, it is saved in the <cwnd_last_max> field.

This should be backported up to 2.6, after a brief period of
observation.

(cherry picked from commit 7bad88c35c7547d52ac170ed9f89f29cccd6c46c)
Signed-off-by: Amaury Denoyelle <adenoyelle@haproxy.com>
(cherry picked from commit 355a3225302d1714792781f2dbee7d2ccfdc2a62)
Signed-off-by: Amaury Denoyelle <adenoyelle@haproxy.com>

include/haproxy/quic_cc.h
src/quic_cc.c
src/quic_cc_cubic.c
src/quic_cc_newreno.c

index eb3d784..ddee8d4 100644 (file)
@@ -107,7 +107,9 @@ static inline size_t quic_cc_path_prep_data(struct quic_cc_path *path)
        return path->cwnd - path->prep_in_flight;
 }
 
-int quic_cwnd_may_increase(const struct quic_cc_path *path);
+void quic_cc_path_reset(struct quic_cc_path *path);
+void quic_cc_path_set(struct quic_cc_path *path, uint64_t val);
+void quic_cc_path_inc(struct quic_cc_path *path, uint64_t val);
 
 #endif /* USE_QUIC */
 #endif /* _PROTO_QUIC_CC_H */
index dd694f3..b3b99e3 100644 (file)
@@ -49,7 +49,7 @@ void quic_cc_state_trace(struct buffer *buf, const struct quic_cc *cc)
 }
 
 /* Returns true if congestion window on path ought to be increased. */
-int quic_cwnd_may_increase(const struct quic_cc_path *path)
+static int quic_cwnd_may_increase(const struct quic_cc_path *path)
 {
        /* RFC 9002 7.8. Underutilizing the Congestion Window
         *
@@ -66,3 +66,32 @@ int quic_cwnd_may_increase(const struct quic_cc_path *path)
         */
        return 2 * path->in_flight >= path->cwnd  || path->cwnd < 16384;
 }
+
+/* Restore congestion window for <path> to its minimal value. */
+void quic_cc_path_reset(struct quic_cc_path *path)
+{
+       path->cwnd = path->limit_min;
+}
+
+/* Set congestion window for <path> to <val>. Min and max limits are enforced. */
+void quic_cc_path_set(struct quic_cc_path *path, uint64_t val)
+{
+       path->cwnd = QUIC_MIN(val, path->limit_max);
+       path->cwnd = QUIC_MAX(path->cwnd, path->limit_min);
+
+       path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
+}
+
+/* Increment congestion window for <path> with <val>. Min and max limits are
+ * enforced. Contrary to quic_cc_path_set(), increase is performed only if a
+ * certain minimal level of the window was already filled.
+ */
+void quic_cc_path_inc(struct quic_cc_path *path, uint64_t val)
+{
+       if (quic_cwnd_may_increase(path)) {
+               path->cwnd = QUIC_MIN(path->cwnd + val, path->limit_max);
+               path->cwnd = QUIC_MAX(path->cwnd, path->limit_min);
+
+               path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
+       }
+}
index 2ea64b9..f58b897 100644 (file)
@@ -377,11 +377,7 @@ static inline void quic_cubic_update(struct quic_cc *cc, uint32_t acked)
                        inc = W_est_inc;
        }
 
-       if (quic_cwnd_may_increase(path)) {
-               path->cwnd += inc;
-               path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
-               path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
-       }
+       quic_cc_path_inc(path, inc);
  leave:
        TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
 }
@@ -428,7 +424,7 @@ static void quic_enter_recovery(struct quic_cc *cc)
        }
 
        c->ssthresh = (CUBIC_BETA_SCALED * path->cwnd) >> CUBIC_SCALE_FACTOR_SHIFT;
-       path->cwnd =  QUIC_MAX(c->ssthresh, (uint32_t)path->limit_min);
+       quic_cc_path_set(path, c->ssthresh);
        c->state = QUIC_CC_ST_RP;
        TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc, NULL, cc);
 }
@@ -450,10 +446,7 @@ static void quic_cc_cubic_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
                        if (path->cwnd >= QUIC_CC_INFINITE_SSTHESH - acked)
                                goto out;
 
-                       if (quic_cwnd_may_increase(path)) {
-                               path->cwnd += acked;
-                               path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
-                       }
+                       quic_cc_path_inc(path, acked);
                        quic_cc_hystart_track_min_rtt(cc, h, path->loss.latest_rtt);
                        if (ev->ack.pn >= h->wnd_end)
                                h->wnd_end = UINT64_MAX;
@@ -464,15 +457,11 @@ static void quic_cc_cubic_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
                        }
                }
                else if (path->cwnd < QUIC_CC_INFINITE_SSTHESH - ev->ack.acked) {
-                       if (quic_cwnd_may_increase(path)) {
-                               path->cwnd += ev->ack.acked;
-                               path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
-                       }
+                       quic_cc_path_inc(path, ev->ack.acked);
                }
                /* Exit to congestion avoidance if slow start threshold is reached. */
                if (path->cwnd >= c->ssthresh)
                        c->state = QUIC_CC_ST_CA;
-               path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
                break;
 
        case QUIC_CC_EVT_LOSS:
@@ -530,10 +519,7 @@ static void quic_cc_cubic_cs_cb(struct quic_cc *cc, struct quic_cc_event *ev)
                if (path->cwnd >= QUIC_CC_INFINITE_SSTHESH - acked)
                        goto out;
 
-               if (quic_cwnd_may_increase(path)) {
-                       path->cwnd += acked;
-                       path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
-               }
+               quic_cc_path_inc(path, acked);
                quic_cc_hystart_track_min_rtt(cc, h, path->loss.latest_rtt);
                if (quic_cc_hystart_may_reenter_ss(h)) {
                        /* Exit to slow start */
index ced1c31..91a9664 100644 (file)
@@ -55,7 +55,7 @@ static void quic_cc_nr_slow_start(struct quic_cc *cc)
        struct nr *nr = quic_cc_priv(cc);
 
        path = container_of(cc, struct quic_cc_path, cc);
-       path->cwnd = path->limit_min;
+       quic_cc_path_reset(path);
        /* Re-entering slow start state. */
        nr->state = QUIC_CC_ST_SS;
        /* Recovery start time reset */
@@ -71,7 +71,7 @@ static void quic_cc_nr_enter_recovery(struct quic_cc *cc)
        path = container_of(cc, struct quic_cc_path, cc);
        nr->recovery_start_time = now_ms;
        nr->ssthresh = path->cwnd >> 1;
-       path->cwnd = QUIC_MAX(nr->ssthresh, (uint32_t)path->limit_min);
+       quic_cc_path_set(path, nr->ssthresh);
        nr->state = QUIC_CC_ST_RP;
 }
 
@@ -86,11 +86,7 @@ static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
        path = container_of(cc, struct quic_cc_path, cc);
        switch (ev->type) {
        case QUIC_CC_EVT_ACK:
-               if (quic_cwnd_may_increase(path)) {
-                       path->cwnd += ev->ack.acked;
-                       path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
-                       path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
-               }
+               quic_cc_path_inc(path, ev->ack.acked);
                /* Exit to congestion avoidance if slow start threshold is reached. */
                if (path->cwnd > nr->ssthresh)
                        nr->state = QUIC_CC_ST_CA;
@@ -126,11 +122,7 @@ static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
                 */
                acked = ev->ack.acked * path->mtu + nr->remain_acked;
                nr->remain_acked = acked % path->cwnd;
-               if (quic_cwnd_may_increase(path)) {
-                       path->cwnd += acked / path->cwnd;
-                       path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
-                       path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
-               }
+               quic_cc_path_inc(path, acked / path->cwnd);
                break;
        }
 
@@ -170,7 +162,7 @@ static void quic_cc_nr_rp_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 
                nr->state = QUIC_CC_ST_CA;
                nr->recovery_start_time = TICK_ETERNITY;
-               path->cwnd = nr->ssthresh;
+               quic_cc_path_set(path, nr->ssthresh);
                break;
        case QUIC_CC_EVT_LOSS:
                /* Do nothing */