Congestion window is limit by a minimal and maximum values which can
never be exceeded. Min value is hardcoded to 2 datagrams as recommended
by the specification. Max value is specified via haproxy configuration.
These values must be respected each time the congestion window size is
adjusted. However, in some rare occasions, limit were not always
enforced. Fix this by implementing wrappers to set or increment the
congestion window. These functions ensure limits are always applied
after the operation.
Additionnally, wrappers also ensure that if window reached a new maximum
value, it is saved in <cwnd_last_max> field.
This should be backported up to 2.6, after a brief period of
observation.
(cherry picked from commit
7bad88c35c7547d52ac170ed9f89f29cccd6c46c)
Signed-off-by: Amaury Denoyelle <adenoyelle@haproxy.com>
(cherry picked from commit
355a3225302d1714792781f2dbee7d2ccfdc2a62)
Signed-off-by: Amaury Denoyelle <adenoyelle@haproxy.com>
return path->cwnd - path->prep_in_flight;
}
-int quic_cwnd_may_increase(const struct quic_cc_path *path);
+void quic_cc_path_reset(struct quic_cc_path *path);
+void quic_cc_path_set(struct quic_cc_path *path, uint64_t val);
+void quic_cc_path_inc(struct quic_cc_path *path, uint64_t val);
#endif /* USE_QUIC */
#endif /* _PROTO_QUIC_CC_H */
}
/* Returns true if congestion window on path ought to be increased. */
-int quic_cwnd_may_increase(const struct quic_cc_path *path)
+static int quic_cwnd_may_increase(const struct quic_cc_path *path)
{
/* RFC 9002 7.8. Underutilizing the Congestion Window
*
*/
return 2 * path->in_flight >= path->cwnd || path->cwnd < 16384;
}
+
+/* Restore congestion window for <path> to its minimal value. */
+void quic_cc_path_reset(struct quic_cc_path *path)
+{
+ path->cwnd = path->limit_min;
+}
+
+/* Set congestion window for <path> to <val>. Min and max limits are enforced. */
+void quic_cc_path_set(struct quic_cc_path *path, uint64_t val)
+{
+ path->cwnd = QUIC_MIN(val, path->limit_max);
+ path->cwnd = QUIC_MAX(path->cwnd, path->limit_min);
+
+ path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
+}
+
+/* Increment congestion window for <path> with <val>. Min and max limits are
+ * enforced. Contrary to quic_cc_path_set(), increase is performed only if a
+ * certain minimal level of the window was already filled.
+ */
+void quic_cc_path_inc(struct quic_cc_path *path, uint64_t val)
+{
+ if (quic_cwnd_may_increase(path)) {
+ path->cwnd = QUIC_MIN(path->cwnd + val, path->limit_max);
+ path->cwnd = QUIC_MAX(path->cwnd, path->limit_min);
+
+ path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
+ }
+}
inc = W_est_inc;
}
- if (quic_cwnd_may_increase(path)) {
- path->cwnd += inc;
- path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
- path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
- }
+ quic_cc_path_inc(path, inc);
leave:
TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
}
}
c->ssthresh = (CUBIC_BETA_SCALED * path->cwnd) >> CUBIC_SCALE_FACTOR_SHIFT;
- path->cwnd = QUIC_MAX(c->ssthresh, (uint32_t)path->limit_min);
+ quic_cc_path_set(path, c->ssthresh);
c->state = QUIC_CC_ST_RP;
TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc, NULL, cc);
}
if (path->cwnd >= QUIC_CC_INFINITE_SSTHESH - acked)
goto out;
- if (quic_cwnd_may_increase(path)) {
- path->cwnd += acked;
- path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
- }
+ quic_cc_path_inc(path, acked);
quic_cc_hystart_track_min_rtt(cc, h, path->loss.latest_rtt);
if (ev->ack.pn >= h->wnd_end)
h->wnd_end = UINT64_MAX;
}
}
else if (path->cwnd < QUIC_CC_INFINITE_SSTHESH - ev->ack.acked) {
- if (quic_cwnd_may_increase(path)) {
- path->cwnd += ev->ack.acked;
- path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
- }
+ quic_cc_path_inc(path, ev->ack.acked);
}
/* Exit to congestion avoidance if slow start threshold is reached. */
if (path->cwnd >= c->ssthresh)
c->state = QUIC_CC_ST_CA;
- path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
break;
case QUIC_CC_EVT_LOSS:
if (path->cwnd >= QUIC_CC_INFINITE_SSTHESH - acked)
goto out;
- if (quic_cwnd_may_increase(path)) {
- path->cwnd += acked;
- path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
- }
+ quic_cc_path_inc(path, acked);
quic_cc_hystart_track_min_rtt(cc, h, path->loss.latest_rtt);
if (quic_cc_hystart_may_reenter_ss(h)) {
/* Exit to slow start */
struct nr *nr = quic_cc_priv(cc);
path = container_of(cc, struct quic_cc_path, cc);
- path->cwnd = path->limit_min;
+ quic_cc_path_reset(path);
/* Re-entering slow start state. */
nr->state = QUIC_CC_ST_SS;
/* Recovery start time reset */
path = container_of(cc, struct quic_cc_path, cc);
nr->recovery_start_time = now_ms;
nr->ssthresh = path->cwnd >> 1;
- path->cwnd = QUIC_MAX(nr->ssthresh, (uint32_t)path->limit_min);
+ quic_cc_path_set(path, nr->ssthresh);
nr->state = QUIC_CC_ST_RP;
}
path = container_of(cc, struct quic_cc_path, cc);
switch (ev->type) {
case QUIC_CC_EVT_ACK:
- if (quic_cwnd_may_increase(path)) {
- path->cwnd += ev->ack.acked;
- path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
- path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
- }
+ quic_cc_path_inc(path, ev->ack.acked);
/* Exit to congestion avoidance if slow start threshold is reached. */
if (path->cwnd > nr->ssthresh)
nr->state = QUIC_CC_ST_CA;
*/
acked = ev->ack.acked * path->mtu + nr->remain_acked;
nr->remain_acked = acked % path->cwnd;
- if (quic_cwnd_may_increase(path)) {
- path->cwnd += acked / path->cwnd;
- path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
- path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
- }
+ quic_cc_path_inc(path, acked / path->cwnd);
break;
}
nr->state = QUIC_CC_ST_CA;
nr->recovery_start_time = TICK_ETERNITY;
- path->cwnd = nr->ssthresh;
+ quic_cc_path_set(path, nr->ssthresh);
break;
case QUIC_CC_EVT_LOSS:
/* Do nothing */