Move the quic_path struct from quic_conn-t.h to quic_cc-t.h and rename it to quic_cc_path.
Update the code accordingly.
Also move some inline functions related to the QUIC path to quic_cc.h.
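
As an illustration of the rename's impact on callers, a congestion controller
now retrieves its path as follows (a minimal sketch of the container_of()
pattern used by the cubic, newreno and nocc modules; the cc_to_path() helper
name is hypothetical):

    /* Hypothetical helper, assuming <haproxy/quic_cc-t.h> for struct quic_cc
     * and struct quic_cc_path: return the path which embeds <cc>. Call sites
     * previously used "struct quic_path" here.
     */
    static inline struct quic_cc_path *cc_to_path(struct quic_cc *cc)
    {
        return container_of(cc, struct quic_cc_path, cc);
    }
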
#include <stddef.h> /* size_t */
#include <haproxy/buf-t.h>
+#include <haproxy/quic_loss-t.h>
#define QUIC_CC_INFINITE_SSTHESH ((uint32_t)-1)
uint32_t priv[16];
};
+struct quic_cc_path {
+ /* Control congestion. */
+ struct quic_cc cc;
+ /* Packet loss detection information. */
+ struct quic_loss loss;
+
+ /* MTU. */
+ size_t mtu;
+ /* Congestion window. */
+ uint64_t cwnd;
+ /* The current maximum congestion window value reached. */
+ uint64_t mcwnd;
+ /* The maximum congestion window value which can be reached. */
+ uint64_t max_cwnd;
+ /* Minimum congestion window. */
+ uint64_t min_cwnd;
+ /* Prepared data to be sent (in bytes). */
+ uint64_t prep_in_flight;
+ /* Outstanding data (in bytes). */
+ uint64_t in_flight;
+ /* Number of in flight ack-eliciting packets. */
+ uint64_t ifae_pkts;
+};
+
struct quic_cc_algo {
enum quic_cc_algo_type type;
int (*init)(struct quic_cc *cc);
#include <haproxy/chunk.h>
#include <haproxy/quic_cc-t.h>
#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_loss.h>
void quic_cc_init(struct quic_cc *cc, struct quic_cc_algo *algo, struct quic_conn *qc);
void quic_cc_event(struct quic_cc *cc, struct quic_cc_event *ev);
return (void *)cc->priv;
}
+/* Initialize <path> QUIC network path depending on <ipv4> boolean
+ * which is true for an IPv4 path and false for an IPv6 path.
+ */
+static inline void quic_cc_path_init(struct quic_cc_path *path, int ipv4, unsigned long max_cwnd,
+ struct quic_cc_algo *algo, struct quic_conn *qc)
+{
+ unsigned int max_dgram_sz;
+
+ max_dgram_sz = ipv4 ? QUIC_INITIAL_IPV4_MTU : QUIC_INITIAL_IPV6_MTU;
+ quic_loss_init(&path->loss);
+ path->mtu = max_dgram_sz;
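+ /* RFC 9002 7.2: the initial congestion window is
+  * min(10 * max_datagram_size, max(2 * max_datagram_size, 14720 bytes)).
+  */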
+ path->cwnd = QUIC_MIN(10 * max_dgram_sz, QUIC_MAX(max_dgram_sz << 1, 14720U));
+ path->mcwnd = path->cwnd;
+ path->max_cwnd = max_cwnd;
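+ /* RFC 9002 7.2: the RECOMMENDED minimum congestion window is 2 * max_datagram_size. */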
+ path->min_cwnd = max_dgram_sz << 1;
+ path->prep_in_flight = 0;
+ path->in_flight = 0;
+ path->ifae_pkts = 0;
+ quic_cc_init(&path->cc, algo, qc);
+}
+
+/* Return the remaining room available on <path> QUIC path for prepared data
+ * (before being sent). Almost the same as the QUIC path room, except that
+ * here the data which have already been prepared are taken into account.
+ */
+static inline size_t quic_cc_path_prep_data(struct quic_cc_path *path)
+{
+ if (path->prep_in_flight > path->cwnd)
+ return 0;
+
+ return path->cwnd - path->prep_in_flight;
+}
+
+
#endif /* USE_QUIC */
#endif /* _PROTO_QUIC_CC_H */
/* The maximum number of bytes of CRYPTO data in flight during handshakes. */
#define QUIC_CRYPTO_IN_FLIGHT_MAX 4096
-struct quic_path {
- /* Control congestion. */
- struct quic_cc cc;
- /* Packet loss detection information. */
- struct quic_loss loss;
-
- /* MTU. */
- size_t mtu;
- /* Congestion window. */
- uint64_t cwnd;
- /* The current maximum congestion window value reached. */
- uint64_t mcwnd;
- /* The maximum congestion window value which can be reached. */
- uint64_t max_cwnd;
- /* Minimum congestion window. */
- uint64_t min_cwnd;
- /* Prepared data to be sent (in bytes). */
- uint64_t prep_in_flight;
- /* Outstanding data (in bytes). */
- uint64_t in_flight;
- /* Number of in flight ack-eliciting packets. */
- uint64_t ifae_pkts;
-};
-
/* Status of the connection/mux layer. This defines how to handle app data.
*
* During a standard quic_conn lifetime it transitions like this :
} ku;
unsigned int max_ack_delay;
unsigned int max_idle_timeout;
- struct quic_path paths[1];
- struct quic_path *path;
+ struct quic_cc_path paths[1];
+ struct quic_cc_path *path;
struct mt_list accept_list; /* chaining element used for accept, only valid for frontend connections */
ncid_frm->stateless_reset_token = src->stateless_reset_token;
}
-/* Initialize <p> QUIC network path depending on <ipv4> boolean
- * which is true for an IPv4 path, if not false for an IPv6 path.
- */
-static inline void quic_path_init(struct quic_path *path, int ipv4, unsigned long max_cwnd,
- struct quic_cc_algo *algo, struct quic_conn *qc)
-{
- unsigned int max_dgram_sz;
-
- max_dgram_sz = ipv4 ? QUIC_INITIAL_IPV4_MTU : QUIC_INITIAL_IPV6_MTU;
- quic_loss_init(&path->loss);
- path->mtu = max_dgram_sz;
- path->cwnd = QUIC_MIN(10 * max_dgram_sz, QUIC_MAX(max_dgram_sz << 1, 14720U));
- path->mcwnd = path->cwnd;
- path->max_cwnd = max_cwnd;
- path->min_cwnd = max_dgram_sz << 1;
- path->prep_in_flight = 0;
- path->in_flight = 0;
- path->ifae_pkts = 0;
- quic_cc_init(&path->cc, algo, qc);
-}
-
-/* Return the remaining <room> available on <path> QUIC path for prepared data
- * (before being sent). Almost the same that for the QUIC path room, except that
- * here this is the data which have been prepared which are taken into an account.
- */
-static inline size_t quic_path_prep_data(struct quic_path *path)
-{
- if (path->prep_in_flight > path->cwnd)
- return 0;
-
- return path->cwnd - path->prep_in_flight;
-}
-
/* Return 1 if <pkt> header form is long, 0 if not. */
static inline int qc_pkt_long(const struct quic_rx_packet *pkt)
{
static inline void quic_cubic_update(struct quic_cc *cc, uint32_t acked)
{
struct cubic *c = quic_cc_priv(cc);
- struct quic_path *path = container_of(cc, struct quic_path, cc);
+ struct quic_cc_path *path = container_of(cc, struct quic_cc_path, cc);
/* Current cwnd as number of packets */
uint32_t t, target, inc, inc_diff;
uint64_t delta, diff;
static void quic_enter_recovery(struct quic_cc *cc)
{
- struct quic_path *path = container_of(cc, struct quic_path, cc);
+ struct quic_cc_path *path = container_of(cc, struct quic_cc_path, cc);
struct cubic *c = quic_cc_priv(cc);
/* Current cwnd as number of packets */
/* Congestion slow-start callback. */
static void quic_cc_cubic_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
{
- struct quic_path *path = container_of(cc, struct quic_path, cc);
+ struct quic_cc_path *path = container_of(cc, struct quic_cc_path, cc);
struct cubic *c = quic_cc_priv(cc);
TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
static void quic_cc_cubic_state_trace(struct buffer *buf, const struct quic_cc *cc)
{
- struct quic_path *path;
+ struct quic_cc_path *path;
struct cubic *c = quic_cc_priv(cc);
- path = container_of(cc, struct quic_path, cc);
+ path = container_of(cc, struct quic_cc_path, cc);
chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%d rpst=%dms",
quic_cc_state_str(c->state),
(unsigned long long)path->cwnd,
/* Re-enter slow start state. */
static void quic_cc_nr_slow_start(struct quic_cc *cc)
{
- struct quic_path *path;
+ struct quic_cc_path *path;
struct nr *nr = quic_cc_priv(cc);
- path = container_of(cc, struct quic_path, cc);
+ path = container_of(cc, struct quic_cc_path, cc);
path->cwnd = path->min_cwnd;
/* Re-entering slow start state. */
nr->state = QUIC_CC_ST_SS;
/* Enter a recovery period. */
static void quic_cc_nr_enter_recovery(struct quic_cc *cc)
{
- struct quic_path *path;
+ struct quic_cc_path *path;
struct nr *nr = quic_cc_priv(cc);
- path = container_of(cc, struct quic_path, cc);
+ path = container_of(cc, struct quic_cc_path, cc);
nr->recovery_start_time = now_ms;
nr->ssthresh = path->cwnd >> 1;
path->cwnd = QUIC_MAX(nr->ssthresh, (uint32_t)path->min_cwnd);
/* Slow start callback. */
static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
{
- struct quic_path *path;
+ struct quic_cc_path *path;
struct nr *nr = quic_cc_priv(cc);
TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
- path = container_of(cc, struct quic_path, cc);
+ path = container_of(cc, struct quic_cc_path, cc);
switch (ev->type) {
case QUIC_CC_EVT_ACK:
path->cwnd += ev->ack.acked;
/* Congestion avoidance callback. */
static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
{
- struct quic_path *path;
+ struct quic_cc_path *path;
struct nr *nr = quic_cc_priv(cc);
TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
- path = container_of(cc, struct quic_path, cc);
+ path = container_of(cc, struct quic_cc_path, cc);
switch (ev->type) {
case QUIC_CC_EVT_ACK:
{
/* Recovery period callback. */
static void quic_cc_nr_rp_cb(struct quic_cc *cc, struct quic_cc_event *ev)
{
- struct quic_path *path;
+ struct quic_cc_path *path;
struct nr *nr = quic_cc_priv(cc);
TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
- path = container_of(cc, struct quic_path, cc);
+ path = container_of(cc, struct quic_cc_path, cc);
switch (ev->type) {
case QUIC_CC_EVT_ACK:
/* RFC 9002 7.3.2. Recovery
}
static void quic_cc_nr_state_trace(struct buffer *buf, const struct quic_cc *cc)
{
- struct quic_path *path;
+ struct quic_cc_path *path;
struct nr *nr = quic_cc_priv(cc);
- path = container_of(cc, struct quic_path, cc);
+ path = container_of(cc, struct quic_cc_path, cc);
chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%ld rpst=%dms pktloss=%llu",
quic_cc_state_str(nr->state),
(unsigned long long)path->cwnd,
static int quic_cc_nocc_init(struct quic_cc *cc)
{
- struct quic_path *path;
+ struct quic_cc_path *path;
- path = container_of(cc, struct quic_path, cc);
+ path = container_of(cc, struct quic_cc_path, cc);
path->cwnd = path->max_cwnd;
return 1;
}
static void quic_cc_nocc_state_trace(struct buffer *buf, const struct quic_cc *cc)
{
- struct quic_path *path;
+ struct quic_cc_path *path;
- path = container_of(cc, struct quic_path, cc);
+ path = container_of(cc, struct quic_cc_path, cc);
chunk_appendf(buf, " cwnd=%llu", (unsigned long long)path->cwnd);
}
qc->max_ack_delay = 0;
/* Only one path at this time (multipath not supported) */
qc->path = &qc->paths[0];
- quic_path_init(qc->path, ipv4, server ? l->bind_conf->max_cwnd : 0,
- cc_algo ? cc_algo : default_quic_cc_algo, qc);
+ quic_cc_path_init(qc->path, ipv4, server ? l->bind_conf->max_cwnd : 0,
+ cc_algo ? cc_algo : default_quic_cc_algo, qc);
qc->stream_buf_count = 0;
memcpy(&qc->local_addr, local_addr, sizeof(qc->local_addr));
* control window.
*/
if (!qel->pktns->tx.pto_probe) {
- size_t remain = quic_path_prep_data(qc->path);
+ size_t remain = quic_cc_path_prep_data(qc->path);
if (headlen > remain)
goto leave;
if (!probe && !LIST_ISEMPTY(frms) && !cc) {
size_t path_room;
- path_room = quic_path_prep_data(qc->path);
+ path_room = quic_cc_path_prep_data(qc->path);
if (end - beg > path_room)
end = beg + path_room;
}
*
* Probe packets MUST NOT be blocked by the congestion controller.
*/
- if ((quic_path_prep_data(qc->path) || pktns->tx.pto_probe) &&
+ if ((quic_cc_path_prep_data(qc->path) || pktns->tx.pto_probe) &&
(!qc_test_fd(qc) || !fd_send_active(qc->fd))) {
tasklet_wakeup(qc->subs->tasklet);
qc->subs->events &= ~SUB_RETRY_SEND;