struct quic_cc_algo {
enum quic_cc_algo_type type;
- enum quic_cc_algo_state_type state;
int (*init)(struct quic_cc *cc);
void (*event)(struct quic_cc *cc, struct quic_cc_event *ev);
void (*slow_start)(struct quic_cc *cc);
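Removing state from this struct is the point of the patch: each struct quic_cc_algo is a single file-scope descriptor (quic_cc_algo_nr and quic_cc_algo_nocc appear below) shared by every connection using that algorithm, so per-connection state written through cc->algo->state would be trampled as soon as two connections ran concurrently. The state now lives in the per-connection private area returned by quic_cc_priv(). A minimal sketch of such an accessor, assuming the private storage is a priv scratch field inside struct quic_cc (the real definition lives elsewhere in the tree):

static inline void *quic_cc_priv(const struct quic_cc *cc)
{
	/* Per-connection scratch area: each algorithm overlays its own
	 * private struct (struct cubic, struct nr, ...) on it. */
	return (void *)cc->priv;
}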
/* K cube factor: (1 - beta) / c */
struct cubic {
+ uint32_t state;
uint32_t ssthresh;
uint32_t remaining_inc;
uint32_t remaining_tcp_inc;
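The "K cube factor" comment above refers to the constant used to compute CUBIC's K from RFC 8312: K is the time the cubic curve takes to grow the window back to W_max, the size reached before the last reduction, with K = cbrt(W_max * (1 - beta) / C). A sketch assuming the RFC 8312 defaults beta = 0.7 and C = 0.4 (the fixed-point constants this file actually uses are defined elsewhere):

#include <math.h>

/* K in seconds, W_max in MSS units (RFC 8312, section 4.1).
 * beta = 0.7 and C = 0.4 are the RFC defaults, assumed here,
 * so (1 - beta) / C = 0.75. */
static double cubic_K(double W_max)
{
	return cbrt(W_max * (1.0 - 0.7) / 0.4);
}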
struct cubic *c = quic_cc_priv(cc);
TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
- cc->algo->state = QUIC_CC_ST_SS;
-
+ c->state = QUIC_CC_ST_SS;
c->ssthresh = QUIC_CC_INFINITE_SSTHESH;
c->remaining_inc = 0;
c->remaining_tcp_inc = 0;
}
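Initializing ssthresh to QUIC_CC_INFINITE_SSTHESH (the constant keeps its historical spelling) keeps the connection in slow start until the first loss, since cwnd can never reach the threshold. A sketch of the constant, assuming it is simply the all-ones 32-bit value:

#define QUIC_CC_INFINITE_SSTHESH ((uint32_t)-1)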
path->cwnd = (CUBIC_BETA * path->cwnd) >> CUBIC_BETA_SCALE_SHIFT;
c->ssthresh = QUIC_MAX(path->cwnd, path->min_cwnd);
- cc->algo->state = QUIC_CC_ST_RP;
+ c->state = QUIC_CC_ST_RP;
TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc, NULL, cc);
}
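The shift above is a fixed-point multiplicative decrease. Assuming CUBIC_BETA_SCALE_SHIFT is 10 and CUBIC_BETA is 0.7 scaled by 2^10, roughly 717 (the exact constants are defined at the top of the file), the loss response keeps about 70% of the window:

/* Worked example with the assumed constants:
 *   cwnd = 100000
 *   (717 * 100000) >> 10 = 70019, i.e. about 0.7 * cwnd;
 * the QUIC_MAX() above then keeps ssthresh at least at
 * path->min_cwnd. */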
path->cwnd += ev->ack.acked;
/* Exit to congestion avoidance if slow start threshold is reached. */
if (path->cwnd >= c->ssthresh)
- cc->algo->state = QUIC_CC_ST_CA;
+ c->state = QUIC_CC_ST_CA;
break;
case QUIC_CC_EVT_LOSS:
goto leave;
}
- cc->algo->state = QUIC_CC_ST_CA;
+ c->state = QUIC_CC_ST_CA;
c->recovery_start_time = TICK_ETERNITY;
break;
case QUIC_CC_EVT_LOSS:
static void quic_cc_cubic_event(struct quic_cc *cc, struct quic_cc_event *ev)
{
- return quic_cc_cubic_state_cbs[cc->algo->state](cc, ev);
+ struct cubic *c = quic_cc_priv(cc);
+
+ return quic_cc_cubic_state_cbs[c->state](cc, ev);
}
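The dispatch table itself is unchanged; only the index now comes from the algorithm's private struct instead of the shared descriptor. Its shape is roughly the following, with hypothetical handler names (the real ones are defined earlier in this file):

static void (*quic_cc_cubic_state_cbs[])(struct quic_cc *cc,
                                         struct quic_cc_event *ev) = {
	[QUIC_CC_ST_SS] = quic_cc_cubic_ss_cb, /* slow start */
	[QUIC_CC_ST_CA] = quic_cc_cubic_ca_cb, /* congestion avoidance */
	[QUIC_CC_ST_RP] = quic_cc_cubic_rp_cb, /* recovery period */
};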
static void quic_cc_cubic_state_trace(struct buffer *buf, const struct quic_cc *cc)
path = container_of(cc, struct quic_path, cc);
chunk_appendf(buf, " state=%s cwnd=%llu ssthresh=%d rpst=%dms",
- quic_cc_state_str(cc->algo->state),
+ quic_cc_state_str(c->state),
(unsigned long long)path->cwnd,
(int)c->ssthresh,
!tick_isset(c->recovery_start_time) ? -1 :
/* Newreno state */
struct nr {
+ uint32_t state;
uint32_t ssthresh;
uint32_t recovery_start_time;
uint32_t remain_acked;
{
struct nr *nr = quic_cc_priv(cc);
- cc->algo->state = QUIC_CC_ST_SS;
+ nr->state = QUIC_CC_ST_SS;
nr->ssthresh = QUIC_CC_INFINITE_SSTHESH;
nr->recovery_start_time = 0;
nr->remain_acked = 0;
path = container_of(cc, struct quic_path, cc);
path->cwnd = path->min_cwnd;
/* Re-entering slow start state. */
- cc->algo->state = QUIC_CC_ST_SS;
+ nr->state = QUIC_CC_ST_SS;
/* Recovery start time reset */
nr->recovery_start_time = 0;
}
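The block above collapses the window to the path minimum and re-enters slow start, which matches the slow_start callback slot of struct quic_cc_algo; loss detection would invoke it on persistent congestion (RFC 9002, section 7.6). A hypothetical call site, for illustration only:

/* Hypothetical call site in loss detection (not part of this patch): */
if (persistent_congestion)
	cc->algo->slow_start(cc);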
path = container_of(cc, struct quic_path, cc);
nr->recovery_start_time = now_ms;
nr->ssthresh = QUIC_MAX(path->cwnd >> 1, path->min_cwnd);
- cc->algo->state = QUIC_CC_ST_RP;
+ nr->state = QUIC_CC_ST_RP;
}
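On loss, New Reno enters the recovery period with the classical halving, clamped to the path's minimum window. With the hypothetical values cwnd = 30000 and min_cwnd = 4800:

/* nr->ssthresh = QUIC_MAX(30000 >> 1, 4800) = 15000;
 * with cwnd = 8000 instead, QUIC_MAX(4000, 4800) = 4800 would apply. */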
/* Slow start callback. */
path->cwnd += ev->ack.acked;
/* Exit to congestion avoidance if slow start threshold is reached. */
if (path->cwnd > nr->ssthresh)
- cc->algo->state = QUIC_CC_ST_CA;
+ nr->state = QUIC_CC_ST_CA;
break;
case QUIC_CC_EVT_LOSS:
goto leave;
}
- cc->algo->state = QUIC_CC_ST_CA;
+ nr->state = QUIC_CC_ST_CA;
nr->recovery_start_time = TICK_ETERNITY;
path->cwnd = nr->ssthresh;
break;
path = container_of(cc, struct quic_path, cc);
chunk_appendf(buf, " state=%s cwnd=%llu ssthresh=%ld recovery_start_time=%llu",
- quic_cc_state_str(cc->algo->state),
+ quic_cc_state_str(nr->state),
(unsigned long long)path->cwnd,
(long)nr->ssthresh,
(unsigned long long)nr->recovery_start_time);
static void quic_cc_nr_event(struct quic_cc *cc, struct quic_cc_event *ev)
{
- return quic_cc_nr_state_cbs[cc->algo->state](cc, ev);
+ struct nr *nr = quic_cc_priv(cc);
+
+ return quic_cc_nr_state_cbs[nr->state](cc, ev);
}
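Both private structs (struct cubic, struct nr) are overlaid on the per-connection storage behind quic_cc_priv(), so that storage must be at least as large as the biggest of them. A compile-time guard one could add, assuming the storage is a priv member of struct quic_cc (not part of this patch):

_Static_assert(sizeof(struct nr) <= sizeof(((struct quic_cc *)0)->priv),
               "struct nr must fit in the quic_cc private area");
_Static_assert(sizeof(struct cubic) <= sizeof(((struct quic_cc *)0)->priv),
               "struct cubic must fit in the quic_cc private area");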
struct quic_cc_algo quic_cc_algo_nr = {
static void quic_cc_nocc_event(struct quic_cc *cc, struct quic_cc_event *ev)
{
- return quic_cc_nocc_state_cbs[cc->algo->state](cc, ev);
+ return quic_cc_nocc_state_cbs[QUIC_CC_ST_SS](cc, ev);
}
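With the shared state field gone, the stateless nocc variant ("no congestion control", useful for testing) has no per-algorithm state to read, so it always dispatches the slow-start entry by indexing the table with the QUIC_CC_ST_SS constant. A sketch of a single-entry table that would suffice after this change, with a hypothetical handler name:

static void (*quic_cc_nocc_state_cbs[])(struct quic_cc *cc,
                                        struct quic_cc_event *ev) = {
	[QUIC_CC_ST_SS] = quic_cc_nocc_cb, /* hypothetical handler name */
};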
struct quic_cc_algo quic_cc_algo_nocc = {