Limit the number of old entries we remove in one call to
stktable_trash_oldest(), as we do so while holding the heavily contended
update write lock, so we'd rather not hold it for too long.
This helps stick tables perform better under heavy load.
(cherry picked from commit
d2d4c3eb6566145d30eb38dc96b2b79d3f1db8fc)
[wt: backported since the situation is encountered in 3.1 as well without
this patch. It also contains the definition of STKTABLE_MAX_UPDATES_AT_ONCE
from the previous commit]
Signed-off-by: Willy Tarreau <w@1wt.eu>
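
For context, the pattern the patch applies is to bound the amount of work
done per acquisition of a contended lock, letting the caller come back for
the remainder. Below is a minimal sketch of that idea using plain pthread
rwlocks instead of HAProxy's HA_RWLOCK macros; the table layout, the
trash_oldest() helper, and the MAX_UPDATES_AT_ONCE name are illustrative,
not the actual stick-table code:

    #include <pthread.h>
    #include <stddef.h>

    #ifndef MAX_UPDATES_AT_ONCE
    #define MAX_UPDATES_AT_ONCE 100  /* cap, mirroring STKTABLE_MAX_UPDATES_AT_ONCE */
    #endif

    struct entry {
        struct entry *next;
    };

    struct table {
        pthread_rwlock_t updt_lock;  /* stand-in for the contended update lock */
        struct entry *oldest;        /* stand-in for the expiry-ordered tree */
    };

    /* Remove up to <to_batch> old entries, but never more than the cap,
     * so the write lock is never held for an unbounded amount of time.
     * Returns the number of entries actually trashed; the caller may
     * simply call again for the rest, giving waiting threads a chance
     * to grab the lock in between. */
    static int trash_oldest(struct table *t, int to_batch)
    {
        int batched = 0;

        if (to_batch > MAX_UPDATES_AT_ONCE)
            to_batch = MAX_UPDATES_AT_ONCE;

        pthread_rwlock_wrlock(&t->updt_lock);
        while (batched < to_batch && t->oldest) {
            t->oldest = t->oldest->next;  /* illustrative eviction */
            batched++;
        }
        pthread_rwlock_unlock(&t->updt_lock);

        return batched;
    }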
# define DEBUG_MEMORY_POOLS 1
#endif
+#ifndef STKTABLE_MAX_UPDATES_AT_ONCE
+#define STKTABLE_MAX_UPDATES_AT_ONCE 100
+#endif /* STKTABLE_MAX_UPDATES_AT_ONCE */
+
#endif /* _HAPROXY_DEFAULTS_H */
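
Since the new default is wrapped in an #ifndef guard, the cap can be
overridden at build time with a -D compiler flag instead of editing the
header. A small standalone illustration of that mechanism (the override
value 50 is purely hypothetical):

    /* compile with e.g.: cc -DSTKTABLE_MAX_UPDATES_AT_ONCE=50 demo.c */
    #include <stdio.h>

    #ifndef STKTABLE_MAX_UPDATES_AT_ONCE
    #define STKTABLE_MAX_UPDATES_AT_ONCE 100  /* default from the patch */
    #endif

    int main(void)
    {
        printf("update batch cap: %d\n", STKTABLE_MAX_UPDATES_AT_ONCE);
        return 0;
    }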
{
struct stksess *ts;
struct eb32_node *eb;
- int max_search = to_batch * 2; // no more than 50% misses
- int max_per_shard = (to_batch + CONFIG_HAP_TBL_BUCKETS - 1) / CONFIG_HAP_TBL_BUCKETS;
+ int max_search; // no more than 50% misses
+ int max_per_shard;
int done_per_shard;
int batched = 0;
int updt_locked;
shard = 0;
+ if (to_batch > STKTABLE_MAX_UPDATES_AT_ONCE)
+ to_batch = STKTABLE_MAX_UPDATES_AT_ONCE;
+
+ max_search = to_batch * 2; // no more than 50% misses
+ max_per_shard = (to_batch + CONFIG_HAP_TBL_BUCKETS - 1) / CONFIG_HAP_TBL_BUCKETS;
+
while (batched < to_batch) {
done_per_shard = 0;
looped = 0;
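
To make the derived limits concrete: max_search allows scanning up to
twice the batch size (tolerating up to 50% misses), and max_per_shard is
a ceiling division spreading the batch fairly across the table's buckets.
A small self-contained check of the arithmetic, assuming the default cap
of 100 and an illustrative CONFIG_HAP_TBL_BUCKETS of 16 (the real value
is build-dependent):

    #include <stdio.h>

    #define STKTABLE_MAX_UPDATES_AT_ONCE 100
    #define CONFIG_HAP_TBL_BUCKETS 16  /* illustrative; build-dependent in HAProxy */

    int main(void)
    {
        int to_batch = 1000;  /* caller asks for more than the cap */

        if (to_batch > STKTABLE_MAX_UPDATES_AT_ONCE)
            to_batch = STKTABLE_MAX_UPDATES_AT_ONCE;

        int max_search = to_batch * 2;  /* no more than 50% misses */
        /* ceiling division: round up so no bucket's share is lost */
        int max_per_shard = (to_batch + CONFIG_HAP_TBL_BUCKETS - 1)
                            / CONFIG_HAP_TBL_BUCKETS;

        printf("to_batch=%d max_search=%d max_per_shard=%d\n",
               to_batch, max_search, max_per_shard);  /* 100, 200, 7 */
        return 0;
    }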