From: Willy Tarreau Date: Fri, 21 Jan 2022 18:00:25 +0000 (+0100) Subject: DEBUG: pools: add new build option DEBUG_POOL_INTEGRITY X-Git-Tag: v2.5.2~26 X-Git-Url: http://git.haproxy.org/?a=commitdiff_plain;h=722601212a6403fede5f55d65a3d082721bf1678;p=haproxy-2.5.git DEBUG: pools: add new build option DEBUG_POOL_INTEGRITY When enabled, objects picked from the cache are checked for corruption by comparing their contents against a pattern that was placed when they were inserted into the cache. Objects are also allocated in the reverse order, from the oldest one to the most recent, so as to maximize the ability to detect such a corruption. The goal is to detect writes after free (or possibly hardware memory corruptions). Contrary to DEBUG_UAF this cannot detect reads after free, but may possibly detect later corruptions and will not consume extra memory. The CPU usage will increase a bit due to the cost of filling/checking the area and for the preference for cold cache instead of hot cache, though not as much as with DEBUG_UAF. This option is meant to be usable in production. (cherry picked from commit 0575d8fd760c6cd1de3d6ed66599d685a03c1873) [wt: adjusted slightly since there is no batch refilling in 2.5; dropped the API doc parts; tested with/without option and works fine] Signed-off-by: Willy Tarreau --- diff --git a/Makefile b/Makefile index 49cebea..599bceb 100644 --- a/Makefile +++ b/Makefile @@ -231,7 +231,7 @@ SMALL_OPTS = # passed as-is to CFLAGS). Please check sources for their exact meaning or do # not use them at all. Some even more obscure ones might also be available # without appearing here. Currently defined DEBUG macros include DEBUG_FULL, -# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD, +# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD, DEBUG_POOL_INTEGRITY, # DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_NOCRASH, DEBUG_HPACK, # DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV, # DEBUG_TASK, DEBUG_MEMORY_POOLS. 
diff --git a/include/haproxy/pool-t.h b/include/haproxy/pool-t.h index 111ae3a..2ddf800 100644 --- a/include/haproxy/pool-t.h +++ b/include/haproxy/pool-t.h @@ -59,6 +59,9 @@ struct pool_cache_head { struct list list; /* head of objects in this pool */ unsigned int count; /* number of objects in this pool */ +#if defined(DEBUG_POOL_INTEGRITY) + ulong fill_pattern; /* pattern used to fill the area on free */ +#endif } THREAD_ALIGNED(64); struct pool_cache_item { diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h index cb2c8b4..245f2ff 100644 --- a/include/haproxy/pool.h +++ b/include/haproxy/pool.h @@ -178,6 +178,64 @@ static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr) * cache first, then from the second level if it exists. */ +#if defined(DEBUG_POOL_INTEGRITY) + +/* Updates <pch>'s fill_pattern and fills the free area after <item> with it, + * up to <size> bytes. The item part is left untouched. + */ +static inline void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size) +{ + ulong *ptr = (ulong *)item; + uint ofs; + ulong u; + + if (size <= sizeof(*item)) + return; + + /* Upgrade the fill_pattern to change about half of the bits + * (to be sure to catch static flag corruption), and apply it. + */ + u = pch->fill_pattern += ~0UL / 3; // 0x55...55 + ofs = sizeof(*item) / sizeof(*ptr); + while (ofs < size / sizeof(*ptr)) + ptr[ofs++] = u; +} + +/* check for a pool_cache_item integrity after extracting it from the cache. It + * must have been previously initialized using pool_fill_pattern(). If any + * corruption is detected, the function provokes an immediate crash. 
+ */ +static inline void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size) +{ + const ulong *ptr = (const ulong *)item; + uint ofs; + ulong u; + + if (size <= sizeof(*item)) + return; + + /* let's check that all words past *item are equal */ + ofs = sizeof(*item) / sizeof(*ptr); + u = ptr[ofs++]; + while (ofs < size / sizeof(*ptr)) { + if (unlikely(ptr[ofs] != u)) + ABORT_NOW(); + ofs++; + } +} + +#else + +static inline void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size) +{ +} + +static inline void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size) +{ +} + +#endif + /* Tries to retrieve an object from the local pool cache corresponding to pool * <pool>. If none is available, tries to allocate from the shared cache, and * returns NULL if nothing is available. @@ -191,7 +249,17 @@ static inline void *pool_get_from_cache(struct pool_head *pool) if (LIST_ISEMPTY(&ph->list)) return pool_get_from_shared_cache(pool); +#if defined(DEBUG_POOL_INTEGRITY) + /* allocate oldest objects first so as to keep them as long as possible + * in the cache before being reused and maximizing the chance to detect + * an overwrite. 
+ */ + item = LIST_PREV(&ph->list, typeof(item), by_pool); + pool_check_pattern(ph, item, pool->size); +#else + /* allocate hottest objects first */ item = LIST_NEXT(&ph->list, typeof(item), by_pool); +#endif ph->count--; pool_cache_bytes -= pool->size; pool_cache_count--; diff --git a/src/pool.c b/src/pool.c index 33c86d0..15e58d3 100644 --- a/src/pool.c +++ b/src/pool.c @@ -286,6 +286,7 @@ void pool_evict_from_local_cache(struct pool_head *pool) ph->count--; pool_cache_bytes -= pool->size; pool_cache_count--; + pool_check_pattern(ph, item, pool->size); LIST_DELETE(&item->by_pool); LIST_DELETE(&item->by_lru); pool_put_to_shared_cache(pool, item); @@ -308,6 +309,7 @@ void pool_evict_from_local_caches() */ ph = LIST_NEXT(&item->by_pool, struct pool_cache_head *, list); pool = container_of(ph - tid, struct pool_head, cache); + pool_check_pattern(ph, item, pool->size); LIST_DELETE(&item->by_pool); LIST_DELETE(&item->by_lru); ph->count--; @@ -330,6 +332,7 @@ void pool_put_to_cache(struct pool_head *pool, void *ptr) LIST_INSERT(&ph->list, &item->by_pool); LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru); ph->count++; + pool_fill_pattern(ph, item, pool->size); pool_cache_count++; pool_cache_bytes += pool->size;