# passed as-is to CFLAGS). Please check sources for their exact meaning or do
# not use them at all. Some even more obscure ones might also be available
# without appearing here. Currently defined DEBUG macros include DEBUG_FULL,
-# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD,
+# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD, DEBUG_POOL_INTEGRITY,
# DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_NOCRASH, DEBUG_HPACK,
# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV,
# DEBUG_TASK, DEBUG_MEMORY_POOLS.
* cache first, then from the second level if it exists.
*/
+#if defined(DEBUG_POOL_INTEGRITY)
+
+/* Updates <pch>'s fill_pattern and fills the free area after <item> with it,
+ * up to <size> bytes. The item part is left untouched.
+ */
+static inline void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+ ulong *ptr = (ulong *)item;
+ uint ofs;
+ ulong u;
+
+ if (size <= sizeof(*item))
+ return;
+
+ /* Advance the fill_pattern so that about half of the bits change
+ * (to be sure to catch static flag corruption), and apply it.
+ */
+ u = pch->fill_pattern += ~0UL / 3; // 0x55...55
+ ofs = sizeof(*item) / sizeof(*ptr);
+ while (ofs < size / sizeof(*ptr))
+ ptr[ofs++] = u;
+}
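
To make the ~0UL / 3 step above concrete, here is a minimal standalone sketch (plain C, not HAProxy code) printing the first few values such a 64-bit counter takes; consecutive fills of the same object end up with visibly different patterns:

#include <stdio.h>

int main(void)
{
    unsigned long pattern = 0;

    /* same step as above: add ~0UL / 3 (0x5555...55) on each fill */
    for (int i = 1; i <= 4; i++) {
        pattern += ~0UL / 3;
        printf("fill %d: %#lx\n", i, pattern);
    }
    return 0;
}

On a 64-bit build this prints 0x5555555555555555, 0xaaaaaaaaaaaaaaaa, 0xffffffffffffffff, 0x5555555555555554, and so on.
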
+
+/* Checks a pool_cache_item's integrity after extracting it from the cache. It
+ * must have been previously initialized using pool_fill_pattern(). If any
+ * corruption is detected, the function causes an immediate crash.
+ */
+static inline void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+ const ulong *ptr = (const ulong *)item;
+ uint ofs;
+ ulong u;
+
+ if (size <= sizeof(*item))
+ return;
+
+ /* let's check that all words past *item are equal */
+ ofs = sizeof(*item) / sizeof(*ptr);
+ u = ptr[ofs++];
+ while (ofs < size / sizeof(*ptr)) {
+ if (unlikely(ptr[ofs] != u))
+ ABORT_NOW();
+ ofs++;
+ }
+}
+
+#else
+
+static inline void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+}
+
+static inline void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+}
+
+#endif
+
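
The same fill-then-verify idea, reduced to a plain buffer: a minimal standalone sketch (not HAProxy code) that fills the words past a pretend header with one pattern value, then only needs to verify that those words are still all equal to each other to spot a stray write. The names HDR_WORDS, fill() and check() are illustrative only:

#include <stdio.h>

#define HDR_WORDS 2                       /* pretend header, like the list links */

/* fill every word past the header with one pattern value */
static void fill(unsigned long *obj, size_t words, unsigned long pattern)
{
    for (size_t i = HDR_WORDS; i < words; i++)
        obj[i] = pattern;
}

/* return 0 if any word past the header differs from the first one */
static int check(const unsigned long *obj, size_t words)
{
    for (size_t i = HDR_WORDS + 1; i < words; i++)
        if (obj[i] != obj[HDR_WORDS])
            return 0;
    return 1;
}

int main(void)
{
    unsigned long obj[8];

    fill(obj, 8, ~0UL / 3);                          /* 0x5555...55 */
    printf("clean object:      %s\n", check(obj, 8) ? "ok" : "corrupted");

    obj[5] ^= 1;                                     /* simulate a buggy late write */
    printf("after stray write: %s\n", check(obj, 8) ? "ok" : "corrupted");
    return 0;
}

Note that, like pool_check_pattern() above, the check only compares the trailing words to each other rather than to the current fill_pattern, since the counter may have advanced since this particular object was filled.
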
/* Tries to retrieve an object from the local pool cache corresponding to pool
* <pool>. If none is available, tries to allocate from the shared cache, and
* returns NULL if nothing is available.
if (LIST_ISEMPTY(&ph->list))
return pool_get_from_shared_cache(pool);
+#if defined(DEBUG_POOL_INTEGRITY)
+ /* allocate the oldest objects first so as to keep them in the cache as
+ * long as possible before they are reused, maximizing the chance to
+ * detect an overwrite.
+ */
+ item = LIST_PREV(&ph->list, typeof(item), by_pool);
+ pool_check_pattern(ph, item, pool->size);
+#else
+ /* allocate hottest objects first */
item = LIST_NEXT(&ph->list, typeof(item), by_pool);
+#endif
ph->count--;
pool_cache_bytes -= pool->size;
pool_cache_count--;
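
For context on the head/tail choice above: freed objects are inserted at the head of the per-thread cache list (see the LIST_INSERT(&ph->list, ...) call later in this patch), so the head holds the most recently released object and the tail the oldest one. A minimal standalone sketch (not HAProxy code, a simplified sentinel list instead of the LIST_* macros) of what the two picking policies return:

#include <stdio.h>

struct node {
    const char *name;
    struct node *next, *prev;
};

static struct node head = { "head", &head, &head };

/* freed objects are pushed at the head, as the patch's LIST_INSERT does */
static void insert_at_head(struct node *n)
{
    n->next = head.next;
    n->prev = &head;
    head.next->prev = n;
    head.next = n;
}

int main(void)
{
    static struct node a = { "A" }, b = { "B" }, c = { "C" };

    insert_at_head(&a);                 /* released first, now the oldest  */
    insert_at_head(&b);
    insert_at_head(&c);                 /* released last, now the hottest  */

    printf("hottest-first pick (like LIST_NEXT): %s\n", head.next->name);
    printf("oldest-first  pick (like LIST_PREV): %s\n", head.prev->name);
    return 0;
}

This prints C for the hottest-first policy (best cache locality in normal builds) and A for the oldest-first policy used by DEBUG_POOL_INTEGRITY, which gives each object's fill pattern the longest possible exposure to a buggy late write before the object is handed out again.
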
ph->count--;
pool_cache_bytes -= pool->size;
pool_cache_count--;
+ pool_check_pattern(ph, item, pool->size);
LIST_DELETE(&item->by_pool);
LIST_DELETE(&item->by_lru);
pool_put_to_shared_cache(pool, item);
*/
ph = LIST_NEXT(&item->by_pool, struct pool_cache_head *, list);
pool = container_of(ph - tid, struct pool_head, cache);
+ pool_check_pattern(ph, item, pool->size);
LIST_DELETE(&item->by_pool);
LIST_DELETE(&item->by_lru);
ph->count--;
LIST_INSERT(&ph->list, &item->by_pool);
LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
ph->count++;
+ pool_fill_pattern(ph, item, pool->size);
pool_cache_count++;
pool_cache_bytes += pool->size;