fix(heap): Patch tlsf_check_pool in ROM heap

The integrity_walker now calls the integrity check hook on both free
AND used blocks of memory in the TLSF pool. This integrity walker
function is called from tlsf_check_pool.

This commit adds a patch of the integrity_walker function to replace
the outdated implementation in ROM.
Guillaume Souchere 2023-10-20 15:23:10 +02:00
parent 825f99f16e
commit d8a8149abd
7 changed files with 56 additions and 163 deletions
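
As background (not part of this change set), here is a minimal sketch of how the patched check is typically reached from application code. heap_caps_check_integrity_all() is the public ESP-IDF heap API; app_check_heap is a placeholder name. With CONFIG_HEAP_TLSF_USE_ROM_IMPL enabled and this patch applied, the per-pool check performed underneath is served by the patched tlsf_check_pool() instead of the ROM version:

/* Sketch only: exercise the heap integrity check from an application.
 * With comprehensive heap poisoning enabled, the poison-check hook is
 * invoked for every block the integrity walker visits. */
#include <stdbool.h>
#include <stdio.h>
#include "esp_heap_caps.h"

void app_check_heap(void)   /* placeholder name */
{
    /* true => print details about any corrupted block found */
    bool ok = heap_caps_check_integrity_all(true);
    printf("heap integrity: %s\n", ok ? "OK" : "CORRUPTED");
}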

View File

@@ -21,7 +21,7 @@ else()
         "patches/esp_rom_regi2c.c"
         "patches/esp_rom_efuse.c")
-    if(CONFIG_HEAP_TLSF_USE_ROM_IMPL AND CONFIG_ESP_ROM_TLSF_CHECK_PATCH)
+    if(CONFIG_HEAP_TLSF_USE_ROM_IMPL AND (CONFIG_ESP_ROM_TLSF_CHECK_PATCH OR CONFIG_HEAP_TLSF_CHECK_PATCH))
         # This file shall be included in the build if TLSF in ROM is activated
         list(APPEND sources "patches/esp_rom_tlsf.c")
     endif()
@@ -240,7 +240,7 @@ else() # Regular app build
     # to force the linker to integrate the whole `esp_rom_tlsf.c` object file inside the
     # final binary. This is necessary because tlsf_set_rom_patches is a constructor, thus,
     # there as no explicit reference/call to it in IDF.
-    if(CONFIG_ESP_ROM_TLSF_CHECK_PATCH)
+    if((CONFIG_ESP_ROM_TLSF_CHECK_PATCH OR CONFIG_HEAP_TLSF_CHECK_PATCH))
         target_link_libraries(${COMPONENT_LIB} PRIVATE "-u tlsf_set_rom_patches")
     endif()
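
The "-u tlsf_set_rom_patches" flag above is the usual way to keep a constructor-only object file from being dropped by the linker: nothing references the symbol, so the linker is told to treat it as undefined and pulls the defining object into the final image. A generic sketch of the pattern, with placeholder names (apply_patches, patched_impl, s_patched_fn) rather than IDF code:

/* Generic sketch of the constructor + "-u <symbol>" force-link pattern. */
static void (*s_patched_fn)(void);            /* slot to be overridden */

static void patched_impl(void) { /* replacement routine */ }

/* Runs automatically during startup; nothing in the app calls it, so the
 * build must pass "-u apply_patches" to keep this object in the link. */
__attribute__((constructor)) static void apply_patches(void)
{
    s_patched_fn = patched_impl;
}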

View File

@@ -39,10 +39,6 @@ config ESP_ROM_HAS_HEAP_TLSF
     bool
     default y

-config ESP_ROM_TLSF_CHECK_PATCH
-    bool
-    default y
-
 config ESP_ROM_NEEDS_SET_CACHE_MMU_SIZE
     bool
     default y

View File

@@ -15,6 +15,5 @@
 #define ESP_ROM_HAS_HAL_WDT (1) // ROM has the implementation of Watchdog HAL driver
 #define ESP_ROM_HAS_HAL_SYSTIMER (1) // ROM has the implementation of Systimer HAL driver
 #define ESP_ROM_HAS_HEAP_TLSF (1) // ROM has the implementation of the tlsf and multi-heap library
-#define ESP_ROM_TLSF_CHECK_PATCH (1) // ROM does not contain the patch of tlsf_check()
 #define ESP_ROM_NEEDS_SET_CACHE_MMU_SIZE (1) // ROM needs to set cache MMU size according to instruction and rodata for flash mmap
 #define ESP_ROM_HAS_MBEDTLS_CRYPTO_LIB (1) // ROM has the mbedtls crypto algorithm lib

View File

@@ -7,7 +7,7 @@ entries:
         esp_rom_cache_esp32s2_esp32s3 (noflash)
     if ESP_ROM_HAS_CACHE_WRITEBACK_BUG = y:
         esp_rom_cache_writeback_esp32s3 (noflash)
-    if HEAP_TLSF_USE_ROM_IMPL = y && ESP_ROM_TLSF_CHECK_PATCH = y:
+    if HEAP_TLSF_USE_ROM_IMPL = y && (ESP_ROM_TLSF_CHECK_PATCH = y || HEAP_TLSF_CHECK_PATCH = y):
         esp_rom_tlsf (noflash)
     if SOC_SYSTIMER_SUPPORTED = y:
         esp_rom_systimer (noflash)

View File

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -33,46 +33,10 @@ typedef void* tlsf_walker;

 #define tlsf_cast(t, exp) ((t) (exp))

-enum tlsf_config {
-    /* log2 of number of linear subdivisions of block sizes. Larger
-    ** values require more memory in the control structure. Values of
-    ** 4 or 5 are typical.
-    */
-    SL_INDEX_COUNT_LOG2 = 5,
-
-    /* All allocation sizes and addresses are aligned to 4 bytes. */
-    ALIGN_SIZE_LOG2 = 2,
-    ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),
-
-    /*
-    ** We support allocations of sizes up to (1 << FL_INDEX_MAX) bits.
-    ** However, because we linearly subdivide the second-level lists, and
-    ** our minimum size granularity is 4 bytes, it doesn't make sense to
-    ** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
-    ** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
-    ** trying to split size ranges into more slots than we have available.
-    ** Instead, we calculate the minimum threshold size, and place all
-    ** blocks below that size into the 0th first-level list.
-    */
-
-    /* Fix the value of FL_INDEX_MAX to match the value that is defined
-     * in the ROM implementation. */
-    FL_INDEX_MAX = 18, //Each pool can have up 256KB
-    SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
-    FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
-    FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),
-    SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
-};
-
 #define block_header_free_bit (1 << 0)
 #define block_header_prev_free_bit (1 << 1)
 #define block_header_overhead (sizeof(size_t))
 #define block_start_offset (offsetof(block_header_t, size) + sizeof(size_t))
-#define block_size_min (sizeof(block_header_t) - sizeof(block_header_t*))
-
-typedef ptrdiff_t tlsfptr_t;

 typedef struct block_header_t
 {
@@ -87,26 +51,6 @@ typedef struct block_header_t
     struct block_header_t* prev_free;
 } block_header_t;

-/* The TLSF control structure. */
-typedef struct control_t
-{
-    /* Empty lists point at this block to indicate they are free. */
-    block_header_t block_null;
-
-    /* Bitmaps for free lists. */
-    unsigned int fl_bitmap;
-    unsigned int sl_bitmap[FL_INDEX_COUNT];
-
-    /* Head of free lists. */
-    block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
-} control_t;
-
-static inline __attribute__((__always_inline__)) int tlsf_fls(unsigned int word)
-{
-    const int bit = word ? 32 - __builtin_clz(word) : 0;
-    return bit - 1;
-}
-
 static inline __attribute__((__always_inline__)) size_t block_size(const block_header_t* block)
 {
     return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
@@ -122,41 +66,10 @@ static inline __attribute__((__always_inline__)) int block_is_prev_free(const bl
     return tlsf_cast(int, block->size & block_header_prev_free_bit);
 }

-static inline __attribute__((__always_inline__)) block_header_t* offset_to_block(const void* ptr, size_t size)
-{
-    return tlsf_cast(block_header_t*, tlsf_cast(tlsfptr_t, ptr) + size);
-}
-
-static inline __attribute__((__always_inline__)) void* block_to_ptr(const block_header_t* block)
-{
-    return tlsf_cast(void*,
-        tlsf_cast(unsigned char*, block) + block_start_offset);
-}
-
-static inline __attribute__((__always_inline__)) block_header_t* block_next(const block_header_t* block)
-{
-    block_header_t* next = offset_to_block(block_to_ptr(block),
-        block_size(block) - block_header_overhead);
-    return next;
-}
-
-static inline __attribute__((__always_inline__)) void mapping_insert(size_t size, int* fli, int* sli)
-{
-    int fl, sl;
-    if (size < SMALL_BLOCK_SIZE)
-    {
-        /* Store small blocks in first list. */
-        fl = 0;
-        sl = tlsf_cast(int, size) >> 2;
-    }
-    else
-    {
-        fl = tlsf_fls(size);
-        sl = tlsf_cast(int, size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
-        fl -= (FL_INDEX_SHIFT - 1);
-    }
-    *fli = fl;
-    *sli = sl;
+static inline __attribute__((always_inline)) block_header_t* block_from_ptr(const void* ptr)
+{
+    return tlsf_cast(block_header_t*,
+        tlsf_cast(unsigned char*, ptr) - block_start_offset);
 }

 /* ----------------------------------------------------------------
@@ -173,74 +86,54 @@ void tlsf_poison_check_pfunc_set(poison_check_pfunc_t pfunc)

 #define tlsf_insist_no_assert(x) { if (!(x)) { status--; } }

-int tlsf_check(tlsf_t tlsf)
-{
-    int i, j;
-
-    control_t* control = tlsf_cast(control_t*, tlsf);
-    int status = 0;
-
-    /* Check that the free lists and bitmaps are accurate. */
-    for (i = 0; i < FL_INDEX_COUNT; ++i)
-    {
-        for (j = 0; j < SL_INDEX_COUNT; ++j)
-        {
-            const int fl_map = control->fl_bitmap & (1 << i);
-            const int sl_list = control->sl_bitmap[i];
-            const int sl_map = sl_list & (1 << j);
-            const block_header_t* block = control->blocks[i][j];
-
-            /* Check that first- and second-level lists agree. */
-            if (!fl_map)
-            {
-                tlsf_insist_no_assert(!sl_map && "second-level map must be null");
-            }
-
-            if (!sl_map)
-            {
-                tlsf_insist_no_assert(block == &control->block_null && "block list must be null");
-                continue;
-            }
-
-            /* Check that there is at least one free block. */
-            tlsf_insist_no_assert(sl_list && "no free blocks in second-level map");
-            tlsf_insist_no_assert(block != &control->block_null && "block should not be null");
-
-            while (block != &control->block_null)
-            {
-                int fli, sli;
-                const bool is_block_free = block_is_free(block);
-                tlsf_insist_no_assert(is_block_free && "block should be free");
-                tlsf_insist_no_assert(!block_is_prev_free(block) && "blocks should have coalesced");
-                tlsf_insist_no_assert(!block_is_free(block_next(block)) && "blocks should have coalesced");
-                tlsf_insist_no_assert(block_is_prev_free(block_next(block)) && "block should be free");
-                tlsf_insist_no_assert(block_size(block) >= block_size_min && "block not minimum size");
-
-                mapping_insert(block_size(block), &fli, &sli);
-                tlsf_insist_no_assert(fli == i && sli == j && "block size indexed in wrong list");
-
-                /* block_size(block) returns the size of the usable memory when the block is allocated.
-                 * As the block under test is free, we need to subtract to the block size the next_free
-                 * and prev_free fields of the block header as they are not a part of the usable memory
-                 * when the block is free. In addition, we also need to subtract the size of prev_phys_block
-                 * as this field is in fact part of the current free block and not part of the next (allocated)
-                 * block. Check the comments in block_split function for more details.
-                 */
-                const size_t actual_free_block_size = block_size(block)
-                                                      - offsetof(block_header_t, next_free)
-                                                      - block_header_overhead;
-
-                if (s_poison_check_region != NULL) {
-                    tlsf_insist_no_assert(s_poison_check_region((char *)block + sizeof(block_header_t),
-                                          actual_free_block_size, is_block_free, true /* print errors */));
-                }
-
-                block = block->next_free;
-            }
-        }
-    }
-
-    return status;
+typedef struct integrity_t
+{
+    int prev_status;
+    int status;
+} integrity_t;
+
+static void integrity_walker(void* ptr, size_t size, int used, void* user)
+{
+    block_header_t* block = block_from_ptr(ptr);
+    integrity_t* integ = tlsf_cast(integrity_t*, user);
+    const int this_prev_status = block_is_prev_free(block) ? 1 : 0;
+    const int this_status = block_is_free(block) ? 1 : 0;
+    const size_t this_block_size = block_size(block);
+
+    int status = 0;
+    tlsf_insist_no_assert(integ->prev_status == this_prev_status && "prev status incorrect");
+    tlsf_insist_no_assert(size == this_block_size && "block size incorrect");
+
+    if (s_poison_check_region != NULL)
+    {
+        /* block_size(block) returns the size of the usable memory when the block is allocated.
+         * As the block under test is free, we need to subtract to the block size the next_free
+         * and prev_free fields of the block header as they are not a part of the usable memory
+         * when the block is free. In addition, we also need to subtract the size of prev_phys_block
+         * as this field is in fact part of the current free block and not part of the next (allocated)
+         * block. Check the comments in block_split function for more details.
+         */
+        const size_t actual_free_block_size = used ? this_block_size :
+                                                     this_block_size - offsetof(block_header_t, next_free) - block_header_overhead;
+
+        void* ptr_block = used ? (void*)block + block_start_offset :
+                                 (void*)block + sizeof(block_header_t);
+
+        tlsf_insist_no_assert(s_poison_check_region(ptr_block, actual_free_block_size, !used, true));
+    }
+
+    integ->prev_status = this_status;
+    integ->status += status;
+}
+
+extern void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user);
+
+int tlsf_check_pool(pool_t pool)
+{
+    /* Check that the blocks are physically correct. */
+    integrity_t integ = { 0, 0 };
+    tlsf_walk_pool(pool, integrity_walker, &integ);
+
+    return integ.status;
 }

 #undef tlsf_insist_no_assert
@@ -299,7 +192,7 @@ void __attribute__((constructor)) tlsf_set_rom_patches(void)
     memcpy(&heap_tlsf_patch_table_ptr, heap_tlsf_table_ptr, sizeof(struct heap_tlsf_stub_table_t));

     /* Set the patched function here */
-    heap_tlsf_patch_table_ptr.tlsf_check = tlsf_check;
+    heap_tlsf_patch_table_ptr.tlsf_check_pool = tlsf_check_pool;

     /* Set our table as the one to use in the ROM code */
     heap_tlsf_table_ptr = &heap_tlsf_patch_table_ptr;
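
For reference, any function with the same callback shape as integrity_walker() can be handed to tlsf_walk_pool(); the sketch below (stats_walker and pool_stats_t are illustrative names, not part of this commit) simply sums used and free bytes. Note that this patch file declares tlsf_walker as void*, so the function pointer is passed through a cast, whereas upstream TLSF declares it as a function-pointer type.

#include <stddef.h>

/* Illustrative walker: accumulate used/free byte counts for a pool. */
typedef struct {
    size_t used_bytes;
    size_t free_bytes;
} pool_stats_t;

static void stats_walker(void* ptr, size_t size, int used, void* user)
{
    pool_stats_t* stats = (pool_stats_t*)user;
    if (used) {
        stats->used_bytes += size;
    } else {
        stats->free_bytes += size;
    }
    (void)ptr;   /* block payload address, unused here */
}

/* Usage sketch:
 *   pool_stats_t stats = { 0, 0 };
 *   tlsf_walk_pool(pool, (tlsf_walker)stats_walker, &stats);
 */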

View File

@@ -84,4 +84,13 @@ menu "Heap memory debugging"
             features will be added and bugs will be fixed in the IDF source
             but cannot be synced to ROM.

+    config HEAP_TLSF_CHECK_PATCH
+        bool "Patch the tlsf_check_pool() for ROM HEAP TLSF implementation"
+        depends on IDF_TARGET_ESP32C2 && ESP32C2_REV_MIN_FULL < 200
+        default y
+        help
+            ROM does not contain the patch of tlsf_check_pool() allowing perform
+            the integrity checking on used blocks. The patch to allow such check
+            needs to be applied.
+
 endmenu

View File

@@ -67,8 +67,6 @@ TEST_CASE("multi_heap poisoning detection", "[heap]")
     }
 }

-#if !defined(CONFIG_HEAP_TLSF_USE_ROM_IMPL)
-
 #ifdef CONFIG_HEAP_TASK_TRACKING
 #define HEAD_CANARY_OFFSET 3 // head canary | task tracking | allocated size
 #else
@@ -114,5 +112,3 @@ TEST_CASE("canary corruption in light or comprehensive poisoning mode", "[heap]"
     ptr[TAIL_CANARY_OFFSET] = canary;
     heap_caps_free(ptr);
 }
-
-#endif // !CONFIG_HEAP_TLSF_USE_ROM_IMPL