/*
 * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include "heap_private.h"
#include <assert.h>
#include <string.h>
#include <sys/lock.h>

#include "esp_log.h"
#include "multi_heap.h"
#include "multi_heap_platform.h"
#include "esp_heap_caps_init.h"
#include "heap_memory_layout.h"

#include "esp_private/startup_internal.h"

static const char *TAG = "heap_init";

/* Linked-list of registered heaps */
struct registered_heap_ll registered_heaps;
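
/* Hook heap initialization into the core system startup sequence, so the
 * heaps are available to the rest of the startup code. */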
ESP_SYSTEM_INIT_FN(init_heap, CORE, BIT(0), 100)
{
    heap_caps_init();
    return ESP_OK;
}

/**
 * @brief This helper function adds a new heap to the list of registered
 * heaps, making sure to keep the heaps sorted by ascending size.
 *
 * @param new_heap heap to be inserted in the list of registered heaps
 */
static void sorted_add_to_registered_heaps(heap_t *new_heap)
{
    // if the list is empty, insert at the head and return
    if (SLIST_EMPTY(&registered_heaps)) {
        SLIST_INSERT_HEAD(&registered_heaps, new_heap, next);
        return;
    }

    // else, go through the registered heaps and add the new one
    // so the registered heaps are sorted by increasing heap size.
    heap_t *cur_heap = NULL;
    heap_t *prev_heap = NULL;
    const size_t new_heap_size = new_heap->end - new_heap->start;
    SLIST_FOREACH(cur_heap, &registered_heaps, next) {
        const size_t cur_heap_size = cur_heap->end - cur_heap->start;
        if (cur_heap_size >= new_heap_size) {
            if (prev_heap != NULL) {
                SLIST_INSERT_AFTER(prev_heap, new_heap, next);
            } else {
                SLIST_INSERT_HEAD(&registered_heaps, new_heap, next);
            }
            return;
        }
        prev_heap = cur_heap;
    }

    // the new heap size is the biggest so far, insert it at the end
    SLIST_INSERT_AFTER(prev_heap, new_heap, next);
}
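
/* Example of the ordering maintained by sorted_add_to_registered_heaps()
 * (sizes are hypothetical): registering heaps of 32 KB, 8 KB and 16 KB in
 * that order results in the list 8 KB -> 16 KB -> 32 KB, so the smallest
 * suitable heap is tried first when allocating. */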

static void register_heap(heap_t *region)
{
    size_t heap_size = region->end - region->start;
    assert(heap_size <= HEAP_SIZE_MAX);
    region->heap = multi_heap_register((void *)region->start, heap_size);
    if (region->heap != NULL) {
        ESP_EARLY_LOGD(TAG, "New heap initialised at %p", region->heap);
    }
}
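
/* Registers the heaps that heap_caps_init() left unregistered (the memory
 * used as the startup stack), which only becomes usable once the OS
 * scheduler is running on its own stack. */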
void heap_caps_enable_nonos_stack_heaps(void)
{
    heap_t *heap;
    SLIST_FOREACH(heap, &registered_heaps, next) {
        // Assume any not-yet-registered heap is
        // a nonos-stack heap
        if (heap->heap == NULL) {
            register_heap(heap);
            if (heap->heap != NULL) {
                multi_heap_set_lock(heap->heap, &heap->heap_mux);
            }
        }
    }
}

/* Initialize the heap allocator to use all of the memory not
   used by static data or reserved for other purposes
 */
void heap_caps_init(void)
{
#ifdef CONFIG_HEAP_TLSF_USE_ROM_IMPL
    extern void multi_heap_in_rom_init(void);
    multi_heap_in_rom_init();
#endif
    /* Get the array of regions that we can use for heaps
       (with reserved memory removed already.)
     */
    size_t num_regions = soc_get_available_memory_region_max_count();
    soc_memory_region_t regions[num_regions];
    num_regions = soc_get_available_memory_regions(regions);

    // the following for loop will calculate the number of possible heaps
    // based on how many regions were coalesced.
    size_t num_heaps = num_regions;

    // The heap allocator will treat every region given to it as separate. In order to get bigger ranges of contiguous memory,
    // it's useful to coalesce adjacent regions that have the same type.
    for (size_t i = 1; i < num_regions; i++) {
        soc_memory_region_t *a = &regions[i - 1];
        soc_memory_region_t *b = &regions[i];
        if (b->start == (intptr_t)(a->start + a->size) && b->type == a->type && b->startup_stack == a->startup_stack) {
            a->type = -1;
            b->start = a->start;
            b->size += a->size;

            // remove one heap from the number of heaps as
            // 2 regions just got coalesced.
            num_heaps--;
        }
    }
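
    /* Example of the coalescing above (addresses are hypothetical): two
     * adjacent regions of the same type, [0x3FFB0000, 0x3FFC0000) and
     * [0x3FFC0000, 0x3FFE0000), are merged into one region covering
     * [0x3FFB0000, 0x3FFE0000), and num_heaps is decremented accordingly. */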

    /* Start by allocating the registered heap data on the stack.

       Once we have a heap to copy it to, we will copy it to a heap buffer.
    */
    heap_t temp_heaps[num_heaps];
    size_t heap_idx = 0;

    ESP_EARLY_LOGI(TAG, "Initializing. RAM available for dynamic allocation:");
    for (size_t i = 0; i < num_regions; i++) {
        soc_memory_region_t *region = &regions[i];
        const soc_memory_type_desc_t *type = &soc_memory_types[region->type];
        heap_t *heap = &temp_heaps[heap_idx];
        if (region->type == -1) {
            memset(heap, 0, sizeof(*heap));
            continue;
        }
        heap_idx++;
        assert(heap_idx <= num_heaps);

        memcpy(heap->caps, type->caps, sizeof(heap->caps));
        heap->start = region->start;
        heap->end = region->start + region->size;
        MULTI_HEAP_LOCK_INIT(&heap->heap_mux);
        if (region->startup_stack) {
            /* Will be registered when OS scheduler starts */
            heap->heap = NULL;
        } else {
            register_heap(heap);
        }
        SLIST_NEXT(heap, next) = NULL;

        ESP_EARLY_LOGI(TAG, "At %08X len %08X (%d KiB): %s",
                       region->start, region->size, region->size / 1024, type->name);
    }

    assert(heap_idx == num_heaps);

    /* Allocate the permanent heap data that we'll use as a linked list at runtime.

       Allocate this part of data contiguously, even though it's a linked list... */
    assert(SLIST_EMPTY(&registered_heaps));

    heap_t *heaps_array = NULL;
    for (size_t i = 0; i < num_heaps; i++) {
        if (heap_caps_match(&temp_heaps[i], MALLOC_CAP_8BIT|MALLOC_CAP_INTERNAL)) {
            /* use the first DRAM heap which can fit the data.
             * the allocated block won't include the block owner bytes since this operation
             * is done by the top level API heap_caps_malloc(). So we need to add it manually
             * after successful allocation. Allocate extra 4 bytes for that purpose. */
            heaps_array = multi_heap_malloc(temp_heaps[i].heap, MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(sizeof(heap_t) * num_heaps));
            if (heaps_array != NULL) {
                break;
            }
        }
    }
    assert(heaps_array != NULL); /* if NULL, there's not enough free startup heap space */
    MULTI_HEAP_SET_BLOCK_OWNER(heaps_array);
    heaps_array = (heap_t *)MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(heaps_array);

    memcpy(heaps_array, temp_heaps, sizeof(heap_t) * num_heaps);

    /* Iterate the heaps and set their locks, also add them to the linked list. */
    for (size_t i = 0; i < num_heaps; i++) {
        if (heaps_array[i].heap != NULL) {
            multi_heap_set_lock(heaps_array[i].heap, &heaps_array[i].heap_mux);
        }
        /* Since the registered heaps list is always traversed from head
         * to tail when looking for a suitable heap when allocating memory, it is
         * best to place the smaller heaps first. That way, if several heaps share
         * the same set of capabilities, the smallest heaps will be used first when
         * processing small allocation requests, leaving the bigger heaps untouched
         * until the smaller heaps are full. */
        sorted_add_to_registered_heaps(&heaps_array[i]);
    }
}
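
/* Illustrative call (addresses are hypothetical and must fall inside a region
 * known to the SoC memory layout):
 *
 *     esp_err_t err = heap_caps_add_region(0x3FFE0000, 0x3FFF0000);
 *
 * The capabilities of the new heap are taken from the region that contains
 * 'start'. */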
esp_err_t heap_caps_add_region(intptr_t start, intptr_t end)
{
    if (start == 0) {
        return ESP_ERR_INVALID_ARG;
    }

    for (size_t i = 0; i < soc_memory_region_count; i++) {
        const soc_memory_region_t *region = &soc_memory_regions[i];
        // Test requested start only as 'end' may be in a different region entry, assume 'end' has same caps
        if (region->start <= start && (intptr_t)(region->start + region->size) > start) {
            const uint32_t *caps = soc_memory_types[region->type].caps;
            return heap_caps_add_region_with_caps(caps, start, end);
        }
    }

    return ESP_ERR_NOT_FOUND;
}

/* This API is used for internal test purposes and hence it's not marked as static */
bool heap_caps_check_add_region_allowed(intptr_t heap_start, intptr_t heap_end, intptr_t start, intptr_t end)
{
    /*
     * We assume that in any region, the "start" must be strictly less than the "end".
     * Notably, the 3rd scenario below is allowed: for example, memory can be allocated
     * from a heap, its capability changed, and this function called to create a new
     * region for a special application.
     * The 'start == start' and 'end == end' scenario (6) is not allowed because the same
     * region cannot be added twice. In fact, registering the same memory region as a heap
     * twice would cause a corruption and then an exception at runtime.
     *
     * the existing heap region                  start                  end
     *                                             |--------------------|
     *
     * 1. add region (e1 < s)           |------|                                    correct: bool condition_1 = end < heap_start;
     *
     * 2. add region (s2 < s && e2 > s)        |---------------|                    wrong:   bool condition_2 = start < heap_start && end > heap_start;
     *                                         |------------------------------|     wrong
     *
     * 3. add region (s3 >= s && e3 < e)             |-------------|                correct: bool condition_3 = start >= heap_start && end < heap_end;
     *                                              |---------------|               correct
     *
     * 4. add region (s4 < e && e4 > e)                       |------------------|  wrong:   bool condition_4 = start < heap_end && end > heap_end;
     *                                               |-------------------------|    wrong
     *
     * 5. add region (s5 >= e)                                          |----|      correct: bool condition_5 = start >= heap_end;
     *
     * 6. add region (s6 == s && e6 == e)          |--------------------|            wrong:   bool condition_6 = start == heap_start && end == heap_end;
     */

    bool condition_2 = start < heap_start && end > heap_start; // if true then the region is not allowed
    bool condition_4 = start < heap_end && end > heap_end;     // if true then the region is not allowed
    bool condition_6 = start == heap_start && end == heap_end; // if true then the region is not allowed

    return !(condition_2 || condition_4 || condition_6);
}
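
/* Illustration of the rules above (addresses are hypothetical): with an
 * existing heap covering [0x40000, 0x50000), adding [0x20000, 0x30000) is
 * allowed (case 1), while adding [0x48000, 0x58000) is rejected because it
 * overlaps the end of the existing heap (case 4). */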
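
/* Illustrative use (values are hypothetical, and the caps array is assumed to
 * provide one entry per capability priority level, three here; the function
 * copies sizeof(p_new->caps) bytes from it):
 *
 *     const uint32_t caps[] = { MALLOC_CAP_8BIT | MALLOC_CAP_32BIT, 0, 0 };
 *     esp_err_t err = heap_caps_add_region_with_caps(caps, 0x3FFE0000, 0x3FFF0000);
 */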
esp_err_t heap_caps_add_region_with_caps(const uint32_t caps[], intptr_t start, intptr_t end)
{
    esp_err_t err = ESP_FAIL;
    if (caps == NULL || start == 0 || end == 0 || end <= start) {
        return ESP_ERR_INVALID_ARG;
    }

    //Check if region overlaps the start and/or end of an existing region. If so, the
    //region is invalid (or maybe added twice)
    heap_t *heap;
    SLIST_FOREACH(heap, &registered_heaps, next) {
        if (!heap_caps_check_add_region_allowed(heap->start, heap->end, start, end)) {
            ESP_EARLY_LOGD(TAG, "invalid overlap detected with existing heap region");
            return ESP_FAIL;
        }
    }

    heap_t *p_new = heap_caps_malloc(sizeof(heap_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
    if (p_new == NULL) {
        err = ESP_ERR_NO_MEM;
        goto done;
    }
    memcpy(p_new->caps, caps, sizeof(p_new->caps));
    p_new->start = start;
    p_new->end = end;
    MULTI_HEAP_LOCK_INIT(&p_new->heap_mux);
    p_new->heap = multi_heap_register((void *)start, end - start);
    SLIST_NEXT(p_new, next) = NULL;
    if (p_new->heap == NULL) {
        err = ESP_ERR_INVALID_SIZE;
        goto done;
    }
    multi_heap_set_lock(p_new->heap, &p_new->heap_mux);

    /* (This insertion is atomic to registered_heaps, so
       we don't need to worry about thread safety for readers,
       only for writers.) */
    static multi_heap_lock_t registered_heaps_write_lock = MULTI_HEAP_LOCK_STATIC_INITIALIZER;
    MULTI_HEAP_LOCK(&registered_heaps_write_lock);
    SLIST_INSERT_HEAD(&registered_heaps, p_new, next);
    MULTI_HEAP_UNLOCK(&registered_heaps_write_lock);

    err = ESP_OK;

done:
    if (err != ESP_OK) {
        free(p_new);
    }
    return err;
}