Merge branch 'bugfix/fix_cache_data_mem_corrupt_after_sleep_v5.1' into 'release/v5.1'

fix(lightsleep): Suspend the cache before going to sleep to avoid the cache loading wrong data (backport v5.1)

See merge request espressif/esp-idf!25087
Jiang Jiang Jian 2023-08-02 11:21:33 +08:00
commit afcf3e261b
21 changed files with 360 additions and 131 deletions
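
In outline, the fix brackets the light-sleep path in esp_sleep_start() with the new cache HAL calls. A condensed sketch of the resulting ordering (the middle step stands in for the elided sleep logic; see the esp_sleep.c hunks below):

    cache_hal_suspend(CACHE_TYPE_ALL);   // wait for cache idle before SPI IO isolation
    /* ... enter and leave light sleep (elided) ... */
    cache_hal_resume(CACHE_TYPE_ALL);    // the cache may fetch from flash again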

View File

@@ -1,11 +1,12 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdint.h>
#include <stdlib.h>
#include "esp_attr.h"
#include "soc/soc.h"
#include "soc/rtc.h"
#include "soc/rtc_cntl_reg.h"
@@ -21,6 +22,8 @@
#include "soc/regi2c_lp_bias.h"
#include "soc/regi2c_dig_reg.h"
static const DRAM_ATTR rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
/**
* Configure whether certain peripherals are powered down in deep sleep
* @param cfg power down flags as rtc_sleep_pu_config_t structure
@@ -143,7 +146,6 @@ void rtc_sleep_get_default_config(uint32_t sleep_flags, rtc_sleep_config_t *out_
void rtc_sleep_init(rtc_sleep_config_t cfg)
{
if (cfg.lslp_mem_inf_fpu) {
rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
rtc_sleep_pu(pu_cfg);
}
@@ -229,7 +231,6 @@ static uint32_t rtc_sleep_finish(uint32_t lslp_mem_inf_fpu)
/* restore config if it is a light sleep */
if (lslp_mem_inf_fpu) {
rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
rtc_sleep_pu(pu_cfg);
}
return reject;

View File

@@ -1,11 +1,12 @@
/*
* SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdint.h>
#include <stdlib.h>
#include "esp_attr.h"
#include "soc/soc.h"
#include "soc/rtc.h"
#include "soc/rtc_cntl_reg.h"
@@ -27,6 +28,8 @@
#include "soc/systimer_reg.h"
#endif
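/* DRAM_ATTR keeps this config in internal RAM, so it stays accessible while the flash cache is suspended */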
static const DRAM_ATTR rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
/**
* Configure whether certain peripherals are powered down in deep sleep
* @param cfg power down flags as rtc_sleep_pu_config_t structure
@@ -168,7 +171,6 @@ void rtc_sleep_get_default_config(uint32_t sleep_flags, rtc_sleep_config_t *out_
void rtc_sleep_init(rtc_sleep_config_t cfg)
{
if (cfg.lslp_mem_inf_fpu) {
rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
rtc_sleep_pu(pu_cfg);
}
if (cfg.wifi_pd_en) {
@@ -368,7 +370,6 @@ static uint32_t rtc_sleep_finish(uint32_t lslp_mem_inf_fpu)
/* restore config if it is a light sleep */
if (lslp_mem_inf_fpu) {
rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
rtc_sleep_pu(pu_cfg);
}
return reject;

View File

@@ -1,10 +1,11 @@
/*
* SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdint.h>
#include "esp_attr.h"
#include "soc/soc.h"
#include "soc/rtc.h"
#include "soc/rtc_cntl_reg.h"
@@ -20,6 +21,8 @@
#define RTC_CNTL_MEM_FOLW_CPU (RTC_CNTL_SLOWMEM_FOLW_CPU | RTC_CNTL_FASTMEM_FOLW_CPU)
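/* DRAM_ATTR keeps this config in internal RAM, so it stays accessible while the flash cache is suspended */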
static const DRAM_ATTR rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
/**
* Configure whether certain peripherals are powered up in sleep
* @param cfg power down flags as rtc_sleep_pu_config_t structure
@@ -171,7 +174,6 @@ void rtc_sleep_get_default_config(uint32_t sleep_flags, rtc_sleep_config_t *out_
void rtc_sleep_init(rtc_sleep_config_t cfg)
{
if (cfg.lslp_mem_inf_fpu) {
rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
rtc_sleep_pu(pu_cfg);
}
@@ -289,7 +291,6 @@ static uint32_t rtc_sleep_finish(uint32_t lslp_mem_inf_fpu)
/* restore config if it is a light sleep */
if (lslp_mem_inf_fpu) {
rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
rtc_sleep_pu(pu_cfg);
}

View File

@@ -731,12 +731,13 @@ static esp_err_t IRAM_ATTR esp_sleep_start(uint32_t pd_flags, esp_sleep_mode_t m
result = ESP_OK;
#endif // !CONFIG_IDF_TARGET_ESP32H2
} else {
/* Wait for the cache to become idle when suspending it, to avoid the cache loading wrong data after SPI IO isolation */
cache_hal_suspend(CACHE_TYPE_ALL);
/* On esp32c6, only the lp_aon pad hold function can hold the GPIO state in active mode.
To avoid leakage on the SPI CS pin, hold it here */
#if (CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP && CONFIG_ESP_SLEEP_FLASH_LEAKAGE_WORKAROUND)
#if !CONFIG_IDF_TARGET_ESP32H2 // ESP32H2 TODO IDF-7359: related rtcio ll func not supported yet
if(!(pd_flags & PMU_SLEEP_PD_VDDSDIO)) {
cache_hal_freeze(CACHE_TYPE_ALL);
gpio_ll_hold_en(&GPIO, SPI_CS0_GPIO_NUM);
}
#endif
@@ -759,10 +760,11 @@ static esp_err_t IRAM_ATTR esp_sleep_start(uint32_t pd_flags, esp_sleep_mode_t m
#if !CONFIG_IDF_TARGET_ESP32H2 // ESP32H2 TODO IDF-7359: related rtcio ll func not supported yet
if(!(pd_flags & PMU_SLEEP_PD_VDDSDIO)) {
gpio_ll_hold_dis(&GPIO, SPI_CS0_GPIO_NUM);
cache_hal_unfreeze(CACHE_TYPE_ALL);
}
#endif
#endif
/* Resume the cache so execution can continue */
cache_hal_resume(CACHE_TYPE_ALL);
}
#if CONFIG_ESP_SLEEP_SYSTIMER_STALL_WORKAROUND

View File

@@ -19,10 +19,14 @@ endif()
if(NOT CONFIG_APP_BUILD_TYPE_PURE_RAM_APP)
list(APPEND srcs "mmu_hal.c")
endif()
if(NOT ${target} STREQUAL "esp32" AND NOT CONFIG_APP_BUILD_TYPE_PURE_RAM_APP)
list(APPEND srcs "cache_hal.c")
# We wrap the Cache ROM APIs as Cache HAL APIs for: 1. placement in internal RAM; 2. unified APIs
# The ESP32 cache structure / ROM APIs are different, so we have a patch, `cache_hal_esp32.c`, for it.
if(${target} STREQUAL "esp32")
list(APPEND srcs "esp32/cache_hal_esp32.c")
else()
list(APPEND srcs "cache_hal.c")
endif()
endif()
if(CONFIG_SOC_LP_TIMER_SUPPORTED)

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -43,6 +43,10 @@
typedef struct {
uint32_t data_autoload_flag;
uint32_t inst_autoload_flag;
#if CACHE_LL_ENABLE_DISABLE_STATE_SW
// There's no register indicating whether the cache is enabled on these chips, so use a SW flag to track this state.
volatile bool cache_enabled;
#endif
} cache_hal_context_t;
static cache_hal_context_t ctx;
@@ -66,6 +70,10 @@ void cache_hal_init(void)
cache_ll_l1_enable_bus(1, CACHE_LL_DEFAULT_DBUS_MASK);
cache_ll_l1_enable_bus(1, CACHE_LL_DEFAULT_IBUS_MASK);
#endif
#if CACHE_LL_ENABLE_DISABLE_STATE_SW
ctx.cache_enabled = 1;
#endif
}
void cache_hal_disable(cache_type_t type)
@@ -82,6 +90,10 @@ void cache_hal_disable(cache_type_t type)
Cache_Disable_DCache();
}
#endif
#if CACHE_LL_ENABLE_DISABLE_STATE_SW
ctx.cache_enabled = 0;
#endif
}
void cache_hal_enable(cache_type_t type)
@@ -98,6 +110,59 @@ void cache_hal_enable(cache_type_t type)
Cache_Enable_DCache(ctx.data_autoload_flag);
}
#endif
#if CACHE_LL_ENABLE_DISABLE_STATE_SW
ctx.cache_enabled = 1;
#endif
}
void cache_hal_suspend(cache_type_t type)
{
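// Suspend stalls CPU access to the cache while preserving its contents, so no invalidation is needed on resume.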
#if SOC_SHARED_IDCACHE_SUPPORTED
Cache_Suspend_ICache();
#else
if (type == CACHE_TYPE_DATA) {
Cache_Suspend_DCache();
} else if (type == CACHE_TYPE_INSTRUCTION) {
Cache_Suspend_ICache();
} else {
Cache_Suspend_ICache();
Cache_Suspend_DCache();
}
#endif
#if CACHE_LL_ENABLE_DISABLE_STATE_SW
ctx.cache_enabled = 0;
#endif
}
void cache_hal_resume(cache_type_t type)
{
#if SOC_SHARED_IDCACHE_SUPPORTED
Cache_Resume_ICache(ctx.inst_autoload_flag);
#else
if (type == CACHE_TYPE_DATA) {
Cache_Resume_DCache(ctx.data_autoload_flag);
} else if (type == CACHE_TYPE_INSTRUCTION) {
Cache_Resume_ICache(ctx.inst_autoload_flag);
} else {
Cache_Resume_ICache(ctx.inst_autoload_flag);
Cache_Resume_DCache(ctx.data_autoload_flag);
}
#endif
#if CACHE_LL_ENABLE_DISABLE_STATE_SW
ctx.cache_enabled = 1;
#endif
}
bool cache_hal_is_cache_enabled(cache_type_t type)
{
#if CACHE_LL_ENABLE_DISABLE_STATE_SW
return ctx.cache_enabled;
#else
return cache_ll_l1_is_cache_enabled(0, type);
#endif
}
void cache_hal_invalidate_addr(uint32_t vaddr, uint32_t size)

View File

@@ -0,0 +1,40 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "hal/cache_ll.h"
#include "hal/cache_hal.h"
static uint32_t s_cache_status[2];
void cache_hal_suspend(cache_type_t type)
{
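// The ESP32 ROM has no dedicated suspend/resume calls, so save the enabled buses and disable the cache instead.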
s_cache_status[0] = cache_ll_l1_get_enabled_bus(0);
cache_ll_l1_disable_cache(0);
#if !CONFIG_FREERTOS_UNICORE
s_cache_status[1] = cache_ll_l1_get_enabled_bus(1);
cache_ll_l1_disable_cache(1);
#endif
}
void cache_hal_resume(cache_type_t type)
{
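// Re-enable the cache first, then restore which buses it serves.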
cache_ll_l1_enable_cache(0);
cache_ll_l1_enable_bus(0, s_cache_status[0]);
#if !CONFIG_FREERTOS_UNICORE
cache_ll_l1_enable_cache(1);
cache_ll_l1_enable_bus(1, s_cache_status[1]);
#endif
}
bool cache_hal_is_cache_enabled(cache_type_t type)
{
bool result = cache_ll_l1_is_cache_enabled(0, CACHE_TYPE_ALL);
#if !CONFIG_FREERTOS_UNICORE
result = result && cache_ll_l1_is_cache_enabled(1, CACHE_TYPE_ALL);
#endif
return result;
}

View File

@@ -19,6 +19,66 @@
extern "C" {
#endif
/**
* @brief Enable a cache unit
*
* @param cache_id cache ID (when l1 cache is per core)
*/
__attribute__((always_inline))
static inline void cache_ll_l1_enable_cache(uint32_t cache_id)
{
HAL_ASSERT(cache_id == 0 || cache_id == 1);
if (cache_id == 0) {
DPORT_REG_SET_BIT(DPORT_PRO_CACHE_CTRL_REG, DPORT_PRO_CACHE_ENABLE);
} else {
DPORT_REG_SET_BIT(DPORT_APP_CACHE_CTRL_REG, DPORT_APP_CACHE_ENABLE);
}
}
/**
* @brief Disable a cache unit
*
* @param cache_id cache ID (when l1 cache is per core)
*/
__attribute__((always_inline))
static inline void cache_ll_l1_disable_cache(uint32_t cache_id)
{
if (cache_id == 0) {
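// Wait for the cache state machine to go idle before clearing the enable bit.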
while (DPORT_GET_PERI_REG_BITS2(DPORT_PRO_DCACHE_DBUG0_REG, DPORT_PRO_CACHE_STATE, DPORT_PRO_CACHE_STATE_S) != 1){
;
}
DPORT_REG_CLR_BIT(DPORT_PRO_CACHE_CTRL_REG, DPORT_PRO_CACHE_ENABLE);
} else {
while (DPORT_GET_PERI_REG_BITS2(DPORT_APP_DCACHE_DBUG0_REG, DPORT_APP_CACHE_STATE, DPORT_APP_CACHE_STATE_S) != 1){
;
}
DPORT_REG_CLR_BIT(DPORT_APP_CACHE_CTRL_REG, DPORT_APP_CACHE_ENABLE);
}
}
/**
* @brief Check whether the cache is enabled or not
*
* @param cache_id cache ID (when l1 cache is per core)
* @param type see `cache_type_t`
* @return enabled or not
*/
__attribute__((always_inline))
static inline bool cache_ll_l1_is_cache_enabled(uint32_t cache_id, cache_type_t type)
{
HAL_ASSERT(cache_id == 0 || cache_id == 1);
(void) type; // On ESP32 the cache is shared between instructions and data
bool enabled;
if (cache_id == 0) {
enabled = DPORT_REG_GET_BIT(DPORT_PRO_CACHE_CTRL_REG, DPORT_PRO_CACHE_ENABLE);
} else {
enabled = DPORT_REG_GET_BIT(DPORT_APP_CACHE_CTRL_REG, DPORT_APP_CACHE_ENABLE);
}
return enabled;
}
/**
* @brief Get the buses of a particular cache that are mapped to a virtual address range
*

View File

@@ -8,6 +8,7 @@
#pragma once
#include <stdbool.h>
#include "soc/extmem_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/cache_types.h"
@@ -34,6 +35,21 @@ extern "C" {
#define CACHE_LL_L1_ILG_EVENT_PRELOAD_OP_FAULT (1<<1)
#define CACHE_LL_L1_ILG_EVENT_SYNC_OP_FAULT (1<<0)
/**
* @brief Check whether the cache is enabled or not
*
* @param cache_id cache ID (when l1 cache is per core)
* @param type see `cache_type_t`
* @return enabled or not
*/
__attribute__((always_inline))
static inline bool cache_ll_l1_is_cache_enabled(uint32_t cache_id, cache_type_t type)
{
HAL_ASSERT(cache_id == 0);
(void) type; // On C2 there's only ICache
return REG_GET_BIT(EXTMEM_ICACHE_CTRL_REG, EXTMEM_ICACHE_ENABLE);
}
/**
* @brief Get the buses of a particular cache that are mapped to a virtual address range
*

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -8,6 +8,7 @@
#pragma once
#include <stdbool.h>
#include "soc/extmem_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/cache_types.h"
@@ -35,6 +36,21 @@ extern "C" {
#define CACHE_LL_L1_ILG_EVENT_SYNC_OP_FAULT (1<<0)
/**
* @brief Check whether the cache is enabled or not
*
* @param cache_id cache ID (when l1 cache is per core)
* @param type see `cache_type_t`
* @return enabled or not
*/
__attribute__((always_inline))
static inline bool cache_ll_l1_is_cache_enabled(uint32_t cache_id, cache_type_t type)
{
HAL_ASSERT(cache_id == 0);
(void) type; // On C3 there's only ICache
return REG_GET_BIT(EXTMEM_ICACHE_CTRL_REG, EXTMEM_ICACHE_ENABLE);
}
/**
* @brief Get the buses of a particular cache that are mapped to a virtual address range
*

View File

@@ -16,7 +16,7 @@
#ifdef __cplusplus
extern "C" {
#endif
#define CACHE_LL_ENABLE_DISABLE_STATE_SW 1 // There's no register indicating the cache enable/disable state, so we track this state in software.
#define CACHE_LL_DEFAULT_IBUS_MASK CACHE_BUS_IBUS0
#define CACHE_LL_DEFAULT_DBUS_MASK CACHE_BUS_DBUS0

View File

@@ -16,7 +16,7 @@
#ifdef __cplusplus
extern "C" {
#endif
#define CACHE_LL_ENABLE_DISABLE_STATE_SW 1 // There's no register indicating the cache enable/disable state, so we track this state in software.
#define CACHE_LL_DEFAULT_IBUS_MASK CACHE_BUS_IBUS0
#define CACHE_LL_DEFAULT_DBUS_MASK CACHE_BUS_DBUS0

View File

@@ -22,6 +22,29 @@ extern "C" {
#define CACHE_LL_DEFAULT_IBUS_MASK CACHE_BUS_IBUS0
#define CACHE_LL_DEFAULT_DBUS_MASK CACHE_BUS_IBUS2
/**
* @brief Check whether the cache is enabled or not
*
* @param cache_id cache ID (when l1 cache is per core)
* @param type see `cache_type_t`
* @return enabled or not
*/
__attribute__((always_inline))
static inline bool cache_ll_l1_is_cache_enabled(uint32_t cache_id, cache_type_t type)
{
HAL_ASSERT(cache_id == 0);
bool enabled;
if (type == CACHE_TYPE_INSTRUCTION) {
enabled = REG_GET_BIT(EXTMEM_PRO_ICACHE_CTRL_REG, EXTMEM_PRO_ICACHE_ENABLE);
} else if (type == CACHE_TYPE_DATA) {
enabled = REG_GET_BIT(EXTMEM_PRO_DCACHE_CTRL_REG, EXTMEM_PRO_DCACHE_ENABLE);
} else {
enabled = REG_GET_BIT(EXTMEM_PRO_ICACHE_CTRL_REG, EXTMEM_PRO_ICACHE_ENABLE);
enabled = enabled && REG_GET_BIT(EXTMEM_PRO_DCACHE_CTRL_REG, EXTMEM_PRO_DCACHE_ENABLE);
}
return enabled;
}
/**
* @brief Get the buses of a particular cache that are mapped to a virtual address range

View File

@@ -8,6 +8,7 @@
#pragma once
#include <stdbool.h>
#include "soc/extmem_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/cache_types.h"
@@ -37,6 +38,29 @@ extern "C" {
#define CACHE_LL_L1_ILG_EVENT_ICACHE_PRELOAD_OP_FAULT (1<<1)
#define CACHE_LL_L1_ILG_EVENT_ICACHE_SYNC_OP_FAULT (1<<0)
/**
* @brief Check whether the cache is enabled or not
*
* @param cache_id cache ID (when l1 cache is per core)
* @param type see `cache_type_t`
* @return enabled or not
*/
__attribute__((always_inline))
static inline bool cache_ll_l1_is_cache_enabled(uint32_t cache_id, cache_type_t type)
{
HAL_ASSERT(cache_id == 0 || cache_id == 1);
bool enabled;
if (type == CACHE_TYPE_INSTRUCTION) {
enabled = REG_GET_BIT(EXTMEM_ICACHE_CTRL_REG, EXTMEM_ICACHE_ENABLE);
} else if (type == CACHE_TYPE_DATA) {
enabled = REG_GET_BIT(EXTMEM_DCACHE_CTRL_REG, EXTMEM_DCACHE_ENABLE);
} else {
enabled = REG_GET_BIT(EXTMEM_ICACHE_CTRL_REG, EXTMEM_ICACHE_ENABLE);
enabled = enabled && REG_GET_BIT(EXTMEM_DCACHE_CTRL_REG, EXTMEM_DCACHE_ENABLE);
}
return enabled;
}
/**
* @brief Get the buses of a particular cache that are mapped to a virtual address range

View File

@@ -1,12 +1,13 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdbool.h>
#include "hal/cache_types.h"
#ifdef __cplusplus
@@ -39,6 +40,35 @@ void cache_hal_disable(cache_type_t type);
*/
void cache_hal_enable(cache_type_t type);
/**
* @brief Suspend cache
*
* Suspend the ICache or DCache or both. This suspends CPU access to the cache for a while, without invalidation.
*
* @param type see `cache_type_t`
*/
void cache_hal_suspend(cache_type_t type);
/**
* @brief Resume cache
*
* Resume the ICache or DCache or both.
*
* @param type see `cache_type_t`
*/
void cache_hal_resume(cache_type_t type);
/**
* @brief Check if corresponding cache is enabled or not
*
* @param type see `cache_type_t`
*
* @return true: enabled; false: disabled
*/
bool cache_hal_is_cache_enabled(cache_type_t type);
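
For context, a minimal usage sketch of this suspend/resume pair (not part of this commit; the caller below is hypothetical):

    #include "esp_attr.h"        // IRAM_ATTR
    #include "hal/cache_hal.h"   // cache_hal_suspend() / cache_hal_resume()

    // Hypothetical caller: placed in IRAM so instruction fetches never need the cache.
    void IRAM_ATTR critical_no_flash_window(void)
    {
        cache_hal_suspend(CACHE_TYPE_ALL);   // stall cache access, keep contents
        // ... touch only IRAM code and DRAM data here ...
        cache_hal_resume(CACHE_TYPE_ALL);    // contents still valid, no reload needed
    }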
/**
* @brief Invalidate cache supported addr
*

View File

@@ -5,8 +5,10 @@ entries:
mmu_hal (noflash)
spi_flash_hal_iram (noflash)
spi_flash_encrypt_hal_iram (noflash)
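# cache HAL goes to IRAM (noflash) because it runs while the flash cache is disabled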
if IDF_TARGET_ESP32 = n && APP_BUILD_TYPE_PURE_RAM_APP = n:
cache_hal (noflash)
if IDF_TARGET_ESP32 = y:
cache_hal_esp32 (noflash)
else:
cache_hal (noflash)
if SOC_GPSPI_SUPPORTED = y:
if HAL_SPI_MASTER_FUNC_IN_IRAM = y:
spi_hal_iram (noflash)

View File

@@ -231,6 +231,10 @@ config SOC_SHARED_IDCACHE_SUPPORTED
bool
default y
config SOC_IDCACHE_PER_CORE
bool
default y
config SOC_CPU_CORES_NUM
int
default 2

View File

@@ -138,7 +138,8 @@
#endif
/*-------------------------- CACHE CAPS --------------------------------------*/
#define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data
#define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data within one core
#define SOC_IDCACHE_PER_CORE 1 //Independent cache unit per core
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_CORES_NUM 2

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -41,6 +41,8 @@
#include "soc/ext_mem_defs.h"
#endif
#include "esp_rom_spiflash.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
#include <soc/soc.h>
#include "sdkconfig.h"
#ifndef CONFIG_FREERTOS_UNICORE
@@ -58,18 +60,6 @@
static __attribute__((unused)) const char *TAG = "cache";
#define DPORT_CACHE_BIT(cpuid, regid) DPORT_ ## cpuid ## regid
#define DPORT_CACHE_MASK(cpuid) (DPORT_CACHE_BIT(cpuid, _CACHE_MASK_OPSDRAM) | DPORT_CACHE_BIT(cpuid, _CACHE_MASK_DROM0) | \
DPORT_CACHE_BIT(cpuid, _CACHE_MASK_DRAM1) | DPORT_CACHE_BIT(cpuid, _CACHE_MASK_IROM0) | \
DPORT_CACHE_BIT(cpuid, _CACHE_MASK_IRAM1) | DPORT_CACHE_BIT(cpuid, _CACHE_MASK_IRAM0) )
#define DPORT_CACHE_VAL(cpuid) (~(DPORT_CACHE_BIT(cpuid, _CACHE_MASK_DROM0) | \
DPORT_CACHE_BIT(cpuid, _CACHE_MASK_DRAM1) | \
DPORT_CACHE_BIT(cpuid, _CACHE_MASK_IRAM0)))
#define DPORT_CACHE_GET_VAL(cpuid) (cpuid == 0) ? DPORT_CACHE_VAL(PRO) : DPORT_CACHE_VAL(APP)
#define DPORT_CACHE_GET_MASK(cpuid) (cpuid == 0) ? DPORT_CACHE_MASK(PRO) : DPORT_CACHE_MASK(APP)
/**
* These two shouldn't be declared as static otherwise if `CONFIG_SPI_FLASH_ROM_IMPL` is enabled,
@@ -78,15 +68,9 @@ static __attribute__((unused)) const char *TAG = "cache";
void spi_flash_disable_cache(uint32_t cpuid, uint32_t *saved_state);
void spi_flash_restore_cache(uint32_t cpuid, uint32_t saved_state);
// Used only by the ROM implementation; in IDF this param is unused and the cache status is held by the HAL
static uint32_t s_flash_op_cache_state[2];
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2
/* esp32c6 does not has a register indicating if cache is enabled
* so we use s static data to store to state of cache, every time
* disable/restore api is called, the state will be updated
*/
static volatile DRAM_ATTR bool s_cache_enabled = 1;
#endif
#ifndef CONFIG_FREERTOS_UNICORE
static SemaphoreHandle_t s_flash_op_mutex;
@@ -221,13 +205,16 @@ void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu(void)
// with non-iram interrupts and the scheduler disabled. None of these CPUs will
// touch external RAM or flash this way, so we can safely disable caches.
spi_flash_disable_cache(cpuid, &s_flash_op_cache_state[cpuid]);
#if SOC_IDCACHE_PER_CORE
// only needed if the cache is per core
spi_flash_disable_cache(other_cpuid, &s_flash_op_cache_state[other_cpuid]);
#endif
}
void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu(void)
{
const int cpuid = xPortGetCoreID();
const uint32_t other_cpuid = (cpuid == 0) ? 1 : 0;
#ifndef NDEBUG
// Sanity check: flash operation ends on the same CPU as it has started
assert(cpuid == s_flash_op_cpu);
@@ -236,9 +223,13 @@ void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu(void)
s_flash_op_cpu = -1;
#endif
// Re-enable cache on both CPUs. After this, cache (flash and external RAM) should work again.
// Re-enable cache. After this, cache (flash and external RAM) should work again.
spi_flash_restore_cache(cpuid, s_flash_op_cache_state[cpuid]);
#if SOC_IDCACHE_PER_CORE
// only needed if the cache is per core
const uint32_t other_cpuid = (cpuid == 0) ? 1 : 0;
spi_flash_restore_cache(other_cpuid, s_flash_op_cache_state[other_cpuid]);
#endif
if (xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) {
// Signal to spi_flash_op_block_task that flash operation is complete
@@ -352,6 +343,19 @@ void IRAM_ATTR spi_flash_enable_interrupts_caches_no_os(void)
#endif // CONFIG_FREERTOS_UNICORE
void IRAM_ATTR spi_flash_enable_cache(uint32_t cpuid)
{
#if CONFIG_IDF_TARGET_ESP32
uint32_t cache_value = cache_ll_l1_get_enabled_bus(cpuid);
// Re-enable cache on this CPU
spi_flash_restore_cache(cpuid, cache_value);
#else
spi_flash_restore_cache(0, 0); // TODO cache_value should be non-zero
#endif
}
/**
* The following two functions are replacements for Cache_Read_Disable and Cache_Read_Enable
* function in ROM. They are used to work around a bug where Cache_Read_Disable requires a call to
@@ -359,87 +363,17 @@ void IRAM_ATTR spi_flash_enable_interrupts_caches_no_os(void)
*/
void IRAM_ATTR spi_flash_disable_cache(uint32_t cpuid, uint32_t *saved_state)
{
#if CONFIG_IDF_TARGET_ESP32
uint32_t ret = 0;
const uint32_t cache_mask = DPORT_CACHE_GET_MASK(cpuid);
if (cpuid == 0) {
ret |= DPORT_GET_PERI_REG_BITS2(DPORT_PRO_CACHE_CTRL1_REG, cache_mask, 0);
while (DPORT_GET_PERI_REG_BITS2(DPORT_PRO_DCACHE_DBUG0_REG, DPORT_PRO_CACHE_STATE, DPORT_PRO_CACHE_STATE_S) != 1) {
;
}
DPORT_SET_PERI_REG_BITS(DPORT_PRO_CACHE_CTRL_REG, 1, 0, DPORT_PRO_CACHE_ENABLE_S);
}
#if !CONFIG_FREERTOS_UNICORE
else {
ret |= DPORT_GET_PERI_REG_BITS2(DPORT_APP_CACHE_CTRL1_REG, cache_mask, 0);
while (DPORT_GET_PERI_REG_BITS2(DPORT_APP_DCACHE_DBUG0_REG, DPORT_APP_CACHE_STATE, DPORT_APP_CACHE_STATE_S) != 1) {
;
}
DPORT_SET_PERI_REG_BITS(DPORT_APP_CACHE_CTRL_REG, 1, 0, DPORT_APP_CACHE_ENABLE_S);
}
#endif
*saved_state = ret;
#elif CONFIG_IDF_TARGET_ESP32S2
*saved_state = Cache_Suspend_ICache();
#elif CONFIG_IDF_TARGET_ESP32S3
uint32_t icache_state, dcache_state;
icache_state = Cache_Suspend_ICache() << 16;
dcache_state = Cache_Suspend_DCache();
*saved_state = icache_state | dcache_state;
#elif CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32C2
uint32_t icache_state;
icache_state = Cache_Suspend_ICache() << 16;
*saved_state = icache_state;
#elif CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2
uint32_t icache_state;
icache_state = Cache_Suspend_ICache();
*saved_state = icache_state;
s_cache_enabled = 0;
#endif
cache_hal_suspend(CACHE_TYPE_ALL);
}
void IRAM_ATTR spi_flash_restore_cache(uint32_t cpuid, uint32_t saved_state)
{
#if CONFIG_IDF_TARGET_ESP32
const uint32_t cache_mask = DPORT_CACHE_GET_MASK(cpuid);
if (cpuid == 0) {
DPORT_SET_PERI_REG_BITS(DPORT_PRO_CACHE_CTRL_REG, 1, 1, DPORT_PRO_CACHE_ENABLE_S);
DPORT_SET_PERI_REG_BITS(DPORT_PRO_CACHE_CTRL1_REG, cache_mask, saved_state, 0);
}
#if !CONFIG_FREERTOS_UNICORE
else {
DPORT_SET_PERI_REG_BITS(DPORT_APP_CACHE_CTRL_REG, 1, 1, DPORT_APP_CACHE_ENABLE_S);
DPORT_SET_PERI_REG_BITS(DPORT_APP_CACHE_CTRL1_REG, cache_mask, saved_state, 0);
}
#endif
#elif CONFIG_IDF_TARGET_ESP32S2
Cache_Resume_ICache(saved_state);
#elif CONFIG_IDF_TARGET_ESP32S3
Cache_Resume_DCache(saved_state & 0xffff);
Cache_Resume_ICache(saved_state >> 16);
#elif CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32C2
Cache_Resume_ICache(saved_state >> 16);
#elif CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2
Cache_Resume_ICache(saved_state);
s_cache_enabled = 1;
#endif
cache_hal_resume(CACHE_TYPE_ALL);
}
IRAM_ATTR bool spi_flash_cache_enabled(void)
bool IRAM_ATTR spi_flash_cache_enabled(void)
{
#if CONFIG_IDF_TARGET_ESP32
bool result = (DPORT_REG_GET_BIT(DPORT_PRO_CACHE_CTRL_REG, DPORT_PRO_CACHE_ENABLE) != 0);
#if portNUM_PROCESSORS == 2
result = result && (DPORT_REG_GET_BIT(DPORT_APP_CACHE_CTRL_REG, DPORT_APP_CACHE_ENABLE) != 0);
#endif
#elif CONFIG_IDF_TARGET_ESP32S2
bool result = (REG_GET_BIT(EXTMEM_PRO_ICACHE_CTRL_REG, EXTMEM_PRO_ICACHE_ENABLE) != 0);
#elif CONFIG_IDF_TARGET_ESP32S3 || CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32C2
bool result = (REG_GET_BIT(EXTMEM_ICACHE_CTRL_REG, EXTMEM_ICACHE_ENABLE) != 0);
#elif CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2
bool result = s_cache_enabled;
#endif
return result;
return cache_hal_is_cache_enabled(CACHE_TYPE_ALL);
}
#if CONFIG_IDF_TARGET_ESP32S2
@@ -980,16 +914,3 @@ esp_err_t esp_enable_cache_wrap(bool icache_wrap_enable)
return ESP_OK;
}
#endif // CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32C2
void IRAM_ATTR spi_flash_enable_cache(uint32_t cpuid)
{
#if CONFIG_IDF_TARGET_ESP32
uint32_t cache_value = DPORT_CACHE_GET_VAL(cpuid);
cache_value &= DPORT_CACHE_GET_MASK(cpuid);
// Re-enable cache on this CPU
spi_flash_restore_cache(cpuid, cache_value);
#else
spi_flash_restore_cache(0, 0); // TODO cache_value should be non-zero
#endif
}

View File

@@ -6,3 +6,16 @@ set(EXTRA_COMPONENT_DIRS "$ENV{IDF_PATH}/components/spi_flash/test_apps/componen
include($ENV{IDF_PATH}/tools/cmake/project.cmake)
project(test_esp_flash_stress)
if(CONFIG_COMPILER_DUMP_RTL_FILES)
add_custom_target(check_test_app_sections ALL
COMMAND ${PYTHON} $ENV{IDF_PATH}/tools/ci/check_callgraph.py
--rtl-dir ${CMAKE_BINARY_DIR}/esp-idf/driver/
--elf-file ${CMAKE_BINARY_DIR}/mspi_test.elf
find-refs
--from-sections=.iram0.text
--to-sections=.flash.text,.flash.rodata
--exit-code
DEPENDS ${elf}
)
endif()

View File

@@ -0,0 +1,5 @@
# This config merges the freertos_flash and no_optimization UT configs together.
CONFIG_ESP_SYSTEM_MEMPROT_FEATURE=n
CONFIG_FREERTOS_PLACE_FUNCTIONS_INTO_FLASH=y
CONFIG_COMPILER_OPTIMIZATION_NONE=y
CONFIG_COMPILER_DUMP_RTL_FILES=y