Mirror of https://github.com/espressif/esp-idf, synced 2025-03-09 17:19:09 -04:00
Merge branch 'fix/esp32p4_lightsleep_fixes_v5.4' into 'release/v5.4'
fix(esp_hw_support): some fixes of esp32p4 lightsleep retention & power switch process (v5.4)

See merge request espressif/esp-idf!37086
Commit f9ae8dfb04
@@ -279,11 +279,6 @@ void pmu_sleep_increase_ldo_volt(void);
* power in the sleep and wake-up processes.
*/
void pmu_sleep_shutdown_dcdc(void);

/**
* @brief DCDC has taken over power supply, shut down LDO to save power consumption
*/
void pmu_sleep_shutdown_ldo(void);
#endif // SOC_DCDC_SUPPORTED

/**
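For orientation, a minimal sketch of how these DCDC/LDO hand-over helpers are ordered around a light-sleep cycle, inferred from the pmu_sleep.c changes further down in this diff. The wrapper function and its sleep-entry call site are assumptions for illustration only; in the actual code the wake-up half lives inside pmu_sleep_finish(), where pmu_sleep_enable_dcdc() and pmu_sleep_shutdown_ldo() are internal inline helpers.

/* Sketch only -- example_lightsleep_power_switch() is a hypothetical wrapper.
 * Assumed headers: the PMU sleep header patched above and esp_rom_sys.h. */
void example_lightsleep_power_switch(bool sleep_rejected)
{
    /* Before sleeping: hand the HP rail back to the LDO and stop the DCDC
     * (per the implementation below, this also lowers the HP LDO dbias back
     * to the active default). */
    pmu_sleep_shutdown_dcdc();

    /* ... enter and leave light sleep ... */

    /* On wake-up (done in pmu_sleep_finish() in this change): re-enable the
     * DCDC; if the sleep request was rejected, the hardware wake-up sequence
     * that powers the DCDC up was skipped, so wait for the rail in software. */
    pmu_sleep_enable_dcdc();
    if (sleep_rejected) {
        esp_rom_delay_us(950);
    }
    /* DCDC has taken over the supply again, so the LDO can be shut down. */
    pmu_sleep_shutdown_ldo();
}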
@@ -113,6 +113,7 @@ STRUCT_END(RvCoreCriticalSleepFrame)
STRUCT_BEGIN
STRUCT_FIELD (long, 4, RV_SLP_CTX_MSCRATCH, mscratch)
STRUCT_FIELD (long, 4, RV_SLP_CTX_MISA, misa)
STRUCT_FIELD (long, 4, RV_SLP_CTX_MHCR, mhcr)
STRUCT_FIELD (long, 4, RV_SLP_CTX_TSELECT, tselect)
STRUCT_FIELD (long, 4, RV_SLP_CTX_TDATA1, tdata1)
STRUCT_FIELD (long, 4, RV_SLP_CTX_TDATA2, tdata2)
@@ -20,7 +20,6 @@
#include "freertos/task.h"
#include "esp_heap_caps.h"
#include "riscv/csr.h"
#include "soc/cache_reg.h"
#include "soc/clic_reg.h"
#include "soc/rtc_periph.h"
#include "soc/soc_caps.h"
@@ -32,7 +31,6 @@

#include "esp32p4/rom/ets_sys.h"
#include "esp32p4/rom/rtc.h"
#include "esp32p4/rom/cache.h"
#include "rvsleep-frames.h"

#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
@@ -76,7 +74,6 @@ typedef struct {
struct {
RvCoreCriticalSleepFrame *critical_frame[portNUM_PROCESSORS];
RvCoreNonCriticalSleepFrame *non_critical_frame[portNUM_PROCESSORS];
cpu_domain_dev_sleep_frame_t *cache_config_frame;
cpu_domain_dev_sleep_frame_t *clic_frame[portNUM_PROCESSORS];
} retent;
} sleep_cpu_retention_t;
@@ -107,15 +104,6 @@ static void * cpu_domain_dev_sleep_frame_alloc_and_init(const cpu_domain_dev_reg
return frame;
}

static inline void * cpu_domain_cache_config_sleep_frame_alloc_and_init(void)
{
const static cpu_domain_dev_regs_region_t regions[] = {
{ .start = CACHE_L1_ICACHE_CTRL_REG, .end = CACHE_L1_BYPASS_CACHE_CONF_REG + 4 },
{ .start = CACHE_L2_CACHE_CTRL_REG, .end = CACHE_L2_CACHE_BLOCKSIZE_CONF_REG + 4 }
};
return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}

static inline void * cpu_domain_clic_sleep_frame_alloc_and_init(uint8_t core_id)
{
const static cpu_domain_dev_regs_region_t regions[portNUM_PROCESSORS][2] = {
@@ -146,13 +134,6 @@ static esp_err_t esp_sleep_cpu_retention_init_impl(void)
s_cpu_retention.retent.non_critical_frame[core_id] = (RvCoreNonCriticalSleepFrame *)frame;
}
}
if (s_cpu_retention.retent.cache_config_frame == NULL) {
void *frame = cpu_domain_cache_config_sleep_frame_alloc_and_init();
if (frame == NULL) {
goto err;
}
s_cpu_retention.retent.cache_config_frame = (cpu_domain_dev_sleep_frame_t *)frame;
}
for (uint8_t core_id = 0; core_id < portNUM_PROCESSORS; ++core_id) {
if (s_cpu_retention.retent.clic_frame[core_id] == NULL) {
void *frame = cpu_domain_clic_sleep_frame_alloc_and_init(core_id);
@@ -186,10 +167,6 @@ static esp_err_t esp_sleep_cpu_retention_deinit_impl(void)
s_cpu_retention.retent.non_critical_frame[core_id] = NULL;
}
}
if (s_cpu_retention.retent.cache_config_frame) {
heap_caps_free((void *)s_cpu_retention.retent.cache_config_frame);
s_cpu_retention.retent.cache_config_frame = NULL;
}
for (uint8_t core_id = 0; core_id < portNUM_PROCESSORS; ++core_id) {
if (s_cpu_retention.retent.clic_frame[core_id]) {
heap_caps_free((void *)s_cpu_retention.retent.clic_frame[core_id]);
@@ -215,6 +192,7 @@ static TCM_IRAM_ATTR RvCoreNonCriticalSleepFrame * rv_core_noncritical_regs_save

frame->mscratch = RV_READ_CSR(mscratch);
frame->misa = RV_READ_CSR(misa);
frame->mhcr = RV_READ_CSR(MHCR);
frame->tselect = RV_READ_CSR(tselect);
frame->tdata1 = RV_READ_CSR(tdata1);
frame->tdata2 = RV_READ_CSR(tdata2);
@@ -283,6 +261,7 @@ static TCM_IRAM_ATTR void rv_core_noncritical_regs_restore(void)

RV_WRITE_CSR(mscratch, frame->mscratch);
RV_WRITE_CSR(misa, frame->misa);
RV_WRITE_CSR(MHCR, frame->mhcr);
RV_WRITE_CSR(tselect, frame->tselect);
RV_WRITE_CSR(tdata1, frame->tdata1);
RV_WRITE_CSR(tdata2, frame->tdata2);
@@ -403,6 +382,8 @@ static TCM_IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{
uint8_t core_id = esp_cpu_get_core_id();
/* mstatus is core privated CSR, do it near the core critical regs restore */
uint32_t mstatus = save_mstatus_and_disable_global_int();
rv_core_critical_regs_save();

RvCoreCriticalSleepFrame * frame = s_cpu_retention.retent.critical_frame[core_id];
@@ -428,7 +409,7 @@ static TCM_IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
validate_retention_frame_crc((uint32_t*)frame, RV_SLEEP_CTX_SZ1 - 2 * sizeof(long), (uint32_t *)(&frame->frame_crc));
}
#endif

restore_mstatus(mstatus);
return pmu_sleep_finish(dslp);
}

@@ -436,13 +417,11 @@ esp_err_t TCM_IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t,
uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{
esp_sleep_execute_event_callbacks(SLEEP_EVENT_SW_CPU_TO_MEM_START, (void *)0);
uint32_t mstatus = save_mstatus_and_disable_global_int();
uint8_t core_id = esp_cpu_get_core_id();
#if ESP_SLEEP_POWER_DOWN_CPU && !CONFIG_FREERTOS_UNICORE
atomic_store(&s_smp_retention_state[core_id], SMP_BACKUP_START);
#endif
cpu_domain_dev_regs_save(s_cpu_retention.retent.clic_frame[core_id]);
cpu_domain_dev_regs_save(s_cpu_retention.retent.cache_config_frame);
rv_core_noncritical_regs_save();

#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
@@ -467,10 +446,8 @@ esp_err_t TCM_IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t,
atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_START);
#endif

rv_core_noncritical_regs_restore();
cpu_domain_dev_regs_restore(s_cpu_retention.retent.cache_config_frame);
cpu_domain_dev_regs_restore(s_cpu_retention.retent.clic_frame[core_id]);
restore_mstatus(mstatus);
rv_core_noncritical_regs_restore();

#if ESP_SLEEP_POWER_DOWN_CPU && !CONFIG_FREERTOS_UNICORE
atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_DONE);
@@ -495,7 +472,6 @@ bool cpu_domain_pd_allowed(void)
allowed &= (s_cpu_retention.retent.critical_frame[core_id] != NULL);
allowed &= (s_cpu_retention.retent.non_critical_frame[core_id] != NULL);
}
allowed &= (s_cpu_retention.retent.cache_config_frame != NULL);
for (uint8_t core_id = 0; core_id < portNUM_PROCESSORS; ++core_id) {
allowed &= (s_cpu_retention.retent.clic_frame[core_id] != NULL);
}
@@ -544,6 +520,7 @@ static TCM_IRAM_ATTR void smp_core_do_retention(void)
atomic_store(&s_smp_retention_state[core_id], SMP_BACKUP_START);
rv_core_noncritical_regs_save();
cpu_domain_dev_regs_save(s_cpu_retention.retent.clic_frame[core_id]);
uint32_t mstatus = save_mstatus_and_disable_global_int();
rv_core_critical_regs_save();
RvCoreCriticalSleepFrame *frame_critical = s_cpu_retention.retent.critical_frame[core_id];
if ((frame_critical->pmufunc & 0x3) == 0x1) {
@@ -563,6 +540,7 @@ static TCM_IRAM_ATTR void smp_core_do_retention(void)
REG_CLR_BIT(HP_SYS_CLKRST_HP_RST_EN0_REG, HP_SYS_CLKRST_REG_RST_EN_CORE1_GLOBAL);
}
atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_START);
restore_mstatus(mstatus);
cpu_domain_dev_regs_restore(s_cpu_retention.retent.clic_frame[core_id]);
rv_core_noncritical_regs_restore();
atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_DONE);
@@ -149,31 +149,6 @@ _rv_core_critical_regs_restore: /* export a strong symbol to jump to here, used
nop

rv_core_critical_regs_restore:
/* Invalidate L1 Cache by Core 0*/
csrr t0, mhartid
bnez t0, start_restore
/* Core 0 is wakeup core, Invalidate L1 Cache here */
/* Invalidate L1 cache is required here!!! */
la t0, CACHE_SYNC_MAP_REG
li t1, CACHE_MAP_L1_CACHE_MASK /* map l1 i/dcache */
sw t1, 0x0(t0) /* set EXTMEM_CACHE_SYNC_MAP_REG bit 4 */
la t2, CACHE_SYNC_ADDR_REG
sw zero, 0x0(t2) /* clear EXTMEM_CACHE_SYNC_ADDR_REG */
la t0, CACHE_SYNC_SIZE_REG
sw zero, 0x0(t0) /* clear EXTMEM_CACHE_SYNC_SIZE_REG */

la t1, CACHE_SYNC_CTRL_REG
lw t2, 0x0(t1)
ori t2, t2, 0x1
sw t2, 0x0(t1)

li t0, 0x10 /* SYNC_DONE bit */
wait_cache_sync_done1:
lw t2, 0x0(t1)
and t2, t0, t2
beqz t2, wait_cache_sync_done1

start_restore:
la t0, rv_core_critical_regs_frame
csrr t1, mhartid
slli t1, t1, 2
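The invalidate sequence deleted above corresponds roughly to the following C sketch (REG_READ/REG_WRITE are the usual soc accessor macros; the 0x1 request bit and the 0x10 SYNC_DONE bit are taken from the assembly's immediates rather than from the register header):

/* C rendering of the removed core-0 L1 cache invalidate (sketch only). */
REG_WRITE(CACHE_SYNC_MAP_REG, CACHE_MAP_L1_CACHE_MASK); /* select L1 I/D caches */
REG_WRITE(CACHE_SYNC_ADDR_REG, 0);                      /* whole cache: addr = 0 */
REG_WRITE(CACHE_SYNC_SIZE_REG, 0);                      /* whole cache: size = 0 */
REG_WRITE(CACHE_SYNC_CTRL_REG, REG_READ(CACHE_SYNC_CTRL_REG) | 0x1); /* kick the sync */
while ((REG_READ(CACHE_SYNC_CTRL_REG) & 0x10) == 0) {   /* poll SYNC_DONE */
}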
@@ -149,8 +149,8 @@ static inline void pmu_power_domain_force_default(pmu_context_t *ctx)
pmu_ll_lp_set_power_force_power_down(ctx->hal->dev, false);
pmu_ll_lp_set_power_force_isolate (ctx->hal->dev, false);
pmu_ll_lp_set_power_force_reset (ctx->hal->dev, false);
pmu_ll_set_dcdc_force_power_up(ctx->hal->dev, false);
pmu_ll_set_dcdc_force_power_down(ctx->hal->dev, false);
pmu_ll_set_dcdc_switch_force_power_up(ctx->hal->dev, false);
pmu_ll_set_dcdc_switch_force_power_down(ctx->hal->dev, false);
}

static inline void pmu_hp_system_param_default(pmu_hp_mode_t mode, pmu_hp_system_param_t *param)
@@ -338,21 +338,31 @@ void pmu_sleep_increase_ldo_volt(void) {
}

void pmu_sleep_shutdown_dcdc(void) {
SET_PERI_REG_MASK(LP_SYSTEM_REG_SYS_CTRL_REG, LP_SYSTEM_REG_LP_FIB_DCDC_SWITCH); //0: enable, 1: disable
REG_SET_BIT(PMU_DCM_CTRL_REG, PMU_DCDC_OFF_REQ);
pmu_ll_set_dcdc_switch_force_power_down(&PMU, true);
pmu_ll_set_dcdc_en(&PMU, false);
// Decrease hp_ldo voltage.
pmu_ll_hp_set_regulator_dbias(&PMU, PMU_MODE_HP_ACTIVE, HP_CALI_ACTIVE_DBIAS_DEFAULT);
}

void pmu_sleep_enable_dcdc(void) {
CLEAR_PERI_REG_MASK(LP_SYSTEM_REG_SYS_CTRL_REG, LP_SYSTEM_REG_LP_FIB_DCDC_SWITCH); //0: enable, 1: disable
SET_PERI_REG_MASK(PMU_DCM_CTRL_REG, PMU_DCDC_ON_REQ);
REG_SET_FIELD(PMU_HP_ACTIVE_BIAS_REG, PMU_HP_ACTIVE_DCM_VSET, HP_CALI_ACTIVE_DCM_VSET_DEFAULT);
FORCE_INLINE_ATTR void pmu_sleep_enable_dcdc(void) {
pmu_ll_set_dcdc_switch_force_power_down(&PMU, false);
pmu_ll_set_dcdc_en(&PMU, true);
pmu_ll_hp_set_dcm_vset(&PMU, PMU_MODE_HP_ACTIVE, HP_CALI_ACTIVE_DCM_VSET_DEFAULT);
}

void pmu_sleep_shutdown_ldo(void) {
CLEAR_PERI_REG_MASK(LP_SYSTEM_REG_SYS_CTRL_REG, LP_SYSTEM_REG_LP_FIB_DCDC_SWITCH); //0: enable, 1: disable
CLEAR_PERI_REG_MASK(PMU_HP_ACTIVE_HP_REGULATOR0_REG, PMU_HP_ACTIVE_HP_REGULATOR_XPD);
FORCE_INLINE_ATTR void pmu_sleep_shutdown_ldo(void) {
pmu_ll_hp_set_regulator_xpd(&PMU, PMU_MODE_HP_ACTIVE, 0);
}

FORCE_INLINE_ATTR void pmu_sleep_cache_sync_items(uint32_t gid, uint32_t type, uint32_t map, uint32_t addr, uint32_t bytes)
{
REG_WRITE(CACHE_SYNC_ADDR_REG, addr);
REG_WRITE(CACHE_SYNC_SIZE_REG, bytes);
REG_WRITE(CACHE_SYNC_MAP_REG, map);
REG_SET_FIELD(CACHE_SYNC_CTRL_REG, CACHE_SYNC_RGID, gid);
REG_SET_BIT(CACHE_SYNC_CTRL_REG, type);
while (!REG_GET_BIT(CACHE_SYNC_CTRL_REG, CACHE_SYNC_DONE))
;
}

static TCM_DRAM_ATTR uint32_t s_mpll_freq_mhz_before_sleep = 0;
@@ -369,11 +379,12 @@ TCM_IRAM_ATTR uint32_t pmu_sleep_start(uint32_t wakeup_opt, uint32_t reject_opt,
pmu_ll_hp_clear_reject_intr_status(PMU_instance()->hal->dev);
pmu_ll_hp_clear_reject_cause(PMU_instance()->hal->dev);

// For the sleep where powered down the TOP domain, the L1 cache data memory will be lost and needs to be written back here.
// For the sleep without power down the TOP domain, regdma retention may still be enabled, and dirty data in the L1 cache needs
// to be written back so that regdma can get the correct link. So we always need to write back to L1 DCache here.
// !!! Need to manually check that data in L2 memory will not be modified from now on. !!!
Cache_WriteBack_All(CACHE_MAP_L1_DCACHE);
// 1. For the sleep where powered down the TOP domain, the L1 cache data memory will be lost and needs to be written back here.
// 2. For the sleep without power down the TOP domain, regdma retention may still be enabled, and dirty data in the L1 cache needs
// to be written back so that regdma can get the correct link.
// 3. We cannot use the API provided by ROM to invalidate the cache, since it is a function calling that writes data to the stack during
// the return process, which results in dirty cachelines in L1 Cache again.
pmu_sleep_cache_sync_items(SMMU_GID_DEFAULT, CACHE_SYNC_WRITEBACK, CACHE_MAP_L1_DCACHE, 0, 0);

#if CONFIG_SPIRAM
psram_ctrlr_ll_wait_all_transaction_done();
@@ -434,10 +445,10 @@ TCM_IRAM_ATTR bool pmu_sleep_finish(bool dslp)
#endif
{
pmu_ll_hp_set_dcm_vset(&PMU, PMU_MODE_HP_ACTIVE, HP_CALI_ACTIVE_DCM_VSET_DEFAULT);
pmu_sleep_enable_dcdc();
if (pmu_ll_hp_is_sleep_reject(PMU_instance()->hal->dev)) {
// If sleep is rejected, the hardware wake-up process that turns on DCDC
// is skipped, and software is used to enable DCDC here.
pmu_sleep_enable_dcdc();
// is skipped, and wait DCDC volt rise up by software here.
esp_rom_delay_us(950);
}
pmu_sleep_shutdown_ldo();
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -54,14 +54,13 @@ void rtc_clk_init(rtc_clk_config_t cfg)
REGI2C_WRITE_MASK(I2C_BIAS, I2C_BIAS_OR_FORCE_XPD_IPH, 0);
REGI2C_WRITE_MASK(I2C_BIAS, I2C_BIAS_OR_FORCE_XPD_VGATE_BUF, 0);

REG_SET_FIELD(PMU_HP_SLEEP_LP_REGULATOR0_REG, PMU_HP_SLEEP_LP_REGULATOR_DBIAS, LP_CALI_DBIAS);

pmu_ll_lp_set_regulator_dbias(&PMU, PMU_MODE_LP_ACTIVE, LP_CALI_DBIAS);
// Switch to DCDC
SET_PERI_REG_MASK(PMU_DCM_CTRL_REG, PMU_DCDC_ON_REQ);
CLEAR_PERI_REG_MASK(LP_SYSTEM_REG_SYS_CTRL_REG, LP_SYSTEM_REG_LP_FIB_DCDC_SWITCH); //0: enable, 1: disable
REG_SET_FIELD(PMU_HP_ACTIVE_BIAS_REG, PMU_HP_ACTIVE_DCM_VSET, HP_CALI_ACTIVE_DCM_VSET_DEFAULT);
pmu_ll_set_dcdc_en(&PMU, true);
pmu_ll_set_dcdc_switch_force_power_down(&PMU, false);
pmu_ll_hp_set_dcm_vset(&PMU, PMU_MODE_HP_ACTIVE, HP_CALI_ACTIVE_DCM_VSET_DEFAULT);
esp_rom_delay_us(1000);
CLEAR_PERI_REG_MASK(PMU_HP_ACTIVE_HP_REGULATOR0_REG, PMU_HP_ACTIVE_HP_REGULATOR_XPD);
pmu_ll_hp_set_regulator_xpd(&PMU, PMU_MODE_HP_ACTIVE, false);

soc_xtal_freq_t xtal_freq = cfg.xtal_freq;
esp_rom_output_tx_wait_idle(0);
@@ -101,11 +101,11 @@ static __attribute__((unused)) esp_err_t sleep_sys_periph_systimer_retention_ini
return ESP_OK;
}

#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
esp_err_t sleep_sys_periph_l2_cache_retention_init(void)
#if SOC_PM_CACHE_RETENTION_BY_PAU
esp_err_t sleep_sys_periph_cache_retention_init(void)
{
esp_err_t err = sleep_retention_entries_create(l2_cache_regs_retention, ARRAY_SIZE(l2_cache_regs_retention), REGDMA_LINK_PRI_SYS_PERIPH_HIGH, SLEEP_RETENTION_MODULE_SYS_PERIPH);
ESP_RETURN_ON_ERROR(err, TAG, "failed to allocate memory for digital peripherals (L2 Cache) retention");
esp_err_t err = sleep_retention_entries_create(cache_regs_retention, ARRAY_SIZE(cache_regs_retention), REGDMA_LINK_PRI_SYS_PERIPH_HIGH, SLEEP_RETENTION_MODULE_SYS_PERIPH);
ESP_RETURN_ON_ERROR(err, TAG, "failed to allocate memory for digital peripherals (Cache) retention");
ESP_LOGI(TAG, "L2 Cache sleep retention initialization");
return ESP_OK;
}
@@ -128,8 +128,8 @@ static __attribute__((unused)) esp_err_t sleep_sys_periph_retention_init(void *a
if(err) goto error;
err = sleep_sys_periph_hp_system_retention_init(arg);
if(err) goto error;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
err = sleep_sys_periph_l2_cache_retention_init();
#if SOC_PM_CACHE_RETENTION_BY_PAU
err = sleep_sys_periph_cache_retention_init();
if(err) goto error;
#endif
#if SOC_APM_SUPPORTED
@@ -231,6 +231,7 @@ typedef enum {
#define CACHE_MAP_L2_CACHE BIT(5)

#define CACHE_MAP_L1_ICACHE_MASK (CACHE_MAP_L1_ICACHE_0 | CACHE_MAP_L1_ICACHE_1)
#define CACHE_MAP_L1_CACHE_MASK (CACHE_MAP_L1_ICACHE_MASK | CACHE_MAP_L1_DCACHE)
#define CACHE_MAP_MASK (CACHE_MAP_L1_ICACHE_MASK | CACHE_MAP_L1_DCACHE | CACHE_MAP_L2_CACHE)

struct cache_internal_stub_table {
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -663,16 +663,27 @@ FORCE_INLINE_ATTR uint32_t pmu_ll_hp_get_digital_power_up_wait_cycle(pmu_dev_t *
return hw->power.wait_timer0.powerup_timer;
}

FORCE_INLINE_ATTR void pmu_ll_set_dcdc_force_power_up(pmu_dev_t *hw, bool fpu)
FORCE_INLINE_ATTR void pmu_ll_set_dcdc_switch_force_power_up(pmu_dev_t *hw, bool fpu)
{
hw->power.dcdc_switch.force_pd = 0;
hw->power.dcdc_switch.force_pu = fpu;
}

FORCE_INLINE_ATTR void pmu_ll_set_dcdc_force_power_down(pmu_dev_t *hw, bool fpd)
FORCE_INLINE_ATTR void pmu_ll_set_dcdc_switch_force_power_down(pmu_dev_t *hw, bool fpd)
{
hw->power.dcdc_switch.force_pu = 0;
hw->power.dcdc_switch.force_pd = fpd;
}

FORCE_INLINE_ATTR void pmu_ll_set_dcdc_en(pmu_dev_t *hw, bool en)
{
if (en) {
hw->dcm_ctrl.on_req = 1;
} else {
hw->dcm_ctrl.off_req = 1;
}
}

/**
* @brief Get ext1 wakeup source status
* @return The lower 8 bits of the returned value are the bitmap of
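As a usage note, the renamed dcdc_switch force helpers and the new pmu_ll_set_dcdc_en() request helper are what pmu_sleep_shutdown_dcdc() earlier in this diff composes, roughly:

/* Mirrors the body of pmu_sleep_shutdown_dcdc() above (sketch, not new code). */
pmu_ll_set_dcdc_switch_force_power_down(&PMU, true);   /* open the DCDC power switch */
pmu_ll_set_dcdc_en(&PMU, false);                       /* pulses dcm_ctrl.off_req */
pmu_ll_hp_set_regulator_dbias(&PMU, PMU_MODE_HP_ACTIVE, HP_CALI_ACTIVE_DBIAS_DEFAULT); /* LDO back to active dbias */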
@@ -1923,6 +1923,10 @@ config SOC_PM_CPU_RETENTION_BY_SW
bool
default y

config SOC_PM_CACHE_RETENTION_BY_PAU
bool
default y

config SOC_PM_PAU_LINK_NUM
int
default 4
@@ -718,6 +718,7 @@
#define SOC_PM_SUPPORT_DEEPSLEEP_CHECK_STUB_ONLY (1) /*!<Supports CRC only the stub code in RTC memory */

#define SOC_PM_CPU_RETENTION_BY_SW (1)
#define SOC_PM_CACHE_RETENTION_BY_PAU (1)

#define SOC_PM_PAU_LINK_NUM (4)
#define SOC_PM_PAU_REGDMA_LINK_MULTI_ADDR (1)
@@ -26,14 +26,14 @@ extern "C"
extern const regdma_entries_config_t intr_matrix_regs_retention[INT_MTX_RETENTION_LINK_LEN];

/**
* @brief Provide access to l2_cache configuration registers retention
* @brief Provide access to cache configuration registers retention
* context definition.
*
* This is an internal function of the sleep retention driver, and is not
* useful for external use.
*/
#define L2_CACHE_RETENTION_LINK_LEN 1
extern const regdma_entries_config_t l2_cache_regs_retention[L2_CACHE_RETENTION_LINK_LEN];
#define CACHE_RETENTION_LINK_LEN 8
extern const regdma_entries_config_t cache_regs_retention[CACHE_RETENTION_LINK_LEN];

/**
* @brief Provide access to hp_system configuration registers retention
@@ -93,7 +93,7 @@ extern const regdma_entries_config_t systimer_regs_retention[SYSTIMER_RETENTION_
* useful for external use.
*/
#define PAU_RETENTION_LINK_LEN 1
extern const regdma_entries_config_t pau_regs_retention[L2_CACHE_RETENTION_LINK_LEN];
extern const regdma_entries_config_t pau_regs_retention[PAU_RETENTION_LINK_LEN];

#ifdef __cplusplus
}
@@ -22,6 +22,7 @@
#include "soc/timer_group_reg.h"
#include "soc/timer_periph.h"
#include "soc/uart_reg.h"
#include "esp32p4/rom/cache.h"

/* Interrupt Matrix Registers Context */
#define N_REGS_INTR_CORE0() (((INTERRUPT_CORE0_CLOCK_GATE_REG - DR_REG_INTERRUPT_CORE0_BASE) / 4) + 1)
@@ -32,17 +33,49 @@ const regdma_entries_config_t intr_matrix_regs_retention[] = {
};
_Static_assert(ARRAY_SIZE(intr_matrix_regs_retention) == INT_MTX_RETENTION_LINK_LEN, "Inconsistent INT_MTX retention link length definitions");

/* L1 Cache Registers Context */
/* CACHE_L1_ICACHE_CTRL_REG & CACHE_L1_DCACHE_CTRL_REG & CACHE_L1_BYPASS_CACHE_CONF_REG &
CACHE_L1_CACHE_ACS_FAIL_CTRL_REG & CACHE_L1_CACHE_ACS_FAIL_INT_ENA_REG*/
#define L1_CACHE_RETENTION_REGS_CNT (5)
#define L1_CACHE_RETENTION_REGS_BASE (CACHE_L1_ICACHE_CTRL_REG)
static const uint32_t l1_cache_regs_map[4] = {0x7, 0x0, 0xc000000, 0x0};
/* L2 Cache Registers Context */
#define N_REGS_L2_CACHE() (((CACHE_L2_CACHE_DATA_MEM_POWER_CTRL_REG - CACHE_L2_CACHE_CTRL_REG) / 4) + 1)
const regdma_entries_config_t l2_cache_regs_retention[] = {
[0] = { .config = REGDMA_LINK_CONTINUOUS_INIT(REGDMA_HPSYS_LINK(0), CACHE_L2_CACHE_CTRL_REG, CACHE_L2_CACHE_CTRL_REG, N_REGS_L2_CACHE(), 0, 0), .owner = ENTRY(0) } /* hp system */
/* CACHE_L2_CACHE_CTRL_REG & CACHE_L2_BYPASS_CACHE_CONF_REG &
CACHE_L2_CACHE_CACHESIZE_CONF_REG & CACHE_L2_CACHE_BLOCKSIZE_CONF_REG &
CACHE_L2_CACHE_ACS_FAIL_CTRL_REG & CACHE_L2_CACHE_ACS_FAIL_INT_ENA_REG */
#define L2_CACHE_RETENTION_REGS_CNT (6)
#define L2_CACHE_RETENTION_REGS_BASE (CACHE_L2_CACHE_CTRL_REG)
static const uint32_t l2_cache_regs_map[4] = {0xc000000f, 0x0, 0x0, 0x0};
const regdma_entries_config_t cache_regs_retention[] = {
[0] = {
.config = REGDMA_LINK_ADDR_MAP_INIT(REGDMA_CACHE_LINK(0x00), L1_CACHE_RETENTION_REGS_BASE, L1_CACHE_RETENTION_REGS_BASE, \
L1_CACHE_RETENTION_REGS_CNT, 0, 0, \
l1_cache_regs_map[0], l1_cache_regs_map[1], \
l1_cache_regs_map[2], l1_cache_regs_map[3]), \
.owner = ENTRY(0)
},
[1] = {
.config = REGDMA_LINK_ADDR_MAP_INIT(REGDMA_CACHE_LINK(0x01), \
L2_CACHE_RETENTION_REGS_BASE, L2_CACHE_RETENTION_REGS_BASE, \
L2_CACHE_RETENTION_REGS_CNT, 0, 0, \
l2_cache_regs_map[0], l2_cache_regs_map[1], \
l2_cache_regs_map[2], l2_cache_regs_map[3]), \
.owner = ENTRY(0)
},
// Invalidate L1 Cache
[2] = { .config = REGDMA_LINK_WRITE_INIT(REGDMA_CACHE_LINK(0x02), CACHE_SYNC_ADDR_REG, 0, CACHE_SYNC_ADDR_M, 1, 0), .owner = ENTRY(0) },
[3] = { .config = REGDMA_LINK_WRITE_INIT(REGDMA_CACHE_LINK(0x03), CACHE_SYNC_SIZE_REG, 0, CACHE_SYNC_SIZE_M, 1, 0), .owner = ENTRY(0) },
[4] = { .config = REGDMA_LINK_WRITE_INIT(REGDMA_CACHE_LINK(0x04), CACHE_SYNC_MAP_REG, CACHE_MAP_L1_CACHE_MASK, CACHE_SYNC_MAP_M, 1, 0), .owner = ENTRY(0) },
[5] = { .config = REGDMA_LINK_WRITE_INIT(REGDMA_CACHE_LINK(0x05), CACHE_SYNC_CTRL_REG, 0, CACHE_SYNC_RGID_M, 1, 0), .owner = ENTRY(0) },
[6] = { .config = REGDMA_LINK_WRITE_INIT(REGDMA_CACHE_LINK(0x06), CACHE_SYNC_CTRL_REG, CACHE_INVALIDATE_ENA, CACHE_INVALIDATE_ENA_M, 1, 0), .owner = ENTRY(0) },
[7] = { .config = REGDMA_LINK_WAIT_INIT(REGDMA_CACHE_LINK(0x07), CACHE_SYNC_CTRL_REG, CACHE_SYNC_DONE, CACHE_SYNC_DONE_M, 1, 0), .owner = ENTRY(0) },
};
_Static_assert(ARRAY_SIZE(l2_cache_regs_retention) == HP_SYSTEM_RETENTION_LINK_LEN, "Inconsistent L2 CACHE retention link length definitions");
_Static_assert(ARRAY_SIZE(cache_regs_retention) == CACHE_RETENTION_LINK_LEN, "Inconsistent L2 CACHE retention link length definitions");

/* HP System Registers Context */
#define N_REGS_HP_SYSTEM() (((HP_SYSTEM_AHB2AXI_BRESP_ERR_INT_ENA_REG - DR_REG_HP_SYS_BASE) / 4) + 1)
const regdma_entries_config_t hp_system_regs_retention[] = {
[0] = { .config = REGDMA_LINK_CONTINUOUS_INIT(REGDMA_HPSYS_LINK(0), DR_REG_HP_SYS_BASE, DR_REG_HP_SYS_BASE, N_REGS_HP_SYSTEM(), 0, 0), .owner = ENTRY(0) } /* hp system */
[0] = { .config = REGDMA_LINK_CONTINUOUS_INIT(REGDMA_HPSYS_LINK(2), DR_REG_HP_SYS_BASE, DR_REG_HP_SYS_BASE, N_REGS_HP_SYSTEM(), 0, 0), .owner = ENTRY(0) } /* hp system */
};
_Static_assert(ARRAY_SIZE(hp_system_regs_retention) == HP_SYSTEM_RETENTION_LINK_LEN, "Inconsistent HP_SYSTEM retention link length definitions");
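On how the addr-map bitmaps above are read (stated as an assumption about the REGDMA addr-map link format, though it is consistent with the counts in this hunk): bit i of word w selects the 32-bit register at offset 4 * (32 * w + i) from the base address, and the link length is the number of set bits. A small self-checking sketch:

#include <stdint.h>
#include <assert.h>

/* Count the registers selected by a 4-word regdma address map (sketch only). */
static unsigned regdma_map_count(const uint32_t map[4])
{
    unsigned n = 0;
    for (int w = 0; w < 4; w++) {
        for (int b = 0; b < 32; b++) {
            n += (map[w] >> b) & 1u;
        }
    }
    return n;
}

/* {0x7, 0, 0xc000000, 0} -> offsets 0x0, 0x4, 0x8 plus 4*(64+26) = 0x168 and
 * 4*(64+27) = 0x16c from L1_CACHE_RETENTION_REGS_BASE: 5 registers.
 * {0xc000000f, 0, 0, 0} -> offsets 0x0..0xc plus 0x68 and 0x6c: 6 registers. */
static void check_cache_retention_maps(void)
{
    const uint32_t l1_map[4] = {0x7, 0x0, 0xc000000, 0x0};
    const uint32_t l2_map[4] = {0xc000000f, 0x0, 0x0, 0x0};
    assert(regdma_map_count(l1_map) == 5);  /* L1_CACHE_RETENTION_REGS_CNT */
    assert(regdma_map_count(l2_map) == 6);  /* L2_CACHE_RETENTION_REGS_CNT */
}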
@@ -35,6 +35,7 @@ extern "C" {
#define REGDMA_MODEMLPCON_LINK(_pri) ((0x03 << 8) | _pri)
#define REGDMA_PAU_LINK(_pri) ((0x04 << 8) | _pri)

#define REGDMA_CACHE_LINK(_pri) ((0x0c << 8) | _pri)
#define REGDMA_INTMTX_LINK(_pri) ((0x0d << 8) | _pri)
#define REGDMA_HPSYS_LINK(_pri) ((0x0e << 8) | _pri)
#define REGDMA_TEEAPM_LINK(_pri) ((0x0f << 8) | _pri)