diff --git a/components/app_trace/app_trace_util.c b/components/app_trace/app_trace_util.c
index 6b72b607d5..cce713778b 100644
--- a/components/app_trace/app_trace_util.c
+++ b/components/app_trace/app_trace_util.c
@@ -62,13 +62,21 @@ esp_err_t esp_apptrace_lock_take(esp_apptrace_lock_t *lock, esp_apptrace_tmo_t *
     while (1) {
         //Todo: Replace the current locking mechanism and int_state with portTRY_ENTER_CRITICAL() instead.
         // do not overwrite lock->int_state before we actually acquired the mux
+#if CONFIG_FREERTOS_SMP
+        unsigned int_state = portDISABLE_INTERRUPTS();
+#else
         unsigned int_state = portSET_INTERRUPT_MASK_FROM_ISR();
+#endif
         bool success = spinlock_acquire(&lock->mux, 0);
         if (success) {
             lock->int_state = int_state;
             return ESP_OK;
         }
+#if CONFIG_FREERTOS_SMP
+        portRESTORE_INTERRUPTS(int_state);
+#else
         portCLEAR_INTERRUPT_MASK_FROM_ISR(int_state);
+#endif
         // we can be preempted from this place till the next call (above) to portSET_INTERRUPT_MASK_FROM_ISR()
         res = esp_apptrace_tmo_check(tmo);
         if (res != ESP_OK) {
@@ -85,7 +93,11 @@ esp_err_t esp_apptrace_lock_give(esp_apptrace_lock_t *lock)
     // after call to the following func we can not be sure that lock->int_state
     // is not overwritten by other CPU who has acquired the mux just after we released it. See esp_apptrace_lock_take().
     spinlock_release(&lock->mux);
+#if CONFIG_FREERTOS_SMP
+    portRESTORE_INTERRUPTS(int_state);
+#else
     portCLEAR_INTERRUPT_MASK_FROM_ISR(int_state);
+#endif
     return ESP_OK;
 }
diff --git a/components/esp_pm/pm_impl.c b/components/esp_pm/pm_impl.c
index fb99261fb1..76b8b19768 100644
--- a/components/esp_pm/pm_impl.c
+++ b/components/esp_pm/pm_impl.c
@@ -789,7 +789,11 @@ void esp_pm_impl_init(void)
 void esp_pm_impl_idle_hook(void)
 {
     int core_id = xPortGetCoreID();
+#if CONFIG_FREERTOS_SMP
+    uint32_t state = portDISABLE_INTERRUPTS();
+#else
     uint32_t state = portSET_INTERRUPT_MASK_FROM_ISR();
+#endif
     if (!s_core_idle[core_id]
 #ifdef CONFIG_FREERTOS_USE_TICKLESS_IDLE
         && !periph_should_skip_light_sleep()
@@ -798,7 +802,11 @@ void esp_pm_impl_idle_hook(void)
         esp_pm_lock_release(s_rtos_lock_handle[core_id]);
         s_core_idle[core_id] = true;
     }
+#if CONFIG_FREERTOS_SMP
+    portRESTORE_INTERRUPTS(state);
+#else
     portCLEAR_INTERRUPT_MASK_FROM_ISR(state);
+#endif
     ESP_PM_TRACE_ENTER(IDLE, core_id);
 }
 
@@ -809,7 +817,11 @@ void IRAM_ATTR esp_pm_impl_isr_hook(void)
     /* Prevent higher level interrupts (than the one this function was called from)
      * from happening in this section, since they will also call into esp_pm_impl_isr_hook.
      */
+#if CONFIG_FREERTOS_SMP
+    uint32_t state = portDISABLE_INTERRUPTS();
+#else
     uint32_t state = portSET_INTERRUPT_MASK_FROM_ISR();
+#endif
 #if defined(CONFIG_FREERTOS_SYSTICK_USES_CCOUNT) && (portNUM_PROCESSORS == 2)
     if (s_need_update_ccompare[core_id]) {
         update_ccompare();
@@ -820,7 +832,11 @@ void IRAM_ATTR esp_pm_impl_isr_hook(void)
 #else
     leave_idle();
 #endif // CONFIG_FREERTOS_SYSTICK_USES_CCOUNT && portNUM_PROCESSORS == 2
+#if CONFIG_FREERTOS_SMP
+    portRESTORE_INTERRUPTS(state);
+#else
     portCLEAR_INTERRUPT_MASK_FROM_ISR(state);
+#endif
     ESP_PM_TRACE_EXIT(ISR_HOOK, core_id);
 }
 
diff --git a/components/esp_system/port/arch/xtensa/esp_ipc_isr.c b/components/esp_system/port/arch/xtensa/esp_ipc_isr.c
index 959f791646..20092206a0 100644
--- a/components/esp_system/port/arch/xtensa/esp_ipc_isr.c
+++ b/components/esp_system/port/arch/xtensa/esp_ipc_isr.c
@@ -108,7 +108,11 @@ void esp_ipc_isr_waiting_for_finish_cmd(void* finish_cmd);
 void IRAM_ATTR esp_ipc_isr_stall_other_cpu(void)
 {
     if (s_stall_state == STALL_STATE_RUNNING) {
+#if CONFIG_FREERTOS_SMP
+        BaseType_t intLvl = portDISABLE_INTERRUPTS();
+#else
         BaseType_t intLvl = portSET_INTERRUPT_MASK_FROM_ISR();
+#endif
         const uint32_t cpu_id = xPortGetCoreID();
         if (s_count_of_nested_calls[cpu_id]++ == 0) {
             IPC_ISR_ENTER_CRITICAL();
@@ -119,7 +123,11 @@ void IRAM_ATTR esp_ipc_isr_stall_other_cpu(void)
         }
 
         /* Interrupts are already disabled by the parent, we're nested here. */
+#if CONFIG_FREERTOS_SMP
+        portRESTORE_INTERRUPTS(intLvl);
+#else
         portCLEAR_INTERRUPT_MASK_FROM_ISR(intLvl);
+#endif
     }
 }
 
@@ -130,7 +138,11 @@ void IRAM_ATTR esp_ipc_isr_release_other_cpu(void)
         if (--s_count_of_nested_calls[cpu_id] == 0) {
             esp_ipc_isr_finish_cmd = 1;
             IPC_ISR_EXIT_CRITICAL();
+#if CONFIG_FREERTOS_SMP
+            portRESTORE_INTERRUPTS(s_stored_interrupt_level);
+#else
             portCLEAR_INTERRUPT_MASK_FROM_ISR(s_stored_interrupt_level);
+#endif
         } else if (s_count_of_nested_calls[cpu_id] < 0) {
             assert(0);
         }
diff --git a/components/esp_system/test/test_reset_reason.c b/components/esp_system/test/test_reset_reason.c
index af32cf6242..5dc5585ad1 100644
--- a/components/esp_system/test/test_reset_reason.c
+++ b/components/esp_system/test/test_reset_reason.c
@@ -181,7 +181,11 @@ TEST_CASE_MULTIPLE_STAGES("reset reason ESP_RST_SW after restart from APP CPU",
 static void do_int_wdt(void)
 {
     setup_values();
+#if CONFIG_FREERTOS_SMP
+    BaseType_t prev_level = portDISABLE_INTERRUPTS();
+#else
     BaseType_t prev_level = portSET_INTERRUPT_MASK_FROM_ISR();
+#endif
     (void) prev_level;
     while(1);
 }
diff --git a/components/newlib/stdatomic.c b/components/newlib/stdatomic.c
index f4b2bca371..d98edd34ac 100644
--- a/components/newlib/stdatomic.c
+++ b/components/newlib/stdatomic.c
@@ -39,6 +39,16 @@
 // Single core SoC: atomics can be implemented using portSET_INTERRUPT_MASK_FROM_ISR
 // and portCLEAR_INTERRUPT_MASK_FROM_ISR, which disables and enables interrupts.
 
+#if CONFIG_FREERTOS_SMP
+#define _ATOMIC_ENTER_CRITICAL() ({ \
+    unsigned state = portDISABLE_INTERRUPTS(); \
+    state; \
+})
+
+#define _ATOMIC_EXIT_CRITICAL(state) do { \
+    portRESTORE_INTERRUPTS(state); \
+    } while (0)
+#else // CONFIG_FREERTOS_SMP
 #define _ATOMIC_ENTER_CRITICAL() ({ \
     unsigned state = portSET_INTERRUPT_MASK_FROM_ISR(); \
     state; \
@@ -47,7 +57,7 @@
 #define _ATOMIC_EXIT_CRITICAL(state) do { \
     portCLEAR_INTERRUPT_MASK_FROM_ISR(state); \
     } while (0)
-
+#endif
 #else // SOC_CPU_CORES_NUM
 
 _Static_assert(HAS_ATOMICS_32, "32-bit atomics should be supported if SOC_CPU_CORES_NUM > 1");
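
Every hunk above applies the same substitution: when CONFIG_FREERTOS_SMP is enabled, interrupts are masked with portDISABLE_INTERRUPTS() (which returns the previous mask in the SMP port) and restored with portRESTORE_INTERRUPTS(), while the existing IDF FreeRTOS port keeps using portSET_INTERRUPT_MASK_FROM_ISR() / portCLEAR_INTERRUPT_MASK_FROM_ISR(). A minimal sketch of that pattern follows; the helper macro names PORT_INT_MASK_ENTER / PORT_INT_MASK_EXIT and the example function are hypothetical, for illustration only, and not part of this patch.

/* Illustrative sketch only, assuming the port macros behave as used in the
 * patch above: both variants return the previous interrupt state and accept
 * it back on exit. PORT_INT_MASK_ENTER / PORT_INT_MASK_EXIT are hypothetical
 * names introduced here to show the shared pattern. */
#include <stdint.h>
#include "freertos/FreeRTOS.h"   /* pulls in the port layer macros */

#if CONFIG_FREERTOS_SMP
/* SMP FreeRTOS port: mask/restore interrupts on the current core. */
#define PORT_INT_MASK_ENTER()      portDISABLE_INTERRUPTS()
#define PORT_INT_MASK_EXIT(state)  portRESTORE_INTERRUPTS(state)
#else
/* IDF FreeRTOS port: the *_FROM_ISR variants return/accept the previous mask. */
#define PORT_INT_MASK_ENTER()      portSET_INTERRUPT_MASK_FROM_ISR()
#define PORT_INT_MASK_EXIT(state)  portCLEAR_INTERRUPT_MASK_FROM_ISR(state)
#endif

/* Usage, mirroring the hooks patched above: save the previous interrupt
 * state, run a short critical section, then restore exactly that state. */
static void example_short_critical_section(void)
{
    uint32_t state = PORT_INT_MASK_ENTER();
    /* ... work that must not be preempted on this core ... */
    PORT_INT_MASK_EXIT(state);
}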