fix(freertos/idf): Fix utility function performance

This commit optimizes the following utility functions to achieve higher
overall system performance:

xTaskGetCurrentTaskHandle():
- Made into a standalone function instead of a wrapper call to the "ForCore" variant
- Replaced critical sections with disabling interrupts. The lack of lock contention
improves performance (see the sketch after this message).

xTaskGetCurrentTaskHandleForCore():
- Removed unnecessary critical sections

xTaskGetSchedulerState():
- Replaced critical sections with disabling interrupts. The lack of lock contention
improves performance.
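As an illustrative sketch only (not code from this commit; the prvExampleFetch* function names are hypothetical): a critical section spins on a shared lock that the other core may hold, whereas masking interrupts on the local core never contends, yet is still enough to stop the running task from migrating cores mid-read.

/* Before: take a (possibly contended) spinlock around the read. */
TaskHandle_t prvExampleFetchLocked( void )
{
    TaskHandle_t xReturn;

    taskENTER_CRITICAL( &xKernelLock ); /* may spin while the other core holds the lock */
    {
        xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
    }
    taskEXIT_CRITICAL( &xKernelLock );

    return xReturn;
}

/* After: mask interrupts on the local core only; no lock, no contention. */
TaskHandle_t prvExampleFetchMasked( void )
{
    TaskHandle_t xReturn;
    UBaseType_t uxSavedInterruptStatus;

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}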
Darian Leung 2023-10-19 21:45:08 +08:00
parent b13b9c1163
commit ec1ba78b7c
3 changed files with 60 additions and 31 deletions


@@ -117,6 +117,10 @@ The following functions were modified to accommodate SMP behavior:
   - Updated logic to determine whether sleep is possible in SMP by checking the status of both cores.
 - `prvCheckTasksWaitingTermination()`
   - Updated logic so that we don't delete tasks on `xTasksWaitingTermination` which are still currently running on the other core.
+- `xTaskGetCurrentTaskHandle()`
+  - In SMP, the function now disables interrupts to ensure that the calling task does not switch cores while fetching the current core's TCB.
+- `xTaskGetSchedulerState()`
+  - In SMP, the function now disables interrupts to ensure that the calling task does not switch cores while checking its own copy of `uxSchedulerSuspended`.
 - `prvAddCurrentTaskToDelayedList()`
   - Added extra check to see if current blocking task has already been deleted by the other core.


@@ -68,16 +68,19 @@
 /* Some code sections require extra critical sections when building for SMP
  * ( configNUMBER_OF_CORES > 1 ). */
 #if ( configNUMBER_OF_CORES > 1 )
-    /* Macros that Enter/exit a critical section only when building for SMP */
+    /* Macros that enter/exit a critical section only when building for SMP */
     #define taskENTER_CRITICAL_SMP_ONLY( pxLock )             taskENTER_CRITICAL( pxLock )
     #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )              taskEXIT_CRITICAL( pxLock )
     #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock )         taskENTER_CRITICAL_ISR( pxLock )
     #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )          taskEXIT_CRITICAL_ISR( pxLock )
     #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )        prvTaskEnterCriticalSafeSMPOnly( pxLock )
     #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )         prvTaskExitCriticalSafeSMPOnly( pxLock )
-    /* Macros that Enter/exit a critical section only when building for single-core */
+    /* Macros that enter/exit a critical section only when building for single-core */
     #define taskENTER_CRITICAL_SC_ONLY( pxLock )              taskENTER_CRITICAL( pxLock )
     #define taskEXIT_CRITICAL_SC_ONLY( pxLock )               taskEXIT_CRITICAL( pxLock )
+    /* Macros that enable/disable interrupts only when building for SMP */
+    #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY()             portSET_INTERRUPT_MASK_FROM_ISR()
+    #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus )

     static inline __attribute__( ( always_inline ) )
     void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
@@ -114,16 +117,20 @@
     }
 #else /* configNUMBER_OF_CORES > 1 */
-    /* Macros that Enter/exit a critical section only when building for SMP */
+    /* Macros that enter/exit a critical section only when building for SMP */
     #define taskENTER_CRITICAL_SMP_ONLY( pxLock )
     #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )
     #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock )
     #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )
     #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
     #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
-    /* Macros that Enter/exit a critical section only when building for single-core */
+    /* Macros that enter/exit a critical section only when building for single-core */
     #define taskENTER_CRITICAL_SC_ONLY( pxLock )              taskENTER_CRITICAL( pxLock )
     #define taskEXIT_CRITICAL_SC_ONLY( pxLock )               taskEXIT_CRITICAL( pxLock )
+    /* Macros that enable/disable interrupts only when building for SMP */
+    #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY()             ( ( UBaseType_t ) 0 )
+    #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    ( ( void ) uxStatus )
 #endif /* configNUMBER_OF_CORES > 1 */

 #if ( configUSE_PREEMPTION == 0 )
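A note on the new macro pair: following the existing *_SMP_ONLY convention, the SMP build masks interrupts while the single-core build reduces to constants, so a shared call site costs nothing on single-core. A minimal sketch of the intended call pattern (prvExampleRead and uxExamplePerCoreData are hypothetical):

static UBaseType_t prvExampleRead( void )
{
    UBaseType_t uxValue;
    UBaseType_t uxSavedInterruptStatus;

    /* Expands to portSET_INTERRUPT_MASK_FROM_ISR() on SMP,
     * and to ( ( UBaseType_t ) 0 ) on single-core. */
    uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY();
    {
        uxValue = uxExamplePerCoreData[ portGET_CORE_ID() ];
    }
    /* Expands to portCLEAR_INTERRUPT_MASK_FROM_ISR() on SMP,
     * and to ( ( void ) uxStatus ) on single-core. */
    taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );

    return uxValue;
}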
@@ -4832,7 +4839,24 @@ static void prvResetNextTaskUnblockTime( void )
     TaskHandle_t xTaskGetCurrentTaskHandle( void )
     {
-        return xTaskGetCurrentTaskHandleForCore( portGET_CORE_ID() );
+        TaskHandle_t xReturn;
+        UBaseType_t uxSavedInterruptStatus;
+
+        /* For SMP, we need to disable interrupts to ensure the caller does not
+         * switch cores in between portGET_CORE_ID() and fetching the current
+         * core's TCB. We use the ISR versions of interrupt macros as this
+         * function could be called inside critical sections.
+         *
+         * For single-core, a critical section is not required as this is not
+         * called from an interrupt and the current TCB will always be the same
+         * for any individual execution thread. */
+        uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY();
+        {
+            xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
+        }
+        taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
+
+        return xReturn;
     }

 #endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
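A usage sketch, assuming an ordinary application task (vExampleTask is hypothetical): the function can now be called from a task, or from within a critical section, without risk of reading another core's TCB.

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static void vExampleTask( void * pvParameters )
{
    ( void ) pvParameters;

    /* Returns the handle of the task running on the caller's current core,
     * i.e. this task's own handle. */
    TaskHandle_t xSelf = xTaskGetCurrentTaskHandle();

    configASSERT( xSelf != NULL );

    for( ;; )
    {
        vTaskDelay( pdMS_TO_TICKS( 1000 ) );
    }
}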
@@ -4843,10 +4867,18 @@ static void prvResetNextTaskUnblockTime( void )
 BaseType_t xTaskGetSchedulerState( void )
 {
     BaseType_t xReturn;
+    UBaseType_t uxSavedInterruptStatus;

-    /* For SMP, we need to take the kernel lock here as we are about to
-     * access kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    /* For SMP, we need to disable interrupts here to ensure we don't switch
+     * cores midway. We forego taking the kernel lock here as a minor
+     * optimization as it is not required.
+     *
+     * - xSchedulerRunning is only ever set by core 0 atomically
+     * - Each core will only ever update its own copy of uxSchedulerSuspended.
+     *
+     * We use the ISR versions of interrupt macros as this function could be
+     * called inside critical sections. */
+    uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY();
     {
         if( xSchedulerRunning == pdFALSE )
         {
@@ -4864,8 +4896,7 @@ static void prvResetNextTaskUnblockTime( void )
             }
         }
     }
-    /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );

     return xReturn;
 }
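A usage sketch of the unchanged external behavior (prvExampleSend and xExampleQueue are hypothetical): only block on a kernel call when the scheduler is actually running.

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/queue.h"

/* Assumed to be created elsewhere. */
extern QueueHandle_t xExampleQueue;

static BaseType_t prvExampleSend( const void * pvItem )
{
    if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING )
    {
        /* Safe to block while waiting for queue space. */
        return xQueueSend( xExampleQueue, pvItem, portMAX_DELAY );
    }
    else
    {
        /* Scheduler not started or suspended: fail fast instead of blocking. */
        return xQueueSend( xExampleQueue, pvItem, 0 );
    }
}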


@@ -482,17 +482,11 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask )
         configASSERT( xCoreID < configNUMBER_OF_CORES );
         configASSERT( xCoreID != tskNO_AFFINITY );

-        /* For SMP, we need to take the kernel lock here as we are about to
-         * access kernel data structures. For single core, a critical section is
-         * not required as this is not called from an interrupt and the current
-         * TCB will always be the same for any individual execution thread. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
-        {
-            xReturn = pxCurrentTCBs[ xCoreID ];
-        }
-        /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        /* A critical section is not required as this function does not
+         * guarantee that the TCB will still be valid when this function
+         * returns. */
+        xReturn = pxCurrentTCBs[ xCoreID ];
     }
 #endif /* CONFIG_FREERTOS_SMP */

     return xReturn;
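With the lock gone, the returned handle is only a snapshot. A sketch of the resulting caller-side contract, assuming a dual-core target (vExampleInspect is hypothetical):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static void vExampleInspect( void )
{
    /* Snapshot of whatever the other core was running at the instant of the
     * read; it may be stale (or the task deleted) by the time it is used. */
    TaskHandle_t xOther = xTaskGetCurrentTaskHandleForCore( 1 - xPortGetCoreID() );

    if( xOther != NULL )
    {
        /* Treat the result as advisory only. */
        ( void ) pcTaskGetName( xOther );
    }
}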
@@ -861,7 +855,7 @@ uint8_t * pxTaskGetStackStart( TaskHandle_t xTask )
 struct _reent * __getreent( void )
 {
     /* No lock needed because if this changes, we won't be running anymore. */
-    TCB_t * pxCurTask = xTaskGetCurrentTaskHandle();
+    TCB_t * pxCurTask = ( TCB_t * ) xTaskGetCurrentTaskHandle();
     struct _reent * ret;

     if( pxCurTask == NULL )