freertos: Synchronize tasks.c functions (major changes) to v10.4.3

This commit synchronizes multiple functions that contain major differences from
upstream v10.4.3. The multi-core modifications are then reapplied to those functions.

Vanilla code uses "pxTCB == pxCurrentTCB" to check whether a task is currently
running. This commit adds the following macros instead.

- taskIS_CURRENTLY_RUNNING()
- taskIS_CURRENTLY_RUNNING_ON_CORE()

The following functions have been updated to use these macros; a short usage
sketch follows the list.

- eTaskGetState()
- vTaskPrioritySet()
- vTaskResume()
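
For illustration, a minimal sketch of the check before and after this change
(the macro definitions appear in the diff below; xPortGetCoreID() returns the
calling core's ID):

    /* Vanilla upstream (single core): a task is running only if it is pxCurrentTCB. */
    if( pxTCB == pxCurrentTCB )
    {
        /* ... */
    }

    /* With the new macros, the same intent covers both cores on SMP builds. */
    if( taskIS_CURRENTLY_RUNNING( pxTCB ) )
    {
        /* pxTCB is running on either core. */
    }

    if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xPortGetCoreID() ) )
    {
        /* pxTCB is running on the calling core specifically. */
    }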

The following functions have been synchronized with upstream v10.4.3, with the
multi-core modifications reapplied afterwards (see the sketch after this list).

- vTaskSuspend()
- vTaskDelete()
- prvGetExpectedIdleTime()
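
As an example of how the multi-core modifications are reapplied on top of the
upstream code, the reapplied changes obtain the current core ID through a small
conditional block (shown here as it appears in the vTaskDelete() hunk below)
that collapses to core 0 on single-core builds:

    BaseType_t xCurCoreID;
    #if ( configNUM_CORES > 1 )
        xCurCoreID = xPortGetCoreID();
    #else
        xCurCoreID = 0;
        ( void ) xCurCoreID;
    #endif
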
Author: Darian Leung, 2022-08-24 16:25:21 +08:00
Parent: 15d8fc0722
Commit: 5742c194e1


@@ -259,6 +259,15 @@ extern void esp_vApplicationIdleHook(void);
#define taskCAN_RUN_ON_CORE( xCore, xCoreID ) ( pdTRUE )
#endif /* configNUM_CORES > 1 */
/* Check if a task is a currently running task. */
#if ( configNUM_CORES > 1 )
#define taskIS_CURRENTLY_RUNNING( pxTCB ) ( ( ( pxTCB ) == pxCurrentTCB[ 0 ] ) || ( ( pxTCB ) == pxCurrentTCB[ 1 ] ) )
#define taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCoreID ) ( ( pxTCB ) == pxCurrentTCB[ ( xCoreID ) ] )
#else
#define taskIS_CURRENTLY_RUNNING( pxTCB ) ( ( pxTCB ) == pxCurrentTCB[ 0 ] )
#define taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCoreID ) taskIS_CURRENTLY_RUNNING( ( pxTCB ) )
#endif /* configNUM_CORES > 1 */
/*
* Several functions take a TaskHandle_t parameter that can optionally be NULL,
* where NULL is used to indicate that the handle of the currently executing
@@ -1363,14 +1372,17 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
void vTaskDelete( TaskHandle_t xTaskToDelete )
{
TCB_t * pxTCB;
TCB_t * curTCB;
BaseType_t core;
BaseType_t xFreeNow = 0;
BaseType_t xFreeNow;
taskENTER_CRITICAL( &xKernelLock );
{
core = xPortGetCoreID();
curTCB = pxCurrentTCB[core];
BaseType_t xCurCoreID;
#if ( configNUM_CORES > 1 )
xCurCoreID = xPortGetCoreID();
#else
xCurCoreID = 0;
( void ) xCurCoreID;
#endif
/* If null is passed in here then it is the calling task that is
* being deleted. */
@@ -1402,12 +1414,19 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
* not return. */
uxTaskNumber++;
if( pxTCB == curTCB ||
/* in SMP, we also can't immediately delete the task active on the other core */
(configNUM_CORES > 1 && pxTCB == pxCurrentTCB[ !core ]) ||
/* ... and we can't delete a non-running task pinned to the other core, as
FPU cleanup has to happen on the same core */
(configNUM_CORES > 1 && pxTCB->xCoreID == (!core)) )
/*
 * We cannot immediately delete a task that is
 * - Currently running on either core
 * - Not currently running but pinned to the other core (as FPU cleanup must happen on the same core)
 * Todo: Allow deletion of tasks pinned to other core (IDF-5803)
 */
#if ( configNUM_CORES > 1 )
xFreeNow = ( taskIS_CURRENTLY_RUNNING( pxTCB ) || ( pxTCB->xCoreID == !xCurCoreID ) ) ? pdFALSE : pdTRUE;
#else
xFreeNow = ( taskIS_CURRENTLY_RUNNING( pxTCB ) ) ? pdFALSE : pdTRUE;
#endif /* configNUM_CORES > 1 */
if( xFreeNow == pdFALSE )
{
/* A task is deleting itself. This cannot complete within the
* task itself, as a context switch to another task is required.
@@ -1421,43 +1440,47 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
* check the xTasksWaitingTermination list. */
++uxDeletedTasksWaitingCleanUp;
/* Call the delete hook before portPRE_TASK_DELETE_HOOK() as
* portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */
traceTASK_DELETE( pxTCB );
/* The pre-delete hook is primarily for the Windows simulator,
* in which Windows specific clean up operations are performed,
* after which it is not possible to yield away from this task -
* hence xYieldPending is used to latch that a context switch is
* required. */
portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[core] );
portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[ xCurCoreID ] );
if (configNUM_CORES > 1 && pxTCB == pxCurrentTCB[ !core ])
{
/* SMP case of deleting a task running on a different core. Same issue
as a task deleting itself, but we need to send a yield to this task now
before we release xKernelLock.
#if ( configNUM_CORES > 1 )
if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, !xCurCoreID ) )
{
/* SMP case of deleting a task running on a different core. Same issue
as a task deleting itself, but we need to send a yield to this task now
before we release xKernelLock.
Specifically there is a case where the other core may already be spinning on
xKernelLock waiting to go into a blocked state. A check is added in
prvAddCurrentTaskToDelayedList() to prevent it from removing itself from
xTasksWaitingTermination list in this case (instead it will immediately
release xKernelLock again and be yielded before the FreeRTOS function
returns.) */
vPortYieldOtherCore( !core );
}
Specifically there is a case where the other core may already be spinning on
xKernelLock waiting to go into a blocked state. A check is added in
prvAddCurrentTaskToDelayedList() to prevent it from removing itself from
xTasksWaitingTermination list in this case (instead it will immediately
release xKernelLock again and be yielded before the FreeRTOS function
returns.) */
vPortYieldOtherCore( !xCurCoreID );
}
#endif /* configNUM_CORES > 1 */
}
else
{
--uxCurrentNumberOfTasks;
xFreeNow = pdTRUE;
traceTASK_DELETE( pxTCB );
/* Reset the next expected unblock time in case it referred to
* the task that has just been deleted. */
prvResetNextTaskUnblockTime();
}
traceTASK_DELETE( pxTCB );
}
taskEXIT_CRITICAL( &xKernelLock );
if(xFreeNow == pdTRUE) {
if( xFreeNow == pdTRUE ) {
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
prvDeleteTLS( pxTCB );
#endif
@@ -1469,7 +1492,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
* been deleted. */
if( xSchedulerRunning != pdFALSE )
{
if( pxTCB == curTCB )
taskENTER_CRITICAL( &xKernelLock );
if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xPortGetCoreID() ) )
{
configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
portYIELD_WITHIN_API();
@@ -1478,6 +1502,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
{
mtCOVERAGE_TEST_MARKER();
}
taskEXIT_CRITICAL( &xKernelLock );
}
}
@@ -1649,18 +1674,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
configASSERT( pxTCB );
taskENTER_CRITICAL( &xKernelLock ); //Need critical section in case either core context switches in between
if( pxTCB == pxCurrentTCB[xPortGetCoreID()])
if( taskIS_CURRENTLY_RUNNING( pxTCB ) )
{
/* The task calling this function is querying its own state. */
eReturn = eRunning;
}
#if (configNUM_CORES > 1)
else if (pxTCB == pxCurrentTCB[!xPortGetCoreID()])
{
/* The task calling this function is querying its own state. */
eReturn = eRunning;
}
#endif
else
{
pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
@@ -1847,7 +1865,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
* priority than the calling task. */
if( uxNewPriority > uxCurrentBasePriority )
{
if( pxTCB != pxCurrentTCB[ xPortGetCoreID() ] )
if( !taskIS_CURRENTLY_RUNNING( pxTCB ) )
{
/* The priority of a task other than the currently
* running task is being raised. Is the priority being
@@ -1868,13 +1886,22 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
* priority task able to run so no yield is required. */
}
}
else if( pxTCB == pxCurrentTCB[ xPortGetCoreID() ] )
else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, 0 ) )
{
/* Setting the priority of the running task down means
* there may now be another task of higher priority that
* is ready to execute. */
xYieldRequired = pdTRUE;
}
#if ( configNUM_CORES > 1 )
else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, 1 ) )
{
/* Setting the priority of the running task on the other
* core down means there may now be another task of
* higher priority that is ready to execute. */
vPortYieldOtherCore( 1 );
}
#endif /* configNUM_CORES > 1 */
else
{
/* Setting the priority of any other task down does not
@@ -1973,7 +2000,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
void vTaskSuspend( TaskHandle_t xTaskToSuspend )
{
TCB_t * pxTCB;
TCB_t * curTCB;
taskENTER_CRITICAL( &xKernelLock );
{
@@ -2005,7 +2031,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
}
vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );
curTCB = pxCurrentTCB[ xPortGetCoreID() ];
#if ( configUSE_TASK_NOTIFICATIONS == 1 )
{
@@ -2022,76 +2047,70 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
}
}
#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
}
taskEXIT_CRITICAL( &xKernelLock );
if( xSchedulerRunning != pdFALSE )
{
/* Reset the next expected unblock time in case it referred to the
* task that is now in the Suspended state. */
taskENTER_CRITICAL( &xKernelLock );
{
prvResetNextTaskUnblockTime();
}
taskEXIT_CRITICAL( &xKernelLock );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( pxTCB == curTCB )
{
if( xSchedulerRunning != pdFALSE )
{
/* The current task has just been suspended. */
taskENTER_CRITICAL( &xKernelLock );
BaseType_t suspended = uxSchedulerSuspended[xPortGetCoreID()];
taskEXIT_CRITICAL( &xKernelLock );
configASSERT( suspended == 0 );
(void)suspended;
portYIELD_WITHIN_API();
}
else
{
/* The scheduler is not running, but the task that was pointed
* to by pxCurrentTCB has just been suspended and pxCurrentTCB
* must be adjusted to point to a different task. */
if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */
{
/* No other tasks are ready, so set pxCurrentTCB back to
* NULL so when the next task is created pxCurrentTCB will
* be set to point to it no matter what its relative priority
* is. */
taskENTER_CRITICAL( &xKernelLock );
pxCurrentTCB[ xPortGetCoreID() ] = NULL;
taskEXIT_CRITICAL( &xKernelLock );
}
else
{
vTaskSwitchContext();
}
}
}
else
{
if( xSchedulerRunning != pdFALSE )
{
/* A task other than the currently running task was suspended,
* reset the next expected unblock time in case it referred to the
/* Reset the next expected unblock time in case it referred to the
* task that is now in the Suspended state. */
taskENTER_CRITICAL( &xKernelLock );
{
prvResetNextTaskUnblockTime();
}
taskEXIT_CRITICAL( &xKernelLock );
prvResetNextTaskUnblockTime();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xPortGetCoreID() ) )
{
if( xSchedulerRunning != pdFALSE )
{
/* The current task has just been suspended. */
configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
portYIELD_WITHIN_API();
}
else
{
/* The scheduler is not running, but the task that was pointed
* to by pxCurrentTCB has just been suspended and pxCurrentTCB
* must be adjusted to point to a different task. */
if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */
{
/* No other tasks are ready, so set pxCurrentTCB back to
* NULL so when the next task is created pxCurrentTCB will
* be set to point to it no matter what its relative priority
* is. */
pxCurrentTCB[ xPortGetCoreID() ] = NULL;
}
else
{
vTaskSwitchContext();
}
}
}
#if ( configNUM_CORES > 1 )
else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, !xPortGetCoreID() ) )
{
/* The other core's current task has just been suspended */
if( xSchedulerRunning != pdFALSE )
{
vPortYieldOtherCore( !xPortGetCoreID() );
}
else
{
/* The scheduler is not running, but the task that was pointed
* to by pxCurrentTCB[ otherCore ] has just been suspended.
* We simply set the pxCurrentTCB[ otherCore ] to NULL for now.
* Todo: Update vTaskSwitchContext() to be runnable on
* behalf of the other core. */
pxCurrentTCB[ !xPortGetCoreID() ] = NULL;
}
}
#endif /* configNUM_CORES > 1 */
else
{
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL( &xKernelLock );
}
#endif /* INCLUDE_vTaskSuspend */
@@ -2157,7 +2176,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
{
/* The parameter cannot be NULL as it is impossible to resume the
* currently executing task. */
if( ( pxTCB != pxCurrentTCB[xPortGetCoreID()] ) && ( pxTCB != NULL ) )
if( !taskIS_CURRENTLY_RUNNING( pxTCB ) && ( pxTCB != NULL ) )
{
if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
{
@@ -2457,47 +2476,45 @@ void vTaskSuspendAll( void )
#if ( configUSE_TICKLESS_IDLE != 0 )
#if ( configNUM_CORES > 1 )
static BaseType_t xHaveReadyTasks( void )
{
for (int i = tskIDLE_PRIORITY + 1; i < configMAX_PRIORITIES; ++i)
{
if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ i ] ) ) > 0 )
{
return pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
return pdFALSE;
}
#endif // configNUM_CORES > 1
static TickType_t prvGetExpectedIdleTime( void )
{
TickType_t xReturn;
TickType_t xReturn;
UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;
/* We need a critical section here as we are about to access kernel data structures */
taskENTER_CRITICAL( &xKernelLock );
/* uxHigherPriorityReadyTasks takes care of the case where
* configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
* task that are in the Ready state, even though the idle task is
* running. */
#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
{
if( uxTopReadyPriority > tskIDLE_PRIORITY )
{
uxHigherPriorityReadyTasks = pdTRUE;
}
}
#else
{
const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;
/* When port optimised task selection is used the uxTopReadyPriority
* variable is used as a bit map. If bits other than the least
* significant bit are set then there are tasks that have a priority
* above the idle priority that are in the Ready state. This takes
* care of the case where the co-operative scheduler is in use. */
if( uxTopReadyPriority > uxLeastSignificantBit )
{
uxHigherPriorityReadyTasks = pdTRUE;
}
}
#endif /* if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) */
if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority > tskIDLE_PRIORITY )
{
xReturn = 0;
}
#if configNUM_CORES > 1
/* This function is called from the Idle task; in the single-core case this
 * means that no higher priority tasks are ready to run, and we can
 * enter sleep. In the SMP case, there might be ready tasks waiting for
 * the other CPU, so we need to check all ready lists.
 */
else if( xHaveReadyTasks() )
{
xReturn = 0;
}
#endif // configNUM_CORES > 1
else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > configNUM_CORES )
{
/* There are other idle priority tasks in the ready state. If
@@ -2505,10 +2522,18 @@ void vTaskSuspendAll( void )
* processed. */
xReturn = 0;
}
else if( uxHigherPriorityReadyTasks != pdFALSE )
{
/* There are tasks in the Ready state that have a priority above the
* idle priority. This path can only be reached if
* configUSE_PREEMPTION is 0. */
xReturn = 0;
}
else
{
xReturn = xNextTaskUnblockTime - xTickCount;
}
taskEXIT_CRITICAL( &xKernelLock );
return xReturn;