Merge branch 'zim-marking-vtasksuspendall' into 'master'

freertos: clearly marking vTaskSuspendAll and a few other small cleanups

See merge request espressif/esp-idf!15048
Zim Kalinowski 2021-09-06 06:01:28 +00:00
commit cbb087ae9c
9 changed files with 472 additions and 53 deletions

View File

@ -217,6 +217,9 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
{
EventBits_t uxOriginalBitValue, uxReturn;
EventGroup_t * pxEventBits = xEventGroup;
#ifndef ESP_PLATFORM
BaseType_t xAlreadyYielded;
#endif // ESP_PLATFORM
BaseType_t xTimeoutOccurred = pdFALSE;
configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
@ -227,7 +230,11 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
}
#endif
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{
uxOriginalBitValue = pxEventBits->uxEventBits;
@ -270,11 +277,26 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
}
}
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
xAlreadyYielded = xTaskResumeAll();
#endif // ESP_PLATFORM
if( xTicksToWait != ( TickType_t ) 0 )
{
#ifdef ESP_PLATFORM
portYIELD_WITHIN_API();
#else
if( xAlreadyYielded == pdFALSE )
{
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
#endif // ESP_PLATFORM
/* The task blocked to wait for its required bits to be set - at this
* point either the required bits were set or the block time expired. If
@ -333,7 +355,11 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
{
EventGroup_t * pxEventBits = xEventGroup;
EventBits_t uxReturn, uxControlBits = 0;
#ifdef ESP_PLATFORM
BaseType_t xWaitConditionMet;
#else
BaseType_t xWaitConditionMet, xAlreadyYielded;
#endif // ESP_PLATFORM
BaseType_t xTimeoutOccurred = pdFALSE;
/* Check the user is not attempting to wait on the bits used by the kernel
@ -347,7 +373,11 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
}
#endif
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{
const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
@ -415,11 +445,26 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
}
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
xAlreadyYielded = xTaskResumeAll();
#endif // ESP_PLATFORM
if( xTicksToWait != ( TickType_t ) 0 )
{
#ifdef ESP_PLATFORM
portYIELD_WITHIN_API();
#else
if( xAlreadyYielded == pdFALSE )
{
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
#endif // ESP_PLATFORM
/* The task blocked to wait for its required bits to be set - at this
* point either the required bits were set or the block time expired. If
@ -551,7 +596,11 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
pxList = &( pxEventBits->xTasksWaitingForBits );
pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
@ -623,7 +672,11 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
* bit was set in the control word. */
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
return pxEventBits->uxEventBits;
}
@ -636,6 +689,7 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
traceEVENT_GROUP_DELETE( xEventGroup );
// IDF-3755
taskENTER_CRITICAL();
{
while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
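A minimal sketch of the guard pattern this MR marks throughout the kernel sources, shown here for reference only: on ESP_PLATFORM the walk over shared kernel data sits inside a critical section (the argument-free form used by these kernel files), while the upstream single-core path keeps the vTaskSuspendAll()/xTaskResumeAll() pair. The function and variable names below are illustrative, not part of the commit.

static volatile UBaseType_t uxExampleSharedState = 0;

static void prvExampleGuardedUpdate( void )
{
#ifdef ESP_PLATFORM // IDF-3755
    taskENTER_CRITICAL();        /* SMP-safe: kernel spinlock plus interrupt mask */
#else
    vTaskSuspendAll();           /* single core: only prevent context switches */
#endif // ESP_PLATFORM
    {
        uxExampleSharedState++;  /* touch the shared structure safely */
    }
#ifdef ESP_PLATFORM // IDF-3755
    taskEXIT_CRITICAL();
#else
    ( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
}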

View File

@ -210,6 +210,21 @@ BaseType_t xPortStartScheduler( void ) PRIVILEGED_FUNCTION;
*/
void vPortEndScheduler( void ) PRIVILEGED_FUNCTION;
/*
* The structures and methods of manipulating the MPU are contained within the
* port layer.
*
* Fills the xMPUSettings structure with the memory region information
* contained in xRegions.
*/
#if ( portUSING_MPU_WRAPPERS == 1 )
struct xMEMORY_REGION;
void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
const struct xMEMORY_REGION * const xRegions,
StackType_t * pxBottomOfStack,
uint32_t ulStackDepth ) PRIVILEGED_FUNCTION;
#endif
/* *INDENT-OFF* */
#ifdef __cplusplus
}

View File

@ -312,6 +312,16 @@ typedef QueueHandle_t SemaphoreHandle_t;
#define xSemaphoreTake( xSemaphore, xBlockTime ) xQueueSemaphoreTake( ( xSemaphore ), ( xBlockTime ) )
/**
* @cond
* semphr. h
* @code{c}
* xSemaphoreTakeRecursive(
* SemaphoreHandle_t xMutex,
* TickType_t xBlockTime
* );
* @endcode
* @endcond
*
* <i>Macro</i> to recursively obtain, or 'take', a mutex type semaphore.
* The mutex must have previously been created using a call to
* xSemaphoreCreateRecursiveMutex();
@ -400,6 +410,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
*/
#define xSemaphoreTakeRecursive( xMutex, xBlockTime ) xQueueTakeMutexRecursive( ( xMutex ), ( xBlockTime ) )
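A short usage sketch of the recursive take/give pair documented above, assuming the standard semphr.h API; vExampleRecursiveAccess and the 10 ms block time are illustrative.

void vExampleRecursiveAccess( SemaphoreHandle_t xRecursiveMutex )
{
    /* xRecursiveMutex was created earlier with xSemaphoreCreateRecursiveMutex(). */
    if( xSemaphoreTakeRecursive( xRecursiveMutex, pdMS_TO_TICKS( 10 ) ) == pdTRUE )
    {
        /* The holder may take the same mutex again without blocking... */
        ( void ) xSemaphoreTakeRecursive( xRecursiveMutex, 0 );

        /* ...but must give it back once per successful take before any
         * other task can obtain it. */
        xSemaphoreGiveRecursive( xRecursiveMutex );
        xSemaphoreGiveRecursive( xRecursiveMutex );
    }
}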
#ifdef ESP_PLATFORM // IDF-3814
/** @cond */
/*
* xSemaphoreAltTake() is an alternative version of xSemaphoreTake().
@ -415,7 +426,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
*/
#define xSemaphoreAltTake( xSemaphore, xBlockTime ) xQueueAltGenericReceive( ( QueueHandle_t ) ( xSemaphore ), NULL, ( xBlockTime ), pdFALSE )
/** @endcond */
#endif // ESP_PLATFORM
/**
* <i>Macro</i> to release a semaphore. The semaphore must have previously been
* created with a call to xSemaphoreCreateBinary(), xSemaphoreCreateMutex() or
@ -568,6 +579,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
*/
#define xSemaphoreGiveRecursive( xMutex ) xQueueGiveMutexRecursive( ( xMutex ) )
#ifdef ESP_PLATFORM // IDF-3814
/** @cond */
/*
* xSemaphoreAltGive() is an alternative version of xSemaphoreGive().
@ -584,6 +596,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#define xSemaphoreAltGive( xSemaphore ) xQueueAltGenericSend( ( QueueHandle_t ) ( xSemaphore ), NULL, semGIVE_BLOCK_TIME, queueSEND_TO_BACK )
/** @endcond */
#endif // ESP_PLATFORM
/**
* <i>Macro</i> to release a semaphore. The semaphore must have previously been

View File

@ -1,3 +1,28 @@
/*
* FreeRTOS Kernel V10.4.3
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* https://www.FreeRTOS.org
* https://github.com/FreeRTOS
*
*/
#ifndef FREERTOS_STDINT
#define FREERTOS_STDINT
@ -10,7 +35,7 @@
* To use this file:
*
* 1) Copy this file into the directory that contains your FreeRTOSConfig.h
* header file, as that directory will already be in the compilers include
* header file, as that directory will already be in the compiler's include
* path.
*
* 2) Rename the copied file stdint.h.

View File

@ -613,7 +613,16 @@ typedef enum
}
#endif /* configSUPPORT_STATIC_ALLOCATION */
/*
/**
* @cond
* task. h
* @code{c}
* BaseType_t xTaskCreateRestricted( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );
* @endcode
* @endcond
*
* Only available when configSUPPORT_DYNAMIC_ALLOCATION is set to 1.
*
* xTaskCreateRestricted() should only be used in systems that include an MPU
* implementation.
*
@ -686,12 +695,19 @@ typedef enum
TaskHandle_t * pxCreatedTask );
#endif
/*
* xTaskCreateRestrictedStatic() should only be used in systems that include an
* MPU implementation.
/**
* @cond
* task. h
* @code{c}
* BaseType_t xTaskCreateRestrictedStatic( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );
* @endcode
* @endcond
*
* Only available when configSUPPORT_STATIC_ALLOCATION is set to 1.
*
* xTaskCreateRestrictedStatic() should only be used in systems that include an
* MPU implementation.
*
* Internally, within the FreeRTOS implementation, tasks use two blocks of
* memory. The first block is used to hold the task's data structures. The
* second block is used by the task as its stack. If a task is created using
@ -770,7 +786,7 @@ typedef enum
*/
#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition,
TaskHandle_t * pxCreatedTask );
TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION;
#endif
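A hedged usage sketch for the restricted-task API declared above, modelled on the standard FreeRTOS MPU example. The three-entry xRegions array, portMPU_REGION_READ_WRITE, stack alignment requirements and all names are illustrative and port dependent.

extern void vATaskFunction( void * pvParameters );    /* illustrative task body */

static StackType_t xRestrictedTaskStack[ 128 ];        /* alignment is port specific */
static uint8_t ucSharedMemory[ 32 ];

static const TaskParameters_t xRestrictedTaskParameters =
{
    vATaskFunction,           /* pvTaskCode */
    "Restricted",             /* pcName */
    128,                      /* usStackDepth (in words) */
    NULL,                     /* pvParameters */
    tskIDLE_PRIORITY + 1,     /* uxPriority */
    xRestrictedTaskStack,     /* puxStackBuffer */
    {                         /* xRegions: base address, length, parameters */
        { ucSharedMemory, sizeof( ucSharedMemory ), portMPU_REGION_READ_WRITE },
        { NULL,           0,                        0                         },
        { NULL,           0,                        0                         }
    }
};

void vStartRestrictedTask( void )
{
    TaskHandle_t xHandle = NULL;

    if( xTaskCreateRestricted( &xRestrictedTaskParameters, &xHandle ) != pdPASS )
    {
        /* Not enough heap for the TCB, or the parameters were rejected. */
    }
}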
/**
@ -828,8 +844,12 @@ void vTaskAllocateMPURegions( TaskHandle_t xTask,
const MemoryRegion_t * const pxRegions ) PRIVILEGED_FUNCTION;
/**
* Remove a task from the RTOS real time kernel's management. The task being
* deleted will be removed from all ready, blocked, suspended and event lists.
* @cond
* task. h
* @code{c}
* void vTaskDelete( TaskHandle_t xTask );
* @endcode
* @endcond
*
* INCLUDE_vTaskDelete must be defined as 1 for this function to be available.
* See the configuration section for more information.
@ -1245,7 +1265,12 @@ void vTaskPrioritySet( TaskHandle_t xTask,
UBaseType_t uxNewPriority ) PRIVILEGED_FUNCTION;
/**
* Suspend a task.
* @cond
* task. h
* @code{c}
* void vTaskSuspend( TaskHandle_t xTaskToSuspend );
* @endcode
* @endcond
*
* INCLUDE_vTaskSuspend must be defined as 1 for this function to be available.
* See the configuration section for more information.
@ -1352,11 +1377,18 @@ void vTaskSuspend( TaskHandle_t xTaskToSuspend ) PRIVILEGED_FUNCTION;
void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
/**
* An implementation of vTaskResume() that can be called from within an ISR.
* @cond
* task. h
* @code{c}
* void xTaskResumeFromISR( TaskHandle_t xTaskToResume );
* @endcode
* @endcond
*
* INCLUDE_xTaskResumeFromISR must be defined as 1 for this function to be
* available. See the configuration section for more information.
*
* An implementation of vTaskResume() that can be called from within an ISR.
*
* A task that has been suspended by one or more calls to vTaskSuspend ()
* will be made available for running again by a single call to
* xTaskResumeFromISR ().
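A hedged ISR-side sketch of the xTaskResumeFromISR() flow described above; the handle and handler name are illustrative, and the exact yield-from-ISR macro form is port specific.

static TaskHandle_t xHandlerTask = NULL;   /* the task suspended with vTaskSuspend() */

void vExampleInterruptHandler( void )
{
    BaseType_t xYieldRequired;

    /* Make the suspended task ready again from interrupt context. */
    xYieldRequired = xTaskResumeFromISR( xHandlerTask );

    if( xYieldRequired == pdTRUE )
    {
        /* Request a context switch before the ISR returns so the resumed
         * task runs immediately. */
        portYIELD_FROM_ISR();
    }
}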
@ -1384,14 +1416,20 @@ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
*----------------------------------------------------------*/
/** @cond */
/**
* Starts the real time kernel tick processing.
* @cond
* task. h
* @code{c}
* void vTaskStartScheduler( void );
* @endcode
* @endcond
*
* Starts the real time kernel tick processing. After calling the kernel
* has control over which tasks are executed and when.
* NOTE: In ESP-IDF the scheduler is started automatically during
* application startup, vTaskStartScheduler() should not be called from
* ESP-IDF applications.
*
* After calling the kernel has control over which tasks are executed and when.
*
* See the demo application file main.c for an example of creating
* tasks and starting the kernel.
*
@ -1417,7 +1455,12 @@ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
void vTaskStartScheduler( void ) PRIVILEGED_FUNCTION;
/**
* Stops the real time kernel tick.
* @cond
* task. h
* @code{c}
* void vTaskEndScheduler( void );
* @endcode
* @endcond
*
* NOTE: At the time of writing only the x86 real mode port, which runs on a PC
* in place of DOS, implements this function.
@ -1476,9 +1519,15 @@ void vTaskEndScheduler( void ) PRIVILEGED_FUNCTION;
/** @endcond */
/**
* Suspends the scheduler without disabling interrupts.
* @cond
* task. h
* @code{c}
* void vTaskSuspendAll( void );
* @endcode
* @endcond
*
* Context switches will not occur while the scheduler is suspended.
* Suspends the scheduler without disabling interrupts. Context switches will
* not occur while the scheduler is suspended.
*
* After calling vTaskSuspendAll () the calling task will continue to execute
* without risk of being swapped out until a call to xTaskResumeAll () has been
@ -1591,7 +1640,12 @@ BaseType_t xTaskResumeAll( void ) PRIVILEGED_FUNCTION;
*----------------------------------------------------------*/
/**
* Get tick count
* @cond
* task. h
* @code{c}
* TickType_t xTaskGetTickCount( void );
* @endcode
* @endcond
*
* @return The count of ticks since vTaskStartScheduler was called.
*
@ -1603,7 +1657,12 @@ BaseType_t xTaskResumeAll( void ) PRIVILEGED_FUNCTION;
TickType_t xTaskGetTickCount( void ) PRIVILEGED_FUNCTION;
/**
* Get tick count from ISR
* @cond
* task. h
* @code{c}
* TickType_t xTaskGetTickCountFromISR( void );
* @endcode
* @endcond
*
* @return The count of ticks since vTaskStartScheduler was called.
*
@ -1620,7 +1679,12 @@ TickType_t xTaskGetTickCount( void ) PRIVILEGED_FUNCTION;
TickType_t xTaskGetTickCountFromISR( void ) PRIVILEGED_FUNCTION;
/**
* Get current number of tasks
* @cond
* task. h
* @code{c}
* uint16_t uxTaskGetNumberOfTasks( void );
* @endcode
* @endcond
*
* @return The number of tasks that the real time kernel is currently managing.
* This includes all ready, blocked and suspended tasks. A task that
@ -1635,7 +1699,12 @@ TickType_t xTaskGetTickCountFromISR( void ) PRIVILEGED_FUNCTION;
UBaseType_t uxTaskGetNumberOfTasks( void ) PRIVILEGED_FUNCTION;
/**
* Get task name
* @cond
* task. h
* @code{c}
* char *pcTaskGetName( TaskHandle_t xTaskToQuery );
* @endcode
* @endcond
*
* @return The text (human readable) name of the task referenced by the handle
* xTaskToQuery. A task can query its own name by either passing in its own
@ -1646,10 +1715,17 @@ UBaseType_t uxTaskGetNumberOfTasks( void ) PRIVILEGED_FUNCTION;
* @endcond
* \ingroup TaskUtils
*/
char *pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
char * pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
/**
* @note This function takes a relatively long time to complete and should be
* @cond
* task. h
* @code{c}
* TaskHandle_t xTaskGetHandle( const char *pcNameToQuery );
* @endcode
* @endcond
*
* NOTE: This function takes a relatively long time to complete and should be
* used sparingly.
*
* @return The handle of the task that has the human readable name pcNameToQuery.
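A short caller-side sketch of the two lookup calls documented above; it assumes INCLUDE_xTaskGetHandle is set to 1, and "IDLE" is used only because every FreeRTOS system creates an idle task with that name.

void vExampleQueryByName( void )
{
    TaskHandle_t xIdle = xTaskGetHandle( "IDLE" );   /* relatively slow: use sparingly */

    if( xIdle != NULL )
    {
        const char * pcName = pcTaskGetName( xIdle );
        ( void ) pcName;   /* e.g. pass it to a logger */
    }
}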

View File

@ -975,7 +975,11 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
/* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
@ -998,22 +1002,36 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
* task is already in the ready list before it yields - in which
* case the yield will not cause a context switch unless there
* is also a higher priority task in the pending ready list. */
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
portYIELD_WITHIN_API();
#else
if( xTaskResumeAll() == pdFALSE )
#endif // ESP_PLATFORM
{
portYIELD_WITHIN_API();
}
}
else
{
/* Try again. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
}
}
else
{
/* The timeout has expired. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
traceQUEUE_SEND_FAILED( pxQueue );
return errQUEUE_FULL;
@ -1440,7 +1458,11 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
/* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
@ -1453,15 +1475,31 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
portYIELD_WITHIN_API();
#else
if( xTaskResumeAll() == pdFALSE )
#endif // ESP_PLATFORM
{
portYIELD_WITHIN_API();
}
#ifndef ESP_PLATFORM
else
{
mtCOVERAGE_TEST_MARKER();
}
#endif // ESP_PLATFORM
}
else
{
/* The queue contains data again. Loop back to try and read the
* data. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
}
}
else
@ -1469,7 +1507,11 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
/* Timed out. If there is no data in the queue exit, otherwise loop
* back and attempt to read the data. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{
@ -1605,7 +1647,11 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
/* Interrupts and other tasks can give to and take from the semaphore
* now the critical section has been exited. */
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
@ -1627,7 +1673,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
{
xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL();
}
else
{
@ -1638,22 +1684,42 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
portYIELD_WITHIN_API();
#else
if( xTaskResumeAll() == pdFALSE )
#endif // ESP_PLATFORM
{
portYIELD_WITHIN_API();
}
#ifndef ESP_PLATFORM
else
{
mtCOVERAGE_TEST_MARKER();
}
#endif // ESP_PLATFORM
}
else
{
/* There was no timeout and the semaphore count was not 0, so
* attempt to take the semaphore again. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
}
}
else
{
/* Timed out. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
/* If the semaphore count is 0 exit now as the timeout has
* expired. Otherwise return to attempt to take the semaphore that is
@ -1772,7 +1838,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
{
/* The queue was empty and no block time is specified (or
* the block time has expired) so leave now. */
taskEXIT_CRITICAL();
taskEXIT_CRITICAL();
traceQUEUE_PEEK_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
@ -1796,7 +1862,11 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
/* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
@ -1809,15 +1879,31 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
portYIELD_WITHIN_API();
#else
if( xTaskResumeAll() == pdFALSE )
#endif // ESP_PLATFORM
{
portYIELD_WITHIN_API();
}
#ifndef ESP_PLATFORM
else
{
mtCOVERAGE_TEST_MARKER();
}
#endif // ESP_PLATFORM
}
else
{
/* There is data in the queue now, so don't enter the blocked
* state, instead return to try and obtain the data. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
}
}
else
@ -1825,7 +1911,11 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
/* The timeout has expired. If there is still no data in the queue
* exit, otherwise go back and try to read the data again. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{
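For reference, a hedged caller-side sketch of the blocking receive path patched above; the item type, queue handle and 100 ms block time are illustrative.

typedef struct
{
    uint32_t ulValue;
} ExampleItem_t;

void vExampleReceive( QueueHandle_t xQueue )
{
    ExampleItem_t xItem;

    /* Block for up to 100 ms; it is this call that places the task on
     * xTasksWaitingToReceive and goes through the yield paths shown above. */
    if( xQueueReceive( xQueue, &xItem, pdMS_TO_TICKS( 100 ) ) == pdPASS )
    {
        /* xItem holds a copy of the received item. */
    }
    else
    {
        /* Timed out: the queue stayed empty for the whole block time. */
    }
}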

View File

@ -64,7 +64,9 @@
* or #defined the notification macros away, then provide default implementations
* that use task notifications. */
/*lint -save -e9026 Function like macros allowed and needed here so they can be overridden. */
#ifndef sbRECEIVE_COMPLETED
#ifdef ESP_PLATFORM // IDF-3775
#define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
taskENTER_CRITICAL(); \
{ \
@ -77,6 +79,20 @@
} \
} \
taskEXIT_CRITICAL();
#else
#define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
vTaskSuspendAll(); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
{ \
( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend, \
( uint32_t ) 0, \
eNoAction ); \
( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
} \
} \
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
#endif /* sbRECEIVE_COMPLETED */
#ifndef sbRECEIVE_COMPLETED_FROM_ISR
@ -104,6 +120,7 @@
* or #defined the notification macro away, then provide a default implementation
* that uses task notifications. */
#ifndef sbSEND_COMPLETED
#ifdef ESP_PLATFORM // IDF-3755
#define sbSEND_COMPLETED( pxStreamBuffer ) \
taskENTER_CRITICAL(); \
{ \
@ -116,6 +133,20 @@
} \
} \
taskEXIT_CRITICAL();
#else
#define sbSEND_COMPLETED( pxStreamBuffer ) \
vTaskSuspendAll(); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
{ \
( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive, \
( uint32_t ) 0, \
eNoAction ); \
( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
} \
} \
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
#endif /* sbSEND_COMPLETED */
#ifndef sbSEND_COMPLETE_FROM_ISR
@ -163,7 +194,9 @@ typedef struct StreamBufferDef_t /*lint !e9058 Style convention
#if ( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxStreamBufferNumber; /* Used for tracing purposes. */
#endif
#ifdef ESP_PLATFORM
portMUX_TYPE xStreamBufferMux; //Mutex required due to SMP
#endif // ESP_PLATFORM
} StreamBuffer_t;
/*
@ -539,6 +572,10 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
size_t xRequiredSpace = xDataLengthBytes;
TimeOut_t xTimeOut;
/* The maximum amount of space a stream buffer will ever report is its length
* minus 1. */
const size_t xMaxReportedSpace = pxStreamBuffer->xLength - ( size_t ) 1;
configASSERT( pvTxData );
configASSERT( pxStreamBuffer );
@ -552,10 +589,33 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
/* Overflow? */
configASSERT( xRequiredSpace > xDataLengthBytes );
/* If this is a message buffer then it must be possible to write the
* whole message. */
if( xRequiredSpace > xMaxReportedSpace )
{
/* The message would not fit even if the entire buffer was empty,
* so don't wait for space. */
xTicksToWait = ( TickType_t ) 0;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
/* If this is a stream buffer then it is acceptable to write only part
* of the message to the buffer. Cap the length to the total length of
* the buffer. */
if( xRequiredSpace > xMaxReportedSpace )
{
xRequiredSpace = xMaxReportedSpace;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
if( xTicksToWait != ( TickType_t ) 0 )
@ -1266,7 +1326,9 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
pxStreamBuffer->xLength = xBufferSizeBytes;
pxStreamBuffer->xTriggerLevelBytes = xTriggerLevelBytes;
pxStreamBuffer->ucFlags = ucFlags;
#ifdef ESP_PLATFORM
vPortCPUInitializeMutex( &pxStreamBuffer->xStreamBufferMux );
#endif // ESP_PLATFORM
}
#if ( configUSE_TRACE_FACILITY == 1 )
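A hedged caller-side sketch of the send path adjusted above: for a stream buffer a write larger than the free space is written only partially, and the return value reports how many bytes actually went in (a message buffer instead refuses to wait if the whole message could never fit). The handle, data and 10 ms block time are illustrative.

void vExampleStreamSend( StreamBufferHandle_t xStreamBuffer )
{
    static const uint8_t ucData[ 64 ] = { 0 };
    size_t xBytesSent;

    xBytesSent = xStreamBufferSend( xStreamBuffer, ucData, sizeof( ucData ),
                                    pdMS_TO_TICKS( 10 ) );

    if( xBytesSent < sizeof( ucData ) )
    {
        /* Only part of the data fitted within the block time. */
    }
}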

View File

@ -576,7 +576,7 @@ static void prvAddCurrentTaskToDelayedList( const portBASE_TYPE xCoreID,
* Set xNextTaskUnblockTime to the time at which the next Blocked state task
* will exit the Blocked state.
*/
static void prvResetNextTaskUnblockTime( void );
static void prvResetNextTaskUnblockTime( void ) PRIVILEGED_FUNCTION;
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
@ -671,15 +671,15 @@ void taskYIELD_OTHER_CORE( BaseType_t xCoreID, UBaseType_t uxPriority )
configASSERT( portVALID_STACK_MEM(pxStackBuffer) );
configASSERT( (xCoreID>=0 && xCoreID<portNUM_PROCESSORS) || (xCoreID==tskNO_AFFINITY) );
#if( configASSERT_DEFINED == 1 )
{
/* Sanity check that the size of the structure used to declare a
variable of type StaticTask_t equals the size of the real task
structure. */
volatile size_t xSize = sizeof( StaticTask_t );
configASSERT( xSize == sizeof( TCB_t ) );
( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */
}
#if ( configASSERT_DEFINED == 1 )
{
/* Sanity check that the size of the structure used to declare a
* variable of type StaticTask_t equals the size of the real task
* structure. */
volatile size_t xSize = sizeof( StaticTask_t );
configASSERT( xSize == sizeof( TCB_t ) );
( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */
}
#endif /* configASSERT_DEFINED */
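For context, a hedged sketch of the caller-side objects this size assertion protects: the application-supplied StaticTask_t is overlaid with the kernel's real TCB_t, so the two types must be the same size. The names and stack depth are illustrative and configSUPPORT_STATIC_ALLOCATION must be 1.

#define EXAMPLE_STACK_WORDS    2048

static StaticTask_t xExampleTaskBuffer;                    /* will hold the TCB */
static StackType_t xExampleStack[ EXAMPLE_STACK_WORDS ];   /* will hold the stack */

void vStartExampleStaticTask( TaskFunction_t pxTaskCode )
{
    TaskHandle_t xHandle;

    xHandle = xTaskCreateStatic( pxTaskCode, "static_task", EXAMPLE_STACK_WORDS,
                                 NULL, tskIDLE_PRIORITY + 1,
                                 xExampleStack, &xExampleTaskBuffer );
    configASSERT( xHandle != NULL );
}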
@ -762,7 +762,8 @@ void taskYIELD_OTHER_CORE( BaseType_t xCoreID, UBaseType_t uxPriority )
#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition,
TaskHandle_t * pxCreatedTask )
{
TCB_t * pxNewTCB;
BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
@ -1559,7 +1560,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
if( xTicksToDelay > ( TickType_t ) 0U )
{
configASSERT( uxSchedulerSuspended[xPortGetCoreID()] == 0 );
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{
traceTASK_DELAY();
@ -1572,7 +1577,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
* executing task. */
prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToDelay );
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
xAlreadyYielded = xTaskResumeAll();
#endif // ESP_PLATFORM
}
else
{
@ -2760,7 +2769,11 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
/* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{
/* Search the ready lists. */
do
@ -2806,7 +2819,11 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
}
#endif
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
return pxTCB;
}
@ -2822,7 +2839,11 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
{
UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{
/* Is there a space in the array for each task in the system? */
if( uxArraySize >= uxCurrentNumberOfTasks )
@ -2881,7 +2902,11 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
mtCOVERAGE_TEST_MARKER();
}
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
return uxTask;
}
@ -2931,7 +2956,11 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
{
#ifdef ESP_PLATFORM
BaseType_t xYieldRequired = pdFALSE;
#else
BaseType_t xYieldOccurred;
#endif // ESP_PLATFORM
/* Must not be called with the scheduler suspended as the implementation
* relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
@ -2939,11 +2968,20 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
/* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
* the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
xPendedTicks += xTicksToCatchUp;
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
return xYieldRequired;
#else
xYieldOccurred = xTaskResumeAll();
return xYieldOccurred;
#endif // ESP_PLATFORM
}
/*----------------------------------------------------------*/
@ -2956,7 +2994,11 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
configASSERT( pxTCB );
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{
/* A task can only be prematurely removed from the Blocked state if
* it is actually in the Blocked state. */
@ -3019,7 +3061,11 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
xReturn = pdFAIL;
}
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
return xReturn;
}
@ -3933,7 +3979,11 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
{
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{
/* Now the scheduler is suspended, the expected idle
* time can be sampled again, and this time its value can
@ -3957,7 +4007,11 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
mtCOVERAGE_TEST_MARKER();
}
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
}
else
{
@ -4258,14 +4312,22 @@ static void prvCheckTasksWaitingTermination( void )
* it should be reported as being in the Blocked state. */
if( eState == eSuspended )
{
taskENTER_CRITICAL();
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
{
pxTaskStatus->eCurrentState = eBlocked;
}
}
taskEXIT_CRITICAL();
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
}
}
#endif /* INCLUDE_vTaskSuspend */
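A hedged caller-side sketch of the reporting behaviour patched above: vTaskGetInfo() reports a task on the suspended list that is still linked to an event list as eBlocked rather than eSuspended. It assumes configUSE_TRACE_FACILITY is 1 and a valid handle.

void vExampleQueryState( TaskHandle_t xTask )
{
    TaskStatus_t xStatus;

    /* pdFALSE: skip the (slow) stack high-water-mark calculation.
     * eInvalid: ask the call to work the task's state out itself. */
    vTaskGetInfo( xTask, &xStatus, pdFALSE, eInvalid );

    if( xStatus.eCurrentState == eBlocked )
    {
        /* Waiting on an event or a timeout; a task blocked indefinitely is
         * reported here rather than as eSuspended. */
    }
}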

View File

@ -608,7 +608,11 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
TickType_t xTimeNow;
BaseType_t xTimerListsWereSwitched;
#ifdef ESP_PLATFORM
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{
/* Obtain the time now to make an assessment as to whether the timer
* has expired or not. If obtaining the time causes the lists to switch
@ -622,7 +626,11 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
/* The tick count has not overflowed, has the timer expired? */
if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) )
{
#ifdef ESP_PLATFORM
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
prvProcessExpiredTimer( xNextExpireTime, xTimeNow );
}
else
@ -642,19 +650,33 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
/* Yield to wait for either a command to arrive, or the
* block time to expire. If a command arrived between the
* critical section being exited and this yield then the yield
* will not cause the task to block. */
portYIELD_WITHIN_API();
#else
if( xTaskResumeAll() == pdFALSE )
#endif // ESP_PLATFORM
{
/* Yield to wait for either a command to arrive, or the
* block time to expire. If a command arrived between the
* critical section being exited and this yield then the yield
* will not cause the task to block. */
portYIELD_WITHIN_API();
}
#ifndef ESP_PLATFORM // IDF-3755
else
{
mtCOVERAGE_TEST_MARKER();
}
#endif // ESP_PLATFORM
}
}
else
{
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
}
}
}
@ -986,8 +1008,8 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
{
/* The timer queue is allocated statically in case
* configSUPPORT_DYNAMIC_ALLOCATION is 0. */
static StaticQueue_t xStaticTimerQueue; /*lint !e956 Ok to declare in this manner to prevent additional conditional compilation guards in other locations. */
static uint8_t ucStaticTimerQueueStorage[ ( size_t ) configTIMER_QUEUE_LENGTH * sizeof( DaemonTaskMessage_t ) ]; /*lint !e956 Ok to declare in this manner to prevent additional conditional compilation guards in other locations. */
PRIVILEGED_DATA static StaticQueue_t xStaticTimerQueue; /*lint !e956 Ok to declare in this manner to prevent additional conditional compilation guards in other locations. */
PRIVILEGED_DATA static uint8_t ucStaticTimerQueueStorage[ ( size_t ) configTIMER_QUEUE_LENGTH * sizeof( DaemonTaskMessage_t ) ]; /*lint !e956 Ok to declare in this manner to prevent additional conditional compilation guards in other locations. */
xTimerQueue = xQueueCreateStatic( ( UBaseType_t ) configTIMER_QUEUE_LENGTH, ( UBaseType_t ) sizeof( DaemonTaskMessage_t ), &( ucStaticTimerQueueStorage[ 0 ] ), &xStaticTimerQueue );
}