Add coverage test markers.

Richard Barry 2013-12-23 18:11:15 +00:00
parent 64ad1c00b5
commit c861e3883d
7 changed files with 726 additions and 39 deletions
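For context: this commit replaces the earlier mtBRANCH_TEST_INSTRUCTION()/configBRANCH_TEST_INSTRUCTION() hook with mtCOVERAGE_TEST_MARKER() and adds an explicit else clause, containing only the marker, to conditionals that previously had none, so a branch-coverage run can observe both outcomes of every decision while normal builds pay no cost. The sketch below shows the two sides of the macro; the counter-based override is an illustration only (ulCoverageMarkersHit is an assumed name, not part of FreeRTOS).

/* In FreeRTOSConfig.h (hypothetical coverage build - the counter is an
assumption, not FreeRTOS API): record every marked branch that executes. */
extern volatile unsigned long ulCoverageMarkersHit;
#define mtCOVERAGE_TEST_MARKER()	( ulCoverageMarkersHit++ )

/* In FreeRTOS.h (added by this commit): fall back to a no-op when the
application supplies no definition, so the markers compile away normally. */
#ifndef mtCOVERAGE_TEST_MARKER
	#define mtCOVERAGE_TEST_MARKER()
#endif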

View File

@ -219,7 +219,7 @@ portBASE_TYPE xAlreadyYielded;
}
else
{
mtBRANCH_TEST_INSTRUCTION();
mtCOVERAGE_TEST_MARKER();
}
/* The task blocked to wait for its required bits to be set - at this
@ -245,7 +245,7 @@ portBASE_TYPE xAlreadyYielded;
}
else
{
mtBRANCH_TEST_INSTRUCTION();
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
@ -302,7 +302,7 @@ portBASE_TYPE xWaitConditionMet, xAlreadyYielded;
}
else
{
mtBRANCH_TEST_INSTRUCTION();
mtCOVERAGE_TEST_MARKER();
}
}
else if( xTicksToWait == ( portTickType ) 0 )
@ -323,7 +323,7 @@ portBASE_TYPE xWaitConditionMet, xAlreadyYielded;
}
else
{
mtBRANCH_TEST_INSTRUCTION();
mtCOVERAGE_TEST_MARKER();
}
if( xWaitForAllBits != pdFALSE )
@ -332,7 +332,7 @@ portBASE_TYPE xWaitConditionMet, xAlreadyYielded;
}
else
{
mtBRANCH_TEST_INSTRUCTION();
mtCOVERAGE_TEST_MARKER();
}
/* Store the bits that the calling task is waiting for in the
@ -356,7 +356,7 @@ portBASE_TYPE xWaitConditionMet, xAlreadyYielded;
}
else
{
mtBRANCH_TEST_INSTRUCTION();
mtCOVERAGE_TEST_MARKER();
}
/* The task blocked to wait for its required bits to be set - at this
@ -382,12 +382,12 @@ portBASE_TYPE xWaitConditionMet, xAlreadyYielded;
}
else
{
mtBRANCH_TEST_INSTRUCTION();
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtBRANCH_TEST_INSTRUCTION();
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
@ -475,7 +475,7 @@ portBASE_TYPE xMatchFound = pdFALSE;
}
else
{
mtBRANCH_TEST_INSTRUCTION();
mtCOVERAGE_TEST_MARKER();
}
}
else if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) == uxBitsWaitedFor )
@ -497,7 +497,7 @@ portBASE_TYPE xMatchFound = pdFALSE;
}
else
{
mtBRANCH_TEST_INSTRUCTION();
mtCOVERAGE_TEST_MARKER();
}
/* Store the actual event flag value in the task's event list
@ -569,7 +569,7 @@ portBASE_TYPE xWaitConditionMet = pdFALSE;
}
else
{
mtBRANCH_TEST_INSTRUCTION();
mtCOVERAGE_TEST_MARKER();
}
}
else
@ -582,7 +582,7 @@ portBASE_TYPE xWaitConditionMet = pdFALSE;
}
else
{
mtBRANCH_TEST_INSTRUCTION();
mtCOVERAGE_TEST_MARKER();
}
}

View File

@ -689,8 +689,8 @@ typedef portTickType xEventBitsType;
#define configUSE_TRACE_FACILITY 0
#endif
#ifndef configBRANCH_TEST_INSTRUCTION
#define configBRANCH_TEST_INSTRUCTION()
#ifndef mtCOVERAGE_TEST_MARKER
#define mtCOVERAGE_TEST_MARKER()
#endif
/* For backward compatibility. */
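The hunk above supplies the no-op default, which only takes effect when the application has not already defined mtCOVERAGE_TEST_MARKER before FreeRTOS.h is processed. A sketch of an alternative, location-recording definition a test harness might place in FreeRTOSConfig.h is shown below; vRecordCoverageMarker() is an assumed harness function, not FreeRTOS API.

/* Hypothetical instrumentation: note the file and line of each marker hit. */
void vRecordCoverageMarker( const char *pcFile, int iLine );
#define mtCOVERAGE_TEST_MARKER()	vRecordCoverageMarker( __FILE__, __LINE__ )

Because every marker sits in an otherwise empty else branch, the recorded locations identify exactly which defensive branches a test run reached.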

View File

@ -189,6 +189,10 @@ xList * const pxList = ( xList * ) pxItemToRemove->pvContainer;
{
pxList->pxIndex = pxItemToRemove->pxPrevious;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
pxItemToRemove->pvContainer = NULL;
( pxList->uxNumberOfItems )--;

View File

@ -1,5 +1,5 @@
/*
FreeRTOS V7.6.0 - Copyright (C) 2013 Real Time Engineers Ltd.
All rights reserved
VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
@ -64,11 +64,11 @@
*/
/*
* A sample implementation of pvPortMalloc() and vPortFree() that combines
* (coalesces) adjacent memory blocks as they are freed, and in so doing
* limits memory fragmentation.
*
* See heap_1.c, heap_2.c and heap_3.c for alternative implementations, and the
* memory management pages of http://www.FreeRTOS.org for more information.
*/
#include <stdlib.h>
@ -106,7 +106,7 @@ typedef struct A_BLOCK_LINK
/*-----------------------------------------------------------*/
/*
* Inserts a block of memory that is being freed into the correct position in
* the list of free memory blocks. The block being freed will be merged with
* the block in front of it and/or the block behind it if the memory blocks are
* adjacent to each other.
@ -136,8 +136,8 @@ fragmentation. */
static size_t xFreeBytesRemaining = ( ( size_t ) heapADJUSTED_HEAP_SIZE ) & ( ( size_t ) ~portBYTE_ALIGNMENT_MASK );
static size_t xMinimumEverFreeBytesRemaining = ( ( size_t ) heapADJUSTED_HEAP_SIZE ) & ( ( size_t ) ~portBYTE_ALIGNMENT_MASK );
/* Gets set to the top bit of a size_t type. When this bit in the xBlockSize
member of an xBlockLink structure is set then the block belongs to the
application. When the bit is free the block is still part of the free heap
space. */
static size_t xBlockAllocatedBit = 0;
@ -157,9 +157,13 @@ void *pvReturn = NULL;
{
prvHeapInit();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Check the requested block size is not so large that the top bit is
set. The top bit of the block size member of the xBlockLink structure
is used to determine who owns the block - the application or the
kernel, so it must be free. */
if( ( xWantedSize & xBlockAllocatedBit ) == 0 )
@ -170,18 +174,26 @@ void *pvReturn = NULL;
{
xWantedSize += heapSTRUCT_SIZE;
/* Ensure that blocks are always aligned to the required number
of bytes. */
if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
{
/* Byte alignment required. */
xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
{
/* Traverse the list from the start (lowest address) block until
one of adequate size is found. */
pxPreviousBlock = &xStart;
pxBlock = xStart.pxNextFreeBlock;
@ -191,29 +203,29 @@ void *pvReturn = NULL;
pxBlock = pxBlock->pxNextFreeBlock;
}
/* If the end marker was reached then a block of adequate size
was not found. */
if( pxBlock != pxEnd )
{
/* Return the memory space pointed to - jumping over the
xBlockLink structure at its start. */
pvReturn = ( void * ) ( ( ( unsigned char * ) pxPreviousBlock->pxNextFreeBlock ) + heapSTRUCT_SIZE );
/* This block is being returned for use so must be taken out
of the list of free blocks. */
pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
/* If the block is larger than required it can be split into
two. */
if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE )
{
/* This block is to be split into two. Create a new
block following the number of bytes requested. The void
cast is used to prevent byte alignment warnings from the
compiler. */
pxNewBlockLink = ( void * ) ( ( ( unsigned char * ) pxBlock ) + xWantedSize );
/* Calculate the sizes of two blocks split from the
single block. */
pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
pxBlock->xBlockSize = xWantedSize;
@ -221,6 +233,10 @@ void *pvReturn = NULL;
/* Insert the new block into the list of free blocks. */
prvInsertBlockIntoFreeList( ( pxNewBlockLink ) );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
xFreeBytesRemaining -= pxBlock->xBlockSize;
@ -228,13 +244,29 @@ void *pvReturn = NULL;
{
xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* The block is being returned - it is allocated and owned
by the application and has no "next" block. */
pxBlock->xBlockSize |= xBlockAllocatedBit;
pxBlock->pxNextFreeBlock = NULL;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
traceMALLOC( pvReturn, xWantedSize );
@ -248,6 +280,10 @@ void *pvReturn = NULL;
extern void vApplicationMallocFailedHook( void );
vApplicationMallocFailedHook();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif
@ -272,7 +308,7 @@ xBlockLink *pxLink;
/* Check the block is actually allocated. */
configASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 );
configASSERT( pxLink->pxNextFreeBlock == NULL );
if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 )
{
if( pxLink->pxNextFreeBlock == NULL )
@ -290,6 +326,14 @@ xBlockLink *pxLink;
}
xTaskResumeAll();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
@ -362,13 +406,17 @@ unsigned char *puc;
}
/* Do the block being inserted, and the block it is being inserted after
make a contiguous block of memory? */
puc = ( unsigned char * ) pxIterator;
if( ( puc + pxIterator->xBlockSize ) == ( unsigned char * ) pxBlockToInsert )
{
pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
pxBlockToInsert = pxIterator;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Do the block being inserted, and the block it is being inserted before
make a contiguous block of memory? */
@ -388,7 +436,7 @@ unsigned char *puc;
}
else
{
pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
}
/* If the block being inserted plugged a gap, so was merged with the block
@ -399,5 +447,9 @@ unsigned char *puc;
{
pxIterator->pxNextFreeBlock = pxBlockToInsert;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
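With the extra else branches above, every decision in pvPortMalloc(), vPortFree() and prvInsertBlockIntoFreeList() has an observable outcome. The sketch below suggests one way a coverage test might drive the main paths (heap initialisation, block splitting, coalescing on free, allocation failure); the function name and allocation sizes are assumptions, and it presumes a build where configASSERT() is defined.

/* A sketch, not a shipped test: exercise heap_4.c branches for coverage. */
void vExerciseHeapCoverage( void )
{
void *pvA, *pvB;

	/* The first call initialises the heap; the second forces the free block
	to be split because far more than heapMINIMUM_BLOCK_SIZE remains. */
	pvA = pvPortMalloc( 100 );
	pvB = pvPortMalloc( 200 );

	/* Freeing in this order makes the second free coalesce with the block
	released by the first, exercising the merge branches in
	prvInsertBlockIntoFreeList(). */
	vPortFree( pvA );
	vPortFree( pvB );

	/* Asking for more than remains drives the "no block large enough" path,
	so pvPortMalloc() returns NULL. */
	configASSERT( pvPortMalloc( xPortGetFreeHeapSize() + 1 ) == NULL );
}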

View File

@ -271,6 +271,14 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
{
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
@ -338,6 +346,14 @@ xQueueHandle xReturn = NULL;
vPortFree( pxNewQueue );
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
configASSERT( xReturn );
@ -474,6 +490,10 @@ xQueueHandle xReturn = NULL;
task that might be waiting to access the mutex. */
( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
xReturn = pdPASS;
}
@ -604,6 +624,10 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
unblock. A context switch is required. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
@ -619,6 +643,14 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
kernel takes care of that. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
@ -636,6 +668,14 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
takes care of that. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_QUEUE_SETS */
@ -669,6 +709,7 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
else
{
/* Entry time was already set. */
mtCOVERAGE_TEST_MARKER();
}
}
}
@ -759,6 +800,14 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
our own so yield immediately. */
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
taskEXIT_CRITICAL();
@ -790,6 +839,10 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
@ -843,6 +896,10 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
priority inheritance should it become necessary. */
pxQueue->pxMutexHolder = ( signed char * ) xTaskGetCurrentTaskHandle();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif
@ -852,6 +909,10 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
{
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
else
@ -873,8 +934,15 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
/* The task waiting has a higher priority than this task. */
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
@ -915,12 +983,20 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
}
taskEXIT_CRITICAL();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
@ -993,6 +1069,14 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
@ -1007,7 +1091,19 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
@ -1023,7 +1119,19 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_QUEUE_SETS */
@ -1063,7 +1171,7 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
/* This function relaxes the coding standard somewhat to allow return
statements within the function itself. This is done in the interest
of execution time efficiency. */
@ -1097,6 +1205,10 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
priority inheritance should it become necessary. */
pxQueue->pxMutexHolder = ( signed char * ) xTaskGetCurrentTaskHandle(); /*lint !e961 Cast is not redundant as xTaskHandle is a typedef. */
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif
@ -1106,6 +1218,14 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
{
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
@ -1127,6 +1247,14 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
/* The task waiting has a higher priority than this task. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
@ -1153,6 +1281,7 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
else
{
/* Entry time was already set. */
mtCOVERAGE_TEST_MARKER();
}
}
}
@ -1181,6 +1310,10 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
}
taskEXIT_CRITICAL();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif
@ -1190,6 +1323,10 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
{
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
@ -1260,7 +1397,19 @@ xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
@ -1344,7 +1493,9 @@ unsigned portBASE_TYPE uxReturn;
configASSERT( xQueue );
taskENTER_CRITICAL();
{
uxReturn = ( ( xQUEUE * ) xQueue )->uxMessagesWaiting;
}
taskEXIT_CRITICAL();
return uxReturn;
@ -1360,7 +1511,9 @@ xQUEUE *pxQueue;
configASSERT( pxQueue );
taskENTER_CRITICAL();
{
uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
}
taskEXIT_CRITICAL();
return uxReturn;
@ -1438,6 +1591,10 @@ static void prvCopyDataToQueue( xQUEUE *pxQueue, const void *pvItemToQueue, port
vTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
pxQueue->pxMutexHolder = NULL;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_MUTEXES */
}
@ -1449,6 +1606,10 @@ static void prvCopyDataToQueue( xQUEUE *pxQueue, const void *pvItemToQueue, port
{
pxQueue->pcWriteTo = pxQueue->pcHead;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
@ -1458,6 +1619,10 @@ static void prvCopyDataToQueue( xQUEUE *pxQueue, const void *pvItemToQueue, port
{
pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( xPosition == queueOVERWRITE )
{
@ -1469,6 +1634,14 @@ static void prvCopyDataToQueue( xQUEUE *pxQueue, const void *pvItemToQueue, port
correct. */
--( pxQueue->uxMessagesWaiting );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
@ -1485,8 +1658,16 @@ static void prvCopyDataFromQueue( xQUEUE * const pxQueue, void * const pvBuffer
{
pxQueue->u.pcReadFrom = pxQueue->pcHead;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
/*-----------------------------------------------------------*/
@ -1516,6 +1697,10 @@ static void prvUnlockQueue( xQUEUE *pxQueue )
A context switch is required. */
vTaskMissedYield();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
@ -1529,6 +1714,10 @@ static void prvUnlockQueue( xQUEUE *pxQueue )
context switch is required. */
vTaskMissedYield();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
@ -1548,6 +1737,10 @@ static void prvUnlockQueue( xQUEUE *pxQueue )
context switch is required. */
vTaskMissedYield();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
@ -1574,6 +1767,10 @@ static void prvUnlockQueue( xQUEUE *pxQueue )
{
vTaskMissedYield();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
--( pxQueue->xRxLock );
}
@ -1721,6 +1918,14 @@ signed portBASE_TYPE xReturn;
that a yield might be appropriate. */
xReturn = errQUEUE_YIELD;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
@ -1766,6 +1971,10 @@ signed portBASE_TYPE xReturn;
return errQUEUE_FULL;
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
portENABLE_INTERRUPTS();
@ -1779,6 +1988,10 @@ signed portBASE_TYPE xReturn;
{
pxQueue->u.pcReadFrom = pxQueue->pcHead;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
--( pxQueue->uxMessagesWaiting );
( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
@ -1795,6 +2008,14 @@ signed portBASE_TYPE xReturn;
{
xReturn = errQUEUE_YIELD;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
@ -1832,8 +2053,24 @@ signed portBASE_TYPE xReturn;
{
return pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
return xCoRoutinePreviouslyWoken;
@ -1859,6 +2096,10 @@ signed portBASE_TYPE xReturn;
{
pxQueue->u.pcReadFrom = pxQueue->pcHead;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
--( pxQueue->uxMessagesWaiting );
( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
@ -1870,7 +2111,19 @@ signed portBASE_TYPE xReturn;
{
*pxCoRoutineWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
xReturn = pdPASS;
@ -1903,6 +2156,10 @@ signed portBASE_TYPE xReturn;
xQueueRegistry[ ux ].xHandle = xQueue;
break;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
@ -1925,6 +2182,10 @@ signed portBASE_TYPE xReturn;
xQueueRegistry[ ux ].pcQueueName = NULL;
break;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
@ -1958,6 +2219,10 @@ signed portBASE_TYPE xReturn;
/* There is nothing in the queue, block for the specified period. */
vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
prvUnlockQueue( pxQueue );
}
@ -2095,7 +2360,19 @@ signed portBASE_TYPE xReturn;
/* The task waiting has a higher priority */
xReturn = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
return xReturn;

View File

@ -572,6 +572,10 @@ tskTCB * pxNewTCB;
required.*/
*pxCreatedTask = ( xTaskHandle ) pxNewTCB;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Ensure interrupts don't access the task lists while they are being
updated. */
@ -591,6 +595,10 @@ tskTCB * pxNewTCB;
fails, but we will report the failure. */
prvInitialiseTaskLists();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
@ -603,6 +611,14 @@ tskTCB * pxNewTCB;
{
pxCurrentTCB = pxNewTCB;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
@ -639,6 +655,14 @@ tskTCB * pxNewTCB;
{
taskYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
@ -666,12 +690,20 @@ tskTCB * pxNewTCB;
{
taskRESET_READY_PRIORITY( pxTCB->uxPriority );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Is the task waiting on an event also? */
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
{
( void ) uxListRemove( &( pxTCB->xEventListItem ) );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xGenericListItem ) );
@ -747,6 +779,10 @@ tskTCB * pxNewTCB;
{
xShouldDelay = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
@ -757,6 +793,10 @@ tskTCB * pxNewTCB;
{
xShouldDelay = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
/* Update the wake time ready for the next call. */
@ -776,9 +816,17 @@ tskTCB * pxNewTCB;
directly. */
portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
prvAddCurrentTaskToDelayedList( xTimeToWake );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
xAlreadyYielded = xTaskResumeAll();
@ -788,6 +836,10 @@ tskTCB * pxNewTCB;
{
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* INCLUDE_vTaskDelayUntil */
@ -831,10 +883,18 @@ tskTCB * pxNewTCB;
directly. */
portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
prvAddCurrentTaskToDelayedList( xTimeToWake );
}
xAlreadyYielded = xTaskResumeAll();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Force a reschedule if xTaskResumeAll has not already done so, we may
have put ourselves to sleep. */
@ -842,6 +902,10 @@ tskTCB * pxNewTCB;
{
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* INCLUDE_vTaskDelay */
@ -954,6 +1018,10 @@ tskTCB * pxNewTCB;
{
uxNewPriority = ( unsigned portBASE_TYPE ) configMAX_PRIORITIES - ( unsigned portBASE_TYPE ) 1U;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
taskENTER_CRITICAL();
{
@ -988,6 +1056,10 @@ tskTCB * pxNewTCB;
{
xYieldRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
@ -1023,6 +1095,10 @@ tskTCB * pxNewTCB;
{
pxTCB->uxPriority = uxNewPriority;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* The base priority gets set whatever. */
pxTCB->uxBasePriority = uxNewPriority;
@ -1039,6 +1115,10 @@ tskTCB * pxNewTCB;
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( portTickType ) configMAX_PRIORITIES - ( portTickType ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* If the task is in the blocked or suspended list we need do
nothing more than change its priority variable. However, if
@ -1056,13 +1136,25 @@ tskTCB * pxNewTCB;
reset macro can be called directly. */
portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
prvAddTaskToReadyList( pxTCB );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( xYieldRequired == pdTRUE )
{
taskYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Remove compiler warning about unused variables when the port
optimised task selection is not being used. */
@ -1095,12 +1187,20 @@ tskTCB * pxNewTCB;
{
taskRESET_READY_PRIORITY( pxTCB->uxPriority );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Is the task waiting on an event also? */
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
{
( void ) uxListRemove( &( pxTCB->xEventListItem ) );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xGenericListItem ) );
}
@ -1142,6 +1242,10 @@ tskTCB * pxNewTCB;
task that is now in the Suspended state. */
prvResetNextTaskUnblockTime();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
@ -1173,7 +1277,19 @@ tskTCB * pxNewTCB;
{
xReturn = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
return xReturn;
@ -1214,10 +1330,22 @@ tskTCB * pxNewTCB;
next yield. */
taskYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* INCLUDE_vTaskSuspend */
@ -1264,6 +1392,10 @@ tskTCB * pxNewTCB;
{
xYieldRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
prvAddTaskToReadyList( pxTCB );
@ -1276,6 +1408,10 @@ tskTCB * pxNewTCB;
vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
@ -1309,6 +1445,10 @@ portBASE_TYPE xReturn;
{
xReturn = xTimerCreateTimerTask();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_TIMERS */
@ -1445,11 +1585,15 @@ portBASE_TYPE xAlreadyYielded = pdFALSE;
{
xYieldPending = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
/* If any ticks occurred while the scheduler was suspended then
they should be processed now. This ensures the tick count does
not slip, and that any delayed tasks are resumed at the correct
time. */
if( uxPendedTicks > ( unsigned portBASE_TYPE ) 0U )
{
@ -1459,9 +1603,17 @@ portBASE_TYPE xAlreadyYielded = pdFALSE;
{
xYieldPending = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
--uxPendedTicks;
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( xYieldPending == pdTRUE )
{
@ -1472,8 +1624,16 @@ portBASE_TYPE xAlreadyYielded = pdFALSE;
#endif
taskYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
@ -1609,6 +1769,10 @@ unsigned portBASE_TYPE uxTaskGetNumberOfTasks( void )
}
#endif
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
( void ) xTaskResumeAll();
@ -1675,6 +1839,10 @@ portBASE_TYPE xSwitchRequired = pdFALSE;
{
taskSWITCH_DELAYED_LISTS();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* See if this tick has made a timeout expire. Tasks are stored in
the queue in the order of their wake time - meaning once one task
@ -1713,6 +1881,10 @@ portBASE_TYPE xSwitchRequired = pdFALSE;
xNextTaskUnblockTime = xItemValue;
break;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* It is time to remove the item from the Blocked state. */
( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
@ -1723,6 +1895,10 @@ portBASE_TYPE xSwitchRequired = pdFALSE;
{
( void ) uxListRemove( &( pxTCB->xEventListItem ) );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Place the unblocked task into the appropriate ready
list. */
@ -1740,6 +1916,10 @@ portBASE_TYPE xSwitchRequired = pdFALSE;
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_PREEMPTION */
}
@ -1756,6 +1936,10 @@ portBASE_TYPE xSwitchRequired = pdFALSE;
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
@ -1767,6 +1951,10 @@ portBASE_TYPE xSwitchRequired = pdFALSE;
{
vApplicationTickHook();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_TICK_HOOK */
}
@ -1789,6 +1977,10 @@ portBASE_TYPE xSwitchRequired = pdFALSE;
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_PREEMPTION */
@ -1843,7 +2035,9 @@ portBASE_TYPE xSwitchRequired = pdFALSE;
/* Save the hook function in the TCB. A critical section is required as
the value can be accessed from an interrupt. */
taskENTER_CRITICAL();
{
xReturn = xTCB->pxTaskTag;
}
taskEXIT_CRITICAL();
return xReturn;
@ -1916,6 +2110,10 @@ void vTaskSwitchContext( void )
{
pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
ulTaskSwitchedInTime = ulTotalRunTime;
}
#endif /* configGENERATE_RUN_TIME_STATS */
@ -1961,6 +2159,10 @@ portTickType xTimeToWake;
check, and the port reset macro can be called directly. */
portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
#if ( INCLUDE_vTaskSuspend == 1 )
{
@ -2015,6 +2217,10 @@ portTickType xTimeToWake;
check, and the port reset macro can be called directly. */
portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
#if ( INCLUDE_vTaskSuspend == 1 )
{
@ -2073,6 +2279,10 @@ portTickType xTimeToWake;
check, and the port reset macro can be called directly. */
portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Calculate the time at which the task should be woken if the event does
not occur. This may overflow but this doesn't matter. */
@ -2337,6 +2547,10 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
{
taskYIELD();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */
@ -2384,9 +2598,17 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
traceLOW_POWER_IDLE_END();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
( void ) xTaskResumeAll();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_TICKLESS_IDLE */
}
@ -2424,6 +2646,10 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
{
eReturn = eNoTasksWaitingTimeout;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_TIMERS */
}
@ -2449,6 +2675,10 @@ unsigned portBASE_TYPE x;
{
break;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
/* Ensure the name string is terminated in the case that the string length
@ -2461,6 +2691,10 @@ unsigned portBASE_TYPE x;
{
uxPriority = ( unsigned portBASE_TYPE ) configMAX_PRIORITIES - ( unsigned portBASE_TYPE ) 1U;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
pxTCB->uxPriority = uxPriority;
#if ( configUSE_MUTEXES == 1 )
@ -2594,6 +2828,10 @@ static void prvCheckTasksWaitingTermination( void )
prvDeleteTCB( pxTCB );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
#endif /* vTaskDelete */
@ -2622,6 +2860,10 @@ static void prvAddCurrentTaskToDelayedList( portTickType xTimeToWake )
{
xNextTaskUnblockTime = xTimeToWake;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
/*-----------------------------------------------------------*/
@ -2722,6 +2964,10 @@ tskTCB *pxNewTCB;
} while( pxNextTCB != pxFirstTCB );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
return uxTask;
}
@ -2884,6 +3130,10 @@ tskTCB *pxTCB;
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( portTickType ) configMAX_PRIORITIES - ( portTickType ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* If the task being modified is in the ready state it will need to
be moved into a new list. */
@ -2893,6 +3143,10 @@ tskTCB *pxTCB;
{
taskRESET_READY_PRIORITY( pxTCB->uxPriority );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Inherit the priority before being moved into the new list. */
pxTCB->uxPriority = pxCurrentTCB->uxPriority;
@ -2906,6 +3160,14 @@ tskTCB *pxTCB;
traceTASK_PRIORITY_INHERIT( pxTCB, pxCurrentTCB->uxPriority );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
@ -2928,6 +3190,10 @@ tskTCB *pxTCB;
{
taskRESET_READY_PRIORITY( pxTCB->uxPriority );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Disinherit the priority before adding the task into the new
ready list. */
@ -2940,8 +3206,20 @@ tskTCB *pxTCB;
{
listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( portTickType ) configMAX_PRIORITIES - ( portTickType ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
}
else
{
mtCOVERAGE_TEST_MARKER();
}
prvAddTaskToReadyList( pxTCB );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
@ -2958,6 +3236,10 @@ tskTCB *pxTCB;
{
( pxCurrentTCB->uxCriticalNesting )++;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* portCRITICAL_NESTING_IN_TCB */
@ -2977,7 +3259,19 @@ tskTCB *pxTCB;
{
portENABLE_INTERRUPTS();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
@ -3062,6 +3356,10 @@ tskTCB *pxTCB;
/* Free the array again. */
vPortFree( pxTaskStatusArray );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) ) */
@ -3163,10 +3461,18 @@ tskTCB *pxTCB;
pcWriteBuffer += strlen( ( char * ) pcWriteBuffer );
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Free the array again. */
vPortFree( pxTaskStatusArray );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) ) */

View File

@ -251,6 +251,10 @@ portBASE_TYPE xReturn = pdFAIL;
}
#endif
}
else
{
mtCOVERAGE_TEST_MARKER();
}
configASSERT( xReturn );
return xReturn;
@ -330,6 +334,10 @@ xDAEMON_TASK_MESSAGE xMessage;
traceTIMER_COMMAND_SEND( xTimer, xCommandID, xOptionalValue, xReturn );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
return xReturn;
}
@ -373,6 +381,14 @@ xTIMER * const pxTimer = ( xTIMER * ) listGET_OWNER_OF_HEAD_ENTRY( pxCurrentTime
configASSERT( xResult );
( void ) xResult;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Call the timer callback. */
@ -443,6 +459,10 @@ portBASE_TYPE xTimerListsWereSwitched;
to block. */
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
else
@ -565,6 +585,10 @@ portTickType xTimeNow;
/* Call the function. */
pxCallback->pxCallbackFunction( pxCallback->pvParameter1, pxCallback->ulParameter2 );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* INCLUDE_xTimerPendCallbackFromISR */
@ -579,6 +603,10 @@ portTickType xTimeNow;
/* The timer is in a list, remove it. */
( void ) uxListRemove( &( pxTimer->xTimerListItem ) );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
traceTIMER_COMMAND_RECEIVED( pxTimer, xMessage.xMessageID, xMessage.u.xTimerParameters.xMessageValue );
@ -607,6 +635,14 @@ portTickType xTimeNow;
configASSERT( xResult );
( void ) xResult;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
break;
@ -690,6 +726,10 @@ portBASE_TYPE xResult;
( void ) xResult;
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
pxTemp = pxCurrentTimerList;
@ -720,9 +760,17 @@ static void prvCheckForValidListAndQueue( void )
{
vQueueAddToRegistry( xTimerQueue, ( signed char * ) "TmrQ" );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configQUEUE_REGISTRY_SIZE */
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
}