Make minor optimisations (worth just an asm instruction or two each) by using const local copies in a few places where a volatile variable that could not change was previously re-read on each access.
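
The pattern, as a minimal standalone sketch (ExampleQueue_t, vExampleBefore() and vExampleAfter() are illustrative names, not kernel code): within a critical section or an ISR the volatile member cannot change, so copying it once into a const local lets the compiler hold the value in a register instead of re-reading memory at every access.

#include <stdint.h>

/* Illustrative structure mirroring the queue.c usage in this commit. */
typedef struct
{
    volatile int8_t cTxLock;
} ExampleQueue_t;

static void vExampleBefore( ExampleQueue_t *pxQueue )
{
    /* Three volatile accesses: one read for the test, then a
    read-modify-write for the increment. */
    if( pxQueue->cTxLock >= ( int8_t ) 0 )
    {
        ++( pxQueue->cTxLock );
    }
}

static void vExampleAfter( ExampleQueue_t *pxQueue )
{
    /* One volatile read and one volatile write; the test uses the
    register-held copy. */
    const int8_t cTxLock = pxQueue->cTxLock;

    if( cTxLock >= ( int8_t ) 0 )
    {
        pxQueue->cTxLock = cTxLock + 1;
    }
}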

Add the simple xTimerGetPeriod() and xTimerGetExpiryTime() functions.
Richard Barry 2016-03-29 13:07:27 +00:00
parent 26d3770fad
commit aeb03e5fa0
5 changed files with 217 additions and 140 deletions

FreeRTOS/Source/include/timers.h

@ -1266,6 +1266,32 @@ BaseType_t xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, void *pvPar
*/
const char * pcTimerGetTimerName( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
/**
* TickType_t xTimerGetPeriod( TimerHandle_t xTimer );
*
* Returns the period of a timer.
*
* @param xTimer The handle of the timer being queried.
*
* @return The period of the timer in ticks.
*/
TickType_t xTimerGetPeriod( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
/**
* TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer );
*
* Returns the time in ticks at which the timer will expire. If this is less
* than the current tick count then the expiry time has overflowed, and the
* timer will not expire until after the tick count itself next overflows.
*
* @param xTimer The handle of the timer being queried.
*
* @return If the timer is running then the time in ticks at which the timer
* will next expire is returned. If the timer is not running then the return
* value is undefined.
*/
TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
/*
* Functions beyond this part are not part of the public API and are intended
* for use by the kernel only.
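
A possible usage sketch for the two functions declared above (xTicksUntilExpiry() is a hypothetical helper, not part of this commit):

#include "FreeRTOS.h"
#include "task.h"
#include "timers.h"

/* Ticks remaining until xTimer next expires. Only meaningful while the
timer is active, as xTimerGetExpiryTime() is undefined for a dormant
timer. The unsigned subtraction wraps correctly even when the expiry
time has overflowed past the current tick count. */
static TickType_t xTicksUntilExpiry( TimerHandle_t xTimer )
{
    return xTimerGetExpiryTime( xTimer ) - xTaskGetTickCount();
}

For an active periodic timer the value returned by xTicksUntilExpiry() should not exceed xTimerGetPeriod( xTimer ).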

FreeRTOS/Source/portable/MSVC-MinGW/port.c

@ -88,13 +88,15 @@ Priorities are higher when a soak test is performed to lessen the effect of
Windows interfering with the timing. */
#define portSOAK_TEST
#ifndef portSOAK_TEST
#define portDELETE_SELF_THREAD_PRIORITY THREAD_PRIORITY_HIGHEST /* Must be highest. */
#define portSIMULATED_INTERRUPTS_THREAD_PRIORITY THREAD_PRIORITY_NORMAL
#define portSIMULATED_TIMER_THREAD_PRIORITY THREAD_PRIORITY_BELOW_NORMAL
#define portTASK_THREAD_PRIORITY THREAD_PRIORITY_IDLE
#else
#define portSIMULATED_INTERRUPTS_THREAD_PRIORITY THREAD_PRIORITY_TIME_CRITICAL
#define portSIMULATED_TIMER_THREAD_PRIORITY THREAD_PRIORITY_HIGHEST
#define portTASK_THREAD_PRIORITY THREAD_PRIORITY_ABOVE_NORMAL
#define portDELETE_SELF_THREAD_PRIORITY THREAD_PRIORITY_TIME_CRITICAL /* Must be highest. */
#define portSIMULATED_INTERRUPTS_THREAD_PRIORITY THREAD_PRIORITY_HIGHEST
#define portSIMULATED_TIMER_THREAD_PRIORITY THREAD_PRIORITY_ABOVE_NORMAL
#define portTASK_THREAD_PRIORITY THREAD_PRIORITY_NORMAL
#endif
/*
* Created as a high priority thread, this function uses a timer to simulate
@ -516,7 +518,7 @@ uint32_t ulErrorCode;
does not run and swap it out before it is closed. If that were to happen
the thread would never run again, effectively leaking both the thread
handle and its memory. */
SetThreadPriority( pvThread, THREAD_PRIORITY_HIGHEST );
SetThreadPriority( pvThread, portDELETE_SELF_THREAD_PRIORITY );
/* This function will not return, therefore a yield is set as pending to
ensure a context switch occurs away from this thread on the next tick. */

FreeRTOS/Source/queue.c

@ -947,6 +947,8 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
{
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
{
const int8_t cTxLock = pxQueue->cTxLock;
traceQUEUE_SEND_FROM_ISR( pxQueue );
/* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
@ -958,7 +960,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
/* The event list is not altered if the queue is locked. This will
be done when the queue is unlocked later. */
if( pxQueue->cTxLock == queueUNLOCKED )
if( cTxLock == queueUNLOCKED )
{
#if ( configUSE_QUEUE_SETS == 1 )
{
@ -1044,7 +1046,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
{
/* Increment the lock count so the task that unlocks the queue
knows that data was posted while it was locked. */
++( pxQueue->cTxLock );
pxQueue->cTxLock = cTxLock + 1;
}
xReturn = pdPASS;
@ -1102,11 +1104,15 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
/* When the queue is used to implement a semaphore no data is ever
moved through the queue but it is still valid to see if the queue 'has
space'. */
if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
if( uxMessagesWaiting < pxQueue->uxLength )
{
const int8_t cTxLock = pxQueue->cTxLock;
traceQUEUE_SEND_FROM_ISR( pxQueue );
/* A task can only have an inherited priority if it is a mutex
@ -1115,11 +1121,11 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
can be assumed there is no mutex holder and no need to determine if
priority disinheritance is needed. Simply increase the count of
messages (semaphores) available. */
++( pxQueue->uxMessagesWaiting );
pxQueue->uxMessagesWaiting = uxMessagesWaiting + 1;
/* The event list is not altered if the queue is locked. This will
be done when the queue is unlocked later. */
if( pxQueue->cTxLock == queueUNLOCKED )
if( cTxLock == queueUNLOCKED )
{
#if ( configUSE_QUEUE_SETS == 1 )
{
@ -1205,7 +1211,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
{
/* Increment the lock count so the task that unlocks the queue
knows that data was posted while it was locked. */
++( pxQueue->cTxLock );
pxQueue->cTxLock = cTxLock + 1;
}
xReturn = pdPASS;
@ -1245,9 +1251,11 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
{
taskENTER_CRITICAL();
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
/* Is there data in the queue now? To be running the calling task
must be the highest priority task wanting to access the queue. */
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
if( uxMessagesWaiting > ( UBaseType_t ) 0 )
{
/* Remember the read position in case the queue is only being
peeked. */
@ -1260,7 +1268,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
traceQUEUE_RECEIVE( pxQueue );
/* Actually removing data, not just peeking. */
--( pxQueue->uxMessagesWaiting );
pxQueue->uxMessagesWaiting = uxMessagesWaiting - 1;
#if ( configUSE_MUTEXES == 1 )
{
@ -1444,19 +1452,23 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
/* Cannot block in an ISR, so check there is data available. */
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
if( uxMessagesWaiting > ( UBaseType_t ) 0 )
{
const int8_t cRxLock = pxQueue->cRxLock;
traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
prvCopyDataFromQueue( pxQueue, pvBuffer );
--( pxQueue->uxMessagesWaiting );
pxQueue->uxMessagesWaiting = uxMessagesWaiting - 1;
/* If the queue is locked the event list will not be modified.
Instead update the lock count so the task that unlocks the queue
will know that an ISR has removed data while the queue was
locked. */
if( pxQueue->cRxLock == queueUNLOCKED )
if( cRxLock == queueUNLOCKED )
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
@ -1487,7 +1499,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
{
/* Increment the lock count so the task that unlocks the queue
knows that data was removed while it was locked. */
++( pxQueue->cRxLock );
pxQueue->cRxLock = cRxLock + 1;
}
xReturn = pdPASS;
@ -1673,6 +1685,11 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
BaseType_t xReturn = pdFALSE;
UBaseType_t uxMessagesWaiting;
/* This function is called from a critical section. */
uxMessagesWaiting = pxQueue->uxMessagesWaiting;
if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
{
@ -1719,13 +1736,13 @@ BaseType_t xReturn = pdFALSE;
if( xPosition == queueOVERWRITE )
{
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
if( uxMessagesWaiting > ( UBaseType_t ) 0 )
{
/* An item is not being added but overwritten, so subtract
one from the recorded number of items in the queue so when
one is added again below the number of recorded items remains
correct. */
--( pxQueue->uxMessagesWaiting );
--uxMessagesWaiting;
}
else
{
@ -1738,7 +1755,7 @@ BaseType_t xReturn = pdFALSE;
}
}
++( pxQueue->uxMessagesWaiting );
pxQueue->uxMessagesWaiting = uxMessagesWaiting + 1;
return xReturn;
}
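
For context on the queueOVERWRITE branch above, a hedged sketch of the mailbox idiom it supports, using the existing xQueueCreate() and xQueueOverwrite() APIs (vMailboxExample() is illustrative): the overwrite path decrements the recorded count before the normal increment, so a length-one queue always reports exactly one item.

#include <stdint.h>
#include "FreeRTOS.h"
#include "queue.h"

static void vMailboxExample( void )
{
    uint32_t ulValue;
    QueueHandle_t xMailbox = xQueueCreate( 1, sizeof( uint32_t ) );

    if( xMailbox != NULL )
    {
        ulValue = 10;
        ( void ) xQueueOverwrite( xMailbox, &ulValue ); /* Mailbox holds 10. */

        ulValue = 20;
        ( void ) xQueueOverwrite( xMailbox, &ulValue ); /* Does not block even
        though the queue is full - still one item, now 20. */
    }
}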
@ -1772,8 +1789,10 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
updated. */
taskENTER_CRITICAL();
{
int8_t cTxLock = pxQueue->cTxLock;
/* See if data was added to the queue while it was locked. */
while( pxQueue->cTxLock > queueLOCKED_UNMODIFIED )
while( cTxLock > queueLOCKED_UNMODIFIED )
{
/* Data was posted while the queue was locked. Are any tasks
blocked waiting for data to become available? */
@ -1841,7 +1860,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
}
#endif /* configUSE_QUEUE_SETS */
--( pxQueue->cTxLock );
--cTxLock;
}
pxQueue->cTxLock = queueUNLOCKED;
@ -1851,7 +1870,9 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
/* Do the same for the Rx lock. */
taskENTER_CRITICAL();
{
while( pxQueue->cRxLock > queueLOCKED_UNMODIFIED )
int8_t cRxLock = pxQueue->cRxLock;
while( cRxLock > queueLOCKED_UNMODIFIED )
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
@ -1864,7 +1885,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
mtCOVERAGE_TEST_MARKER();
}
--( pxQueue->cRxLock );
--cRxLock;
}
else
{
@ -2479,12 +2500,14 @@ BaseType_t xReturn;
if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
{
const int8_t cTxLock = pxQueueSetContainer->cTxLock;
traceQUEUE_SEND( pxQueueSetContainer );
/* The data copied is the handle of the queue that contains data. */
xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
if( pxQueueSetContainer->cTxLock == queueUNLOCKED )
if( cTxLock == queueUNLOCKED )
{
if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
{
@ -2505,7 +2528,7 @@ BaseType_t xReturn;
}
else
{
( pxQueueSetContainer->cTxLock )++;
pxQueueSetContainer->cTxLock = cTxLock + 1;
}
}
else
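
Pulling the lock-count protocol from the hunks above into one place (the constants are those defined in queue.c; the timeline is an editor's sketch):

/* cTxLock and cRxLock are queueUNLOCKED ( -1 ) while the queue is unlocked.
   task:  vTaskSuspendAll(), prvLockQueue()  ->  cTxLock = queueLOCKED_UNMODIFIED ( 0 )
   ISR:   xQueueSendFromISR()                ->  data copied in, cTxLock = 1
                                                 (event lists left untouched)
   ISR:   xQueueSendFromISR()                ->  cTxLock = 2
   task:  prvUnlockQueue()                   ->  replays two event-list updates
                                                 while counting cTxLock back down,
                                                 then sets cTxLock = queueUNLOCKED */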

FreeRTOS/Source/tasks.c

@ -153,16 +153,19 @@ functions but without including stdio.h here. */
#define taskSELECT_HIGHEST_PRIORITY_TASK() \
{ \
UBaseType_t uxTopPriority = uxTopReadyPriority; \
\
/* Find the highest priority queue that contains ready tasks. */ \
while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopReadyPriority ] ) ) ) \
while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \
{ \
configASSERT( uxTopReadyPriority ); \
--uxTopReadyPriority; \
configASSERT( uxTopPriority ); \
--uxTopPriority; \
} \
\
/* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \
the same priority get an equal share of the processor time. */ \
listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopReadyPriority ] ) ); \
listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \
uxTopReadyPriority = uxTopPriority; \
} /* taskSELECT_HIGHEST_PRIORITY_TASK */
/*-----------------------------------------------------------*/
@ -354,7 +357,7 @@ PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the
PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
#if( ( INCLUDE_vTaskDelete == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
#if( INCLUDE_vTaskDelete == 1 )
PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. */
PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;
@ -559,12 +562,11 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION;
configASSERT( puxStackBuffer != NULL );
configASSERT( pxTaskBuffer != NULL );
/* The memory used for the task's TCB and stack are passed into this
function - use them. */
pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
if( pxNewTCB != NULL )
if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
{
/* The memory used for the task's TCB and stack are passed into this
function - use them. */
pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
@ -1955,24 +1957,30 @@ BaseType_t xAlreadyYielded = pdFALSE;
they should be processed now. This ensures the tick count does
not slip, and that any delayed tasks are resumed at the correct
time. */
if( uxPendedTicks > ( UBaseType_t ) 0U )
{
while( uxPendedTicks > ( UBaseType_t ) 0U )
UBaseType_t uxPendedCounts = uxPendedTicks; /* Non-volatile copy. */
if( uxPendedCounts > ( UBaseType_t ) 0U )
{
if( xTaskIncrementTick() != pdFALSE )
do
{
xYieldPending = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
--uxPendedTicks;
if( xTaskIncrementTick() != pdFALSE )
{
xYieldPending = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
--uxPendedCounts;
} while( uxPendedCounts > ( UBaseType_t ) 0U );
uxPendedTicks = 0;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( xYieldPending != pdFALSE )
@ -2389,103 +2397,101 @@ BaseType_t xSwitchRequired = pdFALSE;
traceTASK_INCREMENT_TICK( xTickCount );
if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
{
/* Minor optimisation. The tick count cannot change in this
block. */
const TickType_t xConstTickCount = xTickCount + 1;
/* Increment the RTOS tick, switching the delayed and overflowed
delayed lists if it wraps to 0. */
++xTickCount;
xTickCount = xConstTickCount;
if( xConstTickCount == ( TickType_t ) 0U )
{
/* Minor optimisation. The tick count cannot change in this
block. */
const TickType_t xConstTickCount = xTickCount;
taskSWITCH_DELAYED_LISTS();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( xConstTickCount == ( TickType_t ) 0U )
/* See if this tick has made a timeout expire. Tasks are stored in
the queue in the order of their wake time - meaning once one task
has been found whose block time has not expired there is no need to
look any further down the list. */
if( xConstTickCount >= xNextTaskUnblockTime )
{
for( ;; )
{
taskSWITCH_DELAYED_LISTS();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* See if this tick has made a timeout expire. Tasks are stored in
the queue in the order of their wake time - meaning once one task
has been found whose block time has not expired there is no need to
look any further down the list. */
if( xConstTickCount >= xNextTaskUnblockTime )
{
for( ;; )
if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
{
if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
/* The delayed list is empty. Set xNextTaskUnblockTime
to the maximum possible value so it is extremely
unlikely that the
if( xTickCount >= xNextTaskUnblockTime ) test will pass
next time through. */
xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
break;
}
else
{
/* The delayed list is not empty, get the value of the
item at the head of the delayed list. This is the time
at which the task at the head of the delayed list must
be removed from the Blocked state. */
pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList );
xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
if( xConstTickCount < xItemValue )
{
/* The delayed list is empty. Set xNextTaskUnblockTime
to the maximum possible value so it is extremely
unlikely that the
if( xTickCount >= xNextTaskUnblockTime ) test will pass
next time through. */
xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
/* It is not time to unblock this item yet, but the
item value is the time at which the task at the head
of the blocked list must be removed from the Blocked
state - so record the item value in
xNextTaskUnblockTime. */
xNextTaskUnblockTime = xItemValue;
break;
}
else
{
/* The delayed list is not empty, get the value of the
item at the head of the delayed list. This is the time
at which the task at the head of the delayed list must
be removed from the Blocked state. */
pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList );
xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
if( xConstTickCount < xItemValue )
{
/* It is not time to unblock this item yet, but the
item value is the time at which the task at the head
of the blocked list must be removed from the Blocked
state - so record the item value in
xNextTaskUnblockTime. */
xNextTaskUnblockTime = xItemValue;
break;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* It is time to remove the item from the Blocked state. */
( void ) uxListRemove( &( pxTCB->xStateListItem ) );
/* Is the task waiting on an event also? If so remove
it from the event list. */
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
{
( void ) uxListRemove( &( pxTCB->xEventListItem ) );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Place the unblocked task into the appropriate ready
list. */
prvAddTaskToReadyList( pxTCB );
/* A task being unblocked cannot cause an immediate
context switch if preemption is turned off. */
#if ( configUSE_PREEMPTION == 1 )
{
/* Preemption is on, but a context switch should
only be performed if the unblocked task has a
priority that is equal to or higher than the
currently executing task. */
if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_PREEMPTION */
mtCOVERAGE_TEST_MARKER();
}
/* It is time to remove the item from the Blocked state. */
( void ) uxListRemove( &( pxTCB->xStateListItem ) );
/* Is the task waiting on an event also? If so remove
it from the event list. */
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
{
( void ) uxListRemove( &( pxTCB->xEventListItem ) );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Place the unblocked task into the appropriate ready
list. */
prvAddTaskToReadyList( pxTCB );
/* A task being unblocked cannot cause an immediate
context switch if preemption is turned off. */
#if ( configUSE_PREEMPTION == 1 )
{
/* Preemption is on, but a context switch should
only be performed if the unblocked task has a
priority that is equal to or higher than the
currently executing task. */
if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_PREEMPTION */
}
}
}
@ -3248,7 +3254,7 @@ UBaseType_t uxPriority;
static void prvCheckTasksWaitingTermination( void )
{
/** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/
#if ( INCLUDE_vTaskDelete == 1 )
{
@ -4111,7 +4117,7 @@ TickType_t uxReturn;
}
else
{
( pxCurrentTCB->ulNotifiedValue )--;
pxCurrentTCB->ulNotifiedValue = ulReturn - 1;
}
}
else
@ -4563,6 +4569,7 @@ TickType_t uxReturn;
static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely )
{
TickType_t xTimeToWake;
const TickType_t xConstTickCount = xTickCount;
#if( INCLUDE_xTaskAbortDelay == 1 )
{
@ -4586,7 +4593,6 @@ TickType_t xTimeToWake;
mtCOVERAGE_TEST_MARKER();
}
#if ( INCLUDE_vTaskSuspend == 1 )
{
if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) )
@ -4601,12 +4607,12 @@ TickType_t xTimeToWake;
/* Calculate the time at which the task should be woken if the event
does not occur. This may overflow but this doesn't matter, the
kernel will manage it correctly. */
xTimeToWake = xTickCount + xTicksToWait;
xTimeToWake = xConstTickCount + xTicksToWait;
/* The list item will be inserted in wake time order. */
listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
if( xTimeToWake < xTickCount )
if( xTimeToWake < xConstTickCount )
{
/* Wake time has overflowed. Place this item in the overflow
list. */
@ -4637,12 +4643,12 @@ TickType_t xTimeToWake;
/* Calculate the time at which the task should be woken if the event
does not occur. This may overflow but this doesn't matter, the kernel
will manage it correctly. */
xTimeToWake = xTickCount + xTicksToWait;
xTimeToWake = xConstTickCount + xTicksToWait;
/* The list item will be inserted in wake time order. */
listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
if( xTimeToWake < xTickCount )
if( xTimeToWake < xConstTickCount )
{
/* Wake time has overflowed. Place this item in the overflow list. */
vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
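
A worked example of the overflow test above, assuming a hypothetical 16-bit TickType_t (the logic is identical for 32 bits):

/* xConstTickCount = 0xFFF0, xTicksToWait = 0x0020:
   xTimeToWake = 0xFFF0 + 0x0020 = 0x0010 (wraps past 0xFFFF),
   so xTimeToWake < xConstTickCount is true and the task is placed on
   pxOverflowDelayedTaskList. When xTickCount itself wraps to zero,
   taskSWITCH_DELAYED_LISTS() swaps the two lists, so the task still
   unblocks at the correct time. */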

FreeRTOS/Source/timers.c

@ -433,6 +433,26 @@ TaskHandle_t xTimerGetTimerDaemonTaskHandle( void )
}
/*-----------------------------------------------------------*/
TickType_t xTimerGetPeriod( TimerHandle_t xTimer )
{
const Timer_t * const pxTimer = ( const Timer_t * const ) xTimer;
configASSERT( xTimer );
return pxTimer->xTimerPeriodInTicks;
}
/*-----------------------------------------------------------*/
TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer )
{
const Timer_t * const pxTimer = ( const Timer_t * const ) xTimer;
TickType_t xReturn;
configASSERT( xTimer );
xReturn = listGET_LIST_ITEM_VALUE( &( pxTimer->xTimerListItem ) );
return xReturn;
}
/*-----------------------------------------------------------*/
const char * pcTimerGetTimerName( TimerHandle_t xTimer ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
{
Timer_t *pxTimer = ( Timer_t * ) xTimer;