From b8b70528f4ee8d5be04fcaadb7b4e38da32d81a3 Mon Sep 17 00:00:00 2001
From: Richard Barry
Date: Mon, 3 Mar 2008 16:32:05 +0000
Subject: [PATCH] Add trace macros.

---
 Source/queue.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++
 Source/tasks.c | 32 +++++++++++++++++--
 2 files changed, 113 insertions(+), 3 deletions(-)

diff --git a/Source/queue.c b/Source/queue.c
index a5c75914a..77b0a4fb9 100644
--- a/Source/queue.c
+++ b/Source/queue.c
@@ -213,10 +213,13 @@ size_t xQueueSizeInBytes;
 		vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
 		vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );
 
+		traceQUEUE_CREATE( pxNewQueue );
+
 		return pxNewQueue;
 	}
 	else
 	{
+		traceQUEUE_CREATE_FAILED();
 		vPortFree( pxNewQueue );
 	}
 }
@@ -262,6 +265,12 @@ size_t xQueueSizeInBytes;
 
 		/* Start with the semaphore in the expected state. */
 		xQueueGenericSend( pxNewQueue, NULL, 0, queueSEND_TO_BACK );
+
+		traceCREATE_MUTEX( pxNewQueue );
+	}
+	else
+	{
+		traceCREATE_MUTEX_FAILED();
 	}
 
 	return pxNewQueue;
@@ -300,11 +309,15 @@ size_t xQueueSizeInBytes;
 			}
 
 			xReturn = pdPASS;
+
+			traceGIVE_MUTEX_RECURSIVE( pxMutex );
 		}
 		else
 		{
 			/* We cannot give the mutex because we are not the holder. */
 			xReturn = pdFAIL;
+
+			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
 		}
 
 		return xReturn;
@@ -339,6 +352,8 @@ size_t xQueueSizeInBytes;
 			}
 		}
 
+		traceTAKE_MUTEX_RECURSIVE( pxMutex );
+
 		return xReturn;
 	}
 
@@ -356,6 +371,12 @@ size_t xQueueSizeInBytes;
 		if( pxHandle != NULL )
 		{
 			pxHandle->uxMessagesWaiting = uxInitialCount;
+
+			traceCREATE_COUNTING_SEMAPHORE();
+		}
+		else
+		{
+			traceCREATE_COUNTING_SEMAPHORE_FAILED();
 		}
 
 		return pxHandle;
@@ -448,6 +469,8 @@ xTimeOutType xTimeOut;
 				list. */
 				taskENTER_CRITICAL();
 				{
+					traceBLOCKING_ON_QUEUE_SEND( pxQueue );
+
 					/* We can safely unlock the queue and scheduler here as
 					interrupts are disabled.  We must not yield with anything
 					locked, but we can yield from within a critical section.
@@ -505,6 +528,8 @@ xTimeOutType xTimeOut;
 		{
 			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
 			{
+				traceQUEUE_SEND( pxQueue );
+
 				/* There is room in the queue, copy the data into the queue. */
 				prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
 				xReturn = pdPASS;
@@ -529,6 +554,14 @@ xTimeOutType xTimeOut;
 				{
 					xReturn = queueERRONEOUS_UNBLOCK;
 				}
+				else
+				{
+					traceQUEUE_SEND_FAILED( pxQueue );
+				}
+			}
+			else
+			{
+				traceQUEUE_SEND_FAILED( pxQueue );
 			}
 		}
 	}
@@ -571,6 +604,8 @@ xTimeOutType xTimeOut;
 			posting? */
 			if( xTicksToWait > ( portTickType ) 0 )
 			{
+				traceBLOCKING_ON_QUEUE_SEND( pxQueue );
+
 				/* We are going to place ourselves on the xTasksWaitingToSend event
 				list, and will get woken should the delay expire, or space become
 				available on the queue. */
@@ -586,6 +621,8 @@ xTimeOutType xTimeOut;
 
 			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
 			{
+				traceQUEUE_SEND( pxQueue );
+
 				/* There is room in the queue, copy the data into the queue. */
 				prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
 				xReturn = pdPASS;
@@ -611,6 +648,14 @@ xTimeOutType xTimeOut;
 					this task unblocking and actually executing. */
 					xReturn = queueERRONEOUS_UNBLOCK;
 				}
+				else
+				{
+					traceQUEUE_SEND_FAILED( pxQueue );
+				}
+			}
+			else
+			{
+				traceQUEUE_SEND_FAILED( pxQueue );
 			}
 		}
 	}
@@ -655,6 +700,8 @@ xTimeOutType xTimeOut;
 			leave with nothing? */
 			if( xTicksToWait > ( portTickType ) 0 )
 			{
+				traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
+
 				#if ( configUSE_MUTEXES == 1 )
 				{
 					if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
@@ -678,6 +725,8 @@ xTimeOutType xTimeOut;
 
 			if( xJustPeeking == pdFALSE )
 			{
+				traceQUEUE_RECEIVE( pxQueue );
+
 				/* We are actually removing data. */
 				--( pxQueue->uxMessagesWaiting );
 
@@ -703,6 +752,8 @@ xTimeOutType xTimeOut;
 			}
 			else
 			{
+				traceQUEUE_PEEK( pxQueue );
+
 				/* We are not removing the data, so reset our read
 				pointer. */
 				pxQueue->pcReadFrom = pcOriginalReadPosition;
@@ -720,6 +771,14 @@ xTimeOutType xTimeOut;
 				{
 					xReturn = queueERRONEOUS_UNBLOCK;
 				}
+				else
+				{
+					traceQUEUE_RECEIVE_FAILED( pxQueue );
+				}
+			}
+			else
+			{
+				traceQUEUE_RECEIVE_FAILED( pxQueue );
 			}
 		}
 
@@ -742,6 +801,8 @@ signed portBASE_TYPE xQueueGenericSendFromISR( xQueueHandle pxQueue, const void
 	by this post). */
 	if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
 	{
+		traceQUEUE_SEND_FROM_ISR( pxQueue );
+
 		prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
 
 		/* If the queue is locked we do not alter the event list.  This will
@@ -770,6 +831,10 @@ signed portBASE_TYPE xQueueGenericSendFromISR( xQueueHandle pxQueue, const void
 			++( pxQueue->xTxLock );
 		}
 	}
+	else
+	{
+		traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
+	}
 
 	return xTaskPreviouslyWoken;
 }
@@ -802,6 +867,8 @@ signed portCHAR *pcOriginalReadPosition;
 			leave with nothing? */
 			if( xTicksToWait > ( portTickType ) 0 )
 			{
+				traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
+
 				#if ( configUSE_MUTEXES == 1 )
 				{
 					if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
@@ -849,6 +916,8 @@ signed portCHAR *pcOriginalReadPosition;
 
 			if( xJustPeeking == pdFALSE )
 			{
+				traceQUEUE_RECEIVE( pxQueue );
+
 				/* We are actually removing data. */
 				--( pxQueue->uxMessagesWaiting );
 
@@ -869,6 +938,8 @@ signed portCHAR *pcOriginalReadPosition;
 			}
 			else
 			{
+				traceQUEUE_PEEK( pxQueue );
+
 				/* We are not removing the data, so reset our read
 				pointer. */
 				pxQueue->pcReadFrom = pcOriginalReadPosition;
@@ -897,6 +968,14 @@ signed portCHAR *pcOriginalReadPosition;
 				{
 					xReturn = queueERRONEOUS_UNBLOCK;
 				}
+				else
+				{
+					traceQUEUE_RECEIVE_FAILED( pxQueue );
+				}
+			}
+			else
+			{
+				traceQUEUE_RECEIVE_FAILED( pxQueue );
 			}
 		}
 	} while( xReturn == queueERRONEOUS_UNBLOCK );
@@ -916,6 +995,8 @@ signed portBASE_TYPE xReturn;
 	/* We cannot block from an ISR, so check there is data available. */
 	if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
 	{
+		traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
+
 		prvCopyDataFromQueue( pxQueue, pvBuffer );
 		--( pxQueue->uxMessagesWaiting );
 
@@ -951,6 +1032,7 @@ signed portBASE_TYPE xReturn;
 	else
 	{
 		xReturn = pdFAIL;
+		traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
 	}
 
 	return xReturn;
@@ -971,6 +1053,8 @@ unsigned portBASE_TYPE uxReturn;
 
 void vQueueDelete( xQueueHandle pxQueue )
 {
+	traceQUEUE_DELETE( pxQueue );
+
 	vPortFree( pxQueue->pcHead );
 	vPortFree( pxQueue );
 }
diff --git a/Source/tasks.c b/Source/tasks.c
index 1c1013a1c..15e61cf1b 100644
--- a/Source/tasks.c
+++ b/Source/tasks.c
@@ -508,7 +508,7 @@ static tskTCB *prvAllocateTCBAndStack( unsigned portSHORT usStackDepth );
  * This function determines the 'high water mark' of the task stack by
  * determining how much of the stack remains at the original preset value.
  */
-#if ( configUSE_TRACE_FACILITY == 1 )
+#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxGetStackHighWaterMark == 1 ) )
 
 	unsigned portSHORT usTaskCheckFreeStackSpace( const unsigned portCHAR * pucStackByte );
 
@@ -620,12 +620,14 @@ tskTCB * pxNewTCB;
 			prvAddTaskToReadyQueue( pxNewTCB );
 
 			xReturn = pdPASS;
+			traceTASK_CREATE( pxNewTCB );
 		}
 		portEXIT_CRITICAL();
 	}
 	else
 	{
 		xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
+		traceTASK_CREATE_FAILED( pxNewTCB );
 	}
 
 	if( xReturn == pdPASS )
@@ -671,6 +673,8 @@ tskTCB * pxNewTCB;
 			/* If null is passed in here then we are deleting ourselves. */
 			pxTCB = prvGetTCBFromHandle( pxTaskToDelete );
 
+			traceTASK_DELETE( pxTCB );
+
 			/* Remove task from the ready list and place in the termination
 			list.  This will stop the task from be scheduled.  The idle task
 			will check the termination list and free up any memory allocated by the
@@ -753,6 +757,8 @@ tskTCB * pxNewTCB;
 
 		if( xShouldDelay )
 		{
+			traceTASK_DELAY_UNTIL();
+
 			/* We must remove ourselves from the ready list before adding
 			ourselves to the blocked list as the same list item is used for
 			both lists. */
@@ -800,6 +806,8 @@ tskTCB * pxNewTCB;
 	{
 		vTaskSuspendAll();
 		{
+			traceTASK_DELAY();
+
 			/* A task that is removed from the event list while the
 			scheduler is suspended will not get placed in the ready
 			list or removed from the blocked list until the scheduler
@@ -888,6 +896,8 @@ tskTCB * pxNewTCB;
 		priority of the calling function. */
 		pxTCB = prvGetTCBFromHandle( pxTask );
 
+		traceTASK_PRIORITY_SET( pxTask, uxNewPriority );
+
 		#if ( configUSE_MUTEXES == 1 )
 		{
 			uxCurrentPriority = pxTCB->uxBasePriority;
@@ -985,6 +995,8 @@ tskTCB * pxNewTCB;
 		/* If null is passed in here then we are suspending ourselves. */
 		pxTCB = prvGetTCBFromHandle( pxTaskToSuspend );
 
+		traceTASK_SUSPEND( pxTaskToSuspend );
+
 		/* Remove task from the ready/delayed list and place in the suspended list. */
 		vListRemove( &( pxTCB->xGenericListItem ) );
 
@@ -1056,6 +1068,8 @@ tskTCB * pxNewTCB;
 		{
 			if( prvIsTaskSuspended( pxTCB ) == pdTRUE )
 			{
+				traceTASK_RESUME( pxTCB );
+
 				/* As we are in a critical section we can access the ready
 				lists even if the scheduler is suspended. */
 				vListRemove( &( pxTCB->xGenericListItem ) );
@@ -1089,6 +1103,8 @@ tskTCB * pxNewTCB;
 
 	if( prvIsTaskSuspended( pxTCB ) == pdTRUE )
 	{
+		traceTASK_RESUME_FROM_ISR( pxTCB );
+
 		if( uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdFALSE )
 		{
 			xYieldRequired = ( pxTCB->uxPriority >= pxCurrentTCB->uxPriority );
@@ -1430,6 +1446,8 @@ inline void vTaskIncrementTick( void )
 		}
 	}
 	#endif
+
+	traceTASK_INCREMENT_TICK( xTickCount );
 }
 /*-----------------------------------------------------------*/
 
@@ -1505,6 +1523,8 @@ void vTaskSwitchContext( void )
 	/* listGET_OWNER_OF_NEXT_ENTRY walks through the list, so the tasks of the
 	same priority get an equal share of the processor time. */
 	listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopReadyPriority ] ) );
+
+	traceTASK_SWITCHED_IN();
 	vWriteTraceToBuffer();
 }
 /*-----------------------------------------------------------*/
@@ -1913,7 +1933,7 @@ tskTCB *pxNewTCB;
 #endif
 /*-----------------------------------------------------------*/
 
-#if ( configUSE_TRACE_FACILITY == 1 )
+#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxGetStackHighWaterMark == 1 ) )
 	unsigned portSHORT usTaskCheckFreeStackSpace( const unsigned portCHAR * pucStackByte )
 	{
 	register unsigned portSHORT usCount = 0;
@@ -1931,7 +1951,13 @@ tskTCB *pxNewTCB;
 
 #endif
 /*-----------------------------------------------------------*/
-
+#if ( INCLUDE_uxGetStackHighWaterMark == 1 )
+	unsigned portBASE_TYPE uxGetStackHighWaterMark( void )
+	{
+		return usTaskCheckFreeStackSpace( pxCurrentTCB->pxStack );
+	}
+#endif
+/*-----------------------------------------------------------*/
 
 #if ( ( INCLUDE_vTaskDelete == 1 ) || ( INCLUDE_vTaskCleanUpResources == 1 ) )
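
The trace macros this patch adds only do something if the application defines them; when left undefined they presumably default to empty definitions elsewhere in the kernel headers (not shown in this truncated patch). What follows is not part of the patch: it is a minimal sketch, under that assumption, of how an application might hook a few of the new macros from its FreeRTOSConfig.h to record kernel events in a RAM buffer. The record type, the buffer length, and the prvADD_TRACE_RECORD() helper are names invented for this example, not FreeRTOS APIs; only the trace macro names come from the patch above.

/* Hypothetical FreeRTOSConfig.h fragment - illustrative only. */

#define configTRACE_BUFFER_LENGTH		( 256 )		/* Illustrative buffer size. */

/* One record per traced event. */
typedef struct TRACE_RECORD
{
	unsigned portLONG ulEventCode;	/* Which event occurred. */
	void *pvObject;					/* The queue or task the event refers to. */
} xTraceRecord;

/* Defined once in an application source file. */
extern xTraceRecord xTraceBuffer[ configTRACE_BUFFER_LENGTH ];
extern volatile unsigned portBASE_TYPE uxTraceIndex;

/* Store one record, stopping when the buffer is full.  This expands inside
the kernel, so it must stay short and must not block. */
#define prvADD_TRACE_RECORD( ulCode, pvObj )								\
{																			\
	if( uxTraceIndex < configTRACE_BUFFER_LENGTH )							\
	{																		\
		xTraceBuffer[ uxTraceIndex ].ulEventCode = ( ulCode );				\
		xTraceBuffer[ uxTraceIndex ].pvObject = ( void * ) ( pvObj );		\
		uxTraceIndex++;														\
	}																		\
}

/* Hook a few of the macros introduced by the patch.  pxCurrentTCB is in
scope where traceTASK_SWITCHED_IN() expands, inside tasks.c. */
#define traceQUEUE_SEND( pxQueue )			prvADD_TRACE_RECORD( 1UL, ( pxQueue ) )
#define traceQUEUE_RECEIVE( pxQueue )		prvADD_TRACE_RECORD( 2UL, ( pxQueue ) )
#define traceQUEUE_SEND_FAILED( pxQueue )	prvADD_TRACE_RECORD( 3UL, ( pxQueue ) )
#define traceTASK_SWITCHED_IN()				prvADD_TRACE_RECORD( 4UL, pxCurrentTCB )

Because several of the macros (the ...FROM_ISR variants in particular) can fire from interrupts, a real hook would need to make the index update interrupt-safe and provide a way to drain or reset the buffer; the sketch above leaves the unhooked macros at their default empty definitions.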