Reintroduce Espressif's IDF v4.2 changes to ESP32 port (#193)

* Renamed old port to ESP_IDF_V3

* Update ESP32 port files to support IDF v4.2.

* Add changes required to support ESP32-S2

Co-authored-by: Shubham Kulkarni <shubham.kulkarni@espressif.com>
Carl Lundin 2020-10-08 11:03:27 -07:00 committed by GitHub
parent 77ad717400
commit 3d4d17178f
28 changed files with 7086 additions and 34 deletions

View File

@ -19,6 +19,9 @@
#define USED
#endif
#ifdef CONFIG_ESP32_DEBUG_OCDAWARE
const int USED DRAM_ATTR uxTopUsedPriority = configMAX_PRIORITIES - 1;
#endif
/*
* This file is no longer needed as AFTER FreeRTOS V10.14.1 OpenOCD is fixed in the kernel.
* #ifdef CONFIG_ESP32_DEBUG_OCDAWARE
* const int USED DRAM_ATTR uxTopUsedPriority = configMAX_PRIORITIES - 1;
* #endif
*/

View File

@ -307,12 +307,32 @@
uint32_t compare,
uint32_t * set )
{
__asm__ __volatile__ (
"WSR %2,SCOMPARE1 \n"
"S32C1I %0, %1, 0 \n"
: "=r" ( *set )
: "r" ( addr ), "r" ( compare ), "0" ( *set )
);
#if ( XCHAL_HAVE_S32C1I > 0 )
__asm__ __volatile__ (
"WSR %2,SCOMPARE1 \n"
"S32C1I %0, %1, 0 \n"
: "=r" ( *set )
: "r" ( addr ), "r" ( compare ), "0" ( *set )
);
#else
/* No S32C1I, so do this by disabling and re-enabling interrupts (slower) */
uint32_t intlevel, old_value;
__asm__ __volatile__ ( "rsil %0, " XTSTR( XCHAL_EXCM_LEVEL ) "\n"
: "=r" ( intlevel ) );
old_value = *addr;
if( old_value == compare )
{
*addr = *set;
}
__asm__ __volatile__ ( "memw \n"
"wsr %0, ps\n"
: : "r" ( intlevel ) );
*set = old_value;
#endif /* if ( XCHAL_HAVE_S32C1I > 0 ) */
}
void uxPortCompareSetExtram( volatile uint32_t * addr,
@ -409,13 +429,6 @@
#define xPortGetFreeHeapSize esp_get_free_heap_size
#define xPortGetMinimumEverFreeHeapSize esp_get_minimum_free_heap_size
/*
* Send an interrupt to another core in order to make the task running
* on it yield for a higher-priority task.
*/
void vPortYieldOtherCore( BaseType_t coreid ) PRIVILEGED_FUNCTION;
/*
* Callback to set a watchpoint on the end of the stack. Called every context switch to change the stack

View File

@ -97,17 +97,23 @@
#include "xtensa_rtos.h"
#include "rom/ets_sys.h"
#if CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/rom/ets_sys.h"
#elif CONFIG_IDF_TARGET_ESP32
#include "esp32/rom/ets_sys.h"
#endif
#include "soc/cpu.h"
#include "FreeRTOS.h"
#include "task.h"
#include "esp_panic.h"
#include "esp_private/panic_reason.h"
#include "esp_debug_helpers.h"
#include "esp_heap_caps.h"
#include "esp_crosscore_int.h"
#include "esp_private/crosscore_int.h"
#include "esp_intr_alloc.h"
#include "esp_log.h"
/* Defined in portasm.h */
extern void _frxt_tick_timer_init( void );
@ -133,6 +139,19 @@ unsigned port_interruptNesting[ portNUM_PROCESSORS ] = { 0 }; /* Interrupt nest
/* User exception dispatcher when exiting */
void _xt_user_exit( void );
#if CONFIG_FREERTOS_TASK_FUNCTION_WRAPPER
/* Wrapper to allow task functions to return (increases stack overhead by 16 bytes) */
static void vPortTaskWrapper( TaskFunction_t pxCode,
void * pvParameters )
{
pxCode( pvParameters );
/* FreeRTOS tasks should not return. Log the task name and abort. */
char * pcTaskName = pcTaskGetTaskName( NULL );
ESP_LOGE( "FreeRTOS", "FreeRTOS Task \"%s\" should not return, Aborting now!", pcTaskName );
abort();
}
#endif /* if CONFIG_FREERTOS_TASK_FUNCTION_WRAPPER */
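For illustration, here is a minimal sketch (task and function names hypothetical) of the failure mode this wrapper exists to catch: a FreeRTOS task that simply returns instead of deleting itself. With CONFIG_FREERTOS_TASK_FUNCTION_WRAPPER enabled, the return lands back in vPortTaskWrapper(), which logs the task name and aborts; without the wrapper the task would return through the initial a0 of 0 and fault.

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static void vBuggyTask( void * pvParameters )
{
    ( void ) pvParameters;
    /* ... one-shot work ... */
    /* BUG: should call vTaskDelete( NULL ) here; returning instead
     * triggers the wrapper's log-and-abort path. */
}

void vStartBuggyTask( void )
{
    xTaskCreate( vBuggyTask, "buggy", 2048, NULL, 5, NULL );
}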
/*
* Stack initialization
*/
@ -168,21 +187,35 @@ void _xt_user_exit( void );
frame = ( XtExcFrame * ) sp;
/* Explicitly initialize certain saved registers */
frame->pc = ( UBaseType_t ) pxCode; /* task entrypoint */
frame->a0 = 0; /* to terminate GDB backtrace */
frame->a1 = ( UBaseType_t ) sp + XT_STK_FRMSZ; /* physical top of stack frame */
frame->exit = ( UBaseType_t ) _xt_user_exit; /* user exception exit dispatcher */
#if CONFIG_FREERTOS_TASK_FUNCTION_WRAPPER
frame->pc = ( UBaseType_t ) vPortTaskWrapper; /* task wrapper */
#else
frame->pc = ( UBaseType_t ) pxCode; /* task entrypoint */
#endif
frame->a0 = 0; /* to terminate GDB backtrace */
frame->a1 = ( UBaseType_t ) sp + XT_STK_FRMSZ; /* physical top of stack frame */
frame->exit = ( UBaseType_t ) _xt_user_exit; /* user exception exit dispatcher */
/* Set initial PS to int level 0, EXCM disabled ('rfe' will enable), user mode. */
/* Also set entry point argument parameter. */
#ifdef __XTENSA_CALL0_ABI__
frame->a2 = ( UBaseType_t ) pvParameters;
#if CONFIG_FREERTOS_TASK_FUNCTION_WRAPPER
frame->a2 = ( UBaseType_t ) pxCode;
frame->a3 = ( UBaseType_t ) pvParameters;
#else
frame->a2 = ( UBaseType_t ) pvParameters;
#endif
frame->ps = PS_UM | PS_EXCM;
#else
/* + for windowed ABI also set WOE and CALLINC (pretend task was 'call4'd). */
frame->a6 = ( UBaseType_t ) pvParameters;
#if CONFIG_FREERTOS_TASK_FUNCTION_WRAPPER
frame->a6 = ( UBaseType_t ) pxCode;
frame->a7 = ( UBaseType_t ) pvParameters;
#else
frame->a6 = ( UBaseType_t ) pvParameters;
#endif
frame->ps = PS_UM | PS_EXCM | PS_WOE | PS_CALLINC( 1 );
#endif
#endif /* ifdef __XTENSA_CALL0_ABI__ */
#ifdef XT_USE_SWPRI
/* Set the initial virtual priority mask value to all 1's. */

View File

@ -138,8 +138,24 @@ _frxt_int_enter:
mull a2, a4, a2
add a1, a1, a2 /* for current proc */
#ifdef CONFIG_FREERTOS_FPU_IN_ISR
#if XCHAL_CP_NUM > 0
rsr a3, CPENABLE /* Restore thread scope CPENABLE */
addi sp, sp,-4 /* ISR will manage FPU coprocessor by forcing */
s32i a3, a1, 0 /* its trigger */
#endif
#endif
.Lnested:
1:
#ifdef CONFIG_FREERTOS_FPU_IN_ISR
#if XCHAL_CP_NUM > 0
movi a3, 0 /* whilst ISRs pending keep CPENABLE exception active */
wsr a3, CPENABLE
rsync
#endif
#endif
mov a0, a12 /* restore return addr and return */
ret
@ -176,6 +192,15 @@ _frxt_int_exit:
s32i a2, a3, 0 /* save nesting count */
bnez a2, .Lnesting /* !=0 after decr so still nested */
#ifdef CONFIG_FREERTOS_FPU_IN_ISR
#if XCHAL_CP_NUM > 0
l32i a3, sp, 0 /* Grab last CPENABLE before leave ISR */
addi sp, sp, 4
wsr a3, CPENABLE
rsync /* ensure CPENABLE was modified */
#endif
#endif
movi a2, pxCurrentTCB
addx4 a2, a4, a2
l32i a2, a2, 0 /* a2 = current TCB */
@ -642,7 +667,6 @@ _frxt_task_coproc_state:
addx4 a15, a3, a15
l32i a15, a15, 0 /* && pxCurrentTCB != 0) { */
beqz a15, 2f
l32i a15, a15, CP_TOPOFSTACK_OFFS
ret

View File

@ -34,7 +34,11 @@
#endif
#include "xtensa_rtos.h"
#include "esp_clk.h"
#if CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/clk.h"
#elif CONFIG_IDF_TARGET_ESP32
#include "esp32/clk.h"
#endif
#ifdef XT_RTOS_TIMER_INT

View File

@ -34,7 +34,11 @@
#include "freertos/xtensa_api.h"
#include "freertos/portable.h"
#include "rom/ets_sys.h"
#if CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/rom/ets_sys.h"
#elif CONFIG_IDF_TARGET_ESP32
#include "esp32/rom/ets_sys.h"
#endif
#if XCHAL_HAVE_EXCEPTIONS

View File

@ -13,7 +13,7 @@
// limitations under the License.
#include "xtensa_rtos.h"
#include "esp_panic.h"
#include "esp_private/panic_reason.h"
#include "sdkconfig.h"
#include "soc/soc.h"

View File

@ -91,10 +91,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*******************************************************************************/
#include "xtensa_rtos.h"
#include "esp_panic.h"
#include "esp_private/panic_reason.h"
#include "sdkconfig.h"
#include "soc/soc.h"
#include "soc/dport_reg.h"
/*
Define for workaround: pin no-cpu-affinity tasks to a cpu when fpu is used.
@ -103,7 +102,25 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define TASKTCB_XCOREID_OFFSET (0x38+configMAX_TASK_NAME_LEN+3)&~3
.extern pxCurrentTCB
/* Enable stack backtrace across exception/interrupt - see below */
/*
--------------------------------------------------------------------------------
In order for backtracing to be able to trace from the pre-exception stack
across to the exception stack (including nested interrupts), we need to create
a pseudo base-save area to make it appear like the exception dispatcher was
triggered by a CALL4 from the pre-exception code. In reality, the exception
dispatcher uses the same window as pre-exception code, and only CALL0s are
used within the exception dispatcher.
To create the pseudo base-save area, we need to store a copy of the pre-exception's
base save area (a0 to a3) below the exception dispatcher's SP. EXCSAVE_x will
be used to store a copy of the SP that points to the interrupted code's exception
frame just in case the exception dispatcher's SP does not point to the exception
frame (which is the case when switching from task to interrupt stack).
Clearing the pseudo base-save area is unnecessary as the interrupt dispatcher
will restore the current SP to that of the pre-exception SP.
--------------------------------------------------------------------------------
*/
#ifdef CONFIG_FREERTOS_INTERRUPT_BACKTRACE
#define XT_DEBUG_BACKTRACE 1
#endif
@ -202,9 +219,22 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
/* This bit of code provides a nice debug backtrace in the debugger.
It does take a few more instructions, so undef XT_DEBUG_BACKTRACE
if you want to save the cycles.
At this point, the exception frame should have been allocated and filled,
and current sp points to the interrupt stack (for non-nested interrupt)
or below the allocated exception frame (for nested interrupts). Copy the
pre-exception's base save area below the current SP.
*/
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
rsr a0, EXCSAVE_1 + \level - 1 /* Get exception frame pointer stored in EXCSAVE_x */
l32i a3, a0, XT_STK_A0 /* Copy pre-exception a0 (return address) */
s32e a3, a1, -16
l32i a3, a0, XT_STK_A1 /* Copy pre-exception a1 (stack pointer) */
s32e a3, a1, -12
/* Backtracing only needs a0 and a1, no need to create full base save area.
Also need to change current frame's return address to point to pre-exception's
last run instruction.
*/
rsr a0, EPC_1 + \level - 1 /* return address */
movi a4, 0xC0000000 /* constant with top 2 bits set (call size) */
or a0, a0, a4 /* set top 2 bits */
@ -698,8 +728,16 @@ _xt_user_exc:
#endif
wsr a0, PS
/*
Create pseudo base save area. At this point, sp is still pointing to the
allocated and filled exception stack frame.
*/
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
l32i a3, sp, XT_STK_A0 /* Copy pre-exception a0 (return address) */
s32e a3, sp, -16
l32i a3, sp, XT_STK_A1 /* Copy pre-exception a1 (stack pointer) */
s32e a3, sp, -12
rsr a0, EPC_1 /* return address for debug backtrace */
movi a5, 0xC0000000 /* constant with top 2 bits set (call size) */
rsync /* wait for WSR.PS to complete */
@ -945,7 +983,12 @@ _xt_coproc_exc:
/* Get co-processor state save area of new owner thread. */
call0 XT_RTOS_CP_STATE /* a15 = new owner's save area */
beqz a15, .L_goto_invalid /* not in a thread (invalid) */
#ifndef CONFIG_FREERTOS_FPU_IN_ISR
beqz a15, .L_goto_invalid
#endif
/*When FPU in ISR is enabled we could deal with zeroed a15 */
/* Enable the co-processor's bit in CPENABLE. */
movi a0, _xt_coproc_mask
@ -987,7 +1030,13 @@ locking.
rsync /* ensure wsr.CPENABLE is complete */
/* Only need to context switch if new owner != old owner. */
/* If float is necessary on ISR, we need to remove this check */
/* below, because on restoring from ISR we may have new == old condition used
* to force cp restore to next thread
*/
#ifndef CONFIG_FREERTOS_FPU_IN_ISR
beq a15, a2, .L_goto_done /* new owner == old, we're done */
#endif
/* If no old owner then nothing to save. */
beqz a2, .L_check_new
@ -1029,6 +1078,7 @@ locking.
.L_check_new:
/* Check if any state has to be restored for new owner. */
/* NOTE: a15 = new owner's save area, cannot be zero when we get here. */
beqz a15, .L_xt_coproc_done
l16ui a3, a15, XT_CPSTORED /* a3 = new owner's CPSTORED */
movi a4, _xt_coproc_sa_offset
@ -1114,6 +1164,16 @@ _xt_lowint1:
movi a0, _xt_user_exit /* save exit point for dispatch */
s32i a0, sp, XT_STK_EXIT
/* EXCSAVE_1 should now be free to use. Use it to keep a copy of the
current stack pointer that points to the exception frame (XT_STK_FRAME).*/
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
mov a0, sp
wsr a0, EXCSAVE_1
#endif
#endif
/* Save rest of interrupt context and enter RTOS. */
call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
@ -1194,6 +1254,16 @@ _xt_medint2:
movi a0, _xt_medint2_exit /* save exit point for dispatch */
s32i a0, sp, XT_STK_EXIT
/* EXCSAVE_2 should now be free to use. Use it to keep a copy of the
current stack pointer that points to the exception frame (XT_STK_FRAME).*/
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
mov a0, sp
wsr a0, EXCSAVE_2
#endif
#endif
/* Save rest of interrupt context and enter RTOS. */
call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
@ -1265,6 +1335,16 @@ _xt_medint3:
movi a0, _xt_medint3_exit /* save exit point for dispatch */
s32i a0, sp, XT_STK_EXIT
/* EXCSAVE_3 should now be free to use. Use it to keep a copy of the
current stack pointer that points to the exception frame (XT_STK_FRAME).*/
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
mov a0, sp
wsr a0, EXCSAVE_3
#endif
#endif
/* Save rest of interrupt context and enter RTOS. */
call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
@ -1335,6 +1415,16 @@ _xt_medint4:
movi a0, _xt_medint4_exit /* save exit point for dispatch */
s32i a0, sp, XT_STK_EXIT
/* EXCSAVE_4 should now be free to use. Use it to keep a copy of the
current stack pointer that points to the exception frame (XT_STK_FRAME).*/
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
mov a0, sp
wsr a0, EXCSAVE_4
#endif
#endif
/* Save rest of interrupt context and enter RTOS. */
call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
@ -1405,6 +1495,15 @@ _xt_medint5:
movi a0, _xt_medint5_exit /* save exit point for dispatch */
s32i a0, sp, XT_STK_EXIT
/* EXCSAVE_5 should now be free to use. Use it to keep a copy of the
current stack pointer that points to the exception frame (XT_STK_FRAME).*/
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
mov a0, sp
wsr a0, EXCSAVE_5
#endif
#endif
/* Save rest of interrupt context and enter RTOS. */
call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
@ -1475,6 +1574,15 @@ _xt_medint6:
movi a0, _xt_medint6_exit /* save exit point for dispatch */
s32i a0, sp, XT_STK_EXIT
/* EXCSAVE_6 should now be free to use. Use it to keep a copy of the
current stack pointer that points to the exception frame (XT_STK_FRAME).*/
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
mov a0, sp
wsr a0, EXCSAVE_6
#endif
#endif
/* Save rest of interrupt context and enter RTOS. */
call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */

View File

@ -0,0 +1,27 @@
/*
* Since at least FreeRTOS V7.5.3, uxTopUsedPriority is no longer
* present in the kernel, so it has to be supplied by other means for
* OpenOCD's threads awareness.
*
* Add this file to your project, and, if you're using --gc-sections,
* ``--undefined=uxTopUsedPriority'' (or
* ``-Wl,--undefined=uxTopUsedPriority'' when using gcc for final
* linking) to your LDFLAGS; same with all the other symbols you need.
*/
#include "FreeRTOS.h"
#include "esp_attr.h"
#include "sdkconfig.h"
#ifdef __GNUC__
#define USED __attribute__( ( used ) )
#else
#define USED
#endif
/*
* This file is no longer needed as AFTER FreeRTOS V10.14.1 OpenOCD is fixed in the kernel.
* #ifdef CONFIG_ESP32_DEBUG_OCDAWARE
* const int USED DRAM_ATTR uxTopUsedPriority = configMAX_PRIORITIES - 1;
* #endif
*/

View File

@ -0,0 +1,46 @@
/*******************************************************************************
* // Copyright (c) 2003-2015 Cadence Design Systems, Inc.
* //
* // Permission is hereby granted, free of charge, to any person obtaining
* // a copy of this software and associated documentation files (the
* // "Software"), to deal in the Software without restriction, including
* // without limitation the rights to use, copy, modify, merge, publish,
* // distribute, sublicense, and/or sell copies of the Software, and to
* // permit persons to whom the Software is furnished to do so, subject to
* // the following conditions:
* //
* // The above copyright notice and this permission notice shall be included
* // in all copies or substantial portions of the Software.
* //
* // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
* --------------------------------------------------------------------------------
*/
/*
* This utility helps benchmarking interrupt latency and context switches.
* In order to enable it, set configBENCHMARK to 1 in FreeRTOSConfig.h.
* You will also need to download the FreeRTOS_trace patch that contains
* portbenchmark.c and the complete version of portbenchmark.h
*/
#ifndef PORTBENCHMARK_H
#define PORTBENCHMARK_H
#if configBENCHMARK
#error "You need to download the FreeRTOS_trace patch that overwrites this file"
#endif
#define portbenchmarkINTERRUPT_DISABLE()
#define portbenchmarkINTERRUPT_RESTORE( newstate )
#define portbenchmarkIntLatency()
#define portbenchmarkIntWait()
#define portbenchmarkReset()
#define portbenchmarkPrint()
#endif /* PORTBENCHMARK_H */

View File

@ -0,0 +1,489 @@
/*
* FreeRTOS V8.2.0 - Copyright (C) 2015 Real Time Engineers Ltd.
* All rights reserved
*
* VISIT https://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
*
***************************************************************************
* *
* FreeRTOS provides completely free yet professionally developed, *
* robust, strictly quality controlled, supported, and cross *
* platform software that has become a de facto standard. *
* *
* Help yourself get started quickly and support the FreeRTOS *
* project by purchasing a FreeRTOS tutorial book, reference *
* manual, or both from: https://www.FreeRTOS.org/Documentation *
* *
* Thank you! *
* *
***************************************************************************
*
* This file is part of the FreeRTOS distribution.
*
* FreeRTOS is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License (version 2) as published by the
* Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.
*
* >>! NOTE: The modification to the GPL is included to allow you to !<<
* >>! distribute a combined work that includes FreeRTOS without being !<<
* >>! obliged to provide the source code for proprietary components !<<
* >>! outside of the FreeRTOS kernel. !<<
*
* FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. Full license text is available from the following
* link: https://www.FreeRTOS.org/a00114.html
*
*
***************************************************************************
* *
* Having a problem? Start by reading the FAQ "My application does *
* not run, what could be wrong?" *
* *
* https://www.FreeRTOS.org/FAQHelp.html *
* *
***************************************************************************
*
* https://www.FreeRTOS.org - Documentation, books, training, latest versions,
* license and Real Time Engineers Ltd. contact details.
*
* https://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
* including FreeRTOS+Trace - an indispensable productivity tool, a DOS
* compatible FAT file system, and our tiny thread aware UDP/IP stack.
*
* https://www.highintegritysystems.com/openrtos/ - Real Time Engineers ltd
* license FreeRTOS to High Integrity Systems to sell under the OpenRTOS brand.
* Low cost OpenRTOS licenses offer ticketed support, indemnification
* and middleware.
*
* https://www.highintegritysystems.com/safertos/ - High Integrity Systems
* also provide a safety engineered and independently SIL3 certified version
* for use in safety and mission critical applications that require
* provable dependability.
*
*/
#ifndef PORTMACRO_H
#define PORTMACRO_H
/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
/* *INDENT-ON* */
#ifndef __ASSEMBLER__
#include <stdint.h>
#include <xtensa/hal.h>
#include <xtensa/config/core.h>
#include <xtensa/config/system.h> /* required for XSHAL_CLIB */
#include <xtensa/xtruntime.h>
#include "esp_timer.h" /* required for FreeRTOS run time stats */
#include "esp_system.h"
#include <esp_heap_caps.h>
#include "soc/soc_memory_layout.h"
/*#include "xtensa_context.h" */
/*-----------------------------------------------------------
* Port specific definitions.
*
* The settings in this file configure FreeRTOS correctly for the
* given hardware and compiler.
*
* These settings should not be altered.
*-----------------------------------------------------------
*/
/* Type definitions. */
#define portCHAR int8_t
#define portFLOAT float
#define portDOUBLE double
#define portLONG int32_t
#define portSHORT int16_t
#define portSTACK_TYPE uint8_t
#define portBASE_TYPE int
typedef portSTACK_TYPE StackType_t;
typedef portBASE_TYPE BaseType_t;
typedef unsigned portBASE_TYPE UBaseType_t;
#if ( configUSE_16_BIT_TICKS == 1 )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#else
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
#endif
/*-----------------------------------------------------------*/
/* portbenchmark */
#include "portbenchmark.h"
#include "sdkconfig.h"
#include "esp_attr.h"
/* "mux" data structure (spinlock) */
typedef struct
{
/* owner field values:
* 0 - Uninitialized (invalid)
* portMUX_FREE_VAL - Mux is free, can be locked by either CPU
* CORE_ID_PRO / CORE_ID_APP - Mux is locked to the particular core
*
* Any value other than portMUX_FREE_VAL, CORE_ID_PRO, CORE_ID_APP indicates corruption
*/
uint32_t owner;
/* count field:
* If mux is unlocked, count should be zero.
* If mux is locked, count is non-zero & represents the number of recursive locks on the mux.
*/
uint32_t count;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
const char * lastLockedFn;
int lastLockedLine;
#endif
} portMUX_TYPE;
#define portMUX_FREE_VAL 0xB33FFFFF
/* Special constants for vPortCPUAcquireMutexTimeout() */
#define portMUX_NO_TIMEOUT ( -1 ) /* When passed for 'timeout_cycles', spin forever if necessary */
#define portMUX_TRY_LOCK 0 /* Try to acquire the spinlock a single time only */
/* Keep this in sync with the portMUX_TYPE struct definition please. */
#ifndef CONFIG_FREERTOS_PORTMUX_DEBUG
#define portMUX_INITIALIZER_UNLOCKED \
{ \
.owner = portMUX_FREE_VAL, \
.count = 0, \
}
#else
#define portMUX_INITIALIZER_UNLOCKED \
{ \
.owner = portMUX_FREE_VAL, \
.count = 0, \
.lastLockedFn = "(never locked)", \
.lastLockedLine = -1 \
}
#endif /* ifndef CONFIG_FREERTOS_PORTMUX_DEBUG */
#define portASSERT_IF_IN_ISR() vPortAssertIfInISR()
void vPortAssertIfInISR();
#define portCRITICAL_NESTING_IN_TCB 1
/*
* Modifications to portENTER_CRITICAL.
*
* For an introduction, see "Critical Sections & Disabling Interrupts" in docs/api-guides/freertos-smp.rst
*
* The original portENTER_CRITICAL only disabled the ISRs. This is enough for single-CPU operation: by
* disabling the interrupts, there is no task switch so no other tasks can meddle in the data, and because
* interrupts are disabled, ISRs can't corrupt data structures either.
*
* For multiprocessing, things get a bit more hairy. First of all, disabling the interrupts doesn't stop
* the tasks or ISRs on the other processors meddling with our CPU. For tasks, this is solved by adding
* a spinlock to the portENTER_CRITICAL macro. A task running on the other CPU accessing the same data will
* spinlock in the portENTER_CRITICAL code until the first CPU is done.
*
* For ISRs, we now also need muxes: while portENTER_CRITICAL disabling interrupts will stop ISRs on the same
* CPU from meddling with the data, it does not stop interrupts on the other cores from interfering with the
* data. For this, we also use a spinlock in the routines called by the ISR, but these spinlocks
* do not disable the interrupts (because they already are).
*
* This all assumes that interrupts are either entirely disabled or enabled. Interrupt priority levels
* will break this scheme.
*
* Remark: For the ESP32, portENTER_CRITICAL and portENTER_CRITICAL_ISR both alias vTaskEnterCritical, meaning
* that either function can be called both from ISR as well as task context. This is not standard FreeRTOS
* behaviour; please keep this in mind if you need any compatibility with other FreeRTOS implementations.
*/
void vPortCPUInitializeMutex( portMUX_TYPE * mux );
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
#error CONFIG_FREERTOS_PORTMUX_DEBUG not supported in Amazon FreeRTOS
#endif
void vTaskExitCritical();
void vTaskEnterCritical();
static inline void vPortConsumeSpinlockArg( int unused,
... )
{
}
/** @brief Acquire a portmux spinlock with a timeout
*
* @param mux Pointer to portmux to acquire.
* @param timeout_cycles Timeout to spin, in CPU cycles. Pass portMUX_NO_TIMEOUT to wait forever,
* portMUX_TRY_LOCK to try a single time to acquire the lock.
*
* @return true if mutex is successfully acquired, false on timeout.
*/
bool vPortCPUAcquireMutexTimeout( portMUX_TYPE * mux,
int timeout_cycles );
void vPortCPUReleaseMutex( portMUX_TYPE * mux );
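As a usage sketch (spinlock and function names hypothetical), the timeout variant can serve as a non-blocking try-lock across cores: it returns true when the spinlock was taken, portMUX_TRY_LOCK limits it to a single attempt, and portMUX_NO_TIMEOUT spins until the lock is free.

static portMUX_TYPE xSharedStateMux = portMUX_INITIALIZER_UNLOCKED;

bool xTryUpdateSharedState( void )
{
    if( !vPortCPUAcquireMutexTimeout( &xSharedStateMux, portMUX_TRY_LOCK ) )
    {
        return false;   /* the other core holds the lock; back off */
    }
    /* ... touch the shared state ... */
    vPortCPUReleaseMutex( &xSharedStateMux );
    return true;
}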
#define portENTER_CRITICAL( ... ) do { vTaskEnterCritical(); vPortConsumeSpinlockArg( 0, ## __VA_ARGS__ ); } while( 0 )
#define portEXIT_CRITICAL( ... ) do { vTaskExitCritical(); vPortConsumeSpinlockArg( 0, ## __VA_ARGS__ ); } while( 0 )
#define portENTER_CRITICAL_ISR( mux ) vPortCPUAcquireMutexTimeout( mux, portMUX_NO_TIMEOUT )
#define portEXIT_CRITICAL_ISR( mux ) vPortCPUReleaseMutex( mux )
#define portENTER_CRITICAL_SAFE( mux ) \
do { \
if( xPortInIsrContext() ) { \
portENTER_CRITICAL_ISR( mux ); \
} \
else { \
portENTER_CRITICAL( mux ); \
} \
} while( 0 )
#define portEXIT_CRITICAL_SAFE( mux ) \
do { \
if( xPortInIsrContext() ) { \
portEXIT_CRITICAL_ISR( mux ); \
} \
else { \
portEXIT_CRITICAL( mux ); \
} \
} while( 0 )
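A short usage sketch (spinlock and counter hypothetical) of the macros above: portENTER_CRITICAL()/portEXIT_CRITICAL() take an optional spinlock argument that this port routes through vTaskEnterCritical()/vTaskExitCritical(), the _ISR variants work directly on the spinlock, and the _SAFE variants pick the right flavour at run time via xPortInIsrContext().

static portMUX_TYPE xCounterMux = portMUX_INITIALIZER_UNLOCKED;
static volatile uint32_t ulSharedCounter = 0;

void vIncrementFromTask( void )
{
    portENTER_CRITICAL( &xCounterMux );
    ulSharedCounter++;
    portEXIT_CRITICAL( &xCounterMux );
}

void vIncrementFromAnyContext( void )   /* callable from task or ISR */
{
    portENTER_CRITICAL_SAFE( &xCounterMux );
    ulSharedCounter++;
    portEXIT_CRITICAL_SAFE( &xCounterMux );
}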
/* Critical section management. NW-TODO: replace XTOS_SET_INTLEVEL with more efficient version, if any? */
/* These cannot be nested. They should be used with a lot of care and cannot be called from interrupt level. */
/* */
/* Only applies to one CPU. See notes above & below for reasons not to use these. */
#define portDISABLE_INTERRUPTS() do { XTOS_SET_INTLEVEL( XCHAL_EXCM_LEVEL ); portbenchmarkINTERRUPT_DISABLE(); } while( 0 )
#define portENABLE_INTERRUPTS() do { portbenchmarkINTERRUPT_RESTORE( 0 ); XTOS_SET_INTLEVEL( 0 ); } while( 0 )
/* Cleaner solution allows nested interrupts disabling and restoring via local registers or stack. */
/* They can be called from interrupts too. */
/* WARNING: Only applies to current CPU. See notes above. */
static inline unsigned portENTER_CRITICAL_NESTED()
{
unsigned state = XTOS_SET_INTLEVEL( XCHAL_EXCM_LEVEL );
portbenchmarkINTERRUPT_DISABLE();
return state;
}
#define portEXIT_CRITICAL_NESTED( state ) do { portbenchmarkINTERRUPT_RESTORE( state ); XTOS_RESTORE_JUST_INTLEVEL( state ); } while( 0 )
/* These FreeRTOS versions are similar to the nested versions above */
#define portSET_INTERRUPT_MASK_FROM_ISR() portENTER_CRITICAL_NESTED()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( state ) portEXIT_CRITICAL_NESTED( state )
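For example (register access hypothetical), the nested variants keep the previous interrupt state in a local so calls can nest and can also be made from ISR context:

void vTouchSharedRegister( void )
{
    unsigned uxSavedState = portENTER_CRITICAL_NESTED();
    /* ... short, interrupt-free access to per-CPU state ... */
    portEXIT_CRITICAL_NESTED( uxSavedState );
}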
/*Because the ROM routines don't necessarily handle a stack in external RAM correctly, we force */
/*the stack memory to always be internal. */
#define pvPortMallocTcbMem( size ) heap_caps_malloc( size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT )
#define pvPortMallocStackMem( size ) heap_caps_malloc( size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT )
/*xTaskCreateStatic uses these functions to check incoming memory. */
#define portVALID_TCB_MEM( ptr ) ( esp_ptr_internal( ptr ) && esp_ptr_byte_accessible( ptr ) )
#ifdef CONFIG_SPIRAM_ALLOW_STACK_EXTERNAL_MEMORY
#define portVALID_STACK_MEM( ptr ) esp_ptr_byte_accessible( ptr )
#else
#define portVALID_STACK_MEM( ptr ) ( esp_ptr_internal( ptr ) && esp_ptr_byte_accessible( ptr ) )
#endif
/*
* Wrapper for the Xtensa compare-and-set instruction. This subroutine will atomically compare
* *addr to 'compare'. If *addr == compare, *addr is set to *set. *set is updated with the previous
* value of *addr (either 'compare' or some other value.)
*
* Warning: From the ISA docs: in some (unspecified) cases, the s32c1i instruction may return the
* *bitwise inverse* of the old mem if the mem wasn't written. This doesn't seem to happen on the
* ESP32 (portMUX assertions would fail).
*/
static inline void uxPortCompareSet( volatile uint32_t * addr,
uint32_t compare,
uint32_t * set )
{
__asm__ __volatile__ (
"WSR %2,SCOMPARE1 \n"
"S32C1I %0, %1, 0 \n"
: "=r" ( *set )
: "r" ( addr ), "r" ( compare ), "0" ( *set )
);
}
void uxPortCompareSetExtram( volatile uint32_t * addr,
uint32_t compare,
uint32_t * set );
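As a sketch of the calling convention (lock word and helper hypothetical), a busy-wait lock can be built on top of uxPortCompareSet(): *set carries the value to store on success and, on return, holds the previous value of *addr, so the loop succeeds once it reads back the unlocked value.

#define LOCK_UNLOCKED    0
#define LOCK_TAKEN       1

static void prvSpinLock( volatile uint32_t * pulLock )
{
    uint32_t ulPrev;

    do
    {
        ulPrev = LOCK_TAKEN;                            /* value to store if *pulLock == LOCK_UNLOCKED */
        uxPortCompareSet( pulLock, LOCK_UNLOCKED, &ulPrev );
    } while( ulPrev != LOCK_UNLOCKED );                 /* ulPrev now holds the old value; retry until it was free */
}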
/*-----------------------------------------------------------*/
/* Architecture specifics. */
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 4
#define portNOP() XT_NOP()
/*-----------------------------------------------------------*/
/* Fine resolution time */
#define portGET_RUN_TIME_COUNTER_VALUE() xthal_get_ccount()
/*ccount or esp_timer are initialized elsewhere */
#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS()
#ifdef CONFIG_FREERTOS_RUN_TIME_STATS_USING_ESP_TIMER
/* Coarse resolution time (us) */
#define portALT_GET_RUN_TIME_COUNTER_VALUE( x ) x = ( uint32_t ) esp_timer_get_time()
#endif
/* Kernel utilities. */
void vPortYield( void );
void _frxt_setup_switch( void );
#define portYIELD() vPortYield()
#define portYIELD_FROM_ISR() { traceISR_EXIT_TO_SCHEDULER(); _frxt_setup_switch(); }
static inline uint32_t xPortGetCoreID();
/*-----------------------------------------------------------*/
/* Task function macros as described on the FreeRTOS.org WEB site. */
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
/* When coprocessors are defined, we need to maintain a pointer to the coprocessor save area. */
/* We currently use a hack: redefine field xMPU_SETTINGS in TCB block as a structure that can hold: */
/* MPU wrappers, coprocessor area pointer, trace code structure, and more if needed. */
/* The field is normally used for memory protection. FreeRTOS should create another general purpose field. */
typedef struct
{
#if XCHAL_CP_NUM > 0
volatile StackType_t * coproc_area; /* Pointer to coprocessor save area; MUST BE FIRST */
#endif
#if portUSING_MPU_WRAPPERS
/* Define here mpu_settings, which is port dependent */
int mpu_setting; /* Just a dummy example here; MPU not ported to Xtensa yet */
#endif
#if configUSE_TRACE_FACILITY_2
struct
{
/* Cf. porttraceStamp() */
int taskstamp; /* Stamp from inside task to see where we are */
int taskstampcount; /* A counter usually incremented when we restart the task's loop */
} porttrace;
#endif
} xMPU_SETTINGS;
/* Main hack to use MPU_wrappers even when no MPU is defined (warning: mpu_setting should not be accessed; otherwise move this above xMPU_SETTINGS) */
#if ( XCHAL_CP_NUM > 0 || configUSE_TRACE_FACILITY_2 ) && !portUSING_MPU_WRAPPERS /* If MPU wrappers not used, we still need to allocate coproc area */
#undef portUSING_MPU_WRAPPERS
#define portUSING_MPU_WRAPPERS 1 /* Enable it to allocate coproc area */
#define MPU_WRAPPERS_H /* Override mpu_wrapper.h to disable unwanted code */
#define PRIVILEGED_FUNCTION
#define PRIVILEGED_DATA
#endif
bool vApplicationSleep( TickType_t xExpectedIdleTime );
#define portSUPPRESS_TICKS_AND_SLEEP( idleTime ) vApplicationSleep( idleTime )
void _xt_coproc_release( volatile void * coproc_sa_base );
/*
* Map to the memory management routines required for the port.
*
* Note that libc standard malloc/free are also available for
* non-FreeRTOS-specific code, and behave the same as
* pvPortMalloc()/vPortFree().
*/
#define pvPortMalloc heap_caps_malloc_default
#define vPortFree heap_caps_free
#define xPortGetFreeHeapSize esp_get_free_heap_size
#define xPortGetMinimumEverFreeHeapSize esp_get_minimum_free_heap_size
/*
* Send an interrupt to another core in order to make the task running
* on it yield for a higher-priority task.
*/
void vPortYieldOtherCore( BaseType_t coreid ) PRIVILEGED_FUNCTION;
/*
* Callback to set a watchpoint on the end of the stack. Called every context switch to change the stack
* watchpoint around.
*/
void vPortSetStackWatchpoint( void * pxStackStart );
/*
* Returns true if the current core is in ISR context; low prio ISR, med prio ISR or timer tick ISR. High prio ISRs
* aren't detected here, but they normally cannot call C code, so that should not be an issue anyway.
*/
BaseType_t xPortInIsrContext();
/*
* This function will be called in High prio ISRs. Returns true if the current core was in ISR context
* before calling into high prio ISR context.
*/
BaseType_t xPortInterruptedFromISRContext();
/*
* The structures and methods of manipulating the MPU are contained within the
* port layer.
*
* Fills the xMPUSettings structure with the memory region information
* contained in xRegions.
*/
#if ( portUSING_MPU_WRAPPERS == 1 )
struct xMEMORY_REGION;
void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
const struct xMEMORY_REGION * const xRegions,
StackType_t * pxBottomOfStack,
uint32_t usStackDepth ) PRIVILEGED_FUNCTION;
void vPortReleaseTaskMPUSettings( xMPU_SETTINGS * xMPUSettings );
#endif
/* Multi-core: get current core ID */
static inline uint32_t IRAM_ATTR xPortGetCoreID()
{
int id;
asm (
"rsr.prid %0\n"
" extui %0,%0,13,1"
: "=r" ( id ) );
return id;
}
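A small usage sketch (array hypothetical; portNUM_PROCESSORS assumed available from the port configuration): the core ID is handy for indexing per-core data without taking a lock, since each core only touches its own slot.

static uint32_t ulTickWork[ portNUM_PROCESSORS ];

void vCountTickWork( void )
{
    ulTickWork[ xPortGetCoreID() ]++;
}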
/* Get tick rate per second */
uint32_t xPortGetTickRateHz( void );
/* porttrace */
#if configUSE_TRACE_FACILITY_2
#include "porttrace.h"
#endif
/* configASSERT_2 if requested */
#if configASSERT_2
#include <stdio.h>
void exit( int );
#define configASSERT( x ) if( !( x ) ) { porttracePrint( -1 ); printf( "\nAssertion failed in %s:%d\n", __FILE__, __LINE__ ); exit( -1 ); }
#endif
#endif // __ASSEMBLER__
/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
/* *INDENT-ON* */
#endif /* PORTMACRO_H */

View File

@ -0,0 +1,132 @@
/*******************************************************************************
* Copyright (c) 2006-2015 Cadence Design Systems Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
******************************************************************************/
/******************************************************************************
* Xtensa-specific API for RTOS ports.
******************************************************************************/
#ifndef __XTENSA_API_H__
#define __XTENSA_API_H__
#include <xtensa/hal.h>
#include "xtensa_context.h"
/* Typedef for C-callable interrupt handler function */
typedef void (* xt_handler)( void * );
/* Typedef for C-callable exception handler function */
typedef void (* xt_exc_handler)( XtExcFrame * );
/*
* -------------------------------------------------------------------------------
* Call this function to set a handler for the specified exception. The handler
* will be installed on the core that calls this function.
*
* n - Exception number (type)
* f - Handler function address, NULL to uninstall handler.
*
* The handler will be passed a pointer to the exception frame, which is created
* on the stack of the thread that caused the exception.
*
* If the handler returns, the thread context will be restored and the faulting
* instruction will be retried. Any values in the exception frame that are
* modified by the handler will be restored as part of the context. For details
* of the exception frame structure see xtensa_context.h.
* -------------------------------------------------------------------------------
*/
extern xt_exc_handler xt_set_exception_handler( int n,
xt_exc_handler f );
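A hedged sketch (handler body and cause-code choice hypothetical) of installing a handler on the calling core. The handler receives the XtExcFrame built on the faulting thread's stack; fields it modifies, such as frame->pc, are restored with the context as described above. EXCCAUSE_ILLEGAL is assumed to come from <xtensa/corebits.h>.

static void prvIllegalInsnHandler( XtExcFrame * frame )
{
    /* Skip the faulting instruction instead of retrying it
     * (assumes a 3-byte opcode; purely illustrative). */
    frame->pc += 3;
}

void vInstallExceptionHandler( void )
{
    xt_set_exception_handler( EXCCAUSE_ILLEGAL, prvIllegalInsnHandler );
}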
/*
* -------------------------------------------------------------------------------
* Call this function to set a handler for the specified interrupt. The handler
* will be installed on the core that calls this function.
*
* n - Interrupt number.
* f - Handler function address, NULL to uninstall handler.
* arg - Argument to be passed to handler.
* -------------------------------------------------------------------------------
*/
extern xt_handler xt_set_interrupt_handler( int n,
xt_handler f,
void * arg );
/*
* -------------------------------------------------------------------------------
* Call this function to enable the specified interrupts on the core that runs
* this code.
*
* mask - Bit mask of interrupts to be enabled.
* -------------------------------------------------------------------------------
*/
extern void xt_ints_on( unsigned int mask );
/*
* -------------------------------------------------------------------------------
* Call this function to disable the specified interrupts on the core that runs
* this code.
*
* mask - Bit mask of interrupts to be disabled.
* -------------------------------------------------------------------------------
*/
extern void xt_ints_off( unsigned int mask );
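A usage sketch (interrupt number and handler hypothetical) tying the calls above together: install a C handler on the calling core, then unmask that interrupt; the mask argument carries one bit per interrupt number.

#define MY_INT_NUM    7    /* assumed: an interrupt number valid for the target configuration */

static void prvMyIntHandler( void * pvArg )
{
    ( void ) pvArg;
    /* ... acknowledge the source, notify a task, etc. ... */
}

void vHookMyInterrupt( void )
{
    xt_set_interrupt_handler( MY_INT_NUM, prvMyIntHandler, NULL );
    xt_ints_on( 1u << MY_INT_NUM );
}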
/*
* -------------------------------------------------------------------------------
* Call this function to set the specified (s/w) interrupt.
* -------------------------------------------------------------------------------
*/
static inline void xt_set_intset( unsigned int arg )
{
xthal_set_intset( arg );
}
/*
* -------------------------------------------------------------------------------
* Call this function to clear the specified (s/w or edge-triggered)
* interrupt.
* -------------------------------------------------------------------------------
*/
static inline void xt_set_intclear( unsigned int arg )
{
xthal_set_intclear( arg );
}
/*
* -------------------------------------------------------------------------------
* Call this function to get handler's argument for the specified interrupt.
*
* n - Interrupt number.
* -------------------------------------------------------------------------------
*/
extern void * xt_get_interrupt_handler_arg( int n );
#endif /* __XTENSA_API_H__ */

View File

@ -0,0 +1,145 @@
/*******************************************************************************
* // Copyright (c) 2003-2015 Cadence Design Systems, Inc.
* //
* // Permission is hereby granted, free of charge, to any person obtaining
* // a copy of this software and associated documentation files (the
* // "Software"), to deal in the Software without restriction, including
* // without limitation the rights to use, copy, modify, merge, publish,
* // distribute, sublicense, and/or sell copies of the Software, and to
* // permit persons to whom the Software is furnished to do so, subject to
* // the following conditions:
* //
* // The above copyright notice and this permission notice shall be included
* // in all copies or substantial portions of the Software.
* //
* // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
* --------------------------------------------------------------------------------
*
* Configuration-specific information for Xtensa build. This file must be
* included in FreeRTOSConfig.h to properly set up the config-dependent
* parameters correctly.
*
* NOTE: To enable thread-safe C library support, XT_USE_THREAD_SAFE_CLIB must
* be defined to be > 0 somewhere above or on the command line.
*
*******************************************************************************/
#ifndef XTENSA_CONFIG_H
#define XTENSA_CONFIG_H
#ifdef __cplusplus
extern "C" {
#endif
#include <xtensa/hal.h>
#include <xtensa/config/core.h>
#include <xtensa/config/system.h> /* required for XSHAL_CLIB */
#include "xtensa_context.h"
/*-----------------------------------------------------------------------------
* STACK REQUIREMENTS
*
* This section defines the minimum stack size, and the extra space required to
* be allocated for saving coprocessor state and/or C library state information
* (if thread safety is enabled for the C library). The sizes are in bytes.
*
* Stack sizes for individual tasks should be derived from these minima based on
* the maximum call depth of the task and the maximum level of interrupt nesting.
* A minimum stack size is defined by XT_STACK_MIN_SIZE. This minimum is based
* on the requirement for a task that calls nothing else but can be interrupted.
* This assumes that interrupt handlers do not call more than a few levels deep.
* If this is not true, i.e. one or more interrupt handlers make deep calls then
* the minimum must be increased.
*
* If the Xtensa processor configuration includes coprocessors, then space is
* allocated to save the coprocessor state on the stack.
*
* If thread safety is enabled for the C runtime library, (XT_USE_THREAD_SAFE_CLIB
* is defined) then space is allocated to save the C library context in the TCB.
*
* Allocating insufficient stack space is a common source of hard-to-find errors.
* During development, it is best to enable the FreeRTOS stack checking features.
*
* Usage:
*
* XT_USE_THREAD_SAFE_CLIB -- Define this to a nonzero value to enable thread-safe
* use of the C library. This will require extra stack
* space to be allocated for tasks that use the C library
* reentrant functions. See below for more information.
*
* NOTE: The Xtensa toolchain supports multiple C libraries and not all of them
* support thread safety. Check your core configuration to see which C library
* was chosen for your system.
*
* XT_STACK_MIN_SIZE -- The minimum stack size for any task. It is recommended
* that you do not use a stack smaller than this for any
* task. In case you want to use stacks smaller than this
* size, you must verify that the smaller size(s) will work
* under all operating conditions.
*
* XT_STACK_EXTRA -- The amount of extra stack space to allocate for a task
* that does not make C library reentrant calls. Add this
* to the amount of stack space required by the task itself.
*
* XT_STACK_EXTRA_CLIB -- The amount of space to allocate for C library state.
*
* -----------------------------------------------------------------------------*/
/* Extra space required for interrupt/exception hooks. */
#ifdef XT_INTEXC_HOOKS
#ifdef __XTENSA_CALL0_ABI__
#define STK_INTEXC_EXTRA 0x200
#else
#define STK_INTEXC_EXTRA 0x180
#endif
#else
#define STK_INTEXC_EXTRA 0
#endif
#define XT_CLIB_CONTEXT_AREA_SIZE 0
/*------------------------------------------------------------------------------
* Extra size -- interrupt frame plus coprocessor save area plus hook space.
* NOTE: Make sure XT_INTEXC_HOOKS is undefined unless you really need the hooks.
* ------------------------------------------------------------------------------*/
#ifdef __XTENSA_CALL0_ABI__
#define XT_XTRA_SIZE ( XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x10 + XT_CP_SIZE )
#else
#define XT_XTRA_SIZE ( XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x20 + XT_CP_SIZE )
#endif
/*------------------------------------------------------------------------------
* Space allocated for user code -- function calls and local variables.
* NOTE: This number can be adjusted to suit your needs. You must verify that the
* amount of space you reserve is adequate for the worst-case conditions in your
* application.
* NOTE: The windowed ABI requires more stack, since space has to be reserved
* for spilling register windows.
* ------------------------------------------------------------------------------*/
#ifdef __XTENSA_CALL0_ABI__
#define XT_USER_SIZE 0x200
#else
#define XT_USER_SIZE 0x400
#endif
/* Minimum recommended stack size. */
#define XT_STACK_MIN_SIZE ( ( XT_XTRA_SIZE + XT_USER_SIZE ) / sizeof( unsigned char ) )
/* OS overhead with and without C library thread context. */
#define XT_STACK_EXTRA ( XT_XTRA_SIZE )
#define XT_STACK_EXTRA_CLIB ( XT_XTRA_SIZE + XT_CLIB_CONTEXT_AREA_SIZE )
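A sizing sketch (call-depth figure and task names hypothetical) following the guidance above: budget the task's own worst-case call depth, add XT_STACK_EXTRA (or XT_STACK_EXTRA_CLIB for tasks that use C library reentrant functions), and never go below XT_STACK_MIN_SIZE. Because portSTACK_TYPE is uint8_t in this port, the depth argument to xTaskCreate() is in bytes.

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

#define MY_TASK_CALL_DEPTH_BYTES    0x300    /* assumed application-specific figure */
#define MY_TASK_STACK_BYTES         ( MY_TASK_CALL_DEPTH_BYTES + XT_STACK_EXTRA )

extern void vMyTask( void * pvParameters );  /* defined elsewhere (assumed) */

void vStartMyTask( void )
{
    configASSERT( MY_TASK_STACK_BYTES >= XT_STACK_MIN_SIZE );
    xTaskCreate( vMyTask, "my_task", MY_TASK_STACK_BYTES, NULL, 5, NULL );
}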
#ifdef __cplusplus
}
#endif
#endif /* XTENSA_CONFIG_H */

View File

@ -0,0 +1,378 @@
/*******************************************************************************
Copyright (c) 2006-2015 Cadence Design Systems Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
XTENSA CONTEXT FRAMES AND MACROS FOR RTOS ASSEMBLER SOURCES
This header contains definitions and macros for use primarily by Xtensa
RTOS assembly coded source files. It includes and uses the Xtensa hardware
abstraction layer (HAL) to deal with config specifics. It may also be
included in C source files.
!! Supports only Xtensa Exception Architecture 2 (XEA2). XEA1 not supported. !!
NOTE: The Xtensa architecture requires stack pointer alignment to 16 bytes.
*******************************************************************************/
#ifndef XTENSA_CONTEXT_H
#define XTENSA_CONTEXT_H
#ifdef __ASSEMBLER__
#include <xtensa/coreasm.h>
#endif
#include <xtensa/config/tie.h>
#include <xtensa/corebits.h>
#include <xtensa/config/system.h>
#include <xtensa/xtruntime-frames.h>
/* Align a value up to nearest n-byte boundary, where n is a power of 2. */
#define ALIGNUP(n, val) (((val) + (n)-1) & -(n))
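A quick worked instance of the macro (values illustrative): with n = 16, the expression (val + 15) & ~15 rounds up to the next 16-byte boundary.

/* ALIGNUP(16, 0x2c) == 0x30, ALIGNUP(16, 0x30) == 0x30,
 * ALIGNUP(4, 0x07)  == 0x08. */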
/*
-------------------------------------------------------------------------------
Macros that help define structures for both C and assembler.
-------------------------------------------------------------------------------
*/
#ifdef STRUCT_BEGIN
#undef STRUCT_BEGIN
#undef STRUCT_FIELD
#undef STRUCT_AFIELD
#undef STRUCT_END
#endif
#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__)
#define STRUCT_BEGIN .pushsection .text; .struct 0
#define STRUCT_FIELD(ctype,size,asname,name) asname: .space size
#define STRUCT_AFIELD(ctype,size,asname,name,n) asname: .space (size)*(n)
#define STRUCT_END(sname) sname##Size:; .popsection
#else
#define STRUCT_BEGIN typedef struct {
#define STRUCT_FIELD(ctype,size,asname,name) ctype name;
#define STRUCT_AFIELD(ctype,size,asname,name,n) ctype name[n];
#define STRUCT_END(sname) } sname;
#endif //_ASMLANGUAGE || __ASSEMBLER__
/*
-------------------------------------------------------------------------------
INTERRUPT/EXCEPTION STACK FRAME FOR A THREAD OR NESTED INTERRUPT
A stack frame of this structure is allocated for any interrupt or exception.
It goes on the current stack. If the RTOS has a system stack for handling
interrupts, every thread stack must allow space for just one interrupt stack
frame, then nested interrupt stack frames go on the system stack.
The frame includes basic registers (explicit) and "extra" registers introduced
by user TIE or the use of the MAC16 option in the user's Xtensa config.
The frame size is minimized by omitting regs not applicable to user's config.
For Windowed ABI, this stack frame includes the interruptee's base save area,
another base save area to manage gcc nested functions, and a little temporary
space to help manage the spilling of the register windows.
-------------------------------------------------------------------------------
*/
STRUCT_BEGIN
STRUCT_FIELD (long, 4, XT_STK_EXIT, exit) /* exit point for dispatch */
STRUCT_FIELD (long, 4, XT_STK_PC, pc) /* return PC */
STRUCT_FIELD (long, 4, XT_STK_PS, ps) /* return PS */
STRUCT_FIELD (long, 4, XT_STK_A0, a0)
STRUCT_FIELD (long, 4, XT_STK_A1, a1) /* stack pointer before interrupt */
STRUCT_FIELD (long, 4, XT_STK_A2, a2)
STRUCT_FIELD (long, 4, XT_STK_A3, a3)
STRUCT_FIELD (long, 4, XT_STK_A4, a4)
STRUCT_FIELD (long, 4, XT_STK_A5, a5)
STRUCT_FIELD (long, 4, XT_STK_A6, a6)
STRUCT_FIELD (long, 4, XT_STK_A7, a7)
STRUCT_FIELD (long, 4, XT_STK_A8, a8)
STRUCT_FIELD (long, 4, XT_STK_A9, a9)
STRUCT_FIELD (long, 4, XT_STK_A10, a10)
STRUCT_FIELD (long, 4, XT_STK_A11, a11)
STRUCT_FIELD (long, 4, XT_STK_A12, a12)
STRUCT_FIELD (long, 4, XT_STK_A13, a13)
STRUCT_FIELD (long, 4, XT_STK_A14, a14)
STRUCT_FIELD (long, 4, XT_STK_A15, a15)
STRUCT_FIELD (long, 4, XT_STK_SAR, sar)
STRUCT_FIELD (long, 4, XT_STK_EXCCAUSE, exccause)
STRUCT_FIELD (long, 4, XT_STK_EXCVADDR, excvaddr)
#if XCHAL_HAVE_LOOPS
STRUCT_FIELD (long, 4, XT_STK_LBEG, lbeg)
STRUCT_FIELD (long, 4, XT_STK_LEND, lend)
STRUCT_FIELD (long, 4, XT_STK_LCOUNT, lcount)
#endif
#ifndef __XTENSA_CALL0_ABI__
/* Temporary space for saving stuff during window spill */
STRUCT_FIELD (long, 4, XT_STK_TMP0, tmp0)
STRUCT_FIELD (long, 4, XT_STK_TMP1, tmp1)
STRUCT_FIELD (long, 4, XT_STK_TMP2, tmp2)
#endif
#ifdef XT_USE_SWPRI
/* Storage for virtual priority mask */
STRUCT_FIELD (long, 4, XT_STK_VPRI, vpri)
#endif
#ifdef XT_USE_OVLY
/* Storage for overlay state */
STRUCT_FIELD (long, 4, XT_STK_OVLY, ovly)
#endif
STRUCT_END(XtExcFrame)
#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__)
#define XT_STK_NEXT1 XtExcFrameSize
#else
#define XT_STK_NEXT1 sizeof(XtExcFrame)
#endif
/* Allocate extra storage if needed */
#if XCHAL_EXTRA_SA_SIZE != 0
#if XCHAL_EXTRA_SA_ALIGN <= 16
#define XT_STK_EXTRA ALIGNUP(XCHAL_EXTRA_SA_ALIGN, XT_STK_NEXT1)
#else
/* If need more alignment than stack, add space for dynamic alignment */
#define XT_STK_EXTRA (ALIGNUP(XCHAL_EXTRA_SA_ALIGN, XT_STK_NEXT1) + XCHAL_EXTRA_SA_ALIGN)
#endif
#define XT_STK_NEXT2 (XT_STK_EXTRA + XCHAL_EXTRA_SA_SIZE)
#else
#define XT_STK_NEXT2 XT_STK_NEXT1
#endif
/*
-------------------------------------------------------------------------------
This is the frame size. Add space for 4 registers (interruptee's base save
area) and some space for gcc nested functions if any.
-------------------------------------------------------------------------------
*/
#define XT_STK_FRMSZ (ALIGNUP(0x10, XT_STK_NEXT2) + 0x20)
/*
-------------------------------------------------------------------------------
SOLICITED STACK FRAME FOR A THREAD
A stack frame of this structure is allocated whenever a thread enters the
RTOS kernel intentionally (and synchronously) to submit to thread scheduling.
It goes on the current thread's stack.
The solicited frame only includes registers that are required to be preserved
by the callee according to the compiler's ABI conventions, some space to save
the return address for returning to the caller, and the caller's PS register.
For Windowed ABI, this stack frame includes the caller's base save area.
Note on XT_SOL_EXIT field:
It is necessary to distinguish a solicited from an interrupt stack frame.
This field corresponds to XT_STK_EXIT in the interrupt stack frame and is
always at the same offset (0). It can be written with a code (usually 0)
to distinguish a solicited frame from an interrupt frame. An RTOS port may
opt to ignore this field if it has another way of distinguishing frames.
-------------------------------------------------------------------------------
*/
STRUCT_BEGIN
#ifdef __XTENSA_CALL0_ABI__
STRUCT_FIELD (long, 4, XT_SOL_EXIT, exit)
STRUCT_FIELD (long, 4, XT_SOL_PC, pc)
STRUCT_FIELD (long, 4, XT_SOL_PS, ps)
STRUCT_FIELD (long, 4, XT_SOL_NEXT, next)
STRUCT_FIELD (long, 4, XT_SOL_A12, a12) /* should be on 16-byte alignment */
STRUCT_FIELD (long, 4, XT_SOL_A13, a13)
STRUCT_FIELD (long, 4, XT_SOL_A14, a14)
STRUCT_FIELD (long, 4, XT_SOL_A15, a15)
#else
STRUCT_FIELD (long, 4, XT_SOL_EXIT, exit)
STRUCT_FIELD (long, 4, XT_SOL_PC, pc)
STRUCT_FIELD (long, 4, XT_SOL_PS, ps)
STRUCT_FIELD (long, 4, XT_SOL_NEXT, next)
STRUCT_FIELD (long, 4, XT_SOL_A0, a0) /* should be on 16-byte alignment */
STRUCT_FIELD (long, 4, XT_SOL_A1, a1)
STRUCT_FIELD (long, 4, XT_SOL_A2, a2)
STRUCT_FIELD (long, 4, XT_SOL_A3, a3)
#endif
STRUCT_END(XtSolFrame)
/* Size of solicited stack frame */
#define XT_SOL_FRMSZ ALIGNUP(0x10, XtSolFrameSize)
/*
-------------------------------------------------------------------------------
CO-PROCESSOR STATE SAVE AREA FOR A THREAD
The RTOS must provide an area per thread to save the state of co-processors
when that thread does not have control. Co-processors are context-switched
lazily (on demand) only when a new thread uses a co-processor instruction,
otherwise a thread retains ownership of the co-processor even when it loses
control of the processor. An Xtensa co-processor exception is triggered when
any co-processor instruction is executed by a thread that is not the owner,
and the context switch of that co-processor is then performed by the handler.
Ownership represents which thread's state is currently in the co-processor.
Co-processors may not be used by interrupt or exception handlers. If a
co-processor instruction is executed by an interrupt or exception handler,
the co-processor exception handler will trigger a kernel panic and freeze.
This restriction is introduced to reduce the overhead of saving and restoring
co-processor state (which can be quite large) and in particular remove that
overhead from interrupt handlers.
The co-processor state save area may be in any convenient per-thread location
such as in the thread control block or above the thread stack area. It need
not be in the interrupt stack frame since interrupts don't use co-processors.
Along with the save area for each co-processor, two bitmasks with flags per
co-processor (laid out as in the CPENABLE reg) help manage context-switching
co-processors as efficiently as possible:
XT_CPENABLE
The contents of a non-running thread's CPENABLE register.
It represents the co-processors owned (and whose state is still needed)
by the thread. When a thread is preempted, its CPENABLE is saved here.
When a thread solicits a context-switch, its CPENABLE is cleared - the
compiler has saved the (caller-saved) co-proc state if it needs to.
When a non-running thread loses ownership of a CP, its bit is cleared.
When a thread runs, its XT_CPENABLE is loaded into the CPENABLE reg.
Avoids co-processor exceptions when no change of ownership is needed.
XT_CPSTORED
A bitmask with the same layout as CPENABLE, a bit per co-processor.
Indicates whether the state of each co-processor is saved in the state
save area. When a thread enters the kernel, only the state of co-procs
still enabled in CPENABLE is saved. When the co-processor exception
handler assigns ownership of a co-processor to a thread, it restores
the saved state only if this bit is set, and clears this bit.
XT_CP_CS_ST
A bitmask with the same layout as CPENABLE, a bit per co-processor.
Indicates whether callee-saved state is saved in the state save area.
Callee-saved state is saved by itself on a solicited context switch,
and restored when needed by the coprocessor exception handler.
Unsolicited switches will cause the entire coprocessor to be saved
when necessary.
XT_CP_ASA
Pointer to the aligned save area. Allows it to be aligned more than
the overall save area (which might only be stack-aligned or TCB-aligned).
Especially relevant for Xtensa cores configured with a very large data
path that requires alignment greater than 16 bytes (ABI stack alignment).
-------------------------------------------------------------------------------
*/
#if XCHAL_CP_NUM > 0
/* Offsets of each coprocessor save area within the 'aligned save area': */
#define XT_CP0_SA 0
#define XT_CP1_SA ALIGNUP(XCHAL_CP1_SA_ALIGN, XT_CP0_SA + XCHAL_CP0_SA_SIZE)
#define XT_CP2_SA ALIGNUP(XCHAL_CP2_SA_ALIGN, XT_CP1_SA + XCHAL_CP1_SA_SIZE)
#define XT_CP3_SA ALIGNUP(XCHAL_CP3_SA_ALIGN, XT_CP2_SA + XCHAL_CP2_SA_SIZE)
#define XT_CP4_SA ALIGNUP(XCHAL_CP4_SA_ALIGN, XT_CP3_SA + XCHAL_CP3_SA_SIZE)
#define XT_CP5_SA ALIGNUP(XCHAL_CP5_SA_ALIGN, XT_CP4_SA + XCHAL_CP4_SA_SIZE)
#define XT_CP6_SA ALIGNUP(XCHAL_CP6_SA_ALIGN, XT_CP5_SA + XCHAL_CP5_SA_SIZE)
#define XT_CP7_SA ALIGNUP(XCHAL_CP7_SA_ALIGN, XT_CP6_SA + XCHAL_CP6_SA_SIZE)
#define XT_CP_SA_SIZE ALIGNUP(16, XT_CP7_SA + XCHAL_CP7_SA_SIZE)
/* Offsets within the overall save area: */
#define XT_CPENABLE 0 /* (2 bytes) coprocessors active for this thread */
#define XT_CPSTORED 2 /* (2 bytes) coprocessors saved for this thread */
#define XT_CP_CS_ST 4 /* (2 bytes) coprocessor callee-saved regs stored for this thread */
#define XT_CP_ASA 8 /* (4 bytes) ptr to aligned save area */
/* Overall size allows for dynamic alignment: */
#define XT_CP_SIZE (12 + XT_CP_SA_SIZE + XCHAL_TOTAL_SA_ALIGN)
#else
#define XT_CP_SIZE 0
#endif
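#ifndef __ASSEMBLER__
    #include <stdint.h>

/*
 * Illustrative sketch only (not part of the original port): how the 12-byte
 * header described above can be initialized. This mirrors what
 * pxPortInitialiseStack() in port.c does: clear XT_CPENABLE/XT_CPSTORED/
 * XT_CP_CS_ST and store the dynamically aligned save-area pointer at XT_CP_ASA.
 */
    static inline void xt_cp_area_init_sketch( uint32_t * base )
    {
        base[ 0 ] = 0;                           /* XT_CPENABLE (2 bytes) + XT_CPSTORED (2 bytes) */
        base[ 1 ] = 0;                           /* XT_CP_CS_ST (2 bytes) + padding */
        base[ 2 ] = ( ( ( uint32_t ) base ) + 12 + XCHAL_TOTAL_SA_ALIGN - 1 )
                    & -XCHAL_TOTAL_SA_ALIGN;     /* XT_CP_ASA: pointer to the aligned save area */
    }
#endif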
/*
Macro to get the current core ID. Only uses the reg given as an argument.
Reading PRID on the ESP32 gives us 0xCDCD on the PRO processor (0)
and 0xABAB on the APP CPU (1). We can distinguish between the two by checking
bit 13: it's 1 on the APP and 0 on the PRO processor.
*/
#ifdef __ASSEMBLER__
.macro getcoreid reg
rsr.prid \reg
extui \reg,\reg,13,1
.endm
#endif
#define CORE_ID_PRO 0xCDCD
#define CORE_ID_APP 0xABAB
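#ifndef __ASSEMBLER__
/*
 * Illustrative C counterpart of the getcoreid macro above (a sketch only; the
 * port itself uses xPortGetCoreID()): read PRID and test bit 13 to obtain
 * 0 for the PRO CPU and 1 for the APP CPU.
 */
    static inline unsigned xt_core_id_sketch( void )
    {
        unsigned prid;

        __asm__ __volatile__ ( "rsr.prid %0" : "=r" ( prid ) );
        return ( prid >> 13 ) & 1;    /* equivalent to 'extui reg, reg, 13, 1' */
    }
#endif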
/*
-------------------------------------------------------------------------------
MACROS TO HANDLE ABI SPECIFICS OF FUNCTION ENTRY AND RETURN
Convenient where the frame size requirements are the same for both ABIs.
ENTRY(sz), RET(sz) are for framed functions (have locals or make calls).
ENTRY0, RET0 are for frameless functions (no locals, no calls).
where sz = size of the stack frame in bytes (must be >0 and aligned to 16).
For framed functions the frame is created and the return address saved at
base of frame (Call0 ABI) or as determined by hardware (Windowed ABI).
For frameless functions, there is no frame and return address remains in a0.
Note: Because CPP macros expand to a single line, macros requiring multi-line
expansions are implemented as assembler macros.
-------------------------------------------------------------------------------
*/
#ifdef __ASSEMBLER__
#ifdef __XTENSA_CALL0_ABI__
/* Call0 */
#define ENTRY(sz) entry1 sz
.macro entry1 size=0x10
addi sp, sp, -\size
s32i a0, sp, 0
.endm
#define ENTRY0
#define RET(sz) ret1 sz
.macro ret1 size=0x10
l32i a0, sp, 0
addi sp, sp, \size
ret
.endm
#define RET0 ret
#else
/* Windowed */
#define ENTRY(sz) entry sp, sz
#define ENTRY0 entry sp, 0x10
#define RET(sz) retw
#define RET0 retw
#endif
#endif
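/*
 * Usage sketch (illustrative only): a framed assembly function written with
 * these macros assembles unchanged under both ABIs.
 *
 *     .global example_func
 *     .type   example_func,@function
 *     .align  4
 * example_func:
 *     ENTRY(16)        // create a 16-byte frame; under Call0 also saves a0 at the frame base
 *     // ... body; may make calls and use locals ...
 *     RET(16)          // Call0: restore a0, free the frame, ret; Windowed: retw
 */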
#endif /* XTENSA_CONTEXT_H */

View File

@ -0,0 +1,231 @@
/*******************************************************************************
* // Copyright (c) 2003-2015 Cadence Design Systems, Inc.
* //
* // Permission is hereby granted, free of charge, to any person obtaining
* // a copy of this software and associated documentation files (the
* // "Software"), to deal in the Software without restriction, including
* // without limitation the rights to use, copy, modify, merge, publish,
* // distribute, sublicense, and/or sell copies of the Software, and to
* // permit persons to whom the Software is furnished to do so, subject to
* // the following conditions:
* //
* // The above copyright notice and this permission notice shall be included
* // in all copies or substantial portions of the Software.
* //
* // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
* --------------------------------------------------------------------------------
*
* RTOS-SPECIFIC INFORMATION FOR XTENSA RTOS ASSEMBLER SOURCES
* (FreeRTOS Port)
*
* This header is the primary glue between generic Xtensa RTOS support
* sources and a specific RTOS port for Xtensa. It contains definitions
* and macros for use primarily by Xtensa assembly coded source files.
*
* Macros in this header map callouts from generic Xtensa files to specific
* RTOS functions. It may also be included in C source files.
*
* Xtensa RTOS ports support all RTOS-compatible configurations of the Xtensa
* architecture, using the Xtensa hardware abstraction layer (HAL) to deal
* with configuration specifics.
*
* Should be included by all Xtensa generic and RTOS port-specific sources.
*
*******************************************************************************/
#ifndef XTENSA_RTOS_H
#define XTENSA_RTOS_H
#ifdef __ASSEMBLER__
#include <xtensa/coreasm.h>
#else
#include <xtensa/config/core.h>
#endif
#include <xtensa/corebits.h>
#include <xtensa/config/system.h>
/*
* Include any RTOS specific definitions that are needed by this header.
*/
#include "FreeRTOSConfig.h"
/*
* Convert FreeRTOSConfig definitions to XTENSA definitions.
* However these can still be overridden from the command line.
*/
#ifndef XT_SIMULATOR
#if configXT_SIMULATOR
#define XT_SIMULATOR 1 /* Simulator mode */
#endif
#endif
#ifndef XT_BOARD
#if configXT_BOARD
#define XT_BOARD 1 /* Board mode */
#endif
#endif
#ifndef XT_TIMER_INDEX
#if defined configXT_TIMER_INDEX
#define XT_TIMER_INDEX configXT_TIMER_INDEX /* Index of hardware timer to be used */
#endif
#endif
#ifndef XT_INTEXC_HOOKS
#if configXT_INTEXC_HOOKS
#define XT_INTEXC_HOOKS 1 /* Enables exception hooks */
#endif
#endif
#if !defined( XT_SIMULATOR ) && !defined( XT_BOARD )
#error Either XT_SIMULATOR or XT_BOARD must be defined.
#endif
/*
* Name of RTOS (for messages).
*/
#define XT_RTOS_NAME FreeRTOS
/*
* Check some Xtensa configuration requirements and report error if not met.
Error messages can be customized for the RTOS port.
*/
#if !XCHAL_HAVE_XEA2
#error "FreeRTOS/Xtensa requires XEA2 (exception architecture 2)."
#endif
/*******************************************************************************
*
* RTOS CALLOUT MACROS MAPPED TO RTOS PORT-SPECIFIC FUNCTIONS.
*
* Define callout macros used in generic Xtensa code to interact with the RTOS.
* The macros are simply the function names for use in calls from assembler code.
* Some of these functions may call back to generic functions in xtensa_context.h .
*
*******************************************************************************/
/*
* Inform RTOS of entry into an interrupt handler that will affect it.
* Allows RTOS to manage switch to any system stack and count nesting level.
* Called after minimal context has been saved, with interrupts disabled.
* RTOS port can call0 _xt_context_save to save the rest of the context.
* May only be called from assembly code by the 'call0' instruction.
*/
/* void XT_RTOS_INT_ENTER(void) */
#define XT_RTOS_INT_ENTER _frxt_int_enter
/*
* Inform RTOS of completion of an interrupt handler, and give control to
* RTOS to perform thread/task scheduling, switch back from any system stack
* and restore the context, and return to the exit dispatcher saved in the
* stack frame at XT_STK_EXIT. RTOS port can call0 _xt_context_restore
* to save the context saved in XT_RTOS_INT_ENTER via _xt_context_save,
* leaving only a minimal part of the context to be restored by the exit
* dispatcher. This function does not return to the place it was called from.
* May only be called from assembly code by the 'call0' instruction.
*/
/* void XT_RTOS_INT_EXIT(void) */
#define XT_RTOS_INT_EXIT _frxt_int_exit
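/*
 * Usage sketch (illustrative; the real call sites are in xtensa_vectors.S): a
 * low/medium priority interrupt dispatcher brackets its work with these two
 * callouts via call0.
 *
 *     call0   XT_RTOS_INT_ENTER    // save remaining context, count nesting, switch stacks
 *     // ... dispatch the pending interrupt handlers for this level ...
 *     call0   XT_RTOS_INT_EXIT     // may schedule and switch tasks; does not return here
 */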
/*
* Inform RTOS of the occurrence of a tick timer interrupt.
* If RTOS has no tick timer, leave XT_RTOS_TIMER_INT undefined.
* May be coded in or called from C or assembly, per ABI conventions.
RTOS may optionally define XT_TICK_PER_SEC in its own way (e.g. as a macro).
*/
/* void XT_RTOS_TIMER_INT(void) */
#define XT_RTOS_TIMER_INT _frxt_timer_int
#define XT_TICK_PER_SEC configTICK_RATE_HZ
/*
* Return in a15 the base address of the co-processor state save area for the
* thread that triggered a co-processor exception, or 0 if no thread was running.
* The state save area is structured as defined in xtensa_context.h and has size
* XT_CP_SIZE. Co-processor instructions should only be used in thread code, never
* in interrupt handlers or the RTOS kernel. May only be called from assembly code
* and by the 'call0' instruction. A result of 0 indicates an unrecoverable error.
* The implementation may use only a2-4, a15 (all other regs must be preserved).
*/
/* void* XT_RTOS_CP_STATE(void) */
#define XT_RTOS_CP_STATE _frxt_task_coproc_state
/*******************************************************************************
*
* HOOKS TO DYNAMICALLY INSTALL INTERRUPT AND EXCEPTION HANDLERS PER LEVEL.
*
* This Xtensa RTOS port provides hooks for dynamically installing exception
* and interrupt handlers to facilitate automated testing where each test
* case can install its own handler for user exceptions and each interrupt
* priority (level). This consists of an array of function pointers indexed
* by interrupt priority, with index 0 being the user exception handler hook.
* Each entry in the array is initially 0, and may be replaced by a function
* pointer of type XT_INTEXC_HOOK. A handler may be uninstalled by installing 0.
*
* The handler for low and medium priority obeys ABI conventions so may be coded
* in C. For the exception handler, the cause is the contents of the EXCCAUSE
* reg, and the result is -1 if handled, else the cause (still needs handling).
* For interrupt handlers, the cause is a mask of pending enabled interrupts at
* that level, and the result is the same mask with the bits for the handled
* interrupts cleared (those not cleared still need handling). This allows a test
* case to either pre-handle or override the default handling for the exception
* or interrupt level (see xtensa_vectors.S).
*
* High priority handlers (including NMI) must be coded in assembly, are always
* called by 'call0' regardless of ABI, must preserve all registers except a0,
* and must not use or modify the interrupted stack. The hook argument 'cause'
* is not passed and the result is ignored, so as not to burden the caller with
* saving and restoring a2 (it assumes only one interrupt per level - see the
* discussion in high priority interrupts in xtensa_vectors.S). The handler
* therefore should be coded to prototype 'void h(void)' even though it plugs
* into an array of handlers of prototype 'unsigned h(unsigned)'.
*
* To enable interrupt/exception hooks, compile the RTOS with '-DXT_INTEXC_HOOKS'.
*
*******************************************************************************/
#define XT_INTEXC_HOOK_NUM ( 1 + XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI )
#ifndef __ASSEMBLER__
typedef unsigned (* XT_INTEXC_HOOK)( unsigned cause );
extern volatile XT_INTEXC_HOOK _xt_intexc_hooks[ XT_INTEXC_HOOK_NUM ];
#endif
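#ifndef __ASSEMBLER__
/*
 * Illustrative sketch only (assumes the port was built with -DXT_INTEXC_HOOKS;
 * the handler name below is invented for the example): install a hook for user
 * exceptions at index 0, then uninstall it by writing 0.
 */
    static unsigned __attribute__( ( unused ) ) example_user_exc_hook( unsigned cause )
    {
        ( void ) cause;
        return ( unsigned ) -1; /* -1 = handled; returning 'cause' requests default handling */
    }

    static inline void example_install_user_exc_hook( void )
    {
        _xt_intexc_hooks[ 0 ] = example_user_exc_hook; /* install */
        _xt_intexc_hooks[ 0 ] = 0;                     /* uninstall */
    }
#endif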
/*******************************************************************************
*
* CONVENIENCE INCLUSIONS.
*
* Ensures RTOS specific files need only include this one Xtensa-generic header.
* These headers are included last so they can use the RTOS definitions above.
*
*******************************************************************************/
#include "xtensa_context.h"
#ifdef XT_RTOS_TIMER_INT
#include "xtensa_timer.h"
#endif
/*******************************************************************************
*
* Xtensa Port Version.
*
*******************************************************************************/
#define XTENSA_PORT_VERSION 1.4.2
#define XTENSA_PORT_VERSION_STRING "1.4.2"
#endif /* XTENSA_RTOS_H */

View File

@ -0,0 +1,158 @@
/*******************************************************************************
* // Copyright (c) 2003-2015 Cadence Design Systems, Inc.
* //
* // Permission is hereby granted, free of charge, to any person obtaining
* // a copy of this software and associated documentation files (the
* // "Software"), to deal in the Software without restriction, including
* // without limitation the rights to use, copy, modify, merge, publish,
* // distribute, sublicense, and/or sell copies of the Software, and to
* // permit persons to whom the Software is furnished to do so, subject to
* // the following conditions:
* //
* // The above copyright notice and this permission notice shall be included
* // in all copies or substantial portions of the Software.
* //
* // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
* --------------------------------------------------------------------------------
*
* XTENSA INFORMATION FOR RTOS TICK TIMER AND CLOCK FREQUENCY
*
* This header contains definitions and macros for use primarily by Xtensa
* RTOS assembly coded source files. It includes and uses the Xtensa hardware
* abstraction layer (HAL) to deal with config specifics. It may also be
* included in C source files.
*
* User may edit to modify timer selection and to specify clock frequency and
* tick duration to match timer interrupt to the real-time tick duration.
*
* If the RTOS has no timer interrupt, then there is no tick timer and the
* clock frequency is irrelevant, so all of these macros are left undefined
* and the Xtensa core configuration need not have a timer.
*
*******************************************************************************/
#ifndef XTENSA_TIMER_H
#define XTENSA_TIMER_H
#ifdef __ASSEMBLER__
#include <xtensa/coreasm.h>
#endif
#include <xtensa/corebits.h>
#include <xtensa/config/system.h>
#include "xtensa_rtos.h" /* in case this wasn't included directly */
#include "FreeRTOSConfig.h"
/*
* Select timer to use for periodic tick, and determine its interrupt number
* and priority. User may specify a timer by defining XT_TIMER_INDEX with -D,
* in which case its validity is checked (it must exist in this core and must
not be on a high priority interrupt - an error will be reported if invalid).
* Otherwise select the first low or medium priority interrupt timer available.
*/
#if XCHAL_NUM_TIMERS == 0
#error "This Xtensa configuration is unsupported, it has no timers."
#else
#ifndef XT_TIMER_INDEX
#if XCHAL_TIMER3_INTERRUPT != XTHAL_TIMER_UNCONFIGURED
#if XCHAL_INT_LEVEL( XCHAL_TIMER3_INTERRUPT ) <= XCHAL_EXCM_LEVEL
#undef XT_TIMER_INDEX
#define XT_TIMER_INDEX 3
#endif
#endif
#if XCHAL_TIMER2_INTERRUPT != XTHAL_TIMER_UNCONFIGURED
#if XCHAL_INT_LEVEL( XCHAL_TIMER2_INTERRUPT ) <= XCHAL_EXCM_LEVEL
#undef XT_TIMER_INDEX
#define XT_TIMER_INDEX 2
#endif
#endif
#if XCHAL_TIMER1_INTERRUPT != XTHAL_TIMER_UNCONFIGURED
#if XCHAL_INT_LEVEL( XCHAL_TIMER1_INTERRUPT ) <= XCHAL_EXCM_LEVEL
#undef XT_TIMER_INDEX
#define XT_TIMER_INDEX 1
#endif
#endif
#if XCHAL_TIMER0_INTERRUPT != XTHAL_TIMER_UNCONFIGURED
#if XCHAL_INT_LEVEL( XCHAL_TIMER0_INTERRUPT ) <= XCHAL_EXCM_LEVEL
#undef XT_TIMER_INDEX
#define XT_TIMER_INDEX 0
#endif
#endif
#endif /* ifndef XT_TIMER_INDEX */
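/*
 * Worked example (illustrative): if timers 0 and 1 both sit on interrupts at or
 * below XCHAL_EXCM_LEVEL, the cascade above tests timer 3 down to timer 0 and
 * the last matching #define wins, so XT_TIMER_INDEX resolves to 0 - the
 * lowest-numbered suitable timer.
 */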
#ifndef XT_TIMER_INDEX
#error "There is no suitable timer in this Xtensa configuration."
#endif
#define XT_CCOMPARE ( CCOMPARE + XT_TIMER_INDEX )
#define XT_TIMER_INTNUM XCHAL_TIMER_INTERRUPT( XT_TIMER_INDEX )
#define XT_TIMER_INTPRI XCHAL_INT_LEVEL( XT_TIMER_INTNUM )
#define XT_TIMER_INTEN ( 1 << XT_TIMER_INTNUM )
#if XT_TIMER_INTNUM == XTHAL_TIMER_UNCONFIGURED
#error "The timer selected by XT_TIMER_INDEX does not exist in this core."
#elif XT_TIMER_INTPRI > XCHAL_EXCM_LEVEL
#error "The timer interrupt cannot be high priority (use medium or low)."
#endif
#endif /* XCHAL_NUM_TIMERS */
/*
* Set processor clock frequency, used to determine clock divisor for timer tick.
* User should BE SURE TO ADJUST THIS for the Xtensa platform being used.
* If using a supported board via the board-independent API defined in xtbsp.h,
* this may be left undefined and frequency and tick divisor will be computed
* and cached during run-time initialization.
*
* NOTE ON SIMULATOR:
* Under the Xtensa instruction set simulator, the frequency can only be estimated
* because it depends on the speed of the host and the version of the simulator.
* Also because it runs much slower than hardware, it is not possible to achieve
* real-time performance for most applications under the simulator. A frequency
* too low does not allow enough time between timer interrupts, starving threads.
* To obtain a more convenient but non-real-time tick duration on the simulator,
* compile with xt-xcc option "-DXT_SIMULATOR".
* Adjust this frequency to taste (it's not real-time anyway!).
*/
#if defined( XT_SIMULATOR ) && !defined( XT_CLOCK_FREQ )
#define XT_CLOCK_FREQ configCPU_CLOCK_HZ
#endif
#if !defined( XT_CLOCK_FREQ ) && !defined( XT_BOARD )
#error "XT_CLOCK_FREQ must be defined for the target platform."
#endif
/*
* Default number of timer "ticks" per second (default 100 for 10ms tick).
* RTOS may define this in its own way (if applicable) in xtensa_rtos.h.
* User may redefine this to an optimal value for the application, either by
* editing this here or in xtensa_rtos.h, or compiling with xt-xcc option
* "-DXT_TICK_PER_SEC=<value>" where <value> is a suitable number.
*/
#ifndef XT_TICK_PER_SEC
#define XT_TICK_PER_SEC configTICK_RATE_HZ /* 10 ms tick = 100 ticks per second */
#endif
/*
* Derivation of clock divisor for timer tick and interrupt (one per tick).
*/
#ifdef XT_CLOCK_FREQ
#define XT_TICK_DIVISOR ( XT_CLOCK_FREQ / XT_TICK_PER_SEC )
#endif
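/*
 * Worked example (illustrative numbers only): with XT_CLOCK_FREQ = 240000000
 * (240 MHz) and XT_TICK_PER_SEC = 100, XT_TICK_DIVISOR = 240000000 / 100
 * = 2400000 cycles, i.e. one tick interrupt every 10 ms.
 */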
#ifndef __ASSEMBLER__
extern unsigned _xt_tick_divisor;
extern void _xt_tick_divisor_init( void );
#endif
#endif /* XTENSA_TIMER_H */

View File

@ -0,0 +1,481 @@
/*
* FreeRTOS V8.2.0 - Copyright (C) 2015 Real Time Engineers Ltd.
* All rights reserved
*
* VISIT https://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
*
* This file is part of the FreeRTOS distribution.
*
* FreeRTOS is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License (version 2) as published by the
* Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.
*
***************************************************************************
* >>! NOTE: The modification to the GPL is included to allow you to !<<
* >>! distribute a combined work that includes FreeRTOS without being !<<
* >>! obliged to provide the source code for proprietary components !<<
* >>! outside of the FreeRTOS kernel. !<<
***************************************************************************
*
* FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. Full license text is available on the following
* link: https://www.FreeRTOS.org/a00114.html
*
***************************************************************************
* *
* FreeRTOS provides completely free yet professionally developed, *
* robust, strictly quality controlled, supported, and cross *
* platform software that is more than just the market leader, it *
* is the industry's de facto standard. *
* *
* Help yourself get started quickly while simultaneously helping *
* to support the FreeRTOS project by purchasing a FreeRTOS *
* tutorial book, reference manual, or both: *
* https://www.FreeRTOS.org/Documentation *
* *
***************************************************************************
*
* https://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
* the FAQ page "My application does not run, what could be wrong?". Have you
* defined configASSERT()?
*
* https://www.FreeRTOS.org/support - In return for receiving this top quality
* embedded software for free we request you assist our global community by
* participating in the support forum.
*
* https://www.FreeRTOS.org/training - Investing in training allows your team
* to be as productive as possible as early as possible. Now you can receive
* FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
* Ltd, and the world's leading authority on the world's leading RTOS.
*
* https://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
* including FreeRTOS+Trace - an indispensable productivity tool, a DOS
* compatible FAT file system, and our tiny thread aware UDP/IP stack.
*
* https://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
* Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
*
* https://www.highintegritysystems.com/openrtos/ - Real Time Engineers ltd.
* license FreeRTOS to High Integrity Systems ltd. to sell under the OpenRTOS
* brand. Low cost OpenRTOS licenses offer ticketed support, indemnification
* and commercial middleware.
*
* https://www.highintegritysystems.com/safertos/ - High Integrity Systems
* also provide a safety engineered and independently SIL3 certified version
* for use in safety and mission critical applications that require provable
* dependability.
*
*/
/*******************************************************************************
* // Copyright (c) 2003-2015 Cadence Design Systems, Inc.
* //
* // Permission is hereby granted, free of charge, to any person obtaining
* // a copy of this software and associated documentation files (the
* // "Software"), to deal in the Software without restriction, including
* // without limitation the rights to use, copy, modify, merge, publish,
* // distribute, sublicense, and/or sell copies of the Software, and to
* // permit persons to whom the Software is furnished to do so, subject to
* // the following conditions:
* //
* // The above copyright notice and this permission notice shall be included
* // in all copies or substantial portions of the Software.
* //
* // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
* -----------------------------------------------------------------------------
*/
#include <stdlib.h>
#include <xtensa/config/core.h>
#include "xtensa_rtos.h"
#include "rom/ets_sys.h"
#include "soc/cpu.h"
#include "FreeRTOS.h"
#include "task.h"
#include "esp_panic.h"
#include "esp_heap_caps.h"
#include "esp_crosscore_int.h"
#include "esp_intr_alloc.h"
/* Defined in portasm.h */
extern void _frxt_tick_timer_init( void );
/* Defined in xtensa_context.S */
extern void _xt_coproc_init( void );
#if CONFIG_FREERTOS_CORETIMER_0
#define SYSTICK_INTR_ID ( ETS_INTERNAL_TIMER0_INTR_SOURCE + ETS_INTERNAL_INTR_SOURCE_OFF )
#endif
#if CONFIG_FREERTOS_CORETIMER_1
#define SYSTICK_INTR_ID ( ETS_INTERNAL_TIMER1_INTR_SOURCE + ETS_INTERNAL_INTR_SOURCE_OFF )
#endif
/*-----------------------------------------------------------*/
unsigned port_xSchedulerRunning[ portNUM_PROCESSORS ] = { 0 }; /* Duplicate of inaccessible xSchedulerRunning; needed at startup to avoid counting nesting */
unsigned port_interruptNesting[ portNUM_PROCESSORS ] = { 0 }; /* Interrupt nesting level. Increased/decreased in portasm.c, _frxt_int_enter/_frxt_int_exit */
/*-----------------------------------------------------------*/
/* User exception dispatcher when exiting */
void _xt_user_exit( void );
/*
* Stack initialization
*/
/* *INDENT-OFF* */
#if portUSING_MPU_WRAPPERS
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged )
#else
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters )
#endif
/* *INDENT-ON* */
{
StackType_t * sp, * tp;
XtExcFrame * frame;
#if XCHAL_CP_NUM > 0
uint32_t * p;
#endif
/* Create interrupt stack frame aligned to 16 byte boundary */
sp = ( StackType_t * ) ( ( ( UBaseType_t ) ( pxTopOfStack + 1 ) - XT_CP_SIZE - XT_STK_FRMSZ ) & ~0xf );
/* Clear the entire frame (do not use memset() because we don't depend on C library) */
for( tp = sp; tp <= pxTopOfStack; ++tp )
{
*tp = 0;
}
frame = ( XtExcFrame * ) sp;
/* Explicitly initialize certain saved registers */
frame->pc = ( UBaseType_t ) pxCode; /* task entrypoint */
frame->a0 = 0; /* to terminate GDB backtrace */
frame->a1 = ( UBaseType_t ) sp + XT_STK_FRMSZ; /* physical top of stack frame */
frame->exit = ( UBaseType_t ) _xt_user_exit; /* user exception exit dispatcher */
/* Set initial PS to int level 0, EXCM disabled ('rfe' will enable), user mode. */
/* Also set entry point argument parameter. */
#ifdef __XTENSA_CALL0_ABI__
frame->a2 = ( UBaseType_t ) pvParameters;
frame->ps = PS_UM | PS_EXCM;
#else
/* + for windowed ABI also set WOE and CALLINC (pretend task was 'call4'd). */
frame->a6 = ( UBaseType_t ) pvParameters;
frame->ps = PS_UM | PS_EXCM | PS_WOE | PS_CALLINC( 1 );
#endif
#ifdef XT_USE_SWPRI
/* Set the initial virtual priority mask value to all 1's. */
frame->vpri = 0xFFFFFFFF;
#endif
#if XCHAL_CP_NUM > 0
/* Init the coprocessor save area (see xtensa_context.h) */
/* No access to TCB here, so derive indirectly. Stack growth is top to bottom.
* //p = (uint32_t *) xMPUSettings->coproc_area;
*/
p = ( uint32_t * ) ( ( ( uint32_t ) pxTopOfStack - XT_CP_SIZE ) & ~0xf );
p[ 0 ] = 0;
p[ 1 ] = 0;
p[ 2 ] = ( ( ( uint32_t ) p ) + 12 + XCHAL_TOTAL_SA_ALIGN - 1 ) & -XCHAL_TOTAL_SA_ALIGN;
#endif
return sp;
}
/*-----------------------------------------------------------*/
void vPortEndScheduler( void )
{
/* It is unlikely that the Xtensa port will get stopped. If required simply
* disable the tick interrupt here. */
}
/*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void )
{
/* Interrupts are disabled at this point and stack contains PS with enabled interrupts when task context is restored */
#if XCHAL_CP_NUM > 0
/* Initialize co-processor management for tasks. Leave CPENABLE alone. */
_xt_coproc_init();
#endif
/* Init the tick divisor value */
_xt_tick_divisor_init();
/* Setup the hardware to generate the tick. */
_frxt_tick_timer_init();
port_xSchedulerRunning[ xPortGetCoreID() ] = 1;
/* Cannot be directly called from C; never returns */
__asm__ volatile ( "call0 _frxt_dispatch\n" );
/* Should not get here. */
return pdTRUE;
}
/*-----------------------------------------------------------*/
BaseType_t xPortSysTickHandler( void )
{
BaseType_t ret;
unsigned interruptMask;
portbenchmarkIntLatency();
traceISR_ENTER( SYSTICK_INTR_ID );
/* Interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY must be
* disabled before calling xTaskIncrementTick as it accesses the
* kernel lists. */
interruptMask = portSET_INTERRUPT_MASK_FROM_ISR();
{
ret = xTaskIncrementTick();
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( interruptMask );
if( ret != pdFALSE )
{
portYIELD_FROM_ISR();
}
else
{
traceISR_EXIT();
}
return ret;
}
void vPortYieldOtherCore( BaseType_t coreid )
{
esp_crosscore_int_send_yield( coreid );
}
/*-----------------------------------------------------------*/
/*
* Used to set coprocessor area in stack. Current hack is to reuse MPU pointer for coprocessor area.
*/
#if portUSING_MPU_WRAPPERS
void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
const struct xMEMORY_REGION * const xRegions,
StackType_t * pxBottomOfStack,
uint32_t usStackDepth )
{
#if XCHAL_CP_NUM > 0
xMPUSettings->coproc_area = ( StackType_t * ) ( ( ( ( uint32_t ) ( pxBottomOfStack + usStackDepth - 1 ) ) - XT_CP_SIZE ) & ~0xf );
/* NOTE: we cannot initialize the coprocessor save area here because FreeRTOS is going to
* clear the stack area after we return. This is done in pxPortInitialiseStack().
*/
#endif
}
void vPortReleaseTaskMPUSettings( xMPU_SETTINGS * xMPUSettings )
{
/* If task has live floating point registers somewhere, release them */
_xt_coproc_release( xMPUSettings->coproc_area );
}
#endif /* if portUSING_MPU_WRAPPERS */
/*
* Returns true if the current core is in ISR context; low prio ISR, med prio ISR or timer tick ISR. High prio ISRs
* aren't detected here, but they normally cannot call C code, so that should not be an issue anyway.
*/
BaseType_t xPortInIsrContext()
{
unsigned int irqStatus;
BaseType_t ret;
irqStatus = portENTER_CRITICAL_NESTED();
ret = ( port_interruptNesting[ xPortGetCoreID() ] != 0 );
portEXIT_CRITICAL_NESTED( irqStatus );
return ret;
}
/*
* This function will be called in High prio ISRs. Returns true if the current core was in ISR context
* before calling into high prio ISR context.
*/
BaseType_t IRAM_ATTR xPortInterruptedFromISRContext()
{
return( port_interruptNesting[ xPortGetCoreID() ] != 0 );
}
void vPortAssertIfInISR()
{
if( xPortInIsrContext() )
{
ets_printf( "core=%d port_interruptNesting=%d\n\n", xPortGetCoreID(), port_interruptNesting[ xPortGetCoreID() ] );
}
configASSERT( !xPortInIsrContext() );
}
/*
* For kernel use: Initialize a per-CPU mux. Mux will be initialized unlocked.
*/
void vPortCPUInitializeMutex( portMUX_TYPE * mux )
{
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
ets_printf( "Initializing mux %p\n", mux );
mux->lastLockedFn = "(never locked)";
mux->lastLockedLine = -1;
#endif
mux->owner = portMUX_FREE_VAL;
mux->count = 0;
}
#include "portmux_impl.h"
/*
* For kernel use: Acquire a per-CPU mux. Spinlocks, so don't hold on to these muxes for too long.
*/
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
void vPortCPUAcquireMutex( portMUX_TYPE * mux,
const char * fnName,
int line )
{
unsigned int irqStatus = portENTER_CRITICAL_NESTED();
vPortCPUAcquireMutexIntsDisabled( mux, portMUX_NO_TIMEOUT, fnName, line );
portEXIT_CRITICAL_NESTED( irqStatus );
}
bool vPortCPUAcquireMutexTimeout( portMUX_TYPE * mux,
int timeout_cycles,
const char * fnName,
int line )
{
unsigned int irqStatus = portENTER_CRITICAL_NESTED();
bool result = vPortCPUAcquireMutexIntsDisabled( mux, timeout_cycles, fnName, line );
portEXIT_CRITICAL_NESTED( irqStatus );
return result;
}
#else /* ifdef CONFIG_FREERTOS_PORTMUX_DEBUG */
void vPortCPUAcquireMutex( portMUX_TYPE * mux )
{
unsigned int irqStatus = portENTER_CRITICAL_NESTED();
vPortCPUAcquireMutexIntsDisabled( mux, portMUX_NO_TIMEOUT );
portEXIT_CRITICAL_NESTED( irqStatus );
}
bool vPortCPUAcquireMutexTimeout( portMUX_TYPE * mux,
int timeout_cycles )
{
unsigned int irqStatus = portENTER_CRITICAL_NESTED();
bool result = vPortCPUAcquireMutexIntsDisabled( mux, timeout_cycles );
portEXIT_CRITICAL_NESTED( irqStatus );
return result;
}
#endif /* ifdef CONFIG_FREERTOS_PORTMUX_DEBUG */
/*
* For kernel use: Release a per-CPU mux
*
* Mux must be already locked by this core
*/
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
void vPortCPUReleaseMutex( portMUX_TYPE * mux,
const char * fnName,
int line )
{
unsigned int irqStatus = portENTER_CRITICAL_NESTED();
vPortCPUReleaseMutexIntsDisabled( mux, fnName, line );
portEXIT_CRITICAL_NESTED( irqStatus );
}
#else
void vPortCPUReleaseMutex( portMUX_TYPE * mux )
{
unsigned int irqStatus = portENTER_CRITICAL_NESTED();
vPortCPUReleaseMutexIntsDisabled( mux );
portEXIT_CRITICAL_NESTED( irqStatus );
}
#endif /* ifdef CONFIG_FREERTOS_PORTMUX_DEBUG */
void vPortSetStackWatchpoint( void * pxStackStart )
{
/*Set watchpoint 1 to watch the last 32 bytes of the stack. */
/*Unfortunately, the Xtensa watchpoints can't set a watchpoint on a random [base - base+n] region because */
/*the size works by masking off the lowest address bits. For that reason, we futz a bit and watch the lowest 32 */
/*bytes of the stack we can actually watch. In general, this can cause the watchpoint to be triggered at most */
/*28 bytes early. The value 32 is chosen because it's larger than the stack canary, which in FreeRTOS is 20 bytes. */
/*This way, we make sure we trigger before/when the stack canary is corrupted, not after. */
int addr = ( int ) pxStackStart;
addr = ( addr + 31 ) & ( ~31 );
esp_set_watchpoint( 1, ( char * ) addr, 32, ESP_WATCHPOINT_STORE );
}
#if defined( CONFIG_SPIRAM_SUPPORT )
/*
* Compare & set (S32C1) does not work in external RAM. Instead, this routine uses a mux (in internal memory) to fake it.
*/
static portMUX_TYPE extram_mux = portMUX_INITIALIZER_UNLOCKED;
void uxPortCompareSetExtram( volatile uint32_t * addr,
uint32_t compare,
uint32_t * set )
{
uint32_t prev;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
vPortCPUAcquireMutexIntsDisabled( &extram_mux, portMUX_NO_TIMEOUT, __FUNCTION__, __LINE__ );
#else
vPortCPUAcquireMutexIntsDisabled( &extram_mux, portMUX_NO_TIMEOUT );
#endif
prev = *addr;
if( prev == compare )
{
*addr = *set;
}
*set = prev;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
vPortCPUReleaseMutexIntsDisabled( &extram_mux, __FUNCTION__, __LINE__ );
#else
vPortCPUReleaseMutexIntsDisabled( &extram_mux );
#endif
}
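/*
 * Usage sketch (illustrative only; the helper name is invented): 'set' carries
 * the new value in and the previous value of *addr out, so the caller can tell
 * whether the compare-and-set actually took effect.
 */
    static inline bool prvTryLockExtramExample( volatile uint32_t * addr )
    {
        uint32_t value = 1;                        /* store 1 if *addr currently holds 0 */

        uxPortCompareSetExtram( addr, 0, &value ); /* 'value' now holds the previous *addr */
        return value == 0;                         /* true only if the swap happened */
    }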
#endif //defined(CONFIG_SPIRAM_SUPPORT)
uint32_t xPortGetTickRateHz( void )
{
return ( uint32_t ) configTICK_RATE_HZ;
}

View File

@ -0,0 +1,653 @@
/*
//-----------------------------------------------------------------------------
// Copyright (c) 2003-2015 Cadence Design Systems, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//-----------------------------------------------------------------------------
*/
#include "xtensa_rtos.h"
#include "sdkconfig.h"
#define TOPOFSTACK_OFFS 0x00 /* StackType_t *pxTopOfStack */
#define CP_TOPOFSTACK_OFFS 0x04 /* xMPU_SETTINGS.coproc_area */
.extern pxCurrentTCB
/*
*******************************************************************************
* Interrupt stack. The size of the interrupt stack is determined by the config
* parameter "configISR_STACK_SIZE" in FreeRTOSConfig.h
*******************************************************************************
*/
.data
.align 16
.global port_IntStack
.global port_IntStackTop
.global port_switch_flag
port_IntStack:
.space configISR_STACK_SIZE*portNUM_PROCESSORS /* This allocates stacks for each individual CPU. */
port_IntStackTop:
.word 0
port_switch_flag:
.space portNUM_PROCESSORS*4 /* One flag for each individual CPU. */
.text
/*
*******************************************************************************
* _frxt_setup_switch
* void _frxt_setup_switch(void);
*
* Sets an internal flag indicating that a task switch is required on return
* from interrupt handling.
*
*******************************************************************************
*/
.global _frxt_setup_switch
.type _frxt_setup_switch,@function
.align 4
_frxt_setup_switch:
ENTRY(16)
getcoreid a3
movi a2, port_switch_flag
addx4 a2, a3, a2
movi a3, 1
s32i a3, a2, 0
RET(16)
/*
*******************************************************************************
* _frxt_int_enter
* void _frxt_int_enter(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_INT_ENTER function for
* freeRTOS. Saves the rest of the interrupt context (not already saved).
* May only be called from assembly code by the 'call0' instruction, with
* interrupts disabled.
* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
*******************************************************************************
*/
.globl _frxt_int_enter
.type _frxt_int_enter,@function
.align 4
_frxt_int_enter:
/* Save a12-13 in the stack frame as required by _xt_context_save. */
s32i a12, a1, XT_STK_A12
s32i a13, a1, XT_STK_A13
/* Save return address in a safe place (free a0). */
mov a12, a0
/* Save the rest of the interrupted context (preserves A12-13). */
call0 _xt_context_save
/*
Save interrupted task's SP in TCB only if not nesting.
Manage nesting directly rather than call the generic IntEnter()
(in windowed ABI we can't call a C function here anyway because PS.EXCM is still set).
*/
getcoreid a4
movi a2, port_xSchedulerRunning
addx4 a2, a4, a2
movi a3, port_interruptNesting
addx4 a3, a4, a3
l32i a2, a2, 0 /* a2 = port_xSchedulerRunning */
beqz a2, 1f /* scheduler not running, no tasks */
l32i a2, a3, 0 /* a2 = port_interruptNesting */
addi a2, a2, 1 /* increment nesting count */
s32i a2, a3, 0 /* save nesting count */
bnei a2, 1, .Lnested /* !=0 before incr, so nested */
movi a2, pxCurrentTCB
addx4 a2, a4, a2
l32i a2, a2, 0 /* a2 = current TCB */
beqz a2, 1f
s32i a1, a2, TOPOFSTACK_OFFS /* pxCurrentTCB->pxTopOfStack = SP */
movi a1, port_IntStack+configISR_STACK_SIZE /* a1 = top of intr stack for CPU 0 */
movi a2, configISR_STACK_SIZE /* add configISR_STACK_SIZE * cpu_num to arrive at top of stack for cpu_num */
mull a2, a4, a2
add a1, a1, a2 /* for current proc */
.Lnested:
1:
mov a0, a12 /* restore return addr and return */
ret
/*
*******************************************************************************
* _frxt_int_exit
* void _frxt_int_exit(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_INT_EXIT function for
* FreeRTOS. If required, calls vPortYieldFromInt() to perform task context
* switching, restore the (possibly) new task's context, and return to the
* exit dispatcher saved in the task's stack frame at XT_STK_EXIT.
* May only be called from assembly code by the 'call0' instruction. Does not
* return to caller.
* See the description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
*******************************************************************************
*/
.globl _frxt_int_exit
.type _frxt_int_exit,@function
.align 4
_frxt_int_exit:
getcoreid a4
movi a2, port_xSchedulerRunning
addx4 a2, a4, a2
movi a3, port_interruptNesting
addx4 a3, a4, a3
rsil a0, XCHAL_EXCM_LEVEL /* lock out interrupts */
l32i a2, a2, 0 /* a2 = port_xSchedulerRunning */
beqz a2, .Lnoswitch /* scheduler not running, no tasks */
l32i a2, a3, 0 /* a2 = port_interruptNesting */
addi a2, a2, -1 /* decrement nesting count */
s32i a2, a3, 0 /* save nesting count */
bnez a2, .Lnesting /* !=0 after decr so still nested */
movi a2, pxCurrentTCB
addx4 a2, a4, a2
l32i a2, a2, 0 /* a2 = current TCB */
beqz a2, 1f /* no task ? go to dispatcher */
l32i a1, a2, TOPOFSTACK_OFFS /* SP = pxCurrentTCB->pxTopOfStack */
movi a2, port_switch_flag /* address of switch flag */
addx4 a2, a4, a2 /* point to flag for this cpu */
l32i a3, a2, 0 /* a3 = port_switch_flag */
beqz a3, .Lnoswitch /* flag = 0 means no switch reqd */
movi a3, 0
s32i a3, a2, 0 /* zero out the flag for next time */
1:
/*
Call0 ABI callee-saved regs a12-15 need to be saved before possible preemption.
However a12-13 were already saved by _frxt_int_enter().
*/
#ifdef __XTENSA_CALL0_ABI__
s32i a14, a1, XT_STK_A14
s32i a15, a1, XT_STK_A15
#endif
#ifdef __XTENSA_CALL0_ABI__
call0 vPortYieldFromInt /* call dispatch inside the function; never returns */
#else
call4 vPortYieldFromInt /* this one returns */
call0 _frxt_dispatch /* tail-call dispatcher */
/* Never returns here. */
#endif
.Lnoswitch:
/*
If we came here, we are about to resume the interrupted task.
*/
.Lnesting:
/*
We come here only if there was no context switch, that is if this
is a nested interrupt, or the interrupted task was not preempted.
In either case there's no need to load the SP.
*/
/* Restore full context from interrupt stack frame */
call0 _xt_context_restore
/*
Must return via the exit dispatcher corresponding to the entrypoint from which
this was called. Interruptee's A0, A1, PS, PC are restored and the interrupt
stack frame is deallocated in the exit dispatcher.
*/
l32i a0, a1, XT_STK_EXIT
ret
/*
**********************************************************************************************************
* _frxt_timer_int
* void _frxt_timer_int(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_TIMER_INT function for FreeRTOS.
* Called every timer interrupt.
* Manages the tick timer and calls xPortSysTickHandler() every tick.
* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
* Callable from C (obeys ABI conventions). Implemented in assembly code for performance.
*
**********************************************************************************************************
*/
.globl _frxt_timer_int
.type _frxt_timer_int,@function
.align 4
_frxt_timer_int:
/*
Xtensa timers work by comparing a cycle counter with a preset value. Once the match occurs
an interrupt is generated, and the handler has to set a new cycle count into the comparator.
To avoid clock drift due to interrupt latency, the new cycle count is computed from the old,
not the time the interrupt was serviced. However if a timer interrupt is ever serviced more
than one tick late, it is necessary to process multiple ticks until the new cycle count is
in the future, otherwise the next timer interrupt would not occur until after the cycle
counter had wrapped (2^32 cycles later).
do {
ticks++;
old_ccompare = read_ccompare_i();
write_ccompare_i( old_ccompare + divisor );
service one tick;
diff = read_ccount() - old_ccompare;
} while ( diff > divisor );
*/
ENTRY(16)
#ifdef CONFIG_PM_TRACE
movi a6, 1 /* = ESP_PM_TRACE_TICK */
getcoreid a7
call4 esp_pm_trace_enter
#endif // CONFIG_PM_TRACE
.L_xt_timer_int_catchup:
/* Update the timer comparator for the next tick. */
#ifdef XT_CLOCK_FREQ
movi a2, XT_TICK_DIVISOR /* a2 = comparator increment */
#else
movi a3, _xt_tick_divisor
l32i a2, a3, 0 /* a2 = comparator increment */
#endif
rsr a3, XT_CCOMPARE /* a3 = old comparator value */
add a4, a3, a2 /* a4 = new comparator value */
wsr a4, XT_CCOMPARE /* update comp. and clear interrupt */
esync
#ifdef __XTENSA_CALL0_ABI__
/* Preserve a2 and a3 across C calls. */
s32i a2, sp, 4
s32i a3, sp, 8
#endif
/* Call the FreeRTOS tick handler (see port.c). */
#ifdef __XTENSA_CALL0_ABI__
call0 xPortSysTickHandler
#else
call4 xPortSysTickHandler
#endif
#ifdef __XTENSA_CALL0_ABI__
/* Restore a2 and a3. */
l32i a2, sp, 4
l32i a3, sp, 8
#endif
/* Check if we need to process more ticks to catch up. */
esync /* ensure comparator update complete */
rsr a4, CCOUNT /* a4 = cycle count */
sub a4, a4, a3 /* diff = ccount - old comparator */
blt a2, a4, .L_xt_timer_int_catchup /* repeat while diff > divisor */
#ifdef CONFIG_PM_TRACE
movi a6, 1 /* = ESP_PM_TRACE_TICK */
getcoreid a7
call4 esp_pm_trace_exit
#endif // CONFIG_PM_TRACE
RET(16)
/*
**********************************************************************************************************
* _frxt_tick_timer_init
* void _frxt_tick_timer_init(void)
*
* Initialize timer and timer interrupt handler (_xt_tick_divisor_init() has already been called).
* Callable from C (obeys ABI conventions on entry).
*
**********************************************************************************************************
*/
.globl _frxt_tick_timer_init
.type _frxt_tick_timer_init,@function
.align 4
_frxt_tick_timer_init:
ENTRY(16)
/* Set up the periodic tick timer (assume enough time to complete init). */
#ifdef XT_CLOCK_FREQ
movi a3, XT_TICK_DIVISOR
#else
movi a2, _xt_tick_divisor
l32i a3, a2, 0
#endif
rsr a2, CCOUNT /* current cycle count */
add a2, a2, a3 /* time of first timer interrupt */
wsr a2, XT_CCOMPARE /* set the comparator */
/*
Enable the timer interrupt at the device level. Don't write directly
to the INTENABLE register because it may be virtualized.
*/
#ifdef __XTENSA_CALL0_ABI__
movi a2, XT_TIMER_INTEN
call0 xt_ints_on
#else
movi a6, XT_TIMER_INTEN
call4 xt_ints_on
#endif
RET(16)
/*
**********************************************************************************************************
* DISPATCH THE HIGH READY TASK
* void _frxt_dispatch(void)
*
* Switch context to the highest priority ready task, restore its state and dispatch control to it.
*
* This is a common dispatcher that acts as a shared exit path for all the context switch functions
* including vPortYield() and vPortYieldFromInt(), all of which tail-call this dispatcher
* (for windowed ABI vPortYieldFromInt() calls it indirectly via _frxt_int_exit() ).
*
* The Xtensa port uses different stack frames for solicited and unsolicited task suspension (see
* comments on stack frames in xtensa_context.h). This function restores the state accordingly.
* If restoring a task that solicited entry, restores the minimal state and leaves CPENABLE clear.
* If restoring a task that was preempted, restores all state including the task's CPENABLE.
*
* Entry:
* pxCurrentTCB points to the TCB of the task to suspend,
* Because it is tail-called without a true function entrypoint, it needs no 'entry' instruction.
*
* Exit:
* If incoming task called vPortYield() (solicited), this function returns as if from vPortYield().
* If incoming task was preempted by an interrupt, this function jumps to exit dispatcher.
*
**********************************************************************************************************
*/
.globl _frxt_dispatch
.type _frxt_dispatch,@function
.align 4
_frxt_dispatch:
#ifdef __XTENSA_CALL0_ABI__
call0 vTaskSwitchContext // Get next TCB to resume
movi a2, pxCurrentTCB
getcoreid a3
addx4 a2, a3, a2
#else
call4 vTaskSwitchContext // Get next TCB to resume
movi a2, pxCurrentTCB
getcoreid a3
addx4 a2, a3, a2
#endif
l32i a3, a2, 0
l32i sp, a3, TOPOFSTACK_OFFS /* SP = next_TCB->pxTopOfStack; */
s32i a3, a2, 0
/* Determine the type of stack frame. */
l32i a2, sp, XT_STK_EXIT /* exit dispatcher or solicited flag */
bnez a2, .L_frxt_dispatch_stk
.L_frxt_dispatch_sol:
/* Solicited stack frame. Restore minimal context and return from vPortYield(). */
l32i a3, sp, XT_SOL_PS
#ifdef __XTENSA_CALL0_ABI__
l32i a12, sp, XT_SOL_A12
l32i a13, sp, XT_SOL_A13
l32i a14, sp, XT_SOL_A14
l32i a15, sp, XT_SOL_A15
#endif
l32i a0, sp, XT_SOL_PC
#if XCHAL_CP_NUM > 0
/* Ensure wsr.CPENABLE is complete (should be, it was cleared on entry). */
rsync
#endif
/* As soon as PS is restored, interrupts can happen. No need to sync PS. */
wsr a3, PS
#ifdef __XTENSA_CALL0_ABI__
addi sp, sp, XT_SOL_FRMSZ
ret
#else
retw
#endif
.L_frxt_dispatch_stk:
#if XCHAL_CP_NUM > 0
/* Restore CPENABLE from task's co-processor save area. */
movi a3, pxCurrentTCB /* cp_state = */
getcoreid a2
addx4 a3, a2, a3
l32i a3, a3, 0
l32i a2, a3, CP_TOPOFSTACK_OFFS /* StackType_t *pxStack; */
l16ui a3, a2, XT_CPENABLE /* CPENABLE = cp_state->cpenable; */
wsr a3, CPENABLE
#endif
/* Interrupt stack frame. Restore full context and return to exit dispatcher. */
call0 _xt_context_restore
/* In Call0 ABI, restore callee-saved regs (A12, A13 already restored). */
#ifdef __XTENSA_CALL0_ABI__
l32i a14, sp, XT_STK_A14
l32i a15, sp, XT_STK_A15
#endif
#if XCHAL_CP_NUM > 0
/* Ensure wsr.CPENABLE has completed. */
rsync
#endif
/*
Must return via the exit dispatcher corresponding to the entrypoint from which
this was called. Interruptee's A0, A1, PS, PC are restored and the interrupt
stack frame is deallocated in the exit dispatcher.
*/
l32i a0, sp, XT_STK_EXIT
ret
/*
**********************************************************************************************************
* PERFORM A SOLICITED CONTEXT SWITCH (from a task)
* void vPortYield(void)
*
* This function saves the minimal state needed for a solicited task suspension, clears CPENABLE,
* then tail-calls the dispatcher _frxt_dispatch() to perform the actual context switch
*
* At Entry:
* pxCurrentTCB points to the TCB of the task to suspend
* Callable from C (obeys ABI conventions on entry).
*
* Does not return to caller.
*
**********************************************************************************************************
*/
.globl vPortYield
.type vPortYield,@function
.align 4
vPortYield:
#ifdef __XTENSA_CALL0_ABI__
addi sp, sp, -XT_SOL_FRMSZ
#else
entry sp, XT_SOL_FRMSZ
#endif
rsr a2, PS
s32i a0, sp, XT_SOL_PC
s32i a2, sp, XT_SOL_PS
#ifdef __XTENSA_CALL0_ABI__
s32i a12, sp, XT_SOL_A12 /* save callee-saved registers */
s32i a13, sp, XT_SOL_A13
s32i a14, sp, XT_SOL_A14
s32i a15, sp, XT_SOL_A15
#else
/* Spill register windows. Calling xthal_window_spill() causes extra */
/* spills and reloads, so we will set things up to call the _nw version */
/* instead to save cycles. */
movi a6, ~(PS_WOE_MASK|PS_INTLEVEL_MASK) /* spills a4-a7 if needed */
and a2, a2, a6 /* clear WOE, INTLEVEL */
addi a2, a2, XCHAL_EXCM_LEVEL /* set INTLEVEL */
wsr a2, PS
rsync
call0 xthal_window_spill_nw
l32i a2, sp, XT_SOL_PS /* restore PS */
wsr a2, PS
#endif
rsil a2, XCHAL_EXCM_LEVEL /* disable low/med interrupts */
#if XCHAL_CP_NUM > 0
/* Save coprocessor callee-saved state (if any). At this point CPENABLE */
/* should still reflect which CPs were in use (enabled). */
call0 _xt_coproc_savecs
#endif
movi a2, pxCurrentTCB
getcoreid a3
addx4 a2, a3, a2
l32i a2, a2, 0 /* a2 = pxCurrentTCB */
movi a3, 0
s32i a3, sp, XT_SOL_EXIT /* 0 to flag as solicited frame */
s32i sp, a2, TOPOFSTACK_OFFS /* pxCurrentTCB->pxTopOfStack = SP */
#if XCHAL_CP_NUM > 0
/* Clear CPENABLE, also in task's co-processor state save area. */
l32i a2, a2, CP_TOPOFSTACK_OFFS /* a2 = pxCurrentTCB->cp_state */
movi a3, 0
wsr a3, CPENABLE
beqz a2, 1f
s16i a3, a2, XT_CPENABLE /* clear saved cpenable */
1:
#endif
/* Tail-call dispatcher. */
call0 _frxt_dispatch
/* Never reaches here. */
/*
**********************************************************************************************************
* PERFORM AN UNSOLICITED CONTEXT SWITCH (from an interrupt)
* void vPortYieldFromInt(void)
*
* This calls the context switch hook (removed), saves and clears CPENABLE, then tail-calls the dispatcher
* _frxt_dispatch() to perform the actual context switch.
*
* At Entry:
* Interrupted task context has been saved in an interrupt stack frame at pxCurrentTCB->pxTopOfStack.
* pxCurrentTCB points to the TCB of the task to suspend,
* Callable from C (obeys ABI conventions on entry).
*
* At Exit:
* Windowed ABI defers the actual context switch until the stack is unwound to interrupt entry.
* Call0 ABI tail-calls the dispatcher directly (no need to unwind) so does not return to caller.
*
**********************************************************************************************************
*/
.globl vPortYieldFromInt
.type vPortYieldFromInt,@function
.align 4
vPortYieldFromInt:
ENTRY(16)
#if XCHAL_CP_NUM > 0
/* Save CPENABLE in task's co-processor save area, and clear CPENABLE. */
movi a3, pxCurrentTCB /* cp_state = */
getcoreid a2
addx4 a3, a2, a3
l32i a3, a3, 0
l32i a2, a3, CP_TOPOFSTACK_OFFS
rsr a3, CPENABLE
s16i a3, a2, XT_CPENABLE /* cp_state->cpenable = CPENABLE; */
movi a3, 0
wsr a3, CPENABLE /* disable all co-processors */
#endif
#ifdef __XTENSA_CALL0_ABI__
/* Tail-call dispatcher. */
call0 _frxt_dispatch
/* Never reaches here. */
#else
RET(16)
#endif
/*
**********************************************************************************************************
* _frxt_task_coproc_state
* void _frxt_task_coproc_state(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_CP_STATE function for FreeRTOS.
*
* May only be called when a task is running, not within an interrupt handler (returns 0 in that case).
* May only be called from assembly code by the 'call0' instruction. Does NOT obey ABI conventions.
* Returns in A15 a pointer to the base of the co-processor state save area for the current task.
* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
**********************************************************************************************************
*/
#if XCHAL_CP_NUM > 0
.globl _frxt_task_coproc_state
.type _frxt_task_coproc_state,@function
.align 4
_frxt_task_coproc_state:
/* We can use a3 as a scratchpad, the instances of code calling XT_RTOS_CP_STATE don't seem to need it saved. */
getcoreid a3
movi a15, port_xSchedulerRunning /* if (port_xSchedulerRunning */
addx4 a15, a3,a15
l32i a15, a15, 0
beqz a15, 1f
movi a15, port_interruptNesting /* && port_interruptNesting == 0 */
addx4 a15, a3, a15
l32i a15, a15, 0
bnez a15, 1f
movi a15, pxCurrentTCB
addx4 a15, a3, a15
l32i a15, a15, 0 /* && pxCurrentTCB != 0) { */
beqz a15, 2f
l32i a15, a15, CP_TOPOFSTACK_OFFS
ret
1: movi a15, 0
2: ret
#endif /* XCHAL_CP_NUM > 0 */

View File

@ -0,0 +1,113 @@
/*
* Copyright (C) 2016-2017 Espressif Shanghai PTE LTD
* Copyright (C) 2015 Real Time Engineers Ltd.
*
* All rights reserved
*
* FreeRTOS is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License (version 2) as published by the
* Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.
*
***************************************************************************
* >>! NOTE: The modification to the GPL is included to allow you to !<<
* >>! distribute a combined work that includes FreeRTOS without being !<<
* >>! obliged to provide the source code for proprietary components !<<
* >>! outside of the FreeRTOS kernel. !<<
***************************************************************************
*
* FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. Full license text is available on the following
* link: https://www.FreeRTOS.org/a00114.html
*/
/* This header exists for performance reasons, in order to inline the
* implementation of vPortCPUAcquireMutexIntsDisabled and
* vPortCPUReleaseMutexIntsDisabled into the
* vTaskEnterCritical/vTaskExitCritical functions in task.c as well as the
* vPortCPUAcquireMutex/vPortCPUReleaseMutex implementations.
*
* Normally this kind of performance hack is over the top, but
* vTaskEnterCritical/vTaskExitCritical is called a great
* deal by FreeRTOS internals.
*
* It should be #included by freertos port.c or tasks.c, in esp-idf.
*
* The way it works is that it essentially uses portmux_impl.inc.h as a
* generator template of sorts. When no external memory is used, this
* template is only used to generate the vPortCPUAcquireMutexIntsDisabledInternal
* and vPortCPUReleaseMutexIntsDisabledInternal functions, which use S32C1 to
* do an atomic compare & swap. When external memory is used the functions
* vPortCPUAcquireMutexIntsDisabledExtram and vPortCPUReleaseMutexIntsDisabledExtram
* are also generated, which use uxPortCompareSetExtram to fake the S32C1 instruction.
* The wrapper functions vPortCPUAcquireMutexIntsDisabled and
* vPortCPUReleaseMutexIntsDisabled will then use the appropriate function to do the
* actual lock/unlock.
*/
#include "soc/cpu.h"
#include "portable.h"
/* XOR one core ID with this value to get the other core ID */
#define CORE_ID_XOR_SWAP ( CORE_ID_PRO ^ CORE_ID_APP )
/*Define the mux routines for use with muxes in internal RAM */
#define PORTMUX_AQUIRE_MUX_FN_NAME vPortCPUAcquireMutexIntsDisabledInternal
#define PORTMUX_RELEASE_MUX_FN_NAME vPortCPUReleaseMutexIntsDisabledInternal
#define PORTMUX_COMPARE_SET_FN_NAME uxPortCompareSet
#include "portmux_impl.inc.h"
#undef PORTMUX_AQUIRE_MUX_FN_NAME
#undef PORTMUX_RELEASE_MUX_FN_NAME
#undef PORTMUX_COMPARE_SET_FN_NAME
#if defined( CONFIG_SPIRAM_SUPPORT )
#define PORTMUX_AQUIRE_MUX_FN_NAME vPortCPUAcquireMutexIntsDisabledExtram
#define PORTMUX_RELEASE_MUX_FN_NAME vPortCPUReleaseMutexIntsDisabledExtram
#define PORTMUX_COMPARE_SET_FN_NAME uxPortCompareSetExtram
#include "portmux_impl.inc.h"
#undef PORTMUX_AQUIRE_MUX_FN_NAME
#undef PORTMUX_RELEASE_MUX_FN_NAME
#undef PORTMUX_COMPARE_SET_FN_NAME
#endif
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
#define PORTMUX_AQUIRE_MUX_FN_ARGS portMUX_TYPE * mux, int timeout_cycles, const char * fnName, int line
#define PORTMUX_RELEASE_MUX_FN_ARGS portMUX_TYPE * mux, const char * fnName, int line
#define PORTMUX_AQUIRE_MUX_FN_CALL_ARGS( x ) x, timeout_cycles, fnName, line
#define PORTMUX_RELEASE_MUX_FN_CALL_ARGS( x ) x, fnName, line
#else
#define PORTMUX_AQUIRE_MUX_FN_ARGS portMUX_TYPE * mux, int timeout_cycles
#define PORTMUX_RELEASE_MUX_FN_ARGS portMUX_TYPE * mux
#define PORTMUX_AQUIRE_MUX_FN_CALL_ARGS( x ) x, timeout_cycles
#define PORTMUX_RELEASE_MUX_FN_CALL_ARGS( x ) x
#endif
static inline bool __attribute__( ( always_inline ) ) vPortCPUAcquireMutexIntsDisabled( PORTMUX_AQUIRE_MUX_FN_ARGS )
{
#if defined( CONFIG_SPIRAM_SUPPORT )
if( esp_ptr_external_ram( mux ) )
{
return vPortCPUAcquireMutexIntsDisabledExtram( PORTMUX_AQUIRE_MUX_FN_CALL_ARGS( mux ) );
}
#endif
return vPortCPUAcquireMutexIntsDisabledInternal( PORTMUX_AQUIRE_MUX_FN_CALL_ARGS( mux ) );
}
static inline void vPortCPUReleaseMutexIntsDisabled( PORTMUX_RELEASE_MUX_FN_ARGS )
{
#if defined( CONFIG_SPIRAM_SUPPORT )
if( esp_ptr_external_ram( mux ) )
{
vPortCPUReleaseMutexIntsDisabledExtram( PORTMUX_RELEASE_MUX_FN_CALL_ARGS( mux ) );
return;
}
#endif
vPortCPUReleaseMutexIntsDisabledInternal( PORTMUX_RELEASE_MUX_FN_CALL_ARGS( mux ) );
}
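
For reference, a minimal sketch of how a port typically wraps the generated routines into the public mux API (not part of this commit; the wrapper names and the portENTER_CRITICAL_NESTED/portEXIT_CRITICAL_NESTED interrupt-masking macros are assumptions, the debug variants with fnName/line parameters are omitted, and the port headers are assumed to be included):

void vPortCPUAcquireMutex( portMUX_TYPE * mux )
{
    /* Mask interrupts on this core first, then spin on the mux with no timeout. */
    unsigned int irqStatus = portENTER_CRITICAL_NESTED();
    vPortCPUAcquireMutexIntsDisabled( mux, portMUX_NO_TIMEOUT );
    portEXIT_CRITICAL_NESTED( irqStatus );
}

void vPortCPUReleaseMutex( portMUX_TYPE * mux )
{
    unsigned int irqStatus = portENTER_CRITICAL_NESTED();
    vPortCPUReleaseMutexIntsDisabled( mux );
    portEXIT_CRITICAL_NESTED( irqStatus );
}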

View File

@ -0,0 +1,200 @@
/*
* Copyright (C) 2016-2017 Espressif Shanghai PTE LTD
* Copyright (C) 2015 Real Time Engineers Ltd.
*
* All rights reserved
*
* FreeRTOS is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License (version 2) as published by the
* Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.
*
***************************************************************************
* >>! NOTE: The modification to the GPL is included to allow you to !<<
* >>! distribute a combined work that includes FreeRTOS without being !<<
* >>! obliged to provide the source code for proprietary components !<<
* >>! outside of the FreeRTOS kernel. !<<
***************************************************************************
*
* FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. Full license text is available on the following
* link: https://www.FreeRTOS.org/a00114.html
*/
/*
* Warning: funky preprocessor hackery ahead. Including these headers will generate two
* functions, whose names are defined by the preprocessor macros
* PORTMUX_AQUIRE_MUX_FN_NAME and PORTMUX_RELEASE_MUX_FN_NAME. In order to do the compare
* and exchange function, they will use whatever PORTMUX_COMPARE_SET_FN_NAME resolves to.
*
* In some scenarios, this header is included *twice* in portmux_impl.h: one time
* for the 'normal' mux code which uses a compare&exchange routine, another time
* to generate code for a second set of these routines that use a second mux
* (in internal ram) to fake a compare&exchange on a variable in external memory.
*/
static inline bool __attribute__( ( always_inline ) )
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
PORTMUX_AQUIRE_MUX_FN_NAME( portMUX_TYPE * mux,
int timeout_cycles,
const char * fnName,
int line )
{
#else
PORTMUX_AQUIRE_MUX_FN_NAME( portMUX_TYPE * mux, int timeout_cycles )
{
#endif
#if !CONFIG_FREERTOS_UNICORE
uint32_t res;
portBASE_TYPE coreID, otherCoreID;
uint32_t ccount_start;
bool set_timeout = timeout_cycles > portMUX_NO_TIMEOUT;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
if( !set_timeout )
{
timeout_cycles = 10000; /* Always set a timeout in debug mode */
set_timeout = true;
}
#endif
if( set_timeout ) /* Timeout */
{
RSR( CCOUNT, ccount_start );
}
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
uint32_t owner = mux->owner;
if( ( owner != portMUX_FREE_VAL ) && ( owner != CORE_ID_PRO ) && ( owner != CORE_ID_APP ) )
{
ets_printf( "ERROR: vPortCPUAcquireMutex: mux %p is uninitialized (0x%X)! Called from %s line %d.\n", mux, owner, fnName, line );
mux->owner = portMUX_FREE_VAL;
}
#endif
/* Spin until we own the core */
RSR( PRID, coreID );
/* Note: coreID is the full 32 bit core ID (CORE_ID_PRO/CORE_ID_APP),
* not the 0/1 value returned by xPortGetCoreID()
*/
otherCoreID = CORE_ID_XOR_SWAP ^ coreID;
do
{
/* mux->owner should be one of portMUX_FREE_VAL, CORE_ID_PRO,
* CORE_ID_APP:
*
* - If portMUX_FREE_VAL, we want to atomically set to 'coreID'.
* - If "our" coreID, we can drop through immediately.
* - If "otherCoreID", we spin here.
*/
res = coreID;
PORTMUX_COMPARE_SET_FN_NAME( &mux->owner, portMUX_FREE_VAL, &res );
if( res != otherCoreID )
{
break; /* mux->owner is "our" coreID */
}
if( set_timeout )
{
uint32_t ccount_now;
RSR( CCOUNT, ccount_now );
if( ccount_now - ccount_start > ( unsigned ) timeout_cycles )
{
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
ets_printf( "Timeout on mux! last non-recursive lock %s line %d, curr %s line %d\n", mux->lastLockedFn, mux->lastLockedLine, fnName, line );
ets_printf( "Owner 0x%x count %d\n", mux->owner, mux->count );
#endif
return false;
}
}
} while( 1 );
assert( res == coreID || res == portMUX_FREE_VAL ); /* any other value implies memory corruption or uninitialized mux */
assert( ( res == portMUX_FREE_VAL ) == ( mux->count == 0 ) ); /* we're first to lock iff count is zero */
assert( mux->count < 0xFF ); /* Bad count value implies memory corruption */
/* now we own it, we can increment the refcount */
mux->count++;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
if( res == portMUX_FREE_VAL ) /* initial lock */
{
mux->lastLockedFn = fnName;
mux->lastLockedLine = line;
}
else
{
ets_printf( "Recursive lock: count=%d last non-recursive lock %s line %d, curr %s line %d\n", mux->count - 1,
mux->lastLockedFn, mux->lastLockedLine, fnName, line );
}
#endif /* CONFIG_FREERTOS_PORTMUX_DEBUG */
#endif /* CONFIG_FREERTOS_UNICORE */
return true;
}
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
static inline void PORTMUX_RELEASE_MUX_FN_NAME( portMUX_TYPE * mux,
const char * fnName,
int line )
{
#else
static inline void PORTMUX_RELEASE_MUX_FN_NAME( portMUX_TYPE * mux )
{
#endif
#if !CONFIG_FREERTOS_UNICORE
portBASE_TYPE coreID;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
const char * lastLockedFn = mux->lastLockedFn;
int lastLockedLine = mux->lastLockedLine;
mux->lastLockedFn = fnName;
mux->lastLockedLine = line;
uint32_t owner = mux->owner;
if( ( owner != portMUX_FREE_VAL ) && ( owner != CORE_ID_PRO ) && ( owner != CORE_ID_APP ) )
{
ets_printf( "ERROR: vPortCPUReleaseMutex: mux %p is invalid (0x%x)!\n", mux, mux->owner );
}
#endif /* ifdef CONFIG_FREERTOS_PORTMUX_DEBUG */
#if CONFIG_FREERTOS_PORTMUX_DEBUG || !defined( NDEBUG )
RSR( PRID, coreID );
#endif
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
if( coreID != mux->owner )
{
ets_printf( "ERROR: vPortCPUReleaseMutex: mux %p was already unlocked!\n", mux );
ets_printf( "Last non-recursive unlock %s line %d, curr unlock %s line %d\n", lastLockedFn, lastLockedLine, fnName, line );
}
#endif
assert( coreID == mux->owner ); /* This is a mutex we didn't lock, or it's corrupt */
mux->count--;
if( mux->count == 0 )
{
mux->owner = portMUX_FREE_VAL;
}
else
{
assert( mux->count < 0x100 ); /* Indicates memory corruption */
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG_RECURSIVE
ets_printf( "Recursive unlock: count=%d last locked %s line %d, curr %s line %d\n", mux->count, lastLockedFn, lastLockedLine, fnName, line );
#endif
}
#endif //!CONFIG_FREERTOS_UNICORE
}
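
As a plain-C restatement of the acquire loop above (illustrative only; it omits the timeout, debug, and single-core paths, and assumes this port's uxPortCompareSet() primitive plus <stdint.h>/<stdbool.h>):

static bool prvModelAcquire( portMUX_TYPE * mux, uint32_t coreID )
{
    for( ; ; )
    {
        uint32_t res = coreID;

        /* Atomically: if owner == portMUX_FREE_VAL then owner = coreID; res receives the old owner. */
        uxPortCompareSet( &mux->owner, portMUX_FREE_VAL, &res );

        if( ( res == portMUX_FREE_VAL ) || ( res == coreID ) )
        {
            mux->count++; /* first lock, or a recursive lock taken by the owning core */
            return true;
        }

        /* Held by the other core: keep spinning (the real code checks timeout_cycles here). */
    }
}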

View File

@ -0,0 +1,648 @@
/*******************************************************************************
Copyright (c) 2006-2015 Cadence Design Systems Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
XTENSA CONTEXT SAVE AND RESTORE ROUTINES
Low-level Call0 functions for handling generic context save and restore of
registers not specifically addressed by the interrupt vectors and handlers.
Those registers (not handled by these functions) are PC, PS, A0, A1 (SP).
Except for the calls to RTOS functions, this code is generic to Xtensa.
Note that in Call0 ABI, interrupt handlers are expected to preserve the callee-
save regs (A12-A15), which is always the case if the handlers are coded in C.
However A12, A13 are made available as scratch registers for interrupt dispatch
code, so are presumed saved anyway, and are always restored even in Call0 ABI.
Only A14, A15 are truly handled as callee-save regs.
Because Xtensa is a configurable architecture, this port supports all user
generated configurations (except restrictions stated in the release notes).
This is accomplished by conditional compilation using macros and functions
defined in the Xtensa HAL (hardware adaptation layer) for your configuration.
Only the processor state included in your configuration is saved and restored,
including any processor state added by user configuration options or TIE.
*******************************************************************************/
/* Warn nicely if this file gets named with a lowercase .s instead of .S: */
#define NOERROR #
NOERROR: .error "C preprocessor needed for this file: make sure its filename\
ends in uppercase .S, or use xt-xcc's -x assembler-with-cpp option."
#include "xtensa_rtos.h"
#include "xtensa_context.h"
#ifdef XT_USE_OVLY
#include <xtensa/overlay_os_asm.h>
#endif
.text
/*******************************************************************************
_xt_context_save
!! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!
Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13, in the
interrupt stack frame defined in xtensa_rtos.h.
Its counterpart is _xt_context_restore (which also restores A12, A13).
Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the frame.
This function preserves A12 & A13 in order to provide the caller with 2 scratch
regs that need not be saved over the call to this function. The choice of which
2 regs to provide is governed by xthal_window_spill_nw and xthal_save_extra_nw,
to avoid moving data more than necessary. Caller can assign regs accordingly.
Entry Conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").
Original A12, A13 have already been saved in the interrupt stack frame.
Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at the
point of interruption.
If windowed ABI, PS.EXCM = 1 (exceptions disabled).
Exit conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").
A12, A13 as at entry (preserved).
If windowed ABI, PS.EXCM = 1 (exceptions disabled).
*******************************************************************************/
.global _xt_context_save
.type _xt_context_save,@function
.align 4
.literal_position
.align 4
_xt_context_save:
s32i a2, sp, XT_STK_A2
s32i a3, sp, XT_STK_A3
s32i a4, sp, XT_STK_A4
s32i a5, sp, XT_STK_A5
s32i a6, sp, XT_STK_A6
s32i a7, sp, XT_STK_A7
s32i a8, sp, XT_STK_A8
s32i a9, sp, XT_STK_A9
s32i a10, sp, XT_STK_A10
s32i a11, sp, XT_STK_A11
/*
Call0 ABI callee-saved regs a12-15 do not need to be saved here.
a12-13 are the caller's responsibility so it can use them as scratch.
So only need to save a14-a15 here for Windowed ABI (not Call0).
*/
#ifndef __XTENSA_CALL0_ABI__
s32i a14, sp, XT_STK_A14
s32i a15, sp, XT_STK_A15
#endif
rsr a3, SAR
s32i a3, sp, XT_STK_SAR
#if XCHAL_HAVE_LOOPS
rsr a3, LBEG
s32i a3, sp, XT_STK_LBEG
rsr a3, LEND
s32i a3, sp, XT_STK_LEND
rsr a3, LCOUNT
s32i a3, sp, XT_STK_LCOUNT
#endif
#ifdef XT_USE_SWPRI
/* Save virtual priority mask */
movi a3, _xt_vpri_mask
l32i a3, a3, 0
s32i a3, sp, XT_STK_VPRI
#endif
#if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
mov a9, a0 /* preserve ret addr */
#endif
#ifndef __XTENSA_CALL0_ABI__
/*
To spill the reg windows, temp. need pre-interrupt stack ptr and a4-15.
Need to save a9,12,13 temporarily (in frame temps) and recover originals.
Interrupts need to be disabled below XCHAL_EXCM_LEVEL and window overflow
and underflow exceptions disabled (assured by PS.EXCM == 1).
*/
s32i a12, sp, XT_STK_TMP0 /* temp. save stuff in stack frame */
s32i a13, sp, XT_STK_TMP1
s32i a9, sp, XT_STK_TMP2
/*
Save the overlay state if we are supporting overlays. Since we just saved
three registers, we can conveniently use them here. Note that as of now,
overlays only work for windowed calling ABI.
*/
#ifdef XT_USE_OVLY
l32i a9, sp, XT_STK_PC /* recover saved PC */
_xt_overlay_get_state a9, a12, a13
s32i a9, sp, XT_STK_OVLY /* save overlay state */
#endif
l32i a12, sp, XT_STK_A12 /* recover original a9,12,13 */
l32i a13, sp, XT_STK_A13
l32i a9, sp, XT_STK_A9
addi sp, sp, XT_STK_FRMSZ /* restore the interruptee's SP */
call0 xthal_window_spill_nw /* preserves only a4,5,8,9,12,13 */
addi sp, sp, -XT_STK_FRMSZ
l32i a12, sp, XT_STK_TMP0 /* recover stuff from stack frame */
l32i a13, sp, XT_STK_TMP1
l32i a9, sp, XT_STK_TMP2
#endif
#if XCHAL_EXTRA_SA_SIZE > 0
/*
NOTE: Normally the xthal_save_extra_nw macro only affects address
registers a2-a5. It is theoretically possible for Xtensa processor
designers to write TIE that causes more address registers to be
affected, but it is generally unlikely. If that ever happens,
more registers need to be saved/restored around this macro invocation.
Here we assume a9,12,13 are preserved.
Future Xtensa tools releases might limit the regs that can be affected.
*/
addi a2, sp, XT_STK_EXTRA /* where to save it */
# if XCHAL_EXTRA_SA_ALIGN > 16
movi a3, -XCHAL_EXTRA_SA_ALIGN
and a2, a2, a3 /* align dynamically >16 bytes */
# endif
call0 xthal_save_extra_nw /* destroys a0,2,3,4,5 */
#endif
#if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
mov a0, a9 /* retrieve ret addr */
#endif
ret
/*******************************************************************************
_xt_context_restore
!! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!
Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in Call0
ABI, A14, A15 which are preserved by all interrupt handlers) from an interrupt
stack frame defined in xtensa_rtos.h.
Its counterpart is _xt_context_save (whose caller saved A12, A13).
Caller is responsible to restore PC, PS, A0, A1 (SP).
Entry Conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").
Exit conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").
Other processor state except PC, PS, A0, A1 (SP), is as at the point
of interruption.
*******************************************************************************/
.global _xt_context_restore
.type _xt_context_restore,@function
.align 4
.literal_position
.align 4
_xt_context_restore:
#if XCHAL_EXTRA_SA_SIZE > 0
/*
NOTE: Normally the xthal_restore_extra_nw macro only affects address
registers a2-a5. It is theoretically possible for Xtensa processor
designers to write TIE that causes more address registers to be
affected, but it is generally unlikely. If that ever happens,
more registers need to be saved/restored around this macro invocation.
Here we only assume a13 is preserved.
Future Xtensa tools releases might limit the regs that can be affected.
*/
mov a13, a0 /* preserve ret addr */
addi a2, sp, XT_STK_EXTRA /* where to find it */
# if XCHAL_EXTRA_SA_ALIGN > 16
movi a3, -XCHAL_EXTRA_SA_ALIGN
and a2, a2, a3 /* align dynamically >16 bytes */
# endif
call0 xthal_restore_extra_nw /* destroys a0,2,3,4,5 */
mov a0, a13 /* retrieve ret addr */
#endif
#if XCHAL_HAVE_LOOPS
l32i a2, sp, XT_STK_LBEG
l32i a3, sp, XT_STK_LEND
wsr a2, LBEG
l32i a2, sp, XT_STK_LCOUNT
wsr a3, LEND
wsr a2, LCOUNT
#endif
#ifdef XT_USE_OVLY
/*
If we are using overlays, this is a good spot to check if we need
to restore an overlay for the incoming task. Here we have a bunch
of registers to spare. Note that this step is going to use a few
bytes of storage below SP (SP-20 to SP-32) if an overlay is going
to be restored.
*/
l32i a2, sp, XT_STK_PC /* retrieve PC */
l32i a3, sp, XT_STK_PS /* retrieve PS */
l32i a4, sp, XT_STK_OVLY /* retrieve overlay state */
l32i a5, sp, XT_STK_A1 /* retrieve stack ptr */
_xt_overlay_check_map a2, a3, a4, a5, a6
s32i a2, sp, XT_STK_PC /* save updated PC */
s32i a3, sp, XT_STK_PS /* save updated PS */
#endif
#ifdef XT_USE_SWPRI
/* Restore virtual interrupt priority and interrupt enable */
movi a3, _xt_intdata
l32i a4, a3, 0 /* a4 = _xt_intenable */
l32i a5, sp, XT_STK_VPRI /* a5 = saved _xt_vpri_mask */
and a4, a4, a5
wsr a4, INTENABLE /* update INTENABLE */
s32i a5, a3, 4 /* restore _xt_vpri_mask */
#endif
l32i a3, sp, XT_STK_SAR
l32i a2, sp, XT_STK_A2
wsr a3, SAR
l32i a3, sp, XT_STK_A3
l32i a4, sp, XT_STK_A4
l32i a5, sp, XT_STK_A5
l32i a6, sp, XT_STK_A6
l32i a7, sp, XT_STK_A7
l32i a8, sp, XT_STK_A8
l32i a9, sp, XT_STK_A9
l32i a10, sp, XT_STK_A10
l32i a11, sp, XT_STK_A11
/*
Call0 ABI callee-saved regs a12-15 do not need to be restored here.
However a12-13 were saved for scratch before XT_RTOS_INT_ENTER(),
so need to be restored anyway, despite being callee-saved in Call0.
*/
l32i a12, sp, XT_STK_A12
l32i a13, sp, XT_STK_A13
#ifndef __XTENSA_CALL0_ABI__
l32i a14, sp, XT_STK_A14
l32i a15, sp, XT_STK_A15
#endif
ret
/*******************************************************************************
_xt_coproc_init
Initializes global co-processor management data, setting all co-processors
to "unowned". Leaves CPENABLE as it found it (does NOT clear it).
Called during initialization of the RTOS, before any threads run.
This may be called from normal Xtensa single-threaded application code which
might use co-processors. The Xtensa run-time initialization enables all
co-processors. They must remain enabled here, else a co-processor exception
might occur outside of a thread, which the exception handler doesn't expect.
Entry Conditions:
Xtensa single-threaded run-time environment is in effect.
No thread is yet running.
Exit conditions:
None.
Obeys ABI conventions per prototype:
void _xt_coproc_init(void)
*******************************************************************************/
#if XCHAL_CP_NUM > 0
.global _xt_coproc_init
.type _xt_coproc_init,@function
.align 4
.literal_position
.align 4
_xt_coproc_init:
ENTRY0
/* Initialize thread co-processor ownerships to 0 (unowned). */
movi a2, _xt_coproc_owner_sa /* a2 = base of owner array */
addi a3, a2, (XCHAL_CP_MAX*portNUM_PROCESSORS) << 2 /* a3 = top+1 of owner array */
movi a4, 0 /* a4 = 0 (unowned) */
1: s32i a4, a2, 0
addi a2, a2, 4
bltu a2, a3, 1b
RET0
#endif
/*******************************************************************************
_xt_coproc_release
Releases any and all co-processors owned by a given thread. The thread is
identified by its co-processor state save area defined in xtensa_context.h.
Must be called before a thread's co-proc save area is deleted to avoid
memory corruption when the exception handler tries to save the state.
May be called when a thread terminates or completes but does not delete
the co-proc save area, to avoid the exception handler having to save the
thread's co-proc state before another thread can use it (optimization).
Needs to be called on the processor the thread was running on. Unpinned threads
won't have an entry here because they get pinned as soon as they use a coprocessor.
Entry Conditions:
A2 = Pointer to base of co-processor state save area.
Exit conditions:
None.
Obeys ABI conventions per prototype:
void _xt_coproc_release(void * coproc_sa_base)
*******************************************************************************/
#if XCHAL_CP_NUM > 0
.global _xt_coproc_release
.type _xt_coproc_release,@function
.align 4
.literal_position
.align 4
_xt_coproc_release:
ENTRY0 /* a2 = base of save area */
getcoreid a5
movi a3, XCHAL_CP_MAX << 2
mull a5, a5, a3
movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */
add a3, a3, a5
addi a4, a3, XCHAL_CP_MAX << 2 /* a4 = top+1 of owner array */
movi a5, 0 /* a5 = 0 (unowned) */
rsil a6, XCHAL_EXCM_LEVEL /* lock interrupts */
1: l32i a7, a3, 0 /* a7 = owner at a3 */
bne a2, a7, 2f /* if (coproc_sa_base == owner) */
s32i a5, a3, 0 /* owner = unowned */
2: addi a3, a3, 1<<2 /* a3 = next entry in owner array */
bltu a3, a4, 1b /* repeat until end of array */
3: wsr a6, PS /* restore interrupts */
RET0
#endif
/*******************************************************************************
_xt_coproc_savecs
If there is a current thread and it has a coprocessor state save area, then
save all callee-saved state into this area. This function is called from the
solicited context switch handler. It calls a system-specific function to get
the coprocessor save area base address.
Entry conditions:
- The thread being switched out is still the current thread.
- CPENABLE state reflects which coprocessors are active.
- Registers have been saved/spilled already.
Exit conditions:
- All necessary CP callee-saved state has been saved.
- Registers a2-a7, a13-a15 have been trashed.
Must be called from assembly code only, using CALL0.
*******************************************************************************/
#if XCHAL_CP_NUM > 0
.extern _xt_coproc_sa_offset /* external reference */
.global _xt_coproc_savecs
.type _xt_coproc_savecs,@function
.align 4
.literal_position
.align 4
_xt_coproc_savecs:
/* At entry, CPENABLE should be showing which CPs are enabled. */
rsr a2, CPENABLE /* a2 = which CPs are enabled */
beqz a2, .Ldone /* quick exit if none */
mov a14, a0 /* save return address */
call0 XT_RTOS_CP_STATE /* get address of CP save area */
mov a0, a14 /* restore return address */
beqz a15, .Ldone /* if none then nothing to do */
s16i a2, a15, XT_CP_CS_ST /* save mask of CPs being stored */
movi a13, _xt_coproc_sa_offset /* array of CP save offsets */
l32i a15, a15, XT_CP_ASA /* a15 = base of aligned save area */
#if XCHAL_CP0_SA_SIZE
bbci.l a2, 0, 2f /* CP 0 not enabled */
l32i a14, a13, 0 /* a14 = _xt_coproc_sa_offset[0] */
add a3, a14, a15 /* a3 = save area for CP 0 */
xchal_cp0_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP1_SA_SIZE
bbci.l a2, 1, 2f /* CP 1 not enabled */
l32i a14, a13, 4 /* a14 = _xt_coproc_sa_offset[1] */
add a3, a14, a15 /* a3 = save area for CP 1 */
xchal_cp1_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP2_SA_SIZE
bbci.l a2, 2, 2f
l32i a14, a13, 8
add a3, a14, a15
xchal_cp2_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP3_SA_SIZE
bbci.l a2, 3, 2f
l32i a14, a13, 12
add a3, a14, a15
xchal_cp3_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP4_SA_SIZE
bbci.l a2, 4, 2f
l32i a14, a13, 16
add a3, a14, a15
xchal_cp4_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP5_SA_SIZE
bbci.l a2, 5, 2f
l32i a14, a13, 20
add a3, a14, a15
xchal_cp5_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP6_SA_SIZE
bbci.l a2, 6, 2f
l32i a14, a13, 24
add a3, a14, a15
xchal_cp6_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP7_SA_SIZE
bbci.l a2, 7, 2f
l32i a14, a13, 28
add a3, a14, a15
xchal_cp7_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
.Ldone:
ret
#endif
/*******************************************************************************
_xt_coproc_restorecs
Restore any callee-saved coprocessor state for the incoming thread.
This function is called from coprocessor exception handling, when giving
ownership to a thread that solicited a context switch earlier. It calls a
system-specific function to get the coprocessor save area base address.
Entry conditions:
- The incoming thread is set as the current thread.
- CPENABLE is set up correctly for all required coprocessors.
- a2 = mask of coprocessors to be restored.
Exit conditions:
- All necessary CP callee-saved state has been restored.
- CPENABLE - unchanged.
- Registers a2-a7, a13-a15 have been trashed.
Must be called from assembly code only, using CALL0.
*******************************************************************************/
#if XCHAL_CP_NUM > 0
.global _xt_coproc_restorecs
.type _xt_coproc_restorecs,@function
.align 4
.literal_position
.align 4
_xt_coproc_restorecs:
mov a14, a0 /* save return address */
call0 XT_RTOS_CP_STATE /* get address of CP save area */
mov a0, a14 /* restore return address */
beqz a15, .Ldone2 /* if none then nothing to do */
l16ui a3, a15, XT_CP_CS_ST /* a3 = which CPs have been saved */
xor a3, a3, a2 /* clear the ones being restored */
s32i a3, a15, XT_CP_CS_ST /* update saved CP mask */
movi a13, _xt_coproc_sa_offset /* array of CP save offsets */
l32i a15, a15, XT_CP_ASA /* a15 = base of aligned save area */
#if XCHAL_CP0_SA_SIZE
bbci.l a2, 0, 2f /* CP 0 not enabled */
l32i a14, a13, 0 /* a14 = _xt_coproc_sa_offset[0] */
add a3, a14, a15 /* a3 = save area for CP 0 */
xchal_cp0_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP1_SA_SIZE
bbci.l a2, 1, 2f /* CP 1 not enabled */
l32i a14, a13, 4 /* a14 = _xt_coproc_sa_offset[1] */
add a3, a14, a15 /* a3 = save area for CP 1 */
xchal_cp1_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP2_SA_SIZE
bbci.l a2, 2, 2f
l32i a14, a13, 8
add a3, a14, a15
xchal_cp2_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP3_SA_SIZE
bbci.l a2, 3, 2f
l32i a14, a13, 12
add a3, a14, a15
xchal_cp3_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP4_SA_SIZE
bbci.l a2, 4, 2f
l32i a14, a13, 16
add a3, a14, a15
xchal_cp4_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP5_SA_SIZE
bbci.l a2, 5, 2f
l32i a14, a13, 20
add a3, a14, a15
xchal_cp5_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP6_SA_SIZE
bbci.l a2, 6, 2f
l32i a14, a13, 24
add a3, a14, a15
xchal_cp6_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP7_SA_SIZE
bbci.l a2, 7, 2f
l32i a14, a13, 28
add a3, a14, a15
xchal_cp7_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
.Ldone2:
ret
#endif

View File

@ -0,0 +1,54 @@
/*******************************************************************************
* // Copyright (c) 2003-2015 Cadence Design Systems, Inc.
* //
* // Permission is hereby granted, free of charge, to any person obtaining
* // a copy of this software and associated documentation files (the
* // "Software"), to deal in the Software without restriction, including
* // without limitation the rights to use, copy, modify, merge, publish,
* // distribute, sublicense, and/or sell copies of the Software, and to
* // permit persons to whom the Software is furnished to do so, subject to
* // the following conditions:
* //
* // The above copyright notice and this permission notice shall be included
* // in all copies or substantial portions of the Software.
* //
* // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
* --------------------------------------------------------------------------------
*
* XTENSA INITIALIZATION ROUTINES CODED IN C
*
* This file contains miscellaneous Xtensa RTOS-generic initialization functions
* that are implemented in C.
*
*******************************************************************************/
#ifdef XT_BOARD
#include <xtensa/xtbsp.h>
#endif
#include "xtensa_rtos.h"
#include "esp_clk.h"
#ifdef XT_RTOS_TIMER_INT
unsigned _xt_tick_divisor = 0; /* cached number of cycles per tick */
void _xt_tick_divisor_init( void )
{
_xt_tick_divisor = esp_clk_cpu_freq() / XT_TICK_PER_SEC;
}
/* Deprecated, to be removed */
int xt_clock_freq( void )
{
return esp_clk_cpu_freq();
}
#endif /* XT_RTOS_TIMER_INT */
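
A quick worked example of the divisor computed above (the values are illustrative, not taken from this commit):

/* With a 240 MHz CPU clock and XT_TICK_PER_SEC set to 100 Hz: */
unsigned example_tick_divisor = 240000000u / 100u; /* 2400000 CCOUNT cycles between tick interrupts */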

View File

@ -0,0 +1,174 @@
/*******************************************************************************
* Copyright (c) 2006-2015 Cadence Design Systems Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
******************************************************************************/
/******************************************************************************
* Xtensa-specific interrupt and exception functions for RTOS ports.
* Also see xtensa_intr_asm.S.
******************************************************************************/
#include <stdlib.h>
#include <xtensa/config/core.h>
#include "freertos/FreeRTOS.h"
#include "freertos/xtensa_api.h"
#include "freertos/portable.h"
#include "rom/ets_sys.h"
#if XCHAL_HAVE_EXCEPTIONS
/* Handler table is in xtensa_intr_asm.S */
extern xt_exc_handler _xt_exception_table[ XCHAL_EXCCAUSE_NUM * portNUM_PROCESSORS ];
/*
* Default handler for unhandled exceptions.
* CHANGED: We do this in panic.c now
*/
/*void xt_unhandled_exception(XtExcFrame *frame) */
/*{ */
/*exit(-1); */
/*} */
extern void xt_unhandled_exception( XtExcFrame * frame );
/*
* This function registers a handler for the specified exception.
* The function returns the address of the previous handler.
* On error, it returns 0.
*/
xt_exc_handler xt_set_exception_handler( int n,
xt_exc_handler f )
{
xt_exc_handler old;
if( ( n < 0 ) || ( n >= XCHAL_EXCCAUSE_NUM ) )
{
return 0; /* invalid exception number */
}
/* Convert exception number to an _xt_exception_table index */
n = n * portNUM_PROCESSORS + xPortGetCoreID();
old = _xt_exception_table[ n ];
if( f )
{
_xt_exception_table[ n ] = f;
}
else
{
_xt_exception_table[ n ] = &xt_unhandled_exception;
}
return( ( old == &xt_unhandled_exception ) ? 0 : old );
}
#endif /* if XCHAL_HAVE_EXCEPTIONS */
#if XCHAL_HAVE_INTERRUPTS
/* Handler table is in xtensa_intr_asm.S */
typedef struct xt_handler_table_entry
{
void * handler;
void * arg;
} xt_handler_table_entry;
extern xt_handler_table_entry _xt_interrupt_table[ XCHAL_NUM_INTERRUPTS * portNUM_PROCESSORS ];
/*
* Default handler for unhandled interrupts.
*/
void xt_unhandled_interrupt( void * arg )
{
ets_printf( "Unhandled interrupt %d on cpu %d!\n", ( int ) arg, xPortGetCoreID() );
}
/*
* This function registers a handler for the specified interrupt. The "arg"
* parameter specifies the argument to be passed to the handler when it is
* invoked. The function returns the address of the previous handler.
* On error, it returns 0.
*/
xt_handler xt_set_interrupt_handler( int n,
xt_handler f,
void * arg )
{
xt_handler_table_entry * entry;
xt_handler old;
if( ( n < 0 ) || ( n >= XCHAL_NUM_INTERRUPTS ) )
{
return 0; /* invalid interrupt number */
}
if( Xthal_intlevel[ n ] > XCHAL_EXCM_LEVEL )
{
return 0; /* priority level too high to safely handle in C */
}
/* Convert interrupt number to an _xt_interrupt_table index */
n = n * portNUM_PROCESSORS + xPortGetCoreID();
entry = _xt_interrupt_table + n;
old = entry->handler;
if( f )
{
entry->handler = f;
entry->arg = arg;
}
else
{
entry->handler = &xt_unhandled_interrupt;
entry->arg = ( void * ) n;
}
return( ( old == &xt_unhandled_interrupt ) ? 0 : old );
}
#if CONFIG_SYSVIEW_ENABLE
void * xt_get_interrupt_handler_arg( int n )
{
xt_handler_table_entry * entry;
if( ( n < 0 ) || ( n >= XCHAL_NUM_INTERRUPTS ) )
{
return 0; /* invalid interrupt number */
}
/* Convert interrupt number to an _xt_interrupt_table index */
n = n * portNUM_PROCESSORS + xPortGetCoreID();
entry = _xt_interrupt_table + n;
return entry->arg;
}
#endif /* if CONFIG_SYSVIEW_ENABLE */
#endif /* XCHAL_HAVE_INTERRUPTS */
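
A usage sketch for the registration function above (the interrupt number and handler are hypothetical; on ESP-IDF, application code normally goes through esp_intr_alloc() rather than calling this directly):

static void prvExampleIsr( void * arg )
{
    int source = ( int ) arg; /* the argument registered below */

    ( void ) source;          /* service the peripheral here */
}

void vExampleInstallHandler( void )
{
    /* Route Xtensa interrupt 13 (a hypothetical, EXCM-level-or-lower source) on this core to prvExampleIsr. */
    xt_handler prev = xt_set_interrupt_handler( 13, prvExampleIsr, ( void * ) 13 );

    /* A return value of 0 means no user handler was installed before (or the number/level was invalid). */
    ( void ) prev;
}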

View File

@ -0,0 +1,225 @@
/*******************************************************************************
Copyright (c) 2006-2015 Cadence Design Systems Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
******************************************************************************/
/******************************************************************************
Xtensa interrupt handling data and assembly routines.
Also see xtensa_intr.c and xtensa_vectors.S.
******************************************************************************/
#include <xtensa/hal.h>
#include <xtensa/config/core.h>
#include "xtensa_context.h"
#include "FreeRTOSConfig.h"
#if XCHAL_HAVE_INTERRUPTS
/*
-------------------------------------------------------------------------------
INTENABLE virtualization information.
-------------------------------------------------------------------------------
*/
#if XT_USE_SWPRI
/* Warning - this is not multicore-compatible. */
.data
.global _xt_intdata
.align 8
_xt_intdata:
.global _xt_intenable
.type _xt_intenable,@object
.size _xt_intenable,4
.global _xt_vpri_mask
.type _xt_vpri_mask,@object
.size _xt_vpri_mask,4
_xt_intenable: .word 0 /* Virtual INTENABLE */
_xt_vpri_mask: .word 0xFFFFFFFF /* Virtual priority mask */
#endif
/*
-------------------------------------------------------------------------------
Table of C-callable interrupt handlers for each interrupt. Note that not all
slots can be filled, because interrupts at level > EXCM_LEVEL will not be
dispatched to a C handler by default.
Stored as:
int 0 cpu 0
int 0 cpu 1
...
int 0 cpu n
int 1 cpu 0
int 1 cpu 1
etc
-------------------------------------------------------------------------------
*/
.data
.global _xt_interrupt_table
.align 8
_xt_interrupt_table:
.set i, 0
.rept XCHAL_NUM_INTERRUPTS*portNUM_PROCESSORS
.word xt_unhandled_interrupt /* handler address */
.word i /* handler arg (default: intnum) */
.set i, i+1
.endr
#endif /* XCHAL_HAVE_INTERRUPTS */
#if XCHAL_HAVE_EXCEPTIONS
/*
-------------------------------------------------------------------------------
Table of C-callable exception handlers for each exception. Note that not all
slots will be active, because some exceptions (e.g. coprocessor exceptions)
are always handled by the OS and cannot be hooked by user handlers.
Stored as:
exc 0 cpu 0
exc 0 cpu 1
...
exc 0 cpu n
exc 1 cpu 0
exc 1 cpu 1
etc
-------------------------------------------------------------------------------
*/
.data
.global _xt_exception_table
.align 4
_xt_exception_table:
.rept XCHAL_EXCCAUSE_NUM * portNUM_PROCESSORS
.word xt_unhandled_exception /* handler address */
.endr
#endif
/*
-------------------------------------------------------------------------------
unsigned int xt_ints_on ( unsigned int mask )
Enables a set of interrupts. Does not simply set INTENABLE directly, but
computes it as a function of the current virtual priority if XT_USE_SWPRI is
enabled.
Can be called from interrupt handlers.
-------------------------------------------------------------------------------
*/
.text
.align 4
.global xt_ints_on
.type xt_ints_on,@function
xt_ints_on:
ENTRY0
#if XCHAL_HAVE_INTERRUPTS
#if XT_USE_SWPRI
movi a3, 0
movi a4, _xt_intdata
xsr a3, INTENABLE /* Disables all interrupts */
rsync
l32i a3, a4, 0 /* a3 = _xt_intenable */
l32i a6, a4, 4 /* a6 = _xt_vpri_mask */
or a5, a3, a2 /* a5 = _xt_intenable | mask */
s32i a5, a4, 0 /* _xt_intenable |= mask */
and a5, a5, a6 /* a5 = _xt_intenable & _xt_vpri_mask */
wsr a5, INTENABLE /* Reenable interrupts */
mov a2, a3 /* Previous mask */
#else
movi a3, 0
xsr a3, INTENABLE /* Disables all interrupts */
rsync
or a2, a3, a2 /* set bits in mask */
wsr a2, INTENABLE /* Re-enable ints */
rsync
mov a2, a3 /* return prev mask */
#endif
#else
movi a2, 0 /* Return zero */
#endif
RET0
.size xt_ints_on, . - xt_ints_on
/*
-------------------------------------------------------------------------------
unsigned int xt_ints_off ( unsigned int mask )
Disables a set of interrupts. Does not simply set INTENABLE directly,
but computes it as a function of the current virtual priority if XT_USE_SWPRI is
enabled.
Can be called from interrupt handlers.
-------------------------------------------------------------------------------
*/
.text
.align 4
.global xt_ints_off
.type xt_ints_off,@function
xt_ints_off:
ENTRY0
#if XCHAL_HAVE_INTERRUPTS
#if XT_USE_SWPRI
movi a3, 0
movi a4, _xt_intdata
xsr a3, INTENABLE /* Disables all interrupts */
rsync
l32i a3, a4, 0 /* a3 = _xt_intenable */
l32i a6, a4, 4 /* a6 = _xt_vpri_mask */
or a5, a3, a2 /* a5 = _xt_intenable | mask */
xor a5, a5, a2 /* a5 = _xt_intenable & ~mask */
s32i a5, a4, 0 /* _xt_intenable &= ~mask */
and a5, a5, a6 /* a5 = _xt_intenable & _xt_vpri_mask */
wsr a5, INTENABLE /* Reenable interrupts */
mov a2, a3 /* Previous mask */
#else
movi a4, 0
xsr a4, INTENABLE /* Disables all interrupts */
rsync
or a3, a4, a2 /* set bits in mask */
xor a3, a3, a2 /* clear the bits that are set in mask */
wsr a3, INTENABLE /* Re-enable ints */
rsync
mov a2, a4 /* return prev mask */
#endif
#else
movi a2, 0 /* return zero */
#endif
RET0
.size xt_ints_off, . - xt_ints_off
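
For illustration, how these two routines are typically called from C (the interrupt number is hypothetical, and the prototypes — unsigned int xt_ints_on/xt_ints_off( unsigned int mask ) — are assumed to be provided by the port's xtensa_api.h):

void vExampleMaskSource( void )
{
    unsigned int uxMask = 1u << 13;              /* hypothetical Xtensa interrupt number 13 */
    unsigned int uxPrev = xt_ints_off( uxMask ); /* returns the previous (virtual) INTENABLE mask */

    /* ... work that must not be preempted by that source ... */

    xt_ints_on( uxPrev & uxMask );               /* re-enable it only if it was enabled before */
}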

View File

@ -0,0 +1,559 @@
/*
Copyright 2019 Espressif Systems (Shanghai) PTE LTD
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* LoadStoreErrorCause: Occurs when an 8-bit or 16-bit load/store targets a memory region that only supports 32-bit access
* LoadStoreAlignmentCause: Occurs when a 16-bit or 32-bit load/store uses an unaligned address
*
* xxxx xxxx = imm8 field
* yyyy = imm4 field
* ssss = s field
* tttt = t field
*
* 16 0
* -------------------
* L32I.N yyyy ssss tttt 1000
* S32I.N yyyy ssss tttt 1001
*
* 23 0
* -----------------------------
* L8UI xxxx xxxx 0000 ssss tttt 0010 <- LoadStoreError
* L16UI xxxx xxxx 0001 ssss tttt 0010 <- LoadStoreError, LoadStoreAlignment
* L16SI xxxx xxxx 1001 ssss tttt 0010 <- LoadStoreError, LoadStoreAlignment
* L32I xxxx xxxx 0010 ssss tttt 0010 <- LoadStoreAlignment
*
* S8I xxxx xxxx 0100 ssss tttt 0010 <- LoadStoreError
* S16I xxxx xxxx 0101 ssss tttt 0010 <- LoadStoreError, LoadStoreAlignment
* S32I xxxx xxxx 0110 ssss tttt 0010 <- LoadStoreAlignment
*
* ******* UNSUPPORTED *******
*
* L32E 0000 1001 rrrr ssss tttt 0000
* S32E 0100 1001 rrrr ssss tttt 0000
* -----------------------------
*/
#include "xtensa_rtos.h"
#include "sdkconfig.h"
#include "soc/soc.h"
#define LOADSTORE_HANDLER_STACK_SZ 8
.section .bss, "aw"
.balign 16
LoadStoreHandlerStack:
.rept LOADSTORE_HANDLER_STACK_SZ
.word 0
.endr
/* LoadStoreErrorCause handler:
*
* Completes 8-bit or 16-bit load/store instructions targeting a 32-bit-only accessible memory region
* Called from UserExceptionVector if EXCCAUSE is LoadStoreErrorCause
*/
.global LoadStoreErrorHandler
.section .iram1, "ax"
.literal_position
.balign 4
LoadStoreErrorHandler:
.type LoadStoreErrorHandler, @function
wsr a0, depc // Save return address in depc
mov a0, sp
movi sp, LoadStoreHandlerStack
s32i a0, sp, 0x04 // Since a0 contains value of a1
s32i a2, sp, 0x08
s32i a3, sp, 0x0c
s32i a4, sp, 0x10
rsr a0, sar // Save SAR in a0 to restore later
/* Check whether the address lies in the valid range */
rsr a3, excvaddr
movi a4, _iram_text_end // End of code section of IRAM
bge a3, a4, 1f
movi a4, SOC_CACHE_APP_LOW // Check if in APP cache region
blt a3, a4, .LS_wrong_opcode
movi a4, SOC_CACHE_APP_HIGH
bge a3, a4, .LS_wrong_opcode
j 2f
1:
movi a4, SOC_IRAM_HIGH // End of IRAM address range
bge a3, a4, .LS_wrong_opcode
2:
/* Examine the opcode which generated the exception */
/* Note: Instructions are in this order to avoid pipeline stalls. */
rsr a2, epc1
movi a4, ~3
ssa8l a2 // sar is now correct shift for aligned read
and a2, a2, a4 // a2 now 4-byte aligned address of instruction
l32i a4, a2, 0
l32i a2, a2, 4
src a2, a2, a4 // a2 now instruction that failed
bbci a2, 1, .LS_wrong_opcode
bbsi a2, 14, .LSE_store_op // Store instruction
/* l8/l16ui/l16si */
movi a4, ~3
and a4, a3, a4 // a4 now word aligned read address
ssa8l a3 // sar is now shift to extract a3's byte
l32i a4, a4, 0 // perform the actual read
srl a4, a4 // shift right correct distance
extui a3, a2, 12, 4
bnez a3, 1f // l16ui/l16si
extui a4, a4, 0, 8 // mask off bits needed for an l8
j 2f
1:
extui a4, a4, 0, 16
bbci a2, 15, 2f // l16ui
/* Sign adjustment */
slli a4, a4, 16
srai a4, a4, 16 // a4 contains the value
2:
/* a4 contains the value */
rsr a3, epc1
addi a3, a3, 3
wsr a3, epc1
wsr a0, sar
rsr a0, excsave1
extui a2, a2, 3, 5
blti a2, 10, .LSE_stack_reg
movi a3, .LS_jumptable_base
addx8 a2, a2, a3 // a2 is now the address to jump to
l32i a3, sp, 0x0c
jx a2
.LSE_stack_reg:
addx2 a2, a2, sp
s32i a4, a2, 0
/* Restore all values */
l32i a4, sp, 0x10
l32i a3, sp, 0x0c
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rfe
.LSE_store_op:
s32i a5, a1, 0x14
s32i a6, a1, 0x18
/* a2 -> instruction that caused the error */
/* a3 -> unaligned address */
extui a4, a2, 4, 4
blti a4, 7, 1f
movi a5, .LSE_store_reg
addx8 a5, a4, a5
jx a5
1:
addx4 a4, a4, sp
l32i a4, a4, 0
.LSE_store_data:
/* a4 contains the value */
rsr a6, epc1
addi a6, a6, 3
wsr a6, epc1
ssa8b a3
movi a5, -1
bbsi a2, 12, 1f // s16
extui a4, a4, 0, 8
movi a6, 0xff
j 2f
1:
extui a4, a4, 0, 16
movi a6, 0xffff
2:
sll a4, a4 // shift the value to proper offset
sll a6, a6
xor a5, a5, a6 // a5 contains the mask
movi a6, ~3
and a3, a3, a6 // a3 has the aligned address
l32i a6, a3, 0 // a6 contains the data at the aligned address
and a6, a6, a5
or a4, a6, a4
s32i a4, a3, 0
/* Restore registers */
wsr a0, sar
l32i a6, sp, 0x18
l32i a5, sp, 0x14
l32i a4, sp, 0x10
l32i a3, sp, 0x0c
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rsr a0, excsave1
rfe
.LSE_store_reg:
.org .LSE_store_reg + (7 * 8)
mov a4, a7
j .LSE_store_data
.org .LSE_store_reg + (8 * 8)
mov a4, a8
j .LSE_store_data
.org .LSE_store_reg + (9 * 8)
mov a4, a9
j .LSE_store_data
.org .LSE_store_reg + (10 * 8)
mov a4, a10
j .LSE_store_data
.org .LSE_store_reg + (11 * 8)
mov a4, a11
j .LSE_store_data
.org .LSE_store_reg + (12 * 8)
mov a4, a12
j .LSE_store_data
.org .LSE_store_reg + (13 * 8)
mov a4, a13
j .LSE_store_data
.org .LSE_store_reg + (14 * 8)
mov a4, a14
j .LSE_store_data
.org .LSE_store_reg + (15 * 8)
mov a4, a15
j .LSE_store_data
/* LoadStoreAlignmentCause handler:
*
* Completes unaligned 16-bit and 32-bit load/store instructions within a 32-bit accessible memory region
* Called from UserExceptionVector if EXCCAUSE is LoadStoreAlignmentCause
*/
.global AlignmentErrorHandler
.section .iram1, "ax"
.literal_position
.balign 4
AlignmentErrorHandler:
.type AlignmentErrorHandler, @function
wsr a0, depc // Save return address in depc
mov a0, sp
movi sp, LoadStoreHandlerStack
s32i a0, sp, 0x04 // Since a0 contains value of a1
s32i a2, sp, 0x08
s32i a3, sp, 0x0c
s32i a4, sp, 0x10
rsr a0, sar // Save SAR in a0 to restore later
/* Check whether the address lies in the valid range */
rsr a3, excvaddr
movi a4, _iram_text_end // End of code section of IRAM
bge a3, a4, 1f
movi a4, SOC_CACHE_APP_LOW // Check if in APP cache region
blt a3, a4, .LS_wrong_opcode
movi a4, SOC_CACHE_APP_HIGH
bge a3, a4, .LS_wrong_opcode
j 2f
1:
movi a4, SOC_IRAM_HIGH // End of IRAM address range
bge a3, a4, .LS_wrong_opcode
2:
/* Examine the opcode which generated the exception */
/* Note: Instructions are in this order to avoid pipeline stalls. */
rsr a2, epc1
movi a4, ~3
ssa8l a2 // sar is now correct shift for aligned read
and a2, a2, a4 // a2 now 4-byte aligned address of instruction
l32i a4, a2, 0
l32i a2, a2, 4
/* a2 has the instruction that caused the error */
src a2, a2, a4
extui a4, a2, 0, 4
addi a4, a4, -9
beqz a4, .LSA_store_op
bbsi a2, 14, .LSA_store_op
ssa8l a3 // a3 contains the unaligned address
movi a4, ~3
and a4, a3, a4 // a4 has the aligned address
l32i a3, a4, 0
l32i a4, a4, 4
src a4, a4, a3
rsr a3, epc1
addi a3, a3, 2
bbsi a2, 3, 1f // l32i.n
bbci a2, 1, .LS_wrong_opcode
addi a3, a3, 1
bbsi a2, 13, 1f // l32
extui a4, a4, 0, 16
bbci a2, 15, 1f // l16ui
/* Sign adjustment */
slli a4, a4, 16
srai a4, a4, 16 // a4 contains the value
1:
wsr a3, epc1
wsr a0, sar
rsr a0, excsave1
extui a2, a2, 4, 4
blti a2, 5, .LSA_stack_reg // a3 contains the target register
movi a3, .LS_jumptable_base
slli a2, a2, 4
add a2, a2, a3 // a2 is now the address to jump to
l32i a3, sp, 0x0c
jx a2
.LSA_stack_reg:
addx4 a2, a2, sp
s32i a4, a2, 0
/* Restore all values */
l32i a4, sp, 0x10
l32i a3, sp, 0x0c
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rfe
/* Store instruction */
.LSA_store_op:
s32i a5, sp, 0x14
s32i a6, sp, 0x18
s32i a7, sp, 0x1c
/* a2 -> instruction that caused the error */
/* a3 -> unaligned address */
extui a4, a2, 4, 4
blti a4, 8, 1f
movi a5, .LSA_store_reg
addx8 a5, a4, a5
jx a5
1:
addx4 a4, a4, sp
l32i a4, a4, 0 // a4 contains the value
.LSA_store_data:
movi a6, 0
rsr a7, epc1
addi a7, a7, 2
bbsi a2, 3, 1f // s32i.n
bbci a2, 1, .LS_wrong_opcode
addi a7, a7, 1
bbsi a2, 13, 1f // s32i
movi a5, -1
extui a4, a4, 0, 16
slli a6, a5, 16 // 0xffff0000
1:
wsr a7, epc1
movi a5, ~3
and a5, a3, a5 // a5 has the aligned address
ssa8b a3
movi a3, -1
src a7, a6, a3
src a3, a3, a6
/* Store data on lower address */
l32i a6, a5, 0
and a6, a6, a7
sll a7, a4
or a6, a6, a7
s32i a6, a5, 0
/* Store data on higher address */
l32i a7, a5, 4
srl a6, a4
and a3, a7, a3
or a3, a3, a6
s32i a3, a5, 4
/* Restore registers */
wsr a0, sar
rsr a0, excsave1
l32i a7, sp, 0x1c
l32i a6, sp, 0x18
l32i a5, sp, 0x14
l32i a4, sp, 0x10
l32i a3, sp, 0x0c
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rfe
.LSA_store_reg:
.org .LSA_store_reg + (8 * 8)
mov a4, a8
j .LSA_store_data
.org .LSA_store_reg + (9 * 8)
mov a4, a9
j .LSA_store_data
.org .LSA_store_reg + (10 * 8)
mov a4, a10
j .LSA_store_data
.org .LSA_store_reg + (11 * 8)
mov a4, a11
j .LSA_store_data
.org .LSA_store_reg + (12 * 8)
mov a4, a12
j .LSA_store_data
.org .LSA_store_reg + (13 * 8)
mov a4, a13
j .LSA_store_data
.org .LSA_store_reg + (14 * 8)
mov a4, a14
j .LSA_store_data
.org .LSA_store_reg + (15 * 8)
mov a4, a15
j .LSA_store_data
/*
* Common routines for both the exception handlers
*/
.balign 4
.LS_jumptable:
/* The first 5 entries (80 bytes) of this table are unused (registers
a0..a4 are handled separately above). Rather than have a whole bunch
of wasted space, just pretend that the table starts 80 bytes
earlier in memory. */
.set .LS_jumptable_base, .LS_jumptable - (16 * 5)
.org .LS_jumptable_base + (16 * 5)
mov a5, a4
l32i a4, sp, 0x10
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rfe
.org .LS_jumptable_base + (16 * 6)
mov a6, a4
l32i a4, sp, 0x10
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rfe
.org .LS_jumptable_base + (16 * 7)
mov a7, a4
l32i a4, sp, 0x10
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rfe
.org .LS_jumptable_base + (16 * 8)
mov a8, a4
l32i a4, sp, 0x10
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rfe
.org .LS_jumptable_base + (16 * 9)
mov a9, a4
l32i a4, sp, 0x10
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rfe
.org .LS_jumptable_base + (16 * 10)
mov a10, a4
l32i a4, sp, 0x10
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rfe
.org .LS_jumptable_base + (16 * 11)
mov a11, a4
l32i a4, sp, 0x10
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rfe
.org .LS_jumptable_base + (16 * 12)
mov a12, a4
l32i a4, sp, 0x10
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rfe
.org .LS_jumptable_base + (16 * 13)
mov a13, a4
l32i a4, sp, 0x10
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rfe
.org .LS_jumptable_base + (16 * 14)
mov a14, a4
l32i a4, sp, 0x10
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rfe
.org .LS_jumptable_base + (16 * 15)
mov a15, a4
l32i a4, sp, 0x10
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rfe
.LS_wrong_opcode:
/* Reaches here if the address is in an invalid range or the opcode isn't supported.
* Restore registers and jump back to _xt_user_exc
*/
wsr a0, sar
l32i a4, sp, 0x10
l32i a3, sp, 0x0c
l32i a2, sp, 0x08
l32i a1, sp, 0x04
rsr a0, depc
ret // Equivalent to jx a0
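
A plain-C restatement of the first opcode checks in LoadStoreErrorHandler above, following the encoding table at the top of this file (illustrative only; the real code reassembles the 24-bit instruction from two aligned words before testing it):

static int prvClassifyLoadStoreOpcode( uint32_t insn )
{
    if( ( insn & ( 1u << 1 ) ) == 0 ) /* bit 1 of the opcode nibble must be set (LSAI group, low nibble 0010) */
    {
        return -1;                    /* not an instruction these handlers emulate */
    }

    if( insn & ( 1u << 14 ) )         /* op1 field 01xx: S8I/S16I/S32I */
    {
        return 1;                     /* store: take the store path */
    }

    return 0;                         /* load: L8UI/L16UI/L16SI (or L32I in the alignment handler) */
}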

View File

@ -0,0 +1,67 @@
/* xtensa_overlay_os_hook.c -- Overlay manager OS hooks for FreeRTOS. */
/* Copyright (c) 2015-2015 Cadence Design Systems Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be included */
/* in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "FreeRTOS.h"
#include "semphr.h"
#if configUSE_MUTEX
/* Mutex object that controls access to the overlay. Currently only one
* overlay region is supported so one mutex suffices.
*/
static SemaphoreHandle_t xt_overlay_mutex;
/* This function should be overridden to provide OS specific init such
* as the creation of a mutex lock that can be used for overlay locking.
* Typically this mutex would be set up with priority inheritance. See
* overlay manager documentation for more details.
*/
void xt_overlay_init_os( void )
{
/* Create the mutex for overlay access. Priority inheritance is
* required.
*/
xt_overlay_mutex = xSemaphoreCreateMutex();
}
/* This function locks access to shared overlay resources, typically
* by acquiring a mutex.
*/
void xt_overlay_lock( void )
{
xSemaphoreTake( xt_overlay_mutex, 0 );
}
/* This function releases access to shared overlay resources, typically
* by unlocking a mutex.
*/
void xt_overlay_unlock( void )
{
xSemaphoreGive( xt_overlay_mutex );
}
#endif /* if configUSE_MUTEX */

View File

@ -0,0 +1,164 @@
// Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "xtensa_rtos.h"
#include "esp_panic.h"
#include "sdkconfig.h"
#include "soc/soc.h"
/*
This file contains the default handlers for the high interrupt levels as well as some specialized exceptions.
The default behaviour is to just exit the interrupt or call the panic handler on the exceptions
*/
#if XCHAL_HAVE_DEBUG
.global xt_debugexception
.weak xt_debugexception
.set xt_debugexception, _xt_debugexception
.section .iram1,"ax"
.type _xt_debugexception,@function
.align 4
_xt_debugexception:
movi a0,PANIC_RSN_DEBUGEXCEPTION
wsr a0,EXCCAUSE
/* _xt_panic assumes a level 1 exception. As we're
crashing anyhow, copy EPC & EXCSAVE from DEBUGLEVEL
to level 1. */
rsr a0,(EPC + XCHAL_DEBUGLEVEL)
wsr a0,EPC_1
rsr a0,(EXCSAVE + XCHAL_DEBUGLEVEL)
wsr a0,EXCSAVE_1
call0 _xt_panic /* does not return */
rfi XCHAL_DEBUGLEVEL
#endif /* Debug exception */
#if XCHAL_NUM_INTLEVELS >=2 && XCHAL_EXCM_LEVEL <2 && XCHAL_DEBUGLEVEL !=2
.global xt_highint2
.weak xt_highint2
.set xt_highint2, _xt_highint2
.section .iram1,"ax"
.type _xt_highint2,@function
.align 4
_xt_highint2:
/* Default handler does nothing; just returns */
.align 4
.L_xt_highint2_exit:
rsr a0, EXCSAVE_2 /* restore a0 */
rfi 2
#endif /* Level 2 */
#if XCHAL_NUM_INTLEVELS >=3 && XCHAL_EXCM_LEVEL <3 && XCHAL_DEBUGLEVEL !=3
.global xt_highint3
.weak xt_highint3
.set xt_highint3, _xt_highint3
.section .iram1,"ax"
.type _xt_highint3,@function
.align 4
_xt_highint3:
/* Default handler does nothing; just returns */
.align 4
.L_xt_highint3_exit:
rsr a0, EXCSAVE_3 /* restore a0 */
rfi 3
#endif /* Level 3 */
#if XCHAL_NUM_INTLEVELS >=4 && XCHAL_EXCM_LEVEL <4 && XCHAL_DEBUGLEVEL !=4
.global xt_highint4
.weak xt_highint4
.set xt_highint4, _xt_highint4
.section .iram1,"ax"
.type _xt_highint4,@function
.align 4
_xt_highint4:
/* Default handler does nothing; just returns */
.align 4
.L_xt_highint4_exit:
rsr a0, EXCSAVE_4 /* restore a0 */
rfi 4
#endif /* Level 4 */
#if XCHAL_NUM_INTLEVELS >=5 && XCHAL_EXCM_LEVEL <5 && XCHAL_DEBUGLEVEL !=5
.global xt_highint5
.weak xt_highint5
.set xt_highint5, _xt_highint5
.section .iram1,"ax"
.type _xt_highint5,@function
.align 4
_xt_highint5:
/* Default handler does nothing; just returns */
.align 4
.L_xt_highint5_exit:
rsr a0, EXCSAVE_5 /* restore a0 */
rfi 5
#endif /* Level 5 */
#if XCHAL_NUM_INTLEVELS >=6 && XCHAL_EXCM_LEVEL <6 && XCHAL_DEBUGLEVEL !=6
.global _xt_highint6
.global xt_highint6
.weak xt_highint6
.set xt_highint6, _xt_highint6
.section .iram1,"ax"
.type _xt_highint6,@function
.align 4
_xt_highint6:
/* Default handler does nothing; just returns */
.align 4
.L_xt_highint6_exit:
rsr a0, EXCSAVE_6 /* restore a0 */
rfi 6
#endif /* Level 6 */
#if XCHAL_HAVE_NMI
.global _xt_nmi
.global xt_nmi
.weak xt_nmi
.set xt_nmi, _xt_nmi
.section .iram1,"ax"
.type _xt_nmi,@function
.align 4
_xt_nmi:
/* Default handler does nothing; just returns */
.align 4
.L_xt_nmi_exit:
rsr a0, EXCSAVE + XCHAL_NMILEVEL /* restore a0 */
rfi XCHAL_NMILEVEL
#endif /* NMI */

File diff suppressed because it is too large