update to v6.1.3

This commit is contained in:
Scott Larson
2021-01-08 13:31:36 -08:00
parent b0e9b132b5
commit f108ebdbaf
477 changed files with 98409 additions and 5320 deletions

View File

@@ -0,0 +1,406 @@
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* This file contains the interface functions to provide thread-safe */
/* operation of the C library. Both newlib and the Xtensa C Library */
/* are supported. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
#include "tx_api.h" /* TX_THREAD_SAFE_CLIB may be defined by tx_port.h */
#include "tx_thread.h"
#include "tx_initialize.h"
/* Xtensa specific */
#include <xtensa/config/system.h>
#ifdef TX_THREAD_SAFE_CLIB /* this file is only needed if using C lib */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>     /* memset() used by _tx_clib_reent_init() */
#if XSHAL_CLIB == XTHAL_CLIB_NEWLIB
#include <malloc.h>
/* NOTE: should have been declared in reent.h... */
extern void _wrapup_reent(struct _reent * ptr);
/* Mutex used for all C library protection */
TX_MUTEX clib_lock_mutex;
/**************************************************************************/
/* __malloc_lock - called by the malloc() family of routines when they */
/* need to lock the memory pool. A call to malloc() may call this */
/* function recursively. */
/**************************************************************************/
void
__malloc_lock (struct _reent * ptr)
{
    /* Single global mutex guards the heap; the reent pointer is unused. */
    (void) ptr;

    /* Before the kernel is up there is nothing to lock against. */
    if (_tx_thread_system_state == TX_INITIALIZE_IS_FINISHED) {
        tx_mutex_get (&clib_lock_mutex, TX_WAIT_FOREVER);
    }
}
/**************************************************************************/
/* __malloc_unlock - called by the malloc() family of routines when they */
/* need to unlock the memory pool. */
/**************************************************************************/
void
__malloc_unlock (struct _reent * ptr)
{
    /* Single global mutex guards the heap; the reent pointer is unused. */
    (void) ptr;

    /* Before the kernel is up the matching lock call was a no-op too. */
    if (_tx_thread_system_state == TX_INITIALIZE_IS_FINISHED) {
#ifndef THREADX_TESTSUITE /* see THREADX_TESTSUITE comments below */
        tx_mutex_prioritize (&clib_lock_mutex); /* is this at all necessary? */
#endif
        tx_mutex_put (&clib_lock_mutex);
    }
}
/**************************************************************************/
/* __env_lock - called by the setenv() family of routines when they */
/* need to modify the environment. A call to setenv() may call this */
/* function recursively. */
/**************************************************************************/
void
__env_lock (struct _reent * ptr)
{
    /* Environment shares the single C library mutex; reent ptr unused. */
    (void) ptr;

    if (_tx_thread_system_state == TX_INITIALIZE_IS_FINISHED) {
        tx_mutex_get (&clib_lock_mutex, TX_WAIT_FOREVER);
    }
}
/**************************************************************************/
/* __env_unlock - called by the setenv() family of routines when they */
/* need to unlock the environment. */
/**************************************************************************/
void
__env_unlock (struct _reent * ptr)
{
    /* Environment shares the single C library mutex; reent ptr unused. */
    (void) ptr;

    if (_tx_thread_system_state == TX_INITIALIZE_IS_FINISHED) {
        tx_mutex_prioritize (&clib_lock_mutex);
        tx_mutex_put (&clib_lock_mutex);
    }
}
#endif /* XSHAL_CLIB == XTHAL_CLIB_NEWLIB */
#if XSHAL_CLIB == XTHAL_CLIB_XCLIB
#include <errno.h>
#include <sys/reent.h>
#define XT_NUM_CLIB_LOCKS (_MAX_LOCK + FOPEN_MAX)
typedef TX_MUTEX * _Rmtx;
static TX_MUTEX xclib_locks[XT_NUM_CLIB_LOCKS];
static uint32_t lcnt;
/* Override this and set to nonzero to enable locking. */
int32_t _xclib_use_mt = 1;
/**************************************************************************/
/* _Mtxinit - initialize a lock. Called once for each lock. */
/**************************************************************************/
/* Initialize one C library lock.  Called by the XCLIB runtime once per
   lock; hands out slots from the static xclib_locks[] pool in order. */
void
_Mtxinit (_Rmtx * mtx)
{
    TX_MUTEX * lock;

    if (lcnt >= XT_NUM_CLIB_LOCKS) {
        /* Fatal error: the static lock pool is exhausted.  Previously this
           check had an empty body and execution fell through to index past
           the end of xclib_locks[].  Hand back a NULL lock instead;
           _Mtxlock/_Mtxunlock/_Mtxdst all treat a NULL handle as a no-op. */
        *mtx = NULL;
        return;
    }
    lock = &(xclib_locks[lcnt]);
    lcnt++;
    /* See notes for newlib case below. */
#ifdef THREADX_TESTSUITE
    tx_mutex_create (lock, "Clib lock", 0);
#else
    tx_mutex_create (lock, "Clib lock", TX_INHERIT);
#endif
    *mtx = lock;
}
/**************************************************************************/
/* _Mtxdst - destroy a lock. Called once for each lock. */
/**************************************************************************/
/* Destroy one C library lock.  NULL handles are ignored. */
void
_Mtxdst (_Rmtx * mtx)
{
    if ((mtx != NULL) && (*mtx != NULL)) {
        tx_mutex_delete (*mtx);
    }
}
/**************************************************************************/
/* _Mtxlock - acquire lock. */
/**************************************************************************/
/* Acquire a C library lock, blocking until available.  NULL is a no-op. */
void
_Mtxlock (_Rmtx * mtx)
{
    if ((mtx != NULL) && (*mtx != NULL)) {
        tx_mutex_get (*mtx, TX_WAIT_FOREVER);
    }
}
/**************************************************************************/
/* _Mtxunlock - release a lock. */
/**************************************************************************/
/* Release a C library lock.  NULL is a no-op. */
void
_Mtxunlock (_Rmtx * mtx)
{
    if ((mtx != NULL) && (*mtx != NULL)) {
        tx_mutex_put (*mtx);
    }
}
#endif /* XSHAL_CLIB == XTHAL_CLIB_XCLIB */
/**************************************************************************/
/* _sbrk_r - heap allocator. This function is called when the memory */
/* allocator needs a new chunk of memory. */
/* The bounds of the heap area are global variables so that customer */
/* startup code can easily override them if needed. */
/* */
/* _tx_clib_heap_start is the start of memory assigned to the heap */
/* or 0 (NULL) if no memory is assigned (in */
/* that case all calls to malloc will fail). */
/* */
/* _tx_clib_heap_end is the end of memory assigned to the heap */
/* or 0 (NULL) if no memory is assigned. If a */
/* nonzero start value is set then a nonzero */
/* end value must be set. */
/**************************************************************************/
/* Heap bounds; NULL until startup code (or _tx_initialize_low_level)
   assigns a region.  If start is nonzero, end must also be set. */
char * _tx_clib_heap_start = NULL;
char * _tx_clib_heap_end = NULL;
/* Grow (or query) the program break by incr bytes for the C library
   allocator.  Returns the previous break on success; on failure sets
   reent->_errno = ENOMEM and returns (void *)-1, per sbrk convention. */
void *
_sbrk_r (struct _reent * reent, int32_t incr)
{
/* Current break; lazily initialized from _tx_clib_heap_start on first use. */
static char * heap_ptr;
char * new_heap_ptr;
char * alloc_ptr;
/* The heap is bound by _tx_clib_heap_{start,end}. */
if (heap_ptr == NULL) {
heap_ptr = _tx_clib_heap_start;
}
new_heap_ptr = heap_ptr + incr;
/* NOTE(review): the >= test means the byte at _tx_clib_heap_end itself is
   never handed out; the wraparound test also rejects any negative incr,
   so this sbrk cannot shrink the heap — presumably intentional here. */
if ((heap_ptr == NULL) || /* no heap */
(new_heap_ptr >= _tx_clib_heap_end) || /* heap exhausted */
(new_heap_ptr < heap_ptr)) { /* wraparound */
reent->_errno = ENOMEM;
return (void *) -1;
}
/* Success: return the old break and advance it. */
alloc_ptr = heap_ptr;
heap_ptr = new_heap_ptr;
return (void *) alloc_ptr;
}
/**************************************************************************/
/* _tx_clib_init - initialize C library thread safety support. */
/* Called by _tx_initialize_low_level(). */
/**************************************************************************/
/* One-time C library thread-safety setup; creates the global newlib lock.
   XCLIB needs no global setup because _Mtxinit creates locks on demand. */
void
_tx_clib_init (void)
{
#if XSHAL_CLIB == XTHAL_CLIB_NEWLIB
#ifdef THREADX_TESTSUITE
/* Priority inheritance causes printf() (which calls malloc()
which calls __malloc_unlock() which calls tx_mutex_put()
which calls _tx_mutex_priority_change() if TX_INHERIT is set)
which causes the task to suspend and resume which sometimes
changes execution order in the very sensitive testsuite
and makes it fail. So, for the testsuite, don't request
priority inheritance (it doesn't need it in any case). */
tx_mutex_create (&clib_lock_mutex, "Clib lock", 0);
#else
tx_mutex_create (&clib_lock_mutex, "Clib lock", TX_INHERIT);
#endif
#endif /* NEWLIB */
#if XSHAL_CLIB == XTHAL_CLIB_XCLIB
/* Nothing. */
#endif /* XCLIB */
}
/**************************************************************************/
/* _tx_clib_reent_init - initialize C library thread reent structure. */
/* Called by tx_thread_create() to init per-thread C library state. */
/**************************************************************************/
/* Initialize the per-thread C library reent structure embedded in the
   thread control block and point tx_thread_clib_ptr at it. */
void
_tx_clib_reent_init (TX_THREAD * thread_ptr)
{
if (thread_ptr == NULL) {
/* Should never happen */
return;
}
#if XSHAL_CLIB == XTHAL_CLIB_NEWLIB
struct _reent * reent = &(thread_ptr->tx_thread_clib_reent);
/* Zero first, then let newlib set up its default fields. */
memset (reent, 0, sizeof(struct _reent));
_REENT_INIT_PTR (reent);
thread_ptr->tx_thread_clib_ptr = reent;
#endif
#if XSHAL_CLIB == XTHAL_CLIB_XCLIB
/* XCLIB provides its own initializer; no pre-zeroing needed. */
thread_ptr->tx_thread_clib_ptr = &(thread_ptr->tx_thread_clib_reent);
_init_reent (thread_ptr->tx_thread_clib_ptr);
#endif
}
/**************************************************************************/
/* _tx_clib_reent_cleanup - clean up C library thread reent structure. */
/* Called by tx_thread_delete() to clean up per-thread C library state */
/* and free any allocated memory (partial = 0). */
/* Called by tx_thread_shell_entry and tx_thread_terminate to perform */
/* "atexit" processing and clean up stdio, but leaving the rest of the */
/* structure intact so the thread can be restarted (partial = 1). */
/**************************************************************************/
/* Clean up a thread's C library reent state.  partial != 0: run "atexit"
   processing and flush/close stdio only, leaving the structure reusable
   (thread exit/terminate).  partial == 0: also free all heap memory held
   by the structure (thread delete).
   NOTE(review): unlike _tx_clib_reent_init, thread_ptr is not checked for
   NULL here — callers are assumed to guarantee it. */
void
_tx_clib_reent_cleanup (TX_THREAD * thread_ptr, int32_t partial)
{
#if XSHAL_CLIB == XTHAL_CLIB_NEWLIB
struct _reent * reent = &(thread_ptr->tx_thread_clib_reent);
FILE * fp = &(reent->__sf[0]);
int32_t i;
/* Avoid closing stdin,stdout,stderr so other threads can still use them. */
/* (Clearing _close makes newlib's close path a no-op for these slots.) */
for (i = 0; i < 3; i++) {
fp->_close = NULL;
fp++;
}
if (partial != 0) {
/* Perform "atexit" processing and clean up stdio. */
_wrapup_reent (reent);
}
else {
/* Free all the heap memory allocated in the reent structure.
ThreadX requires that the thread has either exited or been
terminated before it can be deleted so we can assume that
_wrapup_reent has already been called for this thread. */
_reclaim_reent (reent);
}
#endif
#if XSHAL_CLIB == XTHAL_CLIB_XCLIB
/* Unused, keep compiler happy */
(void) partial;
/* File handle table is global; no allocated memory in struct. */
thread_ptr->tx_thread_clib_ptr = 0;
#endif
}
/**************************************************************************/
/* _xt_thread_wrapper - thread wrapper to handle C library init/cleanup. */
/* If C library thread safety is enabled, every thread is invoked */
/* via this wrapper in order to handle thread context setup/cleanup. */
/**************************************************************************/
/* Wrapper entry point installed by _tx_clib_thread_setup().  Sets up the
   thread's C library context, runs the real entry point, then performs
   partial C library cleanup when the entry point returns. */
void
_xt_thread_wrapper (ULONG arg)
{
    TX_INTERRUPT_SAVE_AREA

    TX_THREAD * thread_ptr = _tx_thread_current_ptr;

    /* No use for this parameter */
    (void) arg;

    /* Init the C library thread context */
    _tx_clib_reent_init (thread_ptr);

    /* Disable interrupts around the global context ptr update */
    TX_DISABLE
#if XSHAL_CLIB == XTHAL_CLIB_NEWLIB
    _impure_ptr = thread_ptr->tx_thread_clib_ptr;
#endif
#if XSHAL_CLIB == XTHAL_CLIB_XCLIB
    _reent_ptr = thread_ptr->tx_thread_clib_ptr;
#endif
    TX_RESTORE

    /* Call actual thread entry point */
    (thread_ptr->tx_real_thread_entry)(thread_ptr->tx_thread_entry_parameter);

    /* Partial cleanup only ("atexit" + stdio): the thread may be restarted,
       so the reent structure must remain intact.  Full cleanup (partial = 0,
       which calls _reclaim_reent and frees the structure's heap memory) is
       performed by tx_thread_delete per _tx_clib_reent_cleanup's contract;
       doing it here as well would reclaim the reent memory twice. */
    _tx_clib_reent_cleanup (thread_ptr, 1);
}
/**************************************************************************/
/* _tx_clib_thread_setup - Xtensa-specific thread setup actions. */
/* This function will be called only if thread safe C library usage */
/* is enabled. It inserts the wrapper as the thread entry point and */
/* saves the actual entry point for later use. */
/**************************************************************************/
/* Redirect the thread's entry point through _xt_thread_wrapper, stashing
   the real entry point so the wrapper can invoke it after C library
   context setup. */
void
_tx_clib_thread_setup (TX_THREAD * thread_ptr)
{
    thread_ptr->tx_real_thread_entry = thread_ptr->tx_thread_entry;
    thread_ptr->tx_thread_entry = _xt_thread_wrapper;
}
#endif /* TX_THREAD_SAFE_CLIB */

View File

@@ -0,0 +1,187 @@
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#include "tx_api.h"
#include "tx_port.h"
#include "xtensa_api.h"
#if XCHAL_HAVE_ISL || XCHAL_HAVE_KSL || XCHAL_HAVE_PSL
#include <xtensa/tie/xt_exception_dispatch.h>
#endif
#if XCHAL_HAVE_XEA3
int32_t xt_sw_intnum = -1;
int32_t xt_timer_intnum = -1;
#endif
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for any low-level processor */
/* initialization, including setting up interrupt vectors, setting */
/* up a periodic timer interrupt source, saving the system stack */
/* pointer for use in ISR processing later, and finding the first */
/* available RAM memory address for tx_application_define. */
/* It also sets the default heap region for the optional C library. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
/* Low-level processor init: stack limit disable, system stack pointer,
   unused-memory marker, optional C library heap carve-out, coprocessor
   and interrupt/timer setup.  Runs once, before the scheduler starts.
   NOTE(review): interrupts are disabled here and deliberately NOT
   restored (no TX_RESTORE) — presumably re-enabled when the scheduler
   starts; confirm against the port's startup flow. */
VOID _tx_initialize_low_level(VOID)
{
extern char _xt_interrupt_stack_top;
extern void _tx_timer_interrupt(void *);
extern void * _tx_thread_system_stack_ptr;
extern void * _tx_initialize_unused_memory;
extern char _end;
#ifdef TX_THREAD_SAFE_CLIB
extern char __stack;
extern void _tx_clib_init(void);
#endif
#if XCHAL_CP_NUM > 0
extern void _xt_coproc_init(void);
extern void _xt_coproc_exc(XtExcFrame * fp);
#endif
#ifdef TX_ENABLE_STACK_CHECKING
extern VOID _tx_xtensa_stack_error_handler(TX_THREAD * thread);
#endif
#if XCHAL_HAVE_XEA3
extern void xt_sched_handler(void * arg);
int32_t i;
#endif
TX_INTERRUPT_SAVE_AREA
/* Disable interrupts - don't want any that interact with ThreadX yet. */
TX_DISABLE
/*
Disable stack limit checking if present. Whatever was set up earlier
is not going to work for us.
*/
#if XCHAL_HAVE_KSL
XT_WSR_KSL(0);
#endif
#if XCHAL_HAVE_ISL
XT_WSR_ISL(0);
#endif
/* Save the system stack pointer. */
_tx_thread_system_stack_ptr = &(_xt_interrupt_stack_top);
/* Save the first available memory address. */
/* (Rounds the linker-provided _end up to a 16-byte boundary.) */
_tx_initialize_unused_memory = (void *)(((UINT)&_end + 15) & ~0xF);
#ifdef TX_THREAD_SAFE_CLIB
/*
Pre-allocate default memory region for the C library heap.
Bisect the region from first available memory to end of system memory,
align to 16 byte boundary, and allocate the heap in the upper half.
*/
_tx_clib_heap_end = &(__stack);
/* Midpoint computed as a sum of halves so the UINT addition cannot
   overflow for high addresses. */
_tx_clib_heap_start =
(void *)(((UINT)_tx_initialize_unused_memory/2 + (UINT)_tx_clib_heap_end/2) & ~0xF);
#endif
#if XCHAL_CP_NUM > 0
/*
Initialize co-processor management for threads. Leave CPENABLE alone.
This is called from a normal Xtensa single-threaded run-time environment
before multi-threading has commenced. All co-processors are enabled.
It is important NOT to clear CPENABLE yet because tx_application_define()
is user code which might use a co-processor. The co-processor exception
handler does not expect to be called outside a thread.
*/
_xt_coproc_init();
#if XCHAL_HAVE_XEA3
/* Install the coprocessor exception handler. */
xt_set_exception_handler(EXCCAUSE_CP_DISABLED, _xt_coproc_exc);
#endif
#endif
#if XCHAL_HAVE_XEA3
/* Select a software interrupt to use for scheduling. */
/* (Requires a level-1 software interrupt; halts via break if none.) */
for (i = 0; i < XCHAL_NUM_INTERRUPTS; i++) {
if ((Xthal_inttype[i] == XTHAL_INTTYPE_SOFTWARE) && (Xthal_intlevel[i] == 1)) {
xt_sw_intnum = i;
break;
}
}
if (xt_sw_intnum == -1) {
__asm__ volatile ("break 1, 1");
}
/* Set the interrupt handler and enable the interrupt. */
xt_set_interrupt_handler(xt_sw_intnum, xt_sched_handler, 0);
xt_interrupt_enable(xt_sw_intnum);
#endif
#ifndef TX_NO_TIMER
/* Compute tick divisor if clock freq is not compile-time constant. */
#ifndef XT_CLOCK_FREQ
_xt_tick_divisor_init();
#endif
/* Set up the periodic tick timer (assume enough time to complete init). */
#ifdef XT_CLOCK_FREQ
XT_WSR_CCOMPARE(XT_RSR_CCOUNT() + XT_TICK_DIVISOR);
#else
XT_WSR_CCOMPARE(XT_RSR_CCOUNT() + _xt_tick_divisor);
#endif
#if XCHAL_HAVE_XEA3
xt_timer_intnum = XT_TIMER_INTNUM;
xt_set_interrupt_handler(xt_timer_intnum, _tx_timer_interrupt, 0);
#endif
/* Enable the timer interrupt at the device level. */
xt_interrupt_enable(XT_TIMER_INTNUM);
#endif /* TX_NO_TIMER */
/* Initialize C library thread safety support. */
#ifdef TX_THREAD_SAFE_CLIB
_tx_clib_init();
#endif
/* Install stack overflow notification callback. */
#ifdef TX_ENABLE_STACK_CHECKING
tx_thread_stack_error_notify(_tx_xtensa_stack_error_handler);
#endif
}

View File

@@ -0,0 +1,216 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#include "xtensa_rtos.h"
#include "tx_api_asm.h"
#if XCHAL_HAVE_XEA2
.text
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* This function restores the interrupt context if it is processing a */
/* nested interrupt. If not, it returns to the interrupt thread if no */
/* preemption is necessary. Otherwise, if preemption is necessary or */
/* if no thread was running, the function returns to the scheduler. */
/* */
/* RELEASE HISTORY */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
// {
.globl _tx_thread_context_restore
.type _tx_thread_context_restore,@function
.align 4
_tx_thread_context_restore:
/*
Please note: Control flow might seem strange. This is because it has been
optimized to avoid taken branches in the longest normal path (the critical
one for worst-case latency), presumed to be a non-nested interrupt that
preempts) and to hide pipeline interlock cycles where possible.
*/
/* Lockout interrupts. */
XT_INTS_DISABLE(a0)
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
/* Call the ISR exit function to indicate an ISR is complete. */
#ifdef __XTENSA_CALL0_ABI__
call0 _tx_execution_isr_exit
#else
call8 _tx_execution_isr_exit
#endif
#endif
/* Determine if interrupts are nested. */
// if (--_tx_thread_system_state)
// {
movi a2, _tx_thread_system_state /* a2 = & interrupt nesting count */
l32i a3, a2, 0 /* decrement interrupt nesting count */
addi a3, a3, -1
s32i a3, a2, 0
bnez a3, .L_tx_thread_nested_restore
// }
/* Nesting count hit zero: this is the outermost interrupt. */
.Ln_tx_thread_not_nested_restore:
/* Determine if a thread was interrupted and no preemption is required. */
// else if (((_tx_thread_current_ptr)
// && (_tx_thread_current_ptr == _tx_thread_execute_ptr))
// || (_tx_thread_preempt_disable))
// {
movi a0, _tx_thread_current_ptr /* a0 = &_tx_thread_current_ptr */
l32i a2, a0, 0 /* a2 = _tx_thread_current_ptr (old) */
movi a3, _tx_thread_execute_ptr
beqz a2, .L_tx_thread_idle_system_restore
l32i a3, a3, 0 /* a3 = _tx_thread_execute_ptr (new) */
beq a3, a2, .L_tx_thread_no_preempt_restore
movi a3, _tx_thread_preempt_disable
l32i a3, a3, 0 /* a3 = _tx_thread_preempt_disable */
// /* the no-preempt case has moved down so we fall-thru to preempt */
bgei a3, 1, .L_tx_thread_no_preempt_restore
// }
// else
// {
.Ln_tx_thread_preempt_restore:
/* Save remaining context on the thread's stack. */
l32i a3, a2, tx_thread_stack_ptr /* a3 = thread's stack ptr */
/* Store standard preserved registers. */
/*
Call0 ABI callee-saved regs a12-15 need to be saved before preemption.
However a12-13 were saved for scratch by _tx_thread_context_save().
*/
#ifdef __XTENSA_CALL0_ABI__ /* Call0: now save callee-save regs */
s32i a14, a3, XT_STK_A14
s32i a15, a3, XT_STK_A15
#endif
/* Save the remaining time-slice and disable it. */
// if (_tx_timer_time_slice)
// {
movi a3, _tx_timer_time_slice /* a3 = &_tx_timer_time_slice */
l32i a4, a3, 0 /* a4 = _tx_timer_time_slice */
beqz a4, .L_tx_thread_dont_save_ts
// _tx_thread_current_ptr -> tx_thread_time_slice
// = _tx_timer_time_slice;
// _tx_timer_time_slice = 0; */
s32i a4, a2, tx_thread_time_slice
movi a4, 0
s32i a4, a3, 0
// }
/* On both paths to this label a4 == 0 (beqz taken, or just cleared). */
.L_tx_thread_dont_save_ts:
/* Clear the current task pointer. */
// _tx_thread_current_ptr = TX_NULL;
s32i a4, a0, 0 /* a4 == 0 == TX_NULL */
#if XCHAL_CP_NUM > 0
/* Save CPENABLE in thread's co-processor save area, and clear CPENABLE. */
rsr a3, CPENABLE
s16i a3, a2, tx_thread_cp_state + XT_CPENABLE
wsr a4, CPENABLE /* disable all co-processors */
#endif
.L_tx_thread_idle_system_restore:
/*
Return via the scheduler.
Scheduler returns eventually to this function's caller as if called by it.
At this point we are still on the system stack.
*/
// _tx_thread_schedule();
call0 _tx_thread_schedule /* never returns here */
// }
/* Flow never falls through here. */
.L_tx_thread_no_preempt_restore:
/* Restore interrupted thread. */
/* Pickup the saved stack pointer. */
// SP = _tx_thread_current_ptr -> tx_thread_stack_ptr;
l32i sp, a2, tx_thread_stack_ptr
.L_tx_thread_nested_restore:
/* Recover the saved context and return to the point of interrupt. */
call0 _xt_context_restore
/*
Must return via the exit dispatcher corresponding to the entrypoint
from which this was called. Interruptee's A0, A1, PS, PC are restored
and the interrupt stack frame is deallocated in the exit dispatcher.
At this point we are on the thread's stack.
*/
l32i a0, sp, XT_STK_EXIT
ret
// }
#endif /* XCHAL_HAVE_XEA2 */

View File

@@ -0,0 +1,157 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#include "xtensa_rtos.h"
#include "tx_api_asm.h"
#if XCHAL_HAVE_XEA2
.text
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* This function saves the context of an executing thread in the */
/* beginning of interrupt processing. The function also ensures that */
/* the system stack is used upon return to the calling ISR. */
/* */
/* Interrupts remain disabled and no exceptions are triggered! */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_save(VOID)
// {
.globl _tx_thread_context_save
.type _tx_thread_context_save,@function
.align 4
_tx_thread_context_save:
/*
Please note: Control flow might seem strange. This is because it has been
optimized to avoid taken branches in the longest normal path (the critical
one for worst-case latency), presumed to be a non-nested interrupt and
non-idle) and to hide pipeline interlock cycles where possible.
*/
/*
Save a couple of scratch regs to work with that are preserved over the
call to _xt_context_save. The latter assumes the interruptee's values
of these are already saved and these regs contain different data to be
preserved, so doesn't save them in the stack frame, and thereby requires
that its caller have already saved them in the interrupt stack frame.
We end up with a12 = return address, a13 and a0 are scratch.
*/
s32i a12, sp, XT_STK_A12
s32i a13, sp, XT_STK_A13
/* Check for a nested interrupt condition and increment nesting count. */
// if (_tx_thread_system_state++)
// {
movi a13, _tx_thread_system_state /* a13 = & interrupt nesting count */
mov a12, a0 /* a12 = save ret addr (free a0) */
l32i a0, a13, 0 /* increment interrupt nesting count */
addi a0, a0, 1
s32i a0, a13, 0
bnei a0, 1, .L_tx_thread_nested_save /* was !=0 before increment? */
// }
.Ln_tx_thread_not_nested_save:
/* Otherwise, not nested, check to see if a thread was running. */
// else
// {
// if (_tx_thread_current_ptr)
// {
movi a0, _tx_thread_current_ptr
l32i a13, a0, 0 /* a13 = current thread ctrl blk */
beqz a13, .L_tx_thread_idle_system_save
/* Save the rest of the interrupted context. */
call0 _xt_context_save
/* Save the current stack pointer in the thread's control block. */
// _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
s32i sp, a13, tx_thread_stack_ptr
// }
/* Switch to the system stack and return to ISR. */
.L_tx_thread_idle_system_save:
/*
If interrupted in the idle state, it's not necessary to save any context.
But even in the idle case where we are already on the system stack, it is
necessary to reset the (system) stack pointer so a series of consecutive
interrupts in the idle state do not keep moving the SP downward.
*/
// sp = _tx_thread_system_stack_ptr;
movi a13, _tx_thread_system_stack_ptr
mov a0, a12 /* retrieve return address */
l32i sp, a13, 0
ret
// }
.L_tx_thread_nested_save:
/* Nested interrupt condition. */
/* Save the rest of the interrupted context and return to ISR. */
/* (Stay on the current stack; SP is not switched for nested interrupts.) */
call0 _xt_context_save
mov a0, a12 /* retrieve return address */
ret
// }
#endif /* XCHAL_HAVE_XEA2 */

View File

@@ -0,0 +1,81 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#include "tx_api.h"
#include "xtensa_rtos.h"
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for changing the interrupt lockout */
/* posture of the system. */
/* NOTE: In earlier versions this was implemented in assembly. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
/* Change the interrupt lockout posture and return the previous posture.   */
/* new_posture: the desired interrupt posture (low 4 bits on XEA2; an      */
/* opaque interrupt-enable state on other exception architectures).        */
/* Returns the posture that was in effect before the call.                 */
UINT _tx_thread_interrupt_control(UINT new_posture)
{
#if XCHAL_HAVE_XEA2
    /* Raise to the highest interrupt level, capturing the previous PS. */
    UINT previous_ps = XT_RSIL(15);

    /* Merge the caller's 4-bit interrupt level into the saved PS image
       and write it back, then sync so the change takes effect. */
    XT_WSR_PS((previous_ps & ~0xF) | (new_posture & 0xF));
    XT_RSYNC();

    /* Report only the old interrupt-level field to the caller. */
    return previous_ps & 0xF;
#else
    /* Non-XEA2 (e.g. XEA3): let the HAL swap the interrupt enable state. */
    UINT previous_state = xthal_disable_interrupts();
    xthal_restore_interrupts(new_posture);
    return previous_state;
#endif
}

View File

@@ -0,0 +1,253 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#include "tx_port.h"
#include "xtensa_rtos.h"
#include "tx_api_asm.h"
.text
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* This function waits for a thread control block pointer to appear in */
/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
/* in the variable, the corresponding thread is resumed. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
// {
.globl _tx_thread_schedule
.type _tx_thread_schedule,@function
.align 4
_tx_thread_schedule:
/* Entered via call0 from _tx_thread_system_return or from kernel startup */
/* (see Windowed-ABI note below); never returns to its caller.            */
#if XCHAL_HAVE_XEA3
/* Skip "entry" - nothing to save, never returns. */
movi a2, PS_STACK_KERNEL | PS_DI /* Set PS.STACK = Kernel and */
movi a3, PS_STACK_MASK | PS_DI_MASK /* disable interrupts. */
xps a2, a3
#ifdef __XTENSA_CALL0_ABI__
mov a15, a1 /* Dispatch code expects a15 = old SP */
#endif
movi a0, _xt_dispatch + 3 /* Jump to dispatch code. It will */
ret /* check for ready thread or idle */
/* and handle accordingly. */
ill /* Should never get back here. */
#else
/*
Note on Windowed ABI:
Callers of this don't expect it to return to them. Most use 'call0'.
The only windowed (C) caller is _tx_initialize_kernel_enter().
There are no args or results to pass. So we don't really care if the
window gets rotated. We can omit the 'entry' altogether and avoid the
need for a special "no entry" entrypoint to this function.
*/
#ifdef XT_ENABLE_TIMING_TEST_HACK
/* For timing_test "TS" numbers. INTERNAL USE ONLY. */
/* Always use CALL0. We may be here with windowing disabled. */
.extern scheduler_return
call0 scheduler_return
#endif
/*
Wait for a thread to execute (Idle Loop).
First ensure interrupts (except hi-pri) are disabled so result
of reading _tx_thread_execute_ptr can't change before testing.
While there's no thread ready, enable interrupts and wait in a
low power state, then disable interrupts and repeat the test.
*/
// do
// {
movi a3, _tx_thread_execute_ptr
.L_tx_thread_schedule_loop: /* Idle Loop. */
XT_INTS_DISABLE(a2) /* disable interrupts if not already */
l32i a2, a3, 0 /* a2 = _tx_thread_execute_ptr */
bnez a2, .L_tx_thread_schedule_ready
waiti 0 /* enable interrupts and wait for */
/* interrupt in low power state */
j .L_tx_thread_schedule_loop
// }
// while(_tx_thread_execute_ptr == TX_NULL);
.L_tx_thread_schedule_ready:
/* Yes! We have a thread to execute. Lockout interrupts and
transfer control to it. Interrupts are already disabled. */
/* Setup the current thread pointer. */
// _tx_thread_current_ptr = _tx_thread_execute_ptr;
movi a3, _tx_thread_current_ptr
/* Preload the run count into a0 now; a3 is reused for the increment below. */
l32i a0, a2, tx_thread_run_count
s32i a2, a3, 0 /* a2 = _tx_thread_current_ptr (TCB) */
/* Increment the run count for this thread. */
// _tx_thread_current_ptr -> tx_thread_run_count++;
addi a3, a0, 1
movi a0, _tx_timer_time_slice
s32i a3, a2, tx_thread_run_count
/* Setup time-slice, if present. */
// _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
l32i a3, a2, tx_thread_time_slice
s32i a3, a0, 0
#ifdef TX_THREAD_SAFE_CLIB
/* Load library-specific global context ptr address. */
#if XSHAL_CLIB == XTHAL_CLIB_NEWLIB
movi a0, _impure_ptr
#elif XSHAL_CLIB == XTHAL_CLIB_XCLIB
movi a0, _reent_ptr
#else
#error TX_THREAD_SAFE_CLIB defined with unsupported C library.
#endif
l32i a3, a2, tx_thread_clib_ptr
s32i a3, a0, 0 /* point to thread's reent struct */
#endif
/* Switch to the thread's stack. */
// SP = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
l32i sp, a2, tx_thread_stack_ptr
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
/* Call the thread entry function to indicate the thread is executing. */
#ifdef __XTENSA_CALL0_ABI__
call0 _tx_execution_thread_enter
#else
call8 _tx_execution_thread_enter
#endif
#endif
/* Determine if an interrupt frame or a synchronous task suspension frame
is present. */
l32i a3, a2, tx_thread_solicited
bnez a3, .L_tx_thread_synch_return
.Ln_tx_thread_asynch_return:
#if XCHAL_CP_NUM > 0
/* Restore thread's CPENABLE (enable co-processors this thread owns). */
l16ui a3, a2, tx_thread_cp_state + XT_CPENABLE
wsr a3, CPENABLE
#endif
/* Here we return from unsolicited entry with an interrupt stack frame. */
call0 _xt_context_restore
/* In Call0 ABI, restore callee-saved regs (A12, A13 already restored). */
#ifdef __XTENSA_CALL0_ABI__
l32i a14, sp, XT_STK_A14
l32i a15, sp, XT_STK_A15
#endif
#if XCHAL_CP_NUM > 0
rsync /* ensure wsr.CPENABLE has completed */
#endif
/*
This does not return to its caller, but to the selected thread.
Must return via the exit dispatcher corresponding to the entrypoint
from which this was called. Interruptee's A0, A1, PS, PC are restored
and the interrupt stack frame is deallocated in the exit dispatcher.
*/
l32i a0, sp, XT_STK_EXIT
ret
.L_tx_thread_synch_return:
/* Here we return from a solicited entry with a solicited stack frame. */
movi a0, TX_FALSE
l32i a3, sp, XT_STK_PS
s32i a0, a2, tx_thread_solicited
#ifdef __XTENSA_CALL0_ABI__
l32i a12, sp, XT_STK_A12
l32i a13, sp, XT_STK_A13
l32i a14, sp, XT_STK_A14
l32i a15, sp, XT_STK_A15
#endif
l32i a0, sp, XT_STK_PC /* return address */
#if XCHAL_CP_NUM > 0
/* CPENABLE should already be clear (it was cleared on entry to kernel). */
rsync /* ensure wsr.CPENABLE has completed */
#endif
wsr a3, PS /* no need to sync PS, delay is OK */
/* This does not return to its caller, but to the selected thread. */
#ifdef __XTENSA_CALL0_ABI__
/* 'addi sp, sp, imm' could turn into 'addmi, addi' sequence and make */
/* the sp briefly point to an illegal stack location. Avoid that. */
addi a2, sp, XT_STK_FRMSZ
mov sp, a2
ret
#else
retw
#endif
#endif /* XCHAL_HAVE_XEA3 */
// }

View File

@@ -0,0 +1,158 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#include "xtensa_rtos.h"
#include "tx_api_asm.h"
.text
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* This function builds a stack frame on the supplied thread's stack. */
/* The stack frame looks like an interrupt frame or a solicited frame */
/* depending on the exception architecture of the target hardware. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
// VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
// {
.globl _tx_thread_stack_build
.type _tx_thread_stack_build,@function
.align 4
_tx_thread_stack_build:
ENTRY0
/* On entry (per the C prototype above): a2 = thread_ptr (TCB), */
/* a3 = function_ptr (thread shell entry, stored as initial PC). */
/* Get logical base of stack area (top). */
l32i a5, a2, tx_thread_stack_end /* get top-1 of stack area */
addi a5, a5, 1 /* undo the -1 */
srli a5, a5, 4 /* force 16-byte alignment */
slli a5, a5, 4 /* a5 = post-dispatch SP (frame top) */
/* Allocate space for the frame (frame size is already 16-byte aligned). */
addi a4, a5, -XT_STK_FRMSZ /* a4 = pre-dispatch SP (frame base) */
/* Set the thread's SP. */
s32i a4, a2, tx_thread_stack_ptr
#if !XCHAL_HAVE_XEA2
addi a4, a4, XT_STK_XTRA_SZ /* a4 = base of exception frame */
#endif
/* Clear the entire frame. (XEA3: only exception frame) */
movi a6, 0 /* a6 = 0 */
mov a7, a4 /* a7 = ptr to current word */
1: s32i a6, a7, 0 /* clear current word */
addi a7, a7, 4 /* point to next word */
bltu a7, a5, 1b /* repeat until frame top */
#if XCHAL_HAVE_XEA2
s32i a5, a4, XT_STK_A1 /* save post-dispatch SP in frame */
#endif
/* Indicate a solicited or interrupted stack frame. */
#if XCHAL_HAVE_XEA2
movi a7, 0 /* interrupted */
#else
movi a7, 0 /* solicited */
#endif
s32i a7, a2, tx_thread_solicited
/*
Terminate GDB backtrace in this thread at the "return function" by ensuring
its A0 == 0. Since frame was cleared, don't need to do this explicitly.
s32i a6, a4, XT_STK_A0
*/
/* Set the return address to the return function. */
/* Start thread via user exception exit dispatcher (could use any). */
#if XCHAL_HAVE_XEA2
movi a5, _xt_user_exit
s32i a5, a4, XT_STK_EXIT
#else
movi a5, 0
s32i a5, a4, XT_STK_ATOMCTL
#endif
s32i a3, a4, XT_STK_PC
/*
Set thread's initial PS for C code, all int levels enabled.
XEA2: Since we dispatch via level 1 (_xt_user_exit), must set PS.EXCM,
which will be cleared by 'rfe' after the dispatcher, to prevent
interrupts happening when PS is restored during the exit dispatcher.
XEA3: nothing special, other than setting the thread stack type.
*/
#if XCHAL_HAVE_XEA2
#ifdef __XTENSA_CALL0_ABI__
movi a6, PS_UM | PS_EXCM
#else
movi a6, PS_UM | PS_EXCM | PS_WOE | PS_CALLINC(1) /* pretend 'call4' */
#endif
#else
movi a6, PS_STACK_FIRSTKER
#endif
s32i a6, a4, XT_STK_PS
#if XCHAL_HAVE_XEA2
#ifdef XT_USE_SWPRI
/* Set the initial virtual priority mask value to all 1's */
movi a3, -1
s32i a3, a4, XT_STK_VPRI
#endif
#endif
RET0
// }

View File

@@ -0,0 +1,287 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#include "xtensa_rtos.h"
#include "tx_api_asm.h"
.text
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* This function is target processor specific. It is used to transfer */
/* control from a thread back to the system. Only a minimal context */
/* is saved since the compiler assumes temp registers are going to get */
/* slicked by a function call anyway. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
// VOID _tx_thread_system_return(VOID)
// {
.globl _tx_thread_system_return
.type _tx_thread_system_return,@function
.align 4
_tx_thread_system_return:
/*
Set up solicited stack frame and save minimal context (including a0).
Since this is solicited, no need to save regs compiler doesn't preserve.
*/
#if XCHAL_HAVE_XEA3
#ifdef __XTENSA_CALL0_ABI__
addi sp, sp, -16
#else
entry sp, 48
#endif
s32i a0, sp, 0 /* save return address */
#else
#ifdef __XTENSA_CALL0_ABI__
addi a2, sp, -XT_STK_FRMSZ /* avoid addi/addmi relaxation that */
mov sp, a2 /* might temporarily move sp up */
#else
entry sp, XT_STK_FRMSZ
#endif
#endif
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
/* Call the thread exit function to indicate the thread is no longer executing. */
#ifdef __XTENSA_CALL0_ABI__
call0 _tx_execution_thread_exit
#else
call8 _tx_execution_thread_exit
#endif
#endif
#if XCHAL_HAVE_XEA3
#ifdef __XTENSA_CALL0_ABI__
#else
ssai 0
spillw /* spill all registers */
#endif
/*
Save register state into exception frame. This is safe to do with
interrupts enabled, but we will have to revert SP to point above
the exception frame because that is what the dispatch code expects.
Must disable interrupts before that.
*/
movi a0, .Lret
rsr.ps a2
addi sp, sp, -XT_STK_XFRM_SZ
s32i a0, sp, XT_STK_PC /* save return PC */
s32i a2, sp, XT_STK_PS /* save PS */
#ifdef __XTENSA_CALL0_ABI__
s32i a12, sp, XT_STK_A12 /* callee-saved registers */
s32i a13, sp, XT_STK_A13
s32i a14, sp, XT_STK_A14
s32i a15, sp, XT_STK_A15
#endif
movi a2, PS_STACK_KERNEL | PS_DI /* Set PS.STACK = Kernel and */
movi a8, PS_STACK_MASK | PS_DI_MASK /* disable interrupts. */
xps a2, a8
movi a3, _tx_thread_current_ptr /* a3 = &_tx_thread_current_ptr */
movi a2, TX_TRUE
l32i a4, a3, 0 /* a4 = _tx_thread_current_ptr */
movi a5, 0
s32i a2, a4, tx_thread_solicited /* mark as solicited switch */
#if XCHAL_CP_NUM > 0
/* Save coprocessor callee-saved state (if any). At this point CPENABLE */
/* should still reflect which CPs were in use (enabled). */
call0 _xt_coproc_savecs
/* Clear CPENABLE and give up all co-procs. */
s16i a5, a4, tx_thread_cp_state + XT_CPENABLE
wsr a5, CPENABLE /* disable all co-processors */
#endif
addi sp, sp, XT_STK_XFRM_SZ /* restore SP */
addi a2, sp, -XT_STK_FRMSZ
s32i a2, a4, tx_thread_stack_ptr /* Save SP in TCB */
#ifdef __XTENSA_CALL0_ABI__
mov a15, sp /* Dispatch code expects a15 = old a1 */
#endif
s32i a5, a3, 0 /* Clear _tx_thread_current_ptr */
movi a0, _xt_dispatch + 3 /* Jump to dispatch code */
ret
/* Execution returns here. Interrupts should be disabled. */
/* NOTE: we expect original SP to have been restored. */
.align 4
.Lret:
addi sp, sp, -XT_STK_XFRM_SZ /* Prepare to restore state */
l32i a2, sp, XT_STK_PS /* Retrieve PS value */
#ifdef __XTENSA_CALL0_ABI__
l32i a12, sp, XT_STK_A12 /* Callee-saved registers */
l32i a13, sp, XT_STK_A13
l32i a14, sp, XT_STK_A14
l32i a15, sp, XT_STK_A15
#endif
addi sp, sp, XT_STK_XFRM_SZ
wsr.ps a2 /* Safe to enable interrupts */
rsync
#ifdef __XTENSA_CALL0_ABI__
l32i a0, sp, 0
addi sp, sp, 16
ret
#else
l32i a0, sp, 0
retw
#endif
#else /* XEA1 or XEA2 */
rsr a2, PS
s32i a0, sp, XT_STK_PC
s32i a2, sp, XT_STK_PS
#ifdef __XTENSA_CALL0_ABI__
s32i a12, sp, XT_STK_A12
s32i a13, sp, XT_STK_A13
s32i a14, sp, XT_STK_A14
s32i a15, sp, XT_STK_A15
#else
/*
Spill register windows. Calling xthal_window_spill() causes extra spills and
reloads, so we set things up to call the _nw version instead to save cycles.
*/
movi a6, ~(PS_WOE_MASK|PS_INTLEVEL_MASK) // (using a6 ensures any window using this a4..a7 is spilled)
mov a4, a0 // save a0
and a2, a2, a6 // clear WOE, INTLEVEL
addi a2, a2, XCHAL_EXCM_LEVEL // set INTLEVEL
wsr a2, PS
rsync
call0 xthal_window_spill_nw
l32i a0, sp, XT_STK_PS
wsr a0, PS // Restore PS value
rsync
#endif
#if XCHAL_CP_NUM > 0
/* Save coprocessor callee-saved state (if any). At this point CPENABLE */
/* should still reflect which CPs were in use (enabled). */
call0 _xt_coproc_savecs
#endif
/*
We do not return directly from this function to its caller.
Register usage from here on:
a0 = scratch (return address has been saved in stack frame)
a1 = stack ptr (thread, then system)
a2 = &_tx_thread_current_ptr
a3 = _tx_thread_current_ptr (thread control block)
a4 = &_tx_timer_time_slice
*/
/* Lock out interrupts (except hi-pri). */
/* Grab thread control block of current thread. */
movi a2, _tx_thread_current_ptr /* a2 = &_tx_thread_current_ptr */
XT_INTS_DISABLE(a0)
l32i a3, a2, 0 /* a3 points to TCB */
/* Mark as having solicited entry to kernel (used on exit). */
movi a0, TX_TRUE
s32i a0, a3, tx_thread_solicited
/* Save current stack and switch to system stack. */
// _tx_thread_current_ptr -> tx_thread_stack_ptr = SP;
// SP = _tx_thread_system_stack_ptr;
movi a5, _tx_thread_system_stack_ptr /* a5 = & system stack ptr */
s32i sp, a3, tx_thread_stack_ptr
movi a4, _tx_timer_time_slice /* a4 = &_tx_timer_time_slice */
l32i sp, a5, 0 /* sp = system stack ptr */
/* Determine if the time-slice is active. */
// if (_tx_timer_time_slice)
// {
l32i a0, a4, 0
beqz a0, .L_tx_thread_dont_save_ts
/* Save time-slice for the thread and clear current time-slice. */
// _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
// _tx_timer_time_slice = 0;
s32i a0, a3, tx_thread_time_slice
movi a0, 0 /* a0 == 0 == TX_NULL */
s32i a0, a4, 0
// }
/* Invariant: a0 == 0 here on both paths -- the beqz branch implies */
/* a0 == 0, and the fall-through path reset it just above. */
.L_tx_thread_dont_save_ts:
/* Clear the current thread pointer. */
// _tx_thread_current_ptr = TX_NULL;
s32i a0, a2, 0 /* a0 == 0 == TX_NULL */
#if XCHAL_CP_NUM > 0
/* Clear CPENABLE and give up all co-procs. */
s16i a0, a3, tx_thread_cp_state + XT_CPENABLE
wsr a0, CPENABLE /* disable all co-processors */
#endif
/*
Return via the scheduler.
Scheduler returns eventually to this function's caller as if called by it.
*/
call0 _tx_thread_schedule /* never returns here */
#endif /* XCHAL_HAVE_XEA3 */
// }

View File

@@ -0,0 +1,277 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Timer */
/** */
/**************************************************************************/
/**************************************************************************/
#include "xtensa_rtos.h"
#include "tx_api_asm.h"
#ifndef TX_NO_TIMER
.text
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* This function processes the hardware timer interrupt. This */
/* processing includes incrementing the system clock and checking for */
/* time slice and/or timer expiration. If either is found, the */
/* interrupt context save/restore functions are called along with the */
/* expiration functions. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
// VOID _tx_timer_interrupt(VOID)
// {
.globl _tx_timer_interrupt
.type _tx_timer_interrupt,@function
.align 4
_tx_timer_interrupt:
#ifdef __XTENSA_CALL0_ABI__
/* Define local variable spill offsets in stack frame for Call0 ABI. */
#define __tx_timer_interrupt_a0 0 /* ENTRY()/RET() saves/restores */
#define __tx_timer_interrupt_a2 4 /* preserve a2 */
#define __tx_timer_interrupt_a3 8 /* preserve a3 */
#endif
ENTRY(16)
/* Invoke the optional user tick hook if the weak symbol is defined */
/* (movi yields 0 when tx_timer_user_isr is left undefined). */
.globl tx_timer_user_isr
.weak tx_timer_user_isr
movi a2, tx_timer_user_isr
beqz a2, 1f
#ifdef __XTENSA_CALL0_ABI__
callx0 a2
#else
callx8 a2
#endif
1:
/*
Xtensa timers work by comparing a cycle counter with a preset value.
Once the match occurs an interrupt is generated, and the handler has
to set a new cycle count into the comparator. To avoid clock drift
due to interrupt latency, the new cycle count is computed from the old,
not the time the interrupt was serviced. However if a timer interrupt
is ever serviced more than one tick late, it is necessary to process
multiple ticks until the new cycle count is in the future, otherwise
the next timer interrupt would not occur until after the cycle counter
had wrapped (2^32 cycles later).
do {
ticks++;
old_ccompare = read_ccompare_i();
write_ccompare_i( old_ccompare + divisor );
service one tick;
diff = read_ccount() - old_ccompare;
} while ( diff > divisor );
*/
.L_tx_timer_catchup:
/* Increment the system clock. */
// _tx_timer_system_clock++;
movi a2, _tx_timer_system_clock /* a2 = &_tx_timer_system_clock */
l32i a3, a2, 0 /* a3 = _tx_timer_system_clock++ */
addi a3, a3, 1
s32i a3, a2, 0
/* Update the timer comparator for the next tick. */
#ifdef XT_CLOCK_FREQ
movi a2, XT_TICK_DIVISOR /* a2 = comparator increment */
#else
movi a3, _xt_tick_divisor
l32i a2, a3, 0 /* a2 = comparator increment */
#endif
rsr a3, XT_CCOMPARE /* a3 = old comparator value */
add a4, a3, a2 /* a4 = new comparator value */
wsr a4, XT_CCOMPARE /* update comp. and clear interrupt */
esync
/* NOTE: a2 (divisor) and a3 (old comparator) stay live until the */
/* catch-up test at the bottom; Call0 spills them around the C calls. */
/* Test for time-slice expiration. */
// if (_tx_timer_time_slice)
// {
movi a4, _tx_timer_time_slice /* a4 = &_tx_timer_time_slice */
l32i a5, a4, 0 /* a5 = _tx_timer_time_slice */
beqz a5, .L_tx_timer_no_time_slice
/* Decrement the time_slice. */
// _tx_timer_time_slice--;
addi a5, a5, -1
s32i a5, a4, 0
/* Check for expiration. */
// if (_tx_timer_time_slice == 0)
bnez a5, .L_tx_timer_no_time_slice
/* Set the time-slice expired flag. */
// _tx_timer_expired_time_slice = TX_TRUE;
movi a4, _tx_timer_expired_time_slice
movi a5, TX_TRUE
s32i a5, a4, 0
// }
.L_tx_timer_no_time_slice:
/* Test for timer expiration. */
// if (*_tx_timer_current_ptr)
// {
movi a4, _tx_timer_current_ptr /* a4 = &_tx_timer_current_ptr */
l32i a5, a4, 0 /* a5 = _tx_timer_current_ptr */
l32i a6, a5, 0 /* a6 = *_tx_timer_current_ptr */
beqz a6, .L_tx_timer_no_timer
/* Set expiration flag. */
// _tx_timer_expired = TX_TRUE;
movi a6, _tx_timer_expired
movi a7, TX_TRUE
s32i a7, a6, 0
j .L_tx_timer_done
// }
// else
// {
.L_tx_timer_no_timer:
/* No timer expired, increment the timer pointer. */
// _tx_timer_current_ptr++;
/* Check for wrap-around. */
// if (_tx_timer_current_ptr == _tx_timer_list_end)
movi a6, _tx_timer_list_end
l32i a6, a6, 0 /* a6 = _tx_timer_list_end */
addi a5, a5, 4 /* a5 = ++_tx_timer_current_ptr */
bne a5, a6, .L_tx_timer_skip_wrap
/* Wrap to beginning of list. */
// _tx_timer_current_ptr = _tx_timer_list_start;
movi a6, _tx_timer_list_start
l32i a5, a6, 0 /* a5 = _tx_timer_list_start */
.L_tx_timer_skip_wrap:
s32i a5, a4, 0 /* _tx_timer_current_ptr = a5 */
// }
.L_tx_timer_done:
/* See if anything has expired. */
// if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
// {
#ifdef __XTENSA_CALL0_ABI__
/* Preserve a2 and a3 across calls. */
s32i a2, sp, __tx_timer_interrupt_a2
s32i a3, sp, __tx_timer_interrupt_a3
#endif
/* Did a timer expire? */
// if (_tx_timer_expired)
// {
movi a4, _tx_timer_expired
l32i a5, a4, 0
beqz a5, .L_tx_timer_dont_activate
/* Call the timer expiration processing. */
// _tx_timer_expiration_process();
#ifdef __XTENSA_CALL0_ABI__
call0 _tx_timer_expiration_process
#else
call8 _tx_timer_expiration_process
#endif
// }
.L_tx_timer_dont_activate:
/* Did time slice expire? */
// if (_tx_timer_expired_time_slice)
// {
movi a4, _tx_timer_expired_time_slice
l32i a5, a4, 0
beqz a5, .L_tx_timer_not_ts_expiration
/* Time slice interrupted thread. */
// _tx_thread_time_slice();
#ifdef __XTENSA_CALL0_ABI__
call0 _tx_thread_time_slice
#else
call8 _tx_thread_time_slice
#endif
// }
.L_tx_timer_not_ts_expiration:
#ifdef __XTENSA_CALL0_ABI__
/* Restore a2 and a3. */
l32i a2, sp, __tx_timer_interrupt_a2
l32i a3, sp, __tx_timer_interrupt_a3
#endif
// }
.Ln_tx_timer_nothing_expired:
/* Check if we need to process more ticks to catch up. */
esync /* ensure comparator update complete */
rsr a4, CCOUNT /* a4 = cycle count */
sub a4, a4, a3 /* diff = ccount - old comparator */
blt a2, a4, .L_tx_timer_catchup /* repeat while diff > divisor */
RET(16)
// }
#endif /* TX_NO_TIMER */

View File

@@ -0,0 +1,123 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Support for Xtensa applications */
/** */
/**************************************************************************/
/**************************************************************************/
#include "tx_user.h"
#ifdef TX_ENABLE_STACK_CHECKING
/* Include necessary system files. */
#include "tx_api.h"
#include "xtensa_rtos.h"
#ifdef XT_BOARD
#include <xtensa/xtbsp.h>
#endif
#ifdef XT_SIMULATOR
#include <xtensa/simcall.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* Callback to notify of a stack overflow when registered with */
/* tx_stack_error_notify and stack checking is enabled (ThreadX */
/* is compiled with TX_ENABLE_STACK_CHECKING defined). */
/* */
/* The handler notifies the user in any/all of the following ways: */
/* - A message via the simulator (extremely reliable, simulator only). */
/* - A message on the board's display (emulation board only). */
/* - A message to stdout (uses low-level write to avoid printf which */
/* is large and would trash state the user might want to examine). */
/* The most reliable methods are done first. Several might work. */
/* */
/* After notifying the user as best it can, the handler stops the */
/* application in the most reliable of the following ways: */
/* - Passes control to the debugger (if attached). */
/* - Terminates the simulation (simulator only). */
/* - Panics. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
VOID _tx_xtensa_stack_error_handler(TX_THREAD * thread)
{
    /* Notify the user of the stack overflow by every available means,
       most reliable first, then stop the application.  'thread' is the
       offending thread, as reported by ThreadX stack checking.  */

#ifdef XT_SIMULATOR
    /* Log a message via the simulator.  The simcall ABI takes the
       request code in a2 and arguments in a3/a4, hence the
       register-pinned locals listed as asm inputs.  */
    register int32_t     sc  __asm__ ("a2") = SYS_log_msg;
    register char *      msg __asm__ ("a3")
        = "**** Stack overflow in thread 0x%08x.\n";
    register TX_THREAD * thd __asm__ ("a4") = thread;
    __asm__ volatile ("simcall" :: "a" (sc), "a" (msg), "a" (thd) );
#endif

#ifdef XT_BOARD
    /* Show a short message on the board's display, if present.  */
    xtbsp_display_string("StkOflow");
#endif

    /* Low-level write to stdout; avoids printf, which is large and
       would trash state the user might want to examine.  */
    write(1, "**** Stack overflow in thread \"", 31);
    write(1, thread->tx_thread_name, strlen(thread->tx_thread_name));
    write(1, "\"\n", 2);

#ifdef XT_SIMULATOR
    /* Pass control to the debugger (if attached) or exit simulation.
       NOTE: 'sc' must be listed as an asm input; the original bare
       "simcall" had no operands, so the compiler was free to discard
       the SYS_gdb_abort assignment and a2 was not guaranteed valid.  */
    sc = SYS_gdb_abort;
    __asm__ volatile ("simcall" :: "a" (sc));
#else
    __asm__ volatile ("break 1, 15");   /* control to debugger or panic */
#endif
}
#endif /* TX_ENABLE_STACK_CHECKING */

View File

@@ -0,0 +1,433 @@
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* XTENSA CONTEXT SAVE AND RESTORE ROUTINES */
/* */
/* Low-level functions for handling generic context save and restore of */
/* registers not specifically addressed by the interrupt vectors and */
/* handlers. Those registers (not handled by these functions) are PC, PS, */
/* A0, A1 (SP). Except for the calls to RTOS functions, this code is */
/* generic to Xtensa. */
/* */
/* Note that in Call0 ABI, interrupt handlers are expected to preserve */
/* the callee-save regs (A12-A15), which is always the case if the */
/* handlers are coded in C. However A12, A13 are made available as */
/* scratch registers for interrupt dispatch code, so are presumed saved */
/* anyway, and are always restored even in Call0 ABI. Only A14, A15 are */
/* truly handled as callee-save regs. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
#include "xtensa_rtos.h"
#ifdef XT_USE_OVLY
#include <xtensa/overlay_os_asm.h>
#endif
.text
#if XCHAL_HAVE_XEA2
/***************************************************************************
_xt_context_save
!! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!
Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13, in the
interrupt stack frame defined in xtensa_rtos.h.
Its counterpart is _xt_context_restore (which also restores A12, A13).
Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the frame.
This function preserves A12 & A13 in order to provide the caller with 2 scratch
regs that need not be saved over the call to this function. The choice of which
2 regs to provide is governed by xthal_window_spill_nw and xthal_save_extra_nw,
to avoid moving data more than necessary. Caller can assign regs accordingly.
Entry Conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").
Original A12, A13 have already been saved in the interrupt stack frame.
Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at the
point of interruption.
If windowed ABI, PS.EXCM = 1 (exceptions disabled).
Exit conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").
A12, A13 as at entry (preserved).
If windowed ABI, PS.EXCM = 1 (exceptions disabled).
***************************************************************************/
.global _xt_context_save
.type _xt_context_save,@function
.align 4
_xt_context_save:
/* Save general-purpose regs a2-a11 (a0, a1, a12, a13, PC, PS were
already saved by the caller per the entry conditions above). */
s32i a2, sp, XT_STK_A2
s32i a3, sp, XT_STK_A3
s32i a4, sp, XT_STK_A4
s32i a5, sp, XT_STK_A5
s32i a6, sp, XT_STK_A6
s32i a7, sp, XT_STK_A7
s32i a8, sp, XT_STK_A8
s32i a9, sp, XT_STK_A9
s32i a10, sp, XT_STK_A10
s32i a11, sp, XT_STK_A11
/*
Call0 ABI callee-saved regs a12-15 do not need to be saved here.
a12-13 are the caller's responsibility so it can use them as scratch.
So only need to save a14-a15 here for Windowed ABI (not Call0).
*/
#ifndef __XTENSA_CALL0_ABI__
s32i a14, sp, XT_STK_A14
s32i a15, sp, XT_STK_A15
#endif
/* Save the shift-amount register. */
rsr a3, SAR
s32i a3, sp, XT_STK_SAR
#if XCHAL_HAVE_LOOPS
/* Save zero-overhead loop state (LBEG/LEND/LCOUNT). */
rsr a3, LBEG
s32i a3, sp, XT_STK_LBEG
rsr a3, LEND
s32i a3, sp, XT_STK_LEND
rsr a3, LCOUNT
s32i a3, sp, XT_STK_LCOUNT
#endif
#if XCHAL_HAVE_EXCLUSIVE
/* Save and clear state of ATOMCTL */
movi a3, 0
getex a3
s32i a3, sp, XT_STK_ATOMCTL
#endif
#if XT_USE_SWPRI
/* Save virtual priority mask */
movi a3, _xt_vpri_mask
l32i a3, a3, 0
s32i a3, sp, XT_STK_VPRI
#endif
#if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
mov a9, a0 /* preserve ret addr */
#endif
#ifndef __XTENSA_CALL0_ABI__
/*
To spill the reg windows, temp. need pre-interrupt stack ptr and a4-15.
Need to save a9,12,13 temporarily (in frame temps) and recover originals.
Interrupts need to be disabled below XCHAL_EXCM_LEVEL and window overflow
and underflow exceptions disabled (assured by PS.EXCM == 1).
*/
s32i a12, sp, XT_STK_TMP0 /* temp. save stuff in stack frame */
s32i a13, sp, XT_STK_TMP1
s32i a9, sp, XT_STK_TMP2
/*
Save the overlay state if we are supporting overlays. Since we just saved
three registers, we can conveniently use them here. Note that as of now,
overlays only work for windowed calling ABI.
*/
#ifdef XT_USE_OVLY
l32i a9, sp, XT_STK_PC /* recover saved PC */
_xt_overlay_get_state a9, a12, a13
s32i a9, sp, XT_STK_OVLY /* save overlay state */
#endif
l32i a12, sp, XT_STK_A12 /* recover original a9,12,13 */
l32i a13, sp, XT_STK_A13
l32i a9, sp, XT_STK_A9
addi sp, sp, XT_STK_FRMSZ /* restore the interruptee's SP */
call0 xthal_window_spill_nw /* preserves only a4,5,8,9,12,13 */
addi sp, sp, -XT_STK_FRMSZ
l32i a12, sp, XT_STK_TMP0 /* recover stuff from stack frame */
l32i a13, sp, XT_STK_TMP1
l32i a9, sp, XT_STK_TMP2
#endif
#if XCHAL_EXTRA_SA_SIZE > 0
/*
NOTE: Normally the xthal_save_extra_nw macro only affects address
registers a2-a5. It is theoretically possible for Xtensa processor
designers to write TIE that causes more address registers to be
affected, but it is generally unlikely. If that ever happens,
more registers need to be saved/restored around this macro invocation.
Here we assume a9,12,13 are preserved.
Future Xtensa tools releases might limit the regs that can be affected.
*/
addi a2, sp, XT_STK_EXTRA /* where to save it */
# if XCHAL_EXTRA_SA_ALIGN > 16
movi a3, -XCHAL_EXTRA_SA_ALIGN
and a2, a2, a3 /* align dynamically >16 bytes */
# endif
call0 xthal_save_extra_nw /* destroys a0,2,3,4,5 */
#endif
#if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
mov a0, a9 /* retrieve ret addr */
#endif
ret
/*******************************************************************************
_xt_context_restore
!! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!
Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in Call0
ABI, A14, A15 which are preserved by all interrupt handlers) from an interrupt
stack frame defined in xtensa_rtos.h .
Its counterpart is _xt_context_save (whose caller saved A12, A13).
Caller is responsible to restore PC, PS, A0, A1 (SP).
Entry Conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").
Exit conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").
Other processor state except PC, PS, A0, A1 (SP), is as at the point
of interruption.
*******************************************************************************/
.global _xt_context_restore
.type _xt_context_restore,@function
.align 4
_xt_context_restore:
#if XCHAL_EXTRA_SA_SIZE > 0
/*
NOTE: Normally the xthal_restore_extra_nw macro only affects address
registers a2-a5. It is theoretically possible for Xtensa processor
designers to write TIE that causes more address registers to be
affected, but it is generally unlikely. If that ever happens,
more registers need to be saved/restored around this macro invocation.
Here we only assume a13 is preserved.
Future Xtensa tools releases might limit the regs that can be affected.
*/
mov a13, a0 /* preserve ret addr */
addi a2, sp, XT_STK_EXTRA /* where to find it */
# if XCHAL_EXTRA_SA_ALIGN > 16
movi a3, -XCHAL_EXTRA_SA_ALIGN
and a2, a2, a3 /* align dynamically >16 bytes */
# endif
call0 xthal_restore_extra_nw /* destroys a0,2,3,4,5 */
mov a0, a13 /* retrieve ret addr */
#endif
#if XCHAL_HAVE_LOOPS
/* Restore zero-overhead loop state (LBEG/LEND/LCOUNT). */
l32i a2, sp, XT_STK_LBEG
l32i a3, sp, XT_STK_LEND
wsr a2, LBEG
l32i a2, sp, XT_STK_LCOUNT
wsr a3, LEND
wsr a2, LCOUNT
#endif
#if XCHAL_HAVE_EXCLUSIVE
/* Restore state of ATOMCTL */
l32i a2, sp, XT_STK_ATOMCTL
getex a2
#endif
#ifdef XT_USE_OVLY
/*
If we are using overlays, this is a good spot to check if we need
to restore an overlay for the incoming task. Here we have a bunch
of registers to spare. Note that this step is going to use a few
bytes of storage below SP (SP-20 to SP-32) if an overlay is going
to be restored.
*/
l32i a2, sp, XT_STK_PC /* retrieve PC */
l32i a3, sp, XT_STK_PS /* retrieve PS */
l32i a4, sp, XT_STK_OVLY /* retrieve overlay state */
l32i a5, sp, XT_STK_A1 /* retrieve stack ptr */
_xt_overlay_check_map a2, a3, a4, a5, a6
s32i a2, sp, XT_STK_PC /* save updated PC */
s32i a3, sp, XT_STK_PS /* save updated PS */
#endif
#ifdef XT_USE_SWPRI
/* Restore virtual interrupt priority and interrupt enable */
movi a3, _xt_intdata
l32i a4, a3, 0 /* a4 = _xt_intenable */
l32i a5, sp, XT_STK_VPRI /* a5 = saved _xt_vpri_mask */
and a4, a4, a5
wsr a4, INTENABLE /* update INTENABLE */
s32i a5, a3, 4 /* restore _xt_vpri_mask */
#endif
/* Restore SAR and general-purpose regs a2-a11 from the frame. */
l32i a3, sp, XT_STK_SAR
l32i a2, sp, XT_STK_A2
wsr a3, SAR
l32i a3, sp, XT_STK_A3
l32i a4, sp, XT_STK_A4
l32i a5, sp, XT_STK_A5
l32i a6, sp, XT_STK_A6
l32i a7, sp, XT_STK_A7
l32i a8, sp, XT_STK_A8
l32i a9, sp, XT_STK_A9
l32i a10, sp, XT_STK_A10
l32i a11, sp, XT_STK_A11
/*
Call0 ABI callee-saved regs a12-15 do not need to be restored here.
However a12-13 were saved for scratch before XT_RTOS_INT_ENTER(),
so need to be restored anyway, despite being callee-saved in Call0.
*/
l32i a12, sp, XT_STK_A12
l32i a13, sp, XT_STK_A13
#ifndef __XTENSA_CALL0_ABI__
l32i a14, sp, XT_STK_A14
l32i a15, sp, XT_STK_A15
#endif
ret
#endif /* XCHAL_HAVE_XEA2 */
/*******************************************************************************
_xt_coproc_init
Initializes global co-processor management data, setting all co-processors
to "unowned". Leaves CPENABLE as it found it (does NOT clear it).
Called during initialization of the RTOS, before any threads run.
This may be called from normal Xtensa single-threaded application code which
might use co-processors. The Xtensa run-time initialization enables all
co-processors. They must remain enabled here, else a co-processor exception
might occur outside of a thread, which the exception handler doesn't expect.
Entry Conditions:
Xtensa single-threaded run-time environment is in effect.
No thread is yet running.
Exit conditions:
None.
Obeys ABI conventions per prototype:
void _xt_coproc_init(void)
*******************************************************************************/
#if XCHAL_CP_NUM > 0
.global _xt_coproc_init
.type _xt_coproc_init,@function
.align 4
_xt_coproc_init:
ENTRY0
/*
Mark every co-processor as unowned by zeroing each entry of the
owner table. Walk the table with a down-counting entry counter;
CPENABLE is deliberately left untouched (see header comment).
*/
movi a3, _xt_coproc_owner_sa /* a3 = &_xt_coproc_owner_sa[0] */
movi a4, XCHAL_CP_MAX /* a4 = number of table entries */
movi a2, 0 /* a2 = 0 (unowned) */
1: s32i a2, a3, 0 /* clear one ownership entry */
addi a3, a3, 4 /* advance to the next entry */
addi a4, a4, -1 /* one fewer entry remaining */
bnez a4, 1b /* loop until table is cleared */
RET0
#endif
/*******************************************************************************
_xt_coproc_release
Releases any and all co-processors owned by a given thread. The thread is
identified by it's co-processor state save area defined in xtensa_context.h .
Must be called before a thread's co-proc save area is deleted to avoid
memory corruption when the exception handler tries to save the state.
May be called when a thread terminates or completes but does not delete
the co-proc save area, to avoid the exception handler having to save the
thread's co-proc state before another thread can use it (optimization).
Entry Conditions:
A2 = Pointer to base of co-processor state save area.
Exit conditions:
None.
Obeys ABI conventions per prototype:
void _xt_coproc_release(void * coproc_sa_base)
*******************************************************************************/
#if XCHAL_CP_NUM > 0
.global _xt_coproc_release
.type _xt_coproc_release,@function
.align 4
_xt_coproc_release:
ENTRY0 /* a2 = base of save area */
movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */
addi a4, a3, XCHAL_CP_MAX << 2 /* a4 = top+1 of owner array */
movi a5, 0 /* a5 = 0 (unowned) */
/* Mask interrupts while scanning so no coprocessor exception can
race with us and reassign an owner entry mid-scan. */
#if XCHAL_HAVE_XEA3
movi a6, PS_DI
xps a6, a6 /* lock interrupts */
#else
rsil a6, XCHAL_EXCM_LEVEL /* lock interrupts */
#endif
/* Scan the whole owner array; clear every entry owned by the thread
whose save-area base is in a2. */
1: l32i a7, a3, 0 /* a7 = owner at a3 */
bne a2, a7, 2f /* if (coproc_sa_base == owner) */
s32i a5, a3, 0 /* owner = unowned */
2: addi a3, a3, 1<<2 /* a3 = next entry in owner array */
bltu a3, a4, 1b /* repeat until end of array */
3:
wsr a6, PS /* restore interrupts */
rsync
RET0
#endif

View File

@@ -0,0 +1,578 @@
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* Xtensa coprocessor handling routines. This code is only active if */
/* one or more coprocessors are present. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
#include <xtensa/config/specreg.h>
#include <xtensa/coreasm.h>
#include "xtensa_context.h"
#include "xtensa_rtos.h"
#if XCHAL_CP_NUM > 0
//-----------------------------------------------------------------------------
// Coprocessor related state and precomputed values.
//-----------------------------------------------------------------------------
// Table of coprocessor owners, identified by thread's CP save area pointer.
// Zero means coprocessor is not owned.
.data
.global _xt_coproc_owner_sa
.align 16,,XCHAL_CP_MAX << 2 // minimize crossing cache boundaries
/* One word per coprocessor: owner thread's CP save-area pointer,
or 0 when the coprocessor is unowned. */
_xt_coproc_owner_sa:
.rept XCHAL_CP_MAX
.word 0
.endr
// Bitmask table for CP n's enable bit, indexed by coprocessor number.
.section .rodata, "a"
.global _xt_coproc_mask
.align 16,,8 // try to keep it all in one cache line
.set i, 0
_xt_coproc_mask:
.rept XCHAL_CP_MAX
.long (i<<16) | (1<<i) // upper 16-bits = i, lower = bitmask
.set i, i+1
.endr
// Offset to CP n save area in thread's CP save area.
.global _xt_coproc_sa_offset
.align 16 // minimize crossing cache boundaries
_xt_coproc_sa_offset:
.word XT_CP0_SA, XT_CP1_SA, XT_CP2_SA, XT_CP3_SA
.word XT_CP4_SA, XT_CP5_SA, XT_CP6_SA, XT_CP7_SA
//-----------------------------------------------------------------------------
// _xt_coproc_handler
//
// Handles coprocessor exceptions and manages lazy context switching between
// multiple threads sharing the coprocessor(s).
// Register use:
// a0 - on entry, return address (must have been called via call0).
// a1 - pointing to valid exception stack frame.
// a2 - on entry, must hold coprocessor index. On exit, 0 if OK.
// a3-a15 - may all be used and trashed by this routine.
//-----------------------------------------------------------------------------
.text
.align 4
.global _xt_coproc_handler
_xt_coproc_handler:
// Lazy coprocessor switch: enable CP n for the current thread, save the
// previous owner's CP state if needed, and restore this thread's state.
mov a7, a0 // a7 = return address
mov a5, a2 // a5 = CP index n
// Get coprocessor state save area of new owner thread
call0 XT_RTOS_CP_STATE // a15 = new owner's save area
beqz a15, .L_xt_coproc_invalid // not in a thread (invalid)
l32i a4, a15, XT_CP_ASA // actual save area address
beqz a4, .L_xt_coproc_invalid // thread has no save area
// Enable the co-processor's bit in CPENABLE
movi a0, _xt_coproc_mask
rsr a4, CPENABLE // a4 = CPENABLE
addx4 a0, a5, a0 // a0 = &_xt_coproc_mask[n]
l32i a0, a0, 0 // a0 = (n << 16) | (1 << n)
movi a3, _xt_coproc_owner_sa
extui a2, a0, 0, 16 // coprocessor bitmask portion
or a4, a4, a2 // a4 = CPENABLE | (1 << n)
wsr a4, CPENABLE
// Get old coprocessor owner thread (save area ptr) and assign new one
addx4 a3, a5, a3 // a3 = &_xt_coproc_owner_sa[n]
l32i a2, a3, 0 // a2 = old owner's save area
s32i a15, a3, 0 // _xt_coproc_owner_sa[n] = new
rsync // ensure wsr.CPENABLE is complete
// Do we need to context-switch this coprocessor ?
beq a15, a2, .L_xt_coproc_done // new owner == old, we're done
// if no old owner then nothing to save
beqz a2, .L_check_new
// If old owner not actively using CP then nothing to save.
l16ui a4, a2, XT_CPENABLE // a4 = old owner's CPENABLE
bnone a4, a0, .L_check_new // old owner not using CP
.L_save_old:
// We need to save old owner's coprocessor state
movi a5, _xt_coproc_sa_offset
// Mark old owner state as no longer active (CPENABLE bit n clear)
xor a4, a4, a0 // clear CP in old owner's CPENABLE
s16i a4, a2, XT_CPENABLE // update old owner's CPENABLE
extui a4, a0, 16, 5 // a4 = CP index = n
addx4 a5, a4, a5 // a5 = &_xt_coproc_sa_offset[n]
// Mark old owner state as saved (CPSTORED bit n set)
l16ui a4, a2, XT_CPSTORED // a4 = old owner's CPSTORED
l32i a5, a5, 0 // a5 = XT_CP[n]_SA offset
or a4, a4, a0 // set CP in old owner's CPSTORED
s16i a4, a2, XT_CPSTORED // update old owner's CPSTORED
l32i a2, a2, XT_CP_ASA // ptr to actual (aligned) save area
extui a3, a0, 16, 5 // a3 = CP index = n
add a2, a2, a5 // a2 = old owner's area for CP n
// The config-specific HAL macro invoked below destroys a2-a6.
// It is theoretically possible for Xtensa processor designers to write TIE
// that causes more address registers to be affected, but it is generally
// unlikely. If that ever happens, more registers needs to be saved/restored
// around this macro invocation, and the value in a15 needs to be recomputed.
xchal_cpi_store_funcbody
.L_check_new:
// Check if any state has to be restored for new owner.
// NOTE: a15 = new owner's save area, cannot be zero when we get here.
l16ui a3, a15, XT_CPSTORED // a3 = new owner's CPSTORED
movi a4, _xt_coproc_sa_offset
bnone a3, a0, .L_check_cs // full CP not saved, check callee-saved
xor a3, a3, a0 // CPSTORED bit is set, clear it
s16i a3, a15, XT_CPSTORED // update new owner's CPSTORED
// Adjust new owner's save area pointers to area for CP n.
extui a3, a0, 16, 5 // a3 = CP index = n
addx4 a4, a3, a4 // a4 = &_xt_coproc_sa_offset[n]
l32i a4, a4, 0 // a4 = XT_CP[n]_SA
l32i a5, a15, XT_CP_ASA // ptr to actual (aligned) save area
add a2, a4, a5 // a2 = new owner's area for CP
// The config-specific HAL macro invoked below destroys a2-a6.
// It is theoretically possible for Xtensa processor designers to write TIE
// that causes more address registers to be affected, but it is generally
// unlikely. If that ever happens, more registers needs to be saved/restored
// around this macro invocation.
xchal_cpi_load_funcbody
.L_xt_coproc_done:
movi a2, 0 // a2 <- 0 == OK
.L_xt_coproc_err:
mov a0, a7 // return address
ret
.L_check_cs:
// a0 = CP mask in low bits, a15 = new owner's save area.
l16ui a2, a15, XT_CP_CS_ST // a2 = mask of CPs saved
bnone a2, a0, .L_xt_coproc_done // if no match then done
and a2, a2, a0 // a2 = which CPs to restore
extui a2, a2, 0, 8 // extract low 8 bits
call0 _xt_coproc_restorecs // restore CP registers
j .L_xt_coproc_done
.L_xt_coproc_invalid:
// Coprocessor exception occurred outside a thread or the thread
// did not allocate space to save coprocessor state. Return error.
movi a2, 1
j .L_xt_coproc_err
//-----------------------------------------------------------------------------
// _tx_thread_coproc_state
//
// Helper function to return the save area for the current thread, if any.
// Returns, in a15, the pointer to the save area if any, else zero.
// If in interrupt context, returns zero. Only uses a15.
// Must be called only via call0.
//-----------------------------------------------------------------------------
.global _tx_thread_coproc_state
.type _tx_thread_coproc_state,@function
.align 4
_tx_thread_coproc_state:
// return ( _tx_thread_system_state == 0 && _tx_thread_current_ptr != 0
// ? (&_tx_thread_current_ptr->tx_thread_cp_state) : 0 )
// NOTE: per the contract above, only a15 may be used here.
movi a15, _tx_thread_system_state // check if interrupt state
l32i a15, a15, 0
bnez a15, 1f
movi a15, _tx_thread_current_ptr // check if thread running
l32i a15, a15, 0
beqz a15, 2f
// Return base address of current thread's co-processor save area.
addi a15, a15, tx_thread_cp_state
ret
1:
movi a15, 0 // return error
2:
ret
//-----------------------------------------------------------------------------
// _xt_coproc_savecs
//
// If there is a current thread and it has a coprocessor state save area, then
// save all callee-saved state into this area. This function is called from the
// solicited context switch handler. It calls a system-specific function to get
// the coprocessor save area base address.
//
// Entry conditions:
// - The thread being switched out is still the current thread.
// - CPENABLE state reflects which coprocessors are active.
// - Registers have been saved/spilled already.
//
// Exit conditions:
// - All necessary CP callee-saved state has been saved.
// - Registers a7-a15 have been trashed.
//
// Must be called from assembly code only, using CALL0.
//-----------------------------------------------------------------------------
.global _xt_coproc_savecs
.type _xt_coproc_savecs,@function
.align 4
_xt_coproc_savecs:
// At entry, CPENABLE should be showing which CPs are enabled.
rsr a11, CPENABLE // a11 = which CPs are enabled
beqz a11, .Ldone // quick exit if none
mov a14, a0 // save return address
call0 XT_RTOS_CP_STATE // get address of CP save area
mov a0, a14 // restore return address
beqz a15, .Ldone // if none then nothing to do
l32i a14, a15, XT_CP_ASA // a14 = base of aligned save area
beqz a14, .Ldone // no save area, nothing to do
s16i a11, a15, XT_CP_CS_ST // save mask of CPs being stored
movi a13, _xt_coproc_sa_offset // array of CP save offsets
l32i a15, a15, XT_CP_ASA // a15 = base of aligned save area
// One section per possible coprocessor, compiled in only if that CP
// has save state: skip if its CPENABLE bit is clear, else store its
// callee-saved TIE state into the per-CP slice of the save area.
#if XCHAL_CP0_SA_SIZE
bbci.l a11, 0, 2f // CP 0 not enabled
l32i a14, a13, 0 // a14 = _xt_coproc_sa_offset[0]
add a12, a14, a15 // a12 = save area for CP 0
xchal_cp0_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP1_SA_SIZE
bbci.l a11, 1, 2f // CP 1 not enabled
l32i a14, a13, 4 // a14 = _xt_coproc_sa_offset[1]
add a12, a14, a15 // a12 = save area for CP 1
xchal_cp1_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP2_SA_SIZE
bbci.l a11, 2, 2f
l32i a14, a13, 8
add a12, a14, a15
xchal_cp2_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP3_SA_SIZE
bbci.l a11, 3, 2f
l32i a14, a13, 12
add a12, a14, a15
xchal_cp3_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP4_SA_SIZE
bbci.l a11, 4, 2f
l32i a14, a13, 16
add a12, a14, a15
xchal_cp4_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP5_SA_SIZE
bbci.l a11, 5, 2f
l32i a14, a13, 20
add a12, a14, a15
xchal_cp5_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP6_SA_SIZE
bbci.l a11, 6, 2f
l32i a14, a13, 24
add a12, a14, a15
xchal_cp6_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP7_SA_SIZE
bbci.l a11, 7, 2f
l32i a14, a13, 28
add a12, a14, a15
xchal_cp7_store a12, a7, a8, a9, a10 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
.Ldone:
ret
//-----------------------------------------------------------------------------
// _xt_coproc_restorecs
//
// Restore any callee-saved coprocessor state for the incoming thread.
// This function is called from coprocessor exception handling, when giving
// ownership to a thread that solicited a context switch earlier. It calls a
// system-specific function to get the coprocessor save area base address.
//
// Entry conditions:
// - The incoming thread is set as the current thread.
// - CPENABLE is set up correctly for all required coprocessors.
// - a2 = mask of coprocessors to be restored.
//
// Exit conditions:
// - All necessary CP callee-saved state has been restored.
// - CPENABLE - unchanged.
// - Registers a2, a8-a15 have been trashed.
//
// Must be called from assembly code only, using CALL0.
//-----------------------------------------------------------------------------
.global _xt_coproc_restorecs
.type _xt_coproc_restorecs,@function
.align 4
_xt_coproc_restorecs:
mov a14, a0 // save return address
call0 XT_RTOS_CP_STATE // get address of CP save area
mov a0, a14 // restore return address
beqz a15, .Ldone2 // if none then nothing to do
l32i a14, a15, XT_CP_ASA // a14 = base of aligned save area
beqz a14, .Ldone2 // no save area, nothing to do
l16ui a13, a15, XT_CP_CS_ST // a13 = which CPs have been saved
xor a13, a13, a2 // clear the ones being restored
s16i a13, a15, XT_CP_CS_ST // update saved CP mask
movi a13, _xt_coproc_sa_offset // array of CP save offsets
l32i a15, a15, XT_CP_ASA // a15 = base of aligned save area
// One section per possible coprocessor, compiled in only if that CP
// has save state: skip unless its bit is set in the restore mask (a2),
// else reload its callee-saved TIE state from the per-CP slice.
#if XCHAL_CP0_SA_SIZE
bbci.l a2, 0, 2f // CP 0 not enabled
l32i a14, a13, 0 // a14 = _xt_coproc_sa_offset[0]
add a12, a14, a15 // a12 = save area for CP 0
xchal_cp0_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP1_SA_SIZE
bbci.l a2, 1, 2f // CP 1 not enabled
l32i a14, a13, 4 // a14 = _xt_coproc_sa_offset[1]
add a12, a14, a15 // a12 = save area for CP 1
xchal_cp1_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP2_SA_SIZE
bbci.l a2, 2, 2f
l32i a14, a13, 8
add a12, a14, a15
xchal_cp2_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP3_SA_SIZE
bbci.l a2, 3, 2f
l32i a14, a13, 12
add a12, a14, a15
xchal_cp3_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP4_SA_SIZE
bbci.l a2, 4, 2f
l32i a14, a13, 16
add a12, a14, a15
xchal_cp4_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP5_SA_SIZE
bbci.l a2, 5, 2f
l32i a14, a13, 20
add a12, a14, a15
xchal_cp5_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP6_SA_SIZE
bbci.l a2, 6, 2f
l32i a14, a13, 24
add a12, a14, a15
xchal_cp6_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
#if XCHAL_CP7_SA_SIZE
bbci.l a2, 7, 2f
l32i a14, a13, 28
add a12, a14, a15
xchal_cp7_load a12, a8, a9, a10, a11 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
.Ldone2:
ret
#if XCHAL_HAVE_XEA3
//-----------------------------------------------------------------------------
// For XEA3, coprocessor exceptions come here. This is a wrapper function that
// calls _xt_coproc_handler() to do the actual work. Since the handler can be
// interrupted make sure that no context switch occurs.
//-----------------------------------------------------------------------------
.text
.global _xt_coproc_exc
.type _xt_coproc_exc,@function
.align 4
_xt_coproc_exc:
#ifdef __XTENSA_CALL0_ABI__
addi a1, a1, -16 // reserve 16 bytes on stack
s32i a0, a1, 0 // save return address
s32i a2, a1, 4 // save a2
s32i a15, a1, 8 // must save a15 (see dispatch)
l32i a2, a1, 4 // NOTE(review): reloads the a2 just stored above — looks redundant; confirm before removing
l32i a3, a2, XT_STK_EXCCAUSE // a3 <- exccause
extui a2, a3, 8, 4 // a2 <- CP index
call0 _xt_coproc_handler
l32i a0, a1, 0 // restore return address
l32i a15, a1, 8 // restore a15
addi a1, a1, 16
ret
#else
entry a1, 48 // allocate 48-byte windowed stack frame
s32i a0, a1, 0 // save return address
l32i a3, a2, XT_STK_EXCCAUSE // a3 <- exccause
extui a2, a3, 8, 4 // a2 <- CP index
call0 _xt_coproc_handler
l32i a0, a1, 0 // restore return address
retw
#endif
#endif // XCHAL_HAVE_XEA3
#if XCHAL_HAVE_XEA2
//-----------------------------------------------------------------------------
// XEA2 coprocessor exception dispatcher. Save enough state to be able to call
// the coprocessor handler, then restore and return.
//-----------------------------------------------------------------------------
.text
.global _xt_coproc_exc
.type _xt_coproc_exc,@function
.align 4
// XEA2 coprocessor-exception dispatcher. Builds a full exception frame on
// the interrupted thread's stack, saves a0-a15/PS/PC, computes the CP index
// from EXCCAUSE, calls _xt_coproc_handler, then restores state and returns
// via RFE. A nonzero handler return aborts into _xt_panic.
_xt_coproc_exc:
mov a0, sp // Allocate stack frame
addi sp, sp, -XT_STK_FRMSZ
s32i a0, sp, XT_STK_A1 // save SP
#if XCHAL_HAVE_WINDOWED
s32e a0, sp, -12 // for debug backtrace
#endif
rsr a0, PS
s32i a0, sp, XT_STK_PS // save PS
rsr a0, EPC_1
s32i a0, sp, XT_STK_PC // save PC
rsr a0, EXCSAVE_1
s32i a0, sp, XT_STK_A0 // retrieve and save a0 (stashed by vector)
#if XCHAL_HAVE_WINDOWED
s32e a0, sp, -16 // for debug backtrace
#endif
// Save the full GPR set so the C handler may clobber anything.
s32i a2, sp, XT_STK_A2
s32i a3, sp, XT_STK_A3
s32i a4, sp, XT_STK_A4
s32i a5, sp, XT_STK_A5
s32i a6, sp, XT_STK_A6
s32i a7, sp, XT_STK_A7
s32i a8, sp, XT_STK_A8
s32i a9, sp, XT_STK_A9
s32i a10, sp, XT_STK_A10
s32i a11, sp, XT_STK_A11
s32i a12, sp, XT_STK_A12
s32i a13, sp, XT_STK_A13
s32i a14, sp, XT_STK_A14
s32i a15, sp, XT_STK_A15
rsr a3, EXCCAUSE // a3 <- exccause
addi a2, a3, -EXCCAUSE_CP0_DISABLED // a2 <- CP index (cause codes are contiguous)
call0 _xt_coproc_handler
mov a0, a2 // save return value
l32i a2, sp, XT_STK_A2
l32i a3, sp, XT_STK_A3
l32i a4, sp, XT_STK_A4
l32i a5, sp, XT_STK_A5
l32i a6, sp, XT_STK_A6
l32i a7, sp, XT_STK_A7
l32i a8, sp, XT_STK_A8
l32i a9, sp, XT_STK_A9
l32i a10, sp, XT_STK_A10
l32i a11, sp, XT_STK_A11
l32i a12, sp, XT_STK_A12
l32i a13, sp, XT_STK_A13
l32i a14, sp, XT_STK_A14
l32i a15, sp, XT_STK_A15
bnez a0, .Lfail // abort if failure
l32i a0, sp, XT_STK_PC
wsr a0, EPC_1 // restore PC (faulting CP instruction is retried)
l32i a0, sp, XT_STK_PS
wsr a0, PS // restore PS
l32i a0, sp, XT_STK_A0
addi a1, a1, XT_STK_FRMSZ // deallocate stack frame
rfe
.Lfail:
call0 _xt_panic
#endif // XCHAL_HAVE_XEA2
#endif // XCHAL_CP_NUM > 0

View File

@@ -0,0 +1,67 @@
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* Xtensa initialization routines. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
#ifdef XT_BOARD
#include <xtensa/xtbsp.h>
#endif
#include "xtensa_rtos.h"
#ifdef XT_RTOS_TIMER_INT
#ifndef XT_CLOCK_FREQ
uint32_t _xt_tick_divisor = 0; /* cached number of cycles per tick; 0 until initialized */
/*
Compute and initialize at run-time the tick divisor (the number of
processor clock cycles in an RTOS tick, used to set the tick timer).
Called when the processor clock frequency is not known at compile-time.
Only compiles when a board support package is available (XT_BOARD);
otherwise there is no source for the clock frequency and the build fails.
*/
void _xt_tick_divisor_init(void)
{
#ifdef XT_BOARD
    /* Integer division truncates; assumes clock freq >= XT_TICK_PER_SEC. */
_xt_tick_divisor = xtbsp_clock_freq_hz() / XT_TICK_PER_SEC;
#else
#error "No way to obtain processor clock frequency"
#endif /* XT_BOARD */
}
#endif /* XT_CLOCK_FREQ */
#endif /* XT_RTOS_TIMER_INT */

View File

@@ -0,0 +1,213 @@
/*******************************************************************************
Copyright (c) 2006-2019 Cadence Design Systems Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
******************************************************************************/
/******************************************************************************
Xtensa-specific interrupt and exception functions for RTOS ports.
Also see xtensa_intr_asm.S.
******************************************************************************/
#include <stdlib.h>
#include <xtensa/config/core.h>
#include <xtensa/core-macros.h>
#include "xtensa_api.h"
#if XCHAL_HAVE_EXCEPTIONS
/* Handler table is in xtensa_intr_asm.S */
extern xt_exc_handler _xt_exception_table[XCHAL_EXCCAUSE_NUM];
/*
Fallback handler installed for every exception slot that has no
user-registered handler. Terminates the program immediately.
The saved context ("frame") is deliberately ignored.
*/
void xt_unhandled_exception(XtExcFrame *frame)
{
    (void) frame;   /* unused */
    exit(-1);
}
/*
Register "f" as the handler for exception number "n" and return the
previously installed handler. Passing NULL re-installs the default
xt_unhandled_exception handler. Returns 0 if "n" is out of range.
*/
xt_exc_handler xt_set_exception_handler(uint32_t n, xt_exc_handler f)
{
    xt_exc_handler prev;

    if (n >= XCHAL_EXCCAUSE_NUM) {
        return 0;   /* invalid exception number */
    }

    prev = _xt_exception_table[n];
    _xt_exception_table[n] = (f != NULL) ? f : &xt_unhandled_exception;
    return prev;
}
#endif
#if XCHAL_HAVE_INTERRUPTS
#if XCHAL_HAVE_XEA2
/* Defined in xtensa_intr_asm.S */
extern uint32_t _xt_intenable;
extern uint32_t _xt_vpri_mask;
#endif
/* Handler table is in xtensa_intr_asm.S */
typedef struct xt_handler_table_entry {
void * handler;
void * arg;
} xt_handler_table_entry;
#if (XT_USE_INT_WRAPPER || XCHAL_HAVE_XEA3)
extern xt_handler_table_entry _xt_interrupt_table[XCHAL_NUM_INTERRUPTS + 1];
#else
extern xt_handler_table_entry _xt_interrupt_table[XCHAL_NUM_INTERRUPTS];
#endif
/*
Fallback handler installed for every interrupt slot that has no
user-registered handler. Terminates the program immediately.
"arg" (by default the interrupt number) is deliberately ignored.
*/
void xt_unhandled_interrupt(void * arg)
{
    (void) arg;   /* unused */
    exit(-1);
}
/*
Register "f" as the handler for interrupt "n", to be invoked with "arg".
Returns the previously installed handler, or 0 on error (interrupt number
out of range, or — on XEA2 — interrupt level too high for C dispatch).
Passing NULL re-installs the default xt_unhandled_interrupt handler with
the interrupt number as its argument.
*/
xt_handler xt_set_interrupt_handler(uint32_t n, xt_handler f, void * arg)
{
    xt_handler_table_entry * slot;
    xt_handler prev;

    if (n >= XCHAL_NUM_INTERRUPTS) {
        return 0;   /* invalid interrupt number */
    }

#if XCHAL_HAVE_XEA2
    if (Xthal_intlevel[n] > XCHAL_EXCM_LEVEL) {
        return 0;   /* priority level too high to safely handle in C */
    }
#endif

    /* Slot 0 is reserved for the dispatch wrapper in some configs. */
#if (XT_USE_INT_WRAPPER || XCHAL_HAVE_XEA3)
    slot = &(_xt_interrupt_table[n + 1]);
#else
    slot = &(_xt_interrupt_table[n]);
#endif

    prev = slot->handler;
    if (f != NULL) {
        slot->handler = f;
        slot->arg     = arg;
    }
    else {
        slot->handler = &xt_unhandled_interrupt;
        slot->arg     = (void*)n;
    }
    return prev;
}
/*
This function enables the interrupt whose number is specified as
the argument. On XEA2 the virtual INTENABLE shadow is updated under
an interrupt lockout (RSIL 15) and then merged with the virtual
priority mask into the real INTENABLE register. Other configs defer
to the HAL.
*/
void xt_interrupt_enable(uint32_t intnum)
{
#if XCHAL_HAVE_XEA2
    uint32_t ps = XT_RSIL(15);
    // New INTENABLE = (_xt_intenable | mask) & _xt_vpri_mask.
    // Use an unsigned constant: (1 << 31) is signed-overflow UB in C,
    // and intnum may legitimately be 31 on 32-interrupt configs.
    _xt_intenable |= (1U << intnum);
    XT_WSR_INTENABLE(_xt_intenable & _xt_vpri_mask);
    XT_WSR_PS(ps);
    XT_RSYNC();
#else
    xthal_interrupt_enable(intnum);
#endif
}
/*
This function disables the interrupt whose number is specified as
the argument. On XEA2 the virtual INTENABLE shadow is updated under
an interrupt lockout (RSIL 15) and then merged with the virtual
priority mask into the real INTENABLE register. Other configs defer
to the HAL.
*/
void xt_interrupt_disable(uint32_t intnum)
{
#if XCHAL_HAVE_XEA2
    uint32_t ps = XT_RSIL(15);
    // New INTENABLE = (_xt_intenable & ~mask) & _xt_vpri_mask.
    // Use an unsigned constant: (1 << 31) is signed-overflow UB in C,
    // and intnum may legitimately be 31 on 32-interrupt configs.
    _xt_intenable &= ~(1U << intnum);
    XT_WSR_INTENABLE(_xt_intenable & _xt_vpri_mask);
    XT_WSR_PS(ps);
    XT_RSYNC();
#else
    xthal_interrupt_disable(intnum);
#endif
}
/*
Software-trigger interrupt "intnum". Thin forwarder to the HAL; no
state is kept at this layer.
*/
void xt_interrupt_trigger(uint32_t intnum)
{
    xthal_interrupt_trigger(intnum);
}
/*
Clear any pending request for interrupt "intnum". Thin forwarder to
the HAL; no state is kept at this layer.
*/
void xt_interrupt_clear(uint32_t intnum)
{
    xthal_interrupt_clear(intnum);
}
#endif /* XCHAL_HAVE_INTERRUPTS */

View File

@@ -0,0 +1,156 @@
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* Xtensa interrupt handling data and assembly routines. */
/* Also see xtensa_intr.c. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
#include <xtensa/hal.h>
#include <xtensa/config/core.h>
#include "tx_port.h"
#include "xtensa_context.h"
#if XCHAL_HAVE_INTERRUPTS
/*
-------------------------------------------------------------------------------
INTENABLE virtualization information.
These two words shadow the real INTENABLE register so that C code
(xt_interrupt_enable/disable in xtensa_intr.c) can mask interrupts
by priority without losing enable state. XEA2 only.
-------------------------------------------------------------------------------
*/
#if XCHAL_HAVE_XEA2
.data
.global _xt_intdata
.align 8
_xt_intdata:
.global _xt_intenable
.type _xt_intenable,@object
.size _xt_intenable,4
.global _xt_vpri_mask
.type _xt_vpri_mask,@object
.size _xt_vpri_mask,4
_xt_intenable: .word 0 /* Virtual INTENABLE */
_xt_vpri_mask: .word 0xFFFFFFFF /* Virtual priority mask (all enabled) */
#endif
/*
-------------------------------------------------------------------------------
System interrupt stack.
Size comes from TX_SYSTEM_STACK_SIZE (tx_port.h). Placed in a dedicated
section for some configs so it can be located by the linker script.
-------------------------------------------------------------------------------
*/
#if (XCHAL_HAVE_XEA2 || XCHAL_HAVE_ISB)
.data
#else
.section .intr.top, "aw"
#endif
.global _xt_interrupt_stack
.global _xt_interrupt_stack_top
.align 16
_xt_interrupt_stack:
.space TX_SYSTEM_STACK_SIZE
_xt_interrupt_stack_top:
/*
-------------------------------------------------------------------------------
Table of C-callable interrupt handlers for each interrupt. For XEA2 configs,
not all slots can be filled, because interrupts at level > EXCM_LEVEL will
not be dispatched to a C handler by default.
Each entry is a (handler, arg) pair; layout must match the
xt_handler_table_entry struct in xtensa_intr.c.
-------------------------------------------------------------------------------
*/
#if (XCHAL_HAVE_XEA2 || XCHAL_HAVE_ISB)
.data
#else
.section .intr.data, "aw"
#endif
.global _xt_interrupt_table
.align 16
_xt_interrupt_table:
/*
-------------------------------------------------------------------------------
If using the interrupt wrapper, make the first entry in the interrupt table
point to the wrapper (XEA3) or leave it empty (XEA2). C code must then index
the table with (intnum + 1).
-------------------------------------------------------------------------------
*/
#if XCHAL_HAVE_XEA3
.word xt_interrupt_wrapper
.word 0
#elif XT_USE_INT_WRAPPER
.word 0
.word 0
#endif
.set i, 0
.rept XCHAL_NUM_INTERRUPTS
.word xt_unhandled_interrupt /* handler address */
.word i /* handler arg (default: intnum) */
.set i, i+1
.endr
#endif /* XCHAL_HAVE_INTERRUPTS */
#if XCHAL_HAVE_EXCEPTIONS
/*
-------------------------------------------------------------------------------
Table of C-callable exception handlers for each exception. Note that not all
slots will be active, because some exceptions (e.g. coprocessor exceptions)
are always handled by the OS and cannot be hooked by user handlers.
One word (handler address) per exception cause; indexed by EXCCAUSE.
Entries are replaced at run time by xt_set_exception_handler().
-------------------------------------------------------------------------------
*/
.data
.global _xt_exception_table
.align 4
_xt_exception_table:
.rept XCHAL_EXCCAUSE_NUM
.word xt_unhandled_exception /* handler address */
.endr
#endif

View File

@@ -0,0 +1,125 @@
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* Xtensa-specific interrupt handler wrapper. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
#include <xtensa/config/core.h>
#include <xtensa/core-macros.h>
#include "xtensa_rtos.h"
#include "xtensa_api.h"
#include "tx_api.h"
#include "tx_thread.h"
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
#include "tx_execution_profile.h"
#endif
#if (XCHAL_HAVE_XEA3 && XCHAL_HAVE_INTERRUPTS)
/* Table of interrupt hooks. Used for testing ONLY. */
#ifdef XT_INTEXC_HOOKS
volatile XT_INTEXC_HOOK _xt_intexc_hooks[XT_INTEXC_HOOK_NUM];
#endif
/* Handler table is in xtensa_intr_asm.S */
typedef struct xt_handler_table_entry {
void * handler;
void * arg;
} xt_handler_table_entry;
extern xt_handler_table_entry _xt_interrupt_table[XCHAL_NUM_INTERRUPTS + 1];
extern int32_t xt_sw_intnum;     /* SW interrupt used to request a context switch */
static int32_t xt_wflag;         /* set while a switch request is in flight, to
                                    avoid re-triggering from the SW int itself */
/**************************************************************************/
/* Wrapper for interrupt handlers. Argument is (intnum << 2). */
/* Execution comes here from the dispatch code if the wrapper is */
/* enabled. Maintains the RTOS interrupt-nesting counter, invokes the */
/* registered C handler, and triggers the context-switch SW interrupt */
/* when the scheduler selected a different thread during the ISR. */
/**************************************************************************/
void
xt_interrupt_wrapper(void * arg)
{
uint32_t intnum = (uint32_t)(arg) >> 2;   /* dispatch passes intnum << 2 */
xt_handler_table_entry * entry;
xt_handler handler;
/* Increment interrupt nest counter. */
_tx_thread_system_state++;
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
/* Call the ISR enter function to indicate an ISR is executing. */
_tx_execution_isr_enter();
#endif
/* Load handler address and argument from table. Note that the
first entry in the table points to this wrapper, so we have
to skip ahead one.
*/
entry = _xt_interrupt_table + intnum + 1;
handler = (xt_handler) entry->handler;
(*handler)(entry->arg);
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
/* Call the ISR exit function to indicate an ISR is complete. */
_tx_execution_isr_exit();
#endif
/* If a context switch is pending, trigger the SW interrupt
to process the switch. Set an internal flag so we don't
trigger the sw interrupt again when handling it.
*/
if (xt_wflag != 0) {
xt_wflag = 0;     /* this invocation IS the SW interrupt: consume the flag */
}
else if (_tx_thread_current_ptr != _tx_thread_execute_ptr) {
xt_wflag = 1;
xt_interrupt_trigger(xt_sw_intnum);
}
/* Decrement interrupt nest counter. */
_tx_thread_system_state--;
}
#endif /* XCHAL_HAVE_XEA3 && XCHAL_HAVE_INTERRUPTS */

View File

@@ -0,0 +1,109 @@
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* Xtensa overlay manager OS hooks for ThreadX. XEA2 only. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
#ifdef XT_USE_OVLY
#include <xtensa/overlay.h>
#include "tx_api.h"
/* Required to work around a bug in the overlay header. */
#ifdef XT_DISABLE_OVERLAYS
#undef xt_overlay_fatal_error
#define xt_overlay_fatal_error(id)
#endif
/* Mutex object that controls access to the overlay. Currently only one
* overlay region is supported so one mutex suffices.
*/
static TX_MUTEX xt_overlay_mutex;
/**************************************************************************/
/* OS-specific overlay manager initialization. Creates the mutex that */
/* serializes access to the (single) overlay region. Priority */
/* inheritance is required by the overlay manager to avoid priority */
/* inversion while an overlay is being loaded. Faults on failure. */
/**************************************************************************/
void
xt_overlay_init_os(void)
{
    if (tx_mutex_create(&xt_overlay_mutex, "xt_overlay_lock", TX_INHERIT)
            != TX_SUCCESS) {
        xt_overlay_fatal_error(-1);
    }
}
/**************************************************************************/
/* Acquire exclusive access to the shared overlay region, blocking */
/* indefinitely until the overlay mutex becomes available. Faults on */
/* failure. */
/**************************************************************************/
void
xt_overlay_lock(void)
{
    if (tx_mutex_get(&xt_overlay_mutex, TX_WAIT_FOREVER) != TX_SUCCESS) {
        xt_overlay_fatal_error(-1);
    }
}
/**************************************************************************/
/* Release exclusive access to the shared overlay region by returning */
/* the overlay mutex. Faults on failure (e.g. caller does not own it). */
/**************************************************************************/
void
xt_overlay_unlock(void)
{
    if (tx_mutex_put(&xt_overlay_mutex) != TX_SUCCESS) {
        xt_overlay_fatal_error(-1);
    }
}
#endif /* XT_USE_OVLY */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,392 @@
/**************************************************************************/
/* Copyright (c) Cadence Design Systems, Inc. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* Xtensa exception and interrupt dispatch for XEA3. */
/* */
/* Interrupt handlers and user exception handlers support interaction */
/* with the RTOS by calling XT_RTOS_INT_ENTER and XT_RTOS_INT_EXIT */
/* before and after calling the user's specific interrupt handlers. */
/* */
/* Users can install application-specific interrupt handlers by calling */
/* xt_set_interrupt_handler(). These handlers can be written in C and */
/* must follow the C calling convention. The handler table is indexed by */
/* the interrupt number. Each handler may be provided with an argument. */
/* */
/* Users can install application-specific exception handlers in the */
/* same way, by calling xt_set_exception_handler(). One handler slot is */
/* provided for each exception type. Note that some exceptions are */
/* handled by the porting layer itself, and cannot be taken over by */
/* application code. These are the alloca, syscall, and coprocessor */
/* exceptions. */
/* */
/* Exception handlers can be written in C, and must follow C calling */
/* convention. Each handler is passed a pointer to an exception frame as */
/* its single argument. The exception frame is created on the stack and */
/* holds the saved context of the thread that took the exception. If the */
/* handler returns, the context will be restored and the instruction */
/* that caused the exception will be retried. If the handler makes any */
/* changes to the saved state in the exception frame, the changes will */
/* be applied when restoring the context. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
#include <xtensa/config/core.h>
#include <xtensa/coreasm.h>
#if XCHAL_HAVE_XEA3
#include "xtensa_context.h"
#if (XCHAL_HW_VERSION < XTENSA_HWVERSION_RH_2016_2)
#error Xtensa HW earlier than RH_2016.2 not supported.
#endif
//-----------------------------------------------------------------------------
// The entry point vectors are common for call0 and windowed configurations.
// The .org offsets below are fixed by the XEA3 dispatch hardware and must
// not change. Vectors are declared weak so an application can override them.
//-----------------------------------------------------------------------------
.extern _DoubleExceptionHandler
.extern _xtos_exc_dispatch
.section .DispatchVector.text, "ax"
#if XCHAL_HAVE_VECBASE
.align 64 // 64-byte alignment needed when vecbase
#else // is relocatable
.align 4
#endif
.org 0 // Fixed offset for Reset Vector
.global _DispatchVector
.weak _DispatchVector
_DispatchVector:
j _JumpToResetHandler
.org 3 // Reserved slot: must never execute
.local _Reserved1
_Reserved1:
ill
.org 6 // Fixed offset for Double Exception Vector
.global _DoubleExceptionVector
.weak _DoubleExceptionVector
_DoubleExceptionVector:
j _DoubleExceptionHandler
.org 9 // Reserved slot: must never execute
.local _Reserved2
_Reserved2:
ill
//-----------------------------------------------------------------------------
// Start of dispatch code.
// Tailchain segment: entered by HW when another interrupt is pending after
// one completes. Selects the next interrupt and jumps to its wrapper, or
// falls through to _xt_exit when nothing is pending.
// NOTE(review): the s32si.x4/l32dis/s32stk/s32dis opcodes are XEA3
// dispatch-assist instructions; the trailing comments show their effective
// behavior in this dispatch state.
//-----------------------------------------------------------------------------
.org 12 // Fixed offset for Tailchain entry point
.global _xt_dispatch
_xt_dispatch:
#ifdef __XTENSA_CALL0_ABI__
// NOTE: for call0, a15 is expected to be holding the previous stack pointer
// when we get to the Tailchain segment.
s32si.x4 a2, a15 // Select interrupt, a2 <- (intnum << 2)
movi a0, 0
l32dis.it a0, a0 // a0 <- wrapper addr (handler_table[0])
s32stk a9, a15, 96 // Set new stack pointer
#if XT_STK_XTRA_SZ
addi a1, a1, -XT_STK_XTRA_SZ // Adjust for extra save area
#endif
s32dis.h a0, a0 // Jump to handler if interrupt else fall through
// Note this also clears local exclusive monitor
#else // windowed
s32si.x4 a10, a1 // Select interrupt, a10 <- (intnum << 2)
movi a8, 0
l32dis.it a8, a8 // a8 <- wrapper addr (handler_table[0])
s32stk a9, a1, 96 // Set new stack pointer
#if XT_STK_XTRA_SZ
addi a9, a9, -XT_STK_XTRA_SZ // Adjust for extra save area
#endif
s32dis.h a8, a8 // Jump to handler if interrupt else fall through
// Note this also clears local exclusive monitor
#endif // __XTENSA_CALL0_ABI__
.Lexit:
j _xt_exit
#ifndef __XTENSA_CALL0_ABI__
// Underflow segment: HW-assisted reload of a window's a8-a15 from the
// save area. The immediate -64 is re-interpreted by HW in this dispatch
// state; the per-line comments show the actual address used.
.org 36 // Fixed offset for Underflow segment
.global _xt_underflow
_xt_underflow:
l32e a8, a1, -64 // a8 <- [a1-32]
l32e a9, a1, -64 // a9 <- [a1-28]
l32e a10, a1, -64 // a10 <- [a1-24]
l32e a11, a1, -64 // a11 <- [a1-20]
l32e a12, a1, -64 // a12 <- [a1-16]
l32e a13, a1, -64 // a13 <- [a1-12]
l32e a14, a1, -64 // a14 <- [a1-8]
l32e a15, a1, -64 // a15 <- [a1-4] ; Return (branch to EPC)
#endif
// Save/Overflow segment: spills caller-saved state (and, for call0, a0-a7)
// into the exception frame. For a pure window overflow HW returns to EPC
// after the first group; an exception/interrupt continues into _xt_entry.
// The per-line comments show the HW-effective store addresses and the side
// registers (EPC/PS/ExcCause/...) that these dispatch-mode stores capture.
.org 60 // Fixed offset for Save/Overflow segment
.global _xt_save
_xt_save:
#ifdef __XTENSA_CALL0_ABI__
s32e a0, a1, -64 // [a1-64] <- a0
s32e a2, a1, -48 // [a1-56] <- a2 ; a2 <- EPC
s32e a3, a1, -64 // [a1-52] <- a3
s32e a4, a1, -64 // [a1-48] <- a4
s32e a5, a1, -64 // [a1-44] <- a5
s32e a6, a1, -64 // [a1-40] <- a6
s32e a7, a1, -64 // [a1-36] <- a7
#else
.global _xt_overflow
_xt_overflow:
#endif
s32e a8, a1, -52 // [a1-32] <- a8 ; a8 <- ExcVAddr
s32e a9, a1, -28 // [a1-28] <- a9 ; a9 <- PS/SAR
s32e a10, a1, -48 // [a1-24] <- a10 ; a10 <- EPC
s32e a11, a1, -24 // [a1-20] <- a11 ; a11 <- ExcCause
s32e a12, a1, -44 // [a1-16] <- a12 ; a12 <- LBEG
s32e a13, a1, -40 // [a1-12] <- a13 ; a13 <- LEND
s32e a14, a1, -36 // [a1-8] <- a14 ; a14 <- LCOUNT
s32e a15, a1, -32 // [a1-4] <- a15 ; a15 <- a1
// If Overflow then return (branch to EPC)
// Exception/interrupt path continues here: stash the side registers
// captured above into the frame's extra slots.
_xt_entry:
s32e a8, a1, -4 // [a1-68] <- a8 (ExcVAddr)
s32e a11, a1, -8 // [a1-72] <- a11 (ExcCause)
#if XCHAL_HAVE_LOOPS
s32e a12, a1, -20 // [a1-84] <- a12 (LBEG)
s32e a13, a1, -24 // [a1-88] <- a13 (LEND)
s32e a14, a1, -28 // [a1-92] <- a14 (LCOUNT)
#endif
#if XCHAL_HAVE_EXCLUSIVE
movi a12, 0
getex a12 // swap ATOMCTL state out for saving
s32e a12, a1, -32 // [a1-96] <- a12 (ATOMCTL)
#endif
j 1f // make room for literals
.align 4
.literal_position
.Le1:
.word _xt_exception_table
1:
// Call OS-specific code for additional work to be done. Stay on interruptee's
// stack in case more saves are required into stack frame.
// NOTE: OS-specific code can use a8, a12-a14, (+a2-a7: call0, a15: windowed).
// ALL other registers must be preserved.
XT_RTOS_INT_ENTER
// This sequence checks the interrupt controller and loads the interrupt
// number if available, and also loads the wrapper handler address.
// If there is an interrupt, execution will branch to the wrapper which
// will then forward to the correct handler.
// All this happens only if there is a pending interrupt. If not, execution
// falls through to exception handling.
#ifdef __XTENSA_CALL0_ABI__
s32si.x4 a2, a1 // [a1-80] <- a2 (EPC) ; a2 <- (intnum << 2)
movi a0, 0
l32dis.it a0, a0 // a0 <- wrapper addr (handler_table[0])
s32stk a9, a1, 96 // [a1-76] <- a9 (PS/SAR) ; a1 = a1-96
#if XT_STK_XTRA_SZ
addi a1, a1, -XT_STK_XTRA_SZ // Adjust for extra save area
#endif
s32dis.h a0, a0 // Jump to handler if interrupt else fall through
#else // windowed
s32si.x4 a10, a1 // [a1-80] <- a10 (EPC) ; a10 <- (intnum << 2)
movi a8, 0
l32dis.it a8, a8 // a8 <- wrapper addr (handler_table[0])
s32stk a9, a1, 96 // [a1-76] <- a9 (PS/SAR) ; a9 = a1-96
#if XT_STK_XTRA_SZ
addi a9, a9, -XT_STK_XTRA_SZ // Adjust for extra save area
#endif
s32dis.h a8, a8 // Jump to handler if interrupt else fall through
#endif // __XTENSA_CALL0_ABI__
// At this point we have:
// (note window has rotated for windowed ABI)
// a0 holds return address (Tailchain+3)
// For call0:
// a11 holds ExcCause, also saved in [oldsp - 72]
// a15 holds exception SP, a1 points to exception frame
// For windowed:
// a3 holds ExcCause, also saved in [oldsp - 72]
// a1 points to exception frame
// Exception dispatch: index _xt_exception_table by the low 4 bits of
// ExcCause and jump to the registered C handler, passing the frame
// pointer as its single argument. The handler returns via a0 directly
// into _xt_exit.
.global _xt_exception
_xt_exception:
l32r a2, .Le1 // Load exc table address
#ifdef __XTENSA_CALL0_ABI__
mov a3, a11 // Copy exception cause to a3
#endif
extui a4, a3, 0, 4 // Extract exception cause
addx4 a2, a4, a2 // Index into exc table
l32i a4, a2, 0 // Load handler address
#if XT_STK_XTRA_SZ
addi a2, a1, XT_STK_XTRA_SZ // Argument = Exception frame ptr
#else
mov a2, a1 // Argument = Exception frame ptr
#endif
jx a4 // Return directly from handler
// Exit/restore sequence
.global _xt_exit
_xt_exit:
#ifdef __XTENSA_CALL0_ABI__
mov a1, a15 // Restore stack pointer
#endif
// Run OS-specific code to determine what to restore.
// Interrupts will remain disabled through this sequence.
// WARNING: stack pointer may change within this macro
// so all restores off the stack must happen afterwards.
XT_RTOS_INT_EXIT
// Restore sequence: unwinds the exception frame built by _xt_save/_xt_entry.
// The l32e immediates are HW-reinterpreted in dispatch Restore state; the
// per-line comments give the actual frame slots and side-register writes.
.global _xt_restore
_xt_restore:
// Some loads must happen before DISPST = Restore, as these
// will not be accessible via L32E once DISPST = Restore.
#if XCHAL_HAVE_EXCLUSIVE
l32e a12, a1, -32 // a12 <- [a1-96] (ATOMCTL)
getex a12 // restore exclusive-access state
#endif
l32e a10, a1, -12 // a10 <- [a1-76] (PS/SAR)
l32e a12, a1, -20 // a12 <- [a1-84] (LBEG)
l32e a13, a1, -24 // a13 <- [a1-88] (LEND)
l32e a14, a1, -28 // a14 <- [a1-92] (LCOUNT)
l32dis.epc a11, a1 // a11 <- [a1-80] (EPC)
// If interrupt goto tailchain else fall through
#ifdef __XTENSA_CALL0_ABI__
l32e a0, a1, -64 // a0 <- [a1-64]
l32e a2, a1, -64 // a2 <- [a1-56]
l32e a3, a1, -64 // a3 <- [a1-52]
l32e a4, a1, -64 // a4 <- [a1-48]
l32e a5, a1, -64 // a5 <- [a1-44]
l32e a6, a1, -64 // a6 <- [a1-40]
l32e a7, a1, -64 // a7 <- [a1-36]
#endif
// Important: the following restrictions must be observed:
// 1) The LCOUNT register must be restored after LBEG/LEND.
// 2) There must be at least 3 instructions between the LCOUNT
// restore and the last L32E (the one that branches).
l32e a12, a1, -44 // LBEG <- a12, a12 <- [a1-16]
l32e a13, a1, -40 // LEND <- a13, a13 <- [a1-12]
l32e a14, a1, -36 // LCOUNT <- a14, a14 <- [a1-8]
l32e a8, a1, -64 // a8 <- [a1-32]
l32e a9, a1, -64 // a9 <- [a1-28]
l32e a10, a1, -60 // PS/SAR <- a10, a10 <- [a1-24]
l32e a11, a1, -48 // EPC <- a11, a11 <- [a1-20]
l32e a15, a1, -64 // a15 <- [a1-4], Branch to EPC if no interrupt
// If interrupt, branch to Tailchain
//-----------------------------------------------------------------------------
// Branch to reset handler code from here. Use CALL0 as a branch, will expand
// to CALLX0 if needed when built with the -mlongcalls option. CALL0 is used
// purely for its unlimited-range relaxation; _ResetHandler never returns.
//-----------------------------------------------------------------------------
.align 4
.local _JumpToResetHandler
_JumpToResetHandler:
call0 _ResetHandler
//-----------------------------------------------------------------------------
// Idle loop. On interrupt, no state needs saving: the idle context is
// discardable, so we park on the interrupt stack with DISPST = Normal and
// simply wait. An interrupt taken here dispatches directly without a
// save/restore round trip.
//-----------------------------------------------------------------------------
.align 4
.global _xt_idle
_xt_idle:
movi a14, _xt_interrupt_stack_top
mov a1, a14 // a1 <- Top of interrupt stack
movi a14, 0 // 0 = Normal
wsr.ms a14 // Set DISPST = Normal
rsync // Ensure MS write completes before waiti
waiti 0 // Wait for interrupt (unmask all levels above 0)
memw // HW erratum 569 workaround
//-----------------------------------------------------------------------------
// Scheduler interrupt handler. Triggered by context switch. At this time only
// useful for windowed ABI to spill register windows; for call0 there is
// nothing to do and it returns immediately.
//-----------------------------------------------------------------------------
.align 4
.global xt_sched_handler
xt_sched_handler:
#ifdef __XTENSA_WINDOWED_ABI__
entry a1, 32
ssai 1 // Set shift amount for spillw
spillw // Spill all live register windows to their stack frames
retw
#else
ret
#endif
#endif // XCHAL_HAVE_XEA3