Release 6.2.0

This commit is contained in:
Tiejun Zhou
2022-10-26 23:41:13 +00:00
parent b871c33620
commit 3e8e85cdc1
173 changed files with 26264 additions and 3989 deletions

View File

@@ -34,8 +34,8 @@
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -51,19 +51,21 @@
.text
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* This function restores the interrupt context if it is processing a */
/* nested interrupt. If not, it returns to the interrupted thread if no */
/* preemption is necessary. Otherwise, if preemption is necessary or */
/* if no thread was running, the function returns to the scheduler. */
/* */
/* RELEASE HISTORY */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/**************************************************************************/
/* */
/**************************************************************************/
/* DESCRIPTION */
/* */
/* This function restores the interrupt context if it is processing a */
/* nested interrupt. If not, it returns to the interrupted thread if no */
/* preemption is necessary. Otherwise, if preemption is necessary or */
/* if no thread was running, the function returns to the scheduler. */
/* */
/* RELEASE HISTORY */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* 10-31-2022 Scott Larson Updated EPK definitions, */
/* resulting in version 6.2.0 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
// {
@@ -73,16 +75,16 @@
_tx_thread_context_restore:
/*
Please note: Control flow might seem strange. This is because it has been
optimized to avoid taken branches in the longest normal path (the critical
one for worst-case latency, presumed to be a non-nested interrupt that
Please note: Control flow might seem strange. This is because it has been
optimized to avoid taken branches in the longest normal path (the critical
one for worst-case latency, presumed to be a non-nested interrupt that
preempts) and to hide pipeline interlock cycles where possible.
*/
/* Lockout interrupts. */
XT_INTS_DISABLE(a0)
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
#ifdef __XTENSA_CALL0_ABI__
call0 _tx_execution_isr_exit
@@ -106,7 +108,7 @@ _tx_thread_context_restore:
.Ln_tx_thread_not_nested_restore:
/* Determine if a thread was interrupted and no preemption is required. */
// else if (((_tx_thread_current_ptr)
// else if (((_tx_thread_current_ptr)
// && (_tx_thread_current_ptr == _tx_thread_execute_ptr))
// || (_tx_thread_preempt_disable))
// {
@@ -124,7 +126,7 @@ _tx_thread_context_restore:
// /* the no-preempt case has moved down so we fall-thru to preempt */
bgei a3, 1, .L_tx_thread_no_preempt_restore
// }
// else
// {
@@ -137,7 +139,7 @@ _tx_thread_context_restore:
/* Store standard preserved registers. */
/*
Call0 ABI callee-saved regs a12-15 need to be saved before preemption.
However a12-13 were saved for scratch by _tx_thread_context_save().
However a12-13 were saved for scratch by _tx_thread_context_save().
*/
#ifdef __XTENSA_CALL0_ABI__ /* Call0: now save callee-save regs */
s32i a14, a3, XT_STK_A14
@@ -151,7 +153,7 @@ _tx_thread_context_restore:
l32i a4, a3, 0 /* a4 = _tx_timer_time_slice */
beqz a4, .L_tx_thread_dont_save_ts
// _tx_thread_current_ptr -> tx_thread_time_slice
// _tx_thread_current_ptr -> tx_thread_time_slice
// = _tx_timer_time_slice;
// _tx_timer_time_slice = 0; */
s32i a4, a2, tx_thread_time_slice
@@ -175,7 +177,7 @@ _tx_thread_context_restore:
.L_tx_thread_idle_system_restore:
/*
/*
Return via the scheduler.
Scheduler returns eventually to this function's caller as if called by it.
At this point we are still on the system stack.
@@ -202,8 +204,8 @@ _tx_thread_context_restore:
call0 _xt_context_restore
/*
Must return via the exit dispatcher corresponding to the entrypoint
from which this was called. Interruptee's A0, A1, PS, PC are restored
Must return via the exit dispatcher corresponding to the entrypoint
from which this was called. Interruptee's A0, A1, PS, PC are restored
and the interrupt stack frame is deallocated in the exit dispatcher.
At this point we are on the thread's stack.
*/

View File

@@ -34,8 +34,8 @@
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -49,21 +49,23 @@
.text
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* This function waits for a thread control block pointer to appear in */
/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
/* in the variable, the corresponding thread is resumed. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* This function waits for a thread control block pointer to appear in */
/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
/* in the variable, the corresponding thread is resumed. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* 10-31-2022 Scott Larson Updated EPK definitions, */
/* resulting in version 6.2.0 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
// {
@@ -96,8 +98,8 @@ _tx_thread_schedule:
Note on Windowed ABI:
Callers of this don't expect it to return to them. Most use 'call0'.
The only windowed (C) caller is _tx_initialize_kernel_enter().
There are no args or results to pass. So we don't really care if the
window gets rotated. We can omit the 'entry' altogether and avoid the
There are no args or results to pass. So we don't really care if the
window gets rotated. We can omit the 'entry' altogether and avoid the
need for a special "no entry" entrypoint to this function.
*/
@@ -108,11 +110,11 @@ _tx_thread_schedule:
call0 scheduler_return
#endif
/*
/*
Wait for a thread to execute (Idle Loop).
First ensure interrupts (except hi-pri) are disabled so result
First ensure interrupts (except hi-pri) are disabled so result
of reading _tx_thread_execute_ptr can't change before testing.
While there's no thread ready, enable interrupts and wait in a
While there's no thread ready, enable interrupts and wait in a
low power state, then disable interrupts and repeat the test.
*/
// do
@@ -130,7 +132,7 @@ _tx_thread_schedule:
// while(_tx_thread_execute_ptr == TX_NULL);
.L_tx_thread_schedule_ready:
/* Yes! We have a thread to execute. Lockout interrupts and
transfer control to it. Interrupts are already disabled. */
@@ -170,7 +172,7 @@ _tx_thread_schedule:
// SP = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
l32i sp, a2, tx_thread_stack_ptr
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread entry function to indicate the thread is executing. */
#ifdef __XTENSA_CALL0_ABI__
call0 _tx_execution_thread_enter
@@ -197,8 +199,8 @@ _tx_thread_schedule:
/* In Call0 ABI, restore callee-saved regs (A12, A13 already restored). */
#ifdef __XTENSA_CALL0_ABI__
l32i a14, sp, XT_STK_A14
l32i a15, sp, XT_STK_A15
l32i a14, sp, XT_STK_A14
l32i a15, sp, XT_STK_A15
#endif
#if XCHAL_CP_NUM > 0
@@ -207,25 +209,25 @@ _tx_thread_schedule:
/*
This does not return to its caller, but to the selected thread.
Must return via the exit dispatcher corresponding to the entrypoint
from which this was called. Interruptee's A0, A1, PS, PC are restored
Must return via the exit dispatcher corresponding to the entrypoint
from which this was called. Interruptee's A0, A1, PS, PC are restored
and the interrupt stack frame is deallocated in the exit dispatcher.
*/
l32i a0, sp, XT_STK_EXIT
l32i a0, sp, XT_STK_EXIT
ret
.L_tx_thread_synch_return:
/* Here we return from a solicited entry with a solicited stack frame. */
movi a0, TX_FALSE
l32i a3, sp, XT_STK_PS
l32i a3, sp, XT_STK_PS
s32i a0, a2, tx_thread_solicited
#ifdef __XTENSA_CALL0_ABI__
l32i a12, sp, XT_STK_A12
l32i a13, sp, XT_STK_A13
l32i a14, sp, XT_STK_A14
l32i a15, sp, XT_STK_A15
l32i a12, sp, XT_STK_A12
l32i a13, sp, XT_STK_A13
l32i a14, sp, XT_STK_A14
l32i a15, sp, XT_STK_A15
#endif
l32i a0, sp, XT_STK_PC /* return address */

View File

@@ -34,8 +34,8 @@
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
@@ -48,21 +48,23 @@
.text
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* This function is target processor specific. It is used to transfer */
/* control from a thread back to the system. Only a minimal context */
/* is saved since the compiler assumes temp registers are going to get */
/* slicked by a function call anyway. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* */
/**************************************************************************/
/**************************************************************************/
/* */
/* DESCRIPTION */
/* */
/* This function is target processor specific. It is used to transfer */
/* control from a thread back to the system. Only a minimal context */
/* is saved since the compiler assumes temp registers are going to get */
/* slicked by a function call anyway. */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
/* 10-31-2022 Scott Larson Updated EPK definitions, */
/* resulting in version 6.2.0 */
/* */
/**************************************************************************/
// VOID _tx_thread_system_return(VOID)
// {
@@ -91,7 +93,7 @@ _tx_thread_system_return:
#endif
#endif
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
#ifdef __XTENSA_CALL0_ABI__
call0 _tx_execution_thread_exit
@@ -144,7 +146,7 @@ _tx_thread_system_return:
call0 _xt_coproc_savecs
/* Clear CPENABLE and give up all co-procs. */
s16i a5, a4, tx_thread_cp_state + XT_CPENABLE
s16i a5, a4, tx_thread_cp_state + XT_CPENABLE
wsr a5, CPENABLE /* disable all co-processors */
#endif
@@ -276,7 +278,7 @@ _tx_thread_system_return:
wsr a0, CPENABLE /* disable all co-processors */
#endif
/*
/*
Return via the scheduler.
Scheduler returns eventually to this function's caller as if called by it.
*/