Release ARMv7-A architecture ports and add tx_user.h to GNU port assembly files (#250)

* Release ARMv7-A architecture ports

* Add tx_user.h to GNU port assembly files

* Update GitHub action to perform check for Cortex-A ports
This commit is contained in:
TiejunZhou
2023-04-19 17:56:09 +08:00
committed by GitHub
parent 23680f5e5f
commit 672c5e953e
416 changed files with 43000 additions and 463 deletions

View File

@@ -0,0 +1,327 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
.text
.align 3
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function restores the interrupt context if it is processing a */
/* nested interrupt. If not, it returns to the interrupt thread if no */
/* preemption is necessary. Otherwise, if preemption is necessary or */
/* if no thread was running, the function returns to the scheduler. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_thread_schedule Thread scheduling routine */
/* */
/* CALLED BY */
/* */
/* ISRs Interrupt Service Routines */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
// {
.global _tx_thread_context_restore
.type _tx_thread_context_restore, @function
_tx_thread_context_restore:
/* Register roles in this routine:
     x8 = cluster-adjusted CPU index for the calling core
     x0 = current thread pointer, x1 = &_tx_thread_current_ptr[]
     x2, x3, x4, x5 = scratch
   On entry the interrupt stack holds the frame built by
   _tx_thread_context_save (x29/x30, x0-x19, then SPSR/ELR on top). */
/* Lockout interrupts. */
MSR DAIFSet, 0x3 // Lockout interrupts
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
BL _tx_execution_isr_exit // Call the ISR exit function
#endif
/* Pickup the CPU ID. */
MRS x8, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x2, x8, #16, #8 // Isolate cluster ID (Aff2 on ARMv8.2 parts)
#endif
UBFX x8, x8, #8, #8 // Isolate core ID (Aff1 on ARMv8.2 parts)
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x2, x8, #8, #8 // Isolate cluster ID (Aff1)
#endif
UBFX x8, x8, #0, #8 // Isolate core ID (Aff0)
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x8, x8, x2, LSL #2 // Calculate CPU ID as cluster*4 + core
// (assumes <= 4 cores per cluster -- confirm for target)
#endif
/* Determine if interrupts are nested. */
// if (--_tx_thread_system_state)
// {
LDR x3, =_tx_thread_system_state // Pickup address of system state var
LDR w2, [x3, x8, LSL #2] // Pickup system state
SUB w2, w2, #1 // Decrement the counter
STR w2, [x3, x8, LSL #2] // Store the counter
CMP w2, #0 // Was this the first interrupt?
BEQ __tx_thread_not_nested_restore // If so, not a nested restore
/* Interrupts are nested. */
/* Just recover the saved registers and return to the point of
interrupt. */
LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR (EL chosen below)
#ifdef EL1
MSR SPSR_EL1, x4 // Setup SPSR for return
MSR ELR_EL1, x5 // Setup point of interrupt
#else
#ifdef EL2
MSR SPSR_EL2, x4 // Setup SPSR for return
MSR ELR_EL2, x5 // Setup point of interrupt
#else
MSR SPSR_EL3, x4 // Setup SPSR for return
MSR ELR_EL3, x5 // Setup point of interrupt
#endif
#endif
LDP x18, x19, [sp], #16 // Recover x18, x19
LDP x16, x17, [sp], #16 // Recover x16, x17
LDP x14, x15, [sp], #16 // Recover x14, x15
LDP x12, x13, [sp], #16 // Recover x12, x13
LDP x10, x11, [sp], #16 // Recover x10, x11
LDP x8, x9, [sp], #16 // Recover x8, x9
LDP x6, x7, [sp], #16 // Recover x6, x7
LDP x4, x5, [sp], #16 // Recover x4, x5
LDP x2, x3, [sp], #16 // Recover x2, x3
LDP x0, x1, [sp], #16 // Recover x0, x1
LDP x29, x30, [sp], #16 // Recover x29, x30
ERET // Return to point of interrupt
// }
__tx_thread_not_nested_restore:
/* Determine if a thread was interrupted and no preemption is required. */
// else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
// || (_tx_thread_preempt_disable))
// {
LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
LDR x0, [x1, x8, LSL #3] // Pickup actual current thread pointer
CMP x0, #0 // Is it NULL?
BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
LDR x3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
LDR x2, [x3, x8, LSL #3] // Pickup actual execute thread pointer
CMP x0, x2 // Is the same thread highest priority?
BEQ __tx_thread_no_preempt_restore // Same thread in the execute list,
// no preemption needs to happen
LDR x3, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x3, #4] // Pickup the owning core (offset 4 -- keep in sync with TX_THREAD_SMP_PROTECT layout)
CMP w3, w8 // Is it this core?
BNE __tx_thread_preempt_restore // No, proceed to preempt thread
LDR x3, =_tx_thread_preempt_disable // Pickup preempt disable address
LDR w2, [x3, #0] // Pickup actual preempt disable flag
CMP w2, #0 // Is it set?
BEQ __tx_thread_preempt_restore // No, okay to preempt this thread
__tx_thread_no_preempt_restore:
/* Restore interrupted thread or ISR. */
/* Pickup the saved stack pointer. */
// sp = _tx_thread_current_ptr -> tx_thread_stack_ptr;
LDR x4, [x0, #8] // Switch to thread stack pointer (tx_thread_stack_ptr at offset 8)
MOV sp, x4 //
/* Recover the saved context and return to the point of interrupt. */
LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR (EL chosen below)
#ifdef EL1
MSR SPSR_EL1, x4 // Setup SPSR for return
MSR ELR_EL1, x5 // Setup point of interrupt
#else
#ifdef EL2
MSR SPSR_EL2, x4 // Setup SPSR for return
MSR ELR_EL2, x5 // Setup point of interrupt
#else
MSR SPSR_EL3, x4 // Setup SPSR for return
MSR ELR_EL3, x5 // Setup point of interrupt
#endif
#endif
LDP x18, x19, [sp], #16 // Recover x18, x19
LDP x16, x17, [sp], #16 // Recover x16, x17
LDP x14, x15, [sp], #16 // Recover x14, x15
LDP x12, x13, [sp], #16 // Recover x12, x13
LDP x10, x11, [sp], #16 // Recover x10, x11
LDP x8, x9, [sp], #16 // Recover x8, x9
LDP x6, x7, [sp], #16 // Recover x6, x7
LDP x4, x5, [sp], #16 // Recover x4, x5
LDP x2, x3, [sp], #16 // Recover x2, x3
LDP x0, x1, [sp], #16 // Recover x0, x1
LDP x29, x30, [sp], #16 // Recover x29, x30
ERET // Return to point of interrupt
// }
// else
// {
__tx_thread_preempt_restore:
/* Preemption: complete the interrupted thread's context save (the
   callee-saved registers and optional FP state) so it can be resumed
   later, then return to the scheduler on the system stack. */
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR (held in x4/x5 until re-pushed below)
STP x20, x21, [sp, #-16]! // Save x20, x21
STP x22, x23, [sp, #-16]! // Save x22, x23
STP x24, x25, [sp, #-16]! // Save x24, x25
STP x26, x27, [sp, #-16]! // Save x26, x27
STP x28, x29, [sp, #-16]! // Save x28, x29
#ifdef ENABLE_ARM_FP
LDR w3, [x0, #268] // Pickup FP enable flag (offset 268 -- keep in sync with TX_THREAD layout)
CMP w3, #0 // Is FP enabled?
BEQ _skip_fp_save // No, skip FP save
STP q0, q1, [sp, #-32]! // Save q0, q1
STP q2, q3, [sp, #-32]! // Save q2, q3
STP q4, q5, [sp, #-32]! // Save q4, q5
STP q6, q7, [sp, #-32]! // Save q6, q7
STP q8, q9, [sp, #-32]! // Save q8, q9
STP q10, q11, [sp, #-32]! // Save q10, q11
STP q12, q13, [sp, #-32]! // Save q12, q13
STP q14, q15, [sp, #-32]! // Save q14, q15
STP q16, q17, [sp, #-32]! // Save q16, q17
STP q18, q19, [sp, #-32]! // Save q18, q19
STP q20, q21, [sp, #-32]! // Save q20, q21
STP q22, q23, [sp, #-32]! // Save q22, q23
STP q24, q25, [sp, #-32]! // Save q24, q25
STP q26, q27, [sp, #-32]! // Save q26, q27
STP q28, q29, [sp, #-32]! // Save q28, q29
STP q30, q31, [sp, #-32]! // Save q30, q31
MRS x2, FPSR // Pickup FPSR
MRS x3, FPCR // Pickup FPCR
STP x2, x3, [sp, #-16]! // Save FPSR, FPCR
_skip_fp_save:
#endif
STP x4, x5, [sp, #-16]! // Save x4 (saved SPSR/DAIF), x5 (saved ELR)
MOV x3, sp // Move sp into x3
STR x3, [x0, #8] // Save stack pointer in thread control
// block
LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
LDR x4, [x3, x8, LSL #3] // Pickup system stack pointer
MOV sp, x4 // Setup system stack pointer
/* Save the remaining time-slice and disable it. */
// if (_tx_timer_time_slice)
// {
LDR x3, =_tx_timer_time_slice // Pickup time-slice variable address
LDR w2, [x3, x8, LSL #2] // Pickup time-slice
CMP w2, #0 // Is it active?
BEQ __tx_thread_dont_save_ts // No, don't save it
// _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
// _tx_timer_time_slice = 0;
STR w2, [x0, #36] // Save thread's time-slice
MOV w2, #0 // Clear value
STR w2, [x3, x8, LSL #2] // Disable global time-slice flag
// }
__tx_thread_dont_save_ts:
/* Clear the current task pointer. */
// _tx_thread_current_ptr = TX_NULL;
MOV x2, #0 // NULL value
STR x2, [x1, x8, LSL #3] // Clear current thread pointer
/* Set bit indicating this thread is ready for execution. */
MOV x2, #1 // Build ready flag
STR w2, [x0, #260] // Set thread's ready flag (offset 260 -- keep in sync with TX_THREAD layout)
DMB ISH // Ensure that accesses to shared resource have completed
/* Return to the scheduler. */
// _tx_thread_schedule();
// }
__tx_thread_idle_system_restore:
/* Just return back to the scheduler! */
/* Note: the preempt path above falls through here intentionally --
   both paths end by ERET-ing into _tx_thread_schedule. */
LDR x1, =_tx_thread_schedule // Build address for _tx_thread_schedule
#ifdef EL1
MSR ELR_EL1, x1 // Setup point of interrupt
// MOV x1, #0x4 // Setup EL1 return
// MSR spsr_el1, x1 // Move into SPSR
#else
#ifdef EL2
MSR ELR_EL2, x1 // Setup point of interrupt
// MOV x1, #0x8 // Setup EL2 return
// MSR spsr_el2, x1 // Move into SPSR
#else
MSR ELR_EL3, x1 // Setup point of interrupt
// MOV x1, #0xC // Setup EL3 return
// MSR spsr_el3, x1 // Move into SPSR
#endif
#endif
ERET // Return to scheduler
// }
// }

View File

@@ -0,0 +1,259 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
.text
.align 3
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_save ARMv8-A-SMP */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function saves the context of an executing thread in the */
/* beginning of interrupt processing. The function also ensures that */
/* the system stack is used upon return to the calling ISR. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ISRs */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
// VOID _tx_thread_context_save(VOID)
// {
.global _tx_thread_context_save
.type _tx_thread_context_save, @function
_tx_thread_context_save:
/* Upon entry to this routine, it is assumed that IRQ/FIQ interrupts are locked
out, x29 (frame pointer), x30 (link register) are saved, we are in the proper EL,
and all other registers are intact. */
/* Register roles in this routine:
     x1 = cluster-adjusted CPU index, x0 = current thread pointer,
     x2, x3 = scratch; x4/x5 carry SPSR/ELR in the thread-save path. */
/* Check for a nested interrupt condition. */
// if (_tx_thread_system_state++)
// {
STP x0, x1, [sp, #-16]! // Save x0, x1
STP x2, x3, [sp, #-16]! // Save x2, x3
/* Pickup the CPU ID. */
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x2, x1, #16, #8 // Isolate cluster ID (Aff2 on ARMv8.2 parts)
#endif
UBFX x1, x1, #8, #8 // Isolate core ID (Aff1 on ARMv8.2 parts)
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x2, x1, #8, #8 // Isolate cluster ID (Aff1)
#endif
UBFX x1, x1, #0, #8 // Isolate core ID (Aff0)
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x2, LSL #2 // Calculate CPU ID (cluster*4 + core)
#endif
LDR x3, =_tx_thread_system_state // Pickup address of system state var
LDR w2, [x3, x1, LSL #2] // Pickup system state
CMP w2, #0 // Is this the first interrupt?
BEQ __tx_thread_not_nested_save // Yes, not a nested context save
/* Nested interrupt condition. */
ADD w2, w2, #1 // Increment the nested interrupt counter
STR w2, [x3, x1, LSL #2] // Store it back in the variable
/* Save the rest of the scratch registers on the stack and return to the
calling ISR. */
STP x4, x5, [sp, #-16]! // Save x4, x5
STP x6, x7, [sp, #-16]! // Save x6, x7
STP x8, x9, [sp, #-16]! // Save x8, x9
STP x10, x11, [sp, #-16]! // Save x10, x11
STP x12, x13, [sp, #-16]! // Save x12, x13
STP x14, x15, [sp, #-16]! // Save x14, x15
STP x16, x17, [sp, #-16]! // Save x16, x17
STP x18, x19, [sp, #-16]! // Save x18, x19
#ifdef EL1
MRS x0, SPSR_EL1 // Pickup SPSR
MRS x1, ELR_EL1 // Pickup ELR (point of interrupt)
#else
#ifdef EL2
MRS x0, SPSR_EL2 // Pickup SPSR
MRS x1, ELR_EL2 // Pickup ELR (point of interrupt)
#else
MRS x0, SPSR_EL3 // Pickup SPSR
MRS x1, ELR_EL3 // Pickup ELR (point of interrupt)
#endif
#endif
STP x0, x1, [sp, #-16]! // Save SPSR, ELR
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR enter function to indicate an ISR is executing. */
STP x29, x30, [sp, #-16]! // Save x29, x30
BL _tx_execution_isr_enter // Call the ISR enter function
LDP x29, x30, [sp], #16 // Recover x29, x30
#endif
/* Return to the ISR. */
RET // Return to ISR
__tx_thread_not_nested_save:
// }
/* Otherwise, not nested, check to see if a thread was running. */
// else if (_tx_thread_current_ptr)
// {
ADD w2, w2, #1 // Increment the interrupt counter
STR w2, [x3, x1, LSL #2] // Store it back in the variable
LDR x2, =_tx_thread_current_ptr // Pickup address of current thread ptr
LDR x0, [x2, x1, LSL #3] // Pickup current thread pointer
CMP x0, #0 // Is it NULL?
BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
// scheduling loop - nothing needs saving!
/* Save minimal context of interrupted thread. */
STP x4, x5, [sp, #-16]! // Save x4, x5
STP x6, x7, [sp, #-16]! // Save x6, x7
STP x8, x9, [sp, #-16]! // Save x8, x9
STP x10, x11, [sp, #-16]! // Save x10, x11
STP x12, x13, [sp, #-16]! // Save x12, x13
STP x14, x15, [sp, #-16]! // Save x14, x15
STP x16, x17, [sp, #-16]! // Save x16, x17
STP x18, x19, [sp, #-16]! // Save x18, x19
#ifdef EL1
MRS x4, SPSR_EL1 // Pickup SPSR
MRS x5, ELR_EL1 // Pickup ELR (point of interrupt)
#else
#ifdef EL2
MRS x4, SPSR_EL2 // Pickup SPSR
MRS x5, ELR_EL2 // Pickup ELR (point of interrupt)
#else
MRS x4, SPSR_EL3 // Pickup SPSR
MRS x5, ELR_EL3 // Pickup ELR (point of interrupt)
#endif
#endif
STP x4, x5, [sp, #-16]! // Save SPSR, ELR
/* Save the current stack pointer in the thread's control block. */
// _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
MOV x4, sp //
STR x4, [x0, #8] // Save thread stack pointer (tx_thread_stack_ptr at offset 8)
/* Switch to the system stack. */
// sp = _tx_thread_system_stack_ptr;
LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
MRS x1, MPIDR_EL1 // Pickup the core ID (x1 was clobbered by ELR above)
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x2, x1, #16, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x2, x1, #8, #8 // Isolate cluster ID
#endif
UBFX x1, x1, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x2, LSL #2 // Calculate CPU ID
#endif
LDR x4, [x3, x1, LSL #3] // Pickup system stack pointer
MOV sp, x4 // Setup system stack pointer
/* Guard fixed for consistency with the nested and idle paths above/below:
   the ISR-enter notification must also fire when only
   TX_EXECUTION_PROFILE_ENABLE is defined. */
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR enter function to indicate an ISR is executing. */
STP x29, x30, [sp, #-16]! // Save x29, x30
BL _tx_execution_isr_enter // Call the ISR enter function
LDP x29, x30, [sp], #16 // Recover x29, x30
#endif
RET // Return to caller
// }
// else
// {
__tx_thread_idle_system_save:
/* Interrupt occurred in the scheduling loop. */
/* Not much to do here, just adjust the stack pointer, and return to IRQ
processing. */
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR enter function to indicate an ISR is executing. */
STP x29, x30, [sp, #-16]! // Save x29, x30
BL _tx_execution_isr_enter // Call the ISR enter function
LDP x29, x30, [sp], #16 // Recover x29, x30
#endif
ADD sp, sp, #48 // Recover saved registers (discard x0-x3 and caller's x29/x30: 3 pairs = 48 bytes)
RET // Continue IRQ processing
// }
// }

View File

@@ -0,0 +1,310 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
.text
.align 3
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule ARMv8-A-SMP */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function waits for a thread control block pointer to appear in */
/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
/* in the variable, the corresponding thread is resumed. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* added memory barrier, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
// {
.global _tx_thread_schedule
.type _tx_thread_schedule, @function
_tx_thread_schedule:
/* Register roles in this routine:
     x20 = cluster-adjusted CPU index (callee-saved; survives the BL below)
     x0  = thread selected for execution, x1 = &_tx_thread_execute_ptr[]
     x2, x3, x4, x5 = scratch. */
/* Enable interrupts. */
MSR DAIFClr, 0x3 // Enable interrupts
/* Pickup the CPU ID. */
MRS x20, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x1, x20, #16, #8 // Isolate cluster ID (Aff2 on ARMv8.2 parts)
#endif
UBFX x20, x20, #8, #8 // Isolate core ID (Aff1 on ARMv8.2 parts)
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x1, x20, #8, #8 // Isolate cluster ID (Aff1)
#endif
UBFX x20, x20, #0, #8 // Isolate core ID (Aff0)
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x20, x20, x1, LSL #2 // Calculate CPU ID (cluster*4 + core)
#endif
/* Wait for a thread to execute. */
// do
// {
LDR x1, =_tx_thread_execute_ptr // Address of thread execute ptr
#ifdef TX_ENABLE_WFI
__tx_thread_schedule_loop:
MSR DAIFSet, 0x3 // Lockout interrupts
LDR x0, [x1, x20, LSL #3] // Pickup next thread to execute
CMP x0, #0 // Is it NULL?
BNE _tx_thread_schedule_thread //
MSR DAIFClr, 0x3 // Enable interrupts
WFI // Sleep until an interrupt (e.g. preempt SGI) arrives
B __tx_thread_schedule_loop // Keep looking for a thread
_tx_thread_schedule_thread:
#else
MSR DAIFSet, 0x3 // Lockout interrupts
LDR x0, [x1, x20, LSL #3] // Pickup next thread to execute
CMP x0, #0 // Is it NULL?
BEQ _tx_thread_schedule // Keep looking for a thread
#endif
// }
// while(_tx_thread_execute_ptr == TX_NULL);
/* Get the lock for accessing the thread's ready bit. */
MOV w2, #280 // Build offset to the lock (offset 280 -- keep in sync with TX_THREAD layout)
ADD x2, x0, x2 // Get the address to the lock
LDAXR w3, [x2] // Pickup the lock value
CMP w3, #0 // Check if it's available
BNE _tx_thread_schedule // No, lock not available
MOV w3, #1 // Build the lock set value
STXR w4, w3, [x2] // Try to get the lock
CMP w4, #0 // Check if we got the lock
BNE _tx_thread_schedule // No, another core got it first
DMB ISH // Ensure write to lock completes
/* Now make sure the thread's ready bit is set. */
LDR w3, [x0, #260] // Pickup the thread ready bit
CMP w3, #0 // Is it set?
BNE _tx_thread_ready_for_execution // Yes, schedule the thread
/* The ready bit isn't set. Release the lock and jump back to the scheduler. */
MOV w3, #0 // Build clear value
STR w3, [x2] // Release the lock
DMB ISH // Ensure write to lock completes
B _tx_thread_schedule // Jump back to the scheduler
_tx_thread_ready_for_execution:
/* We have a thread to execute. */
/* Clear the ready bit and release the lock. */
MOV w3, #0 // Build clear value
STR w3, [x0, #260] // Store it back in the thread control block
DMB ISH
MOV w3, #0 // Build clear value for the lock
STR w3, [x2] // Release the lock
DMB ISH
/* Setup the current thread pointer. */
// _tx_thread_current_ptr = _tx_thread_execute_ptr;
LDR x2, =_tx_thread_current_ptr // Pickup address of current thread
STR x0, [x2, x20, LSL #3] // Setup current thread pointer
DMB ISH
LDR x1, [x1, x20, LSL #3] // Reload the execute pointer
CMP x0, x1 // Did it change? (full 64-bit pointer compare;
// was CMP w0, w1, which ignored the upper 32 bits)
BEQ _execute_pointer_did_not_change // If not, skip handling
/* In the time between reading the execute pointer and assigning
it to the current pointer, the execute pointer was changed by
some external code. If the current pointer was still null when
the external code checked if a core preempt was necessary, then
it wouldn't have done it and a preemption will be missed. To
handle this, undo some things and jump back to the scheduler so
it can schedule the new thread. */
MOV w1, #0 // Build clear value (write to w1 zeroes upper bits, so x1 == 0)
STR x1, [x2, x20, LSL #3] // Clear current thread pointer
MOV w1, #1 // Build set value
STR w1, [x0, #260] // Re-set the ready bit
DMB ISH //
B _tx_thread_schedule // Jump back to the scheduler to schedule the new thread
_execute_pointer_did_not_change:
/* Increment the run count for this thread. */
// _tx_thread_current_ptr -> tx_thread_run_count++;
LDR w2, [x0, #4] // Pickup run counter
LDR w3, [x0, #36] // Pickup time-slice for this thread
ADD w2, w2, #1 // Increment thread run-counter
STR w2, [x0, #4] // Store the new run counter
/* Setup time-slice, if present. */
// _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
LDR x2, =_tx_timer_time_slice // Pickup address of time slice
// variable
LDR x4, [x0, #8] // Switch stack pointers (tx_thread_stack_ptr at offset 8)
MOV sp, x4 //
STR w3, [x2, x20, LSL #2] // Setup time-slice
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread entry function to indicate the thread is executing. */
MOV x19, x0 // Save x0 across the call in callee-saved x19
BL _tx_execution_thread_enter // Call the thread execution enter function
MOV x0, x19 // Restore x0
#endif
/* Switch to the thread's stack. */
// sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
/* Determine if an interrupt frame or a synchronous task suspension frame
is present. */
LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR
CMP x5, #0 // Check for synchronous context switch (saved ELR == NULL)
BEQ _tx_solicited_return
#ifdef EL1
MSR SPSR_EL1, x4 // Setup SPSR for return
MSR ELR_EL1, x5 // Setup point of interrupt
#else
#ifdef EL2
MSR SPSR_EL2, x4 // Setup SPSR for return
MSR ELR_EL2, x5 // Setup point of interrupt
#else
MSR SPSR_EL3, x4 // Setup SPSR for return
MSR ELR_EL3, x5 // Setup point of interrupt
#endif
#endif
#ifdef ENABLE_ARM_FP
LDR w1, [x0, #268] // Pickup FP enable flag
CMP w1, #0 // Is FP enabled?
BEQ _skip_interrupt_fp_restore // No, skip FP restore
LDP x0, x1, [sp], #16 // Pickup FPSR, FPCR
MSR FPSR, x0 // Recover FPSR
MSR FPCR, x1 // Recover FPCR
LDP q30, q31, [sp], #32 // Recover q30, q31
LDP q28, q29, [sp], #32 // Recover q28, q29
LDP q26, q27, [sp], #32 // Recover q26, q27
LDP q24, q25, [sp], #32 // Recover q24, q25
LDP q22, q23, [sp], #32 // Recover q22, q23
LDP q20, q21, [sp], #32 // Recover q20, q21
LDP q18, q19, [sp], #32 // Recover q18, q19
LDP q16, q17, [sp], #32 // Recover q16, q17
LDP q14, q15, [sp], #32 // Recover q14, q15
LDP q12, q13, [sp], #32 // Recover q12, q13
LDP q10, q11, [sp], #32 // Recover q10, q11
LDP q8, q9, [sp], #32 // Recover q8, q9
LDP q6, q7, [sp], #32 // Recover q6, q7
LDP q4, q5, [sp], #32 // Recover q4, q5
LDP q2, q3, [sp], #32 // Recover q2, q3
LDP q0, q1, [sp], #32 // Recover q0, q1
_skip_interrupt_fp_restore:
#endif
LDP x28, x29, [sp], #16 // Recover x28, x29
LDP x26, x27, [sp], #16 // Recover x26, x27
LDP x24, x25, [sp], #16 // Recover x24, x25
LDP x22, x23, [sp], #16 // Recover x22, x23
LDP x20, x21, [sp], #16 // Recover x20, x21
LDP x18, x19, [sp], #16 // Recover x18, x19
LDP x16, x17, [sp], #16 // Recover x16, x17
LDP x14, x15, [sp], #16 // Recover x14, x15
LDP x12, x13, [sp], #16 // Recover x12, x13
LDP x10, x11, [sp], #16 // Recover x10, x11
LDP x8, x9, [sp], #16 // Recover x8, x9
LDP x6, x7, [sp], #16 // Recover x6, x7
LDP x4, x5, [sp], #16 // Recover x4, x5
LDP x2, x3, [sp], #16 // Recover x2, x3
LDP x0, x1, [sp], #16 // Recover x0, x1
LDP x29, x30, [sp], #16 // Recover x29, x30
ERET // Return to point of interrupt
_tx_solicited_return:
/* Solicited (synchronous) suspension frame: only callee-saved state
   was stashed by _tx_thread_system_return; x4 still holds saved DAIF. */
#ifdef ENABLE_ARM_FP
LDR w1, [x0, #268] // Pickup FP enable flag
CMP w1, #0 // Is FP enabled?
BEQ _skip_solicited_fp_restore // No, skip FP restore
LDP x0, x1, [sp], #16 // Pickup FPSR, FPCR
MSR FPSR, x0 // Recover FPSR
MSR FPCR, x1 // Recover FPCR
LDP q14, q15, [sp], #32 // Recover q14, q15
LDP q12, q13, [sp], #32 // Recover q12, q13
LDP q10, q11, [sp], #32 // Recover q10, q11
LDP q8, q9, [sp], #32 // Recover q8, q9
_skip_solicited_fp_restore:
#endif
LDP x27, x28, [sp], #16 // Recover x27, x28
LDP x25, x26, [sp], #16 // Recover x25, x26
LDP x23, x24, [sp], #16 // Recover x23, x24
LDP x21, x22, [sp], #16 // Recover x21, x22
LDP x19, x20, [sp], #16 // Recover x19, x20
LDP x29, x30, [sp], #16 // Recover x29, x30
MSR DAIF, x4 // Recover DAIF
RET // Return to caller
// }
// }

View File

@@ -0,0 +1,90 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread - Low Level SMP Support */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
.text
.align 3
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_core_get Cortex-A35-SMP/AC6 */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function gets the currently running core number and returns it.*/
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* Core ID */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ThreadX Source */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
.global _tx_thread_smp_core_get
.type _tx_thread_smp_core_get, @function
/* UINT _tx_thread_smp_core_get(VOID)
   Out:    x0 = cluster-adjusted CPU index of the calling core
   Clobbers: x1 (when TX_THREAD_SMP_CLUSTERS > 1), flags (ADDS). */
_tx_thread_smp_core_get:
MRS x0, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x1, x0, #16, #8 // Isolate cluster ID (Aff2 on ARMv8.2 parts)
#endif
UBFX x0, x0, #8, #8 // Isolate core ID (Aff1 on ARMv8.2 parts)
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x1, x0, #8, #8 // Isolate cluster ID (Aff1)
#endif
UBFX x0, x0, #0, #8 // Isolate core ID (Aff0)
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x0, x0, x1, LSL #2 // Calculate CPU ID as cluster*4 + core
// (assumes <= 4 cores per cluster -- confirm for target)
#endif
RET

View File

@@ -0,0 +1,91 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread - Low Level SMP Support */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
#define ICC_SGI1R_EL1 S3_0_C12_C11_5
.text
.align 3
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_core_preempt Cortex-A35-SMP */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function preempts the specified core in situations where the */
/* thread corresponding to this core is no longer ready or when the */
/* core must be used for a higher-priority thread. If the specified core */
/* is the current core, this processing is skipped since the core will */
/* give up control subsequently on its own. */
/* */
/* INPUT */
/* */
/* core The core to preempt */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ThreadX Source */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
.global _tx_thread_smp_core_preempt
.type _tx_thread_smp_core_preempt, @function
/* VOID _tx_thread_smp_core_preempt(UINT core)
   In:     x0 = index of the core to preempt
   Sends SGI 0 (aff0 target list / affinity fields in ICC_SGI1R_EL1,
   encoded above as S3_0_C12_C11_5) to interrupt the target core.
   Clobbers: x2 (and x3 when TX_ARMV8_2). */
_tx_thread_smp_core_preempt:
DSB ISH // Ensure prior shared-memory updates are visible before signaling
#ifdef TX_ARMV8_2
MOV x2, #0x1 // Build the target list field (target core 0 within the affinity group)
LSL x3, x0, #16 // Build the affinity1 field from the core index
ORR x2, x2, x3 // Combine the fields
#else
MOV x2, #0x1 // Single-core target bit
LSL x2, x2, x0 // Shift by the core ID to select it in the target list
#endif
MSR ICC_SGI1R_EL1, x2 // Issue inter-core interrupt
RET

View File

@@ -0,0 +1,95 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread - Low Level SMP Support */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
.text
.align 3
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_current_state_get Cortex-A35-SMP/AC6 */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/*    This function gets the current state of the calling core.           */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ThreadX Components */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
// UINT _tx_thread_smp_current_state_get(VOID)
// Out: w0 = system state of the calling core
// Interrupts are masked around the read so the CPU-ID lookup and the state
// load cannot be separated by a context switch.
.global _tx_thread_smp_current_state_get
.type _tx_thread_smp_current_state_get, @function
_tx_thread_smp_current_state_get:
MRS x1, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x3, x2, #16, #8 // Isolate cluster ID (MPIDR Aff2)
#endif
UBFX x2, x2, #8, #8 // Isolate core ID (MPIDR Aff1)
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x3, x2, #8, #8 // Isolate cluster ID (MPIDR Aff1)
#endif
UBFX x2, x2, #0, #8 // Isolate core ID (MPIDR Aff0)
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x2, x2, x3, LSL #2 // Calculate CPU ID (assumes 4 cores per cluster -- TODO confirm)
#endif
LDR x3, =_tx_thread_system_state // Pickup the base of the current system state array
LDR w0, [x3, x2, LSL #2] // Pickup the current system state for this core (32-bit entries)
MSR DAIF, x1 // Restore interrupt posture
RET

View File

@@ -0,0 +1,95 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread - Low Level SMP Support */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
.text
.align 3
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_current_thread_get Cortex-A35-SMP/AC6 */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/*    This function gets the current thread of the calling core.          */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ThreadX Components */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
// TX_THREAD *_tx_thread_smp_current_thread_get(VOID)
// Out: x0 = current thread pointer of the calling core
// Interrupts are masked around the read so the CPU-ID lookup and the pointer
// load cannot be separated by a context switch.
.global _tx_thread_smp_current_thread_get
.type _tx_thread_smp_current_thread_get, @function
_tx_thread_smp_current_thread_get:
MRS x1, DAIF // Pickup current interrupt posture
MSR DAIFSet, 0x3 // Lockout interrupts
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x3, x2, #16, #8 // Isolate cluster ID (MPIDR Aff2)
#endif
UBFX x2, x2, #8, #8 // Isolate core ID (MPIDR Aff1)
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x3, x2, #8, #8 // Isolate cluster ID (MPIDR Aff1)
#endif
UBFX x2, x2, #0, #8 // Isolate core ID (MPIDR Aff0)
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x2, x2, x3, LSL #2 // Calculate CPU ID (assumes 4 cores per cluster -- TODO confirm)
#endif
LDR x3, =_tx_thread_current_ptr // Pickup the base of the current thread pointer array
LDR x0, [x3, x2, LSL #3] // Pickup the current thread pointer for this core (8-byte entries)
MSR DAIF, x1 // Restore interrupt posture
RET

View File

@@ -0,0 +1,144 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread - Low Level SMP Support */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
.text
.align 3
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_initialize_wait Cortex-A35-SMP/AC6 */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is the place where additional cores wait until */
/* initialization is complete before they enter the thread scheduling */
/* loop. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_thread_schedule Thread scheduling loop */
/* */
/* CALLED BY */
/* */
/* Hardware */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
// VOID _tx_thread_smp_initialize_wait(VOID)
// Entry point for secondary (non-boot) cores: spin until core 0 sets the
// release flag and completes its own initialization, record this core's
// system stack pointer, then enter the thread scheduling loop.
.global _tx_thread_smp_initialize_wait
.type _tx_thread_smp_initialize_wait, @function
_tx_thread_smp_initialize_wait:
/* Lockout interrupts. */
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the Core ID. */
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x3, x2, #16, #8 // Isolate cluster ID (MPIDR Aff2)
#endif
UBFX x2, x2, #8, #8 // Isolate core ID (MPIDR Aff1)
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x3, x2, #8, #8 // Isolate cluster ID (MPIDR Aff1)
#endif
UBFX x2, x2, #0, #8 // Isolate core ID (MPIDR Aff0)
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x2, x2, x3, LSL #2 // Calculate CPU ID (assumes 4 cores per cluster -- TODO confirm)
#endif
/* Make sure the system state for this core is TX_INITIALIZE_IN_PROGRESS before we check the release
flag. */
LDR w1, =0xF0F0F0F0 // Build TX_INITIALIZE_IN_PROGRESS flag
LDR x3, =_tx_thread_system_state // Pickup the base of the current system state array
wait_for_initialize:
LDR w0, [x3, x2, LSL #2] // Pickup the current system state for this core
CMP w0, w1 // Make sure the TX_INITIALIZE_IN_PROGRESS flag is set
BNE wait_for_initialize // Not equal, just spin here
/* Save the system stack pointer for this core. */
LDR x0, =_tx_thread_system_stack_ptr // Pickup address of system stack ptr
MOV x1, sp // Pickup SP
SUB x1, x1, #15 // Back off before rounding down below
BIC x1, x1, #0xF // Get 16-byte alignment
STR x1, [x0, x2, LSL #3] // Store system stack pointer
/* Pickup the release cores flag. */
LDR x4, =_tx_thread_smp_release_cores_flag // Build address of release cores flag
wait_for_release:
LDR w0, [x4, #0] // Pickup the flag
CMP w0, #0 // Is it set?
BEQ wait_for_release // Wait for the flag to be set
/* Core 0 has released this core. */
/* Clear this core's system state variable. */
MOV x0, #0 // Build clear value
STR w0, [x3, x2, LSL #2] // Set the current system state for this core to zero
/* Now wait for core 0 to finish its initialization. */
core_0_wait_loop:
LDR w0, [x3, #0] // Pickup the current system state for core 0
CMP w0, #0 // Is it 0?
BNE core_0_wait_loop // No, keep waiting for core 0 to finish its initialization
/* Initialization is complete, enter the scheduling loop! */
B _tx_thread_schedule // Enter the scheduling loop for this core
RET // Never reached -- the scheduling loop does not return

View File

@@ -0,0 +1,75 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread - Low Level SMP Support */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
.text
.align 3
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_low_level_initialize Cortex-A35-SMP/AC6 */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function performs low-level initialization of the booting */
/* core. */
/* */
/* INPUT */
/* */
/* number_of_cores Number of cores */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_high_level ThreadX high-level init */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* resulting in version 6.1.10 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
// VOID _tx_thread_smp_low_level_initialize(UINT number_of_cores)
// In:  x0 = number of cores (unused here)
// Intentionally a no-op in this port: no additional platform-specific SMP
// setup is performed for the booting core.
.global _tx_thread_smp_low_level_initialize
.type _tx_thread_smp_low_level_initialize, @function
_tx_thread_smp_low_level_initialize:
RET

View File

@@ -0,0 +1,137 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread - Low Level SMP Support */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
/* Include macros for modifying the wait list. */
#include "tx_thread_smp_protection_wait_list_macros.h"
.text
.align 3
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function gets protection for running inside the ThreadX */
/*    source.  This is accomplished by a combination of a test-and-set    */
/* flag and periodically disabling interrupts. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* Previous Status Register */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ThreadX Source */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* improved SMP code, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
// UINT _tx_thread_smp_protect(VOID)
// Out: x0 = previous interrupt posture (DAIF), to be passed back to
//           _tx_thread_smp_unprotect when releasing the protection.
// Acquires the global ThreadX protection spin-lock for this core using an
// LDAXR/STXR exclusive pair; re-entry by the owning core just bumps the count.
.global _tx_thread_smp_protect
.type _tx_thread_smp_protect, @function
_tx_thread_smp_protect:
/* Disable interrupts so we don't get preempted. */
MRS x0, DAIF // Pickup current interrupt posture (returned to caller)
MSR DAIFSet, 0x3 // Lockout interrupts
/* Pickup the CPU ID. */
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x2, #16, #8 // Isolate cluster ID (MPIDR Aff2)
#endif
UBFX x2, x2, #8, #8 // Isolate core ID (MPIDR Aff1)
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x7, x2, #8, #8 // Isolate cluster ID (MPIDR Aff1)
#endif
UBFX x2, x2, #0, #8 // Isolate core ID (MPIDR Aff0)
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x2, x2, x7, LSL #2 // Calculate CPU ID (assumes 4 cores per cluster -- TODO confirm)
#endif
LDR x1, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x1, #4] // Pickup the owning core (structure offset 4)
CMP w3, w2 // Is it this core?
BEQ _owned // Yes, the protection is already owned (recursive entry)
LDAXR w4, [x1, #0] // Pickup the protection flag (load-acquire exclusive)
CBZ w4, _get_protection // Flag clear -- try to get the protection
MSR DAIF, x0 // Restore interrupts
ISB // Ensure the interrupt unmask takes effect before waiting
#ifdef TX_ENABLE_WFE
WFE // Go into standby until the owner issues SEV
#endif
B _tx_thread_smp_protect // On waking, restart the protection attempt
_get_protection:
MOV x4, #1 // Build lock value
STXR w5, w4, [x1] // Attempt to get the protection (store-exclusive)
CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Exclusive store failed -- restore interrupts
B _tx_thread_smp_protect // Restart the protection attempt
_got_protection:
DMB ISH // Ensure the lock acquisition is visible before touching owned fields
STR w2, [x1, #4] // Save owning core
_owned:
LDR w5, [x1, #8] // Pickup ownership count (structure offset 8)
ADD w5, w5, #1 // Increment ownership count
STR w5, [x1, #8] // Store ownership count
DMB ISH // Publish the updated count
RET

View File

@@ -0,0 +1,302 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread - Low Level SMP Support */
/** */
/**************************************************************************/
/**************************************************************************/
.macro _tx_thread_smp_protect_lock_got
/* Record ownership after the protection lock has been acquired.
   Caller context: w1 = this core's ID, x2 = &_tx_thread_smp_protection
   (per the pseudo-code below -- TODO confirm against callers). Clobbers w3. */
/* Set the currently owned core. */
// _tx_thread_smp_protection.tx_thread_smp_protect_core = this_core;
STR w1, [x2, #4] // Store this core
/* Increment the protection count. */
// _tx_thread_smp_protection.tx_thread_smp_protect_count++;
LDR w3, [x2, #8] // Pickup ownership count
ADD w3, w3, #1 // Increment ownership count
STR w3, [x2, #8] // Store ownership count
DMB ISH // Publish the ownership updates to other cores
.endm
.macro _tx_thread_smp_protect_remove_from_front_of_list
/* Pop the head entry of the protection wait list (presumably this core --
   the macro removes whatever is at the head; TODO confirm caller contract).
   Clobbers w3, x4, w5, x6. */
/* Remove ourselves from the list. */
// _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head++] = 0xFFFFFFFF;
MOV w3, #0xFFFFFFFF // Build the invalid core value
LDR x4, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
LDR w5, [x4] // Get the value of the head
LDR x6, =_tx_thread_smp_protect_wait_list // Get the address of the list
STR w3, [x6, x5, LSL #2] // Store the invalid core value
ADD w5, w5, #1 // Increment the head
/* Did we wrap? */
// if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_size)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_size // Load address of core list size
LDR w3, [x3] // Load the list size value
CMP w5, w3 // Compare the head to it
BNE _store_new_head\@ // Not at the end -- keep the incremented head
// _tx_thread_smp_protect_wait_list_head = 0;
EOR w5, w5, w5 // We're at the end. Wrap the head back to zero
// }
_store_new_head\@:
STR w5, [x4] // Store the new head
/* We have the lock! */
DMB ISH // Ensure write to protection finishes
// return;
.endm
.macro _tx_thread_smp_protect_wait_list_lock_get
// VOID _tx_thread_smp_protect_wait_list_lock_get()
// {
/* Spin until the wait-list lock is acquired via an LDAXR/STXR exclusive
   pair. Clobbers x1, w2, w3. */
/* We do this until we have the lock. */
// while (1)
// {
_tx_thread_smp_protect_wait_list_lock_get__try_to_get_lock\@:
// Is the list lock available?
// _tx_thread_smp_protect_wait_list_lock_protect_in_force = load_exclusive(&_tx_thread_smp_protect_wait_list_lock_protect_in_force);
LDR x1, =_tx_thread_smp_protect_wait_list_lock_protect_in_force
LDAXR w2, [x1] // Pickup the protection flag (load-acquire exclusive)
// if (protect_in_force == 0)
// {
CMP w2, #0
BNE _tx_thread_smp_protect_wait_list_lock_get__try_to_get_lock\@ // No, protection not available
/* Try to get the list. */
// int status = store_exclusive(&_tx_thread_smp_protect_wait_list_lock_protect_in_force, 1);
MOV w2, #1 // Build lock value
STXR w3, w2, [x1] // Attempt to get the protection (store-exclusive)
/* if (status == SUCCESS) */
CMP w3, #0
BNE _tx_thread_smp_protect_wait_list_lock_get__try_to_get_lock\@ // Did it fail? If so, try again.
/* We have the lock! */
// return;
.endm
.macro _tx_thread_smp_protect_wait_list_add
// VOID _tx_thread_smp_protect_wait_list_add(UINT new_core)
// {
/* Append w1 (new_core, per the pseudo-code below) at the tail of the
   circular wait list, under the wait-list lock. Clobbers x3-x5 and uses the
   stack to preserve x1/x2 across the lock get (which clobbers them). */
/* We're about to modify the list, so get the list lock. */
// _tx_thread_smp_protect_wait_list_lock_get();
STP x1, x2, [sp, #-16]! // Save registers we'll be using (lock get clobbers x1, w2)
_tx_thread_smp_protect_wait_list_lock_get
LDP x1, x2, [sp], #16 // Restore saved registers
/* Add this core. */
// _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_tail++] = new_core;
LDR x3, =_tx_thread_smp_protect_wait_list_tail // Get the address of the tail
LDR w4, [x3] // Get the value of tail
LDR x5, =_tx_thread_smp_protect_wait_list // Get the address of the list
STR w1, [x5, x4, LSL #2] // Store the new core value
ADD w4, w4, #1 // Increment the tail
/* Did we wrap? */
// if (_tx_thread_smp_protect_wait_list_tail == _tx_thread_smp_protect_wait_list_size)
// {
LDR x5, =_tx_thread_smp_protect_wait_list_size // Load list size address
LDR w5, [x5] // Load list size value
CMP w4, w5 // Compare list size to tail
BNE _tx_thread_smp_protect_wait_list_add__no_wrap\@ // Did we wrap?
// _tx_thread_smp_protect_wait_list_tail = 0;
MOV w4, #0 // Wrap the tail back to zero
// }
_tx_thread_smp_protect_wait_list_add__no_wrap\@:
STR w4, [x3] // Store the new tail value.
DMB ISH // Ensure that accesses to shared resource have completed
/* Release the list lock. */
// _tx_thread_smp_protect_wait_list_lock_protect_in_force = 0;
MOV w3, #0 // Build lock value
LDR x4, =_tx_thread_smp_protect_wait_list_lock_protect_in_force
STR w3, [x4] // Store the new value
DMB ISH // Ensure write to protection finishes
.endm
.macro _tx_thread_smp_protect_wait_list_remove
// VOID _tx_thread_smp_protect_wait_list_remove(UINT core)
// {
/* Remove the entry for w8 (core, per the compare/decrement below) from the
   circular wait list by shifting later entries down, then decrement the
   tail and this core's wait count. Assumes the core is present in the list
   (the search loop has no bound) -- TODO confirm caller guarantees this.
   Clobbers x1-x6 temporaries; preserves x1 around the lock get. */
/* Get the core index. */
// UINT core_index;
// for (core_index = 0;; core_index++)
EOR w4, w4, w4 // Clear for 'core_index'
LDR x2, =_tx_thread_smp_protect_wait_list // Get the address of the list
// {
_tx_thread_smp_protect_wait_list_remove__check_cur_core\@:
/* Is this the core? */
// if (_tx_thread_smp_protect_wait_list[core_index] == core)
// {
// break;
LDR w3, [x2, x4, LSL #2] // Get the value at the current index
CMP w3, w8 // Did we find the core?
BEQ _tx_thread_smp_protect_wait_list_remove__found_core\@
// }
ADD w4, w4, #1 // Increment cur index
B _tx_thread_smp_protect_wait_list_remove__check_cur_core\@ // Restart the loop
// }
_tx_thread_smp_protect_wait_list_remove__found_core\@:
/* We're about to modify the list. Get the lock. We need the lock because another
core could be simultaneously adding (a core is simultaneously trying to get
the inter-core lock) or removing (a core is simultaneously being preempted,
like what is currently happening). */
// _tx_thread_smp_protect_wait_list_lock_get();
MOV x6, x1 // Preserve x1 (the lock get clobbers it)
_tx_thread_smp_protect_wait_list_lock_get
MOV x1, x6 // Restore x1
/* We remove by shifting. */
// while (core_index != _tx_thread_smp_protect_wait_list_tail)
// {
_tx_thread_smp_protect_wait_list_remove__compare_index_to_tail\@:
LDR x2, =_tx_thread_smp_protect_wait_list_tail // Load tail address
LDR w2, [x2] // Load tail value
CMP w4, w2 // Compare cur index and tail
BEQ _tx_thread_smp_protect_wait_list_remove__removed\@
// UINT next_index = core_index + 1;
MOV w2, w4 // Move current index to next index register
ADD w2, w2, #1 // Add 1
// if (next_index == _tx_thread_smp_protect_wait_list_size)
// {
LDR x3, =_tx_thread_smp_protect_wait_list_size
LDR w3, [x3]
CMP w2, w3
BNE _tx_thread_smp_protect_wait_list_remove__next_index_no_wrap\@
// next_index = 0;
MOV w2, #0 // Wrap next index back to zero
// }
_tx_thread_smp_protect_wait_list_remove__next_index_no_wrap\@:
// list_cores[core_index] = list_cores[next_index];
LDR x5, =_tx_thread_smp_protect_wait_list // Get the address of the list
LDR w3, [x5, x2, LSL #2] // Get the value at the next index
STR w3, [x5, x4, LSL #2] // Store the value at the current index
// core_index = next_index;
MOV w4, w2
B _tx_thread_smp_protect_wait_list_remove__compare_index_to_tail\@
// }
_tx_thread_smp_protect_wait_list_remove__removed\@:
/* Now update the tail. */
// if (_tx_thread_smp_protect_wait_list_tail == 0)
// {
LDR x5, =_tx_thread_smp_protect_wait_list_tail // Load tail address
LDR w4, [x5] // Load tail value
CMP w4, #0
BNE _tx_thread_smp_protect_wait_list_remove__tail_not_zero\@
// _tx_thread_smp_protect_wait_list_tail = _tx_thread_smp_protect_wait_list_size;
LDR x2, =_tx_thread_smp_protect_wait_list_size
LDR w4, [x2]
// }
_tx_thread_smp_protect_wait_list_remove__tail_not_zero\@:
// _tx_thread_smp_protect_wait_list_tail--;
SUB w4, w4, #1
STR w4, [x5] // Store new tail value
DMB ISH // Ensure that accesses to shared resource have completed
/* Release the list lock. */
// _tx_thread_smp_protect_wait_list_lock_protect_in_force = 0;
MOV w2, #0 // Build lock value
LDR x4, =_tx_thread_smp_protect_wait_list_lock_protect_in_force // Load lock address
STR w2, [x4] // Store the new value
DMB ISH // Ensure write to protection finishes
/* We're no longer waiting. Note that this should be zero since, again,
this function is only called when a thread preemption is occurring. */
// _tx_thread_smp_protect_wait_counts[core]--;
LDR x4, =_tx_thread_smp_protect_wait_counts // Load wait list counts
LDR w2, [x4, x8, LSL #2] // Load waiting value
SUB w2, w2, #1 // Subtract 1
STR w2, [x4, x8, LSL #2] // Store new waiting value
.endm

View File

@@ -0,0 +1,75 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread - Low Level SMP Support */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
.text
.align 3
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_time_get Cortex-A35-SMP/AC6 */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function gets the global time value that is used for debug */
/* information and event tracing. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* 32-bit time stamp */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ThreadX Source */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* resulting in version 6.1.10 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
// ULONG _tx_thread_smp_time_get(VOID)
// Out: x0 = time stamp for debug/event tracing.
// No hardware timer source is wired up in this port yet (FIXME), so a
// constant zero is returned.
.global _tx_thread_smp_time_get
.type _tx_thread_smp_time_get, @function
_tx_thread_smp_time_get:
MOV x0, xzr // FIXME: read a real timer; zero until one is hooked up
RET

View File

@@ -0,0 +1,129 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread - Low Level SMP Support */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
.text
.align 3
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_unprotect Cortex-A35-SMP/AC6 */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function releases previously obtained protection. The supplied */
/* previous SR is restored. If the value of _tx_thread_system_state */
/* and _tx_thread_preempt_disable are both zero, then multithreading */
/* is enabled as well. */
/* */
/* INPUT */
/* */
/* Previous Status Register */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ThreadX Source */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* 04-25-2022 William E. Lamie Modified comments, removed */
/* FIFO queueing, */
/* resulting in version 6.1.11 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
// VOID _tx_thread_smp_unprotect(UINT previous_sr)
// In:  x0 = previous interrupt posture returned by _tx_thread_smp_protect
// Decrements the ownership count of the ThreadX protection; when it reaches
// zero (and preemption is not disabled) the lock itself is released. The
// caller's interrupt posture is restored on exit.
.global _tx_thread_smp_unprotect
.type _tx_thread_smp_unprotect, @function
_tx_thread_smp_unprotect:
MSR DAIFSet, 0x3 // Lockout interrupts
MRS x1, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x2, x1, #16, #8 // Isolate cluster ID (MPIDR Aff2)
#endif
UBFX x1, x1, #8, #8 // Isolate core ID (MPIDR Aff1)
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x2, x1, #8, #8 // Isolate cluster ID (MPIDR Aff1)
#endif
UBFX x1, x1, #0, #8 // Isolate core ID (MPIDR Aff0)
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x1, x1, x2, LSL #2 // Calculate CPU ID (assumes 4 cores per cluster -- TODO confirm)
#endif
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
LDR w3, [x2, #4] // Pickup the owning core (structure offset 4)
CMP w1, w3 // Is it this core?
BNE _still_protected // If this is not the owning core, protection is in force elsewhere
LDR w3, [x2, #8] // Pickup the protection count (structure offset 8)
CMP w3, #0 // Check to see if the protection is still active
BEQ _still_protected // If the protection count is zero, protection has already been cleared
SUB w3, w3, #1 // Decrement the protection count
STR w3, [x2, #8] // Store the new count back
CMP w3, #0 // Check to see if the protection is still active
BNE _still_protected // If the protection count is non-zero, protection is still in force
LDR x2,=_tx_thread_preempt_disable // Build address of preempt disable flag
LDR w3, [x2] // Pickup preempt disable flag
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
DMB ISH // Ensure that accesses to shared resource have completed
MOV w3, #0 // Build release protection value
STR w3, [x2, #0] // Release the protection
DSB ISH // To ensure update of the protection occurs before other CPUs awake
_still_protected:
#ifdef TX_ENABLE_WFE
SEV // Send event to other CPUs, wakes anyone waiting on the protection (using WFE)
#endif
MSR DAIF, x0 // Restore interrupt posture
RET

View File

@@ -0,0 +1,165 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
.text
.align 3
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_stack_build ARMv8-A-SMP */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function builds a stack frame on the supplied thread's stack. */
/* The stack frame results in a fake interrupt return to the supplied */
/* function pointer. */
/* */
/* INPUT */
/* */
/* thread_ptr Pointer to thread */
/* function_ptr Pointer to entry function */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_thread_create Create thread service */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* resulting in version 6.1.10 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
// VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
// {
//   In (AAPCS64): x0 = thread_ptr, x1 = function_ptr
//   Out: thread_ptr's stack pointer field is set to the newly built frame
.global _tx_thread_stack_build
.type _tx_thread_stack_build, @function
_tx_thread_stack_build:
/* Build a fake interrupt frame on the supplied thread's stack; the first
context restore of this thread "returns" through the saved ELR into
function_ptr. On ARMv8-A the frame should look like this:
Stack Top: SPSR Initial SPSR
ELR Point of interrupt (thread entry function)
x28 Initial value for x28
not used Not used (pad slot, keeps 16-byte STP pairing)
x26 Initial value for x26
x27 Initial value for x27
x24 Initial value for x24
x25 Initial value for x25
x22 Initial value for x22
x23 Initial value for x23
x20 Initial value for x20
x21 Initial value for x21
x18 Initial value for x18
x19 Initial value for x19
x16 Initial value for x16
x17 Initial value for x17
x14 Initial value for x14
x15 Initial value for x15
x12 Initial value for x12
x13 Initial value for x13
x10 Initial value for x10
x11 Initial value for x11
x8 Initial value for x8
x9 Initial value for x9
x6 Initial value for x6
x7 Initial value for x7
x4 Initial value for x4
x5 Initial value for x5
x2 Initial value for x2
x3 Initial value for x3
x0 Initial value for x0
x1 Initial value for x1
x29 Initial value for x29 (frame pointer)
x30 Initial value for x30 (link register)
0 For stack backtracing
Stack Bottom: (higher memory address) */
LDR x4, [x0, #24] // Pickup end of stack area
BIC x4, x4, #0xF // Ensure 16-byte alignment (AArch64 SP requirement)
/* Actually build the stack frame. All general registers start as zero. */
MOV x2, #0 // Build clear value
MOV x3, #0 //
STP x2, x3, [x4, #-16]! // Set backtrace to 0
STP x2, x3, [x4, #-16]! // Set initial x29, x30
STP x2, x3, [x4, #-16]! // Set initial x0, x1
STP x2, x3, [x4, #-16]! // Set initial x2, x3
STP x2, x3, [x4, #-16]! // Set initial x4, x5
STP x2, x3, [x4, #-16]! // Set initial x6, x7
STP x2, x3, [x4, #-16]! // Set initial x8, x9
STP x2, x3, [x4, #-16]! // Set initial x10, x11
STP x2, x3, [x4, #-16]! // Set initial x12, x13
STP x2, x3, [x4, #-16]! // Set initial x14, x15
STP x2, x3, [x4, #-16]! // Set initial x16, x17
STP x2, x3, [x4, #-16]! // Set initial x18, x19
STP x2, x3, [x4, #-16]! // Set initial x20, x21
STP x2, x3, [x4, #-16]! // Set initial x22, x23
STP x2, x3, [x4, #-16]! // Set initial x24, x25
STP x2, x3, [x4, #-16]! // Set initial x26, x27
STP x2, x3, [x4, #-16]! // Set initial x28 (and unused pad slot)
#ifdef EL1
MOV x2, #0x4 // Build initial SPSR (EL1)
#else
#ifdef EL2
MOV x2, #0x8 // Build initial SPSR (EL2)
#else
MOV x2, #0xC // Build initial SPSR (EL3)
#endif
#endif
MOV x3, x1 // Build initial ELR = thread entry point
STP x2, x3, [x4, #-16]! // Set initial SPSR & ELR
/* Setup stack pointer. */
// thread_ptr -> tx_thread_stack_ptr = x4;
STR x4, [x0, #8] // Save stack pointer in thread's control block
MOV x3, #1 // Build ready flag
STR w3, [x0, #260] // Set ready flag
RET // Return to caller
// }

View File

@@ -0,0 +1,194 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
.text
.align 3
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_system_return ARMv8-A-SMP */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is target processor specific. It is used to transfer */
/* control from a thread back to the ThreadX system. Only a */
/* minimal context is saved since the compiler assumes temp registers */
/* are going to get clobbered by a function call anyway. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_thread_schedule Thread scheduling loop */
/* */
/* CALLED BY */
/* */
/* ThreadX components */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 01-31-2022 Andres Mlinar Updated comments, */
/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
// VOID _tx_thread_system_return(VOID)
// {
.global _tx_thread_system_return
.type _tx_thread_system_return, @function
_tx_thread_system_return:
/* Save minimal context on the stack: only the AAPCS64 callee-saved
registers, since the compiler treats the temporaries as clobbered
by any call. */
MRS x0, DAIF // Pickup DAIF
MSR DAIFSet, 0x3 // Lockout interrupts (mask IRQ and FIQ)
STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
STP x19, x20, [sp, #-16]! // Save x19, x20
STP x21, x22, [sp, #-16]! // Save x21, x22
STP x23, x24, [sp, #-16]! // Save x23, x24
STP x25, x26, [sp, #-16]! // Save x25, x26
STP x27, x28, [sp, #-16]! // Save x27, x28
MRS x8, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x3, x8, #16, #8 // Isolate cluster ID (MPIDR Aff2 on ARMv8.2)
#endif
UBFX x8, x8, #8, #8 // Isolate core ID (MPIDR Aff1 on ARMv8.2)
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x3, x8, #8, #8 // Isolate cluster ID (MPIDR Aff1)
#endif
UBFX x8, x8, #0, #8 // Isolate core ID (MPIDR Aff0)
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x8, x8, x3, LSL #2 // Calculate CPU ID = core + cluster*4 (assumes <= 4 cores per cluster)
#endif
LDR x5, =_tx_thread_current_ptr // Pickup address of current ptr
LDR x6, [x5, x8, LSL #3] // Pickup current thread pointer for this core
#ifdef ENABLE_ARM_FP
LDR w7, [x6, #268] // Pickup FP enable flag
CMP w7, #0 // Is FP enabled?
BEQ _skip_fp_save // No, skip FP save
STP q8, q9, [sp, #-32]! // Save q8, q9 (callee-saved SIMD registers)
STP q10, q11, [sp, #-32]! // Save q10, q11
STP q12, q13, [sp, #-32]! // Save q12, q13
STP q14, q15, [sp, #-32]! // Save q14, q15
MRS x2, FPSR // Pickup FPSR
MRS x3, FPCR // Pickup FPCR
STP x2, x3, [sp, #-16]! // Save FPSR, FPCR
_skip_fp_save:
#endif
MOV x1, #0 // Clear x1
STP x0, x1, [sp, #-16]! // Save DAIF and clear value for ELR_EL1
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
MOV x19, x5 // Save x5 (current ptr address) across the call
MOV x20, x6 // Save x6 (current thread pointer)
MOV x21, x8 // Save x8 (CPU ID)
BL _tx_execution_thread_exit // Call the thread exit function
MOV x8, x21 // Restore x8
MOV x5, x19 // Restore x5
MOV x6, x20 // Restore x6
#endif
LDR x2, =_tx_timer_time_slice // Pickup address of time slice
LDR w1, [x2, x8, LSL #2] // Pickup current time slice for this core
/* Save current stack and switch to system stack. */
// _tx_thread_current_ptr[core] -> tx_thread_stack_ptr = sp;
// sp = _tx_thread_system_stack_ptr[core];
MOV x4, sp //
STR x4, [x6, #8] // Save thread stack pointer
LDR x3, =_tx_thread_system_stack_ptr // Pickup address of system stack
LDR x4, [x3, x8, LSL #3] // Pickup system stack pointer
MOV sp, x4 // Setup system stack pointer
/* Determine if the time-slice is active. */
// if (_tx_timer_time_slice[core])
// {
MOV x4, #0 // Build clear value
CMP w1, #0 // Is a time-slice active?
BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
/* Save the current remaining time-slice. */
// _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
// _tx_timer_time_slice = 0;
STR w4, [x2, x8, LSL #2] // Clear time-slice
STR w1, [x6, #36] // Store current time-slice
// }
__tx_thread_dont_save_ts:
/* Clear the current thread pointer. */
// _tx_thread_current_ptr = TX_NULL;
STR x4, [x5, x8, LSL #3] // Clear current thread pointer
/* Set ready bit in thread control block. */
MOV x3, #1 // Build ready value
STR w3, [x6, #260] // Make the thread ready
DMB ISH // Ensure ready bit is visible before protection is released
/* Now clear protection. It is assumed that protection is in force whenever this routine is called. */
LDR x3, =_tx_thread_smp_protection // Pickup address of protection structure
LDR x1, =_tx_thread_preempt_disable // Build address to preempt disable flag
STR w4, [x1, #0] // Clear preempt disable flag
STR w4, [x3, #8] // Clear protection count
MOV x1, #0xFFFFFFFF // Build invalid value
STR w1, [x3, #4] // Set core to an invalid value
DMB ISH // Ensure that accesses to shared resource have completed
STR w4, [x3, #0] // Clear protection
DSB ISH // To ensure update of the shared resource occurs before other CPUs awake
SEV // Send event to other CPUs, wakes anyone waiting on a mutex (using WFE)
B _tx_thread_schedule // Jump to scheduler!
// }

View File

@@ -0,0 +1,198 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Timer */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
.text
.align 3
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_timer_interrupt ARMv8-A-SMP */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function processes the hardware timer interrupt. This */
/* processing includes incrementing the system clock and checking for */
/* time slice and/or timer expiration. If either is found, the */
/* interrupt context save/restore functions are called along with the */
/* expiration functions. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_timer_expiration_process Timer expiration processing */
/* _tx_thread_time_slice Time slice interrupted thread */
/* */
/* CALLED BY */
/* */
/* interrupt vector */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
// VOID _tx_timer_interrupt(VOID)
// {
.global _tx_timer_interrupt
.type _tx_timer_interrupt, @function
_tx_timer_interrupt:
/* Timer bookkeeping is performed only on core 0; every other core
returns immediately. */
MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x3, x2, #16, #8 // Isolate cluster ID (MPIDR Aff2 on ARMv8.2)
#endif
UBFX x2, x2, #8, #8 // Isolate core ID (MPIDR Aff1 on ARMv8.2)
#else
#if TX_THREAD_SMP_CLUSTERS > 1
UBFX x3, x2, #8, #8 // Isolate cluster ID (MPIDR Aff1)
#endif
UBFX x2, x2, #0, #8 // Isolate core ID (MPIDR Aff0)
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
ADDS x2, x2, x3, LSL #2 // Calculate CPU ID = core + cluster*4 (assumes <= 4 cores per cluster)
#endif
CMP x2, #0 // Is this core 0?
BEQ __tx_process_timer // If desired core, continue processing
RET // Simply return if different core
__tx_process_timer:
/* Upon entry to this routine, it is assumed that context save has already
been called, and therefore the compiler scratch registers are available
for use. */
STP x27, x28, [sp, #-16]! // Save x27, x28 (x28 holds the saved posture across calls)
STP x29, x30, [sp, #-16]! // Save x29 (frame pointer), x30 (link register)
/* Get inter-core protection. */
BL _tx_thread_smp_protect // Get inter-core protection
MOV x28, x0 // Save the return value (previous posture) in preserved register
/* Increment the system clock. */
// _tx_timer_system_clock++;
LDR x1, =_tx_timer_system_clock // Pickup address of system clock
LDR w0, [x1, #0] // Pickup system clock
ADD w0, w0, #1 // Increment system clock
STR w0, [x1, #0] // Store new system clock
/* Test for timer expiration. */
// if (*_tx_timer_current_ptr)
// {
LDR x1, =_tx_timer_current_ptr // Pickup current timer pointer addr
LDR x0, [x1, #0] // Pickup current timer
LDR x2, [x0, #0] // Pickup timer list entry
CMP x2, #0 // Is there anything in the list?
BEQ __tx_timer_no_timer // No, just increment the timer
/* Set expiration flag. */
// _tx_timer_expired = TX_TRUE;
LDR x3, =_tx_timer_expired // Pickup expiration flag address
MOV w2, #1 // Build expired value
STR w2, [x3, #0] // Set expired flag
B __tx_timer_done // Finished timer processing
// }
// else
// {
__tx_timer_no_timer:
/* No timer expired, increment the timer pointer. */
// _tx_timer_current_ptr++;
ADD x0, x0, #8 // Move to next timer (list entries are 8-byte pointers)
/* Check for wrap-around. */
// if (_tx_timer_current_ptr == _tx_timer_list_end)
LDR x3, =_tx_timer_list_end // Pickup addr of timer list end
LDR x2, [x3, #0] // Pickup list end
CMP x0, x2 // Are we at list end?
BNE __tx_timer_skip_wrap // No, skip wrap-around logic
/* Wrap to beginning of list. */
// _tx_timer_current_ptr = _tx_timer_list_start;
LDR x3, =_tx_timer_list_start // Pickup addr of timer list start
LDR x0, [x3, #0] // Set current pointer to list start
__tx_timer_skip_wrap:
STR x0, [x1, #0] // Store new current timer pointer
// }
__tx_timer_done:
/* Did a timer expire? */
// if (_tx_timer_expired)
// {
LDR x1, =_tx_timer_expired // Pickup addr of expired flag
LDR w0, [x1, #0] // Pickup timer expired flag
CMP w0, #0 // Check for timer expiration
BEQ __tx_timer_dont_activate // If not set, skip timer activation
/* Process timer expiration. */
// _tx_timer_expiration_process();
BL _tx_timer_expiration_process // Call the timer expiration handling routine
// }
__tx_timer_dont_activate:
/* Call time-slice processing. */
// _tx_thread_time_slice();
BL _tx_thread_time_slice // Call time-slice processing
/* Release inter-core protection. */
MOV x0, x28 // Pass the previous status register back
BL _tx_thread_smp_unprotect // Release protection
LDP x29, x30, [sp], #16 // Recover x29, x30
LDP x27, x28, [sp], #16 // Recover x27, x28
RET // Return to caller
// }