Update on 16 Dec 2022. Expand to see details.

b5d5df511 #include tx_user.h in assembly files for cortex-m ports
33e04e3d5 initial port of MIPS SMP for GHS and GNU
2eda2c17d capitalize extensions for M23 asm files
21c354ccb Fix armv7-m MPU settings for corner case, unify txm_module_port.h files
4a1ff93f9 remove unneeded include for ac6
c823e91ff update riscv iar example for latest iar tools
5559d185d check module stack for overlap (not kernel stack)
efa9ce7b7 apply patch from mobileye to fix time slice processing
75fdcb722 Updated copy_armv7_cm.yml
de04b9904 initialize unused MPU settings so that aliasing will work
79b317b60 add config directory to IAR RISC-V port in order to use simulator
This commit is contained in:
Scott Larson
2022-12-16 08:16:32 +00:00
parent b42c5acd8b
commit 4e62226eea
148 changed files with 21660 additions and 50 deletions

View File

@@ -0,0 +1,335 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Initialize */
/** */
/**************************************************************************/
/**************************************************************************/
/* Port configuration constants for the MIPS32 interAptiv/GNU low-level
   initialization.  IM bit masks below correspond to Status/Cause IP bits. */
#define GIC_SH_WEDGE 0xbbdc0280 /* For Inter-processor interrupts on MALTA board. */
INITIAL_SR = 0xFF00 # All IM bits set (IM0-IM7 unmasked; IE still 0)
SW_INTERRUPT_0 = 0x0100 # Software interrupt 0 (IP0)
SW_INTERRUPT_1 = 0x0200 # Software interrupt 1 (IP1)
INTERRUPT_0 = 0x0400 # Interrupt 0 (IP2)
INTERRUPT_1 = 0x0800 # Interrupt 1 (IP3)
INTERRUPT_2 = 0x1000 # Interrupt 2 (IP4)
INTERRUPT_3 = 0x2000 # Interrupt 3 (IP5)
INTERRUPT_4 = 0x4000 # Interrupt 4 (IP6)
INTERRUPT_5 = 0x8000 # Interrupt 5 (IP7, count/compare timer)
EXCEPTION_VECTOR = 0x00000180 # General exception vector
TEN_MS_COUNT = 120000 # 10 ms clock rate
.text
.set noreorder
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_initialize_low_level MIPS32_interAptiv/GNU */
/* 6.x */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for any low-level processor */
/* initialization, including setting up interrupt vectors, setting */
/* up a periodic timer interrupt source, saving the system stack */
/* pointer for use in ISR processing later, and finding the first */
/* available RAM memory address for tx_application_define. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* xx-xx-xxxx Scott Larson Initial Version 6.x */
/* */
/**************************************************************************/
/* VOID _tx_initialize_low_level(VOID)
{ */
/* Low-level init: enable interrupt masks in SR, publish the system stack
   pointer and first free RAM address, and start the CP0 count/compare timer.
   Called with interrupts in an unknown state; leaves them disabled (di). */
.globl _tx_initialize_low_level
_tx_initialize_low_level:
di # Ensure interrupts are disabled
ehb #
mfc0 $8, $12 # Pickup current SR (CP0 Status)
ori $8, $8, INITIAL_SR # Build initial SR (set IM0-IM7 mask bits)
mtc0 $8, $12 # Setup SR
/* Save the system stack pointer. */
/* _tx_thread_system_stack_ptr = (VOID_PTR) (SP); */
la $8, _tx_thread_system_stack_ptr # Pickup address of system
/* # stack pointer */
sw $29, ($8) # Save system stack pointer
/* Save the first available memory address. */
/* _tx_initialize_unused_memory = (VOID_PTR) _free_memory; */
la $9, _free_memory # Pickup first free address (linker symbol)
la $10, _tx_initialize_unused_memory # Pickup address of unused
/* # memory */
sw $9, ($10) # Save unused memory address
/* Set up the counter/compare registers to generate a periodic interrupt. */
mtc0 $0, $9 # Initialize CP0 timer count register to zero
ehb #
li $9,TEN_MS_COUNT # Default value
mtc0 $9, $11 # Set timer compare register (CP0 Compare)
ehb #
/* Done, return to caller. */
/* NOTE(review): "j" with a register operand relies on GNU as expanding it
   to "jr" -- consider writing "jr $31" explicitly for clarity. */
j $31 # Return to caller
nop # Delay slot
/* } */
/* Define the interrupt/exception handler trampoline code. This needs to
be copied to address 0x80000180 cached or 0xA0000180 non-cache. */
/* Trampoline copied to the general exception vector (0x80000180 cached /
   0xA0000180 uncached); it simply transfers control to the real handler.
   Uses only k0 ($26), which is reserved for exception code. */
.globl _tx_exception_trampoline
_tx_exception_trampoline:
la $26,_tx_exception_handler # Pickup exception handler address
j $26 # Jump to exception handler
nop # Delay slot
nop # Fill with nops....
nop #
nop #
nop #
nop #
_tx_exception_trampoline_end:
/* Define the actual interrupt/exception handler. Since this routine must handle
multiple exceptions, the context save/restore functions are called automatically.
Application specific ISR processing added to this routine must be inserted into
the proper place and use registers in accordance with the MIPS compiler, i.e.
$16-$23 (s0-s7) and $30 (s8) must be saved if they are used. C functions called
from this area will automatically save/restore these registers if they are used. */
/* Common interrupt/exception handler entry.  On entry k0/k1 ($26/$27) are
   free for exception use.  A non-zero Cause.ExcCode is an error exception;
   ExcCode == 0 means an interrupt is pending, so save context and run the
   normal interrupt dispatch below.  Context save returns via $27 (k1). */
.globl _tx_exception_handler
_tx_exception_handler:
mfc0 $26, $13 # Pickup the cause register (CP0 Cause)
ehb #
andi $26, $26, 0x7C # Isolate the exception code (ExcCode is Cause bits 6:2; 0x3C dropped bit 6 and misread codes >= 16 as interrupts)
bne $26, $0, _tx_error_exceptions # If non-zero, an error exception is present
nop # Delay slot
la $27, _tx_thread_smp_system_error # Build address to system error flag
lw $27, ($27) # Pickup system error flag
_system_error_loop:
bne $27, $0, _system_error_loop # If error, just sit here!
nop
/* Otherwise, an interrupt exception is present. Call context save before we
process normal interrupts. */
la $26, _tx_thread_context_save # Pickup address of context save function
jalr $27,$26 # Call context save; link (return address) kept in $27/k1
nop # Delay slot
/* Perform interrupt processing here! When context save returns, interrupts are
disabled and all compiler scratch registers are available. Also, s0 is saved and
is used in this function to hold the contents of the CAUSE register. */
mfc0 $16, $13 # Pickup the cause register (held in s0 across calls below)
/* Interrupts may be re-enabled after this point. */
/* Check for Interrupt 0. */
andi $8, $16, INTERRUPT_0 # Isolate interrupt 0 flag
beqz $8, _tx_not_interrupt_0 # If not set, skip interrupt 0 processing
nop # Delay slot
/* Interrupt 0 processing goes here! */
/* Interrupt 0 is used for the SMP inter-processor interrupt (IPI). */
#ifdef TX_ENABLE_EVENT_TRACE
li $4,1 # Build interrupt type (a0 = 1)
la $9, _tx_trace_isr_enter_insert # Build interrupt enter logging address
jal $9 # Call interrupt enter event logging
nop #
#endif
/* Clear inter-processor interrupt (and increment counter). */
mfc0 $8, $4,2 # Pickup UserLocal (VPE number)
la $9, _tx_thread_smp_inter_core_interrupts # Address of inter-processor interrupt
sll $8, $8, 2 # Build offset to proper counter index (VPE * 4)
addu $9, $9, $8 # Build address of this VPE's counter
lw $8, 0($9) # Pickup current value
addiu $8, $8, 1 # Increment current value
sw $8, 0($9) # Store value back
li $8, GIC_SH_WEDGE #
mfc0 $9, $15, 1 # Get cp0 EBase
ext $9, $9, 0, 10 # Extract CPUNum
addiu $9, 0x20 # Offset to base of IPI interrupts.
sw $9, 0($8) # Clear this IPI (write interrupt number to GIC WEDGE reg)
#ifdef TX_ENABLE_EVENT_TRACE
li $4,1 # Build interrupt type
la $9, _tx_trace_isr_exit_insert # Build interrupt exit logging address
jal $9 # Call interrupt exit event logging
nop #
#endif
_tx_not_interrupt_0:
/* Check for Interrupt 1. */
andi $8, $16, INTERRUPT_1 # Isolate interrupt 1 flag
beqz $8, _tx_not_interrupt_1 # If not set, skip interrupt 1 processing
nop # Delay slot
/* Interrupt 1 processing goes here! */
_tx_not_interrupt_1:
/* Check for Interrupt 2. */
andi $8, $16, INTERRUPT_2 # Isolate interrupt 2 flag
beqz $8, _tx_not_interrupt_2 # If not set, skip interrupt 2 processing
nop # Delay slot
/* Interrupt 2 processing goes here! */
_tx_not_interrupt_2:
/* Check for Interrupt 3. */
andi $8, $16, INTERRUPT_3 # Isolate interrupt 3 flag
beqz $8, _tx_not_interrupt_3 # If not set, skip interrupt 3 processing
nop # Delay slot
/* Interrupt 3 processing goes here! */
_tx_not_interrupt_3:
/* Check for Interrupt 4. */
andi $8, $16, INTERRUPT_4 # Isolate interrupt 4 flag
beqz $8, _tx_not_interrupt_4 # If not set, skip interrupt 4 processing
nop # Delay slot
/* Interrupt 4 processing goes here! */
_tx_not_interrupt_4:
/* Check for Interrupt 5. */
andi $8, $16, INTERRUPT_5 # Isolate interrupt 5 flag
beqz $8, _tx_not_interrupt_5 # If not set, skip interrupt 5 processing
nop # Delay slot
/* Interrupt 5 processing goes here! */
/* Interrupt 5 is the count/compare timer interrupt. */
#ifdef TX_ENABLE_EVENT_TRACE
li $4,0 # Build interrupt type (a0 = 0 for timer)
la $9, _tx_trace_isr_enter_insert # Build interrupt enter logging address
jal $9 # Call interrupt enter event logging
nop #
#endif
/* Interrupt 5 is the count/compare timer interrupt. */
mtc0 $0, $9 # Initialize CP0 count register to zero
ehb #
li $9, TEN_MS_COUNT # 10 ms @ 66 MHz (NOTE(review): 120000 counts vs "66 MHz" comment look inconsistent -- confirm timer clock)
mtc0 $9, $11 # Set compare register, reset count reg.
ehb #
/* Call the ThreadX timer routine. */
la $8, _tx_timer_interrupt # Build timer interrupt address
jal $8 # Call timer interrupt handler
nop #
#ifdef TX_ENABLE_EVENT_TRACE
li $4,0 # Build interrupt type
la $9, _tx_trace_isr_exit_insert # Build interrupt exit logging address
jal $9 # Call interrupt exit event logging
nop #
#endif
_tx_not_interrupt_5:
/* Check for Software Interrupt 0. */
andi $8, $16, SW_INTERRUPT_0 # Isolate software interrupt 0 flag
beqz $8, _tx_not_interrupt_sw_0 # If not set, skip sw interrupt 0 processing
nop # Delay slot
/* Software interrupt 0 processing goes here! */
_tx_not_interrupt_sw_0:
/* Check for Software Interrupt 1. */
andi $8, $16, SW_INTERRUPT_1 # Isolate software interrupt 1 flag
beqz $8, _tx_not_interrupt_sw_1 # If not set, skip sw interrupt 1 processing
nop # Delay slot
/* Software interrupt 1 processing goes here! */
_tx_not_interrupt_sw_1:
/* All interrupt sources handled: restore context (may schedule instead). */
la $8, _tx_thread_context_restore # Pickup address of context restore function
j $8 # Jump to context restore - does not return!
nop # Delay slot
/* Error Exception processing goes here! */
/* Default policy: spin forever so a debugger can inspect the fault state. */
.globl _tx_error_exceptions
_tx_error_exceptions:
b _tx_error_exceptions # Default error exception processing
nop # Delay slot
/* Reference the build options and the version ID to ensure they are part of the image. */
/* (Unreachable code -- exists only to keep these symbols linked in.) */
la $8, _tx_build_options
la $9, _tx_version_id

View File

@@ -0,0 +1,409 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/* CP0 register/select pairs used by this file (MIPS MT ASE). */
#define UserLocal $4,2
#define C0_TCBind $2,2
#define C0_TCContext $2,5
#define C0_VPECtl $1,1
.text
.set noreorder
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore MIPS32_interAptiv/GNU */
/* 6.x */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function restores the interrupt context if it is processing a */
/* nested interrupt. If not, it returns to the interrupt thread if no */
/* preemption is necessary. Otherwise, if preemption is necessary or */
/* if no thread was running, the function returns to the scheduler. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_thread_schedule Thread scheduling routine */
/* */
/* CALLED BY */
/* */
/* ISRs Interrupt Service Routines */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* xx-xx-xxxx Scott Larson Initial Version 6.x */
/* */
/**************************************************************************/
/* VOID _tx_thread_context_restore(VOID)
{ */
/* Restore path entered from the interrupt dispatcher.  Decrements the
   per-VPE nested-interrupt count; if still nested, pops the 392-byte
   interrupt frame and erets straight back to the interrupted code. */
.globl _tx_thread_context_restore
_tx_thread_context_restore:
/* Lockout interrupts. */
di # Disable interrupts
ehb #
mfc0 $25, UserLocal # Pickup VPE ID
sll $24, $25, 2 # Build index based on VPE number (VPE * 4, word offset)
/* Determine if interrupts are nested. */
/* if (--_tx_thread_system_state[VPE])
{ */
la $9, _tx_thread_system_state # Pickup addr of nested interrupt count
addu $9, $9, $24 # Index by VPE
lw $8, ($9) # Pickup nested interrupt count
subu $8, $8, 1 # Decrement the nested interrupt counter
beqz $8,_tx_thread_not_nested_restore # If 0, not nested restore
sw $8, ($9) # Store new nested count (delay slot; runs either way)
/* Interrupts are nested. */
/* Just recover the saved registers and return to the point of
interrupt. */
#ifdef TX_ENABLE_64BIT_FPU_SUPPORT
/* Check if FPU is enabled for this thread. Note that threads with FPU enabled will only be
scheduled in VPE 0. */
bne $25, $0, _tx_skip_nest_restore # If not VPE 0, skip FPU scratch restore
nop #
lw $8, 384($29) # Recover fcr31
ctc1 $8, $31 # Setup fcr31
ldc1 $f19, 224($29) # Recover f19
ldc1 $f18, 232($29) # Recover f18
ldc1 $f17, 240($29) # Recover f17
ldc1 $f16, 248($29) # Recover f16
ldc1 $f15, 256($29) # Recover f15
ldc1 $f14, 264($29) # Recover f14
ldc1 $f13, 272($29) # Recover f13
ldc1 $f12, 280($29) # Recover f12
ldc1 $f11, 288($29) # Recover f11
ldc1 $f10, 296($29) # Recover f10
ldc1 $f9, 304($29) # Recover f9
ldc1 $f8, 312($29) # Recover f8
ldc1 $f7, 320($29) # Recover f7
ldc1 $f6, 328($29) # Recover f6
ldc1 $f5, 336($29) # Recover f5
ldc1 $f4, 344($29) # Recover f4
ldc1 $f3, 352($29) # Recover f3
ldc1 $f2, 360($29) # Recover f2
ldc1 $f1, 368($29) # Recover f1
ldc1 $f0, 376($29) # Recover f0
_tx_skip_nest_restore:
#endif
/* Recover standard registers from the interrupt stack frame. */
lw $16, 36($29) # Recover s0
lw $8, 40($29) # Recover hi
lw $9, 44($29) # Recover lo
mthi $8 # Setup hi
mtlo $9 # Setup lo
lw $8,124($29) # Recover EPC
lw $9,120($29) # Recover SR
mtc0 $8, $14 # Setup EPC
ehb #
lw $25, 48($29) # Recover t9
mtc0 $9, $12 # Restore SR
ehb #
lw $24, 52($29) # Recover t8
lw $15, 56($29) # Recover t7
lw $14, 60($29) # Recover t6
lw $13, 64($29) # Recover t5
lw $12, 68($29) # Recover t4
lw $11, 72($29) # Recover t3
lw $10, 76($29) # Recover t2
lw $9, 80($29) # Recover t1
lw $8, 84($29) # Recover t0
lw $7, 88($29) # Recover a3
lw $6, 92($29) # Recover a2
lw $5, 96($29) # Recover a1
lw $4, 100($29) # Recover a0
lw $3, 104($29) # Recover v1
lw $2, 108($29) # Recover v0
.set noat
lw $1, 112($29) # Recover at
.set at
lw $31,116($29) # Recover ra
addu $29, $29, 392 # Recover stack frame (matches subu in context save)
eret # Return to point of interrupt
nop # Delay
/* } */
_tx_thread_not_nested_restore:
/* Outermost interrupt: decide whether to resume the interrupted thread,
   preempt it, or fall into the scheduler (idle system). */
/* Determine if a thread was interrupted and no preemption is required. */
/* else if (((_tx_thread_current_ptr[VPE]) && (_tx_thread_current_ptr[VPE] == _tx_thread_tc_execute_list[VPE])
|| (_tx_thread_preempt_disable))
{ */
la $9, _tx_thread_current_ptr # Pickup address of current ptr
addu $9, $9, $24 # Build address of current pointer for this VPE
lw $8, ($9) # Pickup current thread pointer
beqz $8, _tx_thread_idle_system_restore # If NULL, idle system restore
nop #
la $11, _tx_thread_execute_ptr # Pickup address of execute thread pointer
addu $11, $11, $24 # Add VPE index here to see if this is the thread
lw $10, ($11) # Pickup thread execute pointer
beq $8, $10, _tx_thread_no_preempt_restore # If the current and execute are the same then, restore the current thread
nop # Delay slot
la $10, _tx_thread_smp_protection # Build address of protection structure
lw $11, 8($10) # Pickup the VPE with protection
bne $11, $25,_tx_thread_preempt_restore # If this is a different VPE, preempt current thread
nop #
la $13, _tx_thread_preempt_disable # Pickup address of preempt disable flag
lw $12, ($13) # Pickup preempt disable flag
beq $12, $0, _tx_thread_preempt_restore # If not set, preempt interrupted thread
nop # Delay slot
_tx_thread_no_preempt_restore:
/* No preemption needed: resume the interrupted thread directly.
   $8 = current thread control block pointer. */
/* Restore interrupted thread or ISR. */
/* Pickup the saved stack pointer. */
/* SP = _tx_thread_current_ptr[VPE] -> tx_thread_stack_ptr; */
lw $29, 8($8) # Switch back to thread's stack
/* Recover the saved context and return to the point of interrupt. */
#ifdef TX_ENABLE_64BIT_FPU_SUPPORT
/* Check if FPU is enabled for this thread. Note that threads with FPU enabled will only be
scheduled in VPE 0. */
lw $15, 176($8) # Pickup FPU enable flag (TCB offset 176)
bne $25, $0, _tx_skip_int_restore # If not VPE 0, skip FPU scratch restore
nop # Delay
beq $15, $0, _tx_skip_int_restore # If FPU not enabled, skip FPU scratch restore
nop #
lw $9, 384($29) # Recover fcr31
ctc1 $9, $31 # Setup fcr31
ldc1 $f19, 224($29) # Recover f19
ldc1 $f18, 232($29) # Recover f18
ldc1 $f17, 240($29) # Recover f17
ldc1 $f16, 248($29) # Recover f16
ldc1 $f15, 256($29) # Recover f15
ldc1 $f14, 264($29) # Recover f14
ldc1 $f13, 272($29) # Recover f13
ldc1 $f12, 280($29) # Recover f12
ldc1 $f11, 288($29) # Recover f11
ldc1 $f10, 296($29) # Recover f10
ldc1 $f9, 304($29) # Recover f9
ldc1 $f8, 312($29) # Recover f8
ldc1 $f7, 320($29) # Recover f7
ldc1 $f6, 328($29) # Recover f6
ldc1 $f5, 336($29) # Recover f5
ldc1 $f4, 344($29) # Recover f4
ldc1 $f3, 352($29) # Recover f3
ldc1 $f2, 360($29) # Recover f2
ldc1 $f1, 368($29) # Recover f1
ldc1 $f0, 376($29) # Recover f0
_tx_skip_int_restore:
#endif
/* Recover standard registers from the thread's interrupt frame. */
lw $16, 36($29) # Recover s0
lw $8, 40($29) # Recover hi
lw $9, 44($29) # Recover lo
mthi $8 # Setup hi
mtlo $9 # Setup lo
lw $8,124($29) # Recover EPC
lw $9,120($29) # Recover SR
mtc0 $8, $14 # Setup EPC
ehb #
lw $25, 48($29) # Recover t9
mtc0 $9, $12 # Restore SR
ehb #
lw $24, 52($29) # Recover t8
lw $15, 56($29) # Recover t7
lw $14, 60($29) # Recover t6
lw $13, 64($29) # Recover t5
lw $12, 68($29) # Recover t4
lw $11, 72($29) # Recover t3
lw $10, 76($29) # Recover t2
lw $9, 80($29) # Recover t1
lw $8, 84($29) # Recover t0
lw $7, 88($29) # Recover a3
lw $6, 92($29) # Recover a2
lw $5, 96($29) # Recover a1
lw $4, 100($29) # Recover a0
lw $3, 104($29) # Recover v1
lw $2, 108($29) # Recover v0
.set noat
lw $1, 112($29) # Recover at
.set at
lw $31,116($29) # Recover ra
addu $29, $29, 392 # Recover stack frame
eret # Return to point of interrupt
nop # Delay
/* }
else
{ */
_tx_thread_preempt_restore:
/* Preempt the interrupted thread: complete its context on its own stack
   (preserved regs + time-slice), clear the current pointer, mark the
   thread ready, and drop into the scheduler.  $8 = thread TCB pointer. */
/* Save remaining context on the thread's stack. */
lw $9, 8($8) # Pickup thread's stack pointer
ori $12, $0, 1 # Build interrupt stack type
sw $12, ($9) # Store stack type
#ifdef TX_ENABLE_64BIT_FPU_SUPPORT
/* Check if FPU is enabled for this thread. Note that threads with FPU enabled will only be
scheduled in VPE 0. */
lw $15, 176($8) # Pickup FPU enable flag
bne $25, $0, _tx_skip_preserved_save # If not VPE 0, skip FPU preserved save
nop #
beq $15, $0, _tx_skip_preserved_save # If FPU not enabled, skip FPU preserved save
nop
/* NOTE(review): this is a save path, yet fcr31 is loaded from the stack and
   written to the FPU (lw/ctc1) rather than read and stored (cfc1/sw) --
   confirm this is intentional. */
lw $7, 384($9) # Recover fcr31
ctc1 $7, $31 # Setup fcr31
sdc1 $f31, 128($9) # Store f31
sdc1 $f30, 136($9) # Store f30
sdc1 $f29, 144($9) # Store f29
sdc1 $f28, 152($9) # Store f28
sdc1 $f27, 160($9) # Store f27
sdc1 $f26, 168($9) # Store f26
sdc1 $f25, 176($9) # Store f25
sdc1 $f24, 184($9) # Store f24
sdc1 $f23, 192($9) # Store f23
sdc1 $f22, 200($9) # Store f22
sdc1 $f21, 208($9) # Store f21
sdc1 $f20, 216($9) # Store f20
_tx_skip_preserved_save:
#endif
/* Store standard preserved registers. */
sw $30, 4($9) # Store s8
sw $23, 8($9) # Store s7
sw $22, 12($9) # Store s6
sw $21, 16($9) # Store s5
sw $20, 20($9) # Store s4
sw $19, 24($9) # Store s3
sw $18, 28($9) # Store s2
sw $17, 32($9) # Store s1
/* # Note: s0 is already stored! */
#ifdef TX_ENABLE_EVENT_LOGGING
or $17, $24, $0 # Save VPE index offset value
or $16, $8, $0 # Save thread pointer into non-volatile
or $4, $8, $0 # Move thread pointer into input register
la $9, _tx_el_thread_preempted # Build address of thread preempted event routine
jal $9 # Call event logging routine
nop # Delay slot
or $8, $16, $0 # Recover thread pointer
or $24, $17, $0 # Recover VPE index offset value
#endif
/* Save the remaining time-slice and disable it. */
/* if (_tx_timer_time_slice[VPE])
{ */
la $10, _tx_timer_time_slice # Pickup time slice variable address
addu $10, $10, $24 # Build index into time-slice
/* Check for time-slice race condition. */
la $12, _tx_timer_interrupt_active
__time_slice_wait:
lw $13, 0($12)
bne $13, $0, __time_slice_wait # Spin while the timer ISR is active
lw $9, ($10) # Pickup time slice (delay slot; reloaded each iteration)
la $12, _tx_thread_current_ptr # Pickup current thread pointer address
addu $12, $12, $24 # Build VPE index
beqz $9, _tx_thread_dont_save_ts # If 0, skip time slice processing
nop # Delay slot
/* _tx_thread_current_ptr[VPE] -> tx_thread_time_slice = _tx_timer_time_slice[VPE]
_tx_timer_time_slice[VPE] = 0; */
sw $9, 24($8) # Save current time slice
sw $0, ($10) # Clear global time slice
/* } */
_tx_thread_dont_save_ts:
/* Clear the current task pointer. */
/* _tx_thread_current_ptr[VPE] = TX_NULL; */
sw $0, ($12) # Clear current thread pointer
/* Set bit indicating the thread is ready for scheduling. */
lw $9, 152($8) # Pickup the thread's VPE control register
ori $9, $9, 0x8000 # Set ready bit (bit 15)
sync
sw $9, 152($8) # Make this thread ready for scheduling
/* Return to the scheduler. */
/* _tx_thread_schedule(); */
_tx_thread_idle_system_restore:
/* Just return back to the scheduler! */
mfc0 $15, $12 # Pickup SR
li $8, 0xFFFFFFFD # Build mask for EXL bit (bit 1)
and $15, $15, $8 # Clear EXL bit
ori $15, $15, 1 # Set IE bit
mtc0 $15, $12 # Setup new SR with IE enabled
ehb #
la $8, _tx_thread_schedule # Build address of scheduling loop
jr $8 # Return to scheduler
nop # Delay slot
/* } */

View File

@@ -0,0 +1,295 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/* CP0 register/select pairs used by this file (MIPS MT ASE). */
#define UserLocal $4,2
#define C0_TCBind $2,2
#define C0_TCContext $2,5
.text
.set noreorder
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_save MIPS32_interAptiv/GNU */
/* 6.x */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function saves the context of an executing thread in the */
/* beginning of interrupt processing. The function also ensures that */
/* the system stack is used upon return to the calling ISR. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ISRs */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* xx-xx-xxxx Scott Larson Initial Version 6.x */
/* */
/**************************************************************************/
/* VOID _tx_thread_context_save(VOID)
{ */
/* Save path entered from the exception handler with interrupts disabled.
   Allocates a 392-byte frame, bumps the per-VPE nested count, and saves
   the scratch register set.  Returns to the ISR through $27 (k1). */
.globl _tx_thread_context_save
_tx_thread_context_save:
/* Upon entry to this routine, it is assumed that interrupts are locked
out and the stack is exactly where it was when the interrupt occurred.
The return address is in $27 (k1). */
subu $29, $29, 392 # Allocate space for a full stack frame
/* # even though the whole thing might
# not be needed for awhile */
sw $25, 48($29) # Store t9
sw $24, 52($29) # Store t8
sw $8, 84($29) # Save t0
mfc0 $25, UserLocal # Pickup VPE ID
sll $24, $25, 2 # Build index based on VPE number (VPE * 4)
la $26, _tx_thread_system_state # Pickup address of system state
addu $26, $26, $24 # Index by VPE
lw $8, ($26) # Pickup system state
/* Check for a nested interrupt condition. */
/* if (_tx_thread_system_state[VPE]++)
{ */
beqz $8, _tx_thread_not_nested_save # If 0, first interrupt condition
addu $8, $8, 1 # Increment the nested interrupt counter (delay slot; runs either way)
/* Nested interrupt condition. */
sw $8, ($26) # Store the interrupt counter
#ifdef TX_ENABLE_64BIT_FPU_SUPPORT
/* Check if FPU is enabled for this thread. Note that threads with FPU enabled will only be
scheduled in VPE 0. */
bne $25, $0, _tx_skip_nest_int_save # If not VPE 0, skip FPU save
nop #
/* Save scratch floating point registers. */
cfc1 $8, $31 # Pickup floating point control reg
sdc1 $f19, 224($29) # Save f19
sdc1 $f18, 232($29) # Save f18
sdc1 $f17, 240($29) # Save f17
sdc1 $f16, 248($29) # Save f16
sdc1 $f15, 256($29) # Save f15
sdc1 $f14, 264($29) # Save f14
sdc1 $f13, 272($29) # Save f13
sdc1 $f12, 280($29) # Save f12
sdc1 $f11, 288($29) # Save f11
sdc1 $f10, 296($29) # Save f10
sdc1 $f9, 304($29) # Save f9
sdc1 $f8, 312($29) # Save f8
sdc1 $f7, 320($29) # Save f7
sdc1 $f6, 328($29) # Save f6
sdc1 $f5, 336($29) # Save f5
sdc1 $f4, 344($29) # Save f4
sdc1 $f3, 352($29) # Save f3
sdc1 $f2, 360($29) # Save f2
sdc1 $f1, 368($29) # Save f1
sdc1 $f0, 376($29) # Save f0
sw $8, 384($29) # Save fcr31
_tx_skip_nest_int_save:
#endif
/* Save the rest of the scratch registers on the stack and return to the
calling ISR. */
sw $16, 36($29) # Store s0
mfhi $8 # Pickup hi
mflo $26 # Pickup lo
sw $8, 40($29) # Store hi
sw $26, 44($29) # Store lo
sw $15, 56($29) # Store t7
sw $14, 60($29) # Store t6
sw $13, 64($29) # Store t5
sw $12, 68($29) # Store t4
sw $11, 72($29) # Store t3
sw $10, 76($29) # Store t2
sw $9, 80($29) # Store t1
sw $7, 88($29) # Store a3
sw $6, 92($29) # Store a2
sw $5, 96($29) # Store a1
sw $4, 100($29) # Store a0
sw $3, 104($29) # Store v1
sw $2, 108($29) # Store v0
.set noat
sw $1, 112($29) # Store at
.set at
sw $31, 116($29) # Store ra
mfc0 $8, $12 # Pickup SR
mfc0 $9, $14 # Pickup EPC
sw $8, 120($29) # Store SR
sw $9, 124($29) # Store EPC
/* Return to the ISR. */
j $27 # Return to ISR
nop #
_tx_thread_not_nested_save:
/* } */
/* First-level interrupt.  If a thread was running, save its scratch
   context on its stack and switch to the system stack; otherwise the
   scheduler loop was interrupted and no context needs saving. */
/* Otherwise, not nested, check to see if a thread was running. */
/* else if (_tx_thread_current_ptr[VPE])
{ */
sw $8, ($26) # Store the interrupt counter (now 1)
la $26, _tx_thread_current_ptr # Pickup address of current ptr
addu $26, $26, $24 # Build address of current pointer for this VPE
lw $8, ($26) # Pickup current thread pointer
beqz $8, _tx_thread_idle_system_save # If NULL, idle system was interrupted
sw $16, 36($29) # Store s0 (delay slot; runs either way)
/* Save minimal context of interrupted thread. */
#ifdef TX_ENABLE_64BIT_FPU_SUPPORT
/* Check if FPU is enabled for this thread. Note that threads with FPU enabled will only be
scheduled in TC 0. */
bne $25, $0, _tx_skip_int_save # If not VPE 0, skip FPU save
nop # Delay
lw $26, 176($8) # Pickup FPU enable flag
beq $26, $0, _tx_skip_int_save # If FPU not enabled, skip FPU save
nop # Delay
/* Save scratch floating point registers. */
cfc1 $8, $31 # Pickup floating point control reg ($8 reused; thread ptr reloaded below)
sdc1 $f19, 224($29) # Save f19
sdc1 $f18, 232($29) # Save f18
sdc1 $f17, 240($29) # Save f17
sdc1 $f16, 248($29) # Save f16
sdc1 $f15, 256($29) # Save f15
sdc1 $f14, 264($29) # Save f14
sdc1 $f13, 272($29) # Save f13
sdc1 $f12, 280($29) # Save f12
sdc1 $f11, 288($29) # Save f11
sdc1 $f10, 296($29) # Save f10
sdc1 $f9, 304($29) # Save f9
sdc1 $f8, 312($29) # Save f8
sdc1 $f7, 320($29) # Save f7
sdc1 $f6, 328($29) # Save f6
sdc1 $f5, 336($29) # Save f5
sdc1 $f4, 344($29) # Save f4
sdc1 $f3, 352($29) # Save f3
sdc1 $f2, 360($29) # Save f2
sdc1 $f1, 368($29) # Save f1
sdc1 $f0, 376($29) # Save f0
sw $8, 384($29) # Save fcr31
_tx_skip_int_save:
#endif
/* Save the standard scratch registers. */
mfhi $8 # Pickup hi
mflo $26 # Pickup lo
sw $8, 40($29) # Store hi
sw $26, 44($29) # Store lo
sw $15, 56($29) # Store t7
sw $14, 60($29) # Store t6
sw $13, 64($29) # Store t5
sw $12, 68($29) # Store t4
sw $11, 72($29) # Store t3
sw $10, 76($29) # Store t2
sw $9, 80($29) # Store t1
sw $7, 88($29) # Store a3
sw $6, 92($29) # Store a2
sw $5, 96($29) # Store a1
sw $4, 100($29) # Store a0
sw $3, 104($29) # Store v1
sw $2, 108($29) # Store v0
.set noat
sw $1, 112($29) # Store at
.set at
sw $31, 116($29) # Store ra
mfc0 $8, $12 # Pickup SR
mfc0 $9, $14 # Pickup EPC
sw $8, 120($29) # Store SR
sw $9, 124($29) # Store EPC
li $8, 1 # Build stack type
sw $8, ($29) # Store stack type
/* Save the current stack pointer in the thread's control block. */
/* _tx_thread_current_ptr[VPE] -> tx_thread_stack_ptr = sp; */
/* Switch to the system stack. */
/* sp = _tx_thread_system_stack_ptr[VPE]; */
la $9, _tx_thread_current_ptr # Pickup address of current ptr
addu $9, $9, $24 # Build address of current pointer for this VPE
lw $11, ($9) # Pickup current thread pointer
la $10,_tx_thread_system_stack_ptr # Pickup the stack pointer address
sw $29, 8($11) # Save stack pointer
addu $10, $10, $24 # Build offset to system stack pointer
lw $29, ($10) # Switch to system stack
j $27 # Return to ISR
nop #
/* }
else
{ */
_tx_thread_idle_system_save:
/* Interrupt occurred in the scheduling loop. */
addu $29, $29, 392 # Recover the reserved stack space
j $27 # Return to ISR
nop #
/* }
} */

View File

@@ -0,0 +1,88 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
/* SR masks: RETURN_MASK isolates the IE bit; SET_SR_MASK_U/L form the
   32-bit constant 0xFFFFFFFE used to clear IE before applying posture. */
RETURN_MASK = 0x0001
SET_SR_MASK_U = 0xFFFF
SET_SR_MASK_L = 0xFFFE
.text
.set noreorder
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_interrupt_control MIPS32_interAptiv/GNU */
/* 6.x */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for changing the interrupt lockout */
/* posture of the system. */
/* */
/* INPUT */
/* */
/* new_posture New interrupt lockout posture */
/* */
/* OUTPUT */
/* */
/* old_posture Old interrupt lockout posture */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* Application Code */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* xx-xx-xxxx Scott Larson Initial Version 6.x */
/* */
/**************************************************************************/
/* UINT _tx_thread_interrupt_control(UINT new_posture)
{ */
/* Set a new interrupt lockout posture and return the previous one.
   In:  $4 (a0) = new posture (IE bit).  Out: $2 (v0) = old IE bit. */
.globl _tx_thread_interrupt_control
_tx_thread_interrupt_control:
/* Pickup current interrupt lockout posture. */
mfc0 $8, $12 # Pickup current SR (CP0 Status)
andi $2, $8, RETURN_MASK # Return value back to caller (old IE bit)
/* Apply the new interrupt posture. */
lui $9, SET_SR_MASK_U # Build set SR mask (0xFFFFFFFE)
ori $9, $9, SET_SR_MASK_L #
and $8, $8, $9 # Isolate interrupt lockout bits (clear IE)
or $8, $8, $4 # Put new lockout bits in
mtc0 $8, $12 # Set new interrupt lockout
jr.hb $31 # Return to caller (with hazard barrier)
nop # Delay slot
/* } */

View File

@@ -0,0 +1,369 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#define UserLocal $4,2 /* CP0 UserLocal: holds this VPE's ID in this port */
#define C0_TCBind $2,2 /* CP0 TCBind register (MT ASE) */
#define C0_TCContext $2,5 /* CP0 TCContext register (MT ASE) */
#define C0_TCHalt $2,4 /* CP0 TCHalt register (MT ASE) */
#ifdef TX_THREAD_SMP_WAKEUP_LOGIC
.globl TX_MIPS32_1004K_VPE_YIELD
#endif
.text
.set noreorder
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule MIPS32_interAptiv/GNU */
/* 6.x */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function waits for a thread control block pointer to appear in */
/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
/* in the variable, the corresponding thread is resumed. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* xx-xx-xxxx Scott Larson Initial Version 6.x */
/* */
/**************************************************************************/
/* VOID _tx_thread_schedule(VOID)
{ */
.globl _tx_thread_schedule_idle_system
_tx_thread_schedule_idle_system:
#ifndef TX_THREAD_SMP_WAKEUP_LOGIC
ei # Enable interrupts
ehb #
li $11,-1 #
yield $11, $11 # Yield so this VPE does not consume all the cycles
lw $8, ($9) # Pickup next thread to execute
beq $8, $0, _tx_thread_schedule_idle_system # If still NULL, keep idling
nop # Delay slot
#else
la $8, TX_MIPS32_1004K_VPE_YIELD # Get call-out address
jalr $8 # Make the call
or $4, $0, $9 # Pass the properly indexed _tx_thread_execute_ptr[x] (delay slot)
#endif
.globl _tx_thread_schedule
_tx_thread_schedule:
/* Enable interrupts briefly so pending interrupts can be taken. */
ei # Enable interrupts
ehb #
/* Disable interrupts. */
di # Disable interrupts
ehb #
/* Pickup the executing VPE number. */
mfc0 $25, UserLocal # Pickup VPE ID
sll $25, $25, 2 # Build word index based on VPE number
/* Calculate the execute pointer for this VPE. */
la $9, _tx_thread_execute_ptr # Pickup starting address of execute list
addu $9, $9, $25 # Build address of execute pointer for this TC
/* Wait for a thread to execute. */
/* do
{ */
_tx_thread_schedule_loop:
lw $8, ($9) # Pickup next thread to execute
beq $8, $0, _tx_thread_schedule_idle_system # If NULL, nothing to run - idle this VPE
nop # Delay slot
_tx_thread_check_ready_bit:
lw $9, 152($8) # Pickup the thread's VPE control word (offset 152 in TX_THREAD)
andi $10, $9, 0x8000 # Pickup ready bit (bit 15)
bne $10, $0, _tx_thread_is_ready # If ready bit is set, actually schedule the thread
andi $9, $9, 0x7FFF # Clear the ready bit (bit 15) (delay slot - always executed)
b _tx_thread_schedule # Not ready - resume at the top of the scheduling loop
nop
_tx_thread_is_ready:
sw $9, 152($8) # Store the cleared ready bit to prevent any other VPE from scheduling this thread
sync
/* }
while(_tx_thread_execute_ptr[VPE] == TX_NULL); */
_tx_thread_schedule_thread:
/* Yes! We have a thread to execute. Interrupts and multithreading are locked out.
Pickup the thread's register context, enable multithreading, and transfer control to
the thread. */
/* Save this thread in the context register of the TC. */
mtc0 $8, C0_TCContext # Set TCContext to current thread
ehb #
#ifdef TX_ENABLE_EVENT_LOGGING
or $16, $8, $0 # Save thread pointer into non-volatile
or $4, $8, $0 # Move thread pointer into input register
la $9, _tx_el_thread_running # Build address of thread running event routine
jal $9 # Call event logging routine
nop # Delay slot
or $8, $16, $0 # Recover thread pointer
#endif
/* Setup the current thread pointer. */
/* _tx_thread_current_ptr[VPE] = _tx_thread_execute_ptr[VPE]; */
la $9, _tx_thread_current_ptr # Pickup current thread pointer address
addu $9, $9, $25 # Offset to VPE specific entry
sw $8, ($9) # Set current thread pointer
/* Increment the run count for this thread. */
/* _tx_thread_current_ptr[VPE] -> tx_thread_run_count++; */
lw $10, 4($8) # Pickup run count
lw $11, 24($8) # Pickup time slice value
addu $10, $10, 1 # Increment run count
sw $10, 4($8) # Store new run count
/* Setup time-slice, if present. */
/* _tx_timer_time_slice[VPE] = _tx_thread_current_ptr[VPE] -> tx_thread_time_slice; */
la $10, _tx_timer_time_slice # Pickup time-slice variable address
addu $10, $10, $25 # Offset to VPE specific time-slice
/* Switch to the thread's stack. */
/* SP = _tx_thread_current_ptr[VPE] -> tx_thread_stack_ptr; */
lw $29, 8($8) # Switch to thread's stack
lw $15, 176($8) # Pickup FPU enable flag in TX_THREAD structure
sw $11, ($10) # Store new time-slice
/* Determine if an interrupt frame or a synchronous task suspension frame
is present. */
lw $10, ($29) # Pickup stack type (0 = solicited, else interrupt)
beqz $10, _tx_thread_synch_return # If 0, solicited thread return
nop # Delay slot
#ifdef TX_ENABLE_64BIT_FPU_SUPPORT
/* Check if FPU is enabled for this thread. Note that threads with FPU enabled will only be
scheduled in VPE 0. */
lw $9,120($29) # Recover SR from interrupt frame
li $10,0xDFFFFFFF # Mask for FPU enable bit (CU1)
mfc0 $8, UserLocal # Pickup VPE ID
and $9, $9, $10 # Build SR with FPU enable bit masked
bne $8, $0, _tx_skip_fpu_int_restore # If not VPE 0, skip FPU restore
li $10, 0x20000000 # Build FPU enable bit (delay slot - always executed)
or $9, $9, $10 # Build SR with FPU enable
beq $15, $0, _tx_skip_fpu_int_restore # If FPU not enabled, skip FPU restore
nop
/* NOTE(review): CU1 is set in $9 before the $15 check above - verify that
non-FPU threads on VPE 0 are intended to run with FPU enabled. */
lw $8, 384($29) # Recover fcr31
ctc1 $8, $31 # Setup fcr31
ldc1 $f31, 128($29) # Recover f31
ldc1 $f30, 136($29) # Recover f30
ldc1 $f29, 144($29) # Recover f29
ldc1 $f28, 152($29) # Recover f28
ldc1 $f27, 160($29) # Recover f27
ldc1 $f26, 168($29) # Recover f26
ldc1 $f25, 176($29) # Recover f25
ldc1 $f24, 184($29) # Recover f24
ldc1 $f23, 192($29) # Recover f23
ldc1 $f22, 200($29) # Recover f22
ldc1 $f21, 208($29) # Recover f21
ldc1 $f20, 216($29) # Recover f20
ldc1 $f19, 224($29) # Recover f19
ldc1 $f18, 232($29) # Recover f18
ldc1 $f17, 240($29) # Recover f17
ldc1 $f16, 248($29) # Recover f16
ldc1 $f15, 256($29) # Recover f15
ldc1 $f14, 264($29) # Recover f14
ldc1 $f13, 272($29) # Recover f13
ldc1 $f12, 280($29) # Recover f12
ldc1 $f11, 288($29) # Recover f11
ldc1 $f10, 296($29) # Recover f10
ldc1 $f9, 304($29) # Recover f9
ldc1 $f8, 312($29) # Recover f8
ldc1 $f7, 320($29) # Recover f7
ldc1 $f6, 328($29) # Recover f6
ldc1 $f5, 336($29) # Recover f5
ldc1 $f4, 344($29) # Recover f4
ldc1 $f3, 352($29) # Recover f3
ldc1 $f2, 360($29) # Recover f2
ldc1 $f1, 368($29) # Recover f1
ldc1 $f0, 376($29) # Recover f0
_tx_skip_fpu_int_restore:
sw $9,120($29) # Store new SR back into the frame
#endif
/* Recover standard registers. */
lw $8,124($29) # Recover EPC
lw $9,120($29) # Recover SR
mtc0 $8, $14 # Setup EPC
ehb #
lw $30, 4($29) # Recover s8
mtc0 $9, $12 # Restore SR
ehb # Clear hazards
lw $23, 8($29) # Recover s7
lw $22, 12($29) # Recover s6
lw $21, 16($29) # Recover s5
lw $20, 20($29) # Recover s4
lw $19, 24($29) # Recover s3
lw $18, 28($29) # Recover s2
lw $17, 32($29) # Recover s1
lw $16, 36($29) # Recover s0
lw $8, 40($29) # Recover hi
lw $9, 44($29) # Recover lo
mthi $8 # Setup hi
mtlo $9 # Setup lo
lw $25, 48($29) # Recover t9
lw $24, 52($29) # Recover t8
lw $15, 56($29) # Recover t7
lw $14, 60($29) # Recover t6
lw $13, 64($29) # Recover t5
lw $12, 68($29) # Recover t4
lw $11, 72($29) # Recover t3
lw $10, 76($29) # Recover t2
lw $9, 80($29) # Recover t1
lw $8, 84($29) # Recover t0
lw $7, 88($29) # Recover a3
lw $6, 92($29) # Recover a2
lw $5, 96($29) # Recover a1
lw $4, 100($29) # Recover a0
lw $3, 104($29) # Recover v1
lw $2, 108($29) # Recover v0
.set noat
lw $1, 112($29) # Recover at
.set at
lw $31,116($29) # Recover ra
addu $29, $29, 392 # Recover stack frame (392-byte interrupt frame)
emt # Enable multithreading again
eret # Return to point of interrupt
_tx_thread_synch_return:
#ifdef TX_ENABLE_64BIT_FPU_SUPPORT
/* Check if FPU is enabled for this thread. Note that threads with FPU enabled will only be
scheduled in VPE 0. */
lw $9,52($29) # Recover SR from solicited frame
li $10,0xDFFFFFFF # Mask for FPU enable bit (CU1)
mfc0 $8, UserLocal # Pickup VPE ID
and $9, $9, $10 # Build SR with FPU enable bit masked
bne $8, $0, _tx_skip_fpu_sync_restore # If not TC 0, skip FPU restore
li $10, 0x20000000 # Build FPU enable bit (delay slot - always executed)
or $9, $9, $10 # Build SR with FPU enable
beq $15, $0, _tx_skip_fpu_sync_restore # If FPU not enabled, skip FPU restore
nop
lw $8, 152($29) # Recover fcr31
ctc1 $8, $31 # Setup fcr31
ldc1 $f31, 56($29) # Recover f31
ldc1 $f30, 64($29) # Recover f30
ldc1 $f29, 72($29) # Recover f29
ldc1 $f28, 80($29) # Recover f28
ldc1 $f27, 88($29) # Recover f27
ldc1 $f26, 96($29) # Recover f26
ldc1 $f25, 104($29) # Recover f25
ldc1 $f24, 112($29) # Recover f24
ldc1 $f23, 120($29) # Recover f23
ldc1 $f22, 128($29) # Recover f22
ldc1 $f21, 136($29) # Recover f21
ldc1 $f20, 144($29) # Recover f20
_tx_skip_fpu_sync_restore:
sw $9,52($29) # Store new SR back into the frame
#endif
/* Recover standard preserved registers. Only callee-saved state is in a
solicited (synchronous suspension) frame. */
lw $30, 4($29) # Recover s8
lw $23, 8($29) # Recover s7
lw $22, 12($29) # Recover s6
lw $21, 16($29) # Recover s5
lw $20, 20($29) # Recover s4
lw $19, 24($29) # Recover s3
lw $18, 28($29) # Recover s2
lw $17, 32($29) # Recover s1
lw $16, 36($29) # Recover s0
lw $8, 40($29) # Recover hi
lw $9, 44($29) # Recover lo
mthi $8 # Setup hi
mtlo $9 # Setup lo
lw $8, 52($29) # Recover SR
lw $31, 48($29) # Recover ra
addu $29, $29, 160 # Recover stack space (160-byte solicited frame)
mtc0 $8, $12 # Restore SR
ehb # Clear hazards
emt # Enable multithreading
jr.hb $31 # Return to thread (with hazard barrier)
nop #
/* } */

View File

@@ -0,0 +1,69 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#define UserLocal $4,2 /* CP0 UserLocal: holds this VPE's ID in this port */
.text
.set noreorder
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_core_get MIPS32_interAptiv/GNU */
/* 6.x */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function gets the currently running core number and returns it.*/
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* Core ID */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ThreadX Source */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* xx-xx-xxxx Scott Larson Initial Version 6.x */
/* */
/**************************************************************************/
.globl _tx_thread_smp_core_get
_tx_thread_smp_core_get:
mfc0 $2, UserLocal # Pickup VPE ID
j $31 # Return to caller
and $2, $2, 0xFF # Isolate the VPE number (delay slot)

View File

@@ -0,0 +1,79 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#define GIC_SH_WEDGE 0xbbdc0280 /* For Inter-processor interrupts on MALTA board. */
#define GIC_SH_COUNTER_LO 0xbbdc0010 /* Lower 32-bits of GIC common counter */
.text
.set noreorder
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_core_preempt MIPS32_interAptiv/GNU */
/* 6.x */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function preempts the specified core in situations where the */
/* thread corresponding to this core is no longer ready or when the */
/* core must be used for a higher-priority thread. If the specified */
/* core is the current core, this processing is skipped since the */
/* core will give up control subsequently on its own. */
/* */
/* INPUT */
/* */
/* core The core to preempt */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ThreadX Source */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* xx-xx-xxxx Scott Larson Initial Version 6.x */
/* */
/**************************************************************************/
.globl _tx_thread_smp_core_preempt
_tx_thread_smp_core_preempt:
sync # Make prior stores visible before raising the IPI
la $8, GIC_SH_WEDGE # Build address of GIC WEDGE register
li $9, 0x80000020 # Presumably "set" bit (bit 31) | base interrupt 0x20 - TODO confirm against GIC spec
addu $9, $4, $9 # Build exact VPE to interrupt ($4/a0 = core)
sw $9, 0($8) # Interrupt the selected VPE
jr.hb $31 # Return to caller (with hazard barrier)
nop # Delay slot

View File

@@ -0,0 +1,78 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#define UserLocal $4,2 /* CP0 UserLocal: holds this VPE's ID in this port */
#define C0_Status $12 /* CP0 Status register */
.text
.set noreorder
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_current_state_get MIPS32_interAptiv/GNU */
/* 6.x */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function gets the current state of the calling core. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* Current system state for this core */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ThreadX Components */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* xx-xx-xxxx Scott Larson Initial Version 6.x */
/* */
/**************************************************************************/
.globl _tx_thread_smp_current_state_get
_tx_thread_smp_current_state_get:
di $8 # Disable interrupts, old SR captured in $8
ehb #
la $12, _tx_thread_system_state # Pickup start of the current state array
mfc0 $25, UserLocal # Pickup VPE ID
sll $25, $25, 2 # Build word index based on VPE number
addu $12, $12, $25 # Build address of current state for this VPE
lw $2, ($12) # Pickup current state (return value in $2/v0)
mtc0 $8, C0_Status # Restore previous interrupt posture
jr.hb $31 # Return to caller (with hazard barrier)
nop # Delay slot

View File

@@ -0,0 +1,77 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#define UserLocal $4,2 /* CP0 UserLocal: holds this VPE's ID in this port */
#define C0_Status $12 /* CP0 Status register */
.text
.set noreorder
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_current_thread_get MIPS32_interAptiv/GNU */
/* 6.x */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function gets the current thread of the calling core. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* Current thread pointer for this core */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ThreadX Components */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* xx-xx-xxxx Scott Larson Initial Version 6.x */
/* */
/**************************************************************************/
.globl _tx_thread_smp_current_thread_get
_tx_thread_smp_current_thread_get:
di $10 # Disable interrupts, old SR captured in $10
ehb #
mfc0 $25, UserLocal # Pickup VPE ID
la $12, _tx_thread_current_ptr # Pickup the current thread pointer array
sll $25, $25, 2 # Build word index based on VPE number
addu $12, $12, $25 # Build address of current thread pointer for this VPE
lw $2, ($12) # Pickup current thread pointer (return value in $2/v0)
mtc0 $10, C0_Status # Restore previous interrupt posture
jr.hb $31 # Return to caller (with hazard barrier)
nop # Delay slot

View File

@@ -0,0 +1,119 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread */
/** */
/**************************************************************************/
/**************************************************************************/
#define UserLocal $4,2 /* CP0 UserLocal: holds this VPE's ID in this port */
#define INITIAL_SR 0xFF00 /* All IM bits set */
.text
.set noreorder
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_initialize_wait MIPS32_interAptiv/GNU */
/* 6.x */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is the place where additional cores wait until */
/* initialization is complete before they enter the thread scheduling */
/* loop. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* _tx_thread_schedule Thread scheduling loop */
/* */
/* CALLED BY */
/* */
/* Hardware */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* xx-xx-xxxx Scott Larson Initial Version 6.x */
/* */
/**************************************************************************/
.globl _tx_thread_smp_initialize_wait
_tx_thread_smp_initialize_wait:
#ifdef TX_ENABLE_64BIT_FPU_SUPPORT
cfc1 $8, $31 # Pickup current FPU control reg (fcr31)
la $9, _tx_thread_smp_initial_fpu_control_register
sw $8, ($9) # Save FPU control reg for later thread stack builds
#endif
/* Pickup the release cores flag. */
la $8, _tx_thread_smp_release_cores_flag # Build address of release cores flag
wait_for_release:
lw $9, ($8) # Pickup release cores flag
beq $9, $0, wait_for_release # Wait here until it is set
nop # Delay slot
/* Core 0 has released this core. */
/* Pickup the core ID. */
mfc0 $8, UserLocal # Pickup VPE ID
sll $8, $8, 2 # Build word index based on VPE number
/* Clear this core's system state variable. */
la $9, _tx_thread_system_state # Build address of system state variable
addu $9, $9, $8 #
sw $0, ($9) # Clear this VPE's system state entry
/* Now wait for core 0 to finish its initialization. */
di # Disable interrupts
ehb #
mfc0 $8, $12 # Pickup current SR
ori $8, $8, INITIAL_SR # Build initial SR (all IM bits set)
mtc0 $8, $12 # Setup SR
core_0_wait_loop:
la $8, _tx_thread_system_state # Build address of system state variable of logical VPE 0
lw $9, ($8) # Pickup system state
bne $9, $0, core_0_wait_loop # If non-zero, keep waiting
nop #
la $8, _tx_thread_schedule # Otherwise, initialization is done
jr $8 # Enter main scheduling loop
nop # Delay slot

View File

@@ -0,0 +1,73 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread - Low Level SMP Support */
/** */
/**************************************************************************/
/**************************************************************************/
#define C0_Status $12 /* CP0 Status register */
#define C0_Cause $13 /* CP0 Cause register */
.text
.set noreorder
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_low_level_initialize MIPS32_interAptiv/GNU */
/* 6.x */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function performs low-level initialization of the booting */
/* core. */
/* */
/* INPUT */
/* */
/* number_of_cores Number of cores */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_high_level ThreadX high-level init */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* xx-xx-xxxx Scott Larson Initial Version 6.x */
/* */
/**************************************************************************/
.globl _tx_thread_smp_low_level_initialize
_tx_thread_smp_low_level_initialize:
/* Nothing needed in this port. */
jr.hb $31 # Return to caller
nop # Delay slot (jr.hb clears hazards)

View File

@@ -0,0 +1,119 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread - Low Level SMP Support */
/** */
/**************************************************************************/
/**************************************************************************/
#define UserLocal $4,2 /* CP0 UserLocal: holds this VPE's ID in this port */
#define C0_Status $12 /* CP0 Status register */
.text
.set noreorder
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect MIPS32_interAptiv/GNU */
/* 6.x */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function gets protection for running inside the ThreadX */
/* source. This is accomplished by a combination of a test-and-set */
/* flag and periodically disabling interrupts. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* Previous Status Register */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ThreadX Source */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* xx-xx-xxxx Scott Larson Initial Version 6.x */
/* */
/**************************************************************************/
.globl _tx_thread_smp_protect
_tx_thread_smp_protect:
di $2 # Disable interrupts, old SR in $2 (also the return value)
ehb #
sync
mfc0 $25, UserLocal # Pickup VPE ID
la $10, _tx_thread_smp_protection # Build address to protection structure
lw $9, ($10) # Pickup the protection in-force flag
bne $9, $0, _already_owned # If non-zero, then the protection is already in force
nop #
/* LL/SC atomic acquire sequence - do not reorder these instructions. */
ll $11, ($10) # Load-linked the protection in force flag
bne $11, $0, _get_retry # Is the protection still available?
li $11, 1 # Build protection in force flag (delay slot - always executed)
sc $11, ($10) # Attempt to get the semaphore; $11 = 1 on success, 0 on failure
beq $11, $0, _get_retry # If sc failed ($11 == 0), retry the acquire
sync # Delay slot - always executed
sw $25, 8($10) # Save owning VPE
sw $11, 12($10) # Setup the initial count (1)
#ifdef TX_THREAD_SMP_DEBUG_ENABLE
sll $13, $25, 2 # Build word index based on VPE number
la $12, _tx_thread_current_ptr # Pickup the current thread pointer array
addu $12, $12, $13 # Build address of current thread pointer for this VPE
lw $9, ($12) # Pickup current thread pointer
sw $31, 16($10) # Save caller info
sw $2, 20($10) # Save SR
sw $9, 4($10) # Save the current thread pointer
#endif
j $31 # Return to caller with protection held
nop # Delay slot
_get_retry:
mtc0 $2, C0_Status # Restore interrupt posture so pending interrupts can be taken
ehb #
b _tx_thread_smp_protect # Try to get the protection again
nop #
_already_owned:
lw $9, 8($10) # Pickup the owning VPE
beq $9, $25, _have_the_protection # If equal, we already have the protection
lw $12, 12($10) # Pickup protection count (delay slot - always executed)
mtc0 $2, C0_Status # Another VPE owns it - restore interrupt posture
ehb #
b _tx_thread_smp_protect # Try to get the protection again
nop #
_have_the_protection:
addu $12, $12, 1 # Increment the nesting count
j $31 # Return to caller
sw $12, 12($10) # Store back the protection count (delay slot)

View File

@@ -0,0 +1,80 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread - Low Level SMP Support */
/** */
/**************************************************************************/
/**************************************************************************/
#define GIC_SH_COUNTER_LO 0xbbdc0010 /* Lower 32-bits of GIC common counter */
.text
.set noreorder
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_time_get MIPS32_interAptiv/GNU */
/* 6.x */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function gets the global time value that is used for debug */
/* information and event tracing. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* 32-bit time stamp */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ThreadX Source */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* xx-xx-xxxx Scott Larson Initial Version 6.x */
/* */
/**************************************************************************/
.globl _tx_thread_smp_time_get
_tx_thread_smp_time_get:
la $8, GIC_SH_COUNTER_LO # Pickup address of GIC_SH_COUNTER_LO
lw $2, 0($8) # Pickup 32-bit counter (return value in $2/v0)
jr $31 # Return to caller
nop # Delay slot
.globl _tx_thread_smp_time_get_upper
_tx_thread_smp_time_get_upper:
addu $2, $0, 0 # Just return 0 for the upper, since the time is only 32-bits
jr $31 # Return to caller
nop # Delay slot

View File

@@ -0,0 +1,102 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Thread - Low Level SMP Support */
/** */
/**************************************************************************/
/**************************************************************************/
#define UserLocal $4,2 /* CP0 UserLocal: holds this VPE's ID in this port */
#define C0_Status $12 /* CP0 Status register */
.text
.set noreorder
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_unprotect MIPS32_interAptiv/GNU */
/* 6.x */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function releases previously obtained protection. The supplied */
/* previous SR is restored. If the value of _tx_thread_system_state */
/* and _tx_thread_preempt_disable are both zero, then multithreading */
/* is enabled as well. */
/* */
/* INPUT */
/* */
/* Previous Status Register */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* ThreadX Source */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* xx-xx-xxxx Scott Larson Initial Version 6.x */
/* */
/**************************************************************************/
.globl _tx_thread_smp_unprotect
_tx_thread_smp_unprotect:
di # Disable interrupts
ehb #
sync
mfc0 $25, UserLocal # Pickup VPE ID
la $10, _tx_thread_smp_protection # Build address of protection structure
lw $9, 8($10) # Pickup owning VPE
bne $9, $25, _still_protected # Not the same VPE, protection is in force somewhere else
lw $12, 12($10) # Pickup protection count (delay slot - always executed)
beq $12, $0, _still_protected # If zero, protection is not in force anymore
subu $12, $12, 1 # Decrement nesting count (delay slot - always executed)
sw $12, 12($10) # Store back the protection count
bne $12, $0, _still_protected # If non-zero, nested protection condition
nop #
la $11, _tx_thread_preempt_disable # Build address of preempt disable flag
lw $12, ($11) # Pickup preempt disable flag
bne $12, $0, _still_protected # Don't release protection if preempt disable flag is set
li $8, 0xFFFFFFFF # Setup invalid VPE value (delay slot - always executed)
sw $8, 8($10) # Mark owning VPE as invalid
#ifdef TX_THREAD_SMP_DEBUG_ENABLE
sw $31, 24($10) # Remember the caller of the unprotect
#endif
_release_protect_loop:
sync
sw $0, ($10) # Clear protection in-force flag
sync
_still_protected:
mtc0 $4, C0_Status # Restore caller-supplied interrupt posture ($4/a0)
jr.hb $31 # Return to caller (with hazard barrier)
nop #

View File

@@ -0,0 +1,279 @@
/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/
INITIAL_SR   =   0xFF03                         # Interrupt enable previous (IM7-0 set, EXL+IE)
    .text
    .set    noreorder                           # Branch delay slots are explicit below
/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_stack_build                    MIPS32_interAptiv/GNU     */
/*                                                           6.x          */
/*  AUTHOR                                                                */
/*                                                                        */
/*    Scott Larson, Microsoft Corporation                                 */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function builds a stack frame on the supplied thread's stack.  */
/*    The stack frame results in a fake interrupt return to the supplied  */
/*    function pointer.                                                   */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    thread_ptr ($4/a0)                    Pointer to thread control blk */
/*    function_ptr ($5/a1)                  Pointer to return function    */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    _tx_thread_create                     Create thread service         */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  xx-xx-xxxx     Scott Larson             Initial Version 6.x           */
/*                                                                        */
/**************************************************************************/
/* VOID   _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
{  */
    .globl  _tx_thread_stack_build
_tx_thread_stack_build:
/* Build a fake interrupt frame. The form of the fake interrupt stack
   on the MIPS32_interAptiv should look like the following after it is built.
   NOTE: these offsets are a contract with the scheduler/context-restore code;
   do not change them independently.
   Stack Top:    1           (00)   Interrupt stack frame type
                 $30         (04)   Initial S8
                 $23         (08)   Initial S7
                 $22         (12)   Initial S6
                 $21         (16)   Initial S5
                 $20         (20)   Initial S4
                 $19         (24)   Initial S3
                 $18         (28)   Initial S2
                 $17         (32)   Initial S1
                 $16         (36)   Initial S0
                 hi          (40)   Initial HI register
                 lo          (44)   Initial LO register
                 $25         (48)   Initial t9
                 $24         (52)   Initial t8
                 $15         (56)   Initial t7
                 $14         (60)   Initial t6
                 $13         (64)   Initial t5
                 $12         (68)   Initial t4
                 $11         (72)   Initial t3
                 $10         (76)   Initial t2
                 $9          (80)   Initial t1
                 $8          (84)   Initial t0
                 $7          (88)   Initial a3
                 $6          (92)   Initial a2
                 $5          (96)   Initial a1
                 $4          (100)  Initial a0
                 $3          (104)  Initial v1
                 $2          (108)  Initial v0
                 $1          (112)  Initial at
                 $31         (116)  Initial ra
                 SR          (120)  Initial SR
                 EPC         (124)  Initial EPC
#ifdef TX_ENABLE_64BIT_FPU_SUPPORT
                 $f31        (128)  Initial f31
                 $f30        (136)  Initial f30
                 $f29        (144)  Initial f29
                 $f28        (152)  Initial f28
                 $f27        (160)  Initial f27
                 $f26        (168)  Initial f26
                 $f25        (176)  Initial f25
                 $f24        (184)  Initial f24
                 $f23        (192)  Initial f23
                 $f22        (200)  Initial f22
                 $f21        (208)  Initial f21
                 $f20        (216)  Initial f20
                 $f19        (224)  Initial f19
                 $f18        (232)  Initial f18
                 $f17        (240)  Initial f17
                 $f16        (248)  Initial f16
                 $f15        (256)  Initial f15
                 $f14        (264)  Initial f14
                 $f13        (272)  Initial f13
                 $f12        (280)  Initial f12
                 $f11        (288)  Initial f11
                 $f10        (296)  Initial f10
                 $f9         (304)  Initial f9
                 $f8         (312)  Initial f8
                 $f7         (320)  Initial f7
                 $f6         (328)  Initial f6
                 $f5         (336)  Initial f5
                 $f4         (344)  Initial f4
                 $f3         (352)  Initial f3
                 $f2         (360)  Initial f2
                 $f1         (368)  Initial f1
                 $f0         (376)  Initial f0
                 FCR31       (384)  Initial fcr31
                 unused      (388)  Unused Word
#endif
   Stack Bottom: (higher memory address)  */
    lw      $8, 16($4)                          # Pickup end of stack area (thread_ptr -> tx_thread_stack_end)
    ori     $9, $0, 7                           # Build double alignment mask
    not     $9, $9                              #
    and     $8, $8, $9                          # Make sure double word alignment
/* Actually build the stack frame.  */
    subu    $8, $8, 392                         # Allocate space for the stack frame
    ori     $9, $0, 1                           # Build stack type (1 = interrupt frame)
    sw      $9, ($8)                            # Place stack type on the top
    sw      $0, 4($8)                           # Initial s8
    sw      $0, 8($8)                           # Initial s7
    sw      $0, 12($8)                          # Initial s6
    sw      $0, 16($8)                          # Initial s5
    sw      $0, 20($8)                          # Initial s4
    sw      $0, 24($8)                          # Initial s3
    sw      $0, 28($8)                          # Initial s2
    sw      $0, 32($8)                          # Initial s1
    sw      $0, 36($8)                          # Initial s0
    sw      $0, 40($8)                          # Initial hi
    sw      $0, 44($8)                          # Initial lo
    sw      $0, 48($8)                          # Initial t9
    sw      $0, 52($8)                          # Initial t8
    sw      $0, 56($8)                          # Initial t7
    sw      $0, 60($8)                          # Initial t6
    sw      $0, 64($8)                          # Initial t5
    sw      $0, 68($8)                          # Initial t4
    sw      $0, 72($8)                          # Initial t3
    sw      $0, 76($8)                          # Initial t2
    sw      $0, 80($8)                          # Initial t1
    sw      $0, 84($8)                          # Initial t0
    sw      $0, 88($8)                          # Initial a3
    sw      $0, 92($8)                          # Initial a2
    sw      $0, 96($8)                          # Initial a1
    sw      $0, 100($8)                         # Initial a0
    sw      $0, 104($8)                         # Initial v1
    sw      $0, 108($8)                         # Initial v0
    sw      $0, 112($8)                         # Initial at
    sw      $0, 116($8)                         # Initial ra
    mfc0    $10, $12                            # Pickup current SR
    li      $9,0xDFFFFFFC                       # Preserve upper portion of SR - except for FP (CU1 bit cleared)
    and     $9, $10, $9                         # Clear the lower SR bits
    ori     $9, $9, INITIAL_SR                  # Build initial SR
    sw      $9, 120($8)                         # Initial SR
    sw      $5, 124($8)                         # Initial EPC = thread entry function (a1)
#ifdef TX_ENABLE_64BIT_FPU_SUPPORT
    sw      $0, 128($8)                         # Initial f31
    sw      $0, 132($8)                         #   (upper word of 64-bit FP reg)
    sw      $0, 136($8)                         # Initial f30
    sw      $0, 140($8)                         #
    sw      $0, 144($8)                         # Initial f29
    sw      $0, 148($8)                         #
    sw      $0, 152($8)                         # Initial f28
    sw      $0, 156($8)                         #
    sw      $0, 160($8)                         # Initial f27
    sw      $0, 164($8)                         #
    sw      $0, 168($8)                         # Initial f26
    sw      $0, 172($8)                         #
    sw      $0, 176($8)                         # Initial f25
    sw      $0, 180($8)                         #
    sw      $0, 184($8)                         # Initial f24
    sw      $0, 188($8)                         #
    sw      $0, 192($8)                         # Initial f23
    sw      $0, 196($8)                         #
    sw      $0, 200($8)                         # Initial f22
    sw      $0, 204($8)                         #
    sw      $0, 208($8)                         # Initial f21
    sw      $0, 212($8)                         #
    sw      $0, 216($8)                         # Initial f20
    sw      $0, 220($8)                         #
    sw      $0, 224($8)                         # Initial f19
    sw      $0, 228($8)                         #
    sw      $0, 232($8)                         # Initial f18
    sw      $0, 236($8)                         #
    sw      $0, 240($8)                         # Initial f17
    sw      $0, 244($8)                         #
    sw      $0, 248($8)                         # Initial f16
    sw      $0, 252($8)                         #
    sw      $0, 256($8)                         # Initial f15
    sw      $0, 260($8)                         #
    sw      $0, 264($8)                         # Initial f14
    sw      $0, 268($8)                         #
    sw      $0, 272($8)                         # Initial f13
    sw      $0, 276($8)                         #
    sw      $0, 280($8)                         # Initial f12
    sw      $0, 284($8)                         #
    sw      $0, 288($8)                         # Initial f11
    sw      $0, 292($8)                         #
    sw      $0, 296($8)                         # Initial f10
    sw      $0, 300($8)                         #
    sw      $0, 304($8)                         # Initial f9
    sw      $0, 308($8)                         #
    sw      $0, 312($8)                         # Initial f8
    sw      $0, 316($8)                         #
    sw      $0, 320($8)                         # Initial f7
    sw      $0, 324($8)                         #
    sw      $0, 328($8)                         # Initial f6
    sw      $0, 332($8)                         #
    sw      $0, 336($8)                         # Initial f5
    sw      $0, 340($8)                         #
    sw      $0, 344($8)                         # Initial f4
    sw      $0, 348($8)                         #
    sw      $0, 352($8)                         # Initial f3
    sw      $0, 356($8)                         #
    sw      $0, 360($8)                         # Initial f2
    sw      $0, 364($8)                         #
    sw      $0, 368($8)                         # Initial f1
    sw      $0, 372($8)                         #
    la      $9, _tx_thread_initial_fpu_control_register
    lw      $10, ($9)                           # Pickup initial FPU control register
    sw      $0, 376($8)                         # Initial f0
    sw      $0, 380($8)                         #
    sw      $10, 384($8)                        # Inherit initial fcr31
#endif
/* Set bit indicating the thread is ready for scheduling.  */
    lw      $9, 152($4)                         # Pickup the thread's VPE control register
    ori     $9, $9, 0x8000                      # Set ready bit (bit 15)
    sw      $9, 152($4)                         # Make this thread ready for scheduling
/* Setup stack pointer.  */
/* thread_ptr -> tx_thread_stack_ptr =  t0;  */
    j       $31                                 # Return to caller
    sw      $8, 8($4)                           # Save stack pointer in thread's (delay slot)
/*                                              # control block  */
/* }  */

View File

@@ -0,0 +1,206 @@
/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Thread                                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/
#define UserLocal       $4,2                    /* CP0 UserLocal (reg 4, sel 2): holds this VPE's ID */
#define C0_TCBind       $2,2                    /* CP0 TCBind (reg 2, sel 2) */
#define C0_TCContext    $2,5                    /* CP0 TCContext (reg 2, sel 5) */
    .text
    .set    noreorder                           # Branch delay slots are explicit below
/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_thread_system_return                  MIPS32_interAptiv/GNU     */
/*                                                           6.x          */
/*  AUTHOR                                                                */
/*                                                                        */
/*    Scott Larson, Microsoft Corporation                                 */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function is target processor specific. It is used to transfer  */
/*    control from a thread back to the system. Only a minimal context    */
/*    is saved since the compiler assumes temp registers are going to get */
/*    clobbered by a function call anyway.                                */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    _tx_thread_schedule                   Thread scheduling loop        */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    ThreadX components                                                  */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  xx-xx-xxxx     Scott Larson             Initial Version 6.x           */
/*                                                                        */
/**************************************************************************/
/* VOID   _tx_thread_system_return(VOID)
{  */
    .globl  _tx_thread_system_return
_tx_thread_system_return:
    di      $10                                 # Disable interrupts; di rd saves prior Status in $10
    ehb                                         # Clear execution hazard after di
/* Save minimal context on the stack (solicited frame: type 0, s-regs, hi/lo, ra, SR).  */
    subu    $29, $29, 160                       # Allocate space on the stack
    sw      $0, ($29)                           # Solicited stack type (0)
    sw      $30, 4($29)                         # Save s8
    sw      $23, 8($29)                         # Save s7
    sw      $22, 12($29)                        # Save s6
    sw      $21, 16($29)                        # Save s5
    sw      $20, 20($29)                        # Save s4
    sw      $19, 24($29)                        # Save s3
    sw      $18, 28($29)                        # Save s2
    sw      $17, 32($29)                        # Save s1
    sw      $16, 36($29)                        # Save s0
    mfhi    $8                                  # Pickup hi
    mflo    $9                                  # Pickup lo
    sw      $8, 40($29)                         # Save hi
    sw      $9, 44($29)                         # Save lo
    sw      $31, 48($29)                        # Save ra
    sw      $10, 52($29)                        # Save SR (pre-di value captured above)
    mfc0    $25, UserLocal                      # Pickup VPE ID
#ifdef TX_ENABLE_64BIT_FPU_SUPPORT
/* Check if FPU is enabled for this thread. Note that threads with FPU enabled will only be
   scheduled in VPE 0 and thus only need to be saved for TC VPE 0.  */
    bne     $25, $0, _tx_skip_fpu_sync_save     # If not VPE 0, skip FPU save
    la      $9, _tx_thread_current_ptr          # Pickup address of pointer (delay slot)
    lw      $8, ($9)                            # Pickup current thread pointer
    lw      $15, 176($8)                        # Pickup FPU enable flag
    beq     $15, $0, _tx_skip_fpu_sync_save     # If FPU not enabled, skip FPU save
    nop                                         # (delay slot)
/* Save preserved floating point registers.  */
    cfc1    $8, $31                             # Pickup floating point control reg
    sdc1    $f31, 56($29)                       # Save f31
    sdc1    $f30, 64($29)                       # Save f30
    sdc1    $f29, 72($29)                       # Save f29
    sdc1    $f28, 80($29)                       # Save f28
    sdc1    $f27, 88($29)                       # Save f27
    sdc1    $f26, 96($29)                       # Save f26
    sdc1    $f25, 104($29)                      # Save f25
    sdc1    $f24, 112($29)                      # Save f24
    sdc1    $f23, 120($29)                      # Save f23
    sdc1    $f22, 128($29)                      # Save f22
    sdc1    $f21, 136($29)                      # Save f21
    sdc1    $f20, 144($29)                      # Save f20
    sw      $8, 152($29)                        # Save fcr31
_tx_skip_fpu_sync_save:
#endif
    la      $9, _tx_thread_current_ptr          # Pickup address of pointer
    sll     $25, $25, 2                         # Build index based on VPE number (word offset)
    addu    $9, $9, $25                         # Build address of current thread pointer for this VPE
    lw      $8, ($9)                            # Pickup current thread pointer
    la      $10,_tx_thread_system_stack_ptr     # Pickup stack pointer address
/* Save current stack and switch to system stack.  */
/* _tx_thread_current_ptr[VPE] -> tx_thread_stack_ptr =  SP;
   SP = _tx_thread_system_stack_ptr[VPE];  */
    sw      $29, 8($8)                          # Save stack pointer
    addu    $10, $10, $25                       # Build index to system stack pointer array
    lw      $29, ($10)                          # Switch to system stack
/* Determine if the time-slice is active.  */
/* if (_tx_timer_time_slice[VPE])
{  */
    la      $13, _tx_timer_time_slice           # Pickup time slice variable addr
    addu    $13, $13, $25                       # Index into time-slice variable
    lw      $11, 0($13)                         # Pickup time slice value
    la      $12, _tx_thread_schedule            # Pickup address of scheduling loop
    beqz    $11, _tx_thread_dont_save_ts        # If no time-slice, don't save it
    nop                                         # Delay slot
/* Save time-slice for the thread and clear the current time-slice.  */
/* _tx_thread_current_ptr[VPE] -> tx_thread_time_slice =  _tx_timer_time_slice[VPE];
   _tx_timer_time_slice[VPE] =  0;  */
    sw      $11, 24($8)                         # Save time-slice for thread
    sw      $0, ($13)                           # Clear time-slice variable
/* }  */
_tx_thread_dont_save_ts:
/* Clear the current thread pointer.  */
/* _tx_thread_current_ptr[VPE] =  TX_NULL;  */
    sw      $0, ($9)                            # Clear current thread pointer
/* Set bit indicating the thread is ready for scheduling.  */
    lw      $9, 152($8)                         # Pickup the thread's VPE control register
    ori     $9, $9, 0x8000                      # Set ready bit (bit 15)
    sync
    sw      $9, 152($8)                         # Make this thread ready for scheduling
    la      $10,_tx_thread_smp_protection       # Build address of protection structure
    lw      $9, 4($10)                          # Pickup owning thread
#ifdef TX_THREAD_SMP_DEBUG_ENABLE
_error_loop:
    bne     $8, $9, _error_loop                 # If the owner is not this thread, we have a problem!
    nop                                         # (delay slot)
#endif
    la      $13, _tx_thread_preempt_disable     # Pickup address of preempt disable
    sw      $0, ($13)                           # Clear the preempt disable
    li      $11, 0xFFFFFFFF                    # Build invalid VPE value
    sw      $11, 8($10)                         # Set protection VPE to invalid
    sw      $31, 24($10)                        # Save caller in protect structure
_release_protect_loop:
    sync
    sw      $0, ($10)                           # Release protection
    sync
    j       $12                                 # Return to thread scheduler
    nop                                         # (delay slot)
/* }  */

View File

@@ -0,0 +1,219 @@
/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Timer                                                               */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/
#define UserLocal   $4,2                        /* CP0 UserLocal (reg 4, sel 2): holds this VPE's ID */
#define C0_TCBind   $2,2                        /* CP0 TCBind (reg 2, sel 2) */
    .text
    .set    noreorder                           # Branch delay slots are explicit below
/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_timer_interrupt                       MIPS32_interAptiv/GNU     */
/*                                                           6.x          */
/*  AUTHOR                                                                */
/*                                                                        */
/*    Scott Larson, Microsoft Corporation                                 */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function processes the hardware timer interrupt. This          */
/*    processing includes incrementing the system clock and checking for  */
/*    time slice and/or timer expiration. If either is found, the         */
/*    interrupt context save/restore functions are called along with the  */
/*    expiration functions.                                               */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    _tx_thread_smp_protect                Get protection                */
/*    _tx_thread_smp_unprotect              Release protection            */
/*    _tx_timer_expiration_process          Timer expiration processing   */
/*    _tx_thread_time_slice                 Time slice interrupted thread */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    interrupt vector                                                    */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  xx-xx-xxxx     Scott Larson             Initial Version 6.x           */
/*                                                                        */
/**************************************************************************/
/* VOID   _tx_timer_interrupt(VOID)
{  */
    .globl  _tx_timer_interrupt
_tx_timer_interrupt:
/* Check VPE and throw away any timer interrupts for anything other than VPE 0.  */
    mfc0    $8, UserLocal                       # Pickup VPE ID
    beq     $8, $0, _handle_timer_interrupt     # If 0, VPE 0 should handle the interrupt
    nop                                         # (delay slot)
    jr      $31                                 # Other VPE simply returns
    nop                                         # (delay slot)
_handle_timer_interrupt:
/* Stack frame: 0($29) = local expired flag save area, 4($29) = ra, 8($29) = s0.  */
    subu    $29, $29, 16                        # Allocate some storage on the stack
    sw      $31, 4($29)                         # Save ra
    sw      $16, 8($29)                         # Save preserved register s0
/* Get protection before the timer variables are updated.  */
/* _tx_thread_smp_protect();  */
    jal     _tx_thread_smp_protect              # Get VPE protection
    nop                                         # (delay slot)
    addu    $16, $2, 0                          # Save returned previous SR in s0 for unprotect
/* Increment timer interrupt active counter.  */
/* _tx_timer_interrupt_active++;  */
    la      $9, _tx_timer_interrupt_active      # Build address of timer interrupt active count
    lw      $8, ($9)                            # Pickup timer interrupt active count
    addu    $8, $8, 1                           # Increment timer interrupt active count
    sw      $8, ($9)                            # Store new timer interrupt active count
    sync
/* Increment the system clock.  */
/* _tx_timer_system_clock++;  */
    la      $9, _tx_timer_system_clock          # Pickup address of system clock
    lw      $8, ($9)                            # Pickup system clock
    addu    $8, $8, 1                           # Increment system clock
    sw      $8, ($9)                            # Store new system clock
/* Test for timer expiration.  */
/* if (*_tx_timer_current_ptr)
{  */
    la      $13, _tx_timer_expired              # Pickup address of timer expired flag
    lw      $10, ($13)                          # Pickup the timer expired flag
    bne     $10, $0, _tx_timer_done             # If already expired, skip expiration processing
    ori     $15, $0, 0                          # (delay slot) default local expired flag to false so
                                                #   _tx_timer_done never reads an uninitialized $15
    la      $9, _tx_timer_current_ptr           # Pickup address of current ptr
    lw      $8, ($9)                            # Pickup current pointer
    la      $13, _tx_timer_expired              # Pickup address of timer expired flag
    lw      $10, ($8)                           # Pickup the current timer entry
    ori     $12, $0, 1                          # Build TX_TRUE flag
    beqz    $10, _tx_timer_no_timer             # If NULL, no timer has expired
    nop                                         # Delay slot
/* Set expiration flag.  */
/* _tx_timer_expired =  TX_TRUE;  */
    ori     $15, $0, 2                          # Set local expired flag (any non-zero value)
    b       _tx_timer_done                      # Finished timer processing
    sw      $12, ($13)                          # Set expired flag in memory (delay slot)
/* }
else
{  */
_tx_timer_no_timer:
    ori     $15, $0, 0                          # Set expired flag to false
/* No timer expired, increment the timer pointer.  */
/* _tx_timer_current_ptr++;  */
/* Check for wrap-around.  */
/* if (_tx_timer_current_ptr == _tx_timer_list_end)  */
    la      $12, _tx_timer_list_end             # Pickup address of list end pointer
    lw      $11, ($12)                          # Pickup actual list end
    addu    $8, $8, 4                           # Point to next timer entry
    bne     $8, $11, _tx_timer_skip_wrap        # If not same, good pointer
    sw      $8, ($9)                            # Store new timer pointer (delay slot)
/* Wrap to beginning of list.  */
/* _tx_timer_current_ptr =  _tx_timer_list_start;  */
    la      $12, _tx_timer_list_start           # Pickup address of list start pointer
    lw      $10, ($12)                          # Pickup start of the list
    sw      $10, ($9)                           # Store new timer pointer
_tx_timer_skip_wrap:
/* }  */
_tx_timer_done:
/* Did a timer expire?  */
/* if (_tx_timer_expired)
{  */
    beqz    $15, _tx_timer_dont_activate        # No, timer not expired
    nop                                         # Delay slot
/* Call the timer expiration processing.  */
/* _tx_timer_expiration_process(void);  */
    la      $9, _tx_timer_expiration_process    # Build address of _tx_timer_expiration_process routine
    jal     $9                                  # Call _tx_timer_expiration_process
    sw      $15, ($29)                          # (delay slot) save local expired flag - $15 is a temp
                                                #   register and is clobbered across the call
    lw      $15, ($29)                          # Recover local expired flag
/* }  */
_tx_timer_dont_activate:
/* Call time-slice processing.  */
/* _tx_thread_time_slice();  */
    la      $9, _tx_thread_time_slice           # Pickup address of time slice function
    jal     $9                                  # Call time slice
    nop                                         # Delay slot
/* Decrement timer interrupt active counter.  */
/* _tx_timer_interrupt_active--;  */
    la      $9, _tx_timer_interrupt_active      # Build address of timer interrupt active count
    lw      $8, ($9)                            # Pickup timer interrupt active count
    subu    $8, $8, 1                           # Decrement timer interrupt active count
    sw      $8, ($9)                            # Store new timer interrupt active count
    sync
/* Release VPE protection.  */
/* _tx_thread_smp_unprotect();  */
    addu    $4, $16, 0                          # Setup input parameter (previous SR from protect)
    jal     _tx_thread_smp_unprotect            # Release protection
    nop                                         # (delay slot)
    lw      $31, 4($29)                         # Recover ra
    lw      $16, 8($29)                         # Recover s0
    addu    $29, $29, 16                        # Recover stack space
    j       $31                                 # Return to caller
    nop                                         # Delay slot
/* }  */