Release 6.1.9

This commit is contained in:
Yuxin Zhou
2021-10-14 00:51:26 +00:00
parent 215df45d4b
commit 1af8404c54
1812 changed files with 60698 additions and 249862 deletions

View File

@@ -170,7 +170,7 @@ ULONG lower_tbu;
/* Save the pool's address in the block for when it is released! */
temp_ptr = TX_BLOCK_POOL_TO_UCHAR_POINTER_CONVERT(pool_ptr);
*next_block_ptr = temp_ptr;
#ifdef TX_ENABLE_EVENT_TRACE
/* Check that the event time stamp is unchanged. A different
@@ -182,7 +182,7 @@ ULONG lower_tbu;
/* Is the time stamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, update the entry with the address. */
#ifdef TX_MISRA_ENABLE
entry_ptr -> tx_trace_buffer_entry_info_2 = TX_POINTER_TO_ULONG_CONVERT(*block_ptr);
@@ -200,7 +200,7 @@ ULONG lower_tbu;
/* Set status to success. */
status = TX_SUCCESS;
/* Restore interrupts. */
TX_RESTORE
}
@@ -229,7 +229,7 @@ ULONG lower_tbu;
{
/* Prepare for suspension of this thread. */
#ifdef TX_BLOCK_POOL_ENABLE_PERFORMANCE_INFO
/* Increment the total suspensions counter. */
@@ -261,7 +261,7 @@ ULONG lower_tbu;
/* Pickup the number of suspended threads. */
suspended_count = (pool_ptr -> tx_block_pool_suspended_count);
/* Increment the number of suspended threads. */
(pool_ptr -> tx_block_pool_suspended_count)++;
@@ -322,11 +322,11 @@ ULONG lower_tbu;
allocate event. In that case, do nothing here. */
if (entry_ptr != TX_NULL)
{
/* Is the time-stamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, update the entry with the address. */
#ifdef TX_MISRA_ENABLE
entry_ptr -> tx_trace_buffer_entry_info_2 = TX_POINTER_TO_ULONG_CONVERT(*block_ptr);

View File

@@ -87,7 +87,7 @@ UINT suspended_count;
TX_THREAD *next_thread;
TX_THREAD *previous_thread;
#ifndef TX_NOT_INTERRUPTABLE
/* Disable interrupts to remove the suspended thread from the block pool. */
@@ -96,7 +96,7 @@ TX_THREAD *previous_thread;
/* Determine if the cleanup is still required. */
if (thread_ptr -> tx_thread_suspend_cleanup == &(_tx_block_pool_cleanup))
{
/* Check for valid suspension sequence. */
if (suspension_sequence == thread_ptr -> tx_thread_suspension_sequence)
{
@@ -107,7 +107,7 @@ TX_THREAD *previous_thread;
/* Check for a NULL byte pool pointer. */
if (pool_ptr != TX_NULL)
{
/* Check for valid pool ID. */
if (pool_ptr -> tx_block_pool_id == TX_BLOCK_POOL_ID)
{
@@ -133,13 +133,13 @@ TX_THREAD *previous_thread;
suspended_count = pool_ptr -> tx_block_pool_suspended_count;
/* Remove the suspended thread from the list. */
/* See if this is the only suspended thread on the list. */
if (suspended_count == TX_NO_SUSPENSIONS)
{
/* Yes, the only suspended thread. */
/* Update the head pointer. */
pool_ptr -> tx_block_pool_suspension_list = TX_NULL;
}
@@ -157,7 +157,7 @@ TX_THREAD *previous_thread;
/* Determine if we need to update the head pointer. */
if (pool_ptr -> tx_block_pool_suspension_list == thread_ptr)
{
/* Update the list head pointer. */
pool_ptr -> tx_block_pool_suspension_list = next_thread;
}
@@ -168,7 +168,7 @@ TX_THREAD *previous_thread;
if (thread_ptr -> tx_thread_state == TX_BLOCK_MEMORY)
{
/* Timeout condition and the thread still suspended on the block pool.
   Setup return error status and resume the thread. */
#ifdef TX_BLOCK_POOL_ENABLE_PERFORMANCE_INFO

View File

@@ -97,7 +97,7 @@ TX_BLOCK_POOL *previous_pool;
an ALIGN_TYPE (typically this is a 32-bit ULONG). This helps guarantee proper alignment. */
block_size = (((block_size + (sizeof(ALIGN_TYPE))) - ((ALIGN_TYPE) 1))/(sizeof(ALIGN_TYPE))) * (sizeof(ALIGN_TYPE));
/* Round the pool size down to something that is evenly divisible by
   an ALIGN_TYPE (typically this is a 32-bit ULONG). */
pool_size = (pool_size/(sizeof(ALIGN_TYPE))) * (sizeof(ALIGN_TYPE));
@@ -106,7 +106,7 @@ TX_BLOCK_POOL *previous_pool;
pool_ptr -> tx_block_pool_start = TX_VOID_TO_UCHAR_POINTER_CONVERT(pool_start);
pool_ptr -> tx_block_pool_size = pool_size;
pool_ptr -> tx_block_pool_block_size = (UINT) block_size;
/* Calculate the total number of blocks. */
total_blocks = pool_size/(block_size + (sizeof(UCHAR *)));
@@ -145,7 +145,7 @@ TX_BLOCK_POOL *previous_pool;
/* Set the last block's forward pointer to NULL. */
block_link_ptr = TX_UCHAR_TO_INDIRECT_UCHAR_POINTER_CONVERT(block_ptr);
*block_link_ptr = TX_NULL;
/* Setup the starting pool address. */
pool_ptr -> tx_block_pool_available_list = TX_VOID_TO_UCHAR_POINTER_CONVERT(pool_start);
@@ -180,7 +180,7 @@ TX_BLOCK_POOL *previous_pool;
pool_ptr -> tx_block_pool_created_previous = previous_pool;
pool_ptr -> tx_block_pool_created_next = next_pool;
}
/* Increment the created count. */
_tx_block_pool_created_count++;
@@ -208,7 +208,7 @@ TX_BLOCK_POOL *previous_pool;
/* Not enough memory for one block, return appropriate error. */
status = TX_SIZE_ERROR;
}
/* Return completion status. */
return(status);
}

View File

@@ -126,7 +126,7 @@ TX_BLOCK_POOL *previous_pool;
/* See if we have to update the created list head pointer. */
if (_tx_block_pool_created_ptr == pool_ptr)
{
/* Yes, move the head pointer to the next link. */
_tx_block_pool_created_ptr = next_pool;
}
@@ -148,14 +148,14 @@ TX_BLOCK_POOL *previous_pool;
on this block pool. */
while (suspended_count != TX_NO_SUSPENSIONS)
{
/* Decrement the suspension count. */
suspended_count--;
/* Lockout interrupts. */
TX_DISABLE
/* Clear the cleanup pointer, this prevents the timeout from doing
   anything. */
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;

View File

@@ -77,8 +77,8 @@
/* resulting in version 6.1 */
/* */
/**************************************************************************/
UINT _tx_block_pool_info_get(TX_BLOCK_POOL *pool_ptr, CHAR **name, ULONG *available_blocks,
    ULONG *total_blocks, TX_THREAD **first_suspended,
    ULONG *suspended_count, TX_BLOCK_POOL **next_pool)
{
@@ -100,42 +100,42 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the name of the block pool. */
if (name != TX_NULL)
{
*name = pool_ptr -> tx_block_pool_name;
}
/* Retrieve the number of available blocks in the block pool. */
if (available_blocks != TX_NULL)
{
*available_blocks = (ULONG) pool_ptr -> tx_block_pool_available;
}
/* Retrieve the total number of blocks in the block pool. */
if (total_blocks != TX_NULL)
{
*total_blocks = (ULONG) pool_ptr -> tx_block_pool_total;
}
/* Retrieve the first thread suspended on this block pool. */
if (first_suspended != TX_NULL)
{
*first_suspended = pool_ptr -> tx_block_pool_suspension_list;
}
/* Retrieve the number of threads suspended on this block pool. */
if (suspended_count != TX_NULL)
{
*suspended_count = (ULONG) pool_ptr -> tx_block_pool_suspended_count;
}
/* Retrieve the pointer to the next block pool created. */
if (next_pool != TX_NULL)
{
*next_pool = pool_ptr -> tx_block_pool_created_next;
}

View File

@@ -93,7 +93,7 @@ UINT status;
/* Determine if this is a legal request. */
if (pool_ptr == TX_NULL)
{
/* Block pool pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
@@ -101,13 +101,13 @@ UINT status;
/* Determine if the pool ID is invalid. */
else if (pool_ptr -> tx_block_pool_id != TX_BLOCK_POOL_ID)
{
/* Block pool pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
else
{
/* Disable interrupts. */
TX_DISABLE
@@ -123,28 +123,28 @@ UINT status;
/* Retrieve the number of allocations from this block pool. */
if (allocates != TX_NULL)
{
*allocates = pool_ptr -> tx_block_pool_performance_allocate_count;
}
/* Retrieve the number of blocks released to this block pool. */
if (releases != TX_NULL)
{
*releases = pool_ptr -> tx_block_pool_performance_release_count;
}
/* Retrieve the number of thread suspensions on this block pool. */
if (suspensions != TX_NULL)
{
*suspensions = pool_ptr -> tx_block_pool_performance_suspension_count;
}
/* Retrieve the number of thread timeouts on this block pool. */
if (timeouts != TX_NULL)
{
*timeouts = pool_ptr -> tx_block_pool_performance_timeout_count;
}
@@ -157,7 +157,7 @@ UINT status;
#else
UINT status;
/* Access input arguments just for the sake of lint, MISRA, etc. */
if (pool_ptr != TX_NULL)
{
@@ -191,7 +191,7 @@ UINT status;
}
else
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}

View File

@@ -100,28 +100,28 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the total number of block allocations. */
if (allocates != TX_NULL)
{
*allocates = _tx_block_pool_performance_allocate_count;
}
/* Retrieve the total number of blocks released. */
if (releases != TX_NULL)
{
*releases = _tx_block_pool_performance_release_count;
}
/* Retrieve the total number of block pool thread suspensions. */
if (suspensions != TX_NULL)
{
*suspensions = _tx_block_pool_performance_suspension_count;
}
/* Retrieve the total number of block pool thread timeouts. */
if (timeouts != TX_NULL)
{
*timeouts = _tx_block_pool_performance_timeout_count;
}
@@ -139,35 +139,35 @@ UINT status;
/* Access input arguments just for the sake of lint, MISRA, etc. */
if (allocates != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (releases != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (suspensions != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (timeouts != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
/* Return completion status. */
return(status);
#endif

View File

@@ -130,7 +130,7 @@ UINT list_changed;
/* Remember the suspension count and head pointer. */
head_ptr = pool_ptr -> tx_block_pool_suspension_list;
/* Default the highest priority thread to the thread at the front of the list. */
priority_thread_ptr = head_ptr;
@@ -142,7 +142,7 @@ UINT list_changed;
/* Set the list changed flag to false. */
list_changed = TX_FALSE;
/* Search through the list to find the highest priority thread. */
do
{
@@ -160,33 +160,33 @@ UINT list_changed;
/* Disable interrupts again. */
TX_DISABLE
/* Determine if any changes to the list have occurred while
   interrupts were enabled. */
/* Is the list head the same? */
if (head_ptr != pool_ptr -> tx_block_pool_suspension_list)
{
/* The list head has changed, set the list changed flag. */
list_changed = TX_TRUE;
}
else
{
/* Is the suspended count the same? */
if (suspended_count != pool_ptr -> tx_block_pool_suspended_count)
{
/* The list head has changed, set the list changed flag. */
list_changed = TX_TRUE;
}
}
/* Determine if the list has changed. */
if (list_changed == TX_FALSE)
{
/* Move the thread pointer to the next thread. */
thread_ptr = thread_ptr -> tx_thread_suspended_next;
}
@@ -202,7 +202,7 @@ UINT list_changed;
/* Setup search pointer. */
thread_ptr = priority_thread_ptr -> tx_thread_suspended_next;
/* Reset the list changed flag. */
list_changed = TX_FALSE;
}
@@ -212,12 +212,12 @@ UINT list_changed;
/* Release preemption. */
_tx_thread_preempt_disable--;
/* Now determine if the highest priority thread is at the front
   of the list. */
if (priority_thread_ptr != head_ptr)
{
/* No, we need to move the highest priority suspended thread to the
   front of the list. */
/* First, remove the highest priority thread by updating the

View File

@@ -90,7 +90,7 @@ TX_THREAD *previous_thread;
/* Disable interrupts to put this block back in the pool. */
TX_DISABLE
/* Pickup the pool pointer which is just previous to the starting
   address of the block that the caller sees. */
work_ptr = TX_VOID_TO_UCHAR_POINTER_CONVERT(block_ptr);
work_ptr = TX_UCHAR_POINTER_SUB(work_ptr, (sizeof(UCHAR *)));
@@ -121,7 +121,7 @@ TX_THREAD *previous_thread;
/* Decrement the number of threads suspended. */
(pool_ptr -> tx_block_pool_suspended_count)--;
/* Pickup the suspended count. */
suspended_count = (pool_ptr -> tx_block_pool_suspended_count);
@@ -148,7 +148,7 @@ TX_THREAD *previous_thread;
next_thread -> tx_thread_suspended_previous = previous_thread;
previous_thread -> tx_thread_suspended_next = next_thread;
}
/* Prepare for resumption of the first thread. */
/* Clear cleanup routine to avoid timeout. */

View File

@@ -181,17 +181,17 @@ ULONG lower_tbu;
/* Determine if we are finished. */
if (work_ptr != TX_NULL)
{
/* Yes, we have found a block the search is finished. */
finished = TX_TRUE;
}
else
{
/* No block was found, does this thread still own the pool? */
if (pool_ptr -> tx_byte_pool_owner == thread_ptr)
{
/* Yes, then we have looked through the entire pool and haven't found the memory. */
finished = TX_TRUE;
}
@@ -217,7 +217,7 @@ ULONG lower_tbu;
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, update the entry with the address. */
#ifdef TX_MISRA_ENABLE
entry_ptr -> tx_trace_buffer_entry_info_2 = TX_POINTER_TO_ULONG_CONVERT(*memory_ptr);
@@ -242,7 +242,7 @@ ULONG lower_tbu;
/* Restore interrupts. */
TX_RESTORE
/* Set the status to success. */
status = TX_SUCCESS;
}
@@ -304,7 +304,7 @@ ULONG lower_tbu;
/* Increment the suspension count. */
(pool_ptr -> tx_byte_pool_suspended_count)++;
/* Setup suspension list. */
if (suspended_count == TX_NO_SUSPENSIONS)
{
@@ -366,7 +366,7 @@ ULONG lower_tbu;
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, update the entry with the address. */
#ifdef TX_MISRA_ENABLE
entry_ptr -> tx_trace_buffer_entry_info_2 = TX_POINTER_TO_ULONG_CONVERT(*memory_ptr);
@@ -396,7 +396,7 @@ ULONG lower_tbu;
}
else
{
/* Restore interrupts. */
TX_RESTORE

View File

@@ -87,7 +87,7 @@ UINT suspended_count;
TX_THREAD *next_thread;
TX_THREAD *previous_thread;
#ifndef TX_NOT_INTERRUPTABLE
/* Disable interrupts to remove the suspended thread from the byte pool. */
@@ -96,7 +96,7 @@ TX_THREAD *previous_thread;
/* Determine if the cleanup is still required. */
if (thread_ptr -> tx_thread_suspend_cleanup == &(_tx_byte_pool_cleanup))
{
/* Check for valid suspension sequence. */
if (suspension_sequence == thread_ptr -> tx_thread_suspension_sequence)
{
@@ -107,7 +107,7 @@ TX_THREAD *previous_thread;
/* Check for a NULL byte pool pointer. */
if (pool_ptr != TX_NULL)
{
/* Check for valid pool ID. */
if (pool_ptr -> tx_byte_pool_id == TX_BYTE_POOL_ID)
{
@@ -126,18 +126,18 @@ TX_THREAD *previous_thread;
/* Decrement the suspension count. */
pool_ptr -> tx_byte_pool_suspended_count--;
/* Pickup the suspended count. */
suspended_count = pool_ptr -> tx_byte_pool_suspended_count;
/* Remove the suspended thread from the list. */
/* See if this is the only suspended thread on the list. */
if (suspended_count == TX_NO_SUSPENSIONS)
{
/* Yes, the only suspended thread. */
/* Update the head pointer. */
pool_ptr -> tx_byte_pool_suspension_list = TX_NULL;
}
@@ -155,7 +155,7 @@ TX_THREAD *previous_thread;
/* Determine if we need to update the head pointer. */
if (pool_ptr -> tx_byte_pool_suspension_list == thread_ptr)
{
/* Update the list head pointer. */
pool_ptr -> tx_byte_pool_suspension_list = next_thread;
}
@@ -166,7 +166,7 @@ TX_THREAD *previous_thread;
if (thread_ptr -> tx_thread_state == TX_BYTE_MEMORY)
{
/* Timeout condition and the thread still suspended on the byte pool.
   Setup return error status and resume the thread. */
#ifdef TX_BYTE_POOL_ENABLE_PERFORMANCE_INFO

View File

@@ -89,7 +89,7 @@ ALIGN_TYPE *free_ptr;
/* Initialize the byte pool control block to all zeros. */
TX_MEMSET(pool_ptr, 0, (sizeof(TX_BYTE_POOL)));
/* Round the pool size down to something that is evenly divisible by
   an ULONG. */
pool_size = (pool_size/(sizeof(ALIGN_TYPE))) * (sizeof(ALIGN_TYPE));
@@ -104,17 +104,17 @@ ALIGN_TYPE *free_ptr;
pool_ptr -> tx_byte_pool_list = TX_VOID_TO_UCHAR_POINTER_CONVERT(pool_start);
pool_ptr -> tx_byte_pool_search = TX_VOID_TO_UCHAR_POINTER_CONVERT(pool_start);
/* Initially, the pool will have two blocks. One large block at the
   beginning that is available and a small allocated block at the end
   of the pool that is there just for the algorithm. Be sure to count
   the available block's header in the available bytes count. */
pool_ptr -> tx_byte_pool_available = pool_size - ((sizeof(VOID *)) + (sizeof(ALIGN_TYPE)));
pool_ptr -> tx_byte_pool_fragments = ((UINT) 2);
/* Each block contains a "next" pointer that points to the next block in the pool followed by a ALIGN_TYPE
field that contains either the constant TX_BYTE_BLOCK_FREE (if the block is free) or a pointer to the
owning pool (if the block is allocated). */
/* Calculate the end of the pool's memory area. */
block_ptr = TX_VOID_TO_UCHAR_POINTER_CONVERT(pool_start);
block_ptr = TX_UCHAR_POINTER_ADD(block_ptr, pool_size);
@@ -177,7 +177,7 @@ ALIGN_TYPE *free_ptr;
/* Increment the number of created byte pools. */
_tx_byte_pool_created_count++;
/* Optional byte pool create extended processing. */
TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)

View File

@@ -110,7 +110,7 @@ TX_BYTE_POOL *previous_pool;
/* Decrement the number of byte pools created. */
_tx_byte_pool_created_count--;
/* See if the byte pool is the only one on the list. */
if (_tx_byte_pool_created_count == TX_EMPTY)
{
@@ -130,7 +130,7 @@ TX_BYTE_POOL *previous_pool;
/* See if we have to update the created list head pointer. */
if (_tx_byte_pool_created_ptr == pool_ptr)
{
/* Yes, move the head pointer to the next link. */
_tx_byte_pool_created_ptr = next_pool;
}
@@ -144,7 +144,7 @@ TX_BYTE_POOL *previous_pool;
pool_ptr -> tx_byte_pool_suspension_list = TX_NULL;
suspended_count = pool_ptr -> tx_byte_pool_suspended_count;
pool_ptr -> tx_byte_pool_suspended_count = TX_NO_SUSPENSIONS;
/* Restore interrupts. */
TX_RESTORE
@@ -152,14 +152,14 @@ TX_BYTE_POOL *previous_pool;
on this byte pool. */
while (suspended_count != TX_NO_SUSPENSIONS)
{
/* Decrement the suspension count. */
suspended_count--;
/* Lockout interrupts. */
TX_DISABLE
/* Clear the cleanup pointer, this prevents the timeout from doing
   anything. */
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;

View File

@@ -77,8 +77,8 @@
/* resulting in version 6.1 */
/* */
/**************************************************************************/
UINT _tx_byte_pool_info_get(TX_BYTE_POOL *pool_ptr, CHAR **name, ULONG *available_bytes,
    ULONG *fragments, TX_THREAD **first_suspended,
    ULONG *suspended_count, TX_BYTE_POOL **next_pool)
{
@@ -100,42 +100,42 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the name of the byte pool. */
if (name != TX_NULL)
{
*name = pool_ptr -> tx_byte_pool_name;
}
/* Retrieve the number of available bytes in the byte pool. */
if (available_bytes != TX_NULL)
{
*available_bytes = pool_ptr -> tx_byte_pool_available;
}
/* Retrieve the total number of bytes in the byte pool. */
if (fragments != TX_NULL)
{
*fragments = (ULONG) pool_ptr -> tx_byte_pool_fragments;
}
/* Retrieve the first thread suspended on this byte pool. */
if (first_suspended != TX_NULL)
{
*first_suspended = pool_ptr -> tx_byte_pool_suspension_list;
}
/* Retrieve the number of threads suspended on this byte pool. */
if (suspended_count != TX_NULL)
{
*suspended_count = (ULONG) pool_ptr -> tx_byte_pool_suspended_count;
}
/* Retrieve the pointer to the next byte pool created. */
if (next_pool != TX_NULL)
{
*next_pool = pool_ptr -> tx_byte_pool_created_next;
}

View File

@@ -102,15 +102,15 @@ UINT status;
/* Determine if this is a legal request. */
if (pool_ptr == TX_NULL)
{
/* Byte pool pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
/* Determine if the pool ID is invalid. */
else if (pool_ptr -> tx_byte_pool_id != TX_BYTE_POOL_ID)
{
/* Byte pool pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
@@ -132,7 +132,7 @@ UINT status;
/* Retrieve the number of allocates on this byte pool. */
if (allocates != TX_NULL)
{
*allocates = pool_ptr -> tx_byte_pool_performance_allocate_count;
}
@@ -146,35 +146,35 @@ UINT status;
/* Retrieve the number of fragments searched in this byte pool. */
if (fragments_searched != TX_NULL)
{
*fragments_searched = pool_ptr -> tx_byte_pool_performance_search_count;
}
/* Retrieve the number of fragments merged on this byte pool. */
if (merges != TX_NULL)
{
*merges = pool_ptr -> tx_byte_pool_performance_merge_count;
}
/* Retrieve the number of fragment splits on this byte pool. */
if (splits != TX_NULL)
{
*splits = pool_ptr -> tx_byte_pool_performance_split_count;
}
/* Retrieve the number of suspensions on this byte pool. */
if (suspensions != TX_NULL)
{
*suspensions = pool_ptr -> tx_byte_pool_performance_suspension_count;
}
/* Retrieve the number of timeouts on this byte pool. */
if (timeouts != TX_NULL)
{
*timeouts = pool_ptr -> tx_byte_pool_performance_timeout_count;
}
@@ -184,7 +184,7 @@ UINT status;
/* Return completion status. */
status = TX_SUCCESS;
}
/* Return completion status. */
return(status);
#else
@@ -195,55 +195,55 @@ UINT status;
/* Access input arguments just for the sake of lint, MISRA, etc. */
if (pool_ptr != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (allocates != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (releases != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (fragments_searched != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (merges != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (splits != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (suspensions != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (timeouts != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}

View File

@@ -109,58 +109,58 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the total number of byte pool allocates. */
if (allocates != TX_NULL)
{
*allocates = _tx_byte_pool_performance_allocate_count;
}
/* Retrieve the total number of byte pool releases. */
if (releases != TX_NULL)
{
*releases = _tx_byte_pool_performance_release_count;
}
/* Retrieve the total number of byte pool fragments searched. */
if (fragments_searched != TX_NULL)
{
*fragments_searched = _tx_byte_pool_performance_search_count;
}
/* Retrieve the total number of byte pool fragments merged. */
if (merges != TX_NULL)
{
*merges = _tx_byte_pool_performance_merge_count;
}
/* Retrieve the total number of byte pool fragment splits. */
if (splits != TX_NULL)
{
*splits = _tx_byte_pool_performance_split_count;
}
/* Retrieve the total number of byte pool suspensions. */
if (suspensions != TX_NULL)
{
*suspensions = _tx_byte_pool_performance_suspension_count;
}
/* Retrieve the total number of byte pool timeouts. */
if (timeouts != TX_NULL)
{
*timeouts = _tx_byte_pool_performance_timeout_count;
}
/* Restore interrupts. */
TX_RESTORE
/* Return completion status. */
return(TX_SUCCESS);
#else
UINT status;
@@ -215,7 +215,7 @@ UINT status;
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
/* Return completion status. */
return(status);
#endif

View File

@@ -161,19 +161,19 @@ UINT list_changed;
/* Disable interrupts again. */
TX_DISABLE
/* Determine if any changes to the list have occurred while
   interrupts were enabled. */
/* Is the list head the same? */
if (head_ptr != pool_ptr -> tx_byte_pool_suspension_list)
{
/* The list head has changed, set the list changed flag. */
list_changed = TX_TRUE;
}
else
{
/* Is the suspended count the same? */
if (suspended_count != pool_ptr -> tx_byte_pool_suspended_count)
{
@@ -212,12 +212,12 @@ UINT list_changed;
/* Release preemption. */
_tx_thread_preempt_disable--;
/* Now determine if the highest priority thread is at the front
   of the list. */
if (priority_thread_ptr != head_ptr)
{
/* No, we need to move the highest priority suspended thread to the
   front of the list. */
/* First, remove the highest priority thread by updating the

View File

@@ -98,7 +98,7 @@ UCHAR **suspend_info_ptr;
/* Default to successful status. */
status = TX_SUCCESS;
/* Set the pool pointer to NULL. */
pool_ptr = TX_NULL;
@@ -109,7 +109,7 @@ UCHAR **suspend_info_ptr;
work_ptr = TX_VOID_TO_UCHAR_POINTER_CONVERT(memory_ptr);
if (work_ptr != TX_NULL)
{
/* Back off the memory pointer to pickup its header. */
work_ptr = TX_UCHAR_POINTER_SUB(work_ptr, ((sizeof(UCHAR *)) + (sizeof(ALIGN_TYPE))));
@@ -127,7 +127,7 @@ UCHAR **suspend_info_ptr;
/* See if we have a valid pool pointer. */
if (pool_ptr == TX_NULL)
{
/* Return pointer error. */
status = TX_PTR_ERROR;
}
@@ -137,10 +137,10 @@ UCHAR **suspend_info_ptr;
/* See if we have a valid pool. */
if (pool_ptr -> tx_byte_pool_id != TX_BYTE_POOL_ID)
{
/* Return pointer error. */
status = TX_PTR_ERROR;
/* Reset the pool pointer is NULL. */
pool_ptr = TX_NULL;
}
@@ -163,13 +163,13 @@ UCHAR **suspend_info_ptr;
/* Determine if the pointer is valid. */
if (pool_ptr == TX_NULL)
{
/* Restore interrupts. */
TX_RESTORE
}
else
{
/* At this point, we know that the pointer is valid. */
/* Pickup thread pointer. */
@@ -201,7 +201,7 @@ UCHAR **suspend_info_ptr;
/* Update the number of available bytes in the pool. */
block_link_ptr = TX_UCHAR_TO_INDIRECT_UCHAR_POINTER_CONVERT(work_ptr);
next_block_ptr = *block_link_ptr;
pool_ptr -> tx_byte_pool_available =
    pool_ptr -> tx_byte_pool_available + TX_UCHAR_POINTER_DIF(next_block_ptr, work_ptr);
/* Determine if the free block is prior to current search pointer. */
@@ -215,8 +215,8 @@ UCHAR **suspend_info_ptr;
/* Determine if there are threads suspended on this byte pool. */
if (pool_ptr -> tx_byte_pool_suspended_count != TX_NO_SUSPENSIONS)
{
/* Now examine the suspension list to find threads waiting for
   memory. Maybe it is now available! */
while (pool_ptr -> tx_byte_pool_suspended_count != TX_NO_SUSPENSIONS)
{
@@ -245,7 +245,7 @@ UCHAR **suspend_info_ptr;
/* If there is not enough memory, break this loop! */
if (work_ptr == TX_NULL)
{
/* Break out of the loop. */
break;
}
@@ -257,7 +257,7 @@ UCHAR **suspend_info_ptr;
/* Also, makes sure the memory size is the same. */
if (susp_thread_ptr -> tx_thread_suspend_info == memory_size)
{
/* Remove the suspended thread from the list. */
/* Decrement the number of threads suspended. */
@@ -302,7 +302,7 @@ UCHAR **suspend_info_ptr;
/* Clear the memory pointer to indicate that it was given to the suspended thread. */
work_ptr = TX_NULL;
/* Put return status into the thread control block. */
susp_thread_ptr -> tx_thread_suspend_status = TX_SUCCESS;
@@ -328,11 +328,11 @@ UCHAR **suspend_info_ptr;
TX_DISABLE
}
}
/* Determine if the memory was given to the suspended thread. */
if (work_ptr != TX_NULL)
{
/* No, it wasn't given to the suspended thread. */
/* Put the memory back on the available list since this thread is no longer
@@ -345,7 +345,7 @@ UCHAR **suspend_info_ptr;
/* Update the number of available bytes in the pool. */
block_link_ptr = TX_UCHAR_TO_INDIRECT_UCHAR_POINTER_CONVERT(work_ptr);
next_block_ptr = *block_link_ptr;
pool_ptr -> tx_byte_pool_available =
    pool_ptr -> tx_byte_pool_available + TX_UCHAR_POINTER_DIF(next_block_ptr, work_ptr);
/* Determine if the current pointer is before the search pointer. */
@@ -357,7 +357,7 @@ UCHAR **suspend_info_ptr;
}
}
}
/* Restore interrupts. */
TX_RESTORE
@@ -366,7 +366,7 @@ UCHAR **suspend_info_ptr;
}
else
{
/* No, threads suspended, restore interrupts. */
TX_RESTORE
}

View File

@@ -104,11 +104,11 @@ TX_THREAD *previous_thread;
/* Setup pointer to event flags control block. */
group_ptr = TX_VOID_TO_EVENT_FLAGS_POINTER_CONVERT(thread_ptr -> tx_thread_suspend_control_block);
/* Check for a NULL event flags control block pointer. */
if (group_ptr != TX_NULL)
{
/* Is the group pointer ID valid? */
if (group_ptr -> tx_event_flags_group_id == TX_EVENT_FLAGS_ID)
{
@@ -133,9 +133,9 @@ TX_THREAD *previous_thread;
/* Pickup the suspension head. */
suspension_head = group_ptr -> tx_event_flags_group_suspension_list;
/* Determine if the cleanup is being done while a set operation was interrupted. If the
   suspended count is non-zero and the suspension head is NULL, the list is being processed
   and cannot be touched from here. The suspension list removal will instead take place
   inside the event flag set code. */
if (suspension_head != TX_NULL)
{
@@ -144,7 +144,7 @@ TX_THREAD *previous_thread;
/* Decrement the local suspension count. */
suspended_count--;
/* Store the updated suspended count. */
group_ptr -> tx_event_flags_group_suspended_count = suspended_count;
@@ -153,7 +153,7 @@ TX_THREAD *previous_thread;
{
/* Yes, the only suspended thread. */
/* Update the head pointer. */
group_ptr -> tx_event_flags_group_suspension_list = TX_NULL;
}
@@ -161,17 +161,17 @@ TX_THREAD *previous_thread;
{
/* At least one more thread is on the same suspension list. */
/* Update the links of the adjacent threads. */
next_thread = thread_ptr -> tx_thread_suspended_next;
previous_thread = thread_ptr -> tx_thread_suspended_previous;
next_thread -> tx_thread_suspended_previous = previous_thread;
previous_thread -> tx_thread_suspended_next = next_thread;
/* Determine if we need to update the head pointer. */
if (suspension_head == thread_ptr)
{
/* Update the list head pointer. */
group_ptr -> tx_event_flags_group_suspension_list = next_thread;
}
@@ -179,7 +179,7 @@ TX_THREAD *previous_thread;
}
else
{
/* In this case, the search pointer in an interrupted event flag set must be reset. */
group_ptr -> tx_event_flags_group_reset_search = TX_TRUE;
}
@@ -189,7 +189,7 @@ TX_THREAD *previous_thread;
if (thread_ptr -> tx_thread_state == TX_EVENT_FLAG)
{
/* Timeout condition and the thread still suspended on the event flags group.
   Setup return error status and resume the thread. */
#ifdef TX_EVENT_FLAGS_ENABLE_PERFORMANCE_INFO
@@ -216,8 +216,8 @@ TX_THREAD *previous_thread;
/* Restore interrupts. */
TX_RESTORE
/* Resume the thread! Check for preemption even though we are executing
   from the system timer thread right now which normally executes at the
   highest priority. */
_tx_thread_system_resume(thread_ptr);

View File

@@ -86,7 +86,7 @@ TX_EVENT_FLAGS_GROUP *previous_group;
/* Setup the basic event flags group fields. */
group_ptr -> tx_event_flags_group_name = name_ptr;
/* Disable interrupts to put the event flags group on the created list. */
TX_DISABLE
@@ -121,7 +121,7 @@ TX_EVENT_FLAGS_GROUP *previous_group;
/* Increment the number of created event flag groups. */
_tx_event_flags_created_count++;
/* Optional event flag group create extended processing. */
TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)

View File

@@ -106,7 +106,7 @@ TX_EVENT_FLAGS_GROUP *previous_group;
/* Decrement the number of created event flag groups. */
_tx_event_flags_created_count--;
/* See if this group is the only one on the list. */
if (_tx_event_flags_created_count == TX_EMPTY)
{
@@ -126,7 +126,7 @@ TX_EVENT_FLAGS_GROUP *previous_group;
/* See if we have to update the created list head pointer. */
if (_tx_event_flags_created_ptr == group_ptr)
{
/* Yes, move the head pointer to the next link. */
_tx_event_flags_created_ptr = next_group;
}
@@ -144,18 +144,18 @@ TX_EVENT_FLAGS_GROUP *previous_group;
/* Restore interrupts. */
TX_RESTORE
/* Walk through the event flag suspension list to resume any and all threads
/* Walk through the event flag suspension list to resume any and all threads
suspended on this group. */
while (suspended_count != TX_NO_SUSPENSIONS)
{
/* Decrement the number of suspended threads. */
suspended_count--;
/* Lockout interrupts. */
TX_DISABLE
/* Clear the cleanup pointer, this prevents the timeout from doing
/* Clear the cleanup pointer, this prevents the timeout from doing
anything. */
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;

View File

@@ -130,16 +130,16 @@ UINT interrupted_set_request;
/* Check for AND condition. All flags must be present to satisfy request. */
if (and_request == TX_AND)
{
/* AND request is present. */
/* Calculate the flags present. */
flags_satisfied = (current_flags & requested_flags);
/* Determine if they satisfy the AND request. */
if (flags_satisfied != requested_flags)
{
/* No, not all the requested flags are present. Clear the flags present variable. */
flags_satisfied = ((ULONG) 0);
}
@@ -150,7 +150,7 @@ UINT interrupted_set_request;
/* OR request is present. Simply or the requested flags and the current flags. */
flags_satisfied = (current_flags & requested_flags);
}
/* Determine if the request is satisfied. */
if (flags_satisfied != ((ULONG) 0))
{
@@ -164,7 +164,7 @@ UINT interrupted_set_request;
/* Determine whether or not clearing needs to take place. */
if (clear_request == TX_TRUE)
{
/* Yes, clear the flags that satisfied this request. */
group_ptr -> tx_event_flags_group_current =
group_ptr -> tx_event_flags_group_current & (~requested_flags);
@@ -190,16 +190,16 @@ UINT interrupted_set_request;
/* Check for AND condition. All flags must be present to satisfy request. */
if (and_request == TX_AND)
{
/* AND request is present. */
/* Calculate the flags present. */
flags_satisfied = (current_flags & requested_flags);
/* Determine if they satisfy the AND request. */
if (flags_satisfied != requested_flags)
{
/* No, not all the requested flags are present. Clear the flags present variable. */
flags_satisfied = ((ULONG) 0);
}
@@ -211,7 +211,7 @@ UINT interrupted_set_request;
to see if any are present. */
flags_satisfied = (current_flags & requested_flags);
}
/* Determine if the request is satisfied. */
if (flags_satisfied != ((ULONG) 0))
{
@@ -235,7 +235,7 @@ UINT interrupted_set_request;
set request. */
if (group_ptr -> tx_event_flags_group_suspended_count != TX_NO_SUSPENSIONS)
{
if (group_ptr -> tx_event_flags_group_suspension_list == TX_NULL)
{
@@ -252,7 +252,7 @@ UINT interrupted_set_request;
event clearing until the set operation is complete. */
/* Remember the events to clear. */
group_ptr -> tx_event_flags_group_delayed_clear =
group_ptr -> tx_event_flags_group_delayed_clear =
group_ptr -> tx_event_flags_group_delayed_clear | requested_flags;
}
else
@@ -279,7 +279,7 @@ UINT interrupted_set_request;
/* Determine if the preempt disable flag is non-zero. */
if (_tx_thread_preempt_disable != ((UINT) 0))
{
/* Suspension is not allowed if the preempt disable flag is non-zero at this point, return error completion. */
status = TX_NO_EVENTS;
}
@@ -296,7 +296,7 @@ UINT interrupted_set_request;
/* Increment the number of event flags suspensions on this semaphore. */
group_ptr -> tx_event_flags_group___performance_suspension_count++;
#endif
/* Pickup thread pointer. */
TX_THREAD_GET_CURRENT(thread_ptr)
@@ -325,7 +325,7 @@ UINT interrupted_set_request;
/* Pickup the suspended count. */
suspended_count = group_ptr -> tx_event_flags_group_suspended_count;
/* Setup suspension list. */
if (suspended_count == TX_NO_SUSPENSIONS)
{
@@ -350,7 +350,7 @@ UINT interrupted_set_request;
/* Increment the number of threads suspended. */
group_ptr -> tx_event_flags_group_suspended_count++;
/* Set the state to suspended. */
thread_ptr -> tx_thread_state = TX_EVENT_FLAG;
@@ -377,10 +377,10 @@ UINT interrupted_set_request;
/* Call actual thread suspension routine. */
_tx_thread_system_suspend(thread_ptr);
/* Disable interrupts. */
TX_DISABLE
/* Return the completion status. */
status = thread_ptr -> tx_thread_suspend_status;
#endif
@@ -388,7 +388,7 @@ UINT interrupted_set_request;
}
else
{
/* Immediate return, return error completion. */
status = TX_NO_EVENTS;
}

View File

@@ -79,8 +79,8 @@
/* resulting in version 6.1 */
/* */
/**************************************************************************/
UINT _tx_event_flags_info_get(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR **name, ULONG *current_flags,
TX_THREAD **first_suspended, ULONG *suspended_count,
UINT _tx_event_flags_info_get(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR **name, ULONG *current_flags,
TX_THREAD **first_suspended, ULONG *suspended_count,
TX_EVENT_FLAGS_GROUP **next_group)
{
@@ -102,7 +102,7 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the name of the event flag group. */
if (name != TX_NULL)
{
*name = group_ptr -> tx_event_flags_group_name;
}
@@ -111,31 +111,31 @@ TX_INTERRUPT_SAVE_AREA
{
/* Pickup the current flags and apply delayed clearing. */
*current_flags = group_ptr -> tx_event_flags_group_current &
*current_flags = group_ptr -> tx_event_flags_group_current &
~group_ptr -> tx_event_flags_group_delayed_clear;
}
/* Retrieve the first thread suspended on this event flag group. */
if (first_suspended != TX_NULL)
{
*first_suspended = group_ptr -> tx_event_flags_group_suspension_list;
}
/* Retrieve the number of threads suspended on this event flag group. */
if (suspended_count != TX_NULL)
{
*suspended_count = (ULONG) group_ptr -> tx_event_flags_group_suspended_count;
}
/* Retrieve the pointer to the next event flag group created. */
if (next_group != TX_NULL)
{
*next_group = group_ptr -> tx_event_flags_group_created_next;
}
/* Restore interrupts. */
TX_RESTORE

View File

@@ -94,15 +94,15 @@ UINT status;
/* Determine if this is a legal request. */
if (group_ptr == TX_NULL)
{
/* Event flags group pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
/* Determine if the event group ID is invalid. */
else if (group_ptr -> tx_event_flags_group_id != TX_EVENT_FLAGS_ID)
{
/* Event flags group pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
@@ -124,34 +124,34 @@ UINT status;
/* Retrieve the number of set operations on this event flag group. */
if (sets != TX_NULL)
{
*sets = group_ptr -> tx_event_flags_group_performance_set_count;
}
/* Retrieve the number of get operations on this event flag group. */
if (gets != TX_NULL)
{
*gets = group_ptr -> tx_event_flags_group__performance_get_count;
}
/* Retrieve the number of thread suspensions on this event flag group. */
if (suspensions != TX_NULL)
{
*suspensions = group_ptr -> tx_event_flags_group___performance_suspension_count;
}
/* Retrieve the number of thread timeouts on this event flag group. */
if (timeouts != TX_NULL)
{
*timeouts = group_ptr -> tx_event_flags_group____performance_timeout_count;
}
/* Restore interrupts. */
TX_RESTORE
/* Return successful completion. */
status = TX_SUCCESS;
}

View File

@@ -101,37 +101,37 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the total number of event flag set operations. */
if (sets != TX_NULL)
{
*sets = _tx_event_flags_performance_set_count;
}
/* Retrieve the total number of event flag get operations. */
if (gets != TX_NULL)
{
*gets = _tx_event_flags_performance_get_count;
}
/* Retrieve the total number of event flag thread suspensions. */
if (suspensions != TX_NULL)
{
*suspensions = _tx_event_flags_performance_suspension_count;
}
/* Retrieve the total number of event flag thread timeouts. */
if (timeouts != TX_NULL)
{
*timeouts = _tx_event_flags_performance_timeout_count;
}
/* Restore interrupts. */
TX_RESTORE
/* Return completion status. */
return(TX_SUCCESS);
#else
UINT status;

View File

@@ -137,7 +137,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
set request. */
if (group_ptr -> tx_event_flags_group_suspended_count != TX_NO_SUSPENSIONS)
{
if (group_ptr -> tx_event_flags_group_suspension_list == TX_NULL)
{
@@ -154,15 +154,15 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
event clearing until the set operation is complete. */
/* Remember the events to clear. */
group_ptr -> tx_event_flags_group_delayed_clear =
group_ptr -> tx_event_flags_group_delayed_clear =
group_ptr -> tx_event_flags_group_delayed_clear | ~flags_to_set;
}
else
{
#endif
/* Previous set operation was not interrupted, simply clear the
specified flags by "ANDing" the flags into the current events
/* Previous set operation was not interrupted, simply clear the
specified flags by "ANDing" the flags into the current events
of the group. */
group_ptr -> tx_event_flags_group_current =
group_ptr -> tx_event_flags_group_current & flags_to_set;
@@ -195,7 +195,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
{
/* Yes, we need to neutralize the delayed clearing as well. */
group_ptr -> tx_event_flags_group_delayed_clear =
group_ptr -> tx_event_flags_group_delayed_clear =
group_ptr -> tx_event_flags_group_delayed_clear & ~flags_to_set;
}
#endif
@@ -210,7 +210,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
if (group_ptr -> tx_event_flags_group_suspension_list != TX_NULL)
{
/* Determine if there is just a single thread waiting on the event
/* Determine if there is just a single thread waiting on the event
flag group. */
if (suspended_count == ((UINT) 1))
{
@@ -223,7 +223,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Pickup the current event flags. */
current_event_flags = group_ptr -> tx_event_flags_group_current;
/* Pickup the suspend information. */
requested_flags = thread_ptr -> tx_thread_suspend_info;
@@ -236,16 +236,16 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Check for AND condition. All flags must be present to satisfy request. */
if (and_request == TX_AND)
{
/* AND request is present. */
/* Calculate the flags present. */
flags_satisfied = (current_event_flags & requested_flags);
/* Determine if they satisfy the AND request. */
if (flags_satisfied != requested_flags)
{
/* No, not all the requested flags are present. Clear the flags present variable. */
flags_satisfied = ((ULONG) 0);
}
@@ -256,7 +256,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* OR request is present. Simply or the requested flags and the current flags. */
flags_satisfied = (current_event_flags & requested_flags);
}
/* Determine if the request is satisfied. */
if (flags_satisfied != ((ULONG) 0))
{
@@ -315,7 +315,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
else
{
/* Otherwise, the event flag requests of multiple threads must be
/* Otherwise, the event flag requests of multiple threads must be
examined. */
/* Setup thread pointer, keep a local copy of the head pointer. */
@@ -325,7 +325,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Clear the suspended list head pointer to thwart manipulation of
the list in ISR's while we are processing here. */
group_ptr -> tx_event_flags_group_suspension_list = TX_NULL;
/* Setup the satisfied thread pointers. */
satisfied_list = TX_NULL;
last_satisfied = TX_NULL;
@@ -382,16 +382,16 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Check for AND condition. All flags must be present to satisfy request. */
if (and_request == TX_AND)
{
/* AND request is present. */
/* Calculate the flags present. */
flags_satisfied = (current_event_flags & requested_flags);
/* Determine if they satisfy the AND request. */
if (flags_satisfied != requested_flags)
{
/* No, not all the requested flags are present. Clear the flags present variable. */
flags_satisfied = ((ULONG) 0);
}
@@ -402,13 +402,13 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* OR request is present. Simply or the requested flags and the current flags. */
flags_satisfied = (current_event_flags & requested_flags);
}
/* Check to see if the thread had a timeout or wait abort during the event search processing.
If so, just set the flags satisfied to ensure the processing here removes the thread from
/* Check to see if the thread had a timeout or wait abort during the event search processing.
If so, just set the flags satisfied to ensure the processing here removes the thread from
the suspension list. */
if (thread_ptr -> tx_thread_state != TX_EVENT_FLAG)
{
/* Simply set the satisfied flags to 1 in order to remove the thread from the suspension list. */
flags_satisfied = ((ULONG) 1);
}
@@ -421,7 +421,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Set the preempt check flag. */
preempt_check = TX_TRUE;
/* Determine if the thread is still suspended on the event flag group. If not, a wait
abort must have been done from an ISR. */
if (thread_ptr -> tx_thread_state == TX_EVENT_FLAG)
@@ -437,11 +437,11 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Determine whether or not clearing needs to take place. */
if (clear_request == TX_TRUE)
{
/* Yes, clear the flags that satisfied this request. */
group_ptr -> tx_event_flags_group_current = group_ptr -> tx_event_flags_group_current & ~requested_flags;
}
/* Prepare for resumption of the first thread. */
/* Clear cleanup routine to avoid timeout. */
@@ -478,7 +478,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
list. */
if (suspended_list == thread_ptr)
{
/* Yes, head pointer needs to be updated. */
suspended_list = thread_ptr -> tx_thread_suspended_next;
}
@@ -494,7 +494,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* First thread on the satisfied list. */
satisfied_list = thread_ptr;
last_satisfied = thread_ptr;
/* Setup initial next pointer. */
thread_ptr -> tx_thread_suspended_next = TX_NULL;
}
@@ -502,7 +502,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
{
/* Not the first thread on the satisfied list. */
/* Link it up at the end. */
last_satisfied -> tx_thread_suspended_next = thread_ptr;
thread_ptr -> tx_thread_suspended_next = TX_NULL;
@@ -515,7 +515,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Decrement the suspension count. */
suspended_count--;
} while (suspended_count != TX_NO_SUSPENSIONS);
/* Setup the group's suspension list head again. */
@@ -543,7 +543,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
thread_ptr = satisfied_list;
while(thread_ptr != TX_NULL)
{
/* Get next pointer first. */
next_thread_ptr = thread_ptr -> tx_thread_suspended_next;
@@ -586,7 +586,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Determine if we need to set the reset search field. */
if (group_ptr -> tx_event_flags_group_suspended_count != TX_NO_SUSPENSIONS)
{
/* We interrupted a search of an event flag group suspension
list. Make sure we reset the search. */
group_ptr -> tx_event_flags_group_reset_search = TX_TRUE;

View File

@@ -44,9 +44,9 @@
#include "tx_byte_pool.h"
/* Define the unused memory pointer. The value of the first available
/* Define the unused memory pointer. The value of the first available
memory address is placed in this variable in the low-level
initialization function. The content of this variable is passed
initialization function. The content of this variable is passed
to the application's system definition function. */
VOID *_tx_initialize_unused_memory;

View File

@@ -98,8 +98,8 @@ VOID _tx_initialize_kernel_enter(VOID)
/* No, the initialization still needs to take place. */
/* Ensure that the system state variable is set to indicate
initialization is in progress. Note that this variable is
/* Ensure that the system state variable is set to indicate
initialization is in progress. Note that this variable is
later used to represent interrupt nesting. */
_tx_thread_system_state = TX_INITIALIZE_IN_PROGRESS;
@@ -109,9 +109,9 @@ VOID _tx_initialize_kernel_enter(VOID)
/* Invoke the low-level initialization to handle all processor specific
initialization issues. */
_tx_initialize_low_level();
/* Invoke the high-level initialization to exercise all of the
ThreadX components and the application's initialization
/* Invoke the high-level initialization to exercise all of the
ThreadX components and the application's initialization
function. */
_tx_initialize_high_level();
@@ -122,8 +122,8 @@ VOID _tx_initialize_kernel_enter(VOID)
/* Optional processing extension. */
TX_INITIALIZE_KERNEL_ENTER_EXTENSION
/* Ensure that the system state variable is set to indicate
initialization is in progress. Note that this variable is
/* Ensure that the system state variable is set to indicate
initialization is in progress. Note that this variable is
later used to represent interrupt nesting. */
_tx_thread_system_state = TX_INITIALIZE_IN_PROGRESS;
@@ -131,7 +131,7 @@ VOID _tx_initialize_kernel_enter(VOID)
first available memory address to it. */
tx_application_define(_tx_initialize_unused_memory);
/* Set the system state in preparation for entering the thread
/* Set the system state in preparation for entering the thread
scheduler. */
_tx_thread_system_state = TX_INITIALIZE_IS_FINISHED;

View File

@@ -76,8 +76,8 @@
VOID _tx_initialize_kernel_setup(VOID)
{
/* Ensure that the system state variable is set to indicate
initialization is in progress. Note that this variable is
/* Ensure that the system state variable is set to indicate
initialization is in progress. Note that this variable is
later used to represent interrupt nesting. */
_tx_thread_system_state = TX_INITIALIZE_IN_PROGRESS;
@@ -87,9 +87,9 @@ VOID _tx_initialize_kernel_setup(VOID)
/* Invoke the low-level initialization to handle all processor specific
initialization issues. */
_tx_initialize_low_level();
/* Invoke the high-level initialization to exercise all of the
ThreadX components and the application's initialization
/* Invoke the high-level initialization to exercise all of the
ThreadX components and the application's initialization
function. */
_tx_initialize_high_level();

View File

@@ -92,7 +92,7 @@ ULONG _tx_misra_uchar_pointer_dif(UCHAR *ptr1, UCHAR *ptr2)
{
ULONG value;
value = (ULONG)(ptr1 - ptr2);
return(value);
}
@@ -150,7 +150,7 @@ ULONG *_tx_misra_ulong_pointer_sub(ULONG *ptr, ULONG amount)
ULONG _tx_misra_ulong_pointer_dif(ULONG *ptr1, ULONG *ptr2)
{
ULONG value;
value = (ULONG)(ptr1 - ptr2);
return(value);
}
@@ -362,7 +362,7 @@ TX_THREAD *trace_thread_ptr;
#endif
trace_event_ptr++;
if (trace_event_ptr >= _tx_trace_buffer_end_ptr)
{
{
trace_event_ptr = _tx_trace_buffer_start_ptr;
_tx_trace_buffer_current_ptr = trace_event_ptr;
_tx_trace_header_ptr -> tx_trace_header_buffer_current_pointer = (ULONG) trace_event_ptr;
@@ -813,7 +813,7 @@ UCHAR *_tx_misra_entry_to_uchar_pointer_convert(TX_TRACE_BUFFER_ENTRY *pointer)
/* Return a UCHAR pointer. */
return((UCHAR *) ((VOID *) pointer));
}
#endif

View File

@@ -100,14 +100,14 @@ TX_THREAD *previous_thread;
/* Check for valid suspension sequence. */
if (suspension_sequence == thread_ptr -> tx_thread_suspension_sequence)
{
/* Setup pointer to mutex control block. */
mutex_ptr = TX_VOID_TO_MUTEX_POINTER_CONVERT(thread_ptr -> tx_thread_suspend_control_block);
/* Check for NULL mutex pointer. */
if (mutex_ptr != TX_NULL)
{
/* Determine if the mutex ID is valid. */
if (mutex_ptr -> tx_mutex_id == TX_MUTEX_ID)
{
@@ -133,7 +133,7 @@ TX_THREAD *previous_thread;
suspended_count = mutex_ptr -> tx_mutex_suspended_count;
/* Remove the suspended thread from the list. */
/* See if this is the only suspended thread on the list. */
if (suspended_count == TX_NO_SUSPENSIONS)
{
@@ -147,7 +147,7 @@ TX_THREAD *previous_thread;
{
/* At least one more thread is on the same suspension list. */
/* Update the links of the adjacent threads. */
next_thread = thread_ptr -> tx_thread_suspended_next;
previous_thread = thread_ptr -> tx_thread_suspended_previous;
@@ -157,18 +157,18 @@ TX_THREAD *previous_thread;
/* Determine if we need to update the head pointer. */
if (mutex_ptr -> tx_mutex_suspension_list == thread_ptr)
{
/* Update the list head pointer. */
mutex_ptr -> tx_mutex_suspension_list = next_thread;
}
}
/* Now we need to determine if this cleanup is from a terminate, timeout,
or from a wait abort. */
if (thread_ptr -> tx_thread_state == TX_MUTEX_SUSP)
{
/* Timeout condition and the thread still suspended on the mutex.
/* Timeout condition and the thread still suspended on the mutex.
Setup return error status and resume the thread. */
#ifdef TX_MUTEX_ENABLE_PERFORMANCE_INFO
@@ -194,7 +194,7 @@ TX_THREAD *previous_thread;
/* Restore interrupts. */
TX_RESTORE
/* Resume the thread! */
_tx_thread_system_resume(thread_ptr);
@@ -208,7 +208,7 @@ TX_THREAD *previous_thread;
}
}
}
/* Restore interrupts. */
TX_RESTORE
#endif
@@ -269,21 +269,21 @@ UINT status;
/* Disable interrupts. */
TX_DISABLE
/* Temporarily disable preemption. */
_tx_thread_preempt_disable++;
/* Loop to look at all the mutexes. */
do
{
/* Pickup the mutex head pointer. */
mutex_ptr = thread_ptr -> tx_thread_owned_mutex_list;
/* Determine if there is a mutex. */
if (mutex_ptr != TX_NULL)
{
/* Yes, set the ownership count to 1. */
mutex_ptr -> tx_mutex_ownership_count = ((UINT) 1);
@@ -307,10 +307,10 @@ UINT status;
mutex_ptr = thread_ptr -> tx_thread_owned_mutex_list;
}
} while (mutex_ptr != TX_NULL);
/* Restore preemption. */
_tx_thread_preempt_disable--;
/* Restore interrupts. */
TX_RESTORE
}

View File

@@ -88,7 +88,7 @@ TX_MUTEX *previous_mutex;
/* Setup the basic mutex fields. */
mutex_ptr -> tx_mutex_name = name_ptr;
mutex_ptr -> tx_mutex_inherit = inherit;
/* Disable interrupts to place the mutex on the created list. */
TX_DISABLE
@@ -126,7 +126,7 @@ TX_MUTEX *previous_mutex;
/* Increment the ownership count. */
_tx_mutex_created_count++;
/* Optional mutex create extended processing. */
TX_MUTEX_CREATE_EXTENSION(mutex_ptr)

View File

@@ -110,7 +110,7 @@ UINT status;
/* Decrement the created count. */
_tx_mutex_created_count--;
/* See if the mutex is the only one on the list. */
if (_tx_mutex_created_count == TX_EMPTY)
{
@@ -130,7 +130,7 @@ UINT status;
/* See if we have to update the created list head pointer. */
if (_tx_mutex_created_ptr == mutex_ptr)
{
/* Yes, move the head pointer to the next link. */
_tx_mutex_created_ptr = next_mutex;
}
@@ -156,7 +156,7 @@ UINT status;
{
/* Yes, remove this mutex from the owned list. */
/* Set the ownership count to 1. */
mutex_ptr -> tx_mutex_ownership_count = ((UINT) 1);
@@ -184,14 +184,14 @@ UINT status;
on this mutex. */
while (suspended_count != ((ULONG) 0))
{
/* Decrement the suspension count. */
suspended_count--;
/* Lockout interrupts. */
TX_DISABLE
/* Clear the cleanup pointer, this prevents the timeout from doing
/* Clear the cleanup pointer, this prevents the timeout from doing
anything. */
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;
@@ -215,7 +215,7 @@ UINT status;
/* Restore interrupts. */
TX_RESTORE
/* Resume the thread. */
_tx_thread_system_resume(thread_ptr);
#endif

View File

@@ -126,7 +126,7 @@ UINT status;
/* Determine if priority inheritance is required. */
if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
{
/* Remember the current priority of thread. */
mutex_ptr -> tx_mutex_original_priority = thread_ptr -> tx_thread_priority;
@@ -178,7 +178,7 @@ UINT status;
else if (mutex_ptr -> tx_mutex_owner == thread_ptr)
{
/* The owning thread is requesting the mutex again, just
/* The owning thread is requesting the mutex again, just
increment the ownership count. */
mutex_ptr -> tx_mutex_ownership_count++;
@@ -279,7 +279,7 @@ UINT status;
previous_thread -> tx_thread_suspended_next = thread_ptr;
next_thread -> tx_thread_suspended_previous = thread_ptr;
}
/* Increment the suspension count. */
mutex_ptr -> tx_mutex_suspended_count++;
@@ -288,7 +288,7 @@ UINT status;
#ifdef TX_NOT_INTERRUPTABLE
/* Determine if we need to raise the priority of the thread
/* Determine if we need to raise the priority of the thread
owning the mutex. */
if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
{
@@ -304,7 +304,7 @@ UINT status;
/* Determine if we have to update inherit priority level of the mutex owner. */
if (thread_ptr -> tx_thread_priority < mutex_owner -> tx_thread_inherit_priority)
{
/* Remember the new priority inheritance priority. */
mutex_owner -> tx_thread_inherit_priority = thread_ptr -> tx_thread_priority;
}
@@ -347,7 +347,7 @@ UINT status;
/* Restore interrupts. */
TX_RESTORE
/* Determine if we need to raise the priority of the thread
/* Determine if we need to raise the priority of the thread
owning the mutex. */
if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
{
@@ -363,7 +363,7 @@ UINT status;
/* Determine if we have to update inherit priority level of the mutex owner. */
if (thread_ptr -> tx_thread_priority < mutex_owner -> tx_thread_inherit_priority)
{
/* Remember the new priority inheritance priority. */
mutex_owner -> tx_thread_inherit_priority = thread_ptr -> tx_thread_priority;
}

View File

@@ -79,7 +79,7 @@
/* */
/**************************************************************************/
UINT _tx_mutex_info_get(TX_MUTEX *mutex_ptr, CHAR **name, ULONG *count, TX_THREAD **owner,
TX_THREAD **first_suspended, ULONG *suspended_count,
TX_THREAD **first_suspended, ULONG *suspended_count,
TX_MUTEX **next_mutex)
{
@@ -101,45 +101,45 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the name of the mutex. */
if (name != TX_NULL)
{
*name = mutex_ptr -> tx_mutex_name;
}
/* Retrieve the current ownership count of the mutex. */
if (count != TX_NULL)
{
*count = ((ULONG) mutex_ptr -> tx_mutex_ownership_count);
}
/* Retrieve the current owner of the mutex. */
if (owner != TX_NULL)
{
*owner = mutex_ptr -> tx_mutex_owner;
}
/* Retrieve the first thread suspended on this mutex. */
if (first_suspended != TX_NULL)
{
*first_suspended = mutex_ptr -> tx_mutex_suspension_list;
}
/* Retrieve the number of threads suspended on this mutex. */
if (suspended_count != TX_NULL)
{
*suspended_count = (ULONG) mutex_ptr -> tx_mutex_suspended_count;
}
/* Retrieve the pointer to the next mutex created. */
if (next_mutex != TX_NULL)
{
*next_mutex = mutex_ptr -> tx_mutex_created_next;
}
/* Restore interrupts. */
TX_RESTORE

View File

@@ -99,15 +99,15 @@ UINT status;
/* Determine if this is a legal request. */
if (mutex_ptr == TX_NULL)
{
/* Mutex pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
/* Determine if the mutex ID is invalid. */
else if (mutex_ptr -> tx_mutex_id != TX_MUTEX_ID)
{
/* Mutex pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
@@ -129,45 +129,45 @@ UINT status;
/* Retrieve the number of puts on this mutex. */
if (puts != TX_NULL)
{
*puts = mutex_ptr -> tx_mutex_performance_put_count;
}
/* Retrieve the number of gets on this mutex. */
if (gets != TX_NULL)
{
*gets = mutex_ptr -> tx_mutex_performance_get_count;
}
/* Retrieve the number of suspensions on this mutex. */
if (suspensions != TX_NULL)
{
*suspensions = mutex_ptr -> tx_mutex_performance_suspension_count;
}
/* Retrieve the number of timeouts on this mutex. */
if (timeouts != TX_NULL)
{
*timeouts = mutex_ptr -> tx_mutex_performance_timeout_count;
}
/* Retrieve the number of priority inversions on this mutex. */
if (inversions != TX_NULL)
{
*inversions = mutex_ptr -> tx_mutex_performance_priority_inversion_count;
}
/* Retrieve the number of priority inheritances on this mutex. */
if (inheritances != TX_NULL)
{
*inheritances = mutex_ptr -> tx_mutex_performance__priority_inheritance_count;
}
/* Restore interrupts. */
TX_RESTORE
}
@@ -225,7 +225,7 @@ UINT status;
status = TX_FEATURE_NOT_ENABLED;
}
#endif
/* Return completion status. */
return(status);
}

View File

@@ -82,7 +82,7 @@
/* resulting in version 6.1 */
/* */
/**************************************************************************/
UINT _tx_mutex_performance_system_info_get(ULONG *puts, ULONG *gets, ULONG *suspensions,
UINT _tx_mutex_performance_system_info_get(ULONG *puts, ULONG *gets, ULONG *suspensions,
ULONG *timeouts, ULONG *inversions, ULONG *inheritances)
{
@@ -106,51 +106,51 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the total number of mutex puts. */
if (puts != TX_NULL)
{
*puts = _tx_mutex_performance_put_count;
}
/* Retrieve the total number of mutex gets. */
if (gets != TX_NULL)
{
*gets = _tx_mutex_performance_get_count;
}
/* Retrieve the total number of mutex suspensions. */
if (suspensions != TX_NULL)
{
*suspensions = _tx_mutex_performance_suspension_count;
}
/* Retrieve the total number of mutex timeouts. */
if (timeouts != TX_NULL)
{
*timeouts = _tx_mutex_performance_timeout_count;
}
/* Retrieve the total number of mutex priority inversions. */
if (inversions != TX_NULL)
{
*inversions = _tx_mutex_performance_priority_inversion_count;
}
/* Retrieve the total number of mutex priority inheritances. */
if (inheritances != TX_NULL)
{
*inheritances = _tx_mutex_performance__priority_inheritance_count;
}
/* Restore interrupts. */
TX_RESTORE
/* Return completion status. */
return(TX_SUCCESS);
#else
UINT status;
@@ -159,43 +159,43 @@ UINT status;
/* Access input arguments just for the sake of lint, MISRA, etc. */
if (puts != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (gets != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (suspensions != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (timeouts != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (inversions != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (inheritances != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}

View File

@@ -164,28 +164,28 @@ UINT status;
/* Disable interrupts again. */
TX_DISABLE
/* Determine if any changes to the list have occurred while
/* Determine if any changes to the list have occurred while
interrupts were enabled. */
/* Is the list head the same? */
if (head_ptr != mutex_ptr -> tx_mutex_suspension_list)
{
/* The list head has changed, set the list changed flag. */
list_changed = TX_TRUE;
}
else
{
/* Is the suspended count the same? */
if (suspended_count != mutex_ptr -> tx_mutex_suspended_count)
{
/* The list head has changed, set the list changed flag. */
list_changed = TX_TRUE;
}
}
/* Determine if the list has changed. */
if (list_changed == TX_FALSE)
{
@@ -215,12 +215,12 @@ UINT status;
/* Release preemption. */
_tx_thread_preempt_disable--;
/* Now determine if the highest priority thread is at the front
/* Now determine if the highest priority thread is at the front
of the list. */
if (priority_thread_ptr != head_ptr)
{
/* No, we need to move the highest priority suspended thread to the
/* No, we need to move the highest priority suspended thread to the
front of the list. */
/* First, remove the highest priority thread by updating the

View File

@@ -116,17 +116,17 @@ UINT map_index;
/* Change thread priority to the new mutex priority-inheritance priority. */
thread_ptr -> tx_thread_priority = new_priority;
/* Determine how to setup the thread's preemption-threshold. */
if (thread_ptr -> tx_thread_user_preempt_threshold < new_priority)
{
/* Change thread preemption-threshold to the user's preemption-threshold. */
thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_user_preempt_threshold;
}
else
{
/* Change the thread preemption-threshold to the new threshold. */
thread_ptr -> tx_thread_preempt_threshold = new_priority;
}
@@ -156,22 +156,22 @@ UINT map_index;
/* Call actual non-interruptable thread suspension routine. */
_tx_thread_system_ni_suspend(thread_ptr, ((ULONG) 0));
/* At this point, the preempt disable flag is still set, so we still have
/* At this point, the preempt disable flag is still set, so we still have
protection against all preemption. */
/* Change thread priority to the new mutex priority-inheritance priority. */
thread_ptr -> tx_thread_priority = new_priority;
/* Determine how to setup the thread's preemption-threshold. */
if (thread_ptr -> tx_thread_user_preempt_threshold < new_priority)
{
/* Change thread preemption-threshold to the user's preemption-threshold. */
thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_user_preempt_threshold;
}
else
{
/* Change the thread preemption-threshold to the new threshold. */
thread_ptr -> tx_thread_preempt_threshold = new_priority;
}
@@ -198,29 +198,29 @@ UINT map_index;
/* Restore interrupts. */
TX_RESTORE
/* The thread is ready and must first be removed from the list. Call the
/* The thread is ready and must first be removed from the list. Call the
system suspend function to accomplish this. */
_tx_thread_system_suspend(thread_ptr);
/* Disable interrupts. */
TX_DISABLE
/* At this point, the preempt disable flag is still set, so we still have
/* At this point, the preempt disable flag is still set, so we still have
protection against all preemption. */
/* Change thread priority to the new mutex priority-inheritance priority. */
thread_ptr -> tx_thread_priority = new_priority;
/* Determine how to setup the thread's preemption-threshold. */
if (thread_ptr -> tx_thread_user_preempt_threshold < new_priority)
{
/* Change thread preemption-threshold to the user's preemption-threshold. */
thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_user_preempt_threshold;
}
else
{
/* Change the thread preemption-threshold to the new threshold. */
thread_ptr -> tx_thread_preempt_threshold = new_priority;
}
@@ -240,14 +240,14 @@ UINT map_index;
/* Disable interrupts. */
TX_DISABLE
#endif
/* Pickup the next thread to execute. */
next_execute_ptr = _tx_thread_execute_ptr;
/* Determine if this thread is not the next thread to execute. */
if (thread_ptr != next_execute_ptr)
{
/* Make sure the thread is still ready. */
if (thread_ptr -> tx_thread_state == TX_READY)
{
@@ -269,7 +269,7 @@ UINT map_index;
/* Determine if we moved to a lower priority. If so, move the thread to the front of its priority list. */
if (original_priority < new_priority)
{
/* Ensure that this thread is placed at the front of the priority list. */
_tx_thread_priority_list[thread_ptr -> tx_thread_priority] = thread_ptr;
}
@@ -287,7 +287,7 @@ UINT map_index;
/* Compare the next thread to execute thread's priority against the thread's preemption-threshold. */
if (thread_ptr -> tx_thread_preempt_threshold <= next_execute_ptr -> tx_thread_priority)
{
/* We must swap execute pointers to enforce the preemption-threshold of a thread coming out of
priority inheritance. */
_tx_thread_execute_ptr = thread_ptr;
@@ -295,7 +295,7 @@ UINT map_index;
/* Determine if we moved to a lower priority. If so, move the thread to the front of its priority list. */
if (original_priority < new_priority)
{
/* Ensure that this thread is placed at the front of the priority list. */
_tx_thread_priority_list[thread_ptr -> tx_thread_priority] = thread_ptr;
}
@@ -330,7 +330,7 @@ UINT map_index;
}
#ifndef TX_NOT_INTERRUPTABLE
/* Restore interrupts. */
TX_RESTORE
#endif

View File

@@ -131,8 +131,8 @@ UINT inheritance_priority;
/* Check to see if the mutex is owned by the calling thread. */
if (mutex_ptr -> tx_mutex_owner != current_thread)
{
/* Determine if the preempt disable flag is set, indicating that
/* Determine if the preempt disable flag is set, indicating that
the caller is not the application but from ThreadX. In such
cases, the thread mutex owner does not need to match. */
if (_tx_thread_preempt_disable == ((UINT) 0))
@@ -147,11 +147,11 @@ UINT inheritance_priority;
status = TX_NOT_OWNED;
}
}
/* Determine if we should continue. */
if (status == TX_NOT_DONE)
{
/* Decrement the mutex ownership count. */
mutex_ptr -> tx_mutex_ownership_count--;
@@ -182,9 +182,9 @@ UINT inheritance_priority;
{
/* The mutex is now available. */
/* Remove this mutex from the owned mutex list. */
/* Decrement the ownership count. */
thread_ptr -> tx_thread_owned_mutex_count--;
@@ -218,14 +218,14 @@ UINT inheritance_priority;
/* Determine if the simple, non-suspension, non-priority inheritance case is present. */
if (mutex_ptr -> tx_mutex_suspension_list == TX_NULL)
{
/* Is this a priority inheritance mutex? */
if (mutex_ptr -> tx_mutex_inherit == TX_FALSE)
{
/* Yes, we are done - set the mutex owner to NULL. */
mutex_ptr -> tx_mutex_owner = TX_NULL;
/* Restore interrupts. */
TX_RESTORE
@@ -233,11 +233,11 @@ UINT inheritance_priority;
status = TX_SUCCESS;
}
}
/* Determine if the processing is complete. */
if (status == TX_NOT_DONE)
{
/* Initialize original owner and thread priority. */
old_owner = TX_NULL;
old_priority = thread_ptr -> tx_thread_user_priority;
@@ -257,8 +257,8 @@ UINT inheritance_priority;
/* Default the inheritance priority to disabled. */
inheritance_priority = ((UINT) TX_MAX_PRIORITIES);
/* Search the owned mutexes for this thread to determine the highest priority for this
/* Search the owned mutexes for this thread to determine the highest priority for this
former mutex owner to return to. */
next_mutex = thread_ptr -> tx_thread_owned_mutex_list;
while (next_mutex != TX_NULL)
@@ -267,8 +267,8 @@ UINT inheritance_priority;
/* Does this mutex support priority inheritance? */
if (next_mutex -> tx_mutex_inherit == TX_TRUE)
{
/* Determine if highest priority field of the mutex is higher than the priority to
/* Determine if highest priority field of the mutex is higher than the priority to
restore. */
if (next_mutex -> tx_mutex_highest_priority_waiting < inheritance_priority)
{
@@ -284,7 +284,7 @@ UINT inheritance_priority;
/* Are we at the end of the list? */
if (next_mutex == thread_ptr -> tx_thread_owned_mutex_list)
{
/* Yes, set the next mutex to NULL. */
next_mutex = TX_NULL;
}
@@ -298,14 +298,14 @@ UINT inheritance_priority;
/* Undo the temporarily preemption disable. */
_tx_thread_preempt_disable--;
#endif
/* Set the inherit priority to that of the highest priority thread waiting on the mutex. */
thread_ptr -> tx_thread_inherit_priority = inheritance_priority;
/* Determine if the inheritance priority is less than the default old priority. */
if (inheritance_priority < old_priority)
{
/* Yes, update the old priority. */
old_priority = inheritance_priority;
}
@@ -332,7 +332,7 @@ UINT inheritance_priority;
TX_RESTORE
#endif
/* Call the mutex prioritize processing to ensure the
/* Call the mutex prioritize processing to ensure the
highest priority thread is resumed. */
#ifdef TX_MISRA_ENABLE
do
@@ -375,17 +375,17 @@ UINT inheritance_priority;
TX_RESTORE
#endif
/* Mutex is not owned, but it is possible that a thread that
/* Mutex is not owned, but it is possible that a thread that
caused a priority inheritance to occur is no longer waiting
on the mutex. */
/* Setup the highest priority waiting thread. */
mutex_ptr -> tx_mutex_highest_priority_waiting = (UINT) TX_MAX_PRIORITIES;
/* Determine if we need to restore priority. */
if ((mutex_ptr -> tx_mutex_owner) -> tx_thread_priority != old_priority)
{
/* Yes, restore the priority of thread. */
_tx_mutex_priority_change(mutex_ptr -> tx_mutex_owner, old_priority);
}
@@ -424,7 +424,7 @@ UINT inheritance_priority;
/* Remember the old mutex owner. */
old_owner = mutex_ptr -> tx_mutex_owner;
/* Setup owner thread priority information. */
mutex_ptr -> tx_mutex_original_priority = thread_ptr -> tx_thread_priority;
@@ -473,7 +473,7 @@ UINT inheritance_priority;
/* Decrement the suspension count. */
mutex_ptr -> tx_mutex_suspended_count--;
/* Pickup the suspended count. */
suspended_count = mutex_ptr -> tx_mutex_suspended_count;
@@ -482,7 +482,7 @@ UINT inheritance_priority;
{
/* Yes, the only suspended thread. */
/* Update the head pointer. */
mutex_ptr -> tx_mutex_suspension_list = TX_NULL;
}
@@ -536,7 +536,7 @@ UINT inheritance_priority;
_tx_mutex_prioritize(mutex_ptr);
#endif
}
/* Now, pickup the list head and set the priority. */
/* Determine if there still are threads suspended for this mutex. */
@@ -551,11 +551,11 @@ UINT inheritance_priority;
/* Restore previous priority needs to be restored after priority
inheritance. */
/* Determine if we need to restore priority. */
if (old_owner -> tx_thread_priority != old_priority)
{
/* Restore priority of thread. */
_tx_mutex_priority_change(old_owner, old_priority);
}
@@ -579,7 +579,7 @@ UINT inheritance_priority;
{
/* Yes, priority inheritance is requested. */
/* Determine if there are any more threads still suspended on the mutex. */
if (mutex_ptr -> tx_mutex_suspended_count != TX_NO_SUSPENSIONS)
{
@@ -594,7 +594,7 @@ UINT inheritance_priority;
#else
_tx_mutex_prioritize(mutex_ptr);
#endif
/* Now, pickup the list head and set the priority. */
/* Optional processing extension. */
@@ -618,11 +618,11 @@ UINT inheritance_priority;
/* Restore previous priority needs to be restored after priority
inheritance. */
/* Is the priority different? */
if (old_owner -> tx_thread_priority != old_priority)
{
/* Restore the priority of thread. */
_tx_mutex_priority_change(old_owner, old_priority);
}
@@ -631,7 +631,7 @@ UINT inheritance_priority;
/* Resume thread. */
_tx_thread_system_resume(thread_ptr);
#endif
/* Return a successful status. */
status = TX_SUCCESS;
}
@@ -645,7 +645,7 @@ UINT inheritance_priority;
/* Restore interrupts. */
TX_RESTORE
/* Caller does not own the mutex. */
status = TX_NOT_OWNED;
}

View File

@@ -87,7 +87,7 @@ UINT suspended_count;
TX_THREAD *next_thread;
TX_THREAD *previous_thread;
#ifndef TX_NOT_INTERRUPTABLE
/* Disable interrupts to remove the suspended thread from the queue. */
@@ -96,7 +96,7 @@ TX_THREAD *previous_thread;
/* Determine if the cleanup is still required. */
if (thread_ptr -> tx_thread_suspend_cleanup == &(_tx_queue_cleanup))
{
/* Check for valid suspension sequence. */
if (suspension_sequence == thread_ptr -> tx_thread_suspension_sequence)
{
@@ -122,7 +122,7 @@ TX_THREAD *previous_thread;
#endif
/* Yes, we still have thread suspension! */
/* Clear the suspension cleanup flag. */
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;
@@ -168,7 +168,7 @@ TX_THREAD *previous_thread;
if (thread_ptr -> tx_thread_state == TX_QUEUE_SUSP)
{
/* Timeout condition and the thread still suspended on the queue.
/* Timeout condition and the thread still suspended on the queue.
Setup return error status and resume the thread. */
#ifdef TX_QUEUE_ENABLE_PERFORMANCE_INFO
@@ -183,17 +183,17 @@ TX_THREAD *previous_thread;
/* Setup return status. */
if (queue_ptr -> tx_queue_enqueued != TX_NO_MESSAGES)
{
/* Queue full timeout! */
thread_ptr -> tx_thread_suspend_status = TX_QUEUE_FULL;
}
else
{
/* Queue empty timeout! */
thread_ptr -> tx_thread_suspend_status = TX_QUEUE_EMPTY;
}
#ifdef TX_NOT_INTERRUPTABLE
/* Resume the thread! */

View File

@@ -74,7 +74,7 @@
/* resulting in version 6.1 */
/* */
/**************************************************************************/
UINT _tx_queue_create(TX_QUEUE *queue_ptr, CHAR *name_ptr, UINT message_size,
UINT _tx_queue_create(TX_QUEUE *queue_ptr, CHAR *name_ptr, UINT message_size,
VOID *queue_start, ULONG queue_size)
{
@@ -91,7 +91,7 @@ TX_QUEUE *previous_queue;
/* Setup the basic queue fields. */
queue_ptr -> tx_queue_name = name_ptr;
/* Save the message size in the control block. */
queue_ptr -> tx_queue_message_size = message_size;
@@ -100,7 +100,7 @@ TX_QUEUE *previous_queue;
capacity = (UINT) (queue_size / ((ULONG) (((ULONG) message_size) * (sizeof(ULONG)))));
used_words = capacity * message_size;
/* Save the starting address and calculate the ending address of
/* Save the starting address and calculate the ending address of
the queue. Note that the ending address is really one past the
end! */
queue_ptr -> tx_queue_start = TX_VOID_TO_ULONG_POINTER_CONVERT(queue_start);

View File

@@ -125,7 +125,7 @@ TX_QUEUE *previous_queue;
/* See if we have to update the created list head pointer. */
if (_tx_queue_created_ptr == queue_ptr)
{
/* Yes, move the head pointer to the next link. */
_tx_queue_created_ptr = next_queue;
}
@@ -147,14 +147,14 @@ TX_QUEUE *previous_queue;
on this queue. */
while (suspended_count != TX_NO_SUSPENSIONS)
{
/* Decrement the suspension count. */
suspended_count--;
/* Lockout interrupts. */
TX_DISABLE
/* Clear the cleanup pointer, this prevents the timeout from doing
/* Clear the cleanup pointer, this prevents the timeout from doing
anything. */
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;

View File

@@ -113,7 +113,7 @@ TX_THREAD *thread_ptr;
if (queue_ptr -> tx_queue_suspended_count != TX_NO_SUSPENSIONS)
{
/* Yes, there are threads suspended on this queue, they must be
/* Yes, there are threads suspended on this queue, they must be
resumed! */
/* Copy the information into temporary variables. */
@@ -141,24 +141,24 @@ TX_THREAD *thread_ptr;
thread_ptr = suspension_list;
while (suspended_count != ((ULONG) 0))
{
/* Decrement the suspension count. */
suspended_count--;
/* Check for a NULL thread pointer. */
if (thread_ptr == TX_NULL)
{
/* Get out of the loop. */
break;
}
/* Resume the next suspended thread. */
/* Lockout interrupts. */
TX_DISABLE
/* Clear the cleanup pointer, this prevents the timeout from doing
/* Clear the cleanup pointer, this prevents the timeout from doing
anything. */
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;
@@ -182,7 +182,7 @@ TX_THREAD *thread_ptr;
/* Restore interrupts. */
TX_RESTORE
/* Resume the thread. */
_tx_thread_system_resume(thread_ptr -> tx_thread_suspended_previous);
#endif

View File

@@ -123,7 +123,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
if (queue_ptr -> tx_queue_available_storage != ((UINT) 0))
{
/* Yes there is room in the queue. Now determine if there is a thread waiting
/* Yes there is room in the queue. Now determine if there is a thread waiting
for a message. */
if (suspended_count == TX_NO_SUSPENSIONS)
{
@@ -137,20 +137,20 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
/* See if the read pointer is at the beginning of the queue area. */
if (queue_ptr -> tx_queue_read == queue_ptr -> tx_queue_start)
{
/* Adjust the read pointer to the last message at the end of the
queue. */
queue_ptr -> tx_queue_read = TX_ULONG_POINTER_SUB(queue_ptr -> tx_queue_end, queue_ptr -> tx_queue_message_size);
}
else
{
/* Not at the beginning of the queue, just move back one message. */
queue_ptr -> tx_queue_read = TX_ULONG_POINTER_SUB(queue_ptr -> tx_queue_read, queue_ptr -> tx_queue_message_size);
}
/* Simply place the message in the queue. */
/* Reduce the amount of available storage. */
queue_ptr -> tx_queue_available_storage--;
@@ -162,7 +162,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
destination = queue_ptr -> tx_queue_read;
size = queue_ptr -> tx_queue_message_size;
/* Copy message. Note that the source and destination pointers are
/* Copy message. Note that the source and destination pointers are
incremented by the macro. */
TX_QUEUE_MESSAGE_COPY(source, destination, size)
@@ -241,7 +241,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
destination = TX_VOID_TO_ULONG_POINTER_CONVERT(thread_ptr -> tx_thread_additional_suspend_info);
size = queue_ptr -> tx_queue_message_size;
/* Copy message. Note that the source and destination pointers are
/* Copy message. Note that the source and destination pointers are
incremented by the macro. */
TX_QUEUE_MESSAGE_COPY(source, destination, size)
@@ -300,7 +300,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
/* Yes, suspension is requested. */
/* Prepare for suspension of this thread. */
/* Pickup thread pointer. */
TX_THREAD_GET_CURRENT(thread_ptr)
@@ -345,7 +345,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
next_thread -> tx_thread_suspended_previous = thread_ptr;
/* Update the suspension list to put this thread in front, which will put
the message that was removed in the proper relative order when room is
the message that was removed in the proper relative order when room is
made in the queue. */
queue_ptr -> tx_queue_suspension_list = thread_ptr;
}

View File

@@ -99,45 +99,45 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the name of the queue. */
if (name != TX_NULL)
{
*name = queue_ptr -> tx_queue_name;
}
/* Retrieve the number of messages currently in the queue. */
if (enqueued != TX_NULL)
{
*enqueued = (ULONG) queue_ptr -> tx_queue_enqueued;
}
/* Retrieve the number of messages that will still fit in the queue. */
if (available_storage != TX_NULL)
{
*available_storage = (ULONG) queue_ptr -> tx_queue_available_storage;
}
/* Retrieve the first thread suspended on this queue. */
if (first_suspended != TX_NULL)
{
*first_suspended = queue_ptr -> tx_queue_suspension_list;
}
/* Retrieve the number of threads suspended on this queue. */
if (suspended_count != TX_NULL)
{
*suspended_count = (ULONG) queue_ptr -> tx_queue_suspended_count;
}
/* Retrieve the pointer to the next queue created. */
if (next_queue != TX_NULL)
{
*next_queue = queue_ptr -> tx_queue_created_next;
}
/* Restore interrupts. */
TX_RESTORE

View File

@@ -95,15 +95,15 @@ UINT status;
/* Determine if this is a legal request. */
if (queue_ptr == TX_NULL)
{
/* Queue pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
/* Determine if the queue ID is invalid. */
else if (queue_ptr -> tx_queue_id != TX_QUEUE_ID)
{
/* Queue pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
@@ -125,45 +125,45 @@ UINT status;
/* Retrieve the number of messages sent to this queue. */
if (messages_sent != TX_NULL)
{
*messages_sent = queue_ptr -> tx_queue_performance_messages_sent_count;
}
/* Retrieve the number of messages received from this queue. */
if (messages_received != TX_NULL)
{
*messages_received = queue_ptr -> tx_queue_performance_messages_received_count;
}
/* Retrieve the number of empty queue suspensions on this queue. */
if (empty_suspensions != TX_NULL)
{
*empty_suspensions = queue_ptr -> tx_queue_performance_empty_suspension_count;
}
/* Retrieve the number of full queue suspensions on this queue. */
if (full_suspensions != TX_NULL)
{
*full_suspensions = queue_ptr -> tx_queue_performance_full_suspension_count;
}
/* Retrieve the number of full errors (no suspension!) on this queue. */
if (full_errors != TX_NULL)
{
*full_errors = queue_ptr -> tx_queue_performance_full_error_count;
}
/* Retrieve the number of timeouts on this queue. */
if (timeouts != TX_NULL)
{
*timeouts = queue_ptr -> tx_queue_performance_timeout_count;
}
/* Restore interrupts. */
TX_RESTORE

View File

@@ -106,51 +106,51 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the total number of queue messages sent. */
if (messages_sent != TX_NULL)
{
*messages_sent = _tx_queue_performance_messages_sent_count;
}
/* Retrieve the total number of queue messages received. */
if (messages_received != TX_NULL)
{
*messages_received = _tx_queue_performance__messages_received_count;
}
/* Retrieve the total number of empty queue suspensions. */
if (empty_suspensions != TX_NULL)
{
*empty_suspensions = _tx_queue_performance_empty_suspension_count;
}
/* Retrieve the total number of full queue suspensions. */
if (full_suspensions != TX_NULL)
{
*full_suspensions = _tx_queue_performance_full_suspension_count;
}
/* Retrieve the total number of full errors. */
if (full_errors != TX_NULL)
{
*full_errors = _tx_queue_performance_full_error_count;
}
/* Retrieve the total number of queue timeouts. */
if (timeouts != TX_NULL)
{
*timeouts = _tx_queue_performance_timeout_count;
}
/* Restore interrupts. */
TX_RESTORE
/* Return completion status. */
return(TX_SUCCESS);
#else
UINT status;

View File

@@ -161,28 +161,28 @@ UINT list_changed;
/* Disable interrupts again. */
TX_DISABLE
/* Determine if any changes to the list have occurred while
/* Determine if any changes to the list have occurred while
interrupts were enabled. */
/* Is the list head the same? */
if (head_ptr != queue_ptr -> tx_queue_suspension_list)
{
/* The list head has changed, set the list changed flag. */
list_changed = TX_TRUE;
}
else
{
/* Is the suspended count the same? */
if (suspended_count != queue_ptr -> tx_queue_suspended_count)
{
/* The list head has changed, set the list changed flag. */
list_changed = TX_TRUE;
}
}
/* Determine if the list has changed. */
if (list_changed == TX_FALSE)
{
@@ -212,12 +212,12 @@ UINT list_changed;
/* Release preemption. */
_tx_thread_preempt_disable--;
/* Now determine if the highest priority thread is at the front
/* Now determine if the highest priority thread is at the front
of the list. */
if (priority_thread_ptr != head_ptr)
{
/* No, we need to move the highest priority suspended thread to the
/* No, we need to move the highest priority suspended thread to the
front of the list. */
/* First, remove the highest priority thread by updating the

View File

@@ -118,7 +118,7 @@ UINT status;
/* Pickup the thread suspension count. */
suspended_count = queue_ptr -> tx_queue_suspended_count;
/* Determine if there is anything in the queue. */
if (queue_ptr -> tx_queue_enqueued != TX_NO_MESSAGES)
{
@@ -128,13 +128,13 @@ UINT status;
{
/* There is a message waiting in the queue and there are no suspensi. */
/* Setup source and destination pointers. */
source = queue_ptr -> tx_queue_read;
destination = TX_VOID_TO_ULONG_POINTER_CONVERT(destination_ptr);
size = queue_ptr -> tx_queue_message_size;
/* Copy message. Note that the source and destination pointers are
/* Copy message. Note that the source and destination pointers are
incremented by the macro. */
TX_QUEUE_MESSAGE_COPY(source, destination, size)
@@ -145,10 +145,10 @@ UINT status;
/* Yes, wrap around to the beginning. */
source = queue_ptr -> tx_queue_start;
}
/* Setup the queue read pointer. */
queue_ptr -> tx_queue_read = source;
/* Increase the amount of available storage. */
queue_ptr -> tx_queue_available_storage++;
@@ -160,18 +160,18 @@ UINT status;
}
else
{
/* At this point we know the queue is full. */
/* Pickup thread suspension list head pointer. */
thread_ptr = queue_ptr -> tx_queue_suspension_list;
/* Now determine if there is a queue front suspension active. */
/* Is the front suspension flag set? */
if (thread_ptr -> tx_thread_suspend_option == TX_TRUE)
{
/* Yes, a queue front suspension is present. */
/* Return the message associated with this suspension. */
@@ -181,11 +181,11 @@ UINT status;
destination = TX_VOID_TO_ULONG_POINTER_CONVERT(destination_ptr);
size = queue_ptr -> tx_queue_message_size;
/* Copy message. Note that the source and destination pointers are
/* Copy message. Note that the source and destination pointers are
incremented by the macro. */
TX_QUEUE_MESSAGE_COPY(source, destination, size)
/* Message is now in the caller's destination. See if this is the only suspended thread
/* Message is now in the caller's destination. See if this is the only suspended thread
on the list. */
suspended_count--;
if (suspended_count == TX_NO_SUSPENSIONS)
@@ -244,7 +244,7 @@ UINT status;
else
{
/* At this point, we know that the queue is full and there
/* At this point, we know that the queue is full and there
are one or more threads suspended trying to send another
message to this queue. */
@@ -253,7 +253,7 @@ UINT status;
destination = TX_VOID_TO_ULONG_POINTER_CONVERT(destination_ptr);
size = queue_ptr -> tx_queue_message_size;
/* Copy message. Note that the source and destination pointers are
/* Copy message. Note that the source and destination pointers are
incremented by the macro. */
TX_QUEUE_MESSAGE_COPY(source, destination, size)
@@ -267,7 +267,7 @@ UINT status;
/* Setup the queue read pointer. */
queue_ptr -> tx_queue_read = source;
/* Disable preemption. */
_tx_thread_preempt_disable++;
@@ -291,14 +291,14 @@ UINT status;
destination = queue_ptr -> tx_queue_write;
size = queue_ptr -> tx_queue_message_size;
/* Copy message. Note that the source and destination pointers are
/* Copy message. Note that the source and destination pointers are
incremented by the macro. */
TX_QUEUE_MESSAGE_COPY(source, destination, size)
/* Determine if we are at the end. */
if (destination == queue_ptr -> tx_queue_end)
{
/* Yes, wrap around to the beginning. */
destination = queue_ptr -> tx_queue_start;
}
@@ -309,7 +309,7 @@ UINT status;
/* Pickup thread pointer. */
thread_ptr = queue_ptr -> tx_queue_suspension_list;
/* Message is now in the queue. See if this is the only suspended thread
/* Message is now in the queue. See if this is the only suspended thread
on the list. */
suspended_count--;
if (suspended_count == TX_NO_SUSPENSIONS)
@@ -378,7 +378,7 @@ UINT status;
/* Restore interrupts. */
TX_RESTORE
/* Suspension is not allowed if the preempt disable flag is non-zero at this point - return error completion. */
status = TX_QUEUE_EMPTY;
}
@@ -395,7 +395,7 @@ UINT status;
/* Increment the number of empty suspensions on this queue. */
queue_ptr -> tx_queue_performance_empty_suspension_count++;
#endif
/* Pickup thread pointer. */
TX_THREAD_GET_CURRENT(thread_ptr)
@@ -477,7 +477,7 @@ UINT status;
/* Restore interrupts. */
TX_RESTORE
/* Immediate return, return error completion. */
status = TX_QUEUE_EMPTY;
}

View File

@@ -81,7 +81,7 @@ UINT _tx_queue_send(TX_QUEUE *queue_ptr, VOID *source_ptr, ULONG wait_option)
{
TX_INTERRUPT_SAVE_AREA
TX_THREAD *thread_ptr;
ULONG *source;
ULONG *destination;
@@ -128,9 +128,9 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
/* Determine if there are suspended on this queue. */
if (suspended_count == TX_NO_SUSPENSIONS)
{
/* No suspended threads, simply place the message in the queue. */
/* Reduce the amount of available storage. */
queue_ptr -> tx_queue_available_storage--;
@@ -142,7 +142,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
destination = queue_ptr -> tx_queue_write;
size = queue_ptr -> tx_queue_message_size;
/* Copy message. Note that the source and destination pointers are
/* Copy message. Note that the source and destination pointers are
incremented by the macro. */
TX_QUEUE_MESSAGE_COPY(source, destination, size)
@@ -182,7 +182,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
else
{
/* There is a thread suspended on an empty queue. Simply
/* There is a thread suspended on an empty queue. Simply
copy the message to the suspended thread's destination
pointer. */
@@ -230,7 +230,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
destination = TX_VOID_TO_ULONG_POINTER_CONVERT(thread_ptr -> tx_thread_additional_suspend_info);
size = queue_ptr -> tx_queue_message_size;
/* Copy message. Note that the source and destination pointers are
/* Copy message. Note that the source and destination pointers are
incremented by the macro. */
TX_QUEUE_MESSAGE_COPY(source, destination, size)
@@ -274,7 +274,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
#endif
}
}
/* At this point, the queue is full. Determine if suspension is requested. */
else if (wait_option != TX_NO_WAIT)
{
@@ -302,7 +302,7 @@ VOID (*queue_send_notify)(struct TX_QUEUE_STRUCT *notify_queue_ptr);
/* Increment the number of full suspensions on this queue. */
queue_ptr -> tx_queue_performance_full_suspension_count++;
#endif
/* Pickup thread pointer. */
TX_THREAD_GET_CURRENT(thread_ptr)

View File

@@ -87,7 +87,7 @@ UINT suspended_count;
TX_THREAD *next_thread;
TX_THREAD *previous_thread;
#ifndef TX_NOT_INTERRUPTABLE
@@ -97,7 +97,7 @@ TX_THREAD *previous_thread;
/* Determine if the cleanup is still required. */
if (thread_ptr -> tx_thread_suspend_cleanup == &(_tx_semaphore_cleanup))
{
/* Check for valid suspension sequence. */
if (suspension_sequence == thread_ptr -> tx_thread_suspension_sequence)
{
@@ -121,7 +121,7 @@ TX_THREAD *previous_thread;
/* Setup pointer to semaphore control block. */
semaphore_ptr = TX_VOID_TO_SEMAPHORE_POINTER_CONVERT(thread_ptr -> tx_thread_suspend_control_block);
#endif
/* Yes, we still have thread suspension! */
/* Clear the suspension cleanup flag. */
@@ -140,7 +140,7 @@ TX_THREAD *previous_thread;
{
/* Yes, the only suspended thread. */
/* Update the head pointer. */
semaphore_ptr -> tx_semaphore_suspension_list = TX_NULL;
}
@@ -154,7 +154,7 @@ TX_THREAD *previous_thread;
previous_thread = thread_ptr -> tx_thread_suspended_previous;
next_thread -> tx_thread_suspended_previous = previous_thread;
previous_thread -> tx_thread_suspended_next = next_thread;
/* Determine if we need to update the head pointer. */
if (semaphore_ptr -> tx_semaphore_suspension_list == thread_ptr)
{

View File

@@ -87,7 +87,7 @@ TX_SEMAPHORE *previous_semaphore;
/* Setup the basic semaphore fields. */
semaphore_ptr -> tx_semaphore_name = name_ptr;
semaphore_ptr -> tx_semaphore_count = initial_count;
/* Disable interrupts to place the semaphore on the created list. */
TX_DISABLE
@@ -119,7 +119,7 @@ TX_SEMAPHORE *previous_semaphore;
semaphore_ptr -> tx_semaphore_created_previous = previous_semaphore;
semaphore_ptr -> tx_semaphore_created_next = next_semaphore;
}
/* Increment the created count. */
_tx_semaphore_created_count++;

View File

@@ -126,7 +126,7 @@ TX_SEMAPHORE *previous_semaphore;
/* See if we have to update the created list head pointer. */
if (_tx_semaphore_created_ptr == semaphore_ptr)
{
/* Yes, move the head pointer to the next link. */
_tx_semaphore_created_ptr = next_semaphore;
}
@@ -148,14 +148,14 @@ TX_SEMAPHORE *previous_semaphore;
on this semaphore. */
while (suspended_count != TX_NO_SUSPENSIONS)
{
/* Decrement the suspension count. */
suspended_count--;
/* Lockout interrupts. */
TX_DISABLE
/* Clear the cleanup pointer, this prevents the timeout from doing
/* Clear the cleanup pointer, this prevents the timeout from doing
anything. */
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;
@@ -179,7 +179,7 @@ TX_SEMAPHORE *previous_semaphore;
/* Restore interrupts. */
TX_RESTORE
/* Resume the thread. */
_tx_thread_system_resume(thread_ptr);
#endif

View File

@@ -77,7 +77,7 @@ UINT _tx_semaphore_get(TX_SEMAPHORE *semaphore_ptr, ULONG wait_option)
{
TX_INTERRUPT_SAVE_AREA
TX_THREAD *thread_ptr;
TX_THREAD *next_thread;
TX_THREAD *previous_thread;
@@ -123,7 +123,7 @@ UINT status;
/* Determine if the preempt disable flag is non-zero. */
if (_tx_thread_preempt_disable != ((UINT) 0))
{
/* Restore interrupts. */
TX_RESTORE
@@ -143,7 +143,7 @@ UINT status;
/* Increment the number of suspensions on this semaphore. */
semaphore_ptr -> tx_semaphore_performance_suspension_count++;
#endif
/* Pickup thread pointer. */
TX_THREAD_GET_CURRENT(thread_ptr)

View File

@@ -77,8 +77,8 @@
/* resulting in version 6.1 */
/* */
/**************************************************************************/
UINT _tx_semaphore_info_get(TX_SEMAPHORE *semaphore_ptr, CHAR **name, ULONG *current_value,
TX_THREAD **first_suspended, ULONG *suspended_count,
UINT _tx_semaphore_info_get(TX_SEMAPHORE *semaphore_ptr, CHAR **name, ULONG *current_value,
TX_THREAD **first_suspended, ULONG *suspended_count,
TX_SEMAPHORE **next_semaphore)
{
@@ -100,38 +100,38 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the name of the semaphore. */
if (name != TX_NULL)
{
*name = semaphore_ptr -> tx_semaphore_name;
}
/* Retrieve the current value of the semaphore. */
if (current_value != TX_NULL)
{
*current_value = semaphore_ptr -> tx_semaphore_count;
}
/* Retrieve the first thread suspended on this semaphore. */
if (first_suspended != TX_NULL)
{
*first_suspended = semaphore_ptr -> tx_semaphore_suspension_list;
}
/* Retrieve the number of threads suspended on this semaphore. */
if (suspended_count != TX_NULL)
{
*suspended_count = (ULONG) semaphore_ptr -> tx_semaphore_suspended_count;
}
/* Retrieve the pointer to the next semaphore created. */
if (next_semaphore != TX_NULL)
{
*next_semaphore = semaphore_ptr -> tx_semaphore_created_next;
}
/* Restore interrupts. */
TX_RESTORE

View File

@@ -93,15 +93,15 @@ UINT status;
/* Determine if this is a legal request. */
if (semaphore_ptr == TX_NULL)
{
/* Semaphore pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
/* Determine if the semaphore ID is invalid. */
else if (semaphore_ptr -> tx_semaphore_id != TX_SEMAPHORE_ID)
{
/* Semaphore pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
@@ -123,31 +123,31 @@ UINT status;
/* Retrieve the number of puts on this semaphore. */
if (puts != TX_NULL)
{
*puts = semaphore_ptr -> tx_semaphore_performance_put_count;
}
/* Retrieve the number of gets on this semaphore. */
if (gets != TX_NULL)
{
*gets = semaphore_ptr -> tx_semaphore_performance_get_count;
}
/* Retrieve the number of suspensions on this semaphore. */
if (suspensions != TX_NULL)
{
*suspensions = semaphore_ptr -> tx_semaphore_performance_suspension_count;
}
/* Retrieve the number of timeouts on this semaphore. */
if (timeouts != TX_NULL)
{
*timeouts = semaphore_ptr -> tx_semaphore_performance_timeout_count;
}
/* Restore interrupts. */
TX_RESTORE
@@ -161,37 +161,37 @@ UINT status;
/* Access input arguments just for the sake of lint, MISRA, etc. */
if (semaphore_ptr != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (puts != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (gets != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (suspensions != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (timeouts != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}

View File

@@ -101,37 +101,37 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the total number of semaphore puts. */
if (puts != TX_NULL)
{
*puts = _tx_semaphore_performance_put_count;
}
/* Retrieve the total number of semaphore gets. */
if (gets != TX_NULL)
{
*gets = _tx_semaphore_performance_get_count;
}
/* Retrieve the total number of semaphore suspensions. */
if (suspensions != TX_NULL)
{
*suspensions = _tx_semaphore_performance_suspension_count;
}
/* Retrieve the total number of semaphore timeouts. */
if (timeouts != TX_NULL)
{
*timeouts = _tx_semaphore_performance_timeout_count;
}
/* Restore interrupts. */
TX_RESTORE
/* Return completion status. */
return(TX_SUCCESS);
#else
UINT status;
@@ -140,31 +140,31 @@ UINT status;
/* Access input arguments just for the sake of lint, MISRA, etc. */
if (puts != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (gets != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (suspensions != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (timeouts != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}

View File

@@ -161,28 +161,28 @@ UINT list_changed;
/* Disable interrupts again. */
TX_DISABLE
/* Determine if any changes to the list have occurred while
/* Determine if any changes to the list have occurred while
interrupts were enabled. */
/* Is the list head the same? */
if (head_ptr != semaphore_ptr -> tx_semaphore_suspension_list)
{
/* The list head has changed, set the list changed flag. */
list_changed = TX_TRUE;
}
else
{
/* Is the suspended count the same? */
if (suspended_count != semaphore_ptr -> tx_semaphore_suspended_count)
{
/* The list head has changed, set the list changed flag. */
list_changed = TX_TRUE;
}
}
/* Determine if the list has changed. */
if (list_changed == TX_FALSE)
{
@@ -214,12 +214,12 @@ UINT list_changed;
/* Release preemption. */
_tx_thread_preempt_disable--;
/* Now determine if the highest priority thread is at the front
/* Now determine if the highest priority thread is at the front
of the list. */
if (priority_thread_ptr != head_ptr)
{
/* No, we need to move the highest priority suspended thread to the
/* No, we need to move the highest priority suspended thread to the
front of the list. */
/* First, remove the highest priority thread by updating the

View File

@@ -139,7 +139,7 @@ TX_THREAD *previous_thread;
{
/* A thread is suspended on this semaphore. */
/* Pickup the pointer to the first suspended thread. */
thread_ptr = semaphore_ptr -> tx_semaphore_suspension_list;

View File

@@ -118,7 +118,7 @@ ALIGN_TYPE updated_stack_start;
#ifdef TX_ENABLE_STACK_CHECKING
/* Ensure that there are two ULONG of 0xEF patterns at the top and
/* Ensure that there are two ULONG of 0xEF patterns at the top and
bottom of the thread's stack. This will be used to check for stack
overflow conditions during run-time. */
stack_size = ((stack_size/(sizeof(ULONG))) * (sizeof(ULONG))) - (sizeof(ULONG));
@@ -134,7 +134,7 @@ ALIGN_TYPE updated_stack_start;
/* Determine if the starting stack address is different. */
if (new_stack_start != updated_stack_start)
{
/* Yes, subtract another ULONG from the size to avoid going past the stack area. */
stack_size = stack_size - (sizeof(ULONG));
}
@@ -204,7 +204,7 @@ ALIGN_TYPE updated_stack_start;
/* Perform any additional thread setup activities for tool or user purpose. */
TX_THREAD_CREATE_INTERNAL_EXTENSION(thread_ptr)
/* Call the target specific stack frame building routine to build the
/* Call the target specific stack frame building routine to build the
thread's initial stack and to setup the actual stack pointer in the
control block. */
_tx_thread_stack_build(thread_ptr, _tx_thread_shell_entry);
@@ -246,7 +246,7 @@ ALIGN_TYPE updated_stack_start;
thread_ptr -> tx_thread_created_previous = previous_thread;
thread_ptr -> tx_thread_created_next = next_thread;
}
/* Increment the thread created count. */
_tx_thread_created_count++;
@@ -280,22 +280,22 @@ ALIGN_TYPE updated_stack_start;
/* Yes, this create call was made from initialization. */
/* Pickup the current thread execute pointer, which corresponds to the
highest priority thread ready to execute. Interrupt lockout is
not required, since interrupts are assumed to be disabled during
highest priority thread ready to execute. Interrupt lockout is
not required, since interrupts are assumed to be disabled during
initialization. */
saved_thread_ptr = _tx_thread_execute_ptr;
/* Determine if there is thread ready for execution. */
if (saved_thread_ptr != TX_NULL)
{
/* Yes, a thread is ready for execution when initialization completes. */
/* Save the current preemption-threshold. */
saved_threshold = saved_thread_ptr -> tx_thread_preempt_threshold;
/* For initialization, temporarily set the preemption-threshold to the
priority level to make sure the highest-priority thread runs once
/* For initialization, temporarily set the preemption-threshold to the
priority level to make sure the highest-priority thread runs once
initialization is complete. */
saved_thread_ptr -> tx_thread_preempt_threshold = saved_thread_ptr -> tx_thread_priority;
}
@@ -328,7 +328,7 @@ ALIGN_TYPE updated_stack_start;
/* Call the resume thread function to make this thread ready. */
_tx_thread_system_resume(thread_ptr);
#endif
/* Determine if the thread's preemption-threshold needs to be restored. */
if (saved_thread_ptr != TX_NULL)
{

View File

@@ -83,7 +83,7 @@ UINT status;
/* Default status to success. */
status = TX_SUCCESS;
/* Lockout interrupts while the thread is being deleted. */
TX_DISABLE
@@ -106,7 +106,7 @@ UINT status;
/* Determine if the delete operation is okay. */
if (status == TX_SUCCESS)
{
/* Yes, continue with deleting the thread. */
/* Perform any additional activities for tool or user purpose. */
@@ -129,7 +129,7 @@ UINT status;
/* Decrement the number of created threads. */
_tx_thread_created_count--;
/* See if the thread is the only one on the list. */
if (_tx_thread_created_count == TX_EMPTY)
{
@@ -149,7 +149,7 @@ UINT status;
/* See if we have to update the created list head pointer. */
if (_tx_thread_created_ptr == thread_ptr)
{
/* Yes, move the head pointer to the next link. */
_tx_thread_created_ptr = next_thread;
}

View File

@@ -80,7 +80,7 @@ TX_THREAD *thread_ptr;
TX_INTERRUPT_SAVE_AREA
/* Disable interrupts to put the timer on the created list. */
TX_DISABLE

View File

@@ -80,8 +80,8 @@
/* resulting in version 6.1 */
/* */
/**************************************************************************/
UINT _tx_thread_info_get(TX_THREAD *thread_ptr, CHAR **name, UINT *state, ULONG *run_count,
UINT *priority, UINT *preemption_threshold, ULONG *time_slice,
UINT _tx_thread_info_get(TX_THREAD *thread_ptr, CHAR **name, UINT *state, ULONG *run_count,
UINT *priority, UINT *preemption_threshold, ULONG *time_slice,
TX_THREAD **next_thread, TX_THREAD **next_suspended_thread)
{
@@ -103,59 +103,59 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the name of the thread. */
if (name != TX_NULL)
{
*name = thread_ptr -> tx_thread_name;
}
/* Pickup the thread's current state. */
if (state != TX_NULL)
{
*state = thread_ptr -> tx_thread_state;
}
/* Pickup the number of times the thread has been scheduled. */
if (run_count != TX_NULL)
{
*run_count = thread_ptr -> tx_thread_run_count;
}
/* Pickup the thread's priority. */
if (priority != TX_NULL)
{
*priority = thread_ptr -> tx_thread_user_priority;
}
/* Pickup the thread's preemption-threshold. */
if (preemption_threshold != TX_NULL)
{
*preemption_threshold = thread_ptr -> tx_thread_user_preempt_threshold;
}
/* Pickup the thread's current time-slice. */
if (time_slice != TX_NULL)
{
*time_slice = thread_ptr -> tx_thread_time_slice;
}
/* Pickup the next created thread. */
if (next_thread != TX_NULL)
{
*next_thread = thread_ptr -> tx_thread_created_next;
}
/* Pickup the next thread suspended. */
if (next_suspended_thread != TX_NULL)
{
*next_suspended_thread = thread_ptr -> tx_thread_suspended_next;
}
/* Restore interrupts. */
TX_RESTORE

View File

@@ -175,7 +175,7 @@ VOID (*_tx_thread_mutex_release)(TX_THREAD *thread_ptr);
ULONG _tx_build_options;
#ifdef TX_ENABLE_STACK_CHECKING
#if defined(TX_ENABLE_STACK_CHECKING) || defined(TX_PORT_THREAD_STACK_ERROR_HANDLING)
/* Define the global function pointer for stack error handling. If a stack error is
detected and the application has registered a stack error handler, it will be
@@ -277,7 +277,7 @@ const CHAR _tx_thread_special_string[] =
/* FUNCTION RELEASE */
/* */
/* _tx_thread_initialize PORTABLE C */
/* 6.1 */
/* 6.1.9 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -312,7 +312,10 @@ const CHAR _tx_thread_special_string[] =
/* resulting in version 6.1 */
/* 06-02-2021 Yuxin Zhou Modified comment(s), added */
/* Execution Profile support, */
/* resulting in version 6.1.7 */
/* resulting in version 6.1.7 */
/* 10-15-2021 Yuxin Zhou Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
VOID _tx_thread_initialize(VOID)

View File

@@ -97,7 +97,7 @@
/* resulting in version 6.1 */
/* */
/**************************************************************************/
UINT _tx_thread_performance_info_get(TX_THREAD *thread_ptr, ULONG *resumptions, ULONG *suspensions,
UINT _tx_thread_performance_info_get(TX_THREAD *thread_ptr, ULONG *resumptions, ULONG *suspensions,
ULONG *solicited_preemptions, ULONG *interrupt_preemptions, ULONG *priority_inversions,
ULONG *time_slices, ULONG *relinquishes, ULONG *timeouts, ULONG *wait_aborts, TX_THREAD **last_preempted_by)
{
@@ -111,15 +111,15 @@ UINT status;
/* Determine if this is a legal request. */
if (thread_ptr == TX_NULL)
{
/* Thread pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
/* Determine if the thread ID is invalid. */
else if (thread_ptr -> tx_thread_id != TX_THREAD_ID)
{
/* Thread pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
@@ -141,73 +141,73 @@ UINT status;
/* Retrieve number of resumptions for this thread. */
if (resumptions != TX_NULL)
{
*resumptions = thread_ptr -> tx_thread_performance_resume_count;
}
/* Retrieve number of suspensions for this thread. */
if (suspensions != TX_NULL)
{
*suspensions = thread_ptr -> tx_thread_performance_suspend_count;
}
/* Retrieve number of solicited preemptions for this thread. */
if (solicited_preemptions != TX_NULL)
{
*solicited_preemptions = thread_ptr -> tx_thread_performance_solicited_preemption_count;
}
/* Retrieve number of interrupt preemptions for this thread. */
if (interrupt_preemptions != TX_NULL)
{
*interrupt_preemptions = thread_ptr -> tx_thread_performance_interrupt_preemption_count;
}
/* Retrieve number of priority inversions for this thread. */
if (priority_inversions != TX_NULL)
{
*priority_inversions = thread_ptr -> tx_thread_performance_priority_inversion_count;
}
/* Retrieve number of time-slices for this thread. */
if (time_slices != TX_NULL)
{
*time_slices = thread_ptr -> tx_thread_performance_time_slice_count;
}
/* Retrieve number of relinquishes for this thread. */
if (relinquishes != TX_NULL)
{
*relinquishes = thread_ptr -> tx_thread_performance_relinquish_count;
}
/* Retrieve number of timeouts for this thread. */
if (timeouts != TX_NULL)
{
*timeouts = thread_ptr -> tx_thread_performance_timeout_count;
}
/* Retrieve number of wait aborts for this thread. */
if (wait_aborts != TX_NULL)
{
*wait_aborts = thread_ptr -> tx_thread_performance_wait_abort_count;
}
/* Retrieve the pointer of the last thread that preempted this thread. */
if (last_preempted_by != TX_NULL)
{
*last_preempted_by = thread_ptr -> tx_thread_performance_last_preempting_thread;
}
/* Restore interrupts. */
TX_RESTORE

View File

@@ -123,86 +123,86 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve total number of thread resumptions. */
if (resumptions != TX_NULL)
{
*resumptions = _tx_thread_performance_resume_count;
}
/* Retrieve total number of thread suspensions. */
if (suspensions != TX_NULL)
{
*suspensions = _tx_thread_performance_suspend_count;
}
/* Retrieve total number of solicited thread preemptions. */
if (solicited_preemptions != TX_NULL)
{
*solicited_preemptions = _tx_thread_performance_solicited_preemption_count;
}
/* Retrieve total number of interrupt thread preemptions. */
if (interrupt_preemptions != TX_NULL)
{
*interrupt_preemptions = _tx_thread_performance_interrupt_preemption_count;
}
/* Retrieve total number of thread priority inversions. */
if (priority_inversions != TX_NULL)
{
*priority_inversions = _tx_thread_performance_priority_inversion_count;
}
/* Retrieve total number of thread time-slices. */
if (time_slices != TX_NULL)
{
*time_slices = _tx_thread_performance_time_slice_count;
}
/* Retrieve total number of thread relinquishes. */
if (relinquishes != TX_NULL)
{
*relinquishes = _tx_thread_performance_relinquish_count;
}
/* Retrieve total number of thread timeouts. */
if (timeouts != TX_NULL)
{
*timeouts = _tx_thread_performance_timeout_count;
}
/* Retrieve total number of thread wait aborts. */
if (wait_aborts != TX_NULL)
{
*wait_aborts = _tx_thread_performance_wait_abort_count;
}
/* Retrieve total number of thread non-idle system returns. */
if (non_idle_returns != TX_NULL)
{
*non_idle_returns = _tx_thread_performance_non_idle_return_count;
}
/* Retrieve total number of thread idle system returns. */
if (idle_returns != TX_NULL)
{
*idle_returns = _tx_thread_performance_idle_return_count;
}
/* Restore interrupts. */
TX_RESTORE
/* Return completion status. */
return(TX_SUCCESS);
#else
UINT status;
@@ -211,73 +211,73 @@ UINT status;
/* Access input arguments just for the sake of lint, MISRA, etc. */
if (resumptions != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (suspensions != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (solicited_preemptions != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (interrupt_preemptions != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (priority_inversions != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (time_slices != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (relinquishes != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (timeouts != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (wait_aborts != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (non_idle_returns != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (idle_returns != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}

View File

@@ -93,16 +93,16 @@ UINT status;
#ifdef TX_DISABLE_PREEMPTION_THRESHOLD
/* Only allow 0 (disable all preemption) and returning preemption-threshold to the
/* Only allow 0 (disable all preemption) and returning preemption-threshold to the
current thread priority if preemption-threshold is disabled. All other threshold
values are converted to 0. */
if (thread_ptr -> tx_thread_user_priority != new_threshold)
{
/* Is the new threshold zero? */
if (new_threshold != ((UINT) 0))
{
/* Convert the new threshold to disable all preemption, since preemption-threshold is
not supported. */
new_threshold = ((UINT) 0);
@@ -122,7 +122,7 @@ UINT status;
/* Determine if the new threshold is greater than the current user priority. */
if (new_threshold > thread_ptr -> tx_thread_user_priority)
{
/* Return error. */
status = TX_THRESH_ERROR;
}
@@ -174,13 +174,13 @@ UINT status;
/* Determine if the new threshold represents a higher priority than the priority inheritance threshold. */
if (new_threshold < thread_ptr -> tx_thread_inherit_priority)
{
/* Update the actual preemption-threshold with the new threshold. */
thread_ptr -> tx_thread_preempt_threshold = new_threshold;
}
else
{
/* Update the actual preemption-threshold with the priority inheritance. */
thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_inherit_priority;
}
@@ -240,18 +240,18 @@ UINT status;
/* Is the execute pointer different? */
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
{
/* Move to next entry. */
_tx_thread_performance__execute_log_index++;
/* Check for wrap condition. */
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
{
/* Set the index to the beginning. */
_tx_thread_performance__execute_log_index = ((UINT) 0);
}
/* Log the new execute pointer. */
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
}
@@ -265,7 +265,7 @@ UINT status;
/* Check for preemption. */
_tx_thread_system_preempt_check();
/* Disable interrupts. */
TX_DISABLE
}
@@ -275,7 +275,7 @@ UINT status;
/* Restore interrupts. */
TX_RESTORE
/* Return completion status. */
return(status);
}

View File

@@ -116,19 +116,19 @@ UINT original_priority;
block. */
thread_ptr -> tx_thread_user_priority = new_priority;
thread_ptr -> tx_thread_user_preempt_threshold = new_priority;
/* Determine if the actual thread priority should be setup, which is the
case if the new priority is higher than the priority inheritance. */
if (new_priority < thread_ptr -> tx_thread_inherit_priority)
{
/* Change thread priority to the new user's priority. */
thread_ptr -> tx_thread_priority = new_priority;
thread_ptr -> tx_thread_preempt_threshold = new_priority;
}
else
{
/* Change thread priority to the priority inheritance. */
thread_ptr -> tx_thread_priority = thread_ptr -> tx_thread_inherit_priority;
thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_inherit_priority;
@@ -157,7 +157,7 @@ UINT original_priority;
/* Call actual non-interruptable thread suspension routine. */
_tx_thread_system_ni_suspend(thread_ptr, ((ULONG) 0));
/* At this point, the preempt disable flag is still set, so we still have
/* At this point, the preempt disable flag is still set, so we still have
protection against all preemption. */
/* Setup the new priority for this thread. */
@@ -168,14 +168,14 @@ UINT original_priority;
case if the new priority is higher than the priority inheritance. */
if (new_priority < thread_ptr -> tx_thread_inherit_priority)
{
/* Change thread priority to the new user's priority. */
thread_ptr -> tx_thread_priority = new_priority;
thread_ptr -> tx_thread_preempt_threshold = new_priority;
}
else
{
/* Change thread priority to the priority inheritance. */
thread_ptr -> tx_thread_priority = thread_ptr -> tx_thread_inherit_priority;
thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_inherit_priority;
@@ -186,7 +186,7 @@ UINT original_priority;
#else
/* Increment the preempt disable flag by 2 to prevent system suspend from
/* Increment the preempt disable flag by 2 to prevent system suspend from
returning to the system. */
_tx_thread_preempt_disable = _tx_thread_preempt_disable + ((UINT) 3);
@@ -199,11 +199,11 @@ UINT original_priority;
/* Restore interrupts. */
TX_RESTORE
/* The thread is ready and must first be removed from the list. Call the
/* The thread is ready and must first be removed from the list. Call the
system suspend function to accomplish this. */
_tx_thread_system_suspend(thread_ptr);
/* At this point, the preempt disable flag is still set, so we still have
/* At this point, the preempt disable flag is still set, so we still have
protection against all preemption. */
/* Setup the new priority for this thread. */
@@ -214,14 +214,14 @@ UINT original_priority;
case if the new priority is higher than the priority inheritance. */
if (new_priority < thread_ptr -> tx_thread_inherit_priority)
{
/* Change thread priority to the new user's priority. */
thread_ptr -> tx_thread_priority = new_priority;
thread_ptr -> tx_thread_preempt_threshold = new_priority;
}
else
{
/* Change thread priority to the priority inheritance. */
thread_ptr -> tx_thread_priority = thread_ptr -> tx_thread_inherit_priority;
thread_ptr -> tx_thread_preempt_threshold = thread_ptr -> tx_thread_inherit_priority;
@@ -236,26 +236,26 @@ UINT original_priority;
/* Decrement the preempt disable flag. */
_tx_thread_preempt_disable--;
/* Pickup the next thread to execute. */
next_execute_ptr = _tx_thread_execute_ptr;
/* Determine if this thread is not the next thread to execute. */
if (thread_ptr != next_execute_ptr)
{
/* Make sure the thread is still ready. */
if (thread_ptr -> tx_thread_state == TX_READY)
{
/* Now check and see if this thread has an equal or higher priority. */
if (thread_ptr -> tx_thread_priority <= next_execute_ptr -> tx_thread_priority)
{
/* Now determine if this thread was the previously executing thread. */
if (thread_ptr == execute_ptr)
{
/* Yes, this thread was previously executing before we temporarily suspended and resumed
it in order to change the priority. A lower or same priority thread cannot be the next thread
to execute in this case since this thread really didn't suspend. Simply reset the execute
@@ -265,7 +265,7 @@ UINT original_priority;
/* Determine if we moved to a lower priority. If so, move the thread to the front of its priority list. */
if (original_priority < new_priority)
{
/* Ensure that this thread is placed at the front of the priority list. */
_tx_thread_priority_list[thread_ptr -> tx_thread_priority] = thread_ptr;
}
@@ -273,7 +273,7 @@ UINT original_priority;
}
}
}
/* Restore interrupts. */
TX_RESTORE

View File

@@ -112,7 +112,7 @@ TX_THREAD *thread_ptr;
/* Yes, there is another thread at this priority, make it the highest at
this priority level. */
_tx_thread_priority_list[priority] = thread_ptr -> tx_thread_ready_next;
/* Mark the new thread as the one to execute. */
_tx_thread_execute_ptr = thread_ptr -> tx_thread_ready_next;
}

View File

@@ -135,7 +135,7 @@ UINT status;
TX_MEMSET(thread_ptr -> tx_thread_stack_start, ((UCHAR) TX_STACK_FILL), thread_ptr -> tx_thread_stack_size);
#endif
/* Call the target specific stack frame building routine to build the
/* Call the target specific stack frame building routine to build the
thread's initial stack and to setup the actual stack pointer in the
control block. */
_tx_thread_stack_build(thread_ptr, _tx_thread_shell_entry);

View File

@@ -128,22 +128,22 @@ UINT map_index;
/* Yes, this resume call was made from initialization. */
/* Pickup the current thread execute pointer, which corresponds to the
highest priority thread ready to execute. Interrupt lockout is
not required, since interrupts are assumed to be disabled during
highest priority thread ready to execute. Interrupt lockout is
not required, since interrupts are assumed to be disabled during
initialization. */
saved_thread_ptr = _tx_thread_execute_ptr;
/* Determine if there is thread ready for execution. */
if (saved_thread_ptr != TX_NULL)
{
/* Yes, a thread is ready for execution when initialization completes. */
/* Save the current preemption-threshold. */
saved_threshold = saved_thread_ptr -> tx_thread_preempt_threshold;
/* For initialization, temporarily set the preemption-threshold to the
priority level to make sure the highest-priority thread runs once
/* For initialization, temporarily set the preemption-threshold to the
priority level to make sure the highest-priority thread runs once
initialization is complete. */
saved_thread_ptr -> tx_thread_preempt_threshold = saved_thread_ptr -> tx_thread_priority;
}
@@ -184,7 +184,7 @@ UINT map_index;
can only happen if this routine is called from initialization. */
saved_thread_ptr -> tx_thread_preempt_threshold = saved_threshold;
}
#ifdef TX_MISRA_ENABLE
/* Disable interrupts. */
@@ -197,11 +197,11 @@ UINT map_index;
/* Return successful completion. */
return(TX_SUCCESS);
#endif
#else
/* In-line thread resumption processing follows, which is effectively just taking the
/* In-line thread resumption processing follows, which is effectively just taking the
logic in tx_thread_system_resume.c and placing it here! */
/* Resume the thread! */
@@ -211,14 +211,14 @@ UINT map_index;
/* If trace is enabled, save the current event pointer. */
entry_ptr = _tx_trace_buffer_current_ptr;
#endif
/* Log the thread status change. */
TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME, thread_ptr, thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&execute_ptr), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr), TX_TRACE_INTERNAL_EVENTS)
#ifdef TX_ENABLE_EVENT_TRACE
/* Save the time stamp for later comparison to verify that
the event hasn't been overwritten by the time we have
the event hasn't been overwritten by the time we have
computed the next thread to execute. */
if (entry_ptr != TX_NULL)
{
@@ -288,7 +288,7 @@ UINT map_index;
/* Pickup the execute pointer. Since it is going to be referenced multiple
times, it is placed in a local variable. */
execute_ptr = _tx_thread_execute_ptr;
/* Determine if no thread is currently executing. */
if (execute_ptr == TX_NULL)
{
@@ -300,7 +300,7 @@ UINT map_index;
{
/* Another thread has been scheduled for execution. */
/* Check to see if this is a higher priority thread and determine if preemption is allowed. */
if (priority < execute_ptr -> tx_thread_preempt_threshold)
{
@@ -341,7 +341,7 @@ UINT map_index;
}
else
{
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
{
@@ -378,18 +378,18 @@ UINT map_index;
/* Is the execute pointer different? */
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
{
/* Move to next entry. */
_tx_thread_performance__execute_log_index++;
/* Check for wrap condition. */
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
{
/* Set the index to the beginning. */
_tx_thread_performance__execute_log_index = ((UINT) 0);
}
/* Log the new execute pointer. */
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
}
@@ -402,11 +402,11 @@ UINT map_index;
resume event. In that case, do nothing here. */
if (entry_ptr != TX_NULL)
{
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to NULL. This can
be used by the trace analysis tool to show idle system conditions. */
entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
@@ -453,7 +453,7 @@ UINT map_index;
{
/* No, there are other threads at this priority already ready. */
/* Just add this thread to the priority list. */
tail_ptr = head_ptr -> tx_thread_ready_previous;
tail_ptr -> tx_thread_ready_next = thread_ptr;
@@ -469,18 +469,18 @@ UINT map_index;
/* Is the execute pointer different? */
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
{
/* Move to next entry. */
_tx_thread_performance__execute_log_index++;
/* Check for wrap condition. */
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
{
/* Set the index to the beginning. */
_tx_thread_performance__execute_log_index = ((UINT) 0);
}
/* Log the new execute pointer. */
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
}
@@ -517,7 +517,7 @@ UINT map_index;
can only happen if this routine is called from initialization. */
saved_thread_ptr -> tx_thread_preempt_threshold = saved_threshold;
}
/* Setup successful return status. */
status = TX_SUCCESS;
#endif

View File

@@ -119,7 +119,7 @@ VOID (*entry_exit_notify)(TX_THREAD *notify_thread_ptr, UINT type);
if (_tx_thread_mutex_release != TX_NULL)
{
/* Yes, call the mutex release function via a function pointer that
/* Yes, call the mutex release function via a function pointer that
is setup during mutex initialization. */
(_tx_thread_mutex_release)(thread_ptr);
}

View File

@@ -94,18 +94,18 @@ TX_THREAD *thread_ptr;
/* Restore interrupts. */
TX_RESTORE
/* Illegal caller of this service. */
status = TX_CALLER_ERROR;
}
/* Is the caller an ISR or Initialization? */
else if (TX_THREAD_GET_SYSTEM_STATE() != ((ULONG) 0))
{
/* Restore interrupts. */
TX_RESTORE
/* Illegal caller of this service. */
status = TX_CALLER_ERROR;
}
@@ -118,7 +118,7 @@ TX_THREAD *thread_ptr;
/* Restore interrupts. */
TX_RESTORE
/* Illegal caller of this service. */
status = TX_CALLER_ERROR;
}
@@ -130,7 +130,7 @@ TX_THREAD *thread_ptr;
/* Restore interrupts. */
TX_RESTORE
/* Just return with a successful status. */
status = TX_SUCCESS;
}
@@ -143,13 +143,13 @@ TX_THREAD *thread_ptr;
/* Restore interrupts. */
TX_RESTORE
/* Suspension is not allowed if the preempt disable flag is non-zero at this point - return error completion. */
status = TX_CALLER_ERROR;
}
else
{
/* If trace is enabled, insert this event into the trace buffer. */
TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_SLEEP, TX_ULONG_TO_POINTER_CONVERT(timer_ticks), thread_ptr -> tx_thread_state, TX_POINTER_TO_ULONG_CONVERT(&status), 0, TX_TRACE_THREAD_EVENTS)
@@ -193,7 +193,7 @@ TX_THREAD *thread_ptr;
status = thread_ptr -> tx_thread_suspend_status;
}
}
/* Return completion status. */
return(status);
}

View File

@@ -94,7 +94,7 @@ ULONG size;
/* Pickup the current stack variables. */
stack_lowest = TX_VOID_TO_ULONG_POINTER_CONVERT(thread_ptr -> tx_thread_stack_start);
/* Determine if the pointer is null. */
if (stack_lowest != TX_NULL)
{
@@ -105,11 +105,11 @@ ULONG size;
/* Determine if the pointer is null. */
if (stack_highest != TX_NULL)
{
/* Restore interrupts. */
TX_RESTORE
/* We need to binary search the remaining stack for missing 0xEFEFEFEF 32-bit data pattern.
/* We need to binary search the remaining stack for missing 0xEFEFEFEF 32-bit data pattern.
This is a best effort algorithm to find the highest stack usage. */
do
{
@@ -137,7 +137,7 @@ ULONG size;
/* Position to first used word - at this point we are within a few words. */
while (*stack_ptr == TX_STACK_FILL)
{
/* Position to next word in stack. */
stack_ptr = TX_ULONG_POINTER_ADD(stack_ptr, 1);
}
@@ -153,19 +153,19 @@ ULONG size;
{
/* Yes, thread is still created. */
/* Now check the new highest stack pointer is past the stack start. */
if (stack_ptr > (TX_VOID_TO_ULONG_POINTER_CONVERT(thread_ptr -> tx_thread_stack_start)))
{
/* Yes, now check that the new highest stack pointer is less than the previous highest stack pointer. */
if (stack_ptr < (TX_VOID_TO_ULONG_POINTER_CONVERT(thread_ptr -> tx_thread_stack_highest_ptr)))
{
/* Yes, is the current highest stack pointer pointing at used memory? */
if (*stack_ptr != TX_STACK_FILL)
{
/* Yes, setup the highest stack usage. */
thread_ptr -> tx_thread_stack_highest_ptr = stack_ptr;
}

View File

@@ -26,8 +26,7 @@
/* Include necessary system files. */
#include "tx_api.h"
#ifndef TX_PORT_THREAD_STACK_ERROR_HANDLER
#if defined(TX_MISRA_ENABLE) || defined(TX_ENABLE_STACK_CHECKING)
#if defined(TX_MISRA_ENABLE) || defined(TX_ENABLE_STACK_CHECKING) || defined(TX_PORT_THREAD_STACK_ERROR_HANDLING)
#include "tx_thread.h"
@@ -36,7 +35,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_stack_error_handler PORTABLE C */
/* 6.1.7 */
/* 6.1.9 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -78,6 +77,9 @@
/* conditional compilation */
/* for ARMv8-M (Cortex M23/33) */
/* resulting in version 6.1.7 */
/* 10-15-2021 Yuxin Zhou Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
VOID _tx_thread_stack_error_handler(TX_THREAD *thread_ptr)
@@ -85,7 +87,7 @@ VOID _tx_thread_stack_error_handler(TX_THREAD *thread_ptr)
TX_INTERRUPT_SAVE_AREA
#ifdef TX_ENABLE_STACK_CHECKING
#if defined(TX_ENABLE_STACK_CHECKING) || defined(TX_PORT_THREAD_STACK_ERROR_HANDLING)
/* Disable interrupts. */
TX_DISABLE
@@ -109,12 +111,10 @@ TX_INTERRUPT_SAVE_AREA
/* Disable interrupts. */
TX_DISABLE
/* Restore interrupts. */
TX_RESTORE
}
#endif
}
#endif /* TX_MISRA_ENABLE */
#endif /* TX_PORT_THREAD_STACK_ERROR_HANDLER */
#endif

View File

@@ -26,9 +26,8 @@
/* Include necessary system files. */
#include "tx_api.h"
#ifndef TX_PORT_THREAD_STACK_ERROR_NOTIFY
#include "tx_thread.h"
#ifdef TX_ENABLE_STACK_CHECKING
#if defined(TX_ENABLE_STACK_CHECKING) || defined(TX_PORT_THREAD_STACK_ERROR_HANDLING)
#include "tx_trace.h"
#endif
@@ -38,7 +37,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_stack_error_notify PORTABLE C */
/* 6.1.7 */
/* 6.1.9 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -79,12 +78,15 @@
/* conditional compilation */
/* for ARMv8-M (Cortex M23/33) */
/* resulting in version 6.1.7 */
/* 10-15-2021 Yuxin Zhou Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
UINT _tx_thread_stack_error_notify(VOID (*stack_error_handler)(TX_THREAD *thread_ptr))
{
#ifndef TX_ENABLE_STACK_CHECKING
#if !defined(TX_ENABLE_STACK_CHECKING) && !defined(TX_PORT_THREAD_STACK_ERROR_HANDLING)
UINT status;
@@ -98,13 +100,14 @@ UINT status;
}
else
{
/* Stack checking is not enabled, just return an error. */
status = TX_FEATURE_NOT_ENABLED;
}
/* Return completion status. */
return(status);
#else
TX_INTERRUPT_SAVE_AREA
@@ -129,5 +132,3 @@ TX_INTERRUPT_SAVE_AREA
return(TX_SUCCESS);
#endif
}
#endif /* TX_PORT_THREAD_STACK_ERROR_NOTIFY */

View File

@@ -109,7 +109,7 @@ UINT status;
/* Determine if we are in a thread context. */
if (TX_THREAD_GET_SYSTEM_STATE() == ((ULONG) 0))
{
/* Yes, we are in a thread context. */
/* Determine if the current thread is also the suspending thread. */
@@ -120,13 +120,13 @@ UINT status;
if (_tx_thread_preempt_disable != ((UINT) 0))
{
/* Current thread cannot suspend when the preempt disable flag is non-zero,
/* Current thread cannot suspend when the preempt disable flag is non-zero,
return an error. */
status = TX_SUSPEND_ERROR;
}
}
}
/* Determine if the status is still successful. */
if (status == TX_SUCCESS)
{
@@ -163,7 +163,7 @@ UINT status;
/* Disable interrupts. */
TX_DISABLE
/* Return success. */
status = TX_SUCCESS;
#else
@@ -204,15 +204,15 @@ UINT status;
/* Restore interrupts. */
TX_RESTORE
/* Always return success, since this function does not perform error
/* Always return success, since this function does not perform error
checking. */
return(status);
#else
/* In-line thread suspension processing follows, which is effectively just taking the
/* In-line thread suspension processing follows, which is effectively just taking the
logic in tx_thread_system_suspend.c and placing it here! */
UINT priority;
UINT base_priority;
ULONG priority_map;
@@ -270,7 +270,7 @@ ULONG time_stamp = ((ULONG) 0);
/* Determine if we are in a thread context. */
if (TX_THREAD_GET_SYSTEM_STATE() == ((ULONG) 0))
{
/* Yes, we are in a thread context. */
/* Determine if the current thread is also the suspending thread. */
@@ -281,19 +281,19 @@ ULONG time_stamp = ((ULONG) 0);
if (_tx_thread_preempt_disable != ((UINT) 0))
{
/* Current thread cannot suspend when the preempt disable flag is non-zero,
/* Current thread cannot suspend when the preempt disable flag is non-zero,
return an error. */
status = TX_SUSPEND_ERROR;
}
}
}
/* Determine if the status is still successful. */
if (status == TX_SUCCESS)
{
#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO
/* Increment the thread's suspend count. */
thread_ptr -> tx_thread_performance_suspend_count++;
@@ -322,7 +322,7 @@ ULONG time_stamp = ((ULONG) 0);
#ifdef TX_ENABLE_EVENT_TRACE
/* Save the time stamp for later comparison to verify that
the event hasn't been overwritten by the time we have
the event hasn't been overwritten by the time we have
computed the next thread to execute. */
if (entry_ptr != TX_NULL)
{
@@ -391,7 +391,7 @@ ULONG time_stamp = ((ULONG) 0);
else
{
/* This is the only thread at this priority ready to run. Set the head
/* This is the only thread at this priority ready to run. Set the head
pointer to NULL. */
_tx_thread_priority_list[priority] = TX_NULL;
@@ -483,13 +483,13 @@ ULONG time_stamp = ((ULONG) 0);
suspend event. In that case, do nothing here. */
if (entry_ptr != TX_NULL)
{
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to the new value of the
next thread to execute. This can be used by the trace analysis tool to keep
next thread to execute. This can be used by the trace analysis tool to keep
track of next thread execution. */
entry_ptr -> tx_trace_buffer_entry_information_field_4 = 0;
}
@@ -521,7 +521,7 @@ ULONG time_stamp = ((ULONG) 0);
}
else
{
/* Calculate the lowest bit set in the priority map. */
TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit)
@@ -529,7 +529,7 @@ ULONG time_stamp = ((ULONG) 0);
_tx_thread_highest_priority = base_priority + priority_bit;
}
}
/* Determine if this thread is the thread designated to execute. */
if (thread_ptr == _tx_thread_execute_ptr)
{
@@ -599,7 +599,7 @@ ULONG time_stamp = ((ULONG) 0);
if (_tx_thread_highest_priority >= (_tx_thread_priority_list[priority] -> tx_thread_preempt_threshold))
{
/* Thread not allowed to execute until earlier preempted thread finishes or lowers its
/* Thread not allowed to execute until earlier preempted thread finishes or lowers its
preemption-threshold. */
_tx_thread_execute_ptr = _tx_thread_priority_list[priority];
@@ -614,9 +614,9 @@ ULONG time_stamp = ((ULONG) 0);
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to the new value of the
next thread to execute. This can be used by the trace analysis tool to keep
next thread to execute. This can be used by the trace analysis tool to keep
track of next thread execution. */
#ifdef TX_MISRA_ENABLE
entry_ptr -> tx_trace_buffer_entry_info_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
@@ -636,7 +636,7 @@ ULONG time_stamp = ((ULONG) 0);
/* Determine if there are any other bits set in this preempt map. */
if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
{
/* No, clear the active bit to signify this preempt map has nothing set. */
TX_DIV32_BIT_SET(priority, priority_bit)
_tx_thread_preempted_map_active = _tx_thread_preempted_map_active & (~(priority_bit));
@@ -653,18 +653,18 @@ ULONG time_stamp = ((ULONG) 0);
/* Is the execute pointer different? */
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
{
/* Move to next entry. */
_tx_thread_performance__execute_log_index++;
/* Check for wrap condition. */
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
{
/* Set the index to the beginning. */
_tx_thread_performance__execute_log_index = ((UINT) 0);
}
/* Log the new execute pointer. */
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
}
@@ -677,13 +677,13 @@ ULONG time_stamp = ((ULONG) 0);
suspend event. In that case, do nothing here. */
if (entry_ptr != TX_NULL)
{
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to the new value of the
next thread to execute. This can be used by the trace analysis tool to keep
next thread to execute. This can be used by the trace analysis tool to keep
track of next thread execution. */
entry_ptr -> tx_trace_buffer_entry_information_field_4 = 0;
}
@@ -719,18 +719,18 @@ ULONG time_stamp = ((ULONG) 0);
/* Is the execute pointer different? */
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
{
/* Move to next entry. */
_tx_thread_performance__execute_log_index++;
/* Check for wrap condition. */
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
{
/* Set the index to the beginning. */
_tx_thread_performance__execute_log_index = ((UINT) 0);
}
/* Log the new execute pointer. */
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
}
@@ -747,9 +747,9 @@ ULONG time_stamp = ((ULONG) 0);
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to the new value of the
next thread to execute. This can be used by the trace analysis tool to keep
next thread to execute. This can be used by the trace analysis tool to keep
track of next thread execution. */
#ifdef TX_MISRA_ENABLE
entry_ptr -> tx_trace_buffer_entry_info_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
@@ -798,7 +798,7 @@ ULONG time_stamp = ((ULONG) 0);
_tx_thread_performance_non_idle_return_count++;
}
#endif
/* Preemption is needed - return to the system! */
_tx_thread_system_return();
}

View File

@@ -84,7 +84,7 @@ TX_THREAD *thread_ptr;
/* Determine if we are in a system state (ISR or Initialization) or internal preemption is disabled. */
if (combined_flags == ((ULONG) 0))
{
/* No, at thread execution level so continue checking for preemption. */
/* Pickup thread pointer. */

View File

@@ -140,7 +140,7 @@ UINT map_index;
#ifdef TX_ENABLE_EVENT_TRACE
/* Save the time stamp for later comparison to verify that
the event hasn't been overwritten by the time we have
the event hasn't been overwritten by the time we have
computed the next thread to execute. */
if (entry_ptr != TX_NULL)
{
@@ -158,7 +158,7 @@ UINT map_index;
if (thread_ptr -> tx_thread_suspending == TX_FALSE)
{
/* Thread is not in the process of suspending. Now check to make sure the thread
/* Thread is not in the process of suspending. Now check to make sure the thread
has not already been resumed. */
if (thread_ptr -> tx_thread_state != TX_READY)
{
@@ -166,9 +166,9 @@ UINT map_index;
/* No, now check to see if the delayed suspension flag is set. */
if (thread_ptr -> tx_thread_delayed_suspend == TX_FALSE)
{
/* Resume the thread! */
/* Make this thread ready. */
/* Change the state to ready. */
@@ -229,7 +229,7 @@ UINT map_index;
/* Pickup the execute pointer. Since it is going to be referenced multiple
times, it is placed in a local variable. */
execute_ptr = _tx_thread_execute_ptr;
/* Determine if no thread is currently executing. */
if (execute_ptr == TX_NULL)
{
@@ -239,9 +239,9 @@ UINT map_index;
}
else
{
/* Another thread has been scheduled for execution. */
/* Check to see if this is a higher priority thread and determine if preemption is allowed. */
if (priority < execute_ptr -> tx_thread_preempt_threshold)
{
@@ -282,7 +282,7 @@ UINT map_index;
}
else
{
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
{
@@ -301,7 +301,7 @@ UINT map_index;
/* Yes, modify the execute thread pointer. */
_tx_thread_execute_ptr = thread_ptr;
#ifndef TX_MISRA_ENABLE
/* If MISRA is not-enabled, insert a preemption and return in-line for performance. */
@@ -311,18 +311,18 @@ UINT map_index;
/* Is the execute pointer different? */
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
{
/* Move to next entry. */
_tx_thread_performance__execute_log_index++;
/* Check for wrap condition. */
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
{
/* Set the index to the beginning. */
_tx_thread_performance__execute_log_index = ((UINT) 0);
}
/* Log the new execute pointer. */
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
}
@@ -335,11 +335,11 @@ UINT map_index;
resume event. In that case, do nothing here. */
if (entry_ptr != TX_NULL)
{
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to NULL. This can
be used by the trace analysis tool to show idle system conditions. */
entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
@@ -408,15 +408,15 @@ UINT map_index;
}
else
{
/* A resumption occurred in the middle of a previous thread suspension. */
/* Make sure the type of suspension under way is not a terminate or
thread completion. In either of these cases, do not void the
thread completion. In either of these cases, do not void the
interrupted suspension processing. */
if (thread_ptr -> tx_thread_state != TX_COMPLETED)
{
/* Make sure the thread isn't terminated. */
if (thread_ptr -> tx_thread_state != TX_TERMINATED)
{
@@ -439,7 +439,7 @@ UINT map_index;
}
else
{
/* Clear the delayed suspend flag and change the state. */
thread_ptr -> tx_thread_delayed_suspend = TX_FALSE;
thread_ptr -> tx_thread_state = TX_SUSPENDED;
@@ -462,18 +462,18 @@ UINT map_index;
/* Is the execute pointer different? */
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
{
/* Move to next entry. */
_tx_thread_performance__execute_log_index++;
/* Check for wrap condition. */
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
{
/* Set the index to the beginning. */
_tx_thread_performance__execute_log_index = ((UINT) 0);
}
/* Log the new execute pointer. */
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
}
@@ -486,11 +486,11 @@ UINT map_index;
resume event. In that case, do nothing here. */
if (entry_ptr != TX_NULL)
{
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to NULL. This can
be used by the trace analysis tool to show idle system conditions. */
#ifdef TX_MISRA_ENABLE
@@ -564,7 +564,7 @@ UINT state;
}
else
{
/* A resumption occurred in the middle of a previous thread suspension. */
/* Pickup the current thread state. */
@@ -575,20 +575,20 @@ UINT state;
/* Move the state into a different variable for MISRA compliance. */
temp_state = state;
#endif
/* Log the thread status change. */
TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_RESUME, thread_ptr, ((ULONG) state), TX_POINTER_TO_ULONG_CONVERT(&temp_state), TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr), TX_TRACE_INTERNAL_EVENTS)
/* Make sure the type of suspension under way is not a terminate or
thread completion. In either of these cases, do not void the
thread completion. In either of these cases, do not void the
interrupted suspension processing. */
if (state != TX_COMPLETED)
{
/* Check for terminated thread. */
if (state != TX_TERMINATED)
{
/* Clear the suspending flag. */
thread_ptr -> tx_thread_suspending = TX_FALSE;
@@ -653,7 +653,7 @@ UINT map_index;
#ifdef TX_ENABLE_EVENT_TRACE
/* Save the time stamp for later comparison to verify that
the event hasn't been overwritten by the time we have
the event hasn't been overwritten by the time we have
computed the next thread to execute. */
if (entry_ptr != TX_NULL)
{
@@ -681,7 +681,7 @@ UINT map_index;
TX_THREAD_STACK_CHECK(thread_ptr)
#endif
/* Thread is not in the process of suspending. Now check to make sure the thread
/* Thread is not in the process of suspending. Now check to make sure the thread
has not already been resumed. */
if (thread_ptr -> tx_thread_state != TX_READY)
{
@@ -752,7 +752,7 @@ UINT map_index;
/* Pickup the execute pointer. Since it is going to be referenced multiple
times, it is placed in a local variable. */
execute_ptr = _tx_thread_execute_ptr;
/* Determine if no thread is currently executing. */
if (execute_ptr == TX_NULL)
{
@@ -762,7 +762,7 @@ UINT map_index;
}
else
{
/* Check to see if this is a higher priority thread and determine if preemption is allowed. */
if (priority < execute_ptr -> tx_thread_preempt_threshold)
{
@@ -803,7 +803,7 @@ UINT map_index;
}
else
{
if (TX_THREAD_GET_SYSTEM_STATE() < TX_INITIALIZE_IN_PROGRESS)
{
@@ -831,18 +831,18 @@ UINT map_index;
/* Is the execute pointer different? */
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
{
/* Move to next entry. */
_tx_thread_performance__execute_log_index++;
/* Check for wrap condition. */
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
{
/* Set the index to the beginning. */
_tx_thread_performance__execute_log_index = ((UINT) 0);
}
/* Log the new execute pointer. */
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
}
@@ -855,11 +855,11 @@ UINT map_index;
resume event. In that case, do nothing here. */
if (entry_ptr != TX_NULL)
{
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to NULL. This can
be used by the trace analysis tool to show idle system conditions. */
entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
@@ -903,7 +903,7 @@ UINT map_index;
{
/* No, there are other threads at this priority already ready. */
/* Just add this thread to the priority list. */
tail_ptr = head_ptr -> tx_thread_ready_previous;
tail_ptr -> tx_thread_ready_next = thread_ptr;
@@ -928,18 +928,18 @@ UINT map_index;
/* Is the execute pointer different? */
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
{
/* Move to next entry. */
_tx_thread_performance__execute_log_index++;
/* Check for wrap condition. */
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
{
/* Set the index to the beginning. */
_tx_thread_performance__execute_log_index = ((UINT) 0);
}
/* Log the new execute pointer. */
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
}
@@ -952,11 +952,11 @@ UINT map_index;
resume event. In that case, do nothing here. */
if (entry_ptr != TX_NULL)
{
/* Does the timestamp match? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to NULL. This can
be used by the trace analysis tool to show idle system conditions. */
#ifdef TX_MISRA_ENABLE

View File

@@ -85,7 +85,7 @@ VOID _tx_thread_system_suspend(TX_THREAD *thread_ptr)
{
TX_INTERRUPT_SAVE_AREA
UINT priority;
UINT base_priority;
ULONG priority_map;
@@ -136,7 +136,7 @@ ULONG time_stamp = ((ULONG) 0);
/* Make sure the suspension is not a wait-forever. */
if (timeout != TX_WAIT_FOREVER)
{
/* Activate the thread timer with the timeout value setup in the caller. */
_tx_timer_system_activate(&(thread_ptr -> tx_thread_timer));
}
@@ -146,7 +146,7 @@ ULONG time_stamp = ((ULONG) 0);
_tx_timer_time_slice = thread_ptr -> tx_thread_new_time_slice;
}
#endif
/* Decrease the preempt disabled count. */
_tx_thread_preempt_disable--;
@@ -182,7 +182,7 @@ ULONG time_stamp = ((ULONG) 0);
#ifdef TX_ENABLE_EVENT_TRACE
/* Save the time stamp for later comparison to verify that
the event hasn't been overwritten by the time we have
the event hasn't been overwritten by the time we have
computed the next thread to execute. */
if (entry_ptr != TX_NULL)
{
@@ -256,7 +256,7 @@ ULONG time_stamp = ((ULONG) 0);
else
{
/* This is the only thread at this priority ready to run. Set the head
/* This is the only thread at this priority ready to run. Set the head
pointer to NULL. */
_tx_thread_priority_list[priority] = TX_NULL;
@@ -348,13 +348,13 @@ ULONG time_stamp = ((ULONG) 0);
suspend event. In that case, do nothing here. */
if (entry_ptr != TX_NULL)
{
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to the new value of the
next thread to execute. This can be used by the trace analysis tool to keep
next thread to execute. This can be used by the trace analysis tool to keep
track of next thread execution. */
entry_ptr -> tx_trace_buffer_entry_information_field_4 = 0;
}
@@ -386,9 +386,9 @@ ULONG time_stamp = ((ULONG) 0);
}
else
{
/* Other threads at different priority levels are ready to run. */
/* Calculate the lowest bit set in the priority map. */
TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit)
@@ -463,7 +463,7 @@ ULONG time_stamp = ((ULONG) 0);
if (_tx_thread_highest_priority >= (_tx_thread_priority_list[priority] -> tx_thread_preempt_threshold))
{
/* Thread not allowed to execute until earlier preempted thread finishes or lowers its
/* Thread not allowed to execute until earlier preempted thread finishes or lowers its
preemption-threshold. */
_tx_thread_execute_ptr = _tx_thread_priority_list[priority];
@@ -476,7 +476,7 @@ ULONG time_stamp = ((ULONG) 0);
/* Determine if there are any other bits set in this preempt map. */
if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
{
/* No, clear the active bit to signify this preempt map has nothing set. */
TX_DIV32_BIT_SET(priority, priority_bit)
_tx_thread_preempted_map_active = _tx_thread_preempted_map_active & (~(priority_bit));
@@ -493,18 +493,18 @@ ULONG time_stamp = ((ULONG) 0);
/* Is the execute pointer different? */
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
{
/* Move to next entry. */
_tx_thread_performance__execute_log_index++;
/* Check for wrap condition. */
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
{
/* Set the index to the beginning. */
_tx_thread_performance__execute_log_index = ((UINT) 0);
}
/* Log the new execute pointer. */
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
}
@@ -517,13 +517,13 @@ ULONG time_stamp = ((ULONG) 0);
suspend event. In that case, do nothing here. */
if (entry_ptr != TX_NULL)
{
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to the new value of the
next thread to execute. This can be used by the trace analysis tool to keep
next thread to execute. This can be used by the trace analysis tool to keep
track of next thread execution. */
entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
}
@@ -559,18 +559,18 @@ ULONG time_stamp = ((ULONG) 0);
/* Is the execute pointer different? */
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
{
/* Move to next entry. */
_tx_thread_performance__execute_log_index++;
/* Check for wrap condition. */
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
{
/* Set the index to the beginning. */
_tx_thread_performance__execute_log_index = ((UINT) 0);
}
/* Log the new execute pointer. */
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
}
@@ -583,13 +583,13 @@ ULONG time_stamp = ((ULONG) 0);
suspend event. In that case, do nothing here. */
if (entry_ptr != TX_NULL)
{
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to the new value of the
next thread to execute. This can be used by the trace analysis tool to keep
next thread to execute. This can be used by the trace analysis tool to keep
track of next thread execution. */
#ifdef TX_MISRA_ENABLE
entry_ptr -> tx_trace_buffer_entry_info_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
@@ -655,14 +655,14 @@ ULONG time_stamp = ((ULONG) 0);
TX_INTERRUPT_SAVE_AREA
ULONG wait_option;
/* Disable interrupts. */
TX_DISABLE
/* Determine if the thread is still suspending. */
if (thread_ptr -> tx_thread_suspending == TX_TRUE)
{
/* Yes, prepare to call the non-interruptable system suspend function. */
/* Clear the thread suspending flag. */
@@ -670,7 +670,7 @@ ULONG wait_option;
/* Pickup the wait option. */
wait_option = thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks;
/* Decrement the preempt disable count. */
_tx_thread_preempt_disable--;
@@ -690,7 +690,7 @@ ULONG wait_option;
VOID _tx_thread_system_ni_suspend(TX_THREAD *thread_ptr, ULONG wait_option)
{
UINT priority;
UINT base_priority;
ULONG priority_map;
@@ -719,7 +719,7 @@ ULONG time_stamp = ((ULONG) 0);
/* Determine if a timeout needs to be activated. */
if (thread_ptr == current_thread)
{
/* Is there a wait option? */
if (wait_option != TX_NO_WAIT)
{
@@ -727,7 +727,7 @@ ULONG time_stamp = ((ULONG) 0);
/* Make sure it is not a wait-forever option. */
if (wait_option != TX_WAIT_FOREVER)
{
/* Setup the wait option. */
thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks = wait_option;
@@ -735,7 +735,7 @@ ULONG time_stamp = ((ULONG) 0);
_tx_timer_system_activate(&(thread_ptr -> tx_thread_timer));
}
}
/* Reset time slice for current thread. */
_tx_timer_time_slice = thread_ptr -> tx_thread_new_time_slice;
}
@@ -774,7 +774,7 @@ ULONG time_stamp = ((ULONG) 0);
#ifdef TX_ENABLE_EVENT_TRACE
/* Save the time stamp for later comparison to verify that
the event hasn't been overwritten by the time we have
the event hasn't been overwritten by the time we have
computed the next thread to execute. */
if (entry_ptr != TX_NULL)
{
@@ -845,7 +845,7 @@ ULONG time_stamp = ((ULONG) 0);
else
{
/* This is the only thread at this priority ready to run. Set the head
/* This is the only thread at this priority ready to run. Set the head
pointer to NULL. */
_tx_thread_priority_list[priority] = TX_NULL;
@@ -937,13 +937,13 @@ ULONG time_stamp = ((ULONG) 0);
suspend event. In that case, do nothing here. */
if (entry_ptr != TX_NULL)
{
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to the new value of the
next thread to execute. This can be used by the trace analysis tool to keep
next thread to execute. This can be used by the trace analysis tool to keep
track of next thread execution. */
entry_ptr -> tx_trace_buffer_entry_information_field_4 = 0;
}
@@ -972,7 +972,7 @@ ULONG time_stamp = ((ULONG) 0);
}
else
{
/* Calculate the lowest bit set in the priority map. */
TX_LOWEST_SET_BIT_CALCULATE(priority_map, priority_bit)
@@ -980,7 +980,7 @@ ULONG time_stamp = ((ULONG) 0);
_tx_thread_highest_priority = base_priority + ((UINT) priority_bit);
}
}
/* Determine if the suspending thread is the thread designated to execute. */
if (thread_ptr == _tx_thread_execute_ptr)
{
@@ -1038,7 +1038,7 @@ ULONG time_stamp = ((ULONG) 0);
if (_tx_thread_highest_priority >= (_tx_thread_priority_list[priority] -> tx_thread_preempt_threshold))
{
/* Thread not allowed to execute until earlier preempted thread finishes or lowers its
/* Thread not allowed to execute until earlier preempted thread finishes or lowers its
preemption-threshold. */
_tx_thread_execute_ptr = _tx_thread_priority_list[priority];
@@ -1051,7 +1051,7 @@ ULONG time_stamp = ((ULONG) 0);
/* Determine if there are any other bits set in this preempt map. */
if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
{
/* No, clear the active bit to signify this preempt map has nothing set. */
TX_DIV32_BIT_SET(priority, priority_bit)
_tx_thread_preempted_map_active = _tx_thread_preempted_map_active & (~(priority_bit));
@@ -1068,18 +1068,18 @@ ULONG time_stamp = ((ULONG) 0);
/* Is the execute pointer different? */
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
{
/* Move to next entry. */
_tx_thread_performance__execute_log_index++;
/* Check for wrap condition. */
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
{
/* Set the index to the beginning. */
_tx_thread_performance__execute_log_index = ((UINT) 0);
}
/* Log the new execute pointer. */
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
}
@@ -1092,13 +1092,13 @@ ULONG time_stamp = ((ULONG) 0);
suspend event. In that case, do nothing here. */
if (entry_ptr != TX_NULL)
{
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to the new value of the
next thread to execute. This can be used by the trace analysis tool to keep
next thread to execute. This can be used by the trace analysis tool to keep
track of next thread execution. */
entry_ptr -> tx_trace_buffer_entry_information_field_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);
}
@@ -1131,18 +1131,18 @@ ULONG time_stamp = ((ULONG) 0);
/* Is the execute pointer different? */
if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
{
/* Move to next entry. */
_tx_thread_performance__execute_log_index++;
/* Check for wrap condition. */
if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
{
/* Set the index to the beginning. */
_tx_thread_performance__execute_log_index = ((UINT) 0);
}
/* Log the new execute pointer. */
_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] = _tx_thread_execute_ptr;
}
@@ -1155,13 +1155,13 @@ ULONG time_stamp = ((ULONG) 0);
suspend event. In that case, do nothing here. */
if (entry_ptr != TX_NULL)
{
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, set the "next thread pointer" to the new value of the
next thread to execute. This can be used by the trace analysis tool to keep
next thread to execute. This can be used by the trace analysis tool to keep
track of next thread execution. */
#ifdef TX_MISRA_ENABLE
entry_ptr -> tx_trace_buffer_entry_info_4 = TX_POINTER_TO_ULONG_CONVERT(_tx_thread_execute_ptr);

View File

@@ -201,7 +201,7 @@ ULONG suspension_sequence;
/* Thread state change. */
TX_THREAD_STATE_CHANGE(thread_ptr, TX_TERMINATED)
/* Set the suspending flag. This prevents the thread from being
/* Set the suspending flag. This prevents the thread from being
resumed before the cleanup routine is executed. */
thread_ptr -> tx_thread_suspending = TX_TRUE;
@@ -279,7 +279,7 @@ ULONG suspension_sequence;
if (_tx_thread_mutex_release != TX_NULL)
{
/* Yes, call the mutex release function via a function pointer that
/* Yes, call the mutex release function via a function pointer that
is setup during initialization. */
(_tx_thread_mutex_release)(thread_ptr);
}

View File

@@ -97,7 +97,7 @@ UINT preempt_disable;
/* Check this thread's stack. */
TX_THREAD_STACK_CHECK(thread_ptr)
/* Set the next thread pointer to NULL. */
next_thread_ptr = TX_NULL;
#endif
@@ -130,15 +130,15 @@ UINT preempt_disable;
/* Check to see if preemption-threshold is not being used. */
if (thread_ptr -> tx_thread_priority == thread_ptr -> tx_thread_preempt_threshold)
{
/* Preemption-threshold is not being used by this thread. */
/* There is another thread at this priority, make it the highest at
this priority level. */
_tx_thread_priority_list[thread_ptr -> tx_thread_priority] = thread_ptr -> tx_thread_ready_next;
/* Designate the highest priority thread as the one to execute. Don't use this
thread's priority as an index just in case a higher priority thread is now
/* Designate the highest priority thread as the one to execute. Don't use this
thread's priority as an index just in case a higher priority thread is now
ready! */
_tx_thread_execute_ptr = _tx_thread_priority_list[_tx_thread_highest_priority];
@@ -167,11 +167,11 @@ UINT preempt_disable;
/* Pickup the volatile information. */
system_state = TX_THREAD_GET_SYSTEM_STATE();
preempt_disable = _tx_thread_preempt_disable;
/* Insert this event into the trace buffer. */
TX_TRACE_IN_LINE_INSERT(TX_TRACE_TIME_SLICE, _tx_thread_execute_ptr, system_state, preempt_disable, TX_POINTER_TO_ULONG_CONVERT(&thread_ptr), TX_TRACE_INTERNAL_EVENTS)
#endif
/* Restore previous interrupt posture. */
TX_RESTORE

View File

@@ -105,7 +105,7 @@ TX_THREAD *current_thread;
/* Determine if this thread is the currently executing thread. */
if (thread_ptr == current_thread)
{
/* Yes, update the time-slice countdown variable. */
_tx_timer_time_slice = new_time_slice;
}

View File

@@ -128,7 +128,7 @@ ULONG suspension_sequence;
/* Increment the number of timeouts for this thread. */
thread_ptr -> tx_thread_performance_timeout_count++;
#endif
/* Pickup the cleanup routine address. */
suspend_cleanup = thread_ptr -> tx_thread_suspend_cleanup;
@@ -152,6 +152,7 @@ ULONG suspension_sequence;
/* Call any cleanup routines. */
if (suspend_cleanup != TX_NULL)
{
/* Yes, there is a function to call. */
(suspend_cleanup)(thread_ptr, suspension_sequence);
}

View File

@@ -95,14 +95,14 @@ ULONG suspension_sequence;
/* Determine if the thread is currently suspended. */
if (thread_ptr -> tx_thread_state < TX_SLEEP)
{
/* Thread is either ready, completed, terminated, or in a pure
/* Thread is either ready, completed, terminated, or in a pure
suspension condition. */
/* Restore interrupts. */
TX_RESTORE
/* Just return with an error message to indicate that
/* Just return with an error message to indicate that
nothing was done. */
status = TX_WAIT_ABORT_ERROR;
}
@@ -136,7 +136,7 @@ ULONG suspension_sequence;
{
/* Process all other suspension timeouts. */
/* Set the state to suspended. */
thread_ptr -> tx_thread_state = TX_SUSPENDED;
@@ -217,7 +217,7 @@ ULONG suspension_sequence;
/* Disable interrupts. */
TX_DISABLE
/* Decrement the disable preemption flag. */
_tx_thread_preempt_disable--;
@@ -225,7 +225,7 @@ ULONG suspension_sequence;
TX_RESTORE
#endif
/* Return with an error message to indicate that
/* Return with an error message to indicate that
nothing was done. */
status = TX_WAIT_ABORT_ERROR;
}

View File

@@ -35,7 +35,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_time_get PORTABLE C */
/* 6.1 */
/* 6.1.3 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -68,6 +68,8 @@
/* 05-19-2020 William E. Lamie Initial Version 6.0 */
/* 09-30-2020 Yuxin Zhou Modified comment(s), */
/* resulting in version 6.1 */
/* 12-31-2020 Andres Mlinar Modified comment(s), */
/* resulting in version 6.1.3 */
/* */
/**************************************************************************/
ULONG _tx_time_get(VOID)

View File

@@ -83,7 +83,7 @@ UINT status;
TX_DISABLE
#ifdef TX_ENABLE_EVENT_TRACE
/* If trace is enabled, insert this event into the trace buffer. */
TX_TRACE_IN_LINE_INSERT(TX_TRACE_TIMER_ACTIVATE, timer_ptr, 0, 0, 0, TX_TRACE_TIMER_EVENTS)
#endif

View File

@@ -77,7 +77,7 @@ UINT _tx_timer_change(TX_TIMER *timer_ptr, ULONG initial_ticks, ULONG reschedul
TX_INTERRUPT_SAVE_AREA
/* Disable interrupts to put the timer on the created list. */
TX_DISABLE
@@ -90,7 +90,7 @@ TX_INTERRUPT_SAVE_AREA
/* Determine if the timer is active. */
if (timer_ptr -> tx_timer_internal.tx_timer_internal_list_head == TX_NULL)
{
/* Setup the new expiration fields. */
timer_ptr -> tx_timer_internal.tx_timer_internal_remaining_ticks = initial_ticks;
timer_ptr -> tx_timer_internal.tx_timer_internal_re_initialize_ticks = reschedule_ticks;

View File

@@ -75,7 +75,7 @@
/* resulting in version 6.1 */
/* */
/**************************************************************************/
UINT _tx_timer_create(TX_TIMER *timer_ptr, CHAR *name_ptr,
UINT _tx_timer_create(TX_TIMER *timer_ptr, CHAR *name_ptr,
VOID (*expiration_function)(ULONG id), ULONG expiration_input,
ULONG initial_ticks, ULONG reschedule_ticks, UINT auto_activate)
{
@@ -95,7 +95,7 @@ TX_TIMER *previous_timer;
timer_ptr -> tx_timer_internal.tx_timer_internal_re_initialize_ticks = reschedule_ticks;
timer_ptr -> tx_timer_internal.tx_timer_internal_timeout_function = expiration_function;
timer_ptr -> tx_timer_internal.tx_timer_internal_timeout_param = expiration_input;
/* Disable interrupts to put the timer on the created list. */
TX_DISABLE
@@ -130,7 +130,7 @@ TX_TIMER *previous_timer;
/* Increment the number of created timers. */
_tx_timer_created_count++;
/* Optional timer create extended processing. */
TX_TIMER_CREATE_EXTENSION(timer_ptr)

View File

@@ -80,7 +80,7 @@ TX_TIMER_INTERNAL *previous_timer;
ULONG ticks_left;
UINT active_timer_list;
/* Setup internal timer pointer. */
internal_ptr = &(timer_ptr -> tx_timer_internal);
@@ -125,7 +125,7 @@ UINT active_timer_list;
active_timer_list = TX_TRUE;
}
}
/* Determine if the timer is on active timer list. */
if (active_timer_list == TX_TRUE)
{
@@ -139,7 +139,7 @@ UINT active_timer_list;
if (TX_TIMER_INDIRECT_TO_VOID_POINTER_CONVERT(list_head) >= TX_TIMER_INDIRECT_TO_VOID_POINTER_CONVERT(_tx_timer_current_ptr))
{
/* Calculate ticks left to expiration - just the difference between this
/* Calculate ticks left to expiration - just the difference between this
timer's entry and the current timer pointer. */
ticks_left = (ULONG) (TX_TIMER_POINTER_DIF(list_head,_tx_timer_current_ptr)) + ((ULONG) 1);
}
@@ -148,57 +148,57 @@ UINT active_timer_list;
/* Calculate the ticks left with a wrapped list condition. */
ticks_left = (ULONG) (TX_TIMER_POINTER_DIF(list_head,_tx_timer_list_start));
ticks_left = ticks_left + (ULONG) ((TX_TIMER_POINTER_DIF(_tx_timer_list_end, _tx_timer_current_ptr)) + ((ULONG) 1));
}
/* Adjust the remaining ticks accordingly. */
if (internal_ptr -> tx_timer_internal_remaining_ticks > TX_TIMER_ENTRIES)
{
/* Subtract off the last full pass through the timer list and add the
time left. */
internal_ptr -> tx_timer_internal_remaining_ticks =
internal_ptr -> tx_timer_internal_remaining_ticks =
(internal_ptr -> tx_timer_internal_remaining_ticks - TX_TIMER_ENTRIES) + ticks_left;
}
else
{
/* Just put the ticks left into the timer's remaining ticks. */
internal_ptr -> tx_timer_internal_remaining_ticks = ticks_left;
}
}
else
{
/* Determine if this is timer has just expired. */
if (_tx_timer_expired_timer_ptr != internal_ptr)
{
/* No, it hasn't expired. Now check for remaining time greater than the list
size. */
if (internal_ptr -> tx_timer_internal_remaining_ticks > TX_TIMER_ENTRIES)
{
/* Adjust the remaining ticks. */
internal_ptr -> tx_timer_internal_remaining_ticks =
internal_ptr -> tx_timer_internal_remaining_ticks =
internal_ptr -> tx_timer_internal_remaining_ticks - TX_TIMER_ENTRIES;
}
else
{
/* Set the remaining time to the reactivation time. */
internal_ptr -> tx_timer_internal_remaining_ticks = internal_ptr -> tx_timer_internal_re_initialize_ticks;
}
}
else
{
/* Set the remaining time to the reactivation time. */
internal_ptr -> tx_timer_internal_remaining_ticks = internal_ptr -> tx_timer_internal_re_initialize_ticks;
}
}
/* Pickup the next timer. */
next_timer = internal_ptr -> tx_timer_internal_active_next;
@@ -230,7 +230,7 @@ UINT active_timer_list;
if (*(list_head) == internal_ptr)
{
/* Update the next timer in the list with the list head
/* Update the next timer in the list with the list head
pointer. */
next_timer -> tx_timer_internal_list_head = list_head;

View File

@@ -106,7 +106,7 @@ TX_TIMER *previous_timer;
/* Decrement the number of created timers. */
_tx_timer_created_count--;
/* See if the timer is the only one on the list. */
if (_tx_timer_created_count == TX_EMPTY)
{
@@ -126,7 +126,7 @@ TX_TIMER *previous_timer;
/* See if we have to update the created list head pointer. */
if (_tx_timer_created_ptr == timer_ptr)
{
/* Yes, move the head pointer to the next link. */
_tx_timer_created_ptr = next_timer;
}

View File

@@ -105,7 +105,7 @@ TX_TIMER *timer_ptr;
#ifndef TX_TIMER_PROCESS_IN_ISR
/* Don't process in the ISR, wakeup the system timer thread to process the
/* Don't process in the ISR, wakeup the system timer thread to process the
timer expiration. */
/* Disable interrupts. */
@@ -132,8 +132,8 @@ TX_TIMER *timer_ptr;
#else
/* Process the timer expiration directly in the ISR. This increases the interrupt
processing, however, it eliminates the need for a system timer thread and associated
/* Process the timer expiration directly in the ISR. This increases the interrupt
processing, however, it eliminates the need for a system timer thread and associated
resources. */
/* Disable interrupts. */
@@ -151,7 +151,7 @@ TX_TIMER *timer_ptr;
{
/* Proceed with timer processing. */
/* Set the timer interrupt processing active flag. */
_tx_timer_processing_active = TX_TRUE;
@@ -159,7 +159,7 @@ TX_TIMER *timer_ptr;
do
{
/* First, move the current list pointer and clear the timer
/* First, move the current list pointer and clear the timer
expired value. This allows the interrupt handling portion
to continue looking for timer expirations. */
@@ -170,19 +170,19 @@ TX_TIMER *timer_ptr;
is one! */
if (expired_timers != TX_NULL)
{
expired_timers -> tx_timer_internal_list_head = &expired_timers;
}
/* Set the current list pointer to NULL. */
*_tx_timer_current_ptr = TX_NULL;
/* Move the current pointer up one timer entry wrap if we get to
/* Move the current pointer up one timer entry wrap if we get to
the end of the list. */
_tx_timer_current_ptr = TX_TIMER_POINTER_ADD(_tx_timer_current_ptr, 1);
if (_tx_timer_current_ptr == _tx_timer_list_end)
{
_tx_timer_current_ptr = _tx_timer_list_start;
}
@@ -202,10 +202,10 @@ TX_TIMER *timer_ptr;
/* Something is on the list. Remove it and process the expiration. */
current_timer = expired_timers;
/* Pickup the next timer. */
next_timer = expired_timers -> tx_timer_internal_active_next;
/* Set the reactivate timer to NULL. */
reactivate_timer = TX_NULL;
@@ -214,7 +214,7 @@ TX_TIMER *timer_ptr;
{
/* Yes, this is the only timer in the list. */
/* Set the head pointer to NULL. */
expired_timers = TX_NULL;
}
@@ -222,7 +222,7 @@ TX_TIMER *timer_ptr;
{
/* No, not the only expired timer. */
/* Remove this timer from the expired list. */
previous_timer = current_timer -> tx_timer_internal_active_previous;
next_timer -> tx_timer_internal_active_previous = previous_timer;
@@ -237,7 +237,7 @@ TX_TIMER *timer_ptr;
/* In any case, the timer is now off of the expired list. */
/* Determine if the timer has expired or if it is just a really
/* Determine if the timer has expired or if it is just a really
big timer that needs to be placed in the list again. */
if (current_timer -> tx_timer_internal_remaining_ticks > TX_TIMER_ENTRIES)
{
@@ -253,25 +253,25 @@ TX_TIMER *timer_ptr;
/* Determine if this is an application timer. */
if (current_timer -> tx_timer_internal_timeout_function != &_tx_thread_timeout)
{
/* Derive the application timer pointer. */
/* Pickup the application timer pointer. */
TX_USER_TIMER_POINTER_GET(current_timer, timer_ptr)
/* Increment the number of expiration adjustments on this timer. */
if (timer_ptr -> tx_timer_id == TX_TIMER_ID)
{
timer_ptr -> tx_timer_performance__expiration_adjust_count++;
}
}
#endif
/* Decrement the remaining ticks of the timer. */
current_timer -> tx_timer_internal_remaining_ticks =
current_timer -> tx_timer_internal_remaining_ticks =
current_timer -> tx_timer_internal_remaining_ticks - TX_TIMER_ENTRIES;
/* Set the timeout function to NULL in order to bypass the
expiration. */
timeout_function = TX_NULL;
@@ -289,7 +289,7 @@ TX_TIMER *timer_ptr;
{
/* Timer did expire. */
#ifdef TX_TIMER_ENABLE_PERFORMANCE_INFO
/* Increment the total expirations counter. */
@@ -298,22 +298,22 @@ TX_TIMER *timer_ptr;
/* Determine if this is an application timer. */
if (current_timer -> tx_timer_internal_timeout_function != &_tx_thread_timeout)
{
/* Derive the application timer pointer. */
/* Pickup the application timer pointer. */
TX_USER_TIMER_POINTER_GET(current_timer, timer_ptr)
/* Increment the number of expirations on this timer. */
if (timer_ptr -> tx_timer_id == TX_TIMER_ID)
{
timer_ptr -> tx_timer_performance_expiration_count++;
}
}
#endif
/* Copy the calling function and ID into local variables before interrupts
/* Copy the calling function and ID into local variables before interrupts
are re-enabled. */
timeout_function = current_timer -> tx_timer_internal_timeout_function;
timeout_param = current_timer -> tx_timer_internal_timeout_param;
@@ -326,7 +326,7 @@ TX_TIMER *timer_ptr;
{
/* Make the timer appear that it is still active while processing
the expiration routine and with interrupts enabled. This will
the expiration routine and with interrupts enabled. This will
permit proper processing of a timer deactivate from both the
expiration routine and an ISR. */
current_timer -> tx_timer_internal_list_head = &reactivate_timer;
@@ -353,7 +353,7 @@ TX_TIMER *timer_ptr;
/* Call the timer-expiration function, if non-NULL. */
if (timeout_function != TX_NULL)
{
(timeout_function) (timeout_param);
}
@@ -381,16 +381,16 @@ TX_TIMER *timer_ptr;
/* Determine if this is an application timer. */
if (current_timer -> tx_timer_internal_timeout_function != &_tx_thread_timeout)
{
/* Derive the application timer pointer. */
/* Pickup the application timer pointer. */
TX_USER_TIMER_POINTER_GET(current_timer, timer_ptr)
/* Increment the number of expirations on this timer. */
if (timer_ptr -> tx_timer_id == TX_TIMER_ID)
{
timer_ptr -> tx_timer_performance_reactivate_count++;
}
}
@@ -418,7 +418,7 @@ TX_TIMER *timer_ptr;
/* At this point, we are ready to put the timer back on one of
the timer lists. */
/* Calculate the proper place for the timer. */
timer_list = TX_TIMER_POINTER_ADD(_tx_timer_current_ptr, expiration_time);
if (TX_TIMER_INDIRECT_TO_VOID_POINTER_CONVERT(timer_list) >= TX_TIMER_INDIRECT_TO_VOID_POINTER_CONVERT(_tx_timer_list_end))
@@ -432,13 +432,13 @@ TX_TIMER *timer_ptr;
/* Now put the timer on this list. */
if ((*timer_list) == TX_NULL)
{
/* This list is NULL, just put the new timer on it. */
/* Setup the links in this timer. */
current_timer -> tx_timer_internal_active_next = current_timer;
current_timer -> tx_timer_internal_active_previous = current_timer;
/* Setup the list head pointer. */
*timer_list = current_timer;
}
@@ -474,7 +474,7 @@ TX_TIMER *timer_ptr;
_tx_timer_processing_active = TX_FALSE;
}
}
/* Restore interrupts. */
TX_RESTORE
#endif

View File

@@ -76,7 +76,7 @@
/* resulting in version 6.1 */
/* */
/**************************************************************************/
UINT _tx_timer_info_get(TX_TIMER *timer_ptr, CHAR **name, UINT *active, ULONG *remaining_ticks,
UINT _tx_timer_info_get(TX_TIMER *timer_ptr, CHAR **name, UINT *active, ULONG *remaining_ticks,
ULONG *reschedule_ticks, TX_TIMER **next_timer)
{
@@ -101,10 +101,10 @@ UINT active_timer_list;
/* Retrieve the name of the timer. */
if (name != TX_NULL)
{
*name = timer_ptr -> tx_timer_name;
}
/* Pickup address of internal timer structure. */
internal_ptr = &(timer_ptr -> tx_timer_internal);
@@ -113,7 +113,7 @@ UINT active_timer_list;
/* Default active to false. */
timer_active = TX_FALSE;
/* Default the ticks left to the remaining ticks. */
ticks_left = internal_ptr -> tx_timer_internal_remaining_ticks;
@@ -146,7 +146,7 @@ UINT active_timer_list;
/* Calculate the amount of time that has elapsed since the timer
was activated. */
/* Setup the list head pointer. */
list_head = internal_ptr -> tx_timer_internal_list_head;
@@ -154,7 +154,7 @@ UINT active_timer_list;
if (internal_ptr -> tx_timer_internal_list_head >= _tx_timer_current_ptr)
{
/* Calculate ticks left to expiration - just the difference between this
/* Calculate ticks left to expiration - just the difference between this
timer's entry and the current timer pointer. */
ticks_left = ((TX_TIMER_POINTER_DIF(list_head, _tx_timer_current_ptr)) + ((ULONG) 1));
}
@@ -170,7 +170,7 @@ UINT active_timer_list;
/* Adjust the remaining ticks accordingly. */
if (internal_ptr -> tx_timer_internal_remaining_ticks > TX_TIMER_ENTRIES)
{
/* Subtract off the last full pass through the timer list and add the
time left. */
ticks_left = (internal_ptr -> tx_timer_internal_remaining_ticks - TX_TIMER_ENTRIES) + ticks_left;
@@ -179,14 +179,14 @@ UINT active_timer_list;
}
else
{
/* The timer is not on the actual timer list so it must either be being processed
or on a temporary list to be processed. */
/* Check to see if this timer is the timer currently being processed. */
if (_tx_timer_expired_timer_ptr == internal_ptr)
{
/* Timer dispatch routine is executing, waiting to execute, or just finishing. No more remaining ticks for this expiration. */
ticks_left = ((ULONG) 0);
}
@@ -195,17 +195,17 @@ UINT active_timer_list;
/* Timer is not the one being processed, which means it must be on the temporary expiration list
waiting to be processed. */
/* Calculate the remaining ticks for a timer in the process of expiring. */
if (ticks_left > TX_TIMER_ENTRIES)
{
/* Calculate the number of ticks remaining. */
ticks_left = internal_ptr -> tx_timer_internal_remaining_ticks - TX_TIMER_ENTRIES;
}
else
{
/* Timer dispatch routine is waiting to execute, no more remaining ticks for this expiration. */
ticks_left = ((ULONG) 0);
}
@@ -216,13 +216,13 @@ UINT active_timer_list;
/* Setup return values for an inactive timer. */
if (active != TX_NULL)
{
/* Setup the timer active indication. */
*active = timer_active;
}
if (remaining_ticks != TX_NULL)
{
/* Setup the default remaining ticks value. */
*remaining_ticks = ticks_left;
}
@@ -230,17 +230,17 @@ UINT active_timer_list;
/* Pickup the reschedule ticks value. */
if (reschedule_ticks != TX_NULL)
{
*reschedule_ticks = internal_ptr -> tx_timer_internal_re_initialize_ticks;
}
/* Pickup the next created application timer. */
if (next_timer != TX_NULL)
{
*next_timer = timer_ptr -> tx_timer_created_next;
}
/* Restore interrupts. */
TX_RESTORE

View File

@@ -36,7 +36,7 @@
#ifndef TX_NO_TIMER
/* Define the system clock value that is continually incremented by the
/* Define the system clock value that is continually incremented by the
periodic timer interrupt processing. */
volatile ULONG _tx_timer_system_clock;
@@ -67,7 +67,7 @@ TX_TIMER_INTERNAL **_tx_timer_list_end;
TX_TIMER_INTERNAL **_tx_timer_current_ptr;
/* Define the timer expiration flag. This is used to indicate that a timer
/* Define the timer expiration flag. This is used to indicate that a timer
has expired. */
UINT _tx_timer_expired;
@@ -146,8 +146,8 @@ ULONG _tx_timer_performance_expiration_count;
/* Define the total number of timer expiration adjustments. These are required
if the expiration time is greater than the size of the timer list. In such
cases, the timer is placed at the end of the list and then reactivated
if the expiration time is greater than the size of the timer list. In such
cases, the timer is placed at the end of the list and then reactivated
as many times as necessary to finally achieve the resulting timeout. */
ULONG _tx_timer_performance__expiration_adjust_count;
@@ -241,26 +241,26 @@ UINT status;
#ifndef TX_TIMER_PROCESS_IN_ISR
/* Setup the variables associated with the system timer thread's stack and
/* Setup the variables associated with the system timer thread's stack and
priority. */
_tx_timer_stack_start = (VOID *) &_tx_timer_thread_stack_area[0];
_tx_timer_stack_size = ((ULONG) TX_TIMER_THREAD_STACK_SIZE);
_tx_timer_priority = ((UINT) TX_TIMER_THREAD_PRIORITY);
/* Create the system timer thread. This thread processes all of the timer
/* Create the system timer thread. This thread processes all of the timer
expirations and reschedules. Its stack and priority are defined in the
low-level initialization component. */
do
{
/* Create the system timer thread. */
status = _tx_thread_create(&_tx_timer_thread,
TX_CONST_CHAR_TO_CHAR_POINTER_CONVERT("System Timer Thread"),
_tx_timer_thread_entry,
((ULONG) TX_TIMER_ID),
_tx_timer_stack_start, _tx_timer_stack_size,
status = _tx_thread_create(&_tx_timer_thread,
TX_CONST_CHAR_TO_CHAR_POINTER_CONVERT("System Timer Thread"),
_tx_timer_thread_entry,
((ULONG) TX_TIMER_ID),
_tx_timer_stack_start, _tx_timer_stack_size,
_tx_timer_priority, _tx_timer_priority, TX_NO_TIME_SLICE, TX_DONT_START);
#ifdef TX_SAFETY_CRITICAL
/* Check return from thread create - if an error is detected throw an exception. */
@@ -274,9 +274,9 @@ UINT status;
/* Define timer initialize extension. */
TX_TIMER_INITIALIZE_EXTENSION(status)
} while (status != TX_SUCCESS);
#else
/* Clear the timer interrupt processing active flag. */

View File

@@ -96,15 +96,15 @@ UINT status;
/* Determine if this is a legal request. */
if (timer_ptr == TX_NULL)
{
/* Timer pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
/* Determine if the timer ID is invalid. */
else if (timer_ptr -> tx_timer_id != TX_TIMER_ID)
{
/* Timer pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
@@ -123,38 +123,38 @@ UINT status;
/* Retrieve the number of activations of this timer. */
if (activates != TX_NULL)
{
*activates = timer_ptr -> tx_timer_performance_activate_count;
}
/* Retrieve the number of reactivations of this timer. */
if (reactivates != TX_NULL)
{
*reactivates = timer_ptr -> tx_timer_performance_reactivate_count;
}
/* Retrieve the number of deactivations of this timer. */
if (deactivates != TX_NULL)
{
*deactivates = timer_ptr -> tx_timer_performance_deactivate_count;
}
/* Retrieve the number of expirations of this timer. */
if (expirations != TX_NULL)
{
*expirations = timer_ptr -> tx_timer_performance_expiration_count;
}
/* Retrieve the number of expiration adjustments of this timer. */
if (expiration_adjusts != TX_NULL)
{
*expiration_adjusts = timer_ptr -> tx_timer_performance__expiration_adjust_count;
}
/* Restore interrupts. */
TX_RESTORE

View File

@@ -101,35 +101,35 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the total number of timer activations. */
if (activates != TX_NULL)
{
*activates = _tx_timer_performance_activate_count;
}
/* Retrieve the total number of timer reactivations. */
if (reactivates != TX_NULL)
{
*reactivates = _tx_timer_performance_reactivate_count;
}
/* Retrieve the total number of timer deactivations. */
if (deactivates != TX_NULL)
{
*deactivates = _tx_timer_performance_deactivate_count;
}
/* Retrieve the total number of timer expirations. */
if (expirations != TX_NULL)
{
*expirations = _tx_timer_performance_expiration_count;
}
/* Retrieve the total number of timer expiration adjustments. */
if (expiration_adjusts != TX_NULL)
{
*expiration_adjusts = _tx_timer_performance__expiration_adjust_count;
}
@@ -138,7 +138,7 @@ TX_INTERRUPT_SAVE_AREA
/* Return completion status. */
return(TX_SUCCESS);
#else
UINT status;
@@ -147,37 +147,37 @@ UINT status;
/* Access input arguments just for the sake of lint, MISRA, etc. */
if (activates != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (reactivates != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (deactivates != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (expirations != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (expiration_adjusts != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}

View File

@@ -93,11 +93,11 @@ ULONG expiration_time;
/* Determine if there is a timer to activate. */
if (remaining_ticks != ((ULONG) 0))
{
/* Determine if the timer is set to wait forever. */
if (remaining_ticks != TX_WAIT_FOREVER)
{
/* Valid timer activate request. */
/* Determine if the timer still needs activation. */
@@ -124,7 +124,7 @@ ULONG expiration_time;
/* At this point, we are ready to put the timer on one of
the timer lists. */
/* Calculate the proper place for the timer. */
timer_list = TX_TIMER_POINTER_ADD(_tx_timer_current_ptr, expiration_time);
if (TX_TIMER_INDIRECT_TO_VOID_POINTER_CONVERT(timer_list) >= TX_TIMER_INDIRECT_TO_VOID_POINTER_CONVERT(_tx_timer_list_end))
@@ -134,11 +134,11 @@ ULONG expiration_time;
delta = TX_TIMER_POINTER_DIF(timer_list, _tx_timer_list_end);
timer_list = TX_TIMER_POINTER_ADD(_tx_timer_list_start, delta);
}
/* Now put the timer on this list. */
if ((*timer_list) == TX_NULL)
{
/* This list is NULL, just put the new timer on it. */
/* Setup the links in this timer. */

View File

@@ -101,14 +101,14 @@ TX_TIMER *timer_ptr;
silly compiler warnings. */
if (timer_thread_input == TX_TIMER_ID)
{
/* Yes, valid thread entry, proceed... */
/* Now go into an infinite loop to process timer expirations. */
while (TX_LOOP_FOREVER)
{
/* First, move the current list pointer and clear the timer
/* First, move the current list pointer and clear the timer
expired value. This allows the interrupt handling portion
to continue looking for timer expirations. */
TX_DISABLE
@@ -120,19 +120,19 @@ TX_TIMER *timer_ptr;
is one! */
if (expired_timers != TX_NULL)
{
expired_timers -> tx_timer_internal_list_head = &expired_timers;
}
/* Set the current list pointer to NULL. */
*_tx_timer_current_ptr = TX_NULL;
/* Move the current pointer up one timer entry wrap if we get to
/* Move the current pointer up one timer entry wrap if we get to
the end of the list. */
_tx_timer_current_ptr = TX_TIMER_POINTER_ADD(_tx_timer_current_ptr, 1);
if (_tx_timer_current_ptr == _tx_timer_list_end)
{
_tx_timer_current_ptr = _tx_timer_list_start;
}
@@ -155,7 +155,7 @@ TX_TIMER *timer_ptr;
/* Pickup the next timer. */
next_timer = expired_timers -> tx_timer_internal_active_next;
/* Set the reactivate_timer to NULL. */
reactivate_timer = TX_NULL;
@@ -172,7 +172,7 @@ TX_TIMER *timer_ptr;
{
/* No, not the only expired timer. */
/* Remove this timer from the expired list. */
previous_timer = current_timer -> tx_timer_internal_active_previous;
next_timer -> tx_timer_internal_active_previous = previous_timer;
@@ -187,7 +187,7 @@ TX_TIMER *timer_ptr;
/* In any case, the timer is now off of the expired list. */
/* Determine if the timer has expired or if it is just a really
/* Determine if the timer has expired or if it is just a really
big timer that needs to be placed in the list again. */
if (current_timer -> tx_timer_internal_remaining_ticks > TX_TIMER_ENTRIES)
{
@@ -203,25 +203,25 @@ TX_TIMER *timer_ptr;
/* Determine if this is an application timer. */
if (current_timer -> tx_timer_internal_timeout_function != &_tx_thread_timeout)
{
/* Derive the application timer pointer. */
/* Pickup the application timer pointer. */
TX_USER_TIMER_POINTER_GET(current_timer, timer_ptr)
/* Increment the number of expiration adjustments on this timer. */
if (timer_ptr -> tx_timer_id == TX_TIMER_ID)
{
timer_ptr -> tx_timer_performance__expiration_adjust_count++;
}
}
#endif
/* Decrement the remaining ticks of the timer. */
current_timer -> tx_timer_internal_remaining_ticks =
current_timer -> tx_timer_internal_remaining_ticks =
current_timer -> tx_timer_internal_remaining_ticks - TX_TIMER_ENTRIES;
/* Set the timeout function to NULL in order to bypass the
expiration. */
timeout_function = TX_NULL;
@@ -248,22 +248,22 @@ TX_TIMER *timer_ptr;
/* Determine if this is an application timer. */
if (current_timer -> tx_timer_internal_timeout_function != &_tx_thread_timeout)
{
/* Derive the application timer pointer. */
/* Pickup the application timer pointer. */
TX_USER_TIMER_POINTER_GET(current_timer, timer_ptr)
/* Increment the number of expirations on this timer. */
if (timer_ptr -> tx_timer_id == TX_TIMER_ID)
{
timer_ptr -> tx_timer_performance_expiration_count++;
}
}
#endif
/* Copy the calling function and ID into local variables before interrupts
/* Copy the calling function and ID into local variables before interrupts
are re-enabled. */
timeout_function = current_timer -> tx_timer_internal_timeout_function;
timeout_param = current_timer -> tx_timer_internal_timeout_param;
@@ -276,12 +276,12 @@ TX_TIMER *timer_ptr;
{
/* Make the timer appear that it is still active while processing
the expiration routine and with interrupts enabled. This will
the expiration routine and with interrupts enabled. This will
permit proper processing of a timer deactivate from both the
expiration routine and an ISR. */
current_timer -> tx_timer_internal_list_head = &reactivate_timer;
current_timer -> tx_timer_internal_active_next = current_timer;
/* Setup the temporary timer list head pointer. */
reactivate_timer = current_timer;
}
@@ -303,7 +303,7 @@ TX_TIMER *timer_ptr;
/* Call the timer-expiration function, if non-NULL. */
if (timeout_function != TX_NULL)
{
(timeout_function) (timeout_param);
}
@@ -331,16 +331,16 @@ TX_TIMER *timer_ptr;
/* Determine if this is an application timer. */
if (current_timer -> tx_timer_internal_timeout_function != &_tx_thread_timeout)
{
/* Derive the application timer pointer. */
/* Pickup the application timer pointer. */
TX_USER_TIMER_POINTER_GET(current_timer, timer_ptr)
/* Increment the number of expirations on this timer. */
if (timer_ptr -> tx_timer_id == TX_TIMER_ID)
{
timer_ptr -> tx_timer_performance_reactivate_count++;
}
}
@@ -367,7 +367,7 @@ TX_TIMER *timer_ptr;
/* At this point, we are ready to put the timer back on one of
the timer lists. */
/* Calculate the proper place for the timer. */
timer_list = TX_TIMER_POINTER_ADD(_tx_timer_current_ptr, expiration_time);
if (TX_TIMER_INDIRECT_TO_VOID_POINTER_CONVERT(timer_list) >= TX_TIMER_INDIRECT_TO_VOID_POINTER_CONVERT(_tx_timer_list_end))
@@ -381,7 +381,7 @@ TX_TIMER *timer_ptr;
/* Now put the timer on this list. */
if ((*timer_list) == TX_NULL)
{
/* This list is NULL, just put the new timer on it. */
/* Setup the links in this timer. */
@@ -436,7 +436,7 @@ TX_TIMER *timer_ptr;
/* Build pointer to the timer thread. */
thread_ptr = &_tx_timer_thread;
/* Set the status to suspending, in order to indicate the
/* Set the status to suspending, in order to indicate the
suspension is in progress. */
thread_ptr -> tx_thread_state = TX_SUSPENDED;

View File

@@ -84,7 +84,7 @@ UINT _tx_trace_buffer_full_notify(VOID (*full_buffer_callback)(VOID *buffer))
/* Return success. */
return(TX_SUCCESS);
#else
UINT status;
@@ -93,13 +93,13 @@ UINT status;
/* Access input arguments just for the sake of lint, MISRA, etc. */
if (full_buffer_callback != TX_NULL)
{
/* Trace not enabled, return an error. */
status = TX_FEATURE_NOT_ENABLED;
}
else
{
/* Trace not enabled, return an error. */
status = TX_FEATURE_NOT_ENABLED;
}

Some files were not shown because too many files have changed in this diff Show More