From 1d9a155fd7add2a302b711b5568fb40c295aac0a Mon Sep 17 00:00:00 2001 From: Darian Leung Date: Tue, 31 Oct 2023 22:43:58 +0800 Subject: [PATCH 1/2] fix(freertos/idf): Fix xEventGroupGetBitsFromISR() critical section Adds missing critical section to xEventGroupGetBitsFromISR() that is required in dual-core SMP. --- components/freertos/FreeRTOS-Kernel/event_groups.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/components/freertos/FreeRTOS-Kernel/event_groups.c b/components/freertos/FreeRTOS-Kernel/event_groups.c index 4ad031aefd3..3c3a2905200 100644 --- a/components/freertos/FreeRTOS-Kernel/event_groups.c +++ b/components/freertos/FreeRTOS-Kernel/event_groups.c @@ -534,11 +534,11 @@ EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) EventGroup_t const * const pxEventBits = xEventGroup; EventBits_t uxReturn; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ), uxSavedInterruptStatus ); { uxReturn = pxEventBits->uxEventBits; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ), uxSavedInterruptStatus ); return uxReturn; } /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. 
*/ From a442a6b65cd4917ffa9de63e736f08ef0de573c0 Mon Sep 17 00:00:00 2001 From: Darian Leung Date: Wed, 1 Nov 2023 00:02:58 +0800 Subject: [PATCH 2/2] refactor(freertos/idf): Update thread safety convenience macros This commit refactors some of the thread safety convenience macros by removing some repeated definitions and keeping them all in "freertos_idf_additions_priv.h" --- components/freertos/FreeRTOS-Kernel/queue.c | 70 ++------- components/freertos/FreeRTOS-Kernel/tasks.c | 138 +++++------------- .../freertos_tasks_c_additions.h | 8 +- .../esp_private/freertos_idf_additions_priv.h | 122 ++++++++++++---- 4 files changed, 142 insertions(+), 196 deletions(-) diff --git a/components/freertos/FreeRTOS-Kernel/queue.c b/components/freertos/FreeRTOS-Kernel/queue.c index 32e787a26d7..185dd47aefc 100644 --- a/components/freertos/FreeRTOS-Kernel/queue.c +++ b/components/freertos/FreeRTOS-Kernel/queue.c @@ -54,56 +54,6 @@ * correct privileged Vs unprivileged linkage and placement. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */ -/* Some code sections require extra critical sections when building for SMP - * ( configNUMBER_OF_CORES > 1 ). 
*/ -#if ( configNUMBER_OF_CORES > 1 ) -/* Macros that Enter/exit a critical section only when building for SMP */ - #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) - #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) - #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock ) - #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock ) - - static inline __attribute__( ( always_inline ) ) - void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) - { - if( portCHECK_IF_IN_ISR() == pdFALSE ) - { - taskENTER_CRITICAL( pxLock ); - } - else - { - #ifdef __clang_analyzer__ - /* Teach clang-tidy that ISR version macro can be different */ - configASSERT( 1 ); - #endif - taskENTER_CRITICAL_ISR( pxLock ); - } - } - - static inline __attribute__( ( always_inline ) ) - void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) - { - if( portCHECK_IF_IN_ISR() == pdFALSE ) - { - taskEXIT_CRITICAL( pxLock ); - } - else - { - #ifdef __clang_analyzer__ - /* Teach clang-tidy that ISR version macro can be different */ - configASSERT( 1 ); - #endif - taskEXIT_CRITICAL_ISR( pxLock ); - } - } -#else /* configNUMBER_OF_CORES > 1 */ - /* Macros that Enter/exit a critical section only when building for SMP */ - #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) - #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) - #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) - #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) -#endif /* configNUMBER_OF_CORES > 1 */ - /* Single core FreeRTOS uses queue locks to ensure that vTaskPlaceOnEventList() * calls are deterministic (as queue locks use scheduler suspension instead of * critical sections). 
However, the SMP implementation is non-deterministic @@ -3109,7 +3059,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /* For SMP, we need to take the queue registry lock in case another * core updates the register simultaneously. */ - taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); + prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); { if( pcQueueName != NULL ) { @@ -3145,7 +3095,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } } /* Release the previously taken queue registry lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); } #endif /* configQUEUE_REGISTRY_SIZE */ @@ -3162,7 +3112,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /* For SMP, we need to take the queue registry lock in case another * core updates the register simultaneously. */ - taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); + prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); { /* Note there is nothing here to protect against another task adding or * removing entries from the registry while it is being searched. */ @@ -3181,7 +3131,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } } /* Release the previously taken queue registry lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); return pcReturn; } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */ @@ -3199,7 +3149,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /* For SMP, we need to take the queue registry lock in case another * core updates the register simultaneously. */ - taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); + prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); { /* See if the handle of the queue being unregistered in actually in the * registry. 
*/ @@ -3223,7 +3173,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } } /* Release the previously taken queue registry lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ #endif /* configQUEUE_REGISTRY_SIZE */ @@ -3247,7 +3197,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /* For SMP, we need to take the queue's xQueueLock as we are about to * access the queue. */ - taskENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) ); + prvENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) ); { #if ( queueUSE_LOCKS == 1 ) { @@ -3278,7 +3228,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) #endif /* queueUSE_LOCKS == 1 */ } /* Release the previously taken xQueueLock. */ - taskEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) ); + prvEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) ); } #endif /* configUSE_TIMERS */ @@ -3413,7 +3363,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /* In SMP, queue sets have their own xQueueLock. Thus we need to also * acquire the queue set's xQueueLock before accessing it. */ - taskENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) ); + prvENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) ); { if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ) { @@ -3463,7 +3413,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } } /* Release the previously acquired queue set's xQueueLock. 
*/ - taskEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) ); + prvEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) ); return xReturn; } diff --git a/components/freertos/FreeRTOS-Kernel/tasks.c b/components/freertos/FreeRTOS-Kernel/tasks.c index cbd9eb48aa7..d4747de6620 100644 --- a/components/freertos/FreeRTOS-Kernel/tasks.c +++ b/components/freertos/FreeRTOS-Kernel/tasks.c @@ -65,74 +65,6 @@ #include #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */ -/* Some code sections require extra critical sections when building for SMP - * ( configNUMBER_OF_CORES > 1 ). */ -#if ( configNUMBER_OF_CORES > 1 ) - /* Macros that enter/exit a critical section only when building for SMP */ - #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) - #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) - #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) taskENTER_CRITICAL_ISR( pxLock ) - #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) taskEXIT_CRITICAL_ISR( pxLock ) - #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock ) - #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock ) - /* Macros that enter/exit a critical section only when building for single-core */ - #define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) - #define taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) - /* Macros that enable/disable interrupts only when building for SMP */ - #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY() portSET_INTERRUPT_MASK_FROM_ISR() - #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus ) portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus ) - - static inline __attribute__( ( always_inline ) ) - void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) - { - if( portCHECK_IF_IN_ISR() == pdFALSE ) - { - taskENTER_CRITICAL( pxLock ); - } - else - { - #ifdef __clang_analyzer__ - /* Teach clang-tidy that ISR 
version macro can be different */ - configASSERT( 1 ); - #endif - taskENTER_CRITICAL_ISR( pxLock ); - } - } - - static inline __attribute__( ( always_inline ) ) - void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) - { - if( portCHECK_IF_IN_ISR() == pdFALSE ) - { - taskEXIT_CRITICAL( pxLock ); - } - else - { - #ifdef __clang_analyzer__ - /* Teach clang-tidy that ISR version macro can be different */ - configASSERT( 1 ); - #endif - taskEXIT_CRITICAL_ISR( pxLock ); - } - } - -#else /* configNUMBER_OF_CORES > 1 */ - /* Macros that enter/exit a critical section only when building for SMP */ - #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) - #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) - #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) - #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) - #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) - #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) - /* Macros that enter/exit a critical section only when building for single-core */ - #define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) - #define taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) - /* Macros that enable/disable interrupts only when building for SMP */ - #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY() ( ( UBaseType_t ) 0 ) - #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus ) ( ( void ) uxStatus ) - -#endif /* configNUMBER_OF_CORES > 1 */ - #if ( configUSE_PREEMPTION == 0 ) /* If the cooperative scheduler is being used then a yield should not be @@ -1470,7 +1402,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* For SMP, we need to take the kernel lock here as we are about to * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { /* Force a reschedule if it is the currently running task that has just * been deleted. 
*/ @@ -1488,7 +1420,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } #endif /* INCLUDE_vTaskDelete */ @@ -2366,7 +2298,7 @@ void vTaskStartScheduler( void ) /* For SMP, we need to take the kernel lock here as we are about to * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) { @@ -2381,7 +2313,7 @@ void vTaskStartScheduler( void ) xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT; } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); /* If configGENERATE_RUN_TIME_STATS is defined then the following * macro must be defined to configure the timer/counter used to generate @@ -2431,12 +2363,12 @@ void vTaskEndScheduler( void ) /* For SMP, we need to take the kernel lock here as we are about to access * kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { xSchedulerRunning = pdFALSE; } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); vPortEndScheduler(); } /*----------------------------------------------------------*/ @@ -2686,7 +2618,7 @@ TickType_t xTaskGetTickCountFromISR( void ) /* For SMP, we need to take the kernel lock here as we are about to access * kernel data structures. 
*/ - taskENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock ); { uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); { @@ -2695,7 +2627,7 @@ TickType_t xTaskGetTickCountFromISR( void ) portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock ); return xReturn; } @@ -3094,7 +3026,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) * the event list too. Interrupts can touch the event list item, * even though the scheduler is suspended, so a critical section * is used. */ - taskENTER_CRITICAL_SC_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SC_ONLY( &xKernelLock ); { if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) { @@ -3110,7 +3042,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL_SC_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SC_ONLY( &xKernelLock ); /* Place the unblocked task into the appropriate ready list. */ prvAddTaskToReadyList( pxTCB ); @@ -3173,7 +3105,7 @@ BaseType_t xTaskIncrementTick( void ) /* For SMP, we need to take the kernel lock here as we are about to access * kernel data structures (unlike single core which calls this function with * interrupts disabled). */ - taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); { if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE ) { @@ -3343,7 +3275,7 @@ BaseType_t xTaskIncrementTick( void ) /* Release the previously taken kernel lock as we have finished accessing * the kernel data structures. 
*/ - taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); #if ( configUSE_TICK_HOOK == 1 ) { @@ -3566,7 +3498,7 @@ void vTaskSwitchContext( void ) /* For SMP, we need to take the kernel lock here as we are about to access * kernel data structures (unlike single core which calls this function with * either interrupts disabled or when the scheduler hasn't started yet). */ - taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. */ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -3651,7 +3583,7 @@ void vTaskSwitchContext( void ) /* Release the previously taken kernel lock as we have finished accessing * the kernel data structures. */ - taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); } /*-----------------------------------------------------------*/ @@ -3666,7 +3598,7 @@ void vTaskPlaceOnEventList( List_t * const pxEventList, /* For SMP, we need to take the kernel lock here as we are about to access * kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { /* Place the event list item of the TCB in the appropriate event list. * This is placed in the list in priority order so the highest priority task @@ -3684,7 +3616,7 @@ void vTaskPlaceOnEventList( List_t * const pxEventList, prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } /*-----------------------------------------------------------*/ @@ -3712,7 +3644,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, /* For SMP, we need to take the kernel lock here as we are about to access * kernel data structures. 
*/ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { /* Store the item value in the event list item. It is safe to access the * event list item here as interrupts won't access the event list item of a @@ -3729,7 +3661,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } /*-----------------------------------------------------------*/ @@ -3749,7 +3681,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, /* For SMP, we need to take the kernel lock here as we are about to access * kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { /* Place the event list item of the TCB in the appropriate event list. * In this case it is assume that this is the only task that is going to @@ -4548,7 +4480,7 @@ static void prvCheckTasksWaitingTermination( void ) /* A critical section is required for SMP in case another core modifies * the task simultaneously. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB; pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] ); @@ -4650,7 +4582,7 @@ static void prvCheckTasksWaitingTermination( void ) } } /* Exit the previously entered critical section. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } #endif /* configUSE_TRACE_FACILITY */ @@ -4872,11 +4804,11 @@ static void prvResetNextTaskUnblockTime( void ) * For single-core a critical section is not required as this is not * called from an interrupt and the current TCB will always be the same * for any individual execution thread. 
*/ - uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY(); + uxSavedInterruptStatus = prvDISABLE_INTERRUPTS_ISR_SMP_ONLY(); { xReturn = pxCurrentTCBs[ portGET_CORE_ID() ]; } - taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus ); + prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus ); return xReturn; } @@ -4900,7 +4832,7 @@ static void prvResetNextTaskUnblockTime( void ) * * We use the ISR versions of interrupt macros as this function could be * called inside critical sections. */ - uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY(); + uxSavedInterruptStatus = prvDISABLE_INTERRUPTS_ISR_SMP_ONLY(); { if( xSchedulerRunning == pdFALSE ) { @@ -4918,7 +4850,7 @@ static void prvResetNextTaskUnblockTime( void ) } } } - taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus ); + prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus ); return xReturn; } @@ -4935,7 +4867,7 @@ static void prvResetNextTaskUnblockTime( void ) /* For SMP, we need to take the kernel lock here as we are about to * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. */ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -5018,7 +4950,7 @@ static void prvResetNextTaskUnblockTime( void ) } } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return xReturn; } @@ -5035,7 +4967,7 @@ static void prvResetNextTaskUnblockTime( void ) /* For SMP, we need to take the kernel lock here as we are about to * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { if( pxMutexHolder != NULL ) { @@ -5105,7 +5037,7 @@ static void prvResetNextTaskUnblockTime( void ) } } /* Release the previously taken kernel lock. 
*/ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return xReturn; } @@ -5124,7 +5056,7 @@ static void prvResetNextTaskUnblockTime( void ) /* For SMP, we need to take the kernel lock here as we are about to * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { if( pxMutexHolder != NULL ) { @@ -5220,7 +5152,7 @@ static void prvResetNextTaskUnblockTime( void ) } } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } #endif /* configUSE_MUTEXES */ @@ -5555,7 +5487,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* For SMP, we need to take the kernel lock here to ensure nothing else * modifies the task's event item value simultaneously. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. */ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -5566,7 +5498,7 @@ TickType_t uxTaskResetEventItemValue( void ) * queues and semaphores. */ listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCBs[ xCurCoreID ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ } - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); /* Release the previously taken kernel lock. */ return uxReturn; @@ -5581,7 +5513,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* For SMP, we need to take the kernel lock here as we are about to * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. 
*/ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -5596,7 +5528,7 @@ TickType_t uxTaskResetEventItemValue( void ) xReturn = pxCurrentTCBs[ xCurCoreID ]; } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return xReturn; } diff --git a/components/freertos/esp_additions/freertos_tasks_c_additions.h b/components/freertos/esp_additions/freertos_tasks_c_additions.h index c7a9fe6ab8a..dc8cde3522e 100644 --- a/components/freertos/esp_additions/freertos_tasks_c_additions.h +++ b/components/freertos/esp_additions/freertos_tasks_c_additions.h @@ -507,12 +507,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask ) /* For SMP, we need to take the kernel lock here as we are about to * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { ulRunTimeCounter = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter; } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return ulRunTimeCounter; } @@ -539,12 +539,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask ) { /* For SMP, we need to take the kernel lock here as we are about * to access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { ulReturn = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter / ulTotalTime; } /* Release the previously taken kernel lock. 
*/ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } else { diff --git a/components/freertos/esp_additions/include/esp_private/freertos_idf_additions_priv.h b/components/freertos/esp_additions/include/esp_private/freertos_idf_additions_priv.h index 596b313c448..2d520f8d3fa 100644 --- a/components/freertos/esp_additions/include/esp_private/freertos_idf_additions_priv.h +++ b/components/freertos/esp_additions/include/esp_private/freertos_idf_additions_priv.h @@ -13,6 +13,7 @@ #include "sdkconfig.h" #include "freertos/FreeRTOS.h" +#include "freertos/task.h" /* *INDENT-OFF* */ #ifdef __cplusplus @@ -26,52 +27,117 @@ /* * The following macros are convenience macros used to account for different - * thread safety behavior between Vanilla FreeRTOS (i.e., single-core) and ESP-IDF - * FreeRTOS (i.e., multi-core SMP). + * thread safety behavior between single-core and SMP in ESP-IDF FreeRTOS. * * For thread saftey... * - * - Vanilla FreeRTOS will use the following for thread safety (depending on situation) + * - Single-core will use the following for thread safety (depending on situation) * - `vTaskSuspendAll()`/`xTaskResumeAll()` for non-deterministic operations * - Critical sections or disabling interrupts for deterministic operations - * - ESP-IDF FreeRTOS will always use critical sections (determinism is not supported) - * - * [refactor-todo]: Define these locally in each kernel source file (IDF-8161) + * - SMP will always use critical sections (determinism is not supported) */ #if ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES > 1 ) ) - #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x ) taskENTER_CRITICAL( ( x ) ) - #define prvEXIT_CRITICAL_OR_RESUME_ALL( x ) ( { taskEXIT_CRITICAL( ( x ) ); pdFALSE; } ) - #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \ - { \ - taskENTER_CRITICAL_ISR( ( pxLock ) ); \ - ( void ) ( uxInterruptStatus ); \ +/* Macros that use a critical section when building for SMP */ + #define 
prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock ) taskENTER_CRITICAL( ( pxLock ) ) + #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock ) ( { taskEXIT_CRITICAL( ( pxLock ) ); pdFALSE; } ) + #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxStatus ) \ + { \ + taskENTER_CRITICAL_ISR( ( pxLock ) ); \ + ( void ) ( uxStatus ); \ + } + #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxStatus ) \ + { \ + taskEXIT_CRITICAL_ISR( ( pxLock ) ); \ + ( void ) ( uxStatus ); \ } - #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \ - { \ - taskEXIT_CRITICAL_ISR( ( pxLock ) ); \ - ( void ) ( uxInterruptStatus ); \ + +/* Macros that enter/exit a critical section only when building for SMP */ + #define prvENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) + #define prvEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) + #define prvENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) taskENTER_CRITICAL_ISR( pxLock ) + #define prvEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) taskEXIT_CRITICAL_ISR( pxLock ) + #define prvENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock ) + #define prvEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock ) + + static inline __attribute__( ( always_inline ) ) + void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) + { + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + taskENTER_CRITICAL( pxLock ); + } + else + { + #ifdef __clang_analyzer__ + /* Teach clang-tidy that ISR version macro can be different */ + configASSERT( 1 ); + #endif + taskENTER_CRITICAL_ISR( pxLock ); + } + } + + static inline __attribute__( ( always_inline ) ) + void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) + { + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + taskEXIT_CRITICAL( pxLock ); + } + else + { + #ifdef __clang_analyzer__ + /* Teach clang-tidy that ISR version macro can be different */ + configASSERT( 1 ); + #endif + taskEXIT_CRITICAL_ISR( pxLock ); + } } +/* Macros that enter/exit a critical section only 
when building for single-core */ + #define prvENTER_CRITICAL_SC_ONLY( pxLock ) + #define prvEXIT_CRITICAL_SC_ONLY( pxLock ) + +/* Macros that enable/disable interrupts only when building for SMP */ + #define prvDISABLE_INTERRUPTS_ISR_SMP_ONLY() portSET_INTERRUPT_MASK_FROM_ISR() + #define prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxStatus ) portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus ) + #elif ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES == 1 ) ) - #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x ) ( { vTaskSuspendAll(); ( void ) ( x ); } ) - #define prvEXIT_CRITICAL_OR_RESUME_ALL( x ) xTaskResumeAll() - #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \ - { \ - ( uxInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \ - ( void ) ( pxLock ); \ +/* Macros that suspend the scheduler or disables interrupts when building for single-core */ + #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock ) ( { vTaskSuspendAll(); ( void ) ( pxLock ); } ) + #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock ) xTaskResumeAll() + #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxStatus ) \ + { \ + ( uxStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \ + ( void ) ( pxLock ); \ } - #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \ - { \ - portCLEAR_INTERRUPT_MASK_FROM_ISR( ( uxInterruptStatus ) ); \ - ( void ) ( pxLock ); \ + #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxStatus ) \ + { \ + portCLEAR_INTERRUPT_MASK_FROM_ISR( ( uxStatus ) ); \ + ( void ) ( pxLock ); \ } +/* Macros that enter/exit a critical section only when building for SMP */ + #define prvENTER_CRITICAL_SMP_ONLY( pxLock ) + #define prvEXIT_CRITICAL_SMP_ONLY( pxLock ) + #define prvENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) + #define prvEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) + #define prvENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) + #define prvEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) + +/* Macros that enter/exit a critical section only when building for single-core */ + #define prvENTER_CRITICAL_SC_ONLY( pxLock ) 
taskENTER_CRITICAL( pxLock ) + #define prvEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) + +/* Macros that enable/disable interrupts only when building for SMP */ + #define prvDISABLE_INTERRUPTS_ISR_SMP_ONLY() ( ( UBaseType_t ) 0 ) + #define prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxStatus ) ( ( void ) uxStatus ) + #endif /* ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES == 1 ) ) */ /* - * In ESP-IDF FreeRTOS (i.e., multi-core SMP) uses spinlocks to protect different + * In ESP-IDF FreeRTOS under SMP builds, spinlocks are used to protect different * groups of data. This function is a wrapper to take the "xKernelLock" spinlock * of tasks.c. * @@ -87,8 +153,6 @@ * vEventGroupDelete() as both those functions will directly access event lists * (which are kernel data structures). Thus, a wrapper function must be provided * to take/release the "xKernelLock" from outside tasks.c. - * - * [refactor-todo]: Extern this locally in event groups (IDF-8161) */ #if ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES > 1 ) )