diff --git a/.gitignore b/.gitignore
index 9f3688281..5e3355558 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,7 +2,7 @@
build
modular-psu-firmware.eez-project-ui-state
src/third_party/stm32/Debug
-src/third_party/stm32_r1b1
+src/third_party/stm32_r1b1/Debug
src/eez/gui/document_stm32.h
src/eez/gui/document_stm32.cpp
diff --git a/src/eez/apps/psu/io_pins.cpp b/src/eez/apps/psu/io_pins.cpp
index 564982af3..e8f288234 100644
--- a/src/eez/apps/psu/io_pins.cpp
+++ b/src/eez/apps/psu/io_pins.cpp
@@ -51,7 +51,7 @@ int ioPinRead(int pin) {
#if EEZ_MCU_REVISION_R1B5
return HAL_GPIO_ReadPin(UART_RX_DIN1_GPIO_Port, UART_RX_DIN1_Pin) ? 1 : 0;
#else
- return HAL_GPIO_ReadPin(DIN1_GPIO_Port, _DIN1_Pin) ? 1 : 0;
+ return HAL_GPIO_ReadPin(DIN1_GPIO_Port, DIN1_Pin) ? 1 : 0;
#endif
 } else {
assert(pin == EXT_TRIG2);
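A hedged usage sketch for context (not part of this patch; the include path and the polling style are assumptions): on non-R1B5 boards ioPinRead() now resolves to the CubeMX-generated DIN1_Pin define, and callers poll it with the EXT_TRIG pin IDs.

    // Illustrative only: busy-wait until the first external trigger input
    // reads high, via the helper patched above.  ioPinRead() and EXT_TRIG1
    // are assumed to come from the firmware's io_pins header; real firmware
    // would yield to the RTOS instead of spinning.
    #include "eez/apps/psu/io_pins.h" // assumed include path

    static void waitForExtTrig1(void) {
        while (ioPinRead(EXT_TRIG1) == 0) {
            // spin until DIN1 reads high
        }
    }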
diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/croutine.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/croutine.h
new file mode 100644
index 000000000..71244db5a
--- /dev/null
+++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/croutine.h
@@ -0,0 +1,80 @@
+
+
+/**
+ * croutine. h
+ *
+ BaseType_t xCoRoutineCreate(
+                               crCOROUTINE_CODE pxCoRoutineCode,
+                               UBaseType_t uxPriority,
+                               UBaseType_t uxIndex
+                             );
+ *
+ * Create a new co-routine and add it to the list of co-routines that are
+ * ready to run.
+ *
+ * @param pxCoRoutineCode Pointer to the co-routine function.  Co-routine
+ * functions require special syntax - see the co-routine section of the WEB
+ * documentation for more information.
+ *
+ * @param uxPriority The priority with respect to other co-routines at which
+ * the co-routine will run.
+ *
+ * @param uxIndex Used to distinguish between different co-routines that
+ * execute the same function.  See the example below and the co-routine section
+ * of the WEB documentation for further information.
+ *
+ * @return pdPASS if the co-routine was successfully created and added to a ready
+ * list, otherwise an error code defined with ProjDefs.h.
+ *
+ * Example usage:
+
+ // Co-routine to be created.
+ void vFlashCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ // This may not be necessary for const variables.
+ static const char cLedToFlash[ 2 ] = { 5, 6 };
+ static const TickType_t uxFlashRates[ 2 ] = { 200, 400 };
+
+     // Must start every co-routine with a call to crSTART();
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // This co-routine just delays for a fixed period, then toggles
+         // an LED.  Two co-routines are created using this function, so
+         // the uxIndex parameter is used to tell the co-routine which
+         // LED to flash and how int32_t to delay.  This assumes xQueue has
+         // already been created.
+         vParTestToggleLED( cLedToFlash[ uxIndex ] );
+         crDELAY( xHandle, uxFlashRates[ uxIndex ] );
+     }
+
+     // Must end every co-routine with a call to crEND();
+     crEND();
+ }
+
+ // Function that creates two co-routines.
+ void vOtherFunction( void )
+ {
+ uint8_t ucParameterToPass;
+ TaskHandle_t xHandle;
+
+     // Create two co-routines at priority 0.  The first is given index 0
+     // so (from the code above) toggles LED 5 every 200 ticks.  The second
+     // is given index 1 so toggles LED 6 every 400 ticks.
+     for( uxIndex = 0; uxIndex < 2; uxIndex++ )
+     {
+         xCoRoutineCreate( vFlashCoRoutine, 0, uxIndex );
+     }
+ }
+
+ * \defgroup xCoRoutineCreate xCoRoutineCreate
+ * \ingroup Tasks
+ */
+BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, UBaseType_t uxPriority, UBaseType_t uxIndex );
+
+
+/**
+ * croutine. h
+ *
+ void vCoRoutineSchedule( void );
+ *
+ * Run a co-routine.
+ *
+ * vCoRoutineSchedule() executes the highest priority co-routine that is able
+ * to run.  The co-routine will execute until it either blocks, yields or is
+ * preempted by a task.  Co-routines execute cooperatively so one
+ * co-routine cannot be preempted by another, but can be preempted by a task.
+ *
+ * If an application comprises of both tasks and co-routines then
+ * vCoRoutineSchedule should be called from the idle task (in an idle task
+ * hook).
+ *
+ * Example usage:
+
+ // This idle task hook will schedule a co-routine each time it is called.
+ // The rest of the idle task will execute between co-routine calls.
+ void vApplicationIdleHook( void )
+ {
+     vCoRoutineSchedule();
+ }
+
+ // Alternatively, if you do not require any other part of the idle task to
+ // execute, the idle task hook can call vCoRoutineScheduler() within an
+ // infinite loop.
+ void vApplicationIdleHook( void )
+ {
+     for( ;; )
+     {
+         vCoRoutineSchedule();
+     }
+ }
+
+ * \defgroup vCoRoutineSchedule vCoRoutineSchedule
+ * \ingroup Tasks
+ */
+void vCoRoutineSchedule( void );
+
+/**
+ * croutine. h
+ *
+ crSTART( CoRoutineHandle_t xHandle );
+ *
+ * This macro MUST always be called at the start of a co-routine function.
+ *
+ * Example usage:
+
+ // Co-routine to be created.
+ void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ static int32_t ulAVariable;
+
+     // Must start every co-routine with a call to crSTART();
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // Co-routine functionality goes here.
+     }
+
+     // Must end every co-routine with a call to crEND();
+     crEND();
+ }
+ * \defgroup crSTART crSTART
+ * \ingroup Tasks
+ */
+#define crSTART( pxCRCB ) switch( ( ( CRCB_t * )( pxCRCB ) )->uxState ) { case 0:
+
+/**
+ * croutine. h
+ *
+ crEND();
+ *
+ * This macro MUST always be called at the end of a co-routine function.
+ *
+ * Example usage:
+
+ // Co-routine to be created.
+ void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ static int32_t ulAVariable;
+
+     // Must start every co-routine with a call to crSTART();
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // Co-routine functionality goes here.
+     }
+
+     // Must end every co-routine with a call to crEND();
+     crEND();
+ }
+ * \defgroup crSTART crSTART
+ * \ingroup Tasks
+ */
+#define crEND() }
+
+/*
+ * These macros are intended for internal use by the co-routine implementation
+ * only.  The macros should not be used directly by application writers.
+ */
+#define crSET_STATE0( xHandle ) ( ( CRCB_t * )( xHandle ) )->uxState = (__LINE__ * 2); return; case (__LINE__ * 2):
+#define crSET_STATE1( xHandle ) ( ( CRCB_t * )( xHandle ) )->uxState = ((__LINE__ * 2)+1); return; case ((__LINE__ * 2)+1):
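An aside on the crSET_STATE macros above, since the header itself only says they are internal: crSTART opens a switch on the co-routine's saved uxState, crSET_STATE0/1 record a resume point derived from __LINE__ and return, and crEND closes the switch. A hand-expanded sketch (illustrative only; assume crDELAY sits on a line where __LINE__ * 2 == 80, so the same constant appears in both places):

    // Roughly what a co-routine that calls crDELAY() becomes after
    // preprocessing (simplified; not the vendored source).
    void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
    {
        switch( ( ( CRCB_t * ) xHandle )->uxState )
        {
            case 0:                                      /* crSTART( xHandle ) */
            for( ;; )
            {
                /* crDELAY( xHandle, 10 ) expands to: */
                if( 10 > 0 )
                {
                    vCoRoutineAddToDelayedList( 10, NULL );
                }
                ( ( CRCB_t * ) xHandle )->uxState = 80;  /* crSET_STATE0 */
                return;                                  /* give up the CPU */
                case 80:;                                /* resume point */
            }
        }                                                /* crEND() */
    }

Because the function genuinely returns at each blocking point and later re-enters through a case label, any local that must survive a block has to be static, exactly as the examples above stress.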
+
+/**
+ * croutine. h
+ *
+ crDELAY( CoRoutineHandle_t xHandle, TickType_t xTicksToDelay );
+ *
+ * Delay a co-routine for a fixed period of time.
+ *
+ * crDELAY can only be called from the co-routine function itself - not
+ * from within a function called by the co-routine function.  This is because
+ * co-routines do not maintain their own stack.
+ *
+ * @param xHandle The handle of the co-routine to delay.  This is the xHandle
+ * parameter of the co-routine function.
+ *
+ * @param xTickToDelay The number of ticks that the co-routine should delay
+ * for.  The actual amount of time this equates to is defined by
+ * configTICK_RATE_HZ (set in FreeRTOSConfig.h).  The constant portTICK_PERIOD_MS
+ * can be used to convert ticks to milliseconds.
+ *
+ * Example usage:
+
+ // Co-routine to be created.
+ void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ // This may not be necessary for const variables.
+ // We are to delay for 200ms.
+ static const xTickType xDelayTime = 200 / portTICK_PERIOD_MS;
+
+     // Must start every co-routine with a call to crSTART();
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // Delay for 200ms.
+         crDELAY( xHandle, xDelayTime );
+
+         // Do something here.
+     }
+
+     // Must end every co-routine with a call to crEND();
+     crEND();
+ }
+ * \defgroup crDELAY crDELAY
+ * \ingroup Tasks
+ */
+#define crDELAY( xHandle, xTicksToDelay ) \
+    if( ( xTicksToDelay ) > 0 ) \
+    { \
+        vCoRoutineAddToDelayedList( ( xTicksToDelay ), NULL ); \
+    } \
+    crSET_STATE0( ( xHandle ) );
+
+/**
+ *
+ crQUEUE_SEND(
+                  CoRoutineHandle_t xHandle,
+                  QueueHandle_t pxQueue,
+                  void *pvItemToQueue,
+                  TickType_t xTicksToWait,
+                  BaseType_t *pxResult
+             )
+ *
+ * The macro's crQUEUE_SEND() and crQUEUE_RECEIVE() are the co-routine
+ * equivalent to the xQueueSend() and xQueueReceive() functions used by tasks.
+ *
+ * crQUEUE_SEND and crQUEUE_RECEIVE can only be used from a co-routine whereas
+ * xQueueSend() and xQueueReceive() can only be used from tasks.
+ *
+ * crQUEUE_SEND can only be called from the co-routine function itself - not
+ * from within a function called by the co-routine function.  This is because
+ * co-routines do not maintain their own stack.
+ *
+ * See the co-routine section of the WEB documentation for information on
+ * passing data between tasks and co-routines and between ISR's and
+ * co-routines.
+ *
+ * @param xHandle The handle of the calling co-routine.  This is the xHandle
+ * parameter of the co-routine function.
+ *
+ * @param pxQueue The handle of the queue on which the data will be posted.
+ * The handle is obtained as the return value when the queue is created using
+ * the xQueueCreate() API function.
+ *
+ * @param pvItemToQueue A pointer to the data being posted onto the queue.
+ * The number of bytes of each queued item is specified when the queue is
+ * created.  This number of bytes is copied from pvItemToQueue into the queue
+ * itself.
+ *
+ * @param xTickToDelay The number of ticks that the co-routine should block
+ * to wait for space to become available on the queue, should space not be
+ * available immediately.  The actual amount of time this equates to is defined
+ * by configTICK_RATE_HZ (set in FreeRTOSConfig.h).  The constant
+ * portTICK_PERIOD_MS can be used to convert ticks to milliseconds (see example
+ * below).
+ *
+ * @param pxResult The variable pointed to by pxResult will be set to pdPASS if
+ * data was successfully posted onto the queue, otherwise it will be set to an
+ * error defined within ProjDefs.h.
+ *
+ * Example usage:
+
+ // Co-routine function that blocks for a fixed period then posts a number onto
+ // a queue.
+ static void prvCoRoutineFlashTask( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ static BaseType_t xNumberToPost = 0;
+ static BaseType_t xResult;
+
+     // Co-routines must begin with a call to crSTART().
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // This assumes the queue has already been created.
+         crQUEUE_SEND( xHandle, xCoRoutineQueue, &xNumberToPost, NO_DELAY, &xResult );
+
+         if( xResult != pdPASS )
+         {
+             // The message was not posted!
+         }
+
+         // Increment the number to be posted onto the queue.
+         xNumberToPost++;
+
+         // Delay for 100 ticks.
+         crDELAY( xHandle, 100 );
+     }
+
+     // Co-routines must end with a call to crEND().
+     crEND();
+ }
+ * \defgroup crQUEUE_SEND crQUEUE_SEND
+ * \ingroup Tasks
+ */
+#define crQUEUE_SEND( xHandle, pxQueue, pvItemToQueue, xTicksToWait, pxResult ) \
+{ \
+    *( pxResult ) = xQueueCRSend( ( pxQueue) , ( pvItemToQueue) , ( xTicksToWait ) ); \
+    if( *( pxResult ) == errQUEUE_BLOCKED ) \
+    { \
+        crSET_STATE0( ( xHandle ) ); \
+        *pxResult = xQueueCRSend( ( pxQueue ), ( pvItemToQueue ), 0 ); \
+    } \
+    if( *pxResult == errQUEUE_YIELD ) \
+    { \
+        crSET_STATE1( ( xHandle ) ); \
+        *pxResult = pdPASS; \
+    } \
+}
+
+/**
+ * croutine. h
+ *
+ crQUEUE_RECEIVE(
+                     CoRoutineHandle_t xHandle,
+                     QueueHandle_t pxQueue,
+                     void *pvBuffer,
+                     TickType_t xTicksToWait,
+                     BaseType_t *pxResult
+                )
+ *
+ * The macro's crQUEUE_SEND() and crQUEUE_RECEIVE() are the co-routine
+ * equivalent to the xQueueSend() and xQueueReceive() functions used by tasks.
+ *
+ * crQUEUE_SEND and crQUEUE_RECEIVE can only be used from a co-routine whereas
+ * xQueueSend() and xQueueReceive() can only be used from tasks.
+ *
+ * crQUEUE_RECEIVE can only be called from the co-routine function itself - not
+ * from within a function called by the co-routine function.  This is because
+ * co-routines do not maintain their own stack.
+ *
+ * See the co-routine section of the WEB documentation for information on
+ * passing data between tasks and co-routines and between ISR's and
+ * co-routines.
+ *
+ * @param xHandle The handle of the calling co-routine.  This is the xHandle
+ * parameter of the co-routine function.
+ *
+ * @param pxQueue The handle of the queue from which the data will be received.
+ * The handle is obtained as the return value when the queue is created using
+ * the xQueueCreate() API function.
+ *
+ * @param pvBuffer The buffer into which the received item is to be copied.
+ * The number of bytes of each queued item is specified when the queue is
+ * created.  This number of bytes is copied into pvBuffer.
+ *
+ * @param xTickToDelay The number of ticks that the co-routine should block
+ * to wait for data to become available from the queue, should data not be
+ * available immediately.  The actual amount of time this equates to is defined
+ * by configTICK_RATE_HZ (set in FreeRTOSConfig.h).  The constant
+ * portTICK_PERIOD_MS can be used to convert ticks to milliseconds (see the
+ * crQUEUE_SEND example).
+ *
+ * @param pxResult The variable pointed to by pxResult will be set to pdPASS if
+ * data was successfully retrieved from the queue, otherwise it will be set to
+ * an error code as defined within ProjDefs.h.
+ *
+ * Example usage:
+
+ // A co-routine receives the number of an LED to flash from a queue.  It
+ // blocks on the queue until the number is received.
+ static void prvCoRoutineFlashWorkTask( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ static BaseType_t xResult;
+ static UBaseType_t uxLEDToFlash;
+
+     // All co-routines must start with a call to crSTART().
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // Wait for data to become available on the queue.
+         crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &uxLEDToFlash, portMAX_DELAY, &xResult );
+
+         if( xResult == pdPASS )
+         {
+             // We received the LED to flash - flash it!
+             vParTestToggleLED( uxLEDToFlash );
+         }
+     }
+
+     crEND();
+ }
+ * \defgroup crQUEUE_RECEIVE crQUEUE_RECEIVE
+ * \ingroup Tasks
+ */
+#define crQUEUE_RECEIVE( xHandle, pxQueue, pvBuffer, xTicksToWait, pxResult ) \
+{ \
+    *( pxResult ) = xQueueCRReceive( ( pxQueue) , ( pvBuffer ), ( xTicksToWait ) ); \
+    if( *( pxResult ) == errQUEUE_BLOCKED ) \
+    { \
+        crSET_STATE0( ( xHandle ) ); \
+        *( pxResult ) = xQueueCRReceive( ( pxQueue) , ( pvBuffer ), 0 ); \
+    } \
+    if( *( pxResult ) == errQUEUE_YIELD ) \
+    { \
+        crSET_STATE1( ( xHandle ) ); \
+        *( pxResult ) = pdPASS; \
+    } \
+}
+
+/**
+ * croutine. h
+ *
+ crQUEUE_SEND_FROM_ISR(
+                            QueueHandle_t pxQueue,
+                            void *pvItemToQueue,
+                            BaseType_t xCoRoutinePreviouslyWoken
+                       )
+ *
+ * The macro's crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() are the
+ * co-routine equivalent to the xQueueSendFromISR() and xQueueReceiveFromISR()
+ * functions used by tasks.
+ *
+ * crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() can only be used to
+ * pass data between a co-routine and and ISR, whereas xQueueSendFromISR() and
+ * xQueueReceiveFromISR() can only be used to pass data between a task and and
+ * ISR.
+ *
+ * crQUEUE_SEND_FROM_ISR can only be called from an ISR to send data to a queue
+ * that is being used from within a co-routine.
+ *
+ * See the co-routine section of the WEB documentation for information on
+ * passing data between tasks and co-routines and between ISR's and
+ * co-routines.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue.  The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param xCoRoutinePreviouslyWoken This is included so an ISR can post onto
+ * the same queue multiple times from a single interrupt.  The first call
+ * should always pass in pdFALSE.  Subsequent calls should pass in
+ * the value returned from the previous call.
+ *
+ * @return pdTRUE if a co-routine was woken by posting onto the queue.  This is
+ * used by the ISR to determine if a context switch may be required following
+ * the ISR.
+ *
+ * Example usage:
+
+ // A co-routine that blocks on a queue waiting for characters to be received.
+ static void vReceivingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ char cRxedChar;
+ BaseType_t xResult;
+
+     // All co-routines must start with a call to crSTART().
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // Wait for data to become available on the queue.  This assumes the
+         // queue xCommsRxQueue has already been created!
+         crQUEUE_RECEIVE( xHandle, xCommsRxQueue, &uxLEDToFlash, portMAX_DELAY, &xResult );
+
+         // Was a character received?
+         if( xResult == pdPASS )
+         {
+             // Process the character here.
+         }
+     }
+
+     // All co-routines must end with a call to crEND().
+     crEND();
+ }
+
+ // An ISR that uses a queue to send characters received on a serial port to
+ // a co-routine.
+ void vUART_ISR( void )
+ {
+ char cRxedChar;
+ BaseType_t xCRWokenByPost = pdFALSE;
+
+     // We loop around reading characters until there are none left in the UART.
+     while( UART_RX_REG_NOT_EMPTY() )
+     {
+         // Obtain the character from the UART.
+         cRxedChar = UART_RX_REG;
+
+         // Post the character onto a queue.  xCRWokenByPost will be pdFALSE
+         // the first time around the loop.  If the post causes a co-routine
+         // to be woken (unblocked) then xCRWokenByPost will be set to pdTRUE.
+         // In this manner we can ensure that if more than one co-routine is
+         // blocked on the queue only one is woken by this ISR no matter how
+         // many characters are posted to the queue.
+         xCRWokenByPost = crQUEUE_SEND_FROM_ISR( xCommsRxQueue, &cRxedChar, xCRWokenByPost );
+     }
+ }
+ * \defgroup crQUEUE_SEND_FROM_ISR crQUEUE_SEND_FROM_ISR
+ * \ingroup Tasks
+ */
+#define crQUEUE_SEND_FROM_ISR( pxQueue, pvItemToQueue, xCoRoutinePreviouslyWoken ) xQueueCRSendFromISR( ( pxQueue ), ( pvItemToQueue ), ( xCoRoutinePreviouslyWoken ) )
+
+
+/**
+ * croutine. h
+ *
+ crQUEUE_SEND_FROM_ISR(
+                            QueueHandle_t pxQueue,
+                            void *pvBuffer,
+                            BaseType_t * pxCoRoutineWoken
+                       )
+ *
+ * The macro's crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() are the
+ * co-routine equivalent to the xQueueSendFromISR() and xQueueReceiveFromISR()
+ * functions used by tasks.
+ *
+ * crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() can only be used to
+ * pass data between a co-routine and and ISR, whereas xQueueSendFromISR() and
+ * xQueueReceiveFromISR() can only be used to pass data between a task and and
+ * ISR.
+ *
+ * crQUEUE_RECEIVE_FROM_ISR can only be called from an ISR to receive data
+ * from a queue that is being used from within a co-routine (a co-routine
+ * posted to the queue).
+ *
+ * See the co-routine section of the WEB documentation for information on
+ * passing data between tasks and co-routines and between ISR's and
+ * co-routines.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvBuffer A pointer to a buffer into which the received item will be
+ * placed.  The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from the queue into
+ * pvBuffer.
+ *
+ * @param pxCoRoutineWoken A co-routine may be blocked waiting for space to become
+ * available on the queue.  If crQUEUE_RECEIVE_FROM_ISR causes such a
+ * co-routine to unblock *pxCoRoutineWoken will get set to pdTRUE, otherwise
+ * *pxCoRoutineWoken will remain unchanged.
+ *
+ * @return pdTRUE an item was successfully received from the queue, otherwise
+ * pdFALSE.
+ *
+ * Example usage:
+
+ // A co-routine that posts a character to a queue then blocks for a fixed
+ // period.  The character is incremented each time.
+ static void vSendingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // cChar holds its value while this co-routine is blocked and must therefore
+ // be declared static.
+ static char cCharToTx = 'a';
+ BaseType_t xResult;
+
+     // All co-routines must start with a call to crSTART().
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // Send the next character to the queue.
+         crQUEUE_SEND( xHandle, xCoRoutineQueue, &cCharToTx, NO_DELAY, &xResult );
+
+         if( xResult == pdPASS )
+         {
+             // The character was successfully posted to the queue.
+         }
+         else
+         {
+             // Could not post the character to the queue.
+         }
+
+         // Enable the UART Tx interrupt to cause an interrupt in this
+         // hypothetical UART.  The interrupt will obtain the character
+         // from the queue and send it.
+         ENABLE_RX_INTERRUPT();
+
+         // Increment to the next character then block for a fixed period.
+         // cCharToTx will maintain its value across the delay as it is
+         // declared static.
+         cCharToTx++;
+         if( cCharToTx > 'x' )
+         {
+             cCharToTx = 'a';
+         }
+         crDELAY( 100 );
+     }
+
+     // All co-routines must end with a call to crEND().
+     crEND();
+ }
+
+ // An ISR that uses a queue to receive characters to send on a UART.
+ void vUART_ISR( void )
+ {
+ char cCharToTx;
+ BaseType_t xCRWokenByPost = pdFALSE;
+
+     while( UART_TX_REG_EMPTY() )
+     {
+         // Are there any characters in the queue waiting to be sent?
+         // xCRWokenByPost will automatically be set to pdTRUE if a co-routine
+         // is woken by the post - ensuring that only a single co-routine is
+         // woken no matter how many times we go around this loop.
+         if( crQUEUE_RECEIVE_FROM_ISR( pxQueue, &cCharToTx, &xCRWokenByPost ) )
+         {
+             SEND_CHARACTER( cCharToTx );
+         }
+     }
+ }
+ * \defgroup crQUEUE_RECEIVE_FROM_ISR crQUEUE_RECEIVE_FROM_ISR
+ * \ingroup Tasks
+ */
+#define crQUEUE_RECEIVE_FROM_ISR( pxQueue, pvBuffer, pxCoRoutineWoken ) xQueueCRReceiveFromISR( ( pxQueue ), ( pvBuffer ), ( pxCoRoutineWoken ) )
+
+/*
+ * This function is intended for internal use by the co-routine macros only.
+ * The macro nature of the co-routine implementation requires that the
+ * prototype appears here.  The function should not be used by application
+ * writers.
+ *
+ * Removes the current co-routine from its ready list and places it in the
+ * appropriate delayed list.
+ */
+void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, List_t *pxEventList );
+
+/*
+ * This function is intended for internal use by the queue implementation only.
+ * The function should not be used by application writers.
+ *
+ * Removes the highest priority co-routine from the event list and places it in
+ * the pending ready list.
+ */
+BaseType_t xCoRoutineRemoveFromEventList( const List_t *pxEventList );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* CO_ROUTINE_H */
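A hedged aside before the next file (these FreeRTOSConfig.h options do not appear in this hunk, but the co-routine API above is only compiled in when they are enabled and croutine.c is part of the build):

    /* Illustrative FreeRTOSConfig.h fragment, not from this patch. */
    #define configUSE_CO_ROUTINES           1
    #define configMAX_CO_ROUTINE_PRIORITIES 2   /* co-routine priorities 0 and 1 */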
diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h
new file mode 100644
index 000000000..9dfdf21aa
--- /dev/null
+++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h
@@ -0,0 +1,279 @@
+/*
+ * FreeRTOS Kernel V10.0.1
+ * Copyright (C) 2017 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#ifndef DEPRECATED_DEFINITIONS_H
+#define DEPRECATED_DEFINITIONS_H
+
+
+/* Each FreeRTOS port has a unique portmacro.h header file.  Originally a
+pre-processor definition was used to ensure the pre-processor found the correct
+portmacro.h file for the port being used.  That scheme was deprecated in favour
+of setting the compiler's include path such that it found the correct
+portmacro.h file - removing the need for the constant and allowing the
+portmacro.h file to be located anywhere in relation to the port being used.  The
+definitions below remain in the code for backward compatibility only.  New
+projects should not use them.
+*/
+
+#ifdef OPEN_WATCOM_INDUSTRIAL_PC_PORT
+    #include "..\..\Source\portable\owatcom\16bitdos\pc\portmacro.h"
+    typedef void ( __interrupt __far *pxISR )();
+#endif
+
+#ifdef OPEN_WATCOM_FLASH_LITE_186_PORT
+    #include "..\..\Source\portable\owatcom\16bitdos\flsh186\portmacro.h"
+    typedef void ( __interrupt __far *pxISR )();
+#endif
+
+#ifdef GCC_MEGA_AVR
+    #include "../portable/GCC/ATMega323/portmacro.h"
+#endif
+
+#ifdef IAR_MEGA_AVR
+    #include "../portable/IAR/ATMega323/portmacro.h"
+#endif
+
+#ifdef MPLAB_PIC24_PORT
+    #include "../../Source/portable/MPLAB/PIC24_dsPIC/portmacro.h"
+#endif
+
+#ifdef MPLAB_DSPIC_PORT
+    #include "../../Source/portable/MPLAB/PIC24_dsPIC/portmacro.h"
+#endif
+
+#ifdef MPLAB_PIC18F_PORT
+    #include "../../Source/portable/MPLAB/PIC18F/portmacro.h"
+#endif
+
+#ifdef MPLAB_PIC32MX_PORT
+    #include "../../Source/portable/MPLAB/PIC32MX/portmacro.h"
+#endif
+
+#ifdef _FEDPICC
+    #include "libFreeRTOS/Include/portmacro.h"
+#endif
+
+#ifdef SDCC_CYGNAL
+    #include "../../Source/portable/SDCC/Cygnal/portmacro.h"
+#endif
+
+#ifdef GCC_ARM7
+    #include "../../Source/portable/GCC/ARM7_LPC2000/portmacro.h"
+#endif
+
+#ifdef GCC_ARM7_ECLIPSE
+    #include "portmacro.h"
+#endif
+
+#ifdef ROWLEY_LPC23xx
+    #include "../../Source/portable/GCC/ARM7_LPC23xx/portmacro.h"
+#endif
+
+#ifdef IAR_MSP430
+    #include "..\..\Source\portable\IAR\MSP430\portmacro.h"
+#endif
+
+#ifdef GCC_MSP430
+    #include "../../Source/portable/GCC/MSP430F449/portmacro.h"
+#endif
+
+#ifdef ROWLEY_MSP430
+    #include "../../Source/portable/Rowley/MSP430F449/portmacro.h"
+#endif
+
+#ifdef ARM7_LPC21xx_KEIL_RVDS
+    #include "..\..\Source\portable\RVDS\ARM7_LPC21xx\portmacro.h"
+#endif
+
+#ifdef SAM7_GCC
+    #include "../../Source/portable/GCC/ARM7_AT91SAM7S/portmacro.h"
+#endif
+
+#ifdef SAM7_IAR
+    #include "..\..\Source\portable\IAR\AtmelSAM7S64\portmacro.h"
+#endif
+
+#ifdef SAM9XE_IAR
+    #include "..\..\Source\portable\IAR\AtmelSAM9XE\portmacro.h"
+#endif
+
+#ifdef LPC2000_IAR
+    #include "..\..\Source\portable\IAR\LPC2000\portmacro.h"
+#endif
+
+#ifdef STR71X_IAR
+    #include "..\..\Source\portable\IAR\STR71x\portmacro.h"
+#endif
+
+#ifdef STR75X_IAR
+    #include "..\..\Source\portable\IAR\STR75x\portmacro.h"
+#endif
+
+#ifdef STR75X_GCC
+    #include "..\..\Source\portable\GCC\STR75x\portmacro.h"
+#endif
+
+#ifdef STR91X_IAR
+    #include "..\..\Source\portable\IAR\STR91x\portmacro.h"
+#endif
+
+#ifdef GCC_H8S
+    #include "../../Source/portable/GCC/H8S2329/portmacro.h"
+#endif
+
+#ifdef GCC_AT91FR40008
+    #include "../../Source/portable/GCC/ARM7_AT91FR40008/portmacro.h"
+#endif
+
+#ifdef RVDS_ARMCM3_LM3S102
+    #include "../../Source/portable/RVDS/ARM_CM3/portmacro.h"
+#endif
+
+#ifdef GCC_ARMCM3_LM3S102
+    #include "../../Source/portable/GCC/ARM_CM3/portmacro.h"
+#endif
+
+#ifdef GCC_ARMCM3
+    #include "../../Source/portable/GCC/ARM_CM3/portmacro.h"
+#endif
+
+#ifdef IAR_ARM_CM3
+    #include "../../Source/portable/IAR/ARM_CM3/portmacro.h"
+#endif
+
+#ifdef IAR_ARMCM3_LM
+    #include "../../Source/portable/IAR/ARM_CM3/portmacro.h"
+#endif
+
+#ifdef HCS12_CODE_WARRIOR
+    #include "../../Source/portable/CodeWarrior/HCS12/portmacro.h"
+#endif
+
+#ifdef MICROBLAZE_GCC
+    #include "../../Source/portable/GCC/MicroBlaze/portmacro.h"
+#endif
+
+#ifdef TERN_EE
+    #include "..\..\Source\portable\Paradigm\Tern_EE\small\portmacro.h"
+#endif
+
+#ifdef GCC_HCS12
+    #include "../../Source/portable/GCC/HCS12/portmacro.h"
+#endif
+
+#ifdef GCC_MCF5235
+    #include "../../Source/portable/GCC/MCF5235/portmacro.h"
+#endif
+
+#ifdef COLDFIRE_V2_GCC
+    #include "../../../Source/portable/GCC/ColdFire_V2/portmacro.h"
+#endif
+
+#ifdef COLDFIRE_V2_CODEWARRIOR
+    #include "../../Source/portable/CodeWarrior/ColdFire_V2/portmacro.h"
+#endif
+
+#ifdef GCC_PPC405
+    #include "../../Source/portable/GCC/PPC405_Xilinx/portmacro.h"
+#endif
+
+#ifdef GCC_PPC440
+    #include "../../Source/portable/GCC/PPC440_Xilinx/portmacro.h"
+#endif
+
+#ifdef _16FX_SOFTUNE
+    #include "..\..\Source\portable\Softune\MB96340\portmacro.h"
+#endif
+
+#ifdef BCC_INDUSTRIAL_PC_PORT
+    /* A short file name has to be used in place of the normal
+    FreeRTOSConfig.h when using the Borland compiler. */
+    #include "frconfig.h"
+    #include "..\portable\BCC\16BitDOS\PC\prtmacro.h"
+    typedef void ( __interrupt __far *pxISR )();
+#endif
+
+#ifdef BCC_FLASH_LITE_186_PORT
+    /* A short file name has to be used in place of the normal
+    FreeRTOSConfig.h when using the Borland compiler. */
+    #include "frconfig.h"
+    #include "..\portable\BCC\16BitDOS\flsh186\prtmacro.h"
+    typedef void ( __interrupt __far *pxISR )();
+#endif
+
+#ifdef __GNUC__
+    #ifdef __AVR32_AVR32A__
+        #include "portmacro.h"
+    #endif
+#endif
+
+#ifdef __ICCAVR32__
+    #ifdef __CORE__
+        #if __CORE__ == __AVR32A__
+            #include "portmacro.h"
+        #endif
+    #endif
+#endif
+
+#ifdef __91467D
+    #include "portmacro.h"
+#endif
+
+#ifdef __96340
+    #include "portmacro.h"
+#endif
+
+
+#ifdef __IAR_V850ES_Fx3__
+    #include "../../Source/portable/IAR/V850ES/portmacro.h"
+#endif
+
+#ifdef __IAR_V850ES_Jx3__
+    #include "../../Source/portable/IAR/V850ES/portmacro.h"
+#endif
+
+#ifdef __IAR_V850ES_Jx3_L__
+    #include "../../Source/portable/IAR/V850ES/portmacro.h"
+#endif
+
+#ifdef __IAR_V850ES_Jx2__
+    #include "../../Source/portable/IAR/V850ES/portmacro.h"
+#endif
+
+#ifdef __IAR_V850ES_Hx2__
+    #include "../../Source/portable/IAR/V850ES/portmacro.h"
+#endif
+
+#ifdef __IAR_78K0R_Kx3__
+    #include "../../Source/portable/IAR/78K0R/portmacro.h"
+#endif
+
+#ifdef __IAR_78K0R_Kx3L__
+    #include "../../Source/portable/IAR/78K0R/portmacro.h"
+#endif
+
+#endif /* DEPRECATED_DEFINITIONS_H */
+
diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h
new file mode 100644
index 000000000..506440490
--- /dev/null
+++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h
@@ -0,0 +1,756 @@
+/*
+ * FreeRTOS Kernel V10.0.1
+ * Copyright (C) 2017 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#ifndef EVENT_GROUPS_H
+#define EVENT_GROUPS_H
+
+#ifndef INC_FREERTOS_H
+    #error "include FreeRTOS.h" must appear in source files before "include event_groups.h"
+#endif
+
+/* FreeRTOS includes. */
+#include "timers.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * An event group is a collection of bits to which an application can assign a
+ * meaning.  For example, an application may create an event group to convey
+ * the status of various CAN bus related events in which bit 0 might mean "A CAN
+ * message has been received and is ready for processing", bit 1 might mean "The
+ * application has queued a message that is ready for sending onto the CAN
+ * network", and bit 2 might mean "It is time to send a SYNC message onto the
+ * CAN network" etc.  A task can then test the bit values to see which events
+ * are active, and optionally enter the Blocked state to wait for a specified
+ * bit or a group of specified bits to be active.  To continue the CAN bus
+ * example, a CAN controlling task can enter the Blocked state (and therefore
+ * not consume any processing time) until either bit 0, bit 1 or bit 2 are
+ * active, at which time the bit that was actually active would inform the task
+ * which action it had to take (process a received message, send a message, or
+ * send a SYNC).
+ *
+ * The event groups implementation contains intelligence to avoid race
+ * conditions that would otherwise occur were an application to use a simple
+ * variable for the same purpose.  This is particularly important with respect
+ * to when a bit within an event group is to be cleared, and when bits have to
+ * be set and then tested atomically - as is the case where event groups are
+ * used to create a synchronisation point between multiple tasks (a
+ * 'rendezvous').
+ *
+ * \defgroup EventGroup
+ */
+
+
+
+/**
+ * event_groups.h
+ *
+ * Type by which event groups are referenced.  For example, a call to
+ * xEventGroupCreate() returns an EventGroupHandle_t variable that can then
+ * be used as a parameter to other event group functions.
+ *
+ * \defgroup EventGroupHandle_t EventGroupHandle_t
+ * \ingroup EventGroup
+ */
+typedef void * EventGroupHandle_t;
+
+/*
+ * The type that holds event bits always matches TickType_t - therefore the
+ * number of bits it holds is set by configUSE_16_BIT_TICKS (16 bits if set to 1,
+ * 32 bits if set to 0.
+ *
+ * \defgroup EventBits_t EventBits_t
+ * \ingroup EventGroup
+ */
+typedef TickType_t EventBits_t;
+
+/**
+ * event_groups.h
+ *
+ EventGroupHandle_t xEventGroupCreate( void );
+
+ *
+ * Create a new event group.
+ *
+ * Internally, within the FreeRTOS implementation, event groups use a [small]
+ * block of memory, in which the event group's structure is stored.  If an event
+ * groups is created using xEventGropuCreate() then the required memory is
+ * automatically dynamically allocated inside the xEventGroupCreate() function.
+ * (see http://www.freertos.org/a00111.html).  If an event group is created
+ * using xEventGropuCreateStatic() then the application writer must instead
+ * provide the memory that will get used by the event group.
+ * xEventGroupCreateStatic() therefore allows an event group to be created
+ * without using any dynamic memory allocation.
+ *
+ * Although event groups are not related to ticks, for internal implementation
+ * reasons the number of bits available for use in an event group is dependent
+ * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h.  If
+ * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit
+ * 0 to bit 7).  If configUSE_16_BIT_TICKS is set to 0 then each event group has
+ * 24 usable bits (bit 0 to bit 23).  The EventBits_t type is used to store
+ * event bits within an event group.
+ *
+ * @return If the event group was created then a handle to the event group is
+ * returned.  If there was insufficient FreeRTOS heap available to create the
+ * event group then NULL is returned.  See http://www.freertos.org/a00111.html
+ *
+ * Example usage:
+
+ // Declare a variable to hold the created event group.
+ EventGroupHandle_t xCreatedEventGroup;
+
+ // Attempt to create the event group.
+ xCreatedEventGroup = xEventGroupCreate();
+
+ // Was the event group created successfully?
+ if( xCreatedEventGroup == NULL )
+ {
+     // The event group was not created because there was insufficient
+     // FreeRTOS heap available.
+ }
+ else
+ {
+     // The event group was created.
+ }
+
+ * \defgroup xEventGroupCreate xEventGroupCreate
+ * \ingroup EventGroup
+ */
+#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+    EventGroupHandle_t xEventGroupCreate( void ) PRIVILEGED_FUNCTION;
+#endif
+
+/**
+ * event_groups.h
+ *
+ EventGroupHandle_t xEventGroupCreateStatic( EventGroupHandle_t * pxEventGroupBuffer );
+
+ *
+ * Create a new event group.
+ *
+ * Internally, within the FreeRTOS implementation, event groups use a [small]
+ * block of memory, in which the event group's structure is stored.  If an event
+ * groups is created using xEventGropuCreate() then the required memory is
+ * automatically dynamically allocated inside the xEventGroupCreate() function.
+ * (see http://www.freertos.org/a00111.html).  If an event group is created
+ * using xEventGropuCreateStatic() then the application writer must instead
+ * provide the memory that will get used by the event group.
+ * xEventGroupCreateStatic() therefore allows an event group to be created
+ * without using any dynamic memory allocation.
+ *
+ * Although event groups are not related to ticks, for internal implementation
+ * reasons the number of bits available for use in an event group is dependent
+ * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h.  If
+ * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit
+ * 0 to bit 7).  If configUSE_16_BIT_TICKS is set to 0 then each event group has
+ * 24 usable bits (bit 0 to bit 23).  The EventBits_t type is used to store
+ * event bits within an event group.
+ *
+ * @param pxEventGroupBuffer pxEventGroupBuffer must point to a variable of type
+ * StaticEventGroup_t, which will be then be used to hold the event group's data
+ * structures, removing the need for the memory to be allocated dynamically.
+ *
+ * @return If the event group was created then a handle to the event group is
+ * returned.  If pxEventGroupBuffer was NULL then NULL is returned.
+ *
+ * Example usage:
+
+ // StaticEventGroup_t is a publicly accessible structure that has the same
+ // size and alignment requirements as the real event group structure.  It is
+ // provided as a mechanism for applications to know the size of the event
+ // group (which is dependent on the architecture and configuration file
+ // settings) without breaking the strict data hiding policy by exposing the
+ // real event group internals.  This StaticEventGroup_t variable is passed
+ // into the xSemaphoreCreateEventGroupStatic() function and is used to store
+ // the event group's data structures
+ StaticEventGroup_t xEventGroupBuffer;
+
+ // Create the event group without dynamically allocating any memory.
+ xEventGroup = xEventGroupCreateStatic( &xEventGroupBuffer );
+
+ */
+#if( configSUPPORT_STATIC_ALLOCATION == 1 )
+    EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer ) PRIVILEGED_FUNCTION;
+#endif
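A hedged aside on the 8-versus-24 usable bits described above (illustrative, not from the vendored header): EventBits_t matches TickType_t, and the kernel reserves the top byte of it for internal control flags, which is where the usable-bit counts come from.

    /* Illustrative only: how many event bits an application can use.
       The top 8 bits of the 16- or 32-bit tick type are reserved for the
       kernel's internal control flags, leaving 8 or 24 application bits. */
    #if( configUSE_16_BIT_TICKS == 1 )
        #define appUSABLE_EVENT_BITS    8    /* bits 0..7 */
    #else
        #define appUSABLE_EVENT_BITS    24   /* bits 0..23 */
    #endif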
+ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + const TickType_t xTicksToWait ); ++ * + * [Potentially] block to wait for one or more bits to be set within a + * previously created event group. + * + * This function cannot be called from an interrupt. + * + * @param xEventGroup The event group in which the bits are being tested. The + * event group must have previously been created using a call to + * xEventGroupCreate(). + * + * @param uxBitsToWaitFor A bitwise value that indicates the bit or bits to test + * inside the event group. For example, to wait for bit 0 and/or bit 2 set + * uxBitsToWaitFor to 0x05. To wait for bits 0 and/or bit 1 and/or bit 2 set + * uxBitsToWaitFor to 0x07. Etc. + * + * @param xClearOnExit If xClearOnExit is set to pdTRUE then any bits within + * uxBitsToWaitFor that are set within the event group will be cleared before + * xEventGroupWaitBits() returns if the wait condition was met (if the function + * returns for a reason other than a timeout). If xClearOnExit is set to + * pdFALSE then the bits set in the event group are not altered when the call to + * xEventGroupWaitBits() returns. + * + * @param xWaitForAllBits If xWaitForAllBits is set to pdTRUE then + * xEventGroupWaitBits() will return when either all the bits in uxBitsToWaitFor + * are set or the specified block time expires. If xWaitForAllBits is set to + * pdFALSE then xEventGroupWaitBits() will return when any one of the bits set + * in uxBitsToWaitFor is set or the specified block time expires. The block + * time is specified by the xTicksToWait parameter. + * + * @param xTicksToWait The maximum amount of time (specified in 'ticks') to wait + * for one/all (depending on the xWaitForAllBits value) of the bits specified by + * uxBitsToWaitFor to become set. + * + * @return The value of the event group at the time either the bits being waited + * for became set, or the block time expired. Test the return value to know + * which bits were set. If xEventGroupWaitBits() returned because its timeout + * expired then not all the bits being waited for will be set. If + * xEventGroupWaitBits() returned because the bits it was waiting for were set + * then the returned value is the event group value before any bits were + * automatically cleared in the case that xClearOnExit parameter was set to + * pdTRUE. + * + * Example usage: +
+ #define BIT_0 ( 1 << 0 ) + #define BIT_4 ( 1 << 4 ) + + void aFunction( EventGroupHandle_t xEventGroup ) + { + EventBits_t uxBits; + const TickType_t xTicksToWait = 100 / portTICK_PERIOD_MS; + + // Wait a maximum of 100ms for either bit 0 or bit 4 to be set within + // the event group. Clear the bits before exiting. + uxBits = xEventGroupWaitBits( + xEventGroup, // The event group being tested. + BIT_0 | BIT_4, // The bits within the event group to wait for. + pdTRUE, // BIT_0 and BIT_4 should be cleared before returning. + pdFALSE, // Don't wait for both bits, either bit will do. + xTicksToWait ); // Wait a maximum of 100ms for either bit to be set. + + if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) ) + { + // xEventGroupWaitBits() returned because both bits were set. + } + else if( ( uxBits & BIT_0 ) != 0 ) + { + // xEventGroupWaitBits() returned because just BIT_0 was set. + } + else if( ( uxBits & BIT_4 ) != 0 ) + { + // xEventGroupWaitBits() returned because just BIT_4 was set. + } + else + { + // xEventGroupWaitBits() returned because xTicksToWait ticks passed + // without either BIT_0 or BIT_4 becoming set. + } + } ++ * \defgroup xEventGroupWaitBits xEventGroupWaitBits + * \ingroup EventGroup + */ +EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ); ++ * + * Clear bits within an event group. This function cannot be called from an + * interrupt. + * + * @param xEventGroup The event group in which the bits are to be cleared. + * + * @param uxBitsToClear A bitwise value that indicates the bit or bits to clear + * in the event group. For example, to clear bit 3 only, set uxBitsToClear to + * 0x08. To clear bit 3 and bit 0 set uxBitsToClear to 0x09. + * + * @return The value of the event group before the specified bits were cleared. + * + * Example usage: +
+ #define BIT_0 ( 1 << 0 ) + #define BIT_4 ( 1 << 4 ) + + void aFunction( EventGroupHandle_t xEventGroup ) + { + EventBits_t uxBits; + + // Clear bit 0 and bit 4 in xEventGroup. + uxBits = xEventGroupClearBits( + xEventGroup, // The event group being updated. + BIT_0 | BIT_4 );// The bits being cleared. + + if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) ) + { + // Both bit 0 and bit 4 were set before xEventGroupClearBits() was + // called. Both will now be clear (not set). + } + else if( ( uxBits & BIT_0 ) != 0 ) + { + // Bit 0 was set before xEventGroupClearBits() was called. It will + // now be clear. + } + else if( ( uxBits & BIT_4 ) != 0 ) + { + // Bit 4 was set before xEventGroupClearBits() was called. It will + // now be clear. + } + else + { + // Neither bit 0 nor bit 4 were set in the first place. + } + } ++ * \defgroup xEventGroupClearBits xEventGroupClearBits + * \ingroup EventGroup + */ +EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+ BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ); ++ * + * A version of xEventGroupClearBits() that can be called from an interrupt. + * + * Setting bits in an event group is not a deterministic operation because there + * are an unknown number of tasks that may be waiting for the bit or bits being + * set. FreeRTOS does not allow nondeterministic operations to be performed + * while interrupts are disabled, so protects event groups that are accessed + * from tasks by suspending the scheduler rather than disabling interrupts. As + * a result event groups cannot be accessed directly from an interrupt service + * routine. Therefore xEventGroupClearBitsFromISR() sends a message to the + * timer task to have the clear operation performed in the context of the timer + * task. + * + * @param xEventGroup The event group in which the bits are to be cleared. + * + * @param uxBitsToClear A bitwise value that indicates the bit or bits to clear. + * For example, to clear bit 3 only, set uxBitsToClear to 0x08. To clear bit 3 + * and bit 0 set uxBitsToClear to 0x09. + * + * @return If the request to execute the function was posted successfully then + * pdPASS is returned, otherwise pdFALSE is returned. pdFALSE will be returned + * if the timer service queue was full. + * + * Example usage: +
+ #define BIT_0 ( 1 << 0 ) + #define BIT_4 ( 1 << 4 ) + + // An event group which it is assumed has already been created by a call to + // xEventGroupCreate(). + EventGroupHandle_t xEventGroup; + + void anInterruptHandler( void ) + { + // Clear bit 0 and bit 4 in xEventGroup. + xResult = xEventGroupClearBitsFromISR( + xEventGroup, // The event group being updated. + BIT_0 | BIT_4 ); // The bits being set. + + if( xResult == pdPASS ) + { + // The message was posted successfully. + } + } ++ * \defgroup xEventGroupClearBitsFromISR xEventGroupClearBitsFromISR + * \ingroup EventGroup + */ +#if( configUSE_TRACE_FACILITY == 1 ) + BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ) PRIVILEGED_FUNCTION; +#else + #define xEventGroupClearBitsFromISR( xEventGroup, uxBitsToClear ) xTimerPendFunctionCallFromISR( vEventGroupClearBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToClear, NULL ) +#endif + +/** + * event_groups.h + *
+ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ); ++ * + * Set bits within an event group. + * This function cannot be called from an interrupt. xEventGroupSetBitsFromISR() + * is a version that can be called from an interrupt. + * + * Setting bits in an event group will automatically unblock tasks that are + * blocked waiting for the bits. + * + * @param xEventGroup The event group in which the bits are to be set. + * + * @param uxBitsToSet A bitwise value that indicates the bit or bits to set. + * For example, to set bit 3 only, set uxBitsToSet to 0x08. To set bit 3 + * and bit 0 set uxBitsToSet to 0x09. + * + * @return The value of the event group at the time the call to + * xEventGroupSetBits() returns. There are two reasons why the returned value + * might have the bits specified by the uxBitsToSet parameter cleared. First, + * if setting a bit results in a task that was waiting for the bit leaving the + * blocked state then it is possible the bit will be cleared automatically + * (see the xClearBitOnExit parameter of xEventGroupWaitBits()). Second, any + * unblocked (or otherwise Ready state) task that has a priority above that of + * the task that called xEventGroupSetBits() will execute and may change the + * event group value before the call to xEventGroupSetBits() returns. + * + * Example usage: +
+ #define BIT_0 ( 1 << 0 ) + #define BIT_4 ( 1 << 4 ) + + void aFunction( EventGroupHandle_t xEventGroup ) + { + EventBits_t uxBits; + + // Set bit 0 and bit 4 in xEventGroup. + uxBits = xEventGroupSetBits( + xEventGroup, // The event group being updated. + BIT_0 | BIT_4 );// The bits being set. + + if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) ) + { + // Both bit 0 and bit 4 remained set when the function returned. + } + else if( ( uxBits & BIT_0 ) != 0 ) + { + // Bit 0 remained set when the function returned, but bit 4 was + // cleared. It might be that bit 4 was cleared automatically as a + // task that was waiting for bit 4 was removed from the Blocked + // state. + } + else if( ( uxBits & BIT_4 ) != 0 ) + { + // Bit 4 remained set when the function returned, but bit 0 was + // cleared. It might be that bit 0 was cleared automatically as a + // task that was waiting for bit 0 was removed from the Blocked + // state. + } + else + { + // Neither bit 0 nor bit 4 remained set. It might be that a task + // was waiting for both of the bits to be set, and the bits were + // cleared as the task left the Blocked state. + } + } ++ * \defgroup xEventGroupSetBits xEventGroupSetBits + * \ingroup EventGroup + */ +EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+ BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken ); ++ * + * A version of xEventGroupSetBits() that can be called from an interrupt. + * + * Setting bits in an event group is not a deterministic operation because there + * are an unknown number of tasks that may be waiting for the bit or bits being + * set. FreeRTOS does not allow nondeterministic operations to be performed in + * interrupts or from critical sections. Therefore xEventGroupSetBitsFromISR() + * sends a message to the timer task to have the set operation performed in the + * context of the timer task - where a scheduler lock is used in place of a + * critical section. + * + * @param xEventGroup The event group in which the bits are to be set. + * + * @param uxBitsToSet A bitwise value that indicates the bit or bits to set. + * For example, to set bit 3 only, set uxBitsToSet to 0x08. To set bit 3 + * and bit 0 set uxBitsToSet to 0x09. + * + * @param pxHigherPriorityTaskWoken As mentioned above, calling this function + * will result in a message being sent to the timer daemon task. If the + * priority of the timer daemon task is higher than the priority of the + * currently running task (the task the interrupt interrupted) then + * *pxHigherPriorityTaskWoken will be set to pdTRUE by + * xEventGroupSetBitsFromISR(), indicating that a context switch should be + * requested before the interrupt exits. For that reason + * *pxHigherPriorityTaskWoken must be initialised to pdFALSE. See the + * example code below. + * + * @return If the request to execute the function was posted successfully then + * pdPASS is returned, otherwise pdFALSE is returned. pdFALSE will be returned + * if the timer service queue was full. + * + * Example usage: +
+ #define BIT_0 ( 1 << 0 ) + #define BIT_4 ( 1 << 4 ) + + // An event group which it is assumed has already been created by a call to + // xEventGroupCreate(). + EventGroupHandle_t xEventGroup; + + void anInterruptHandler( void ) + { + BaseType_t xHigherPriorityTaskWoken, xResult; + + // xHigherPriorityTaskWoken must be initialised to pdFALSE. + xHigherPriorityTaskWoken = pdFALSE; + + // Set bit 0 and bit 4 in xEventGroup. + xResult = xEventGroupSetBitsFromISR( + xEventGroup, // The event group being updated. + BIT_0 | BIT_4 // The bits being set. + &xHigherPriorityTaskWoken ); + + // Was the message posted successfully? + if( xResult == pdPASS ) + { + // If xHigherPriorityTaskWoken is now set to pdTRUE then a context + // switch should be requested. The macro used is port specific and + // will be either portYIELD_FROM_ISR() or portEND_SWITCHING_ISR() - + // refer to the documentation page for the port being used. + portYIELD_FROM_ISR( xHigherPriorityTaskWoken ); + } + } ++ * \defgroup xEventGroupSetBitsFromISR xEventGroupSetBitsFromISR + * \ingroup EventGroup + */ +#if( configUSE_TRACE_FACILITY == 1 ) + BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +#else + #define xEventGroupSetBitsFromISR( xEventGroup, uxBitsToSet, pxHigherPriorityTaskWoken ) xTimerPendFunctionCallFromISR( vEventGroupSetBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToSet, pxHigherPriorityTaskWoken ) +#endif + +/** + * event_groups.h + *
+ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ); ++ * + * Atomically set bits within an event group, then wait for a combination of + * bits to be set within the same event group. This functionality is typically + * used to synchronise multiple tasks, where each task has to wait for the other + * tasks to reach a synchronisation point before proceeding. + * + * This function cannot be used from an interrupt. + * + * The function will return before its block time expires if the bits specified + * by the uxBitsToWait parameter are set, or become set within that time. In + * this case all the bits specified by uxBitsToWait will be automatically + * cleared before the function returns. + * + * @param xEventGroup The event group in which the bits are being tested. The + * event group must have previously been created using a call to + * xEventGroupCreate(). + * + * @param uxBitsToSet The bits to set in the event group before determining + * if, and possibly waiting for, all the bits specified by the uxBitsToWait + * parameter are set. + * + * @param uxBitsToWaitFor A bitwise value that indicates the bit or bits to test + * inside the event group. For example, to wait for bit 0 and bit 2 set + * uxBitsToWaitFor to 0x05. To wait for bits 0 and bit 1 and bit 2 set + * uxBitsToWaitFor to 0x07. Etc. + * + * @param xTicksToWait The maximum amount of time (specified in 'ticks') to wait + * for all of the bits specified by uxBitsToWaitFor to become set. + * + * @return The value of the event group at the time either the bits being waited + * for became set, or the block time expired. Test the return value to know + * which bits were set. If xEventGroupSync() returned because its timeout + * expired then not all the bits being waited for will be set. If + * xEventGroupSync() returned because all the bits it was waiting for were + * set then the returned value is the event group value before any bits were + * automatically cleared. + * + * Example usage: +
+ // Bits used by the three tasks. + #define TASK_0_BIT ( 1 << 0 ) + #define TASK_1_BIT ( 1 << 1 ) + #define TASK_2_BIT ( 1 << 2 ) + + #define ALL_SYNC_BITS ( TASK_0_BIT | TASK_1_BIT | TASK_2_BIT ) + + // Use an event group to synchronise three tasks. It is assumed this event + // group has already been created elsewhere. + EventGroupHandle_t xEventBits; + + void vTask0( void *pvParameters ) + { + EventBits_t uxReturn; + TickType_t xTicksToWait = 100 / portTICK_PERIOD_MS; + + for( ;; ) + { + // Perform task functionality here. + + // Set bit 0 in the event flag to note this task has reached the + // sync point. The other two tasks will set the other two bits defined + // by ALL_SYNC_BITS. All three tasks have reached the synchronisation + // point when all the ALL_SYNC_BITS are set. Wait a maximum of 100ms + // for this to happen. + uxReturn = xEventGroupSync( xEventBits, TASK_0_BIT, ALL_SYNC_BITS, xTicksToWait ); + + if( ( uxReturn & ALL_SYNC_BITS ) == ALL_SYNC_BITS ) + { + // All three tasks reached the synchronisation point before the call + // to xEventGroupSync() timed out. + } + } + } + + void vTask1( void *pvParameters ) + { + for( ;; ) + { + // Perform task functionality here. + + // Set bit 1 in the event flag to note this task has reached the + // synchronisation point. The other two tasks will set the other two + // bits defined by ALL_SYNC_BITS. All three tasks have reached the + // synchronisation point when all the ALL_SYNC_BITS are set. Wait + // indefinitely for this to happen. + xEventGroupSync( xEventBits, TASK_1_BIT, ALL_SYNC_BITS, portMAX_DELAY ); + + // xEventGroupSync() was called with an indefinite block time, so + // this task will only reach here if the syncrhonisation was made by all + // three tasks, so there is no need to test the return value. + } + } + + void vTask2( void *pvParameters ) + { + for( ;; ) + { + // Perform task functionality here. + + // Set bit 2 in the event flag to note this task has reached the + // synchronisation point. The other two tasks will set the other two + // bits defined by ALL_SYNC_BITS. All three tasks have reached the + // synchronisation point when all the ALL_SYNC_BITS are set. Wait + // indefinitely for this to happen. + xEventGroupSync( xEventBits, TASK_2_BIT, ALL_SYNC_BITS, portMAX_DELAY ); + + // xEventGroupSync() was called with an indefinite block time, so + // this task will only reach here if the syncrhonisation was made by all + // three tasks, so there is no need to test the return value. + } + } + ++ * \defgroup xEventGroupSync xEventGroupSync + * \ingroup EventGroup + */ +EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + +/** + * event_groups.h + *
+ EventBits_t xEventGroupGetBits( EventGroupHandle_t xEventGroup ); ++ * + * Returns the current value of the bits in an event group. This function + * cannot be used from an interrupt. + * + * @param xEventGroup The event group being queried. + * + * @return The event group bits at the time xEventGroupGetBits() was called. + * + * \defgroup xEventGroupGetBits xEventGroupGetBits + * \ingroup EventGroup + */ +#define xEventGroupGetBits( xEventGroup ) xEventGroupClearBits( xEventGroup, 0 )
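+
+/* For example (an illustrative sketch only; xEventGroup is assumed to have
+been created with xEventGroupCreate() and to have had bits set elsewhere):
+
+    EventBits_t uxBits;
+
+    // Read the current bit values without blocking. Clearing zero bits
+    // leaves the event group unchanged, which is how this macro works.
+    uxBits = xEventGroupGetBits( xEventGroup );
+
+    if( ( uxBits & ( 1 << 0 ) ) != 0 )
+    {
+        // Bit 0 was set at the time the event group was queried.
+    }
+*/
+
+/**
+ * event_groups.h
+ *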
+ EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ); ++ * + * A version of xEventGroupGetBits() that can be called from an ISR. + * + * @param xEventGroup The event group being queried. + * + * @return The event group bits at the time xEventGroupGetBitsFromISR() was called. + * + * \defgroup xEventGroupGetBitsFromISR xEventGroupGetBitsFromISR + * \ingroup EventGroup + */ +EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+ void vEventGroupDelete( EventGroupHandle_t xEventGroup ); ++ * + * Delete an event group that was previously created by a call to + * xEventGroupCreate(). Tasks that are blocked on the event group will be + * unblocked and obtain 0 as the event group's value. + * + * @param xEventGroup The event group being deleted. + */ +void vEventGroupDelete( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION; + +/* For internal use only. */ +void vEventGroupSetBitsCallback( void *pvEventGroup, const uint32_t ulBitsToSet ) PRIVILEGED_FUNCTION; +void vEventGroupClearBitsCallback( void *pvEventGroup, const uint32_t ulBitsToClear ) PRIVILEGED_FUNCTION; + + +#if (configUSE_TRACE_FACILITY == 1) + UBaseType_t uxEventGroupGetNumber( void* xEventGroup ) PRIVILEGED_FUNCTION; + void vEventGroupSetNumber( void* xEventGroup, UBaseType_t uxEventGroupNumber ) PRIVILEGED_FUNCTION; +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT_GROUPS_H */ + + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/list.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/list.h new file mode 100644 index 000000000..431443f7c --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/list.h @@ -0,0 +1,411 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* + * This is the list implementation used by the scheduler. While it is tailored + * heavily for the scheduler's needs, it is also available for use by + * application code. + * + * list_ts can only store pointers to list_item_ts. Each ListItem_t contains a + * numeric value (xItemValue). Most of the time the lists are sorted in + * ascending item value order. + * + * Lists are created already containing one list item. The value of this + * item is the maximum possible that can be stored, it is therefore always at + * the end of the list and acts as a marker. The list member pxHead always + * points to this marker - even though it is at the tail of the list. This + * is because the tail contains a wrap back pointer to the true head of + * the list.
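+ *
+ * As an illustration of that end-marker design (a sketch only; vListInitialise()
+ * is declared later in this file, and configASSERT() may be defined away):
+ *
+ *    List_t xList;
+ *
+ *    vListInitialise( &xList );
+ *
+ *    // The marker holds the maximum possible item value...
+ *    configASSERT( xList.xListEnd.xItemValue == portMAX_DELAY );
+ *
+ *    // ...and an empty list wraps straight back to the marker.
+ *    configASSERT( xList.xListEnd.pxNext == ( ListItem_t * ) &( xList.xListEnd ) );
+ *    configASSERT( xList.xListEnd.pxPrevious == ( ListItem_t * ) &( xList.xListEnd ) );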
+ * + * In addition to its value, each list item contains a pointer to the next + * item in the list (pxNext), a pointer to the list it is in (pvContainer) + * and a pointer back to the object that contains it. These latter two + * pointers are included for efficiency of list manipulation. There is + * effectively a two way link between the object containing the list item and + * the list item itself. + * + * + * \page ListIntroduction List Implementation + * \ingroup FreeRTOSIntro + */ + +#ifndef INC_FREERTOS_H + #error FreeRTOS.h must be included before list.h +#endif + +#ifndef LIST_H +#define LIST_H + +/* + * The list structure members are modified from within interrupts, and therefore + * by rights should be declared volatile. However, they are only modified in a + * functionally atomic way (within critical sections or with the scheduler + * suspended) and are either passed by reference into a function or indexed via + * a volatile variable. Therefore, in all use cases tested so far, the volatile + * qualifier can be omitted in order to provide a moderate performance + * improvement without adversely affecting functional behaviour. The assembly + * instructions generated by the IAR, ARM and GCC compilers when the respective + * compiler's options were set for maximum optimisation have been inspected and + * deemed to be as intended. That said, as compiler technology advances, and + * especially if aggressive cross module optimisation is used (a use case that + * has not been exercised to any great extent) then it is feasible that the + * volatile qualifier will be needed for correct optimisation. If a compiler + * ever removes code that is in fact essential because, without the volatile + * qualifier on the list structure members and with aggressive cross module + * optimisation, it deemed the code unnecessary, the result is expected to be + * complete and obvious failure of the scheduler. If this is ever experienced + * then the volatile qualifier can be inserted in the relevant places within the + * list structures by simply defining configLIST_VOLATILE to volatile in + * FreeRTOSConfig.h (as per the example at the bottom of this comment block). + * If configLIST_VOLATILE is not defined then the preprocessor directives below + * will simply #define configLIST_VOLATILE away completely. + * + * To use volatile list structure members then add the following line to + * FreeRTOSConfig.h (without the quotes): + * "#define configLIST_VOLATILE volatile" + */ +#ifndef configLIST_VOLATILE + #define configLIST_VOLATILE +#endif /* configLIST_VOLATILE */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Macros that can be used to place known values within the list structures, +then check that the known values do not get corrupted during the execution of +the application. These may catch the list data structures being overwritten in +memory. They will not catch data errors caused by incorrect configuration or +use of FreeRTOS.*/ +#if( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 0 ) + /* Define the macros to do nothing.
*/ + #define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE + #define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE + #define listFIRST_LIST_INTEGRITY_CHECK_VALUE + #define listSECOND_LIST_INTEGRITY_CHECK_VALUE + #define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) + #define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) + #define listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ) + #define listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ) + #define listTEST_LIST_ITEM_INTEGRITY( pxItem ) + #define listTEST_LIST_INTEGRITY( pxList ) +#else + /* Define macros that add new members into the list structures. */ + #define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE TickType_t xListItemIntegrityValue1; + #define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE TickType_t xListItemIntegrityValue2; + #define listFIRST_LIST_INTEGRITY_CHECK_VALUE TickType_t xListIntegrityValue1; + #define listSECOND_LIST_INTEGRITY_CHECK_VALUE TickType_t xListIntegrityValue2; + + /* Define macros that set the new structure members to known values. */ + #define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) ( pxItem )->xListItemIntegrityValue1 = pdINTEGRITY_CHECK_VALUE + #define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) ( pxItem )->xListItemIntegrityValue2 = pdINTEGRITY_CHECK_VALUE + #define listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ) ( pxList )->xListIntegrityValue1 = pdINTEGRITY_CHECK_VALUE + #define listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ) ( pxList )->xListIntegrityValue2 = pdINTEGRITY_CHECK_VALUE + + /* Define macros that will assert if one of the structure members does not + contain its expected value. */ + #define listTEST_LIST_ITEM_INTEGRITY( pxItem ) configASSERT( ( ( pxItem )->xListItemIntegrityValue1 == pdINTEGRITY_CHECK_VALUE ) && ( ( pxItem )->xListItemIntegrityValue2 == pdINTEGRITY_CHECK_VALUE ) ) + #define listTEST_LIST_INTEGRITY( pxList ) configASSERT( ( ( pxList )->xListIntegrityValue1 == pdINTEGRITY_CHECK_VALUE ) && ( ( pxList )->xListIntegrityValue2 == pdINTEGRITY_CHECK_VALUE ) ) +#endif /* configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES */ + + +/* + * Definition of the only type of object that a list can contain. + */ +struct xLIST_ITEM +{ + listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + configLIST_VOLATILE TickType_t xItemValue; /*< The value being listed. In most cases this is used to sort the list in descending order. */ + struct xLIST_ITEM * configLIST_VOLATILE pxNext; /*< Pointer to the next ListItem_t in the list. */ + struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; /*< Pointer to the previous ListItem_t in the list. */ + void * pvOwner; /*< Pointer to the object (normally a TCB) that contains the list item. There is therefore a two way link between the object containing the list item and the list item itself. */ + void * configLIST_VOLATILE pvContainer; /*< Pointer to the list in which this list item is placed (if any). */ + listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ +}; +typedef struct xLIST_ITEM ListItem_t; /* For some reason lint wants this as two separate definitions. */ + +struct xMINI_LIST_ITEM +{ + listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. 
*/ + configLIST_VOLATILE TickType_t xItemValue; + struct xLIST_ITEM * configLIST_VOLATILE pxNext; + struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; +}; +typedef struct xMINI_LIST_ITEM MiniListItem_t; + +/* + * Definition of the type of list used by the scheduler. + */ +typedef struct xLIST +{ + listFIRST_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + volatile UBaseType_t uxNumberOfItems; + ListItem_t * configLIST_VOLATILE pxIndex; /*< Used to walk through the list. Points to the last item returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). */ + MiniListItem_t xListEnd; /*< List item that contains the maximum possible item value meaning it is always at the end of the list and is therefore used as a marker. */ + listSECOND_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ +} List_t; + +/* + * Access macro to set the owner of a list item. The owner of a list item + * is the object (usually a TCB) that contains the list item. + * + * \page listSET_LIST_ITEM_OWNER listSET_LIST_ITEM_OWNER + * \ingroup LinkedList + */ +#define listSET_LIST_ITEM_OWNER( pxListItem, pxOwner ) ( ( pxListItem )->pvOwner = ( void * ) ( pxOwner ) ) + +/* + * Access macro to get the owner of a list item. The owner of a list item + * is the object (usually a TCB) that contains the list item. + * + * \page listGET_LIST_ITEM_OWNER listGET_LIST_ITEM_OWNER + * \ingroup LinkedList + */ +#define listGET_LIST_ITEM_OWNER( pxListItem ) ( ( pxListItem )->pvOwner ) + +/* + * Access macro to set the value of the list item. In most cases the value is + * used to sort the list in ascending order. + * + * \page listSET_LIST_ITEM_VALUE listSET_LIST_ITEM_VALUE + * \ingroup LinkedList + */ +#define listSET_LIST_ITEM_VALUE( pxListItem, xValue ) ( ( pxListItem )->xItemValue = ( xValue ) ) + +/* + * Access macro to retrieve the value of the list item. The value can + * represent anything - for example the priority of a task, or the time at + * which a task should be unblocked. + * + * \page listGET_LIST_ITEM_VALUE listGET_LIST_ITEM_VALUE + * \ingroup LinkedList + */ +#define listGET_LIST_ITEM_VALUE( pxListItem ) ( ( pxListItem )->xItemValue ) + +/* + * Access macro to retrieve the value of the list item at the head of a given + * list. + * + * \page listGET_ITEM_VALUE_OF_HEAD_ENTRY listGET_ITEM_VALUE_OF_HEAD_ENTRY + * \ingroup LinkedList + */ +#define listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxList ) ( ( ( pxList )->xListEnd ).pxNext->xItemValue ) + +/* + * Return the list item at the head of the list. + * + * \page listGET_HEAD_ENTRY listGET_HEAD_ENTRY + * \ingroup LinkedList + */ +#define listGET_HEAD_ENTRY( pxList ) ( ( ( pxList )->xListEnd ).pxNext ) + +/* + * Return the next list item after the given list item. + * + * \page listGET_NEXT listGET_NEXT + * \ingroup LinkedList + */ +#define listGET_NEXT( pxListItem ) ( ( pxListItem )->pxNext ) + +/* + * Return the list item that marks the end of the list. + * + * \page listGET_END_MARKER listGET_END_MARKER + * \ingroup LinkedList + */ +#define listGET_END_MARKER( pxList ) ( ( ListItem_t const * ) ( &( ( pxList )->xListEnd ) ) ) + +/* + * Access macro to determine if a list contains any items. The macro will + * only have the value true if the list is empty.
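+ *
+ * For example (a sketch only; the initialisation and insertion functions used
+ * here are declared further down this file, and xTCB stands in for any
+ * application-owned object):
+ *
+ *    List_t xList;
+ *    ListItem_t xItem;
+ *
+ *    vListInitialise( &xList );
+ *    vListInitialiseItem( &xItem );
+ *
+ *    listSET_LIST_ITEM_OWNER( &xItem, &xTCB );   // Back-link to the owner.
+ *    listSET_LIST_ITEM_VALUE( &xItem, 10 );      // Sort key used by vListInsert().
+ *    vListInsert( &xList, &xItem );
+ *
+ *    // The list now contains one item, so it is no longer empty.
+ *    configASSERT( listLIST_IS_EMPTY( &xList ) == pdFALSE );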
+ * + * \page listLIST_IS_EMPTY listLIST_IS_EMPTY + * \ingroup LinkedList + */ +#define listLIST_IS_EMPTY( pxList ) ( ( BaseType_t ) ( ( pxList )->uxNumberOfItems == ( UBaseType_t ) 0 ) ) + +/* + * Access macro to return the number of items in the list. + */ +#define listCURRENT_LIST_LENGTH( pxList ) ( ( pxList )->uxNumberOfItems ) + +/* + * Access function to obtain the owner of the next entry in a list. + * + * The list member pxIndex is used to walk through a list. Calling + * listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list + * and returns that entry's pxOwner parameter. Using multiple calls to this + * function it is therefore possible to move through every item contained in + * a list. + * + * The pxOwner parameter of a list item is a pointer to the object that owns + * the list item. In the scheduler this is normally a task control block. + * The pxOwner parameter effectively creates a two way link between the list + * item and its owner. + * + * @param pxTCB pxTCB is set to the address of the owner of the next list item. + * @param pxList The list from which the next item owner is to be returned. + * + * \page listGET_OWNER_OF_NEXT_ENTRY listGET_OWNER_OF_NEXT_ENTRY + * \ingroup LinkedList + */ +#define listGET_OWNER_OF_NEXT_ENTRY( pxTCB, pxList ) \ +{ \ +List_t * const pxConstList = ( pxList ); \ + /* Increment the index to the next item and return the item, ensuring */ \ + /* we don't return the marker used at the end of the list. */ \ + ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ + if( ( void * ) ( pxConstList )->pxIndex == ( void * ) &( ( pxConstList )->xListEnd ) ) \ + { \ + ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ + } \ + ( pxTCB ) = ( pxConstList )->pxIndex->pvOwner; \ +} + + +/* + * Access function to obtain the owner of the first entry in a list. Lists + * are normally sorted in ascending item value order. + * + * This function returns the pxOwner member of the first item in the list. + * The pxOwner parameter of a list item is a pointer to the object that owns + * the list item. In the scheduler this is normally a task control block. + * The pxOwner parameter effectively creates a two way link between the list + * item and its owner. + * + * @param pxList The list from which the owner of the head item is to be + * returned. + * + * \page listGET_OWNER_OF_HEAD_ENTRY listGET_OWNER_OF_HEAD_ENTRY + * \ingroup LinkedList + */ +#define listGET_OWNER_OF_HEAD_ENTRY( pxList ) ( (&( ( pxList )->xListEnd ))->pxNext->pvOwner ) + +/* + * Check to see if a list item is within a list. The list item maintains a + * "container" pointer that points to the list it is in. All this macro does + * is check to see if the container and the list match. + * + * @param pxList The list we want to know if the list item is within. + * @param pxListItem The list item we want to know if is in the list. + * @return pdTRUE if the list item is in the list, otherwise pdFALSE. + */ +#define listIS_CONTAINED_WITHIN( pxList, pxListItem ) ( ( BaseType_t ) ( ( pxListItem )->pvContainer == ( void * ) ( pxList ) ) ) + +/* + * Return the list a list item is contained within (referenced from). + * + * @param pxListItem The list item being queried. 
+ * @return A pointer to the List_t object that references the pxListItem. + */ +#define listLIST_ITEM_CONTAINER( pxListItem ) ( ( pxListItem )->pvContainer ) + +/* + * This provides a crude means of knowing if a list has been initialised, as + * pxList->xListEnd.xItemValue is set to portMAX_DELAY by the vListInitialise() + * function. + */ +#define listLIST_IS_INITIALISED( pxList ) ( ( pxList )->xListEnd.xItemValue == portMAX_DELAY ) + +/* + * Must be called before a list is used! This initialises all the members + * of the list structure and inserts the xListEnd item into the list as a + * marker to the back of the list. + * + * @param pxList Pointer to the list being initialised. + * + * \page vListInitialise vListInitialise + * \ingroup LinkedList + */ +void vListInitialise( List_t * const pxList ) PRIVILEGED_FUNCTION; + +/* + * Must be called before a list item is used. This sets the list container to + * null so the item does not think that it is already contained in a list. + * + * @param pxItem Pointer to the list item being initialised. + * + * \page vListInitialiseItem vListInitialiseItem + * \ingroup LinkedList + */ +void vListInitialiseItem( ListItem_t * const pxItem ) PRIVILEGED_FUNCTION; + +/* + * Insert a list item into a list. The item will be inserted into the list in + * a position determined by its item value (ascending item value order). + * + * @param pxList The list into which the item is to be inserted. + * + * @param pxNewListItem The item that is to be placed in the list. + * + * \page vListInsert vListInsert + * \ingroup LinkedList + */ +void vListInsert( List_t * const pxList, ListItem_t * const pxNewListItem ) PRIVILEGED_FUNCTION; + +/* + * Insert a list item into a list. The item will be inserted in a position + * such that it will be the last item within the list returned by multiple + * calls to listGET_OWNER_OF_NEXT_ENTRY. + * + * The list member pxIndex is used to walk through a list. Calling + * listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list. + * Placing an item in a list using vListInsertEnd effectively places the item + * in the list position pointed to by pxIndex. This means that every other + * item within the list will be returned by listGET_OWNER_OF_NEXT_ENTRY before + * the pxIndex parameter again points to the item being inserted. + * + * @param pxList The list into which the item is to be inserted. + * + * @param pxNewListItem The list item to be inserted into the list. + * + * \page vListInsertEnd vListInsertEnd + * \ingroup LinkedList + */ +void vListInsertEnd( List_t * const pxList, ListItem_t * const pxNewListItem ) PRIVILEGED_FUNCTION; + +/* + * Remove an item from a list. The list item has a pointer to the list that + * it is in, so only the list item need be passed into the function. + * + * @param pxItemToRemove The item to be removed. The item will remove itself from + * the list pointed to by its pvContainer member. + * + * @return The number of items that remain in the list after the list item has + * been removed.
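+ *
+ * For example (a sketch only, continuing the insertion sketch given earlier in
+ * this file; xItem is assumed to be the only item in its list):
+ *
+ *    UBaseType_t uxItemsRemaining;
+ *
+ *    // The item knows which list it is in, so only the item is passed in.
+ *    uxItemsRemaining = uxListRemove( &xItem );
+ *
+ *    if( uxItemsRemaining == ( UBaseType_t ) 0 )
+ *    {
+ *        // xItem was the last item - its list is now empty.
+ *    }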
+ * + * \page uxListRemove uxListRemove + * \ingroup LinkedList + */ +UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove ) PRIVILEGED_FUNCTION; + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h new file mode 100644 index 000000000..8be974041 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h @@ -0,0 +1,779 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +/* + * Message buffers build functionality on top of FreeRTOS stream buffers. + * Whereas stream buffers are used to send a continuous stream of data from one + * task or interrupt to another, message buffers are used to send variable + * length discrete messages from one task or interrupt to another. Their + * implementation is lightweight, making them particularly suited for interrupt + * to task and core to core communication scenarios. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xMessageBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xMessageBufferReceive()) inside a critical section and set the receive + * timeout to 0. + * + * Message buffers hold variable length messages.
To enable that, when a + * message is written to the message buffer an additional sizeof( size_t ) bytes + * are also written to store the message's length (that happens internally, with + * the API function). sizeof( size_t ) is typically 4 bytes on a 32-bit + * architecture, so writing a 10 byte message to a message buffer on a 32-bit + * architecture will actually reduce the available space in the message buffer + * by 14 bytes (10 bytes are used by the message, and 4 bytes to hold the length + * of the message). + */ + +#ifndef FREERTOS_MESSAGE_BUFFER_H +#define FREERTOS_MESSAGE_BUFFER_H + +/* Message buffers are built on top of stream buffers. */ +#include "stream_buffer.h" + +#if defined( __cplusplus ) +extern "C" { +#endif + +/** + * Type by which message buffers are referenced. For example, a call to + * xMessageBufferCreate() returns a MessageBufferHandle_t variable that can + * then be used as a parameter to xMessageBufferSend(), xMessageBufferReceive(), + * etc. + */ +typedef void * MessageBufferHandle_t; + +/*-----------------------------------------------------------*/ + +/** + * message_buffer.h + * +
+MessageBufferHandle_t xMessageBufferCreate( size_t xBufferSizeBytes ); ++ * + * Creates a new message buffer using dynamically allocated memory. See + * xMessageBufferCreateStatic() for a version that uses statically allocated + * memory (memory that is allocated at compile time). + * + * configSUPPORT_DYNAMIC_ALLOCATION must be set to 1 or left undefined in + * FreeRTOSConfig.h for xMessageBufferCreate() to be available. + * + * @param xBufferSizeBytes The total number of bytes (not messages) the message + * buffer will be able to hold at any one time. When a message is written to + * the message buffer an additional sizeof( size_t ) bytes are also written to + * store the message's length. sizeof( size_t ) is typically 4 bytes on a + * 32-bit architecture, so on most 32-bit architectures a 10 byte message will + * take up 14 bytes of message buffer space. + * + * @return If NULL is returned, then the message buffer cannot be created + * because there is insufficient heap memory available for FreeRTOS to allocate + * the message buffer data structures and storage area. A non-NULL value being + * returned indicates that the message buffer has been created successfully - + * the returned value should be stored as the handle to the created message + * buffer. + * + * Example use: +
+ +void vAFunction( void ) +{ +MessageBufferHandle_t xMessageBuffer; +const size_t xMessageBufferSizeBytes = 100; + + // Create a message buffer that can hold 100 bytes. The memory used to hold + // both the message buffer structure and the messages themselves is allocated + // dynamically. Each message added to the buffer consumes an additional 4 + // bytes which are used to hold the length of the message. + xMessageBuffer = xMessageBufferCreate( xMessageBufferSizeBytes ); + + if( xMessageBuffer == NULL ) + { + // There was not enough heap memory space available to create the + // message buffer. + } + else + { + // The message buffer was created successfully and can now be used. + } +} + ++ * \defgroup xMessageBufferCreate xMessageBufferCreate + * \ingroup MessageBufferManagement + */ +#define xMessageBufferCreate( xBufferSizeBytes ) ( MessageBufferHandle_t ) xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE ) + +/** + * message_buffer.h + * +
+MessageBufferHandle_t xMessageBufferCreateStatic( size_t xBufferSizeBytes, + uint8_t *pucMessageBufferStorageArea, + StaticMessageBuffer_t *pxStaticMessageBuffer ); ++ * Creates a new message buffer using statically allocated memory. See + * xMessageBufferCreate() for a version that uses dynamically allocated memory. + * + * @param xBufferSizeBytes The size, in bytes, of the buffer pointed to by the + * pucMessageBufferStorageArea parameter. When a message is written to the + * message buffer an additional sizeof( size_t ) bytes are also written to store + * the message's length. sizeof( size_t ) is typically 4 bytes on a 32-bit + * architecture, so on most 32-bit architectures a 10 byte message will take up + * 14 bytes of message buffer space. The maximum number of bytes that can be + * stored in the message buffer is actually (xBufferSizeBytes - 1). + * + * @param pucMessageBufferStorageArea Must point to a uint8_t array that is at + * least xBufferSizeBytes + 1 big. This is the array to which messages are + * copied when they are written to the message buffer. + * + * @param pxStaticMessageBuffer Must point to a variable of type + * StaticMessageBuffer_t, which will be used to hold the message buffer's data + * structure. + * + * @return If the message buffer is created successfully then a handle to the + * created message buffer is returned. If either pucMessageBufferStorageArea or + * pxStaticMessageBuffer is NULL then NULL is returned. + * + * Example use: +
+ +// Used to dimension the array used to hold the messages. The available space +// will actually be one less than this, so 999. +#define STORAGE_SIZE_BYTES 1000 + +// Defines the memory that will actually hold the messages within the message +// buffer. +static uint8_t ucStorageBuffer[ STORAGE_SIZE_BYTES ]; + +// The variable used to hold the message buffer structure. +StaticMessageBuffer_t xMessageBufferStruct; + +void MyFunction( void ) +{ +MessageBufferHandle_t xMessageBuffer; + + xMessageBuffer = xMessageBufferCreateStatic( sizeof( ucStorageBuffer ), + ucStorageBuffer, + &xMessageBufferStruct ); + + // As neither the pucMessageBufferStorageArea or pxStaticMessageBuffer + // parameters were NULL, xMessageBuffer will not be NULL, and can be used to + // reference the created message buffer in other message buffer API calls. + + // Other code that uses the message buffer can go here. +} + ++ * \defgroup xMessageBufferCreateStatic xMessageBufferCreateStatic + * \ingroup MessageBufferManagement + */ +#define xMessageBufferCreateStatic( xBufferSizeBytes, pucMessageBufferStorageArea, pxStaticMessageBuffer ) ( MessageBufferHandle_t ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer ) + +/** + * message_buffer.h + * +
+size_t xMessageBufferSend( MessageBufferHandle_t xMessageBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ); ++ * + * Sends a discrete message to the message buffer. The message can be any + * length that fits within the buffer's free space, and is copied into the + * buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xMessageBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xMessageBufferReceive()) inside a critical section and set the receive + * block time to 0. + * + * Use xMessageBufferSend() to write to a message buffer from a task. Use + * xMessageBufferSendFromISR() to write to a message buffer from an interrupt + * service routine (ISR). + * + * @param xMessageBuffer The handle of the message buffer to which a message is + * being sent. + * + * @param pvTxData A pointer to the message that is to be copied into the + * message buffer. + * + * @param xDataLengthBytes The length of the message. That is, the number of + * bytes to copy from pvTxData into the message buffer. When a message is + * written to the message buffer an additional sizeof( size_t ) bytes are also + * written to store the message's length. sizeof( size_t ) is typically 4 bytes + * on a 32-bit architecture, so on most 32-bit architectures setting + * xDataLengthBytes to 20 will reduce the free space in the message buffer by 24 + * bytes (20 bytes of message data and 4 bytes to hold the message length). + * + * @param xTicksToWait The maximum amount of time the calling task should remain + * in the Blocked state to wait for enough space to become available in the + * message buffer, should the message buffer have insufficient space when + * xMessageBufferSend() is called. The calling task will never block if + * xTicksToWait is zero. The block time is specified in tick periods, so the + * absolute time it represents is dependent on the tick frequency. The macro + * pdMS_TO_TICKS() can be used to convert a time specified in milliseconds into + * a time specified in ticks. Setting xTicksToWait to portMAX_DELAY will cause + * the task to wait indefinitely (without timing out), provided + * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h. Tasks do not use any + * CPU time when they are in the Blocked state. + * + * @return The number of bytes written to the message buffer. If the call to + * xMessageBufferSend() times out before there was enough space to write the + * message into the message buffer then zero is returned. If the call did not + * time out then xDataLengthBytes is returned.
+ * + * Example use: ++void vAFunction( MessageBufferHandle_t xMessageBuffer ) +{ +size_t xBytesSent; +uint8_t ucArrayToSend[] = { 0, 1, 2, 3 }; +char *pcStringToSend = "String to send"; +const TickType_t x100ms = pdMS_TO_TICKS( 100 ); + + // Send an array to the message buffer, blocking for a maximum of 100ms to + // wait for enough space to be available in the message buffer. + xBytesSent = xMessageBufferSend( xMessageBuffer, ( void * ) ucArrayToSend, sizeof( ucArrayToSend ), x100ms ); + + if( xBytesSent != sizeof( ucArrayToSend ) ) + { + // The call to xMessageBufferSend() timed out before there was enough + // space in the buffer for the data to be written. + } + + // Send the string to the message buffer. Return immediately if there is + // not enough space in the buffer. + xBytesSent = xMessageBufferSend( xMessageBuffer, ( void * ) pcStringToSend, strlen( pcStringToSend ), 0 ); + + if( xBytesSent != strlen( pcStringToSend ) ) + { + // The string could not be added to the message buffer because there was + // not enough free space in the buffer. + } +} ++ * \defgroup xMessageBufferSend xMessageBufferSend + * \ingroup MessageBufferManagement + */ +#define xMessageBufferSend( xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait ) xStreamBufferSend( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait ) + +/** + * message_buffer.h + * ++size_t xMessageBufferSendFromISR( MessageBufferHandle_t xMessageBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + BaseType_t *pxHigherPriorityTaskWoken ); ++ * + * Interrupt safe version of the API function that sends a discrete message to + * the message buffer. The message can be any length that fits within the + * buffer's free space, and is copied into the buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xMessageBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xMessageBufferReceive()) inside a critical section and set the receive + * block time to 0. + * + * Use xMessageBufferSend() to write to a message buffer from a task. Use + * xMessageBufferSendFromISR() to write to a message buffer from an interrupt + * service routine (ISR). + * + * @param xMessageBuffer The handle of the message buffer to which a message is + * being sent. + * + * @param pvTxData A pointer to the message that is to be copied into the + * message buffer. + * + * @param xDataLengthBytes The length of the message. That is, the number of + * bytes to copy from pvTxData into the message buffer. When a message is + * written to the message buffer an additional sizeof( size_t ) bytes are also + * written to store the message's length.
sizeof( size_t ) is typically 4 bytes + * on a 32-bit architecture, so on most 32-bit architectures setting + * xDataLengthBytes to 20 will reduce the free space in the message buffer by 24 + * bytes (20 bytes of message data and 4 bytes to hold the message length). + * + * @param pxHigherPriorityTaskWoken It is possible that a message buffer will + * have a task blocked on it waiting for data. Calling + * xMessageBufferSendFromISR() can make data available, and so cause a task that + * was waiting for data to leave the Blocked state. If calling + * xMessageBufferSendFromISR() causes a task to leave the Blocked state, and the + * unblocked task has a priority higher than the currently executing task (the + * task that was interrupted), then, internally, xMessageBufferSendFromISR() + * will set *pxHigherPriorityTaskWoken to pdTRUE. If + * xMessageBufferSendFromISR() sets this value to pdTRUE, then normally a + * context switch should be performed before the interrupt is exited. This will + * ensure that the interrupt returns directly to the highest priority Ready + * state task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it + * is passed into the function. See the code example below for an example. + * + * @return The number of bytes actually written to the message buffer. If the + * message buffer didn't have enough free space for the message to be stored + * then 0 is returned, otherwise xDataLengthBytes is returned. + * + * Example use: ++// A message buffer that has already been created. +MessageBufferHandle_t xMessageBuffer; + +void vAnInterruptServiceRoutine( void ) +{ +size_t xBytesSent; +char *pcStringToSend = "String to send"; +BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE. + + // Attempt to send the string to the message buffer. + xBytesSent = xMessageBufferSendFromISR( xMessageBuffer, + ( void * ) pcStringToSend, + strlen( pcStringToSend ), + &xHigherPriorityTaskWoken ); + + if( xBytesSent != strlen( pcStringToSend ) ) + { + // The string could not be added to the message buffer because there was + // not enough free space in the buffer. + } + + // If xHigherPriorityTaskWoken was set to pdTRUE inside + // xMessageBufferSendFromISR() then a task that has a priority above the + // priority of the currently executing task was unblocked and a context + // switch should be performed to ensure the ISR returns to the unblocked + // task. In most FreeRTOS ports this is done by simply passing + // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the + // variable's value, and perform the context switch if necessary. Check the + // documentation for the port in use for port specific instructions. + taskYIELD_FROM_ISR( xHigherPriorityTaskWoken ); +} ++ * \defgroup xMessageBufferSendFromISR xMessageBufferSendFromISR + * \ingroup MessageBufferManagement + */ +#define xMessageBufferSendFromISR( xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken ) xStreamBufferSendFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken ) + +/** + * message_buffer.h + * ++size_t xMessageBufferReceive( MessageBufferHandle_t xMessageBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ); ++ * + * Receives a discrete message from a message buffer. Messages can be of + * variable length and are copied out of the buffer.
+ * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xMessageBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xMessageBufferReceive()) inside a critical section and set the receive + * block time to 0. + * + * Use xMessageBufferReceive() to read from a message buffer from a task. Use + * xMessageBufferReceiveFromISR() to read from a message buffer from an + * interrupt service routine (ISR). + * + * @param xMessageBuffer The handle of the message buffer from which a message + * is being received. + * + * @param pvRxData A pointer to the buffer into which the received message is + * to be copied. + * + * @param xBufferLengthBytes The length of the buffer pointed to by the pvRxData + * parameter. This sets the maximum length of the message that can be received. + * If xBufferLengthBytes is too small to hold the next message then the message + * will be left in the message buffer and 0 will be returned. + * + * @param xTicksToWait The maximum amount of time the task should remain in the + * Blocked state to wait for a message, should the message buffer be empty. + * xMessageBufferReceive() will return immediately if xTicksToWait is zero and + * the message buffer is empty. The block time is specified in tick periods, so + * the absolute time it represents is dependent on the tick frequency. The + * macro pdMS_TO_TICKS() can be used to convert a time specified in milliseconds + * into a time specified in ticks. Setting xTicksToWait to portMAX_DELAY will + * cause the task to wait indefinitely (without timing out), provided + * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h. Tasks do not use any + * CPU time when they are in the Blocked state. + * + * @return The length, in bytes, of the message read from the message buffer, if + * any. If xMessageBufferReceive() times out before a message became available + * then zero is returned. If the length of the message is greater than + * xBufferLengthBytes then the message will be left in the message buffer and + * zero is returned. + * + * Example use: ++void vAFunction( MessageBufferHandle_t xMessageBuffer ) +{ +uint8_t ucRxData[ 20 ]; +size_t xReceivedBytes; +const TickType_t xBlockTime = pdMS_TO_TICKS( 20 ); + + // Receive the next message from the message buffer. Wait in the Blocked + // state (so not using any CPU processing time) for a maximum of 20ms for + // a message to become available. + xReceivedBytes = xMessageBufferReceive( xMessageBuffer, + ( void * ) ucRxData, + sizeof( ucRxData ), + xBlockTime ); + + if( xReceivedBytes > 0 ) + { + // ucRxData contains a message that is xReceivedBytes long. Process + // the message here....
+ } +} ++ * \defgroup xMessageBufferReceive xMessageBufferReceive + * \ingroup MessageBufferManagement + */ +#define xMessageBufferReceive( xMessageBuffer, pvRxData, xBufferLengthBytes, xTicksToWait ) xStreamBufferReceive( ( StreamBufferHandle_t ) xMessageBuffer, pvRxData, xBufferLengthBytes, xTicksToWait ) + + +/** + * message_buffer.h + * ++size_t xMessageBufferReceiveFromISR( MessageBufferHandle_t xMessageBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + BaseType_t *pxHigherPriorityTaskWoken ); ++ * + * An interrupt safe version of the API function that receives a discrete + * message from a message buffer. Messages can be of variable length and are + * copied out of the buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xMessageBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xMessageBufferReceive()) inside a critical section and set the receive + * block time to 0. + * + * Use xMessageBufferReceive() to read from a message buffer from a task. Use + * xMessageBufferReceiveFromISR() to read from a message buffer from an + * interrupt service routine (ISR). + * + * @param xMessageBuffer The handle of the message buffer from which a message + * is being received. + * + * @param pvRxData A pointer to the buffer into which the received message is + * to be copied. + * + * @param xBufferLengthBytes The length of the buffer pointed to by the pvRxData + * parameter. This sets the maximum length of the message that can be received. + * If xBufferLengthBytes is too small to hold the next message then the message + * will be left in the message buffer and 0 will be returned. + * + * @param pxHigherPriorityTaskWoken It is possible that a message buffer will + * have a task blocked on it waiting for space to become available. Calling + * xMessageBufferReceiveFromISR() can make space available, and so cause a task + * that is waiting for space to leave the Blocked state. If calling + * xMessageBufferReceiveFromISR() causes a task to leave the Blocked state, and + * the unblocked task has a priority higher than the currently executing task + * (the task that was interrupted), then, internally, + * xMessageBufferReceiveFromISR() will set *pxHigherPriorityTaskWoken to pdTRUE. + * If xMessageBufferReceiveFromISR() sets this value to pdTRUE, then normally a + * context switch should be performed before the interrupt is exited. That will + * ensure the interrupt returns directly to the highest priority Ready state + * task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it is + * passed into the function. See the code example below for an example. + * + * @return The length, in bytes, of the message read from the message buffer, if + * any.
+ * + * Example use: ++// A message buffer that has already been created. +MessageBufferHandle_t xMessageBuffer; + +void vAnInterruptServiceRoutine( void ) +{ +uint8_t ucRxData[ 20 ]; +size_t xReceivedBytes; +BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE. + + // Receive the next message from the message buffer. + xReceivedBytes = xMessageBufferReceiveFromISR( xMessageBuffer, + ( void * ) ucRxData, + sizeof( ucRxData ), + &xHigherPriorityTaskWoken ); + + if( xReceivedBytes > 0 ) + { + // ucRxData contains a message that is xReceivedBytes long. Process + // the message here.... + } + + // If xHigherPriorityTaskWoken was set to pdTRUE inside + // xMessageBufferReceiveFromISR() then a task that has a priority above the + // priority of the currently executing task was unblocked and a context + // switch should be performed to ensure the ISR returns to the unblocked + // task. In most FreeRTOS ports this is done by simply passing + // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the + // variable's value, and perform the context switch if necessary. Check the + // documentation for the port in use for port specific instructions. + taskYIELD_FROM_ISR( xHigherPriorityTaskWoken ); +} ++ * \defgroup xMessageBufferReceiveFromISR xMessageBufferReceiveFromISR + * \ingroup MessageBufferManagement + */ +#define xMessageBufferReceiveFromISR( xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken ) xStreamBufferReceiveFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken ) + +/** + * message_buffer.h + * ++void vMessageBufferDelete( MessageBufferHandle_t xMessageBuffer ); ++ * + * Deletes a message buffer that was previously created using a call to + * xMessageBufferCreate() or xMessageBufferCreateStatic(). If the message + * buffer was created using dynamic memory (that is, by xMessageBufferCreate()), + * then the allocated memory is freed. + * + * A message buffer handle must not be used after the message buffer has been + * deleted. + * + * @param xMessageBuffer The handle of the message buffer to be deleted. + * + */ +#define vMessageBufferDelete( xMessageBuffer ) vStreamBufferDelete( ( StreamBufferHandle_t ) xMessageBuffer ) + +/** + * message_buffer.h ++BaseType_t xMessageBufferIsFull( MessageBufferHandle_t xMessageBuffer ) ); ++ * + * Tests to see if a message buffer is full. A message buffer is full if it + * cannot accept any more messages, of any size, until space is made available + * by a message being removed from the message buffer. + * + * @param xMessageBuffer The handle of the message buffer being queried. + * + * @return If the message buffer referenced by xMessageBuffer is full then + * pdTRUE is returned. Otherwise pdFALSE is returned. + */ +#define xMessageBufferIsFull( xMessageBuffer ) xStreamBufferIsFull( ( StreamBufferHandle_t ) xMessageBuffer ) + +/** + * message_buffer.h ++BaseType_t xMessageBufferIsEmpty( MessageBufferHandle_t xMessageBuffer ) ); ++ * + * Tests to see if a message buffer is empty (does not contain any messages). + * + * @param xMessageBuffer The handle of the message buffer being queried. + * + * @return If the message buffer referenced by xMessageBuffer is empty then + * pdTRUE is returned. Otherwise pdFALSE is returned.
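+ *
+ * For example (a sketch only; xMessageBuffer is assumed to have been created
+ * already):
+ *
+ *    if( xMessageBufferIsEmpty( xMessageBuffer ) == pdTRUE )
+ *    {
+ *        // Nothing is waiting in the buffer, so there is no message to drain.
+ *    }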
+ * + */ +#define xMessageBufferIsEmpty( xMessageBuffer ) xStreamBufferIsEmpty( ( StreamBufferHandle_t ) xMessageBuffer ) + +/** + * message_buffer.h ++BaseType_t xMessageBufferReset( MessageBufferHandle_t xMessageBuffer ); ++ * + * Resets a message buffer to its initial empty state, discarding any messages it + * contained. + * + * A message buffer can only be reset if there are no tasks blocked on it. + * + * @param xMessageBuffer The handle of the message buffer being reset. + * + * @return If the message buffer was reset then pdPASS is returned. If the + * message buffer could not be reset because either there was a task blocked on + * the message buffer to wait for space to become available, or to wait for a + * message to be available, then pdFAIL is returned. + * + * \defgroup xMessageBufferReset xMessageBufferReset + * \ingroup MessageBufferManagement + */ +#define xMessageBufferReset( xMessageBuffer ) xStreamBufferReset( ( StreamBufferHandle_t ) xMessageBuffer ) + + +/** + * message_buffer.h ++size_t xMessageBufferSpaceAvailable( MessageBufferHandle_t xMessageBuffer ) ); ++ * Returns the number of bytes of free space in the message buffer. + * + * @param xMessageBuffer The handle of the message buffer being queried. + * + * @return The number of bytes that can be written to the message buffer before + * the message buffer would be full. When a message is written to the message + * buffer an additional sizeof( size_t ) bytes are also written to store the + * message's length. sizeof( size_t ) is typically 4 bytes on a 32-bit + * architecture, so if xMessageBufferSpaceAvailable() returns 10, then the size + * of the largest message that can be written to the message buffer is 6 bytes. + * + * \defgroup xMessageBufferSpaceAvailable xMessageBufferSpaceAvailable + * \ingroup MessageBufferManagement + */ +#define xMessageBufferSpaceAvailable( xMessageBuffer ) xStreamBufferSpacesAvailable( ( StreamBufferHandle_t ) xMessageBuffer ) + +/** + * message_buffer.h + * ++BaseType_t xMessageBufferSendCompletedFromISR( MessageBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ); ++ * + * For advanced users only. + * + * The sbSEND_COMPLETED() macro is called from within the FreeRTOS APIs when + * data is sent to a message buffer or stream buffer. If there was a task that + * was blocked on the message or stream buffer waiting for data to arrive then + * the sbSEND_COMPLETED() macro sends a notification to the task to remove it + * from the Blocked state. xMessageBufferSendCompletedFromISR() does the same + * thing. It is provided to enable application writers to implement their own + * version of sbSEND_COMPLETED(), and MUST NOT BE USED AT ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer to which data was + * written. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xMessageBufferSendCompletedFromISR(). If calling + * xMessageBufferSendCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. + * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned.
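+ *
+ * For example (a sketch of the intended usage pattern only - see the cited
+ * MessageBufferAMP.c demo for a complete implementation; the handler name and
+ * the interrupt plumbing are hypothetical):
+ *
+ *    // Interrupt raised on this core after the other core wrote to
+ *    // xMessageBuffer and its sbSEND_COMPLETED() implementation triggered
+ *    // this interrupt.
+ *    void vCrossCoreInterruptHandler( void )
+ *    {
+ *    BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE.
+ *
+ *        // Unblock any task that was waiting for data in the buffer.
+ *        xMessageBufferSendCompletedFromISR( xMessageBuffer, &xHigherPriorityTaskWoken );
+ *        taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+ *    }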
+ * + * \defgroup xMessageBufferSendCompletedFromISR xMessageBufferSendCompletedFromISR + * \ingroup StreamBufferManagement + */ +#define xMessageBufferSendCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) xStreamBufferSendCompletedFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pxHigherPriorityTaskWoken ) + +/** + * message_buffer.h + * ++BaseType_t xMessageBufferReceiveCompletedFromISR( MessageBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ); ++ * + * For advanced users only. + * + * The sbRECEIVE_COMPLETED() macro is called from within the FreeRTOS APIs when + * data is read out of a message buffer or stream buffer. If there was a task + * that was blocked on the message or stream buffer waiting for space to become + * available then the sbRECEIVE_COMPLETED() macro sends a notification to the task to + * remove it from the Blocked state. xMessageBufferReceiveCompletedFromISR() + * does the same thing. It is provided to enable application writers to + * implement their own version of sbRECEIVE_COMPLETED(), and MUST NOT BE USED AT + * ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer from which data was + * read. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xMessageBufferReceiveCompletedFromISR(). If calling + * xMessageBufferReceiveCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. + * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned. + * + * \defgroup xMessageBufferReceiveCompletedFromISR xMessageBufferReceiveCompletedFromISR + * \ingroup StreamBufferManagement + */ +#define xMessageBufferReceiveCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) xStreamBufferReceiveCompletedFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pxHigherPriorityTaskWoken ) + +#if defined( __cplusplus ) +} /* extern "C" */ +#endif + +#endif /* !defined( FREERTOS_MESSAGE_BUFFER_H ) */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h new file mode 100644 index 000000000..a08d748fb --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h @@ -0,0 +1,155 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* + * When the MPU is used the standard (non MPU) API functions are mapped to + * equivalents that start "MPU_", the prototypes for which are defined in this + * header file. This will cause the application code to call the MPU_ version + * which wraps the non-MPU version with privilege promoting then demoting code, + * so the kernel code always runs with full privileges. + */ + + +#ifndef MPU_PROTOTYPES_H +#define MPU_PROTOTYPES_H + +/* MPU versions of tasks.h API functions. */ +BaseType_t MPU_xTaskCreate( TaskFunction_t pxTaskCode, const char * const pcName, const uint16_t usStackDepth, void * const pvParameters, UBaseType_t uxPriority, TaskHandle_t * const pxCreatedTask ); +TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode, const char * const pcName, const uint32_t ulStackDepth, void * const pvParameters, UBaseType_t uxPriority, StackType_t * const puxStackBuffer, StaticTask_t * const pxTaskBuffer ); +BaseType_t MPU_xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ); +BaseType_t MPU_xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ); +void MPU_vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions ); +void MPU_vTaskDelete( TaskHandle_t xTaskToDelete ); +void MPU_vTaskDelay( const TickType_t xTicksToDelay ); +void MPU_vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement ); +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ); +UBaseType_t MPU_uxTaskPriorityGet( TaskHandle_t xTask ); +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ); +void MPU_vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState ); +void MPU_vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority ); +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ); +void MPU_vTaskResume( TaskHandle_t xTaskToResume ); +void MPU_vTaskStartScheduler( void ); +void MPU_vTaskSuspendAll( void ); +BaseType_t MPU_xTaskResumeAll( void ); +TickType_t MPU_xTaskGetTickCount( void ); +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ); +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ); +TaskHandle_t MPU_xTaskGetHandle( const char *pcNameToQuery ); +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ); +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction ); +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ); +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue ); +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex ); +BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter ); +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ); +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t
uxArraySize, uint32_t * const pulTotalRunTime ); +void MPU_vTaskList( char * pcWriteBuffer ); +void MPU_vTaskGetRunTimeStats( char *pcWriteBuffer ); +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue ); +BaseType_t MPU_xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait ); +uint32_t MPU_ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait ); +BaseType_t MPU_xTaskNotifyStateClear( TaskHandle_t xTask ); +BaseType_t MPU_xTaskIncrementTick( void ); +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ); +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ); +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ); +void MPU_vTaskMissedYield( void ); +BaseType_t MPU_xTaskGetSchedulerState( void ); + +/* MPU versions of queue.h API functions. */ +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition ); +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ); +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ); +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ); +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ); +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ); +void MPU_vQueueDelete( QueueHandle_t xQueue ); +QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ); +QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue ); +QueueHandle_t MPU_xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ); +QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue ); +void* MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ); +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ); +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ); +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcName ); +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ); +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ); +QueueHandle_t MPU_xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ); +QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType ); +QueueSetHandle_t MPU_xQueueCreateSet( const UBaseType_t uxEventQueueLength ); +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ); +BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ); +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait ); +BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ); +void MPU_vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ); +UBaseType_t MPU_uxQueueGetQueueNumber( QueueHandle_t xQueue ); +uint8_t MPU_ucQueueGetQueueType( QueueHandle_t xQueue ); + +/* MPU versions of timers.h API functions. 
*/ +TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName, const TickType_t xTimerPeriodInTicks, const UBaseType_t uxAutoReload, void * const pvTimerID, TimerCallbackFunction_t pxCallbackFunction ); +TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName, const TickType_t xTimerPeriodInTicks, const UBaseType_t uxAutoReload, void * const pvTimerID, TimerCallbackFunction_t pxCallbackFunction, StaticTimer_t *pxTimerBuffer ); +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ); +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ); +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ); +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ); +BaseType_t MPU_xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, TickType_t xTicksToWait ); +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ); +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ); +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ); +BaseType_t MPU_xTimerCreateTimerTask( void ); +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, const BaseType_t xCommandID, const TickType_t xOptionalValue, BaseType_t * const pxHigherPriorityTaskWoken, const TickType_t xTicksToWait ); + +/* MPU versions of event_group.h API functions. */ +EventGroupHandle_t MPU_xEventGroupCreate( void ); +EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer ); +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait ); +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ); +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ); +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait ); +void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ); +UBaseType_t MPU_uxEventGroupGetNumber( void* xEventGroup ); + +/* MPU versions of message/stream_buffer.h API functions. 
*/ +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, const void *pvTxData, size_t xDataLengthBytes, TickType_t xTicksToWait ); +size_t MPU_xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer, const void *pvTxData, size_t xDataLengthBytes, BaseType_t * const pxHigherPriorityTaskWoken ); +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, void *pvRxData, size_t xBufferLengthBytes, TickType_t xTicksToWait ); +size_t MPU_xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, void *pvRxData, size_t xBufferLengthBytes, BaseType_t * const pxHigherPriorityTaskWoken ); +void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ); +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ); +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ); +BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ); +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ); +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ); +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ); +StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer ); +StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer, uint8_t * const pucStreamBufferStorageArea, StaticStreamBuffer_t * const pxStaticStreamBuffer ); + + + +#endif /* MPU_PROTOTYPES_H */ + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h new file mode 100644 index 000000000..1e97ae4c8 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h @@ -0,0 +1,181 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef MPU_WRAPPERS_H +#define MPU_WRAPPERS_H + +/* This file redefines API functions to be called through a wrapper macro, but +only for ports that are using the MPU. 
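For example (an illustrative note added in review, not part of the original sources): with portUSING_MPU_WRAPPERS defined, application code written as xTaskCreate( vATask, "A", 128, NULL, 1, NULL ) compiles, via the mappings below, to MPU_xTaskCreate( vATask, "A", 128, NULL, 1, NULL ); the MPU_ version raises privilege, calls the kernel implementation, then returns the caller to its previous privilege level.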
*/ +#ifdef portUSING_MPU_WRAPPERS + + /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE will be defined when this file is + included from queue.c or task.c to prevent it from having an effect within + those files. */ + #ifndef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + + /* + * Map standard (non MPU) API functions to equivalents that start + * "MPU_". This will cause the application code to call the MPU_ + * version, which wraps the non-MPU version with privilege promoting + * then demoting code, so the kernel code always runs with full + * privileges. + */ + + /* Map standard tasks.h API functions to the MPU equivalents. */ + #define xTaskCreate MPU_xTaskCreate + #define xTaskCreateStatic MPU_xTaskCreateStatic + #define xTaskCreateRestricted MPU_xTaskCreateRestricted + #define vTaskAllocateMPURegions MPU_vTaskAllocateMPURegions + #define vTaskDelete MPU_vTaskDelete + #define vTaskDelay MPU_vTaskDelay + #define vTaskDelayUntil MPU_vTaskDelayUntil + #define xTaskAbortDelay MPU_xTaskAbortDelay + #define uxTaskPriorityGet MPU_uxTaskPriorityGet + #define eTaskGetState MPU_eTaskGetState + #define vTaskGetInfo MPU_vTaskGetInfo + #define vTaskPrioritySet MPU_vTaskPrioritySet + #define vTaskSuspend MPU_vTaskSuspend + #define vTaskResume MPU_vTaskResume + #define vTaskSuspendAll MPU_vTaskSuspendAll + #define xTaskResumeAll MPU_xTaskResumeAll + #define xTaskGetTickCount MPU_xTaskGetTickCount + #define uxTaskGetNumberOfTasks MPU_uxTaskGetNumberOfTasks + #define pcTaskGetName MPU_pcTaskGetName + #define xTaskGetHandle MPU_xTaskGetHandle + #define uxTaskGetStackHighWaterMark MPU_uxTaskGetStackHighWaterMark + #define vTaskSetApplicationTaskTag MPU_vTaskSetApplicationTaskTag + #define xTaskGetApplicationTaskTag MPU_xTaskGetApplicationTaskTag + #define vTaskSetThreadLocalStoragePointer MPU_vTaskSetThreadLocalStoragePointer + #define pvTaskGetThreadLocalStoragePointer MPU_pvTaskGetThreadLocalStoragePointer + #define xTaskCallApplicationTaskHook MPU_xTaskCallApplicationTaskHook + #define xTaskGetIdleTaskHandle MPU_xTaskGetIdleTaskHandle + #define uxTaskGetSystemState MPU_uxTaskGetSystemState + #define vTaskList MPU_vTaskList + #define vTaskGetRunTimeStats MPU_vTaskGetRunTimeStats + #define xTaskGenericNotify MPU_xTaskGenericNotify + #define xTaskNotifyWait MPU_xTaskNotifyWait + #define ulTaskNotifyTake MPU_ulTaskNotifyTake + #define xTaskNotifyStateClear MPU_xTaskNotifyStateClear + + #define xTaskGetCurrentTaskHandle MPU_xTaskGetCurrentTaskHandle + #define vTaskSetTimeOutState MPU_vTaskSetTimeOutState + #define xTaskCheckForTimeOut MPU_xTaskCheckForTimeOut + #define xTaskGetSchedulerState MPU_xTaskGetSchedulerState + + /* Map standard queue.h API functions to the MPU equivalents.
*/ + #define xQueueGenericSend MPU_xQueueGenericSend + #define xQueueReceive MPU_xQueueReceive + #define xQueuePeek MPU_xQueuePeek + #define xQueueSemaphoreTake MPU_xQueueSemaphoreTake + #define uxQueueMessagesWaiting MPU_uxQueueMessagesWaiting + #define uxQueueSpacesAvailable MPU_uxQueueSpacesAvailable + #define vQueueDelete MPU_vQueueDelete + #define xQueueCreateMutex MPU_xQueueCreateMutex + #define xQueueCreateMutexStatic MPU_xQueueCreateMutexStatic + #define xQueueCreateCountingSemaphore MPU_xQueueCreateCountingSemaphore + #define xQueueCreateCountingSemaphoreStatic MPU_xQueueCreateCountingSemaphoreStatic + #define xQueueGetMutexHolder MPU_xQueueGetMutexHolder + #define xQueueTakeMutexRecursive MPU_xQueueTakeMutexRecursive + #define xQueueGiveMutexRecursive MPU_xQueueGiveMutexRecursive + #define xQueueGenericCreate MPU_xQueueGenericCreate + #define xQueueGenericCreateStatic MPU_xQueueGenericCreateStatic + #define xQueueCreateSet MPU_xQueueCreateSet + #define xQueueAddToSet MPU_xQueueAddToSet + #define xQueueRemoveFromSet MPU_xQueueRemoveFromSet + #define xQueueSelectFromSet MPU_xQueueSelectFromSet + #define xQueueGenericReset MPU_xQueueGenericReset + + #if( configQUEUE_REGISTRY_SIZE > 0 ) + #define vQueueAddToRegistry MPU_vQueueAddToRegistry + #define vQueueUnregisterQueue MPU_vQueueUnregisterQueue + #define pcQueueGetName MPU_pcQueueGetName + #endif + + /* Map standard timer.h API functions to the MPU equivalents. */ + #define xTimerCreate MPU_xTimerCreate + #define xTimerCreateStatic MPU_xTimerCreateStatic + #define pvTimerGetTimerID MPU_pvTimerGetTimerID + #define vTimerSetTimerID MPU_vTimerSetTimerID + #define xTimerIsTimerActive MPU_xTimerIsTimerActive + #define xTimerGetTimerDaemonTaskHandle MPU_xTimerGetTimerDaemonTaskHandle + #define xTimerPendFunctionCall MPU_xTimerPendFunctionCall + #define pcTimerGetName MPU_pcTimerGetName + #define xTimerGetPeriod MPU_xTimerGetPeriod + #define xTimerGetExpiryTime MPU_xTimerGetExpiryTime + #define xTimerGenericCommand MPU_xTimerGenericCommand + + /* Map standard event_group.h API functions to the MPU equivalents. */ + #define xEventGroupCreate MPU_xEventGroupCreate + #define xEventGroupCreateStatic MPU_xEventGroupCreateStatic + #define xEventGroupWaitBits MPU_xEventGroupWaitBits + #define xEventGroupClearBits MPU_xEventGroupClearBits + #define xEventGroupSetBits MPU_xEventGroupSetBits + #define xEventGroupSync MPU_xEventGroupSync + #define vEventGroupDelete MPU_vEventGroupDelete + + /* Map standard message/stream_buffer.h API functions to the MPU + equivalents. 
*/ + #define xStreamBufferSend MPU_xStreamBufferSend + #define xStreamBufferSendFromISR MPU_xStreamBufferSendFromISR + #define xStreamBufferReceive MPU_xStreamBufferReceive + #define xStreamBufferReceiveFromISR MPU_xStreamBufferReceiveFromISR + #define vStreamBufferDelete MPU_vStreamBufferDelete + #define xStreamBufferIsFull MPU_xStreamBufferIsFull + #define xStreamBufferIsEmpty MPU_xStreamBufferIsEmpty + #define xStreamBufferReset MPU_xStreamBufferReset + #define xStreamBufferSpacesAvailable MPU_xStreamBufferSpacesAvailable + #define xStreamBufferBytesAvailable MPU_xStreamBufferBytesAvailable + #define xStreamBufferSetTriggerLevel MPU_xStreamBufferSetTriggerLevel + #define xStreamBufferGenericCreate MPU_xStreamBufferGenericCreate + #define xStreamBufferGenericCreateStatic MPU_xStreamBufferGenericCreateStatic + + + /* Remove the privileged function macro, but keep the PRIVILEGED_DATA + macro so applications can place data in privileged access sections + (useful when using statically allocated objects). */ + #define PRIVILEGED_FUNCTION + #define PRIVILEGED_DATA __attribute__((section("privileged_data"))) + + #else /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE */ + + /* Ensure API functions go in the privileged execution section. */ + #define PRIVILEGED_FUNCTION __attribute__((section("privileged_functions"))) + #define PRIVILEGED_DATA __attribute__((section("privileged_data"))) + + #endif /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE */ + +#else /* portUSING_MPU_WRAPPERS */ + + #define PRIVILEGED_FUNCTION + #define PRIVILEGED_DATA + #define portUSING_MPU_WRAPPERS 0 + +#endif /* portUSING_MPU_WRAPPERS */ + + +#endif /* MPU_WRAPPERS_H */ + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/portable.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/portable.h new file mode 100644 index 000000000..8aac353e2 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/portable.h @@ -0,0 +1,165 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/*----------------------------------------------------------- + * Portable layer API. Each function must be defined for each port. 
+ *----------------------------------------------------------*/ + +#ifndef PORTABLE_H +#define PORTABLE_H + +/* Each FreeRTOS port has a unique portmacro.h header file. Originally a +pre-processor definition was used to ensure the pre-processor found the correct +portmacro.h file for the port being used. That scheme was deprecated in favour +of setting the compiler's include path such that it found the correct +portmacro.h file - removing the need for the constant and allowing the +portmacro.h file to be located anywhere in relation to the port being used. +Purely for reasons of backward compatibility the old method is still valid, but +to make it clear that new projects should not use it, support for the port +specific constants has been moved into the deprecated_definitions.h header +file. */ +#include "deprecated_definitions.h" + +/* If portENTER_CRITICAL is not defined then including deprecated_definitions.h +did not result in a portmacro.h header file being included - and it should be +included here. In this case the path to the correct portmacro.h header file +must be set in the compiler's include path. */ +#ifndef portENTER_CRITICAL + #include "portmacro.h" +#endif + +#if portBYTE_ALIGNMENT == 32 + #define portBYTE_ALIGNMENT_MASK ( 0x001f ) +#endif + +#if portBYTE_ALIGNMENT == 16 + #define portBYTE_ALIGNMENT_MASK ( 0x000f ) +#endif + +#if portBYTE_ALIGNMENT == 8 + #define portBYTE_ALIGNMENT_MASK ( 0x0007 ) +#endif + +#if portBYTE_ALIGNMENT == 4 + #define portBYTE_ALIGNMENT_MASK ( 0x0003 ) +#endif + +#if portBYTE_ALIGNMENT == 2 + #define portBYTE_ALIGNMENT_MASK ( 0x0001 ) +#endif + +#if portBYTE_ALIGNMENT == 1 + #define portBYTE_ALIGNMENT_MASK ( 0x0000 ) +#endif + +#ifndef portBYTE_ALIGNMENT_MASK + #error "Invalid portBYTE_ALIGNMENT definition" +#endif + +#ifndef portNUM_CONFIGURABLE_REGIONS + #define portNUM_CONFIGURABLE_REGIONS 1 +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#include "mpu_wrappers.h" + +/* + * Setup the stack of a new task so it is ready to be placed under the + * scheduler control. The registers have to be placed on the stack in + * the order that the port expects to find them. + * + */ +#if( portUSING_MPU_WRAPPERS == 1 ) + StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters, BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION; +#else + StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) PRIVILEGED_FUNCTION; +#endif + +/* Used by heap_5.c. */ +typedef struct HeapRegion +{ + uint8_t *pucStartAddress; + size_t xSizeInBytes; +} HeapRegion_t; + +/* + * Used to define multiple heap regions for use by heap_5.c. This function + * must be called before any calls to pvPortMalloc() - note that creating a task, + * queue, semaphore, mutex, software timer, event group, etc. will result in + * pvPortMalloc being called. + * + * pxHeapRegions passes in an array of HeapRegion_t structures - each of which + * defines a region of memory that can be used as the heap. The array is + * terminated by a HeapRegion_t structure that has a size of 0. The region + * with the lowest start address must appear first in the array. + */ +void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ) PRIVILEGED_FUNCTION; + + +/* + * Map to the memory management routines required for the port.
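 * + * When heap_5.c provides these routines the heap must first be described to + * it; a minimal wiring sketch follows (added in review - the region array is + * hypothetical, not taken from this project): + * + * static uint8_t ucHeap1[ 0x8000 ]; + * + * static const HeapRegion_t xHeapRegions[] = + * { + * { ucHeap1, sizeof( ucHeap1 ) }, // Lowest start address first. + * { NULL, 0 } // Zero-sized terminating entry. + * }; + * + * // Must be called before the first call to pvPortMalloc(). + * vPortDefineHeapRegions( xHeapRegions );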
+ */ +void *pvPortMalloc( size_t xSize ) PRIVILEGED_FUNCTION; +void vPortFree( void *pv ) PRIVILEGED_FUNCTION; +void vPortInitialiseBlocks( void ) PRIVILEGED_FUNCTION; +size_t xPortGetFreeHeapSize( void ) PRIVILEGED_FUNCTION; +size_t xPortGetMinimumEverFreeHeapSize( void ) PRIVILEGED_FUNCTION; + +/* + * Setup the hardware ready for the scheduler to take control. This generally + * sets up a tick interrupt and sets timers for the correct tick frequency. + */ +BaseType_t xPortStartScheduler( void ) PRIVILEGED_FUNCTION; + +/* + * Undo any hardware/ISR setup that was performed by xPortStartScheduler() so + * the hardware is left in its original condition after the scheduler stops + * executing. + */ +void vPortEndScheduler( void ) PRIVILEGED_FUNCTION; + +/* + * The structures and methods of manipulating the MPU are contained within the + * port layer. + * + * Fills the xMPUSettings structure with the memory region information + * contained in xRegions. + */ +#if( portUSING_MPU_WRAPPERS == 1 ) + struct xMEMORY_REGION; + void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMORY_REGION * const xRegions, StackType_t *pxBottomOfStack, uint32_t ulStackDepth ) PRIVILEGED_FUNCTION; +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* PORTABLE_H */ + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h new file mode 100644 index 000000000..d4aa198a6 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h @@ -0,0 +1,124 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef PROJDEFS_H +#define PROJDEFS_H + +/* + * Defines the prototype to which task functions must conform. Defined in this + * file to ensure the type is known before portable.h is included. + */ +typedef void (*TaskFunction_t)( void * ); + +/* Converts a time in milliseconds to a time in ticks. This macro can be +overridden by a macro of the same name defined in FreeRTOSConfig.h in case the +definition here is not suitable for your application. 
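For example (a note added in review): with configTICK_RATE_HZ set to 1000, the definition below gives pdMS_TO_TICKS( 500 ) == 500 ticks, so a task can block for half a second with vTaskDelay( pdMS_TO_TICKS( 500 ) ) without hard-coding the tick rate.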
*/ +#ifndef pdMS_TO_TICKS + #define pdMS_TO_TICKS( xTimeInMs ) ( ( TickType_t ) ( ( ( TickType_t ) ( xTimeInMs ) * ( TickType_t ) configTICK_RATE_HZ ) / ( TickType_t ) 1000 ) ) +#endif + +#define pdFALSE ( ( BaseType_t ) 0 ) +#define pdTRUE ( ( BaseType_t ) 1 ) + +#define pdPASS ( pdTRUE ) +#define pdFAIL ( pdFALSE ) +#define errQUEUE_EMPTY ( ( BaseType_t ) 0 ) +#define errQUEUE_FULL ( ( BaseType_t ) 0 ) + +/* FreeRTOS error definitions. */ +#define errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY ( -1 ) +#define errQUEUE_BLOCKED ( -4 ) +#define errQUEUE_YIELD ( -5 ) + +/* Macros used for basic data corruption checks. */ +#ifndef configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES + #define configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES 0 +#endif + +#if( configUSE_16_BIT_TICKS == 1 ) + #define pdINTEGRITY_CHECK_VALUE 0x5a5a +#else + #define pdINTEGRITY_CHECK_VALUE 0x5a5a5a5aUL +#endif + +/* The following errno values are used by FreeRTOS+ components, not FreeRTOS +itself. */ +#define pdFREERTOS_ERRNO_NONE 0 /* No errors */ +#define pdFREERTOS_ERRNO_ENOENT 2 /* No such file or directory */ +#define pdFREERTOS_ERRNO_EINTR 4 /* Interrupted system call */ +#define pdFREERTOS_ERRNO_EIO 5 /* I/O error */ +#define pdFREERTOS_ERRNO_ENXIO 6 /* No such device or address */ +#define pdFREERTOS_ERRNO_EBADF 9 /* Bad file number */ +#define pdFREERTOS_ERRNO_EAGAIN 11 /* No more processes */ +#define pdFREERTOS_ERRNO_EWOULDBLOCK 11 /* Operation would block */ +#define pdFREERTOS_ERRNO_ENOMEM 12 /* Not enough memory */ +#define pdFREERTOS_ERRNO_EACCES 13 /* Permission denied */ +#define pdFREERTOS_ERRNO_EFAULT 14 /* Bad address */ +#define pdFREERTOS_ERRNO_EBUSY 16 /* Mount device busy */ +#define pdFREERTOS_ERRNO_EEXIST 17 /* File exists */ +#define pdFREERTOS_ERRNO_EXDEV 18 /* Cross-device link */ +#define pdFREERTOS_ERRNO_ENODEV 19 /* No such device */ +#define pdFREERTOS_ERRNO_ENOTDIR 20 /* Not a directory */ +#define pdFREERTOS_ERRNO_EISDIR 21 /* Is a directory */ +#define pdFREERTOS_ERRNO_EINVAL 22 /* Invalid argument */ +#define pdFREERTOS_ERRNO_ENOSPC 28 /* No space left on device */ +#define pdFREERTOS_ERRNO_ESPIPE 29 /* Illegal seek */ +#define pdFREERTOS_ERRNO_EROFS 30 /* Read only file system */ +#define pdFREERTOS_ERRNO_EUNATCH 42 /* Protocol driver not attached */ +#define pdFREERTOS_ERRNO_EBADE 50 /* Invalid exchange */ +#define pdFREERTOS_ERRNO_EFTYPE 79 /* Inappropriate file type or format */ +#define pdFREERTOS_ERRNO_ENMFILE 89 /* No more files */ +#define pdFREERTOS_ERRNO_ENOTEMPTY 90 /* Directory not empty */ +#define pdFREERTOS_ERRNO_ENAMETOOLONG 91 /* File or path name too long */ +#define pdFREERTOS_ERRNO_EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ +#define pdFREERTOS_ERRNO_ENOBUFS 105 /* No buffer space available */ +#define pdFREERTOS_ERRNO_ENOPROTOOPT 109 /* Protocol not available */ +#define pdFREERTOS_ERRNO_EADDRINUSE 112 /* Address already in use */ +#define pdFREERTOS_ERRNO_ETIMEDOUT 116 /* Connection timed out */ +#define pdFREERTOS_ERRNO_EINPROGRESS 119 /* Connection already in progress */ +#define pdFREERTOS_ERRNO_EALREADY 120 /* Socket already connected */ +#define pdFREERTOS_ERRNO_EADDRNOTAVAIL 125 /* Address not available */ +#define pdFREERTOS_ERRNO_EISCONN 127 /* Socket is already connected */ +#define pdFREERTOS_ERRNO_ENOTCONN 128 /* Socket is not connected */ +#define pdFREERTOS_ERRNO_ENOMEDIUM 135 /* No medium inserted */ +#define pdFREERTOS_ERRNO_EILSEQ 138 /* An invalid UTF-16 sequence was encountered. */ +#define pdFREERTOS_ERRNO_ECANCELED 140 /* Operation canceled. 
*/ + +/* The following endian values are used by FreeRTOS+ components, not FreeRTOS +itself. */ +#define pdFREERTOS_LITTLE_ENDIAN 0 +#define pdFREERTOS_BIG_ENDIAN 1 + +/* Re-defining endian values for generic naming. */ +#define pdLITTLE_ENDIAN pdFREERTOS_LITTLE_ENDIAN +#define pdBIG_ENDIAN pdFREERTOS_BIG_ENDIAN + + +#endif /* PROJDEFS_H */ + + + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/queue.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/queue.h new file mode 100644 index 000000000..28952052c --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/queue.h @@ -0,0 +1,1653 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#ifndef QUEUE_H +#define QUEUE_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h" must appear in source files before "include queue.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + + +/** + * Type by which queues are referenced. For example, a call to xQueueCreate() + * returns a QueueHandle_t variable that can then be used as a parameter to + * xQueueSend(), xQueueReceive(), etc. + */ +typedef void * QueueHandle_t; + +/** + * Type by which queue sets are referenced. For example, a call to + * xQueueCreateSet() returns an xQueueSet variable that can then be used as a + * parameter to xQueueSelectFromSet(), xQueueAddToSet(), etc. + */ +typedef void * QueueSetHandle_t; + +/** + * Queue sets can contain both queues and semaphores, so the + * QueueSetMemberHandle_t is defined as a type to be used where a parameter or + * return value can be either a QueueHandle_t or a SemaphoreHandle_t. + */ +typedef void * QueueSetMemberHandle_t; + +/* For internal use only. */ +#define queueSEND_TO_BACK ( ( BaseType_t ) 0 ) +#define queueSEND_TO_FRONT ( ( BaseType_t ) 1 ) +#define queueOVERWRITE ( ( BaseType_t ) 2 ) + +/* For internal use only. These definitions *must* match those in queue.c.
*/ +#define queueQUEUE_TYPE_BASE ( ( uint8_t ) 0U ) +#define queueQUEUE_TYPE_SET ( ( uint8_t ) 0U ) +#define queueQUEUE_TYPE_MUTEX ( ( uint8_t ) 1U ) +#define queueQUEUE_TYPE_COUNTING_SEMAPHORE ( ( uint8_t ) 2U ) +#define queueQUEUE_TYPE_BINARY_SEMAPHORE ( ( uint8_t ) 3U ) +#define queueQUEUE_TYPE_RECURSIVE_MUTEX ( ( uint8_t ) 4U ) + +/** + * queue. h + *+ QueueHandle_t xQueueCreate( + UBaseType_t uxQueueLength, + UBaseType_t uxItemSize + ); + *+ * + * Creates a new queue instance, and returns a handle by which the new queue + * can be referenced. + * + * Internally, within the FreeRTOS implementation, queues use two blocks of + * memory. The first block is used to hold the queue's data structures. The + * second block is used to hold items placed into the queue. If a queue is + * created using xQueueCreate() then both blocks of memory are automatically + * dynamically allocated inside the xQueueCreate() function. (see + * http://www.freertos.org/a00111.html). If a queue is created using + * xQueueCreateStatic() then the application writer must provide the memory that + * will get used by the queue. xQueueCreateStatic() therefore allows a queue to + * be created without using any dynamic memory allocation. + * + * http://www.FreeRTOS.org/Embedded-RTOS-Queues.html + * + * @param uxQueueLength The maximum number of items that the queue can contain. + * + * @param uxItemSize The number of bytes each item in the queue will require. + * Items are queued by copy, not by reference, so this is the number of bytes + * that will be copied for each posted item. Each item on the queue must be + * the same size. + * + * @return If the queue is successfully created then a handle to the newly + * created queue is returned. If the queue cannot be created then 0 is + * returned. + * + * Example usage: ++ struct AMessage + { + char ucMessageID; + char ucData[ 20 ]; + }; + + void vATask( void *pvParameters ) + { + QueueHandle_t xQueue1, xQueue2; + + // Create a queue capable of containing 10 uint32_t values. + xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) ); + if( xQueue1 == 0 ) + { + // Queue was not created and must not be used. + } + + // Create a queue capable of containing 10 pointers to AMessage structures. + // These should be passed by pointer as they contain a lot of data. + xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) ); + if( xQueue2 == 0 ) + { + // Queue was not created and must not be used. + } + + // ... Rest of task code. + } ++ * \defgroup xQueueCreate xQueueCreate + * \ingroup QueueManagement + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define xQueueCreate( uxQueueLength, uxItemSize ) xQueueGenericCreate( ( uxQueueLength ), ( uxItemSize ), ( queueQUEUE_TYPE_BASE ) ) +#endif + +/** + * queue. h + *+ QueueHandle_t xQueueCreateStatic( + UBaseType_t uxQueueLength, + UBaseType_t uxItemSize, + uint8_t *pucQueueStorageBuffer, + StaticQueue_t *pxQueueBuffer + ); + *+ * + * Creates a new queue instance, and returns a handle by which the new queue + * can be referenced. + * + * Internally, within the FreeRTOS implementation, queues use two blocks of + * memory. The first block is used to hold the queue's data structures. The + * second block is used to hold items placed into the queue. If a queue is + * created using xQueueCreate() then both blocks of memory are automatically + * dynamically allocated inside the xQueueCreate() function. (see + * http://www.freertos.org/a00111.html).
If a queue is created using + * xQueueCreateStatic() then the application writer must provide the memory that + * will get used by the queue. xQueueCreateStatic() therefore allows a queue to + * be created without using any dynamic memory allocation. + * + * http://www.FreeRTOS.org/Embedded-RTOS-Queues.html + * + * @param uxQueueLength The maximum number of items that the queue can contain. + * + * @param uxItemSize The number of bytes each item in the queue will require. + * Items are queued by copy, not by reference, so this is the number of bytes + * that will be copied for each posted item. Each item on the queue must be + * the same size. + * + * @param pucQueueStorageBuffer If uxItemSize is not zero then + * pucQueueStorageBuffer must point to a uint8_t array that is at least large + * enough to hold the maximum number of items that can be in the queue at any + * one time - which is ( uxQueueLength * uxItemSize ) bytes. If uxItemSize is + * zero then pucQueueStorageBuffer can be NULL. + * + * @param pxQueueBuffer Must point to a variable of type StaticQueue_t, which + * will be used to hold the queue's data structure. + * + * @return If the queue is created then a handle to the created queue is + * returned. If pxQueueBuffer is NULL then NULL is returned. + * + * Example usage: ++ struct AMessage + { + char ucMessageID; + char ucData[ 20 ]; + }; + + #define QUEUE_LENGTH 10 + #define ITEM_SIZE sizeof( uint32_t ) + + // xQueueBuffer will hold the queue structure. + StaticQueue_t xQueueBuffer; + + // ucQueueStorage will hold the items posted to the queue. Must be at least + // [(queue length) * ( queue item size)] bytes long. + uint8_t ucQueueStorage[ QUEUE_LENGTH * ITEM_SIZE ]; + + void vATask( void *pvParameters ) + { + QueueHandle_t xQueue1; + + // Create a queue capable of containing 10 uint32_t values. + xQueue1 = xQueueCreateStatic( QUEUE_LENGTH, // The number of items the queue can hold. + ITEM_SIZE, // The size of each item in the queue. + &( ucQueueStorage[ 0 ] ), // The buffer that will hold the items in the queue. + &xQueueBuffer ); // The buffer that will hold the queue structure. + + // The queue is guaranteed to be created successfully as no dynamic memory + // allocation is used. Therefore xQueue1 is now a handle to a valid queue. + + // ... Rest of task code. + } ++ * \defgroup xQueueCreateStatic xQueueCreateStatic + * \ingroup QueueManagement + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xQueueCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxQueueBuffer ) xQueueGenericCreateStatic( ( uxQueueLength ), ( uxItemSize ), ( pucQueueStorage ), ( pxQueueBuffer ), ( queueQUEUE_TYPE_BASE ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * queue. h + *+ BaseType_t xQueueSendToFront( + QueueHandle_t xQueue, + const void *pvItemToQueue, + TickType_t xTicksToWait + ); + *+ * + * Post an item to the front of a queue. The item is queued by copy, not by + * reference. This function must not be called from an interrupt service + * routine. See xQueueSendFromISR () for an alternative which may be used + * in an ISR. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area.
+ * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for space to become available on the queue, should it already + * be full. The call will return immediately if this is set to 0 and the + * queue is full. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * + * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL. + * + * Example usage: ++ struct AMessage + { + char ucMessageID; + char ucData[ 20 ]; + } xMessage; + + uint32_t ulVar = 10UL; + + void vATask( void *pvParameters ) + { + QueueHandle_t xQueue1, xQueue2; + struct AMessage *pxMessage; + + // Create a queue capable of containing 10 uint32_t values. + xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) ); + + // Create a queue capable of containing 10 pointers to AMessage structures. + // These should be passed by pointer as they contain a lot of data. + xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) ); + + // ... + + if( xQueue1 != 0 ) + { + // Send an uint32_t. Wait for 10 ticks for space to become + // available if necessary. + if( xQueueSendToFront( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS ) + { + // Failed to post the message, even after 10 ticks. + } + } + + if( xQueue2 != 0 ) + { + // Send a pointer to a struct AMessage object. Don't block if the + // queue is already full. + pxMessage = & xMessage; + xQueueSendToFront( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 ); + } + + // ... Rest of task code. + } ++ * \defgroup xQueueSend xQueueSend + * \ingroup QueueManagement + */ +#define xQueueSendToFront( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_FRONT ) + +/** + * queue. h + *+ BaseType_t xQueueSendToBack( + QueueHandle_t xQueue, + const void *pvItemToQueue, + TickType_t xTicksToWait + ); + *+ * + * This is a macro that calls xQueueGenericSend(). + * + * Post an item to the back of a queue. The item is queued by copy, not by + * reference. This function must not be called from an interrupt service + * routine. See xQueueSendFromISR () for an alternative which may be used + * in an ISR. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for space to become available on the queue, should it already + * be full. The call will return immediately if this is set to 0 and the queue + * is full. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * + * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL. + * + * Example usage: ++ struct AMessage + { + char ucMessageID; + char ucData[ 20 ]; + } xMessage; + + uint32_t ulVar = 10UL; + + void vATask( void *pvParameters ) + { + QueueHandle_t xQueue1, xQueue2; + struct AMessage *pxMessage; + + // Create a queue capable of containing 10 uint32_t values. + xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) ); + + // Create a queue capable of containing 10 pointers to AMessage structures. + // These should be passed by pointer as they contain a lot of data. 
+ xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) ); + + // ... + + if( xQueue1 != 0 ) + { + // Send an uint32_t. Wait for 10 ticks for space to become + // available if necessary. + if( xQueueSendToBack( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS ) + { + // Failed to post the message, even after 10 ticks. + } + } + + if( xQueue2 != 0 ) + { + // Send a pointer to a struct AMessage object. Don't block if the + // queue is already full. + pxMessage = & xMessage; + xQueueSendToBack( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 ); + } + + // ... Rest of task code. + } ++ * \defgroup xQueueSend xQueueSend + * \ingroup QueueManagement + */ +#define xQueueSendToBack( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK ) + +/** + * queue. h + *+ BaseType_t xQueueSend( + QueueHandle_t xQueue, + const void * pvItemToQueue, + TickType_t xTicksToWait + ); + *+ * + * This is a macro that calls xQueueGenericSend(). It is included for + * backward compatibility with versions of FreeRTOS.org that did not + * include the xQueueSendToFront() and xQueueSendToBack() macros. It is + * equivalent to xQueueSendToBack(). + * + * Post an item on a queue. The item is queued by copy, not by reference. + * This function must not be called from an interrupt service routine. + * See xQueueSendFromISR () for an alternative which may be used in an ISR. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for space to become available on the queue, should it already + * be full. The call will return immediately if this is set to 0 and the + * queue is full. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * + * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL. + * + * Example usage: ++ struct AMessage + { + char ucMessageID; + char ucData[ 20 ]; + } xMessage; + + uint32_t ulVar = 10UL; + + void vATask( void *pvParameters ) + { + QueueHandle_t xQueue1, xQueue2; + struct AMessage *pxMessage; + + // Create a queue capable of containing 10 uint32_t values. + xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) ); + + // Create a queue capable of containing 10 pointers to AMessage structures. + // These should be passed by pointer as they contain a lot of data. + xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) ); + + // ... + + if( xQueue1 != 0 ) + { + // Send an uint32_t. Wait for 10 ticks for space to become + // available if necessary. + if( xQueueSend( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS ) + { + // Failed to post the message, even after 10 ticks. + } + } + + if( xQueue2 != 0 ) + { + // Send a pointer to a struct AMessage object. Don't block if the + // queue is already full. + pxMessage = & xMessage; + xQueueSend( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 ); + } + + // ... Rest of task code. 
+ } ++ * \defgroup xQueueSend xQueueSend + * \ingroup QueueManagement + */ +#define xQueueSend( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK ) + +/** + * queue. h + *+ BaseType_t xQueueOverwrite( + QueueHandle_t xQueue, + const void * pvItemToQueue + ); + *+ * + * Only for use with queues that have a length of one - so the queue is either + * empty or full. + * + * Post an item on a queue. If the queue is already full then overwrite the + * value held in the queue. The item is queued by copy, not by reference. + * + * This function must not be called from an interrupt service routine. + * See xQueueOverwriteFromISR () for an alternative which may be used in an ISR. + * + * @param xQueue The handle of the queue to which the data is being sent. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @return xQueueOverwrite() is a macro that calls xQueueGenericSend(), and + * therefore has the same return values as xQueueSendToFront(). However, pdPASS + * is the only value that can be returned because xQueueOverwrite() will write + * to the queue even when the queue is already full. + * + * Example usage: ++ + void vFunction( void *pvParameters ) + { + QueueHandle_t xQueue; + uint32_t ulVarToSend, ulValReceived; + + // Create a queue to hold one uint32_t value. It is strongly + // recommended *not* to use xQueueOverwrite() on queues that can + // contain more than one value, and doing so will trigger an assertion + // if configASSERT() is defined. + xQueue = xQueueCreate( 1, sizeof( uint32_t ) ); + + // Write the value 10 to the queue using xQueueOverwrite(). + ulVarToSend = 10; + xQueueOverwrite( xQueue, &ulVarToSend ); + + // Peeking the queue should now return 10, but leave the value 10 in + // the queue. A block time of zero is used as it is known that the + // queue holds a value. + ulValReceived = 0; + xQueuePeek( xQueue, &ulValReceived, 0 ); + + if( ulValReceived != 10 ) + { + // Error unless the item was removed by a different task. + } + + // The queue is still full. Use xQueueOverwrite() to overwrite the + // value held in the queue with 100. + ulVarToSend = 100; + xQueueOverwrite( xQueue, &ulVarToSend ); + + // This time read from the queue, leaving the queue empty once more. + // A block time of 0 is used again. + xQueueReceive( xQueue, &ulValReceived, 0 ); + + // The value read should be the last value written, even though the + // queue was already full when the value was written. + if( ulValReceived != 100 ) + { + // Error! + } + + // ... +} ++ * \defgroup xQueueOverwrite xQueueOverwrite + * \ingroup QueueManagement + */ +#define xQueueOverwrite( xQueue, pvItemToQueue ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), 0, queueOVERWRITE ) + + +/** + * queue. h + *+ BaseType_t xQueueGenericSend( + QueueHandle_t xQueue, + const void * pvItemToQueue, + TickType_t xTicksToWait, + BaseType_t xCopyPosition + ); + *+ * + * It is preferred that the macros xQueueSend(), xQueueSendToFront() and + * xQueueSendToBack() are used in place of calling this function directly. + * + * Post an item on a queue. The item is queued by copy, not by reference. + * This function must not be called from an interrupt service routine. + * See xQueueSendFromISR () for an alternative which may be used in an ISR.
+ * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for space to become available on the queue, should it already + * be full. The call will return immediately if this is set to 0 and the + * queue is full. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * + * @param xCopyPosition Can take the value queueSEND_TO_BACK to place the + * item at the back of the queue, or queueSEND_TO_FRONT to place the item + * at the front of the queue (for high priority messages). + * + * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL. + * + * Example usage: ++ struct AMessage + { + char ucMessageID; + char ucData[ 20 ]; + } xMessage; + + uint32_t ulVar = 10UL; + + void vATask( void *pvParameters ) + { + QueueHandle_t xQueue1, xQueue2; + struct AMessage *pxMessage; + + // Create a queue capable of containing 10 uint32_t values. + xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) ); + + // Create a queue capable of containing 10 pointers to AMessage structures. + // These should be passed by pointer as they contain a lot of data. + xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) ); + + // ... + + if( xQueue1 != 0 ) + { + // Send an uint32_t. Wait for 10 ticks for space to become + // available if necessary. + if( xQueueGenericSend( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10, queueSEND_TO_BACK ) != pdPASS ) + { + // Failed to post the message, even after 10 ticks. + } + } + + if( xQueue2 != 0 ) + { + // Send a pointer to a struct AMessage object. Don't block if the + // queue is already full. + pxMessage = & xMessage; + xQueueGenericSend( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0, queueSEND_TO_BACK ); + } + + // ... Rest of task code. + } ++ * \defgroup xQueueSend xQueueSend + * \ingroup QueueManagement + */ +BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *+ BaseType_t xQueuePeek( + QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait + );+ * + * Receive an item from a queue without removing the item from the queue. + * The item is received by copy so a buffer of adequate size must be + * provided. The number of bytes copied into the buffer was defined when + * the queue was created. + * + * Successfully received items remain on the queue so will be returned again + * by the next call, or a call to xQueueReceive(). + * + * This macro must not be used in an interrupt service routine. See + * xQueuePeekFromISR() for an alternative that can be called from an interrupt + * service routine. + * + * @param xQueue The handle to the queue from which the item is to be + * received. + * + * @param pvBuffer Pointer to the buffer into which the received item will + * be copied. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for an item to receive should the queue be empty at the time + * of the call. 
The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * xQueuePeek() will return immediately if xTicksToWait is 0 and the queue + * is empty. + * + * @return pdTRUE if an item was successfully received from the queue, + * otherwise pdFALSE. + * + * Example usage: ++ struct AMessage + { + char ucMessageID; + char ucData[ 20 ]; + } xMessage; + + QueueHandle_t xQueue; + + // Task to create a queue and post a value. + void vATask( void *pvParameters ) + { + struct AMessage *pxMessage; + + // Create a queue capable of containing 10 pointers to AMessage structures. + // These should be passed by pointer as they contain a lot of data. + xQueue = xQueueCreate( 10, sizeof( struct AMessage * ) ); + if( xQueue == 0 ) + { + // Failed to create the queue. + } + + // ... + + // Send a pointer to a struct AMessage object. Don't block if the + // queue is already full. + pxMessage = & xMessage; + xQueueSend( xQueue, ( void * ) &pxMessage, ( TickType_t ) 0 ); + + // ... Rest of task code. + } + + // Task to peek the data from the queue. + void vADifferentTask( void *pvParameters ) + { + struct AMessage *pxRxedMessage; + + if( xQueue != 0 ) + { + // Peek a message on the created queue. Block for 10 ticks if a + // message is not immediately available. + if( xQueuePeek( xQueue, &( pxRxedMessage ), ( TickType_t ) 10 ) ) + { + // pxRxedMessage now points to the struct AMessage variable posted + // by vATask, but the item still remains on the queue. + } + } + + // ... Rest of task code. + } ++ * \defgroup xQueuePeek xQueuePeek + * \ingroup QueueManagement + */ +BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *+ BaseType_t xQueuePeekFromISR( + QueueHandle_t xQueue, + void *pvBuffer + );+ * + * A version of xQueuePeek() that can be called from an interrupt service + * routine (ISR). + * + * Receive an item from a queue without removing the item from the queue. + * The item is received by copy so a buffer of adequate size must be + * provided. The number of bytes copied into the buffer was defined when + * the queue was created. + * + * Successfully received items remain on the queue so will be returned again + * by the next call, or a call to xQueueReceive(). + * + * @param xQueue The handle to the queue from which the item is to be + * received. + * + * @param pvBuffer Pointer to the buffer into which the received item will + * be copied. + * + * @return pdTRUE if an item was successfully received from the queue, + * otherwise pdFALSE. + * + * \defgroup xQueuePeekFromISR xQueuePeekFromISR + * \ingroup QueueManagement + */ +BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *+ BaseType_t xQueueReceive( + QueueHandle_t xQueue, + void *pvBuffer, + TickType_t xTicksToWait + );+ * + * Receive an item from a queue. The item is received by copy so a buffer of + * adequate size must be provided. The number of bytes copied into the buffer + * was defined when the queue was created. + * + * Successfully received items are removed from the queue. + * + * This function must not be used in an interrupt service routine. See + * xQueueReceiveFromISR for an alternative that can. + * + * @param xQueue The handle to the queue from which the item is to be + * received. + * + * @param pvBuffer Pointer to the buffer into which the received item will + * be copied.
+ * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for an item to receive should the queue be empty at the time + * of the call. xQueueReceive() will return immediately if xTicksToWait + * is zero and the queue is empty. The time is defined in tick periods so the + * constant portTICK_PERIOD_MS should be used to convert to real time if this is + * required. + * + * @return pdTRUE if an item was successfully received from the queue, + * otherwise pdFALSE. + * + * Example usage: ++ struct AMessage + { + char ucMessageID; + char ucData[ 20 ]; + } xMessage; + + QueueHandle_t xQueue; + + // Task to create a queue and post a value. + void vATask( void *pvParameters ) + { + struct AMessage *pxMessage; + + // Create a queue capable of containing 10 pointers to AMessage structures. + // These should be passed by pointer as they contain a lot of data. + xQueue = xQueueCreate( 10, sizeof( struct AMessage * ) ); + if( xQueue == 0 ) + { + // Failed to create the queue. + } + + // ... + + // Send a pointer to a struct AMessage object. Don't block if the + // queue is already full. + pxMessage = & xMessage; + xQueueSend( xQueue, ( void * ) &pxMessage, ( TickType_t ) 0 ); + + // ... Rest of task code. + } + + // Task to receive from the queue. + void vADifferentTask( void *pvParameters ) + { + struct AMessage *pxRxedMessage; + + if( xQueue != 0 ) + { + // Receive a message on the created queue. Block for 10 ticks if a + // message is not immediately available. + if( xQueueReceive( xQueue, &( pxRxedMessage ), ( TickType_t ) 10 ) ) + { + // pxRxedMessage now points to the struct AMessage variable posted + // by vATask. + } + } + + // ... Rest of task code. + } ++ * \defgroup xQueueReceive xQueueReceive + * \ingroup QueueManagement + */ +BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue );+ * + * Return the number of messages stored in a queue. + * + * @param xQueue A handle to the queue being queried. + * + * @return The number of messages available in the queue. + * + * \defgroup uxQueueMessagesWaiting uxQueueMessagesWaiting + * \ingroup QueueManagement + */ +UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue );+ * + * Return the number of free spaces available in a queue. This is equal to the + * number of items that can be sent to the queue before the queue becomes full + * if no items are removed. + * + * @param xQueue A handle to the queue being queried. + * + * @return The number of spaces available in the queue. + * + * \defgroup uxQueueSpacesAvailable uxQueueSpacesAvailable + * \ingroup QueueManagement + */ +UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *void vQueueDelete( QueueHandle_t xQueue );+ * + * Delete a queue - freeing all the memory allocated for storing the items + * placed on the queue. + * + * @param xQueue A handle to the queue to be deleted. + * + * \defgroup vQueueDelete vQueueDelete + * \ingroup QueueManagement + */ +void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *+ BaseType_t xQueueSendToFrontFromISR( + QueueHandle_t xQueue, + const void *pvItemToQueue, + BaseType_t *pxHigherPriorityTaskWoken + ); ++ * + * This is a macro that calls xQueueGenericSendFromISR().
+ * + * Post an item to the front of a queue. It is safe to use this macro from + * within an interrupt service routine. + * + * Items are queued by copy not reference so it is preferable to only + * queue small items, especially when called from an ISR. In most cases + * it would be preferable to store a pointer to the item being queued. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param pxHigherPriorityTaskWoken xQueueSendToFrontFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xQueueSendToFrontFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the data was successfully sent to the queue, otherwise + * errQUEUE_FULL. + * + * Example usage for buffered IO (where the ISR can obtain more than one value + * per call): ++ void vBufferISR( void ) + { + char cIn; + BaseType_t xHigherPriorityTaskWoken; + + // We have not woken a task at the start of the ISR. + xHigherPriorityTaskWoken = pdFALSE; + + // Loop until the buffer is empty. + do + { + // Obtain a byte from the buffer. + cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS ); + + // Post the byte. + xQueueSendToFrontFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken ); + + } while( portINPUT_BYTE( BUFFER_COUNT ) ); + + // Now the buffer is empty we can switch context if necessary. + if( xHigherPriorityTaskWoken ) + { + taskYIELD (); + } + } ++ * + * \defgroup xQueueSendFromISR xQueueSendFromISR + * \ingroup QueueManagement + */ +#define xQueueSendToFrontFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_FRONT ) + + +/** + * queue. h + *+ BaseType_t xQueueSendToBackFromISR( + QueueHandle_t xQueue, + const void *pvItemToQueue, + BaseType_t *pxHigherPriorityTaskWoken + ); ++ * + * This is a macro that calls xQueueGenericSendFromISR(). + * + * Post an item to the back of a queue. It is safe to use this macro from + * within an interrupt service routine. + * + * Items are queued by copy not reference so it is preferable to only + * queue small items, especially when called from an ISR. In most cases + * it would be preferable to store a pointer to the item being queued. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param pxHigherPriorityTaskWoken xQueueSendToBackFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xQueueSendToBackFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the data was successfully sent to the queue, otherwise + * errQUEUE_FULL.
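+ * Note that, unlike the task level send functions, the FromISR variants + * never block, so a full queue results in an immediate errQUEUE_FULL return.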
+ * + * Example usage for buffered IO (where the ISR can obtain more than one value + * per call): ++ void vBufferISR( void ) + { + char cIn; + BaseType_t xHigherPriorityTaskWoken; + + // We have not woken a task at the start of the ISR. + xHigherPriorityTaskWoken = pdFALSE; + + // Loop until the buffer is empty. + do + { + // Obtain a byte from the buffer. + cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS ); + + // Post the byte. + xQueueSendToBackFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken ); + + } while( portINPUT_BYTE( BUFFER_COUNT ) ); + + // Now the buffer is empty we can switch context if necessary. + if( xHigherPriorityTaskWoken ) + { + taskYIELD (); + } + } ++ * + * \defgroup xQueueSendFromISR xQueueSendFromISR + * \ingroup QueueManagement + */ +#define xQueueSendToBackFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK ) + +/** + * queue. h + *+ BaseType_t xQueueOverwriteFromISR( + QueueHandle_t xQueue, + const void * pvItemToQueue, + BaseType_t *pxHigherPriorityTaskWoken + ); + *+ * + * A version of xQueueOverwrite() that can be used in an interrupt service + * routine (ISR). + * + * Only for use with queues that can hold a single item - so the queue is either + * empty or full. + * + * Post an item on a queue. If the queue is already full then overwrite the + * value held in the queue. The item is queued by copy, not by reference. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param pxHigherPriorityTaskWoken xQueueOverwriteFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xQueueOverwriteFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return xQueueOverwriteFromISR() is a macro that calls + * xQueueGenericSendFromISR(), and therefore has the same return values as + * xQueueSendToFrontFromISR(). However, pdPASS is the only value that can be + * returned because xQueueOverwriteFromISR() will write to the queue even when + * the queue is already full. + * + * Example usage: ++ + QueueHandle_t xQueue; + + void vFunction( void *pvParameters ) + { + // Create a queue to hold one uint32_t value. It is strongly + // recommended *not* to use xQueueOverwriteFromISR() on queues that can + // contain more than one value, and doing so will trigger an assertion + // if configASSERT() is defined. + xQueue = xQueueCreate( 1, sizeof( uint32_t ) ); +} + +void vAnInterruptHandler( void ) +{ +// xHigherPriorityTaskWoken must be set to pdFALSE before it is used. +BaseType_t xHigherPriorityTaskWoken = pdFALSE; +uint32_t ulVarToSend, ulValReceived; + + // Write the value 10 to the queue using xQueueOverwriteFromISR(). + ulVarToSend = 10; + xQueueOverwriteFromISR( xQueue, &ulVarToSend, &xHigherPriorityTaskWoken ); + + // The queue is full, but calling xQueueOverwriteFromISR() again will still + // pass because the value held in the queue will be overwritten with the + // new value. 
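+ // Because the queue was created with a length of one, this second call + // simply replaces the value written above rather than adding a second item.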
+ ulVarToSend = 100; + xQueueOverwriteFromISR( xQueue, &ulVarToSend, &xHigherPriorityTaskWoken ); + + // Reading from the queue will now return 100. + + // ... + + if( xHigherPriorityTaskWoken == pdTRUE ) + { + // Writing to the queue caused a task to unblock and the unblocked task + // has a priority higher than or equal to the priority of the currently + // executing task (the task this interrupt interrupted). Perform a context + // switch so this interrupt returns directly to the unblocked task. + portYIELD_FROM_ISR(); // or portEND_SWITCHING_ISR() depending on the port. + } +} ++ * \defgroup xQueueOverwriteFromISR xQueueOverwriteFromISR + * \ingroup QueueManagement + */ +#define xQueueOverwriteFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueOVERWRITE ) + +/** + * queue. h + *+ BaseType_t xQueueSendFromISR( + QueueHandle_t xQueue, + const void *pvItemToQueue, + BaseType_t *pxHigherPriorityTaskWoken + ); ++ * + * This is a macro that calls xQueueGenericSendFromISR(). It is included + * for backward compatibility with versions of FreeRTOS.org that did not + * include the xQueueSendToBackFromISR() and xQueueSendToFrontFromISR() + * macros. + * + * Post an item to the back of a queue. It is safe to use this function from + * within an interrupt service routine. + * + * Items are queued by copy not reference so it is preferable to only + * queue small items, especially when called from an ISR. In most cases + * it would be preferable to store a pointer to the item being queued. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param pxHigherPriorityTaskWoken xQueueSendFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xQueueSendFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the data was successfully sent to the queue, otherwise + * errQUEUE_FULL. + * + * Example usage for buffered IO (where the ISR can obtain more than one value + * per call): ++ void vBufferISR( void ) + { + char cIn; + BaseType_t xHigherPriorityTaskWoken; + + // We have not woken a task at the start of the ISR. + xHigherPriorityTaskWoken = pdFALSE; + + // Loop until the buffer is empty. + do + { + // Obtain a byte from the buffer. + cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS ); + + // Post the byte. + xQueueSendFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken ); + + } while( portINPUT_BYTE( BUFFER_COUNT ) ); + + // Now the buffer is empty we can switch context if necessary. + if( xHigherPriorityTaskWoken ) + { + // Actual macro used here is port specific. + portYIELD_FROM_ISR (); + } + } ++ * + * \defgroup xQueueSendFromISR xQueueSendFromISR + * \ingroup QueueManagement + */ +#define xQueueSendFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK ) + +/** + * queue.
h + *+ BaseType_t xQueueGenericSendFromISR( + QueueHandle_t xQueue, + const void *pvItemToQueue, + BaseType_t *pxHigherPriorityTaskWoken, + BaseType_t xCopyPosition + ); ++ * + * It is preferred that the macros xQueueSendFromISR(), + * xQueueSendToFrontFromISR() and xQueueSendToBackFromISR() be used in place + * of calling this function directly. xQueueGiveFromISR() is an + * equivalent for use by semaphores that don't actually copy any data. + * + * Post an item on a queue. It is safe to use this function from within an + * interrupt service routine. + * + * Items are queued by copy not reference so it is preferable to only + * queue small items, especially when called from an ISR. In most cases + * it would be preferable to store a pointer to the item being queued. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param pxHigherPriorityTaskWoken xQueueGenericSendFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xQueueGenericSendFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @param xCopyPosition Can take the value queueSEND_TO_BACK to place the + * item at the back of the queue, or queueSEND_TO_FRONT to place the item + * at the front of the queue (for high priority messages). + * + * @return pdTRUE if the data was successfully sent to the queue, otherwise + * errQUEUE_FULL. + * + * Example usage for buffered IO (where the ISR can obtain more than one value + * per call): ++ void vBufferISR( void ) + { + char cIn; + BaseType_t xHigherPriorityTaskWokenByPost; + + // We have not woken a task at the start of the ISR. + xHigherPriorityTaskWokenByPost = pdFALSE; + + // Loop until the buffer is empty. + do + { + // Obtain a byte from the buffer. + cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS ); + + // Post each byte. + xQueueGenericSendFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWokenByPost, queueSEND_TO_BACK ); + + } while( portINPUT_BYTE( BUFFER_COUNT ) ); + + // Now the buffer is empty we can switch context if necessary. Note that the + // name of the yield function required is port specific. + if( xHigherPriorityTaskWokenByPost ) + { + portYIELD_FROM_ISR(); + } + } ++ * + * \defgroup xQueueSendFromISR xQueueSendFromISR + * \ingroup QueueManagement + */ +BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION; +BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *+ BaseType_t xQueueReceiveFromISR( + QueueHandle_t xQueue, + void *pvBuffer, + BaseType_t *pxTaskWoken + ); + *+ * + * Receive an item from a queue. It is safe to use this function from within an + * interrupt service routine. + * + * @param xQueue The handle to the queue from which the item is to be + * received. + * + * @param pvBuffer Pointer to the buffer into which the received item will + * be copied.
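+ * Note that, unlike xQueueReceive(), xQueueReceiveFromISR() cannot block, + * so there is no xTicksToWait parameter.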
+ * + * @param pxTaskWoken A task may be blocked waiting for space to become + * available on the queue. If xQueueReceiveFromISR causes such a task to + * unblock *pxTaskWoken will get set to pdTRUE, otherwise *pxTaskWoken will + * remain unchanged. + * + * @return pdTRUE if an item was successfully received from the queue, + * otherwise pdFALSE. + * + * Example usage: ++ + QueueHandle_t xQueue; + + // Function to create a queue and post some values. + void vAFunction( void *pvParameters ) + { + char cValueToPost; + const TickType_t xTicksToWait = ( TickType_t )0xff; + + // Create a queue capable of containing 10 characters. + xQueue = xQueueCreate( 10, sizeof( char ) ); + if( xQueue == 0 ) + { + // Failed to create the queue. + } + + // ... + + // Post some characters that will be used within an ISR. If the queue + // is full then this task will block for xTicksToWait ticks. + cValueToPost = 'a'; + xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait ); + cValueToPost = 'b'; + xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait ); + + // ... keep posting characters ... this task may block when the queue + // becomes full. + + cValueToPost = 'c'; + xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait ); + } + + // ISR that outputs all the characters received on the queue. + void vISR_Routine( void ) + { + BaseType_t xTaskWokenByReceive = pdFALSE; + char cRxedChar; + + while( xQueueReceiveFromISR( xQueue, ( void * ) &cRxedChar, &xTaskWokenByReceive) ) + { + // A character was received. Output the character now. + vOutputCharacter( cRxedChar ); + + // If removing the character from the queue woke the task that was + // posting onto the queue xTaskWokenByReceive will have been set to + // pdTRUE. No matter how many times this loop iterates only one + // task will be woken. + } + + if( xTaskWokenByReceive != pdFALSE ) + { + taskYIELD (); + } + } ++ * \defgroup xQueueReceiveFromISR xQueueReceiveFromISR + * \ingroup QueueManagement + */ +BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/* + * Utilities to query queues that are safe to use from an ISR. These utilities + * should be used only from within an ISR, or within a critical section. + */ +BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + +/* + * The functions defined above are for passing data to and from tasks. The + * functions below are the equivalents for passing data to and from + * co-routines. + * + * These functions are called from the co-routine macro implementation and + * should not be called directly from application code. Instead use the macro + * wrappers defined within croutine.h. + */ +BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken ); +BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxTaskWoken ); +BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait ); +BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait ); + +/* + * For internal use only. Use xSemaphoreCreateMutex(), + * xSemaphoreCreateCounting() or xSemaphoreGetMutexHolder() instead of calling + * these functions directly.
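+ * For example, semphr.h defines xSemaphoreCreateMutex() as a thin wrapper + * that simply calls xQueueCreateMutex( queueQUEUE_TYPE_MUTEX ).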
+ */ +QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue ) PRIVILEGED_FUNCTION; +QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ) PRIVILEGED_FUNCTION; +QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue ) PRIVILEGED_FUNCTION; +BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; +void* xQueueGetMutexHolder( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION; +void* xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION; + +/* + * For internal use only. Use xSemaphoreTakeMutexRecursive() or + * xSemaphoreGiveMutexRecursive() instead of calling these functions directly. + */ +BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; +BaseType_t xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) PRIVILEGED_FUNCTION; + +/* + * Reset a queue back to its original empty state. The return value is now + * obsolete and is always set to pdPASS. + */ +#define xQueueReset( xQueue ) xQueueGenericReset( xQueue, pdFALSE ) + +/* + * The registry is provided as a means for kernel aware debuggers to + * locate queues, semaphores and mutexes. Call vQueueAddToRegistry() to add + * a queue, semaphore or mutex handle to the registry if you want the handle + * to be available to a kernel aware debugger. If you are not using a kernel + * aware debugger then this function can be ignored. + * + * configQUEUE_REGISTRY_SIZE defines the maximum number of handles the + * registry can hold. configQUEUE_REGISTRY_SIZE must be greater than 0 + * within FreeRTOSConfig.h for the registry to be available. Its value + * does not affect the number of queues, semaphores and mutexes that can be + * created - just the number that the registry can hold. + * + * @param xQueue The handle of the queue being added to the registry. This + * is the handle returned by a call to xQueueCreate(). Semaphore and mutex + * handles can also be passed in here. + * + * @param pcName The name to be associated with the handle. This is the + * name that the kernel aware debugger will display. The queue registry only + * stores a pointer to the string - so the string must be persistent (global or + * preferably in ROM/Flash), not on the stack. + */ +#if( configQUEUE_REGISTRY_SIZE > 0 ) + void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcName ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +#endif + +/* + * The registry is provided as a means for kernel aware debuggers to + * locate queues, semaphores and mutexes. Call vQueueAddToRegistry() to add + * a queue, semaphore or mutex handle to the registry if you want the handle + * to be available to a kernel aware debugger, and vQueueUnregisterQueue() to + * remove the queue, semaphore or mutex from the registry. If you are not using + * a kernel aware debugger then this function can be ignored. + * + * @param xQueue The handle of the queue being removed from the registry. + */ +#if( configQUEUE_REGISTRY_SIZE > 0 ) + void vQueueUnregisterQueue( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +#endif + +/* + * The queue registry is provided as a means for kernel aware debuggers to + * locate queues, semaphores and mutexes.
Call pcQueueGetName() to look + * up and return the name of a queue in the queue registry from the queue's + * handle. + * + * @param xQueue The handle of the queue the name of which will be returned. + * @return If the queue is in the registry then a pointer to the name of the + * queue is returned. If the queue is not in the registry then NULL is + * returned. + */ +#if( configQUEUE_REGISTRY_SIZE > 0 ) + const char *pcQueueGetName( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +#endif + +/* + * Generic version of the function used to create a queue using dynamic memory + * allocation. This is called by other functions and macros that create other + * RTOS objects that use the queue structure as their base. + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +#endif + +/* + * Generic version of the function used to create a queue using static memory + * allocation. This is called by other functions and macros that create other + * RTOS objects that use the queue structure as their base. + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +#endif + +/* + * Queue sets provide a mechanism to allow a task to block (pend) on a read + * operation from multiple queues or semaphores simultaneously. + * + * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this + * function. + * + * A queue set must be explicitly created using a call to xQueueCreateSet() + * before it can be used. Once created, standard FreeRTOS queues and semaphores + * can be added to the set using calls to xQueueAddToSet(). + * xQueueSelectFromSet() is then used to determine which, if any, of the queues + * or semaphores contained in the set is in a state where a queue read or + * semaphore take operation would be successful. + * + * Note 1: See the documentation on http://www.FreeRTOS.org/RTOS-queue-sets.html + * for reasons why queue sets are very rarely needed in practice as there are + * simpler methods of blocking on multiple objects. + * + * Note 2: Blocking on a queue set that contains a mutex will not cause the + * mutex holder to inherit the priority of the blocked task. + * + * Note 3: An additional 4 bytes of RAM is required for each space in every + * queue added to a queue set. Therefore counting semaphores that have a high + * maximum count value should not be added to a queue set. + * + * Note 4: A receive (in the case of a queue) or take (in the case of a + * semaphore) operation must not be performed on a member of a queue set unless + * a call to xQueueSelectFromSet() has first returned a handle to that set member. + * + * @param uxEventQueueLength Queue sets store events that occur on + * the queues and semaphores contained in the set. uxEventQueueLength specifies + * the maximum number of events that can be queued at once. To be absolutely + * certain that events are not lost uxEventQueueLength should be set to the + * total sum of the length of the queues added to the set, where binary + * semaphores and mutexes have a length of 1, and counting semaphores have a + * length set by their maximum count value.
Examples: + * + If a queue set is to hold a queue of length 5, another queue of length 12, + * and a binary semaphore, then uxEventQueueLength should be set to + * (5 + 12 + 1), or 18. + * + If a queue set is to hold three binary semaphores then uxEventQueueLength + * should be set to (1 + 1 + 1), or 3. + * + If a queue set is to hold a counting semaphore that has a maximum count of + * 5, and a counting semaphore that has a maximum count of 3, then + * uxEventQueueLength should be set to (5 + 3), or 8. + * + * @return If the queue set is created successfully then a handle to the created + * queue set is returned. Otherwise NULL is returned. + */ +QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength ) PRIVILEGED_FUNCTION; + +/* + * Adds a queue or semaphore to a queue set that was previously created by a + * call to xQueueCreateSet(). + * + * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this + * function. + * + * Note 1: A receive (in the case of a queue) or take (in the case of a + * semaphore) operation must not be performed on a member of a queue set unless + * a call to xQueueSelectFromSet() has first returned a handle to that set member. + * + * @param xQueueOrSemaphore The handle of the queue or semaphore being added to + * the queue set (cast to a QueueSetMemberHandle_t type). + * + * @param xQueueSet The handle of the queue set to which the queue or semaphore + * is being added. + * + * @return If the queue or semaphore was successfully added to the queue set + * then pdPASS is returned. If the queue could not be successfully added to the + * queue set because it is already a member of a different queue set then pdFAIL + * is returned. + */ +BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION; + +/* + * Removes a queue or semaphore from a queue set. A queue or semaphore can only + * be removed from a set if the queue or semaphore is empty. + * + * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this + * function. + * + * @param xQueueOrSemaphore The handle of the queue or semaphore being removed + * from the queue set (cast to a QueueSetMemberHandle_t type). + * + * @param xQueueSet The handle of the queue set in which the queue or semaphore + * is included. + * + * @return If the queue or semaphore was successfully removed from the queue set + * then pdPASS is returned. If the queue was not in the queue set, or the + * queue (or semaphore) was not empty, then pdFAIL is returned. + */ +BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION; + +/* + * xQueueSelectFromSet() selects from the members of a queue set a queue or + * semaphore that either contains data (in the case of a queue) or is available + * to take (in the case of a semaphore). xQueueSelectFromSet() effectively + * allows a task to block (pend) on a read operation on all the queues and + * semaphores in a queue set simultaneously. + * + * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this + * function. + * + * Note 1: See the documentation on http://www.FreeRTOS.org/RTOS-queue-sets.html + * for reasons why queue sets are very rarely needed in practice as there are + * simpler methods of blocking on multiple objects. + * + * Note 2: Blocking on a queue set that contains a mutex will not cause the + * mutex holder to inherit the priority of the blocked task.
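+ * In other words, the priority inheritance behaviour that normally applies + * when a task blocks directly on a mutex is bypassed while the task is + * blocked on the set instead.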
+ * + * Note 3: A receive (in the case of a queue) or take (in the case of a + * semaphore) operation must not be performed on a member of a queue set unless + * a call to xQueueSelectFromSet() has first returned a handle to that set member. + * + * @param xQueueSet The queue set on which the task will (potentially) block. + * + * @param xTicksToWait The maximum time, in ticks, that the calling task will + * remain in the Blocked state (with other tasks executing) to wait for a member + * of the queue set to be ready for a successful queue read or semaphore take + * operation. + * + * @return xQueueSelectFromSet() will return the handle of a queue (cast to + * a QueueSetMemberHandle_t type) contained in the queue set that contains data, + * or the handle of a semaphore (cast to a QueueSetMemberHandle_t type) contained + * in the queue set that is available, or NULL if no such queue or semaphore + * exists before the specified block time expires. + */ +QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/* + * A version of xQueueSelectFromSet() that can be used from an ISR. + */ +QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION; + +/* Not public API functions. */ +void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION; +BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ) PRIVILEGED_FUNCTION; +void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) PRIVILEGED_FUNCTION; +UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +uint8_t ucQueueGetQueueType( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + + +#ifdef __cplusplus +} +#endif + +#endif /* QUEUE_H */ + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h new file mode 100644 index 000000000..0f58285ce --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h @@ -0,0 +1,1140 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces!
+ */ + +#ifndef SEMAPHORE_H +#define SEMAPHORE_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h" must appear in source files before "include semphr.h" +#endif + +#include "queue.h" + +typedef QueueHandle_t SemaphoreHandle_t; + +#define semBINARY_SEMAPHORE_QUEUE_LENGTH ( ( uint8_t ) 1U ) +#define semSEMAPHORE_QUEUE_ITEM_LENGTH ( ( uint8_t ) 0U ) +#define semGIVE_BLOCK_TIME ( ( TickType_t ) 0U ) + + +/** + * semphr. h + *vSemaphoreCreateBinary( SemaphoreHandle_t xSemaphore )+ * + * In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a binary semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * This old vSemaphoreCreateBinary() macro is now deprecated in favour of the + * xSemaphoreCreateBinary() function. Note that binary semaphores created using + * the vSemaphoreCreateBinary() macro are created in a state such that the + * first call to 'take' the semaphore would pass, whereas binary semaphores + * created using xSemaphoreCreateBinary() are created in a state such that + * the semaphore must first be 'given' before it can be 'taken'. + * + * Macro that implements a semaphore by using the existing queue mechanism. + * The queue length is 1 as this is a binary semaphore. The data size is 0 + * as we don't want to actually store any data - we just want to know if the + * queue is empty or full. + * + * This type of semaphore can be used for pure synchronisation between tasks or + * between an interrupt and a task. The semaphore need not be given back once + * obtained, so one task/interrupt can continuously 'give' the semaphore while + * another continuously 'takes' the semaphore. For this reason this type of + * semaphore does not use a priority inheritance mechanism. For an alternative + * that does use priority inheritance see xSemaphoreCreateMutex(). + * + * @param xSemaphore Handle to the created semaphore. Should be of type SemaphoreHandle_t. + * + * Example usage: ++ SemaphoreHandle_t xSemaphore = NULL; + + void vATask( void * pvParameters ) + { + // Semaphore cannot be used before a call to vSemaphoreCreateBinary (). + // This is a macro so pass the variable in directly. + vSemaphoreCreateBinary( xSemaphore ); + + if( xSemaphore != NULL ) + { + // The semaphore was created successfully. + // The semaphore can now be used. + } + } ++ * \defgroup vSemaphoreCreateBinary vSemaphoreCreateBinary + * \ingroup Semaphores + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define vSemaphoreCreateBinary( xSemaphore ) \ + { \ + ( xSemaphore ) = xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE ); \ + if( ( xSemaphore ) != NULL ) \ + { \ + ( void ) xSemaphoreGive( ( xSemaphore ) ); \ + } \ + } +#endif + +/** + * semphr. h + *SemaphoreHandle_t xSemaphoreCreateBinary( void )+ * + * Creates a new binary semaphore instance, and returns a handle by which the + * new semaphore can be referenced. + * + * In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a binary semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, binary semaphores use a block + * of memory, in which the semaphore structure is stored. If a binary semaphore + * is created using xSemaphoreCreateBinary() then the required memory is + * automatically dynamically allocated inside the xSemaphoreCreateBinary() + * function.
(see http://www.freertos.org/a00111.html). If a binary semaphore + * is created using xSemaphoreCreateBinaryStatic() then the application writer + * must provide the memory. xSemaphoreCreateBinaryStatic() therefore allows a + * binary semaphore to be created without using any dynamic memory allocation. + * + * The old vSemaphoreCreateBinary() macro is now deprecated in favour of this + * xSemaphoreCreateBinary() function. Note that binary semaphores created using + * the vSemaphoreCreateBinary() macro are created in a state such that the + * first call to 'take' the semaphore would pass, whereas binary semaphores + * created using xSemaphoreCreateBinary() are created in a state such that + * the semaphore must first be 'given' before it can be 'taken'. + * + * This type of semaphore can be used for pure synchronisation between tasks or + * between an interrupt and a task. The semaphore need not be given back once + * obtained, so one task/interrupt can continuously 'give' the semaphore while + * another continuously 'takes' the semaphore. For this reason this type of + * semaphore does not use a priority inheritance mechanism. For an alternative + * that does use priority inheritance see xSemaphoreCreateMutex(). + * + * @return Handle to the created semaphore, or NULL if the memory required to + * hold the semaphore's data structures could not be allocated. + * + * Example usage: ++ SemaphoreHandle_t xSemaphore = NULL; + + void vATask( void * pvParameters ) + { + // Semaphore cannot be used before a call to xSemaphoreCreateBinary(). + // Although this is implemented as a macro, it is called like a function + // and returns the semaphore handle. + xSemaphore = xSemaphoreCreateBinary(); + + if( xSemaphore != NULL ) + { + // The semaphore was created successfully. + // The semaphore can now be used. + } + } ++ * \defgroup xSemaphoreCreateBinary xSemaphoreCreateBinary + * \ingroup Semaphores + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define xSemaphoreCreateBinary() xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE ) +#endif + +/** + * semphr. h + *SemaphoreHandle_t xSemaphoreCreateBinaryStatic( StaticSemaphore_t *pxSemaphoreBuffer )+ * + * Creates a new binary semaphore instance, and returns a handle by which the + * new semaphore can be referenced. + * + * NOTE: In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a binary semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, binary semaphores use a block + * of memory, in which the semaphore structure is stored. If a binary semaphore + * is created using xSemaphoreCreateBinary() then the required memory is + * automatically dynamically allocated inside the xSemaphoreCreateBinary() + * function. (see http://www.freertos.org/a00111.html). If a binary semaphore + * is created using xSemaphoreCreateBinaryStatic() then the application writer + * must provide the memory. xSemaphoreCreateBinaryStatic() therefore allows a + * binary semaphore to be created without using any dynamic memory allocation. + * + * This type of semaphore can be used for pure synchronisation between tasks or + * between an interrupt and a task. The semaphore need not be given back once + * obtained, so one task/interrupt can continuously 'give' the semaphore while + * another continuously 'takes' the semaphore. For this reason this type of + * semaphore does not use a priority inheritance mechanism.
For an alternative + * that does use priority inheritance see xSemaphoreCreateMutex(). + * + * @param pxSemaphoreBuffer Must point to a variable of type StaticSemaphore_t, + * which will then be used to hold the semaphore's data structure, removing the + * need for the memory to be allocated dynamically. + * + * @return If the semaphore is created then a handle to the created semaphore is + * returned. If pxSemaphoreBuffer is NULL then NULL is returned. + * + * Example usage: ++ SemaphoreHandle_t xSemaphore = NULL; + StaticSemaphore_t xSemaphoreBuffer; + + void vATask( void * pvParameters ) + { + // Semaphore cannot be used before a call to xSemaphoreCreateBinaryStatic(). + // The semaphore's data structures will be placed in the xSemaphoreBuffer + // variable, the address of which is passed into the function. The + // function's parameter is not NULL, so the function will not attempt any + // dynamic memory allocation, and therefore the function will not + // return NULL. + xSemaphore = xSemaphoreCreateBinaryStatic( &xSemaphoreBuffer ); + + // Rest of task code goes here. + } ++ * \defgroup xSemaphoreCreateBinaryStatic xSemaphoreCreateBinaryStatic + * \ingroup Semaphores + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xSemaphoreCreateBinaryStatic( pxStaticSemaphore ) xQueueGenericCreateStatic( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticSemaphore, queueQUEUE_TYPE_BINARY_SEMAPHORE ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * semphr. h + *xSemaphoreTake( + * SemaphoreHandle_t xSemaphore, + * TickType_t xBlockTime + * )+ * + * Macro to obtain a semaphore. The semaphore must have previously been + * created with a call to xSemaphoreCreateBinary(), xSemaphoreCreateMutex() or + * xSemaphoreCreateCounting(). + * + * @param xSemaphore A handle to the semaphore being taken - obtained when + * the semaphore was created. + * + * @param xBlockTime The time in ticks to wait for the semaphore to become + * available. The macro portTICK_PERIOD_MS can be used to convert this to a + * real time. A block time of zero can be used to poll the semaphore. A block + * time of portMAX_DELAY can be used to block indefinitely (provided + * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h). + * + * @return pdTRUE if the semaphore was obtained. pdFALSE + * if xBlockTime expired without the semaphore becoming available. + * + * Example usage: ++ SemaphoreHandle_t xSemaphore = NULL; + + // A task that creates a semaphore. + void vATask( void * pvParameters ) + { + // Create the semaphore to guard a shared resource. + xSemaphore = xSemaphoreCreateBinary(); + } + + // A task that uses the semaphore. + void vAnotherTask( void * pvParameters ) + { + // ... Do other things. + + if( xSemaphore != NULL ) + { + // See if we can obtain the semaphore. If the semaphore is not available + // wait 10 ticks to see if it becomes free. + if( xSemaphoreTake( xSemaphore, ( TickType_t ) 10 ) == pdTRUE ) + { + // We were able to obtain the semaphore and can now access the + // shared resource. + + // ... + + // We have finished accessing the shared resource. Release the + // semaphore. + xSemaphoreGive( xSemaphore ); + } + else + { + // We could not obtain the semaphore and can therefore not access + // the shared resource safely. + } + } + } ++ * \defgroup xSemaphoreTake xSemaphoreTake + * \ingroup Semaphores + */ +#define xSemaphoreTake( xSemaphore, xBlockTime ) xQueueSemaphoreTake( ( xSemaphore ), ( xBlockTime ) ) + +/** + * semphr.
h + * xSemaphoreTakeRecursive( + * SemaphoreHandle_t xMutex, + * TickType_t xBlockTime + * ) + * + * Macro to recursively obtain, or 'take', a mutex type semaphore. + * The mutex must have previously been created using a call to + * xSemaphoreCreateRecursiveMutex(). + * + * configUSE_RECURSIVE_MUTEXES must be set to 1 in FreeRTOSConfig.h for this + * macro to be available. + * + * This macro must not be used on mutexes created using xSemaphoreCreateMutex(). + * + * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex + * doesn't become available again until the owner has called + * xSemaphoreGiveRecursive() for each successful 'take' request. For example, + * if a task successfully 'takes' the same mutex 5 times then the mutex will + * not be available to any other task until it has also 'given' the mutex back + * exactly five times. + * + * @param xMutex A handle to the mutex being obtained. This is the + * handle returned by xSemaphoreCreateRecursiveMutex(). + * + * @param xBlockTime The time in ticks to wait for the semaphore to become + * available. The macro portTICK_PERIOD_MS can be used to convert this to a + * real time. A block time of zero can be used to poll the semaphore. If + * the task already owns the semaphore then xSemaphoreTakeRecursive() will + * return immediately no matter what the value of xBlockTime. + * + * @return pdTRUE if the semaphore was obtained. pdFALSE if xBlockTime + * expired without the semaphore becoming available. + * + * Example usage: ++ SemaphoreHandle_t xMutex = NULL; + + // A task that creates a mutex. + void vATask( void * pvParameters ) + { + // Create the mutex to guard a shared resource. + xMutex = xSemaphoreCreateRecursiveMutex(); + } + + // A task that uses the mutex. + void vAnotherTask( void * pvParameters ) + { + // ... Do other things. + + if( xMutex != NULL ) + { + // See if we can obtain the mutex. If the mutex is not available + // wait 10 ticks to see if it becomes free. + if( xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ) == pdTRUE ) + { + // We were able to obtain the mutex and can now access the + // shared resource. + + // ... + // For some reason due to the nature of the code further calls to + // xSemaphoreTakeRecursive() are made on the same mutex. In real + // code these would not be just sequential calls as this would make + // no sense. Instead the calls are likely to be buried inside + // a more complex call structure. + xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ); + xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ); + + // The mutex has now been 'taken' three times, so will not be + // available to another task until it has also been given back + // three times. Again it is unlikely that real code would have + // these calls sequentially, but instead buried in a more complex + // call structure. This is just for illustrative purposes. + xSemaphoreGiveRecursive( xMutex ); + xSemaphoreGiveRecursive( xMutex ); + xSemaphoreGiveRecursive( xMutex ); + + // Now the mutex can be taken by other tasks. + } + else + { + // We could not obtain the mutex and can therefore not access + // the shared resource safely. + } + } + } ++ * \defgroup xSemaphoreTakeRecursive xSemaphoreTakeRecursive + * \ingroup Semaphores + */ +#if( configUSE_RECURSIVE_MUTEXES == 1 ) + #define xSemaphoreTakeRecursive( xMutex, xBlockTime ) xQueueTakeMutexRecursive( ( xMutex ), ( xBlockTime ) ) +#endif + +/** + * semphr. h + *xSemaphoreGive( SemaphoreHandle_t xSemaphore )+ * + * Macro to release a semaphore.
The semaphore must have previously been + * created with a call to xSemaphoreCreateBinary(), xSemaphoreCreateMutex() or + * xSemaphoreCreateCounting(), and obtained using xSemaphoreTake(). + * + * This macro must not be used from an ISR. See xSemaphoreGiveFromISR () for + * an alternative which can be used from an ISR. + * + * This macro must also not be used on semaphores created using + * xSemaphoreCreateRecursiveMutex(). + * + * @param xSemaphore A handle to the semaphore being released. This is the + * handle returned when the semaphore was created. + * + * @return pdTRUE if the semaphore was released. pdFALSE if an error occurred. + * Semaphores are implemented using queues. An error can occur if there is + * no space on the queue to post a message - indicating that the + * semaphore was not first obtained correctly. + * + * Example usage: ++ SemaphoreHandle_t xSemaphore = NULL; + + void vATask( void * pvParameters ) + { + // Create the semaphore to guard a shared resource. + vSemaphoreCreateBinary( xSemaphore ); + + if( xSemaphore != NULL ) + { + if( xSemaphoreGive( xSemaphore ) != pdTRUE ) + { + // We would expect this call to fail because we cannot give + // a semaphore without first "taking" it! + } + + // Obtain the semaphore - don't block if the semaphore is not + // immediately available. + if( xSemaphoreTake( xSemaphore, ( TickType_t ) 0 ) ) + { + // We now have the semaphore and can access the shared resource. + + // ... + + // We have finished accessing the shared resource so can free the + // semaphore. + if( xSemaphoreGive( xSemaphore ) != pdTRUE ) + { + // We would not expect this call to fail because we must have + // obtained the semaphore to get here. + } + } + } + } ++ * \defgroup xSemaphoreGive xSemaphoreGive + * \ingroup Semaphores + */ +#define xSemaphoreGive( xSemaphore ) xQueueGenericSend( ( QueueHandle_t ) ( xSemaphore ), NULL, semGIVE_BLOCK_TIME, queueSEND_TO_BACK ) + +/** + * semphr. h + *xSemaphoreGiveRecursive( SemaphoreHandle_t xMutex )+ * + * Macro to recursively release, or 'give', a mutex type semaphore. + * The mutex must have previously been created using a call to + * xSemaphoreCreateRecursiveMutex(). + * + * configUSE_RECURSIVE_MUTEXES must be set to 1 in FreeRTOSConfig.h for this + * macro to be available. + * + * This macro must not be used on mutexes created using xSemaphoreCreateMutex(). + * + * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex + * doesn't become available again until the owner has called + * xSemaphoreGiveRecursive() for each successful 'take' request. For example, + * if a task successfully 'takes' the same mutex 5 times then the mutex will + * not be available to any other task until it has also 'given' the mutex back + * exactly five times. + * + * @param xMutex A handle to the mutex being released, or 'given'. This is the + * handle returned by xSemaphoreCreateRecursiveMutex(). + * + * @return pdTRUE if the semaphore was given. + * + * Example usage: ++ SemaphoreHandle_t xMutex = NULL; + + // A task that creates a mutex. + void vATask( void * pvParameters ) + { + // Create the mutex to guard a shared resource. + xMutex = xSemaphoreCreateRecursiveMutex(); + } + + // A task that uses the mutex. + void vAnotherTask( void * pvParameters ) + { + // ... Do other things. + + if( xMutex != NULL ) + { + // See if we can obtain the mutex. If the mutex is not available + // wait 10 ticks to see if it becomes free.
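+ // (A block time of zero could be used instead to poll the mutex + // without blocking.)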
+ if( xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ) == pdTRUE ) + { + // We were able to obtain the mutex and can now access the + // shared resource. + + // ... + // For some reason due to the nature of the code further calls to + // xSemaphoreTakeRecursive() are made on the same mutex. In real + // code these would not be just sequential calls as this would make + // no sense. Instead the calls are likely to be buried inside + // a more complex call structure. + xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ); + xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ); + + // The mutex has now been 'taken' three times, so will not be + // available to another task until it has also been given back + // three times. Again it is unlikely that real code would have + // these calls sequentially, it would be more likely that the calls + // to xSemaphoreGiveRecursive() would be made as a call stack + // unwound. This is just for demonstrative purposes. + xSemaphoreGiveRecursive( xMutex ); + xSemaphoreGiveRecursive( xMutex ); + xSemaphoreGiveRecursive( xMutex ); + + // Now the mutex can be taken by other tasks. + } + else + { + // We could not obtain the mutex and can therefore not access + // the shared resource safely. + } + } + } ++ * \defgroup xSemaphoreGiveRecursive xSemaphoreGiveRecursive + * \ingroup Semaphores + */ +#if( configUSE_RECURSIVE_MUTEXES == 1 ) + #define xSemaphoreGiveRecursive( xMutex ) xQueueGiveMutexRecursive( ( xMutex ) ) +#endif + +/** + * semphr. h + *+ xSemaphoreGiveFromISR( + SemaphoreHandle_t xSemaphore, + BaseType_t *pxHigherPriorityTaskWoken + )+ * + * Macro to release a semaphore. The semaphore must have previously been + * created with a call to xSemaphoreCreateBinary() or xSemaphoreCreateCounting(). + * + * Mutex type semaphores (those created using a call to xSemaphoreCreateMutex()) + * must not be used with this macro. + * + * This macro can be used from an ISR. + * + * @param xSemaphore A handle to the semaphore being released. This is the + * handle returned when the semaphore was created. + * + * @param pxHigherPriorityTaskWoken xSemaphoreGiveFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if giving the semaphore caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xSemaphoreGiveFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the semaphore was successfully given, otherwise errQUEUE_FULL. + * + * Example usage: ++ \#define LONG_TIME 0xffff + \#define TICKS_TO_WAIT 10 + SemaphoreHandle_t xSemaphore = NULL; + + // Repetitive task. + void vATask( void * pvParameters ) + { + for( ;; ) + { + // We want this task to run every 10 ticks of a timer. The semaphore + // was created before this task was started. + + // Block waiting for the semaphore to become available. + if( xSemaphoreTake( xSemaphore, LONG_TIME ) == pdTRUE ) + { + // It is time to execute. + + // ... + + // We have finished our task. Return to the top of the loop where + // we will block on the semaphore until it is time to execute + // again. Note when using the semaphore for synchronisation with an + // ISR in this manner there is no need to 'give' the semaphore back. + } + } + } + + // Timer ISR + void vTimerISR( void * pvParameters ) + { + static uint8_t ucLocalTickCount = 0; + static BaseType_t xHigherPriorityTaskWoken; + + // A timer tick has occurred. + + // ... Do other time functions.
+ + // Is it time for vATask () to run? + xHigherPriorityTaskWoken = pdFALSE; + ucLocalTickCount++; + if( ucLocalTickCount >= TICKS_TO_WAIT ) + { + // Unblock the task by releasing the semaphore. + xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken ); + + // Reset the count so we release the semaphore again in 10 ticks time. + ucLocalTickCount = 0; + } + + if( xHigherPriorityTaskWoken != pdFALSE ) + { + // We can force a context switch here. Context switching from an + // ISR uses port specific syntax. Check the demo task for your port + // to find the syntax required. + } + } ++ * \defgroup xSemaphoreGiveFromISR xSemaphoreGiveFromISR + * \ingroup Semaphores + */ +#define xSemaphoreGiveFromISR( xSemaphore, pxHigherPriorityTaskWoken ) xQueueGiveFromISR( ( QueueHandle_t ) ( xSemaphore ), ( pxHigherPriorityTaskWoken ) ) + +/** + * semphr. h + *+ xSemaphoreTakeFromISR( + SemaphoreHandle_t xSemaphore, + BaseType_t *pxHigherPriorityTaskWoken + )+ * + * Macro to take a semaphore from an ISR. The semaphore must have + * previously been created with a call to xSemaphoreCreateBinary() or + * xSemaphoreCreateCounting(). + * + * Mutex type semaphores (those created using a call to xSemaphoreCreateMutex()) + * must not be used with this macro. + * + * This macro can be used from an ISR; however, taking a semaphore from an ISR + * is not a common operation. It is likely to only be useful when taking a + * counting semaphore when an interrupt is obtaining an object from a resource + * pool (when the semaphore count indicates the number of resources available). + * + * @param xSemaphore A handle to the semaphore being taken. This is the + * handle returned when the semaphore was created. + * + * @param pxHigherPriorityTaskWoken xSemaphoreTakeFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if taking the semaphore caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xSemaphoreTakeFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the semaphore was successfully taken, otherwise + * pdFALSE + */ +#define xSemaphoreTakeFromISR( xSemaphore, pxHigherPriorityTaskWoken ) xQueueReceiveFromISR( ( QueueHandle_t ) ( xSemaphore ), NULL, ( pxHigherPriorityTaskWoken ) ) + +/** + * semphr. h + *SemaphoreHandle_t xSemaphoreCreateMutex( void )+ * + * Creates a new mutex type semaphore instance, and returns a handle by which + * the new mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, mutex semaphores use a block + * of memory, in which the mutex structure is stored. If a mutex is created + * using xSemaphoreCreateMutex() then the required memory is automatically + * dynamically allocated inside the xSemaphoreCreateMutex() function. (see + * http://www.freertos.org/a00111.html). If a mutex is created using + * xSemaphoreCreateMutexStatic() then the application writer must provide the + * memory. xSemaphoreCreateMutexStatic() therefore allows a mutex to be created + * without using any dynamic memory allocation. + * + * Mutexes created using this function can be accessed using the xSemaphoreTake() + * and xSemaphoreGive() macros. The xSemaphoreTakeRecursive() and + * xSemaphoreGiveRecursive() macros must not be used. + * + * This type of semaphore uses a priority inheritance mechanism so a task + * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the + * semaphore is no longer required.
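+ * If the mutex is never given back no other task can ever obtain it, and + * the benefit of the priority inheritance mechanism is lost.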
+ * + * Mutex type semaphores cannot be used from within interrupt service routines. + * + * See xSemaphoreCreateBinary() for an alternative implementation that can be + * used for pure synchronisation (where one task or interrupt always 'gives' the + * semaphore and another always 'takes' the semaphore) and from within interrupt + * service routines. + * + * @return If the mutex was successfully created then a handle to the created + * semaphore is returned. If there was not enough heap to allocate the mutex + * data structures then NULL is returned. + * + * Example usage: ++ SemaphoreHandle_t xSemaphore; + + void vATask( void * pvParameters ) + { + // Semaphore cannot be used before a call to xSemaphoreCreateMutex(). + // This is a macro so pass the variable in directly. + xSemaphore = xSemaphoreCreateMutex(); + + if( xSemaphore != NULL ) + { + // The semaphore was created successfully. + // The semaphore can now be used. + } + } ++ * \defgroup xSemaphoreCreateMutex xSemaphoreCreateMutex + * \ingroup Semaphores + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define xSemaphoreCreateMutex() xQueueCreateMutex( queueQUEUE_TYPE_MUTEX ) +#endif + +/** + * semphr. h + *SemaphoreHandle_t xSemaphoreCreateMutexStatic( StaticSemaphore_t *pxMutexBuffer )+ * + * Creates a new mutex type semaphore instance, and returns a handle by which + * the new mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, mutex semaphores use a block + * of memory, in which the mutex structure is stored. If a mutex is created + * using xSemaphoreCreateMutex() then the required memory is automatically + * dynamically allocated inside the xSemaphoreCreateMutex() function. (see + * http://www.freertos.org/a00111.html). If a mutex is created using + * xSemaphoreCreateMutexStatic() then the application writer must provide the + * memory. xSemaphoreCreateMutexStatic() therefore allows a mutex to be created + * without using any dynamic memory allocation. + * + * Mutexes created using this function can be accessed using the xSemaphoreTake() + * and xSemaphoreGive() macros. The xSemaphoreTakeRecursive() and + * xSemaphoreGiveRecursive() macros must not be used. + * + * This type of semaphore uses a priority inheritance mechanism so a task + * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the + * semaphore is no longer required. + * + * Mutex type semaphores cannot be used from within interrupt service routines. + * + * See xSemaphoreCreateBinary() for an alternative implementation that can be + * used for pure synchronisation (where one task or interrupt always 'gives' the + * semaphore and another always 'takes' the semaphore) and from within interrupt + * service routines. + * + * @param pxMutexBuffer Must point to a variable of type StaticSemaphore_t, + * which will be used to hold the mutex's data structure, removing the need for + * the memory to be allocated dynamically. + * + * @return If the mutex was successfully created then a handle to the created + * mutex is returned. If pxMutexBuffer was NULL then NULL is returned. + * + * Example usage: ++ SemaphoreHandle_t xSemaphore; + StaticSemaphore_t xMutexBuffer; + + void vATask( void * pvParameters ) + { + // A mutex cannot be used before it has been created. xMutexBuffer is + // passed into xSemaphoreCreateMutexStatic() so no dynamic memory allocation is + // attempted. 
+ xSemaphore = xSemaphoreCreateMutexStatic( &xMutexBuffer ); + + // As no dynamic memory allocation was performed, xSemaphore cannot be NULL, + // so there is no need to check it. + } ++ * \defgroup xSemaphoreCreateMutexStatic xSemaphoreCreateMutexStatic + * \ingroup Semaphores + */ + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xSemaphoreCreateMutexStatic( pxMutexBuffer ) xQueueCreateMutexStatic( queueQUEUE_TYPE_MUTEX, ( pxMutexBuffer ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + + +/** + * semphr. h + *SemaphoreHandle_t xSemaphoreCreateRecursiveMutex( void )+ * + * Creates a new recursive mutex type semaphore instance, and returns a handle + * by which the new recursive mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, recursive mutexes use a block + * of memory, in which the mutex structure is stored. If a recursive mutex is + * created using xSemaphoreCreateRecursiveMutex() then the required memory is + * automatically dynamically allocated inside the + * xSemaphoreCreateRecursiveMutex() function. (see + * http://www.freertos.org/a00111.html). If a recursive mutex is created using + * xSemaphoreCreateRecursiveMutexStatic() then the application writer must + * provide the memory that will get used by the mutex. + * xSemaphoreCreateRecursiveMutexStatic() therefore allows a recursive mutex to + * be created without using any dynamic memory allocation. + * + * Mutexes created using this macro can be accessed using the + * xSemaphoreTakeRecursive() and xSemaphoreGiveRecursive() macros. The + * xSemaphoreTake() and xSemaphoreGive() macros must not be used. + * + * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex + * doesn't become available again until the owner has called + * xSemaphoreGiveRecursive() for each successful 'take' request. For example, + * if a task successfully 'takes' the same mutex 5 times then the mutex will + * not be available to any other task until it has also 'given' the mutex back + * exactly five times. + * + * This type of semaphore uses a priority inheritance mechanism so a task + * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the + * semaphore is no longer required. + * + * Mutex type semaphores cannot be used from within interrupt service routines. + * + * See xSemaphoreCreateBinary() for an alternative implementation that can be + * used for pure synchronisation (where one task or interrupt always 'gives' the + * semaphore and another always 'takes' the semaphore) and from within interrupt + * service routines. + * + * @return xSemaphore Handle to the created mutex semaphore. Should be of type + * SemaphoreHandle_t. + * + * Example usage: ++ SemaphoreHandle_t xSemaphore; + + void vATask( void * pvParameters ) + { + // Semaphore cannot be used before a call to xSemaphoreCreateRecursiveMutex(). + // This is a macro so pass the variable in directly. + xSemaphore = xSemaphoreCreateRecursiveMutex(); + + if( xSemaphore != NULL ) + { + // The semaphore was created successfully. + // The semaphore can now be used. + } + } ++ * \defgroup xSemaphoreCreateRecursiveMutex xSemaphoreCreateRecursiveMutex + * \ingroup Semaphores + */ +#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_RECURSIVE_MUTEXES == 1 ) ) + #define xSemaphoreCreateRecursiveMutex() xQueueCreateMutex( queueQUEUE_TYPE_RECURSIVE_MUTEX ) +#endif + +/** + * semphr. 
h + *SemaphoreHandle_t xSemaphoreCreateRecursiveMutexStatic( StaticSemaphore_t *pxMutexBuffer )+ * + * Creates a new recursive mutex type semaphore instance, and returns a handle + * by which the new recursive mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, recursive mutexes use a block + * of memory, in which the mutex structure is stored. If a recursive mutex is + * created using xSemaphoreCreateRecursiveMutex() then the required memory is + * automatically dynamically allocated inside the + * xSemaphoreCreateRecursiveMutex() function. (see + * http://www.freertos.org/a00111.html). If a recursive mutex is created using + * xSemaphoreCreateRecursiveMutexStatic() then the application writer must + * provide the memory that will get used by the mutex. + * xSemaphoreCreateRecursiveMutexStatic() therefore allows a recursive mutex to + * be created without using any dynamic memory allocation. + * + * Mutexes created using this macro can be accessed using the + * xSemaphoreTakeRecursive() and xSemaphoreGiveRecursive() macros. The + * xSemaphoreTake() and xSemaphoreGive() macros must not be used. + * + * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex + * doesn't become available again until the owner has called + * xSemaphoreGiveRecursive() for each successful 'take' request. For example, + * if a task successfully 'takes' the same mutex 5 times then the mutex will + * not be available to any other task until it has also 'given' the mutex back + * exactly five times. + * + * This type of semaphore uses a priority inheritance mechanism so a task + * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the + * semaphore is no longer required. + * + * Mutex type semaphores cannot be used from within interrupt service routines. + * + * See xSemaphoreCreateBinary() for an alternative implementation that can be + * used for pure synchronisation (where one task or interrupt always 'gives' the + * semaphore and another always 'takes' the semaphore) and from within interrupt + * service routines. + * + * @param pxMutexBuffer Must point to a variable of type StaticSemaphore_t, + * which will then be used to hold the recursive mutex's data structure, + * removing the need for the memory to be allocated dynamically. + * + * @return If the recursive mutex was successfully created then a handle to the + * created recursive mutex is returned. If pxMutexBuffer was NULL then NULL is + * returned. + * + * Example usage: ++ SemaphoreHandle_t xSemaphore; + StaticSemaphore_t xMutexBuffer; + + void vATask( void * pvParameters ) + { + // A recursive semaphore cannot be used before it is created. Here a + // recursive mutex is created using xSemaphoreCreateRecursiveMutexStatic(). + // The address of xMutexBuffer is passed into the function, and will hold + // the mutex's data structures - so no dynamic memory allocation will be + // attempted. + xSemaphore = xSemaphoreCreateRecursiveMutexStatic( &xMutexBuffer ); + + // As no dynamic memory allocation was performed, xSemaphore cannot be NULL, + // so there is no need to check it. + } ++ * \defgroup xSemaphoreCreateRecursiveMutexStatic xSemaphoreCreateRecursiveMutexStatic + * \ingroup Semaphores + */ +#if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_RECURSIVE_MUTEXES == 1 ) ) + #define xSemaphoreCreateRecursiveMutexStatic( pxStaticSemaphore ) xQueueCreateMutexStatic( queueQUEUE_TYPE_RECURSIVE_MUTEX, pxStaticSemaphore ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * semphr. 
h + *SemaphoreHandle_t xSemaphoreCreateCounting( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount )+ * + * Creates a new counting semaphore instance, and returns a handle by which the + * new counting semaphore can be referenced. + * + * In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a counting semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, counting semaphores use a + * block of memory, in which the counting semaphore structure is stored. If a + * counting semaphore is created using xSemaphoreCreateCounting() then the + * required memory is automatically dynamically allocated inside the + * xSemaphoreCreateCounting() function. (see + * http://www.freertos.org/a00111.html). If a counting semaphore is created + * using xSemaphoreCreateCountingStatic() then the application writer can + * instead optionally provide the memory that will get used by the counting + * semaphore. xSemaphoreCreateCountingStatic() therefore allows a counting + * semaphore to be created without using any dynamic memory allocation. + * + * Counting semaphores are typically used for two things: + * + * 1) Counting events. + * + * In this usage scenario an event handler will 'give' a semaphore each time + * an event occurs (incrementing the semaphore count value), and a handler + * task will 'take' a semaphore each time it processes an event + * (decrementing the semaphore count value). The count value is therefore + * the difference between the number of events that have occurred and the + * number that have been processed. In this case it is desirable for the + * initial count value to be zero. + * + * 2) Resource management. + * + * In this usage scenario the count value indicates the number of resources + * available. To obtain control of a resource a task must first obtain a + * semaphore - decrementing the semaphore count value. When the count value + * reaches zero there are no free resources. When a task finishes with the + * resource it 'gives' the semaphore back - incrementing the semaphore count + * value. In this case it is desirable for the initial count value to be + * equal to the maximum count value, indicating that all resources are free. + * + * @param uxMaxCount The maximum count value that can be reached. When the + * semaphore reaches this value it can no longer be 'given'. + * + * @param uxInitialCount The count value assigned to the semaphore when it is + * created. + * + * @return Handle to the created semaphore. NULL if the semaphore could not be + * created. + * + * Example usage: ++ SemaphoreHandle_t xSemaphore; + + void vATask( void * pvParameters ) + { + // Semaphore cannot be used before a call to xSemaphoreCreateCounting(). + // The max value to which the semaphore can count should be 10, and the + // initial value assigned to the count should be 0. + xSemaphore = xSemaphoreCreateCounting( 10, 0 ); + + if( xSemaphore != NULL ) + { + // The semaphore was created successfully. + // The semaphore can now be used. + } + } ++ * \defgroup xSemaphoreCreateCounting xSemaphoreCreateCounting + * \ingroup Semaphores + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define xSemaphoreCreateCounting( uxMaxCount, uxInitialCount ) xQueueCreateCountingSemaphore( ( uxMaxCount ), ( uxInitialCount ) ) +#endif + +/** + * semphr. 
h + *SemaphoreHandle_t xSemaphoreCreateCountingStatic( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount, StaticSemaphore_t *pxSemaphoreBuffer )+ * + * Creates a new counting semaphore instance, and returns a handle by which the + * new counting semaphore can be referenced. + * + * In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a counting semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, counting semaphores use a + * block of memory, in which the counting semaphore structure is stored. If a + * counting semaphore is created using xSemaphoreCreateCounting() then the + * required memory is automatically dynamically allocated inside the + * xSemaphoreCreateCounting() function. (see + * http://www.freertos.org/a00111.html). If a counting semaphore is created + * using xSemaphoreCreateCountingStatic() then the application writer must + * provide the memory. xSemaphoreCreateCountingStatic() therefore allows a + * counting semaphore to be created without using any dynamic memory allocation. + * + * Counting semaphores are typically used for two things: + * + * 1) Counting events. + * + * In this usage scenario an event handler will 'give' a semaphore each time + * an event occurs (incrementing the semaphore count value), and a handler + * task will 'take' a semaphore each time it processes an event + * (decrementing the semaphore count value). The count value is therefore + * the difference between the number of events that have occurred and the + * number that have been processed. In this case it is desirable for the + * initial count value to be zero. + * + * 2) Resource management. + * + * In this usage scenario the count value indicates the number of resources + * available. To obtain control of a resource a task must first obtain a + * semaphore - decrementing the semaphore count value. When the count value + * reaches zero there are no free resources. When a task finishes with the + * resource it 'gives' the semaphore back - incrementing the semaphore count + * value. In this case it is desirable for the initial count value to be + * equal to the maximum count value, indicating that all resources are free. + * + * @param uxMaxCount The maximum count value that can be reached. When the + * semaphore reaches this value it can no longer be 'given'. + * + * @param uxInitialCount The count value assigned to the semaphore when it is + * created. + * + * @param pxSemaphoreBuffer Must point to a variable of type StaticSemaphore_t, + * which will then be used to hold the semaphore's data structure, removing the + * need for the memory to be allocated dynamically. + * + * @return If the counting semaphore was successfully created then a handle to + * the created counting semaphore is returned. If pxSemaphoreBuffer was NULL + * then NULL is returned. + * + * Example usage: ++ SemaphoreHandle_t xSemaphore; + StaticSemaphore_t xSemaphoreBuffer; + + void vATask( void * pvParameters ) + { + // Counting semaphores cannot be used before they have been created. Create + // a counting semaphore using xSemaphoreCreateCountingStatic(). The max + // value to which the semaphore can count is 10, and the initial value + // assigned to the count will be 0. The address of xSemaphoreBuffer is + // passed in and will be used to hold the semaphore structure, so no dynamic + // memory allocation will be used. 
+ xSemaphore = xSemaphoreCreateCountingStatic( 10, 0, &xSemaphoreBuffer ); + + // No memory allocation was attempted so xSemaphore cannot be NULL, so there + // is no need to check its value. + } ++ * \defgroup xSemaphoreCreateCountingStatic xSemaphoreCreateCountingStatic + * \ingroup Semaphores + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xSemaphoreCreateCountingStatic( uxMaxCount, uxInitialCount, pxSemaphoreBuffer ) xQueueCreateCountingSemaphoreStatic( ( uxMaxCount ), ( uxInitialCount ), ( pxSemaphoreBuffer ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * semphr. h + *void vSemaphoreDelete( SemaphoreHandle_t xSemaphore );+ * + * Delete a semaphore. This function must be used with care. For example, + * do not delete a mutex type semaphore if the mutex is held by a task. + * + * @param xSemaphore A handle to the semaphore to be deleted. + * + * \defgroup vSemaphoreDelete vSemaphoreDelete + * \ingroup Semaphores + */ +#define vSemaphoreDelete( xSemaphore ) vQueueDelete( ( QueueHandle_t ) ( xSemaphore ) ) + +/** + * semphr.h + *TaskHandle_t xSemaphoreGetMutexHolder( SemaphoreHandle_t xMutex );+ * + * If xMutex is indeed a mutex type semaphore, return the current mutex holder. + * If xMutex is not a mutex type semaphore, or the mutex is available (not held + * by a task), return NULL. + * + * Note: This is a good way of determining if the calling task is the mutex + * holder, but not a good way of determining the identity of the mutex holder as + * the holder may change between the function exiting and the returned value + * being tested. + */ +#define xSemaphoreGetMutexHolder( xSemaphore ) xQueueGetMutexHolder( ( xSemaphore ) ) + +/** + * semphr.h + *TaskHandle_t xSemaphoreGetMutexHolderFromISR( SemaphoreHandle_t xMutex );+ * + * If xMutex is indeed a mutex type semaphore, return the current mutex holder. + * If xMutex is not a mutex type semaphore, or the mutex is available (not held + * by a task), return NULL. + * + */ +#define xSemaphoreGetMutexHolderFromISR( xSemaphore ) xQueueGetMutexHolderFromISR( ( xSemaphore ) ) + +/** + * semphr.h + *UBaseType_t uxSemaphoreGetCount( SemaphoreHandle_t xSemaphore );+ * + * If the semaphore is a counting semaphore then uxSemaphoreGetCount() returns + * its current count value. If the semaphore is a binary semaphore then + * uxSemaphoreGetCount() returns 1 if the semaphore is available, and 0 if the + * semaphore is not available. + * + */ +#define uxSemaphoreGetCount( xSemaphore ) uxQueueMessagesWaiting( ( QueueHandle_t ) ( xSemaphore ) ) + +#endif /* SEMAPHORE_H */ + + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h new file mode 100644 index 000000000..e8515b35a --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h @@ -0,0 +1,129 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
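The query macros at the end of semphr.h (xSemaphoreGetMutexHolder() and uxSemaphoreGetCount()) are documented without examples; a brief sketch of how they might be combined follows, assuming INCLUDE_xTaskGetCurrentTaskHandle is set to 1 in FreeRTOSConfig.h. The helper names are hypothetical.

/* pdTRUE if the calling task currently holds xMutex.  Per the note above,
this test is only reliable when asking about the calling task itself. */
BaseType_t xCallerHoldsMutex( SemaphoreHandle_t xMutex )
{
    return ( xSemaphoreGetMutexHolder( xMutex ) == xTaskGetCurrentTaskHandle() ) ? pdTRUE : pdFALSE;
}

/* For a counting semaphore guarding a resource pool, the count is the
number of resources still free. */
UBaseType_t uxFreeResources( SemaphoreHandle_t xPool )
{
    return uxSemaphoreGetCount( xPool );
}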
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef STACK_MACROS_H +#define STACK_MACROS_H + +/* + * Call the stack overflow hook function if the stack of the task being swapped + * out is currently overflowed, or looks like it might have overflowed in the + * past. + * + * Setting configCHECK_FOR_STACK_OVERFLOW to 1 will cause the macro to check + * the current stack state only - comparing the current top of stack value to + * the stack limit. Setting configCHECK_FOR_STACK_OVERFLOW to greater than 1 + * will also cause the last few stack bytes to be checked to ensure the value + * to which the bytes were set when the task was created have not been + * overwritten. Note this second test does not guarantee that an overflowed + * stack will always be recognised. + */ + +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH < 0 ) ) + + /* Only the current stack state is to be checked. */ + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + /* Is the currently saved stack pointer within the stack limit? */ \ + if( pxCurrentTCB->pxTopOfStack <= pxCurrentTCB->pxStack ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH > 0 ) ) + + /* Only the current stack state is to be checked. */ + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + \ + /* Is the currently saved stack pointer within the stack limit? 
*/ \ + if( pxCurrentTCB->pxTopOfStack >= pxCurrentTCB->pxEndOfStack ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH < 0 ) ) + + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCB->pxStack; \ + const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \ + \ + if( ( pulStack[ 0 ] != ulCheckValue ) || \ + ( pulStack[ 1 ] != ulCheckValue ) || \ + ( pulStack[ 2 ] != ulCheckValue ) || \ + ( pulStack[ 3 ] != ulCheckValue ) ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH > 0 ) ) + + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + int8_t *pcEndOfStack = ( int8_t * ) pxCurrentTCB->pxEndOfStack; \ + static const uint8_t ucExpectedStackBytes[] = { tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE }; \ + \ + \ + pcEndOfStack -= sizeof( ucExpectedStackBytes ); \ + \ + /* Has the extremity of the task stack ever been written over? */ \ + if( memcmp( ( void * ) pcEndOfStack, ( void * ) ucExpectedStackBytes, sizeof( ucExpectedStackBytes ) ) != 0 ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ +/*-----------------------------------------------------------*/ + +/* Remove stack overflow macro if not being used. */ +#ifndef taskCHECK_FOR_STACK_OVERFLOW + #define taskCHECK_FOR_STACK_OVERFLOW() +#endif + + + +#endif /* STACK_MACROS_H */ + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/stream_buffer.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/stream_buffer.h new file mode 100644 index 000000000..f967dd284 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/stream_buffer.h @@ -0,0 +1,852 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
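Each taskCHECK_FOR_STACK_OVERFLOW() variant above reports failures through vApplicationStackOverflowHook(), which the application must supply whenever configCHECK_FOR_STACK_OVERFLOW is set to 1 or 2. A minimal sketch follows; halting in a tight loop is one common policy, not something the kernel mandates.

/* Called from the context switch when an overflow is detected, so it must
not block.  The parameters may themselves be corrupt if the overflow was
severe, so treat them with suspicion. */
void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName )
{
    ( void ) xTask;
    ( void ) pcTaskName;

    /* Halt so the failure is visible under a debugger. */
    taskDISABLE_INTERRUPTS();
    for( ;; );
}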
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* + * Stream buffers are used to send a continuous stream of data from one task or + * interrupt to another. Their implementation is lightweight, making them + * particularly suited for interrupt to task and core to core communication + * scenarios. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferReceive()) inside a critical section and set the + * receive block time to 0. + * + */ + +#ifndef STREAM_BUFFER_H +#define STREAM_BUFFER_H + +#if defined( __cplusplus ) +extern "C" { +#endif + +/** + * Type by which stream buffers are referenced. For example, a call to + * xStreamBufferCreate() returns a StreamBufferHandle_t variable that can + * then be used as a parameter to xStreamBufferSend(), xStreamBufferReceive(), + * etc. + */ +typedef void * StreamBufferHandle_t; + + +/** + * stream_buffer.h + * ++StreamBufferHandle_t xStreamBufferCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes ); ++ * + * Creates a new stream buffer using dynamically allocated memory. See + * xStreamBufferCreateStatic() for a version that uses statically allocated + * memory (memory that is allocated at compile time). + * + * configSUPPORT_DYNAMIC_ALLOCATION must be set to 1 or left undefined in + * FreeRTOSConfig.h for xStreamBufferCreate() to be available. + * + * @param xBufferSizeBytes The total number of bytes the stream buffer will be + * able to hold at any one time. + * + * @param xTriggerLevelBytes The number of bytes that must be in the stream + * buffer before a task that is blocked on the stream buffer to wait for data is + * moved out of the blocked state. For example, if a task is blocked on a read + * of an empty stream buffer that has a trigger level of 1 then the task will be + * unblocked when a single byte is written to the buffer or the task's block + * time expires. As another example, if a task is blocked on a read of an empty + * stream buffer that has a trigger level of 10 then the task will not be + * unblocked until the stream buffer contains at least 10 bytes or the task's + * block time expires. 
If a reading task's block time expires before the + * trigger level is reached then the task will still receive however many bytes + * are actually available. Setting a trigger level of 0 will result in a + * trigger level of 1 being used. It is not valid to specify a trigger level + * that is greater than the buffer size. + * + * @return If NULL is returned, then the stream buffer cannot be created + * because there is insufficient heap memory available for FreeRTOS to allocate + * the stream buffer data structures and storage area. A non-NULL value being + * returned indicates that the stream buffer has been created successfully - + * the returned value should be stored as the handle to the created stream + * buffer. + * + * Example use: ++ +void vAFunction( void ) +{ +StreamBufferHandle_t xStreamBuffer; +const size_t xStreamBufferSizeBytes = 100, xTriggerLevel = 10; + + // Create a stream buffer that can hold 100 bytes. The memory used to hold + // both the stream buffer structure and the data in the stream buffer is + // allocated dynamically. + xStreamBuffer = xStreamBufferCreate( xStreamBufferSizeBytes, xTriggerLevel ); + + if( xStreamBuffer == NULL ) + { + // There was not enough heap memory space available to create the + // stream buffer. + } + else + { + // The stream buffer was created successfully and can now be used. + } +} ++ * \defgroup xStreamBufferCreate xStreamBufferCreate + * \ingroup StreamBufferManagement + */ +#define xStreamBufferCreate( xBufferSizeBytes, xTriggerLevelBytes ) xStreamBufferGenericCreate( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE ) + +/** + * stream_buffer.h + * ++StreamBufferHandle_t xStreamBufferCreateStatic( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + uint8_t *pucStreamBufferStorageArea, + StaticStreamBuffer_t *pxStaticStreamBuffer ); ++ * Creates a new stream buffer using statically allocated memory. See + * xStreamBufferCreate() for a version that uses dynamically allocated memory. + * + * configSUPPORT_STATIC_ALLOCATION must be set to 1 in FreeRTOSConfig.h for + * xStreamBufferCreateStatic() to be available. + * + * @param xBufferSizeBytes The size, in bytes, of the buffer pointed to by the + * pucStreamBufferStorageArea parameter. + * + * @param xTriggerLevelBytes The number of bytes that must be in the stream + * buffer before a task that is blocked on the stream buffer to wait for data is + * moved out of the blocked state. For example, if a task is blocked on a read + * of an empty stream buffer that has a trigger level of 1 then the task will be + * unblocked when a single byte is written to the buffer or the task's block + * time expires. As another example, if a task is blocked on a read of an empty + * stream buffer that has a trigger level of 10 then the task will not be + * unblocked until the stream buffer contains at least 10 bytes or the task's + * block time expires. If a reading task's block time expires before the + * trigger level is reached then the task will still receive however many bytes + * are actually available. Setting a trigger level of 0 will result in a + * trigger level of 1 being used. It is not valid to specify a trigger level + * that is greater than the buffer size. + * + * @param pucStreamBufferStorageArea Must point to a uint8_t array that is at + * least xBufferSizeBytes + 1 big. This is the array to which streams are + * copied when they are written to the stream buffer. 
+ * + * @param pxStaticStreamBuffer Must point to a variable of type + * StaticStreamBuffer_t, which will be used to hold the stream buffer's data + * structure. + * + * @return If the stream buffer is created successfully then a handle to the + * created stream buffer is returned. If either pucStreamBufferStorageArea or + * pxStaticStreamBuffer is NULL then NULL is returned. + * + * Example use: ++ +// Used to dimension the array used to hold the streams. The available space +// will actually be one less than this, so 999. +#define STORAGE_SIZE_BYTES 1000 + +// Defines the memory that will actually hold the streams within the stream +// buffer. +static uint8_t ucStorageBuffer[ STORAGE_SIZE_BYTES ]; + +// The variable used to hold the stream buffer structure. +StaticStreamBuffer_t xStreamBufferStruct; + +void MyFunction( void ) +{ +StreamBufferHandle_t xStreamBuffer; +const size_t xTriggerLevel = 1; + + xStreamBuffer = xStreamBufferCreateStatic( sizeof( ucStorageBuffer ), + xTriggerLevel, + ucStorageBuffer, + &xStreamBufferStruct ); + + // As neither the pucStreamBufferStorageArea nor pxStaticStreamBuffer + // parameters were NULL, xStreamBuffer will not be NULL, and can be used to + // reference the created stream buffer in other stream buffer API calls. + + // Other code that uses the stream buffer can go here. +} + ++ * \defgroup xStreamBufferCreateStatic xStreamBufferCreateStatic + * \ingroup StreamBufferManagement + */ +#define xStreamBufferCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pucStreamBufferStorageArea, pxStaticStreamBuffer ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, pucStreamBufferStorageArea, pxStaticStreamBuffer ) + +/** + * stream_buffer.h + * ++size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ); ++ * + * Sends bytes to a stream buffer. The bytes are copied into the stream buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferReceive()) inside a critical section and set the receive + * block time to 0. + * + * Use xStreamBufferSend() to write to a stream buffer from a task. Use + * xStreamBufferSendFromISR() to write to a stream buffer from an interrupt + * service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer to which a stream is + * being sent. + * + * @param pvTxData A pointer to the buffer that holds the bytes to be copied + * into the stream buffer. + * + * @param xDataLengthBytes The maximum number of bytes to copy from pvTxData + * into the stream buffer. 
+ * + * @param xTicksToWait The maximum amount of time the task should remain in the + * Blocked state to wait for enough space to become available in the stream + * buffer, should the stream buffer contain too little space to hold + * another xDataLengthBytes bytes. The block time is specified in tick periods, + * so the absolute time it represents is dependent on the tick frequency. The + * macro pdMS_TO_TICKS() can be used to convert a time specified in milliseconds + * into a time specified in ticks. Setting xTicksToWait to portMAX_DELAY will + * cause the task to wait indefinitely (without timing out), provided + * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h. If a task times out + * before it can write all xDataLengthBytes into the buffer it will still write + * as many bytes as possible. A task does not use any CPU time when it is in + * the Blocked state. + * + * @return The number of bytes written to the stream buffer. If a task times + * out before it can write all xDataLengthBytes into the buffer it will still + * write as many bytes as possible. + * + * Example use: ++void vAFunction( StreamBufferHandle_t xStreamBuffer ) +{ +size_t xBytesSent; +uint8_t ucArrayToSend[] = { 0, 1, 2, 3 }; +char *pcStringToSend = "String to send"; +const TickType_t x100ms = pdMS_TO_TICKS( 100 ); + + // Send an array to the stream buffer, blocking for a maximum of 100ms to + // wait for enough space to be available in the stream buffer. + xBytesSent = xStreamBufferSend( xStreamBuffer, ( void * ) ucArrayToSend, sizeof( ucArrayToSend ), x100ms ); + + if( xBytesSent != sizeof( ucArrayToSend ) ) + { + // The call to xStreamBufferSend() timed out before there was enough + // space in the buffer for the data to be written, but it did + // successfully write xBytesSent bytes. + } + + // Send the string to the stream buffer. Return immediately if there is not + // enough space in the buffer. + xBytesSent = xStreamBufferSend( xStreamBuffer, ( void * ) pcStringToSend, strlen( pcStringToSend ), 0 ); + + if( xBytesSent != strlen( pcStringToSend ) ) + { + // The entire string could not be added to the stream buffer because + // there was not enough free space in the buffer, but xBytesSent bytes + // were sent. Could try again to send the remaining bytes. + } +} ++ * \defgroup xStreamBufferSend xStreamBufferSend + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + BaseType_t *pxHigherPriorityTaskWoken ); ++ * + * Interrupt safe version of the API function that sends a stream of bytes to + * the stream buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. 
If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferReceive()) inside a critical section and set the receive + * block time to 0. + * + * Use xStreamBufferSend() to write to a stream buffer from a task. Use + * xStreamBufferSendFromISR() to write to a stream buffer from an interrupt + * service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer to which a stream is + * being sent. + * + * @param pvTxData A pointer to the data that is to be copied into the stream + * buffer. + * + * @param xDataLengthBytes The maximum number of bytes to copy from pvTxData + * into the stream buffer. + * + * @param pxHigherPriorityTaskWoken It is possible that a stream buffer will + * have a task blocked on it waiting for data. Calling + * xStreamBufferSendFromISR() can make data available, and so cause a task that + * was waiting for data to leave the Blocked state. If calling + * xStreamBufferSendFromISR() causes a task to leave the Blocked state, and the + * unblocked task has a priority higher than the currently executing task (the + * task that was interrupted), then, internally, xStreamBufferSendFromISR() + * will set *pxHigherPriorityTaskWoken to pdTRUE. If + * xStreamBufferSendFromISR() sets this value to pdTRUE, then normally a + * context switch should be performed before the interrupt is exited. This will + * ensure that the interrupt returns directly to the highest priority Ready + * state task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it + * is passed into the function. See the example code below for an example. + * + * @return The number of bytes actually written to the stream buffer, which will + * be less than xDataLengthBytes if the stream buffer didn't have enough free + * space for all the bytes to be written. + * + * Example use: ++// A stream buffer that has already been created. +StreamBufferHandle_t xStreamBuffer; + +void vAnInterruptServiceRoutine( void ) +{ +size_t xBytesSent; +char *pcStringToSend = "String to send"; +BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE. + + // Attempt to send the string to the stream buffer. + xBytesSent = xStreamBufferSendFromISR( xStreamBuffer, + ( void * ) pcStringToSend, + strlen( pcStringToSend ), + &xHigherPriorityTaskWoken ); + + if( xBytesSent != strlen( pcStringToSend ) ) + { + // There was not enough free space in the stream buffer for the entire + // string to be written, but xBytesSent bytes were written. + } + + // If xHigherPriorityTaskWoken was set to pdTRUE inside + // xStreamBufferSendFromISR() then a task that has a priority above the + // priority of the currently executing task was unblocked and a context + // switch should be performed to ensure the ISR returns to the unblocked + // task. In most FreeRTOS ports this is done by simply passing + // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the + // variable's value, and perform the context switch if necessary. Check the + // documentation for the port in use for port specific instructions. 
+ taskYIELD_FROM_ISR( xHigherPriorityTaskWoken ); +} ++ * \defgroup xStreamBufferSendFromISR xStreamBufferSendFromISR + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ); ++ * + * Receives bytes from a stream buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferReceive()) inside a critical section and set the receive + * block time to 0. + * + * Use xStreamBufferReceive() to read from a stream buffer from a task. Use + * xStreamBufferReceiveFromISR() to read from a stream buffer from an + * interrupt service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer from which bytes are to + * be received. + * + * @param pvRxData A pointer to the buffer into which the received bytes will be + * copied. + * + * @param xBufferLengthBytes The length of the buffer pointed to by the + * pvRxData parameter. This sets the maximum number of bytes to receive in one + * call. xStreamBufferReceive() will return as many bytes as possible up to a + * maximum set by xBufferLengthBytes. + * + * @param xTicksToWait The maximum amount of time the task should remain in the + * Blocked state to wait for data to become available if the stream buffer is + * empty. xStreamBufferReceive() will return immediately if xTicksToWait is + * zero. The block time is specified in tick periods, so the absolute time it + * represents is dependent on the tick frequency. The macro pdMS_TO_TICKS() can + * be used to convert a time specified in milliseconds into a time specified in + * ticks. Setting xTicksToWait to portMAX_DELAY will cause the task to wait + * indefinitely (without timing out), provided INCLUDE_vTaskSuspend is set to 1 + * in FreeRTOSConfig.h. A task does not use any CPU time when it is in the + * Blocked state. + * + * @return The number of bytes actually read from the stream buffer, which will + * be less than xBufferLengthBytes if the call to xStreamBufferReceive() timed + * out before xBufferLengthBytes were available. + * + * Example use: ++void vAFunction( StreamBufferHandle_t xStreamBuffer ) +{ +uint8_t ucRxData[ 20 ]; +size_t xReceivedBytes; +const TickType_t xBlockTime = pdMS_TO_TICKS( 20 ); + + // Receive up to another sizeof( ucRxData ) bytes from the stream buffer. 
+ // Wait in the Blocked state (so not using any CPU processing time) for a + // maximum of 20ms for the full sizeof( ucRxData ) number of bytes to be + // available. + xReceivedBytes = xStreamBufferReceive( xStreamBuffer, + ( void * ) ucRxData, + sizeof( ucRxData ), + xBlockTime ); + + if( xReceivedBytes > 0 ) + { + // ucRxData contains another xReceivedBytes bytes of data, which can + // be processed here.... + } +} ++ * \defgroup xStreamBufferReceive xStreamBufferReceive + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + BaseType_t *pxHigherPriorityTaskWoken ); ++ * + * An interrupt safe version of the API function that receives bytes from a + * stream buffer. + * + * Use xStreamBufferReceive() to read bytes from a stream buffer from a task. + * Use xStreamBufferReceiveFromISR() to read bytes from a stream buffer from an + * interrupt service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer from which a stream + * is being received. + * + * @param pvRxData A pointer to the buffer into which the received bytes are + * copied. + * + * @param xBufferLengthBytes The length of the buffer pointed to by the + * pvRxData parameter. This sets the maximum number of bytes to receive in one + * call. xStreamBufferReceive() will return as many bytes as possible up to a + * maximum set by xBufferLengthBytes. + * + * @param pxHigherPriorityTaskWoken It is possible that a stream buffer will + * have a task blocked on it waiting for space to become available. Calling + * xStreamBufferReceiveFromISR() can make space available, and so cause a task + * that is waiting for space to leave the Blocked state. If calling + * xStreamBufferReceiveFromISR() causes a task to leave the Blocked state, and + * the unblocked task has a priority higher than the currently executing task + * (the task that was interrupted), then, internally, + * xStreamBufferReceiveFromISR() will set *pxHigherPriorityTaskWoken to pdTRUE. + * If xStreamBufferReceiveFromISR() sets this value to pdTRUE, then normally a + * context switch should be performed before the interrupt is exited. That will + * ensure the interrupt returns directly to the highest priority Ready state + * task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it is + * passed into the function. See the code example below for an example. + * + * @return The number of bytes read from the stream buffer, if any. + * + * Example use: ++// A stream buffer that has already been created. +StreamBufferHandle_t xStreamBuffer; + +void vAnInterruptServiceRoutine( void ) +{ +uint8_t ucRxData[ 20 ]; +size_t xReceivedBytes; +BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE. + + // Receive the next stream from the stream buffer. + xReceivedBytes = xStreamBufferReceiveFromISR( xStreamBuffer, + ( void * ) ucRxData, + sizeof( ucRxData ), + &xHigherPriorityTaskWoken ); + + if( xReceivedBytes > 0 ) + { + // ucRxData contains xReceivedBytes bytes read from the stream buffer. + // Process the stream here.... 
+ } + + // If xHigherPriorityTaskWoken was set to pdTRUE inside + // xStreamBufferReceiveFromISR() then a task that has a priority above the + // priority of the currently executing task was unblocked and a context + // switch should be performed to ensure the ISR returns to the unblocked + // task. In most FreeRTOS ports this is done by simply passing + // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the + // variable's value, and perform the context switch if necessary. Check the + // documentation for the port in use for port specific instructions. + taskYIELD_FROM_ISR( xHigherPriorityTaskWoken ); +} ++ * \defgroup xStreamBufferReceiveFromISR xStreamBufferReceiveFromISR + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ); ++ * + * Deletes a stream buffer that was previously created using a call to + * xStreamBufferCreate() or xStreamBufferCreateStatic(). If the stream + * buffer was created using dynamic memory (that is, by xStreamBufferCreate()), + * then the allocated memory is freed. + * + * A stream buffer handle must not be used after the stream buffer has been + * deleted. + * + * @param xStreamBuffer The handle of the stream buffer to be deleted. + * + * \defgroup vStreamBufferDelete vStreamBufferDelete + * \ingroup StreamBufferManagement + */ +void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ); ++ * + * Queries a stream buffer to see if it is full. A stream buffer is full if it + * does not have any free space, and therefore cannot accept any more data. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return If the stream buffer is full then pdTRUE is returned. Otherwise + * pdFALSE is returned. + * + * \defgroup xStreamBufferIsFull xStreamBufferIsFull + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ); ++ * + * Queries a stream buffer to see if it is empty. A stream buffer is empty if + * it does not contain any data. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return If the stream buffer is empty then pdTRUE is returned. Otherwise + * pdFALSE is returned. + * + * \defgroup xStreamBufferIsEmpty xStreamBufferIsEmpty + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ); ++ * + * Resets a stream buffer to its initial, empty, state. Any data that was in + * the stream buffer is discarded. A stream buffer can only be reset if there + * are no tasks blocked waiting to either send to or receive from the stream + * buffer. + * + * @param xStreamBuffer The handle of the stream buffer being reset. + * + * @return If the stream buffer is reset then pdPASS is returned. If there was + * a task blocked waiting to send to or read from the stream buffer then the + * stream buffer is not reset and pdFAIL is returned. 
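xStreamBufferIsFull(), xStreamBufferIsEmpty() and xStreamBufferReset() are documented above without examples; a short sketch of discarding stale data between uses follows. The function name vStartNewSession is hypothetical.

void vStartNewSession( StreamBufferHandle_t xStreamBuffer )
{
    /* Discard anything left over from the previous session.  The reset can
    only succeed while no task is blocked on the buffer. */
    if( xStreamBufferIsEmpty( xStreamBuffer ) == pdFALSE )
    {
        if( xStreamBufferReset( xStreamBuffer ) != pdPASS )
        {
            /* A task was blocked waiting to send to or read from the buffer,
            so the buffer was not reset; handle as appropriate. */
        }
    }
}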
+ * + * \defgroup xStreamBufferReset xStreamBufferReset + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ); ++ * + * Queries a stream buffer to see how much free space it contains, which is + * equal to the amount of data that can be sent to the stream buffer before it + * is full. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return The number of bytes that can be written to the stream buffer before + * the stream buffer would be full. + * + * \defgroup xStreamBufferSpacesAvailable xStreamBufferSpacesAvailable + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ); ++ * + * Queries a stream buffer to see how much data it contains, which is equal to + * the number of bytes that can be read from the stream buffer before the stream + * buffer would be empty. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return The number of bytes that can be read from the stream buffer before + * the stream buffer would be empty. + * + * \defgroup xStreamBufferBytesAvailable xStreamBufferBytesAvailable + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ); ++ * + * A stream buffer's trigger level is the number of bytes that must be in the + * stream buffer before a task that is blocked on the stream buffer to + * wait for data is moved out of the blocked state. For example, if a task is + * blocked on a read of an empty stream buffer that has a trigger level of 1 + * then the task will be unblocked when a single byte is written to the buffer + * or the task's block time expires. As another example, if a task is blocked + * on a read of an empty stream buffer that has a trigger level of 10 then the + * task will not be unblocked until the stream buffer contains at least 10 bytes + * or the task's block time expires. If a reading task's block time expires + * before the trigger level is reached then the task will still receive however + * many bytes are actually available. Setting a trigger level of 0 will result + * in a trigger level of 1 being used. It is not valid to specify a trigger + * level that is greater than the buffer size. + * + * A trigger level is set when the stream buffer is created, and can be modified + * using xStreamBufferSetTriggerLevel(). + * + * @param xStreamBuffer The handle of the stream buffer being updated. + * + * @param xTriggerLevel The new trigger level for the stream buffer. + * + * @return If xTriggerLevel was less than or equal to the stream buffer's length + * then the trigger level will be updated and pdTRUE is returned. Otherwise + * pdFALSE is returned. 
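A sketch of using xStreamBufferSetTriggerLevel() to change the trigger level at run time, trading wake-up frequency for latency, follows; the function name and the two level values are assumptions for illustration only.

void vSetBatchingMode( StreamBufferHandle_t xStreamBuffer, BaseType_t xBatch )
{
/* Illustrative values: batch 64 bytes per wake-up, or wake on every byte. */
const size_t xBatchedLevel = 64, xImmediateLevel = 1;

    if( xStreamBufferSetTriggerLevel( xStreamBuffer, ( xBatch != pdFALSE ) ? xBatchedLevel : xImmediateLevel ) == pdFALSE )
    {
        /* The requested trigger level exceeded the buffer's length. */
    }
}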
+ * + * \defgroup xStreamBufferSetTriggerLevel xStreamBufferSetTriggerLevel + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ); ++ * + * For advanced users only. + * + * The sbSEND_COMPLETED() macro is called from within the FreeRTOS APIs when + * data is sent to a message buffer or stream buffer. If there was a task that + * was blocked on the message or stream buffer waiting for data to arrive then + * the sbSEND_COMPLETED() macro sends a notification to the task to remove it + * from the Blocked state. xStreamBufferSendCompletedFromISR() does the same + * thing. It is provided to enable application writers to implement their own + * version of sbSEND_COMPLETED(), and MUST NOT BE USED AT ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer to which data was + * written. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xStreamBufferSendCompletedFromISR(). If calling + * xStreamBufferSendCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. + * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned. + * + * \defgroup xStreamBufferSendCompletedFromISR xStreamBufferSendCompletedFromISR + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ); ++ * + * For advanced users only. + * + * The sbRECEIVE_COMPLETED() macro is called from within the FreeRTOS APIs when + * data is read out of a message buffer or stream buffer. If there was a task + * that was blocked on the message or stream buffer waiting for data to arrive + * then the sbRECEIVE_COMPLETED() macro sends a notification to the task to + * remove it from the Blocked state. xStreamBufferReceiveCompletedFromISR() + * does the same thing. It is provided to enable application writers to + * implement their own version of sbRECEIVE_COMPLETED(), and MUST NOT BE USED AT + * ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer from which data was + * read. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xStreamBufferReceiveCompletedFromISR(). If calling + * xStreamBufferReceiveCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. 
+ * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned. + * + * \defgroup xStreamBufferReceiveCompletedFromISR xStreamBufferReceiveCompletedFromISR + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/* Functions below here are not part of the public API. */ +StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer ) PRIVILEGED_FUNCTION; + +StreamBufferHandle_t xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer, + uint8_t * const pucStreamBufferStorageArea, + StaticStreamBuffer_t * const pxStaticStreamBuffer ) PRIVILEGED_FUNCTION; + +#if( configUSE_TRACE_FACILITY == 1 ) + void vStreamBufferSetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer, UBaseType_t uxStreamBufferNumber ) PRIVILEGED_FUNCTION; + UBaseType_t uxStreamBufferGetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + uint8_t ucStreamBufferGetStreamBufferType( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; +#endif + +#if defined( __cplusplus ) +} +#endif + +#endif /* !defined( STREAM_BUFFER_H ) */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/task.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/task.h new file mode 100644 index 000000000..df7ce95a6 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/task.h @@ -0,0 +1,2338 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! 
+ */ + + +#ifndef INC_TASK_H +#define INC_TASK_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h must appear in source files before include task.h" +#endif + +#include "list.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*----------------------------------------------------------- + * MACROS AND DEFINITIONS + *----------------------------------------------------------*/ + +#define tskKERNEL_VERSION_NUMBER "V10.0.1" +#define tskKERNEL_VERSION_MAJOR 10 +#define tskKERNEL_VERSION_MINOR 0 +#define tskKERNEL_VERSION_BUILD 1 + +/** + * task. h + * + * Type by which tasks are referenced. For example, a call to xTaskCreate + * returns (via a pointer parameter) a TaskHandle_t variable that can then + * be used as a parameter to vTaskDelete to delete the task. + * + * \defgroup TaskHandle_t TaskHandle_t + * \ingroup Tasks + */ +typedef void * TaskHandle_t; + +/* + * Defines the prototype to which the application task hook function must + * conform. + */ +typedef BaseType_t (*TaskHookFunction_t)( void * ); + +/* Task states returned by eTaskGetState. */ +typedef enum +{ + eRunning = 0, /* A task is querying the state of itself, so must be running. */ + eReady, /* The task being queried is in a ready or pending ready list. */ + eBlocked, /* The task being queried is in the Blocked state. */ + eSuspended, /* The task being queried is in the Suspended state, or is in the Blocked state with an infinite time out. */ + eDeleted, /* The task being queried has been deleted, but its TCB has not yet been freed. */ + eInvalid /* Used as an 'invalid state' value. */ +} eTaskState; + +/* Actions that can be performed when vTaskNotify() is called. */ +typedef enum +{ + eNoAction = 0, /* Notify the task without updating its notify value. */ + eSetBits, /* Set bits in the task's notification value. */ + eIncrement, /* Increment the task's notification value. */ + eSetValueWithOverwrite, /* Set the task's notification value to a specific value even if the previous value has not yet been read by the task. */ + eSetValueWithoutOverwrite /* Set the task's notification value if the previous value has been read by the task. */ +} eNotifyAction; + +/* + * Used internally only. + */ +typedef struct xTIME_OUT +{ + BaseType_t xOverflowCount; + TickType_t xTimeOnEntering; +} TimeOut_t; + +/* + * Defines the memory ranges allocated to the task when an MPU is used. + */ +typedef struct xMEMORY_REGION +{ + void *pvBaseAddress; + uint32_t ulLengthInBytes; + uint32_t ulParameters; +} MemoryRegion_t; + +/* + * Parameters required to create an MPU protected task. + */ +typedef struct xTASK_PARAMETERS +{ + TaskFunction_t pvTaskCode; + const char * const pcName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + uint16_t usStackDepth; + void *pvParameters; + UBaseType_t uxPriority; + StackType_t *puxStackBuffer; + MemoryRegion_t xRegions[ portNUM_CONFIGURABLE_REGIONS ]; + #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + StaticTask_t * const pxTaskBuffer; + #endif +} TaskParameters_t; + +/* Used with the uxTaskGetSystemState() function to return the state of each task +in the system. */ +typedef struct xTASK_STATUS +{ + TaskHandle_t xHandle; /* The handle of the task to which the rest of the information in the structure relates. */ + const char *pcTaskName; /* A pointer to the task's name. This value will be invalid if the task was deleted since the structure was populated!
*/ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + UBaseType_t xTaskNumber; /* A number unique to the task. */ + eTaskState eCurrentState; /* The state in which the task existed when the structure was populated. */ + UBaseType_t uxCurrentPriority; /* The priority at which the task was running (may be inherited) when the structure was populated. */ + UBaseType_t uxBasePriority; /* The priority to which the task will return if the task's current priority has been inherited to avoid unbounded priority inversion when obtaining a mutex. Only valid if configUSE_MUTEXES is defined as 1 in FreeRTOSConfig.h. */ + uint32_t ulRunTimeCounter; /* The total run time allocated to the task so far, as defined by the run time stats clock. See http://www.freertos.org/rtos-run-time-stats.html. Only valid when configGENERATE_RUN_TIME_STATS is defined as 1 in FreeRTOSConfig.h. */ + StackType_t *pxStackBase; /* Points to the lowest address of the task's stack area. */ + uint16_t usStackHighWaterMark; /* The minimum amount of stack space that has remained for the task since the task was created. The closer this value is to zero the closer the task has come to overflowing its stack. */ +} TaskStatus_t; + +/* Possible return values for eTaskConfirmSleepModeStatus(). */ +typedef enum +{ + eAbortSleep = 0, /* A task has been made ready or a context switch pended since portSUPPRESS_TICKS_AND_SLEEP() was called - abort entering a sleep mode. */ + eStandardSleep, /* Enter a sleep mode that will not last any longer than the expected idle time. */ + eNoTasksWaitingTimeout /* No tasks are waiting for a timeout so it is safe to enter a sleep mode that can only be exited by an external interrupt. */ +} eSleepModeStatus; + +/** + * Defines the priority used by the idle task. This must not be modified. + * + * \ingroup TaskUtils + */ +#define tskIDLE_PRIORITY ( ( UBaseType_t ) 0U ) + +/** + * task. h + * + * Macro for forcing a context switch. + * + * \defgroup taskYIELD taskYIELD + * \ingroup SchedulerControl + */ +#define taskYIELD() portYIELD() + +/** + * task. h + * + * Macro to mark the start of a critical code region. Preemptive context + * switches cannot occur when in a critical region. + * + * NOTE: This may alter the stack (depending on the portable implementation) + * so must be used with care! + * + * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL + * \ingroup SchedulerControl + */ +#define taskENTER_CRITICAL() portENTER_CRITICAL() +#define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() + +/** + * task. h + * + * Macro to mark the end of a critical code region. Preemptive context + * switches cannot occur when in a critical region. + * + * NOTE: This may alter the stack (depending on the portable implementation) + * so must be used with care! + * + * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL + * \ingroup SchedulerControl + */ +#define taskEXIT_CRITICAL() portEXIT_CRITICAL() +#define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) +/** + * task. h + * + * Macro to disable all maskable interrupts. + * + * \defgroup taskDISABLE_INTERRUPTS taskDISABLE_INTERRUPTS + * \ingroup SchedulerControl + */ +#define taskDISABLE_INTERRUPTS() portDISABLE_INTERRUPTS() + +/** + * task. h + * + * Macro to enable microcontroller interrupts. + * + * \defgroup taskENABLE_INTERRUPTS taskENABLE_INTERRUPTS + * \ingroup SchedulerControl + */ +#define taskENABLE_INTERRUPTS() portENABLE_INTERRUPTS() + +/* Definitions returned by xTaskGetSchedulerState().
taskSCHEDULER_SUSPENDED is +0 to generate more optimal code when configASSERT() is defined as the constant +is used in assert() statements. */ +#define taskSCHEDULER_SUSPENDED ( ( BaseType_t ) 0 ) +#define taskSCHEDULER_NOT_STARTED ( ( BaseType_t ) 1 ) +#define taskSCHEDULER_RUNNING ( ( BaseType_t ) 2 ) + + +/*----------------------------------------------------------- + * TASK CREATION API + *----------------------------------------------------------*/ + +/** + * task. h + *+ BaseType_t xTaskCreate( + TaskFunction_t pvTaskCode, + const char * const pcName, + configSTACK_DEPTH_TYPE usStackDepth, + void *pvParameters, + UBaseType_t uxPriority, + TaskHandle_t *pvCreatedTask + );+ * + * Create a new task and add it to the list of tasks that are ready to run. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreate() then both blocks of memory are automatically dynamically + * allocated inside the xTaskCreate() function. (see + * http://www.freertos.org/a00111.html). If a task is created using + * xTaskCreateStatic() then the application writer must provide the required + * memory. xTaskCreateStatic() therefore allows a task to be created without + * using any dynamic memory allocation. + * + * See xTaskCreateStatic() for a version that does not use any dynamic memory + * allocation. + * + * xTaskCreate() can only be used to create a task that has unrestricted + * access to the entire microcontroller memory map. Systems that include MPU + * support can alternatively create an MPU constrained task using + * xTaskCreateRestricted(). + * + * @param pvTaskCode Pointer to the task entry function. Tasks + * must be implemented to never return (i.e. continuous loop). + * + * @param pcName A descriptive name for the task. This is mainly used to + * facilitate debugging. Max length defined by configMAX_TASK_NAME_LEN - default + * is 16. + * + * @param usStackDepth The size of the task stack specified as the number of + * variables the stack can hold - not the number of bytes. For example, if + * the stack is 16 bits wide and usStackDepth is defined as 100, 200 bytes + * will be allocated for stack storage. + * + * @param pvParameters Pointer that will be used as the parameter for the task + * being created. + * + * @param uxPriority The priority at which the task should run. Systems that + * include MPU support can optionally create tasks in a privileged (system) + * mode by setting bit portPRIVILEGE_BIT of the priority parameter. For + * example, to create a privileged task at priority 2 the uxPriority parameter + * should be set to ( 2 | portPRIVILEGE_BIT ). + * + * @param pvCreatedTask Used to pass back a handle by which the created task + * can be referenced. + * + * @return pdPASS if the task was successfully created and added to a ready + * list, otherwise an error code defined in the file projdefs.h + * + * Example usage: ++ // Task to be created. + void vTaskCode( void * pvParameters ) + { + for( ;; ) + { + // Task code goes here. + } + } + + // Function that creates a task. + void vOtherFunction( void ) + { + static uint8_t ucParameterToPass; + TaskHandle_t xHandle = NULL; + + // Create the task, storing the handle. Note that the passed parameter ucParameterToPass + // must exist for the lifetime of the task, so in this case is declared static. 
If it was just an + // automatic stack variable it might no longer exist, or at least have been corrupted, by the time + // the new task attempts to access it. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, &ucParameterToPass, tskIDLE_PRIORITY, &xHandle ); + configASSERT( xHandle ); + + // Use the handle to delete the task. + if( xHandle != NULL ) + { + vTaskDelete( xHandle ); + } + } ++ * \defgroup xTaskCreate xTaskCreate + * \ingroup Tasks + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + BaseType_t xTaskCreate( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *+ TaskHandle_t xTaskCreateStatic( TaskFunction_t pvTaskCode, + const char * const pcName, + uint32_t ulStackDepth, + void *pvParameters, + UBaseType_t uxPriority, + StackType_t *pxStackBuffer, + StaticTask_t *pxTaskBuffer );+ * + * Create a new task and add it to the list of tasks that are ready to run. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreate() then both blocks of memory are automatically dynamically + * allocated inside the xTaskCreate() function. (see + * http://www.freertos.org/a00111.html). If a task is created using + * xTaskCreateStatic() then the application writer must provide the required + * memory. xTaskCreateStatic() therefore allows a task to be created without + * using any dynamic memory allocation. + * + * @param pvTaskCode Pointer to the task entry function. Tasks + * must be implemented to never return (i.e. continuous loop). + * + * @param pcName A descriptive name for the task. This is mainly used to + * facilitate debugging. The maximum length of the string is defined by + * configMAX_TASK_NAME_LEN in FreeRTOSConfig.h. + * + * @param ulStackDepth The size of the task stack specified as the number of + * variables the stack can hold - not the number of bytes. For example, if + * the stack is 32-bits wide and ulStackDepth is defined as 100 then 400 bytes + * will be allocated for stack storage. + * + * @param pvParameters Pointer that will be used as the parameter for the task + * being created. + * + * @param uxPriority The priority at which the task will run. + * + * @param pxStackBuffer Must point to a StackType_t array that has at least + * ulStackDepth indexes - the array will then be used as the task's stack, + * removing the need for the stack to be allocated dynamically. + * + * @param pxTaskBuffer Must point to a variable of type StaticTask_t, which will + * then be used to hold the task's data structures, removing the need for the + * memory to be allocated dynamically. + * + * @return If neither pxStackBuffer nor pxTaskBuffer is NULL, then the task will + * be created and a handle to the created task is returned. If either + * pxStackBuffer or pxTaskBuffer is NULL then the task will not be created and + * NULL is returned. + * + * Example usage: ++ + // Dimensions the buffer that the task being created will use as its stack. + // NOTE: This is the number of words the stack will hold, not the number of + // bytes.
For example, if each stack item is 32-bits, and this is set to 100, + // then 400 bytes (100 * 32-bits) will be allocated. + #define STACK_SIZE 200 + + // Structure that will hold the TCB of the task being created. + StaticTask_t xTaskBuffer; + + // Buffer that the task being created will use as its stack. Note this is + // an array of StackType_t variables. The size of StackType_t is dependent on + // the RTOS port. + StackType_t xStack[ STACK_SIZE ]; + + // Function that implements the task being created. + void vTaskCode( void * pvParameters ) + { + // The parameter value is expected to be 1 as 1 is passed in the + // pvParameters value in the call to xTaskCreateStatic(). + configASSERT( ( uint32_t ) pvParameters == 1UL ); + + for( ;; ) + { + // Task code goes here. + } + } + + // Function that creates a task. + void vOtherFunction( void ) + { + TaskHandle_t xHandle = NULL; + + // Create the task without using any dynamic memory allocation. + xHandle = xTaskCreateStatic( + vTaskCode, // Function that implements the task. + "NAME", // Text name for the task. + STACK_SIZE, // Stack size in words, not bytes. + ( void * ) 1, // Parameter passed into the task. + tskIDLE_PRIORITY,// Priority at which the task is created. + xStack, // Array to use as the task's stack. + &xTaskBuffer ); // Variable to hold the task's data structure. + + // puxStackBuffer and pxTaskBuffer were not NULL, so the task will have + // been created, and xHandle will be the task's handle. Use the handle + // to suspend the task. + vTaskSuspend( xHandle ); + } ++ * \defgroup xTaskCreateStatic xTaskCreateStatic + * \ingroup Tasks + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION; +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * task. h + *+ BaseType_t xTaskCreateRestricted( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );+ * + * Only available when configSUPPORT_DYNAMIC_ALLOCATION is set to 1. + * + * xTaskCreateRestricted() should only be used in systems that include an MPU + * implementation. + * + * Create a new task and add it to the list of tasks that are ready to run. + * The function parameters define the memory regions and associated access + * permissions allocated to the task. + * + * See xTaskCreateRestrictedStatic() for a version that does not use any + * dynamic memory allocation. + * + * @param pxTaskDefinition Pointer to a structure that contains a member + * for each of the normal xTaskCreate() parameters (see the xTaskCreate() API + * documentation) plus an optional stack buffer and the memory region + * definitions. + * + * @param pxCreatedTask Used to pass back a handle by which the created task + * can be referenced. + * + * @return pdPASS if the task was successfully created and added to a ready + * list, otherwise an error code defined in the file projdefs.h + * + * Example usage: ++// Create a TaskParameters_t structure that defines the task to be created. +static const TaskParameters_t xCheckTaskParameters = +{ + vATask, // pvTaskCode - the function that implements the task. + "ATask", // pcName - just a text name for the task to assist debugging. + 100, // usStackDepth - the stack size DEFINED IN WORDS.
+ NULL, // pvParameters - passed into the task function as the function parameters. + ( 1UL | portPRIVILEGE_BIT ),// uxPriority - task priority, set the portPRIVILEGE_BIT if the task should run in a privileged state. + cStackBuffer,// puxStackBuffer - the buffer to be used as the task stack. + + // xRegions - Allocate up to three separate memory regions for access by + // the task, with appropriate access permissions. Different processors have + // different memory alignment requirements - refer to the FreeRTOS documentation + // for full information. + { + // Base address Length Parameters + { cReadWriteArray, 32, portMPU_REGION_READ_WRITE }, + { cReadOnlyArray, 32, portMPU_REGION_READ_ONLY }, + { cPrivilegedOnlyAccessArray, 128, portMPU_REGION_PRIVILEGED_READ_WRITE } + } +}; + +int main( void ) +{ +TaskHandle_t xHandle; + + // Create a task from the const structure defined above. The task handle + // is requested (the second parameter is not NULL) but in this case just for + // demonstration purposes as it's not actually used. + xTaskCreateRestricted( &xCheckTaskParameters, &xHandle ); + + // Start the scheduler. + vTaskStartScheduler(); + + // Will only get here if there was insufficient memory to create the idle + // and/or timer task. + for( ;; ); +} ++ * \defgroup xTaskCreateRestricted xTaskCreateRestricted + * \ingroup Tasks + */ +#if( portUSING_MPU_WRAPPERS == 1 ) + BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *+ BaseType_t xTaskCreateRestrictedStatic( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );+ * + * Only available when configSUPPORT_STATIC_ALLOCATION is set to 1. + * + * xTaskCreateRestrictedStatic() should only be used in systems that include an + * MPU implementation. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreateRestricted() then the stack is provided by the application writer, + * and the memory used to hold the task's data structure is automatically + * dynamically allocated inside the xTaskCreateRestricted() function. If a task + * is created using xTaskCreateRestrictedStatic() then the application writer + * must provide the memory used to hold the task's data structures too. + * xTaskCreateRestrictedStatic() therefore allows a memory protected task to be + * created without using any dynamic memory allocation. + * + * @param pxTaskDefinition Pointer to a structure that contains a member + * for each of the normal xTaskCreate() parameters (see the xTaskCreate() API + * documentation) plus an optional stack buffer and the memory region + * definitions. If configSUPPORT_STATIC_ALLOCATION is set to 1 the structure + * contains an additional member, which is used to point to a variable of type + * StaticTask_t - which is then used to hold the task's data structure. + * + * @param pxCreatedTask Used to pass back a handle by which the created task + * can be referenced. + * + * @return pdPASS if the task was successfully created and added to a ready + * list, otherwise an error code defined in the file projdefs.h + * + * Example usage: ++// Create a TaskParameters_t structure that defines the task to be created. +// The StaticTask_t variable is only included in the structure when +// configSUPPORT_STATIC_ALLOCATION is set to 1.
The PRIVILEGED_DATA macro can +// be used to force the variable into the RTOS kernel's privileged data area. +static PRIVILEGED_DATA StaticTask_t xTaskBuffer; +static const TaskParameters_t xCheckTaskParameters = +{ + vATask, // pvTaskCode - the function that implements the task. + "ATask", // pcName - just a text name for the task to assist debugging. + 100, // usStackDepth - the stack size DEFINED IN WORDS. + NULL, // pvParameters - passed into the task function as the function parameters. + ( 1UL | portPRIVILEGE_BIT ),// uxPriority - task priority, set the portPRIVILEGE_BIT if the task should run in a privileged state. + cStackBuffer,// puxStackBuffer - the buffer to be used as the task stack. + + // xRegions - Allocate up to three separate memory regions for access by + // the task, with appropriate access permissions. Different processors have + // different memory alignment requirements - refer to the FreeRTOS documentation + // for full information. + { + // Base address Length Parameters + { cReadWriteArray, 32, portMPU_REGION_READ_WRITE }, + { cReadOnlyArray, 32, portMPU_REGION_READ_ONLY }, + { cPrivilegedOnlyAccessArray, 128, portMPU_REGION_PRIVILEGED_READ_WRITE } + }, + + &xTaskBuffer // Holds the task's data structure. +}; + +int main( void ) +{ +TaskHandle_t xHandle; + + // Create a task from the const structure defined above. The task handle + // is requested (the second parameter is not NULL) but in this case just for + // demonstration purposes as it's not actually used. + xTaskCreateRestrictedStatic( &xCheckTaskParameters, &xHandle ); + + // Start the scheduler. + vTaskStartScheduler(); + + // Will only get here if there was insufficient memory to create the idle + // and/or timer task. + for( ;; ); +} ++ * \defgroup xTaskCreateRestrictedStatic xTaskCreateRestrictedStatic + * \ingroup Tasks + */ +#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *+ void vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions );+ * + * Memory regions are assigned to a restricted task when the task is created by + * a call to xTaskCreateRestricted(). These regions can be redefined using + * vTaskAllocateMPURegions(). + * + * @param xTask The handle of the task being updated. + * + * @param xRegions A pointer to a MemoryRegion_t structure that contains the + * new memory region definitions. + * + * Example usage: ++// Define an array of MemoryRegion_t structures that configures an MPU region +// allowing read/write access for 1024 bytes starting at the beginning of the +// ucOneKByte array. The other two of the maximum 3 definable regions are +// unused so set to zero. +static const MemoryRegion_t xAltRegions[ portNUM_CONFIGURABLE_REGIONS ] = +{ + // Base address Length Parameters + { ucOneKByte, 1024, portMPU_REGION_READ_WRITE }, + { 0, 0, 0 }, + { 0, 0, 0 } +}; + +void vATask( void *pvParameters ) +{ + // This task was created such that it has access to certain regions of + // memory as defined by the MPU configuration. At some point it is + // desired that these MPU regions are replaced with those defined in the + // xAltRegions const struct above. Use a call to vTaskAllocateMPURegions() + // for this purpose. NULL is used as the task handle to indicate that this + // function should modify the MPU regions of the calling task.
+ vTaskAllocateMPURegions( NULL, xAltRegions ); + + // Now the task can continue its function, but from this point on can only + // access its stack and the ucOneKByte array (unless any other statically + // defined or shared regions have been declared elsewhere). +} ++ * \defgroup vTaskAllocateMPURegions vTaskAllocateMPURegions + * \ingroup Tasks + */ +void vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskDelete( TaskHandle_t xTask );+ * + * INCLUDE_vTaskDelete must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Remove a task from the RTOS real time kernel's management. The task being + * deleted will be removed from all ready, blocked, suspended and event lists. + * + * NOTE: The idle task is responsible for freeing the kernel allocated + * memory from tasks that have been deleted. It is therefore important that + * the idle task is not starved of microcontroller processing time if your + * application makes any calls to vTaskDelete (). Memory allocated by the + * task code is not automatically freed, and should be freed before the task + * is deleted. + * + * See the demo application file death.c for sample code that utilises + * vTaskDelete (). + * + * @param xTask The handle of the task to be deleted. Passing NULL will + * cause the calling task to be deleted. + * + * Example usage: ++ void vOtherFunction( void ) + { + TaskHandle_t xHandle; + + // Create the task, storing the handle. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle ); + + // Use the handle to delete the task. + vTaskDelete( xHandle ); + } ++ * \defgroup vTaskDelete vTaskDelete + * \ingroup Tasks + */ +void vTaskDelete( TaskHandle_t xTaskToDelete ) PRIVILEGED_FUNCTION; + +/*----------------------------------------------------------- + * TASK CONTROL API + *----------------------------------------------------------*/ + +/** + * task. h + *void vTaskDelay( const TickType_t xTicksToDelay );+ * + * Delay a task for a given number of ticks. The actual time that the + * task remains blocked depends on the tick rate. The constant + * portTICK_PERIOD_MS can be used to calculate real time from the tick + * rate - with the resolution of one tick period. + * + * INCLUDE_vTaskDelay must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * + * vTaskDelay() specifies a time at which the task wishes to unblock relative to + * the time at which vTaskDelay() is called. For example, specifying a block + * period of 100 ticks will cause the task to unblock 100 ticks after + * vTaskDelay() is called. vTaskDelay() does not therefore provide a good method + * of controlling the frequency of a periodic task as the path taken through the + * code, as well as other task and interrupt activity, will affect the frequency + * at which vTaskDelay() gets called and therefore the time at which the task + * next executes. See vTaskDelayUntil() for an alternative API function designed + * to facilitate fixed frequency execution. It does this by specifying an + * absolute time (rather than a relative time) at which the calling task should + * unblock. + * + * @param xTicksToDelay The amount of time, in tick periods, that + * the calling task should block. + * + * Example usage: + + void vTaskFunction( void * pvParameters ) + { + // Block for 500ms.
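 + // portTICK_PERIOD_MS is the tick period in milliseconds, so dividing a + // time expressed in milliseconds by it converts that time into ticks.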
+ const TickType_t xDelay = 500 / portTICK_PERIOD_MS; + + for( ;; ) + { + // Simply toggle the LED every 500ms, blocking between each toggle. + vToggleLED(); + vTaskDelay( xDelay ); + } + } + + * \defgroup vTaskDelay vTaskDelay + * \ingroup TaskCtrl + */ +void vTaskDelay( const TickType_t xTicksToDelay ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskDelayUntil( TickType_t *pxPreviousWakeTime, const TickType_t xTimeIncrement );+ * + * INCLUDE_vTaskDelayUntil must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Delay a task until a specified time. This function can be used by periodic + * tasks to ensure a constant execution frequency. + * + * This function differs from vTaskDelay () in one important aspect: vTaskDelay () will + * cause a task to block for the specified number of ticks from the time vTaskDelay () is + * called. It is therefore difficult to use vTaskDelay () by itself to generate a fixed + * execution frequency as the time between a task starting to execute and that task + * calling vTaskDelay () may not be fixed [the task may take a different path through the + * code between calls, or may get interrupted or preempted a different number of times + * each time it executes]. + * + * Whereas vTaskDelay () specifies a wake time relative to the time at which the function + * is called, vTaskDelayUntil () specifies the absolute (exact) time at which it wishes to + * unblock. + * + * The constant portTICK_PERIOD_MS can be used to calculate real time from the tick + * rate - with the resolution of one tick period. + * + * @param pxPreviousWakeTime Pointer to a variable that holds the time at which the + * task was last unblocked. The variable must be initialised with the current time + * prior to its first use (see the example below). Following this the variable is + * automatically updated within vTaskDelayUntil (). + * + * @param xTimeIncrement The cycle time period. The task will be unblocked at + * time *pxPreviousWakeTime + xTimeIncrement. Calling vTaskDelayUntil with the + * same xTimeIncrement parameter value will cause the task to execute with + * a fixed interval period. + * + * Example usage: ++ // Perform an action every 10 ticks. + void vTaskFunction( void * pvParameters ) + { + TickType_t xLastWakeTime; + const TickType_t xFrequency = 10; + + // Initialise the xLastWakeTime variable with the current time. + xLastWakeTime = xTaskGetTickCount (); + for( ;; ) + { + // Wait for the next cycle. + vTaskDelayUntil( &xLastWakeTime, xFrequency ); + + // Perform action here. + } + } ++ * \defgroup vTaskDelayUntil vTaskDelayUntil + * \ingroup TaskCtrl + */ +void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement ) PRIVILEGED_FUNCTION; + +/** + * task. h + *BaseType_t xTaskAbortDelay( TaskHandle_t xTask );+ * + * INCLUDE_xTaskAbortDelay must be defined as 1 in FreeRTOSConfig.h for this + * function to be available. + * + * A task will enter the Blocked state when it is waiting for an event. The + * event it is waiting for can be a temporal event (waiting for a time), such + * as when vTaskDelay() is called, or an event on an object, such as when + * xQueueReceive() or ulTaskNotifyTake() is called. If the handle of a task + * that is in the Blocked state is used in a call to xTaskAbortDelay() then the + * task will leave the Blocked state, and return from whichever function call + * placed the task into the Blocked state.
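 + * + * For example, a task that is blocked in a call to vTaskDelay() with a long + * delay period can be forced out of the Blocked state immediately by another + * task calling xTaskAbortDelay() with the delayed task's handle.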
+ * + * @param xTask The handle of the task to remove from the Blocked state. + * + * @return If the task referenced by xTask was not in the Blocked state then + * pdFAIL is returned. Otherwise pdPASS is returned. + * + * \defgroup xTaskAbortDelay xTaskAbortDelay + * \ingroup TaskCtrl + */ +BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *UBaseType_t uxTaskPriorityGet( TaskHandle_t xTask );+ * + * INCLUDE_uxTaskPriorityGet must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Obtain the priority of any task. + * + * @param xTask Handle of the task to be queried. Passing a NULL + * handle results in the priority of the calling task being returned. + * + * @return The priority of xTask. + * + * Example usage: ++ void vAFunction( void ) + { + TaskHandle_t xHandle; + + // Create a task, storing the handle. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle ); + + // ... + + // Use the handle to obtain the priority of the created task. + // It was created with tskIDLE_PRIORITY, but may have changed + // it itself. + if( uxTaskPriorityGet( xHandle ) != tskIDLE_PRIORITY ) + { + // The task has changed its priority. + } + + // ... + + // Is our priority higher than the created task? + if( uxTaskPriorityGet( xHandle ) < uxTaskPriorityGet( NULL ) ) + { + // Our priority (obtained using NULL handle) is higher. + } + } ++ * \defgroup uxTaskPriorityGet uxTaskPriorityGet + * \ingroup TaskCtrl + */ +UBaseType_t uxTaskPriorityGet( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *UBaseType_t uxTaskPriorityGetFromISR( TaskHandle_t xTask );+ * + * A version of uxTaskPriorityGet() that can be used from an ISR. + */ +UBaseType_t uxTaskPriorityGetFromISR( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *eTaskState eTaskGetState( TaskHandle_t xTask );+ * + * INCLUDE_eTaskGetState must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Obtain the state of any task. States are encoded by the eTaskState + * enumerated type. + * + * @param xTask Handle of the task to be queried. + * + * @return The state of xTask at the time the function was called. Note the + * state of the task might change between the function being called, and the + * function's return value being tested by the calling task. + */ +eTaskState eTaskGetState( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState );+ * + * configUSE_TRACE_FACILITY must be defined as 1 for this function to be + * available. See the configuration section for more information. + * + * Populates a TaskStatus_t structure with information about a task. + * + * @param xTask Handle of the task being queried. If xTask is NULL then + * information will be returned about the calling task. + * + * @param pxTaskStatus A pointer to the TaskStatus_t structure that will be + * filled with information about the task referenced by the handle passed using + * the xTask parameter. + * + * @param xGetFreeStackSpace The TaskStatus_t structure contains a member to report + * the stack high water mark of the task being queried.
Calculating the stack + * high water mark takes a relatively long time, and can make the system + * temporarily unresponsive - so the xGetFreeStackSpace parameter is provided to + * allow the high water mark checking to be skipped. The high watermark value + * will only be written to the TaskStatus_t structure if xGetFreeStackSpace is + * not set to pdFALSE. + * + * @param eState The TaskStatus_t structure contains a member to report the + * state of the task being queried. Obtaining the task state is not as fast as + * a simple assignment - so the eState parameter is provided to allow the state + * information to be omitted from the TaskStatus_t structure. To obtain state + * information, set eState to eInvalid - otherwise the value passed in + * eState will be reported as the task state in the TaskStatus_t structure. + * + * Example usage: ++ void vAFunction( void ) + { + TaskHandle_t xHandle; + TaskStatus_t xTaskDetails; + + // Obtain the handle of a task from its name. + xHandle = xTaskGetHandle( "Task_Name" ); + + // Check the handle is not NULL. + configASSERT( xHandle ); + + // Use the handle to obtain further information about the task. + vTaskGetInfo( xHandle, + &xTaskDetails, + pdTRUE, // Include the high water mark in xTaskDetails. + eInvalid ); // Include the task state in xTaskDetails. + } ++ * \defgroup vTaskGetInfo vTaskGetInfo + * \ingroup TaskCtrl + */ +void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority );+ * + * INCLUDE_vTaskPrioritySet must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Set the priority of any task. + * + * A context switch will occur before the function returns if the priority + * being set is higher than the currently executing task. + * + * @param xTask Handle to the task for which the priority is being set. + * Passing a NULL handle results in the priority of the calling task being set. + * + * @param uxNewPriority The priority to which the task will be set. + * + * Example usage: ++ void vAFunction( void ) + { + TaskHandle_t xHandle; + + // Create a task, storing the handle. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle ); + + // ... + + // Use the handle to raise the priority of the created task. + vTaskPrioritySet( xHandle, tskIDLE_PRIORITY + 1 ); + + // ... + + // Use a NULL handle to raise our priority to the same value. + vTaskPrioritySet( NULL, tskIDLE_PRIORITY + 1 ); + } ++ * \defgroup vTaskPrioritySet vTaskPrioritySet + * \ingroup TaskCtrl + */ +void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskSuspend( TaskHandle_t xTaskToSuspend );+ * + * INCLUDE_vTaskSuspend must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Suspend any task. When suspended a task will never get any microcontroller + * processing time, no matter what its priority. + * + * Calls to vTaskSuspend are not cumulative - + * i.e. calling vTaskSuspend () twice on the same task still only requires one + * call to vTaskResume () to ready the suspended task. + * + * @param xTaskToSuspend Handle to the task being suspended. Passing a NULL + * handle will cause the calling task to be suspended.
+ * + * Example usage: ++ void vAFunction( void ) + { + TaskHandle_t xHandle; + + // Create a task, storing the handle. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle ); + + // ... + + // Use the handle to suspend the created task. + vTaskSuspend( xHandle ); + + // ... + + // The created task will not run during this period, unless + // another task calls vTaskResume( xHandle ). + + //... + + + // Suspend ourselves. + vTaskSuspend( NULL ); + + // We cannot get here unless another task calls vTaskResume + // with our handle as the parameter. + } ++ * \defgroup vTaskSuspend vTaskSuspend + * \ingroup TaskCtrl + */ +void vTaskSuspend( TaskHandle_t xTaskToSuspend ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskResume( TaskHandle_t xTaskToResume );+ * + * INCLUDE_vTaskSuspend must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Resumes a suspended task. + * + * A task that has been suspended by one or more calls to vTaskSuspend () + * will be made available for running again by a single call to + * vTaskResume (). + * + * @param xTaskToResume Handle to the task being readied. + * + * Example usage: ++ void vAFunction( void ) + { + TaskHandle_t xHandle; + + // Create a task, storing the handle. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle ); + + // ... + + // Use the handle to suspend the created task. + vTaskSuspend( xHandle ); + + // ... + + // The created task will not run during this period, unless + // another task calls vTaskResume( xHandle ). + + //... + + + // Resume the suspended task ourselves. + vTaskResume( xHandle ); + + // The created task will once again get microcontroller processing + // time in accordance with its priority within the system. + } ++ * \defgroup vTaskResume vTaskResume + * \ingroup TaskCtrl + */ +void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; + +/** + * task. h + *BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume );+ * + * INCLUDE_xTaskResumeFromISR must be defined as 1 for this function to be + * available. See the configuration section for more information. + * + * An implementation of vTaskResume() that can be called from within an ISR. + * + * A task that has been suspended by one or more calls to vTaskSuspend () + * will be made available for running again by a single call to + * xTaskResumeFromISR (). + * + * xTaskResumeFromISR() should not be used to synchronise a task with an + * interrupt if there is a chance that the interrupt could arrive prior to the + * task being suspended - as this can lead to interrupts being missed. Use of a + * semaphore as a synchronisation mechanism would avoid this eventuality. + * + * @param xTaskToResume Handle to the task being readied. + * + * @return pdTRUE if resuming the task should result in a context switch, + * otherwise pdFALSE. This is used by the ISR to determine if a context switch + * may be required following the ISR. + * + * \defgroup xTaskResumeFromISR xTaskResumeFromISR + * \ingroup TaskCtrl + */ +BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; + +/*----------------------------------------------------------- + * SCHEDULER CONTROL + *----------------------------------------------------------*/ + +/** + * task. h + *void vTaskStartScheduler( void );+ * + * Starts the real time kernel tick processing. After calling, the kernel + * has control over which tasks are executed and when.
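 + * + * NOTE: vTaskStartScheduler() will only return if vTaskEndScheduler() is + * called, or if there is insufficient FreeRTOS heap available to create the + * idle task or (when software timers are used) the timer daemon task.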
+ * + * See the demo application file main.c for an example of creating + * tasks and starting the kernel. + * + * Example usage: ++ void vAFunction( void ) + { + // Create at least one task before starting the kernel. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, NULL ); + + // Start the real time kernel with preemption. + vTaskStartScheduler (); + + // Will not get here unless a task calls vTaskEndScheduler () + } ++ * + * \defgroup vTaskStartScheduler vTaskStartScheduler + * \ingroup SchedulerControl + */ +void vTaskStartScheduler( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskEndScheduler( void );+ * + * NOTE: At the time of writing only the x86 real mode port, which runs on a PC + * in place of DOS, implements this function. + * + * Stops the real time kernel tick. All created tasks will be automatically + * deleted and multitasking (either preemptive or cooperative) will + * stop. Execution then resumes from the point where vTaskStartScheduler () + * was called, as if vTaskStartScheduler () had just returned. + * + * See the demo application file main. c in the demo/PC directory for an + * example that uses vTaskEndScheduler (). + * + * vTaskEndScheduler () requires an exit function to be defined within the + * portable layer (see vPortEndScheduler () in port. c for the PC port). This + * performs hardware specific operations such as stopping the kernel tick. + * + * vTaskEndScheduler () will cause all of the resources allocated by the + * kernel to be freed - but will not free resources allocated by application + * tasks. + * + * Example usage: ++ void vTaskCode( void * pvParameters ) + { + for( ;; ) + { + // Task code goes here. + + // At some point we want to end the real time kernel processing + // so call ... + vTaskEndScheduler (); + } + } + + void vAFunction( void ) + { + // Create at least one task before starting the kernel. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, NULL ); + + // Start the real time kernel with preemption. + vTaskStartScheduler (); + + // Will only get here when the vTaskCode () task has called + // vTaskEndScheduler (). When we get here we are back to single task + // execution. + } ++ * + * \defgroup vTaskEndScheduler vTaskEndScheduler + * \ingroup SchedulerControl + */ +void vTaskEndScheduler( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskSuspendAll( void );+ * + * Suspends the scheduler without disabling interrupts. Context switches will + * not occur while the scheduler is suspended. + * + * After calling vTaskSuspendAll () the calling task will continue to execute + * without risk of being swapped out until a call to xTaskResumeAll () has been + * made. + * + * API functions that have the potential to cause a context switch (for example, + * vTaskDelayUntil(), xQueueSend(), etc.) must not be called while the scheduler + * is suspended. + * + * Example usage: ++ void vTask1( void * pvParameters ) + { + for( ;; ) + { + // Task code goes here. + + // ... + + // At some point the task wants to perform a long operation during + // which it does not want to get swapped out. It cannot use + // taskENTER_CRITICAL ()/taskEXIT_CRITICAL () as the length of the + // operation may cause interrupts to be missed - including the + // ticks. + + // Prevent the real time kernel swapping out the task. + vTaskSuspendAll (); + + // Perform the operation here. There is no need to use critical + // sections as we have all the microcontroller processing time. 
+ // During this time interrupts will still operate and the kernel + // tick count will be maintained. + + // ... + + // The operation is complete. Restart the kernel. + xTaskResumeAll (); + } + } ++ * \defgroup vTaskSuspendAll vTaskSuspendAll + * \ingroup SchedulerControl + */ +void vTaskSuspendAll( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *BaseType_t xTaskResumeAll( void );+ * + * Resumes scheduler activity after it was suspended by a call to + * vTaskSuspendAll(). + * + * xTaskResumeAll() only resumes the scheduler. It does not unsuspend tasks + * that were previously suspended by a call to vTaskSuspend(). + * + * @return If resuming the scheduler caused a context switch then pdTRUE is + * returned, otherwise pdFALSE is returned. + * + * Example usage: ++ void vTask1( void * pvParameters ) + { + for( ;; ) + { + // Task code goes here. + + // ... + + // At some point the task wants to perform a long operation during + // which it does not want to get swapped out. It cannot use + // taskENTER_CRITICAL ()/taskEXIT_CRITICAL () as the length of the + // operation may cause interrupts to be missed - including the + // ticks. + + // Prevent the real time kernel swapping out the task. + vTaskSuspendAll (); + + // Perform the operation here. There is no need to use critical + // sections as we have all the microcontroller processing time. + // During this time interrupts will still operate and the real + // time kernel tick count will be maintained. + + // ... + + // The operation is complete. Restart the kernel. We want to force + // a context switch - but there is no point if resuming the scheduler + // caused a context switch already. + if( !xTaskResumeAll () ) + { + taskYIELD (); + } + } + } ++ * \defgroup xTaskResumeAll xTaskResumeAll + * \ingroup SchedulerControl + */ +BaseType_t xTaskResumeAll( void ) PRIVILEGED_FUNCTION; + +/*----------------------------------------------------------- + * TASK UTILITIES + *----------------------------------------------------------*/ + +/** + * task. h + *TickType_t xTaskGetTickCount( void );+ * + * @return The count of ticks since vTaskStartScheduler was called. + * + * \defgroup xTaskGetTickCount xTaskGetTickCount + * \ingroup TaskUtils + */ +TickType_t xTaskGetTickCount( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *TickType_t xTaskGetTickCountFromISR( void );+ * + * @return The count of ticks since vTaskStartScheduler was called. + * + * This is a version of xTaskGetTickCount() that is safe to be called from an + * ISR - provided that TickType_t is the natural word size of the + * microcontroller being used or interrupt nesting is either not supported or + * not being used. + * + * \defgroup xTaskGetTickCountFromISR xTaskGetTickCountFromISR + * \ingroup TaskUtils + */ +TickType_t xTaskGetTickCountFromISR( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *UBaseType_t uxTaskGetNumberOfTasks( void );+ * + * @return The number of tasks that the real time kernel is currently managing. + * This includes all ready, blocked and suspended tasks. A task that + * has been deleted but not yet freed by the idle task will also be + * included in the count. + * + * \defgroup uxTaskGetNumberOfTasks uxTaskGetNumberOfTasks + * \ingroup TaskUtils + */ +UBaseType_t uxTaskGetNumberOfTasks( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *char *pcTaskGetName( TaskHandle_t xTaskToQuery );+ * + * @return The text (human readable) name of the task referenced by the handle + * xTaskToQuery.
A task can query its own name by either passing in its own + * handle, or by setting xTaskToQuery to NULL. + * + * \defgroup pcTaskGetName pcTaskGetName + * \ingroup TaskUtils + */ +char *pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * task. h + *TaskHandle_t xTaskGetHandle( const char *pcNameToQuery );+ * + * NOTE: This function takes a relatively long time to complete and should be + * used sparingly. + * + * @return The handle of the task that has the human readable name pcNameToQuery. + * NULL is returned if no matching name is found. INCLUDE_xTaskGetHandle + * must be set to 1 in FreeRTOSConfig.h for xTaskGetHandle() to be available. + * + * \defgroup xTaskGetHandle xTaskGetHandle + * \ingroup TaskUtils + */ +TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * task.h + *UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask );+ * + * INCLUDE_uxTaskGetStackHighWaterMark must be set to 1 in FreeRTOSConfig.h for + * this function to be available. + * + * Returns the high water mark of the stack associated with xTask. That is, + * the minimum free stack space there has been (in words, so on a 32 bit machine + * a value of 1 means 4 bytes) since the task started. The smaller the returned + * number the closer the task has come to overflowing its stack. + * + * @param xTask Handle of the task associated with the stack to be checked. + * Set xTask to NULL to check the stack of the calling task. + * + * @return The smallest amount of free stack space there has been (in words, so + * actual spaces on the stack rather than bytes) since the task referenced by + * xTask was created. + */ +UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/* When using trace macros it is sometimes necessary to include task.h before +FreeRTOS.h. When this is done TaskHookFunction_t will not yet have been defined, +so the following two prototypes will cause a compilation error. This can be +fixed by simply guarding against the inclusion of these two prototypes unless +they are explicitly required by the configUSE_APPLICATION_TASK_TAG configuration +constant. */ +#ifdef configUSE_APPLICATION_TASK_TAG + #if configUSE_APPLICATION_TASK_TAG == 1 + /** + * task.h + *void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction );+ * + * Sets pxHookFunction to be the task hook function used by the task xTask. + * Passing xTask as NULL has the effect of setting the calling task's hook + * function. + */ + void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction ) PRIVILEGED_FUNCTION; + + /** + * task.h + *void xTaskGetApplicationTaskTag( TaskHandle_t xTask );+ * + * Returns the pxHookFunction value assigned to the task xTask. + */ + TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + #endif /* configUSE_APPLICATION_TASK_TAG ==1 */ +#endif /* ifdef configUSE_APPLICATION_TASK_TAG */ + +#if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) + + /* Each task contains an array of pointers that is dimensioned by the + configNUM_THREAD_LOCAL_STORAGE_POINTERS setting in FreeRTOSConfig.h. The + kernel does not use the pointers itself, so the application writer can use + the pointers for any purpose they wish.
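 A common use is to store a + per-task context structure or a per-task errno value.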
The following two functions are + used to set and query a pointer respectively. */ + void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue ) PRIVILEGED_FUNCTION; + void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex ) PRIVILEGED_FUNCTION; + +#endif + +/** + * task.h + *BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter );+ * + * Calls the hook function associated with xTask. Passing xTask as NULL has + * the effect of calling the Running tasks (the calling task) hook function. + * + * pvParameter is passed to the hook function for the task to interpret as it + * wants. The return value is the value returned by the task hook function + * registered by the user. + */ +BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter ) PRIVILEGED_FUNCTION; + +/** + * xTaskGetIdleTaskHandle() is only available if + * INCLUDE_xTaskGetIdleTaskHandle is set to 1 in FreeRTOSConfig.h. + * + * Simply returns the handle of the idle task. It is not valid to call + * xTaskGetIdleTaskHandle() before the scheduler has been started. + */ +TaskHandle_t xTaskGetIdleTaskHandle( void ) PRIVILEGED_FUNCTION; + +/** + * configUSE_TRACE_FACILITY must be defined as 1 in FreeRTOSConfig.h for + * uxTaskGetSystemState() to be available. + * + * uxTaskGetSystemState() populates an TaskStatus_t structure for each task in + * the system. TaskStatus_t structures contain, among other things, members + * for the task handle, task name, task priority, task state, and total amount + * of run time consumed by the task. See the TaskStatus_t structure + * definition in this file for the full member list. + * + * NOTE: This function is intended for debugging use only as its use results in + * the scheduler remaining suspended for an extended period. + * + * @param pxTaskStatusArray A pointer to an array of TaskStatus_t structures. + * The array must contain at least one TaskStatus_t structure for each task + * that is under the control of the RTOS. The number of tasks under the control + * of the RTOS can be determined using the uxTaskGetNumberOfTasks() API function. + * + * @param uxArraySize The size of the array pointed to by the pxTaskStatusArray + * parameter. The size is specified as the number of indexes in the array, or + * the number of TaskStatus_t structures contained in the array, not by the + * number of bytes in the array. + * + * @param pulTotalRunTime If configGENERATE_RUN_TIME_STATS is set to 1 in + * FreeRTOSConfig.h then *pulTotalRunTime is set by uxTaskGetSystemState() to the + * total run time (as defined by the run time stats clock, see + * http://www.freertos.org/rtos-run-time-stats.html) since the target booted. + * pulTotalRunTime can be set to NULL to omit the total run time information. + * + * @return The number of TaskStatus_t structures that were populated by + * uxTaskGetSystemState(). This should equal the number returned by the + * uxTaskGetNumberOfTasks() API function, but will be zero if the value passed + * in the uxArraySize parameter was too small. + * + * Example usage: ++ // This example demonstrates how a human readable table of run time stats + // information is generated from raw data provided by uxTaskGetSystemState(). 
+ // The human readable table is written to pcWriteBuffer + void vTaskGetRunTimeStats( char *pcWriteBuffer ) + { + TaskStatus_t *pxTaskStatusArray; + volatile UBaseType_t uxArraySize, x; + uint32_t ulTotalRunTime, ulStatsAsPercentage; + + // Make sure the write buffer does not contain a string. + *pcWriteBuffer = 0x00; + + // Take a snapshot of the number of tasks in case it changes while this + // function is executing. + uxArraySize = uxTaskGetNumberOfTasks(); + + // Allocate a TaskStatus_t structure for each task. An array could be + // allocated statically at compile time. + pxTaskStatusArray = pvPortMalloc( uxArraySize * sizeof( TaskStatus_t ) ); + + if( pxTaskStatusArray != NULL ) + { + // Generate raw status information about each task. + uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalRunTime ); + + // For percentage calculations. + ulTotalRunTime /= 100UL; + + // Avoid divide by zero errors. + if( ulTotalRunTime > 0 ) + { + // For each populated position in the pxTaskStatusArray array, + // format the raw data as human readable ASCII data + for( x = 0; x < uxArraySize; x++ ) + { + // What percentage of the total run time has the task used? + // This will always be rounded down to the nearest integer. + // ulTotalRunTimeDiv100 has already been divided by 100. + ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalRunTime; + + if( ulStatsAsPercentage > 0UL ) + { + sprintf( pcWriteBuffer, "%s\t\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].pcTaskName, pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage ); + } + else + { + // If the percentage is zero here then the task has + // consumed less than 1% of the total run time. + sprintf( pcWriteBuffer, "%s\t\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].pcTaskName, pxTaskStatusArray[ x ].ulRunTimeCounter ); + } + + pcWriteBuffer += strlen( ( char * ) pcWriteBuffer ); + } + } + + // The array is no longer needed, free the memory it consumes. + vPortFree( pxTaskStatusArray ); + } + } ++ */ +UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskList( char *pcWriteBuffer );+ * + * configUSE_TRACE_FACILITY and configUSE_STATS_FORMATTING_FUNCTIONS must + * both be defined as 1 for this function to be available. See the + * configuration section of the FreeRTOS.org website for more information. + * + * NOTE 1: This function will disable interrupts for its duration. It is + * not intended for normal application runtime use but as a debug aid. + * + * Lists all the current tasks, along with their current state and stack + * usage high water mark. + * + * Tasks are reported as blocked ('B'), ready ('R'), deleted ('D') or + * suspended ('S'). + * + * PLEASE NOTE: + * + * This function is provided for convenience only, and is used by many of the + * demo applications. Do not consider it to be part of the scheduler. + * + * vTaskList() calls uxTaskGetSystemState(), then formats part of the + * uxTaskGetSystemState() output into a human readable table that displays task + * names, states and stack usage. + * + * vTaskList() has a dependency on the sprintf() C library function that might + * bloat the code size, use a lot of stack, and provide different results on + * different platforms. 
An alternative, tiny, third party, and limited + * functionality implementation of sprintf() is provided in many of the + * FreeRTOS/Demo sub-directories in a file called printf-stdarg.c (note + * printf-stdarg.c does not provide a full snprintf() implementation!). + * + * It is recommended that production systems call uxTaskGetSystemState() + * directly to get access to raw stats data, rather than indirectly through a + * call to vTaskList(). + * + * @param pcWriteBuffer A buffer into which the above mentioned details + * will be written, in ASCII form. This buffer is assumed to be large + * enough to contain the generated report. Approximately 40 bytes per + * task should be sufficient. + * + * \defgroup vTaskList vTaskList + * \ingroup TaskUtils + */ +void vTaskList( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * task. h + *void vTaskGetRunTimeStats( char *pcWriteBuffer );+ * + * configGENERATE_RUN_TIME_STATS and configUSE_STATS_FORMATTING_FUNCTIONS + * must both be defined as 1 for this function to be available. The application + * must also then provide definitions for + * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and portGET_RUN_TIME_COUNTER_VALUE() + * to configure a peripheral timer/counter and return the timers current count + * value respectively. The counter should be at least 10 times the frequency of + * the tick count. + * + * NOTE 1: This function will disable interrupts for its duration. It is + * not intended for normal application runtime use but as a debug aid. + * + * Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total + * accumulated execution time being stored for each task. The resolution + * of the accumulated time value depends on the frequency of the timer + * configured by the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() macro. + * Calling vTaskGetRunTimeStats() writes the total execution time of each + * task into a buffer, both as an absolute count value and as a percentage + * of the total system execution time. + * + * NOTE 2: + * + * This function is provided for convenience only, and is used by many of the + * demo applications. Do not consider it to be part of the scheduler. + * + * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part of the + * uxTaskGetSystemState() output into a human readable table that displays the + * amount of time each task has spent in the Running state in both absolute and + * percentage terms. + * + * vTaskGetRunTimeStats() has a dependency on the sprintf() C library function + * that might bloat the code size, use a lot of stack, and provide different + * results on different platforms. An alternative, tiny, third party, and + * limited functionality implementation of sprintf() is provided in many of the + * FreeRTOS/Demo sub-directories in a file called printf-stdarg.c (note + * printf-stdarg.c does not provide a full snprintf() implementation!). + * + * It is recommended that production systems call uxTaskGetSystemState() directly + * to get access to raw stats data, rather than indirectly through a call to + * vTaskGetRunTimeStats(). + * + * @param pcWriteBuffer A buffer into which the execution times will be + * written, in ASCII form. This buffer is assumed to be large enough to + * contain the generated report. Approximately 40 bytes per task should + * be sufficient. 
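+ *
+ * Example usage (a minimal sketch only; the buffer sizing of roughly 40 bytes
+ * for each of an assumed maximum of 10 tasks, and the use of printf() for
+ * output, are illustrative assumptions, not requirements of this API):
+ *
+ *	void vPrintRunTimeStats( void )
+ *	{
+ *		// Buffer sized for roughly 10 tasks at ~40 bytes per task.
+ *		static char cStatsBuffer[ 10 * 40 ];
+ *
+ *		// Format the run time stats into the buffer, then write the report
+ *		// out with whatever output function the application provides.
+ *		vTaskGetRunTimeStats( cStatsBuffer );
+ *		printf( "%s", cStatsBuffer );
+ *	}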
+ *
+ * \defgroup vTaskGetRunTimeStats vTaskGetRunTimeStats
+ * \ingroup TaskUtils
+ */
+void vTaskGetRunTimeStats( char *pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+
+/**
+ * task.h
+ * <pre>BaseType_t xTaskNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction );</pre>
+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this
+ * function to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * A notification sent to a task will remain pending until it is cleared by the
+ * task calling xTaskNotifyWait() or ulTaskNotifyTake(). If the task was
+ * already in the Blocked state to wait for a notification when the notification
+ * arrives then the task will automatically be removed from the Blocked state
+ * (unblocked) and the notification cleared.
+ *
+ * A task can use xTaskNotifyWait() to [optionally] block to wait for a
+ * notification to be pending, or ulTaskNotifyTake() to [optionally] block
+ * to wait for its notification value to have a non-zero value. The task does
+ * not consume any CPU time while it is in the Blocked state.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details.
+ *
+ * @param xTaskToNotify The handle of the task being notified. The handle to a
+ * task can be returned from the xTaskCreate() API function used to create the
+ * task, and the handle of the currently running task can be obtained by calling
+ * xTaskGetCurrentTaskHandle().
+ *
+ * @param ulValue Data that can be sent with the notification. How the data is
+ * used depends on the value of the eAction parameter.
+ *
+ * @param eAction Specifies how the notification updates the task's notification
+ * value, if at all. Valid values for eAction are as follows:
+ *
+ * eSetBits -
+ * The task's notification value is bitwise ORed with ulValue. xTaskNotify()
+ * always returns pdPASS in this case.
+ *
+ * eIncrement -
+ * The task's notification value is incremented. ulValue is not used and
+ * xTaskNotify() always returns pdPASS in this case.
+ *
+ * eSetValueWithOverwrite -
+ * The task's notification value is set to the value of ulValue, even if the
+ * task being notified had not yet processed the previous notification (the
+ * task already had a notification pending). xTaskNotify() always returns
+ * pdPASS in this case.
+ *
+ * eSetValueWithoutOverwrite -
+ * If the task being notified did not already have a notification pending then
+ * the task's notification value is set to ulValue and xTaskNotify() will
+ * return pdPASS. If the task being notified already had a notification
+ * pending then no action is performed and pdFAIL is returned.
+ *
+ * eNoAction -
+ * The task receives a notification without its notification value being
+ * updated.
+ * ulValue is not used and xTaskNotify() always returns pdPASS in
+ * this case.
+ *
+ * pulPreviousNotificationValue -
+ * Can be used to pass out the subject task's notification value before any
+ * bits are modified by the notify function.
+ *
+ * @return Dependent on the value of eAction. See the description of the
+ * eAction parameter.
+ *
+ * \defgroup xTaskNotify xTaskNotify
+ * \ingroup TaskNotifications
+ */
+BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue ) PRIVILEGED_FUNCTION;
+#define xTaskNotify( xTaskToNotify, ulValue, eAction ) xTaskGenericNotify( ( xTaskToNotify ), ( ulValue ), ( eAction ), NULL )
+#define xTaskNotifyAndQuery( xTaskToNotify, ulValue, eAction, pulPreviousNotifyValue ) xTaskGenericNotify( ( xTaskToNotify ), ( ulValue ), ( eAction ), ( pulPreviousNotifyValue ) )
+
+/**
+ * task.h
+ * <pre>BaseType_t xTaskNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken );</pre>
+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this
+ * function to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * A version of xTaskNotify() that can be used from an interrupt service routine
+ * (ISR).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * A notification sent to a task will remain pending until it is cleared by the
+ * task calling xTaskNotifyWait() or ulTaskNotifyTake(). If the task was
+ * already in the Blocked state to wait for a notification when the notification
+ * arrives then the task will automatically be removed from the Blocked state
+ * (unblocked) and the notification cleared.
+ *
+ * A task can use xTaskNotifyWait() to [optionally] block to wait for a
+ * notification to be pending, or ulTaskNotifyTake() to [optionally] block
+ * to wait for its notification value to have a non-zero value. The task does
+ * not consume any CPU time while it is in the Blocked state.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details.
+ *
+ * @param xTaskToNotify The handle of the task being notified. The handle to a
+ * task can be returned from the xTaskCreate() API function used to create the
+ * task, and the handle of the currently running task can be obtained by calling
+ * xTaskGetCurrentTaskHandle().
+ *
+ * @param ulValue Data that can be sent with the notification. How the data is
+ * used depends on the value of the eAction parameter.
+ *
+ * @param eAction Specifies how the notification updates the task's notification
+ * value, if at all. Valid values for eAction are as follows:
+ *
+ * eSetBits -
+ * The task's notification value is bitwise ORed with ulValue. xTaskNotify()
+ * always returns pdPASS in this case.
+ *
+ * eIncrement -
+ * The task's notification value is incremented.
+ * ulValue is not used and xTaskNotify() always returns pdPASS in
+ * this case.
+ *
+ * eSetValueWithOverwrite -
+ * The task's notification value is set to the value of ulValue, even if the
+ * task being notified had not yet processed the previous notification (the
+ * task already had a notification pending). xTaskNotify() always returns
+ * pdPASS in this case.
+ *
+ * eSetValueWithoutOverwrite -
+ * If the task being notified did not already have a notification pending then
+ * the task's notification value is set to ulValue and xTaskNotify() will
+ * return pdPASS. If the task being notified already had a notification
+ * pending then no action is performed and pdFAIL is returned.
+ *
+ * eNoAction -
+ * The task receives a notification without its notification value being
+ * updated. ulValue is not used and xTaskNotify() always returns pdPASS in
+ * this case.
+ *
+ * @param pxHigherPriorityTaskWoken xTaskNotifyFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending the notification caused the
+ * task to which the notification was sent to leave the Blocked state, and the
+ * unblocked task has a priority higher than the currently running task. If
+ * xTaskNotifyFromISR() sets this value to pdTRUE then a context switch should
+ * be requested before the interrupt is exited. How a context switch is
+ * requested from an ISR is dependent on the port - see the documentation page
+ * for the port in use.
+ *
+ * @return Dependent on the value of eAction. See the description of the
+ * eAction parameter.
+ *
+ * \defgroup xTaskNotify xTaskNotify
+ * \ingroup TaskNotifications
+ */
+BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+#define xTaskNotifyFromISR( xTaskToNotify, ulValue, eAction, pxHigherPriorityTaskWoken ) xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( ulValue ), ( eAction ), NULL, ( pxHigherPriorityTaskWoken ) )
+#define xTaskNotifyAndQueryFromISR( xTaskToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken ) xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( ulValue ), ( eAction ), ( pulPreviousNotificationValue ), ( pxHigherPriorityTaskWoken ) )
+
+/**
+ * task.h
+ * <pre>BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait );</pre>
+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this
+ * function to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * A notification sent to a task will remain pending until it is cleared by the
+ * task calling xTaskNotifyWait() or ulTaskNotifyTake().
+ * If the task was already in the Blocked state to wait for a notification when
+ * the notification arrives then the task will automatically be removed from the
+ * Blocked state (unblocked) and the notification cleared.
+ *
+ * A task can use xTaskNotifyWait() to [optionally] block to wait for a
+ * notification to be pending, or ulTaskNotifyTake() to [optionally] block
+ * to wait for its notification value to have a non-zero value. The task does
+ * not consume any CPU time while it is in the Blocked state.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details.
+ *
+ * @param ulBitsToClearOnEntry Bits that are set in ulBitsToClearOnEntry value
+ * will be cleared in the calling task's notification value before the task
+ * checks to see if any notifications are pending, and optionally blocks if no
+ * notifications are pending. Setting ulBitsToClearOnEntry to ULONG_MAX (if
+ * limits.h is included) or 0xffffffffUL (if limits.h is not included) will have
+ * the effect of resetting the task's notification value to 0. Setting
+ * ulBitsToClearOnEntry to 0 will leave the task's notification value unchanged.
+ *
+ * @param ulBitsToClearOnExit If a notification is pending or received before
+ * the calling task exits the xTaskNotifyWait() function then the task's
+ * notification value (see the xTaskNotify() API function) is passed out using
+ * the pulNotificationValue parameter. Then any bits that are set in
+ * ulBitsToClearOnExit will be cleared in the task's notification value (note
+ * *pulNotificationValue is set before any bits are cleared). Setting
+ * ulBitsToClearOnExit to ULONG_MAX (if limits.h is included) or 0xffffffffUL
+ * (if limits.h is not included) will have the effect of resetting the task's
+ * notification value to 0 before the function exits. Setting
+ * ulBitsToClearOnExit to 0 will leave the task's notification value unchanged
+ * when the function exits (in which case the value passed out in
+ * pulNotificationValue will match the task's notification value).
+ *
+ * @param pulNotificationValue Used to pass the task's notification value out
+ * of the function. Note the value passed out will not be affected by the
+ * clearing of any bits caused by ulBitsToClearOnExit being non-zero.
+ *
+ * @param xTicksToWait The maximum amount of time that the task should wait in
+ * the Blocked state for a notification to be received, should a notification
+ * not already be pending when xTaskNotifyWait() was called. The task
+ * will not consume any processing time while it is in the Blocked state. This
+ * is specified in kernel ticks, the macro pdMS_TO_TICKS( value_in_ms ) can be
+ * used to convert a time specified in milliseconds to a time specified in
+ * ticks.
+ *
+ * @return If a notification was received (including notifications that were
+ * already pending when xTaskNotifyWait was called) then pdPASS is
+ * returned. Otherwise pdFAIL is returned.
+ *
+ * \defgroup xTaskNotifyWait xTaskNotifyWait
+ * \ingroup TaskNotifications
+ */
+BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/**
+ * task.h
+ * <pre>BaseType_t xTaskNotifyGive( TaskHandle_t xTaskToNotify );</pre>
+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this macro
+ * to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * xTaskNotifyGive() is a helper macro intended for use when task notifications
+ * are used as light weight and faster binary or counting semaphore equivalents.
+ * Actual FreeRTOS semaphores are given using the xSemaphoreGive() API function,
+ * the equivalent action that instead uses a task notification is
+ * xTaskNotifyGive().
+ *
+ * When task notifications are being used as a binary or counting semaphore
+ * equivalent then the task being notified should wait for the notification
+ * using the ulTaskNotifyTake() API function rather than the
+ * xTaskNotifyWait() API function.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for more details.
+ *
+ * @param xTaskToNotify The handle of the task being notified. The handle to a
+ * task can be returned from the xTaskCreate() API function used to create the
+ * task, and the handle of the currently running task can be obtained by calling
+ * xTaskGetCurrentTaskHandle().
+ *
+ * @return xTaskNotifyGive() is a macro that calls xTaskNotify() with the
+ * eAction parameter set to eIncrement - so pdPASS is always returned.
+ *
+ * \defgroup xTaskNotifyGive xTaskNotifyGive
+ * \ingroup TaskNotifications
+ */
+#define xTaskNotifyGive( xTaskToNotify ) xTaskGenericNotify( ( xTaskToNotify ), ( 0 ), eIncrement, NULL )
+
+/**
+ * task.h
+ * <pre>void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken );</pre>
+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this macro
+ * to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * A version of xTaskNotifyGive() that can be called from an interrupt service
+ * routine (ISR).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * vTaskNotifyGiveFromISR() is intended for use when task notifications are
+ * used as light weight and faster binary or counting semaphore equivalents.
+ * Actual FreeRTOS semaphores are given from an ISR using the
+ * xSemaphoreGiveFromISR() API function, the equivalent action that instead uses
+ * a task notification is vTaskNotifyGiveFromISR().
+ *
+ * When task notifications are being used as a binary or counting semaphore
+ * equivalent then the task being notified should wait for the notification
+ * using the ulTaskNotifyTake() API function rather than the
+ * xTaskNotifyWait() API function.
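+ *
+ * Example usage (a minimal sketch only; the xHandlingTask handle variable and
+ * the interrupt handler name are illustrative and assumed to be defined by the
+ * application, and the context switch request macro is port specific -
+ * portYIELD_FROM_ISR() is shown as used on Cortex-M ports):
+ *
+ *	static TaskHandle_t xHandlingTask = NULL;
+ *
+ *	// An interrupt handler that defers its processing to a task.
+ *	void vAnInterruptHandler( void )
+ *	{
+ *		BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+ *
+ *		// The notification is used as a light weight binary semaphore to
+ *		// unblock the handling task.
+ *		vTaskNotifyGiveFromISR( xHandlingTask, &xHigherPriorityTaskWoken );
+ *
+ *		// Request a context switch if the unblocked task has a higher
+ *		// priority than the currently running task.
+ *		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+ *	}
+ *
+ *	// The task that performs the deferred processing.
+ *	void vHandlingTask( void *pvParameters )
+ *	{
+ *		for( ;; )
+ *		{
+ *			// Block until the ISR gives a notification, clearing the
+ *			// notification count to zero on exit so it acts as a binary
+ *			// semaphore.
+ *			ulTaskNotifyTake( pdTRUE, portMAX_DELAY );
+ *
+ *			// Process the event here.
+ *		}
+ *	}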
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for more details.
+ *
+ * @param xTaskToNotify The handle of the task being notified. The handle to a
+ * task can be returned from the xTaskCreate() API function used to create the
+ * task, and the handle of the currently running task can be obtained by calling
+ * xTaskGetCurrentTaskHandle().
+ *
+ * @param pxHigherPriorityTaskWoken vTaskNotifyGiveFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending the notification caused the
+ * task to which the notification was sent to leave the Blocked state, and the
+ * unblocked task has a priority higher than the currently running task. If
+ * vTaskNotifyGiveFromISR() sets this value to pdTRUE then a context switch
+ * should be requested before the interrupt is exited. How a context switch is
+ * requested from an ISR is dependent on the port - see the documentation page
+ * for the port in use.
+ *
+ * \defgroup vTaskNotifyGiveFromISR vTaskNotifyGiveFromISR
+ * \ingroup TaskNotifications
+ */
+void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+
+/**
+ * task.h
+ * <pre>uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait );</pre>
+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this
+ * function to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * ulTaskNotifyTake() is intended for use when a task notification is used as a
+ * faster and lighter weight binary or counting semaphore alternative. Actual
+ * FreeRTOS semaphores are taken using the xSemaphoreTake() API function, the
+ * equivalent action that instead uses a task notification is
+ * ulTaskNotifyTake().
+ *
+ * When a task is using its notification value as a binary or counting semaphore
+ * other tasks should send notifications to it using the xTaskNotifyGive()
+ * macro, or xTaskNotify() function with the eAction parameter set to
+ * eIncrement.
+ *
+ * ulTaskNotifyTake() can either clear the task's notification value to
+ * zero on exit, in which case the notification value acts like a binary
+ * semaphore, or decrement the task's notification value on exit, in which case
+ * the notification value acts like a counting semaphore.
+ *
+ * A task can use ulTaskNotifyTake() to [optionally] block to wait for
+ * the task's notification value to be non-zero. The task does not consume any
+ * CPU time while it is in the Blocked state.
+ *
+ * Whereas xTaskNotifyWait() will return when a notification is pending,
+ * ulTaskNotifyTake() will return when the task's notification value is
+ * not zero.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details.
+ *
+ * @param xClearCountOnExit if xClearCountOnExit is pdFALSE then the task's
+ * notification value is decremented when the function exits.
+ * In this way the notification value acts like a counting semaphore. If
+ * xClearCountOnExit is not pdFALSE then the task's notification value is
+ * cleared to zero when the function exits. In this way the notification value
+ * acts like a binary semaphore.
+ *
+ * @param xTicksToWait The maximum amount of time that the task should wait in
+ * the Blocked state for the task's notification value to be greater than zero,
+ * should the count not already be greater than zero when
+ * ulTaskNotifyTake() was called. The task will not consume any processing
+ * time while it is in the Blocked state. This is specified in kernel ticks,
+ * the macro pdMS_TO_TICKS( value_in_ms ) can be used to convert a time
+ * specified in milliseconds to a time specified in ticks.
+ *
+ * @return The task's notification count before it is either cleared to zero or
+ * decremented (see the xClearCountOnExit parameter).
+ *
+ * \defgroup ulTaskNotifyTake ulTaskNotifyTake
+ * \ingroup TaskNotifications
+ */
+uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/**
+ * task.h
+ * <pre>BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask );</pre>
+ *
+ * If the notification state of the task referenced by the handle xTask is
+ * eNotified, then set the task's notification state to eNotWaitingNotification.
+ * The task's notification value is not altered. Set xTask to NULL to clear the
+ * notification state of the calling task.
+ *
+ * @return pdTRUE if the task's notification state was set to
+ * eNotWaitingNotification, otherwise pdFALSE.
+ * \defgroup xTaskNotifyStateClear xTaskNotifyStateClear
+ * \ingroup TaskNotifications
+ */
+BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask );
+
+/*-----------------------------------------------------------
+ * SCHEDULER INTERNALS AVAILABLE FOR PORTING PURPOSES
+ *----------------------------------------------------------*/
+
+/*
+ * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY
+ * INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS
+ * AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
+ *
+ * Called from the real time kernel tick (either preemptive or cooperative),
+ * this increments the tick count and checks if any tasks that are blocked
+ * for a finite period require removing from a blocked list and placing on
+ * a ready list. If a non-zero value is returned then a context switch is
+ * required because either:
+ *   + A task was removed from a blocked list because its timeout had expired,
+ *     or
+ *   + Time slicing is in use and there is a task of equal priority to the
+ *     currently running task.
+ */
+BaseType_t xTaskIncrementTick( void ) PRIVILEGED_FUNCTION;
+
+/*
+ * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
+ * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
+ *
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED.
+ *
+ * Removes the calling task from the ready list and places it both
+ * on the list of tasks waiting for a particular event, and the
+ * list of delayed tasks. The task will be removed from both lists
+ * and replaced on the ready list should either the event occur (and
+ * there be no higher priority tasks waiting on the same event) or
+ * the delay period expires.
+ *
+ * The 'unordered' version replaces the event list item value with the
+ * xItemValue value, and inserts the list item at the end of the list.
+ *
+ * The 'ordered' version uses the existing event list item value (which is the
+ * owning task's priority) to insert the list item into the event list in task
+ * priority order.
+ *
+ * @param pxEventList The list containing tasks that are blocked waiting
+ * for the event to occur.
+ *
+ * @param xItemValue The item value to use for the event list item when the
+ * event list is not ordered by task priority.
+ *
+ * @param xTicksToWait The maximum amount of time that the task should wait
+ * for the event to occur. This is specified in kernel ticks, the constant
+ * portTICK_PERIOD_MS can be used to convert kernel ticks into a real time
+ * period.
+ */
+void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/*
+ * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
+ * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
+ *
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED.
+ *
+ * This function performs nearly the same function as vTaskPlaceOnEventList().
+ * The difference being that this function does not permit tasks to block
+ * indefinitely, whereas vTaskPlaceOnEventList() does.
+ *
+ */
+void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION;
+
+/*
+ * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
+ * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
+ *
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED.
+ *
+ * Removes a task from both the specified event list and the list of blocked
+ * tasks, and places it on a ready queue.
+ *
+ * xTaskRemoveFromEventList()/vTaskRemoveFromUnorderedEventList() will be called
+ * if either an event occurs to unblock a task, or the block timeout period
+ * expires.
+ *
+ * xTaskRemoveFromEventList() is used when the event list is in task priority
+ * order. It removes the list item from the head of the event list as that will
+ * have the highest priority owning task of all the tasks on the event list.
+ * vTaskRemoveFromUnorderedEventList() is used when the event list is not
+ * ordered and the event list items hold something other than the owning task's
+ * priority. In this case the event list item value is updated to the value
+ * passed in the xItemValue parameter.
+ *
+ * @return pdTRUE if the task being removed has a higher priority than the task
+ * making the call, otherwise pdFALSE.
+ */
+BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) PRIVILEGED_FUNCTION;
+void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue ) PRIVILEGED_FUNCTION;
+
+/*
+ * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY
+ * INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS
+ * AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
+ *
+ * Sets the pointer to the current TCB to the TCB of the highest priority task
+ * that is ready to run.
+ */
+void vTaskSwitchContext( void ) PRIVILEGED_FUNCTION;
+
+/*
+ * THESE FUNCTIONS MUST NOT BE USED FROM APPLICATION CODE. THEY ARE USED BY
+ * THE EVENT BITS MODULE.
+ */
+TickType_t uxTaskResetEventItemValue( void ) PRIVILEGED_FUNCTION;
+
+/*
+ * Return the handle of the calling task.
+ */
+TaskHandle_t xTaskGetCurrentTaskHandle( void ) PRIVILEGED_FUNCTION;
+
+/*
+ * Capture the current time status for future reference.
+ */
+void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION;
+
+/*
+ * Compare the time status now with that previously captured to see if the
+ * timeout has expired.
+ */
+BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ) PRIVILEGED_FUNCTION;
+
+/*
+ * Shortcut used by the queue implementation to prevent unnecessary calls to
+ * taskYIELD();
+ */
+void vTaskMissedYield( void ) PRIVILEGED_FUNCTION;
+
+/*
+ * Returns the scheduler state as taskSCHEDULER_RUNNING,
+ * taskSCHEDULER_NOT_STARTED or taskSCHEDULER_SUSPENDED.
+ */
+BaseType_t xTaskGetSchedulerState( void ) PRIVILEGED_FUNCTION;
+
+/*
+ * Raises the priority of the mutex holder to that of the calling task should
+ * the mutex holder have a priority less than the calling task.
+ */
+BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION;
+
+/*
+ * Set the priority of a task back to its proper priority in the case that it
+ * inherited a higher priority while it was holding a semaphore.
+ */
+BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION;
+
+/*
+ * If a higher priority task attempting to obtain a mutex caused a lower
+ * priority task to inherit the higher priority task's priority - but the higher
+ * priority task then timed out without obtaining the mutex, then the lower
+ * priority task will disinherit the priority again - but only down as far as
+ * the highest priority task that is still waiting for the mutex (if there were
+ * more than one task waiting for the mutex).
+ */
+void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask ) PRIVILEGED_FUNCTION;
+
+/*
+ * Get the uxTCBNumber assigned to the task referenced by the xTask parameter.
+ */
+UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+/*
+ * Set the uxTaskNumber of the task referenced by the xTask parameter to
+ * uxHandle.
+ */
+void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle ) PRIVILEGED_FUNCTION;
+
+/*
+ * Only available when configUSE_TICKLESS_IDLE is set to 1.
+ * If tickless mode is being used, or a low power mode is implemented, then
+ * the tick interrupt will not execute during idle periods. When this is the
+ * case, the tick count value maintained by the scheduler needs to be kept up
+ * to date with the actual execution time by being skipped forward by a time
+ * equal to the idle period.
+ */
+void vTaskStepTick( const TickType_t xTicksToJump ) PRIVILEGED_FUNCTION;
+
+/*
+ * Only available when configUSE_TICKLESS_IDLE is set to 1.
+ * Provided for use within portSUPPRESS_TICKS_AND_SLEEP() to allow the port
+ * specific sleep function to determine if it is ok to proceed with the sleep,
+ * and if it is ok to proceed, if it is ok to sleep indefinitely.
+ *
+ * This function is necessary because portSUPPRESS_TICKS_AND_SLEEP() is only
+ * called with the scheduler suspended, not from within a critical section. It
+ * is therefore possible for an interrupt to request a context switch between
+ * portSUPPRESS_TICKS_AND_SLEEP() and the low power mode actually being
+ * entered. eTaskConfirmSleepModeStatus() should be called from a short
+ * critical section between the timer being stopped and the sleep mode being
+ * entered to ensure it is ok to proceed into the sleep mode.
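+ *
+ * Example usage (a minimal sketch only; the prvStopTickTimer(),
+ * prvStartTickTimer() and prvSleepUntilInterrupt() helpers are hypothetical
+ * port specific functions, not part of FreeRTOS):
+ *
+ *	void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
+ *	{
+ *		// Stop the tick source, then confirm inside a critical section
+ *		// that it is still ok to enter the low power state.
+ *		prvStopTickTimer();
+ *		portDISABLE_INTERRUPTS();
+ *
+ *		if( eTaskConfirmSleepModeStatus() == eAbortSleep )
+ *		{
+ *			// A context switch was requested - abandon entering the low
+ *			// power state and restart the tick.
+ *			prvStartTickTimer();
+ *		}
+ *		else
+ *		{
+ *			// Sleep, then step the tick count forward by the time slept.
+ *			prvSleepUntilInterrupt( xExpectedIdleTime );
+ *			vTaskStepTick( xExpectedIdleTime );
+ *			prvStartTickTimer();
+ *		}
+ *
+ *		portENABLE_INTERRUPTS();
+ *	}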
+ */
+eSleepModeStatus eTaskConfirmSleepModeStatus( void ) PRIVILEGED_FUNCTION;
+
+/*
+ * For internal use only. Increment the mutex held count when a mutex is
+ * taken and return the handle of the task that has taken the mutex.
+ */
+void *pvTaskIncrementMutexHeldCount( void ) PRIVILEGED_FUNCTION;
+
+/*
+ * For internal use only. Same as vTaskSetTimeOutState(), but without a critical
+ * section.
+ */
+void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION;
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* INC_TASK_H */
+
+
+
diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/timers.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/timers.h
new file mode 100644
index 000000000..7e2eceb16
--- /dev/null
+++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/include/timers.h
@@ -0,0 +1,1277 @@
+/*
+ * FreeRTOS Kernel V10.0.1
+ * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+
+#ifndef TIMERS_H
+#define TIMERS_H
+
+#ifndef INC_FREERTOS_H
+	#error "include FreeRTOS.h must appear in source files before include timers.h"
+#endif
+
+/*lint -save -e537 These headers are only multiply included if the application code
+happens to also be including task.h. */
+#include "task.h"
+/*lint -restore */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*-----------------------------------------------------------
+ * MACROS AND DEFINITIONS
+ *----------------------------------------------------------*/
+
+/* IDs for commands that can be sent/received on the timer queue. These are to
+be used solely through the macros that make up the public software timer API,
+as defined below. The commands that are sent from interrupts must use the
+highest numbers as tmrFIRST_FROM_ISR_COMMAND is used to determine if the task
+or interrupt version of the queue send function should be used.
*/
+#define tmrCOMMAND_EXECUTE_CALLBACK_FROM_ISR	( ( BaseType_t ) -2 )
+#define tmrCOMMAND_EXECUTE_CALLBACK				( ( BaseType_t ) -1 )
+#define tmrCOMMAND_START_DONT_TRACE				( ( BaseType_t ) 0 )
+#define tmrCOMMAND_START						( ( BaseType_t ) 1 )
+#define tmrCOMMAND_RESET						( ( BaseType_t ) 2 )
+#define tmrCOMMAND_STOP							( ( BaseType_t ) 3 )
+#define tmrCOMMAND_CHANGE_PERIOD				( ( BaseType_t ) 4 )
+#define tmrCOMMAND_DELETE						( ( BaseType_t ) 5 )
+
+#define tmrFIRST_FROM_ISR_COMMAND				( ( BaseType_t ) 6 )
+#define tmrCOMMAND_START_FROM_ISR				( ( BaseType_t ) 6 )
+#define tmrCOMMAND_RESET_FROM_ISR				( ( BaseType_t ) 7 )
+#define tmrCOMMAND_STOP_FROM_ISR				( ( BaseType_t ) 8 )
+#define tmrCOMMAND_CHANGE_PERIOD_FROM_ISR		( ( BaseType_t ) 9 )
+
+
+/**
+ * Type by which software timers are referenced. For example, a call to
+ * xTimerCreate() returns a TimerHandle_t variable that can then be used to
+ * reference the subject timer in calls to other software timer API functions
+ * (for example, xTimerStart(), xTimerReset(), etc.).
+ */
+typedef void * TimerHandle_t;
+
+/*
+ * Defines the prototype to which timer callback functions must conform.
+ */
+typedef void (*TimerCallbackFunction_t)( TimerHandle_t xTimer );
+
+/*
+ * Defines the prototype to which functions used with the
+ * xTimerPendFunctionCallFromISR() function must conform.
+ */
+typedef void (*PendedFunction_t)( void *, uint32_t );
+
+/**
+ * TimerHandle_t xTimerCreate( 	const char * const pcTimerName,
+ * 								TickType_t xTimerPeriodInTicks,
+ * 								UBaseType_t uxAutoReload,
+ * 								void * pvTimerID,
+ * 								TimerCallbackFunction_t pxCallbackFunction );
+ *
+ * Creates a new software timer instance, and returns a handle by which the
+ * created software timer can be referenced.
+ *
+ * Internally, within the FreeRTOS implementation, software timers use a block
+ * of memory, in which the timer data structure is stored. If a software timer
+ * is created using xTimerCreate() then the required memory is automatically
+ * dynamically allocated inside the xTimerCreate() function. (see
+ * http://www.freertos.org/a00111.html). If a software timer is created using
+ * xTimerCreateStatic() then the application writer must provide the memory that
+ * will get used by the software timer. xTimerCreateStatic() therefore allows a
+ * software timer to be created without using any dynamic memory allocation.
+ *
+ * Timers are created in the dormant state. The xTimerStart(), xTimerReset(),
+ * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and
+ * xTimerChangePeriodFromISR() API functions can all be used to transition a
+ * timer into the active state.
+ *
+ * @param pcTimerName A text name that is assigned to the timer. This is done
+ * purely to assist debugging. The kernel itself only ever references a timer
+ * by its handle, and never by its name.
+ *
+ * @param xTimerPeriodInTicks The timer period. The time is defined in tick
+ * periods so the constant portTICK_PERIOD_MS can be used to convert a time that
+ * has been specified in milliseconds. For example, if the timer must expire
+ * after 100 ticks, then xTimerPeriodInTicks should be set to 100.
+ * Alternatively, if the timer must expire after 500ms, then xTimerPeriodInTicks
+ * can be set to ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is
+ * less than or equal to 1000.
+ *
+ * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will
+ * expire repeatedly with a frequency set by the xTimerPeriodInTicks parameter.
+ * If uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and + * enter the dormant state after it expires. + * + * @param pvTimerID An identifier that is assigned to the timer being created. + * Typically this would be used in the timer callback function to identify which + * timer expired when the same callback function is assigned to more than one + * timer. + * + * @param pxCallbackFunction The function to call when the timer expires. + * Callback functions must have the prototype defined by TimerCallbackFunction_t, + * which is "void vCallbackFunction( TimerHandle_t xTimer );". + * + * @return If the timer is successfully created then a handle to the newly + * created timer is returned. If the timer cannot be created (because either + * there is insufficient FreeRTOS heap remaining to allocate the timer + * structures, or the timer period was set to 0) then NULL is returned. + * + * Example usage: + * @verbatim + * #define NUM_TIMERS 5 + * + * // An array to hold handles to the created timers. + * TimerHandle_t xTimers[ NUM_TIMERS ]; + * + * // An array to hold a count of the number of times each timer expires. + * int32_t lExpireCounters[ NUM_TIMERS ] = { 0 }; + * + * // Define a callback function that will be used by multiple timer instances. + * // The callback function does nothing but count the number of times the + * // associated timer expires, and stop the timer once the timer has expired + * // 10 times. + * void vTimerCallback( TimerHandle_t pxTimer ) + * { + * int32_t lArrayIndex; + * const int32_t xMaxExpiryCountBeforeStopping = 10; + * + * // Optionally do something if the pxTimer parameter is NULL. + * configASSERT( pxTimer ); + * + * // Which timer expired? + * lArrayIndex = ( int32_t ) pvTimerGetTimerID( pxTimer ); + * + * // Increment the number of times that pxTimer has expired. + * lExpireCounters[ lArrayIndex ] += 1; + * + * // If the timer has expired 10 times then stop it from running. + * if( lExpireCounters[ lArrayIndex ] == xMaxExpiryCountBeforeStopping ) + * { + * // Do not use a block time if calling a timer API function from a + * // timer callback function, as doing so could cause a deadlock! + * xTimerStop( pxTimer, 0 ); + * } + * } + * + * void main( void ) + * { + * int32_t x; + * + * // Create then start some timers. Starting the timers before the scheduler + * // has been started means the timers will start running immediately that + * // the scheduler starts. + * for( x = 0; x < NUM_TIMERS; x++ ) + * { + * xTimers[ x ] = xTimerCreate( "Timer", // Just a text name, not used by the kernel. + * ( 100 * x ), // The timer period in ticks. + * pdTRUE, // The timers will auto-reload themselves when they expire. + * ( void * ) x, // Assign each timer a unique id equal to its array index. + * vTimerCallback // Each timer calls the same callback when it expires. + * ); + * + * if( xTimers[ x ] == NULL ) + * { + * // The timer was not created. + * } + * else + * { + * // Start the timer. No block time is specified, and even if one was + * // it would be ignored because the scheduler has not yet been + * // started. + * if( xTimerStart( xTimers[ x ], 0 ) != pdPASS ) + * { + * // The timer could not be set into the Active state. + * } + * } + * } + * + * // ... + * // Create tasks here. + * // ... + * + * // Starting the scheduler will start the timers running as they have already + * // been set into the active state. + * vTaskStartScheduler(); + * + * // Should not reach here. 
+ * for( ;; ); + * } + * @endverbatim + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + TimerHandle_t xTimerCreate( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction ) PRIVILEGED_FUNCTION; +#endif + +/** + * TimerHandle_t xTimerCreateStatic(const char * const pcTimerName, + * TickType_t xTimerPeriodInTicks, + * UBaseType_t uxAutoReload, + * void * pvTimerID, + * TimerCallbackFunction_t pxCallbackFunction, + * StaticTimer_t *pxTimerBuffer ); + * + * Creates a new software timer instance, and returns a handle by which the + * created software timer can be referenced. + * + * Internally, within the FreeRTOS implementation, software timers use a block + * of memory, in which the timer data structure is stored. If a software timer + * is created using xTimerCreate() then the required memory is automatically + * dynamically allocated inside the xTimerCreate() function. (see + * http://www.freertos.org/a00111.html). If a software timer is created using + * xTimerCreateStatic() then the application writer must provide the memory that + * will get used by the software timer. xTimerCreateStatic() therefore allows a + * software timer to be created without using any dynamic memory allocation. + * + * Timers are created in the dormant state. The xTimerStart(), xTimerReset(), + * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and + * xTimerChangePeriodFromISR() API functions can all be used to transition a + * timer into the active state. + * + * @param pcTimerName A text name that is assigned to the timer. This is done + * purely to assist debugging. The kernel itself only ever references a timer + * by its handle, and never by its name. + * + * @param xTimerPeriodInTicks The timer period. The time is defined in tick + * periods so the constant portTICK_PERIOD_MS can be used to convert a time that + * has been specified in milliseconds. For example, if the timer must expire + * after 100 ticks, then xTimerPeriodInTicks should be set to 100. + * Alternatively, if the timer must expire after 500ms, then xPeriod can be set + * to ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than or + * equal to 1000. + * + * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will + * expire repeatedly with a frequency set by the xTimerPeriodInTicks parameter. + * If uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and + * enter the dormant state after it expires. + * + * @param pvTimerID An identifier that is assigned to the timer being created. + * Typically this would be used in the timer callback function to identify which + * timer expired when the same callback function is assigned to more than one + * timer. + * + * @param pxCallbackFunction The function to call when the timer expires. + * Callback functions must have the prototype defined by TimerCallbackFunction_t, + * which is "void vCallbackFunction( TimerHandle_t xTimer );". + * + * @param pxTimerBuffer Must point to a variable of type StaticTimer_t, which + * will be then be used to hold the software timer's data structures, removing + * the need for the memory to be allocated dynamically. + * + * @return If the timer is created then a handle to the created timer is + * returned. If pxTimerBuffer was NULL then NULL is returned. 
+ *
+ * Example usage:
+ * @verbatim
+ *
+ * // The buffer used to hold the software timer's data structure.
+ * static StaticTimer_t xTimerBuffer;
+ *
+ * // A variable that will be incremented by the software timer's callback
+ * // function.
+ * UBaseType_t uxVariableToIncrement = 0;
+ *
+ * // A software timer callback function that increments a variable passed to
+ * // it when the software timer was created. After the 5th increment the
+ * // callback function stops the software timer.
+ * static void prvTimerCallback( TimerHandle_t xExpiredTimer )
+ * {
+ *     UBaseType_t *puxVariableToIncrement;
+ *     BaseType_t xReturned;
+ *
+ *     // Obtain the address of the variable to increment from the timer ID.
+ *     puxVariableToIncrement = ( UBaseType_t * ) pvTimerGetTimerID( xExpiredTimer );
+ *
+ *     // Increment the variable to show the timer callback has executed.
+ *     ( *puxVariableToIncrement )++;
+ *
+ *     // If this callback has executed the required number of times, stop the
+ *     // timer.
+ *     if( *puxVariableToIncrement == 5 )
+ *     {
+ *         // This is called from a timer callback so must not block.
+ *         xTimerStop( xExpiredTimer, staticDONT_BLOCK );
+ *     }
+ * }
+ *
+ *
+ * void main( void )
+ * {
+ *     // Create the software timer. xTimerCreateStatic() has one more parameter
+ *     // than the normal xTimerCreate() API function. The parameter is a pointer
+ *     // to the StaticTimer_t structure that will hold the software timer
+ *     // structure. If the parameter is passed as NULL then the structure will be
+ *     // allocated dynamically, just as if xTimerCreate() had been called.
+ *     xTimer = xTimerCreateStatic( "T1",             // Text name for the task. Helps debugging only. Not used by FreeRTOS.
+ *                      xTimerPeriod,                 // The period of the timer in ticks.
+ *                      pdTRUE,                       // This is an auto-reload timer.
+ *                      ( void * ) &uxVariableToIncrement, // A variable incremented by the software timer's callback function
+ *                      prvTimerCallback,             // The function to execute when the timer expires.
+ *                      &xTimerBuffer );              // The buffer that will hold the software timer structure.
+ *
+ *     // The scheduler has not started yet so a block time is not used.
+ *     xReturned = xTimerStart( xTimer, 0 );
+ *
+ *     // ...
+ *     // Create tasks here.
+ *     // ...
+ *
+ *     // Starting the scheduler will start the timers running as they have already
+ *     // been set into the active state.
+ *     vTaskStartScheduler();
+ *
+ *     // Should not reach here.
+ *     for( ;; );
+ * }
+ * @endverbatim
+ */
+#if( configSUPPORT_STATIC_ALLOCATION == 1 )
+	TimerHandle_t xTimerCreateStatic(	const char * const pcTimerName,		/*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+										const TickType_t xTimerPeriodInTicks,
+										const UBaseType_t uxAutoReload,
+										void * const pvTimerID,
+										TimerCallbackFunction_t pxCallbackFunction,
+										StaticTimer_t *pxTimerBuffer ) PRIVILEGED_FUNCTION;
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+/**
+ * void *pvTimerGetTimerID( TimerHandle_t xTimer );
+ *
+ * Returns the ID assigned to the timer.
+ *
+ * IDs are assigned to timers using the pvTimerID parameter of the call to
+ * xTimerCreate() that was used to create the timer, and by calling the
+ * vTimerSetTimerID() API function.
+ *
+ * If the same callback function is assigned to multiple timers then the timer
+ * ID can be used as timer specific (timer local) storage.
+ *
+ * @param xTimer The timer being queried.
+ *
+ * @return The ID assigned to the timer being queried.
+ *
+ * Example usage:
+ *
+ * See the xTimerCreate() API function example usage scenario.
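+ *
+ * A second minimal sketch (illustrative only, assuming the timer's ID was
+ * initialised to 0 when the timer was created), using the ID as timer local
+ * storage to count expiries of a callback shared by several timers:
+ *
+ *	void vSharedTimerCallback( TimerHandle_t xTimer )
+ *	{
+ *		uint32_t ulCount;
+ *
+ *		// Read the expiry count stored in this timer's ID.
+ *		ulCount = ( uint32_t ) pvTimerGetTimerID( xTimer );
+ *		ulCount++;
+ *
+ *		// Store the updated count back into the timer's ID.
+ *		vTimerSetTimerID( xTimer, ( void * ) ulCount );
+ *	}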
+ */ +void *pvTimerGetTimerID( const TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + +/** + * void vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ); + * + * Sets the ID assigned to the timer. + * + * IDs are assigned to timers using the pvTimerID parameter of the call to + * xTimerCreate() that was used to create the timer. + * + * If the same callback function is assigned to multiple timers then the timer + * ID can be used as timer specific (timer local) storage. + * + * @param xTimer The timer being updated. + * + * @param pvNewID The ID to assign to the timer. + * + * Example usage: + * + * See the xTimerCreate() API function example usage scenario. + */ +void vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ) PRIVILEGED_FUNCTION; + +/** + * BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer ); + * + * Queries a timer to see if it is active or dormant. + * + * A timer will be dormant if: + * 1) It has been created but not started, or + * 2) It is an expired one-shot timer that has not been restarted. + * + * Timers are created in the dormant state. The xTimerStart(), xTimerReset(), + * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and + * xTimerChangePeriodFromISR() API functions can all be used to transition a timer into the + * active state. + * + * @param xTimer The timer being queried. + * + * @return pdFALSE will be returned if the timer is dormant. A value other than + * pdFALSE will be returned if the timer is active. + * + * Example usage: + * @verbatim + * // This function assumes xTimer has already been created. + * void vAFunction( TimerHandle_t xTimer ) + * { + * if( xTimerIsTimerActive( xTimer ) != pdFALSE ) // or more simply and equivalently "if( xTimerIsTimerActive( xTimer ) )" + * { + * // xTimer is active, do something. + * } + * else + * { + * // xTimer is not active, do something else. + * } + * } + * @endverbatim + */ +BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + +/** + * TaskHandle_t xTimerGetTimerDaemonTaskHandle( void ); + * + * Simply returns the handle of the timer service/daemon task. It is not valid + * to call xTimerGetTimerDaemonTaskHandle() before the scheduler has been started. + */ +TaskHandle_t xTimerGetTimerDaemonTaskHandle( void ) PRIVILEGED_FUNCTION; + +/** + * BaseType_t xTimerStart( TimerHandle_t xTimer, TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerStart() starts a timer that was previously created using the + * xTimerCreate() API function. If the timer had already been started and was + * already in the active state, then xTimerStart() has equivalent functionality + * to the xTimerReset() API function. + * + * Starting a timer ensures the timer is in the active state. If the timer + * is not stopped, deleted, or reset in the meantime, the callback function + * associated with the timer will get called 'n' ticks after xTimerStart() was + * called, where 'n' is the timer's defined period.
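+ * + * As an illustrative figure (not part of the original comment): a timer + * created with a period of 100 ticks and started when the tick count is 50 + * will, provided it is not stopped or reset first, have its callback executed + * when the tick count reaches 150.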
+ * + * It is valid to call xTimerStart() before the scheduler has been started, but + * when this is done the timer will not actually start until the scheduler is + * started, and the timer's expiry time will be relative to when the scheduler is + * started, not relative to when xTimerStart() was called. + * + * The configUSE_TIMERS configuration constant must be set to 1 for xTimerStart() + * to be available. + * + * @param xTimer The handle of the timer being started/restarted. + * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the start command to be successfully + * sent to the timer command queue, should the queue already be full when + * xTimerStart() was called. xTicksToWait is ignored if xTimerStart() is called + * before the scheduler is started. + * + * @return pdFAIL will be returned if the start command could not be sent to + * the timer command queue even after xTicksToWait ticks had passed. pdPASS will + * be returned if the command was successfully sent to the timer command queue. + * When the command is actually processed will depend on the priority of the + * timer service/daemon task relative to other tasks in the system, although the + * timer's expiry time is relative to when xTimerStart() is actually called. The + * timer service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. + * + * Example usage: + * + * See the xTimerCreate() API function example usage scenario. + * + */ +#define xTimerStart( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_START, ( xTaskGetTickCount() ), NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerStop( TimerHandle_t xTimer, TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerStop() stops a timer that was previously started using any of the + * xTimerStart(), xTimerReset(), xTimerStartFromISR(), xTimerResetFromISR(), + * xTimerChangePeriod() or xTimerChangePeriodFromISR() API functions. + * + * Stopping a timer ensures the timer is not in the active state. + * + * The configUSE_TIMERS configuration constant must be set to 1 for xTimerStop() + * to be available. + * + * @param xTimer The handle of the timer being stopped. + * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the stop command to be successfully + * sent to the timer command queue, should the queue already be full when + * xTimerStop() was called. xTicksToWait is ignored if xTimerStop() is called + * before the scheduler is started. + * + * @return pdFAIL will be returned if the stop command could not be sent to + * the timer command queue even after xTicksToWait ticks had passed. pdPASS will + * be returned if the command was successfully sent to the timer command queue. + * When the command is actually processed will depend on the priority of the + * timer service/daemon task relative to other tasks in the system. The timer + * service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant.
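+ * + * Beyond the scenario referenced under 'Example usage' below, a minimal + * sketch of stopping a timer from task context (the helper function name and + * the 10 tick block time are illustrative, not part of the original header): + * @verbatim + * void vStopTimerHelper( TimerHandle_t xTimer ) + * { + * // Attempt to queue the stop command, blocking for up to 10 ticks + * // should the timer command queue be full. + * if( xTimerStop( xTimer, 10 ) != pdPASS ) + * { + * // The stop command could not be sent even after waiting. + * } + * } + * @endverbatim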
+ * + * Example usage: + * + * See the xTimerCreate() API function example usage scenario. + * + */ +#define xTimerStop( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_STOP, 0U, NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerChangePeriod( TimerHandle_t xTimer, + * TickType_t xNewPeriod, + * TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerChangePeriod() changes the period of a timer that was previously + * created using the xTimerCreate() API function. + * + * xTimerChangePeriod() can be called to change the period of an active or + * dormant state timer. + * + * The configUSE_TIMERS configuration constant must be set to 1 for + * xTimerChangePeriod() to be available. + * + * @param xTimer The handle of the timer that is having its period changed. + * + * @param xNewPeriod The new period for xTimer. Timer periods are specified in + * tick periods, so the constant portTICK_PERIOD_MS can be used to convert a time + * that has been specified in milliseconds. For example, if the timer must + * expire after 100 ticks, then xNewPeriod should be set to 100. Alternatively, + * if the timer must expire after 500ms, then xNewPeriod can be set to + * ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than + * or equal to 1000. + * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the change period command to be + * successfully sent to the timer command queue, should the queue already be + * full when xTimerChangePeriod() was called. xTicksToWait is ignored if + * xTimerChangePeriod() is called before the scheduler is started. + * + * @return pdFAIL will be returned if the change period command could not be + * sent to the timer command queue even after xTicksToWait ticks had passed. + * pdPASS will be returned if the command was successfully sent to the timer + * command queue. When the command is actually processed will depend on the + * priority of the timer service/daemon task relative to other tasks in the + * system. The timer service/daemon task priority is set by the + * configTIMER_TASK_PRIORITY configuration constant. + * + * Example usage: + * @verbatim + * // This function assumes xTimer has already been created. If the timer + * // referenced by xTimer is already active when it is called, then the timer + * // is deleted. If the timer referenced by xTimer is not active when it is + * // called, then the period of the timer is set to 500ms and the timer is + * // started. + * void vAFunction( TimerHandle_t xTimer ) + * { + * if( xTimerIsTimerActive( xTimer ) != pdFALSE ) // or more simply and equivalently "if( xTimerIsTimerActive( xTimer ) )" + * { + * // xTimer is already active - delete it. + * xTimerDelete( xTimer ); + * } + * else + * { + * // xTimer is not active, change its period to 500ms. This will also + * // cause the timer to start. Block for a maximum of 100 ticks if the + * // change period command cannot immediately be sent to the timer + * // command queue. 
+ * if( xTimerChangePeriod( xTimer, 500 / portTICK_PERIOD_MS, 100 ) == pdPASS ) + * { + * // The command was successfully sent. + * } + * else + * { + * // The command could not be sent, even after waiting for 100 ticks + * // to pass. Take appropriate action here. + * } + * } + * } + * @endverbatim + */ + #define xTimerChangePeriod( xTimer, xNewPeriod, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_CHANGE_PERIOD, ( xNewPeriod ), NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerDelete( TimerHandle_t xTimer, TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerDelete() deletes a timer that was previously created using the + * xTimerCreate() API function. + * + * The configUSE_TIMERS configuration constant must be set to 1 for + * xTimerDelete() to be available. + * + * @param xTimer The handle of the timer being deleted. + * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the delete command to be + * successfully sent to the timer command queue, should the queue already be + * full when xTimerDelete() was called. xTicksToWait is ignored if xTimerDelete() + * is called before the scheduler is started. + * + * @return pdFAIL will be returned if the delete command could not be sent to + * the timer command queue even after xTicksToWait ticks had passed. pdPASS will + * be returned if the command was successfully sent to the timer command queue. + * When the command is actually processed will depend on the priority of the + * timer service/daemon task relative to other tasks in the system. The timer + * service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. + * + * Example usage: + * + * See the xTimerChangePeriod() API function example usage scenario. + */ +#define xTimerDelete( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_DELETE, 0U, NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerReset( TimerHandle_t xTimer, TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerReset() re-starts a timer that was previously created using the + * xTimerCreate() API function. If the timer had already been started and was + * already in the active state, then xTimerReset() will cause the timer to + * re-evaluate its expiry time so that it is relative to when xTimerReset() was + * called. If the timer was in the dormant state then xTimerReset() has + * equivalent functionality to the xTimerStart() API function. + * + * Resetting a timer ensures the timer is in the active state. 
If the timer + * is not stopped, deleted, or reset in the meantime, the callback function + * associated with the timer will get called 'n' ticks after xTimerReset() was + * called, where 'n' is the timer's defined period. + * + * It is valid to call xTimerReset() before the scheduler has been started, but + * when this is done the timer will not actually start until the scheduler is + * started, and the timer's expiry time will be relative to when the scheduler is + * started, not relative to when xTimerReset() was called. + * + * The configUSE_TIMERS configuration constant must be set to 1 for xTimerReset() + * to be available. + * + * @param xTimer The handle of the timer being reset/started/restarted. + * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the reset command to be successfully + * sent to the timer command queue, should the queue already be full when + * xTimerReset() was called. xTicksToWait is ignored if xTimerReset() is called + * before the scheduler is started. + * + * @return pdFAIL will be returned if the reset command could not be sent to + * the timer command queue even after xTicksToWait ticks had passed. pdPASS will + * be returned if the command was successfully sent to the timer command queue. + * When the command is actually processed will depend on the priority of the + * timer service/daemon task relative to other tasks in the system, although the + * timer's expiry time is relative to when xTimerReset() is actually called. The + * timer service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. + * + * Example usage: + * @verbatim + * // When a key is pressed, an LCD back-light is switched on. If 5 seconds pass + * // without a key being pressed, then the LCD back-light is switched off. In + * // this case, the timer is a one-shot timer. + * + * TimerHandle_t xBacklightTimer = NULL; + * + * // The callback function assigned to the one-shot timer. In this case the + * // parameter is not used. + * void vBacklightTimerCallback( TimerHandle_t pxTimer ) + * { + * // The timer expired, therefore 5 seconds must have passed since a key + * // was pressed. Switch off the LCD back-light. + * vSetBacklightState( BACKLIGHT_OFF ); + * } + * + * // The key press event handler. + * void vKeyPressEventHandler( char cKey ) + * { + * // Ensure the LCD back-light is on, then reset the timer that is + * // responsible for turning the back-light off after 5 seconds of + * // key inactivity. Wait 100 ticks for the command to be successfully sent + * // if it cannot be sent immediately. + * vSetBacklightState( BACKLIGHT_ON ); + * if( xTimerReset( xBacklightTimer, 100 ) != pdPASS ) + * { + * // The reset command was not executed successfully. Take appropriate + * // action here. + * } + * + * // Perform the rest of the key processing here. + * } + * + * void main( void ) + * { + * // Create then start the one-shot timer that is responsible for turning + * // the back-light off if no keys are pressed within a 5 second period. + * xBacklightTimer = xTimerCreate( "BacklightTimer", // Just a text name, not used by the kernel. + * ( 5000 / portTICK_PERIOD_MS ), // The timer period in ticks. + * pdFALSE, // The timer is a one-shot timer. + * 0, // The id is not used by the callback so can take any value. + * vBacklightTimerCallback // The callback function that switches the LCD back-light off.
+ * ); + * + * if( xBacklightTimer == NULL ) + * { + * // The timer was not created. + * } + * else + * { + * // Start the timer. No block time is specified, and even if one was + * // it would be ignored because the scheduler has not yet been + * // started. + * if( xTimerStart( xBacklightTimer, 0 ) != pdPASS ) + * { + * // The timer could not be set into the Active state. + * } + * } + * + * // ... + * // Create tasks here. + * // ... + * + * // Starting the scheduler will start the timer running as it has already + * // been set into the active state. + * vTaskStartScheduler(); + * + * // Should not reach here. + * for( ;; ); + * } + * @endverbatim + */ +#define xTimerReset( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_RESET, ( xTaskGetTickCount() ), NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerStartFromISR( TimerHandle_t xTimer, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * A version of xTimerStart() that can be called from an interrupt service + * routine. + * + * @param xTimer The handle of the timer being started/restarted. + * + * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most + * of its time in the Blocked state, waiting for messages to arrive on the timer + * command queue. Calling xTimerStartFromISR() writes a message to the timer + * command queue, so has the potential to transition the timer service/daemon + * task out of the Blocked state. If calling xTimerStartFromISR() causes the + * timer service/daemon task to leave the Blocked state, and the timer service/ + * daemon task has a priority equal to or greater than the currently executing + * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will + * get set to pdTRUE internally within the xTimerStartFromISR() function. If + * xTimerStartFromISR() sets this value to pdTRUE then a context switch should + * be performed before the interrupt exits. + * + * @return pdFAIL will be returned if the start command could not be sent to + * the timer command queue. pdPASS will be returned if the command was + * successfully sent to the timer command queue. When the command is actually + * processed will depend on the priority of the timer service/daemon task + * relative to other tasks in the system, although the timer's expiry time is + * relative to when xTimerStartFromISR() is actually called. The timer + * service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. + * + * Example usage: + * @verbatim + * // This scenario assumes xBacklightTimer has already been created. When a + * // key is pressed, an LCD back-light is switched on. If 5 seconds pass + * // without a key being pressed, then the LCD back-light is switched off. In + * // this case, the timer is a one-shot timer, and unlike the example given for + * // the xTimerReset() function, the key press event handler is an interrupt + * // service routine. + * + * // The callback function assigned to the one-shot timer. In this case the + * // parameter is not used. + * void vBacklightTimerCallback( TimerHandle_t pxTimer ) + * { + * // The timer expired, therefore 5 seconds must have passed since a key + * // was pressed. Switch off the LCD back-light. + * vSetBacklightState( BACKLIGHT_OFF ); + * } + * + * // The key press interrupt service routine.
+ * void vKeyPressEventInterruptHandler( void ) + * { + * BaseType_t xHigherPriorityTaskWoken = pdFALSE; + * + * // Ensure the LCD back-light is on, then restart the timer that is + * // responsible for turning the back-light off after 5 seconds of + * // key inactivity. This is an interrupt service routine so can only + * // call FreeRTOS API functions that end in "FromISR". + * vSetBacklightState( BACKLIGHT_ON ); + * + * // xTimerStartFromISR() or xTimerResetFromISR() could be called here + * // as both cause the timer to re-calculate its expiry time. + * // xHigherPriorityTaskWoken was initialised to pdFALSE when it was + * // declared (in this function). + * if( xTimerStartFromISR( xBacklightTimer, &xHigherPriorityTaskWoken ) != pdPASS ) + * { + * // The start command was not executed successfully. Take appropriate + * // action here. + * } + * + * // Perform the rest of the key processing here. + * + * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch + * // should be performed. The syntax required to perform a context switch + * // from inside an ISR varies from port to port, and from compiler to + * // compiler. Inspect the demos for the port you are using to find the + * // actual syntax required. + * if( xHigherPriorityTaskWoken != pdFALSE ) + * { + * // Call the interrupt safe yield function here (actual function + * // depends on the FreeRTOS port being used). + * } + * } + * @endverbatim + */ +#define xTimerStartFromISR( xTimer, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_START_FROM_ISR, ( xTaskGetTickCountFromISR() ), ( pxHigherPriorityTaskWoken ), 0U ) + +/** + * BaseType_t xTimerStopFromISR( TimerHandle_t xTimer, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * A version of xTimerStop() that can be called from an interrupt service + * routine. + * + * @param xTimer The handle of the timer being stopped. + * + * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most + * of its time in the Blocked state, waiting for messages to arrive on the timer + * command queue. Calling xTimerStopFromISR() writes a message to the timer + * command queue, so has the potential to transition the timer service/daemon + * task out of the Blocked state. If calling xTimerStopFromISR() causes the + * timer service/daemon task to leave the Blocked state, and the timer service/ + * daemon task has a priority equal to or greater than the currently executing + * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will + * get set to pdTRUE internally within the xTimerStopFromISR() function. If + * xTimerStopFromISR() sets this value to pdTRUE then a context switch should + * be performed before the interrupt exits. + * + * @return pdFAIL will be returned if the stop command could not be sent to + * the timer command queue. pdPASS will be returned if the command was + * successfully sent to the timer command queue. When the command is actually + * processed will depend on the priority of the timer service/daemon task + * relative to other tasks in the system. The timer service/daemon task + * priority is set by the configTIMER_TASK_PRIORITY configuration constant. + * + * Example usage: + * @verbatim + * // This scenario assumes xTimer has already been created and started. When + * // an interrupt occurs, the timer should be simply stopped. + * + * // The interrupt service routine that stops the timer. 
+ * void vAnExampleInterruptServiceRoutine( void ) + * { + * BaseType_t xHigherPriorityTaskWoken = pdFALSE; + * + * // The interrupt has occurred - simply stop the timer. + * // xHigherPriorityTaskWoken was set to pdFALSE where it was defined + * // (within this function). As this is an interrupt service routine, only + * // FreeRTOS API functions that end in "FromISR" can be used. + * if( xTimerStopFromISR( xTimer, &xHigherPriorityTaskWoken ) != pdPASS ) + * { + * // The stop command was not executed successfully. Take appropriate + * // action here. + * } + * + * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch + * // should be performed. The syntax required to perform a context switch + * // from inside an ISR varies from port to port, and from compiler to + * // compiler. Inspect the demos for the port you are using to find the + * // actual syntax required. + * if( xHigherPriorityTaskWoken != pdFALSE ) + * { + * // Call the interrupt safe yield function here (actual function + * // depends on the FreeRTOS port being used). + * } + * } + * @endverbatim + */ +#define xTimerStopFromISR( xTimer, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_STOP_FROM_ISR, 0, ( pxHigherPriorityTaskWoken ), 0U ) + +/** + * BaseType_t xTimerChangePeriodFromISR( TimerHandle_t xTimer, + * TickType_t xNewPeriod, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * A version of xTimerChangePeriod() that can be called from an interrupt + * service routine. + * + * @param xTimer The handle of the timer that is having its period changed. + * + * @param xNewPeriod The new period for xTimer. Timer periods are specified in + * tick periods, so the constant portTICK_PERIOD_MS can be used to convert a time + * that has been specified in milliseconds. For example, if the timer must + * expire after 100 ticks, then xNewPeriod should be set to 100. Alternatively, + * if the timer must expire after 500ms, then xNewPeriod can be set to + * ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than + * or equal to 1000. + * + * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most + * of its time in the Blocked state, waiting for messages to arrive on the timer + * command queue. Calling xTimerChangePeriodFromISR() writes a message to the + * timer command queue, so has the potential to transition the timer service/ + * daemon task out of the Blocked state. If calling xTimerChangePeriodFromISR() + * causes the timer service/daemon task to leave the Blocked state, and the + * timer service/daemon task has a priority equal to or greater than the + * currently executing task (the task that was interrupted), then + * *pxHigherPriorityTaskWoken will get set to pdTRUE internally within the + * xTimerChangePeriodFromISR() function. If xTimerChangePeriodFromISR() sets + * this value to pdTRUE then a context switch should be performed before the + * interrupt exits. + * + * @return pdFAIL will be returned if the command to change the timer's period + * could not be sent to the timer command queue. pdPASS will be returned if the + * command was successfully sent to the timer command queue. When the command + * is actually processed will depend on the priority of the timer service/daemon + * task relative to other tasks in the system. The timer service/daemon task + * priority is set by the configTIMER_TASK_PRIORITY configuration constant. + * + * Example usage: + * @verbatim + * // This scenario assumes xTimer has already been created and started.
When + * // an interrupt occurs, the period of xTimer should be changed to 500ms. + * + * // The interrupt service routine that changes the period of xTimer. + * void vAnExampleInterruptServiceRoutine( void ) + * { + * BaseType_t xHigherPriorityTaskWoken = pdFALSE; + * + * // The interrupt has occurred - change the period of xTimer to 500ms. + * // xHigherPriorityTaskWoken was set to pdFALSE where it was defined + * // (within this function). As this is an interrupt service routine, only + * // FreeRTOS API functions that end in "FromISR" can be used. + * if( xTimerChangePeriodFromISR( xTimer, 500 / portTICK_PERIOD_MS, &xHigherPriorityTaskWoken ) != pdPASS ) + * { + * // The command to change the timer's period was not executed + * // successfully. Take appropriate action here. + * } + * + * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch + * // should be performed. The syntax required to perform a context switch + * // from inside an ISR varies from port to port, and from compiler to + * // compiler. Inspect the demos for the port you are using to find the + * // actual syntax required. + * if( xHigherPriorityTaskWoken != pdFALSE ) + * { + * // Call the interrupt safe yield function here (actual function + * // depends on the FreeRTOS port being used). + * } + * } + * @endverbatim + */ +#define xTimerChangePeriodFromISR( xTimer, xNewPeriod, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_CHANGE_PERIOD_FROM_ISR, ( xNewPeriod ), ( pxHigherPriorityTaskWoken ), 0U ) + +/** + * BaseType_t xTimerResetFromISR( TimerHandle_t xTimer, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * A version of xTimerReset() that can be called from an interrupt service + * routine. + * + * @param xTimer The handle of the timer that is to be started, reset, or + * restarted. + * + * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most + * of its time in the Blocked state, waiting for messages to arrive on the timer + * command queue. Calling xTimerResetFromISR() writes a message to the timer + * command queue, so has the potential to transition the timer service/daemon + * task out of the Blocked state. If calling xTimerResetFromISR() causes the + * timer service/daemon task to leave the Blocked state, and the timer service/ + * daemon task has a priority equal to or greater than the currently executing + * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will + * get set to pdTRUE internally within the xTimerResetFromISR() function. If + * xTimerResetFromISR() sets this value to pdTRUE then a context switch should + * be performed before the interrupt exits. + * + * @return pdFAIL will be returned if the reset command could not be sent to + * the timer command queue. pdPASS will be returned if the command was + * successfully sent to the timer command queue. When the command is actually + * processed will depend on the priority of the timer service/daemon task + * relative to other tasks in the system, although the timer's expiry time is + * relative to when xTimerResetFromISR() is actually called. The timer service/daemon + * task priority is set by the configTIMER_TASK_PRIORITY configuration constant. + * + * Example usage: + * @verbatim + * // This scenario assumes xBacklightTimer has already been created. When a + * // key is pressed, an LCD back-light is switched on. If 5 seconds pass + * // without a key being pressed, then the LCD back-light is switched off.
In + * // this case, the timer is a one-shot timer, and unlike the example given for + * // the xTimerReset() function, the key press event handler is an interrupt + * // service routine. + * + * // The callback function assigned to the one-shot timer. In this case the + * // parameter is not used. + * void vBacklightTimerCallback( TimerHandle_t pxTimer ) + * { + * // The timer expired, therefore 5 seconds must have passed since a key + * // was pressed. Switch off the LCD back-light. + * vSetBacklightState( BACKLIGHT_OFF ); + * } + * + * // The key press interrupt service routine. + * void vKeyPressEventInterruptHandler( void ) + * { + * BaseType_t xHigherPriorityTaskWoken = pdFALSE; + * + * // Ensure the LCD back-light is on, then reset the timer that is + * // responsible for turning the back-light off after 5 seconds of + * // key inactivity. This is an interrupt service routine so can only + * // call FreeRTOS API functions that end in "FromISR". + * vSetBacklightState( BACKLIGHT_ON ); + * + * // xTimerStartFromISR() or xTimerResetFromISR() could be called here + * // as both cause the timer to re-calculate its expiry time. + * // xHigherPriorityTaskWoken was initialised to pdFALSE when it was + * // declared (in this function). + * if( xTimerResetFromISR( xBacklightTimer, &xHigherPriorityTaskWoken ) != pdPASS ) + * { + * // The reset command was not executed successfully. Take appropriate + * // action here. + * } + * + * // Perform the rest of the key processing here. + * + * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch + * // should be performed. The syntax required to perform a context switch + * // from inside an ISR varies from port to port, and from compiler to + * // compiler. Inspect the demos for the port you are using to find the + * // actual syntax required. + * if( xHigherPriorityTaskWoken != pdFALSE ) + * { + * // Call the interrupt safe yield function here (actual function + * // depends on the FreeRTOS port being used). + * } + * } + * @endverbatim + */ +#define xTimerResetFromISR( xTimer, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_RESET_FROM_ISR, ( xTaskGetTickCountFromISR() ), ( pxHigherPriorityTaskWoken ), 0U ) + + +/** + * BaseType_t xTimerPendFunctionCallFromISR( PendedFunction_t xFunctionToPend, + * void *pvParameter1, + * uint32_t ulParameter2, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * + * Used from application interrupt service routines to defer the execution of a + * function to the RTOS daemon task (the timer service task, hence this function + * is implemented in timers.c and is prefixed with 'Timer'). + * + * Ideally an interrupt service routine (ISR) is kept as short as possible, but + * sometimes an ISR either has a lot of processing to do, or needs to perform + * processing that is not deterministic. In these cases + * xTimerPendFunctionCallFromISR() can be used to defer processing of a function + * to the RTOS daemon task. + * + * A mechanism is provided that allows the interrupt to return directly to the + * task that will subsequently execute the pended callback function. This + * allows the callback function to execute contiguously in time with the + * interrupt - just as if the callback had executed in the interrupt itself. + * + * @param xFunctionToPend The function to execute from the timer service/ + * daemon task. The function must conform to the PendedFunction_t + * prototype. + * + * @param pvParameter1 The value of the callback function's first parameter. 
+ * The parameter has a void * type to allow it to be used to pass any type. + * For example, unsigned longs can be cast to a void *, or the void * can be + * used to point to a structure. + * + * @param ulParameter2 The value of the callback function's second parameter. + * + * @param pxHigherPriorityTaskWoken As mentioned above, calling this function + * will result in a message being sent to the timer daemon task. If the + * priority of the timer daemon task (which is set using + * configTIMER_TASK_PRIORITY in FreeRTOSConfig.h) is higher than the priority of + * the currently running task (the task the interrupt interrupted) then + * *pxHigherPriorityTaskWoken will be set to pdTRUE within + * xTimerPendFunctionCallFromISR(), indicating that a context switch should be + * requested before the interrupt exits. For that reason + * *pxHigherPriorityTaskWoken must be initialised to pdFALSE. See the + * example code below. + * + * @return pdPASS is returned if the message was successfully sent to the + * timer daemon task, otherwise pdFAIL is returned. + * + * Example usage: + * @verbatim + * + * // The callback function that will execute in the context of the daemon task. + * // Note callback functions must all use this same prototype. + * void vProcessInterface( void *pvParameter1, uint32_t ulParameter2 ) + * { + * BaseType_t xInterfaceToService; + * + * // The interface that requires servicing is passed in the second + * // parameter. The first parameter is not used in this case. + * xInterfaceToService = ( BaseType_t ) ulParameter2; + * + * // ...Perform the processing here... + * } + * + * // An ISR that receives data packets from multiple interfaces. + * void vAnISR( void ) + * { + * BaseType_t xInterfaceToService, xHigherPriorityTaskWoken; + * + * // Query the hardware to determine which interface needs processing. + * xInterfaceToService = prvCheckInterfaces(); + * + * // The actual processing is to be deferred to a task. Request that the + * // vProcessInterface() callback function be executed, passing in the + * // number of the interface that needs processing. The interface to + * // service is passed in the second parameter. The first parameter is + * // not used in this case. + * xHigherPriorityTaskWoken = pdFALSE; + * xTimerPendFunctionCallFromISR( vProcessInterface, NULL, ( uint32_t ) xInterfaceToService, &xHigherPriorityTaskWoken ); + * + * // If xHigherPriorityTaskWoken is now set to pdTRUE then a context + * // switch should be requested. The macro used is port specific and will + * // be either portYIELD_FROM_ISR() or portEND_SWITCHING_ISR() - refer to + * // the documentation page for the port being used. + * portYIELD_FROM_ISR( xHigherPriorityTaskWoken ); + * + * } + * @endverbatim + */ +BaseType_t xTimerPendFunctionCallFromISR( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/** + * BaseType_t xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, + * void *pvParameter1, + * uint32_t ulParameter2, + * TickType_t xTicksToWait ); + * + * + * Used to defer the execution of a function to the RTOS daemon task (the timer + * service task, hence this function is implemented in timers.c and is prefixed + * with 'Timer'). + * + * @param xFunctionToPend The function to execute from the timer service/ + * daemon task. The function must conform to the PendedFunction_t + * prototype. + * + * @param pvParameter1 The value of the callback function's first parameter.
+ * The parameter has a void * type to allow it to be used to pass any type. + * For example, unsigned longs can be cast to a void *, or the void * can be + * used to point to a structure. + * + * @param ulParameter2 The value of the callback function's second parameter. + * + * @param xTicksToWait Calling this function will result in a message being + * sent to the timer daemon task on a queue. xTicksToWait is the amount of + * time the calling task should remain in the Blocked state (so not using any + * processing time) for space to become available on the timer queue if the + * queue is found to be full. + * + * @return pdPASS is returned if the message was successfully sent to the + * timer daemon task, otherwise pdFAIL is returned. + * + */ +BaseType_t xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * const char * pcTimerGetName( TimerHandle_t xTimer ); + * + * Returns the name that was assigned to a timer when the timer was created. + * + * @param xTimer The handle of the timer being queried. + * + * @return The name assigned to the timer specified by the xTimer parameter. + */ +const char * pcTimerGetName( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * TickType_t xTimerGetPeriod( TimerHandle_t xTimer ); + * + * Returns the period of a timer. + * + * @param xTimer The handle of the timer being queried. + * + * @return The period of the timer in ticks. + */ +TickType_t xTimerGetPeriod( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + +/** + * TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ); + * + * Returns the time in ticks at which the timer will expire. If this is less + * than the current tick count then the expiry time has overflowed from the + * current time. + * + * @param xTimer The handle of the timer being queried. + * + * @return If the timer is running then the time in ticks at which the timer + * will next expire is returned. If the timer is not running then the return + * value is undefined. + */ +TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + +/* + * Functions beyond this part are not part of the public API and are intended + * for use by the kernel only. + */ +BaseType_t xTimerCreateTimerTask( void ) PRIVILEGED_FUNCTION; +BaseType_t xTimerGenericCommand( TimerHandle_t xTimer, const BaseType_t xCommandID, const TickType_t xOptionalValue, BaseType_t * const pxHigherPriorityTaskWoken, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +#if( configUSE_TRACE_FACILITY == 1 ) + void vTimerSetTimerNumber( TimerHandle_t xTimer, UBaseType_t uxTimerNumber ) PRIVILEGED_FUNCTION; + UBaseType_t uxTimerGetTimerNumber( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; +#endif + +#ifdef __cplusplus +} +#endif +#endif /* TIMERS_H */ + + + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/list.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/list.c new file mode 100644 index 000000000..e3a54e360 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/list.c @@ -0,0 +1,198 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#include+#include "FreeRTOS.h" +#include "list.h" + +/*----------------------------------------------------------- + * PUBLIC LIST API documented in list.h + *----------------------------------------------------------*/ + +void vListInitialise( List_t * const pxList ) +{ + /* The list structure contains a list item which is used to mark the + end of the list. To initialise the list the list end is inserted + as the only list entry. */ + pxList->pxIndex = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + + /* The list end value is the highest possible value in the list to + ensure it remains at the end of the list. */ + pxList->xListEnd.xItemValue = portMAX_DELAY; + + /* The list end next and previous pointers point to itself so we know + when the list is empty. */ + pxList->xListEnd.pxNext = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + pxList->xListEnd.pxPrevious = ( ListItem_t * ) &( pxList->xListEnd );/*lint !e826 !e740 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + + pxList->uxNumberOfItems = ( UBaseType_t ) 0U; + + /* Write known values into the list if + configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ); + listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ); +} +/*-----------------------------------------------------------*/ + +void vListInitialiseItem( ListItem_t * const pxItem ) +{ + /* Make sure the list item is not recorded as being on a list. */ + pxItem->pvContainer = NULL; + + /* Write known values into the list item if + configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ); + listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ); +} +/*-----------------------------------------------------------*/ + +void vListInsertEnd( List_t * const pxList, ListItem_t * const pxNewListItem ) +{ +ListItem_t * const pxIndex = pxList->pxIndex; + + /* Only effective when configASSERT() is also defined, these tests may catch + the list data structures being overwritten in memory. 
They will not catch + data errors caused by incorrect configuration or use of FreeRTOS. */ + listTEST_LIST_INTEGRITY( pxList ); + listTEST_LIST_ITEM_INTEGRITY( pxNewListItem ); + + /* Insert a new list item into pxList, but rather than sort the list, + makes the new list item the last item to be removed by a call to + listGET_OWNER_OF_NEXT_ENTRY(). */ + pxNewListItem->pxNext = pxIndex; + pxNewListItem->pxPrevious = pxIndex->pxPrevious; + + /* Only used during decision coverage testing. */ + mtCOVERAGE_TEST_DELAY(); + + pxIndex->pxPrevious->pxNext = pxNewListItem; + pxIndex->pxPrevious = pxNewListItem; + + /* Remember which list the item is in. */ + pxNewListItem->pvContainer = ( void * ) pxList; + + ( pxList->uxNumberOfItems )++; +} +/*-----------------------------------------------------------*/ + +void vListInsert( List_t * const pxList, ListItem_t * const pxNewListItem ) +{ +ListItem_t *pxIterator; +const TickType_t xValueOfInsertion = pxNewListItem->xItemValue; + + /* Only effective when configASSERT() is also defined, these tests may catch + the list data structures being overwritten in memory. They will not catch + data errors caused by incorrect configuration or use of FreeRTOS. */ + listTEST_LIST_INTEGRITY( pxList ); + listTEST_LIST_ITEM_INTEGRITY( pxNewListItem ); + + /* Insert the new list item into the list, sorted in xItemValue order. + + If the list already contains a list item with the same item value then the + new list item should be placed after it. This ensures that TCB's which are + stored in ready lists (all of which have the same xItemValue value) get a + share of the CPU. However, if the xItemValue is the same as the back marker + the iteration loop below will not end. Therefore the value is checked + first, and the algorithm slightly modified if necessary. */ + if( xValueOfInsertion == portMAX_DELAY ) + { + pxIterator = pxList->xListEnd.pxPrevious; + } + else + { + /* *** NOTE *********************************************************** + If you find your application is crashing here then likely causes are + listed below. In addition see http://www.freertos.org/FAQHelp.html for + more tips, and ensure configASSERT() is defined! + http://www.freertos.org/a00110.html#configASSERT + + 1) Stack overflow - + see http://www.freertos.org/Stacks-and-stack-overflow-checking.html + 2) Incorrect interrupt priority assignment, especially on Cortex-M + parts where numerically high priority values denote low actual + interrupt priorities, which can seem counter intuitive. See + http://www.freertos.org/RTOS-Cortex-M3-M4.html and the definition + of configMAX_SYSCALL_INTERRUPT_PRIORITY on + http://www.freertos.org/a00110.html + 3) Calling an API function from within a critical section or when + the scheduler is suspended, or calling an API function that does + not end in "FromISR" from an interrupt. + 4) Using a queue or semaphore before it has been initialised or + before the scheduler has been started (are interrupts firing + before vTaskStartScheduler() has been called?). + **********************************************************************/ + + for( pxIterator = ( ListItem_t * ) &( pxList->xListEnd ); pxIterator->pxNext->xItemValue <= xValueOfInsertion; pxIterator = pxIterator->pxNext ) /*lint !e826 !e740 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + { + /* There is nothing to do here, just iterating to the wanted + insertion position. 
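+ + For example (illustrative values, not part of the original comment): + inserting an item with value 5 into a list holding values 1, 3 and 7 + stops this loop with pxIterator on the item holding 3, because the next + value, 7, is greater than 5, so the new item is linked in between 3 and + 7 by the code below.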
*/ + } + } + + pxNewListItem->pxNext = pxIterator->pxNext; + pxNewListItem->pxNext->pxPrevious = pxNewListItem; + pxNewListItem->pxPrevious = pxIterator; + pxIterator->pxNext = pxNewListItem; + + /* Remember which list the item is in. This allows fast removal of the + item later. */ + pxNewListItem->pvContainer = ( void * ) pxList; + + ( pxList->uxNumberOfItems )++; +} +/*-----------------------------------------------------------*/ + +UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove ) +{ +/* The list item knows which list it is in. Obtain the list from the list +item. */ +List_t * const pxList = ( List_t * ) pxItemToRemove->pvContainer; + + pxItemToRemove->pxNext->pxPrevious = pxItemToRemove->pxPrevious; + pxItemToRemove->pxPrevious->pxNext = pxItemToRemove->pxNext; + + /* Only used during decision coverage testing. */ + mtCOVERAGE_TEST_DELAY(); + + /* Make sure the index is left pointing to a valid item. */ + if( pxList->pxIndex == pxItemToRemove ) + { + pxList->pxIndex = pxItemToRemove->pxPrevious; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxItemToRemove->pvContainer = NULL; + ( pxList->uxNumberOfItems )--; + + return pxList->uxNumberOfItems; +} +/*-----------------------------------------------------------*/ + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c new file mode 100644 index 000000000..5e1f805e2 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c @@ -0,0 +1,765 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/*----------------------------------------------------------- + * Implementation of functions defined in portable.h for the ARM CM7 port. + *----------------------------------------------------------*/ + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +#ifndef __VFP_FP__ + #error This port can only be used when the project options are configured to enable hardware floating point support. +#endif + +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ + /* Ensure the SysTick is clocked at the same frequency as the core.
*/ + #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#else + /* The way the SysTick is clocked is not modified in case it is not the same + as the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 0 ) +#endif + +/* Constants required to manipulate the core. Registers first... */ +#define portNVIC_SYSTICK_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( * ( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( * ( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SYSPRI2_REG ( * ( ( volatile uint32_t * ) 0xe000ed20 ) ) +/* ...then bits in the registers. */ +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) + +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) + +/* Constants required to check the validity of an interrupt priority. */ +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( * ( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portMAX_8_BIT_VALUE ( ( uint8_t ) 0xff ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) + +/* Masks off all bits but the VECTACTIVE bits in the ICSR register. */ +#define portVECTACTIVE_MASK ( 0xFFUL ) + +/* Constants required to manipulate the VFP. */ +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating point context control register. */ +#define portASPEN_AND_LSPEN_BITS ( 0x3UL << 30UL ) + +/* Constants required to set up the initial stack. */ +#define portINITIAL_XPSR ( 0x01000000 ) +#define portINITIAL_EXC_RETURN ( 0xfffffffd ) + +/* The systick is a 24-bit counter. */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/* For strict compliance with the Cortex-M spec the task start address should +have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ +#define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) + +/* A fiddle factor to estimate the number of SysTick counts that would have +occurred while the SysTick counter is stopped during tickless idle +calculations. */ +#define portMISSED_COUNTS_FACTOR ( 45UL ) + +/* Let the user override the pre-loading of the initial LR with the address of +prvTaskExitError() in case it messes up unwinding of the stack in the +debugger. */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/* + * Setup the timer to generate the tick interrupts. The implementation in this + * file is weak to allow application writers to change the timer used to + * generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ); + +/* + * Exception handlers. + */ +void xPortPendSVHandler( void ) __attribute__ (( naked )); +void xPortSysTickHandler( void ); +void vPortSVCHandler( void ) __attribute__ (( naked )); + +/* + * Start first task is a separate function so it can be tested in isolation. + */ +static void prvPortStartFirstTask( void ) __attribute__ (( naked )); + +/* + * Function to enable the VFP. 
+ */ +static void vPortEnableVFP( void ) __attribute__ (( naked )); + +/* + * Used to catch tasks that attempt to return from their implementing function. + */ +static void prvTaskExitError( void ); + +/*-----------------------------------------------------------*/ + +/* Each task maintains its own interrupt status in the critical nesting +variable. */ +static UBaseType_t uxCriticalNesting = 0xaaaaaaaa; + +/* + * The number of SysTick increments that make up one tick period. + */ +#if( configUSE_TICKLESS_IDLE == 1 ) + static uint32_t ulTimerCountsForOneTick = 0; +#endif /* configUSE_TICKLESS_IDLE */ + +/* + * The maximum number of tick periods that can be suppressed is limited by the + * 24 bit resolution of the SysTick timer. + */ +#if( configUSE_TICKLESS_IDLE == 1 ) + static uint32_t xMaximumPossibleSuppressedTicks = 0; +#endif /* configUSE_TICKLESS_IDLE */ + +/* + * Compensate for the CPU cycles that pass while the SysTick is stopped (low + * power functionality only). + */ +#if( configUSE_TICKLESS_IDLE == 1 ) + static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ + +/* + * Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if( configASSERT_DEFINED == 1 ) + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * const ) portNVIC_IP_REGISTERS_OFFSET_16; +#endif /* configASSERT_DEFINED */ + +/*-----------------------------------------------------------*/ + +/* + * See header file for description. + */ +StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) +{ + /* Simulate the stack frame as it would be created by a context switch + interrupt. */ + + /* Offset added to account for the way the MCU uses the stack on entry/exit + of interrupts, and to ensure alignment. */ + pxTopOfStack--; + + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + + /* Save code space by skipping register initialisation. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1 are skipped; the fifth slot down is R0, set below. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + + /* A save method is being used that requires each task to maintain its + own exec return value. */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; + + pxTopOfStack -= 8; /* R11, R10, R9, R8, R7, R6, R5 and R4. */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ +volatile uint32_t ulDummy = 0; + + /* A function that implements a task must not exit or attempt to return to + its caller as there is nothing to return to. If a task wants to exit it + should instead call vTaskDelete( NULL ). + + Artificially force an assert() to be triggered if configASSERT() is + defined, then stop here so application writers can catch the error. */ + configASSERT( uxCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + started to remove a compiler warning about the function being defined + but never called.
ulDummy is used purely to quieten other warnings + about code appearing after this function is called - making ulDummy + volatile makes the compiler think the function could return and + therefore not output an 'unreachable code' warning for code that appears + after it. */ + } +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler( void ) +{ + __asm volatile ( + " ldr r3, pxCurrentTCBConst2 \n" /* Restore the context. */ + " ldr r1, [r3] \n" /* Use pxCurrentTCBConst to get the pxCurrentTCB address. */ + " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */ + " ldmia r0!, {r4-r11, r14} \n" /* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */ + " msr psp, r0 \n" /* Restore the task stack pointer. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" + " bx r14 \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst2: .word pxCurrentTCB \n" + ); +} +/*-----------------------------------------------------------*/ + +static void prvPortStartFirstTask( void ) +{ + /* Start the first task. This also clears the bit that indicates the FPU is + in use in case the FPU was used before the scheduler was started - which + would otherwise result in the unnecessary leaving of space in the SVC stack + for lazy saving of FPU registers. */ + __asm volatile( + " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n" + " ldr r0, [r0] \n" + " msr msp, r0 \n" /* Set the msp back to the start of the stack. */ + " mov r0, #0 \n" /* Clear the bit that indicates the FPU is in use, see comment above. */ + " msr control, r0 \n" + " cpsie i \n" /* Globally enable interrupts. */ + " cpsie f \n" + " dsb \n" + " isb \n" + " svc 0 \n" /* System call to start first task. */ + " nop \n" + ); +} +/*-----------------------------------------------------------*/ + +/* + * See header file for description. + */ +BaseType_t xPortStartScheduler( void ) +{ + /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. + See http://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); + + #if( configASSERT_DEFINED == 1 ) + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + functions can be called. ISR safe functions are those that end in + "FromISR". FreeRTOS maintains separate thread and ISR API functions to + ensure interrupt entry is as fast and simple as possible. + + Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; + + /* Determine the number of priority bits available. First write to all + possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Calculate the maximum acceptable priority group value for the number + of bits read back. 
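For illustration, a hedged sketch (not part of the port) of how this probe behaves on a hypothetical MCU with four implemented priority bits: writing 0xff to a user interrupt priority register reads back as 0xf0, and the loop below then counts how many of the top bits stuck.

    uint8_t ucProbe = 0xF0;        // value read back after writing 0xff
    uint32_t ulPrioGroup = 7;      // portMAX_PRIGROUP_BITS
    while( ( ucProbe & 0x80 ) == 0x80 )
    {
        ulPrioGroup--;             // one implemented priority bit found
        ucProbe <<= 1;
    }
    // ulPrioGroup is now 3, i.e. 7 minus the 4 implemented bits, which is
    // what the configASSERT() against __NVIC_PRIO_BITS checks below.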
*/ + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulMaxPRIGROUPValue--; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + priority bits matches the number of priority bits actually queried + from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + } + #endif + + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + priority bits matches the number of priority bits actually queried + from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + } + #endif + + /* Shift the priority group value back to its position within the AIRCR + register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } + #endif /* configASSERT_DEFINED */ + + /* Make PendSV and SysTick the lowest priority interrupts. */ + portNVIC_SYSPRI2_REG |= portNVIC_PENDSV_PRI; + portNVIC_SYSPRI2_REG |= portNVIC_SYSTICK_PRI; + + /* Start the timer that generates the tick ISR. Interrupts are disabled + here already. */ + vPortSetupTimerInterrupt(); + + /* Initialise the critical nesting count ready for the first task. */ + uxCriticalNesting = 0; + + /* Ensure the VFP is enabled - it should be anyway. */ + vPortEnableVFP(); + + /* Lazy save always. */ + *( portFPCCR ) |= portASPEN_AND_LSPEN_BITS; + + /* Start the first task. */ + prvPortStartFirstTask(); + + /* Should never get here as the tasks will now be executing! Call the task + exit error function to prevent compiler warnings about a static function + not being called in the case that the application writer overrides this + functionality by defining configTASK_RETURN_ADDRESS. Call + vTaskSwitchContext() so link time optimisation does not remove the + symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here! */ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) +{ + /* Not implemented in ports where there is nothing to return to. + Artificially force an assert. */ + configASSERT( uxCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) +{ + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + + /* This is not the interrupt safe version of the enter critical function so + assert() if it is being called from an interrupt context. Only API + functions that end in "FromISR" can be used in an interrupt. Only assert if + the critical nesting count is 1 to protect against recursive calls if the + assert function also uses a critical section. */ + if( uxCriticalNesting == 1 ) + { + configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); + } +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) +{ + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void xPortPendSVHandler( void ) +{ + /* This is a naked function. 
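A hedged aside before the assembly: this handler is never called directly. A context switch is requested by pending PendSV, which is what portYIELD() in portmacro.h does; a minimal sketch of that request (the helper name is illustrative, the register address matches portNVIC_INT_CTRL_REG) is:

    static inline void vRequestContextSwitch( void )
    {
        // Pend PendSV; the switch runs once no higher priority ISR is active.
        ( * ( volatile uint32_t * ) 0xe000ed04 ) = ( 1UL << 28UL );
        __asm volatile( "dsb" ::: "memory" );
        __asm volatile( "isb" );
    }

By the time the code below runs, the hardware has already stacked xPSR, PC, LR, r12 and r0-r3 (plus s0-s15 when the task owns an FPU context), so the handler only needs to save r4-r11, r14 and, conditionally, s16-s31.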
*/ + + __asm volatile + ( + " mrs r0, psp \n" + " isb \n" + " \n" + " ldr r3, pxCurrentTCBConst \n" /* Get the location of the current TCB. */ + " ldr r2, [r3] \n" + " \n" + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, push high vfp registers. */ + " it eq \n" + " vstmdbeq r0!, {s16-s31} \n" + " \n" + " stmdb r0!, {r4-r11, r14} \n" /* Save the core registers. */ + " str r0, [r2] \n" /* Save the new top of stack into the first member of the TCB. */ + " \n" + " stmdb sp!, {r0, r3} \n" + " mov r0, %0 \n" + " cpsid i \n" /* Errata workaround. */ + " msr basepri, r0 \n" + " dsb \n" + " isb \n" + " cpsie i \n" /* Errata workaround. */ + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" + " ldmia sp!, {r0, r3} \n" + " \n" + " ldr r1, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */ + " ldr r0, [r1] \n" + " \n" + " ldmia r0!, {r4-r11, r14} \n" /* Pop the core registers. */ + " \n" + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, pop the high vfp registers too. */ + " it eq \n" + " vldmiaeq r0!, {s16-s31} \n" + " \n" + " msr psp, r0 \n" + " isb \n" + " \n" + #ifdef WORKAROUND_PMU_CM001 /* XMC4000 specific errata workaround. */ + #if WORKAROUND_PMU_CM001 == 1 + " push { r14 } \n" + " pop { pc } \n" + #endif + #endif + " \n" + " bx r14 \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst: .word pxCurrentTCB \n" + ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) + ); +} +/*-----------------------------------------------------------*/ + +void xPortSysTickHandler( void ) +{ + /* The SysTick runs at the lowest interrupt priority, so when this interrupt + executes all interrupts must be unmasked. There is therefore no need to + save and then restore the interrupt mask value as its value is already + known. */ + portDISABLE_INTERRUPTS(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* A context switch is required. Context switching is performed in + the PendSV interrupt. Pend the PendSV interrupt. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portENABLE_INTERRUPTS(); +} +/*-----------------------------------------------------------*/ + +#if( configUSE_TICKLESS_IDLE == 1 ) + + __attribute__((weak)) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Stop the SysTick momentarily. The time the SysTick is stopped for + is accounted for as best it can be, but using the tickless mode will + inevitably result in some tiny drift of the time maintained by the + kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; + + /* Calculate the reload value required to wait xExpectedIdleTime + tick periods. -1 is used because this code will execute part way + through one of the tick periods. */ + ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + method as that will mask interrupts that should exit sleep mode. 
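To make the reload arithmetic a few lines above concrete, a hedged worked example with assumed numbers (not taken from this project): with configSYSTICK_CLOCK_HZ at 1 MHz and configTICK_RATE_HZ at 1 kHz there are 1000 SysTick counts per tick.

    uint32_t ulCountsPerTick = 1000000UL / 1000UL;  // ulTimerCountsForOneTick
    uint32_t ulCurrent = 400;                       // counts left in this tick
    uint32_t ulIdleTicks = 5;                       // xExpectedIdleTime
    uint32_t ulReload = ulCurrent + ( ulCountsPerTick * ( ulIdleTicks - 1UL ) );
    // ulReload == 4400: the 400 counts remaining of the current period plus
    // four whole suppressed periods; ulStoppedTimerCompensation is then
    // subtracted to pay for the cycles spent while the counter was stopped.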
*/ + __asm volatile( "cpsid i" ::: "memory" ); + __asm volatile( "dsb" ); + __asm volatile( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + to be unsuspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Restart from whatever is left in the count register to complete + this tick period. */ + portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Reset the reload register to the value required for normal tick + periods. */ + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Re-enable interrupts - see comments above the cpsid instruction() + above. */ + __asm volatile( "cpsie i" ::: "memory" ); + } + else + { + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + set its parameter to 0 to indicate that its implementation contains + its own wait for interrupt or wait for event instruction, and so wfi + should not be executed again. However, the original expected idle + time variable must remain unmodified, so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( &xModifiableIdleTime ); + if( xModifiableIdleTime > 0 ) + { + __asm volatile( "dsb" ::: "memory" ); + __asm volatile( "wfi" ); + __asm volatile( "isb" ); + } + configPOST_SLEEP_PROCESSING( &xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + out of sleep mode to execute immediately. see comments above + __disable_interrupt() call above. */ + __asm volatile( "cpsie i" ::: "memory" ); + __asm volatile( "dsb" ); + __asm volatile( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + and interrupts that execute while the clock is stopped will increase + any slippage between the time maintained by the RTOS and calendar + time. */ + __asm volatile( "cpsid i" ::: "memory" ); + __asm volatile( "dsb" ); + __asm volatile( "isb" ); + + /* Disable the SysTick clock without reading the + portNVIC_SYSTICK_CTRL_REG register to ensure the + portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + the time the SysTick is stopped for is accounted for as best it can + be, but using the tickless mode will inevitably result in some tiny + drift of the time maintained by the kernel with respect to calendar + time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + + /* Determine if the SysTick clock has already counted to zero and + been set back to the current reload value (the reload back being + correct for the entire expected idle time) or if the SysTick is yet + to count to zero (in which case an interrupt other than the SysTick + must have brought the system out of sleep mode). */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt is already pending, and the SysTick count + reloaded with ulReloadValue. Reset the + portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick + period. 
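Continuing the hedged worked example from above (assumed numbers): the counter wrapped, reloaded with the 4400-count value and has since decremented to, say, 4100, so 300 counts of the new tick period have already elapsed.

    uint32_t ulRemainder = ( 1000UL - 1UL ) - ( 4400UL - 4100UL );
    // ulRemainder == 699: only 999 - 300 counts of the current tick period
    // are still outstanding, which is exactly what the expression below
    // computes with the live register values.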
*/ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + underflowed because the post sleep hook did something + that took too long. */ + if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + function exits, the tick value maintained by the kernel is stepped + forward by one less than the time spent waiting. */ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. + Work out how long the sleep lasted rounded to complete tick + periods (not the ulReload value which accounted for part + ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* How many complete tick periods passed while the processor + was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG + again, then set portNVIC_SYSTICK_LOAD_REG back to its standard + value. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + vTaskStepTick( ulCompleteTickPeriods ); + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Exit with interrupts enabled. */ + __asm volatile( "cpsie i" ::: "memory" ); + } + } + +#endif /* #if configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +/* + * Set up the SysTick timer to generate the tick interrupts at the required + * frequency. + */ +__attribute__(( weak )) void vPortSetupTimerInterrupt( void ) +{ + /* Calculate the constants required to configure the tick interrupt. */ + #if( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and clear the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); +} +/*-----------------------------------------------------------*/ + +/* This is a naked function. */ +static void vPortEnableVFP( void ) +{ + __asm volatile + ( + " ldr.w r0, =0xE000ED88 \n" /* The FPU enable bits are in the CPACR. */ + " ldr r1, [r0] \n" + " \n" + " orr r1, r1, #( 0xf << 20 ) \n" /* Enable CP10 and CP11 coprocessors, then save back. 
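A hedged C rendering of what this assembly does (the CPACR address matches the ldr.w above; the helper name is illustrative):

    static void vEnableFpuFullAccess( void )
    {
        // Grant full access to coprocessors CP10 and CP11 (the FPU):
        // two bits per coprocessor, 0b11 meaning full access.
        ( * ( volatile uint32_t * ) 0xE000ED88 ) |= ( 0xFUL << 20UL );
    }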
*/ + " str r1, [r0] \n" + " bx r14 " + ); +} +/*-----------------------------------------------------------*/ + +#if( configASSERT_DEFINED == 1 ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile( "mrs %0, ipsr" : "=r"( ulCurrentInterrupt ) :: "memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + an interrupt that has been assigned a priority above + configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + function. ISR safe FreeRTOS API functions must *only* be called + from interrupts that have been assigned a priority at or below + configMAX_SYSCALL_INTERRUPT_PRIORITY. + + Numerically low interrupt priority numbers represent logically high + interrupt priorities, therefore the priority of the interrupt must + be set to a value equal to or numerically *higher* than + configMAX_SYSCALL_INTERRUPT_PRIORITY. + + Interrupts that use the FreeRTOS API must not be left at their + default priority of zero as that is the highest possible priority, + which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + and therefore also guaranteed to be invalid. + + FreeRTOS maintains separate thread and ISR API functions to ensure + interrupt entry is as fast and simple as possible. + + The following links provide detailed information: + http://www.freertos.org/RTOS-Cortex-M3-M4.html + http://www.freertos.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + that define each interrupt's priority to be split between bits that + define the interrupt's pre-emption priority bits and bits that define + the interrupt's sub-priority. For simplicity all bits must be defined + to be pre-emption priority bits. The following assertion will fail if + this is not the case (if some bits represent a sub-priority). + + If the application only uses CMSIS libraries for interrupt + configuration then the correct setting can be achieved on all Cortex-M + devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + scheduler. Note however that some vendor specific peripheral libraries + assume a non-zero priority group setting, in which cases using a value + of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* configASSERT_DEFINED */ + + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h new file mode 100644 index 000000000..6bc73e348 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h @@ -0,0 +1,246 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*----------------------------------------------------------- + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the + * given hardware and compiler. + * + * These settings should not be altered. + *----------------------------------------------------------- + */ + +/* Type definitions. */ +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + + /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 +#endif +/*-----------------------------------------------------------*/ + +/* Architecture specifics. */ +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +/*-----------------------------------------------------------*/ + +/* Scheduler utilities. */ +#define portYIELD() \ +{ \ + /* Set a PendSV to request a context switch. */ \ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; \ + \ + /* Barriers are normally not required but do ensure the code is completely \ + within the specified behaviour for the architecture. */ \ + __asm volatile( "dsb" ::: "memory" ); \ + __asm volatile( "isb" ); \ +} + +#define portNVIC_INT_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired != pdFALSE ) portYIELD() +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/* Critical section management. 
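A hedged usage sketch of the critical section macros declared below, as they would appear in application code (the function and variable are illustrative):

    static volatile uint32_t ulSharedCounter = 0;

    void vUpdateSharedCounter( void )
    {
        portENTER_CRITICAL();      // masks interrupts up to
        ulSharedCounter++;         // configMAX_SYSCALL_INTERRUPT_PRIORITY
        portEXIT_CRITICAL();       // unmasks on the outermost exit only
    }

From an ISR the FromISR pair is used instead: save the value returned by portSET_INTERRUPT_MASK_FROM_ISR(), touch the shared data, then pass that value back to portCLEAR_INTERRUPT_MASK_FROM_ISR().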
*/ +extern void vPortEnterCritical( void ); +extern void vPortExitCritical( void ); +#define portSET_INTERRUPT_MASK_FROM_ISR() ulPortRaiseBASEPRI() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vPortSetBASEPRI(x) +#define portDISABLE_INTERRUPTS() vPortRaiseBASEPRI() +#define portENABLE_INTERRUPTS() vPortSetBASEPRI(0) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() + +/*-----------------------------------------------------------*/ + +/* Task function macros as described on the FreeRTOS.org WEB site. These are +not necessary to use this port. They are defined so the common demo files +(which build with all the ports) will build. */ +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/*-----------------------------------------------------------*/ + +/* Tickless idle/low power functionality. */ +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif +/*-----------------------------------------------------------*/ + +/* Architecture specific optimisations. */ +#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION + #define configUSE_PORT_OPTIMISED_TASK_SELECTION 1 +#endif + +#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1 + + /* Generic helper function. */ + __attribute__( ( always_inline ) ) static inline uint8_t ucPortCountLeadingZeros( uint32_t ulBitmap ) + { + uint8_t ucReturn; + + __asm volatile ( "clz %0, %1" : "=r" ( ucReturn ) : "r" ( ulBitmap ) : "memory" ); + return ucReturn; + } + + /* Check the configuration. */ + #if( configMAX_PRIORITIES > 32 ) + #error configUSE_PORT_OPTIMISED_TASK_SELECTION can only be set to 1 when configMAX_PRIORITIES is less than or equal to 32. It is very rare that a system requires more than 10 to 15 different priorities as tasks that share a priority will time slice. + #endif + + /* Store/clear the ready priorities in a bit map. */ + #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) ) + #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) ) + + /*-----------------------------------------------------------*/ + + #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31UL - ( uint32_t ) ucPortCountLeadingZeros( ( uxReadyPriorities ) ) ) + +#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + +/*-----------------------------------------------------------*/ + +#ifdef configASSERT + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() +#endif + +/* portNOP() is not required by this port. */ +#define portNOP() + +#define portINLINE __inline + +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__(( always_inline)) +#endif + +portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void ) +{ +uint32_t ulCurrentInterrupt; +BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. 
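A hedged usage sketch for this helper: code that can run in either context can select the correct API variant at run time (names other than the port and queue APIs are illustrative).

    BaseType_t xSendFromAnywhere( QueueHandle_t xQueue, const void *pvItem )
    {
        if( xPortIsInsideInterrupt() == pdTRUE )
        {
            BaseType_t xWoken = pdFALSE;
            BaseType_t xResult = xQueueSendFromISR( xQueue, pvItem, &xWoken );
            portYIELD_FROM_ISR( xWoken );
            return xResult;
        }
        return xQueueSend( xQueue, pvItem, portMAX_DELAY );
    }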
*/ + __asm volatile( "mrs %0, ipsr" : "=r"( ulCurrentInterrupt ) :: "memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} + +/*-----------------------------------------------------------*/ + +portFORCE_INLINE static void vPortRaiseBASEPRI( void ) +{ +uint32_t ulNewBASEPRI; + + __asm volatile + ( + " mov %0, %1 \n" \ + " cpsid i \n" \ + " msr basepri, %0 \n" \ + " isb \n" \ + " dsb \n" \ + " cpsie i \n" \ + :"=r" (ulNewBASEPRI) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" + ); +} + +/*-----------------------------------------------------------*/ + +portFORCE_INLINE static uint32_t ulPortRaiseBASEPRI( void ) +{ +uint32_t ulOriginalBASEPRI, ulNewBASEPRI; + + __asm volatile + ( + " mrs %0, basepri \n" \ + " mov %1, %2 \n" \ + " cpsid i \n" \ + " msr basepri, %1 \n" \ + " isb \n" \ + " dsb \n" \ + " cpsie i \n" \ + :"=r" (ulOriginalBASEPRI), "=r" (ulNewBASEPRI) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" + ); + + /* This return will not be reached but is necessary to prevent compiler + warnings. */ + return ulOriginalBASEPRI; +} +/*-----------------------------------------------------------*/ + +portFORCE_INLINE static void vPortSetBASEPRI( uint32_t ulNewMaskValue ) +{ + __asm volatile + ( + " msr basepri, %0 " :: "r" ( ulNewMaskValue ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + + +#ifdef __cplusplus +} +#endif + +#endif /* PORTMACRO_H */ + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c new file mode 100644 index 000000000..9ec5af5c2 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c @@ -0,0 +1,436 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* + * A sample implementation of pvPortMalloc() and vPortFree() that combines + * (coalescences) adjacent memory blocks as they are freed, and in so doing + * limits memory fragmentation. + * + * See heap_1.c, heap_2.c and heap_3.c for alternative implementations, and the + * memory management pages of http://www.FreeRTOS.org for more information. 
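A hedged usage sketch of the allocator implemented below, from the application's point of view (the function is illustrative):

    void vHeapExample( void )
    {
        uint8_t *pucBuffer = ( uint8_t * ) pvPortMalloc( 128 );
        if( pucBuffer != NULL )
        {
            // ... use the buffer ...
            vPortFree( pucBuffer );   // the freed block is coalesced with
        }                             // any adjacent free blocks
        // Watermarks useful when sizing configTOTAL_HEAP_SIZE:
        size_t xFreeNow = xPortGetFreeHeapSize();
        size_t xFreeLow = xPortGetMinimumEverFreeHeapSize();
        ( void ) xFreeNow; ( void ) xFreeLow;
    }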
*/ +#include <stdlib.h> + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#include "FreeRTOS.h" +#include "task.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 0 ) + #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0 +#endif + +/* Block sizes must not get too small. */ +#define heapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) ) + +/* Assumes 8bit bytes! */ +#define heapBITS_PER_BYTE ( ( size_t ) 8 ) + +/* Allocate the memory for the heap. */ +#if( configAPPLICATION_ALLOCATED_HEAP == 1 ) + /* The application writer has already defined the array used for the RTOS + heap - probably so it can be placed in a special segment or address. */ + extern uint8_t ucHeap[ configTOTAL_HEAP_SIZE ]; +#else + static uint8_t ucHeap[ configTOTAL_HEAP_SIZE ]; +#endif /* configAPPLICATION_ALLOCATED_HEAP */ + +/* Define the linked list structure. This is used to link free blocks in order +of their memory address. */ +typedef struct A_BLOCK_LINK +{ + struct A_BLOCK_LINK *pxNextFreeBlock; /*<< The next free block in the list. */ + size_t xBlockSize; /*<< The size of the free block. */ +} BlockLink_t; + +/*-----------------------------------------------------------*/ + +/* + * Inserts a block of memory that is being freed into the correct position in + * the list of free memory blocks. The block being freed will be merged with + * the block in front of it and/or the block behind it if the memory blocks are + * adjacent to each other. + */ +static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert ); + +/* + * Called automatically to setup the required heap structures the first time + * pvPortMalloc() is called. + */ +static void prvHeapInit( void ); + +/*-----------------------------------------------------------*/ + +/* The size of the structure placed at the beginning of each allocated memory +block must be correctly byte aligned. */ +static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); + +/* Create a couple of list links to mark the start and end of the list. */ +static BlockLink_t xStart, *pxEnd = NULL; + +/* Keeps track of the number of free bytes remaining, but says nothing about +fragmentation. */ +static size_t xFreeBytesRemaining = 0U; +static size_t xMinimumEverFreeBytesRemaining = 0U; + +/* Gets set to the top bit of a size_t type. When this bit in the xBlockSize +member of a BlockLink_t structure is set then the block belongs to the +application. When the bit is free the block is still part of the free heap +space. */ +static size_t xBlockAllocatedBit = 0; + +/*-----------------------------------------------------------*/ + +void *pvPortMalloc( size_t xWantedSize ) +{ +BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink; +void *pvReturn = NULL; + + vTaskSuspendAll(); + { + /* If this is the first call to malloc then the heap will require + initialisation to setup the list of free blocks. */ + if( pxEnd == NULL ) + { + prvHeapInit(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Check the requested block size is not so large that the top bit is + set. The top bit of the block size member of the BlockLink_t structure + is used to determine who owns the block - the application or the + kernel, so it must be free. 
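A hedged illustration of the ownership encoding being tested here: on a 32-bit target xBlockAllocatedBit is 1UL << 31, so a single xBlockSize word carries both the size and the owner.

    // 0x00000120 -> 0x120-byte block, free (top bit clear)
    // 0x80000120 -> the same block, owned by the application (top bit set)
    size_t xAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * 8 ) - 1 );
    // A request with this bit already set could not be distinguished from an
    // allocated block, which is why such a request is rejected.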
*/ + if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) + { + /* The wanted size is increased so it can contain a BlockLink_t + structure in addition to the requested amount of bytes. */ + if( xWantedSize > 0 ) + { + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number + of bytes. */ + if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. */ + xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ); + configASSERT( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) == 0 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) + { + /* Traverse the list from the start (lowest address) block until + one of adequate size is found. */ + pxPreviousBlock = &xStart; + pxBlock = xStart.pxNextFreeBlock; + while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) + { + pxPreviousBlock = pxBlock; + pxBlock = pxBlock->pxNextFreeBlock; + } + + /* If the end marker was reached then a block of adequate size + was not found. */ + if( pxBlock != pxEnd ) + { + /* Return the memory space pointed to - jumping over the + BlockLink_t structure at its start. */ + pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); + + /* This block is being returned for use so must be taken out + of the list of free blocks. */ + pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; + + /* If the block is larger than required it can be split into + two. */ + if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) + { + /* This block is to be split into two. Create a new + block following the number of bytes requested. The void + cast is used to prevent byte alignment warnings from the + compiler. */ + pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); + configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + /* Calculate the sizes of two blocks split from the + single block. */ + pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; + pxBlock->xBlockSize = xWantedSize; + + /* Insert the new block into the list of free blocks. */ + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xFreeBytesRemaining -= pxBlock->xBlockSize; + + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The block is being returned - it is allocated and owned + by the application and has no "next" block. 
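A hedged worked example of the size adjustment performed earlier in this function, assuming portBYTE_ALIGNMENT is 8 and xHeapStructSize is 8: a request for 30 bytes grows to 38 with the header, then rounds up to 40.

    size_t xWanted = 30 + 8;                      // request plus header
    if( ( xWanted & 0x0007 ) != 0 )
    {
        xWanted += ( 8 - ( xWanted & 0x0007 ) );  // 38 -> 40
    }
    // The caller still sees 30 usable bytes; the extra 10 are the header
    // plus 2 bytes of alignment padding.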
*/ + pxBlock->xBlockSize |= xBlockAllocatedBit; + pxBlock->pxNextFreeBlock = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceMALLOC( pvReturn, xWantedSize ); + } + ( void ) xTaskResumeAll(); + + #if( configUSE_MALLOC_FAILED_HOOK == 1 ) + { + if( pvReturn == NULL ) + { + extern void vApplicationMallocFailedHook( void ); + vApplicationMallocFailedHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif + + configASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 ); + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vPortFree( void *pv ) +{ +uint8_t *puc = ( uint8_t * ) pv; +BlockLink_t *pxLink; + + if( pv != NULL ) + { + /* The memory being freed will have an BlockLink_t structure immediately + before it. */ + puc -= xHeapStructSize; + + /* This casting is to keep the compiler from issuing warnings. */ + pxLink = ( void * ) puc; + + /* Check the block is actually allocated. */ + configASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ); + configASSERT( pxLink->pxNextFreeBlock == NULL ); + + if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ) + { + if( pxLink->pxNextFreeBlock == NULL ) + { + /* The block is being returned to the heap - it is no longer + allocated. */ + pxLink->xBlockSize &= ~xBlockAllocatedBit; + + vTaskSuspendAll(); + { + /* Add this block to the list of free blocks. */ + xFreeBytesRemaining += pxLink->xBlockSize; + traceFREE( pv, pxLink->xBlockSize ); + prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); + } + ( void ) xTaskResumeAll(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } +} +/*-----------------------------------------------------------*/ + +size_t xPortGetFreeHeapSize( void ) +{ + return xFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +size_t xPortGetMinimumEverFreeHeapSize( void ) +{ + return xMinimumEverFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +void vPortInitialiseBlocks( void ) +{ + /* This just exists to keep the linker quiet. */ +} +/*-----------------------------------------------------------*/ + +static void prvHeapInit( void ) +{ +BlockLink_t *pxFirstFreeBlock; +uint8_t *pucAlignedHeap; +size_t uxAddress; +size_t xTotalHeapSize = configTOTAL_HEAP_SIZE; + + /* Ensure the heap starts on a correctly aligned boundary. */ + uxAddress = ( size_t ) ucHeap; + + if( ( uxAddress & portBYTE_ALIGNMENT_MASK ) != 0 ) + { + uxAddress += ( portBYTE_ALIGNMENT - 1 ); + uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); + xTotalHeapSize -= uxAddress - ( size_t ) ucHeap; + } + + pucAlignedHeap = ( uint8_t * ) uxAddress; + + /* xStart is used to hold a pointer to the first item in the list of free + blocks. The void cast is used to prevent compiler warnings. */ + xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap; + xStart.xBlockSize = ( size_t ) 0; + + /* pxEnd is used to mark the end of the list of free blocks and is inserted + at the end of the heap space. */ + uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress -= xHeapStructSize; + uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); + pxEnd = ( void * ) uxAddress; + pxEnd->xBlockSize = 0; + pxEnd->pxNextFreeBlock = NULL; + + /* To start with there is a single free block that is sized to take up the + entire heap space, minus the space taken by pxEnd. 
*/ + pxFirstFreeBlock = ( void * ) pucAlignedHeap; + pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock; + pxFirstFreeBlock->pxNextFreeBlock = pxEnd; + + /* Only one block exists - and it covers the entire usable heap space. */ + xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + + /* Work out the position of the top bit in a size_t variable. */ + xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * heapBITS_PER_BYTE ) - 1 ); +} +/*-----------------------------------------------------------*/ + +static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert ) +{ +BlockLink_t *pxIterator; +uint8_t *puc; + + /* Iterate through the list until a block is found that has a higher address + than the block being inserted. */ + for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock ) + { + /* Nothing to do here, just iterate to the right position. */ + } + + /* Do the block being inserted, and the block it is being inserted after + make a contiguous block of memory? */ + puc = ( uint8_t * ) pxIterator; + if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert ) + { + pxIterator->xBlockSize += pxBlockToInsert->xBlockSize; + pxBlockToInsert = pxIterator; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Do the block being inserted, and the block it is being inserted before + make a contiguous block of memory? */ + puc = ( uint8_t * ) pxBlockToInsert; + if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock ) + { + if( pxIterator->pxNextFreeBlock != pxEnd ) + { + /* Form one big block from the two blocks. */ + pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize; + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock; + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxEnd; + } + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; + } + + /* If the block being inserted plugged a gap, so was merged with the block + before and the block after, then its pxNextFreeBlock pointer will have + already been set, and should not be set here as that would make it point + to itself. */ + if( pxIterator != pxBlockToInsert ) + { + pxIterator->pxNextFreeBlock = pxBlockToInsert; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/queue.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/queue.c new file mode 100644 index 000000000..f6d14503e --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/queue.c @@ -0,0 +1,2908 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#include <stdlib.h> +#include <string.h> + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" + +#if ( configUSE_CO_ROUTINES == 1 ) + #include "croutine.h" +#endif + +/* Lint e961 and e750 are suppressed as a MISRA exception justified because the +MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the +header files above, but not in this file, in order to generate the correct +privileged Vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */ + + +/* Constants used with the cRxLock and cTxLock structure members. */ +#define queueUNLOCKED ( ( int8_t ) -1 ) +#define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 ) + +/* When the Queue_t structure is used to represent a base queue its pcHead and +pcTail members are used as pointers into the queue storage area. When the +Queue_t structure is used to represent a mutex pcHead and pcTail pointers are +not necessary, and the pcHead pointer is set to NULL to indicate that the +pcTail pointer actually points to the mutex holder (if any). Map alternative +names to the pcHead and pcTail structure members to ensure the readability of +the code is maintained despite this dual use of two structure members. An +alternative implementation would be to use a union, but use of a union is +against the coding standard (although an exception to the standard has been +permitted where the dual use also significantly changes the type of the +structure member). */ +#define pxMutexHolder pcTail +#define uxQueueType pcHead +#define queueQUEUE_IS_MUTEX NULL + +/* Semaphores do not actually store or copy data, so have an item size of +zero. */ +#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 ) +#define queueMUTEX_GIVE_BLOCK_TIME ( ( TickType_t ) 0U ) + +#if( configUSE_PREEMPTION == 0 ) + /* If the cooperative scheduler is being used then a yield should not be + performed just because a higher priority task has been woken. */ + #define queueYIELD_IF_USING_PREEMPTION() +#else + #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() +#endif + +/* + * Definition of the queue used by the scheduler. + * Items are queued by copy, not reference. See the following link for the + * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html + */ +typedef struct QueueDefinition +{ + int8_t *pcHead; /*< Points to the beginning of the queue storage area. */ + int8_t *pcTail; /*< Points to the byte at the end of the queue storage area. One more byte is allocated than necessary to store the queue items, and this is used as a marker. */ + int8_t *pcWriteTo; /*< Points to the next free place in the storage area. 
*/ + + union /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */ + { + int8_t *pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */ + UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */ + } u; + + List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */ + List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */ + + volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */ + UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */ + UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */ + + volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + + #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */ + #endif + + #if ( configUSE_QUEUE_SETS == 1 ) + struct QueueDefinition *pxQueueSetContainer; + #endif + + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxQueueNumber; + uint8_t ucQueueType; + #endif + +} xQUEUE; + +/* The old xQUEUE name is maintained above then typedefed to the new Queue_t +name below to enable the use of older kernel aware debuggers. */ +typedef xQUEUE Queue_t; + +/*-----------------------------------------------------------*/ + +/* + * The queue registry is just a means for kernel aware debuggers to locate + * queue structures. It has no other purpose so is an optional component. + */ +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + + /* The type stored within the queue registry array. This allows a name + to be assigned to each queue making kernel aware debugging a little + more user friendly. */ + typedef struct QUEUE_REGISTRY_ITEM + { + const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + QueueHandle_t xHandle; + } xQueueRegistryItem; + + /* The old xQueueRegistryItem name is maintained above then typedefed to the + new xQueueRegistryItem name below to enable the use of older kernel aware + debuggers. */ + typedef xQueueRegistryItem QueueRegistryItem_t; + + /* The queue registry is simply an array of QueueRegistryItem_t structures. + The pcQueueName member of a structure being NULL is indicative of the + array position being vacant. */ + PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ]; + +#endif /* configQUEUE_REGISTRY_SIZE */ + +/* + * Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not + * prevent an ISR from adding or removing items to the queue, but does prevent + * an ISR from removing tasks from the queue event lists. 
If an ISR finds a + * queue is locked it will instead increment the appropriate queue lock count + * to indicate that a task may require unblocking. When the queue is unlocked + * these lock counts are inspected, and the appropriate action taken. + */ +static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; + +/* + * Uses a critical section to determine if there is any data in a queue. + * + * @return pdTRUE if the queue contains no items, otherwise pdFALSE. + */ +static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION; + +/* + * Uses a critical section to determine if there is any space in a queue. + * + * @return pdTRUE if there is no space, otherwise pdFALSE. + */ +static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION; + +/* + * Copies an item into the queue, either at the front of the queue or the + * back of the queue. + */ +static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION; + +/* + * Copies an item out of a queue. + */ +static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION; + +#if ( configUSE_QUEUE_SETS == 1 ) + /* + * Checks to see if a queue is a member of a queue set, and if so, notifies + * the queue set that the queue contains data. + */ + static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION; +#endif + +/* + * Called after a Queue_t structure has been allocated either statically or + * dynamically to fill in the structure's members. + */ +static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION; + +/* + * Mutexes are a special type of queue. When a mutex is created, first the + * queue is created, then prvInitialiseMutex() is called to configure the queue + * as a mutex. + */ +#if( configUSE_MUTEXES == 1 ) + static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION; +#endif + +#if( configUSE_MUTEXES == 1 ) + /* + * If a task waiting for a mutex causes the mutex holder to inherit a + * priority, but the waiting task times out, then the holder should + * disinherit the priority - but only down to the highest priority of any + * other tasks that are waiting for the same mutex. This function returns + * that priority. + */ + static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; +#endif +/*-----------------------------------------------------------*/ + +/* + * Macro to mark a queue as locked. Locking a queue prevents an ISR from + * accessing the queue event lists. 
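Before the macro itself, a hedged reminder of the public API this file ultimately serves, as it would look in application code (the task and message type are illustrative):

    typedef struct { uint32_t ulCode; uint32_t ulValue; } Message_t;

    void vProducerTask( void *pvParameters )
    {
        QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;
        Message_t xMsg = { 1, 42 };
        for( ;; )
        {
            // Items are queued by copy, so xMsg can be reused immediately.
            ( void ) xQueueSend( xQueue, &xMsg, pdMS_TO_TICKS( 10 ) );
            xMsg.ulValue++;
        }
    }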
+ */ +#define prvLockQueue( pxQueue ) \ + taskENTER_CRITICAL(); \ + { \ + if( ( pxQueue )->cRxLock == queueUNLOCKED ) \ + { \ + ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \ + } \ + if( ( pxQueue )->cTxLock == queueUNLOCKED ) \ + { \ + ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \ + } \ + } \ + taskEXIT_CRITICAL() +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ) +{ +Queue_t * const pxQueue = ( Queue_t * ) xQueue; + + configASSERT( pxQueue ); + + taskENTER_CRITICAL(); + { + pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); + pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U; + pxQueue->pcWriteTo = pxQueue->pcHead; + pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize ); + pxQueue->cRxLock = queueUNLOCKED; + pxQueue->cTxLock = queueUNLOCKED; + + if( xNewQueue == pdFALSE ) + { + /* If there are tasks blocked waiting to read from the queue, then + the tasks will remain blocked as after this function exits the queue + will still be empty. If there are tasks blocked waiting to write to + the queue, then one should be unblocked as after this function exits + it will be possible to write to it. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Ensure the event queues start in the correct state. */ + vListInitialise( &( pxQueue->xTasksWaitingToSend ) ); + vListInitialise( &( pxQueue->xTasksWaitingToReceive ) ); + } + } + taskEXIT_CRITICAL(); + + /* A value is returned for calling semantic consistency with previous + versions. */ + return pdPASS; +} +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType ) + { + Queue_t *pxNewQueue; + + configASSERT( uxQueueLength > ( UBaseType_t ) 0 ); + + /* The StaticQueue_t structure and the queue storage area must be + supplied. */ + configASSERT( pxStaticQueue != NULL ); + + /* A queue storage area should be provided if the item size is not 0, and + should not be provided if the item size is 0. */ + configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) ); + configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) ); + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticQueue_t or StaticSemaphore_t equals the size of + the real queue and semaphore structures. */ + volatile size_t xSize = sizeof( StaticQueue_t ); + configASSERT( xSize == sizeof( Queue_t ) ); + } + #endif /* configASSERT_DEFINED */ + + /* The address of a statically allocated queue was passed in, use it. + The address of a statically allocated storage area was also passed in + but is already set. */ + pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. 
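A hedged usage sketch of the static creation path handled here, via the xQueueCreateStatic() API macro (buffer names are illustrative):

    #define qLENGTH     8
    #define qITEM_SIZE  sizeof( uint32_t )
    static StaticQueue_t xQueueControl;                    // the Queue_t storage
    static uint8_t ucStorage[ qLENGTH * qITEM_SIZE ];      // the item storage

    void vCreateMyQueue( void )
    {
        QueueHandle_t xQueue = xQueueCreateStatic( qLENGTH, qITEM_SIZE,
                                                   ucStorage, &xQueueControl );
        configASSERT( xQueue != NULL );
    }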
*/
+
+	if( pxNewQueue != NULL )
+	{
+		#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+		{
+			/* Queues can be allocated either statically or dynamically, so
+			note this queue was allocated statically in case the queue is
+			later deleted. */
+			pxNewQueue->ucStaticallyAllocated = pdTRUE;
+		}
+		#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+
+		prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
+	}
+	else
+	{
+		traceQUEUE_CREATE_FAILED( ucQueueType );
+	}
+
+	return pxNewQueue;
+	}
+
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+
+	QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
+	{
+	Queue_t *pxNewQueue;
+	size_t xQueueSizeInBytes;
+	uint8_t *pucQueueStorage;
+
+		configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
+
+		if( uxItemSize == ( UBaseType_t ) 0 )
+		{
+			/* There is not going to be a queue storage area. */
+			xQueueSizeInBytes = ( size_t ) 0;
+		}
+		else
+		{
+			/* Allocate enough space to hold the maximum number of items that
+			can be in the queue at any time. */
+			xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+		}
+
+		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );
+
+		if( pxNewQueue != NULL )
+		{
+			/* Jump past the queue structure to find the location of the queue
+			storage area. */
+			pucQueueStorage = ( ( uint8_t * ) pxNewQueue ) + sizeof( Queue_t );
+
+			#if( configSUPPORT_STATIC_ALLOCATION == 1 )
+			{
+				/* Queues can be created either statically or dynamically, so
+				note this queue was created dynamically in case it is later
+				deleted. */
+				pxNewQueue->ucStaticallyAllocated = pdFALSE;
+			}
+			#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+			prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
+		}
+		else
+		{
+			traceQUEUE_CREATE_FAILED( ucQueueType );
+		}
+
+		return pxNewQueue;
+	}
+
+#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )
+{
+	/* Remove compiler warnings about unused parameters should
+	configUSE_TRACE_FACILITY not be set to 1. */
+	( void ) ucQueueType;
+
+	if( uxItemSize == ( UBaseType_t ) 0 )
+	{
+		/* No RAM was allocated for the queue storage area, but pcHead cannot
+		be set to NULL because NULL is used as a key to say the queue is used as
+		a mutex. Therefore just set pcHead to point to the queue as a benign
+		value that is known to be within the memory map. */
+		pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
+	}
+	else
+	{
+		/* Set the head to the start of the queue storage area. */
+		pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
+	}
+
+	/* Initialise the queue members as described where the queue type is
+	defined.
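The dynamic path above is normally reached through the xQueueCreate() macro. A minimal sketch, assuming configSUPPORT_DYNAMIC_ALLOCATION == 1; Message_t is an illustrative payload type:

typedef struct
{
	uint8_t ucMessageID;
	uint32_t ulPayload;
} Message_t;

void vCreateDynamicQueue( void )
{
	// A single pvPortMalloc() covers both the Queue_t structure and the
	// storage area, exactly as xQueueGenericCreate() sizes it above.
	QueueHandle_t xQueue = xQueueCreate( 10, sizeof( Message_t ) );

	if( xQueue == NULL )
	{
		// The heap allocation failed and no queue was created.
	}
}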
*/ + pxNewQueue->uxLength = uxQueueLength; + pxNewQueue->uxItemSize = uxItemSize; + ( void ) xQueueGenericReset( pxNewQueue, pdTRUE ); + + #if ( configUSE_TRACE_FACILITY == 1 ) + { + pxNewQueue->ucQueueType = ucQueueType; + } + #endif /* configUSE_TRACE_FACILITY */ + + #if( configUSE_QUEUE_SETS == 1 ) + { + pxNewQueue->pxQueueSetContainer = NULL; + } + #endif /* configUSE_QUEUE_SETS */ + + traceQUEUE_CREATE( pxNewQueue ); +} +/*-----------------------------------------------------------*/ + +#if( configUSE_MUTEXES == 1 ) + + static void prvInitialiseMutex( Queue_t *pxNewQueue ) + { + if( pxNewQueue != NULL ) + { + /* The queue create function will set all the queue structure members + correctly for a generic queue, but this function is creating a + mutex. Overwrite those members that need to be set differently - + in particular the information required for priority inheritance. */ + pxNewQueue->pxMutexHolder = NULL; + pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX; + + /* In case this is a recursive mutex. */ + pxNewQueue->u.uxRecursiveCallCount = 0; + + traceCREATE_MUTEX( pxNewQueue ); + + /* Start with the semaphore in the expected state. */ + ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK ); + } + else + { + traceCREATE_MUTEX_FAILED(); + } + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType ) + { + Queue_t *pxNewQueue; + const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0; + + pxNewQueue = ( Queue_t * ) xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType ); + prvInitialiseMutex( pxNewQueue ); + + return pxNewQueue; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue ) + { + Queue_t *pxNewQueue; + const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0; + + /* Prevent compiler warnings about unused parameters if + configUSE_TRACE_FACILITY does not equal 1. */ + ( void ) ucQueueType; + + pxNewQueue = ( Queue_t * ) xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType ); + prvInitialiseMutex( pxNewQueue ); + + return pxNewQueue; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + + void* xQueueGetMutexHolder( QueueHandle_t xSemaphore ) + { + void *pxReturn; + + /* This function is called by xSemaphoreGetMutexHolder(), and should not + be called directly. Note: This is a good way of determining if the + calling task is the mutex holder, but not a good way of determining the + identity of the mutex holder, as the holder may change between the + following critical section exiting and the function returning. */ + taskENTER_CRITICAL(); + { + if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX ) + { + pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder; + } + else + { + pxReturn = NULL; + } + } + taskEXIT_CRITICAL(); + + return pxReturn; + } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. 
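Application code reaches xQueueCreateMutex() through the semphr.h API. A sketch assuming configUSE_MUTEXES == 1; the handle and function names are illustrative:

static SemaphoreHandle_t xMutex;

void vSetupMutex( void )
{
	xMutex = xSemaphoreCreateMutex();   // wraps xQueueCreateMutex()
}

void vUseMutex( void )
{
	if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 10 ) ) == pdPASS )
	{
		// xSemaphoreGetMutexHolder( xMutex ) would now return the
		// current task's handle.
		xSemaphoreGive( xMutex );
	}
}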
*/ + +#endif +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + + void* xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) + { + void *pxReturn; + + configASSERT( xSemaphore ); + + /* Mutexes cannot be used in interrupt service routines, so the mutex + holder should not change in an ISR, and therefore a critical section is + not required here. */ + if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX ) + { + pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder; + } + else + { + pxReturn = NULL; + } + + return pxReturn; + } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */ + +#endif +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex ) + { + BaseType_t xReturn; + Queue_t * const pxMutex = ( Queue_t * ) xMutex; + + configASSERT( pxMutex ); + + /* If this is the task that holds the mutex then pxMutexHolder will not + change outside of this task. If this task does not hold the mutex then + pxMutexHolder can never coincidentally equal the tasks handle, and as + this is the only condition we are interested in it does not matter if + pxMutexHolder is accessed simultaneously by another task. Therefore no + mutual exclusion is required to test the pxMutexHolder variable. */ + if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */ + { + traceGIVE_MUTEX_RECURSIVE( pxMutex ); + + /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to + the task handle, therefore no underflow check is required. Also, + uxRecursiveCallCount is only modified by the mutex holder, and as + there can only be one, no mutual exclusion is required to modify the + uxRecursiveCallCount member. */ + ( pxMutex->u.uxRecursiveCallCount )--; + + /* Has the recursive call count unwound to 0? */ + if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 ) + { + /* Return the mutex. This will automatically unblock any other + task that might be waiting to access the mutex. */ + ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xReturn = pdPASS; + } + else + { + /* The mutex cannot be given because the calling task is not the + holder. */ + xReturn = pdFAIL; + + traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ); + } + + return xReturn; + } + +#endif /* configUSE_RECURSIVE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxMutex = ( Queue_t * ) xMutex; + + configASSERT( pxMutex ); + + /* Comments regarding mutual exclusion as per those within + xQueueGiveMutexRecursive(). */ + + traceTAKE_MUTEX_RECURSIVE( pxMutex ); + + if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */ + { + ( pxMutex->u.uxRecursiveCallCount )++; + xReturn = pdPASS; + } + else + { + xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait ); + + /* pdPASS will only be returned if the mutex was successfully + obtained. The calling task may have entered the Blocked state + before reaching here. 
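As the uxRecursiveCallCount logic above shows, recursive takes must be balanced by the same number of gives before the mutex is released. A sketch, assuming configUSE_RECURSIVE_MUTEXES == 1 and a handle created with xSemaphoreCreateRecursiveMutex():

void vNestedLocking( SemaphoreHandle_t xRecursiveMutex )
{
	if( xSemaphoreTakeRecursive( xRecursiveMutex, portMAX_DELAY ) == pdPASS )
	{
		// The holder may take the same mutex again without blocking;
		// only the call count is incremented.
		xSemaphoreTakeRecursive( xRecursiveMutex, 0 );

		// The mutex is only returned to other tasks once the count
		// unwinds back to zero.
		xSemaphoreGiveRecursive( xRecursiveMutex );
		xSemaphoreGiveRecursive( xRecursiveMutex );
	}
}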
*/ + if( xReturn != pdFAIL ) + { + ( pxMutex->u.uxRecursiveCallCount )++; + } + else + { + traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex ); + } + } + + return xReturn; + } + +#endif /* configUSE_RECURSIVE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue ) + { + QueueHandle_t xHandle; + + configASSERT( uxMaxCount != 0 ); + configASSERT( uxInitialCount <= uxMaxCount ); + + xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE ); + + if( xHandle != NULL ) + { + ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount; + + traceCREATE_COUNTING_SEMAPHORE(); + } + else + { + traceCREATE_COUNTING_SEMAPHORE_FAILED(); + } + + return xHandle; + } + +#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ) + { + QueueHandle_t xHandle; + + configASSERT( uxMaxCount != 0 ); + configASSERT( uxInitialCount <= uxMaxCount ); + + xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE ); + + if( xHandle != NULL ) + { + ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount; + + traceCREATE_COUNTING_SEMAPHORE(); + } + else + { + traceCREATE_COUNTING_SEMAPHORE_FAILED(); + } + + return xHandle; + } + +#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition ) +{ +BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired; +TimeOut_t xTimeOut; +Queue_t * const pxQueue = ( Queue_t * ) xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) ); + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + + /* This function relaxes the coding standard somewhat to allow return + statements within the function itself. This is done in the interest + of execution time efficiency. */ + for( ;; ) + { + taskENTER_CRITICAL(); + { + /* Is there room on the queue now? The running task must be the + highest priority task wanting to access the queue. If the head item + in the queue is to be overwritten then it does not matter if the + queue is full. 
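A counting semaphore is just a queue whose uxMessagesWaiting member is preset to the initial count, as the create functions above show. A sketch, assuming configUSE_COUNTING_SEMAPHORES == 1; the counts are illustrative:

void vCountingSemaphoreExample( void )
{
	// Counts up to 5, starting at 0 - no data is ever copied, only
	// uxMessagesWaiting changes.
	SemaphoreHandle_t xCounting = xSemaphoreCreateCounting( 5, 0 );

	if( xCounting != NULL )
	{
		xSemaphoreGive( xCounting );                  // count becomes 1
		xSemaphoreTake( xCounting, portMAX_DELAY );   // count back to 0
	}
}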
*/ + if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) + { + traceQUEUE_SEND( pxQueue ); + xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); + + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting + to the queue set caused a higher priority task to + unblock. A context switch is required. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* If there was a task waiting for data to arrive on the + queue then unblock it now. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The unblocked task has a priority higher than + our own so yield immediately. Yes it is ok to + do this from within the critical section - the + kernel takes care of that. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else if( xYieldRequired != pdFALSE ) + { + /* This path is a special case that will only get + executed if the task was holding multiple mutexes + and the mutexes were given back in an order that is + different to that in which they were taken. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + /* If there was a task waiting for data to arrive on the + queue then unblock it now. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The unblocked task has a priority higher than + our own so yield immediately. Yes it is ok to do + this from within the critical section - the kernel + takes care of that. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else if( xYieldRequired != pdFALSE ) + { + /* This path is a special case that will only get + executed if the task was holding multiple mutexes and + the mutexes were given back in an order that is + different to that in which they were taken. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_QUEUE_SETS */ + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The queue was full and no block time is specified (or + the block time has expired) so leave now. */ + taskEXIT_CRITICAL(); + + /* Return to the original privilege level before exiting + the function. */ + traceQUEUE_SEND_FAILED( pxQueue ); + return errQUEUE_FULL; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The queue was full and a block time was specified so + configure the timeout structure. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can send to and receive from the queue + now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. 
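From a task, the send path above is usually driven through the xQueueSend() macro. A sketch of the outcomes handled above; the handle and task are illustrative:

void vProducerTask( void * pvParameters )
{
	QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;
	uint32_t ulValue = 0;

	for( ;; )
	{
		// Blocks for up to 100 ms waiting for space; errQUEUE_FULL is
		// returned once the timeout tracked in xTimeOut expires.
		if( xQueueSend( xQueue, &ulValue, pdMS_TO_TICKS( 100 ) ) != pdPASS )
		{
			// The queue stayed full for the whole block time.
		}

		ulValue++;
	}
}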
*/
+		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
+		{
+			if( prvIsQueueFull( pxQueue ) != pdFALSE )
+			{
+				traceBLOCKING_ON_QUEUE_SEND( pxQueue );
+				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
+
+				/* Unlocking the queue means queue events can affect the
+				event list. It is possible that interrupts occurring now
+				remove this task from the event list again - but as the
+				scheduler is suspended the task will go onto the pending
+				ready list instead of the actual ready list. */
+				prvUnlockQueue( pxQueue );
+
+				/* Resuming the scheduler will move tasks from the pending
+				ready list into the ready list - so it is feasible that this
+				task is already in a ready list before it yields - in which
+				case the yield will not cause a context switch unless there
+				is also a higher priority task in the pending ready list. */
+				if( xTaskResumeAll() == pdFALSE )
+				{
+					portYIELD_WITHIN_API();
+				}
+			}
+			else
+			{
+				/* Try again. */
+				prvUnlockQueue( pxQueue );
+				( void ) xTaskResumeAll();
+			}
+		}
+		else
+		{
+			/* The timeout has expired. */
+			prvUnlockQueue( pxQueue );
+			( void ) xTaskResumeAll();
+
+			traceQUEUE_SEND_FAILED( pxQueue );
+			return errQUEUE_FULL;
+		}
+	}
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
+{
+BaseType_t xReturn;
+UBaseType_t uxSavedInterruptStatus;
+Queue_t * const pxQueue = ( Queue_t * ) xQueue;
+
+	configASSERT( pxQueue );
+	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
+	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
+
+	/* RTOS ports that support interrupt nesting have the concept of a maximum
+	system call (or maximum API call) interrupt priority. Interrupts that are
+	above the maximum system call priority are kept permanently enabled, even
+	when the RTOS kernel is in a critical section, but cannot make any calls to
+	FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
+	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
+	failure if a FreeRTOS API function is called from an interrupt that has been
+	assigned a priority above the configured maximum system call priority.
+	Only FreeRTOS functions that end in FromISR can be called from interrupts
+	that have been assigned a priority at or (logically) below the maximum
+	system call interrupt priority. FreeRTOS maintains a separate interrupt
+	safe API to ensure interrupt entry is as fast and as simple as possible.
+	More information (albeit Cortex-M specific) is provided on the following
+	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
+	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
+
+	/* Similar to xQueueGenericSend, except without blocking if there is no room
+	in the queue. Also don't directly wake a task that was blocked on a queue
+	read, instead return a flag to say whether a context switch is required or
+	not (i.e. has a task with a higher priority than us been woken by this
+	post). */
+	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+	{
+		if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
+		{
+			const int8_t cTxLock = pxQueue->cTxLock;
+
+			traceQUEUE_SEND_FROM_ISR( pxQueue );
+
+			/* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
+			semaphore or mutex.
That means prvCopyDataToQueue() cannot result + in a task disinheriting a priority and prvCopyDataToQueue() can be + called here even though the disinherit function does not check if + the scheduler is suspended before accessing the ready lists. */ + ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); + + /* The event list is not altered if the queue is locked. This will + be done when the queue is unlocked later. */ + if( cTxLock == queueUNLOCKED ) + { + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting + to the queue set caused a higher priority task to + unblock. A context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so + record that a context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_QUEUE_SETS */ + } + else + { + /* Increment the lock count so the task that unlocks the queue + knows that data was posted while it was locked. */ + pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); + } + + xReturn = pdPASS; + } + else + { + traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); + xReturn = errQUEUE_FULL; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) +{ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; +Queue_t * const pxQueue = ( Queue_t * ) xQueue; + + /* Similar to xQueueGenericSendFromISR() but used with semaphores where the + item size is 0. Don't directly wake a task that was blocked on a queue + read, instead return a flag to say whether a context switch is required or + not (i.e. has a task with a higher priority than us been woken by this + post). */ + + configASSERT( pxQueue ); + + /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR() + if the item size is not 0. */ + configASSERT( pxQueue->uxItemSize == 0 ); + + /* Normally a mutex would not be given from an interrupt, especially if + there is a mutex holder, as priority inheritance makes no sense for an + interrupts, only tasks. 
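A typical caller of the ISR-safe send above, showing the pxHigherPriorityTaskWoken convention. A sketch: xQueue and the handler are illustrative, and the handler is assumed to run at or (logically) below the maximum system call interrupt priority:

void vExampleInterruptHandler( void )
{
	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
	uint32_t ulValue = 0;   // e.g. a value just read from a peripheral

	// Never blocks; returns errQUEUE_FULL if there is no space.
	xQueueSendFromISR( xQueue, &ulValue, &xHigherPriorityTaskWoken );

	// Perform the context switch the function recorded, if any.
	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}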
*/ + configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) ); + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* When the queue is used to implement a semaphore no data is ever + moved through the queue but it is still valid to see if the queue 'has + space'. */ + if( uxMessagesWaiting < pxQueue->uxLength ) + { + const int8_t cTxLock = pxQueue->cTxLock; + + traceQUEUE_SEND_FROM_ISR( pxQueue ); + + /* A task can only have an inherited priority if it is a mutex + holder - and if there is a mutex holder then the mutex cannot be + given from an ISR. As this is the ISR version of the function it + can be assumed there is no mutex holder and no need to determine if + priority disinheritance is needed. Simply increase the count of + messages (semaphores) available. */ + pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1; + + /* The event list is not altered if the queue is locked. This will + be done when the queue is unlocked later. */ + if( cTxLock == queueUNLOCKED ) + { + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE ) + { + /* The semaphore is a member of a queue set, and + posting to the queue set caused a higher priority + task to unblock. A context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so + record that a context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + context switch is required. 
*/ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_QUEUE_SETS */ + } + else + { + /* Increment the lock count so the task that unlocks the queue + knows that data was posted while it was locked. */ + pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); + } + + xReturn = pdPASS; + } + else + { + traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); + xReturn = errQUEUE_FULL; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) +{ +BaseType_t xEntryTimeSet = pdFALSE; +TimeOut_t xTimeOut; +Queue_t * const pxQueue = ( Queue_t * ) xQueue; + + /* Check the pointer is not NULL. */ + configASSERT( ( pxQueue ) ); + + /* The buffer into which data is received can only be NULL if the data size + is zero (so no data is copied into the buffer. */ + configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) ); + + /* Cannot block if the scheduler is suspended. */ + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + /* This function relaxes the coding standard somewhat to allow return + statements within the function itself. This is done in the interest + of execution time efficiency. */ + + for( ;; ) + { + taskENTER_CRITICAL(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* Is there data in the queue now? To be running the calling task + must be the highest priority task wanting to access the queue. */ + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Data available, remove one item. */ + prvCopyDataFromQueue( pxQueue, pvBuffer ); + traceQUEUE_RECEIVE( pxQueue ); + pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1; + + /* There is now space in the queue, were any tasks waiting to + post to the queue? If so, unblock the highest priority waiting + task. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The queue was empty and no block time is specified (or + the block time has expired) so leave now. */ + taskEXIT_CRITICAL(); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The queue was empty and a block time was specified so + configure the timeout structure. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can send to and receive from the queue + now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. 
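The receiving side of the path above, via a task that blocks indefinitely. A sketch; passing the handle through the task parameter is purely illustrative:

void vConsumerTask( void * pvParameters )
{
	QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;
	uint32_t ulValue;

	for( ;; )
	{
		// Copies the item out via prvCopyDataFromQueue() and unblocks
		// the highest priority task waiting to send, if there is one.
		if( xQueueReceive( xQueue, &ulValue, portMAX_DELAY ) == pdPASS )
		{
			// ulValue now holds the oldest item in the queue.
		}
	}
}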
*/ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* The timeout has not expired. If the queue is still empty place + the task on the list of tasks waiting to receive from the queue. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* The queue contains data again. Loop back to try and read the + data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* Timed out. If there is no data in the queue exit, otherwise loop + back and attempt to read the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ) +{ +BaseType_t xEntryTimeSet = pdFALSE; +TimeOut_t xTimeOut; +Queue_t * const pxQueue = ( Queue_t * ) xQueue; + +#if( configUSE_MUTEXES == 1 ) + BaseType_t xInheritanceOccurred = pdFALSE; +#endif + + /* Check the queue pointer is not NULL. */ + configASSERT( ( pxQueue ) ); + + /* Check this really is a semaphore, in which case the item size will be + 0. */ + configASSERT( pxQueue->uxItemSize == 0 ); + + /* Cannot block if the scheduler is suspended. */ + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + + /* This function relaxes the coding standard somewhat to allow return + statements within the function itself. This is done in the interest + of execution time efficiency. */ + + for( ;; ) + { + taskENTER_CRITICAL(); + { + /* Semaphores are queues with an item size of 0, and where the + number of messages in the queue is the semaphore's count value. */ + const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting; + + /* Is there data in the queue now? To be running the calling task + must be the highest priority task wanting to access the queue. */ + if( uxSemaphoreCount > ( UBaseType_t ) 0 ) + { + traceQUEUE_RECEIVE( pxQueue ); + + /* Semaphores are queues with a data size of zero and where the + messages waiting is the semaphore's count. Reduce the count. */ + pxQueue->uxMessagesWaiting = uxSemaphoreCount - ( UBaseType_t ) 1; + + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + /* Record the information required to implement + priority inheritance should it become necessary. */ + pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_MUTEXES */ + + /* Check to see if other tasks are blocked waiting to give the + semaphore, and if so, unblock the highest priority such task. 
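Seen from the application, the take path above is xSemaphoreTake(). A sketch, assuming configUSE_MUTEXES == 1; xMutex is an illustrative mutex handle:

void vGuardedAccess( void )
{
	if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 50 ) ) == pdPASS )
	{
		// pxMutexHolder now records this task, so priority inheritance
		// can be applied if a higher priority task blocks on xMutex.
		xSemaphoreGive( xMutex );
	}
	else
	{
		// Timed out; any priority this task caused the holder to
		// inherit is wound back by the timeout path below.
	}
}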
*/ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* For inheritance to have occurred there must have been an + initial timeout, and an adjusted timeout cannot become 0, as + if it were 0 the function would have exited. */ + #if( configUSE_MUTEXES == 1 ) + { + configASSERT( xInheritanceOccurred == pdFALSE ); + } + #endif /* configUSE_MUTEXES */ + + /* The semaphore count was 0 and no block time is specified + (or the block time has expired) so exit now. */ + taskEXIT_CRITICAL(); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The semaphore count was 0 and a block time was specified + so configure the timeout structure ready to block. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can give to and take from the semaphore + now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* A block time is specified and not expired. If the semaphore + count is 0 then enter the Blocked state to wait for a semaphore to + become available. As semaphores are implemented with queues the + queue being empty is equivalent to the semaphore count being 0. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + taskENTER_CRITICAL(); + { + xInheritanceOccurred = xTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder ); + } + taskEXIT_CRITICAL(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif + + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* There was no timeout and the semaphore count was not 0, so + attempt to take the semaphore again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* Timed out. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + /* If the semaphore count is 0 exit now as the timeout has + expired. Otherwise return to attempt to take the semaphore that is + known to be available. As semaphores are implemented by queues the + queue being empty is equivalent to the semaphore count being 0. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + #if ( configUSE_MUTEXES == 1 ) + { + /* xInheritanceOccurred could only have be set if + pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to + test the mutex type again to check it is actually a mutex. */ + if( xInheritanceOccurred != pdFALSE ) + { + taskENTER_CRITICAL(); + { + UBaseType_t uxHighestWaitingPriority; + + /* This task blocking on the mutex caused another + task to inherit this task's priority. 
Now this task + has timed out the priority should be disinherited + again, but only as low as the next highest priority + task that is waiting for the same mutex. */ + uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue ); + vTaskPriorityDisinheritAfterTimeout( ( void * ) pxQueue->pxMutexHolder, uxHighestWaitingPriority ); + } + taskEXIT_CRITICAL(); + } + } + #endif /* configUSE_MUTEXES */ + + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) +{ +BaseType_t xEntryTimeSet = pdFALSE; +TimeOut_t xTimeOut; +int8_t *pcOriginalReadPosition; +Queue_t * const pxQueue = ( Queue_t * ) xQueue; + + /* Check the pointer is not NULL. */ + configASSERT( ( pxQueue ) ); + + /* The buffer into which data is received can only be NULL if the data size + is zero (so no data is copied into the buffer. */ + configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) ); + + /* Cannot block if the scheduler is suspended. */ + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + + /* This function relaxes the coding standard somewhat to allow return + statements within the function itself. This is done in the interest + of execution time efficiency. */ + + for( ;; ) + { + taskENTER_CRITICAL(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* Is there data in the queue now? To be running the calling task + must be the highest priority task wanting to access the queue. */ + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Remember the read position so it can be reset after the data + is read from the queue as this function is only peeking the + data, not removing it. */ + pcOriginalReadPosition = pxQueue->u.pcReadFrom; + + prvCopyDataFromQueue( pxQueue, pvBuffer ); + traceQUEUE_PEEK( pxQueue ); + + /* The data is not being removed, so reset the read pointer. */ + pxQueue->u.pcReadFrom = pcOriginalReadPosition; + + /* The data is being left in the queue, so see if there are + any other tasks waiting for the data. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority than this task. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The queue was empty and no block time is specified (or + the block time has expired) so leave now. */ + taskEXIT_CRITICAL(); + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The queue was empty and a block time was specified so + configure the timeout structure ready to enter the blocked + state. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can send to and receive from the queue + now the critical section has been exited. 
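xQueuePeek() reads without consuming, as the read-pointer restore above shows. A short sketch with an illustrative handle and item type:

uint32_t ulNextValue;

// Look at the head item without removing it; a later xQueueReceive()
// will return the same item again.
if( xQueuePeek( xQueue, &ulNextValue, pdMS_TO_TICKS( 10 ) ) == pdPASS )
{
	// ulNextValue is a copy - the item is still in the queue.
}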
*/ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* Timeout has not expired yet, check to see if there is data in the + queue now, and if not enter the Blocked state to wait for data. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* There is data in the queue now, so don't enter the blocked + state, instead return to try and obtain the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* The timeout has expired. If there is still no data in the queue + exit, otherwise go back and try to read the data again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken ) +{ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; +Queue_t * const pxQueue = ( Queue_t * ) xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* Cannot block in an ISR, so check there is data available. */ + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + const int8_t cRxLock = pxQueue->cRxLock; + + traceQUEUE_RECEIVE_FROM_ISR( pxQueue ); + + prvCopyDataFromQueue( pxQueue, pvBuffer ); + pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1; + + /* If the queue is locked the event list will not be modified. + Instead update the lock count so the task that unlocks the queue + will know that an ISR has removed data while the queue was + locked. 
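A sketch of draining a queue from an interrupt using the function above; the transmit queue name and the register write are illustrative:

void vTxInterruptHandler( void )
{
	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
	char cByte;

	// Cannot block: pdFAIL simply means the queue was empty.
	if( xQueueReceiveFromISR( xTxQueue, &cByte, &xHigherPriorityTaskWoken ) == pdPASS )
	{
		// write cByte to the transmit data register here
	}

	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}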
*/ + if( cRxLock == queueUNLOCKED ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + /* The task waiting has a higher priority than us so + force a context switch. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Increment the lock count so the task that unlocks the queue + knows that data was removed while it was locked. */ + pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 ); + } + + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer ) +{ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; +int8_t *pcOriginalReadPosition; +Queue_t * const pxQueue = ( Queue_t * ) xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */ + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Cannot block in an ISR, so check there is data available. */ + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + traceQUEUE_PEEK_FROM_ISR( pxQueue ); + + /* Remember the read position so it can be reset as nothing is + actually being removed from the queue. 
*/ + pcOriginalReadPosition = pxQueue->u.pcReadFrom; + prvCopyDataFromQueue( pxQueue, pvBuffer ); + pxQueue->u.pcReadFrom = pcOriginalReadPosition; + + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) +{ +UBaseType_t uxReturn; + + configASSERT( xQueue ); + + taskENTER_CRITICAL(); + { + uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting; + } + taskEXIT_CRITICAL(); + + return uxReturn; +} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ +/*-----------------------------------------------------------*/ + +UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) +{ +UBaseType_t uxReturn; +Queue_t *pxQueue; + + pxQueue = ( Queue_t * ) xQueue; + configASSERT( pxQueue ); + + taskENTER_CRITICAL(); + { + uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting; + } + taskEXIT_CRITICAL(); + + return uxReturn; +} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ +/*-----------------------------------------------------------*/ + +UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) +{ +UBaseType_t uxReturn; + + configASSERT( xQueue ); + + uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting; + + return uxReturn; +} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ +/*-----------------------------------------------------------*/ + +void vQueueDelete( QueueHandle_t xQueue ) +{ +Queue_t * const pxQueue = ( Queue_t * ) xQueue; + + configASSERT( pxQueue ); + traceQUEUE_DELETE( pxQueue ); + + #if ( configQUEUE_REGISTRY_SIZE > 0 ) + { + vQueueUnregisterQueue( pxQueue ); + } + #endif + + #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) + { + /* The queue can only have been allocated dynamically - free it + again. */ + vPortFree( pxQueue ); + } + #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + { + /* The queue could have been allocated statically or dynamically, so + check before attempting to free the memory. */ + if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE ) + { + vPortFree( pxQueue ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else + { + /* The queue must have been statically allocated, so is not going to be + deleted. Avoid compiler warnings about the unused parameter. 
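The query and delete functions above combine as in this sketch. Note that vQueueDelete() only frees memory when the queue was dynamically allocated, per the ucStaticallyAllocated check that follows:

void vQueueHousekeeping( QueueHandle_t xQueue )
{
	UBaseType_t uxUsed = uxQueueMessagesWaiting( xQueue );
	UBaseType_t uxFree = uxQueueSpacesAvailable( xQueue );

	// uxUsed + uxFree always equals the length passed at creation.
	( void ) uxUsed;
	( void ) uxFree;

	vQueueDelete( xQueue );
}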
*/ + ( void ) pxQueue; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue ) + { + return ( ( Queue_t * ) xQueue )->uxQueueNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) + { + ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + uint8_t ucQueueGetQueueType( QueueHandle_t xQueue ) + { + return ( ( Queue_t * ) xQueue )->ucQueueType; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if( configUSE_MUTEXES == 1 ) + + static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) + { + UBaseType_t uxHighestPriorityOfWaitingTasks; + + /* If a task waiting for a mutex causes the mutex holder to inherit a + priority, but the waiting task times out, then the holder should + disinherit the priority - but only down to the highest priority of any + other tasks that are waiting for the same mutex. For this purpose, + return the priority of the highest priority task that is waiting for the + mutex. */ + if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0 ) + { + uxHighestPriorityOfWaitingTasks = configMAX_PRIORITIES - listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) ); + } + else + { + uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY; + } + + return uxHighestPriorityOfWaitingTasks; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) +{ +BaseType_t xReturn = pdFALSE; +UBaseType_t uxMessagesWaiting; + + /* This function is called from a critical section. */ + + uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + if( pxQueue->uxItemSize == ( UBaseType_t ) 0 ) + { + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + /* The mutex is no longer being held. */ + xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder ); + pxQueue->pxMutexHolder = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_MUTEXES */ + } + else if( xPosition == queueSEND_TO_BACK ) + { + ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */ + pxQueue->pcWriteTo += pxQueue->uxItemSize; + if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */ + { + pxQueue->pcWriteTo = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. 
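The xPosition parameter of prvCopyDataToQueue() above is what distinguishes the public send macros. A sketch with an illustrative handle and item:

uint32_t ulItem = 42;

xQueueSendToBack( xQueue, &ulItem, 0 );    // queueSEND_TO_BACK: append at pcWriteTo
xQueueSendToFront( xQueue, &ulItem, 0 );   // queueSEND_TO_FRONT: insert at pcReadFrom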
*/ + pxQueue->u.pcReadFrom -= pxQueue->uxItemSize; + if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */ + { + pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xPosition == queueOVERWRITE ) + { + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* An item is not being added but overwritten, so subtract + one from the recorded number of items in the queue so when + one is added again below the number of recorded items remains + correct. */ + --uxMessagesWaiting; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1; + + return xReturn; +} +/*-----------------------------------------------------------*/ + +static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) +{ + if( pxQueue->uxItemSize != ( UBaseType_t ) 0 ) + { + pxQueue->u.pcReadFrom += pxQueue->uxItemSize; + if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */ + { + pxQueue->u.pcReadFrom = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */ + } +} +/*-----------------------------------------------------------*/ + +static void prvUnlockQueue( Queue_t * const pxQueue ) +{ + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */ + + /* The lock counts contains the number of extra data items placed or + removed from the queue while the queue was locked. When a queue is + locked items can be added or removed, but the event lists cannot be + updated. */ + taskENTER_CRITICAL(); + { + int8_t cTxLock = pxQueue->cTxLock; + + /* See if data was added to the queue while it was locked. */ + while( cTxLock > queueLOCKED_UNMODIFIED ) + { + /* Data was posted while the queue was locked. Are any tasks + blocked waiting for data to become available? */ + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting to + the queue set caused a higher priority task to unblock. + A context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Tasks that are removed from the event list will get + added to the pending ready list as the scheduler is still + suspended. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + break; + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + /* Tasks that are removed from the event list will get added to + the pending ready list as the scheduler is still suspended. 
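The queueOVERWRITE adjustment above backs the xQueueOverwrite() mailbox pattern. A sketch; the queue must have a length of 1, as the configASSERT() in xQueueGenericSend() enforces:

QueueHandle_t xMailbox = xQueueCreate( 1, sizeof( uint32_t ) );
uint32_t ulLatest = 123;

// Always succeeds: an existing value is replaced in place and
// uxMessagesWaiting stays at 1.
xQueueOverwrite( xMailbox, &ulLatest );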
*/ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that + a context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + break; + } + } + #endif /* configUSE_QUEUE_SETS */ + + --cTxLock; + } + + pxQueue->cTxLock = queueUNLOCKED; + } + taskEXIT_CRITICAL(); + + /* Do the same for the Rx lock. */ + taskENTER_CRITICAL(); + { + int8_t cRxLock = pxQueue->cRxLock; + + while( cRxLock > queueLOCKED_UNMODIFIED ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + --cRxLock; + } + else + { + break; + } + } + + pxQueue->cRxLock = queueUNLOCKED; + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) +{ +BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) +{ +BaseType_t xReturn; + + configASSERT( xQueue ); + if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ +/*-----------------------------------------------------------*/ + +static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) +{ +BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) +{ +BaseType_t xReturn; + + configASSERT( xQueue ); + if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = ( Queue_t * ) xQueue; + + /* If the queue is already full we may have to block. A critical section + is required to prevent an interrupt removing something from the queue + between the check to see if the queue is full and blocking on the queue. */ + portDISABLE_INTERRUPTS(); + { + if( prvIsQueueFull( pxQueue ) != pdFALSE ) + { + /* The queue is full - do we want to block or just leave without + posting? */ + if( xTicksToWait > ( TickType_t ) 0 ) + { + /* As this is called from a coroutine we cannot block directly, but + return indicating that we need to block. 
*/ + vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) ); + portENABLE_INTERRUPTS(); + return errQUEUE_BLOCKED; + } + else + { + portENABLE_INTERRUPTS(); + return errQUEUE_FULL; + } + } + } + portENABLE_INTERRUPTS(); + + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) + { + /* There is room in the queue, copy the data into the queue. */ + prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK ); + xReturn = pdPASS; + + /* Were any co-routines waiting for data to become available? */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + /* In this instance the co-routine could be placed directly + into the ready list as we are within a critical section. + Instead the same pending ready list mechanism is used as if + the event were caused from within an interrupt. */ + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The co-routine waiting has a higher priority so record + that a yield might be appropriate. */ + xReturn = errQUEUE_YIELD; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xReturn = errQUEUE_FULL; + } + } + portENABLE_INTERRUPTS(); + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = ( Queue_t * ) xQueue; + + /* If the queue is already empty we may have to block. A critical section + is required to prevent an interrupt adding something to the queue + between the check to see if the queue is empty and blocking on the queue. */ + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + /* There are no messages in the queue, do we want to block or just + leave with nothing? */ + if( xTicksToWait > ( TickType_t ) 0 ) + { + /* As this is a co-routine we cannot block directly, but return + indicating that we need to block. */ + vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) ); + portENABLE_INTERRUPTS(); + return errQUEUE_BLOCKED; + } + else + { + portENABLE_INTERRUPTS(); + return errQUEUE_FULL; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + portENABLE_INTERRUPTS(); + + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Data is available from the queue. */ + pxQueue->u.pcReadFrom += pxQueue->uxItemSize; + if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) + { + pxQueue->u.pcReadFrom = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + --( pxQueue->uxMessagesWaiting ); + ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize ); + + xReturn = pdPASS; + + /* Were any co-routines waiting for space to become available? */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + /* In this instance the co-routine could be placed directly + into the ready list as we are within a critical section. + Instead the same pending ready list mechanism is used as if + the event were caused from within an interrupt. 
*/ + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + xReturn = errQUEUE_YIELD; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xReturn = pdFAIL; + } + } + portENABLE_INTERRUPTS(); + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken ) + { + Queue_t * const pxQueue = ( Queue_t * ) xQueue; + + /* Cannot block within an ISR so if there is no space on the queue then + exit without doing anything. */ + if( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) + { + prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK ); + + /* We only want to wake one co-routine per ISR, so check that a + co-routine has not already been woken. */ + if( xCoRoutinePreviouslyWoken == pdFALSE ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + return pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xCoRoutinePreviouslyWoken; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = ( Queue_t * ) xQueue; + + /* We cannot block from an ISR, so check there is data available. If + not then just leave without doing anything. */ + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Copy the data from the queue. */ + pxQueue->u.pcReadFrom += pxQueue->uxItemSize; + if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) + { + pxQueue->u.pcReadFrom = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + --( pxQueue->uxMessagesWaiting ); + ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize ); + + if( ( *pxCoRoutineWoken ) == pdFALSE ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + *pxCoRoutineWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + + void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + { + UBaseType_t ux; + + /* See if there is an empty space in the registry. A NULL name denotes + a free slot. */ + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + { + if( xQueueRegistry[ ux ].pcQueueName == NULL ) + { + /* Store the information on this queue. 
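*/
+
+		/* Example usage of the registry API in this section - a minimal
+		sketch, assuming configQUEUE_REGISTRY_SIZE is at least 1 in
+		FreeRTOSConfig.h. The registry exists purely so kernel aware
+		debuggers can display queues by name:
+
+		QueueHandle_t xRxQueue = xQueueCreate( 10, sizeof( uint8_t ) );
+
+		// Only the pointer is stored, so the string must stay valid.
+		vQueueAddToRegistry( xRxQueue, "RxQueue" );
+
+		// Remove the entry before the queue is deleted.
+		vQueueUnregisterQueue( xRxQueue );
+		*/
+
+		/*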
*/ + xQueueRegistry[ ux ].pcQueueName = pcQueueName; + xQueueRegistry[ ux ].xHandle = xQueue; + + traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName ); + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + +#endif /* configQUEUE_REGISTRY_SIZE */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + + const char *pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + { + UBaseType_t ux; + const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + + /* Note there is nothing here to protect against another task adding or + removing entries from the registry while it is being searched. */ + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + { + if( xQueueRegistry[ ux ].xHandle == xQueue ) + { + pcReturn = xQueueRegistry[ ux ].pcQueueName; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + return pcReturn; + } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */ + +#endif /* configQUEUE_REGISTRY_SIZE */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + + void vQueueUnregisterQueue( QueueHandle_t xQueue ) + { + UBaseType_t ux; + + /* See if the handle of the queue being unregistered is actually in the + registry. */ + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + { + if( xQueueRegistry[ ux ].xHandle == xQueue ) + { + /* Set the name to NULL to show that this slot is free again. */ + xQueueRegistry[ ux ].pcQueueName = NULL; + + /* Set the handle to NULL to ensure the same queue handle cannot + appear in the registry twice if it is added, removed, then + added again. */ + xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ + +#endif /* configQUEUE_REGISTRY_SIZE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + + void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) + { + Queue_t * const pxQueue = ( Queue_t * ) xQueue; + + /* This function should not be called by application code hence the + 'Restricted' in its name. It is not part of the public API. It is + designed for use by kernel code, and has special calling requirements. + It can result in vListInsert() being called on a list that can only + possibly ever have one item in it, so the list will be fast, but even + so it should be called with the scheduler locked and not from a critical + section. */ + + /* Only do anything if there are no messages in the queue. This function + will not actually cause the task to block, just place it on a blocked + list. It will not block until the scheduler is unlocked - at which + time a yield will be performed. If an item is added to the queue while + the queue is locked, and the calling task blocks on the queue, then the + calling task will be immediately unblocked when the queue is unlocked. */ + prvLockQueue( pxQueue ); + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U ) + { + /* There is nothing in the queue, block for the specified period.
*/ + vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + prvUnlockQueue( pxQueue ); + } + +#endif /* configUSE_TIMERS */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength ) + { + QueueSetHandle_t pxQueue; + + pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET ); + + return pxQueue; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) + { + BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL ) + { + /* Cannot add a queue/semaphore to more than one queue set. */ + xReturn = pdFAIL; + } + else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 ) + { + /* Cannot add a queue/semaphore to a queue set if there are already + items in the queue/semaphore. */ + xReturn = pdFAIL; + } + else + { + ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet; + xReturn = pdPASS; + } + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) + { + BaseType_t xReturn; + Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore; + + if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet ) + { + /* The queue was not a member of the set. */ + xReturn = pdFAIL; + } + else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 ) + { + /* It is dangerous to remove a queue from a set when the queue is + not empty because the queue set will still hold pending events for + the queue. */ + xReturn = pdFAIL; + } + else + { + taskENTER_CRITICAL(); + { + /* The queue is no longer contained in the set. */ + pxQueueOrSemaphore->pxQueueSetContainer = NULL; + } + taskEXIT_CRITICAL(); + xReturn = pdPASS; + } + + return xReturn; + } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */ + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait ) + { + QueueSetMemberHandle_t xReturn = NULL; + + ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */ + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) + { + QueueSetMemberHandle_t xReturn = NULL; + + ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. 
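*/
+
+	/* Example usage of the queue set API defined in this section - a
+	minimal sketch, assuming configUSE_QUEUE_SETS is 1. The set must be
+	dimensioned for the combined length of its members, and members must
+	be empty when they are added:
+
+	QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );
+	SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary();
+	QueueSetHandle_t xSet = xQueueCreateSet( 10 + 1 );
+
+	( void ) xQueueAddToSet( xQueue, xSet );
+	( void ) xQueueAddToSet( xSemaphore, xSet );
+
+	// Block until one member contains data, then drain that member only.
+	QueueSetMemberHandle_t xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );
+
+	if( xActivated == ( QueueSetMemberHandle_t ) xQueue )
+	{
+		uint32_t ulReceived;
+		( void ) xQueueReceive( xQueue, &ulReceived, 0 );
+	}
+	else if( xActivated == ( QueueSetMemberHandle_t ) xSemaphore )
+	{
+		( void ) xSemaphoreTake( xSemaphore, 0 );
+	}
+	*/
+
+	/*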
*/ + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) + { + Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer; + BaseType_t xReturn = pdFALSE; + + /* This function must be called from a critical section. */ + + configASSERT( pxQueueSetContainer ); + configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ); + + if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ) + { + const int8_t cTxLock = pxQueueSetContainer->cTxLock; + + traceQUEUE_SEND( pxQueueSetContainer ); + + /* The data copied is the handle of the queue that contains data. */ + xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition ); + + if( cTxLock == queueUNLOCKED ) + { + if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 ); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ + + + + + + + + + + + + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c new file mode 100644 index 000000000..c0ef7272a --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c @@ -0,0 +1,1199 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* Standard includes. */ +#include <stdint.h> +#include <string.h> + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* FreeRTOS includes.
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "stream_buffer.h" + +#if( configUSE_TASK_NOTIFICATIONS != 1 ) + #error configUSE_TASK_NOTIFICATIONS must be set to 1 to build stream_buffer.c +#endif + +/* Lint e961 and e750 are suppressed as a MISRA exception justified because the +MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the +header files above, but not in this file, in order to generate the correct +privileged Vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */ + +/* If the user has not provided application specific Rx notification macros, +or #defined the notification macros away, them provide default implementations +that uses task notifications. */ +/*lint -save -e9026 Function like macros allowed and needed here so they can be overidden. */ +#ifndef sbRECEIVE_COMPLETED + #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \ + vTaskSuspendAll(); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ + { \ + ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend, \ + ( uint32_t ) 0, \ + eNoAction ); \ + ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ + } \ + } \ + ( void ) xTaskResumeAll(); +#endif /* sbRECEIVE_COMPLETED */ + +#ifndef sbRECEIVE_COMPLETED_FROM_ISR + #define sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \ + pxHigherPriorityTaskWoken ) \ + { \ + UBaseType_t uxSavedInterruptStatus; \ + \ + uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ + { \ + ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToSend, \ + ( uint32_t ) 0, \ + eNoAction, \ + pxHigherPriorityTaskWoken ); \ + ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ + } \ + } \ + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ + } +#endif /* sbRECEIVE_COMPLETED_FROM_ISR */ + +/* If the user has not provided an application specific Tx notification macro, +or #defined the notification macro away, them provide a default implementation +that uses task notifications. */ +#ifndef sbSEND_COMPLETED + #define sbSEND_COMPLETED( pxStreamBuffer ) \ + vTaskSuspendAll(); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ + { \ + ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive, \ + ( uint32_t ) 0, \ + eNoAction ); \ + ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ + } \ + } \ + ( void ) xTaskResumeAll(); +#endif /* sbSEND_COMPLETED */ + +#ifndef sbSEND_COMPLETE_FROM_ISR + #define sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ + { \ + UBaseType_t uxSavedInterruptStatus; \ + \ + uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ + { \ + ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToReceive, \ + ( uint32_t ) 0, \ + eNoAction, \ + pxHigherPriorityTaskWoken ); \ + ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ + } \ + } \ + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ + } +#endif /* sbSEND_COMPLETE_FROM_ISR */ +/*lint -restore (9026) */ + +/* The number of bytes used to hold the length of a message in the buffer. */ +#define sbBYTES_TO_STORE_MESSAGE_LENGTH ( sizeof( size_t ) ) + +/* Bits stored in the ucFlags field of the stream buffer. */ +#define sbFLAGS_IS_MESSAGE_BUFFER ( ( uint8_t ) 1 ) /* Set if the stream buffer was created as a message buffer, in which case it holds discrete messages rather than a stream. 
*/ +#define sbFLAGS_IS_STATICALLY_ALLOCATED ( ( uint8_t ) 2 ) /* Set if the stream buffer was created using statically allocated memory. */ + +/*-----------------------------------------------------------*/ + +/* Structure that holds state information on the buffer. */ +typedef struct xSTREAM_BUFFER /*lint !e9058 Style convention uses tag. */ +{ + volatile size_t xTail; /* Index to the next item to read within the buffer. */ + volatile size_t xHead; /* Index to the next item to write within the buffer. */ + size_t xLength; /* The length of the buffer pointed to by pucBuffer. */ + size_t xTriggerLevelBytes; /* The number of bytes that must be in the stream buffer before a task that is waiting for data is unblocked. */ + volatile TaskHandle_t xTaskWaitingToReceive; /* Holds the handle of a task waiting for data, or NULL if no tasks are waiting. */ + volatile TaskHandle_t xTaskWaitingToSend; /* Holds the handle of a task waiting to send data to a message buffer that is full. */ + uint8_t *pucBuffer; /* Points to the buffer itself - that is - the RAM that stores the data passed through the buffer. */ + uint8_t ucFlags; + + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxStreamBufferNumber; /* Used for tracing purposes. */ + #endif +} StreamBuffer_t; + +/* + * The number of bytes available to be read from the buffer. + */ +static size_t prvBytesInBuffer( const StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION; + +/* + * Add xCount bytes from pucData into the pxStreamBuffer message buffer. + * Returns the number of bytes written, which will either equal xCount in the + * success case, or 0 if there was not enough space in the buffer (in which case + * no data is written into the buffer). + */ +static size_t prvWriteBytesToBuffer( StreamBuffer_t * const pxStreamBuffer, const uint8_t *pucData, size_t xCount ) PRIVILEGED_FUNCTION; + +/* + * If the stream buffer is being used as a message buffer, then reads an entire + * message out of the buffer. If the stream buffer is being used as a stream + * buffer then read as many bytes as possible from the buffer. + * prvReadBytesFromBuffer() is called to actually extract the bytes from the + * buffer's data storage area. + */ +static size_t prvReadMessageFromBuffer( StreamBuffer_t *pxStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + size_t xBytesAvailable, + size_t xBytesToStoreMessageLength ) PRIVILEGED_FUNCTION; + +/* + * If the stream buffer is being used as a message buffer, then writes an entire + * message to the buffer. If the stream buffer is being used as a stream + * buffer then write as many bytes as possible to the buffer. + * prvWriteBytesToBuffer() is called to actually send the bytes to the buffer's + * data storage area. + */ +static size_t prvWriteMessageToBuffer( StreamBuffer_t * const pxStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + size_t xSpace, + size_t xRequiredSpace ) PRIVILEGED_FUNCTION; + +/* + * Read xMaxCount bytes from the pxStreamBuffer message buffer and write them + * to pucData. + */ +static size_t prvReadBytesFromBuffer( StreamBuffer_t *pxStreamBuffer, + uint8_t *pucData, + size_t xMaxCount, + size_t xBytesAvailable ) PRIVILEGED_FUNCTION; + +/* + * Called by both pxStreamBufferCreate() and pxStreamBufferCreateStatic() to + * initialise the members of the newly created stream buffer structure.
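*/
+
+/* Worked example of the length prefix described above, assuming a 32-bit
+port where sizeof( size_t ) is 4: writing a 10 byte message to a message
+buffer consumes 4 + 10 = 14 bytes of buffer space, so a buffer created
+with xMessageBufferCreate( 14 ) holds exactly one such message. A stream
+buffer stores the same 10 bytes with no prefix. */
+
+/*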
+ */ +static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, + uint8_t * const pucBuffer, + size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer ) PRIVILEGED_FUNCTION; + +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer ) + { + uint8_t *pucAllocatedMemory; + + /* In case the stream buffer is going to be used as a message buffer + (that is, it will hold discrete messages with a little meta data that + says how big the next message is) check the buffer will be large enough + to hold at least one message. */ + configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); + configASSERT( xTriggerLevelBytes <= xBufferSizeBytes ); + + /* A trigger level of 0 would cause a waiting task to unblock even when + the buffer was empty. */ + if( xTriggerLevelBytes == ( size_t ) 0 ) + { + xTriggerLevelBytes = ( size_t ) 1; /*lint !e9044 Parameter modified to ensure it doesn't have a dangerous value. */ + } + + /* A stream buffer requires a StreamBuffer_t structure and a buffer. + Both are allocated in a single call to pvPortMalloc(). The + StreamBuffer_t structure is placed at the start of the allocated memory + and the buffer follows immediately after. The requested size is + incremented so the free space is returned as the user would expect - + this is a quirk of the implementation that means otherwise the free + space would be reported as one byte smaller than would be logically + expected. */ + xBufferSizeBytes++; + pucAllocatedMemory = ( uint8_t * ) pvPortMalloc( xBufferSizeBytes + sizeof( StreamBuffer_t ) ); /*lint !e9079 malloc() only returns void*. */ + + if( pucAllocatedMemory != NULL ) + { + prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */ + pucAllocatedMemory + sizeof( StreamBuffer_t ), /* Storage area follows. */ /*lint !e9016 Indexing past structure valid for uint8_t pointer, also storage area has no alignment requirement. */ + xBufferSizeBytes, + xTriggerLevelBytes, + xIsMessageBuffer ); + + traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer ); + } + else + { + traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ); + } + + return ( StreamBufferHandle_t * ) pucAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */ + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + StreamBufferHandle_t xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer, + uint8_t * const pucStreamBufferStorageArea, + StaticStreamBuffer_t * const pxStaticStreamBuffer ) + { + StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) pxStaticStreamBuffer; /*lint !e740 !e9087 Safe cast as StaticStreamBuffer_t is opaque Streambuffer_t. 
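*/
+
+	/* Example usage of xStreamBufferGenericCreate() above, reached through
+	the public wrapper macros - a minimal sketch, assuming
+	configSUPPORT_DYNAMIC_ALLOCATION is 1:
+
+	// 100 byte stream buffer; a blocked reader wakes once 10 bytes arrive.
+	StreamBufferHandle_t xStream = xStreamBufferCreate( 100, 10 );
+
+	// Message buffer of the same size (no trigger level parameter).
+	MessageBufferHandle_t xMessage = xMessageBufferCreate( 100 );
+
+	if( ( xStream == NULL ) || ( xMessage == NULL ) )
+	{
+		// Insufficient FreeRTOS heap remained for the allocation.
+	}
+	*/
+
+	/*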
*/ + StreamBufferHandle_t xReturn; + + configASSERT( pucStreamBufferStorageArea ); + configASSERT( pxStaticStreamBuffer ); + configASSERT( xTriggerLevelBytes <= xBufferSizeBytes ); + + /* A trigger level of 0 would cause a waiting task to unblock even when + the buffer was empty. */ + if( xTriggerLevelBytes == ( size_t ) 0 ) + { + xTriggerLevelBytes = ( size_t ) 1; /*lint !e9044 Function parameter deliberately modified to ensure it is in range. */ + } + + /* In case the stream buffer is going to be used as a message buffer + (that is, it will hold discrete messages with a little meta data that + says how big the next message is) check the buffer will be large enough + to hold at least one message. */ + configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticStreamBuffer_t equals the size of the real + message buffer structure. */ + volatile size_t xSize = sizeof( StaticStreamBuffer_t ); + configASSERT( xSize == sizeof( StreamBuffer_t ) ); + } + #endif /* configASSERT_DEFINED */ + + if( ( pucStreamBufferStorageArea != NULL ) && ( pxStaticStreamBuffer != NULL ) ) + { + prvInitialiseNewStreamBuffer( pxStreamBuffer, + pucStreamBufferStorageArea, + xBufferSizeBytes, + xTriggerLevelBytes, + xIsMessageBuffer ); + + /* Remember this was statically allocated in case it is ever deleted + again. */ + pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED; + + traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer ); + + xReturn = ( StreamBufferHandle_t ) pxStaticStreamBuffer; /*lint !e9087 Data hiding requires cast to opaque type. */ + } + else + { + xReturn = NULL; + traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xReturn, xIsMessageBuffer ); + } + + return xReturn; + } + +#endif /* ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + +void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) +{ +StreamBuffer_t * pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */ + + configASSERT( pxStreamBuffer ); + + traceSTREAM_BUFFER_DELETE( xStreamBuffer ); + + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_STATICALLY_ALLOCATED ) == ( uint8_t ) pdFALSE ) + { + #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* Both the structure and the buffer were allocated using a single call + to pvPortMalloc(), hence only one call to vPortFree() is required. */ + vPortFree( ( void * ) pxStreamBuffer ); /*lint !e9087 Standard free() semantics require void *, plus pxStreamBuffer was allocated by pvPortMalloc(). */ + } + #else + { + /* Should not be possible to get here, ucFlags must be corrupt. + Force an assert. */ + configASSERT( xStreamBuffer == ( StreamBufferHandle_t ) ~0 ); + } + #endif + } + else + { + /* The structure and buffer were not allocated dynamically and cannot be + freed - just scrub the structure so future use will assert. */ + memset( pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); + } +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) +{ +StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. 
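*/
+
+	/* Example usage of xStreamBufferGenericCreateStatic() above, reached
+	through its public wrapper - a minimal sketch, assuming
+	configSUPPORT_STATIC_ALLOCATION is 1. The storage array is sized one
+	byte larger than the required capacity, mirroring the size quirk noted
+	in the dynamic version:
+
+	static uint8_t ucStorage[ 100 + 1 ];
+	static StaticStreamBuffer_t xStreamBufferStruct;
+
+	StreamBufferHandle_t xStream = xStreamBufferCreateStatic( sizeof( ucStorage ),
+											10, ucStorage, &xStreamBufferStruct );
+	*/
+
+	/*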
*/ +BaseType_t xReturn = pdFAIL, xIsMessageBuffer; + +#if( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxStreamBufferNumber; +#endif + + configASSERT( pxStreamBuffer ); + + #if( configUSE_TRACE_FACILITY == 1 ) + { + /* Store the stream buffer number so it can be restored after the + reset. */ + uxStreamBufferNumber = pxStreamBuffer->uxStreamBufferNumber; + } + #endif + + /* Can only reset a message buffer if there are no tasks blocked on it. */ + if( pxStreamBuffer->xTaskWaitingToReceive == NULL ) + { + if( pxStreamBuffer->xTaskWaitingToSend == NULL ) + { + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xIsMessageBuffer = pdTRUE; + } + else + { + xIsMessageBuffer = pdFALSE; + } + + prvInitialiseNewStreamBuffer( pxStreamBuffer, + pxStreamBuffer->pucBuffer, + pxStreamBuffer->xLength, + pxStreamBuffer->xTriggerLevelBytes, + xIsMessageBuffer ); + xReturn = pdPASS; + + #if( configUSE_TRACE_FACILITY == 1 ) + { + pxStreamBuffer->uxStreamBufferNumber = uxStreamBufferNumber; + } + #endif + + traceSTREAM_BUFFER_RESET( xStreamBuffer ); + } + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) +{ +StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */ +BaseType_t xReturn; + + configASSERT( pxStreamBuffer ); + + /* It is not valid for the trigger level to be 0. */ + if( xTriggerLevel == ( size_t ) 0 ) + { + xTriggerLevel = ( size_t ) 1; /*lint !e9044 Parameter modified to ensure it doesn't have a dangerous value. */ + } + + /* The trigger level is the number of bytes that must be in the stream + buffer before a task that is waiting for data is unblocked. */ + if( xTriggerLevel <= pxStreamBuffer->xLength ) + { + pxStreamBuffer->xTriggerLevelBytes = xTriggerLevel; + xReturn = pdPASS; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) +{ +const StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */ +size_t xSpace; + + configASSERT( pxStreamBuffer ); + + xSpace = pxStreamBuffer->xLength + pxStreamBuffer->xTail; + xSpace -= pxStreamBuffer->xHead; + xSpace -= ( size_t ) 1; + + if( xSpace >= pxStreamBuffer->xLength ) + { + xSpace -= pxStreamBuffer->xLength; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xSpace; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) +{ +const StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */ +size_t xReturn; + + configASSERT( pxStreamBuffer ); + + xReturn = prvBytesInBuffer( pxStreamBuffer ); + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) +{ +StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. 
*/ +size_t xReturn, xSpace = 0; +size_t xRequiredSpace = xDataLengthBytes; +TimeOut_t xTimeOut; + + configASSERT( pvTxData ); + configASSERT( pxStreamBuffer ); + + /* This send function is used to write to both message buffers and stream + buffers. If this is a message buffer then the space needed must be + increased by the amount of bytes needed to store the length of the + message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xRequiredSpace += sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xTicksToWait != ( TickType_t ) 0 ) + { + vTaskSetTimeOutState( &xTimeOut ); + + do + { + /* Wait until the required number of bytes are free in the message + buffer. */ + taskENTER_CRITICAL(); + { + xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); + + if( xSpace < xRequiredSpace ) + { + /* Clear notification state as going to wait for space. */ + ( void ) xTaskNotifyStateClear( NULL ); + + /* Should only be one writer. */ + configASSERT( pxStreamBuffer->xTaskWaitingToSend == NULL ); + pxStreamBuffer->xTaskWaitingToSend = xTaskGetCurrentTaskHandle(); + } + else + { + taskEXIT_CRITICAL(); + break; + } + } + taskEXIT_CRITICAL(); + + traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer ); + ( void ) xTaskNotifyWait( ( uint32_t ) 0, UINT32_MAX, NULL, xTicksToWait ); + pxStreamBuffer->xTaskWaitingToSend = NULL; + + } while( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xSpace == ( size_t ) 0 ) + { + xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xReturn = prvWriteMessageToBuffer( pxStreamBuffer, pvTxData, xDataLengthBytes, xSpace, xRequiredSpace ); + + if( xReturn > ( size_t ) 0 ) + { + traceSTREAM_BUFFER_SEND( xStreamBuffer, xReturn ); + + /* Was a task waiting for the data? */ + if( prvBytesInBuffer( pxStreamBuffer ) >= pxStreamBuffer->xTriggerLevelBytes ) + { + sbSEND_COMPLETED( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + traceSTREAM_BUFFER_SEND_FAILED( xStreamBuffer ); + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */ +size_t xReturn, xSpace; +size_t xRequiredSpace = xDataLengthBytes; + + configASSERT( pvTxData ); + configASSERT( pxStreamBuffer ); + + /* This send function is used to write to both message buffers and stream + buffers. If this is a message buffer then the space needed must be + increased by the amount of bytes needed to store the length of the + message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xRequiredSpace += sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); + xReturn = prvWriteMessageToBuffer( pxStreamBuffer, pvTxData, xDataLengthBytes, xSpace, xRequiredSpace ); + + if( xReturn > ( size_t ) 0 ) + { + /* Was a task waiting for the data? 
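*/
+
+	/* Example usage of xStreamBufferSend() above - a minimal sketch,
+	assuming xStream was created elsewhere and, as required, has only one
+	writer:
+
+	const char pcData[] = "hello";
+	size_t xSent;
+
+	// Allow up to 100ms for enough space to become available.
+	xSent = xStreamBufferSend( xStream, pcData, sizeof( pcData ),
+								pdMS_TO_TICKS( 100 ) );
+
+	if( xSent != sizeof( pcData ) )
+	{
+		// Timed out. A stream buffer still wrote xSent bytes; a message
+		// buffer wrote nothing, as messages are all or nothing.
+	}
+	*/
+
+	/*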
*/ + if( prvBytesInBuffer( pxStreamBuffer ) >= pxStreamBuffer->xTriggerLevelBytes ) + { + sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceSTREAM_BUFFER_SEND_FROM_ISR( xStreamBuffer, xReturn ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +static size_t prvWriteMessageToBuffer( StreamBuffer_t * const pxStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + size_t xSpace, + size_t xRequiredSpace ) +{ + BaseType_t xShouldWrite; + size_t xReturn; + + if( xSpace == ( size_t ) 0 ) + { + /* Doesn't matter if this is a stream buffer or a message buffer, there + is no space to write. */ + xShouldWrite = pdFALSE; + } + else if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) == ( uint8_t ) 0 ) + { + /* This is a stream buffer, as opposed to a message buffer, so writing a + stream of bytes rather than discrete messages. Write as many bytes as + possible. */ + xShouldWrite = pdTRUE; + xDataLengthBytes = configMIN( xDataLengthBytes, xSpace ); /*lint !e9044 Function parameter modified to ensure it is capped to available space. */ + } + else if( xSpace >= xRequiredSpace ) + { + /* This is a message buffer, as opposed to a stream buffer, and there + is enough space to write both the message length and the message itself + into the buffer. Start by writing the length of the data, the data + itself will be written later in this function. */ + xShouldWrite = pdTRUE; + ( void ) prvWriteBytesToBuffer( pxStreamBuffer, ( const uint8_t * ) &( xDataLengthBytes ), sbBYTES_TO_STORE_MESSAGE_LENGTH ); + } + else + { + /* There is space available, but not enough. */ + xShouldWrite = pdFALSE; + } + + if( xShouldWrite != pdFALSE ) + { + /* Writes the data itself. */ + xReturn = prvWriteBytesToBuffer( pxStreamBuffer, ( const uint8_t * ) pvTxData, xDataLengthBytes ); /*lint !e9079 Storage buffer is implemented as uint8_t for ease of sizing, alignment and access. */ + } + else + { + xReturn = 0; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) +{ +StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */ +size_t xReceivedLength = 0, xBytesAvailable, xBytesToStoreMessageLength; + + configASSERT( pvRxData ); + configASSERT( pxStreamBuffer ); + + /* This receive function is used by both message buffers, which store + discrete messages, and stream buffers, which store a continuous stream of + bytes. Discrete messages include an additional + sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the + message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + xBytesToStoreMessageLength = 0; + } + + if( xTicksToWait != ( TickType_t ) 0 ) + { + /* Checking if there is data and clearing the notification state must be + performed atomically. */ + taskENTER_CRITICAL(); + { + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + + /* If this function was invoked by a message buffer read then + xBytesToStoreMessageLength holds the number of bytes used to hold + the length of the next discrete message.
If this function was + invoked by a stream buffer read then xBytesToStoreMessageLength will + be 0. */ + if( xBytesAvailable <= xBytesToStoreMessageLength ) + { + /* Clear notification state as going to wait for data. */ + ( void ) xTaskNotifyStateClear( NULL ); + + /* Should only be one reader. */ + configASSERT( pxStreamBuffer->xTaskWaitingToReceive == NULL ); + pxStreamBuffer->xTaskWaitingToReceive = xTaskGetCurrentTaskHandle(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + if( xBytesAvailable <= xBytesToStoreMessageLength ) + { + /* Wait for data to be available. */ + traceBLOCKING_ON_STREAM_BUFFER_RECEIVE( xStreamBuffer ); + ( void ) xTaskNotifyWait( ( uint32_t ) 0, UINT32_MAX, NULL, xTicksToWait ); + pxStreamBuffer->xTaskWaitingToReceive = NULL; + + /* Recheck the data available after blocking. */ + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + } + + /* Whether receiving a discrete message (where xBytesToStoreMessageLength + holds the number of bytes used to store the message length) or a stream of + bytes (where xBytesToStoreMessageLength is zero), the number of bytes + available must be greater than xBytesToStoreMessageLength to be able to + read bytes from the buffer. */ + if( xBytesAvailable > xBytesToStoreMessageLength ) + { + xReceivedLength = prvReadMessageFromBuffer( pxStreamBuffer, pvRxData, xBufferLengthBytes, xBytesAvailable, xBytesToStoreMessageLength ); + + /* Was a task waiting for space in the buffer? */ + if( xReceivedLength != ( size_t ) 0 ) + { + traceSTREAM_BUFFER_RECEIVE( xStreamBuffer, xReceivedLength ); + sbRECEIVE_COMPLETED( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + traceSTREAM_BUFFER_RECEIVE_FAILED( xStreamBuffer ); + mtCOVERAGE_TEST_MARKER(); + } + + return xReceivedLength; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */ +size_t xReceivedLength = 0, xBytesAvailable, xBytesToStoreMessageLength; + + configASSERT( pvRxData ); + configASSERT( pxStreamBuffer ); + + /* This receive function is used by both message buffers, which store + discrete messages, and stream buffers, which store a continuous stream of + bytes. Discrete messages include an additional + sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the + message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + xBytesToStoreMessageLength = 0; + } + + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + + /* Whether receiving a discrete message (where xBytesToStoreMessageLength + holds the number of bytes used to store the message length) or a stream of + bytes (where xBytesToStoreMessageLength is zero), the number of bytes + available must be greater than xBytesToStoreMessageLength to be able to + read bytes from the buffer. 
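*/
+
+	/* Example usage of the ISR variants - a minimal sketch, assuming
+	xStream was created elsewhere and this handler is its only reader:
+
+	void vExampleRxISR( void )
+	{
+		uint8_t ucRxData[ 16 ];
+		size_t xReceived;
+		BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+
+		xReceived = xStreamBufferReceiveFromISR( xStream, ucRxData,
+								sizeof( ucRxData ), &xHigherPriorityTaskWoken );
+
+		// ...process xReceived bytes...
+
+		// Context switch now if a sender of higher priority was unblocked.
+		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+	}
+	*/
+
+	/*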
*/ + if( xBytesAvailable > xBytesToStoreMessageLength ) + { + xReceivedLength = prvReadMessageFromBuffer( pxStreamBuffer, pvRxData, xBufferLengthBytes, xBytesAvailable, xBytesToStoreMessageLength ); + + /* Was a task waiting for space in the buffer? */ + if( xReceivedLength != ( size_t ) 0 ) + { + sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceSTREAM_BUFFER_RECEIVE_FROM_ISR( xStreamBuffer, xReceivedLength ); + + return xReceivedLength; +} +/*-----------------------------------------------------------*/ + +static size_t prvReadMessageFromBuffer( StreamBuffer_t *pxStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + size_t xBytesAvailable, + size_t xBytesToStoreMessageLength ) +{ +size_t xOriginalTail, xReceivedLength, xNextMessageLength; + + if( xBytesToStoreMessageLength != ( size_t ) 0 ) + { + /* A discrete message is being received. First receive the length + of the message. A copy of the tail is stored so the buffer can be + returned to its prior state if the length of the message is too + large for the provided buffer. */ + xOriginalTail = pxStreamBuffer->xTail; + ( void ) prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) &xNextMessageLength, xBytesToStoreMessageLength, xBytesAvailable ); + + /* Reduce the number of bytes available by the number of bytes just + read out. */ + xBytesAvailable -= xBytesToStoreMessageLength; + + /* Check there is enough space in the buffer provided by the + user. */ + if( xNextMessageLength > xBufferLengthBytes ) + { + /* The user has provided insufficient space to read the message + so return the buffer to its previous state (so the length of + the message is in the buffer again). */ + pxStreamBuffer->xTail = xOriginalTail; + xNextMessageLength = 0; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* A stream of bytes is being received (as opposed to a discrete + message), so read as many bytes as possible. */ + xNextMessageLength = xBufferLengthBytes; + } + + /* Read the actual data. */ + xReceivedLength = prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) pvRxData, xNextMessageLength, xBytesAvailable ); /*lint !e9079 Data storage area is implemented as uint8_t array for ease of sizing, indexing and alignment. */ + + return xReceivedLength; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) +{ +const StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */ +BaseType_t xReturn; +size_t xTail; + + configASSERT( pxStreamBuffer ); + + /* True if no bytes are available. */ + xTail = pxStreamBuffer->xTail; + if( pxStreamBuffer->xHead == xTail ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) +{ +BaseType_t xReturn; +size_t xBytesToStoreMessageLength; +const StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */ + + configASSERT( pxStreamBuffer ); + + /* This generic version of the receive function is used by both message + buffers, which store discrete messages, and stream buffers, which store a + continuous stream of bytes. 
Discrete messages include an additional + sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + xBytesToStoreMessageLength = 0; + } + + /* True if the available space equals zero. */ + if( xStreamBufferSpacesAvailable( xStreamBuffer ) <= xBytesToStoreMessageLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; + + configASSERT( pxStreamBuffer ); + + uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) + { + ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToReceive, + ( uint32_t ) 0, + eNoAction, + pxHigherPriorityTaskWoken ); + ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; + + configASSERT( pxStreamBuffer ); + + uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) + { + ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToSend, + ( uint32_t ) 0, + eNoAction, + pxHigherPriorityTaskWoken ); + ( pxStreamBuffer )->xTaskWaitingToSend = NULL; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +static size_t prvWriteBytesToBuffer( StreamBuffer_t * const pxStreamBuffer, const uint8_t *pucData, size_t xCount ) +{ +size_t xNextHead, xFirstLength; + + configASSERT( xCount > ( size_t ) 0 ); + + xNextHead = pxStreamBuffer->xHead; + + /* Calculate the number of bytes that can be added in the first write - + which may be less than the total number of bytes that need to be added if + the buffer will wrap back to the beginning. */ + xFirstLength = configMIN( pxStreamBuffer->xLength - xNextHead, xCount ); + + /* Write as many bytes as can be written in the first write. */ + configASSERT( ( xNextHead + xFirstLength ) <= pxStreamBuffer->xLength ); + memcpy( ( void* ) ( &( pxStreamBuffer->pucBuffer[ xNextHead ] ) ), ( const void * ) pucData, xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + + /* If the number of bytes written was less than the number that could be + written in the first write... */ + if( xCount > xFirstLength ) + { + /* ...then write the remaining bytes to the start of the buffer. 
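*/
+
+	/* Worked example of the wrap-around handled here: with xLength = 10,
+	xHead = 7 and xCount = 5, xFirstLength = min( 10 - 7, 5 ) = 3, so bytes
+	0..2 of pucData land at indices 7..9, bytes 3..4 land at indices 0..1,
+	and the new xHead becomes ( 7 + 5 ) - 10 = 2. */
+
+	/*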
*/ + configASSERT( ( xCount - xFirstLength ) <= pxStreamBuffer->xLength ); + memcpy( ( void * ) pxStreamBuffer->pucBuffer, ( const void * ) &( pucData[ xFirstLength ] ), xCount - xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xNextHead += xCount; + if( xNextHead >= pxStreamBuffer->xLength ) + { + xNextHead -= pxStreamBuffer->xLength; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxStreamBuffer->xHead = xNextHead; + + return xCount; +} +/*-----------------------------------------------------------*/ + +static size_t prvReadBytesFromBuffer( StreamBuffer_t *pxStreamBuffer, uint8_t *pucData, size_t xMaxCount, size_t xBytesAvailable ) +{ +size_t xCount, xFirstLength, xNextTail; + + /* Use the minimum of the wanted bytes and the available bytes. */ + xCount = configMIN( xBytesAvailable, xMaxCount ); + + if( xCount > ( size_t ) 0 ) + { + xNextTail = pxStreamBuffer->xTail; + + /* Calculate the number of bytes that can be read - which may be + less than the number wanted if the data wraps around to the start of + the buffer. */ + xFirstLength = configMIN( pxStreamBuffer->xLength - xNextTail, xCount ); + + /* Obtain the number of bytes it is possible to obtain in the first + read. Asserts check bounds of read and write. */ + configASSERT( xFirstLength <= xMaxCount ); + configASSERT( ( xNextTail + xFirstLength ) <= pxStreamBuffer->xLength ); + memcpy( ( void * ) pucData, ( const void * ) &( pxStreamBuffer->pucBuffer[ xNextTail ] ), xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + + /* If the total number of wanted bytes is greater than the number + that could be read in the first read... */ + if( xCount > xFirstLength ) + { + /*...then read the remaining bytes from the start of the buffer. */ + configASSERT( xCount <= xMaxCount ); + memcpy( ( void * ) &( pucData[ xFirstLength ] ), ( void * ) ( pxStreamBuffer->pucBuffer ), xCount - xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Move the tail pointer to effectively remove the data read from + the buffer. */ + xNextTail += xCount; + + if( xNextTail >= pxStreamBuffer->xLength ) + { + xNextTail -= pxStreamBuffer->xLength; + } + + pxStreamBuffer->xTail = xNextTail; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xCount; +} +/*-----------------------------------------------------------*/ + +static size_t prvBytesInBuffer( const StreamBuffer_t * const pxStreamBuffer ) +{ +/* Returns the distance between xTail and xHead. */ +size_t xCount; + + xCount = pxStreamBuffer->xLength + pxStreamBuffer->xHead; + xCount -= pxStreamBuffer->xTail; + if ( xCount >= pxStreamBuffer->xLength ) + { + xCount -= pxStreamBuffer->xLength; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xCount; +} +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, + uint8_t * const pucBuffer, + size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer ) +{ + /* Assert here is deliberately writing to the entire buffer to ensure it can + be written to without generating exceptions, and is setting the buffer to a + known value to assist in development/debugging. */ + #if( configASSERT_DEFINED == 1 ) + { + /* The value written just has to be identifiable when looking at the + memory. Don't use 0xA5 as that is the stack fill value and could + result in confusion as to what is actually being observed. 
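*/
+
+	/* Worked example of prvBytesInBuffer() above: with xLength = 10,
+	xHead = 2 and xTail = 7 (the write index has wrapped), the count is
+	( 10 + 2 ) - 7 = 5 bytes; with xHead = 7 and xTail = 2 it is
+	( 10 + 7 ) - 2 = 15, which is >= xLength and so reduces to 5. */
+
+	/*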
*/ + const BaseType_t xWriteValue = 0x55; + configASSERT( memset( pucBuffer, ( int ) xWriteValue, xBufferSizeBytes ) == pucBuffer ); + } + #endif + + memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. */ + pxStreamBuffer->pucBuffer = pucBuffer; + pxStreamBuffer->xLength = xBufferSizeBytes; + pxStreamBuffer->xTriggerLevelBytes = xTriggerLevelBytes; + + if( xIsMessageBuffer != pdFALSE ) + { + pxStreamBuffer->ucFlags |= sbFLAGS_IS_MESSAGE_BUFFER; + } +} + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxStreamBufferGetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer ) + { + return ( ( StreamBuffer_t * ) xStreamBuffer )->uxStreamBufferNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vStreamBufferSetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer, UBaseType_t uxStreamBufferNumber ) + { + ( ( StreamBuffer_t * ) xStreamBuffer )->uxStreamBufferNumber = uxStreamBufferNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + uint8_t ucStreamBufferGetStreamBufferType( StreamBufferHandle_t xStreamBuffer ) + { + return ( ( StreamBuffer_t * )xStreamBuffer )->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/tasks.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/tasks.c new file mode 100644 index 000000000..9e1cb9b36 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/tasks.c @@ -0,0 +1,5039 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* Standard includes. */ +#include <stdlib.h> +#include <string.h> + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* FreeRTOS includes.
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "timers.h" +#include "stack_macros.h" + +/* Lint e961 and e750 are suppressed as a MISRA exception justified because the +MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the +header files above, but not in this file, in order to generate the correct +privileged Vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */ + +/* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting +functions but without including stdio.h here. */ +#if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) + /* At the bottom of this file are two optional functions that can be used + to generate human readable text from the raw data generated by the + uxTaskGetSystemState() function. Note the formatting functions are provided + for convenience only, and are NOT considered part of the kernel. */ + #include +#endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */ + +#if( configUSE_PREEMPTION == 0 ) + /* If the cooperative scheduler is being used then a yield should not be + performed just because a higher priority task has been woken. */ + #define taskYIELD_IF_USING_PREEMPTION() +#else + #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() +#endif + +/* Values that can be assigned to the ucNotifyState member of the TCB. */ +#define taskNOT_WAITING_NOTIFICATION ( ( uint8_t ) 0 ) +#define taskWAITING_NOTIFICATION ( ( uint8_t ) 1 ) +#define taskNOTIFICATION_RECEIVED ( ( uint8_t ) 2 ) + +/* + * The value used to fill the stack of a task when the task is created. This + * is used purely for checking the high water mark for tasks. + */ +#define tskSTACK_FILL_BYTE ( 0xa5U ) + +/* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using +dynamically allocated RAM, in which case when any task is deleted it is known +that both the task's stack and TCB need to be freed. Sometimes the +FreeRTOSConfig.h settings only allow a task to be created using statically +allocated RAM, in which case when any task is deleted it is known that neither +the task's stack or TCB should be freed. Sometimes the FreeRTOSConfig.h +settings allow a task to be created using either statically or dynamically +allocated RAM, in which case a member of the TCB is used to record whether the +stack and/or TCB were allocated statically or dynamically, so when a task is +deleted the RAM that was allocated dynamically is freed again and no attempt is +made to free the RAM that was allocated statically. +tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is only true if it is possible for a +task to be created using either statically or dynamically allocated RAM. Note +that if portUSING_MPU_WRAPPERS is 1 then a protected task can be created with +a statically allocated stack and a dynamically allocated TCB. +!!!NOTE!!! If the definition of tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is +changed then the definition of StaticTask_t must also be updated. */ +#define tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) +#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 ) +#define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 ) +#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 ) + +/* If any of the following are set then task stacks are filled with a known +value so the high water mark can be determined. 
If none of the following are +set then don't fill the stack so there is no unnecessary dependency on memset. */ +#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) + #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 1 +#else + #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 0 +#endif + +/* + * Macros used by vListTask to indicate which state a task is in. + */ +#define tskRUNNING_CHAR ( 'X' ) +#define tskBLOCKED_CHAR ( 'B' ) +#define tskREADY_CHAR ( 'R' ) +#define tskDELETED_CHAR ( 'D' ) +#define tskSUSPENDED_CHAR ( 'S' ) + +/* + * Some kernel aware debuggers require the data the debugger needs access to be + * global, rather than file scope. + */ +#ifdef portREMOVE_STATIC_QUALIFIER + #define static +#endif + +/* The name allocated to the Idle task. This can be overridden by defining +configIDLE_TASK_NAME in FreeRTOSConfig.h. */ +#ifndef configIDLE_TASK_NAME + #define configIDLE_TASK_NAME "IDLE" +#endif + +#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) + + /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is + performed in a generic way that is not optimised to any particular + microcontroller architecture. */ + + /* uxTopReadyPriority holds the priority of the highest priority ready + state task. */ + #define taskRECORD_READY_PRIORITY( uxPriority ) \ + { \ + if( ( uxPriority ) > uxTopReadyPriority ) \ + { \ + uxTopReadyPriority = ( uxPriority ); \ + } \ + } /* taskRECORD_READY_PRIORITY */ + + /*-----------------------------------------------------------*/ + + #define taskSELECT_HIGHEST_PRIORITY_TASK() \ + { \ + UBaseType_t uxTopPriority = uxTopReadyPriority; \ + \ + /* Find the highest priority queue that contains ready tasks. */ \ + while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \ + { \ + configASSERT( uxTopPriority ); \ + --uxTopPriority; \ + } \ + \ + /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \ + the same priority get an equal share of the processor time. */ \ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ + uxTopReadyPriority = uxTopPriority; \ + } /* taskSELECT_HIGHEST_PRIORITY_TASK */ + + /*-----------------------------------------------------------*/ + + /* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as + they are only required when a port optimised method of task selection is + being used. */ + #define taskRESET_READY_PRIORITY( uxPriority ) + #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority ) + +#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + + /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is + performed in a way that is tailored to the particular microcontroller + architecture being used. */ + + /* A port optimised version is provided. Call the port defined macros. */ + #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority ) + + /*-----------------------------------------------------------*/ + + #define taskSELECT_HIGHEST_PRIORITY_TASK() \ + { \ + UBaseType_t uxTopPriority; \ + \ + /* Find the highest priority list that contains ready tasks. 
*/ \ + portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \ + configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ + } /* taskSELECT_HIGHEST_PRIORITY_TASK() */ + + /*-----------------------------------------------------------*/ + + /* A port optimised version is provided, call it only if the TCB being reset + is being referenced from a ready list. If it is referenced from a delayed + or suspended list then it won't be in a ready list. */ + #define taskRESET_READY_PRIORITY( uxPriority ) \ + { \ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \ + { \ + portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \ + } \ + } + +#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + +/*-----------------------------------------------------------*/ + +/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick +count overflows. */ +#define taskSWITCH_DELAYED_LISTS() \ +{ \ + List_t *pxTemp; \ + \ + /* The delayed tasks list should be empty when the lists are switched. */ \ + configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) ); \ + \ + pxTemp = pxDelayedTaskList; \ + pxDelayedTaskList = pxOverflowDelayedTaskList; \ + pxOverflowDelayedTaskList = pxTemp; \ + xNumOfOverflows++; \ + prvResetNextTaskUnblockTime(); \ +} + +/*-----------------------------------------------------------*/ + +/* + * Place the task represented by pxTCB into the appropriate ready list for + * the task. It is inserted at the end of the list. + */ +#define prvAddTaskToReadyList( pxTCB ) \ + traceMOVED_TASK_TO_READY_STATE( pxTCB ); \ + taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \ + vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \ + tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB ) +/*-----------------------------------------------------------*/ + +/* + * Several functions take an TaskHandle_t parameter that can optionally be NULL, + * where NULL is used to indicate that the handle of the currently executing + * task should be used in place of the parameter. This macro simply checks to + * see if the parameter is NULL and returns a pointer to the appropriate TCB. + */ +#define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? ( TCB_t * ) pxCurrentTCB : ( TCB_t * ) ( pxHandle ) ) + +/* The item value of the event list item is normally used to hold the priority +of the task to which it belongs (coded to allow it to be held in reverse +priority order). However, it is occasionally borrowed for other purposes. It +is important its value is not updated due to a task priority change while it is +being used for another purpose. The following bit definition is used to inform +the scheduler that the value should not be changed - in which case it is the +responsibility of whichever module is using the value to ensure it gets set back +to its original value when it is released. */ +#if( configUSE_16_BIT_TICKS == 1 ) + #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000U +#else + #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL +#endif + +/* + * Task control block. 
A task control block (TCB) is allocated for each task, + * and stores task state information, including a pointer to the task's context + * (the task's run time environment, including register values) + */ +typedef struct tskTaskControlBlock +{ + volatile StackType_t *pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */ + + #if ( portUSING_MPU_WRAPPERS == 1 ) + xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */ + #endif + + ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */ + ListItem_t xEventListItem; /*< Used to reference a task from an event list. */ + UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */ + StackType_t *pxStack; /*< Points to the start of the stack. */ + char pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + + #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) + StackType_t *pxEndOfStack; /*< Points to the highest valid address for the stack. */ + #endif + + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */ + #endif + + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */ + UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */ + #endif + + #if ( configUSE_MUTEXES == 1 ) + UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */ + UBaseType_t uxMutexesHeld; + #endif + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + TaskHookFunction_t pxTaskTag; + #endif + + #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) + void *pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ]; + #endif + + #if( configGENERATE_RUN_TIME_STATS == 1 ) + uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */ + #endif + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + /* Allocate a Newlib reent structure that is specific to this task. + Note Newlib support has been included by popular demand, but is not + used by the FreeRTOS maintainers themselves. FreeRTOS is not + responsible for resulting newlib operation. User must be familiar with + newlib and must provide system-wide implementations of the necessary + stubs. Be warned that (at the time of writing) the current newlib design + implements a system-wide malloc() that must be provided with locks. */ + struct _reent xNewLib_reent; + #endif + + #if( configUSE_TASK_NOTIFICATIONS == 1 ) + volatile uint32_t ulNotifiedValue; + volatile uint8_t ucNotifyState; + #endif + + /* See the comments above the definition of + tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */ + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 Macro has been consolidated for readability reasons. 
*/ + uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */ + #endif + + #if( INCLUDE_xTaskAbortDelay == 1 ) + uint8_t ucDelayAborted; + #endif + +} tskTCB; + +/* The old tskTCB name is maintained above then typedefed to the new TCB_t name +below to enable the use of older kernel aware debuggers. */ +typedef tskTCB TCB_t; + +/*lint -save -e956 A manual analysis and inspection has been used to determine +which static variables must be declared volatile. */ + +PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; + +/* Lists for ready and blocked tasks. --------------------*/ +PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ] = {0}; /*< Prioritised ready tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList1 = {0}; /*< Delayed tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList2 = {0}; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */ +PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList = NULL; /*< Points to the delayed task list currently being used. */ +PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList = NULL; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */ +PRIVILEGED_DATA static List_t xPendingReadyList = {0}; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */ + +#if( INCLUDE_vTaskDelete == 1 ) + + PRIVILEGED_DATA static List_t xTasksWaitingTermination = {0}; /*< Tasks that have been deleted - but their memory not yet freed. */ + PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U; + +#endif + +#if ( INCLUDE_vTaskSuspend == 1 ) + + PRIVILEGED_DATA static List_t xSuspendedTaskList = {0}; /*< Tasks that are currently suspended. */ + +#endif + +/* Other file private variables. --------------------------------*/ +PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U; +PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT; +PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY; +PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE; +PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U; +PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE; +PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; +PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; +PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ +PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ + +/* Context switches are held pending while the scheduler is suspended. Also, +interrupts must not manipulate the xStateListItem of a TCB, or any of the +lists the xStateListItem can be referenced from, if the scheduler is suspended. +If an interrupt needs to unblock a task while the scheduler is suspended then it +moves the task's event list item into the xPendingReadyList, ready for the +kernel to move the task from the pending ready list into the real ready list +when the scheduler is unsuspended. 
The pending ready list itself can only be +accessed from a critical section. */ +PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE; + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + + PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */ + PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */ + +#endif + +/*lint -restore */ + +/*-----------------------------------------------------------*/ + +/* Callback function prototypes. --------------------------*/ +#if( configCHECK_FOR_STACK_OVERFLOW > 0 ) + extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName ); +#endif + +#if( configUSE_TICK_HOOK > 0 ) + extern void vApplicationTickHook( void ); +#endif + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + extern void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize ); +#endif + +/* File private functions. --------------------------------*/ + +/** + * Utility task that simply returns pdTRUE if the task referenced by xTask is + * currently in the Suspended state, or pdFALSE if the task referenced by xTask + * is in any other state. + */ +#if ( INCLUDE_vTaskSuspend == 1 ) + + static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +#endif /* INCLUDE_vTaskSuspend */ + +/* + * Utility to ready all the lists used by the scheduler. This is called + * automatically upon the creation of the first task. + */ +static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; + +/* + * The idle task, which as all tasks is implemented as a never ending loop. + * The idle task is automatically created and added to the ready lists upon + * creation of the first user task. + * + * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific + * language extensions. The equivalent prototype for this function is: + * + * void prvIdleTask( void *pvParameters ); + * + */ +static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ); + +/* + * Utility to free all memory allocated by the scheduler to hold a TCB, + * including the stack pointed to by the TCB. + * + * This does not free memory allocated by the task itself (i.e. memory + * allocated by calls to pvPortMalloc from within the tasks application code). + */ +#if ( INCLUDE_vTaskDelete == 1 ) + + static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Used only by the idle task. This checks to see if anything has been placed + * in the list of tasks waiting to be deleted. If so the task is cleaned up + * and its TCB deleted. + */ +static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION; + +/* + * The currently executing task is entering the Blocked state. Add the task to + * either the current or the overflow delayed task list. + */ +static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION; + +/* + * Fills an TaskStatus_t structure with information on each task that is + * referenced from the pxList list (which may be a ready list, a delayed list, + * a suspended list, etc.). + * + * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM + * NORMAL APPLICATION CODE. 
+ */ +#if ( configUSE_TRACE_FACILITY == 1 ) + + static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Searches pxList for a task with name pcNameToQuery - returning a handle to + * the task if it is found, or NULL if the task is not found. + */ +#if ( INCLUDE_xTaskGetHandle == 1 ) + + static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) PRIVILEGED_FUNCTION; + +#endif + +/* + * When a task is created, the stack of the task is filled with a known value. + * This function determines the 'high water mark' of the task stack by + * determining how much of the stack remains at the original preset value. + */ +#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) + + static uint16_t prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Return the amount of time, in ticks, that will pass before the kernel will + * next move a task from the Blocked state to the Running state. + * + * This conditional compilation should use inequality to 0, not equality to 1. + * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user + * defined low power mode implementations require configUSE_TICKLESS_IDLE to be + * set to a value other than 1. + */ +#if ( configUSE_TICKLESS_IDLE != 0 ) + + static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Set xNextTaskUnblockTime to the time at which the next Blocked state task + * will exit the Blocked state. + */ +static void prvResetNextTaskUnblockTime( void ); + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) + + /* + * Helper function used to pad task names with spaces when printing out + * human readable tables of task information. + */ + static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Called after a Task_t structure has been allocated either statically or + * dynamically to fill in the structure's members. + */ +static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask, + TCB_t *pxNewTCB, + const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION; + +/* + * Called after a new task has been created and initialised to place the task + * under the control of the scheduler. + */ +static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION; + +/* + * freertos_tasks_c_additions_init() should only be called if the user definable + * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro + * called by the function. + */ +#ifdef FREERTOS_TASKS_C_ADDITIONS_INIT + + static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION; + +#endif + +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer ) + { + TCB_t *pxNewTCB; + TaskHandle_t xReturn; + + configASSERT( puxStackBuffer != NULL ); + configASSERT( pxTaskBuffer != NULL ); + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticTask_t equals the size of the real task + structure. */ + volatile size_t xSize = sizeof( StaticTask_t ); + configASSERT( xSize == sizeof( TCB_t ) ); + } + #endif /* configASSERT_DEFINED */ + + + if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) ) + { + /* The memory used for the task's TCB and stack are passed into this + function - use them. */ + pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ + pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer; + + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 Macro has been consolidated for readability reasons. */ + { + /* Tasks can be created statically or dynamically, so note this + task was created statically in case the task is later deleted. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + + prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL ); + prvAddNewTaskToReadyList( pxNewTCB ); + } + else + { + xReturn = NULL; + } + + return xReturn; + } + +#endif /* SUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) + { + TCB_t *pxNewTCB; + BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + + configASSERT( pxTaskDefinition->puxStackBuffer != NULL ); + configASSERT( pxTaskDefinition->pxTaskBuffer != NULL ); + + if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) ) + { + /* Allocate space for the TCB. Where the memory comes from depends + on the implementation of the port malloc function and whether or + not static allocation is being used. */ + pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer; + + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; + + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + { + /* Tasks can be created statically or dynamically, so note this + task was created statically in case the task is later deleted. 
*/ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + + prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, + pxTaskDefinition->pcName, + ( uint32_t ) pxTaskDefinition->usStackDepth, + pxTaskDefinition->pvParameters, + pxTaskDefinition->uxPriority, + pxCreatedTask, pxNewTCB, + pxTaskDefinition->xRegions ); + + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + + return xReturn; + } + +#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + +#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) + { + TCB_t *pxNewTCB; + BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + + configASSERT( pxTaskDefinition->puxStackBuffer ); + + if( pxTaskDefinition->puxStackBuffer != NULL ) + { + /* Allocate space for the TCB. Where the memory comes from depends + on the implementation of the port malloc function and whether or + not static allocation is being used. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); + + if( pxNewTCB != NULL ) + { + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; + + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + /* Tasks can be created statically or dynamically, so note + this task had a statically allocated stack in case it is + later deleted. The TCB was allocated dynamically. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY; + } + #endif + + prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, + pxTaskDefinition->pcName, + ( uint32_t ) pxTaskDefinition->usStackDepth, + pxTaskDefinition->pvParameters, + pxTaskDefinition->uxPriority, + pxCreatedTask, pxNewTCB, + pxTaskDefinition->xRegions ); + + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + } + + return xReturn; + } + +#endif /* portUSING_MPU_WRAPPERS */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + BaseType_t xTaskCreate( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask ) + { + TCB_t *pxNewTCB; + BaseType_t xReturn; + + /* If the stack grows down then allocate the stack then the TCB so the stack + does not grow into the TCB. Likewise if the stack grows up then allocate + the TCB then the stack. */ + #if( portSTACK_GROWTH > 0 ) + { + /* Allocate space for the TCB. Where the memory comes from depends on + the implementation of the port malloc function and whether or not static + allocation is being used. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); + + if( pxNewTCB != NULL ) + { + /* Allocate space for the stack used by the task being created. + The base of the stack memory stored in the TCB so the task can + be deleted later if required. */ + pxNewTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + if( pxNewTCB->pxStack == NULL ) + { + /* Could not allocate the stack. Delete the allocated TCB. 
*/ + vPortFree( pxNewTCB ); + pxNewTCB = NULL; + } + } + } + #else /* portSTACK_GROWTH */ + { + StackType_t *pxStack; + + /* Allocate space for the stack used by the task being created. */ + pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + if( pxStack != NULL ) + { + /* Allocate space for the TCB. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e961 MISRA exception as the casts are only redundant for some paths. */ + + if( pxNewTCB != NULL ) + { + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxStack; + } + else + { + /* The stack cannot be used as the TCB was not created. Free + it again. */ + vPortFree( pxStack ); + } + } + else + { + pxNewTCB = NULL; + } + } + #endif /* portSTACK_GROWTH */ + + if( pxNewTCB != NULL ) + { + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 Macro has been consolidated for readability reasons. */ + { + /* Tasks can be created statically or dynamically, so note this + task was created dynamically in case it is later deleted. */ + pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + + prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL ); + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + else + { + xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + } + + return xReturn; + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask, + TCB_t *pxNewTCB, + const MemoryRegion_t * const xRegions ) +{ +StackType_t *pxTopOfStack; +UBaseType_t x; + + #if( portUSING_MPU_WRAPPERS == 1 ) + /* Should the task be created in privileged mode? */ + BaseType_t xRunPrivileged; + if( ( uxPriority & portPRIVILEGE_BIT ) != 0U ) + { + xRunPrivileged = pdTRUE; + } + else + { + xRunPrivileged = pdFALSE; + } + uxPriority &= ~portPRIVILEGE_BIT; + #endif /* portUSING_MPU_WRAPPERS == 1 */ + + /* Avoid dependency on memset() if it is not required. */ + #if( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 ) + { + /* Fill the stack with a known value to assist debugging. */ + ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) ); + } + #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */ + + /* Calculate the top of stack address. This depends on whether the stack + grows from high memory to low (as per the 80x86) or vice versa. + portSTACK_GROWTH is used to make the result positive or negative as required + by the port. */ + #if( portSTACK_GROWTH < 0 ) + { + pxTopOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 ); + pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. */ + + /* Check the alignment of the calculated top of stack is correct. 
*/ + configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) ); + + #if( configRECORD_STACK_HIGH_ADDRESS == 1 ) + { + /* Also record the stack's high address, which may assist + debugging. */ + pxNewTCB->pxEndOfStack = pxTopOfStack; + } + #endif /* configRECORD_STACK_HIGH_ADDRESS */ + } + #else /* portSTACK_GROWTH */ + { + pxTopOfStack = pxNewTCB->pxStack; + + /* Check the alignment of the stack buffer is correct. */ + configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) ); + + /* The other extreme of the stack space is required if stack checking is + performed. */ + pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 ); + } + #endif /* portSTACK_GROWTH */ + + /* Store the task name in the TCB. */ + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + { + pxNewTCB->pcTaskName[ x ] = pcName[ x ]; + + /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than + configMAX_TASK_NAME_LEN characters just in case the memory after the + string is not accessible (extremely unlikely). */ + if( pcName[ x ] == 0x00 ) + { + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Ensure the name string is terminated in the case that the string length + was greater or equal to configMAX_TASK_NAME_LEN. */ + pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0'; + + /* This is used as an array index so must ensure it's not too large. First + remove the privilege bit if one is present. */ + if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES ) + { + uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxNewTCB->uxPriority = uxPriority; + #if ( configUSE_MUTEXES == 1 ) + { + pxNewTCB->uxBasePriority = uxPriority; + pxNewTCB->uxMutexesHeld = 0; + } + #endif /* configUSE_MUTEXES */ + + vListInitialiseItem( &( pxNewTCB->xStateListItem ) ); + vListInitialiseItem( &( pxNewTCB->xEventListItem ) ); + + /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get + back to the containing TCB from a generic item in a list. */ + listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB ); + + /* Event lists are always in priority order. */ + listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB ); + + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + { + pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U; + } + #endif /* portCRITICAL_NESTING_IN_TCB */ + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + { + pxNewTCB->pxTaskTag = NULL; + } + #endif /* configUSE_APPLICATION_TASK_TAG */ + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + pxNewTCB->ulRunTimeCounter = 0UL; + } + #endif /* configGENERATE_RUN_TIME_STATS */ + + #if ( portUSING_MPU_WRAPPERS == 1 ) + { + vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth ); + } + #else + { + /* Avoid compiler warning about unreferenced parameter. 
*/ + ( void ) xRegions; + } + #endif + + #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + { + for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ ) + { + pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL; + } + } + #endif + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + { + pxNewTCB->ulNotifiedValue = 0; + pxNewTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + } + #endif + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + /* Initialise this task's Newlib reent structure. */ + _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) ); + } + #endif + + #if( INCLUDE_xTaskAbortDelay == 1 ) + { + pxNewTCB->ucDelayAborted = pdFALSE; + } + #endif + + /* Initialize the TCB stack to look as if the task was already running, + but had been interrupted by the scheduler. The return address is set + to the start of the task function. Once the stack has been initialised + the top of stack variable is updated. */ + #if( portUSING_MPU_WRAPPERS == 1 ) + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #else /* portUSING_MPU_WRAPPERS */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters ); + } + #endif /* portUSING_MPU_WRAPPERS */ + + if( ( void * ) pxCreatedTask != NULL ) + { + /* Pass the handle out in an anonymous way. The handle can be used to + change the created task's priority, delete the created task, etc.*/ + *pxCreatedTask = ( TaskHandle_t ) pxNewTCB; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) +{ + /* Ensure interrupts don't access the task lists while the lists are being + updated. */ + taskENTER_CRITICAL(); + { + uxCurrentNumberOfTasks++; + if( pxCurrentTCB == NULL ) + { + /* There are no other tasks, or all the other tasks are in + the suspended state - make this the current task. */ + pxCurrentTCB = pxNewTCB; + + if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) + { + /* This is the first task to be created so do the preliminary + initialisation required. We will not recover if this call + fails, but we will report the failure. */ + prvInitialiseTaskLists(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* If the scheduler is not already running, make this task the + current task if it is the highest priority task to be created + so far. */ + if( xSchedulerRunning == pdFALSE ) + { + if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) + { + pxCurrentTCB = pxNewTCB; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + uxTaskNumber++; + + #if ( configUSE_TRACE_FACILITY == 1 ) + { + /* Add a counter into the TCB for tracing only. */ + pxNewTCB->uxTCBNumber = uxTaskNumber; + } + #endif /* configUSE_TRACE_FACILITY */ + traceTASK_CREATE( pxNewTCB ); + + prvAddTaskToReadyList( pxNewTCB ); + + portSETUP_TCB( pxNewTCB ); + } + taskEXIT_CRITICAL(); + + if( xSchedulerRunning != pdFALSE ) + { + /* If the created task is of a higher priority than the current task + then it should run now. 
*/ + if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority ) + { + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelete == 1 ) + + void vTaskDelete( TaskHandle_t xTaskToDelete ) + { + TCB_t *pxTCB; + + taskENTER_CRITICAL(); + { + /* If null is passed in here then it is the calling task that is + being deleted. */ + pxTCB = prvGetTCBFromHandle( xTaskToDelete ); + + /* Remove task from the ready list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Is the task waiting on an event also? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Increment the uxTaskNumber also so kernel aware debuggers can + detect that the task lists need re-generating. This is done before + portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will + not return. */ + uxTaskNumber++; + + if( pxTCB == pxCurrentTCB ) + { + /* A task is deleting itself. This cannot complete within the + task itself, as a context switch to another task is required. + Place the task in the termination list. The idle task will + check the termination list and free up any memory allocated by + the scheduler for the TCB and stack of the deleted task. */ + vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) ); + + /* Increment the ucTasksDeleted variable so the idle task knows + there is a task that has been deleted and that it should therefore + check the xTasksWaitingTermination list. */ + ++uxDeletedTasksWaitingCleanUp; + + /* The pre-delete hook is primarily for the Windows simulator, + in which Windows specific clean up operations are performed, + after which it is not possible to yield away from this task - + hence xYieldPending is used to latch that a context switch is + required. */ + portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending ); + } + else + { + --uxCurrentNumberOfTasks; + prvDeleteTCB( pxTCB ); + + /* Reset the next expected unblock time in case it referred to + the task that has just been deleted. */ + prvResetNextTaskUnblockTime(); + } + + traceTASK_DELETE( pxTCB ); + } + taskEXIT_CRITICAL(); + + /* Force a reschedule if it is the currently running task that has just + been deleted. */ + if( xSchedulerRunning != pdFALSE ) + { + if( pxTCB == pxCurrentTCB ) + { + configASSERT( uxSchedulerSuspended == 0 ); + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + +#endif /* INCLUDE_vTaskDelete */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelayUntil == 1 ) + + void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement ) + { + TickType_t xTimeToWake; + BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE; + + configASSERT( pxPreviousWakeTime ); + configASSERT( ( xTimeIncrement > 0U ) ); + configASSERT( uxSchedulerSuspended == 0 ); + + vTaskSuspendAll(); + { + /* Minor optimisation. The tick count cannot change in this + block. */ + const TickType_t xConstTickCount = xTickCount; + + /* Generate the tick time at which the task wants to wake. 
*/
+		xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
+
+		if( xConstTickCount < *pxPreviousWakeTime )
+		{
+			/* The tick count has overflowed since this function was
+			last called.  In this case the only time we should ever
+			actually delay is if the wake time has also overflowed,
+			and the wake time is greater than the tick time.  When this
+			is the case it is as if neither time had overflowed. */
+			if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
+			{
+				xShouldDelay = pdTRUE;
+			}
+			else
+			{
+				mtCOVERAGE_TEST_MARKER();
+			}
+		}
+		else
+		{
+			/* The tick time has not overflowed.  In this case we will
+			delay if either the wake time has overflowed, and/or the
+			tick time is less than the wake time. */
+			if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
+			{
+				xShouldDelay = pdTRUE;
+			}
+			else
+			{
+				mtCOVERAGE_TEST_MARKER();
+			}
+		}
+
+		/* Update the wake time ready for the next call. */
+		*pxPreviousWakeTime = xTimeToWake;
+
+		if( xShouldDelay != pdFALSE )
+		{
+			traceTASK_DELAY_UNTIL( xTimeToWake );
+
+			/* prvAddCurrentTaskToDelayedList() needs the block time, not
+			the time to wake, so subtract the current tick count. */
+			prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE );
+		}
+		else
+		{
+			mtCOVERAGE_TEST_MARKER();
+		}
+	}
+	xAlreadyYielded = xTaskResumeAll();
+
+	/* Force a reschedule if xTaskResumeAll has not already done so, as we may
+	have put ourselves to sleep. */
+	if( xAlreadyYielded == pdFALSE )
+	{
+		portYIELD_WITHIN_API();
+	}
+	else
+	{
+		mtCOVERAGE_TEST_MARKER();
+	}
+	}
+
+#endif /* INCLUDE_vTaskDelayUntil */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+	void vTaskDelay( const TickType_t xTicksToDelay )
+	{
+	BaseType_t xAlreadyYielded = pdFALSE;
+
+		/* A delay time of zero just forces a reschedule. */
+		if( xTicksToDelay > ( TickType_t ) 0U )
+		{
+			configASSERT( uxSchedulerSuspended == 0 );
+			vTaskSuspendAll();
+			{
+				traceTASK_DELAY();
+
+				/* A task that is removed from the event list while the
+				scheduler is suspended will not get placed in the ready
+				list or removed from the blocked list until the scheduler
+				is resumed.
+
+				This task cannot be in an event list as it is the currently
+				executing task. */
+				prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
+			}
+			xAlreadyYielded = xTaskResumeAll();
+		}
+		else
+		{
+			mtCOVERAGE_TEST_MARKER();
+		}
+
+		/* Force a reschedule if xTaskResumeAll has not already done so, as we
+		may have put ourselves to sleep. */
+		if( xAlreadyYielded == pdFALSE )
+		{
+			portYIELD_WITHIN_API();
+		}
+		else
+		{
+			mtCOVERAGE_TEST_MARKER();
+		}
+	}
+
+#endif /* INCLUDE_vTaskDelay */
+/*-----------------------------------------------------------*/
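+/*
+ * Example usage of the two delay APIs above (an illustrative sketch, not part
+ * of the original kernel sources; it assumes INCLUDE_vTaskDelayUntil and
+ * INCLUDE_vTaskDelay are both set to 1, and the 10 ms period is arbitrary):
+ *
+ *	void vPeriodicTask( void *pvParameters )
+ *	{
+ *	TickType_t xLastWakeTime;
+ *	const TickType_t xPeriod = pdMS_TO_TICKS( 10 );
+ *
+ *		// Initialise with the current tick count; vTaskDelayUntil() then
+ *		// updates it on every call.
+ *		xLastWakeTime = xTaskGetTickCount();
+ *
+ *		for( ;; )
+ *		{
+ *			// Wake exactly one period after the previous wake time, giving a
+ *			// fixed frequency even when the work below takes a variable
+ *			// amount of time.  vTaskDelay( xPeriod ) would instead delay
+ *			// relative to 'now', so the period would drift.
+ *			vTaskDelayUntil( &xLastWakeTime, xPeriod );
+ *
+ *			// ... perform the periodic work here ...
+ *		}
+ *	}
+ */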
+#if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) )
+
+	eTaskState eTaskGetState( TaskHandle_t xTask )
+	{
+	eTaskState eReturn;
+	List_t *pxStateList;
+	const TCB_t * const pxTCB = ( TCB_t * ) xTask;
+
+		configASSERT( pxTCB );
+
+		if( pxTCB == pxCurrentTCB )
+		{
+			/* The task calling this function is querying its own state. */
+			eReturn = eRunning;
+		}
+		else
+		{
+			taskENTER_CRITICAL();
+			{
+				pxStateList = ( List_t * ) listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
+			}
+			taskEXIT_CRITICAL();
+
+			if( ( pxStateList == pxDelayedTaskList ) || ( pxStateList == pxOverflowDelayedTaskList ) )
+			{
+				/* The task being queried is referenced from one of the Blocked
+				lists. */
+				eReturn = eBlocked;
+			}
+
+			#if ( INCLUDE_vTaskSuspend == 1 )
+				else if( pxStateList == &xSuspendedTaskList )
+				{
+					/* The task being queried is referenced from the suspended
+					list.  Is it genuinely suspended or is it blocked
+					indefinitely? */
+					if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
+					{
+						eReturn = eSuspended;
+					}
+					else
+					{
+						eReturn = eBlocked;
+					}
+				}
+			#endif
+
+			#if ( INCLUDE_vTaskDelete == 1 )
+				else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
+				{
+					/* The task being queried is referenced from the deleted
+					tasks list, or it is not referenced from any lists at
+					all. */
+					eReturn = eDeleted;
+				}
+			#endif
+
+			else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
+			{
+				/* If the task is not in any other state, it must be in the
+				Ready (including pending ready) state. */
+				eReturn = eReady;
+			}
+		}
+
+		return eReturn;
+	} /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
+
+#endif /* INCLUDE_eTaskGetState */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+	UBaseType_t uxTaskPriorityGet( TaskHandle_t xTask )
+	{
+	TCB_t *pxTCB;
+	UBaseType_t uxReturn;
+
+		taskENTER_CRITICAL();
+		{
+			/* If null is passed in here then it is the priority of the task
+			that called uxTaskPriorityGet() that is being queried. */
+			pxTCB = prvGetTCBFromHandle( xTask );
+			uxReturn = pxTCB->uxPriority;
+		}
+		taskEXIT_CRITICAL();
+
+		return uxReturn;
+	}
+
+#endif /* INCLUDE_uxTaskPriorityGet */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+	UBaseType_t uxTaskPriorityGetFromISR( TaskHandle_t xTask )
+	{
+	TCB_t *pxTCB;
+	UBaseType_t uxReturn, uxSavedInterruptState;
+
+		/* RTOS ports that support interrupt nesting have the concept of a
+		maximum system call (or maximum API call) interrupt priority.
+		Interrupts that are above the maximum system call priority are kept
+		permanently enabled, even when the RTOS kernel is in a critical section,
+		but cannot make any calls to FreeRTOS API functions.  If configASSERT()
+		is defined in FreeRTOSConfig.h then
+		portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
+		failure if a FreeRTOS API function is called from an interrupt that has
+		been assigned a priority above the configured maximum system call
+		priority.  Only FreeRTOS functions that end in FromISR can be called
+		from interrupts that have been assigned a priority at or (logically)
+		below the maximum system call interrupt priority.  FreeRTOS maintains a
+		separate interrupt safe API to ensure interrupt entry is as fast and as
+		simple as possible.  More information (albeit Cortex-M specific) is
+		provided on the following link:
+		http://www.freertos.org/RTOS-Cortex-M3-M4.html */
+		portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
+
+		uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR();
+		{
+			/* If null is passed in here then it is the priority of the calling
+			task that is being queried.
*/ + pxTCB = prvGetTCBFromHandle( xTask ); + uxReturn = pxTCB->uxPriority; + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState ); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskPriorityGet */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskPrioritySet == 1 ) + + void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority ) + { + TCB_t *pxTCB; + UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry; + BaseType_t xYieldRequired = pdFALSE; + + configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) ); + + /* Ensure the new priority is valid. */ + if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES ) + { + uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskENTER_CRITICAL(); + { + /* If null is passed in here then it is the priority of the calling + task that is being changed. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + traceTASK_PRIORITY_SET( pxTCB, uxNewPriority ); + + #if ( configUSE_MUTEXES == 1 ) + { + uxCurrentBasePriority = pxTCB->uxBasePriority; + } + #else + { + uxCurrentBasePriority = pxTCB->uxPriority; + } + #endif + + if( uxCurrentBasePriority != uxNewPriority ) + { + /* The priority change may have readied a task of higher + priority than the calling task. */ + if( uxNewPriority > uxCurrentBasePriority ) + { + if( pxTCB != pxCurrentTCB ) + { + /* The priority of a task other than the currently + running task is being raised. Is the priority being + raised above that of the running task? */ + if( uxNewPriority >= pxCurrentTCB->uxPriority ) + { + xYieldRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* The priority of the running task is being raised, + but the running task must already be the highest + priority task able to run so no yield is required. */ + } + } + else if( pxTCB == pxCurrentTCB ) + { + /* Setting the priority of the running task down means + there may now be another task of higher priority that + is ready to execute. */ + xYieldRequired = pdTRUE; + } + else + { + /* Setting the priority of any other task down does not + require a yield as the running task must be above the + new priority of the task being modified. */ + } + + /* Remember the ready list the task might be referenced from + before its uxPriority member is changed so the + taskRESET_READY_PRIORITY() macro can function correctly. */ + uxPriorityUsedOnEntry = pxTCB->uxPriority; + + #if ( configUSE_MUTEXES == 1 ) + { + /* Only change the priority being used if the task is not + currently using an inherited priority. */ + if( pxTCB->uxBasePriority == pxTCB->uxPriority ) + { + pxTCB->uxPriority = uxNewPriority; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The base priority gets set whatever. */ + pxTCB->uxBasePriority = uxNewPriority; + } + #else + { + pxTCB->uxPriority = uxNewPriority; + } + #endif + + /* Only reset the event list item value if the value is not + being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) + { + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* If the task is in the blocked or suspended list we need do + nothing more than change its priority variable. 
However, if
+	the task is in a ready list it needs to be removed and placed
+	in the list appropriate to its new priority. */
+				if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
+				{
+					/* The task is currently in its ready list - remove before
+					adding it to its new ready list.  As we are in a critical
+					section we can do this even if the scheduler is suspended. */
+					if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
+					{
+						/* It is known that the task is in its ready list so
+						there is no need to check again and the port level
+						reset macro can be called directly. */
+						portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
+					}
+					else
+					{
+						mtCOVERAGE_TEST_MARKER();
+					}
+					prvAddTaskToReadyList( pxTCB );
+				}
+				else
+				{
+					mtCOVERAGE_TEST_MARKER();
+				}
+
+				if( xYieldRequired != pdFALSE )
+				{
+					taskYIELD_IF_USING_PREEMPTION();
+				}
+				else
+				{
+					mtCOVERAGE_TEST_MARKER();
+				}
+
+				/* Remove compiler warning about unused variables when the port
+				optimised task selection is not being used. */
+				( void ) uxPriorityUsedOnEntry;
+			}
+		}
+		taskEXIT_CRITICAL();
+	}
+
+#endif /* INCLUDE_vTaskPrioritySet */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+	void vTaskSuspend( TaskHandle_t xTaskToSuspend )
+	{
+	TCB_t *pxTCB;
+
+		taskENTER_CRITICAL();
+		{
+			/* If null is passed in here then it is the running task that is
+			being suspended. */
+			pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
+
+			traceTASK_SUSPEND( pxTCB );
+
+			/* Remove task from the ready/delayed list and place in the
+			suspended list. */
+			if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
+			{
+				taskRESET_READY_PRIORITY( pxTCB->uxPriority );
+			}
+			else
+			{
+				mtCOVERAGE_TEST_MARKER();
+			}
+
+			/* Is the task waiting on an event also? */
+			if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
+			{
+				( void ) uxListRemove( &( pxTCB->xEventListItem ) );
+			}
+			else
+			{
+				mtCOVERAGE_TEST_MARKER();
+			}
+
+			vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );
+
+			#if( configUSE_TASK_NOTIFICATIONS == 1 )
+			{
+				if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION )
+				{
+					/* The task was blocked to wait for a notification, but is
+					now suspended, so no notification was received. */
+					pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
+				}
+			}
+			#endif
+		}
+		taskEXIT_CRITICAL();
+
+		if( xSchedulerRunning != pdFALSE )
+		{
+			/* Reset the next expected unblock time in case it referred to the
+			task that is now in the Suspended state. */
+			taskENTER_CRITICAL();
+			{
+				prvResetNextTaskUnblockTime();
+			}
+			taskEXIT_CRITICAL();
+		}
+		else
+		{
+			mtCOVERAGE_TEST_MARKER();
+		}
+
+		if( pxTCB == pxCurrentTCB )
+		{
+			if( xSchedulerRunning != pdFALSE )
+			{
+				/* The current task has just been suspended. */
+				configASSERT( uxSchedulerSuspended == 0 );
+				portYIELD_WITHIN_API();
+			}
+			else
+			{
+				/* The scheduler is not running, but the task that was pointed
+				to by pxCurrentTCB has just been suspended and pxCurrentTCB
+				must be adjusted to point to a different task. */
+				if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks )
+				{
+					/* No other tasks are ready, so set pxCurrentTCB back to
+					NULL so when the next task is created pxCurrentTCB will
+					be set to point to it no matter what its relative priority
+					is. */
+					pxCurrentTCB = NULL;
+				}
+				else
+				{
+					vTaskSwitchContext();
+				}
+			}
+		}
+		else
+		{
+			mtCOVERAGE_TEST_MARKER();
+		}
+	}
+
+#endif /* INCLUDE_vTaskSuspend */
+/*-----------------------------------------------------------*/
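+/*
+ * Example usage of vTaskSuspend()/vTaskResume() (an illustrative sketch, not
+ * part of the original kernel sources; vWorkerTask is a placeholder task
+ * function and INCLUDE_vTaskSuspend is assumed to be 1):
+ *
+ *	TaskHandle_t xWorkerHandle = NULL;
+ *
+ *	// Keep the handle returned at creation so the task can be targeted later.
+ *	xTaskCreate( vWorkerTask, "Worker", configMINIMAL_STACK_SIZE, NULL,
+ *				 tskIDLE_PRIORITY + 1, &xWorkerHandle );
+ *
+ *	// The worker is no longer scheduled, whatever its priority...
+ *	vTaskSuspend( xWorkerHandle );
+ *
+ *	// ...until it is resumed, at which point it may preempt the caller if it
+ *	// has the higher priority.  Suspension does not nest: a single call to
+ *	// vTaskResume() readies the task however many times it was suspended.
+ *	vTaskResume( xWorkerHandle );
+ */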
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+	static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
+	{
+	BaseType_t xReturn = pdFALSE;
+	const TCB_t * const pxTCB = ( TCB_t * ) xTask;
+
+		/* Accesses xPendingReadyList so must be called from a critical
+		section. */
+
+		/* It does not make sense to check if the calling task is suspended. */
+		configASSERT( xTask );
+
+		/* Is the task being resumed actually in the suspended list? */
+		if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
+		{
+			/* Has the task already been resumed from within an ISR? */
+			if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
+			{
+				/* Is it in the suspended list because it is in the Suspended
+				state, or because it is blocked with no timeout? */
+				if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961.  The cast is only redundant when NULL is used. */
+				{
+					xReturn = pdTRUE;
+				}
+				else
+				{
+					mtCOVERAGE_TEST_MARKER();
+				}
+			}
+			else
+			{
+				mtCOVERAGE_TEST_MARKER();
+			}
+		}
+		else
+		{
+			mtCOVERAGE_TEST_MARKER();
+		}
+
+		return xReturn;
+	} /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
+
+#endif /* INCLUDE_vTaskSuspend */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+	void vTaskResume( TaskHandle_t xTaskToResume )
+	{
+	TCB_t * const pxTCB = ( TCB_t * ) xTaskToResume;
+
+		/* It does not make sense to resume the calling task. */
+		configASSERT( xTaskToResume );
+
+		/* The parameter cannot be NULL as it is impossible to resume the
+		currently executing task. */
+		if( ( pxTCB != NULL ) && ( pxTCB != pxCurrentTCB ) )
+		{
+			taskENTER_CRITICAL();
+			{
+				if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
+				{
+					traceTASK_RESUME( pxTCB );
+
+					/* The ready list can be accessed even if the scheduler is
+					suspended because this is inside a critical section. */
+					( void ) uxListRemove( &( pxTCB->xStateListItem ) );
+					prvAddTaskToReadyList( pxTCB );
+
+					/* A higher priority task may have just been resumed. */
+					if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
+					{
+						/* This yield may not cause the task just resumed to run,
+						but will leave the lists in the correct state for the
+						next yield. */
+						taskYIELD_IF_USING_PREEMPTION();
+					}
+					else
+					{
+						mtCOVERAGE_TEST_MARKER();
+					}
+				}
+				else
+				{
+					mtCOVERAGE_TEST_MARKER();
+				}
+			}
+			taskEXIT_CRITICAL();
+		}
+		else
+		{
+			mtCOVERAGE_TEST_MARKER();
+		}
+	}
+
+#endif /* INCLUDE_vTaskSuspend */
+
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
+
+	BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
+	{
+	BaseType_t xYieldRequired = pdFALSE;
+	TCB_t * const pxTCB = ( TCB_t * ) xTaskToResume;
+	UBaseType_t uxSavedInterruptStatus;
+
+		configASSERT( xTaskToResume );
+
+		/* RTOS ports that support interrupt nesting have the concept of a
+		maximum system call (or maximum API call) interrupt priority.
+		Interrupts that are above the maximum system call priority are kept
+		permanently enabled, even when the RTOS kernel is in a critical section,
+		but cannot make any calls to FreeRTOS API functions.
If configASSERT() + is defined in FreeRTOSConfig.h then + portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has + been assigned a priority above the configured maximum system call + priority. Only FreeRTOS functions that end in FromISR can be called + from interrupts that have been assigned a priority at or (logically) + below the maximum system call interrupt priority. FreeRTOS maintains a + separate interrupt safe API to ensure interrupt entry is as fast and as + simple as possible. More information (albeit Cortex-M specific) is + provided on the following link: + http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) + { + traceTASK_RESUME_FROM_ISR( pxTCB ); + + /* Check the ready lists can be accessed. */ + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + /* Ready lists can be accessed so move the task from the + suspended list to the ready list directly. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + xYieldRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed or ready lists cannot be accessed so the task + is held in the pending ready list until the scheduler is + unsuspended. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xYieldRequired; + } + +#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */ +/*-----------------------------------------------------------*/ + +void vTaskStartScheduler( void ) +{ +BaseType_t xReturn; + + /* Add the idle task at the lowest priority. */ + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + StaticTask_t *pxIdleTaskTCBBuffer = NULL; + StackType_t *pxIdleTaskStackBuffer = NULL; + uint32_t ulIdleTaskStackSize; + + /* The Idle task is created using user provided RAM - obtain the + address of the RAM then create the idle task. */ + vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); + xIdleTaskHandle = xTaskCreateStatic( prvIdleTask, + configIDLE_TASK_NAME, + ulIdleTaskStackSize, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), + pxIdleTaskStackBuffer, + pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + + if( xIdleTaskHandle != NULL ) + { + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + } + #else + { + /* The Idle task is being created using dynamically allocated RAM. */ + xReturn = xTaskCreate( prvIdleTask, + configIDLE_TASK_NAME, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), + &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. 
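+
+/* Example usage for xTaskResumeFromISR() (illustrative sketch; the ISR name
+and xHandlerTask are assumptions). Note the FreeRTOS documentation favours a
+semaphore or task notification when an interrupt must never be missed:
+
+	void vAnExampleISR( void )
+	{
+	BaseType_t xYieldRequired;
+
+		// Resume the suspended handler task.
+		xYieldRequired = xTaskResumeFromISR( xHandlerTask );
+
+		// Request a context switch on exit if the resumed task should run
+		// before the interrupted task (the yield macro name is port specific).
+		portYIELD_FROM_ISR( xYieldRequired );
+	}
+*/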
*/ + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + + #if ( configUSE_TIMERS == 1 ) + { + if( xReturn == pdPASS ) + { + xReturn = xTimerCreateTimerTask(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TIMERS */ + + if( xReturn == pdPASS ) + { + /* freertos_tasks_c_additions_init() should only be called if the user + definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is + the only macro called by the function. */ + #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT + { + freertos_tasks_c_additions_init(); + } + #endif + + /* Interrupts are turned off here, to ensure a tick does not occur + before or during the call to xPortStartScheduler(). The stacks of + the created tasks contain a status word with interrupts switched on + so interrupts will automatically get re-enabled when the first task + starts to run. */ + portDISABLE_INTERRUPTS(); + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + /* Switch Newlib's _impure_ptr variable to point to the _reent + structure specific to the task that will run first. */ + _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); + } + #endif /* configUSE_NEWLIB_REENTRANT */ + + xNextTaskUnblockTime = portMAX_DELAY; + xSchedulerRunning = pdTRUE; + xTickCount = ( TickType_t ) 0U; + + /* If configGENERATE_RUN_TIME_STATS is defined then the following + macro must be defined to configure the timer/counter used to generate + the run time counter time base. NOTE: If configGENERATE_RUN_TIME_STATS + is set to 0 and the following line fails to build then ensure you do not + have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your + FreeRTOSConfig.h file. */ + portCONFIGURE_TIMER_FOR_RUN_TIME_STATS(); + + /* Setting up the timer tick is hardware specific and thus in the + portable interface. */ + if( xPortStartScheduler() != pdFALSE ) + { + /* Should not reach here as if the scheduler is running the + function will not return. */ + } + else + { + /* Should only reach here if a task calls xTaskEndScheduler(). */ + } + } + else + { + /* This line will only be reached if the kernel could not be started, + because there was not enough FreeRTOS heap to create the idle task + or the timer task. */ + configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY ); + } + + /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0, + meaning xIdleTaskHandle is not used anywhere else. */ + ( void ) xIdleTaskHandle; +} +/*-----------------------------------------------------------*/ + +void vTaskEndScheduler( void ) +{ + /* Stop the scheduler interrupts and call the portable scheduler end + routine so the original ISRs can be restored if necessary. The port + layer must ensure interrupts enable bit is left in the correct state. */ + portDISABLE_INTERRUPTS(); + xSchedulerRunning = pdFALSE; + vPortEndScheduler(); +} +/*----------------------------------------------------------*/ + +void vTaskSuspendAll( void ) +{ + /* A critical section is not required as the variable is of type + BaseType_t. Please read Richard Barry's reply in the following link to a + post in the FreeRTOS support forum before reporting this as a bug! 
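+
+/* Example usage: when configSUPPORT_STATIC_ALLOCATION is 1 the application
+must supply the idle task's RAM through the callback used above. A minimal
+sketch (the static buffer sizing is an assumption):
+
+	void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer,
+										StackType_t **ppxIdleTaskStackBuffer,
+										uint32_t *pulIdleTaskStackSize )
+	{
+	static StaticTask_t xIdleTaskTCB;
+	static StackType_t uxIdleTaskStack[ configMINIMAL_STACK_SIZE ];
+
+		// Hand the statically allocated TCB and stack back to the kernel.
+		*ppxIdleTaskTCBBuffer = &xIdleTaskTCB;
+		*ppxIdleTaskStackBuffer = uxIdleTaskStack;
+		*pulIdleTaskStackSize = configMINIMAL_STACK_SIZE;
+	}
+*/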
- + http://goo.gl/wu4acr */ + ++uxSchedulerSuspended; +} +/*----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE != 0 ) + + static TickType_t prvGetExpectedIdleTime( void ) + { + TickType_t xReturn; + UBaseType_t uxHigherPriorityReadyTasks = pdFALSE; + + /* uxHigherPriorityReadyTasks takes care of the case where + configUSE_PREEMPTION is 0, so there may be tasks above the idle priority + task that are in the Ready state, even though the idle task is + running. */ + #if( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) + { + if( uxTopReadyPriority > tskIDLE_PRIORITY ) + { + uxHigherPriorityReadyTasks = pdTRUE; + } + } + #else + { + const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01; + + /* When port optimised task selection is used the uxTopReadyPriority + variable is used as a bit map. If bits other than the least + significant bit are set then there are tasks that have a priority + above the idle priority that are in the Ready state. This takes + care of the case where the co-operative scheduler is in use. */ + if( uxTopReadyPriority > uxLeastSignificantBit ) + { + uxHigherPriorityReadyTasks = pdTRUE; + } + } + #endif + + if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY ) + { + xReturn = 0; + } + else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 ) + { + /* There are other idle priority tasks in the ready state. If + time slicing is used then the very next tick interrupt must be + processed. */ + xReturn = 0; + } + else if( uxHigherPriorityReadyTasks != pdFALSE ) + { + /* There are tasks in the Ready state that have a priority above the + idle priority. This path can only be reached if + configUSE_PREEMPTION is 0. */ + xReturn = 0; + } + else + { + xReturn = xNextTaskUnblockTime - xTickCount; + } + + return xReturn; + } + +#endif /* configUSE_TICKLESS_IDLE */ +/*----------------------------------------------------------*/ + +BaseType_t xTaskResumeAll( void ) +{ +TCB_t *pxTCB = NULL; +BaseType_t xAlreadyYielded = pdFALSE; + + /* If uxSchedulerSuspended is zero then this function does not match a + previous call to vTaskSuspendAll(). */ + configASSERT( uxSchedulerSuspended ); + + /* It is possible that an ISR caused a task to be removed from an event + list while the scheduler was suspended. If this was the case then the + removed task will have been added to the xPendingReadyList. Once the + scheduler has been resumed it is safe to move all the pending ready + tasks from this list into their appropriate ready list. */ + taskENTER_CRITICAL(); + { + --uxSchedulerSuspended; + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U ) + { + /* Move any readied tasks from the pending list into the + appropriate ready list. */ + while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE ) + { + pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + /* If the moved task has a priority higher than the current + task then a yield must be performed. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + if( pxTCB != NULL ) + { + /* A task was unblocked while the scheduler was suspended, + which may have prevented the next unblock time from being + re-calculated, in which case re-calculate it now. 
Mainly + important for low power tickless implementations, where + this can prevent an unnecessary exit from low power + state. */ + prvResetNextTaskUnblockTime(); + } + + /* If any ticks occurred while the scheduler was suspended then + they should be processed now. This ensures the tick count does + not slip, and that any delayed tasks are resumed at the correct + time. */ + { + UBaseType_t uxPendedCounts = uxPendedTicks; /* Non-volatile copy. */ + + if( uxPendedCounts > ( UBaseType_t ) 0U ) + { + do + { + if( xTaskIncrementTick() != pdFALSE ) + { + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + --uxPendedCounts; + } while( uxPendedCounts > ( UBaseType_t ) 0U ); + + uxPendedTicks = 0; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + if( xYieldPending != pdFALSE ) + { + #if( configUSE_PREEMPTION != 0 ) + { + xAlreadyYielded = pdTRUE; + } + #endif + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + return xAlreadyYielded; +} +/*-----------------------------------------------------------*/ + +TickType_t xTaskGetTickCount( void ) +{ +TickType_t xTicks; + + /* Critical section required if running on a 16 bit processor. */ + portTICK_TYPE_ENTER_CRITICAL(); + { + xTicks = xTickCount; + } + portTICK_TYPE_EXIT_CRITICAL(); + + return xTicks; +} +/*-----------------------------------------------------------*/ + +TickType_t xTaskGetTickCountFromISR( void ) +{ +TickType_t xReturn; +UBaseType_t uxSavedInterruptStatus; + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); + { + xReturn = xTickCount; + } + portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +UBaseType_t uxTaskGetNumberOfTasks( void ) +{ + /* A critical section is not required because the variables are of type + BaseType_t. */ + return uxCurrentNumberOfTasks; +} +/*-----------------------------------------------------------*/ + +char *pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +{ +TCB_t *pxTCB; + + /* If null is passed in here then the name of the calling task is being + queried. 
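+
+/* Example usage for vTaskSuspendAll()/xTaskResumeAll() (illustrative
+sketch): unlike a critical section this leaves interrupts enabled, so it
+suits longer operations that must not be preempted by another task but can
+tolerate interrupts:
+
+	vTaskSuspendAll();
+	{
+		// Walk a structure shared with other tasks. ISRs can still run, so
+		// this does not protect data that is also accessed from interrupts.
+	}
+	( void ) xTaskResumeAll();
+*/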
*/ + pxTCB = prvGetTCBFromHandle( xTaskToQuery ); + configASSERT( pxTCB ); + return &( pxTCB->pcTaskName[ 0 ] ); +} +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetHandle == 1 ) + + static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) + { + TCB_t *pxNextTCB, *pxFirstTCB, *pxReturn = NULL; + UBaseType_t x; + char cNextChar; + + /* This function is called with the scheduler suspended. */ + + if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) + { + listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); + + do + { + listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); + + /* Check each character in the name looking for a match or + mismatch. */ + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + { + cNextChar = pxNextTCB->pcTaskName[ x ]; + + if( cNextChar != pcNameToQuery[ x ] ) + { + /* Characters didn't match. */ + break; + } + else if( cNextChar == 0x00 ) + { + /* Both strings terminated, a match must have been + found. */ + pxReturn = pxNextTCB; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + if( pxReturn != NULL ) + { + /* The handle has been found. */ + break; + } + + } while( pxNextTCB != pxFirstTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return pxReturn; + } + +#endif /* INCLUDE_xTaskGetHandle */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetHandle == 1 ) + + TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + { + UBaseType_t uxQueue = configMAX_PRIORITIES; + TCB_t* pxTCB; + + /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */ + configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN ); + + vTaskSuspendAll(); + { + /* Search the ready lists. */ + do + { + uxQueue--; + pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery ); + + if( pxTCB != NULL ) + { + /* Found the handle. */ + break; + } + + } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + /* Search the delayed lists. */ + if( pxTCB == NULL ) + { + pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery ); + } + + if( pxTCB == NULL ) + { + pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery ); + } + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + if( pxTCB == NULL ) + { + /* Search the suspended list. */ + pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery ); + } + } + #endif + + #if( INCLUDE_vTaskDelete == 1 ) + { + if( pxTCB == NULL ) + { + /* Search the deleted list. */ + pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery ); + } + } + #endif + } + ( void ) xTaskResumeAll(); + + return ( TaskHandle_t ) pxTCB; + } + +#endif /* INCLUDE_xTaskGetHandle */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime ) + { + UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES; + + vTaskSuspendAll(); + { + /* Is there a space in the array for each task in the system? 
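+
+/* Example usage for xTaskGetHandle() (illustrative sketch; "Tx" is an
+assumed task name). The lookup walks every task list, so it is slow and best
+done once, for example during initialisation, with the handle then cached:
+
+	TaskHandle_t xTxTask = xTaskGetHandle( "Tx" );
+
+	if( xTxTask != NULL )
+	{
+		// The task exists - the cached handle can now be used directly.
+		vTaskSuspend( xTxTask );
+	}
+*/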
*/
+			if( uxArraySize >= uxCurrentNumberOfTasks )
+			{
+				/* Fill in a TaskStatus_t structure with information on each
+				task in the Ready state. */
+				do
+				{
+					uxQueue--;
+					uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
+
+				} while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+
+				/* Fill in a TaskStatus_t structure with information on each
+				task in the Blocked state. */
+				uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
+				uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );
+
+				#if( INCLUDE_vTaskDelete == 1 )
+				{
+					/* Fill in a TaskStatus_t structure with information on
+					each task that has been deleted but not yet cleaned up. */
+					uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
+				}
+				#endif
+
+				#if ( INCLUDE_vTaskSuspend == 1 )
+				{
+					/* Fill in a TaskStatus_t structure with information on
+					each task in the Suspended state. */
+					uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
+				}
+				#endif
+
+				#if ( configGENERATE_RUN_TIME_STATS == 1 )
+				{
+					if( pulTotalRunTime != NULL )
+					{
+						#ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
+							portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
+						#else
+							*pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
+						#endif
+					}
+				}
+				#else
+				{
+					if( pulTotalRunTime != NULL )
+					{
+						*pulTotalRunTime = 0;
+					}
+				}
+				#endif
+			}
+			else
+			{
+				mtCOVERAGE_TEST_MARKER();
+			}
+		}
+		( void ) xTaskResumeAll();
+
+		return uxTask;
+	}
+
+#endif /* configUSE_TRACE_FACILITY */
+/*----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+	TaskHandle_t xTaskGetIdleTaskHandle( void )
+	{
+		/* If xTaskGetIdleTaskHandle() is called before the scheduler has been
+		started, then xIdleTaskHandle will be NULL. */
+		configASSERT( ( xIdleTaskHandle != NULL ) );
+		return xIdleTaskHandle;
+	}
+
+#endif /* INCLUDE_xTaskGetIdleTaskHandle */
+/*----------------------------------------------------------*/
+
+/* This conditional compilation should use inequality to 0, not equality to 1.
+This is to ensure vTaskStepTick() is available when user defined low power mode
+implementations require configUSE_TICKLESS_IDLE to be set to a value other than
+1. */
+#if ( configUSE_TICKLESS_IDLE != 0 )
+
+	void vTaskStepTick( const TickType_t xTicksToJump )
+	{
+		/* Correct the tick count value after a period during which the tick
+		was suppressed. Note this does *not* call the tick hook function for
+		each stepped tick. */
+		configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
+		xTickCount += xTicksToJump;
+		traceINCREASE_TICK_COUNT( xTicksToJump );
+	}
+
+#endif /* configUSE_TICKLESS_IDLE */
+/*----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+	BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
+	{
+	TCB_t *pxTCB = ( TCB_t * ) xTask;
+	BaseType_t xReturn;
+
+		configASSERT( pxTCB );
+
+		vTaskSuspendAll();
+		{
+			/* A task can only be prematurely removed from the Blocked state if
+			it is actually in the Blocked state. */
+			if( eTaskGetState( xTask ) == eBlocked )
+			{
+				xReturn = pdPASS;
+
+				/* Remove the reference to the task from the blocked list.
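+
+/* Example usage for uxTaskGetSystemState() (illustrative sketch; assumes
+configUSE_TRACE_FACILITY is 1 and the heap can satisfy the allocation):
+
+	volatile UBaseType_t uxArraySize = uxTaskGetNumberOfTasks();
+	TaskStatus_t *pxStatusArray;
+	uint32_t ulTotalRunTime;
+
+	// One TaskStatus_t per task. If tasks are created between the two calls
+	// the array may be too small, in which case 0 is returned and nothing is
+	// filled in.
+	pxStatusArray = pvPortMalloc( uxArraySize * sizeof( TaskStatus_t ) );
+
+	if( pxStatusArray != NULL )
+	{
+		uxArraySize = uxTaskGetSystemState( pxStatusArray, uxArraySize, &ulTotalRunTime );
+		// ...format or log the snapshot...
+		vPortFree( pxStatusArray );
+	}
+*/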
An + interrupt won't touch the xStateListItem because the + scheduler is suspended. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + + /* Is the task waiting on an event also? If so remove it from + the event list too. Interrupts can touch the event list item, + even though the scheduler is suspended, so a critical section + is used. */ + taskENTER_CRITICAL(); + { + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + pxTCB->ucDelayAborted = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + /* Place the unblocked task into the appropriate ready list. */ + prvAddTaskToReadyList( pxTCB ); + + /* A task being unblocked cannot cause an immediate context + switch if preemption is turned off. */ + #if ( configUSE_PREEMPTION == 1 ) + { + /* Preemption is on, but a context switch should only be + performed if the unblocked task has a priority that is + equal to or higher than the currently executing task. */ + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* Pend the yield to be performed when the scheduler + is unsuspended. */ + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_PREEMPTION */ + } + else + { + xReturn = pdFAIL; + } + } + ( void ) xTaskResumeAll(); + + return xReturn; + } + +#endif /* INCLUDE_xTaskAbortDelay */ +/*----------------------------------------------------------*/ + +BaseType_t xTaskIncrementTick( void ) +{ +TCB_t * pxTCB; +TickType_t xItemValue; +BaseType_t xSwitchRequired = pdFALSE; + + /* Called by the portable layer each time a tick interrupt occurs. + Increments the tick then checks to see if the new tick value will cause any + tasks to be unblocked. */ + traceTASK_INCREMENT_TICK( xTickCount ); + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + /* Minor optimisation. The tick count cannot change in this + block. */ + const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; + + /* Increment the RTOS tick, switching the delayed and overflowed + delayed lists if it wraps to 0. */ + xTickCount = xConstTickCount; + + if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */ + { + taskSWITCH_DELAYED_LISTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* See if this tick has made a timeout expire. Tasks are stored in + the queue in the order of their wake time - meaning once one task + has been found whose block time has not expired there is no need to + look any further down the list. */ + if( xConstTickCount >= xNextTaskUnblockTime ) + { + for( ;; ) + { + if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) + { + /* The delayed list is empty. Set xNextTaskUnblockTime + to the maximum possible value so it is extremely + unlikely that the + if( xTickCount >= xNextTaskUnblockTime ) test will pass + next time through. */ + xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + break; + } + else + { + /* The delayed list is not empty, get the value of the + item at the head of the delayed list. This is the time + at which the task at the head of the delayed list must + be removed from the Blocked state. 
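+
+/* Example usage for xTaskAbortDelay() (illustrative sketch; xRxTask is an
+assumed handle and INCLUDE_xTaskAbortDelay must be 1):
+
+	// Force a task out of the Blocked state before its timeout expires.
+	if( xTaskAbortDelay( xRxTask ) == pdPASS )
+	{
+		// xRxTask was blocked; its blocking call will now return with a
+		// timed-out status.
+	}
+	else
+	{
+		// pdFAIL - the task was not in the Blocked state.
+	}
+*/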
*/ + pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); + xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) ); + + if( xConstTickCount < xItemValue ) + { + /* It is not time to unblock this item yet, but the + item value is the time at which the task at the head + of the blocked list must be removed from the Blocked + state - so record the item value in + xNextTaskUnblockTime. */ + xNextTaskUnblockTime = xItemValue; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* It is time to remove the item from the Blocked state. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + + /* Is the task waiting on an event also? If so remove + it from the event list. */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Place the unblocked task into the appropriate ready + list. */ + prvAddTaskToReadyList( pxTCB ); + + /* A task being unblocked cannot cause an immediate + context switch if preemption is turned off. */ + #if ( configUSE_PREEMPTION == 1 ) + { + /* Preemption is on, but a context switch should + only be performed if the unblocked task has a + priority that is equal to or higher than the + currently executing task. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_PREEMPTION */ + } + } + } + + /* Tasks of equal priority to the currently running task will share + processing time (time slice) if preemption is on, and the application + writer has not explicitly turned time slicing off. */ + #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) + { + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ + + #if ( configUSE_TICK_HOOK == 1 ) + { + /* Guard against the tick hook being called when the pended tick + count is being unwound (when the scheduler is being unlocked). */ + if( uxPendedTicks == ( UBaseType_t ) 0U ) + { + vApplicationTickHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TICK_HOOK */ + } + else + { + ++uxPendedTicks; + + /* The tick hook gets called at regular intervals, even if the + scheduler is locked. */ + #if ( configUSE_TICK_HOOK == 1 ) + { + vApplicationTickHook(); + } + #endif + } + + #if ( configUSE_PREEMPTION == 1 ) + { + if( xYieldPending != pdFALSE ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_PREEMPTION */ + + return xSwitchRequired; +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction ) + { + TCB_t *xTCB; + + /* If xTask is NULL then it is the task hook of the calling task that is + getting set. */ + if( xTask == NULL ) + { + xTCB = ( TCB_t * ) pxCurrentTCB; + } + else + { + xTCB = ( TCB_t * ) xTask; + } + + /* Save the hook function in the TCB. A critical section is required as + the value can be accessed from an interrupt. 
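+
+/* Example usage for the tick hook called above (illustrative sketch;
+requires configUSE_TICK_HOOK set to 1 in FreeRTOSConfig.h). The hook runs
+from the tick interrupt, so it must be short and use only FromISR APIs:
+
+	void vApplicationTickHook( void )
+	{
+	static uint32_t ulTicks = 0;
+
+		// A tiny amount of periodic work, e.g. every 1000 ticks.
+		if( ++ulTicks >= 1000UL )
+		{
+			ulTicks = 0;
+			// ...e.g. set an event using an ISR safe API...
+		}
+	}
+*/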
*/ + taskENTER_CRITICAL(); + xTCB->pxTaskTag = pxHookFunction; + taskEXIT_CRITICAL(); + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask ) + { + TCB_t *xTCB; + TaskHookFunction_t xReturn; + + /* If xTask is NULL then we are setting our own task hook. */ + if( xTask == NULL ) + { + xTCB = ( TCB_t * ) pxCurrentTCB; + } + else + { + xTCB = ( TCB_t * ) xTask; + } + + /* Save the hook function in the TCB. A critical section is required as + the value can be accessed from an interrupt. */ + taskENTER_CRITICAL(); + { + xReturn = xTCB->pxTaskTag; + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter ) + { + TCB_t *xTCB; + BaseType_t xReturn; + + /* If xTask is NULL then we are calling our own task hook. */ + if( xTask == NULL ) + { + xTCB = ( TCB_t * ) pxCurrentTCB; + } + else + { + xTCB = ( TCB_t * ) xTask; + } + + if( xTCB->pxTaskTag != NULL ) + { + xReturn = xTCB->pxTaskTag( pvParameter ); + } + else + { + xReturn = pdFAIL; + } + + return xReturn; + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + +void vTaskSwitchContext( void ) +{ + if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) + { + /* The scheduler is currently suspended - do not allow a context + switch. */ + xYieldPending = pdTRUE; + } + else + { + xYieldPending = pdFALSE; + traceTASK_SWITCHED_OUT(); + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); + #else + ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + #endif + + /* Add the amount of time the task has been running to the + accumulated time so far. The time the task started running was + stored in ulTaskSwitchedInTime. Note that there is no overflow + protection here so count values are only valid until the timer + overflows. The guard against negative values is to protect + against suspect run time stat counter implementations - which + are provided by the application, not the kernel. */ + if( ulTotalRunTime > ulTaskSwitchedInTime ) + { + pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + ulTaskSwitchedInTime = ulTotalRunTime; + } + #endif /* configGENERATE_RUN_TIME_STATS */ + + /* Check for stack overflow, if configured. */ + taskCHECK_FOR_STACK_OVERFLOW(); + + /* Select a new task to run using either the generic C or port + optimised asm code. */ + taskSELECT_HIGHEST_PRIORITY_TASK(); + traceTASK_SWITCHED_IN(); + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + /* Switch Newlib's _impure_ptr variable to point to the _reent + structure specific to this task. */ + _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); + } + #endif /* configUSE_NEWLIB_REENTRANT */ + } +} +/*-----------------------------------------------------------*/ + +void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait ) +{ + configASSERT( pxEventList ); + + /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE + SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. 
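+
+/* Example usage for the task tag functions above (illustrative sketch;
+prvWorkerHook is an assumed application callback). The kernel never
+interprets the tag; a common use is a per-task callback invoked with
+xTaskCallApplicationTaskHook():
+
+	static BaseType_t prvWorkerHook( void *pvParameter )
+	{
+		( void ) pvParameter;
+		// ...application defined processing...
+		return pdPASS;
+	}
+
+	// Attach the hook to the calling task (NULL means "this task")...
+	vTaskSetApplicationTaskTag( NULL, prvWorkerHook );
+
+	// ...then invoke whichever hook the task has registered.
+	( void ) xTaskCallApplicationTaskHook( NULL, NULL );
+*/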
*/
+
+	/* Place the event list item of the TCB in the appropriate event list.
+	This is placed in the list in priority order so the highest priority task
+	is the first to be woken by the event. The queue that contains the event
+	list is locked, preventing simultaneous access from interrupts. */
+	vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );
+
+	prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
+}
+/*-----------------------------------------------------------*/
+
+void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
+{
+	configASSERT( pxEventList );
+
+	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
+	the event groups implementation. */
+	configASSERT( uxSchedulerSuspended != 0 );
+
+	/* Store the item value in the event list item. It is safe to access the
+	event list item here as interrupts won't access the event list item of a
+	task that is not in the Blocked state. */
+	listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
+
+	/* Place the event list item of the TCB at the end of the appropriate event
+	list. It is safe to access the event list here because it is part of an
+	event group implementation - and interrupts don't access event groups
+	directly (instead they access them indirectly by pending function calls to
+	the task level). */
+	vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );
+
+	prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
+}
+/*-----------------------------------------------------------*/
+
+#if( configUSE_TIMERS == 1 )
+
+	void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
+	{
+		configASSERT( pxEventList );
+
+		/* This function should not be called by application code hence the
+		'Restricted' in its name. It is not part of the public API. It is
+		designed for use by kernel code, and has special calling requirements -
+		it should be called with the scheduler suspended. */
+
+
+		/* Place the event list item of the TCB in the appropriate event list.
+		In this case it is assumed that this is the only task that is going to
+		be waiting on this event list, so the faster vListInsertEnd() function
+		can be used in place of vListInsert(). */
+		vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );
+
+		/* If the task should block indefinitely then set the block time to a
+		value that will be recognised as an indefinite delay inside the
+		prvAddCurrentTaskToDelayedList() function. */
+		if( xWaitIndefinitely != pdFALSE )
+		{
+			xTicksToWait = portMAX_DELAY;
+		}
+
+		traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
+		prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
+	}
+
+#endif /* configUSE_TIMERS */
+/*-----------------------------------------------------------*/
+
+BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
+{
+TCB_t *pxUnblockedTCB;
+BaseType_t xReturn;
+
+	/* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
+	called from a critical section within an ISR. */
+
+	/* The event list is sorted in priority order, so the first in the list can
+	be removed as it is known to be the highest priority. Remove the TCB from
+	the delayed list, and add it to the ready list.
+
+	If an event is for a queue that is locked then this function will never
+	get called - the lock count on the queue will get modified instead. This
+	means exclusive access to the event list is guaranteed here.
+
+	This function assumes that a check has already been made to ensure that
+	pxEventList is not empty. */
+	pxUnblockedTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxEventList );
+	configASSERT( pxUnblockedTCB );
+	( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) );
+
+	if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
+	{
+		( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
+		prvAddTaskToReadyList( pxUnblockedTCB );
+	}
+	else
+	{
+		/* The delayed and ready lists cannot be accessed, so hold this task
+		pending until the scheduler is resumed. */
+		vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
+	}
+
+	if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
+	{
+		/* Return true if the task removed from the event list has a higher
+		priority than the calling task. This allows the calling task to know if
+		it should force a context switch now. */
+		xReturn = pdTRUE;
+
+		/* Mark that a yield is pending in case the user is not using the
+		"xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
+		xYieldPending = pdTRUE;
+	}
+	else
+	{
+		xReturn = pdFALSE;
+	}
+
+	#if( configUSE_TICKLESS_IDLE != 0 )
+	{
+		/* If a task is blocked on a kernel object then xNextTaskUnblockTime
+		might be set to the blocked task's time out time. If the task is
+		unblocked for a reason other than a timeout xNextTaskUnblockTime is
+		normally left unchanged, because it is automatically reset to a new
+		value when the tick count equals xNextTaskUnblockTime. However if
+		tickless idling is used it might be more important to enter sleep mode
+		at the earliest possible time - so reset xNextTaskUnblockTime here to
+		ensure it is updated at the earliest possible time. */
+		prvResetNextTaskUnblockTime();
+	}
+	#endif
+
+	return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
+{
+TCB_t *pxUnblockedTCB;
+
+	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
+	the event flags implementation. */
+	configASSERT( uxSchedulerSuspended != pdFALSE );
+
+	/* Store the new item value in the event list. */
+	listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
+
+	/* Remove the event list item from the event flag. Interrupts do not access
+	event flags. */
+	pxUnblockedTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxEventListItem );
+	configASSERT( pxUnblockedTCB );
+	( void ) uxListRemove( pxEventListItem );
+
+	/* Remove the task from the delayed list and add it to the ready list. The
+	scheduler is suspended so interrupts will not be accessing the ready
+	lists. */
+	( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
+	prvAddTaskToReadyList( pxUnblockedTCB );
+
+	if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
+	{
+		/* The unblocked task has a priority above that of the calling task, so
+		a context switch is required. This function is called with the
+		scheduler suspended so xYieldPending is set so the context switch
+		occurs as soon as the scheduler is resumed (unsuspended).
*/ + xYieldPending = pdTRUE; + } +} +/*-----------------------------------------------------------*/ + +void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) +{ + configASSERT( pxTimeOut ); + taskENTER_CRITICAL(); + { + pxTimeOut->xOverflowCount = xNumOfOverflows; + pxTimeOut->xTimeOnEntering = xTickCount; + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) +{ + /* For internal use only as it does not use a critical section. */ + pxTimeOut->xOverflowCount = xNumOfOverflows; + pxTimeOut->xTimeOnEntering = xTickCount; +} +/*-----------------------------------------------------------*/ + +BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ) +{ +BaseType_t xReturn; + + configASSERT( pxTimeOut ); + configASSERT( pxTicksToWait ); + + taskENTER_CRITICAL(); + { + /* Minor optimisation. The tick count cannot change in this block. */ + const TickType_t xConstTickCount = xTickCount; + const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering; + + #if( INCLUDE_xTaskAbortDelay == 1 ) + if( pxCurrentTCB->ucDelayAborted != pdFALSE ) + { + /* The delay was aborted, which is not the same as a time out, + but has the same result. */ + pxCurrentTCB->ucDelayAborted = pdFALSE; + xReturn = pdTRUE; + } + else + #endif + + #if ( INCLUDE_vTaskSuspend == 1 ) + if( *pxTicksToWait == portMAX_DELAY ) + { + /* If INCLUDE_vTaskSuspend is set to 1 and the block time + specified is the maximum block time then the task should block + indefinitely, and therefore never time out. */ + xReturn = pdFALSE; + } + else + #endif + + if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */ + { + /* The tick count is greater than the time at which + vTaskSetTimeout() was called, but has also overflowed since + vTaskSetTimeOut() was called. It must have wrapped all the way + around and gone past again. This passed since vTaskSetTimeout() + was called. */ + xReturn = pdTRUE; + } + else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */ + { + /* Not a genuine timeout. Adjust parameters for time remaining. 
*/ + *pxTicksToWait -= xElapsedTime; + vTaskInternalSetTimeOutState( pxTimeOut ); + xReturn = pdFALSE; + } + else + { + *pxTicksToWait = 0; + xReturn = pdTRUE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +void vTaskMissedYield( void ) +{ + xYieldPending = pdTRUE; +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask ) + { + UBaseType_t uxReturn; + TCB_t *pxTCB; + + if( xTask != NULL ) + { + pxTCB = ( TCB_t * ) xTask; + uxReturn = pxTCB->uxTaskNumber; + } + else + { + uxReturn = 0U; + } + + return uxReturn; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle ) + { + TCB_t *pxTCB; + + if( xTask != NULL ) + { + pxTCB = ( TCB_t * ) xTask; + pxTCB->uxTaskNumber = uxHandle; + } + } + +#endif /* configUSE_TRACE_FACILITY */ + +/* + * ----------------------------------------------------------- + * The Idle task. + * ---------------------------------------------------------- + * + * The portTASK_FUNCTION() macro is used to allow port/compiler specific + * language extensions. The equivalent prototype for this function is: + * + * void prvIdleTask( void *pvParameters ); + * + */ +static portTASK_FUNCTION( prvIdleTask, pvParameters ) +{ + /* Stop warnings. */ + ( void ) pvParameters; + + /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE + SCHEDULER IS STARTED. **/ + + /* In case a task that has a secure context deletes itself, in which case + the idle task is responsible for deleting the task's secure context, if + any. */ + portTASK_CALLS_SECURE_FUNCTIONS(); + + for( ;; ) + { + /* See if any tasks have deleted themselves - if so then the idle task + is responsible for freeing the deleted task's TCB and stack. */ + prvCheckTasksWaitingTermination(); + + #if ( configUSE_PREEMPTION == 0 ) + { + /* If we are not using preemption we keep forcing a task switch to + see if any other task has become available. If we are using + preemption we don't need to do this as any task becoming available + will automatically get the processor anyway. */ + taskYIELD(); + } + #endif /* configUSE_PREEMPTION */ + + #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) + { + /* When using preemption tasks of equal priority will be + timesliced. If a task that is sharing the idle priority is ready + to run then the idle task should yield before the end of the + timeslice. + + A critical region is not required here as we are just reading from + the list, and an occasional incorrect value will not matter. If + the ready list at the idle priority contains more than one task + then a task other than the idle task is ready to execute. */ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 ) + { + taskYIELD(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */ + + #if ( configUSE_IDLE_HOOK == 1 ) + { + extern void vApplicationIdleHook( void ); + + /* Call the user defined function from within the idle task. This + allows the application designer to add background functionality + without the overhead of a separate task. 
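+
+/* Example usage for vTaskSetTimeOutState()/xTaskCheckForTimeOut()
+(illustrative sketch): the pair keeps a total timeout honest across repeated
+blocking attempts, including across tick count overflows:
+
+	TimeOut_t xTimeOut;
+	TickType_t xTicksToWait = pdMS_TO_TICKS( 100 );
+
+	// Record when the wait started.
+	vTaskSetTimeOutState( &xTimeOut );
+
+	// Retry until the operation succeeds or the full 100ms has elapsed;
+	// xTicksToWait is adjusted down to the time remaining on each pass.
+	while( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
+	{
+		// ...attempt the operation, blocking for at most xTicksToWait...
+	}
+*/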
+ NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, + CALL A FUNCTION THAT MIGHT BLOCK. */ + vApplicationIdleHook(); + } + #endif /* configUSE_IDLE_HOOK */ + + /* This conditional compilation should use inequality to 0, not equality + to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when + user defined low power mode implementations require + configUSE_TICKLESS_IDLE to be set to a value other than 1. */ + #if ( configUSE_TICKLESS_IDLE != 0 ) + { + TickType_t xExpectedIdleTime; + + /* It is not desirable to suspend then resume the scheduler on + each iteration of the idle task. Therefore, a preliminary + test of the expected idle time is performed without the + scheduler suspended. The result here is not necessarily + valid. */ + xExpectedIdleTime = prvGetExpectedIdleTime(); + + if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP ) + { + vTaskSuspendAll(); + { + /* Now the scheduler is suspended, the expected idle + time can be sampled again, and this time its value can + be used. */ + configASSERT( xNextTaskUnblockTime >= xTickCount ); + xExpectedIdleTime = prvGetExpectedIdleTime(); + + /* Define the following macro to set xExpectedIdleTime to 0 + if the application does not want + portSUPPRESS_TICKS_AND_SLEEP() to be called. */ + configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime ); + + if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP ) + { + traceLOW_POWER_IDLE_BEGIN(); + portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ); + traceLOW_POWER_IDLE_END(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + ( void ) xTaskResumeAll(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TICKLESS_IDLE */ + } +} +/*-----------------------------------------------------------*/ + +#if( configUSE_TICKLESS_IDLE != 0 ) + + eSleepModeStatus eTaskConfirmSleepModeStatus( void ) + { + /* The idle task exists in addition to the application tasks. */ + const UBaseType_t uxNonApplicationTasks = 1; + eSleepModeStatus eReturn = eStandardSleep; + + if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 ) + { + /* A task was made ready while the scheduler was suspended. */ + eReturn = eAbortSleep; + } + else if( xYieldPending != pdFALSE ) + { + /* A yield was pended while the scheduler was suspended. */ + eReturn = eAbortSleep; + } + else + { + /* If all the tasks are in the suspended list (which might mean they + have an infinite block time rather than actually being suspended) + then it is safe to turn all clocks off and just wait for external + interrupts. 
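+
+/* Example usage for the idle hook referenced above (illustrative sketch;
+requires configUSE_IDLE_HOOK set to 1). The hook must never block and must
+return regularly so the idle task can keep freeing deleted tasks' memory:
+
+	void vApplicationIdleHook( void )
+	{
+		// Background work only, e.g. drop the CPU into a light sleep that
+		// any interrupt (including the tick) will end.
+	}
+*/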
*/ + if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) ) + { + eReturn = eNoTasksWaitingTimeout; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + return eReturn; + } + +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue ) + { + TCB_t *pxTCB; + + if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) + { + pxTCB = prvGetTCBFromHandle( xTaskToSet ); + pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue; + } + } + +#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex ) + { + void *pvReturn = NULL; + TCB_t *pxTCB; + + if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) + { + pxTCB = prvGetTCBFromHandle( xTaskToQuery ); + pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ]; + } + else + { + pvReturn = NULL; + } + + return pvReturn; + } + +#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */ +/*-----------------------------------------------------------*/ + +#if ( portUSING_MPU_WRAPPERS == 1 ) + + void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions ) + { + TCB_t *pxTCB; + + /* If null is passed in here then we are modifying the MPU settings of + the calling task. */ + pxTCB = prvGetTCBFromHandle( xTaskToModify ); + + vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 ); + } + +#endif /* portUSING_MPU_WRAPPERS */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseTaskLists( void ) +{ +UBaseType_t uxPriority; + + for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ ) + { + vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) ); + } + + vListInitialise( &xDelayedTaskList1 ); + vListInitialise( &xDelayedTaskList2 ); + vListInitialise( &xPendingReadyList ); + + #if ( INCLUDE_vTaskDelete == 1 ) + { + vListInitialise( &xTasksWaitingTermination ); + } + #endif /* INCLUDE_vTaskDelete */ + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + vListInitialise( &xSuspendedTaskList ); + } + #endif /* INCLUDE_vTaskSuspend */ + + /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList + using list2. */ + pxDelayedTaskList = &xDelayedTaskList1; + pxOverflowDelayedTaskList = &xDelayedTaskList2; +} +/*-----------------------------------------------------------*/ + +static void prvCheckTasksWaitingTermination( void ) +{ + + /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/ + + #if ( INCLUDE_vTaskDelete == 1 ) + { + TCB_t *pxTCB; + + /* uxDeletedTasksWaitingCleanUp is used to prevent vTaskSuspendAll() + being called too often in the idle task. 
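+
+/* Example usage for the thread local storage pointers above (illustrative
+sketch; index 0 is an assumption and configNUM_THREAD_LOCAL_STORAGE_POINTERS
+must be at least 1). Each task sees its own copy of each pointer:
+
+	// ErrorState_t is an assumed application type.
+	static ErrorState_t xErrorState;
+
+	// Store a per-task pointer against the calling task (NULL = this task).
+	vTaskSetThreadLocalStoragePointer( NULL, 0, &xErrorState );
+
+	// ...later, from the same task...
+	ErrorState_t *pxState = pvTaskGetThreadLocalStoragePointer( NULL, 0 );
+*/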
*/ + while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U ) + { + taskENTER_CRITICAL(); + { + pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + --uxCurrentNumberOfTasks; + --uxDeletedTasksWaitingCleanUp; + } + taskEXIT_CRITICAL(); + + prvDeleteTCB( pxTCB ); + } + } + #endif /* INCLUDE_vTaskDelete */ +} +/*-----------------------------------------------------------*/ + +#if( configUSE_TRACE_FACILITY == 1 ) + + void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState ) + { + TCB_t *pxTCB; + + /* xTask is NULL then get the state of the calling task. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB; + pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName [ 0 ] ); + pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority; + pxTaskStatus->pxStackBase = pxTCB->pxStack; + pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber; + + #if ( configUSE_MUTEXES == 1 ) + { + pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority; + } + #else + { + pxTaskStatus->uxBasePriority = 0; + } + #endif + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter; + } + #else + { + pxTaskStatus->ulRunTimeCounter = 0; + } + #endif + + /* Obtaining the task state is a little fiddly, so is only done if the + value of eState passed into this function is eInvalid - otherwise the + state is just set to whatever is passed in. */ + if( eState != eInvalid ) + { + if( pxTCB == pxCurrentTCB ) + { + pxTaskStatus->eCurrentState = eRunning; + } + else + { + pxTaskStatus->eCurrentState = eState; + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + /* If the task is in the suspended list then there is a + chance it is actually just blocked indefinitely - so really + it should be reported as being in the Blocked state. */ + if( eState == eSuspended ) + { + vTaskSuspendAll(); + { + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + pxTaskStatus->eCurrentState = eBlocked; + } + } + ( void ) xTaskResumeAll(); + } + } + #endif /* INCLUDE_vTaskSuspend */ + } + } + else + { + pxTaskStatus->eCurrentState = eTaskGetState( pxTCB ); + } + + /* Obtaining the stack space takes some time, so the xGetFreeStackSpace + parameter is provided to allow it to be skipped. */ + if( xGetFreeStackSpace != pdFALSE ) + { + #if ( portSTACK_GROWTH > 0 ) + { + pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack ); + } + #else + { + pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack ); + } + #endif + } + else + { + pxTaskStatus->usStackHighWaterMark = 0; + } + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) + { + configLIST_VOLATILE TCB_t *pxNextTCB, *pxFirstTCB; + UBaseType_t uxTask = 0; + + if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) + { + listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); + + /* Populate an TaskStatus_t structure within the + pxTaskStatusArray array for each task that is referenced from + pxList. See the definition of TaskStatus_t in task.h for the + meaning of each TaskStatus_t structure member. 
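+
+/* Example usage for vTaskGetInfo() (illustrative sketch; xHandle is an
+assumed task handle). Cheaper than uxTaskGetSystemState() when only a single
+task's details are needed:
+
+	TaskStatus_t xTaskDetails;
+
+	// Pass eInvalid so vTaskGetInfo() works the task state out itself, and
+	// pdTRUE to also measure the stack high water mark (slower).
+	vTaskGetInfo( xHandle, &xTaskDetails, pdTRUE, eInvalid );
+*/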
*/ + do + { + listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); + vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState ); + uxTask++; + } while( pxNextTCB != pxFirstTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return uxTask; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) + + static uint16_t prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) + { + uint32_t ulCount = 0U; + + while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE ) + { + pucStackByte -= portSTACK_GROWTH; + ulCount++; + } + + ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */ + + return ( uint16_t ) ulCount; + } + +#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + + UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + uint8_t *pucEndOfStack; + UBaseType_t uxReturn; + + pxTCB = prvGetTCBFromHandle( xTask ); + + #if portSTACK_GROWTH < 0 + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxStack; + } + #else + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack; + } + #endif + + uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack ); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskGetStackHighWaterMark */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelete == 1 ) + + static void prvDeleteTCB( TCB_t *pxTCB ) + { + /* This call is required specifically for the TriCore port. It must be + above the vPortFree() calls. The call is also used by ports/demos that + want to allocate and clean RAM statically. */ + portCLEAN_UP_TCB( pxTCB ); + + /* Free up the memory allocated by the scheduler for the task. It is up + to the task to free any memory allocated at the application level. */ + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + _reclaim_reent( &( pxTCB->xNewLib_reent ) ); + } + #endif /* configUSE_NEWLIB_REENTRANT */ + + #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) ) + { + /* The task can only have been allocated dynamically - free both + the stack and TCB. */ + vPortFree( pxTCB->pxStack ); + vPortFree( pxTCB ); + } + #elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 Macro has been consolidated for readability reasons. */ + { + /* The task could have been allocated statically or dynamically, so + check what was statically allocated before trying to free the + memory. */ + if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ) + { + /* Both the stack and TCB were allocated dynamically, so both + must be freed. */ + vPortFree( pxTCB->pxStack ); + vPortFree( pxTCB ); + } + else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY ) + { + /* Only the stack was statically allocated, so the TCB is the + only memory that must be freed. */ + vPortFree( pxTCB ); + } + else + { + /* Neither the stack nor the TCB were allocated dynamically, so + nothing needs to be freed. 
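+
+/* Example usage for uxTaskGetStackHighWaterMark() (illustrative sketch):
+the value is the minimum stack headroom, in words, the task has ever had,
+so values approaching zero warn of imminent overflow:
+
+	UBaseType_t uxHighWaterMark;
+
+	// NULL queries the calling task's own stack.
+	uxHighWaterMark = uxTaskGetStackHighWaterMark( NULL );
+	configASSERT( uxHighWaterMark > 16 );	// 16 words is an assumed margin.
+*/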
*/ + configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB ); + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + } + +#endif /* INCLUDE_vTaskDelete */ +/*-----------------------------------------------------------*/ + +static void prvResetNextTaskUnblockTime( void ) +{ +TCB_t *pxTCB; + + if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) + { + /* The new current delayed list is empty. Set xNextTaskUnblockTime to + the maximum possible value so it is extremely unlikely that the + if( xTickCount >= xNextTaskUnblockTime ) test will pass until + there is an item in the delayed list. */ + xNextTaskUnblockTime = portMAX_DELAY; + } + else + { + /* The new current delayed list is not empty, get the value of + the item at the head of the delayed list. This is the time at + which the task at the head of the delayed list should be removed + from the Blocked state. */ + ( pxTCB ) = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); + xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xStateListItem ) ); + } +} +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + + TaskHandle_t xTaskGetCurrentTaskHandle( void ) + { + TaskHandle_t xReturn; + + /* A critical section is not required as this is not called from + an interrupt and the current TCB will always be the same for any + individual execution thread. */ + xReturn = pxCurrentTCB; + + return xReturn; + } + +#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + + BaseType_t xTaskGetSchedulerState( void ) + { + BaseType_t xReturn; + + if( xSchedulerRunning == pdFALSE ) + { + xReturn = taskSCHEDULER_NOT_STARTED; + } + else + { + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + xReturn = taskSCHEDULER_RUNNING; + } + else + { + xReturn = taskSCHEDULER_SUSPENDED; + } + } + + return xReturn; + } + +#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder ) + { + TCB_t * const pxMutexHolderTCB = ( TCB_t * ) pxMutexHolder; + BaseType_t xReturn = pdFALSE; + + /* If the mutex was given back by an interrupt while the queue was + locked then the mutex holder might now be NULL. _RB_ Is this still + needed as interrupts can no longer use mutexes? */ + if( pxMutexHolder != NULL ) + { + /* If the holder of the mutex has a priority below the priority of + the task attempting to obtain the mutex then it will temporarily + inherit the priority of the task attempting to obtain the mutex. */ + if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority ) + { + /* Adjust the mutex holder state to account for its new + priority. Only reset the event list item value if the value is + not being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) + { + listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. 
*/ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* If the task being modified is in the ready state it will need + to be moved into a new list. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE ) + { + if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Inherit the priority before being moved into the new list. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; + prvAddTaskToReadyList( pxMutexHolderTCB ); + } + else + { + /* Just inherit the priority. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; + } + + traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority ); + + /* Inheritance occurred. */ + xReturn = pdTRUE; + } + else + { + if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority ) + { + /* The base priority of the mutex holder is lower than the + priority of the task attempting to take the mutex, but the + current priority of the mutex holder is not lower than the + priority of the task attempting to take the mutex. + Therefore the mutex holder must have already inherited a + priority, but inheritance would have occurred if that had + not been the case. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder ) + { + TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder; + BaseType_t xReturn = pdFALSE; + + if( pxMutexHolder != NULL ) + { + /* A task can only have an inherited priority if it holds the mutex. + If the mutex is held by a task then it cannot be given from an + interrupt, and if a mutex is given by the holding task then it must + be the running state task. */ + configASSERT( pxTCB == pxCurrentTCB ); + configASSERT( pxTCB->uxMutexesHeld ); + ( pxTCB->uxMutexesHeld )--; + + /* Has the holder of the mutex inherited the priority of another + task? */ + if( pxTCB->uxPriority != pxTCB->uxBasePriority ) + { + /* Only disinherit if no other mutexes are held. */ + if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 ) + { + /* A task can only have an inherited priority if it holds + the mutex. If the mutex is held by a task then it cannot be + given from an interrupt, and if a mutex is given by the + holding task then it must be the running state task. Remove + the holding task from the ready list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Disinherit the priority before adding the task into the + new ready list. */ + traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority ); + pxTCB->uxPriority = pxTCB->uxBasePriority; + + /* Reset the event list item value. It cannot be in use for + any other purpose if this task is running, and it must be + running to give back the mutex. */ + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. 
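
xTaskPriorityInherit() above runs from inside the queue/semaphore code when a mutex take blocks. A sketch of the classic priority inversion scenario it exists for, assuming configUSE_MUTEXES == 1 (task names and the priority values 1 and 3, with an unrelated priority-2 task assumed elsewhere in the system, are illustrative):

#include "FreeRTOS.h"
#include "task.h"
#include "semphr.h"

static SemaphoreHandle_t xMutex;    /* xMutex = xSemaphoreCreateMutex(); */

static void prvLowPriorityTask( void *pvParameters )    /* Created at priority 1. */
{
    ( void ) pvParameters;

    for( ;; )
    {
        xSemaphoreTake( xMutex, portMAX_DELAY );
        /* If prvHighPriorityTask blocks on the mutex while this code runs,
        the kernel raises this task to priority 3, so the priority-2 task
        cannot preempt it indefinitely (unbounded priority inversion). */
        xSemaphoreGive( xMutex );   /* Priority disinherited back to 1 here. */
    }
}

static void prvHighPriorityTask( void *pvParameters )   /* Created at priority 3. */
{
    ( void ) pvParameters;

    for( ;; )
    {
        xSemaphoreTake( xMutex, portMAX_DELAY );    /* May trigger inheritance. */
        xSemaphoreGive( xMutex );
        vTaskDelay( pdMS_TO_TICKS( 10 ) );
    }
}
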
*/ + prvAddTaskToReadyList( pxTCB ); + + /* Return true to indicate that a context switch is required. + This is only actually required in the corner case whereby + multiple mutexes were held and the mutexes were given back + in an order different to that in which they were taken. + If a context switch did not occur when the first mutex was + returned, even if a task was waiting on it, then a context + switch should occur when the last mutex is returned whether + a task is waiting on it or not. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask ) + { + TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder; + UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse; + const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1; + + if( pxMutexHolder != NULL ) + { + /* If pxMutexHolder is not NULL then the holder must hold at least + one mutex. */ + configASSERT( pxTCB->uxMutexesHeld ); + + /* Determine the priority to which the priority of the task that + holds the mutex should be set. This will be the greater of the + holding task's base priority and the priority of the highest + priority task that is waiting to obtain the mutex. */ + if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask ) + { + uxPriorityToUse = uxHighestPriorityWaitingTask; + } + else + { + uxPriorityToUse = pxTCB->uxBasePriority; + } + + /* Does the priority need to change? */ + if( pxTCB->uxPriority != uxPriorityToUse ) + { + /* Only disinherit if no other mutexes are held. This is a + simplification in the priority inheritance implementation. If + the task that holds the mutex is also holding other mutexes then + the other mutexes may have caused the priority inheritance. */ + if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld ) + { + /* If a task has timed out because it already holds the + mutex it was trying to obtain then it cannot have inherited + its own priority. */ + configASSERT( pxTCB != pxCurrentTCB ); + + /* Disinherit the priority, remembering the previous + priority to facilitate determining the subject task's + state. */ + traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority ); + uxPriorityUsedOnEntry = pxTCB->uxPriority; + pxTCB->uxPriority = uxPriorityToUse; + + /* Only reset the event list item value if the value is not + being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) + { + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* If the running task is not the task that holds the mutex + then the task that holds the mutex could be in either the + Ready, Blocked or Suspended states. Only remove the task + from its current state list if it is in the Ready state as + the task's priority is going to change and there is one + Ready list per priority. 
*/ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) + { + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + prvAddTaskToReadyList( pxTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( portCRITICAL_NESTING_IN_TCB == 1 ) + + void vTaskEnterCritical( void ) + { + portDISABLE_INTERRUPTS(); + + if( xSchedulerRunning != pdFALSE ) + { + ( pxCurrentTCB->uxCriticalNesting )++; + + /* This is not the interrupt safe version of the enter critical + function so assert() if it is being called from an interrupt + context. Only API functions that end in "FromISR" can be used in an + interrupt. Only assert if the critical nesting count is 1 to + protect against recursive calls if the assert function also uses a + critical section. */ + if( pxCurrentTCB->uxCriticalNesting == 1 ) + { + portASSERT_IF_IN_ISR(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* portCRITICAL_NESTING_IN_TCB */ +/*-----------------------------------------------------------*/ + +#if ( portCRITICAL_NESTING_IN_TCB == 1 ) + + void vTaskExitCritical( void ) + { + if( xSchedulerRunning != pdFALSE ) + { + if( pxCurrentTCB->uxCriticalNesting > 0U ) + { + ( pxCurrentTCB->uxCriticalNesting )--; + + if( pxCurrentTCB->uxCriticalNesting == 0U ) + { + portENABLE_INTERRUPTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* portCRITICAL_NESTING_IN_TCB */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) + + static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) + { + size_t x; + + /* Start by copying the entire string. */ + strcpy( pcBuffer, pcTaskName ); + + /* Pad the end of the string with spaces to ensure columns line up when + printed out. */ + for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ ) + { + pcBuffer[ x ] = ' '; + } + + /* Terminate. */ + pcBuffer[ x ] = 0x00; + + /* Return the new end of string. */ + return &( pcBuffer[ x ] ); + } + +#endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + void vTaskList( char * pcWriteBuffer ) + { + TaskStatus_t *pxTaskStatusArray; + volatile UBaseType_t uxArraySize, x; + char cStatus; + + /* + * PLEASE NOTE: + * + * This function is provided for convenience only, and is used by many + * of the demo applications. Do not consider it to be part of the + * scheduler. + * + * vTaskList() calls uxTaskGetSystemState(), then formats part of the + * uxTaskGetSystemState() output into a human readable table that + * displays task names, states and stack usage. 
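
Because the nesting depth lives in the TCB, critical sections built on vTaskEnterCritical()/vTaskExitCritical() may nest freely; interrupts are only re-enabled when the count returns to zero. A sketch, assuming a port where taskENTER_CRITICAL()/taskEXIT_CRITICAL() map to these functions (the shared counter is illustrative):

#include <stdint.h>
#include "FreeRTOS.h"
#include "task.h"

static volatile uint32_t ulSharedCounter = 0;

static void prvInnerUpdate( void )
{
    taskENTER_CRITICAL();       /* Nesting 1 -> 2: no further port action. */
    ulSharedCounter++;
    taskEXIT_CRITICAL();        /* Nesting 2 -> 1: interrupts stay masked. */
}

void vOuterUpdate( void )
{
    taskENTER_CRITICAL();       /* Nesting 0 -> 1: interrupts masked here. */
    prvInnerUpdate();           /* Safe - nesting is counted, not toggled. */
    ulSharedCounter++;
    taskEXIT_CRITICAL();        /* Nesting 1 -> 0: interrupts unmasked here. */
}
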
+ * + * vTaskList() has a dependency on the sprintf() C library function that + * might bloat the code size, use a lot of stack, and provide different + * results on different platforms. An alternative, tiny, third party, + * and limited functionality implementation of sprintf() is provided in + * many of the FreeRTOS/Demo sub-directories in a file called + * printf-stdarg.c (note printf-stdarg.c does not provide a full + * snprintf() implementation!). + * + * It is recommended that production systems call uxTaskGetSystemState() + * directly to get access to raw stats data, rather than indirectly + * through a call to vTaskList(). + */ + + + /* Make sure the write buffer does not contain a string. */ + *pcWriteBuffer = 0x00; + + /* Take a snapshot of the number of tasks in case it changes while this + function is executing. */ + uxArraySize = uxCurrentNumberOfTasks; + + /* Allocate an array index for each task. NOTE! if + configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will + equate to NULL. */ + pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); + + if( pxTaskStatusArray != NULL ) + { + /* Generate the (binary) data. */ + uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL ); + + /* Create a human readable table from the binary data. */ + for( x = 0; x < uxArraySize; x++ ) + { + switch( pxTaskStatusArray[ x ].eCurrentState ) + { + case eRunning: cStatus = tskRUNNING_CHAR; + break; + + case eReady: cStatus = tskREADY_CHAR; + break; + + case eBlocked: cStatus = tskBLOCKED_CHAR; + break; + + case eSuspended: cStatus = tskSUSPENDED_CHAR; + break; + + case eDeleted: cStatus = tskDELETED_CHAR; + break; + + default: /* Should not get here, but it is included + to prevent static checking errors. */ + cStatus = 0x00; + break; + } + + /* Write the task name to the string, padding with spaces so it + can be printed in tabular form more easily. */ + pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName ); + + /* Write the rest of the string. */ + sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); + pcWriteBuffer += strlen( pcWriteBuffer ); + } + + /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION + is 0 then vPortFree() will be #defined to nothing. */ + vPortFree( pxTaskStatusArray ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + void vTaskGetRunTimeStats( char *pcWriteBuffer ) + { + TaskStatus_t *pxTaskStatusArray; + volatile UBaseType_t uxArraySize, x; + uint32_t ulTotalTime, ulStatsAsPercentage; + + #if( configUSE_TRACE_FACILITY != 1 ) + { + #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats(). + } + #endif + + /* + * PLEASE NOTE: + * + * This function is provided for convenience only, and is used by many + * of the demo applications. Do not consider it to be part of the + * scheduler. 
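
A sketch of calling vTaskList() from application code, assuming configUSE_TRACE_FACILITY == 1 and configUSE_STATS_FORMATTING_FUNCTIONS > 0; the 512-byte buffer is an illustrative size based on the rule of thumb of roughly 40 bytes per task:

#include <stdio.h>
#include "FreeRTOS.h"
#include "task.h"

static char cTaskListBuffer[ 512 ];

void vDumpTaskTable( void )
{
    vTaskList( cTaskListBuffer );
    printf( "Name           State  Prio  Stack  Num\r\n" );
    printf( "%s", cTaskListBuffer );
}
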
+ * + * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part + * of the uxTaskGetSystemState() output into a human readable table that + * displays the amount of time each task has spent in the Running state + * in both absolute and percentage terms. + * + * vTaskGetRunTimeStats() has a dependency on the sprintf() C library + * function that might bloat the code size, use a lot of stack, and + * provide different results on different platforms. An alternative, + * tiny, third party, and limited functionality implementation of + * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in + * a file called printf-stdarg.c (note printf-stdarg.c does not provide + * a full snprintf() implementation!). + * + * It is recommended that production systems call uxTaskGetSystemState() + * directly to get access to raw stats data, rather than indirectly + * through a call to vTaskGetRunTimeStats(). + */ + + /* Make sure the write buffer does not contain a string. */ + *pcWriteBuffer = 0x00; + + /* Take a snapshot of the number of tasks in case it changes while this + function is executing. */ + uxArraySize = uxCurrentNumberOfTasks; + + /* Allocate an array index for each task. NOTE! If + configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will + equate to NULL. */ + pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); + + if( pxTaskStatusArray != NULL ) + { + /* Generate the (binary) data. */ + uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime ); + + /* For percentage calculations. */ + ulTotalTime /= 100UL; + + /* Avoid divide by zero errors. */ + if( ulTotalTime > 0 ) + { + /* Create a human readable table from the binary data. */ + for( x = 0; x < uxArraySize; x++ ) + { + /* What percentage of the total run time has the task used? + This will always be rounded down to the nearest integer. + ulTotalRunTimeDiv100 has already been divided by 100. */ + ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime; + + /* Write the task name to the string, padding with + spaces so it can be printed in tabular form more + easily. */ + pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName ); + + if( ulStatsAsPercentage > 0UL ) + { + #ifdef portLU_PRINTF_SPECIFIER_REQUIRED + { + sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage ); + } + #else + { + /* sizeof( int ) == sizeof( long ) so a smaller + printf() library can be used. */ + sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage ); + } + #endif + } + else + { + /* If the percentage is zero here then the task has + consumed less than 1% of the total run time. */ + #ifdef portLU_PRINTF_SPECIFIER_REQUIRED + { + sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter ); + } + #else + { + /* sizeof( int ) == sizeof( long ) so a smaller + printf() library can be used. */ + sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); + } + #endif + } + + pcWriteBuffer += strlen( pcWriteBuffer ); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION + is 0 then vPortFree() will be #defined to nothing. 
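
The run-time counter that vTaskGetRunTimeStats() divides by is supplied by the application through two port macros. A sketch of the FreeRTOSConfig.h plumbing it assumes (vConfigureStatsTimer() and ulStatsTimerValue() are assumed application functions returning a counter running 10 to 100 times faster than the tick interrupt):

#include <stdint.h>

#define configGENERATE_RUN_TIME_STATS            1
#define configUSE_TRACE_FACILITY                 1
#define configUSE_STATS_FORMATTING_FUNCTIONS     1

extern void vConfigureStatsTimer( void );
extern uint32_t ulStatsTimerValue( void );

#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS()    vConfigureStatsTimer()
#define portGET_RUN_TIME_COUNTER_VALUE()            ulStatsTimerValue()
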
*/ + vPortFree( pxTaskStatusArray ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + +TickType_t uxTaskResetEventItemValue( void ) +{ +TickType_t uxReturn; + + uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) ); + + /* Reset the event list item to its normal value - so it can be used with + queues and semaphores. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + return uxReturn; +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + void *pvTaskIncrementMutexHeldCount( void ) + { + /* If xSemaphoreCreateMutex() is called before any tasks have been created + then pxCurrentTCB will be NULL. */ + if( pxCurrentTCB != NULL ) + { + ( pxCurrentTCB->uxMutexesHeld )++; + } + + return pxCurrentTCB; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait ) + { + uint32_t ulReturn; + + taskENTER_CRITICAL(); + { + /* Only block if the notification count is not already non-zero. */ + if( pxCurrentTCB->ulNotifiedValue == 0UL ) + { + /* Mark this task as waiting for a notification. */ + pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION; + + if( xTicksToWait > ( TickType_t ) 0 ) + { + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + traceTASK_NOTIFY_TAKE_BLOCK(); + + /* All ports are written to allow a yield in a critical + section (some will yield immediately, others wait until the + critical section exits) - but it is not something that + application code should ever do. */ + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + taskENTER_CRITICAL(); + { + traceTASK_NOTIFY_TAKE(); + ulReturn = pxCurrentTCB->ulNotifiedValue; + + if( ulReturn != 0UL ) + { + if( xClearCountOnExit != pdFALSE ) + { + pxCurrentTCB->ulNotifiedValue = 0UL; + } + else + { + pxCurrentTCB->ulNotifiedValue = ulReturn - ( uint32_t ) 1; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + } + taskEXIT_CRITICAL(); + + return ulReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait ) + { + BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + /* Only block if a notification is not already pending. */ + if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED ) + { + /* Clear bits in the task's notification value as bits may get + set by the notifying task or interrupt. This can be used to + clear the value to zero. */ + pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnEntry; + + /* Mark this task as waiting for a notification. 
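
ulTaskNotifyTake() above, paired with vTaskNotifyGiveFromISR() further down in this file, is the documented lightweight replacement for a binary or counting semaphore in deferred interrupt handling. A sketch, assuming configUSE_TASK_NOTIFICATIONS == 1 (the ISR name and handler task are illustrative):

#include "FreeRTOS.h"
#include "task.h"

static TaskHandle_t xHandlerTask = NULL;    /* Set when the task is created. */

void UART_RX_IRQHandler( void )             /* Illustrative ISR name. */
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Equivalent to xSemaphoreGiveFromISR(), but faster and smaller. */
    vTaskNotifyGiveFromISR( xHandlerTask, &xHigherPriorityTaskWoken );
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}

static void prvHandlerTask( void *pvParameters )
{
    ( void ) pvParameters;

    for( ;; )
    {
        /* Block until the ISR gives.  pdTRUE zeroes the count on exit, so
        one wake-up covers every event signalled so far. */
        if( ulTaskNotifyTake( pdTRUE, portMAX_DELAY ) != 0UL )
        {
            /* ...process the received data here, at task priority... */
        }
    }
}
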
*/ + pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION; + + if( xTicksToWait > ( TickType_t ) 0 ) + { + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + traceTASK_NOTIFY_WAIT_BLOCK(); + + /* All ports are written to allow a yield in a critical + section (some will yield immediately, others wait until the + critical section exits) - but it is not something that + application code should ever do. */ + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + taskENTER_CRITICAL(); + { + traceTASK_NOTIFY_WAIT(); + + if( pulNotificationValue != NULL ) + { + /* Output the current notification value, which may or may not + have changed. */ + *pulNotificationValue = pxCurrentTCB->ulNotifiedValue; + } + + /* If ucNotifyValue is set then either the task never entered the + blocked state (because a notification was already pending) or the + task unblocked because of a notification. Otherwise the task + unblocked because of a timeout. */ + if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED ) + { + /* A notification was not received. */ + xReturn = pdFALSE; + } + else + { + /* A notification was already pending or a notification was + received while the task was waiting. */ + pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnExit; + xReturn = pdTRUE; + } + + pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue ) + { + TCB_t * pxTCB; + BaseType_t xReturn = pdPASS; + uint8_t ucOriginalNotifyState; + + configASSERT( xTaskToNotify ); + pxTCB = ( TCB_t * ) xTaskToNotify; + + taskENTER_CRITICAL(); + { + if( pulPreviousNotificationValue != NULL ) + { + *pulPreviousNotificationValue = pxTCB->ulNotifiedValue; + } + + ucOriginalNotifyState = pxTCB->ucNotifyState; + + pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED; + + switch( eAction ) + { + case eSetBits : + pxTCB->ulNotifiedValue |= ulValue; + break; + + case eIncrement : + ( pxTCB->ulNotifiedValue )++; + break; + + case eSetValueWithOverwrite : + pxTCB->ulNotifiedValue = ulValue; + break; + + case eSetValueWithoutOverwrite : + if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED ) + { + pxTCB->ulNotifiedValue = ulValue; + } + else + { + /* The value could not be written to the task. */ + xReturn = pdFAIL; + } + break; + + case eNoAction: + /* The task is being notified without its notify value being + updated. */ + break; + } + + traceTASK_NOTIFY(); + + /* If the task is in the blocked state specifically to wait for a + notification then unblock it now. */ + if( ucOriginalNotifyState == taskWAITING_NOTIFICATION ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + /* The task should not have been on an event list. */ + configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); + + #if( configUSE_TICKLESS_IDLE != 0 ) + { + /* If a task is blocked waiting for a notification then + xNextTaskUnblockTime might be set to the blocked task's time + out time. 
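
The two bit-clearing parameters of xTaskNotifyWait() let the 32-bit notification value act as a lightweight event-flag group. A sketch (the event bit assignments are application choices, not kernel definitions):

#include <stdint.h>
#include "FreeRTOS.h"
#include "task.h"

#define evtRX_COMPLETE    ( 1UL << 0 )
#define evtTX_COMPLETE    ( 1UL << 1 )

static void prvEventTask( void *pvParameters )
{
uint32_t ulNotifiedValue;

    ( void ) pvParameters;

    for( ;; )
    {
        /* Clear nothing on entry; clear the handled bits on exit. */
        if( xTaskNotifyWait( 0UL, evtRX_COMPLETE | evtTX_COMPLETE, &ulNotifiedValue, portMAX_DELAY ) == pdTRUE )
        {
            if( ( ulNotifiedValue & evtRX_COMPLETE ) != 0UL )
            {
                /* ...handle reception... */
            }

            if( ( ulNotifiedValue & evtTX_COMPLETE ) != 0UL )
            {
                /* ...handle transmission... */
            }
        }
    }
}
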
If the task is unblocked for a reason other than + a timeout xNextTaskUnblockTime is normally left unchanged, + because it will automatically get reset to a new value when + the tick count equals xNextTaskUnblockTime. However if + tickless idling is used it might be more important to enter + sleep mode at the earliest possible time - so reset + xNextTaskUnblockTime here to ensure it is updated at the + earliest possible time. */ + prvResetNextTaskUnblockTime(); + } + #endif + + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* The notified task has a priority above the currently + executing task so a yield is required. */ + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken ) + { + TCB_t * pxTCB; + uint8_t ucOriginalNotifyState; + BaseType_t xReturn = pdPASS; + UBaseType_t uxSavedInterruptStatus; + + configASSERT( xTaskToNotify ); + + /* RTOS ports that support interrupt nesting have the concept of a + maximum system call (or maximum API call) interrupt priority. + Interrupts that are above the maximum system call priority are kept + permanently enabled, even when the RTOS kernel is in a critical section, + but cannot make any calls to FreeRTOS API functions. If configASSERT() + is defined in FreeRTOSConfig.h then + portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has + been assigned a priority above the configured maximum system call + priority. Only FreeRTOS functions that end in FromISR can be called + from interrupts that have been assigned a priority at or (logically) + below the maximum system call interrupt priority. FreeRTOS maintains a + separate interrupt safe API to ensure interrupt entry is as fast and as + simple as possible. More information (albeit Cortex-M specific) is + provided on the following link: + http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + pxTCB = ( TCB_t * ) xTaskToNotify; + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( pulPreviousNotificationValue != NULL ) + { + *pulPreviousNotificationValue = pxTCB->ulNotifiedValue; + } + + ucOriginalNotifyState = pxTCB->ucNotifyState; + pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED; + + switch( eAction ) + { + case eSetBits : + pxTCB->ulNotifiedValue |= ulValue; + break; + + case eIncrement : + ( pxTCB->ulNotifiedValue )++; + break; + + case eSetValueWithOverwrite : + pxTCB->ulNotifiedValue = ulValue; + break; + + case eSetValueWithoutOverwrite : + if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED ) + { + pxTCB->ulNotifiedValue = ulValue; + } + else + { + /* The value could not be written to the task. */ + xReturn = pdFAIL; + } + break; + + case eNoAction : + /* The task is being notified without its notify value being + updated. */ + break; + } + + traceTASK_NOTIFY_FROM_ISR(); + + /* If the task is in the blocked state specifically to wait for a + notification then unblock it now. 
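
xTaskGenericNotify() above is normally reached through the xTaskNotify() wrapper macro. The task-side counterpart of the event-flag sketch is a single call (xEventTaskHandle is an assumed application global):

#include "FreeRTOS.h"
#include "task.h"

extern TaskHandle_t xEventTaskHandle;

void vSignalRxComplete( void )
{
    /* ORs bit 0 into the target's notification value and unblocks it if it
    is waiting in xTaskNotifyWait().  eSetBits cannot fail, so the pdPASS
    return value is ignored here. */
    ( void ) xTaskNotify( xEventTaskHandle, ( 1UL << 0 ), eSetBits );
}
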
*/ + if( ucOriginalNotifyState == taskWAITING_NOTIFICATION ) + { + /* The task should not have been on an event list. */ + configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed and ready lists cannot be accessed, so hold + this task pending until the scheduler is resumed. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* The notified task has a priority above the currently + executing task so a yield is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + /* Mark that a yield is pending in case the user is not + using the "xHigherPriorityTaskWoken" parameter to an ISR + safe FreeRTOS function. */ + xYieldPending = pdTRUE; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken ) + { + TCB_t * pxTCB; + uint8_t ucOriginalNotifyState; + UBaseType_t uxSavedInterruptStatus; + + configASSERT( xTaskToNotify ); + + /* RTOS ports that support interrupt nesting have the concept of a + maximum system call (or maximum API call) interrupt priority. + Interrupts that are above the maximum system call priority are kept + permanently enabled, even when the RTOS kernel is in a critical section, + but cannot make any calls to FreeRTOS API functions. If configASSERT() + is defined in FreeRTOSConfig.h then + portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has + been assigned a priority above the configured maximum system call + priority. Only FreeRTOS functions that end in FromISR can be called + from interrupts that have been assigned a priority at or (logically) + below the maximum system call interrupt priority. FreeRTOS maintains a + separate interrupt safe API to ensure interrupt entry is as fast and as + simple as possible. More information (albeit Cortex-M specific) is + provided on the following link: + http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + pxTCB = ( TCB_t * ) xTaskToNotify; + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + ucOriginalNotifyState = pxTCB->ucNotifyState; + pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED; + + /* 'Giving' is equivalent to incrementing a count in a counting + semaphore. */ + ( pxTCB->ulNotifiedValue )++; + + traceTASK_NOTIFY_GIVE_FROM_ISR(); + + /* If the task is in the blocked state specifically to wait for a + notification then unblock it now. */ + if( ucOriginalNotifyState == taskWAITING_NOTIFICATION ) + { + /* The task should not have been on an event list. 
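
Used with eSetValueWithOverwrite, the FromISR variant above turns the notification value into a single-entry mailbox where the newest value wins. A sketch (the ISR name and ulReadConversion() are illustrative application names):

#include <stdint.h>
#include "FreeRTOS.h"
#include "task.h"

extern TaskHandle_t xProcessingTask;
extern uint32_t ulReadConversion( void );

void ADC_IRQHandler( void )
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Send the latest conversion result directly in the notification
    value; no queue and no buffer required. */
    ( void ) xTaskNotifyFromISR( xProcessingTask, ulReadConversion(), eSetValueWithOverwrite, &xHigherPriorityTaskWoken );

    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
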
*/ + configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed and ready lists cannot be accessed, so hold + this task pending until the scheduler is resumed. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* The notified task has a priority above the currently + executing task so a yield is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + /* Mark that a yield is pending in case the user is not + using the "xHigherPriorityTaskWoken" parameter in an ISR + safe FreeRTOS function. */ + xYieldPending = pdTRUE; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ + +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + BaseType_t xReturn; + + /* If null is passed in here then it is the calling task that is having + its notification state cleared. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + taskENTER_CRITICAL(); + { + if( pxTCB->ucNotifyState == taskNOTIFICATION_RECEIVED ) + { + pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + + +static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) +{ +TickType_t xTimeToWake; +const TickType_t xConstTickCount = xTickCount; + + #if( INCLUDE_xTaskAbortDelay == 1 ) + { + /* About to enter a delayed list, so ensure the ucDelayAborted flag is + reset to pdFALSE so it can be detected as having been set to pdTRUE + when the task leaves the Blocked state. */ + pxCurrentTCB->ucDelayAborted = pdFALSE; + } + #endif + + /* Remove the task from the ready list before adding it to the blocked list + as the same list item is used for both lists. */ + if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* The current task must be in a ready list, so there is no need to + check, and the port reset macro can be called directly. */ + portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) ) + { + /* Add the task to the suspended task list instead of a delayed task + list to ensure it is not woken by a timing event. It will block + indefinitely. */ + vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) ); + } + else + { + /* Calculate the time at which the task should be woken if the event + does not occur. This may overflow but this doesn't matter, the + kernel will manage it correctly. */ + xTimeToWake = xConstTickCount + xTicksToWait; + + /* The list item will be inserted in wake time order. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake ); + + if( xTimeToWake < xConstTickCount ) + { + /* Wake time has overflowed. 
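
xTaskNotifyStateClear() above discards a pending notification without blocking, which is useful before starting a fresh request/response cycle so a stale completion cannot satisfy the new wait. A sketch (vStartTransaction is an illustrative name):

#include "FreeRTOS.h"
#include "task.h"

void vStartTransaction( void )
{
    /* NULL clears the calling task's own notification state. */
    ( void ) xTaskNotifyStateClear( NULL );

    /* ...kick off the request; the ISR notifies on completion and this
    task then blocks in ulTaskNotifyTake()/xTaskNotifyWait()... */
}
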
Place this item in the overflow + list. */ + vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + } + else + { + /* The wake time has not overflowed, so the current block list + is used. */ + vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + + /* If the task entering the blocked state was placed at the + head of the list of blocked tasks then xNextTaskUnblockTime + needs to be updated too. */ + if( xTimeToWake < xNextTaskUnblockTime ) + { + xNextTaskUnblockTime = xTimeToWake; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + } + #else /* INCLUDE_vTaskSuspend */ + { + /* Calculate the time at which the task should be woken if the event + does not occur. This may overflow but this doesn't matter, the kernel + will manage it correctly. */ + xTimeToWake = xConstTickCount + xTicksToWait; + + /* The list item will be inserted in wake time order. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake ); + + if( xTimeToWake < xConstTickCount ) + { + /* Wake time has overflowed. Place this item in the overflow list. */ + vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + } + else + { + /* The wake time has not overflowed, so the current block list is used. */ + vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + + /* If the task entering the blocked state was placed at the head of the + list of blocked tasks then xNextTaskUnblockTime needs to be updated + too. */ + if( xTimeToWake < xNextTaskUnblockTime ) + { + xNextTaskUnblockTime = xTimeToWake; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */ + ( void ) xCanBlockIndefinitely; + } + #endif /* INCLUDE_vTaskSuspend */ +} + +/* Code below here allows additional code to be inserted into this source file, +especially where access to file scope functions and data is needed (for example +when performing module tests). */ + +#ifdef FREERTOS_MODULE_TEST + #include "tasks_test_access_functions.h" +#endif + + +#if( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 ) + + #include "freertos_tasks_c_additions.h" + + static void freertos_tasks_c_additions_init( void ) + { + #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT + FREERTOS_TASKS_C_ADDITIONS_INIT(); + #endif + } + +#endif + + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/timers.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/timers.c new file mode 100644 index 000000000..8a5d99c78 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/FreeRTOS/Source/timers.c @@ -0,0 +1,1076 @@ +/* + * FreeRTOS Kernel V10.0.1 + * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* Standard includes. */ +#include <stdlib.h> + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" + +#if ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 0 ) + #error configUSE_TIMERS must be set to 1 to make the xTimerPendFunctionCall() function available. +#endif + +/* Lint e961 and e750 are suppressed as a MISRA exception justified because the +MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the +header files above, but not in this file, in order to generate the correct +privileged vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */ + + +/* This entire source file will be skipped if the application is not configured +to include software timer functionality. This #if is closed at the very bottom +of this file. If you want to include software timer functionality then ensure +configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. */ +#if ( configUSE_TIMERS == 1 ) + +/* Misc definitions. */ +#define tmrNO_DELAY ( TickType_t ) 0U + +/* The name assigned to the timer service task. This can be overridden by +defining configTIMER_SERVICE_TASK_NAME in FreeRTOSConfig.h. */ +#ifndef configTIMER_SERVICE_TASK_NAME + #define configTIMER_SERVICE_TASK_NAME "Tmr Svc" +#endif + +/* The definition of the timers themselves. */ +typedef struct tmrTimerControl +{ + const char *pcTimerName; /*<< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + ListItem_t xTimerListItem; /*<< Standard linked list item as used by all kernel features for event management. */ + TickType_t xTimerPeriodInTicks;/*<< How quickly and often the timer expires. */ + UBaseType_t uxAutoReload; /*<< Set to pdTRUE if the timer should be automatically restarted once expired. Set to pdFALSE if the timer is, in effect, a one-shot timer. */ + void *pvTimerID; /*<< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */ + TimerCallbackFunction_t pxCallbackFunction; /*<< The function that will be called when the timer expires. */ + #if( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxTimerNumber; /*<< An ID assigned by trace tools such as FreeRTOS+Trace */ + #endif + + #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + uint8_t ucStaticallyAllocated; /*<< Set to pdTRUE if the timer was created statically so no attempt is made to free the memory again if the timer is later deleted. 
*/ + #endif +} xTIMER; + +/* The old xTIMER name is maintained above then typedefed to the new Timer_t +name below to enable the use of older kernel aware debuggers. */ +typedef xTIMER Timer_t; + +/* The definition of messages that can be sent and received on the timer queue. +Two types of message can be queued - messages that manipulate a software timer, +and messages that request the execution of a non-timer related callback. The +two message types are defined in two separate structures, xTimerParametersType +and xCallbackParametersType respectively. */ +typedef struct tmrTimerParameters +{ + TickType_t xMessageValue; /*<< An optional value used by a subset of commands, for example, when changing the period of a timer. */ + Timer_t * pxTimer; /*<< The timer to which the command will be applied. */ +} TimerParameter_t; + + +typedef struct tmrCallbackParameters +{ + PendedFunction_t pxCallbackFunction; /* << The callback function to execute. */ + void *pvParameter1; /* << The value that will be used as the callback functions first parameter. */ + uint32_t ulParameter2; /* << The value that will be used as the callback functions second parameter. */ +} CallbackParameters_t; + +/* The structure that contains the two message types, along with an identifier +that is used to determine which message type is valid. */ +typedef struct tmrTimerQueueMessage +{ + BaseType_t xMessageID; /*<< The command being sent to the timer service task. */ + union + { + TimerParameter_t xTimerParameters; + + /* Don't include xCallbackParameters if it is not going to be used as + it makes the structure (and therefore the timer queue) larger. */ + #if ( INCLUDE_xTimerPendFunctionCall == 1 ) + CallbackParameters_t xCallbackParameters; + #endif /* INCLUDE_xTimerPendFunctionCall */ + } u; +} DaemonTaskMessage_t; + +/*lint -save -e956 A manual analysis and inspection has been used to determine +which static variables must be declared volatile. */ + +/* The list in which active timers are stored. Timers are referenced in expire +time order, with the nearest expiry time at the front of the list. Only the +timer service task is allowed to access these lists. */ +PRIVILEGED_DATA static List_t xActiveTimerList1 = {0}; +PRIVILEGED_DATA static List_t xActiveTimerList2 = {0}; +PRIVILEGED_DATA static List_t *pxCurrentTimerList = NULL; +PRIVILEGED_DATA static List_t *pxOverflowTimerList = NULL; + +/* A queue that is used to send commands to the timer service task. */ +PRIVILEGED_DATA static QueueHandle_t xTimerQueue = NULL; +PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL; + +/*lint -restore */ + +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + /* If static allocation is supported then the application must provide the + following callback function - which enables the application to optionally + provide the memory that will be used by the timer task as the task's stack + and TCB. */ + extern void vApplicationGetTimerTaskMemory( StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize ); + +#endif + +/* + * Initialise the infrastructure used by the timer service task if it has not + * been initialised already. + */ +static void prvCheckForValidListAndQueue( void ) PRIVILEGED_FUNCTION; + +/* + * The timer service task (daemon). Timer functionality is controlled by this + * task. Other tasks communicate with the timer service task using the + * xTimerQueue queue. 
+ */ +static void prvTimerTask( void *pvParameters ) PRIVILEGED_FUNCTION; + +/* + * Called by the timer service task to interpret and process a command it + * received on the timer queue. + */ +static void prvProcessReceivedCommands( void ) PRIVILEGED_FUNCTION; + +/* + * Insert the timer into either xActiveTimerList1, or xActiveTimerList2, + * depending on if the expire time causes a timer counter overflow. + */ +static BaseType_t prvInsertTimerInActiveList( Timer_t * const pxTimer, const TickType_t xNextExpiryTime, const TickType_t xTimeNow, const TickType_t xCommandTime ) PRIVILEGED_FUNCTION; + +/* + * An active timer has reached its expire time. Reload the timer if it is an + * auto reload timer, then call its callback. + */ +static void prvProcessExpiredTimer( const TickType_t xNextExpireTime, const TickType_t xTimeNow ) PRIVILEGED_FUNCTION; + +/* + * The tick count has overflowed. Switch the timer lists after ensuring the + * current timer list does not still reference some timers. + */ +static void prvSwitchTimerLists( void ) PRIVILEGED_FUNCTION; + +/* + * Obtain the current tick count, setting *pxTimerListsWereSwitched to pdTRUE + * if a tick count overflow occurred since prvSampleTimeNow() was last called. + */ +static TickType_t prvSampleTimeNow( BaseType_t * const pxTimerListsWereSwitched ) PRIVILEGED_FUNCTION; + +/* + * If the timer list contains any active timers then return the expire time of + * the timer that will expire first and set *pxListWasEmpty to false. If the + * timer list does not contain any timers then return 0 and set *pxListWasEmpty + * to pdTRUE. + */ +static TickType_t prvGetNextExpireTime( BaseType_t * const pxListWasEmpty ) PRIVILEGED_FUNCTION; + +/* + * If a timer has expired, process it. Otherwise, block the timer service task + * until either a timer does expire or a command is received. + */ +static void prvProcessTimerOrBlockTask( const TickType_t xNextExpireTime, BaseType_t xListWasEmpty ) PRIVILEGED_FUNCTION; + +/* + * Called after a Timer_t structure has been allocated either statically or + * dynamically to fill in the structure's members. + */ +static void prvInitialiseNewTimer( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + Timer_t *pxNewTimer ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +BaseType_t xTimerCreateTimerTask( void ) +{ +BaseType_t xReturn = pdFAIL; + + /* This function is called when the scheduler is started if + configUSE_TIMERS is set to 1. Check that the infrastructure used by the + timer service task has been created/initialised. If timers have already + been created then the initialisation will already have been performed. 
*/ + prvCheckForValidListAndQueue(); + + if( xTimerQueue != NULL ) + { + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + StaticTask_t *pxTimerTaskTCBBuffer = NULL; + StackType_t *pxTimerTaskStackBuffer = NULL; + uint32_t ulTimerTaskStackSize; + + vApplicationGetTimerTaskMemory( &pxTimerTaskTCBBuffer, &pxTimerTaskStackBuffer, &ulTimerTaskStackSize ); + xTimerTaskHandle = xTaskCreateStatic( prvTimerTask, + configTIMER_SERVICE_TASK_NAME, + ulTimerTaskStackSize, + NULL, + ( ( UBaseType_t ) configTIMER_TASK_PRIORITY ) | portPRIVILEGE_BIT, + pxTimerTaskStackBuffer, + pxTimerTaskTCBBuffer ); + + if( xTimerTaskHandle != NULL ) + { + xReturn = pdPASS; + } + } + #else + { + xReturn = xTaskCreate( prvTimerTask, + configTIMER_SERVICE_TASK_NAME, + configTIMER_TASK_STACK_DEPTH, + NULL, + ( ( UBaseType_t ) configTIMER_TASK_PRIORITY ) | portPRIVILEGE_BIT, + &xTimerTaskHandle ); + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + configASSERT( xReturn ); + return xReturn; +} +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + TimerHandle_t xTimerCreate( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction ) + { + Timer_t *pxNewTimer; + + pxNewTimer = ( Timer_t * ) pvPortMalloc( sizeof( Timer_t ) ); + + if( pxNewTimer != NULL ) + { + prvInitialiseNewTimer( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxNewTimer ); + + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + /* Timers can be created statically or dynamically, so note this + timer was created dynamically in case the timer is later + deleted. */ + pxNewTimer->ucStaticallyAllocated = pdFALSE; + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + } + + return pxNewTimer; + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + TimerHandle_t xTimerCreateStatic( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + StaticTimer_t *pxTimerBuffer ) + { + Timer_t *pxNewTimer; + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticTimer_t equals the size of the real timer + structure. */ + volatile size_t xSize = sizeof( StaticTimer_t ); + configASSERT( xSize == sizeof( Timer_t ) ); + } + #endif /* configASSERT_DEFINED */ + + /* A pointer to a StaticTimer_t structure MUST be provided, use it. */ + configASSERT( pxTimerBuffer ); + pxNewTimer = ( Timer_t * ) pxTimerBuffer; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ + + if( pxNewTimer != NULL ) + { + prvInitialiseNewTimer( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxNewTimer ); + + #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* Timers can be created statically or dynamically so note this + timer was created statically in case it is later deleted. 
*/ + pxNewTimer->ucStaticallyAllocated = pdTRUE; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + } + + return pxNewTimer; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewTimer( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + Timer_t *pxNewTimer ) +{ + /* 0 is not a valid value for xTimerPeriodInTicks. */ + configASSERT( ( xTimerPeriodInTicks > 0 ) ); + + if( pxNewTimer != NULL ) + { + /* Ensure the infrastructure used by the timer service task has been + created/initialised. */ + prvCheckForValidListAndQueue(); + + /* Initialise the timer structure members using the function + parameters. */ + pxNewTimer->pcTimerName = pcTimerName; + pxNewTimer->xTimerPeriodInTicks = xTimerPeriodInTicks; + pxNewTimer->uxAutoReload = uxAutoReload; + pxNewTimer->pvTimerID = pvTimerID; + pxNewTimer->pxCallbackFunction = pxCallbackFunction; + vListInitialiseItem( &( pxNewTimer->xTimerListItem ) ); + traceTIMER_CREATE( pxNewTimer ); + } +} +/*-----------------------------------------------------------*/ + +BaseType_t xTimerGenericCommand( TimerHandle_t xTimer, const BaseType_t xCommandID, const TickType_t xOptionalValue, BaseType_t * const pxHigherPriorityTaskWoken, const TickType_t xTicksToWait ) +{ +BaseType_t xReturn = pdFAIL; +DaemonTaskMessage_t xMessage; + + configASSERT( xTimer ); + + /* Send a message to the timer service task to perform a particular action + on a particular timer definition. */ + if( xTimerQueue != NULL ) + { + /* Send a command to the timer service task to start the xTimer timer. */ + xMessage.xMessageID = xCommandID; + xMessage.u.xTimerParameters.xMessageValue = xOptionalValue; + xMessage.u.xTimerParameters.pxTimer = ( Timer_t * ) xTimer; + + if( xCommandID < tmrFIRST_FROM_ISR_COMMAND ) + { + if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING ) + { + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, xTicksToWait ); + } + else + { + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, tmrNO_DELAY ); + } + } + else + { + xReturn = xQueueSendToBackFromISR( xTimerQueue, &xMessage, pxHigherPriorityTaskWoken ); + } + + traceTIMER_COMMAND_SEND( xTimer, xCommandID, xOptionalValue, xReturn ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +TaskHandle_t xTimerGetTimerDaemonTaskHandle( void ) +{ + /* If xTimerGetTimerDaemonTaskHandle() is called before the scheduler has been + started, then xTimerTaskHandle will be NULL. 
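
Putting the creation path above together with the command queue: an application creates a timer and then "starts" it, which just queues a tmrCOMMAND_START message for the daemon task. A sketch, assuming configUSE_TIMERS == 1 (names and the 500 ms period are illustrative); the callback executes in the daemon task, so it must never block:

#include "FreeRTOS.h"
#include "timers.h"

static void prvBlinkCallback( TimerHandle_t xTimer )
{
    ( void ) xTimer;
    /* ...toggle an LED, kick a watchdog, etc. - no blocking calls... */
}

void vStartBlinkTimer( void )
{
TimerHandle_t xTimer;

    xTimer = xTimerCreate( "Blink",                 /* Debug name only. */
                           pdMS_TO_TICKS( 500 ),    /* Period in ticks. */
                           pdTRUE,                  /* Auto-reload. */
                           NULL,                    /* Timer ID unused here. */
                           prvBlinkCallback );

    if( xTimer != NULL )
    {
        /* Queues a tmrCOMMAND_START message for the daemon task, blocking
        up to 10 ticks if the timer command queue is full. */
        ( void ) xTimerStart( xTimer, ( TickType_t ) 10 );
    }
}
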
*/ + configASSERT( ( xTimerTaskHandle != NULL ) ); + return xTimerTaskHandle; +} +/*-----------------------------------------------------------*/ + +TickType_t xTimerGetPeriod( TimerHandle_t xTimer ) +{ +Timer_t *pxTimer = ( Timer_t * ) xTimer; + + configASSERT( xTimer ); + return pxTimer->xTimerPeriodInTicks; +} +/*-----------------------------------------------------------*/ + +TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) +{ +Timer_t * pxTimer = ( Timer_t * ) xTimer; +TickType_t xReturn; + + configASSERT( xTimer ); + xReturn = listGET_LIST_ITEM_VALUE( &( pxTimer->xTimerListItem ) ); + return xReturn; +} +/*-----------------------------------------------------------*/ + +const char * pcTimerGetName( TimerHandle_t xTimer ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +{ +Timer_t *pxTimer = ( Timer_t * ) xTimer; + + configASSERT( xTimer ); + return pxTimer->pcTimerName; +} +/*-----------------------------------------------------------*/ + +static void prvProcessExpiredTimer( const TickType_t xNextExpireTime, const TickType_t xTimeNow ) +{ +BaseType_t xResult; +Timer_t * const pxTimer = ( Timer_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxCurrentTimerList ); + + /* Remove the timer from the list of active timers. A check has already + been performed to ensure the list is not empty. */ + ( void ) uxListRemove( &( pxTimer->xTimerListItem ) ); + traceTIMER_EXPIRED( pxTimer ); + + /* If the timer is an auto reload timer then calculate the next + expiry time and re-insert the timer in the list of active timers. */ + if( pxTimer->uxAutoReload == ( UBaseType_t ) pdTRUE ) + { + /* The timer is inserted into a list using a time relative to anything + other than the current time. It will therefore be inserted into the + correct list relative to the time this task thinks it is now. */ + if( prvInsertTimerInActiveList( pxTimer, ( xNextExpireTime + pxTimer->xTimerPeriodInTicks ), xTimeNow, xNextExpireTime ) != pdFALSE ) + { + /* The timer expired before it was added to the active timer + list. Reload it now. */ + xResult = xTimerGenericCommand( pxTimer, tmrCOMMAND_START_DONT_TRACE, xNextExpireTime, NULL, tmrNO_DELAY ); + configASSERT( xResult ); + ( void ) xResult; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Call the timer callback. */ + pxTimer->pxCallbackFunction( ( TimerHandle_t ) pxTimer ); +} +/*-----------------------------------------------------------*/ + +static void prvTimerTask( void *pvParameters ) +{ +TickType_t xNextExpireTime; +BaseType_t xListWasEmpty; + + /* Just to avoid compiler warnings. */ + ( void ) pvParameters; + + #if( configUSE_DAEMON_TASK_STARTUP_HOOK == 1 ) + { + extern void vApplicationDaemonTaskStartupHook( void ); + + /* Allow the application writer to execute some code in the context of + this task at the point the task starts executing. This is useful if the + application includes initialisation code that would benefit from + executing after the scheduler has been started. */ + vApplicationDaemonTaskStartupHook(); + } + #endif /* configUSE_DAEMON_TASK_STARTUP_HOOK */ + + for( ;; ) + { + /* Query the timers list to see if it contains any timers, and if so, + obtain the time at which the next timer will expire. */ + xNextExpireTime = prvGetNextExpireTime( &xListWasEmpty ); + + /* If a timer has expired, process it. Otherwise, block this task + until either a timer does expire, or a command is received. 
*/ + prvProcessTimerOrBlockTask( xNextExpireTime, xListWasEmpty ); + + /* Empty the command queue. */ + prvProcessReceivedCommands(); + } +} +/*-----------------------------------------------------------*/ + +static void prvProcessTimerOrBlockTask( const TickType_t xNextExpireTime, BaseType_t xListWasEmpty ) +{ +TickType_t xTimeNow; +BaseType_t xTimerListsWereSwitched; + + vTaskSuspendAll(); + { + /* Obtain the time now to make an assessment as to whether the timer + has expired or not. If obtaining the time causes the lists to switch + then don't process this timer as any timers that remained in the list + when the lists were switched will have been processed within the + prvSampleTimeNow() function. */ + xTimeNow = prvSampleTimeNow( &xTimerListsWereSwitched ); + if( xTimerListsWereSwitched == pdFALSE ) + { + /* The tick count has not overflowed; has the timer expired? */ + if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) ) + { + ( void ) xTaskResumeAll(); + prvProcessExpiredTimer( xNextExpireTime, xTimeNow ); + } + else + { + /* The tick count has not overflowed, and the next expire + time has not been reached yet. This task should therefore + block to wait for the next expire time or a command to be + received - whichever comes first. The following line cannot + be reached unless xNextExpireTime > xTimeNow, except in the + case when the current timer list is empty. */ + if( xListWasEmpty != pdFALSE ) + { + /* The current timer list is empty - is the overflow list + also empty? */ + xListWasEmpty = listLIST_IS_EMPTY( pxOverflowTimerList ); + } + + vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty ); + + if( xTaskResumeAll() == pdFALSE ) + { + /* Yield to wait for either a command to arrive, or the + block time to expire. If a command arrived between the + critical section being exited and this yield then the yield + will not cause the task to block. */ + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + else + { + ( void ) xTaskResumeAll(); + } + } +} +/*-----------------------------------------------------------*/ + +static TickType_t prvGetNextExpireTime( BaseType_t * const pxListWasEmpty ) +{ +TickType_t xNextExpireTime; + + /* Timers are listed in expiry time order, with the head of the list + referencing the timer that will expire first. Obtain the time at which + the timer with the nearest expiry time will expire. If there are no + active timers then just set the next expire time to 0. That will cause + this task to unblock when the tick count overflows, at which point the + timer lists will be switched and the next expiry time can be + re-assessed. */ + *pxListWasEmpty = listLIST_IS_EMPTY( pxCurrentTimerList ); + if( *pxListWasEmpty == pdFALSE ) + { + xNextExpireTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxCurrentTimerList ); + } + else + { + /* Ensure the task unblocks when the tick count rolls over. */ + xNextExpireTime = ( TickType_t ) 0U; + } + + return xNextExpireTime; +} +/*-----------------------------------------------------------*/ + +static TickType_t prvSampleTimeNow( BaseType_t * const pxTimerListsWereSwitched ) +{ +TickType_t xTimeNow; +PRIVILEGED_DATA static TickType_t xLastTime = ( TickType_t ) 0U; /*lint !e956 Variable is only accessible to one task.
*/ + + xTimeNow = xTaskGetTickCount(); + + if( xTimeNow < xLastTime ) + { + prvSwitchTimerLists(); + *pxTimerListsWereSwitched = pdTRUE; + } + else + { + *pxTimerListsWereSwitched = pdFALSE; + } + + xLastTime = xTimeNow; + + return xTimeNow; +} +/*-----------------------------------------------------------*/ + +static BaseType_t prvInsertTimerInActiveList( Timer_t * const pxTimer, const TickType_t xNextExpiryTime, const TickType_t xTimeNow, const TickType_t xCommandTime ) +{ +BaseType_t xProcessTimerNow = pdFALSE; + + listSET_LIST_ITEM_VALUE( &( pxTimer->xTimerListItem ), xNextExpiryTime ); + listSET_LIST_ITEM_OWNER( &( pxTimer->xTimerListItem ), pxTimer ); + + if( xNextExpiryTime <= xTimeNow ) + { + /* Has the expiry time elapsed between the time the command to start/reset a + timer was issued and the time the command was processed? */ + if( ( ( TickType_t ) ( xTimeNow - xCommandTime ) ) >= pxTimer->xTimerPeriodInTicks ) /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + { + /* The time between a command being issued and the command being + processed actually exceeds the timer's period. */ + xProcessTimerNow = pdTRUE; + } + else + { + vListInsert( pxOverflowTimerList, &( pxTimer->xTimerListItem ) ); + } + } + else + { + if( ( xTimeNow < xCommandTime ) && ( xNextExpiryTime >= xCommandTime ) ) + { + /* If, since the command was issued, the tick count has overflowed + but the expiry time has not, then the timer must have already passed + its expiry time and should be processed immediately. */ + xProcessTimerNow = pdTRUE; + } + else + { + vListInsert( pxCurrentTimerList, &( pxTimer->xTimerListItem ) ); + } + } + + return xProcessTimerNow; +} +/*-----------------------------------------------------------*/ + +static void prvProcessReceivedCommands( void ) +{ +DaemonTaskMessage_t xMessage; +Timer_t *pxTimer; +BaseType_t xTimerListsWereSwitched, xResult; +TickType_t xTimeNow; + + while( xQueueReceive( xTimerQueue, &xMessage, tmrNO_DELAY ) != pdFAIL ) /*lint !e603 xMessage does not have to be initialised as it is passed out, not in, and it is not used unless xQueueReceive() returns pdTRUE. */ + { + #if ( INCLUDE_xTimerPendFunctionCall == 1 ) + { + /* Negative commands are pended function calls rather than timer + commands. */ + if( xMessage.xMessageID < ( BaseType_t ) 0 ) + { + const CallbackParameters_t * const pxCallback = &( xMessage.u.xCallbackParameters ); + + /* The timer uses the xCallbackParameters member to request a + callback be executed. Check the callback is not NULL. */ + configASSERT( pxCallback ); + + /* Call the function. */ + pxCallback->pxCallbackFunction( pxCallback->pvParameter1, pxCallback->ulParameter2 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* INCLUDE_xTimerPendFunctionCall */ + + /* Commands that are positive are timer commands rather than pended + function calls. */ + if( xMessage.xMessageID >= ( BaseType_t ) 0 ) + { + /* The message uses the xTimerParameters member to work on a + software timer. */ + pxTimer = xMessage.u.xTimerParameters.pxTimer; + + if( listIS_CONTAINED_WITHIN( NULL, &( pxTimer->xTimerListItem ) ) == pdFALSE ) /*lint !e961. The cast is only redundant when NULL is passed into the macro. */ + { + /* The timer is in a list, remove it.
*/ + ( void ) uxListRemove( &( pxTimer->xTimerListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceTIMER_COMMAND_RECEIVED( pxTimer, xMessage.xMessageID, xMessage.u.xTimerParameters.xMessageValue ); + + /* In this case the xTimerListsWereSwitched parameter is not used, but + it must be present in the function call. prvSampleTimeNow() must be + called after the message is received from xTimerQueue so there is no + possibility of a higher priority task adding a message to the message + queue with a time that is ahead of the timer daemon task (because it + pre-empted the timer daemon task after the xTimeNow value was set). */ + xTimeNow = prvSampleTimeNow( &xTimerListsWereSwitched ); + + switch( xMessage.xMessageID ) + { + case tmrCOMMAND_START : + case tmrCOMMAND_START_FROM_ISR : + case tmrCOMMAND_RESET : + case tmrCOMMAND_RESET_FROM_ISR : + case tmrCOMMAND_START_DONT_TRACE : + /* Start or restart a timer. */ + if( prvInsertTimerInActiveList( pxTimer, xMessage.u.xTimerParameters.xMessageValue + pxTimer->xTimerPeriodInTicks, xTimeNow, xMessage.u.xTimerParameters.xMessageValue ) != pdFALSE ) + { + /* The timer expired before it was added to the active + timer list. Process it now. */ + pxTimer->pxCallbackFunction( ( TimerHandle_t ) pxTimer ); + traceTIMER_EXPIRED( pxTimer ); + + if( pxTimer->uxAutoReload == ( UBaseType_t ) pdTRUE ) + { + xResult = xTimerGenericCommand( pxTimer, tmrCOMMAND_START_DONT_TRACE, xMessage.u.xTimerParameters.xMessageValue + pxTimer->xTimerPeriodInTicks, NULL, tmrNO_DELAY ); + configASSERT( xResult ); + ( void ) xResult; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + break; + + case tmrCOMMAND_STOP : + case tmrCOMMAND_STOP_FROM_ISR : + /* The timer has already been removed from the active list. + There is nothing to do here. */ + break; + + case tmrCOMMAND_CHANGE_PERIOD : + case tmrCOMMAND_CHANGE_PERIOD_FROM_ISR : + pxTimer->xTimerPeriodInTicks = xMessage.u.xTimerParameters.xMessageValue; + configASSERT( ( pxTimer->xTimerPeriodInTicks > 0 ) ); + + /* The new period does not really have a reference, and can + be longer or shorter than the old one. The command time is + therefore set to the current time, and as the period cannot + be zero the next expiry time can only be in the future, + meaning (unlike for the xTimerStart() case above) there is + no fail case that needs to be handled here. */ + ( void ) prvInsertTimerInActiveList( pxTimer, ( xTimeNow + pxTimer->xTimerPeriodInTicks ), xTimeNow, xTimeNow ); + break; + + case tmrCOMMAND_DELETE : + /* The timer has already been removed from the active list, + just free up the memory if the memory was dynamically + allocated. */ + #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) + { + /* The timer can only have been allocated dynamically - + free it again. */ + vPortFree( pxTimer ); + } + #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + { + /* The timer could have been allocated statically or + dynamically, so check before attempting to free the + memory. */ + if( pxTimer->ucStaticallyAllocated == ( uint8_t ) pdFALSE ) + { + vPortFree( pxTimer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + break; + + default : + /* Don't expect to get here. 
*/ + break; + } + } + } +} +/*-----------------------------------------------------------*/ + +static void prvSwitchTimerLists( void ) +{ +TickType_t xNextExpireTime, xReloadTime; +List_t *pxTemp; +Timer_t *pxTimer; +BaseType_t xResult; + + /* The tick count has overflowed. The timer lists must be switched. + If there are any timers still referenced from the current timer list + then they must have expired and should be processed before the lists + are switched. */ + while( listLIST_IS_EMPTY( pxCurrentTimerList ) == pdFALSE ) + { + xNextExpireTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxCurrentTimerList ); + + /* Remove the timer from the list. */ + pxTimer = ( Timer_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxCurrentTimerList ); + ( void ) uxListRemove( &( pxTimer->xTimerListItem ) ); + traceTIMER_EXPIRED( pxTimer ); + + /* Execute its callback, then send a command to restart the timer if + it is an auto-reload timer. It cannot be restarted here as the lists + have not yet been switched. */ + pxTimer->pxCallbackFunction( ( TimerHandle_t ) pxTimer ); + + if( pxTimer->uxAutoReload == ( UBaseType_t ) pdTRUE ) + { + /* Calculate the reload value, and if the reload value results in + the timer going into the same timer list then it has already expired + and the timer should be re-inserted into the current list so it is + processed again within this loop. Otherwise a command should be sent + to restart the timer to ensure it is only inserted into a list after + the lists have been swapped. */ + xReloadTime = ( xNextExpireTime + pxTimer->xTimerPeriodInTicks ); + if( xReloadTime > xNextExpireTime ) + { + listSET_LIST_ITEM_VALUE( &( pxTimer->xTimerListItem ), xReloadTime ); + listSET_LIST_ITEM_OWNER( &( pxTimer->xTimerListItem ), pxTimer ); + vListInsert( pxCurrentTimerList, &( pxTimer->xTimerListItem ) ); + } + else + { + xResult = xTimerGenericCommand( pxTimer, tmrCOMMAND_START_DONT_TRACE, xNextExpireTime, NULL, tmrNO_DELAY ); + configASSERT( xResult ); + ( void ) xResult; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + pxTemp = pxCurrentTimerList; + pxCurrentTimerList = pxOverflowTimerList; + pxOverflowTimerList = pxTemp; +} +/*-----------------------------------------------------------*/ + +static void prvCheckForValidListAndQueue( void ) +{ + /* Check that the list from which active timers are referenced, and the + queue used to communicate with the timer service, have been + initialised. */ + taskENTER_CRITICAL(); + { + if( xTimerQueue == NULL ) + { + vListInitialise( &xActiveTimerList1 ); + vListInitialise( &xActiveTimerList2 ); + pxCurrentTimerList = &xActiveTimerList1; + pxOverflowTimerList = &xActiveTimerList2; + + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + /* The timer queue is allocated statically in case + configSUPPORT_DYNAMIC_ALLOCATION is 0. */ + static StaticQueue_t xStaticTimerQueue; /*lint !e956 Ok to declare in this manner to prevent additional conditional compilation guards in other locations. */ + static uint8_t ucStaticTimerQueueStorage[ ( size_t ) configTIMER_QUEUE_LENGTH * sizeof( DaemonTaskMessage_t ) ]; /*lint !e956 Ok to declare in this manner to prevent additional conditional compilation guards in other locations. 
*/ + + xTimerQueue = xQueueCreateStatic( ( UBaseType_t ) configTIMER_QUEUE_LENGTH, ( UBaseType_t ) sizeof( DaemonTaskMessage_t ), &( ucStaticTimerQueueStorage[ 0 ] ), &xStaticTimerQueue ); + } + #else + { + xTimerQueue = xQueueCreate( ( UBaseType_t ) configTIMER_QUEUE_LENGTH, sizeof( DaemonTaskMessage_t ) ); + } + #endif + + #if ( configQUEUE_REGISTRY_SIZE > 0 ) + { + if( xTimerQueue != NULL ) + { + vQueueAddToRegistry( xTimerQueue, "TmrQ" ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configQUEUE_REGISTRY_SIZE */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer ) +{ +BaseType_t xTimerIsInActiveList; +Timer_t *pxTimer = ( Timer_t * ) xTimer; + + configASSERT( xTimer ); + + /* Is the timer in the list of active timers? */ + taskENTER_CRITICAL(); + { + /* Checking to see if it is in the NULL list in effect checks to see if + it is referenced from either the current or the overflow timer lists in + one go, but the logic has to be reversed, hence the '!'. */ + xTimerIsInActiveList = ( BaseType_t ) !( listIS_CONTAINED_WITHIN( NULL, &( pxTimer->xTimerListItem ) ) ); /*lint !e961. Cast is only redundant when NULL is passed into the macro. */ + } + taskEXIT_CRITICAL(); + + return xTimerIsInActiveList; +} /*lint !e818 Can't be pointer to const due to the typedef. */ +/*-----------------------------------------------------------*/ + +void *pvTimerGetTimerID( const TimerHandle_t xTimer ) +{ +Timer_t * const pxTimer = ( Timer_t * ) xTimer; +void *pvReturn; + + configASSERT( xTimer ); + + taskENTER_CRITICAL(); + { + pvReturn = pxTimer->pvTimerID; + } + taskEXIT_CRITICAL(); + + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ) +{ +Timer_t * const pxTimer = ( Timer_t * ) xTimer; + + configASSERT( xTimer ); + + taskENTER_CRITICAL(); + { + pxTimer->pvTimerID = pvNewID; + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +#if( INCLUDE_xTimerPendFunctionCall == 1 ) + + BaseType_t xTimerPendFunctionCallFromISR( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, BaseType_t *pxHigherPriorityTaskWoken ) + { + DaemonTaskMessage_t xMessage; + BaseType_t xReturn; + + /* Complete the message with the function parameters and post it to the + daemon task. */ + xMessage.xMessageID = tmrCOMMAND_EXECUTE_CALLBACK_FROM_ISR; + xMessage.u.xCallbackParameters.pxCallbackFunction = xFunctionToPend; + xMessage.u.xCallbackParameters.pvParameter1 = pvParameter1; + xMessage.u.xCallbackParameters.ulParameter2 = ulParameter2; + + xReturn = xQueueSendFromISR( xTimerQueue, &xMessage, pxHigherPriorityTaskWoken ); + + tracePEND_FUNC_CALL_FROM_ISR( xFunctionToPend, pvParameter1, ulParameter2, xReturn ); + + return xReturn; + } + +#endif /* INCLUDE_xTimerPendFunctionCall */ +/*-----------------------------------------------------------*/ + +#if( INCLUDE_xTimerPendFunctionCall == 1 ) + + BaseType_t xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, TickType_t xTicksToWait ) + { + DaemonTaskMessage_t xMessage; + BaseType_t xReturn; + + /* This function can only be called after a timer has been created or + after the scheduler has been started because, until then, the timer + queue does not exist. 
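+ + As a minimal usage sketch, a function with the PendedFunction_t signature, void vFunction( void *pvParameter1, uint32_t ulParameter2 ), can be deferred to the timer daemon task as follows (vDeferredWork is a hypothetical application function): + + static void vDeferredWork( void *pvParameter1, uint32_t ulParameter2 ) + { + ( runs in the context of the timer daemon task ) + } + + xTimerPendFunctionCall( vDeferredWork, NULL, 0, portMAX_DELAY );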
*/ + configASSERT( xTimerQueue ); + + /* Complete the message with the function parameters and post it to the + daemon task. */ + xMessage.xMessageID = tmrCOMMAND_EXECUTE_CALLBACK; + xMessage.u.xCallbackParameters.pxCallbackFunction = xFunctionToPend; + xMessage.u.xCallbackParameters.pvParameter1 = pvParameter1; + xMessage.u.xCallbackParameters.ulParameter2 = ulParameter2; + + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, xTicksToWait ); + + tracePEND_FUNC_CALL( xFunctionToPend, pvParameter1, ulParameter2, xReturn ); + + return xReturn; + } + +#endif /* INCLUDE_xTimerPendFunctionCall */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxTimerGetTimerNumber( TimerHandle_t xTimer ) + { + return ( ( Timer_t * ) xTimer )->uxTimerNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vTimerSetTimerNumber( TimerHandle_t xTimer, UBaseType_t uxTimerNumber ) + { + ( ( Timer_t * ) xTimer )->uxTimerNumber = uxTimerNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +/* This entire source file will be skipped if the application is not configured +to include software timer functionality. If you want to include software timer +functionality then ensure configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. */ +#endif /* configUSE_TIMERS == 1 */ + + + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/api_lib.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/api_lib.c new file mode 100644 index 000000000..dda05d3af --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/api_lib.c @@ -0,0 +1,1010 @@ +/** + * @file + * Sequential API External module + * + * @defgroup netconn Netconn API + * @ingroup sequential_api + * Thread-safe, to be called from non-TCPIP threads only. + * TX/RX handling based on @ref netbuf (containing @ref pbuf) + * to avoid copying data around. + * + * @defgroup netconn_common Common functions + * @ingroup netconn + * For use with TCP and UDP + * + * @defgroup netconn_tcp TCP only + * @ingroup netconn + * TCP only functions + * + * @defgroup netconn_udp UDP only + * @ingroup netconn + * UDP only functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + */ + +/* This is the part of the API that is linked with + the application */ + +#include "lwip/opt.h" + +#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/api.h" +#include "lwip/memp.h" + +#include "lwip/ip.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/priv/api_msg.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/priv/tcpip_priv.h" + +#include <string.h> + +#define API_MSG_VAR_REF(name) API_VAR_REF(name) +#define API_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct api_msg, name) +#define API_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct api_msg, MEMP_API_MSG, name, ERR_MEM) +#define API_MSG_VAR_ALLOC_RETURN_NULL(name) API_VAR_ALLOC(struct api_msg, MEMP_API_MSG, name, NULL) +#define API_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_API_MSG, name) + +static err_t netconn_close_shutdown(struct netconn *conn, u8_t how); + +/** + * Call the lower part of a netconn_* function. + * This function is then running in the thread context + * of tcpip_thread and has exclusive access to lwIP core code. + * + * @param fn function to call + * @param apimsg a struct containing the function to call and its parameters + * @return ERR_OK if the function was called, another err_t if not + */ +static err_t +netconn_apimsg(tcpip_callback_fn fn, struct api_msg *apimsg) +{ + err_t err; + +#ifdef LWIP_DEBUG + /* catch functions that don't set err */ + apimsg->err = ERR_VAL; +#endif /* LWIP_DEBUG */ + +#if LWIP_NETCONN_SEM_PER_THREAD + apimsg->op_completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + err = tcpip_send_msg_wait_sem(fn, apimsg, LWIP_API_MSG_SEM(apimsg)); + if (err == ERR_OK) { + return apimsg->err; + } + return err; +} + +/** + * Create a new netconn (of a specific type) that has a callback function. + * The corresponding pcb is also created.
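+ * + * As a minimal usage sketch, applications typically reach this function through the netconn_new() wrapper from api.h (TCP shown; error handling omitted, port 80 is only an example): + * + * struct netconn *conn = netconn_new(NETCONN_TCP); + * if (conn != NULL) { + * netconn_bind(conn, IP4_ADDR_ANY, 80); + * }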
+ * + * @param t the type of 'connection' to create (@see enum netconn_type) + * @param proto the IP protocol for RAW IP pcbs + * @param callback a function to call on status changes (RX available, TX'ed) + * @return a newly allocated struct netconn or + * NULL on memory error + */ +struct netconn* +netconn_new_with_proto_and_callback(enum netconn_type t, u8_t proto, netconn_callback callback) +{ + struct netconn *conn; + API_MSG_VAR_DECLARE(msg); + API_MSG_VAR_ALLOC_RETURN_NULL(msg); + + conn = netconn_alloc(t, callback); + if (conn != NULL) { + err_t err; + + API_MSG_VAR_REF(msg).msg.n.proto = proto; + API_MSG_VAR_REF(msg).conn = conn; + err = netconn_apimsg(lwip_netconn_do_newconn, &API_MSG_VAR_REF(msg)); + if (err != ERR_OK) { + LWIP_ASSERT("freeing conn without freeing pcb", conn->pcb.tcp == NULL); + LWIP_ASSERT("conn has no recvmbox", sys_mbox_valid(&conn->recvmbox)); +#if LWIP_TCP + LWIP_ASSERT("conn->acceptmbox shouldn't exist", !sys_mbox_valid(&conn->acceptmbox)); +#endif /* LWIP_TCP */ +#if !LWIP_NETCONN_SEM_PER_THREAD + LWIP_ASSERT("conn has no op_completed", sys_sem_valid(&conn->op_completed)); + sys_sem_free(&conn->op_completed); +#endif /* !LWIP_NETCONN_SEM_PER_THREAD */ + sys_mbox_free(&conn->recvmbox); + memp_free(MEMP_NETCONN, conn); + API_MSG_VAR_FREE(msg); + return NULL; + } + } + API_MSG_VAR_FREE(msg); + return conn; +} + +/** + * @ingroup netconn_common + * Close a netconn 'connection' and free its resources. + * UDP and RAW connections are completely closed; TCP pcbs might still be in a wait state + * after this returns. + * + * @param conn the netconn to delete + * @return ERR_OK if the connection was deleted + */ +err_t +netconn_delete(struct netconn *conn) +{ + err_t err; + API_MSG_VAR_DECLARE(msg); + + /* No ASSERT here because it is possible to get (conn == NULL) if we got an accept error */ + if (conn == NULL) { + return ERR_OK; + } + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.sd.time_started = sys_now(); +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ +#if LWIP_TCP + API_MSG_VAR_REF(msg).msg.sd.polls_left = + ((LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT + TCP_SLOW_INTERVAL - 1) / TCP_SLOW_INTERVAL) + 1; +#endif /* LWIP_TCP */ +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + err = netconn_apimsg(lwip_netconn_do_delconn, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + if (err != ERR_OK) { + return err; + } + + netconn_free(conn); + + return ERR_OK; +} + +/** + * Get the local or remote IP address and port of a netconn. + * For RAW netconns, this returns the protocol instead of a port!
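+ * + * A minimal usage sketch (conn is assumed to be an already set-up netconn; the final argument 1 selects the local address): + * + * ip_addr_t addr; + * u16_t port; + * err_t err = netconn_getaddr(conn, &addr, &port, 1);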
+ * + * @param conn the netconn to query + * @param addr a pointer to which to save the IP address + * @param port a pointer to which to save the port (or protocol for RAW) + * @param local 1 to get the local IP address, 0 to get the remote one + * @return ERR_CONN for invalid connections + * ERR_OK if the information was retrieved + */ +err_t +netconn_getaddr(struct netconn *conn, ip_addr_t *addr, u16_t *port, u8_t local) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_getaddr: invalid conn", (conn != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_getaddr: invalid addr", (addr != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_getaddr: invalid port", (port != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.ad.local = local; +#if LWIP_MPU_COMPATIBLE + err = netconn_apimsg(lwip_netconn_do_getaddr, &API_MSG_VAR_REF(msg)); + *addr = msg->msg.ad.ipaddr; + *port = msg->msg.ad.port; +#else /* LWIP_MPU_COMPATIBLE */ + msg.msg.ad.ipaddr = addr; + msg.msg.ad.port = port; + err = netconn_apimsg(lwip_netconn_do_getaddr, &msg); +#endif /* LWIP_MPU_COMPATIBLE */ + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Bind a netconn to a specific local IP address and port. + * Binding one netconn twice might not always be checked correctly! + * + * @param conn the netconn to bind + * @param addr the local IP address to bind the netconn to + * (use IP4_ADDR_ANY/IP6_ADDR_ANY to bind to all addresses) + * @param port the local port to bind the netconn to (not used for RAW) + * @return ERR_OK if bound, any other err_t on failure + */ +err_t +netconn_bind(struct netconn *conn, const ip_addr_t *addr, u16_t port) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_bind: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (addr == NULL) { + addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV4 && LWIP_IPV6 + /* "Socket API like" dual-stack support: If IP to bind to is IP6_ADDR_ANY, + * and NETCONN_FLAG_IPV6_V6ONLY is 0, use IP_ANY_TYPE to bind + */ + if ((netconn_get_ipv6only(conn) == 0) && + ip_addr_cmp(addr, IP6_ADDR_ANY)) { + addr = IP_ANY_TYPE; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.ipaddr = API_MSG_VAR_REF(addr); + API_MSG_VAR_REF(msg).msg.bc.port = port; + err = netconn_apimsg(lwip_netconn_do_bind, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Connect a netconn to a specific remote IP address and port. 
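+ * + * A minimal IPv4 usage sketch (assuming LWIP_IPV4 is enabled; the peer 192.168.1.10 and port 7 are only examples): + * + * ip_addr_t peer; + * IP_ADDR4(&peer, 192, 168, 1, 10); + * err_t err = netconn_connect(conn, &peer, 7);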
+ * + * @param conn the netconn to connect + * @param addr the remote IP address to connect to + * @param port the remote port to connect to (not used for RAW) + * @return ERR_OK if connected, return value of tcp_/udp_/raw_connect otherwise + */ +err_t +netconn_connect(struct netconn *conn, const ip_addr_t *addr, u16_t port) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_connect: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (addr == NULL) { + addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.ipaddr = API_MSG_VAR_REF(addr); + API_MSG_VAR_REF(msg).msg.bc.port = port; + err = netconn_apimsg(lwip_netconn_do_connect, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_udp + * Disconnect a netconn from its current peer (only valid for UDP netconns). + * + * @param conn the netconn to disconnect + * @return See @ref err_t + */ +err_t +netconn_disconnect(struct netconn *conn) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_disconnect: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + err = netconn_apimsg(lwip_netconn_do_disconnect, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Set a TCP netconn into listen mode + * + * @param conn the tcp netconn to set to listen mode + * @param backlog the listen backlog, only used if TCP_LISTEN_BACKLOG==1 + * @return ERR_OK if the netconn was set to listen (UDP and RAW netconns + * don't return any error (yet?)) + */ +err_t +netconn_listen_with_backlog(struct netconn *conn, u8_t backlog) +{ +#if LWIP_TCP + API_MSG_VAR_DECLARE(msg); + err_t err; + + /* This does no harm. If TCP_LISTEN_BACKLOG is off, backlog is unused. */ + LWIP_UNUSED_ARG(backlog); + + LWIP_ERROR("netconn_listen: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if TCP_LISTEN_BACKLOG + API_MSG_VAR_REF(msg).msg.lb.backlog = backlog; +#endif /* TCP_LISTEN_BACKLOG */ + err = netconn_apimsg(lwip_netconn_do_listen, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(conn); + LWIP_UNUSED_ARG(backlog); + return ERR_ARG; +#endif /* LWIP_TCP */ +} + +/** + * @ingroup netconn_tcp + * Accept a new connection on a TCP listening netconn. + * + * @param conn the TCP listen netconn + * @param new_conn pointer where the new connection is stored + * @return ERR_OK if a new connection has been received or an error + * code otherwise + */ +err_t +netconn_accept(struct netconn *conn, struct netconn **new_conn) +{ +#if LWIP_TCP + void *accept_ptr; + struct netconn *newconn; +#if TCP_LISTEN_BACKLOG + API_MSG_VAR_DECLARE(msg); +#endif /* TCP_LISTEN_BACKLOG */ + + LWIP_ERROR("netconn_accept: invalid pointer", (new_conn != NULL), return ERR_ARG;); + *new_conn = NULL; + LWIP_ERROR("netconn_accept: invalid conn", (conn != NULL), return ERR_ARG;); + + if (ERR_IS_FATAL(conn->last_err)) { + /* don't recv on fatal errors: this might block the application task + waiting on acceptmbox forever!
*/ + return conn->last_err; + } + if (!sys_mbox_valid(&conn->acceptmbox)) { + return ERR_CLSD; + } + +#if TCP_LISTEN_BACKLOG + API_MSG_VAR_ALLOC(msg); +#endif /* TCP_LISTEN_BACKLOG */ + +#if LWIP_SO_RCVTIMEO + if (sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, conn->recv_timeout) == SYS_ARCH_TIMEOUT) { +#if TCP_LISTEN_BACKLOG + API_MSG_VAR_FREE(msg); +#endif /* TCP_LISTEN_BACKLOG */ + return ERR_TIMEOUT; + } +#else + sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, 0); +#endif /* LWIP_SO_RCVTIMEO*/ + newconn = (struct netconn *)accept_ptr; + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVMINUS, 0); + + if (accept_ptr == &netconn_aborted) { + /* a connection has been aborted: out of pcbs or out of netconns during accept */ + /* @todo: set netconn error, but this would be fatal and thus block further accepts */ +#if TCP_LISTEN_BACKLOG + API_MSG_VAR_FREE(msg); +#endif /* TCP_LISTEN_BACKLOG */ + return ERR_ABRT; + } + if (newconn == NULL) { + /* connection has been aborted */ + /* in this special case, we set the netconn error from application thread, as + on a ready-to-accept listening netconn, there should not be anything running + in tcpip_thread */ + NETCONN_SET_SAFE_ERR(conn, ERR_CLSD); +#if TCP_LISTEN_BACKLOG + API_MSG_VAR_FREE(msg); +#endif /* TCP_LISTEN_BACKLOG */ + return ERR_CLSD; + } +#if TCP_LISTEN_BACKLOG + /* Let the stack know that we have accepted the connection. */ + API_MSG_VAR_REF(msg).conn = newconn; + /* don't care for the return value of lwip_netconn_do_accepted */ + netconn_apimsg(lwip_netconn_do_accepted, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); +#endif /* TCP_LISTEN_BACKLOG */ + + *new_conn = newconn; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(conn); + LWIP_UNUSED_ARG(new_conn); + return ERR_ARG; +#endif /* LWIP_TCP */ +} + +/** + * @ingroup netconn_common + * Receive data: actual implementation that doesn't care whether pbuf or netbuf + * is received + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new pbuf/netbuf is stored when data is received + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + */ +static err_t +netconn_recv_data(struct netconn *conn, void **new_buf) +{ + void *buf = NULL; + u16_t len; +#if LWIP_TCP + API_MSG_VAR_DECLARE(msg); +#if LWIP_MPU_COMPATIBLE + msg = NULL; +#endif +#endif /* LWIP_TCP */ + + LWIP_ERROR("netconn_recv: invalid pointer", (new_buf != NULL), return ERR_ARG;); + *new_buf = NULL; + LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL), return ERR_ARG;); +#if LWIP_TCP +#if (LWIP_UDP || LWIP_RAW) + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) +#endif /* (LWIP_UDP || LWIP_RAW) */ + { + if (!sys_mbox_valid(&conn->recvmbox)) { + /* This happens when calling this function after receiving FIN */ + return sys_mbox_valid(&conn->acceptmbox) ? ERR_CONN : ERR_CLSD; + } + } +#endif /* LWIP_TCP */ + LWIP_ERROR("netconn_recv: invalid recvmbox", sys_mbox_valid(&conn->recvmbox), return ERR_CONN;); + + if (ERR_IS_FATAL(conn->last_err)) { + /* don't recv on fatal errors: this might block the application task + waiting on recvmbox forever! */ + /* @todo: this does not allow us to fetch data that has been put into recvmbox + before the fatal error occurred - is that a problem?
*/ + return conn->last_err; + } +#if LWIP_TCP +#if (LWIP_UDP || LWIP_RAW) + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) +#endif /* (LWIP_UDP || LWIP_RAW) */ + { + API_MSG_VAR_ALLOC(msg); + } +#endif /* LWIP_TCP */ + +#if LWIP_SO_RCVTIMEO + if (sys_arch_mbox_fetch(&conn->recvmbox, &buf, conn->recv_timeout) == SYS_ARCH_TIMEOUT) { +#if LWIP_TCP +#if (LWIP_UDP || LWIP_RAW) + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) +#endif /* (LWIP_UDP || LWIP_RAW) */ + { + API_MSG_VAR_FREE(msg); + } +#endif /* LWIP_TCP */ + return ERR_TIMEOUT; + } +#else + sys_arch_mbox_fetch(&conn->recvmbox, &buf, 0); +#endif /* LWIP_SO_RCVTIMEO*/ + +#if LWIP_TCP +#if (LWIP_UDP || LWIP_RAW) + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) +#endif /* (LWIP_UDP || LWIP_RAW) */ + { + /* Let the stack know that we have taken the data. */ + /* @todo: Speedup: Don't block and wait for the answer here + (to prevent multiple thread-switches). */ + API_MSG_VAR_REF(msg).conn = conn; + if (buf != NULL) { + API_MSG_VAR_REF(msg).msg.r.len = ((struct pbuf *)buf)->tot_len; + } else { + API_MSG_VAR_REF(msg).msg.r.len = 1; + } + + /* don't care for the return value of lwip_netconn_do_recv */ + netconn_apimsg(lwip_netconn_do_recv, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + /* If we are closed, we indicate that we no longer wish to use the socket */ + if (buf == NULL) { + API_EVENT(conn, NETCONN_EVT_RCVMINUS, 0); + if (conn->pcb.ip == NULL) { + /* race condition: RST during recv */ + return conn->last_err == ERR_OK ? ERR_RST : conn->last_err; + } + /* RX side is closed, so deallocate the recvmbox */ + netconn_close_shutdown(conn, NETCONN_SHUT_RD); + /* Don't store ERR_CLSD as conn->err since we are only half-closed */ + return ERR_CLSD; + } + len = ((struct pbuf *)buf)->tot_len; + } +#endif /* LWIP_TCP */ +#if LWIP_TCP && (LWIP_UDP || LWIP_RAW) + else +#endif /* LWIP_TCP && (LWIP_UDP || LWIP_RAW) */ +#if (LWIP_UDP || LWIP_RAW) + { + LWIP_ASSERT("buf != NULL", buf != NULL); + len = netbuf_len((struct netbuf*)buf); + } +#endif /* (LWIP_UDP || LWIP_RAW) */ + +#if LWIP_SO_RCVBUF + SYS_ARCH_DEC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVMINUS, len); + + LWIP_DEBUGF(API_LIB_DEBUG, ("netconn_recv_data: received %p, len=%"U16_F"\n", buf, len)); + + *new_buf = buf; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; +} + +/** + * @ingroup netconn_tcp + * Receive data (in form of a pbuf) from a TCP netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new pbuf is stored when data is received + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + * ERR_ARG if conn is not a TCP netconn + */ +err_t +netconn_recv_tcp_pbuf(struct netconn *conn, struct pbuf **new_buf) +{ + LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data(conn, (void **)new_buf); +} + +/** + * @ingroup netconn_common + * Receive data (in form of a netbuf containing a packet buffer) from a netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when data is received + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + */ +err_t +netconn_recv(struct netconn *conn, struct netbuf **new_buf) +{ +#if LWIP_TCP + struct netbuf *buf
= NULL; + err_t err; +#endif /* LWIP_TCP */ + + LWIP_ERROR("netconn_recv: invalid pointer", (new_buf != NULL), return ERR_ARG;); + *new_buf = NULL; + LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_TCP +#if (LWIP_UDP || LWIP_RAW) + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) +#endif /* (LWIP_UDP || LWIP_RAW) */ + { + struct pbuf *p = NULL; + /* This is not a listening netconn, since recvmbox is set */ + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + return ERR_MEM; + } + + err = netconn_recv_data(conn, (void **)&p); + if (err != ERR_OK) { + memp_free(MEMP_NETBUF, buf); + return err; + } + LWIP_ASSERT("p != NULL", p != NULL); + + buf->p = p; + buf->ptr = p; + buf->port = 0; + ip_addr_set_zero(&buf->addr); + *new_buf = buf; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; + } +#endif /* LWIP_TCP */ +#if LWIP_TCP && (LWIP_UDP || LWIP_RAW) + else +#endif /* LWIP_TCP && (LWIP_UDP || LWIP_RAW) */ + { +#if (LWIP_UDP || LWIP_RAW) + return netconn_recv_data(conn, (void **)new_buf); +#endif /* (LWIP_UDP || LWIP_RAW) */ + } +} + +/** + * @ingroup netconn_udp + * Send data (in form of a netbuf) to a specific remote IP address and port. + * Only to be used for UDP and RAW netconns (not TCP). + * + * @param conn the netconn over which to send data + * @param buf a netbuf containing the data to send + * @param addr the remote IP address to which to send the data + * @param port the remote port to which to send the data + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_sendto(struct netconn *conn, struct netbuf *buf, const ip_addr_t *addr, u16_t port) +{ + if (buf != NULL) { + ip_addr_set(&buf->addr, addr); + buf->port = port; + return netconn_send(conn, buf); + } + return ERR_VAL; +} + +/** + * @ingroup netconn_udp + * Send data over a UDP or RAW netconn (that is already connected). + * + * @param conn the UDP or RAW netconn over which to send data + * @param buf a netbuf containing the data to send + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_send(struct netconn *conn, struct netbuf *buf) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_send: invalid conn", (conn != NULL), return ERR_ARG;); + + LWIP_DEBUGF(API_LIB_DEBUG, ("netconn_send: sending %"U16_F" bytes\n", buf->p->tot_len)); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.b = buf; + err = netconn_apimsg(lwip_netconn_do_send, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Send data over a TCP netconn. 
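+ * + * A minimal usage sketch (the buffer is hypothetical application data): + * + * const char buffer[] = "hello"; + * size_t written = 0; + * err_t err = netconn_write_partly(conn, buffer, sizeof(buffer) - 1, NETCONN_COPY, &written);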
+ * + * @param conn the TCP netconn over which to send data + * @param dataptr pointer to the application buffer that contains the data to send + * @param size size of the application data to send + * @param apiflags combination of the following flags: + * - NETCONN_COPY: data will be copied into memory belonging to the stack + * - NETCONN_MORE: for TCP connection, PSH flag will be set on last segment sent + * - NETCONN_DONTBLOCK: only write the data if all data can be written at once + * @param bytes_written pointer to a location that receives the number of written bytes + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size, + u8_t apiflags, size_t *bytes_written) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + u8_t dontblock; + + LWIP_ERROR("netconn_write: invalid conn", (conn != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_write: invalid conn->type", (NETCONNTYPE_GROUP(conn->type)== NETCONN_TCP), return ERR_VAL;); + if (size == 0) { + return ERR_OK; + } + dontblock = netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK); +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout != 0) { + dontblock = 1; + } +#endif /* LWIP_SO_SNDTIMEO */ + if (dontblock && !bytes_written) { + /* This implies netconn_write() cannot be used for non-blocking send, since + it has no way to return the number of bytes written. */ + return ERR_VAL; + } + + API_MSG_VAR_ALLOC(msg); + /* a non-blocking write sends as much as possible */ + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.w.dataptr = dataptr; + API_MSG_VAR_REF(msg).msg.w.apiflags = apiflags; + API_MSG_VAR_REF(msg).msg.w.len = size; +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout != 0) { + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.w.time_started = sys_now(); + } else { + API_MSG_VAR_REF(msg).msg.w.time_started = 0; + } +#endif /* LWIP_SO_SNDTIMEO */ + + /* For locking the core: this _can_ be delayed on low memory/low send buffer, + but if it is, this is done inside api_msg.c:do_write(), so we can use the + non-blocking version here. */ + err = netconn_apimsg(lwip_netconn_do_write, &API_MSG_VAR_REF(msg)); + if ((err == ERR_OK) && (bytes_written != NULL)) { + if (dontblock) { + /* nonblocking write: maybe the data has been sent partly */ + *bytes_written = API_MSG_VAR_REF(msg).msg.w.len; + } else { + /* blocking call succeeded: all data has been sent */ + *bytes_written = size; + } + } + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Close or shutdown a TCP netconn (doesn't delete it). + * + * @param conn the TCP netconn to close or shutdown + * @param how fully close or only shutdown one side?
+ * @return ERR_OK if the netconn was closed, any other err_t on error + */ +static err_t +netconn_close_shutdown(struct netconn *conn, u8_t how) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + LWIP_UNUSED_ARG(how); + + LWIP_ERROR("netconn_close: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if LWIP_TCP + /* shutting down both ends is the same as closing */ + API_MSG_VAR_REF(msg).msg.sd.shut = how; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.sd.time_started = sys_now(); +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + API_MSG_VAR_REF(msg).msg.sd.polls_left = + ((LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT + TCP_SLOW_INTERVAL - 1) / TCP_SLOW_INTERVAL) + 1; +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ +#endif /* LWIP_TCP */ + err = netconn_apimsg(lwip_netconn_do_close, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Close a TCP netconn (doesn't delete it). + * + * @param conn the TCP netconn to close + * @return ERR_OK if the netconn was closed, any other err_t on error + */ +err_t +netconn_close(struct netconn *conn) +{ + /* shutting down both ends is the same as closing */ + return netconn_close_shutdown(conn, NETCONN_SHUT_RDWR); +} + +/** + * @ingroup netconn_tcp + * Shut down one or both sides of a TCP netconn (doesn't delete it). + * + * @param conn the TCP netconn to shut down + * @param shut_rx shut down the RX side (no more read possible after this) + * @param shut_tx shut down the TX side (no more write possible after this) + * @return ERR_OK if the netconn was closed, any other err_t on error + */ +err_t +netconn_shutdown(struct netconn *conn, u8_t shut_rx, u8_t shut_tx) +{ + return netconn_close_shutdown(conn, (shut_rx ? NETCONN_SHUT_RD : 0) | (shut_tx ? NETCONN_SHUT_WR : 0)); +} + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** + * @ingroup netconn_udp + * Join multicast groups for UDP netconns. 
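+ * + * A minimal IPv4 usage sketch (239.0.0.1 is only an example group; conn is assumed to be a UDP netconn already bound to the group port): + * + * ip_addr_t group; + * IP_ADDR4(&group, 239, 0, 0, 1); + * err_t err = netconn_join_leave_group(conn, &group, IP4_ADDR_ANY, NETCONN_JOIN);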
+ * + * @param conn the UDP netconn for which to change multicast addresses + * @param multiaddr IP address of the multicast group to join or leave + * @param netif_addr the IP address of the network interface on which to send + * the igmp message + * @param join_or_leave flag whether to send a join- or leave-message + * @return ERR_OK if the action was taken, any err_t on error + */ +err_t +netconn_join_leave_group(struct netconn *conn, + const ip_addr_t *multiaddr, + const ip_addr_t *netif_addr, + enum netconn_igmp join_or_leave) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_join_leave_group: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (multiaddr == NULL) { + multiaddr = IP4_ADDR_ANY; + } + if (netif_addr == NULL) { + netif_addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.jl.multiaddr = API_MSG_VAR_REF(multiaddr); + API_MSG_VAR_REF(msg).msg.jl.netif_addr = API_MSG_VAR_REF(netif_addr); + API_MSG_VAR_REF(msg).msg.jl.join_or_leave = join_or_leave; + err = netconn_apimsg(lwip_netconn_do_join_leave_group, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/** + * @ingroup netconn_common + * Execute a DNS query, only one IP address is returned + * + * @param name a string representation of the DNS host name to query + * @param addr a preallocated ip_addr_t where to store the resolved IP address + * @param dns_addrtype IP address type (IPv4 / IPv6) + * @return ERR_OK: resolving succeeded + * ERR_MEM: memory error, try again later + * ERR_ARG: dns client not initialized or invalid hostname + * ERR_VAL: dns server response was invalid + */ +#if LWIP_IPV4 && LWIP_IPV6 +err_t +netconn_gethostbyname_addrtype(const char *name, ip_addr_t *addr, u8_t dns_addrtype) +#else +err_t +netconn_gethostbyname(const char *name, ip_addr_t *addr) +#endif +{ + API_VAR_DECLARE(struct dns_api_msg, msg); +#if !LWIP_MPU_COMPATIBLE + sys_sem_t sem; +#endif /* LWIP_MPU_COMPATIBLE */ + err_t err; + err_t cberr; + + LWIP_ERROR("netconn_gethostbyname: invalid name", (name != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_gethostbyname: invalid addr", (addr != NULL), return ERR_ARG;); +#if LWIP_MPU_COMPATIBLE + if (strlen(name) >= DNS_MAX_NAME_LENGTH) { + return ERR_ARG; + } +#endif + + API_VAR_ALLOC(struct dns_api_msg, MEMP_DNS_API_MSG, msg, ERR_MEM); +#if LWIP_MPU_COMPATIBLE + strncpy(API_VAR_REF(msg).name, name, DNS_MAX_NAME_LENGTH-1); + API_VAR_REF(msg).name[DNS_MAX_NAME_LENGTH-1] = 0; +#else /* LWIP_MPU_COMPATIBLE */ + msg.err = &err; + msg.sem = &sem; + API_VAR_REF(msg).addr = API_VAR_REF(addr); + API_VAR_REF(msg).name = name; +#endif /* LWIP_MPU_COMPATIBLE */ +#if LWIP_IPV4 && LWIP_IPV6 + API_VAR_REF(msg).dns_addrtype = dns_addrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_NETCONN_SEM_PER_THREAD + API_VAR_REF(msg).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD*/ + err = sys_sem_new(API_EXPR_REF(API_VAR_REF(msg).sem), 0); + if (err != ERR_OK) { + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return err; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + cberr = tcpip_callback(lwip_netconn_do_gethostbyname, &API_VAR_REF(msg)); + if (cberr != ERR_OK) { +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(API_EXPR_REF(API_VAR_REF(msg).sem)); +#endif /* !LWIP_NETCONN_SEM_PER_THREAD */ + 
API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return cberr; + } + sys_sem_wait(API_EXPR_REF_SEM(API_VAR_REF(msg).sem)); +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(API_EXPR_REF(API_VAR_REF(msg).sem)); +#endif /* !LWIP_NETCONN_SEM_PER_THREAD */ + +#if LWIP_MPU_COMPATIBLE + *addr = msg->addr; + err = msg->err; +#endif /* LWIP_MPU_COMPATIBLE */ + + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return err; +} +#endif /* LWIP_DNS*/ + +#if LWIP_NETCONN_SEM_PER_THREAD +void +netconn_thread_init(void) +{ + sys_sem_t *sem = LWIP_NETCONN_THREAD_SEM_GET(); + if ((sem == NULL) || !sys_sem_valid(sem)) { + /* call alloc only once */ + LWIP_NETCONN_THREAD_SEM_ALLOC(); + LWIP_ASSERT("LWIP_NETCONN_THREAD_SEM_ALLOC() failed", sys_sem_valid(LWIP_NETCONN_THREAD_SEM_GET())); + } +} + +void +netconn_thread_cleanup(void) +{ + sys_sem_t *sem = LWIP_NETCONN_THREAD_SEM_GET(); + if ((sem != NULL) && sys_sem_valid(sem)) { + /* call free only once */ + LWIP_NETCONN_THREAD_SEM_FREE(); + } +} +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +#endif /* LWIP_NETCONN */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/api_msg.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/api_msg.c new file mode 100644 index 000000000..a609017cf --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/api_msg.c @@ -0,0 +1,1953 @@ +/** + * @file + * Sequential API Internal module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/api_msg.h" + +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/udp.h" +#include "lwip/tcp.h" +#include "lwip/raw.h" + +#include "lwip/memp.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/mld6.h" +#include "lwip/priv/tcpip_priv.h" + +#include <string.h> + +/* netconns are polled once per second (e.g.
continue write on memory error) */ +#define NETCONN_TCP_POLL_INTERVAL 2 + +#define SET_NONBLOCKING_CONNECT(conn, val) do { if (val) { \ + (conn)->flags |= NETCONN_FLAG_IN_NONBLOCKING_CONNECT; \ +} else { \ + (conn)->flags &= ~ NETCONN_FLAG_IN_NONBLOCKING_CONNECT; }} while(0) +#define IN_NONBLOCKING_CONNECT(conn) (((conn)->flags & NETCONN_FLAG_IN_NONBLOCKING_CONNECT) != 0) + +/* forward declarations */ +#if LWIP_TCP +#if LWIP_TCPIP_CORE_LOCKING +#define WRITE_DELAYED , 1 +#define WRITE_DELAYED_PARAM , u8_t delayed +#else /* LWIP_TCPIP_CORE_LOCKING */ +#define WRITE_DELAYED +#define WRITE_DELAYED_PARAM +#endif /* LWIP_TCPIP_CORE_LOCKING */ +static err_t lwip_netconn_do_writemore(struct netconn *conn WRITE_DELAYED_PARAM); +static err_t lwip_netconn_do_close_internal(struct netconn *conn WRITE_DELAYED_PARAM); +#endif + +#if LWIP_TCPIP_CORE_LOCKING +#define TCPIP_APIMSG_ACK(m) NETCONN_SET_SAFE_ERR((m)->conn, (m)->err) +#else /* LWIP_TCPIP_CORE_LOCKING */ +#define TCPIP_APIMSG_ACK(m) do { NETCONN_SET_SAFE_ERR((m)->conn, (m)->err); sys_sem_signal(LWIP_API_MSG_SEM(m)); } while(0) +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_TCP +u8_t netconn_aborted; +#endif /* LWIP_TCP */ + +#if LWIP_RAW +/** + * Receive callback function for RAW netconns. + * Doesn't 'eat' the packet, only copies it and sends it to + * conn->recvmbox + * + * @see raw.h (struct raw_pcb.recv) for parameters and return value + */ +static u8_t +recv_raw(void *arg, struct raw_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr) +{ + struct pbuf *q; + struct netbuf *buf; + struct netconn *conn; + + LWIP_UNUSED_ARG(addr); + conn = (struct netconn *)arg; + + if ((conn != NULL) && sys_mbox_valid(&conn->recvmbox)) { +#if LWIP_SO_RCVBUF + int recv_avail; + SYS_ARCH_GET(conn->recv_avail, recv_avail); + if ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize) { + return 0; + } +#endif /* LWIP_SO_RCVBUF */ + /* copy the whole packet into new pbufs */ + q = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM); + if (q != NULL) { + if (pbuf_copy(q, p) != ERR_OK) { + pbuf_free(q); + q = NULL; + } + } + + if (q != NULL) { + u16_t len; + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + pbuf_free(q); + return 0; + } + + buf->p = q; + buf->ptr = q; + ip_addr_copy(buf->addr, *ip_current_src_addr()); + buf->port = pcb->protocol; + + len = q->tot_len; + if (sys_mbox_trypost(&conn->recvmbox, buf) != ERR_OK) { + netbuf_delete(buf); + return 0; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } + } + } + + return 0; /* do not eat the packet */ +} +#endif /* LWIP_RAW*/ + +#if LWIP_UDP +/** + * Receive callback function for UDP netconns. + * Posts the packet to conn->recvmbox or deletes it on memory error. + * + * @see udp.h (struct udp_pcb.recv) for parameters + */ +static void +recv_udp(void *arg, struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr, u16_t port) +{ + struct netbuf *buf; + struct netconn *conn; + u16_t len; +#if LWIP_SO_RCVBUF + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ + + LWIP_UNUSED_ARG(pcb); /* only used for asserts... 
*/ + LWIP_ASSERT("recv_udp must have a pcb argument", pcb != NULL); + LWIP_ASSERT("recv_udp must have an argument", arg != NULL); + conn = (struct netconn *)arg; + + if (conn == NULL) { + pbuf_free(p); + return; + } + + LWIP_ASSERT("recv_udp: recv for wrong pcb!", conn->pcb.udp == pcb); + +#if LWIP_SO_RCVBUF + SYS_ARCH_GET(conn->recv_avail, recv_avail); + if (!sys_mbox_valid(&conn->recvmbox) || + ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize)) { +#else /* LWIP_SO_RCVBUF */ + if (!sys_mbox_valid(&conn->recvmbox)) { +#endif /* LWIP_SO_RCVBUF */ + pbuf_free(p); + return; + } + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + pbuf_free(p); + return; + } else { + buf->p = p; + buf->ptr = p; + ip_addr_set(&buf->addr, addr); + buf->port = port; +#if LWIP_NETBUF_RECVINFO + { + /* get the UDP header - always in the first pbuf, ensured by udp_input */ + const struct udp_hdr* udphdr = (const struct udp_hdr*)ip_next_header_ptr(); +#if LWIP_CHECKSUM_ON_COPY + buf->flags = NETBUF_FLAG_DESTADDR; +#endif /* LWIP_CHECKSUM_ON_COPY */ + ip_addr_set(&buf->toaddr, ip_current_dest_addr()); + buf->toport_chksum = udphdr->dest; + } +#endif /* LWIP_NETBUF_RECVINFO */ + } + + len = p->tot_len; + if (sys_mbox_trypost(&conn->recvmbox, buf) != ERR_OK) { + netbuf_delete(buf); + return; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } +} +#endif /* LWIP_UDP */ + +#if LWIP_TCP +/** + * Receive callback function for TCP netconns. + * Posts the packet to conn->recvmbox, but doesn't delete it on errors. + * + * @see tcp.h (struct tcp_pcb.recv) for parameters and return value + */ +static err_t +recv_tcp(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err) +{ + struct netconn *conn; + u16_t len; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("recv_tcp must have a pcb argument", pcb != NULL); + LWIP_ASSERT("recv_tcp must have an argument", arg != NULL); + conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + LWIP_ASSERT("recv_tcp: recv for wrong pcb!", conn->pcb.tcp == pcb); + + if (!sys_mbox_valid(&conn->recvmbox)) { + /* recvmbox already deleted */ + if (p != NULL) { + tcp_recved(pcb, p->tot_len); + pbuf_free(p); + } + return ERR_OK; + } + /* Unlike for UDP or RAW pcbs, don't check for available space + using recv_avail since that could break the connection + (data is already ACKed) */ + + /* don't overwrite fatal errors! */ + if (err != ERR_OK) { + NETCONN_SET_SAFE_ERR(conn, err); + } + + if (p != NULL) { + len = p->tot_len; + } else { + len = 0; + } + + if (sys_mbox_trypost(&conn->recvmbox, p) != ERR_OK) { + /* don't deallocate p: it is presented to us later again from tcp_fasttmr! */ + return ERR_MEM; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } + + return ERR_OK; +} + +/** + * Poll callback function for TCP netconns. + * Wakes up an application thread that waits for a connection to close + * or data to be sent. The application thread then takes the + * appropriate action to go on. + * + * Signals the conn->sem. + * netconn_close waits for conn->sem if closing failed. 
+ * + * @see tcp.h (struct tcp_pcb.poll) for parameters and return value + */ +static err_t +poll_tcp(void *arg, struct tcp_pcb *pcb) +{ + struct netconn *conn = (struct netconn *)arg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + if (conn->state == NETCONN_WRITE) { + lwip_netconn_do_writemore(conn WRITE_DELAYED); + } else if (conn->state == NETCONN_CLOSE) { +#if !LWIP_SO_SNDTIMEO && !LWIP_SO_LINGER + if (conn->current_msg && conn->current_msg->msg.sd.polls_left) { + conn->current_msg->msg.sd.polls_left--; + } +#endif /* !LWIP_SO_SNDTIMEO && !LWIP_SO_LINGER */ + lwip_netconn_do_close_internal(conn WRITE_DELAYED); + } + /* @todo: implement connect timeout here? */ + + /* Did a nonblocking write fail before? Then check available write-space. */ + if (conn->flags & NETCONN_FLAG_CHECK_WRITESPACE) { + /* If the queued byte- or pbuf-count drops below the configured low-water limit, + let select mark this pcb as writable again. */ + if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) && + (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) { + conn->flags &= ~NETCONN_FLAG_CHECK_WRITESPACE; + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + } + } + + return ERR_OK; +} + +/** + * Sent callback function for TCP netconns. + * Signals the conn->sem and calls API_EVENT. + * netconn_write waits for conn->sem if send buffer is low. + * + * @see tcp.h (struct tcp_pcb.sent) for parameters and return value + */ +static err_t +sent_tcp(void *arg, struct tcp_pcb *pcb, u16_t len) +{ + struct netconn *conn = (struct netconn *)arg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + if (conn) { + if (conn->state == NETCONN_WRITE) { + lwip_netconn_do_writemore(conn WRITE_DELAYED); + } else if (conn->state == NETCONN_CLOSE) { + lwip_netconn_do_close_internal(conn WRITE_DELAYED); + } + + /* If the queued byte- or pbuf-count drops below the configured low-water limit, + let select mark this pcb as writable again. */ + if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) && + (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) { + conn->flags &= ~NETCONN_FLAG_CHECK_WRITESPACE; + API_EVENT(conn, NETCONN_EVT_SENDPLUS, len); + } + } + + return ERR_OK; +} + +/** + * Error callback function for TCP netconns. + * Signals conn->sem, posts to all conn mboxes and calls API_EVENT. + * The application thread has then to decide what to do. + * + * @see tcp.h (struct tcp_pcb.err) for parameters + */ +static void +err_tcp(void *arg, err_t err) +{ + struct netconn *conn; + enum netconn_state old_state; + + conn = (struct netconn *)arg; + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + conn->pcb.tcp = NULL; + + /* reset conn->state now before waking up other threads */ + old_state = conn->state; + conn->state = NETCONN_NONE; + + if (old_state == NETCONN_CLOSE) { + /* RST during close: let close return success & dealloc the netconn */ + err = ERR_OK; + NETCONN_SET_SAFE_ERR(conn, ERR_OK); + } else { + /* no check since this is always fatal! */ + SYS_ARCH_SET(conn->last_err, err); + } + + /* @todo: the type of NETCONN_EVT created should depend on 'old_state' */ + + /* Notify the user layer about a connection error. Used to signal select. */ + API_EVENT(conn, NETCONN_EVT_ERROR, 0); + /* Try to release selects pending on 'read' or 'write', too. + They will get an error if they actually try to read or write. 
*/ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + + /* pass NULL-message to recvmbox to wake up pending recv */ + if (sys_mbox_valid(&conn->recvmbox)) { + /* use trypost to prevent deadlock */ + sys_mbox_trypost(&conn->recvmbox, NULL); + } + /* pass NULL-message to acceptmbox to wake up pending accept */ + if (sys_mbox_valid(&conn->acceptmbox)) { + /* use trypost to prevent deadlock */ + sys_mbox_trypost(&conn->acceptmbox, NULL); + } + + if ((old_state == NETCONN_WRITE) || (old_state == NETCONN_CLOSE) || + (old_state == NETCONN_CONNECT)) { + /* calling lwip_netconn_do_writemore/lwip_netconn_do_close_internal is not necessary + since the pcb has already been deleted! */ + int was_nonblocking_connect = IN_NONBLOCKING_CONNECT(conn); + SET_NONBLOCKING_CONNECT(conn, 0); + + if (!was_nonblocking_connect) { + sys_sem_t* op_completed_sem; + /* set error return code */ + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + conn->current_msg->err = err; + op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + LWIP_ASSERT("invalid op_completed_sem", sys_sem_valid(op_completed_sem)); + conn->current_msg = NULL; + /* wake up the waiting task */ + NETCONN_SET_SAFE_ERR(conn, err); + sys_sem_signal(op_completed_sem); + } + } else { + LWIP_ASSERT("conn->current_msg == NULL", conn->current_msg == NULL); + } +} + +/** + * Setup a tcp_pcb with the correct callback function pointers + * and their arguments. + * + * @param conn the TCP netconn to setup + */ +static void +setup_tcp(struct netconn *conn) +{ + struct tcp_pcb *pcb; + + pcb = conn->pcb.tcp; + tcp_arg(pcb, conn); + tcp_recv(pcb, recv_tcp); + tcp_sent(pcb, sent_tcp); + tcp_poll(pcb, poll_tcp, NETCONN_TCP_POLL_INTERVAL); + tcp_err(pcb, err_tcp); +} + +/** + * Accept callback function for TCP netconns. + * Allocates a new netconn and posts that to conn->acceptmbox. + * + * @see tcp.h (struct tcp_pcb_listen.accept) for parameters and return value + */ +static err_t +accept_function(void *arg, struct tcp_pcb *newpcb, err_t err) +{ + struct netconn *newconn; + struct netconn *conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + if (!sys_mbox_valid(&conn->acceptmbox)) { + LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: acceptmbox already deleted\n")); + return ERR_VAL; + } + + if (newpcb == NULL) { + /* out-of-pcbs during connect: pass on this error to the application */ + if (sys_mbox_trypost(&conn->acceptmbox, &netconn_aborted) == ERR_OK) { + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + return ERR_VAL; + } + + LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: newpcb->state: %s\n", tcp_debug_state_str(newpcb->state))); + + /* We have to set the callback here even though + * the new socket is unknown. newconn->socket is marked as -1. 
*/ + newconn = netconn_alloc(conn->type, conn->callback); + if (newconn == NULL) { + /* out of netconns: pass on this error to the application */ + if (sys_mbox_trypost(&conn->acceptmbox, &netconn_aborted) == ERR_OK) { + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + return ERR_MEM; + } + newconn->pcb.tcp = newpcb; + setup_tcp(newconn); + /* no protection: when creating the pcb, the netconn is not yet known + to the application thread */ + newconn->last_err = err; + + /* handle backlog counter */ + tcp_backlog_delayed(newpcb); + + if (sys_mbox_trypost(&conn->acceptmbox, newconn) != ERR_OK) { + /* When returning != ERR_OK, the pcb is aborted in tcp_process(), + so do nothing here! */ + /* remove all references to this netconn from the pcb */ + struct tcp_pcb* pcb = newconn->pcb.tcp; + tcp_arg(pcb, NULL); + tcp_recv(pcb, NULL); + tcp_sent(pcb, NULL); + tcp_poll(pcb, NULL, 0); + tcp_err(pcb, NULL); + /* remove the reference to the pcb from this netconn */ + newconn->pcb.tcp = NULL; + /* no need to drain since we know the recvmbox is empty. */ + sys_mbox_free(&newconn->recvmbox); + sys_mbox_set_invalid(&newconn->recvmbox); + netconn_free(newconn); + return ERR_MEM; + } else { + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Create a new pcb of a specific type. + * Called from lwip_netconn_do_newconn(). + * + * @param msg the api_msg_msg describing the connection type + */ +static void +pcb_new(struct api_msg *msg) +{ + enum lwip_ip_addr_type iptype = IPADDR_TYPE_V4; + + LWIP_ASSERT("pcb_new: pcb already allocated", msg->conn->pcb.tcp == NULL); + +#if LWIP_IPV6 && LWIP_IPV4 + /* IPv6: Dual-stack by default, unless netconn_set_ipv6only() is called */ + if(NETCONNTYPE_ISIPV6(netconn_type(msg->conn))) { + iptype = IPADDR_TYPE_ANY; + } +#endif + + /* Allocate a PCB for this connection */ + switch(NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + msg->conn->pcb.raw = raw_new_ip_type(iptype, msg->msg.n.proto); + if (msg->conn->pcb.raw != NULL) { +#if LWIP_IPV6 + /* ICMPv6 packets should always have checksum calculated by the stack as per RFC 3542 chapter 3.1 */ + if (NETCONNTYPE_ISIPV6(msg->conn->type) && msg->conn->pcb.raw->protocol == IP6_NEXTH_ICMP6) { + msg->conn->pcb.raw->chksum_reqd = 1; + msg->conn->pcb.raw->chksum_offset = 2; + } +#endif /* LWIP_IPV6 */ + raw_recv(msg->conn->pcb.raw, recv_raw, msg->conn); + } + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + msg->conn->pcb.udp = udp_new_ip_type(iptype); + if (msg->conn->pcb.udp != NULL) { +#if LWIP_UDPLITE + if (NETCONNTYPE_ISUDPLITE(msg->conn->type)) { + udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_UDPLITE); + } +#endif /* LWIP_UDPLITE */ + if (NETCONNTYPE_ISUDPNOCHKSUM(msg->conn->type)) { + udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_NOCHKSUM); + } + udp_recv(msg->conn->pcb.udp, recv_udp, msg->conn); + } + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + msg->conn->pcb.tcp = tcp_new_ip_type(iptype); + if (msg->conn->pcb.tcp != NULL) { + setup_tcp(msg->conn); + } + break; +#endif /* LWIP_TCP */ + default: + /* Unsupported netconn type, e.g. protocol disabled */ + msg->err = ERR_VAL; + return; + } + if (msg->conn->pcb.ip == NULL) { + msg->err = ERR_MEM; + } +} + +/** + * Create a new pcb of a specific type inside a netconn. + * Called from netconn_new_with_proto_and_callback. 
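+ *
+ * Example usage (an illustrative application-side sketch only; these are the
+ * public netconn calls that dispatch here, with error handling elided):
+ *
+ *   struct netconn *conn = netconn_new(NETCONN_TCP);
+ *   if (conn != NULL) {
+ *     // ... bind/connect and use the connection ...
+ *     netconn_delete(conn);  // also frees the pcb created by this handler
+ *   }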
+ * + * @param m the api_msg_msg describing the connection type + */ +void +lwip_netconn_do_newconn(void *m) +{ + struct api_msg *msg = (struct api_msg*)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp == NULL) { + pcb_new(msg); + } + /* Else? This "new" connection already has a PCB allocated. */ + /* Is this an error condition? Should it be deleted? */ + /* We currently just are happy and return. */ + + TCPIP_APIMSG_ACK(msg); +} + +/** + * Create a new netconn (of a specific type) that has a callback function. + * The corresponding pcb is NOT created! + * + * @param t the type of 'connection' to create (@see enum netconn_type) + * @param callback a function to call on status changes (RX available, TX'ed) + * @return a newly allocated struct netconn or + * NULL on memory error + */ +struct netconn* +netconn_alloc(enum netconn_type t, netconn_callback callback) +{ + struct netconn *conn; + int size; + + conn = (struct netconn *)memp_malloc(MEMP_NETCONN); + if (conn == NULL) { + return NULL; + } + + conn->last_err = ERR_OK; + conn->type = t; + conn->pcb.tcp = NULL; + + /* If all sizes are the same, every compiler should optimize this switch to nothing */ + switch(NETCONNTYPE_GROUP(t)) { +#if LWIP_RAW + case NETCONN_RAW: + size = DEFAULT_RAW_RECVMBOX_SIZE; + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + size = DEFAULT_UDP_RECVMBOX_SIZE; + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + size = DEFAULT_TCP_RECVMBOX_SIZE; + break; +#endif /* LWIP_TCP */ + default: + LWIP_ASSERT("netconn_alloc: undefined netconn_type", 0); + goto free_and_return; + } + + if (sys_mbox_new(&conn->recvmbox, size) != ERR_OK) { + goto free_and_return; + } +#if !LWIP_NETCONN_SEM_PER_THREAD + if (sys_sem_new(&conn->op_completed, 0) != ERR_OK) { + sys_mbox_free(&conn->recvmbox); + goto free_and_return; + } +#endif + +#if LWIP_TCP + sys_mbox_set_invalid(&conn->acceptmbox); +#endif + conn->state = NETCONN_NONE; +#if LWIP_SOCKET + /* initialize socket to -1 since 0 is a valid socket */ + conn->socket = -1; +#endif /* LWIP_SOCKET */ + conn->callback = callback; +#if LWIP_TCP + conn->current_msg = NULL; + conn->write_offset = 0; +#endif /* LWIP_TCP */ +#if LWIP_SO_SNDTIMEO + conn->send_timeout = 0; +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + conn->recv_timeout = 0; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + conn->recv_bufsize = RECV_BUFSIZE_DEFAULT; + conn->recv_avail = 0; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + conn->linger = -1; +#endif /* LWIP_SO_LINGER */ + conn->flags = 0; + return conn; +free_and_return: + memp_free(MEMP_NETCONN, conn); + return NULL; +} + +/** + * Delete a netconn and all its resources. + * The pcb is NOT freed (since we might not be in the right thread context to do this). 
+ * + * @param conn the netconn to free + */ +void +netconn_free(struct netconn *conn) +{ + LWIP_ASSERT("PCB must be deallocated outside this function", conn->pcb.tcp == NULL); + LWIP_ASSERT("recvmbox must be deallocated before calling this function", + !sys_mbox_valid(&conn->recvmbox)); +#if LWIP_TCP + LWIP_ASSERT("acceptmbox must be deallocated before calling this function", + !sys_mbox_valid(&conn->acceptmbox)); +#endif /* LWIP_TCP */ + +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(&conn->op_completed); + sys_sem_set_invalid(&conn->op_completed); +#endif + + memp_free(MEMP_NETCONN, conn); +} + +/** + * Delete recvmbox and acceptmbox of a netconn and free the left-over data in + * these mboxes + * + * @param conn the netconn to drain + */ +static void +netconn_drain(struct netconn *conn) +{ + void *mem; +#if LWIP_TCP + struct pbuf *p; +#endif /* LWIP_TCP */ + + /* This runs in tcpip_thread, so we don't need to lock against rx packets */ + + /* Delete and drain the recvmbox. */ + if (sys_mbox_valid(&conn->recvmbox)) { + while (sys_mbox_tryfetch(&conn->recvmbox, &mem) != SYS_MBOX_EMPTY) { +#if LWIP_TCP + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) { + if (mem != NULL) { + p = (struct pbuf*)mem; + /* pcb might be set to NULL already by err_tcp() */ + if (conn->pcb.tcp != NULL) { + tcp_recved(conn->pcb.tcp, p->tot_len); + } + pbuf_free(p); + } + } else +#endif /* LWIP_TCP */ + { + netbuf_delete((struct netbuf *)mem); + } + } + sys_mbox_free(&conn->recvmbox); + sys_mbox_set_invalid(&conn->recvmbox); + } + + /* Delete and drain the acceptmbox. */ +#if LWIP_TCP + if (sys_mbox_valid(&conn->acceptmbox)) { + while (sys_mbox_tryfetch(&conn->acceptmbox, &mem) != SYS_MBOX_EMPTY) { + if (mem != &netconn_aborted) { + struct netconn *newconn = (struct netconn *)mem; + /* Only tcp pcbs have an acceptmbox, so no need to check conn->type */ + /* pcb might be set to NULL already by err_tcp() */ + /* drain recvmbox */ + netconn_drain(newconn); + if (newconn->pcb.tcp != NULL) { + tcp_abort(newconn->pcb.tcp); + newconn->pcb.tcp = NULL; + } + netconn_free(newconn); + } + } + sys_mbox_free(&conn->acceptmbox); + sys_mbox_set_invalid(&conn->acceptmbox); + } +#endif /* LWIP_TCP */ +} + +#if LWIP_TCP +/** + * Internal helper function to close a TCP netconn: since this sometimes + * doesn't work at the first attempt, this function is called from multiple + * places. 
+ * + * @param conn the TCP netconn to close + */ +static err_t +lwip_netconn_do_close_internal(struct netconn *conn WRITE_DELAYED_PARAM) +{ + err_t err; + u8_t shut, shut_rx, shut_tx, close; + u8_t close_finished = 0; + struct tcp_pcb* tpcb; +#if LWIP_SO_LINGER + u8_t linger_wait_required = 0; +#endif /* LWIP_SO_LINGER */ + + LWIP_ASSERT("invalid conn", (conn != NULL)); + LWIP_ASSERT("this is for tcp netconns only", (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP)); + LWIP_ASSERT("conn must be in state NETCONN_CLOSE", (conn->state == NETCONN_CLOSE)); + LWIP_ASSERT("pcb already closed", (conn->pcb.tcp != NULL)); + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + + tpcb = conn->pcb.tcp; + shut = conn->current_msg->msg.sd.shut; + shut_rx = shut & NETCONN_SHUT_RD; + shut_tx = shut & NETCONN_SHUT_WR; + /* shutting down both ends is the same as closing + (also if RD or WR side was shut down before already) */ + if (shut == NETCONN_SHUT_RDWR) { + close = 1; + } else if (shut_rx && + ((tpcb->state == FIN_WAIT_1) || + (tpcb->state == FIN_WAIT_2) || + (tpcb->state == CLOSING))) { + close = 1; + } else if (shut_tx && ((tpcb->flags & TF_RXCLOSED) != 0)) { + close = 1; + } else { + close = 0; + } + + /* Set back some callback pointers */ + if (close) { + tcp_arg(tpcb, NULL); + } + if (tpcb->state == LISTEN) { + tcp_accept(tpcb, NULL); + } else { + /* some callbacks have to be reset if tcp_close is not successful */ + if (shut_rx) { + tcp_recv(tpcb, NULL); + tcp_accept(tpcb, NULL); + } + if (shut_tx) { + tcp_sent(tpcb, NULL); + } + if (close) { + tcp_poll(tpcb, NULL, 0); + tcp_err(tpcb, NULL); + } + } + /* Try to close the connection */ + if (close) { +#if LWIP_SO_LINGER + /* check linger possibilities before calling tcp_close */ + err = ERR_OK; + /* linger enabled/required at all? (i.e. is there untransmitted data left?) */ + if ((conn->linger >= 0) && (conn->pcb.tcp->unsent || conn->pcb.tcp->unacked)) { + if ((conn->linger == 0)) { + /* data left but linger prevents waiting */ + tcp_abort(tpcb); + tpcb = NULL; + } else if (conn->linger > 0) { + /* data left and linger says we should wait */ + if (netconn_is_nonblocking(conn)) { + /* data left on a nonblocking netconn -> cannot linger */ + err = ERR_WOULDBLOCK; + } else if ((s32_t)(sys_now() - conn->current_msg->msg.sd.time_started) >= + (conn->linger * 1000)) { + /* data left but linger timeout has expired (this happens on further + calls to this function through poll_tcp) */ + tcp_abort(tpcb); + tpcb = NULL; + } else { + /* data left -> need to wait for ACK after successful close */ + linger_wait_required = 1; + } + } + } + if ((err == ERR_OK) && (tpcb != NULL)) +#endif /* LWIP_SO_LINGER */ + { + err = tcp_close(tpcb); + } + } else { + err = tcp_shutdown(tpcb, shut_rx, shut_tx); + } + if (err == ERR_OK) { + close_finished = 1; +#if LWIP_SO_LINGER + if (linger_wait_required) { + /* wait for ACK of all unsent/unacked data by just getting called again */ + close_finished = 0; + err = ERR_INPROGRESS; + } +#endif /* LWIP_SO_LINGER */ + } else { + if (err == ERR_MEM) { + /* Closing failed because of memory shortage, try again later. Even for + nonblocking netconns, we have to wait since no standard socket application + is prepared for close failing because of resource shortage. 
+ Check the timeout: this is kind of an lwip addition to the standard sockets: + we wait for some time when failing to allocate a segment for the FIN */ +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + s32_t close_timeout = LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT; +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout > 0) { + close_timeout = conn->send_timeout; + } +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_LINGER + if (conn->linger >= 0) { + /* use linger timeout (seconds) */ + close_timeout = conn->linger * 1000U; + } +#endif + if ((s32_t)(sys_now() - conn->current_msg->msg.sd.time_started) >= close_timeout) { +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + if (conn->current_msg->msg.sd.polls_left == 0) { +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + close_finished = 1; + if (close) { + /* in this case, we want to RST the connection */ + tcp_abort(tpcb); + err = ERR_OK; + } + } + } else { + /* Closing failed for a non-memory error: give up */ + close_finished = 1; + } + } + if (close_finished) { + /* Closing done (succeeded, non-memory error, nonblocking error or timeout) */ + sys_sem_t* op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + conn->current_msg->err = err; + conn->current_msg = NULL; + conn->state = NETCONN_NONE; + if (err == ERR_OK) { + if (close) { + /* Set back some callback pointers as conn is going away */ + conn->pcb.tcp = NULL; + /* Trigger select() in socket layer. Make sure everybody notices activity + on the connection, error first! */ + API_EVENT(conn, NETCONN_EVT_ERROR, 0); + } + if (shut_rx) { + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + if (shut_tx) { + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + } + } + NETCONN_SET_SAFE_ERR(conn, err); +#if LWIP_TCPIP_CORE_LOCKING + if (delayed) +#endif + { + /* wake up the application task */ + sys_sem_signal(op_completed_sem); + } + return ERR_OK; + } + if (!close_finished) { + /* Closing failed and we want to wait: restore some of the callbacks */ + /* Closing of listen pcb will never fail! */ + LWIP_ASSERT("Closing a listen pcb may not fail!", (tpcb->state != LISTEN)); + if (shut_tx) { + tcp_sent(tpcb, sent_tcp); + } + /* when waiting for close, set up poll interval to 500ms */ + tcp_poll(tpcb, poll_tcp, 1); + tcp_err(tpcb, err_tcp); + tcp_arg(tpcb, conn); + /* don't restore recv callback: we don't want to receive any more data */ + } + /* If closing didn't succeed, we get called again either + from poll_tcp or from sent_tcp */ + LWIP_ASSERT("err != ERR_OK", err != ERR_OK); + return err; +} +#endif /* LWIP_TCP */ + +/** + * Delete the pcb inside a netconn. + * Called from netconn_delete. 
+ * + * @param m the api_msg_msg pointing to the connection + */ +void +lwip_netconn_do_delconn(void *m) +{ + struct api_msg *msg = (struct api_msg*)m; + + enum netconn_state state = msg->conn->state; + LWIP_ASSERT("netconn state error", /* this only happens for TCP netconns */ + (state == NETCONN_NONE) || (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP)); +#if LWIP_NETCONN_FULLDUPLEX + /* In full duplex mode, blocking write/connect is aborted with ERR_CLSD */ + if (state != NETCONN_NONE) { + if ((state == NETCONN_WRITE) || + ((state == NETCONN_CONNECT) && !IN_NONBLOCKING_CONNECT(msg->conn))) { + /* close requested, abort running write/connect */ + sys_sem_t* op_completed_sem; + LWIP_ASSERT("msg->conn->current_msg != NULL", msg->conn->current_msg != NULL); + op_completed_sem = LWIP_API_MSG_SEM(msg->conn->current_msg); + msg->conn->current_msg->err = ERR_CLSD; + msg->conn->current_msg = NULL; + msg->conn->write_offset = 0; + msg->conn->state = NETCONN_NONE; + NETCONN_SET_SAFE_ERR(msg->conn, ERR_CLSD); + sys_sem_signal(op_completed_sem); + } + } +#else /* LWIP_NETCONN_FULLDUPLEX */ + if (((state != NETCONN_NONE) && + (state != NETCONN_LISTEN) && + (state != NETCONN_CONNECT)) || + ((state == NETCONN_CONNECT) && !IN_NONBLOCKING_CONNECT(msg->conn))) { + /* This means either a blocking write or blocking connect is running + (nonblocking write returns and sets state to NONE) */ + msg->err = ERR_INPROGRESS; + } else +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + LWIP_ASSERT("blocking connect in progress", + (state != NETCONN_CONNECT) || IN_NONBLOCKING_CONNECT(msg->conn)); + msg->err = ERR_OK; + /* Drain and delete mboxes */ + netconn_drain(msg->conn); + + if (msg->conn->pcb.tcp != NULL) { + + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + raw_remove(msg->conn->pcb.raw); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + msg->conn->pcb.udp->recv_arg = NULL; + udp_remove(msg->conn->pcb.udp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL && + msg->conn->write_offset == 0); + msg->conn->state = NETCONN_CLOSE; + msg->msg.sd.shut = NETCONN_SHUT_RDWR; + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_close_internal(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CLOSE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state == NETCONN_NONE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_close_internal(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* API_EVENT is called inside lwip_netconn_do_close_internal, before releasing + the application thread, so we can return at this point! */ + return; +#endif /* LWIP_TCP */ + default: + break; + } + msg->conn->pcb.tcp = NULL; + } + /* tcp netconns don't come here! */ + + /* @todo: this lets select make the socket readable and writable, + which is wrong! errfd instead? */ + API_EVENT(msg->conn, NETCONN_EVT_RCVPLUS, 0); + API_EVENT(msg->conn, NETCONN_EVT_SENDPLUS, 0); + } + if (sys_sem_valid(LWIP_API_MSG_SEM(msg))) { + TCPIP_APIMSG_ACK(msg); + } +} + +/** + * Bind a pcb contained in a netconn + * Called from netconn_bind. 
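+ *
+ * Example usage (a hedged sketch of the application-side netconn_bind() call
+ * that is dispatched to this handler; the port number is arbitrary):
+ *
+ *   err_t err = netconn_bind(conn, IP_ADDR_ANY, 5025);  // all local addresses
+ *   if (err != ERR_OK) {
+ *     // handle failure, e.g. ERR_USE if the port is already taken
+ *   }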
+ * + * @param m the api_msg_msg pointing to the connection and containing + * the IP address and port to bind to + */ +void +lwip_netconn_do_bind(void *m) +{ + struct api_msg *msg = (struct api_msg*)m; + + if (ERR_IS_FATAL(msg->conn->last_err)) { + msg->err = msg->conn->last_err; + } else { + msg->err = ERR_VAL; + if (msg->conn->pcb.tcp != NULL) { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + msg->err = raw_bind(msg->conn->pcb.raw, API_EXPR_REF(msg->msg.bc.ipaddr)); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + msg->err = udp_bind(msg->conn->pcb.udp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + msg->err = tcp_bind(msg->conn->pcb.tcp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_TCP */ + default: + break; + } + } + } + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * TCP callback function if a connection (opened by tcp_connect/lwip_netconn_do_connect) has + * been established (or reset by the remote host). + * + * @see tcp.h (struct tcp_pcb.connected) for parameters and return values + */ +static err_t +lwip_netconn_do_connected(void *arg, struct tcp_pcb *pcb, err_t err) +{ + struct netconn *conn; + int was_blocking; + sys_sem_t* op_completed_sem = NULL; + + LWIP_UNUSED_ARG(pcb); + + conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + + LWIP_ASSERT("conn->state == NETCONN_CONNECT", conn->state == NETCONN_CONNECT); + LWIP_ASSERT("(conn->current_msg != NULL) || conn->in_non_blocking_connect", + (conn->current_msg != NULL) || IN_NONBLOCKING_CONNECT(conn)); + + if (conn->current_msg != NULL) { + conn->current_msg->err = err; + op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + } + if ((NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) && (err == ERR_OK)) { + setup_tcp(conn); + } + was_blocking = !IN_NONBLOCKING_CONNECT(conn); + SET_NONBLOCKING_CONNECT(conn, 0); + LWIP_ASSERT("blocking connect state error", + (was_blocking && op_completed_sem != NULL) || + (!was_blocking && op_completed_sem == NULL)); + conn->current_msg = NULL; + conn->state = NETCONN_NONE; + NETCONN_SET_SAFE_ERR(conn, ERR_OK); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + + if (was_blocking) { + sys_sem_signal(op_completed_sem); + } + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Connect a pcb contained inside a netconn + * Called from netconn_connect. + * + * @param m the api_msg_msg pointing to the connection and containing + * the IP address and port to connect to + */ +void +lwip_netconn_do_connect(void *m) +{ + struct api_msg *msg = (struct api_msg*)m; + + if (msg->conn->pcb.tcp == NULL) { + /* This may happen when calling netconn_connect() a second time */ + msg->err = ERR_CLSD; + } else { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + msg->err = raw_connect(msg->conn->pcb.raw, API_EXPR_REF(msg->msg.bc.ipaddr)); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + msg->err = udp_connect(msg->conn->pcb.udp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + /* Prevent connect while doing any other action. 
*/ + if (msg->conn->state == NETCONN_CONNECT) { + msg->err = ERR_ALREADY; + } else if (msg->conn->state != NETCONN_NONE) { + msg->err = ERR_ISCONN; + } else { + setup_tcp(msg->conn); + msg->err = tcp_connect(msg->conn->pcb.tcp, API_EXPR_REF(msg->msg.bc.ipaddr), + msg->msg.bc.port, lwip_netconn_do_connected); + if (msg->err == ERR_OK) { + u8_t non_blocking = netconn_is_nonblocking(msg->conn); + msg->conn->state = NETCONN_CONNECT; + SET_NONBLOCKING_CONNECT(msg->conn, non_blocking); + if (non_blocking) { + msg->err = ERR_INPROGRESS; + } else { + msg->conn->current_msg = msg; + /* sys_sem_signal() is called from lwip_netconn_do_connected (or err_tcp()), + when the connection is established! */ +#if LWIP_TCPIP_CORE_LOCKING + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CONNECT); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state != NETCONN_CONNECT); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + return; + } + } + } + break; +#endif /* LWIP_TCP */ + default: + LWIP_ERROR("Invalid netconn type", 0, do{ msg->err = ERR_VAL; }while(0)); + break; + } + } + /* For all other protocols, netconn_connect() calls TCPIP_APIMSG(), + so use TCPIP_APIMSG_ACK() here. */ + TCPIP_APIMSG_ACK(msg); +} + +/** + * Disconnect a pcb contained inside a netconn + * Only used for UDP netconns. + * Called from netconn_disconnect. + * + * @param m the api_msg_msg pointing to the connection to disconnect + */ +void +lwip_netconn_do_disconnect(void *m) +{ + struct api_msg *msg = (struct api_msg*)m; + +#if LWIP_UDP + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { + udp_disconnect(msg->conn->pcb.udp); + msg->err = ERR_OK; + } else +#endif /* LWIP_UDP */ + { + msg->err = ERR_VAL; + } + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * Set a TCP pcb contained in a netconn into listen mode + * Called from netconn_listen. + * + * @param m the api_msg_msg pointing to the connection + */ +void +lwip_netconn_do_listen(void *m) +{ + struct api_msg *msg = (struct api_msg*)m; + + if (ERR_IS_FATAL(msg->conn->last_err)) { + msg->err = msg->conn->last_err; + } else { + msg->err = ERR_CONN; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + if (msg->conn->state == NETCONN_NONE) { + struct tcp_pcb* lpcb; + if (msg->conn->pcb.tcp->state != CLOSED) { + /* connection is not closed, cannot listen */ + msg->err = ERR_VAL; + } else { + err_t err; + u8_t backlog; +#if TCP_LISTEN_BACKLOG + backlog = msg->msg.lb.backlog; +#else /* TCP_LISTEN_BACKLOG */ + backlog = TCP_DEFAULT_LISTEN_BACKLOG; +#endif /* TCP_LISTEN_BACKLOG */ +#if LWIP_IPV4 && LWIP_IPV6 + /* "Socket API like" dual-stack support: If IP to listen to is IP6_ADDR_ANY, + * and NETCONN_FLAG_IPV6_V6ONLY is NOT set, use IP_ANY_TYPE to listen + */ + if (ip_addr_cmp(&msg->conn->pcb.ip->local_ip, IP6_ADDR_ANY) && + (netconn_get_ipv6only(msg->conn) == 0)) { + /* change PCB type to IPADDR_TYPE_ANY */ + IP_SET_TYPE_VAL(msg->conn->pcb.tcp->local_ip, IPADDR_TYPE_ANY); + IP_SET_TYPE_VAL(msg->conn->pcb.tcp->remote_ip, IPADDR_TYPE_ANY); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + lpcb = tcp_listen_with_backlog_and_err(msg->conn->pcb.tcp, backlog, &err); + + if (lpcb == NULL) { + /* in this case, the old pcb is still allocated */ + msg->err = err; + } else { + /* delete the recvmbox and allocate the acceptmbox */ + if (sys_mbox_valid(&msg->conn->recvmbox)) { + /** @todo: should we drain the recvmbox here? 
*/ + sys_mbox_free(&msg->conn->recvmbox); + sys_mbox_set_invalid(&msg->conn->recvmbox); + } + msg->err = ERR_OK; + if (!sys_mbox_valid(&msg->conn->acceptmbox)) { + msg->err = sys_mbox_new(&msg->conn->acceptmbox, DEFAULT_ACCEPTMBOX_SIZE); + } + if (msg->err == ERR_OK) { + msg->conn->state = NETCONN_LISTEN; + msg->conn->pcb.tcp = lpcb; + tcp_arg(msg->conn->pcb.tcp, msg->conn); + tcp_accept(msg->conn->pcb.tcp, accept_function); + } else { + /* since the old pcb is already deallocated, free lpcb now */ + tcp_close(lpcb); + msg->conn->pcb.tcp = NULL; + } + } + } + } else if (msg->conn->state == NETCONN_LISTEN) { + /* already listening, allow updating of the backlog */ + msg->err = ERR_OK; + tcp_backlog_set(msg->conn->pcb.tcp, msg->msg.lb.backlog); + } + } else { + msg->err = ERR_ARG; + } + } + } + TCPIP_APIMSG_ACK(msg); +} +#endif /* LWIP_TCP */ + +/** + * Send some data on a RAW or UDP pcb contained in a netconn + * Called from netconn_send + * + * @param m the api_msg_msg pointing to the connection + */ +void +lwip_netconn_do_send(void *m) +{ + struct api_msg *msg = (struct api_msg*)m; + + if (ERR_IS_FATAL(msg->conn->last_err)) { + msg->err = msg->conn->last_err; + } else { + msg->err = ERR_CONN; + if (msg->conn->pcb.tcp != NULL) { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + if (ip_addr_isany(&msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + msg->err = raw_send(msg->conn->pcb.raw, msg->msg.b->p); + } else { + msg->err = raw_sendto(msg->conn->pcb.raw, msg->msg.b->p, &msg->msg.b->addr); + } + break; +#endif +#if LWIP_UDP + case NETCONN_UDP: +#if LWIP_CHECKSUM_ON_COPY + if (ip_addr_isany(&msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + msg->err = udp_send_chksum(msg->conn->pcb.udp, msg->msg.b->p, + msg->msg.b->flags & NETBUF_FLAG_CHKSUM, msg->msg.b->toport_chksum); + } else { + msg->err = udp_sendto_chksum(msg->conn->pcb.udp, msg->msg.b->p, + &msg->msg.b->addr, msg->msg.b->port, + msg->msg.b->flags & NETBUF_FLAG_CHKSUM, msg->msg.b->toport_chksum); + } +#else /* LWIP_CHECKSUM_ON_COPY */ + if (ip_addr_isany_val(msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + msg->err = udp_send(msg->conn->pcb.udp, msg->msg.b->p); + } else { + msg->err = udp_sendto(msg->conn->pcb.udp, msg->msg.b->p, &msg->msg.b->addr, msg->msg.b->port); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + break; +#endif /* LWIP_UDP */ + default: + break; + } + } + } + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * Indicate data has been received from a TCP pcb contained in a netconn + * Called from netconn_recv + * + * @param m the api_msg_msg pointing to the connection + */ +void +lwip_netconn_do_recv(void *m) +{ + struct api_msg *msg = (struct api_msg*)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + u32_t remaining = msg->msg.r.len; + do { + u16_t recved = (remaining > 0xffff) ? 
0xffff : (u16_t)remaining; + tcp_recved(msg->conn->pcb.tcp, recved); + remaining -= recved; + } while (remaining != 0); + } + } + TCPIP_APIMSG_ACK(msg); +} + +#if TCP_LISTEN_BACKLOG +/** Indicate that a TCP pcb has been accepted + * Called from netconn_accept + * + * @param m the api_msg_msg pointing to the connection + */ +void +lwip_netconn_do_accepted(void *m) +{ + struct api_msg *msg = (struct api_msg*)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + tcp_backlog_accepted(msg->conn->pcb.tcp); + } + } + TCPIP_APIMSG_ACK(msg); +} +#endif /* TCP_LISTEN_BACKLOG */ + +/** + * See if more data needs to be written from a previous call to netconn_write. + * Called initially from lwip_netconn_do_write. If the first call can't send all data + * (because of low memory or empty send-buffer), this function is called again + * from sent_tcp() or poll_tcp() to send more data. If all data is sent, the + * blocking application thread (waiting in netconn_write) is released. + * + * @param conn netconn (that is currently in state NETCONN_WRITE) to process + * @return ERR_OK + * ERR_MEM if LWIP_TCPIP_CORE_LOCKING=1 and sending hasn't yet finished + */ +static err_t +lwip_netconn_do_writemore(struct netconn *conn WRITE_DELAYED_PARAM) +{ + err_t err; + const void *dataptr; + u16_t len, available; + u8_t write_finished = 0; + size_t diff; + u8_t dontblock; + u8_t apiflags; + + LWIP_ASSERT("conn != NULL", conn != NULL); + LWIP_ASSERT("conn->state == NETCONN_WRITE", (conn->state == NETCONN_WRITE)); + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + LWIP_ASSERT("conn->pcb.tcp != NULL", conn->pcb.tcp != NULL); + LWIP_ASSERT("conn->write_offset < conn->current_msg->msg.w.len", + conn->write_offset < conn->current_msg->msg.w.len); + + apiflags = conn->current_msg->msg.w.apiflags; + dontblock = netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK); + +#if LWIP_SO_SNDTIMEO + if ((conn->send_timeout != 0) && + ((s32_t)(sys_now() - conn->current_msg->msg.w.time_started) >= conn->send_timeout)) { + write_finished = 1; + if (conn->write_offset == 0) { + /* nothing has been written */ + err = ERR_WOULDBLOCK; + conn->current_msg->msg.w.len = 0; + } else { + /* partial write */ + err = ERR_OK; + conn->current_msg->msg.w.len = conn->write_offset; + conn->write_offset = 0; + } + } else +#endif /* LWIP_SO_SNDTIMEO */ + { + dataptr = (const u8_t*)conn->current_msg->msg.w.dataptr + conn->write_offset; + diff = conn->current_msg->msg.w.len - conn->write_offset; + if (diff > 0xffffUL) { /* max_u16_t */ + len = 0xffff; + apiflags |= TCP_WRITE_FLAG_MORE; + } else { + len = (u16_t)diff; + } + available = tcp_sndbuf(conn->pcb.tcp); + if (available < len) { + /* don't try to write more than sendbuf */ + len = available; + if (dontblock) { + if (!len) { + err = ERR_WOULDBLOCK; + goto err_mem; + } + } else { + apiflags |= TCP_WRITE_FLAG_MORE; + } + } + LWIP_ASSERT("lwip_netconn_do_writemore: invalid length!", ((conn->write_offset + len) <= conn->current_msg->msg.w.len)); + err = tcp_write(conn->pcb.tcp, dataptr, len, apiflags); + /* if OK or memory error, check available space */ + if ((err == ERR_OK) || (err == ERR_MEM)) { +err_mem: + if (dontblock && (len < conn->current_msg->msg.w.len)) { + /* non-blocking write did not write everything: mark the pcb non-writable + and let poll_tcp check writable space to mark the pcb writable again */ + API_EVENT(conn, NETCONN_EVT_SENDMINUS, len); + conn->flags |= NETCONN_FLAG_CHECK_WRITESPACE; + 
} else if ((tcp_sndbuf(conn->pcb.tcp) <= TCP_SNDLOWAT) || + (tcp_sndqueuelen(conn->pcb.tcp) >= TCP_SNDQUEUELOWAT)) { + /* The queued byte- or pbuf-count exceeds the configured low-water limit, + let select mark this pcb as non-writable. */ + API_EVENT(conn, NETCONN_EVT_SENDMINUS, len); + } + } + + if (err == ERR_OK) { + err_t out_err; + conn->write_offset += len; + if ((conn->write_offset == conn->current_msg->msg.w.len) || dontblock) { + /* return sent length */ + conn->current_msg->msg.w.len = conn->write_offset; + /* everything was written */ + write_finished = 1; + } + out_err = tcp_output(conn->pcb.tcp); + if (ERR_IS_FATAL(out_err) || (out_err == ERR_RTE)) { + /* If tcp_output fails with fatal error or no route is found, + don't try writing any more but return the error + to the application thread. */ + err = out_err; + write_finished = 1; + conn->current_msg->msg.w.len = 0; + } + } else if (err == ERR_MEM) { + /* If ERR_MEM, we wait for sent_tcp or poll_tcp to be called. + For blocking sockets, we do NOT return to the application + thread, since ERR_MEM is only a temporary error! Non-blocking + will remain non-writable until sent_tcp/poll_tcp is called */ + + /* tcp_write returned ERR_MEM, try tcp_output anyway */ + err_t out_err = tcp_output(conn->pcb.tcp); + if (ERR_IS_FATAL(out_err) || (out_err == ERR_RTE)) { + /* If tcp_output fails with fatal error or no route is found, + don't try writing any more but return the error + to the application thread. */ + err = out_err; + write_finished = 1; + conn->current_msg->msg.w.len = 0; + } else if (dontblock) { + /* non-blocking write is done on ERR_MEM */ + err = ERR_WOULDBLOCK; + write_finished = 1; + conn->current_msg->msg.w.len = 0; + } + } else { + /* On errors != ERR_MEM, we don't try writing any more but return + the error to the application thread. 
*/ + write_finished = 1; + conn->current_msg->msg.w.len = 0; + } + } + if (write_finished) { + /* everything was written: set back connection state + and back to application task */ + sys_sem_t* op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + conn->current_msg->err = err; + conn->current_msg = NULL; + conn->write_offset = 0; + conn->state = NETCONN_NONE; + NETCONN_SET_SAFE_ERR(conn, err); +#if LWIP_TCPIP_CORE_LOCKING + if (delayed) +#endif + { + sys_sem_signal(op_completed_sem); + } + } +#if LWIP_TCPIP_CORE_LOCKING + else { + return ERR_MEM; + } +#endif + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Send some data on a TCP pcb contained in a netconn + * Called from netconn_write + * + * @param m the api_msg_msg pointing to the connection + */ +void +lwip_netconn_do_write(void *m) +{ + struct api_msg *msg = (struct api_msg*)m; + + if (ERR_IS_FATAL(msg->conn->last_err)) { + msg->err = msg->conn->last_err; + } else { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { +#if LWIP_TCP + if (msg->conn->state != NETCONN_NONE) { + /* netconn is connecting, closing or in blocking write */ + msg->err = ERR_INPROGRESS; + } else if (msg->conn->pcb.tcp != NULL) { + msg->conn->state = NETCONN_WRITE; + /* set all the variables used by lwip_netconn_do_writemore */ + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL && + msg->conn->write_offset == 0); + LWIP_ASSERT("msg->msg.w.len != 0", msg->msg.w.len != 0); + msg->conn->current_msg = msg; + msg->conn->write_offset = 0; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_writemore(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_WRITE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state != NETCONN_WRITE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_writemore(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* for both cases: if lwip_netconn_do_writemore was called, don't ACK the APIMSG + since lwip_netconn_do_writemore ACKs it! 
*/ + return; + } else { + msg->err = ERR_CONN; + } +#else /* LWIP_TCP */ + msg->err = ERR_VAL; +#endif /* LWIP_TCP */ +#if (LWIP_UDP || LWIP_RAW) + } else { + msg->err = ERR_VAL; +#endif /* (LWIP_UDP || LWIP_RAW) */ + } + } + TCPIP_APIMSG_ACK(msg); +} + +/** + * Return a connection's local or remote address + * Called from netconn_getaddr + * + * @param m the api_msg_msg pointing to the connection + */ +void +lwip_netconn_do_getaddr(void *m) +{ + struct api_msg *msg = (struct api_msg*)m; + + if (msg->conn->pcb.ip != NULL) { + if (msg->msg.ad.local) { + ip_addr_copy(API_EXPR_DEREF(msg->msg.ad.ipaddr), + msg->conn->pcb.ip->local_ip); + } else { + ip_addr_copy(API_EXPR_DEREF(msg->msg.ad.ipaddr), + msg->conn->pcb.ip->remote_ip); + } + + msg->err = ERR_OK; + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + if (msg->msg.ad.local) { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.raw->protocol; + } else { + /* return an error as connecting is only a helper for upper layers */ + msg->err = ERR_CONN; + } + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + if (msg->msg.ad.local) { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.udp->local_port; + } else { + if ((msg->conn->pcb.udp->flags & UDP_FLAGS_CONNECTED) == 0) { + msg->err = ERR_CONN; + } else { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.udp->remote_port; + } + } + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + if ((msg->msg.ad.local == 0) && + ((msg->conn->pcb.tcp->state == CLOSED) || (msg->conn->pcb.tcp->state == LISTEN))) { + /* pcb is not connected and remote name is requested */ + msg->err = ERR_CONN; + } else { + API_EXPR_DEREF(msg->msg.ad.port) = (msg->msg.ad.local ? msg->conn->pcb.tcp->local_port : msg->conn->pcb.tcp->remote_port); + } + break; +#endif /* LWIP_TCP */ + default: + LWIP_ASSERT("invalid netconn_type", 0); + break; + } + } else { + msg->err = ERR_CONN; + } + TCPIP_APIMSG_ACK(msg); +} + +/** + * Close or half-shutdown a TCP pcb contained in a netconn + * Called from netconn_close + * In contrast to closing sockets, the netconn is not deallocated. 
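+ *
+ * Example usage (an illustrative caller-side sketch of the public calls that
+ * end up in this handler):
+ *
+ *   netconn_shutdown(conn, 0, 1);  // half-close: shut down TX, keep RX open
+ *   // ... read any remaining data ...
+ *   netconn_close(conn);           // full close; conn itself still needs netconn_delete()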
+ * + * @param m the api_msg_msg pointing to the connection + */ +void +lwip_netconn_do_close(void *m) +{ + struct api_msg *msg = (struct api_msg*)m; + +#if LWIP_TCP + enum netconn_state state = msg->conn->state; + /* First check if this is a TCP netconn and if it is in a correct state + (LISTEN doesn't support half shutdown) */ + if ((msg->conn->pcb.tcp != NULL) && + (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) && + ((msg->msg.sd.shut == NETCONN_SHUT_RDWR) || (state != NETCONN_LISTEN))) { + /* Check if we are in a connected state */ + if (state == NETCONN_CONNECT) { + /* TCP connect in progress: cannot shutdown */ + msg->err = ERR_CONN; + } else if (state == NETCONN_WRITE) { +#if LWIP_NETCONN_FULLDUPLEX + if (msg->msg.sd.shut & NETCONN_SHUT_WR) { + /* close requested, abort running write */ + sys_sem_t* write_completed_sem; + LWIP_ASSERT("msg->conn->current_msg != NULL", msg->conn->current_msg != NULL); + write_completed_sem = LWIP_API_MSG_SEM(msg->conn->current_msg); + msg->conn->current_msg->err = ERR_CLSD; + msg->conn->current_msg = NULL; + msg->conn->write_offset = 0; + msg->conn->state = NETCONN_NONE; + state = NETCONN_NONE; + NETCONN_SET_SAFE_ERR(msg->conn, ERR_CLSD); + sys_sem_signal(write_completed_sem); + } else { + LWIP_ASSERT("msg->msg.sd.shut == NETCONN_SHUT_RD", msg->msg.sd.shut == NETCONN_SHUT_RD); + /* In this case, let the write continue and do not interfere with + conn->current_msg or conn->state! */ + msg->err = tcp_shutdown(msg->conn->pcb.tcp, 1, 0); + } + } + if (state == NETCONN_NONE) { +#else /* LWIP_NETCONN_FULLDUPLEX */ + msg->err = ERR_INPROGRESS; + } else { +#endif /* LWIP_NETCONN_FULLDUPLEX */ + if (msg->msg.sd.shut & NETCONN_SHUT_RD) { + /* Drain and delete mboxes */ + netconn_drain(msg->conn); + } + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL && + msg->conn->write_offset == 0); + msg->conn->state = NETCONN_CLOSE; + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_close_internal(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CLOSE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state == NETCONN_NONE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_close_internal(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* for tcp netconns, lwip_netconn_do_close_internal ACKs the message */ + return; + } + } else +#endif /* LWIP_TCP */ + { + msg->err = ERR_CONN; + } + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** + * Join multicast groups for UDP netconns. 
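+ * A hedged example of the application-side call (the group address is an
+ * arbitrary illustration; IP_ADDR_ANY selects the default netif):
+ *
+ *   ip_addr_t group;
+ *   IP_ADDR4(&group, 239, 0, 0, 1);
+ *   netconn_join_leave_group(conn, &group, IP_ADDR_ANY, NETCONN_JOIN);
+ *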
+ * Called from netconn_join_leave_group + * + * @param m the api_msg_msg pointing to the connection + */ +void +lwip_netconn_do_join_leave_group(void *m) +{ + struct api_msg *msg = (struct api_msg*)m; + + if (ERR_IS_FATAL(msg->conn->last_err)) { + msg->err = msg->conn->last_err; + } else { + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { +#if LWIP_UDP +#if LWIP_IPV6 && LWIP_IPV6_MLD + if (NETCONNTYPE_ISIPV6(msg->conn->type)) { + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = mld6_joingroup(ip_2_ip6(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = mld6_leavegroup(ip_2_ip6(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } + } + else +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + { +#if LWIP_IGMP + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = igmp_joingroup(ip_2_ip4(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = igmp_leavegroup(ip_2_ip4(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_UDP */ +#if (LWIP_TCP || LWIP_RAW) + } else { + msg->err = ERR_VAL; +#endif /* (LWIP_TCP || LWIP_RAW) */ + } + } else { + msg->err = ERR_CONN; + } + } + TCPIP_APIMSG_ACK(msg); +} +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/** + * Callback function that is called when DNS name is resolved + * (or on timeout). A waiting application thread is woken up by + * signaling the semaphore. + */ +static void +lwip_netconn_do_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg) +{ + struct dns_api_msg *msg = (struct dns_api_msg*)arg; + + /* we trust the internal implementation to be correct :-) */ + LWIP_UNUSED_ARG(name); + + if (ipaddr == NULL) { + /* timeout or memory error */ + API_EXPR_DEREF(msg->err) = ERR_VAL; + } else { + /* address was resolved */ + API_EXPR_DEREF(msg->err) = ERR_OK; + API_EXPR_DEREF(msg->addr) = *ipaddr; + } + /* wake up the application task waiting in netconn_gethostbyname */ + sys_sem_signal(API_EXPR_REF_SEM(msg->sem)); +} + +/** + * Execute a DNS query + * Called from netconn_gethostbyname + * + * @param arg the dns_api_msg pointing to the query + */ +void +lwip_netconn_do_gethostbyname(void *arg) +{ + struct dns_api_msg *msg = (struct dns_api_msg*)arg; + u8_t addrtype = +#if LWIP_IPV4 && LWIP_IPV6 + msg->dns_addrtype; +#else + LWIP_DNS_ADDRTYPE_DEFAULT; +#endif + + API_EXPR_DEREF(msg->err) = dns_gethostbyname_addrtype(msg->name, + API_EXPR_REF(msg->addr), lwip_netconn_do_dns_found, msg, addrtype); + if (API_EXPR_DEREF(msg->err) != ERR_INPROGRESS) { + /* on error or immediate success, wake up the application + * task waiting in netconn_gethostbyname */ + sys_sem_signal(API_EXPR_REF_SEM(msg->sem)); + } +} +#endif /* LWIP_DNS */ + +#endif /* LWIP_NETCONN */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/err.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/err.c new file mode 100644 index 000000000..6e9ab76be --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/err.c @@ -0,0 +1,115 @@ +/** + * @file + * Error Management module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/err.h" +#include "lwip/def.h" +#include "lwip/sys.h" + +#include "lwip/errno.h" + +#if !NO_SYS +/** Table to quickly map an lwIP error (err_t) to a socket error + * by using -err as an index */ +static const int err_to_errno_table[] = { + 0, /* ERR_OK 0 No error, everything OK. */ + ENOMEM, /* ERR_MEM -1 Out of memory error. */ + ENOBUFS, /* ERR_BUF -2 Buffer error. */ + EWOULDBLOCK, /* ERR_TIMEOUT -3 Timeout */ + EHOSTUNREACH, /* ERR_RTE -4 Routing problem. */ + EINPROGRESS, /* ERR_INPROGRESS -5 Operation in progress */ + EINVAL, /* ERR_VAL -6 Illegal value. */ + EWOULDBLOCK, /* ERR_WOULDBLOCK -7 Operation would block. */ + EADDRINUSE, /* ERR_USE -8 Address in use. */ + EALREADY, /* ERR_ALREADY -9 Already connecting. */ + EISCONN, /* ERR_ISCONN -10 Conn already established.*/ + ENOTCONN, /* ERR_CONN -11 Not connected. */ + -1, /* ERR_IF -12 Low-level netif error */ + ECONNABORTED, /* ERR_ABRT -13 Connection aborted. */ + ECONNRESET, /* ERR_RST -14 Connection reset. */ + ENOTCONN, /* ERR_CLSD -15 Connection closed. */ + EIO /* ERR_ARG -16 Illegal argument. */ +}; + +int +err_to_errno(err_t err) +{ + if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_to_errno_table))) { + return EIO; + } + return err_to_errno_table[-err]; +} +#endif /* !NO_SYS */ + +#ifdef LWIP_DEBUG + +static const char *err_strerr[] = { + "Ok.", /* ERR_OK 0 */ + "Out of memory error.", /* ERR_MEM -1 */ + "Buffer error.", /* ERR_BUF -2 */ + "Timeout.", /* ERR_TIMEOUT -3 */ + "Routing problem.", /* ERR_RTE -4 */ + "Operation in progress.", /* ERR_INPROGRESS -5 */ + "Illegal value.", /* ERR_VAL -6 */ + "Operation would block.", /* ERR_WOULDBLOCK -7 */ + "Address in use.", /* ERR_USE -8 */ + "Already connecting.", /* ERR_ALREADY -9 */ + "Already connected.", /* ERR_ISCONN -10 */ + "Not connected.", /* ERR_CONN -11 */ + "Low-level netif error.", /* ERR_IF -12 */ + "Connection aborted.", /* ERR_ABRT -13 */ + "Connection reset.", /* ERR_RST -14 */ + "Connection closed.", /* ERR_CLSD -15 */ + "Illegal argument." 
/* ERR_ARG -16 */ +}; + +/** + * Convert an lwip internal error to a string representation. + * + * @param err an lwip internal err_t + * @return a string representation for err + */ +const char * +lwip_strerr(err_t err) +{ + if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_strerr))) { + return "Unknown error."; + } + return err_strerr[-err]; +} + +#endif /* LWIP_DEBUG */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/netbuf.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/netbuf.c new file mode 100644 index 000000000..6a5a1823b --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/netbuf.c @@ -0,0 +1,246 @@ +/** + * @file + * Network buffer management + * + * @defgroup netbuf Network buffers + * @ingroup netconn + * Network buffer descriptor for @ref netconn. Based on @ref pbuf internally + * to avoid copying data around.\n + * Buffers must not be shared across multiple threads; all functions except + * netbuf_new() and netbuf_delete() are not thread-safe. + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netbuf.h" +#include "lwip/memp.h" + +#include <string.h> + +/** + * @ingroup netbuf + * Create (allocate) and initialize a new netbuf. + * The netbuf doesn't yet contain a packet buffer! + * + * @return a pointer to a new netbuf + * NULL on lack of memory + */ +struct +netbuf *netbuf_new(void) +{ + struct netbuf *buf; + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf != NULL) { + memset(buf, 0, sizeof(struct netbuf)); + } + return buf; +} + +/** + * @ingroup netbuf + * Deallocate a netbuf allocated by netbuf_new(). 
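+ *
+ * Example usage (an illustrative sketch; 'conn' and 'dest_addr' are assumed
+ * to exist, and netconn_sendto() is shown only to complete the picture):
+ *
+ *   struct netbuf *nb = netbuf_new();
+ *   if (nb != NULL) {
+ *     void *payload = netbuf_alloc(nb, 4);  // attach a 4-byte pbuf
+ *     if (payload != NULL) {
+ *       memcpy(payload, "ping", 4);
+ *       netconn_sendto(conn, nb, &dest_addr, 7);
+ *     }
+ *     netbuf_delete(nb);  // frees the pbuf chain and the netbuf itself
+ *   }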
+ * + * @param buf pointer to a netbuf allocated by netbuf_new() + */ +void +netbuf_delete(struct netbuf *buf) +{ + if (buf != NULL) { + if (buf->p != NULL) { + pbuf_free(buf->p); + buf->p = buf->ptr = NULL; + } + memp_free(MEMP_NETBUF, buf); + } +} + +/** + * @ingroup netbuf + * Allocate memory for a packet buffer for a given netbuf. + * + * @param buf the netbuf for which to allocate a packet buffer + * @param size the size of the packet buffer to allocate + * @return pointer to the allocated memory + * NULL if no memory could be allocated + */ +void * +netbuf_alloc(struct netbuf *buf, u16_t size) +{ + LWIP_ERROR("netbuf_alloc: invalid buf", (buf != NULL), return NULL;); + + /* Deallocate any previously allocated memory. */ + if (buf->p != NULL) { + pbuf_free(buf->p); + } + buf->p = pbuf_alloc(PBUF_TRANSPORT, size, PBUF_RAM); + if (buf->p == NULL) { + return NULL; + } + LWIP_ASSERT("check that first pbuf can hold size", + (buf->p->len >= size)); + buf->ptr = buf->p; + return buf->p->payload; +} + +/** + * @ingroup netbuf + * Free the packet buffer included in a netbuf + * + * @param buf pointer to the netbuf which contains the packet buffer to free + */ +void +netbuf_free(struct netbuf *buf) +{ + LWIP_ERROR("netbuf_free: invalid buf", (buf != NULL), return;); + if (buf->p != NULL) { + pbuf_free(buf->p); + } + buf->p = buf->ptr = NULL; +} + +/** + * @ingroup netbuf + * Let a netbuf reference existing (non-volatile) data. + * + * @param buf netbuf which should reference the data + * @param dataptr pointer to the data to reference + * @param size size of the data + * @return ERR_OK if data is referenced + * ERR_MEM if data couldn't be referenced due to lack of memory + */ +err_t +netbuf_ref(struct netbuf *buf, const void *dataptr, u16_t size) +{ + LWIP_ERROR("netbuf_ref: invalid buf", (buf != NULL), return ERR_ARG;); + if (buf->p != NULL) { + pbuf_free(buf->p); + } + buf->p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF); + if (buf->p == NULL) { + buf->ptr = NULL; + return ERR_MEM; + } + ((struct pbuf_rom*)buf->p)->payload = dataptr; + buf->p->len = buf->p->tot_len = size; + buf->ptr = buf->p; + return ERR_OK; +} + +/** + * @ingroup netbuf + * Chain one netbuf to another (@see pbuf_chain) + * + * @param head the first netbuf + * @param tail netbuf to chain after head, freed by this function, may not be referenced after returning + */ +void +netbuf_chain(struct netbuf *head, struct netbuf *tail) +{ + LWIP_ERROR("netbuf_chain: invalid head", (head != NULL), return;); + LWIP_ERROR("netbuf_chain: invalid tail", (tail != NULL), return;); + pbuf_cat(head->p, tail->p); + head->ptr = head->p; + memp_free(MEMP_NETBUF, tail); +} + +/** + * @ingroup netbuf + * Get the data pointer and length of the data inside a netbuf. + * + * @param buf netbuf to get the data from + * @param dataptr pointer to a void pointer where to store the data pointer + * @param len pointer to an u16_t where the length of the data is stored + * @return ERR_OK if the information was retrieved, + * ERR_BUF on error. 
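+ *
+ * Example usage (the usual fragment-walking idiom; 'process' is a
+ * hypothetical consumer):
+ *
+ *   void *data; u16_t len;
+ *   netbuf_first(buf);
+ *   do {
+ *     if (netbuf_data(buf, &data, &len) == ERR_OK) {
+ *       process(data, len);
+ *     }
+ *   } while (netbuf_next(buf) >= 0);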
+ */ +err_t +netbuf_data(struct netbuf *buf, void **dataptr, u16_t *len) +{ + LWIP_ERROR("netbuf_data: invalid buf", (buf != NULL), return ERR_ARG;); + LWIP_ERROR("netbuf_data: invalid dataptr", (dataptr != NULL), return ERR_ARG;); + LWIP_ERROR("netbuf_data: invalid len", (len != NULL), return ERR_ARG;); + + if (buf->ptr == NULL) { + return ERR_BUF; + } + *dataptr = buf->ptr->payload; + *len = buf->ptr->len; + return ERR_OK; +} + +/** + * @ingroup netbuf + * Move the current data pointer of a packet buffer contained in a netbuf + * to the next part. + * The packet buffer itself is not modified. + * + * @param buf the netbuf to modify + * @return -1 if there is no next part + * 1 if moved to the next part but now there is no next part + * 0 if moved to the next part and there are still more parts + */ +s8_t +netbuf_next(struct netbuf *buf) +{ + LWIP_ERROR("netbuf_next: invalid buf", (buf != NULL), return -1;); + if (buf->ptr->next == NULL) { + return -1; + } + buf->ptr = buf->ptr->next; + if (buf->ptr->next == NULL) { + return 1; + } + return 0; +} + +/** + * @ingroup netbuf + * Move the current data pointer of a packet buffer contained in a netbuf + * to the beginning of the packet. + * The packet buffer itself is not modified. + * + * @param buf the netbuf to modify + */ +void +netbuf_first(struct netbuf *buf) +{ + LWIP_ERROR("netbuf_first: invalid buf", (buf != NULL), return;); + buf->ptr = buf->p; +} + +#endif /* LWIP_NETCONN */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/netdb.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/netdb.c new file mode 100644 index 000000000..deff4951c --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/netdb.c @@ -0,0 +1,413 @@ +/** + * @file + * API functions for name resolving + * + * @defgroup netdbapi NETDB API + * @ingroup socket + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/netdb.h" + +#if LWIP_DNS && LWIP_SOCKET + +#include "lwip/err.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/ip_addr.h" +#include "lwip/api.h" +#include "lwip/dns.h" + +#include <string.h> /* memset */ +#include <stdlib.h> /* atoi */ + +/** helper struct for gethostbyname_r to access the char* buffer */ +struct gethostbyname_r_helper { + ip_addr_t *addr_list[2]; + ip_addr_t addr; + char *aliases; +}; + +/** h_errno is exported in netdb.h for access by applications. */ +#if LWIP_DNS_API_DECLARE_H_ERRNO +int h_errno; +#endif /* LWIP_DNS_API_DECLARE_H_ERRNO */ + +/** define "hostent" variables storage: 0 if we use a static (but unprotected) + * set of variables for lwip_gethostbyname, 1 if we use a local storage */ +#ifndef LWIP_DNS_API_HOSTENT_STORAGE +#define LWIP_DNS_API_HOSTENT_STORAGE 0 +#endif + +/** define "hostent" variables storage */ +#if LWIP_DNS_API_HOSTENT_STORAGE +#define HOSTENT_STORAGE +#else +#define HOSTENT_STORAGE static +#endif /* LWIP_DNS_API_HOSTENT_STORAGE */ + +/** + * Returns an entry containing addresses of address family AF_INET + * for the host with name name. + * Due to dns_gethostbyname limitations, only one address is returned. + * + * @param name the hostname to resolve + * @return an entry containing addresses of address family AF_INET + * for the host with name name + */ +struct hostent* +lwip_gethostbyname(const char *name) +{ + err_t err; + ip_addr_t addr; + + /* buffer variables for lwip_gethostbyname() */ + HOSTENT_STORAGE struct hostent s_hostent; + HOSTENT_STORAGE char *s_aliases; + HOSTENT_STORAGE ip_addr_t s_hostent_addr; + HOSTENT_STORAGE ip_addr_t *s_phostent_addr[2]; + HOSTENT_STORAGE char s_hostname[DNS_MAX_NAME_LENGTH + 1]; + + /* query host IP address */ + err = netconn_gethostbyname(name, &addr); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err)); + h_errno = HOST_NOT_FOUND; + return NULL; + } + + /* fill hostent */ + s_hostent_addr = addr; + s_phostent_addr[0] = &s_hostent_addr; + s_phostent_addr[1] = NULL; + strncpy(s_hostname, name, DNS_MAX_NAME_LENGTH); + s_hostname[DNS_MAX_NAME_LENGTH] = 0; + s_hostent.h_name = s_hostname; + s_aliases = NULL; + s_hostent.h_aliases = &s_aliases; + s_hostent.h_addrtype = AF_INET; + s_hostent.h_length = sizeof(ip_addr_t); + s_hostent.h_addr_list = (char**)&s_phostent_addr; + +#if DNS_DEBUG + /* dump hostent */ + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_name == %s\n", s_hostent.h_name)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_aliases == %p\n", (void*)s_hostent.h_aliases)); + /* h_aliases are always empty */ + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addrtype == %d\n", s_hostent.h_addrtype)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_length == %d\n", s_hostent.h_length)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list == %p\n", (void*)s_hostent.h_addr_list)); + if (s_hostent.h_addr_list != NULL) { + u8_t idx; + for (idx=0; s_hostent.h_addr_list[idx]; idx++) { + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i] == %p\n", idx, s_hostent.h_addr_list[idx])); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i]-> == %s\n", idx, ipaddr_ntoa((ip_addr_t*)s_hostent.h_addr_list[idx]))); + } + } +#endif /* DNS_DEBUG */ + +#if LWIP_DNS_API_HOSTENT_STORAGE + /* this function should return the "per-thread" hostent after copy from s_hostent */ + return sys_thread_hostent(&s_hostent); +#else + return &s_hostent; +#endif /* LWIP_DNS_API_HOSTENT_STORAGE */ +} + +/** + * Thread-safe variant of lwip_gethostbyname: instead of using a static + *
buffer, this function takes buffer and errno pointers as arguments + * and uses these for the result. + * + * @param name the hostname to resolve + * @param ret pre-allocated struct where to store the result + * @param buf pre-allocated buffer where to store additional data + * @param buflen the size of buf + * @param result pointer to a hostent pointer that is set to ret on success + * and set to zero on error + * @param h_errnop pointer to an int where to store errors (instead of modifying + * the global h_errno) + * @return 0 on success, non-zero on error, additional error information + * is stored in *h_errnop instead of h_errno to be thread-safe + */ +int +lwip_gethostbyname_r(const char *name, struct hostent *ret, char *buf, + size_t buflen, struct hostent **result, int *h_errnop) +{ + err_t err; + struct gethostbyname_r_helper *h; + char *hostname; + size_t namelen; + int lh_errno; + + if (h_errnop == NULL) { + /* ensure h_errnop is never NULL */ + h_errnop = &lh_errno; + } + + if (result == NULL) { + /* not all arguments given */ + *h_errnop = EINVAL; + return -1; + } + /* first thing to do: set *result to nothing */ + *result = NULL; + if ((name == NULL) || (ret == NULL) || (buf == NULL)) { + /* not all arguments given */ + *h_errnop = EINVAL; + return -1; + } + + namelen = strlen(name); + if (buflen < (sizeof(struct gethostbyname_r_helper) + namelen + 1 + (MEM_ALIGNMENT - 1))) { + /* buf can't hold the data needed + a copy of name */ + *h_errnop = ERANGE; + return -1; + } + + h = (struct gethostbyname_r_helper*)LWIP_MEM_ALIGN(buf); + hostname = ((char*)h) + sizeof(struct gethostbyname_r_helper); + + /* query host IP address */ + err = netconn_gethostbyname(name, &h->addr); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err)); + *h_errnop = HOST_NOT_FOUND; + return -1; + } + + /* copy the hostname into buf */ + MEMCPY(hostname, name, namelen); + hostname[namelen] = 0; + + /* fill hostent */ + h->addr_list[0] = &h->addr; + h->addr_list[1] = NULL; + h->aliases = NULL; + ret->h_name = hostname; + ret->h_aliases = &h->aliases; + ret->h_addrtype = AF_INET; + ret->h_length = sizeof(ip_addr_t); + ret->h_addr_list = (char**)&h->addr_list; + + /* set result != NULL */ + *result = ret; + + /* return success */ + return 0; +} + +/** + * Frees one or more addrinfo structures returned by getaddrinfo(), along with + * any additional storage associated with those structures. If the ai_next field + * of the structure is not null, the entire list of structures is freed. + * + * @param ai struct addrinfo to free + */ +void +lwip_freeaddrinfo(struct addrinfo *ai) +{ + struct addrinfo *next; + + while (ai != NULL) { + next = ai->ai_next; + memp_free(MEMP_NETDB, ai); + ai = next; + } +} + +/** + * Translates the name of a service location (for example, a host name) and/or + * a service name and returns a set of socket addresses and associated + * information to be used in creating a socket with which to address the + * specified service. + * Memory for the result is allocated internally and must be freed by calling + * lwip_freeaddrinfo()! + * + * Due to a limitation in dns_gethostbyname, only the first address of a + * host is returned. + * Also, service names are not supported (only port numbers)! 
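+ *
+ * Example usage (editor's sketch; host and service are illustrative and error
+ * handling is omitted):
+ *   struct addrinfo hints, *res;
+ *   memset(&hints, 0, sizeof(hints));
+ *   hints.ai_socktype = SOCK_STREAM;
+ *   if (lwip_getaddrinfo("example.com", "80", &hints, &res) == 0) {
+ *     int fd = lwip_socket(res->ai_family, res->ai_socktype, res->ai_protocol);
+ *     lwip_connect(fd, res->ai_addr, res->ai_addrlen);
+ *     lwip_close(fd);
+ *     lwip_freeaddrinfo(res);
+ *   }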
+ * + * @param nodename descriptive name or address string of the host + * (may be NULL -> local address) + * @param servname port number as string, or NULL + * @param hints structure containing input values that set socktype and protocol + * @param res pointer to a pointer where to store the result (set to NULL on failure) + * @return 0 on success, non-zero on failure + * + * @todo: implement AI_V4MAPPED, AI_ADDRCONFIG + */ +int +lwip_getaddrinfo(const char *nodename, const char *servname, + const struct addrinfo *hints, struct addrinfo **res) +{ + err_t err; + ip_addr_t addr; + struct addrinfo *ai; + struct sockaddr_storage *sa = NULL; + int port_nr = 0; + size_t total_size; + size_t namelen = 0; + int ai_family; + + if (res == NULL) { + return EAI_FAIL; + } + *res = NULL; + if ((nodename == NULL) && (servname == NULL)) { + return EAI_NONAME; + } + + if (hints != NULL) { + ai_family = hints->ai_family; + if ((ai_family != AF_UNSPEC) +#if LWIP_IPV4 + && (ai_family != AF_INET) +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + && (ai_family != AF_INET6) +#endif /* LWIP_IPV6 */ + ) { + return EAI_FAMILY; + } + } else { + ai_family = AF_UNSPEC; + } + + if (servname != NULL) { + /* service name specified: convert to port number + * @todo?: currently, only ASCII integers (port numbers) are supported (AI_NUMERICSERV)! */ + port_nr = atoi(servname); + if ((port_nr <= 0) || (port_nr > 0xffff)) { + return EAI_SERVICE; + } + } + + if (nodename != NULL) { + /* service location specified, try to resolve */ + if ((hints != NULL) && (hints->ai_flags & AI_NUMERICHOST)) { + /* no DNS lookup, just parse for an address string */ + if (!ipaddr_aton(nodename, &addr)) { + return EAI_NONAME; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((IP_IS_V6_VAL(addr) && ai_family == AF_INET) || + (IP_IS_V4_VAL(addr) && ai_family == AF_INET6)) { + return EAI_NONAME; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + } else { +#if LWIP_IPV4 && LWIP_IPV6 + /* AF_UNSPEC: prefer IPv4 */ + u8_t type = NETCONN_DNS_IPV4_IPV6; + if (ai_family == AF_INET) { + type = NETCONN_DNS_IPV4; + } else if (ai_family == AF_INET6) { + type = NETCONN_DNS_IPV6; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + err = netconn_gethostbyname_addrtype(nodename, &addr, type); + if (err != ERR_OK) { + return EAI_FAIL; + } + } + } else { + /* no service location specified, use loopback address */ + if ((hints != NULL) && (hints->ai_flags & AI_PASSIVE)) { + ip_addr_set_any(ai_family == AF_INET6, &addr); + } else { + ip_addr_set_loopback(ai_family == AF_INET6, &addr); + } + } + + total_size = sizeof(struct addrinfo) + sizeof(struct sockaddr_storage); + if (nodename != NULL) { + namelen = strlen(nodename); + if (namelen > DNS_MAX_NAME_LENGTH) { + /* invalid name length */ + return EAI_FAIL; + } + LWIP_ASSERT("namelen is too long", total_size + namelen + 1 > total_size); + total_size += namelen + 1; + } + /* If this fails, please report to lwip-devel!
:-) */ + LWIP_ASSERT("total_size <= NETDB_ELEM_SIZE: please report this!", + total_size <= NETDB_ELEM_SIZE); + ai = (struct addrinfo *)memp_malloc(MEMP_NETDB); + if (ai == NULL) { + return EAI_MEMORY; + } + memset(ai, 0, total_size); + /* cast through void* to get rid of alignment warnings */ + sa = (struct sockaddr_storage *)(void*)((u8_t*)ai + sizeof(struct addrinfo)); + if (IP_IS_V6_VAL(addr)) { +#if LWIP_IPV6 + struct sockaddr_in6 *sa6 = (struct sockaddr_in6*)sa; + /* set up sockaddr */ + inet6_addr_from_ip6addr(&sa6->sin6_addr, ip_2_ip6(&addr)); + sa6->sin6_family = AF_INET6; + sa6->sin6_len = sizeof(struct sockaddr_in6); + sa6->sin6_port = lwip_htons((u16_t)port_nr); + ai->ai_family = AF_INET6; +#endif /* LWIP_IPV6 */ + } else { +#if LWIP_IPV4 + struct sockaddr_in *sa4 = (struct sockaddr_in*)sa; + /* set up sockaddr */ + inet_addr_from_ip4addr(&sa4->sin_addr, ip_2_ip4(&addr)); + sa4->sin_family = AF_INET; + sa4->sin_len = sizeof(struct sockaddr_in); + sa4->sin_port = lwip_htons((u16_t)port_nr); + ai->ai_family = AF_INET; +#endif /* LWIP_IPV4 */ + } + + /* set up addrinfo */ + if (hints != NULL) { + /* copy socktype & protocol from hints if specified */ + ai->ai_socktype = hints->ai_socktype; + ai->ai_protocol = hints->ai_protocol; + } + if (nodename != NULL) { + /* copy nodename to canonname if specified */ + ai->ai_canonname = ((char*)ai + sizeof(struct addrinfo) + sizeof(struct sockaddr_storage)); + MEMCPY(ai->ai_canonname, nodename, namelen); + ai->ai_canonname[namelen] = 0; + } + ai->ai_addrlen = sizeof(struct sockaddr_storage); + ai->ai_addr = (struct sockaddr*)sa; + + *res = ai; + + return 0; +} + +#endif /* LWIP_DNS && LWIP_SOCKET */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/netifapi.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/netifapi.c new file mode 100644 index 000000000..aac0975f5 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/netifapi.c @@ -0,0 +1,221 @@ +/** + * @file + * Network Interface Sequential API module + * + * @defgroup netifapi NETIF API + * @ingroup sequential_api + * Thread-safe functions to be called from non-TCPIP threads + * + * @defgroup netifapi_netif NETIF related + * @ingroup netifapi + * To be called from non-TCPIP threads + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/opt.h" + +#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netifapi.h" +#include "lwip/memp.h" +#include "lwip/priv/tcpip_priv.h" + +#define NETIFAPI_VAR_REF(name) API_VAR_REF(name) +#define NETIFAPI_VAR_DECLARE(name) API_VAR_DECLARE(struct netifapi_msg, name) +#define NETIFAPI_VAR_ALLOC(name) API_VAR_ALLOC(struct netifapi_msg, MEMP_NETIFAPI_MSG, name, ERR_MEM) +#define NETIFAPI_VAR_FREE(name) API_VAR_FREE(MEMP_NETIFAPI_MSG, name) + +/** + * Call netif_add() inside the tcpip_thread context. + */ +static err_t +netifapi_do_netif_add(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg*)(void*)m; + + if (!netif_add( msg->netif, +#if LWIP_IPV4 + API_EXPR_REF(msg->msg.add.ipaddr), + API_EXPR_REF(msg->msg.add.netmask), + API_EXPR_REF(msg->msg.add.gw), +#endif /* LWIP_IPV4 */ + msg->msg.add.state, + msg->msg.add.init, + msg->msg.add.input)) { + return ERR_IF; + } else { + return ERR_OK; + } +} + +#if LWIP_IPV4 +/** + * Call netif_set_addr() inside the tcpip_thread context. + */ +static err_t +netifapi_do_netif_set_addr(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg*)(void*)m; + + netif_set_addr( msg->netif, + API_EXPR_REF(msg->msg.add.ipaddr), + API_EXPR_REF(msg->msg.add.netmask), + API_EXPR_REF(msg->msg.add.gw)); + return ERR_OK; +} +#endif /* LWIP_IPV4 */ + +/** + * Call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) inside the + * tcpip_thread context. + */ +static err_t +netifapi_do_netif_common(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg*)(void*)m; + + if (msg->msg.common.errtfunc != NULL) { + return msg->msg.common.errtfunc(msg->netif); + } else { + msg->msg.common.voidfunc(msg->netif); + return ERR_OK; + } +} + +/** + * @ingroup netifapi_netif + * Call netif_add() in a thread-safe way by running that function inside the + * tcpip_thread context. 
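+ *
+ * Example usage (editor's sketch; 'my_netif' and 'ethernetif_init' are
+ * illustrative names):
+ *   static struct netif my_netif;
+ *   ip4_addr_t ip, mask, gw;
+ *   IP4_ADDR(&ip, 192, 168, 1, 10);
+ *   IP4_ADDR(&mask, 255, 255, 255, 0);
+ *   IP4_ADDR(&gw, 192, 168, 1, 1);
+ *   netifapi_netif_add(&my_netif, &ip, &mask, &gw, NULL, ethernetif_init, tcpip_input);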
+ * + * @note for params @see netif_add() + */ +err_t +netifapi_netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + +#if LWIP_IPV4 + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } +#endif /* LWIP_IPV4 */ + + NETIFAPI_VAR_REF(msg).netif = netif; +#if LWIP_IPV4 + NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr); + NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask); + NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw); +#endif /* LWIP_IPV4 */ + NETIFAPI_VAR_REF(msg).msg.add.state = state; + NETIFAPI_VAR_REF(msg).msg.add.init = init; + NETIFAPI_VAR_REF(msg).msg.add.input = input; + err = tcpip_api_call(netifapi_do_netif_add, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} + +#if LWIP_IPV4 +/** + * @ingroup netifapi_netif + * Call netif_set_addr() in a thread-safe way by running that function inside the + * tcpip_thread context. + * + * @note for params @see netif_set_addr() + */ +err_t +netifapi_netif_set_addr(struct netif *netif, + const ip4_addr_t *ipaddr, + const ip4_addr_t *netmask, + const ip4_addr_t *gw) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + NETIFAPI_VAR_REF(msg).netif = netif; + NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr); + NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask); + NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw); + err = tcpip_api_call(netifapi_do_netif_set_addr, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} +#endif /* LWIP_IPV4 */ + +/** + * call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) in a thread-safe + * way by running that function inside the tcpip_thread context. + * + * @note use only for functions where there is only "netif" parameter. + */ +err_t +netifapi_netif_common(struct netif *netif, netifapi_void_fn voidfunc, + netifapi_errt_fn errtfunc) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + NETIFAPI_VAR_REF(msg).netif = netif; + NETIFAPI_VAR_REF(msg).msg.common.voidfunc = voidfunc; + NETIFAPI_VAR_REF(msg).msg.common.errtfunc = errtfunc; + err = tcpip_api_call(netifapi_do_netif_common, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} + +#endif /* LWIP_NETIF_API */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/sockets.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/sockets.c new file mode 100644 index 000000000..d72724ff6 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/sockets.c @@ -0,0 +1,2827 @@ +/** + * @file + * Sockets BSD-Like API module + * + * @defgroup socket Socket API + * @ingroup sequential_api + * BSD-style socket API.\n + * Thread-safe, to be called from non-TCPIP threads only.\n + * Can be activated by defining @ref LWIP_SOCKET to 1.\n + * Header is in posix/sys/socket.h\b + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + * Improved by Marc Boucher and David Haas + * + */ + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/sockets.h" +#include "lwip/api.h" +#include "lwip/sys.h" +#include "lwip/igmp.h" +#include "lwip/inet.h" +#include "lwip/tcp.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/memp.h" +#include "lwip/pbuf.h" +#include "lwip/priv/tcpip_priv.h" +#if LWIP_CHECKSUM_ON_COPY +#include "lwip/inet_chksum.h" +#endif + +#include <string.h> + +/* If the netconn API is not required publicly, then we include the necessary + files here to get the implementation */ +#if !LWIP_NETCONN +#undef LWIP_NETCONN +#define LWIP_NETCONN 1 +#include "api_msg.c" +#include "api_lib.c" +#include "netbuf.c" +#undef LWIP_NETCONN +#define LWIP_NETCONN 0 +#endif + +#if LWIP_IPV4 +#define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \ + (sin)->sin_len = sizeof(struct sockaddr_in); \ + (sin)->sin_family = AF_INET; \ + (sin)->sin_port = lwip_htons((port)); \ + inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \ + memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0) +#define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \ + inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \ + (port) = lwip_ntohs((sin)->sin_port); }while(0) +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +#define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \ + (sin6)->sin6_len = sizeof(struct sockaddr_in6); \ + (sin6)->sin6_family = AF_INET6; \ + (sin6)->sin6_port = lwip_htons((port)); \ + (sin6)->sin6_flowinfo = 0; \ + inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \ + (sin6)->sin6_scope_id = 0; }while(0) +#define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \ + inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \ + (port) = lwip_ntohs((sin6)->sin6_port); }while(0) +#endif /* LWIP_IPV6 */ + +#if LWIP_IPV4 && LWIP_IPV6 +static void sockaddr_to_ipaddr_port(const struct sockaddr* sockaddr, ip_addr_t* ipaddr, u16_t* port); + +#define IS_SOCK_ADDR_LEN_VALID(namelen) (((namelen) ==
sizeof(struct sockaddr_in)) || \ + ((namelen) == sizeof(struct sockaddr_in6))) +#define IS_SOCK_ADDR_TYPE_VALID(name) (((name)->sa_family == AF_INET) || \ + ((name)->sa_family == AF_INET6)) +#define SOCK_ADDR_TYPE_MATCH(name, sock) \ + ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \ + (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type)))) +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \ + if (IP_IS_V6(ipaddr)) { \ + IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \ + } else { \ + IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \ + } } while(0) +#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port)) +#define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \ + (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6)) +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ +#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6)) +#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6) +#define SOCK_ADDR_TYPE_MATCH(name, sock) 1 +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \ + IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port) +#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \ + SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port) +#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type) +#else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */ +#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in)) +#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET) +#define SOCK_ADDR_TYPE_MATCH(name, sock) 1 +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \ + IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port) +#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \ + SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port) +#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type) +#endif /* LWIP_IPV6 */ + +#define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \ + IS_SOCK_ADDR_TYPE_VALID(name)) +#define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \ + SOCK_ADDR_TYPE_MATCH(name, sock)) +#define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % 4) == 0) + + +#define LWIP_SOCKOPT_CHECK_OPTLEN(optlen, opttype) do { if ((optlen) < sizeof(opttype)) { return EINVAL; }}while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN(optlen, opttype); \ + if ((sock)->conn == NULL) { return EINVAL; } }while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN(optlen, opttype); \ + if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { return EINVAL; } }while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \ + if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { return ENOPROTOOPT; } }while(0) + + +#define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name) +#define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name) +#define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) 
API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name) +#if LWIP_MPU_COMPATIBLE +#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \ + name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \ + if (name == NULL) { \ + sock_set_errno(sock, ENOMEM); \ + return -1; \ + } }while(0) +#else /* LWIP_MPU_COMPATIBLE */ +#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) +#endif /* LWIP_MPU_COMPATIBLE */ + +#if LWIP_SO_SNDRCVTIMEO_NONSTANDARD +#define LWIP_SO_SNDRCVTIMEO_OPTTYPE int +#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val)) +#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((s32_t)*(const int*)(optval)) +#else +#define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval +#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \ + s32_t loc = (val); \ + ((struct timeval *)(optval))->tv_sec = (loc) / 1000U; \ + ((struct timeval *)(optval))->tv_usec = ((loc) % 1000U) * 1000U; }while(0) +#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000U) + (((const struct timeval *)(optval))->tv_usec / 1000U)) +#endif + +#define NUM_SOCKETS MEMP_NUM_NETCONN + +/** This is overridable for the rare case where more than 255 threads + * select on the same socket... + */ +#ifndef SELWAIT_T +#define SELWAIT_T u8_t +#endif + +/** Contains all internal pointers and states used for a socket */ +struct lwip_sock { + /** sockets currently are built on netconns, each socket has one netconn */ + struct netconn *conn; + /** data that was left from the previous read */ + void *lastdata; + /** offset in the data that was left from the previous read */ + u16_t lastoffset; + /** number of times data was received, set by event_callback(), + tested by the receive and select functions */ + s16_t rcvevent; + /** number of times data was ACKed (free send buffer), set by event_callback(), + tested by select */ + u16_t sendevent; + /** error happened for this socket, set by event_callback(), tested by select */ + u16_t errevent; + /** last error that occurred on this socket (in fact, all our errnos fit into an u8_t) */ + u8_t err; + /** counter of how many threads are waiting for this socket using select */ + SELWAIT_T select_waiting; +}; + +#if LWIP_NETCONN_SEM_PER_THREAD +#define SELECT_SEM_T sys_sem_t* +#define SELECT_SEM_PTR(sem) (sem) +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define SELECT_SEM_T sys_sem_t +#define SELECT_SEM_PTR(sem) (&(sem)) +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +/** Description for a task waiting in select */ +struct lwip_select_cb { + /** Pointer to the next waiting task */ + struct lwip_select_cb *next; + /** Pointer to the previous waiting task */ + struct lwip_select_cb *prev; + /** readset passed to select */ + fd_set *readset; + /** writeset passed to select */ + fd_set *writeset; + /** unimplemented: exceptset passed to select */ + fd_set *exceptset; + /** don't signal the same semaphore twice: set to 1 when signalled */ + int sem_signalled; + /** semaphore to wake up a task waiting for select */ + SELECT_SEM_T sem; +}; + +/** A struct sockaddr replacement that has the same alignment as sockaddr_in/ + * sockaddr_in6 if instantiated. 
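+ * Instantiating the union yields storage that is correctly aligned for either
+ * address family, so pointers can be cast to it without alignment warnings.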
+ */ +union sockaddr_aligned { + struct sockaddr sa; +#if LWIP_IPV6 + struct sockaddr_in6 sin6; +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + struct sockaddr_in sin; +#endif /* LWIP_IPV4 */ +}; + +#if LWIP_IGMP +/* Define the number of IPv4 multicast memberships, default is one per socket */ +#ifndef LWIP_SOCKET_MAX_MEMBERSHIPS +#define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS +#endif + +/* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when + a socket is closed */ +struct lwip_socket_multicast_pair { + /** the socket */ + struct lwip_sock* sock; + /** the interface address */ + ip4_addr_t if_addr; + /** the group address */ + ip4_addr_t multi_addr; +}; + +struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS]; + +static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr); +static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr); +static void lwip_socket_drop_registered_memberships(int s); +#endif /* LWIP_IGMP */ + +/** The global array of available sockets */ +static struct lwip_sock sockets[NUM_SOCKETS]; +/** The global list of tasks waiting for select */ +static struct lwip_select_cb *select_cb_list; +/** This counter is increased from lwip_select when the list is changed + and checked in event_callback to see if it has changed. */ +static volatile int select_cb_ctr; + +#if LWIP_SOCKET_SET_ERRNO +#ifndef set_errno +#define set_errno(err) do { if (err) { errno = (err); } } while(0) +#endif +#else /* LWIP_SOCKET_SET_ERRNO */ +#define set_errno(err) +#endif /* LWIP_SOCKET_SET_ERRNO */ + +#define sock_set_errno(sk, e) do { \ + const int sockerr = (e); \ + sk->err = (u8_t)sockerr; \ + set_errno(sockerr); \ +} while (0) + +/* Forward declaration of some functions */ +static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len); +#if !LWIP_TCPIP_CORE_LOCKING +static void lwip_getsockopt_callback(void *arg); +static void lwip_setsockopt_callback(void *arg); +#endif +static u8_t lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen); +static u8_t lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen); + +#if LWIP_IPV4 && LWIP_IPV6 +static void +sockaddr_to_ipaddr_port(const struct sockaddr* sockaddr, ip_addr_t* ipaddr, u16_t* port) +{ + if ((sockaddr->sa_family) == AF_INET6) { + SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, *port); + ipaddr->type = IPADDR_TYPE_V6; + } else { + SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, *port); + ipaddr->type = IPADDR_TYPE_V4; + } +} +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +/** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */ +void +lwip_socket_thread_init(void) +{ + netconn_thread_init(); +} + +/** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */ +void +lwip_socket_thread_cleanup(void) +{ + netconn_thread_cleanup(); +} + +/** + * Map an externally used socket index to the internal socket representation.
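+ * External descriptors are offset by LWIP_SOCKET_OFFSET, so the index is
+ * rebased before the internal array is consulted.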
+ * + * @param s externally used socket index + * @return struct lwip_sock for the socket or NULL if not found + */ +static struct lwip_sock * +get_socket(int s) +{ + struct lwip_sock *sock; + + s -= LWIP_SOCKET_OFFSET; + + if ((s < 0) || (s >= NUM_SOCKETS)) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", s + LWIP_SOCKET_OFFSET)); + set_errno(EBADF); + return NULL; + } + + sock = &sockets[s]; + + if (!sock->conn) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): not active\n", s + LWIP_SOCKET_OFFSET)); + set_errno(EBADF); + return NULL; + } + + return sock; +} + +/** + * Same as get_socket but doesn't set errno + * + * @param s externally used socket index + * @return struct lwip_sock for the socket or NULL if not found + */ +static struct lwip_sock * +tryget_socket(int s) +{ + s -= LWIP_SOCKET_OFFSET; + if ((s < 0) || (s >= NUM_SOCKETS)) { + return NULL; + } + if (!sockets[s].conn) { + return NULL; + } + return &sockets[s]; +} + +/** + * Allocate a new socket for a given netconn. + * + * @param newconn the netconn for which to allocate a socket + * @param accepted 1 if socket has been created by accept(), + * 0 if socket has been created by socket() + * @return the index of the new socket; -1 on error + */ +static int +alloc_socket(struct netconn *newconn, int accepted) +{ + int i; + SYS_ARCH_DECL_PROTECT(lev); + + /* allocate a new socket identifier */ + for (i = 0; i < NUM_SOCKETS; ++i) { + /* Protect socket array */ + SYS_ARCH_PROTECT(lev); + if (!sockets[i].conn && (sockets[i].select_waiting == 0)) { + sockets[i].conn = newconn; + /* The socket is not yet known to anyone, so no need to protect + after having marked it as used. */ + SYS_ARCH_UNPROTECT(lev); + sockets[i].lastdata = NULL; + sockets[i].lastoffset = 0; + sockets[i].rcvevent = 0; + /* TCP sendbuf is empty, but the socket is not yet writable until connected + * (unless it has been created by accept()). */ + sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1); + sockets[i].errevent = 0; + sockets[i].err = 0; + return i + LWIP_SOCKET_OFFSET; + } + SYS_ARCH_UNPROTECT(lev); + } + return -1; +} + +/** Free a socket. The socket's netconn must have been + * deleted before! + * + * @param sock the socket to free + * @param is_tcp != 0 for TCP sockets, used to free lastdata + */ +static void +free_socket(struct lwip_sock *sock, int is_tcp) +{ + void *lastdata; + + lastdata = sock->lastdata; + sock->lastdata = NULL; + sock->lastoffset = 0; + sock->err = 0; + + /* Protect socket array */ + SYS_ARCH_SET(sock->conn, NULL); + /* don't use 'sock' after this line, as another task might have allocated it */ + + if (lastdata != NULL) { + if (is_tcp) { + pbuf_free((struct pbuf *)lastdata); + } else { + netbuf_delete((struct netbuf *)lastdata); + } + } +} + +/* Below this, the well-known socket functions are implemented. + * Use google.com or opengroup.org to get a good description :-) + * + * Exceptions are documented!
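+ *
+ * A minimal blocking echo server built on this API looks as follows (editor's
+ * sketch; error handling is omitted and port 7 is illustrative):
+ *   int fd = lwip_socket(AF_INET, SOCK_STREAM, 0);
+ *   struct sockaddr_in sa;
+ *   memset(&sa, 0, sizeof(sa));
+ *   sa.sin_len = sizeof(sa);
+ *   sa.sin_family = AF_INET;
+ *   sa.sin_port = PP_HTONS(7);
+ *   lwip_bind(fd, (struct sockaddr *)&sa, sizeof(sa));
+ *   lwip_listen(fd, 1);
+ *   int client = lwip_accept(fd, NULL, NULL);
+ *   char buf[64];
+ *   int n;
+ *   while ((n = lwip_recv(client, buf, sizeof(buf), 0)) > 0) {
+ *     lwip_send(client, buf, n, 0);
+ *   }
+ *   lwip_close(client);
+ *   lwip_close(fd);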
+ */ + +int +lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen) +{ + struct lwip_sock *sock, *nsock; + struct netconn *newconn; + ip_addr_t naddr; + u16_t port = 0; + int newsock; + err_t err; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s)); + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (netconn_is_nonblocking(sock->conn) && (sock->rcvevent <= 0)) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): returning EWOULDBLOCK\n", s)); + set_errno(EWOULDBLOCK); + return -1; + } + + /* wait for a new connection */ + err = netconn_accept(sock->conn, &newconn); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_accept failed, err=%d\n", s, err)); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + } else if (err == ERR_CLSD) { + sock_set_errno(sock, EINVAL); + } else { + sock_set_errno(sock, err_to_errno(err)); + } + return -1; + } + LWIP_ASSERT("newconn != NULL", newconn != NULL); + + newsock = alloc_socket(newconn, 1); + if (newsock == -1) { + netconn_delete(newconn); + sock_set_errno(sock, ENFILE); + return -1; + } + LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET)); + LWIP_ASSERT("newconn->callback == event_callback", newconn->callback == event_callback); + nsock = &sockets[newsock - LWIP_SOCKET_OFFSET]; + + /* See event_callback: If data comes in right away after an accept, even + * though the server task might not have created a new socket yet. + * In that case, newconn->socket is counted down (newconn->socket--), + * so nsock->rcvevent is >= 1 here! + */ + SYS_ARCH_PROTECT(lev); + nsock->rcvevent += (s16_t)(-1 - newconn->socket); + newconn->socket = newsock; + SYS_ARCH_UNPROTECT(lev); + + /* Note that POSIX only requires us to check addr is non-NULL. addrlen must + * not be NULL if addr is valid.
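+ * If *addrlen is smaller than the peer's address, the copy below is truncated
+ * to fit the caller's buffer.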
+ */ + if (addr != NULL) { + union sockaddr_aligned tempaddr; + /* get the IP address and port of the remote host */ + err = netconn_peer(newconn, &naddr, &port); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err)); + netconn_delete(newconn); + free_socket(nsock, 1); + sock_set_errno(sock, err_to_errno(err)); + return -1; + } + LWIP_ASSERT("addr valid but addrlen NULL", addrlen != NULL); + + IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port); + if (*addrlen > tempaddr.sa.sa_len) { + *addrlen = tempaddr.sa.sa_len; + } + MEMCPY(addr, &tempaddr, *addrlen); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock)); + ip_addr_debug_print_val(SOCKETS_DEBUG, naddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port)); + } else { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d", s, newsock)); + } + + sock_set_errno(sock, 0); + return newsock; +} + +int +lwip_bind(int s, const struct sockaddr *name, socklen_t namelen) +{ + struct lwip_sock *sock; + ip_addr_t local_addr; + u16_t local_port; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (!SOCK_ADDR_TYPE_MATCH(name, sock)) { + /* sockaddr does not match socket type (IPv4/IPv6) */ + sock_set_errno(sock, err_to_errno(ERR_VAL)); + return -1; + } + + /* check size, family and alignment of 'name' */ + LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) && + IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)), + sock_set_errno(sock, err_to_errno(ERR_ARG)); return -1;); + LWIP_UNUSED_ARG(namelen); + + SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port)); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr)); + IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + err = netconn_bind(sock->conn, &local_addr, local_port); + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err)); + sock_set_errno(sock, err_to_errno(err)); + return -1; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s)); + sock_set_errno(sock, 0); + return 0; +} + +int +lwip_close(int s) +{ + struct lwip_sock *sock; + int is_tcp = 0; + err_t err; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (sock->conn != NULL) { + is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP; + } else { + LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata == NULL); + } + +#if LWIP_IGMP + /* drop all possibly joined IGMP memberships */ + lwip_socket_drop_registered_memberships(s); +#endif /* LWIP_IGMP */ + + err = netconn_delete(sock->conn); + if (err != ERR_OK) { + sock_set_errno(sock, err_to_errno(err)); + return -1; + } + + free_socket(sock, is_tcp); + set_errno(0); + return 0; +} + +int +lwip_connect(int s, const struct sockaddr *name, socklen_t namelen) +{ + struct lwip_sock *sock; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) { + /* sockaddr does not match socket type (IPv4/IPv6) */ + sock_set_errno(sock,
err_to_errno(ERR_VAL)); + return -1; + } + + LWIP_UNUSED_ARG(namelen); + if (name->sa_family == AF_UNSPEC) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s)); + err = netconn_disconnect(sock->conn); + } else { + ip_addr_t remote_addr; + u16_t remote_port; + + /* check size, family and alignment of 'name' */ + LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) && + IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name), + sock_set_errno(sock, err_to_errno(ERR_ARG)); return -1;); + + SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port)); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr)); + IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + err = netconn_connect(sock->conn, &remote_addr, remote_port); + } + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err)); + sock_set_errno(sock, err_to_errno(err)); + return -1; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s)); + sock_set_errno(sock, 0); + return 0; +} + +/** + * Set a socket into listen mode. + * The socket may not have been used for another connection previously. + * + * @param s the socket to set to listening mode + * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1) + * @return 0 on success, non-zero on failure + */ +int +lwip_listen(int s, int backlog) +{ + struct lwip_sock *sock; + err_t err; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* limit the "backlog" parameter to fit in an u8_t */ + backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff); + + err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog); + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err)); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + return -1; + } + sock_set_errno(sock, err_to_errno(err)); + return -1; + } + + sock_set_errno(sock, 0); + return 0; +} + +int +lwip_recvfrom(int s, void *mem, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen) +{ + struct lwip_sock *sock; + void *buf = NULL; + struct pbuf *p; + u16_t buflen, copylen; + int off = 0; + u8_t done = 0; + err_t err; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags)); + sock = get_socket(s); + if (!sock) { + return -1; + } + + do { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom: top while sock->lastdata=%p\n", sock->lastdata)); + /* Check if there is data left from the last recv operation. 
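Leftover TCP data is parked in sock->lastdata, and sock->lastoffset records how much of it has already been handed to the application.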
*/ + if (sock->lastdata) { + buf = sock->lastdata; + } else { + /* If this is non-blocking call, then check first */ + if (((flags & MSG_DONTWAIT) || netconn_is_nonblocking(sock->conn)) && + (sock->rcvevent <= 0)) { + if (off > 0) { + /* already received data, return that */ + sock_set_errno(sock, 0); + return off; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d): returning EWOULDBLOCK\n", s)); + set_errno(EWOULDBLOCK); + return -1; + } + + /* No data was left from the previous operation, so we try to get + some from the network. */ + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { + err = netconn_recv_tcp_pbuf(sock->conn, (struct pbuf **)&buf); + } else { + err = netconn_recv(sock->conn, (struct netbuf **)&buf); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom: netconn_recv err=%d, netbuf=%p\n", + err, buf)); + + if (err != ERR_OK) { + if (off > 0) { + if (err == ERR_CLSD) { + /* closed but already received data, ensure select gets the FIN, too */ + event_callback(sock->conn, NETCONN_EVT_RCVPLUS, 0); + } + /* already received data, return that */ + sock_set_errno(sock, 0); + return off; + } + /* We should really do some error checking here. */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d): buf == NULL, error is \"%s\"!\n", + s, lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + if (err == ERR_CLSD) { + return 0; + } else { + return -1; + } + } + LWIP_ASSERT("buf != NULL", buf != NULL); + sock->lastdata = buf; + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { + p = (struct pbuf *)buf; + } else { + p = ((struct netbuf *)buf)->p; + } + buflen = p->tot_len; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom: buflen=%"U16_F" len=%"SZT_F" off=%d sock->lastoffset=%"U16_F"\n", + buflen, len, off, sock->lastoffset)); + + buflen -= sock->lastoffset; + + if (len > buflen) { + copylen = buflen; + } else { + copylen = (u16_t)len; + } + + /* copy the contents of the received buffer into + the supplied memory pointer mem */ + pbuf_copy_partial(p, (u8_t*)mem + off, copylen, sock->lastoffset); + + off += copylen; + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { + LWIP_ASSERT("invalid copylen, len would underflow", len >= copylen); + len -= copylen; + if ((len <= 0) || + (p->flags & PBUF_FLAG_PUSH) || + (sock->rcvevent <= 0) || + ((flags & MSG_PEEK) != 0)) { + done = 1; + } + } else { + done = 1; + } + + /* Check to see from where the data was.*/ + if (done) { +#if !SOCKETS_DEBUG + if (from && fromlen) +#endif /* !SOCKETS_DEBUG */ + { + u16_t port; + ip_addr_t tmpaddr; + ip_addr_t *fromaddr; + union sockaddr_aligned saddr; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d): addr=", s)); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { + fromaddr = &tmpaddr; + netconn_getaddr(sock->conn, fromaddr, &port, 0); + } else { + port = netbuf_fromport((struct netbuf *)buf); + fromaddr = netbuf_fromaddr((struct netbuf *)buf); + } + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */ + if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) && IP_IS_V4(fromaddr)) { + ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr)); + IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port); + ip_addr_debug_print(SOCKETS_DEBUG, fromaddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, off)); +#if SOCKETS_DEBUG + if (from && fromlen) +#endif /* SOCKETS_DEBUG */ + { + if (*fromlen > saddr.sa.sa_len) { + *fromlen 
= saddr.sa.sa_len; + } + MEMCPY(from, &saddr, *fromlen); + } + } + } + + /* If we don't peek the incoming message... */ + if ((flags & MSG_PEEK) == 0) { + /* If this is a TCP socket, check if there is data left in the + buffer. If so, it should be saved in the sock structure for next + time around. */ + if ((NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) && (buflen - copylen > 0)) { + sock->lastdata = buf; + sock->lastoffset += copylen; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom: lastdata now netbuf=%p\n", buf)); + } else { + sock->lastdata = NULL; + sock->lastoffset = 0; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom: deleting netbuf=%p\n", buf)); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { + pbuf_free((struct pbuf *)buf); + } else { + netbuf_delete((struct netbuf *)buf); + } + buf = NULL; + } + } + } while (!done); + + sock_set_errno(sock, 0); + return off; +} + +int +lwip_read(int s, void *mem, size_t len) +{ + return lwip_recvfrom(s, mem, len, 0, NULL, NULL); +} + +int +lwip_recv(int s, void *mem, size_t len, int flags) +{ + return lwip_recvfrom(s, mem, len, flags, NULL, NULL); +} + +int +lwip_send(int s, const void *data, size_t size, int flags) +{ + struct lwip_sock *sock; + err_t err; + u8_t write_flags; + size_t written; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n", + s, data, size, flags)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { +#if (LWIP_UDP || LWIP_RAW) + return lwip_sendto(s, data, size, flags, NULL, 0); +#else /* (LWIP_UDP || LWIP_RAW) */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + return -1; +#endif /* (LWIP_UDP || LWIP_RAW) */ + } + + write_flags = NETCONN_COPY | + ((flags & MSG_MORE) ? NETCONN_MORE : 0) | + ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0); + written = 0; + err = netconn_write_partly(sock->conn, data, size, write_flags, &written); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written)); + sock_set_errno(sock, err_to_errno(err)); + return (err == ERR_OK ? (int)written : -1); +} + +int +lwip_sendmsg(int s, const struct msghdr *msg, int flags) +{ + struct lwip_sock *sock; + int i; +#if LWIP_TCP + u8_t write_flags; + size_t written; +#endif + int size = 0; + err_t err = ERR_OK; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL, + sock_set_errno(sock, err_to_errno(ERR_ARG)); return -1;); + + LWIP_UNUSED_ARG(msg->msg_control); + LWIP_UNUSED_ARG(msg->msg_controllen); + LWIP_UNUSED_ARG(msg->msg_flags); + LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", (msg->msg_iov != NULL && msg->msg_iovlen != 0), + sock_set_errno(sock, err_to_errno(ERR_ARG)); return -1;); + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + write_flags = NETCONN_COPY | + ((flags & MSG_MORE) ? NETCONN_MORE : 0) | + ((flags & MSG_DONTWAIT) ? 
NETCONN_DONTBLOCK : 0); + + for (i = 0; i < msg->msg_iovlen; i++) { + u8_t apiflags = write_flags; + if (i + 1 < msg->msg_iovlen) { + apiflags |= NETCONN_MORE; + } + written = 0; + err = netconn_write_partly(sock->conn, msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len, apiflags, &written); + if (err == ERR_OK) { + size += written; + /* check that the entire IO vector was accepted, if not return a partial write */ + if (written != msg->msg_iov[i].iov_len) + break; + } + /* none of this IO vector was accepted, but previous was, return partial write and conceal ERR_WOULDBLOCK */ + else if (err == ERR_WOULDBLOCK && size > 0) { + err = ERR_OK; + /* let ERR_WOULDBLOCK persist on the netconn since we are returning ERR_OK */ + break; + } else { + size = -1; + break; + } + } + sock_set_errno(sock, err_to_errno(err)); + return size; +#else /* LWIP_TCP */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + return -1; +#endif /* LWIP_TCP */ + } + /* else, UDP and RAW NETCONNs */ +#if LWIP_UDP || LWIP_RAW + { + struct netbuf *chain_buf; + + LWIP_UNUSED_ARG(flags); + LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) || + IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)) , + sock_set_errno(sock, err_to_errno(ERR_ARG)); return -1;); + + /* initialize chain buffer with destination */ + chain_buf = netbuf_new(); + if (!chain_buf) { + sock_set_errno(sock, err_to_errno(ERR_MEM)); + return -1; + } + if (msg->msg_name) { + u16_t remote_port; + SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf->addr, remote_port); + netbuf_fromport(chain_buf) = remote_port; + } +#if LWIP_NETIF_TX_SINGLE_PBUF + for (i = 0; i < msg->msg_iovlen; i++) { + size += msg->msg_iov[i].iov_len; + } + /* Allocate a new netbuf and copy the data into it. */ + if (netbuf_alloc(chain_buf, (u16_t)size) == NULL) { + err = ERR_MEM; + } else { + /* flatten the IO vectors */ + size_t offset = 0; + for (i = 0; i < msg->msg_iovlen; i++) { + MEMCPY(&((u8_t*)chain_buf->p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len); + offset += msg->msg_iov[i].iov_len; + } +#if LWIP_CHECKSUM_ON_COPY + { + /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */ + u16_t chksum = ~inet_chksum_pbuf(chain_buf->p); + netbuf_set_chksum(chain_buf, chksum); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + err = ERR_OK; + } +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + /* create a chained netbuf from the IO vectors.
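Each IO vector is wrapped in a PBUF_REF pbuf that points at the caller's buffer, so no payload bytes are copied on this path.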
NOTE: we assemble a pbuf chain + manually to avoid having to allocate, chain, and delete a netbuf for each iov */ + for (i = 0; i < msg->msg_iovlen; i++) { + struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF); + if (p == NULL) { + err = ERR_MEM; /* let netbuf_delete() cleanup chain_buf */ + break; + } + p->payload = msg->msg_iov[i].iov_base; + LWIP_ASSERT("iov_len < u16_t", msg->msg_iov[i].iov_len <= 0xFFFF); + p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len; + /* netbuf empty, add new pbuf */ + if (chain_buf->p == NULL) { + chain_buf->p = chain_buf->ptr = p; + /* add pbuf to existing pbuf chain */ + } else { + pbuf_cat(chain_buf->p, p); + } + } + /* save size of total chain */ + if (err == ERR_OK) { + size = netbuf_len(chain_buf); + } +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + if (err == ERR_OK) { +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(chain_buf->addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf->addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf->addr), ip_2_ip6(&chain_buf->addr)); + IP_SET_TYPE_VAL(chain_buf->addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* send the data */ + err = netconn_send(sock->conn, chain_buf); + } + + /* deallocate the buffer */ + netbuf_delete(chain_buf); + + sock_set_errno(sock, err_to_errno(err)); + return (err == ERR_OK ? size : -1); + } +#else /* LWIP_UDP || LWIP_RAW */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + return -1; +#endif /* LWIP_UDP || LWIP_RAW */ +} + +int +lwip_sendto(int s, const void *data, size_t size, int flags, + const struct sockaddr *to, socklen_t tolen) +{ + struct lwip_sock *sock; + err_t err; + u16_t short_size; + u16_t remote_port; + struct netbuf buf; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + return lwip_send(s, data, size, flags); +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(flags); + sock_set_errno(sock, err_to_errno(ERR_ARG)); + return -1; +#endif /* LWIP_TCP */ + } + + /* @todo: split into multiple sendto's? */ + LWIP_ASSERT("lwip_sendto: size must fit in u16_t", size <= 0xffff); + short_size = (u16_t)size; + LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) || + (IS_SOCK_ADDR_LEN_VALID(tolen) && + IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))), + sock_set_errno(sock, err_to_errno(ERR_ARG)); return -1;); + LWIP_UNUSED_ARG(tolen); + + /* initialize a buffer */ + buf.p = buf.ptr = NULL; +#if LWIP_CHECKSUM_ON_COPY + buf.flags = 0; +#endif /* LWIP_CHECKSUM_ON_COPY */ + if (to) { + SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port); + } else { + remote_port = 0; + ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr); + } + netbuf_fromport(&buf) = remote_port; + + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=", + s, data, short_size, flags)); + ip_addr_debug_print(SOCKETS_DEBUG, &buf.addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port)); + + /* make the buffer point to the data that should be sent */ +#if LWIP_NETIF_TX_SINGLE_PBUF + /* Allocate a new netbuf and copy the data into it.
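(With LWIP_NETIF_TX_SINGLE_PBUF enabled, outgoing data is flattened into one pbuf so that drivers never see chained pbufs.)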
*/ + if (netbuf_alloc(&buf, short_size) == NULL) { + err = ERR_MEM; + } else { +#if LWIP_CHECKSUM_ON_COPY + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) { + u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size); + netbuf_set_chksum(&buf, chksum); + } else +#endif /* LWIP_CHECKSUM_ON_COPY */ + { + MEMCPY(buf.p->payload, data, short_size); + } + err = ERR_OK; + } +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + err = netbuf_ref(&buf, data, short_size); +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + if (err == ERR_OK) { +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr)); + IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* send the data */ + err = netconn_send(sock->conn, &buf); + } + + /* deallocated the buffer */ + netbuf_free(&buf); + + sock_set_errno(sock, err_to_errno(err)); + return (err == ERR_OK ? short_size : -1); +} + +int +lwip_socket(int domain, int type, int protocol) +{ + struct netconn *conn; + int i; + + LWIP_UNUSED_ARG(domain); /* @todo: check this */ + + /* create a netconn */ + switch (type) { + case SOCK_RAW: + conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW), + (u8_t)protocol, event_callback); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); + break; + case SOCK_DGRAM: + conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, + ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)) , + event_callback); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); + break; + case SOCK_STREAM: + conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), event_callback); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n", + domain, type, protocol)); + set_errno(EINVAL); + return -1; + } + + if (!conn) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n")); + set_errno(ENOBUFS); + return -1; + } + + i = alloc_socket(conn, 0); + + if (i == -1) { + netconn_delete(conn); + set_errno(ENFILE); + return -1; + } + conn->socket = i; + LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i)); + set_errno(0); + return i; +} + +int +lwip_write(int s, const void *data, size_t size) +{ + return lwip_send(s, data, size, 0); +} + +int +lwip_writev(int s, const struct iovec *iov, int iovcnt) +{ + struct msghdr msg; + + msg.msg_name = NULL; + msg.msg_namelen = 0; + /* Hack: we have to cast via number to cast from 'const' pointer to non-const. + Blame the opengroup standard for this inconsistency. */ + msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov); + msg.msg_iovlen = iovcnt; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + return lwip_sendmsg(s, &msg, 0); +} + +/** + * Go through the readset and writeset lists and see which socket of the sockets + * set in the sets has events. On return, readset, writeset and exceptset have + * the sockets enabled that had events. 
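Since lwip_writev() above only repackages the buffers into a msghdr for lwip_sendmsg(), a protocol header and its payload can go out in one call without an intermediate copy at the socket layer. A minimal sketch, assuming a connected TCP socket `s`:

#include "lwip/sockets.h"

static int send_header_and_body(int s, const void *hdr, size_t hdr_len,
                                const void *body, size_t body_len)
{
  struct iovec iov[2];
  iov[0].iov_base = (void *)hdr;  /* the const cast mirrors what lwip_writev does */
  iov[0].iov_len  = hdr_len;
  iov[1].iov_base = (void *)body;
  iov[1].iov_len  = body_len;
  /* may return a short count; see the partial-write notes in lwip_sendmsg() */
  return lwip_writev(s, iov, 2);
}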
+ * + * @param maxfdp1 the highest socket index in the sets + * @param readset_in set of sockets to check for read events + * @param writeset_in set of sockets to check for write events + * @param exceptset_in set of sockets to check for error events + * @param readset_out set of sockets that had read events + * @param writeset_out set of sockets that had write events + * @param exceptset_out set of sockets that had error events + * @return number of sockets that had events (read/write/exception) (>= 0) + */ +static int +lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in, + fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out) +{ + int i, nready = 0; + fd_set lreadset, lwriteset, lexceptset; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + FD_ZERO(&lreadset); + FD_ZERO(&lwriteset); + FD_ZERO(&lexceptset); + + /* Go through each socket in each list to count number of sockets which + currently match */ + for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) { + /* if this FD is not in the set, continue */ + if (!(readset_in && FD_ISSET(i, readset_in)) && + !(writeset_in && FD_ISSET(i, writeset_in)) && + !(exceptset_in && FD_ISSET(i, exceptset_in))) { + continue; + } + /* First get the socket's status (protected)... */ + SYS_ARCH_PROTECT(lev); + sock = tryget_socket(i); + if (sock != NULL) { + void* lastdata = sock->lastdata; + s16_t rcvevent = sock->rcvevent; + u16_t sendevent = sock->sendevent; + u16_t errevent = sock->errevent; + SYS_ARCH_UNPROTECT(lev); + + /* ... then examine it: */ + /* See if netconn of this socket is ready for read */ + if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) { + FD_SET(i, &lreadset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i)); + nready++; + } + /* See if netconn of this socket is ready for write */ + if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) { + FD_SET(i, &lwriteset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i)); + nready++; + } + /* See if netconn of this socket had an error */ + if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) { + FD_SET(i, &lexceptset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i)); + nready++; + } + } else { + SYS_ARCH_UNPROTECT(lev); + /* continue on to next FD in list */ + } + } + /* copy local sets to the ones provided as arguments */ + *readset_out = lreadset; + *writeset_out = lwriteset; + *exceptset_out = lexceptset; + + LWIP_ASSERT("nready >= 0", nready >= 0); + return nready; +} + +int +lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset, + struct timeval *timeout) +{ + u32_t waitres = 0; + int nready; + fd_set lreadset, lwriteset, lexceptset; + u32_t msectimeout; + struct lwip_select_cb select_cb; + int i; + int maxfdp2; +#if LWIP_NETCONN_SEM_PER_THREAD + int waited = 0; +#endif + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n", + maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset, + timeout ? (s32_t)timeout->tv_sec : (s32_t)-1, + timeout ? 
(s32_t)timeout->tv_usec : (s32_t)-1)); + + /* Go through each socket in each list to count number of sockets which + currently match */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + + /* If we don't have any current events, then suspend if we are supposed to */ + if (!nready) { + if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n")); + /* This is OK as the local fdsets are empty and nready is zero, + or we would have returned earlier. */ + goto return_copy_fdsets; + } + + /* None ready: add our semaphore to list: + We don't actually need any dynamic memory. Our entry on the + list is only valid while we are in this function, so it's ok + to use local variables. */ + + select_cb.next = NULL; + select_cb.prev = NULL; + select_cb.readset = readset; + select_cb.writeset = writeset; + select_cb.exceptset = exceptset; + select_cb.sem_signalled = 0; +#if LWIP_NETCONN_SEM_PER_THREAD + select_cb.sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + if (sys_sem_new(&select_cb.sem, 0) != ERR_OK) { + /* failed to create semaphore */ + set_errno(ENOMEM); + return -1; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + /* Protect the select_cb_list */ + SYS_ARCH_PROTECT(lev); + + /* Put this select_cb on top of list */ + select_cb.next = select_cb_list; + if (select_cb_list != NULL) { + select_cb_list->prev = &select_cb; + } + select_cb_list = &select_cb; + /* Increasing this counter tells event_callback that the list has changed. */ + select_cb_ctr++; + + /* Now we can safely unprotect */ + SYS_ARCH_UNPROTECT(lev); + + /* Increase select_waiting for each socket we are interested in */ + maxfdp2 = maxfdp1; + for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) { + if ((readset && FD_ISSET(i, readset)) || + (writeset && FD_ISSET(i, writeset)) || + (exceptset && FD_ISSET(i, exceptset))) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket(i); + if (sock != NULL) { + sock->select_waiting++; + LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0); + } else { + /* Not a valid socket */ + nready = -1; + maxfdp2 = i; + SYS_ARCH_UNPROTECT(lev); + break; + } + SYS_ARCH_UNPROTECT(lev); + } + } + + if (nready >= 0) { + /* Call lwip_selscan again: there could have been events between + the last scan (without us on the list) and putting us on the list! */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + if (!nready) { + /* Still none ready, just wait to be woken */ + if (timeout == 0) { + /* Wait forever */ + msectimeout = 0; + } else { + msectimeout = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500)/1000)); + if (msectimeout == 0) { + /* Wait 1ms at least (0 means wait forever) */ + msectimeout = 1; + } + } + + waitres = sys_arch_sem_wait(SELECT_SEM_PTR(select_cb.sem), msectimeout); +#if LWIP_NETCONN_SEM_PER_THREAD + waited = 1; +#endif + } + } + + /* Decrease select_waiting for each socket we are interested in */ + for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) { + if ((readset && FD_ISSET(i, readset)) || + (writeset && FD_ISSET(i, writeset)) || + (exceptset && FD_ISSET(i, exceptset))) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket(i); + if (sock != NULL) { + /* for now, handle select_waiting==0... 
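The millisecond conversion above rounds tv_usec to the nearest millisecond and bumps a nonzero timeval that rounds to 0 up to 1 ms, since 0 is reserved for "wait forever". A minimal sketch of waiting up to 1.5 s for readability, with the socket `s` assumed:

#include "lwip/sockets.h"

static int wait_readable(int s)
{
  fd_set rset;
  struct timeval tv;
  FD_ZERO(&rset);
  FD_SET(s, &rset);
  tv.tv_sec = 1;
  tv.tv_usec = 500000; /* converted to 1500 ms by the code above */
  /* 1 if s became readable, 0 on timeout, -1 on error */
  return lwip_select(s + 1, &rset, NULL, NULL, &tv);
}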
*/ + LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0); + if (sock->select_waiting > 0) { + sock->select_waiting--; + } + } else { + /* Not a valid socket */ + nready = -1; + } + SYS_ARCH_UNPROTECT(lev); + } + } + /* Take us off the list */ + SYS_ARCH_PROTECT(lev); + if (select_cb.next != NULL) { + select_cb.next->prev = select_cb.prev; + } + if (select_cb_list == &select_cb) { + LWIP_ASSERT("select_cb.prev == NULL", select_cb.prev == NULL); + select_cb_list = select_cb.next; + } else { + LWIP_ASSERT("select_cb.prev != NULL", select_cb.prev != NULL); + select_cb.prev->next = select_cb.next; + } + /* Increasing this counter tells event_callback that the list has changed. */ + select_cb_ctr++; + SYS_ARCH_UNPROTECT(lev); + +#if LWIP_NETCONN_SEM_PER_THREAD + if (select_cb.sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) { + /* don't leave the thread-local semaphore signalled */ + sys_arch_sem_wait(select_cb.sem, 1); + } +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_sem_free(&select_cb.sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + if (nready < 0) { + /* This happens when a socket got closed while waiting */ + set_errno(EBADF); + return -1; + } + + if (waitres == SYS_ARCH_TIMEOUT) { + /* Timeout */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n")); + /* This is OK as the local fdsets are empty and nready is zero, + or we would have returned earlier. */ + goto return_copy_fdsets; + } + + /* See what's set */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready)); +return_copy_fdsets: + set_errno(0); + if (readset) { + *readset = lreadset; + } + if (writeset) { + *writeset = lwriteset; + } + if (exceptset) { + *exceptset = lexceptset; + } + return nready; +} + +/** + * Callback registered in the netconn layer for each socket-netconn. + * Processes recvevent (data available) and wakes up tasks waiting for select. + */ +static void +event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len) +{ + int s; + struct lwip_sock *sock; + struct lwip_select_cb *scb; + int last_select_cb_ctr; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_UNUSED_ARG(len); + + /* Get socket */ + if (conn) { + s = conn->socket; + if (s < 0) { + /* Data comes in right away after an accept, even though + * the server task might not have created a new socket yet. + * Just count down (or up) if that's the case and we + * will use the data later. Note that only receive events + * can happen before the new socket is set up. 
*/ + SYS_ARCH_PROTECT(lev); + if (conn->socket < 0) { + if (evt == NETCONN_EVT_RCVPLUS) { + conn->socket--; + } + SYS_ARCH_UNPROTECT(lev); + return; + } + s = conn->socket; + SYS_ARCH_UNPROTECT(lev); + } + + sock = get_socket(s); + if (!sock) { + return; + } + } else { + return; + } + + SYS_ARCH_PROTECT(lev); + /* Set event as required */ + switch (evt) { + case NETCONN_EVT_RCVPLUS: + sock->rcvevent++; + break; + case NETCONN_EVT_RCVMINUS: + sock->rcvevent--; + break; + case NETCONN_EVT_SENDPLUS: + sock->sendevent = 1; + break; + case NETCONN_EVT_SENDMINUS: + sock->sendevent = 0; + break; + case NETCONN_EVT_ERROR: + sock->errevent = 1; + break; + default: + LWIP_ASSERT("unknown event", 0); + break; + } + + if (sock->select_waiting == 0) { + /* noone is waiting for this socket, no need to check select_cb_list */ + SYS_ARCH_UNPROTECT(lev); + return; + } + + /* Now decide if anyone is waiting for this socket */ + /* NOTE: This code goes through the select_cb_list list multiple times + ONLY IF a select was actually waiting. We go through the list the number + of waiting select calls + 1. This list is expected to be small. */ + + /* At this point, SYS_ARCH is still protected! */ +again: + for (scb = select_cb_list; scb != NULL; scb = scb->next) { + /* remember the state of select_cb_list to detect changes */ + last_select_cb_ctr = select_cb_ctr; + if (scb->sem_signalled == 0) { + /* semaphore not signalled yet */ + int do_signal = 0; + /* Test this select call for our socket */ + if (sock->rcvevent > 0) { + if (scb->readset && FD_ISSET(s, scb->readset)) { + do_signal = 1; + } + } + if (sock->sendevent != 0) { + if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) { + do_signal = 1; + } + } + if (sock->errevent != 0) { + if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) { + do_signal = 1; + } + } + if (do_signal) { + scb->sem_signalled = 1; + /* Don't call SYS_ARCH_UNPROTECT() before signaling the semaphore, as this might + lead to the select thread taking itself off the list, invalidating the semaphore. */ + sys_sem_signal(SELECT_SEM_PTR(scb->sem)); + } + } + /* unlock interrupts with each step */ + SYS_ARCH_UNPROTECT(lev); + /* this makes sure interrupt protection time is short */ + SYS_ARCH_PROTECT(lev); + if (last_select_cb_ctr != select_cb_ctr) { + /* someone has changed select_cb_list, restart at the beginning */ + goto again; + } + } + SYS_ARCH_UNPROTECT(lev); +} + +/** + * Close one end of a full-duplex connection. + */ +int +lwip_shutdown(int s, int how) +{ + struct lwip_sock *sock; + err_t err; + u8_t shut_rx = 0, shut_tx = 0; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (sock->conn != NULL) { + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + return -1; + } + } else { + sock_set_errno(sock, ENOTCONN); + return -1; + } + + if (how == SHUT_RD) { + shut_rx = 1; + } else if (how == SHUT_WR) { + shut_tx = 1; + } else if (how == SHUT_RDWR) { + shut_rx = 1; + shut_tx = 1; + } else { + sock_set_errno(sock, EINVAL); + return -1; + } + err = netconn_shutdown(sock->conn, shut_rx, shut_tx); + + sock_set_errno(sock, err_to_errno(err)); + return (err == ERR_OK ? 
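lwip_shutdown() above maps SHUT_RD/SHUT_WR/SHUT_RDWR onto netconn_shutdown()'s rx/tx flags and rejects non-TCP netconns with EOPNOTSUPP. The classic use is a half-close: stop sending so the peer sees EOF, but keep reading. A minimal sketch, assuming a connected TCP socket `s`:

#include "lwip/sockets.h"

static void finish_and_drain(int s)
{
  char buf[128];
  int n;
  /* half-close: peer's recv() returns 0 once our FIN is processed */
  lwip_shutdown(s, SHUT_WR);
  /* keep reading until the peer closes its side as well */
  while ((n = lwip_recv(s, buf, sizeof(buf), 0)) > 0) {
    /* consume buf[0..n) */
  }
  lwip_close(s);
}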
0 : -1); +} + +static int +lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local) +{ + struct lwip_sock *sock; + union sockaddr_aligned saddr; + ip_addr_t naddr; + u16_t port; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* get the IP address and port */ + err = netconn_getaddr(sock->conn, &naddr, &port, local); + if (err != ERR_OK) { + sock_set_errno(sock, err_to_errno(err)); + return -1; + } + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */ + if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) && + IP_IS_V4_VAL(naddr)) { + ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr)); + IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, naddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port)); + + if (*namelen > saddr.sa.sa_len) { + *namelen = saddr.sa.sa_len; + } + MEMCPY(name, &saddr, *namelen); + + sock_set_errno(sock, 0); + return 0; +} + +int +lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen) +{ + return lwip_getaddrname(s, name, namelen, 0); +} + +int +lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen) +{ + return lwip_getaddrname(s, name, namelen, 1); +} + +int +lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen) +{ + u8_t err; + struct lwip_sock *sock = get_socket(s); +#if !LWIP_TCPIP_CORE_LOCKING + LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + if (!sock) { + return -1; + } + + if ((NULL == optval) || (NULL == optlen)) { + sock_set_errno(sock, EFAULT); + return -1; + } + +#if LWIP_TCPIP_CORE_LOCKING + /* core-locking can just call the -impl function */ + LOCK_TCPIP_CORE(); + err = lwip_getsockopt_impl(s, level, optname, optval, optlen); + UNLOCK_TCPIP_CORE(); + +#else /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_MPU_COMPATIBLE + /* MPU_COMPATIBLE copies the optval data, so check for max size here */ + if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) { + sock_set_errno(sock, ENOBUFS); + return -1; + } +#endif /* LWIP_MPU_COMPATIBLE */ + + LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock); + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen; +#if !LWIP_MPU_COMPATIBLE + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval; +#endif /* !LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0; +#if LWIP_NETCONN_SEM_PER_THREAD + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed; +#endif + err = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data)); + if (err != ERR_OK) { + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); + sock_set_errno(sock, err_to_errno(err)); + return -1; + } + sys_arch_sem_wait((sys_sem_t*)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0); + + /* write back optlen and optval */ + *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen; +#if LWIP_MPU_COMPATIBLE + MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen); +#endif /* LWIP_MPU_COMPATIBLE */ + + /* maybe lwip_getsockopt_internal has changed err */ + err = 
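lwip_getsockname() and lwip_getpeername() above share lwip_getaddrname(), which truncates the returned address to the caller's *namelen. A minimal sketch reading back the locally bound port, assuming an IPv4 socket `s` that has already been bound or connected:

#include "lwip/sockets.h"

static u16_t local_port(int s)
{
  struct sockaddr_in sin;
  socklen_t len = sizeof(sin);
  if (lwip_getsockname(s, (struct sockaddr *)&sin, &len) < 0) {
    return 0; /* errno set by lwip_getsockname */
  }
  return lwip_ntohs(sin.sin_port);
}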
LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err; + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sock_set_errno(sock, err); + return err ? -1 : 0; +} + +#if !LWIP_TCPIP_CORE_LOCKING +/** lwip_getsockopt_callback: only used without CORE_LOCKING + * to get into the tcpip_thread + */ +static void +lwip_getsockopt_callback(void *arg) +{ + struct lwip_setgetsockopt_data *data; + LWIP_ASSERT("arg != NULL", arg != NULL); + data = (struct lwip_setgetsockopt_data*)arg; + + data->err = lwip_getsockopt_impl(data->s, data->level, data->optname, +#if LWIP_MPU_COMPATIBLE + data->optval, +#else /* LWIP_MPU_COMPATIBLE */ + data->optval.p, +#endif /* LWIP_MPU_COMPATIBLE */ + &data->optlen); + + sys_sem_signal((sys_sem_t*)(data->completed_sem)); +} +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +/** lwip_getsockopt_impl: the actual implementation of getsockopt: + * same argument as lwip_getsockopt, either called directly or through callback + */ +static u8_t +lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen) +{ + u8_t err = 0; + struct lwip_sock *sock = tryget_socket(s); + if (!sock) { + return EBADF; + } + + switch (level) { + +/* Level: SOL_SOCKET */ + case SOL_SOCKET: + switch (optname) { + +#if LWIP_TCP + case SO_ACCEPTCONN: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) { + return ENOPROTOOPT; + } + if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) { + *(int*)optval = 1; + } else { + *(int*)optval = 0; + } + break; +#endif /* LWIP_TCP */ + + /* The option flags */ + case SO_BROADCAST: + case SO_KEEPALIVE: +#if SO_REUSE + case SO_REUSEADDR: +#endif /* SO_REUSE */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int*)optval = ip_get_option(sock->conn->pcb.ip, optname); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n", + s, optname, (*(int*)optval?"on":"off"))); + break; + + case SO_TYPE: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) { + case NETCONN_RAW: + *(int*)optval = SOCK_RAW; + break; + case NETCONN_TCP: + *(int*)optval = SOCK_STREAM; + break; + case NETCONN_UDP: + *(int*)optval = SOCK_DGRAM; + break; + default: /* unrecognized socket type */ + *(int*)optval = netconn_type(sock->conn); + LWIP_DEBUGF(SOCKETS_DEBUG, + ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n", + s, *(int *)optval)); + } /* switch (netconn_type(sock->conn)) */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n", + s, *(int *)optval)); + break; + + case SO_ERROR: + LWIP_SOCKOPT_CHECK_OPTLEN(*optlen, int); + /* only overwrite ERR_OK or temporary errors */ + if (((sock->err == 0) || (sock->err == EINPROGRESS)) && (sock->conn != NULL)) { + sock_set_errno(sock, err_to_errno(sock->conn->last_err)); + } + *(int *)optval = (sock->err == 0xFF ? 
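SO_ERROR above reports the pending socket error and then clears it (read-and-clear semantics). Its classic use is learning the outcome of a non-blocking connect once select() reports the socket writable. A minimal sketch, assuming a prior lwip_connect() on non-blocking `s` returned EINPROGRESS:

#include "lwip/sockets.h"

static int connect_result(int s)
{
  int soerr = 0;
  socklen_t len = sizeof(soerr);
  if (lwip_getsockopt(s, SOL_SOCKET, SO_ERROR, &soerr, &len) < 0) {
    return -1;
  }
  return soerr; /* 0 = connected, otherwise an errno-style value */
}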
(int)-1 : (int)sock->err); + sock->err = 0; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n", + s, *(int *)optval)); + break; + +#if LWIP_SO_SNDTIMEO + case SO_SNDTIMEO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn)); + break; +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + case SO_RCVTIMEO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn)); + break; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + case SO_RCVBUF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + *(int *)optval = netconn_get_recvbufsize(sock->conn); + break; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + case SO_LINGER: + { + s16_t conn_linger; + struct linger* linger = (struct linger*)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger); + conn_linger = sock->conn->linger; + if (conn_linger >= 0) { + linger->l_onoff = 1; + linger->l_linger = (int)conn_linger; + } else { + linger->l_onoff = 0; + linger->l_linger = 0; + } + } + break; +#endif /* LWIP_SO_LINGER */ +#if LWIP_UDP + case SO_NO_CHECK: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP); +#if LWIP_UDPLITE + if ((udp_flags(sock->conn->pcb.udp) & UDP_FLAGS_UDPLITE) != 0) { + /* this flag is only available for UDP, not for UDP lite */ + return EAFNOSUPPORT; + } +#endif /* LWIP_UDPLITE */ + *(int*)optval = (udp_flags(sock->conn->pcb.udp) & UDP_FLAGS_NOCHKSUM) ? 1 : 0; + break; +#endif /* LWIP_UDP*/ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + +/* Level: IPPROTO_IP */ + case IPPROTO_IP: + switch (optname) { + case IP_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int*)optval = sock->conn->pcb.ip->ttl; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n", + s, *(int *)optval)); + break; + case IP_TOS: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int*)optval = sock->conn->pcb.ip->tos; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n", + s, *(int *)optval)); + break; +#if LWIP_MULTICAST_TX_OPTIONS + case IP_MULTICAST_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) { + return ENOPROTOOPT; + } + *(u8_t*)optval = udp_get_multicast_ttl(sock->conn->pcb.udp); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n", + s, *(int *)optval)); + break; + case IP_MULTICAST_IF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) { + return ENOPROTOOPT; + } + inet_addr_from_ip4addr((struct in_addr*)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp)); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n", + s, *(u32_t *)optval)); + break; + case IP_MULTICAST_LOOP: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t); + if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) { + *(u8_t*)optval = 1; + } else { + *(u8_t*)optval = 0; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n", + s, *(int *)optval)); + break; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + default: + 
LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + +#if LWIP_TCP +/* Level: IPPROTO_TCP */ + case IPPROTO_TCP: + /* Special case: all IPPROTO_TCP option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP); + if (sock->conn->pcb.tcp->state == LISTEN) { + return EINVAL; + } + switch (optname) { + case TCP_NODELAY: + *(int*)optval = tcp_nagle_disabled(sock->conn->pcb.tcp); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n", + s, (*(int*)optval)?"on":"off") ); + break; + case TCP_KEEPALIVE: + *(int*)optval = (int)sock->conn->pcb.tcp->keep_idle; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n", + s, *(int *)optval)); + break; + +#if LWIP_TCP_KEEPALIVE + case TCP_KEEPIDLE: + *(int*)optval = (int)(sock->conn->pcb.tcp->keep_idle/1000); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n", + s, *(int *)optval)); + break; + case TCP_KEEPINTVL: + *(int*)optval = (int)(sock->conn->pcb.tcp->keep_intvl/1000); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n", + s, *(int *)optval)); + break; + case TCP_KEEPCNT: + *(int*)optval = (int)sock->conn->pcb.tcp->keep_cnt; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n", + s, *(int *)optval)); + break; +#endif /* LWIP_TCP_KEEPALIVE */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_TCP */ + +#if LWIP_IPV6 +/* Level: IPPROTO_IPV6 */ + case IPPROTO_IPV6: + switch (optname) { + case IPV6_V6ONLY: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + *(int*)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n", + s, *(int *)optval)); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE + /* Level: IPPROTO_UDPLITE */ + case IPPROTO_UDPLITE: + /* Special case: all IPPROTO_UDPLITE option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + /* If this is no UDP lite socket, ignore any options. 
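Note the unit asymmetry above: TCP_KEEPALIVE reports keep_idle raw (milliseconds) while TCP_KEEPIDLE and TCP_KEEPINTVL divide by 1000 and report seconds. A minimal sketch reading both forms, assuming LWIP_TCP_KEEPALIVE is enabled and `s` is a connected TCP socket:

#include "lwip/sockets.h"
#include <stdio.h>

static void dump_keepalive(int s)
{
  int ms = 0, sec = 0;
  socklen_t len = sizeof(int);
  lwip_getsockopt(s, IPPROTO_TCP, TCP_KEEPALIVE, &ms, &len); /* milliseconds */
  len = sizeof(int);
  lwip_getsockopt(s, IPPROTO_TCP, TCP_KEEPIDLE, &sec, &len); /* seconds */
  printf("keep_idle = %d ms (%d s)\n", ms, sec);
}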
*/ + if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) { + return ENOPROTOOPT; + } + switch (optname) { + case UDPLITE_SEND_CSCOV: + *(int*)optval = sock->conn->pcb.udp->chksum_len_tx; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n", + s, (*(int*)optval)) ); + break; + case UDPLITE_RECV_CSCOV: + *(int*)optval = sock->conn->pcb.udp->chksum_len_rx; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n", + s, (*(int*)optval)) ); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_UDP */ + /* Level: IPPROTO_RAW */ + case IPPROTO_RAW: + switch (optname) { +#if LWIP_IPV6 && LWIP_RAW + case IPV6_CHECKSUM: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW); + if (sock->conn->pcb.raw->chksum_reqd == 0) { + *(int *)optval = -1; + } else { + *(int *)optval = sock->conn->pcb.raw->chksum_offset; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n", + s, (*(int*)optval)) ); + break; +#endif /* LWIP_IPV6 && LWIP_RAW */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n", + s, level, optname)); + err = ENOPROTOOPT; + break; + } /* switch (level) */ + + return err; +} + +int +lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen) +{ + u8_t err = 0; + struct lwip_sock *sock = get_socket(s); +#if !LWIP_TCPIP_CORE_LOCKING + LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + if (!sock) { + return -1; + } + + if (NULL == optval) { + sock_set_errno(sock, EFAULT); + return -1; + } + +#if LWIP_TCPIP_CORE_LOCKING + /* core-locking can just call the -impl function */ + LOCK_TCPIP_CORE(); + err = lwip_setsockopt_impl(s, level, optname, optval, optlen); + UNLOCK_TCPIP_CORE(); + +#else /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_MPU_COMPATIBLE + /* MPU_COMPATIBLE copies the optval data, so check for max size here */ + if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) { + sock_set_errno(sock, ENOBUFS); + return -1; + } +#endif /* LWIP_MPU_COMPATIBLE */ + + LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock); + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen; +#if LWIP_MPU_COMPATIBLE + MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen); +#else /* LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void*)optval; +#endif /* LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0; +#if LWIP_NETCONN_SEM_PER_THREAD + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed; +#endif + err = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data)); + if (err != ERR_OK) { + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); + sock_set_errno(sock, err_to_errno(err)); + return -1; + } + sys_arch_sem_wait((sys_sem_t*)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0); + + /* 
maybe lwip_getsockopt_internal has changed err */ + err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err; + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sock_set_errno(sock, err); + return err ? -1 : 0; +} + +#if !LWIP_TCPIP_CORE_LOCKING +/** lwip_setsockopt_callback: only used without CORE_LOCKING + * to get into the tcpip_thread + */ +static void +lwip_setsockopt_callback(void *arg) +{ + struct lwip_setgetsockopt_data *data; + LWIP_ASSERT("arg != NULL", arg != NULL); + data = (struct lwip_setgetsockopt_data*)arg; + + data->err = lwip_setsockopt_impl(data->s, data->level, data->optname, +#if LWIP_MPU_COMPATIBLE + data->optval, +#else /* LWIP_MPU_COMPATIBLE */ + data->optval.pc, +#endif /* LWIP_MPU_COMPATIBLE */ + data->optlen); + + sys_sem_signal((sys_sem_t*)(data->completed_sem)); +} +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +/** lwip_setsockopt_impl: the actual implementation of setsockopt: + * same argument as lwip_setsockopt, either called directly or through callback + */ +static u8_t +lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen) +{ + u8_t err = 0; + struct lwip_sock *sock = tryget_socket(s); + if (!sock) { + return EBADF; + } + + switch (level) { + +/* Level: SOL_SOCKET */ + case SOL_SOCKET: + switch (optname) { + + /* SO_ACCEPTCONN is get-only */ + + /* The option flags */ + case SO_BROADCAST: + case SO_KEEPALIVE: +#if SO_REUSE + case SO_REUSEADDR: +#endif /* SO_REUSE */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + if (*(const int*)optval) { + ip_set_option(sock->conn->pcb.ip, optname); + } else { + ip_reset_option(sock->conn->pcb.ip, optname); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n", + s, optname, (*(const int*)optval?"on":"off"))); + break; + + /* SO_TYPE is get-only */ + /* SO_ERROR is get-only */ + +#if LWIP_SO_SNDTIMEO + case SO_SNDTIMEO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + netconn_set_sendtimeout(sock->conn, LWIP_SO_SNDRCVTIMEO_GET_MS(optval)); + break; +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + case SO_RCVTIMEO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + netconn_set_recvtimeout(sock->conn, (int)LWIP_SO_SNDRCVTIMEO_GET_MS(optval)); + break; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + case SO_RCVBUF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int); + netconn_set_recvbufsize(sock->conn, *(const int*)optval); + break; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + case SO_LINGER: + { + const struct linger* linger = (const struct linger*)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger); + if (linger->l_onoff) { + int lingersec = linger->l_linger; + if (lingersec < 0) { + return EINVAL; + } + if (lingersec > 0xFFFF) { + lingersec = 0xFFFF; + } + sock->conn->linger = (s16_t)lingersec; + } else { + sock->conn->linger = -1; + } + } + break; +#endif /* LWIP_SO_LINGER */ +#if LWIP_UDP + case SO_NO_CHECK: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP); +#if LWIP_UDPLITE + if ((udp_flags(sock->conn->pcb.udp) & UDP_FLAGS_UDPLITE) != 0) { + /* this flag is only available for UDP, not for UDP lite */ + return EAFNOSUPPORT; + } +#endif /* LWIP_UDPLITE */ + if (*(const int*)optval) { + udp_setflags(sock->conn->pcb.udp, udp_flags(sock->conn->pcb.udp) | UDP_FLAGS_NOCHKSUM); + } else { + udp_setflags(sock->conn->pcb.udp, udp_flags(sock->conn->pcb.udp) & ~UDP_FLAGS_NOCHKSUM); + } + break; +#endif /* 
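SO_RCVTIMEO/SO_SNDTIMEO above decode optval with LWIP_SO_SNDRCVTIMEO_GET_MS(), whose operand type depends on LWIP_SO_SNDRCVTIMEO_NONSTANDARD: a plain int of milliseconds when that option is 1, a struct timeval otherwise (the default). A minimal sketch of a 2-second receive timeout, assuming LWIP_SO_RCVTIMEO is enabled and the default timeval encoding:

#include "lwip/sockets.h"

static int set_recv_timeout_2s(int s)
{
  struct timeval tv;
  tv.tv_sec = 2;
  tv.tv_usec = 0;
  /* later lwip_recv() calls fail with EWOULDBLOCK after ~2 s of silence */
  return lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}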
LWIP_UDP */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + +/* Level: IPPROTO_IP */ + case IPPROTO_IP: + switch (optname) { + case IP_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + sock->conn->pcb.ip->ttl = (u8_t)(*(const int*)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) -> %d\n", + s, sock->conn->pcb.ip->ttl)); + break; + case IP_TOS: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + sock->conn->pcb.ip->tos = (u8_t)(*(const int*)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..)-> %d\n", + s, sock->conn->pcb.ip->tos)); + break; +#if LWIP_MULTICAST_TX_OPTIONS + case IP_MULTICAST_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP); + udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t*)optval)); + break; + case IP_MULTICAST_IF: + { + ip4_addr_t if_addr; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP); + inet_addr_to_ip4addr(&if_addr, (const struct in_addr*)optval); + udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr); + } + break; + case IP_MULTICAST_LOOP: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP); + if (*(const u8_t*)optval) { + udp_setflags(sock->conn->pcb.udp, udp_flags(sock->conn->pcb.udp) | UDP_FLAGS_MULTICAST_LOOP); + } else { + udp_setflags(sock->conn->pcb.udp, udp_flags(sock->conn->pcb.udp) & ~UDP_FLAGS_MULTICAST_LOOP); + } + break; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ +#if LWIP_IGMP + case IP_ADD_MEMBERSHIP: + case IP_DROP_MEMBERSHIP: + { + /* If this is a TCP or a RAW socket, ignore these options. 
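Unlike BSD stacks that accept an int, IP_MULTICAST_TTL and IP_MULTICAST_LOOP above are validated against a u8_t optlen, so a single byte must be passed. A minimal sketch, assuming a UDP socket `s` used for multicast transmit:

#include "lwip/sockets.h"

static int set_mcast_ttl(int s)
{
  u8_t ttl = 4; /* must be a u8_t: optlen is checked against sizeof(u8_t) */
  return lwip_setsockopt(s, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl));
}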
*/ + /* @todo: assign membership to this socket so that it is dropped when closing the socket */ + err_t igmp_err; + const struct ip_mreq *imr = (const struct ip_mreq *)optval; + ip4_addr_t if_addr; + ip4_addr_t multi_addr; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP); + inet_addr_to_ip4addr(&if_addr, &imr->imr_interface); + inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr); + if (optname == IP_ADD_MEMBERSHIP) { + if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) { + /* cannot track membership (out of memory) */ + err = ENOMEM; + igmp_err = ERR_OK; + } else { + igmp_err = igmp_joingroup(&if_addr, &multi_addr); + } + } else { + igmp_err = igmp_leavegroup(&if_addr, &multi_addr); + lwip_socket_unregister_membership(s, &if_addr, &multi_addr); + } + if (igmp_err != ERR_OK) { + err = EADDRNOTAVAIL; + } + } + break; +#endif /* LWIP_IGMP */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + +#if LWIP_TCP +/* Level: IPPROTO_TCP */ + case IPPROTO_TCP: + /* Special case: all IPPROTO_TCP option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP); + if (sock->conn->pcb.tcp->state == LISTEN) { + return EINVAL; + } + switch (optname) { + case TCP_NODELAY: + if (*(const int*)optval) { + tcp_nagle_disable(sock->conn->pcb.tcp); + } else { + tcp_nagle_enable(sock->conn->pcb.tcp); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n", + s, (*(const int *)optval)?"on":"off") ); + break; + case TCP_KEEPALIVE: + sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int*)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_idle)); + break; + +#if LWIP_TCP_KEEPALIVE + case TCP_KEEPIDLE: + sock->conn->pcb.tcp->keep_idle = 1000*(u32_t)(*(const int*)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_idle)); + break; + case TCP_KEEPINTVL: + sock->conn->pcb.tcp->keep_intvl = 1000*(u32_t)(*(const int*)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_intvl)); + break; + case TCP_KEEPCNT: + sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int*)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_cnt)); + break; +#endif /* LWIP_TCP_KEEPALIVE */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_TCP*/ + +#if LWIP_IPV6 +/* Level: IPPROTO_IPV6 */ + case IPPROTO_IPV6: + switch (optname) { + case IPV6_V6ONLY: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP); + if (*(const int*)optval) { + netconn_set_ipv6only(sock->conn, 1); + } else { + netconn_set_ipv6only(sock->conn, 0); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n", + s, (netconn_get_ipv6only(sock->conn) ? 
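The IGMP branch above records each successful join via lwip_socket_register_membership() so the group is left again automatically when the socket closes. A minimal sketch of joining a group on the default interface, assuming LWIP_IGMP is enabled and `s` is a UDP socket (the group address is a placeholder):

#include "lwip/sockets.h"
#include "lwip/inet.h"

static int join_group(int s)
{
  struct ip_mreq mreq;
  mreq.imr_multiaddr.s_addr = inet_addr("239.0.0.1");
  mreq.imr_interface.s_addr = INADDR_ANY; /* let the stack choose the netif */
  return lwip_setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP,
                         &mreq, sizeof(mreq));
}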
1 : 0))); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE + /* Level: IPPROTO_UDPLITE */ + case IPPROTO_UDPLITE: + /* Special case: all IPPROTO_UDPLITE option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + /* If this is no UDP lite socket, ignore any options. */ + if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) { + return ENOPROTOOPT; + } + switch (optname) { + case UDPLITE_SEND_CSCOV: + if ((*(const int*)optval != 0) && ((*(const int*)optval < 8) || (*(const int*)optval > 0xffff))) { + /* don't allow illegal values! */ + sock->conn->pcb.udp->chksum_len_tx = 8; + } else { + sock->conn->pcb.udp->chksum_len_tx = (u16_t)*(const int*)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n", + s, (*(const int*)optval)) ); + break; + case UDPLITE_RECV_CSCOV: + if ((*(const int*)optval != 0) && ((*(const int*)optval < 8) || (*(const int*)optval > 0xffff))) { + /* don't allow illegal values! */ + sock->conn->pcb.udp->chksum_len_rx = 8; + } else { + sock->conn->pcb.udp->chksum_len_rx = (u16_t)*(const int*)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n", + s, (*(const int*)optval)) ); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_UDP */ + /* Level: IPPROTO_RAW */ + case IPPROTO_RAW: + switch (optname) { +#if LWIP_IPV6 && LWIP_RAW + case IPV6_CHECKSUM: + /* It should not be possible to disable the checksum generation with ICMPv6 + * as per RFC 3542 chapter 3.1 */ + if(sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) { + return EINVAL; + } + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW); + if (*(const int *)optval < 0) { + sock->conn->pcb.raw->chksum_reqd = 0; + } else if (*(const int *)optval & 1) { + /* Per RFC3542, odd offsets are not allowed */ + return EINVAL; + } else { + sock->conn->pcb.raw->chksum_reqd = 1; + sock->conn->pcb.raw->chksum_offset = (u16_t)*(const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) 
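UDPLITE_SEND_CSCOV above silently clamps out-of-range coverage values (nonzero but below 8, or above 0xffff) to the 8-byte minimum instead of rejecting them. A minimal sketch limiting the checksum to the first 20 bytes of each datagram, assuming `s` was created with IPPROTO_UDPLITE:

#include "lwip/sockets.h"

static int set_send_coverage(int s)
{
  int cscov = 20; /* 8-byte UDP-Lite header + 12 payload bytes */
  return lwip_setsockopt(s, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV,
                         &cscov, sizeof(cscov));
}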
-> %d\n", + s, sock->conn->pcb.raw->chksum_reqd)); + break; +#endif /* LWIP_IPV6 && LWIP_RAW */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n", + s, level, optname)); + err = ENOPROTOOPT; + break; + } /* switch (level) */ + + return err; +} + +int +lwip_ioctl(int s, long cmd, void *argp) +{ + struct lwip_sock *sock = get_socket(s); + u8_t val; +#if LWIP_SO_RCVBUF + u16_t buflen = 0; + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ + + if (!sock) { + return -1; + } + + switch (cmd) { +#if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE + case FIONREAD: + if (!argp) { + sock_set_errno(sock, EINVAL); + return -1; + } +#if LWIP_FIONREAD_LINUXMODE + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + struct pbuf *p; + if (sock->lastdata) { + p = ((struct netbuf *)sock->lastdata)->p; + *((int*)argp) = p->tot_len - sock->lastoffset; + } else { + struct netbuf *rxbuf; + err_t err; + if (sock->rcvevent <= 0) { + *((int*)argp) = 0; + } else { + err = netconn_recv(sock->conn, &rxbuf); + if (err != ERR_OK) { + *((int*)argp) = 0; + } else { + sock->lastdata = rxbuf; + sock->lastoffset = 0; + *((int*)argp) = rxbuf->p->tot_len; + } + } + } + return 0; + } +#endif /* LWIP_FIONREAD_LINUXMODE */ + +#if LWIP_SO_RCVBUF + /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */ + SYS_ARCH_GET(sock->conn->recv_avail, recv_avail); + if (recv_avail < 0) { + recv_avail = 0; + } + *((int*)argp) = recv_avail; + + /* Check if there is data left from the last recv operation. /maq 041215 */ + if (sock->lastdata) { + struct pbuf *p = (struct pbuf *)sock->lastdata; + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + p = ((struct netbuf *)p)->p; + } + buflen = p->tot_len; + buflen -= sock->lastoffset; + + *((int*)argp) += buflen; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %"U16_F"\n", s, argp, *((u16_t*)argp))); + sock_set_errno(sock, 0); + return 0; +#else /* LWIP_SO_RCVBUF */ + break; +#endif /* LWIP_SO_RCVBUF */ +#endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */ + + case (long)FIONBIO: + val = 0; + if (argp && *(u32_t*)argp) { + val = 1; + } + netconn_set_nonblocking(sock->conn, val); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val)); + sock_set_errno(sock, 0); + return 0; + + default: + break; + } /* switch (cmd) */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp)); + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + return -1; +} + +/** A minimal implementation of fcntl. + * Currently only the commands F_GETFL and F_SETFL are implemented. + * Only the flag O_NONBLOCK is implemented. + */ +int +lwip_fcntl(int s, int cmd, int val) +{ + struct lwip_sock *sock = get_socket(s); + int ret = -1; + + if (!sock) { + return -1; + } + + switch (cmd) { + case F_GETFL: + ret = netconn_is_nonblocking(sock->conn) ? 
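FIONBIO above and F_SETFL below both end up in netconn_set_nonblocking(); note that F_SETFL rejects any flag other than O_NONBLOCK with ENOSYS. A minimal sketch of switching a socket to non-blocking mode via fcntl:

#include "lwip/sockets.h"

static int set_nonblocking(int s)
{
  int fl = lwip_fcntl(s, F_GETFL, 0);
  if (fl < 0) {
    return -1;
  }
  return lwip_fcntl(s, F_SETFL, fl | O_NONBLOCK);
}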
O_NONBLOCK : 0; + sock_set_errno(sock, 0); + break; + case F_SETFL: + if ((val & ~O_NONBLOCK) == 0) { + /* only O_NONBLOCK, all other bits are zero */ + netconn_set_nonblocking(sock->conn, val & O_NONBLOCK); + ret = 0; + sock_set_errno(sock, 0); + } else { + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + } + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val)); + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + break; + } + return ret; +} + +#if LWIP_IGMP +/** Register a new IGMP membership. On socket close, the membership is dropped automatically. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). + * + * @return 1 on success, 0 on failure + */ +static int +lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return 0; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv4_multicast_memberships[i].sock == NULL) { + socket_ipv4_multicast_memberships[i].sock = sock; + ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr); + ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr); + return 1; + } + } + return 0; +} + +/** Unregister a previously registered membership. This prevents dropping the membership + * on socket close. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if ((socket_ipv4_multicast_memberships[i].sock == sock) && + ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) && + ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) { + socket_ipv4_multicast_memberships[i].sock = NULL; + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr); + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr); + return; + } + } +} + +/** Drop all memberships of a socket that were not dropped explicitly via setsockopt. + * + * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK). 
+ */ +static void +lwip_socket_drop_registered_memberships(int s) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv4_multicast_memberships[i].sock == sock) { + ip_addr_t multi_addr, if_addr; + ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr); + ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr); + socket_ipv4_multicast_memberships[i].sock = NULL; + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr); + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr); + + netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE); + } + } +} +#endif /* LWIP_IGMP */ +#endif /* LWIP_SOCKET */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/tcpip.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/tcpip.c new file mode 100644 index 000000000..48ba5fb2d --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/api/tcpip.c @@ -0,0 +1,518 @@ +/** + * @file + * Sequential API Main thread module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcpip_priv.h" +#include "lwip/sys.h" +#include "lwip/memp.h" +#include "lwip/mem.h" +#include "lwip/init.h" +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/etharp.h" +#include "netif/ethernet.h" + +#define TCPIP_MSG_VAR_REF(name) API_VAR_REF(name) +#define TCPIP_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct tcpip_msg, name) +#define TCPIP_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct tcpip_msg, MEMP_TCPIP_MSG_API, name, ERR_MEM) +#define TCPIP_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_TCPIP_MSG_API, name) + +/* global variables */ +static tcpip_init_done_fn tcpip_init_done; +static void *tcpip_init_done_arg; +static sys_mbox_t mbox; + +#if LWIP_TCPIP_CORE_LOCKING +/** The global semaphore to lock the stack. */ +sys_mutex_t lock_tcpip_core; +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_TIMERS +/* wait for a message, timeouts are processed while waiting */ +#define TCPIP_MBOX_FETCH(mbox, msg) sys_timeouts_mbox_fetch(mbox, msg) +#else /* LWIP_TIMERS */ +/* wait for a message with timers disabled (e.g. pass a timer-check trigger into tcpip_thread) */ +#define TCPIP_MBOX_FETCH(mbox, msg) sys_mbox_fetch(mbox, msg) +#endif /* LWIP_TIMERS */ + +/** + * The main lwIP thread. This thread has exclusive access to lwIP core functions + * (unless access to them is not locked). Other threads communicate with this + * thread using message boxes. + * + * It also starts all the timers to make sure they are running in the right + * thread context. + * + * @param arg unused argument + */ +static void +tcpip_thread(void *arg) +{ + struct tcpip_msg *msg; + LWIP_UNUSED_ARG(arg); + + if (tcpip_init_done != NULL) { + tcpip_init_done(tcpip_init_done_arg); + } + + LOCK_TCPIP_CORE(); + while (1) { /* MAIN Loop */ + UNLOCK_TCPIP_CORE(); + LWIP_TCPIP_THREAD_ALIVE(); + /* wait for a message, timeouts are processed while waiting */ + TCPIP_MBOX_FETCH(&mbox, (void **)&msg); + LOCK_TCPIP_CORE(); + if (msg == NULL) { + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: NULL\n")); + LWIP_ASSERT("tcpip_thread: invalid message", 0); + continue; + } + switch (msg->type) { +#if !LWIP_TCPIP_CORE_LOCKING + case TCPIP_MSG_API: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API message %p\n", (void *)msg)); + msg->msg.api_msg.function(msg->msg.api_msg.msg); + break; + case TCPIP_MSG_API_CALL: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API CALL message %p\n", (void *)msg)); + msg->msg.api_call.arg->err = msg->msg.api_call.function(msg->msg.api_call.arg); + sys_sem_signal(msg->msg.api_call.sem); + break; +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + +#if !LWIP_TCPIP_CORE_LOCKING_INPUT + case TCPIP_MSG_INPKT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: PACKET %p\n", (void *)msg)); + msg->msg.inp.input_fn(msg->msg.inp.p, msg->msg.inp.netif); + memp_free(MEMP_TCPIP_MSG_INPKT, msg); + break; +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + case TCPIP_MSG_TIMEOUT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: TIMEOUT %p\n", (void *)msg)); + sys_timeout(msg->msg.tmo.msecs, msg->msg.tmo.h, msg->msg.tmo.arg); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; + case TCPIP_MSG_UNTIMEOUT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: UNTIMEOUT %p\n", (void *)msg)); + sys_untimeout(msg->msg.tmo.h, msg->msg.tmo.arg); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + + case TCPIP_MSG_CALLBACK: + 
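Everything above runs inside tcpip_thread, which is spawned by tcpip_init() further down in this file; application startup typically blocks until the init-done callback fires. A minimal sketch, with the application-side function names being assumptions:

#include "lwip/tcpip.h"
#include "lwip/sys.h"

static sys_sem_t init_done;

static void on_lwip_ready(void *arg) /* runs in tcpip_thread context */
{
  sys_sem_signal((sys_sem_t *)arg);
}

void network_start(void)
{
  sys_sem_new(&init_done, 0);
  tcpip_init(on_lwip_ready, &init_done); /* creates the mbox and tcpip_thread */
  sys_arch_sem_wait(&init_done, 0);      /* wait until the stack is up */
  sys_sem_free(&init_done);
}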
LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK %p\n", (void *)msg)); + msg->msg.cb.function(msg->msg.cb.ctx); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; + + case TCPIP_MSG_CALLBACK_STATIC: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK_STATIC %p\n", (void *)msg)); + msg->msg.cb.function(msg->msg.cb.ctx); + break; + + default: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: %d\n", msg->type)); + LWIP_ASSERT("tcpip_thread: invalid message", 0); + break; + } + } +} + +/** + * Pass a received packet to tcpip_thread for input processing + * + * @param p the received packet + * @param inp the network interface on which the packet was received + * @param input_fn input function to call + */ +err_t +tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn input_fn) +{ +#if LWIP_TCPIP_CORE_LOCKING_INPUT + err_t ret; + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_inpkt: PACKET %p/%p\n", (void *)p, (void *)inp)); + LOCK_TCPIP_CORE(); + ret = input_fn(p, inp); + UNLOCK_TCPIP_CORE(); + return ret; +#else /* LWIP_TCPIP_CORE_LOCKING_INPUT */ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_INPKT); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_INPKT; + msg->msg.inp.p = p; + msg->msg.inp.netif = inp; + msg->msg.inp.input_fn = input_fn; + if (sys_mbox_trypost(&mbox, msg) != ERR_OK) { + memp_free(MEMP_TCPIP_MSG_INPKT, msg); + return ERR_MEM; + } + return ERR_OK; +#endif /* LWIP_TCPIP_CORE_LOCKING_INPUT */ +} + +/** + * @ingroup lwip_os + * Pass a received packet to tcpip_thread for input processing with + * ethernet_input or ip_input. Don't call directly, pass to netif_add() + * and call netif->input(). + * + * @param p the received packet, p->payload pointing to the Ethernet header or + * to an IP header (if inp doesn't have NETIF_FLAG_ETHARP or + * NETIF_FLAG_ETHERNET flags) + * @param inp the network interface on which the packet was received + */ +err_t +tcpip_input(struct pbuf *p, struct netif *inp) +{ +#if LWIP_ETHERNET + if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { + return tcpip_inpkt(p, inp, ethernet_input); + } else +#endif /* LWIP_ETHERNET */ + return tcpip_inpkt(p, inp, ip_input); +} + +/** + * Call a specific function in the thread context of + * tcpip_thread for easy access synchronization. + * A function called in that way may access lwIP core code + * without fearing concurrent access. 
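tcpip_input() above is what a driver port installs as the netif input callback so that RX processing happens in tcpip_thread rather than in driver context. A minimal sketch of the wiring, where ethernetif_init is the port-specific driver init (name assumed, as in the contrib ports) and the addresses are left at zero for later DHCP configuration:

#include "lwip/tcpip.h"
#include "lwip/netif.h"

extern err_t ethernetif_init(struct netif *netif); /* assumed port function */

static struct netif eth0;

static void add_interface(void)
{
  ip4_addr_t ip, mask, gw;
  ip4_addr_set_zero(&ip);
  ip4_addr_set_zero(&mask);
  ip4_addr_set_zero(&gw);
  netif_add(&eth0, &ip, &mask, &gw, NULL, ethernetif_init, tcpip_input);
  netif_set_default(&eth0);
  netif_set_up(&eth0);
}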
+ * + * @param function the function to call + * @param ctx parameter passed to function + * @param block 1 to block until the request is posted, 0 to non-blocking mode + * @return ERR_OK if the function was called, another err_t if not + */ +err_t +tcpip_callback_with_block(tcpip_callback_fn function, void *ctx, u8_t block) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_CALLBACK; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + if (block) { + sys_mbox_post(&mbox, msg); + } else { + if (sys_mbox_trypost(&mbox, msg) != ERR_OK) { + memp_free(MEMP_TCPIP_MSG_API, msg); + return ERR_MEM; + } + } + return ERR_OK; +} + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS +/** + * call sys_timeout in tcpip_thread + * + * @param msecs time in milliseconds for timeout + * @param h function to be called on timeout + * @param arg argument to pass to timeout function h + * @return ERR_MEM on memory error, ERR_OK otherwise + */ +err_t +tcpip_timeout(u32_t msecs, sys_timeout_handler h, void *arg) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_TIMEOUT; + msg->msg.tmo.msecs = msecs; + msg->msg.tmo.h = h; + msg->msg.tmo.arg = arg; + sys_mbox_post(&mbox, msg); + return ERR_OK; +} + +/** + * call sys_untimeout in tcpip_thread + * + * @param h the timeout handler to cancel + * @param arg argument of the timeout handler to cancel + * @return ERR_MEM on memory error, ERR_OK otherwise + */ +err_t +tcpip_untimeout(sys_timeout_handler h, void *arg) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_UNTIMEOUT; + msg->msg.tmo.h = h; + msg->msg.tmo.arg = arg; + sys_mbox_post(&mbox, msg); + return ERR_OK; +} +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + + +/** + * Sends a message to TCPIP thread to call a function. Caller thread blocks on + * a provided semaphore, which is NOT automatically signalled by TCPIP thread; + * this has to be done by the user. + * It is recommended to use LWIP_TCPIP_CORE_LOCKING since this is the way + * with the least runtime overhead. + * + * @param fn function to be called from TCPIP thread + * @param apimsg argument to API function + * @param sem semaphore to wait on + * @return ERR_OK if the function was called, another err_t if not + */ +err_t +tcpip_send_msg_wait_sem(tcpip_callback_fn fn, void *apimsg, sys_sem_t* sem) +{ +#if LWIP_TCPIP_CORE_LOCKING + LWIP_UNUSED_ARG(sem); + LOCK_TCPIP_CORE(); + fn(apimsg); + UNLOCK_TCPIP_CORE(); + return ERR_OK; +#else /* LWIP_TCPIP_CORE_LOCKING */ + TCPIP_MSG_VAR_DECLARE(msg); + + LWIP_ASSERT("semaphore not initialized", sys_sem_valid(sem)); + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox)); + + TCPIP_MSG_VAR_ALLOC(msg); + TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API; + TCPIP_MSG_VAR_REF(msg).msg.api_msg.function = fn; + TCPIP_MSG_VAR_REF(msg).msg.api_msg.msg = apimsg; + sys_mbox_post(&mbox, &TCPIP_MSG_VAR_REF(msg)); + sys_arch_sem_wait(sem, 0); + TCPIP_MSG_VAR_FREE(msg); + return ERR_OK; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} + +/** + * Synchronously calls function in TCPIP thread and waits for its completion.
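An editorial sketch (hedged; the struct and function names here are hypothetical) of the calling convention tcpip_api_call() below expects: the argument struct embeds struct tcpip_api_call_data as its first member, so the core can reach the err/sem bookkeeping fields through the same pointer:

struct my_call {
  struct tcpip_api_call_data call; /* must be the first member */
  int value;                       /* caller-specific arguments */
};

static err_t
do_in_core(struct tcpip_api_call_data *call)
{
  struct my_call *m = (struct my_call *)call;
  /* Runs with the core locked, or inside tcpip_thread. */
  LWIP_UNUSED_ARG(m);
  return ERR_OK;
}

static err_t
my_sync_call(int v)
{
  struct my_call m;
  m.value = v;
  return tcpip_api_call(do_in_core, &m.call); /* blocks until completed */
}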
+ * It is recommended to use LWIP_TCPIP_CORE_LOCKING (preferred) or + * LWIP_NETCONN_SEM_PER_THREAD. + * If not, a semaphore is created and destroyed on every call which is usually + * an expensive/slow operation. + * @param fn Function to call + * @param call Call parameters + * @return Return value from tcpip_api_call_fn + */ +err_t +tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call) +{ +#if LWIP_TCPIP_CORE_LOCKING + err_t err; + LOCK_TCPIP_CORE(); + err = fn(call); + UNLOCK_TCPIP_CORE(); + return err; +#else /* LWIP_TCPIP_CORE_LOCKING */ + TCPIP_MSG_VAR_DECLARE(msg); + +#if !LWIP_NETCONN_SEM_PER_THREAD + err_t err = sys_sem_new(&call->sem, 0); + if (err != ERR_OK) { + return err; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox)); + + TCPIP_MSG_VAR_ALLOC(msg); + TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API_CALL; + TCPIP_MSG_VAR_REF(msg).msg.api_call.arg = call; + TCPIP_MSG_VAR_REF(msg).msg.api_call.function = fn; +#if LWIP_NETCONN_SEM_PER_THREAD + TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = &call->sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_mbox_post(&mbox, &TCPIP_MSG_VAR_REF(msg)); + sys_arch_sem_wait(TCPIP_MSG_VAR_REF(msg).msg.api_call.sem, 0); + TCPIP_MSG_VAR_FREE(msg); + +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(&call->sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + return call->err; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} + +/** + * Allocate a structure for a static callback message and initialize it. + * This is intended to be used to send "static" messages from interrupt context. + * + * @param function the function to call + * @param ctx parameter passed to function + * @return a struct pointer to pass to tcpip_trycallback(). + */ +struct tcpip_callback_msg* +tcpip_callbackmsg_new(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return NULL; + } + msg->type = TCPIP_MSG_CALLBACK_STATIC; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + return (struct tcpip_callback_msg*)msg; +} + +/** + * Free a callback message allocated by tcpip_callbackmsg_new(). + * + * @param msg the message to free + */ +void +tcpip_callbackmsg_delete(struct tcpip_callback_msg* msg) +{ + memp_free(MEMP_TCPIP_MSG_API, msg); +} + +/** + * Try to post a callback-message to the tcpip_thread mbox + * This is intended to be used to send "static" messages from interrupt context. 
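A sketch of the pre-allocated message pattern this API is meant for (editorial assumption; s_rx_msg, on_rx, my_rx_init and my_rx_isr are hypothetical): allocate once in task context with tcpip_callbackmsg_new(), then post from the interrupt with tcpip_trycallback():

static struct tcpip_callback_msg *s_rx_msg;

static void
on_rx(void *ctx)
{
  LWIP_UNUSED_ARG(ctx); /* deferred work; runs later in tcpip_thread */
}

void
my_rx_init(void)
{
  s_rx_msg = tcpip_callbackmsg_new(on_rx, NULL); /* allocate outside the ISR */
}

void
my_rx_isr(void)
{
  if (s_rx_msg != NULL) {
    (void)tcpip_trycallback(s_rx_msg); /* trypost only, never blocks */
  }
}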
+ * + * @param msg pointer to the message to post + * @return sys_mbox_trypost() return code + */ +err_t +tcpip_trycallback(struct tcpip_callback_msg* msg) +{ + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox)); + return sys_mbox_trypost(&mbox, msg); +} + +/** + * @ingroup lwip_os + * Initialize this module: + * - initialize all sub modules + * - start the tcpip_thread + * + * @param initfunc a function to call when tcpip_thread is running and finished initializing + * @param arg argument to pass to initfunc + */ +void +tcpip_init(tcpip_init_done_fn initfunc, void *arg) +{ + lwip_init(); + + tcpip_init_done = initfunc; + tcpip_init_done_arg = arg; + if (sys_mbox_new(&mbox, TCPIP_MBOX_SIZE) != ERR_OK) { + LWIP_ASSERT("failed to create tcpip_thread mbox", 0); + } +#if LWIP_TCPIP_CORE_LOCKING + if (sys_mutex_new(&lock_tcpip_core) != ERR_OK) { + LWIP_ASSERT("failed to create lock_tcpip_core", 0); + } +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sys_thread_new(TCPIP_THREAD_NAME, tcpip_thread, NULL, TCPIP_THREAD_STACKSIZE, TCPIP_THREAD_PRIO); +} + +/** + * Simple callback function used with tcpip_callback to free a pbuf + * (pbuf_free has a wrong signature for tcpip_callback) + * + * @param p The pbuf (chain) to be dereferenced. + */ +static void +pbuf_free_int(void *p) +{ + struct pbuf *q = (struct pbuf *)p; + pbuf_free(q); +} + +/** + * A simple wrapper function that allows you to free a pbuf from interrupt context. + * + * @param p The pbuf (chain) to be dereferenced. + * @return ERR_OK if callback could be enqueued, an err_t if not + */ +err_t +pbuf_free_callback(struct pbuf *p) +{ + return tcpip_callback_with_block(pbuf_free_int, p, 0); +} + +/** + * A simple wrapper function that allows you to free heap memory from + * interrupt context. + * + * @param m the heap memory to free + * @return ERR_OK if callback could be enqueued, an err_t if not + */ +err_t +mem_free_callback(void *m) +{ + return tcpip_callback_with_block(mem_free, m, 0); +} + +#endif /* !NO_SYS */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c new file mode 100644 index 000000000..565c34a71 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c @@ -0,0 +1,1342 @@ +/** + * @file + * MQTT client + * + * @defgroup mqtt MQTT client + * @ingroup apps + * @verbinclude mqtt_client.txt + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack + * + * Author: Erik Andersson + * + * + * @todo: + * - Handle large outgoing payloads for PUBLISH messages + * - Fix restriction of a single topic in each (UN)SUBSCRIBE message (protocol has support for multiple topics) + * - Add support for legacy MQTT protocol version + * + * Please coordinate changes and requests with Erik Andersson + * Erik Andersson + * + */ +#include "lwip/apps/mqtt.h" +#include "lwip/timeouts.h" +#include "lwip/ip_addr.h" +#include "lwip/mem.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/tcp.h" +#include <string.h> + +#if LWIP_TCP && LWIP_CALLBACK_API + +/** + * MQTT_DEBUG: Default is off. + */ +#if !defined MQTT_DEBUG || defined __DOXYGEN__ +#define MQTT_DEBUG LWIP_DBG_OFF +#endif + +#define MQTT_DEBUG_TRACE (MQTT_DEBUG | LWIP_DBG_TRACE) +#define MQTT_DEBUG_STATE (MQTT_DEBUG | LWIP_DBG_STATE) +#define MQTT_DEBUG_WARN (MQTT_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define MQTT_DEBUG_WARN_STATE (MQTT_DEBUG | LWIP_DBG_LEVEL_WARNING | LWIP_DBG_STATE) +#define MQTT_DEBUG_SERIOUS (MQTT_DEBUG | LWIP_DBG_LEVEL_SERIOUS) + +static void mqtt_cyclic_timer(void *arg); + +/** + * MQTT client connection states + */ +enum { + TCP_DISCONNECTED, + TCP_CONNECTING, + MQTT_CONNECTING, + MQTT_CONNECTED +}; + +/** + * MQTT control message types + */ +enum mqtt_message_type { + MQTT_MSG_TYPE_CONNECT = 1, + MQTT_MSG_TYPE_CONNACK = 2, + MQTT_MSG_TYPE_PUBLISH = 3, + MQTT_MSG_TYPE_PUBACK = 4, + MQTT_MSG_TYPE_PUBREC = 5, + MQTT_MSG_TYPE_PUBREL = 6, + MQTT_MSG_TYPE_PUBCOMP = 7, + MQTT_MSG_TYPE_SUBSCRIBE = 8, + MQTT_MSG_TYPE_SUBACK = 9, + MQTT_MSG_TYPE_UNSUBSCRIBE = 10, + MQTT_MSG_TYPE_UNSUBACK = 11, + MQTT_MSG_TYPE_PINGREQ = 12, + MQTT_MSG_TYPE_PINGRESP = 13, + MQTT_MSG_TYPE_DISCONNECT = 14 +}; + +/** Helpers to extract control packet type and qos from first byte in fixed header */ +#define MQTT_CTL_PACKET_TYPE(fixed_hdr_byte0) ((fixed_hdr_byte0 & 0xf0) >> 4) +#define MQTT_CTL_PACKET_QOS(fixed_hdr_byte0) ((fixed_hdr_byte0 & 0x6) >> 1) + +/** + * MQTT connect flags, only used in CONNECT message + */ +enum mqtt_connect_flag { + MQTT_CONNECT_FLAG_USERNAME = 1 << 7, + MQTT_CONNECT_FLAG_PASSWORD = 1 << 6, + MQTT_CONNECT_FLAG_WILL_RETAIN = 1 << 5, + MQTT_CONNECT_FLAG_WILL = 1 << 2, + MQTT_CONNECT_FLAG_CLEAN_SESSION = 1 << 1 +}; + + +#if defined(LWIP_DEBUG) +static const char * const mqtt_message_type_str[15] = +{ + "UNDEFINED", + "CONNECT", + "CONNACK", + "PUBLISH", + "PUBACK", + "PUBREC", + "PUBREL", + "PUBCOMP", + "SUBSCRIBE", + "SUBACK", + "UNSUBSCRIBE", + "UNSUBACK", + "PINGREQ", + "PINGRESP", + "DISCONNECT" +}; + +/** + * Message type value to string + * @param msg_type see enum mqtt_message_type + * + * @return Control message type text string + */ +static const char * +mqtt_msg_type_to_str(u8_t msg_type) +{ + if (msg_type >= LWIP_ARRAYSIZE(mqtt_message_type_str)) { + msg_type = 0; + } + return mqtt_message_type_str[msg_type]; +} + +#endif + + +/** + * Generate MQTT packet identifier + * @param client MQTT client + * @return New packet
identifier, range 1 to 65535 + */ +static u16_t +msg_generate_packet_id(mqtt_client_t *client) +{ + client->pkt_id_seq++; + if (client->pkt_id_seq == 0) { + client->pkt_id_seq++; + } + return client->pkt_id_seq; +} + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Output ring buffer */ + + +#define MQTT_RINGBUF_IDX_MASK ((MQTT_OUTPUT_RINGBUF_SIZE) - 1) + +/** Add single item to ring buffer */ +#define mqtt_ringbuf_put(rb, item) ((rb)->buf)[(rb)->put++ & MQTT_RINGBUF_IDX_MASK] = (item) + +/** Return number of bytes in ring buffer */ +#define mqtt_ringbuf_len(rb) ((u16_t)((rb)->put - (rb)->get)) + +/** Return number of bytes free in ring buffer */ +#define mqtt_ringbuf_free(rb) (MQTT_OUTPUT_RINGBUF_SIZE - mqtt_ringbuf_len(rb)) + +/** Return number of bytes possible to read without wrapping around */ +#define mqtt_ringbuf_linear_read_length(rb) LWIP_MIN(mqtt_ringbuf_len(rb), (MQTT_OUTPUT_RINGBUF_SIZE - ((rb)->get & MQTT_RINGBUF_IDX_MASK))) + +/** Return pointer to ring buffer get position */ +#define mqtt_ringbuf_get_ptr(rb) (&(rb)->buf[(rb)->get & MQTT_RINGBUF_IDX_MASK]) + +#define mqtt_ringbuf_advance_get_idx(rb, len) ((rb)->get += (len)) + + +/** + * Try send as many bytes as possible from output ring buffer + * @param rb Output ring buffer + * @param tpcb TCP connection handle + */ +static void +mqtt_output_send(struct mqtt_ringbuf_t *rb, struct tcp_pcb *tpcb) +{ + err_t err; + u8_t wrap = 0; + u16_t ringbuf_lin_len = mqtt_ringbuf_linear_read_length(rb); + u16_t send_len = tcp_sndbuf(tpcb); + LWIP_ASSERT("mqtt_output_send: tpcb != NULL", tpcb != NULL); + + if (send_len == 0 || ringbuf_lin_len == 0) { + return; + } + + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_output_send: tcp_sndbuf: %d bytes, ringbuf_linear_available: %d, get %d, put %d\n", + send_len, ringbuf_lin_len, ((rb)->get & MQTT_RINGBUF_IDX_MASK), ((rb)->put & MQTT_RINGBUF_IDX_MASK))); + + if (send_len > ringbuf_lin_len) { + /* Space in TCP output buffer is larger than available in ring buffer linear portion */ + send_len = ringbuf_lin_len; + /* Wrap around if more data in ring buffer after linear portion */ + wrap = (mqtt_ringbuf_len(rb) > ringbuf_lin_len); + } + err = tcp_write(tpcb, mqtt_ringbuf_get_ptr(rb), send_len, TCP_WRITE_FLAG_COPY | (wrap ? 
TCP_WRITE_FLAG_MORE : 0)); + if ((err == ERR_OK) && wrap) { + mqtt_ringbuf_advance_get_idx(rb, send_len); + /* Use the lesser one of ring buffer linear length and TCP send buffer size */ + send_len = LWIP_MIN(tcp_sndbuf(tpcb), mqtt_ringbuf_linear_read_length(rb)); + err = tcp_write(tpcb, mqtt_ringbuf_get_ptr(rb), send_len, TCP_WRITE_FLAG_COPY); + } + + if (err == ERR_OK) { + mqtt_ringbuf_advance_get_idx(rb, send_len); + /* Flush */ + tcp_output(tpcb); + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_output_send: Send failed with err %d (\"%s\")\n", err, lwip_strerr(err))); + } +} + + + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Request queue */ + +/** + * Create request item + * @param r_objs Pointer to request objects + * @param pkt_id Packet identifier of request + * @param cb Packet callback to call when the request's lifetime ends + * @param arg Argument passed to the callback + * @return Request or NULL if failed to create + */ +static struct mqtt_request_t * +mqtt_create_request(struct mqtt_request_t *r_objs, u16_t pkt_id, mqtt_request_cb_t cb, void *arg) +{ + struct mqtt_request_t *r = NULL; + u8_t n; + LWIP_ASSERT("mqtt_create_request: r_objs != NULL", r_objs != NULL); + for (n = 0; n < MQTT_REQ_MAX_IN_FLIGHT; n++) { + /* Item points to itself if not in use */ + if (r_objs[n].next == &r_objs[n]) { + r = &r_objs[n]; + r->next = NULL; + r->cb = cb; + r->arg = arg; + r->pkt_id = pkt_id; + break; + } + } + return r; +} + + +/** + * Append request to pending request queue + * @param tail Pointer to request queue tail pointer + * @param r Request to append + */ +static void +mqtt_append_request(struct mqtt_request_t **tail, struct mqtt_request_t *r) +{ + struct mqtt_request_t *head = NULL; + s16_t time_before = 0; + struct mqtt_request_t *iter; + + LWIP_ASSERT("mqtt_append_request: tail != NULL", tail != NULL); + + /* Iterate through queue to find head, and count total timeout time */ + for (iter = *tail; iter != NULL; iter = iter->next) { + time_before += iter->timeout_diff; + head = iter; + } + + LWIP_ASSERT("mqtt_append_request: time_before <= MQTT_REQ_TIMEOUT", time_before <= MQTT_REQ_TIMEOUT); + r->timeout_diff = MQTT_REQ_TIMEOUT - time_before; + if (head == NULL) { + *tail = r; + } else { + head->next = r; + } +} + + +/** + * Delete request item + * @param r Request item to delete + */ +static void +mqtt_delete_request(struct mqtt_request_t *r) +{ + if (r != NULL) { + r->next = r; + } +} + +/** + * Remove a request item with a specific packet identifier from request queue + * @param tail Pointer to request queue tail pointer + * @param pkt_id Packet identifier of request to take + * @return Request item if found, NULL if not + */ +static struct mqtt_request_t * +mqtt_take_request(struct mqtt_request_t **tail, u16_t pkt_id) +{ + struct mqtt_request_t *iter = NULL, *prev = NULL; + LWIP_ASSERT("mqtt_take_request: tail != NULL", tail != NULL); + /* Search all requests for pkt_id */ + for (iter = *tail; iter != NULL; iter = iter->next) { + if (iter->pkt_id == pkt_id) { + break; + } + prev = iter; + } + + /* If request was found */ + if (iter != NULL) { + /* unchain */ + if (prev == NULL) { + *tail = iter->next; + } else { + prev->next = iter->next; + } + /* If exists, add remaining timeout time for the request to next */ + if (iter->next != NULL) { + iter->next->timeout_diff += iter->timeout_diff; + } + iter->next = NULL; + } + return iter; +} + +/** + * Handle request timeouts + * @param tail Pointer to request
queue tail pointer + * @param t Time since last call in seconds + */ +static void +mqtt_request_time_elapsed(struct mqtt_request_t **tail, u8_t t) +{ + struct mqtt_request_t *r; + LWIP_ASSERT("mqtt_request_time_elapsed: tail != NULL", tail != NULL); + r = *tail; + while (t > 0 && r != NULL) { + if (t >= r->timeout_diff) { + t -= (u8_t)r->timeout_diff; + /* Unchain */ + *tail = r->next; + /* Notify upper layer about timeout */ + if (r->cb != NULL) { + r->cb(r->arg, ERR_TIMEOUT); + } + mqtt_delete_request(r); + /* Tail might be modified in callback, so re-read it in every iteration */ + r = *(struct mqtt_request_t * const volatile *)tail; + } else { + r->timeout_diff -= t; + t = 0; + } + } +} + +/** + * Free all request items + * @param tail Pointer to request queue tail pointer + */ +static void +mqtt_clear_requests(struct mqtt_request_t **tail) +{ + struct mqtt_request_t *iter, *next; + LWIP_ASSERT("mqtt_clear_requests: tail != NULL", tail != NULL); + for (iter = *tail; iter != NULL; iter = next) { + next = iter->next; + mqtt_delete_request(iter); + } + *tail = NULL; +} +/** + * Initialize all request items + * @param r_objs Pointer to request objects + */ +static void +mqtt_init_requests(struct mqtt_request_t *r_objs) +{ + u8_t n; + LWIP_ASSERT("mqtt_init_requests: r_objs != NULL", r_objs != NULL); + for (n = 0; n < MQTT_REQ_MAX_IN_FLIGHT; n++) { + /* Item pointing to itself indicates unused */ + r_objs[n].next = &r_objs[n]; + } +} + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Output message build helpers */ + + +static void +mqtt_output_append_u8(struct mqtt_ringbuf_t *rb, u8_t value) +{ + mqtt_ringbuf_put(rb, value); +} + +static +void mqtt_output_append_u16(struct mqtt_ringbuf_t *rb, u16_t value) +{ + mqtt_ringbuf_put(rb, value >> 8); + mqtt_ringbuf_put(rb, value & 0xff); +} + +static void +mqtt_output_append_buf(struct mqtt_ringbuf_t *rb, const void *data, u16_t length) +{ + u16_t n; + for (n = 0; n < length; n++) { + mqtt_ringbuf_put(rb, ((const u8_t *)data)[n]); + } +} + +static void +mqtt_output_append_string(struct mqtt_ringbuf_t *rb, const char *str, u16_t length) +{ + u16_t n; + mqtt_ringbuf_put(rb, length >> 8); + mqtt_ringbuf_put(rb, length & 0xff); + for (n = 0; n < length; n++) { + mqtt_ringbuf_put(rb, str[n]); + } +} + +/** + * Append fixed header + * @param rb Output ring buffer + * @param msg_type see enum mqtt_message_type + * @param dup MQTT DUP flag + * @param qos MQTT QoS field + * @param retain MQTT retain flag + * @param r_length Remaining length after fixed header + */ + +static void +mqtt_output_append_fixed_header(struct mqtt_ringbuf_t *rb, u8_t msg_type, u8_t dup, + u8_t qos, u8_t retain, u16_t r_length) +{ + /* Start with control byte */ + mqtt_output_append_u8(rb, (((msg_type & 0x0f) << 4) | ((dup & 1) << 3) | ((qos & 3) << 1) | (retain & 1))); + /* Encode remaining length field */ + do { + mqtt_output_append_u8(rb, (r_length & 0x7f) | (r_length >= 128 ?
0x80 : 0)); + r_length >>= 7; + } while (r_length > 0); +} + + +/** + * Check output buffer space + * @param rb Output ring buffer + * @param r_length Remaining length after fixed header + * @return 1 if message will fit, 0 if not enough buffer space + */ +static u8_t +mqtt_output_check_space(struct mqtt_ringbuf_t *rb, u16_t r_length) +{ + /* Start with length of type byte + remaining length */ + u16_t total_len = 1 + r_length; + + LWIP_ASSERT("mqtt_output_check_space: rb != NULL", rb != NULL); + + /* Calculate number of required bytes to contain the remaining bytes field and add to total*/ + do { + total_len++; + r_length >>= 7; + } while (r_length > 0); + + return (total_len <= mqtt_ringbuf_free(rb)); +} + + +/** + * Close connection to server + * @param client MQTT client + * @param reason Reason for disconnection + */ +static void +mqtt_close(mqtt_client_t *client, mqtt_connection_status_t reason) +{ + LWIP_ASSERT("mqtt_close: client != NULL", client != NULL); + + /* Bring down TCP connection if not already done */ + if (client->conn != NULL) { + err_t res; + tcp_recv(client->conn, NULL); + tcp_err(client->conn, NULL); + tcp_sent(client->conn, NULL); + res = tcp_close(client->conn); + if (res != ERR_OK) { + tcp_abort(client->conn); + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_close: Close err=%s\n", lwip_strerr(res))); + } + client->conn = NULL; + } + + /* Remove all pending requests */ + mqtt_clear_requests(&client->pend_req_queue); + /* Stop cyclic timer */ + sys_untimeout(mqtt_cyclic_timer, client); + + /* Notify upper layer of disconnection if changed state */ + if (client->conn_state != TCP_DISCONNECTED) { + + client->conn_state = TCP_DISCONNECTED; + if (client->connect_cb != NULL) { + client->connect_cb(client, client->connect_arg, reason); + } + } +} + + +/** + * Interval timer, called every MQTT_CYCLIC_TIMER_INTERVAL seconds in MQTT_CONNECTING and MQTT_CONNECTED states + * @param arg MQTT client + */ +static void +mqtt_cyclic_timer(void *arg) +{ + u8_t restart_timer = 1; + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_ASSERT("mqtt_cyclic_timer: client != NULL", client != NULL); + + if (client->conn_state == MQTT_CONNECTING) { + client->cyclic_tick++; + if ((client->cyclic_tick * MQTT_CYCLIC_TIMER_INTERVAL) >= MQTT_CONNECT_TIMOUT) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_cyclic_timer: CONNECT attempt to server timed out\n")); + /* Disconnect TCP */ + mqtt_close(client, MQTT_CONNECT_TIMEOUT); + restart_timer = 0; + } + } else if (client->conn_state == MQTT_CONNECTED) { + /* Handle timeout for pending requests */ + mqtt_request_time_elapsed(&client->pend_req_queue, MQTT_CYCLIC_TIMER_INTERVAL); + + /* keep_alive > 0 means keep alive functionality shall be used */ + if (client->keep_alive > 0) { + + client->server_watchdog++; + /* If reception from server has been idle for 1.5*keep_alive time, server is considered unresponsive */ + if ((client->server_watchdog * MQTT_CYCLIC_TIMER_INTERVAL) > (client->keep_alive + client->keep_alive/2)) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_cyclic_timer: Server incoming keep-alive timeout\n")); + mqtt_close(client, MQTT_CONNECT_TIMEOUT); + restart_timer = 0; + } + + /* If time for a keep alive message to be sent, transmission has been idle for keep_alive time */ + if ((client->cyclic_tick * MQTT_CYCLIC_TIMER_INTERVAL) >= client->keep_alive) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_cyclic_timer: Sending keep-alive message to server\n")); + if (mqtt_output_check_space(&client->output, 0) != 0) { + mqtt_output_append_fixed_header(&client->output, 
MQTT_MSG_TYPE_PINGREQ, 0, 0, 0, 0); + client->cyclic_tick = 0; + } + } else { + client->cyclic_tick++; + } + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_cyclic_timer: Timer should not be running in state %d\n", client->conn_state)); + restart_timer = 0; + } + if (restart_timer) { + sys_timeout(MQTT_CYCLIC_TIMER_INTERVAL*1000, mqtt_cyclic_timer, arg); + } +} + + +/** + * Send PUBACK, PUBREC or PUBREL response message + * @param client MQTT client + * @param msg PUBACK, PUBREC or PUBREL + * @param pkt_id Packet identifier + * @param qos QoS value + * @return ERR_OK if successful, ERR_MEM if out of memory + */ +static err_t +pub_ack_rec_rel_response(mqtt_client_t *client, u8_t msg, u16_t pkt_id, u8_t qos) +{ + err_t err = ERR_OK; + if (mqtt_output_check_space(&client->output, 2)) { + mqtt_output_append_fixed_header(&client->output, msg, 0, qos, 0, 2); + mqtt_output_append_u16(&client->output, pkt_id); + mqtt_output_send(&client->output, client->conn); + } else { + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("pub_ack_rec_rel_response: OOM creating response: %s with pkt_id: %d\n", + mqtt_msg_type_to_str(msg), pkt_id)); + err = ERR_MEM; + } + return err; +} + +/** + * Subscribe response from server + * @param r Matching request + * @param result Result code from server + */ +static void +mqtt_incomming_suback(struct mqtt_request_t *r, u8_t result) +{ + if (r->cb != NULL) { + r->cb(r->arg, result < 3 ? ERR_OK : ERR_ABRT); + } +} + + +/** + * Complete MQTT message received or buffer full + * @param client MQTT client + * @param fixed_hdr_idx header index + * @param length length received part + * @param remaining_length Remaining length of complete message + */ +static mqtt_connection_status_t + mqtt_message_received(mqtt_client_t *client, u8_t fixed_hdr_idx, u16_t length, u32_t remaining_length) +{ + mqtt_connection_status_t res = MQTT_CONNECT_ACCEPTED; + + u8_t *var_hdr_payload = client->rx_buffer + fixed_hdr_idx; + + /* Control packet type */ + u8_t pkt_type = MQTT_CTL_PACKET_TYPE(client->rx_buffer[0]); + u16_t pkt_id = 0; + + if (pkt_type == MQTT_MSG_TYPE_CONNACK) { + if (client->conn_state == MQTT_CONNECTING) { + /* Get result code from CONNACK */ + res = (mqtt_connection_status_t)var_hdr_payload[1]; + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_message_received: Connect response code %d\n", res)); + if (res == MQTT_CONNECT_ACCEPTED) { + /* Reset cyclic_tick when changing to connected state */ + client->cyclic_tick = 0; + client->conn_state = MQTT_CONNECTED; + /* Notify upper layer */ + if (client->connect_cb != 0) { + client->connect_cb(client, client->connect_arg, res); + } + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_message_received: Received CONNACK in connected state\n")); + } + } else if (pkt_type == MQTT_MSG_TYPE_PINGRESP) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE,( "mqtt_message_received: Received PINGRESP from server\n")); + + } else if (pkt_type == MQTT_MSG_TYPE_PUBLISH) { + u16_t payload_offset = 0; + u16_t payload_length = length; + u8_t qos = MQTT_CTL_PACKET_QOS(client->rx_buffer[0]); + + if (client->msg_idx <= MQTT_VAR_HEADER_BUFFER_LEN) { + /* Should have topic and pkt id*/ + uint8_t *topic; + uint16_t after_topic; + u8_t bkp; + u16_t topic_len = var_hdr_payload[0]; + topic_len = (topic_len << 8) + (u16_t)(var_hdr_payload[1]); + + topic = var_hdr_payload + 2; + after_topic = 2 + topic_len; + /* Check length, add one byte even for QoS 0 so that zero termination will fit */ + if ((after_topic + (qos? 
2 : 1)) > length) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_message_received: Receive buffer can not fit topic + pkt_id\n")); + goto out_disconnect; + } + + /* id for QoS 1 and 2 */ + if (qos > 0) { + client->inpub_pkt_id = ((u16_t)var_hdr_payload[after_topic] << 8) + (u16_t)var_hdr_payload[after_topic + 1]; + after_topic += 2; + } else { + client->inpub_pkt_id = 0; + } + /* Take backup of byte after topic */ + bkp = topic[topic_len]; + /* Zero terminate string */ + topic[topic_len] = 0; + /* Payload data remaining in receive buffer */ + payload_length = length - after_topic; + payload_offset = after_topic; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_incomming_publish: Received message with QoS %d at topic: %s, payload length %d\n", + qos, topic, remaining_length + payload_length)); + if (client->pub_cb != NULL) { + client->pub_cb(client->inpub_arg, (const char *)topic, remaining_length + payload_length); + } + /* Restore byte after topic */ + topic[topic_len] = bkp; + } + if (payload_length > 0 || remaining_length == 0) { + client->data_cb(client->inpub_arg, var_hdr_payload + payload_offset, payload_length, remaining_length == 0 ? MQTT_DATA_FLAG_LAST : 0); + /* Reply if QoS > 0 */ + if (remaining_length == 0 && qos > 0) { + /* Send PUBACK for QoS 1 or PUBREC for QoS 2 */ + u8_t resp_msg = (qos == 1) ? MQTT_MSG_TYPE_PUBACK : MQTT_MSG_TYPE_PUBREC; + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_incomming_publish: Sending publish response: %s with pkt_id: %d\n", + mqtt_msg_type_to_str(resp_msg), client->inpub_pkt_id)); + pub_ack_rec_rel_response(client, resp_msg, client->inpub_pkt_id, 0); + } + } + } else { + /* Get packet identifier */ + pkt_id = (u16_t)var_hdr_payload[0] << 8; + pkt_id |= (u16_t)var_hdr_payload[1]; + if (pkt_id == 0) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_message_received: Got message with illegal packet identifier: 0\n")); + goto out_disconnect; + } + if (pkt_type == MQTT_MSG_TYPE_PUBREC) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_message_received: PUBREC, sending PUBREL with pkt_id: %d\n",pkt_id)); + pub_ack_rec_rel_response(client, MQTT_MSG_TYPE_PUBREL, pkt_id, 1); + + } else if (pkt_type == MQTT_MSG_TYPE_PUBREL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_message_received: PUBREL, sending PUBCOMP response with pkt_id: %d\n",pkt_id)); + pub_ack_rec_rel_response(client, MQTT_MSG_TYPE_PUBCOMP, pkt_id, 0); + + } else if (pkt_type == MQTT_MSG_TYPE_SUBACK || pkt_type == MQTT_MSG_TYPE_UNSUBACK || + pkt_type == MQTT_MSG_TYPE_PUBCOMP || pkt_type == MQTT_MSG_TYPE_PUBACK) { + struct mqtt_request_t *r = mqtt_take_request(&client->pend_req_queue, pkt_id); + if (r != NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_message_received: %s response with id %d\n", mqtt_msg_type_to_str(pkt_type), pkt_id)); + if (pkt_type == MQTT_MSG_TYPE_SUBACK) { + if (length < 3) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_message_received: Too small SUBACK packet\n")); + goto out_disconnect; + } else { + mqtt_incomming_suback(r, var_hdr_payload[2]); + } + } else if (r->cb != NULL) { + r->cb(r->arg, ERR_OK); + } + mqtt_delete_request(r); + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received %s reply, with wrong pkt_id: %d\n", mqtt_msg_type_to_str(pkt_type), pkt_id)); + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received unknown message type: %d\n", pkt_type)); + goto out_disconnect; + } + } + return res; +out_disconnect: + return MQTT_CONNECT_DISCONNECTED; +} + + +/** + * MQTT incoming message parser + * @param client MQTT client + * @param p PBUF chain of received data + * @return
Connection status + */ +static mqtt_connection_status_t +mqtt_parse_incoming(mqtt_client_t *client, struct pbuf *p) +{ + u16_t in_offset = 0; + u32_t msg_rem_len = 0; + u8_t fixed_hdr_idx = 0; + u8_t b = 0; + + while (p->tot_len > in_offset) { + if ((fixed_hdr_idx < 2) || ((b & 0x80) != 0)) { + + if (fixed_hdr_idx < client->msg_idx) { + b = client->rx_buffer[fixed_hdr_idx]; + } else { + b = pbuf_get_at(p, in_offset++); + client->rx_buffer[client->msg_idx++] = b; + } + fixed_hdr_idx++; + + if (fixed_hdr_idx >= 2) { + msg_rem_len |= (u32_t)(b & 0x7f) << ((fixed_hdr_idx - 2) * 7); + if ((b & 0x80) == 0) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_parse_incoming: Remaining length after fixed header: %d\n", msg_rem_len)); + if (msg_rem_len == 0) { + /* Complete message with no extra headers or payload received */ + mqtt_message_received(client, fixed_hdr_idx, 0, 0); + client->msg_idx = 0; + fixed_hdr_idx = 0; + } else { + /* Bytes remaining in message */ + msg_rem_len = (msg_rem_len + fixed_hdr_idx) - client->msg_idx; + } + } + } + } else { + u16_t cpy_len, cpy_start, buffer_space; + + cpy_start = (client->msg_idx - fixed_hdr_idx) % (MQTT_VAR_HEADER_BUFFER_LEN - fixed_hdr_idx) + fixed_hdr_idx; + + /* Copy the lesser of the available input data length and the bytes remaining in the message */ + cpy_len = (u16_t)LWIP_MIN((u16_t)(p->tot_len - in_offset), msg_rem_len); + + /* Limit to available space in buffer */ + buffer_space = MQTT_VAR_HEADER_BUFFER_LEN - cpy_start; + if (cpy_len > buffer_space) { + cpy_len = buffer_space; + } + pbuf_copy_partial(p, client->rx_buffer+cpy_start, cpy_len, in_offset); + + /* Advance get and put indexes */ + client->msg_idx += cpy_len; + in_offset += cpy_len; + msg_rem_len -= cpy_len; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_parse_incoming: msg_idx: %d, cpy_len: %d, remaining %d\n", client->msg_idx, cpy_len, msg_rem_len)); + if (msg_rem_len == 0 || cpy_len == buffer_space) { + /* Whole message received or buffer is full */ + mqtt_connection_status_t res = mqtt_message_received(client, fixed_hdr_idx, (cpy_start + cpy_len) - fixed_hdr_idx, msg_rem_len); + if (res != MQTT_CONNECT_ACCEPTED) { + return res; + } + if (msg_rem_len == 0) { + /* Reset parser state */ + client->msg_idx = 0; + /* msg_tot_len = 0; */ + fixed_hdr_idx = 0; + } + } + } + } + return MQTT_CONNECT_ACCEPTED; +} + + +/** + * TCP received callback function.
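A worked example of the 7-bit "remaining length" encoding the parser above consumes: 321 is sent as 0xC1 0x02, since 321 = 65 + 2*128 and a set MSB means another length byte follows. A standalone decoder sketch (editorial; decode_remaining_length is a hypothetical helper, not an lwIP function):

static u32_t
decode_remaining_length(const u8_t *buf, u8_t *nbytes)
{
  u32_t value = 0;
  u8_t i = 0;
  do {
    value |= (u32_t)(buf[i] & 0x7f) << (7 * i);
  } while (((buf[i++] & 0x80) != 0) && (i < 4)); /* at most 4 length bytes */
  *nbytes = i;
  return value; /* e.g. {0xC1, 0x02} -> 321, with *nbytes == 2 */
}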
@see tcp_recv_fn + * @param arg MQTT client + * @param p PBUF chain of received data + * @param err Passed as return value if not ERR_OK + * @return ERR_OK or err passed into callback + */ +static err_t +mqtt_tcp_recv_cb(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_ASSERT("mqtt_tcp_recv_cb: client != NULL", client != NULL); + LWIP_ASSERT("mqtt_tcp_recv_cb: client->conn == pcb", client->conn == pcb); + + if (p == NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_tcp_recv_cb: Recv pbuf=NULL, remote has closed connection\n")); + mqtt_close(client, MQTT_CONNECT_DISCONNECTED); + } else { + mqtt_connection_status_t res; + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_tcp_recv_cb: Recv err=%d\n", err)); + pbuf_free(p); + return err; + } + + /* Tell remote that data has been received */ + tcp_recved(pcb, p->tot_len); + res = mqtt_parse_incoming(client, p); + pbuf_free(p); + + if (res != MQTT_CONNECT_ACCEPTED) { + mqtt_close(client, res); + } + /* If keep alive functionality is used */ + if (client->keep_alive != 0) { + /* Reset server alive watchdog */ + client->server_watchdog = 0; + } + + } + return ERR_OK; +} + + +/** + * TCP data sent callback function. @see tcp_sent_fn + * @param arg MQTT client + * @param tpcb TCP connection handle + * @param len Number of bytes sent + * @return ERR_OK + */ +static err_t +mqtt_tcp_sent_cb(void *arg, struct tcp_pcb *tpcb, u16_t len) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + + LWIP_UNUSED_ARG(tpcb); + LWIP_UNUSED_ARG(len); + + if (client->conn_state == MQTT_CONNECTED) { + struct mqtt_request_t *r; + + /* Reset keep-alive send timer and server watchdog */ + client->cyclic_tick = 0; + client->server_watchdog = 0; + /* QoS 0 publish has no response from server, so call its callbacks here */ + while ((r = mqtt_take_request(&client->pend_req_queue, 0)) != NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_tcp_sent_cb: Calling QoS 0 publish complete callback\n")); + if (r->cb != NULL) { + r->cb(r->arg, ERR_OK); + } + mqtt_delete_request(r); + } + /* Try send any remaining buffers from output queue */ + mqtt_output_send(&client->output, client->conn); + } + return ERR_OK; +} + +/** + * TCP error callback function. @see tcp_err_fn + * @param arg MQTT client + * @param err Error encountered + */ +static void +mqtt_tcp_err_cb(void *arg, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_UNUSED_ARG(err); /* only used for debug output */ + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_tcp_err_cb: TCP error callback: error %d, arg: %p\n", err, arg)); + LWIP_ASSERT("mqtt_tcp_err_cb: client != NULL", client != NULL); + /* Set conn to null before calling close as pcb is already deallocated*/ + client->conn = 0; + mqtt_close(client, MQTT_CONNECT_DISCONNECTED); +} + +/** + * TCP poll callback function. @see tcp_poll_fn + * @param arg MQTT client + * @param tpcb TCP connection handle + * @return err ERR_OK + */ +static err_t +mqtt_tcp_poll_cb(void *arg, struct tcp_pcb *tpcb) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + if (client->conn_state == MQTT_CONNECTED) { + /* Try send any remaining buffers from output queue */ + mqtt_output_send(&client->output, tpcb); + } + return ERR_OK; +} + +/** + * TCP connect callback function. 
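An end-to-end usage sketch of the public API defined below (editorial; conn_cb, mqtt_example_start and the topic strings are hypothetical, and error handling is elided):

static void
conn_cb(mqtt_client_t *c, void *arg, mqtt_connection_status_t status)
{
  LWIP_UNUSED_ARG(arg);
  if (status == MQTT_CONNECT_ACCEPTED) {
    /* Subscribe once the CONNACK is in: topic, qos 1, no callback, sub = 1 */
    mqtt_sub_unsub(c, "sensors/#", 1, NULL, NULL, 1);
    /* Publish 2 payload bytes at qos 0, no retain, no completion callback */
    mqtt_publish(c, "status", "up", 2, 0, 0, NULL, NULL);
  }
}

void
mqtt_example_start(const ip_addr_t *broker_addr)
{
  static struct mqtt_connect_client_info_t ci;
  mqtt_client_t *c = mqtt_client_new();
  if (c == NULL) {
    return;
  }
  memset(&ci, 0, sizeof(ci));
  ci.client_id = "lwip_example"; /* the only mandatory field */
  ci.keep_alive = 60;            /* seconds; 0 disables keep-alive */
  mqtt_client_connect(c, broker_addr, 1883, conn_cb, NULL, &ci);
}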
@see tcp_connected_fn + * @param arg MQTT client + * @param err Always ERR_OK, mqtt_tcp_err_cb is called in case of error + * @return ERR_OK + */ +static err_t +mqtt_tcp_connect_cb(void *arg, struct tcp_pcb *tpcb, err_t err) +{ + mqtt_client_t* client = (mqtt_client_t *)arg; + + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_tcp_connect_cb: TCP connect error %d\n", err)); + return err; + } + + /* Initiate receiver state */ + client->msg_idx = 0; + + /* Setup TCP callbacks */ + tcp_recv(tpcb, mqtt_tcp_recv_cb); + tcp_sent(tpcb, mqtt_tcp_sent_cb); + tcp_poll(tpcb, mqtt_tcp_poll_cb, 2); + + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_tcp_connect_cb: TCP connection established to server\n")); + /* Enter MQTT connect state */ + client->conn_state = MQTT_CONNECTING; + + /* Start cyclic timer */ + sys_timeout(MQTT_CYCLIC_TIMER_INTERVAL*1000, mqtt_cyclic_timer, client); + client->cyclic_tick = 0; + + /* Start transmission from output queue, connect message is the first one out */ + mqtt_output_send(&client->output, client->conn); + + return ERR_OK; +} + + + +/*---------------------------------------------------------------------------------------------------- */ +/* Public API */ + + +/** + * @ingroup mqtt + * MQTT publish function. + * @param client MQTT client + * @param topic Publish topic string + * @param payload Data to publish (NULL is allowed) + * @param payload_length Length of payload (0 is allowed) + * @param qos Quality of service, 0, 1 or 2 + * @param retain MQTT retain flag + * @param cb Callback to call when publish is complete or has timed out + * @param arg User supplied argument to publish callback + * @return ERR_OK if successful + * ERR_CONN if client is disconnected + * ERR_MEM if short on memory + */ +err_t +mqtt_publish(mqtt_client_t *client, const char *topic, const void *payload, u16_t payload_length, u8_t qos, u8_t retain, + mqtt_request_cb_t cb, void *arg) +{ + struct mqtt_request_t *r; + u16_t pkt_id; + size_t topic_strlen; + size_t total_len; + u16_t topic_len; + u16_t remaining_length; + + LWIP_ASSERT("mqtt_publish: client != NULL", client); + LWIP_ASSERT("mqtt_publish: topic != NULL", topic); + LWIP_ERROR("mqtt_publish: TCP disconnected", (client->conn_state != TCP_DISCONNECTED), return ERR_CONN); + + topic_strlen = strlen(topic); + LWIP_ERROR("mqtt_publish: topic length overflow", (topic_strlen <= (0xFFFF - 2)), return ERR_ARG); + topic_len = (u16_t)topic_strlen; + total_len = 2 + topic_len + payload_length; + LWIP_ERROR("mqtt_publish: total length overflow", (total_len <= 0xFFFF), return ERR_ARG); + remaining_length = (u16_t)total_len; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_publish: Publish with payload length %d to topic \"%s\"\n", payload_length, topic)); + + if (qos > 0) { + remaining_length += 2; + /* Generate pkt_id for QoS 1 and 2 */ + pkt_id = msg_generate_packet_id(client); + } else { + /* Use reserved value pkt_id 0 for QoS 0 in request handle */ + pkt_id = 0; + } + + r = mqtt_create_request(client->req_list, pkt_id, cb, arg); + if (r == NULL) { + return ERR_MEM; + } + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + mqtt_delete_request(r); + return ERR_MEM; + } + /* Append fixed header */ + mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_PUBLISH, 0, qos, retain, remaining_length); + + /* Append Topic */ + mqtt_output_append_string(&client->output, topic, topic_len); + + /* Append packet id for QoS 1 and 2 */ + if (qos > 0) { + mqtt_output_append_u16(&client->output, pkt_id); + } + + /* Append optional publish
payload */ + if ((payload != NULL) && (payload_length > 0)) { + mqtt_output_append_buf(&client->output, payload, payload_length); + } + + mqtt_append_request(&client->pend_req_queue, r); + mqtt_output_send(&client->output, client->conn); + return ERR_OK; +} + + +/** + * @ingroup mqtt + * MQTT subscribe/unsubscribe function. + * @param client MQTT client + * @param topic topic to subscribe to + * @param qos Quality of service, 0, 1 or 2 (only used for subscribe) + * @param cb Callback to call when subscribe/unsubscribe response is received + * @param arg User supplied argument to the callback + * @param sub 1 for subscribe, 0 for unsubscribe + * @return ERR_OK if successful, @see err_t enum for other results + */ +err_t +mqtt_sub_unsub(mqtt_client_t *client, const char *topic, u8_t qos, mqtt_request_cb_t cb, void *arg, u8_t sub) +{ + size_t topic_strlen; + size_t total_len; + u16_t topic_len; + u16_t remaining_length; + u16_t pkt_id; + struct mqtt_request_t *r; + + LWIP_ASSERT("mqtt_sub_unsub: client != NULL", client); + LWIP_ASSERT("mqtt_sub_unsub: topic != NULL", topic); + + topic_strlen = strlen(topic); + LWIP_ERROR("mqtt_sub_unsub: topic length overflow", (topic_strlen <= (0xFFFF - 2)), return ERR_ARG); + topic_len = (u16_t)topic_strlen; + /* Topic string, pkt_id, qos for subscribe */ + total_len = topic_len + 2 + 2 + (sub != 0); + LWIP_ERROR("mqtt_sub_unsub: total length overflow", (total_len <= 0xFFFF), return ERR_ARG); + remaining_length = (u16_t)total_len; + + LWIP_ASSERT("mqtt_sub_unsub: qos < 3", qos < 3); + if (client->conn_state == TCP_DISCONNECTED) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_sub_unsub: Can not (un)subscribe in disconnected state\n")); + return ERR_CONN; + } + + pkt_id = msg_generate_packet_id(client); + r = mqtt_create_request(client->req_list, pkt_id, cb, arg); + if (r == NULL) { + return ERR_MEM; + } + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + mqtt_delete_request(r); + return ERR_MEM; + } + + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_sub_unsub: Client (un)subscribe to topic \"%s\", id: %d\n", topic, pkt_id)); + + mqtt_output_append_fixed_header(&client->output, sub ?
MQTT_MSG_TYPE_SUBSCRIBE : MQTT_MSG_TYPE_UNSUBSCRIBE, 0, 1, 0, remaining_length); + /* Packet id */ + mqtt_output_append_u16(&client->output, pkt_id); + /* Topic */ + mqtt_output_append_string(&client->output, topic, topic_len); + /* QoS */ + if (sub != 0) { + mqtt_output_append_u8(&client->output, LWIP_MIN(qos, 2)); + } + + mqtt_append_request(&client->pend_req_queue, r); + mqtt_output_send(&client->output, client->conn); + return ERR_OK; +} + + +/** + * @ingroup mqtt + * Set callback to handle incoming publish requests from server + * @param client MQTT client + * @param pub_cb Callback invoked when publish starts, contains topic and total length of payload + * @param data_cb Callback for each fragment of payload that arrives + * @param arg User supplied argument to both callbacks + */ +void +mqtt_set_inpub_callback(mqtt_client_t *client, mqtt_incoming_publish_cb_t pub_cb, + mqtt_incoming_data_cb_t data_cb, void *arg) +{ + LWIP_ASSERT("mqtt_set_inpub_callback: client != NULL", client != NULL); + client->data_cb = data_cb; + client->pub_cb = pub_cb; + client->inpub_arg = arg; +} + +/** + * @ingroup mqtt + * Create a new MQTT client instance + * @return Pointer to instance on success, NULL otherwise + */ +mqtt_client_t * +mqtt_client_new(void) +{ + mqtt_client_t *client = (mqtt_client_t *)mem_malloc(sizeof(mqtt_client_t)); + if (client != NULL) { + memset(client, 0, sizeof(mqtt_client_t)); + } + return client; +} + + +/** + * @ingroup mqtt + * Connect to MQTT server + * @param client MQTT client + * @param ip_addr Server IP + * @param port Server port + * @param cb Connection state change callback + * @param arg User supplied argument to connection callback + * @param client_info Client identification and connection options + * @return ERR_OK if successful, @see err_t enum for other results + */ +err_t +mqtt_client_connect(mqtt_client_t *client, const ip_addr_t *ip_addr, u16_t port, mqtt_connection_cb_t cb, void *arg, + const struct mqtt_connect_client_info_t *client_info) +{ + err_t err; + size_t len; + u16_t client_id_length; + /* Length is the sum of 2+"MQTT", protocol level, flags and keep alive */ + u16_t remaining_length = 2 + 4 + 1 + 1 + 2; + u8_t flags = 0, will_topic_len = 0, will_msg_len = 0; + + LWIP_ASSERT("mqtt_client_connect: client != NULL", client != NULL); + LWIP_ASSERT("mqtt_client_connect: ip_addr != NULL", ip_addr != NULL); + LWIP_ASSERT("mqtt_client_connect: client_info != NULL", client_info != NULL); + LWIP_ASSERT("mqtt_client_connect: client_info->client_id != NULL", client_info->client_id != NULL); + + if (client->conn_state != TCP_DISCONNECTED) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_client_connect: Already connected\n")); + return ERR_ISCONN; + } + + /* Wipe clean */ + memset(client, 0, sizeof(mqtt_client_t)); + client->connect_arg = arg; + client->connect_cb = cb; + client->keep_alive = client_info->keep_alive; + mqtt_init_requests(client->req_list); + + /* Build connect message */ + if (client_info->will_topic != NULL && client_info->will_msg != NULL) { + flags |= MQTT_CONNECT_FLAG_WILL; + flags |= (client_info->will_qos & 3) << 3; + if (client_info->will_retain) { + flags |= MQTT_CONNECT_FLAG_WILL_RETAIN; + } + len = strlen(client_info->will_topic); + LWIP_ERROR("mqtt_client_connect: client_info->will_topic length overflow", len <= 0xFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->will_topic length must be > 0", len > 0, return ERR_VAL); + will_topic_len = (u8_t)len; + len = strlen(client_info->will_msg); + LWIP_ERROR("mqtt_client_connect:
client_info->will_msg length overflow", len <= 0xFF, return ERR_VAL); + will_msg_len = (u8_t)len; + len = remaining_length + 2 + will_topic_len + 2 + will_msg_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + + /* Don't complicate things, always connect using clean session */ + flags |= MQTT_CONNECT_FLAG_CLEAN_SESSION; + + len = strlen(client_info->client_id); + LWIP_ERROR("mqtt_client_connect: client_info->client_id length overflow", len <= 0xFFFF, return ERR_VAL); + client_id_length = (u16_t)len; + len = remaining_length + 2 + client_id_length; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + return ERR_MEM; + } + + client->conn = tcp_new(); + if (client->conn == NULL) { + return ERR_MEM; + } + + /* Set arg pointer for callbacks */ + tcp_arg(client->conn, client); + /* Any local address, pick random local port number */ + err = tcp_bind(client->conn, IP_ADDR_ANY, 0); + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_client_connect: Error binding to local ip/port, %d\n", err)); + goto tcp_fail; + } + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_client_connect: Connecting to host: %s at port:%"U16_F"\n", ipaddr_ntoa(ip_addr), port)); + + /* Connect to server */ + err = tcp_connect(client->conn, ip_addr, port, mqtt_tcp_connect_cb); + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_client_connect: Error connecting to remote ip/port, %d\n", err)); + goto tcp_fail; + } + /* Set error callback */ + tcp_err(client->conn, mqtt_tcp_err_cb); + client->conn_state = TCP_CONNECTING; + + /* Append fixed header */ + mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_CONNECT, 0, 0, 0, remaining_length); + /* Append Protocol string */ + mqtt_output_append_string(&client->output, "MQTT", 4); + /* Append Protocol level */ + mqtt_output_append_u8(&client->output, 4); + /* Append connect flags */ + mqtt_output_append_u8(&client->output, flags); + /* Append keep-alive */ + mqtt_output_append_u16(&client->output, client_info->keep_alive); + /* Append client id */ + mqtt_output_append_string(&client->output, client_info->client_id, client_id_length); + /* Append will message if used */ + if ((flags & MQTT_CONNECT_FLAG_WILL) != 0) { + mqtt_output_append_string(&client->output, client_info->will_topic, will_topic_len); + mqtt_output_append_string(&client->output, client_info->will_msg, will_msg_len); + } + return ERR_OK; + +tcp_fail: + tcp_abort(client->conn); + client->conn = NULL; + return err; +} + + +/** + * @ingroup mqtt + * Disconnect from MQTT server + * @param client MQTT client + */ +void +mqtt_disconnect(mqtt_client_t *client) +{ + LWIP_ASSERT("mqtt_disconnect: client != NULL", client); + /* If connection is not already closed */ + if (client->conn_state != TCP_DISCONNECTED) { + /* Set conn_state before calling mqtt_close to prevent callback from being called */ + client->conn_state = TCP_DISCONNECTED; + mqtt_close(client, (mqtt_connection_status_t)0); + } +} + +/** + * @ingroup mqtt + * Check connection with server + * @param client MQTT client + * @return 1 if connected to server, 0 otherwise + */ +u8_t +mqtt_client_is_connected(mqtt_client_t *client) +{ + LWIP_ASSERT("mqtt_client_is_connected: client != NULL", client); + return client->conn_state == MQTT_CONNECTED; +} + +#endif /* LWIP_TCP && LWIP_CALLBACK_API */ diff --git
a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/apps/sntp/sntp.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/apps/sntp/sntp.c new file mode 100644 index 000000000..7bfd85b52 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/apps/sntp/sntp.c @@ -0,0 +1,727 @@ +/** + * @file + * SNTP client module + */ + +/* + * Copyright (c) 2007-2009 Frédéric Bernon, Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Frédéric Bernon, Simon Goldschmidt + */ + + +/** + * @defgroup sntp SNTP + * @ingroup apps + * + * This is a simple "SNTP" client for the lwIP raw API. + * It is a minimal implementation of SNTPv4 as specified in RFC 4330. + * + * For a list of some public NTP servers, see this link: + * http://support.ntp.org/bin/view/Servers/NTPPoolServers + * + * @todo: + * - set/change servers at runtime + * - complete SNTP_CHECK_RESPONSE checks 3 and 4 + */ + +#include "lwip/apps/sntp.h" + +#include "lwip/opt.h" +#include "lwip/timeouts.h" +#include "lwip/udp.h" +#include "lwip/dns.h" +#include "lwip/ip_addr.h" +#include "lwip/pbuf.h" +#include "lwip/dhcp.h" + +#include <string.h> +#include <time.h> + +#if LWIP_UDP + +/* Handle support for more than one server via SNTP_MAX_SERVERS */ +#if SNTP_MAX_SERVERS > 1 +#define SNTP_SUPPORT_MULTIPLE_SERVERS 1 +#else /* SNTP_MAX_SERVERS > 1 */ +#define SNTP_SUPPORT_MULTIPLE_SERVERS 0 +#endif /* SNTP_MAX_SERVERS > 1 */ + +#if (SNTP_UPDATE_DELAY < 15000) && !defined(SNTP_SUPPRESS_DELAY_CHECK) +#error "SNTPv4 RFC 4330 enforces a minimum update time of 15 seconds (define SNTP_SUPPRESS_DELAY_CHECK to disable this error)!"
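A wiring sketch (editorial, not from the lwIP tree) of how a port typically hooks this module up: SNTP_SET_SYSTEM_TIME is supplied through lwipopts.h (my_rtc_set is a hypothetical RTC hook), and the client is started once the network interface is up; sntp_setservername() additionally requires SNTP_SERVER_DNS:

/* in lwipopts.h (assumed):
 *   #define SNTP_SET_SYSTEM_TIME(sec)  my_rtc_set(sec)
 */
void
sntp_example_start(void)
{
  sntp_setoperatingmode(SNTP_OPMODE_POLL); /* poll servers cyclically */
  sntp_setservername(0, "pool.ntp.org");   /* or sntp_setserver() with an ip_addr_t */
  sntp_init();
}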
+#endif + +/* Configure behaviour depending on microsecond or second precision */ +#ifdef SNTP_SET_SYSTEM_TIME_US +#define SNTP_CALC_TIME_US 1 +#define SNTP_RECEIVE_TIME_SIZE 2 +#else +#define SNTP_SET_SYSTEM_TIME_US(sec, us) +#define SNTP_CALC_TIME_US 0 +#define SNTP_RECEIVE_TIME_SIZE 1 +#endif + + +/* the various debug levels for this file */ +#define SNTP_DEBUG_TRACE (SNTP_DEBUG | LWIP_DBG_TRACE) +#define SNTP_DEBUG_STATE (SNTP_DEBUG | LWIP_DBG_STATE) +#define SNTP_DEBUG_WARN (SNTP_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define SNTP_DEBUG_WARN_STATE (SNTP_DEBUG | LWIP_DBG_LEVEL_WARNING | LWIP_DBG_STATE) +#define SNTP_DEBUG_SERIOUS (SNTP_DEBUG | LWIP_DBG_LEVEL_SERIOUS) + +#define SNTP_ERR_KOD 1 + +/* SNTP protocol defines */ +#define SNTP_MSG_LEN 48 + +#define SNTP_OFFSET_LI_VN_MODE 0 +#define SNTP_LI_MASK 0xC0 +#define SNTP_LI_NO_WARNING 0x00 +#define SNTP_LI_LAST_MINUTE_61_SEC 0x01 +#define SNTP_LI_LAST_MINUTE_59_SEC 0x02 +#define SNTP_LI_ALARM_CONDITION 0x03 /* (clock not synchronized) */ + +#define SNTP_VERSION_MASK 0x38 +#define SNTP_VERSION (4/* NTP Version 4*/<<3) + +#define SNTP_MODE_MASK 0x07 +#define SNTP_MODE_CLIENT 0x03 +#define SNTP_MODE_SERVER 0x04 +#define SNTP_MODE_BROADCAST 0x05 + +#define SNTP_OFFSET_STRATUM 1 +#define SNTP_STRATUM_KOD 0x00 + +#define SNTP_OFFSET_ORIGINATE_TIME 24 +#define SNTP_OFFSET_RECEIVE_TIME 32 +#define SNTP_OFFSET_TRANSMIT_TIME 40 + +/* number of seconds between 1900 and 1970 (MSB=1)*/ +#define DIFF_SEC_1900_1970 (2208988800UL) +/* number of seconds between 1970 and Feb 7, 2036 (6:28:16 UTC) (MSB=0) */ +#define DIFF_SEC_1970_2036 (2085978496UL) + +/** + * SNTP packet format (without optional fields) + * Timestamps are coded as 64 bits: + * - 32 bits seconds since Jan 01, 1900, 00:00 + * - 32 bits seconds fraction (0-padded) + * For future use, if the MSB in the seconds part is set, seconds are based + * on Feb 07, 2036, 06:28:16. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct sntp_msg { + PACK_STRUCT_FLD_8(u8_t li_vn_mode); + PACK_STRUCT_FLD_8(u8_t stratum); + PACK_STRUCT_FLD_8(u8_t poll); + PACK_STRUCT_FLD_8(u8_t precision); + PACK_STRUCT_FIELD(u32_t root_delay); + PACK_STRUCT_FIELD(u32_t root_dispersion); + PACK_STRUCT_FIELD(u32_t reference_identifier); + PACK_STRUCT_FIELD(u32_t reference_timestamp[2]); + PACK_STRUCT_FIELD(u32_t originate_timestamp[2]); + PACK_STRUCT_FIELD(u32_t receive_timestamp[2]); + PACK_STRUCT_FIELD(u32_t transmit_timestamp[2]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* function prototypes */ +static void sntp_request(void *arg); + +/** The operating mode */ +static u8_t sntp_opmode; + +/** The UDP pcb used by the SNTP client */ +static struct udp_pcb* sntp_pcb; +/** Names/Addresses of servers */ +struct sntp_server { +#if SNTP_SERVER_DNS + char* name; +#endif /* SNTP_SERVER_DNS */ + ip_addr_t addr; +}; +static struct sntp_server sntp_servers[SNTP_MAX_SERVERS]; + +#if SNTP_GET_SERVERS_FROM_DHCP +static u8_t sntp_set_servers_from_dhcp; +#endif /* SNTP_GET_SERVERS_FROM_DHCP */ +#if SNTP_SUPPORT_MULTIPLE_SERVERS +/** The currently used server (initialized to 0) */ +static u8_t sntp_current_server; +#else /* SNTP_SUPPORT_MULTIPLE_SERVERS */ +#define sntp_current_server 0 +#endif /* SNTP_SUPPORT_MULTIPLE_SERVERS */ + +#if SNTP_RETRY_TIMEOUT_EXP +#define SNTP_RESET_RETRY_TIMEOUT() sntp_retry_timeout = SNTP_RETRY_TIMEOUT +/** Retry time, initialized with SNTP_RETRY_TIMEOUT and doubled with each retry.
*/ +static u32_t sntp_retry_timeout; +#else /* SNTP_RETRY_TIMEOUT_EXP */ +#define SNTP_RESET_RETRY_TIMEOUT() +#define sntp_retry_timeout SNTP_RETRY_TIMEOUT +#endif /* SNTP_RETRY_TIMEOUT_EXP */ + +#if SNTP_CHECK_RESPONSE >= 1 +/** Saves the last server address to compare with response */ +static ip_addr_t sntp_last_server_address; +#endif /* SNTP_CHECK_RESPONSE >= 1 */ + +#if SNTP_CHECK_RESPONSE >= 2 +/** Saves the last timestamp sent (which is sent back by the server) + * to compare against in response */ +static u32_t sntp_last_timestamp_sent[2]; +#endif /* SNTP_CHECK_RESPONSE >= 2 */ + +/** + * SNTP processing of received timestamp + */ +static void +sntp_process(u32_t *receive_timestamp) +{ + /* convert SNTP time (1900-based) to unix GMT time (1970-based) + * if MSB is 0, SNTP time is 2036-based! + */ + u32_t rx_secs = lwip_ntohl(receive_timestamp[0]); + int is_1900_based = ((rx_secs & 0x80000000) != 0); + u32_t t = is_1900_based ? (rx_secs - DIFF_SEC_1900_1970) : (rx_secs + DIFF_SEC_1970_2036); + time_t tim = t; + +#if SNTP_CALC_TIME_US + u32_t us = lwip_ntohl(receive_timestamp[1]) / 4295; + SNTP_SET_SYSTEM_TIME_US(t, us); + /* display local time from GMT time */ + LWIP_DEBUGF(SNTP_DEBUG_TRACE, ("sntp_process: %s, %"U32_F" us", ctime(&tim), us)); + +#else /* SNTP_CALC_TIME_US */ + + /* change system time and/or the update the RTC clock */ + SNTP_SET_SYSTEM_TIME(t); + /* display local time from GMT time */ + LWIP_DEBUGF(SNTP_DEBUG_TRACE, ("sntp_process: %s", ctime(&tim))); +#endif /* SNTP_CALC_TIME_US */ + LWIP_UNUSED_ARG(tim); +} + +/** + * Initialize request struct to be sent to server. + */ +static void +sntp_initialize_request(struct sntp_msg *req) +{ + memset(req, 0, SNTP_MSG_LEN); + req->li_vn_mode = SNTP_LI_NO_WARNING | SNTP_VERSION | SNTP_MODE_CLIENT; + +#if SNTP_CHECK_RESPONSE >= 2 + { + u32_t sntp_time_sec, sntp_time_us; + /* fill in transmit timestamp and save it in 'sntp_last_timestamp_sent' */ + SNTP_GET_SYSTEM_TIME(sntp_time_sec, sntp_time_us); + sntp_last_timestamp_sent[0] = lwip_htonl(sntp_time_sec + DIFF_SEC_1900_1970); + req->transmit_timestamp[0] = sntp_last_timestamp_sent[0]; + /* we send/save us instead of fraction to be faster... */ + sntp_last_timestamp_sent[1] = lwip_htonl(sntp_time_us); + req->transmit_timestamp[1] = sntp_last_timestamp_sent[1]; + } +#endif /* SNTP_CHECK_RESPONSE >= 2 */ +} + +/** + * Retry: send a new request (and increase retry timeout). + * + * @param arg is unused (only necessary to conform to sys_timeout) + */ +static void +sntp_retry(void* arg) +{ + LWIP_UNUSED_ARG(arg); + + LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_retry: Next request will be sent in %"U32_F" ms\n", + sntp_retry_timeout)); + + /* set up a timer to send a retry and increase the retry delay */ + sys_timeout(sntp_retry_timeout, sntp_request, NULL); + +#if SNTP_RETRY_TIMEOUT_EXP + { + u32_t new_retry_timeout; + /* increase the timeout for next retry */ + new_retry_timeout = sntp_retry_timeout << 1; + /* limit to maximum timeout and prevent overflow */ + if ((new_retry_timeout <= SNTP_RETRY_TIMEOUT_MAX) && + (new_retry_timeout > sntp_retry_timeout)) { + sntp_retry_timeout = new_retry_timeout; + } + } +#endif /* SNTP_RETRY_TIMEOUT_EXP */ +} + +#if SNTP_SUPPORT_MULTIPLE_SERVERS +/** + * If Kiss-of-Death is received (or another packet parsing error), + * try the next server or retry the current server and increase the retry + * timeout if only one server is available. 
+ * (implicitly, SNTP_MAX_SERVERS > 1) + * + * @param arg is unused (only necessary to conform to sys_timeout) + */ +static void +sntp_try_next_server(void* arg) +{ + u8_t old_server, i; + LWIP_UNUSED_ARG(arg); + + old_server = sntp_current_server; + for (i = 0; i < SNTP_MAX_SERVERS - 1; i++) { + sntp_current_server++; + if (sntp_current_server >= SNTP_MAX_SERVERS) { + sntp_current_server = 0; + } + if (!ip_addr_isany(&sntp_servers[sntp_current_server].addr) +#if SNTP_SERVER_DNS + || (sntp_servers[sntp_current_server].name != NULL) +#endif + ) { + LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_try_next_server: Sending request to server %"U16_F"\n", + (u16_t)sntp_current_server)); + /* new server: reset retry timeout */ + SNTP_RESET_RETRY_TIMEOUT(); + /* instantly send a request to the next server */ + sntp_request(NULL); + return; + } + } + /* no other valid server found */ + sntp_current_server = old_server; + sntp_retry(NULL); +} +#else /* SNTP_SUPPORT_MULTIPLE_SERVERS */ +/* Always retry on error if only one server is supported */ +#define sntp_try_next_server sntp_retry +#endif /* SNTP_SUPPORT_MULTIPLE_SERVERS */ + +/** UDP recv callback for the sntp pcb */ +static void +sntp_recv(void *arg, struct udp_pcb* pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + u8_t mode; + u8_t stratum; + u32_t receive_timestamp[SNTP_RECEIVE_TIME_SIZE]; + err_t err; + + LWIP_UNUSED_ARG(arg); + LWIP_UNUSED_ARG(pcb); + + /* packet received: stop retry timeout */ + sys_untimeout(sntp_try_next_server, NULL); + sys_untimeout(sntp_request, NULL); + + err = ERR_ARG; +#if SNTP_CHECK_RESPONSE >= 1 + /* check server address and port */ + if (((sntp_opmode != SNTP_OPMODE_POLL) || ip_addr_cmp(addr, &sntp_last_server_address)) && + (port == SNTP_PORT)) +#else /* SNTP_CHECK_RESPONSE >= 1 */ + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); +#endif /* SNTP_CHECK_RESPONSE >= 1 */ + { + /* process the response */ + if (p->tot_len == SNTP_MSG_LEN) { + pbuf_copy_partial(p, &mode, 1, SNTP_OFFSET_LI_VN_MODE); + mode &= SNTP_MODE_MASK; + /* if this is a SNTP response... */ + if (((sntp_opmode == SNTP_OPMODE_POLL) && (mode == SNTP_MODE_SERVER)) || + ((sntp_opmode == SNTP_OPMODE_LISTENONLY) && (mode == SNTP_MODE_BROADCAST))) { + pbuf_copy_partial(p, &stratum, 1, SNTP_OFFSET_STRATUM); + if (stratum == SNTP_STRATUM_KOD) { + /* Kiss-of-death packet. Use another server or increase UPDATE_DELAY. 
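+             (RFC 4330: a stratum field of zero marks a Kiss-o'-Death reply,
+             i.e. the server asks the client to stop or back off, which is
+             why the code below reports SNTP_ERR_KOD instead of silently
+             retrying the same server.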
*/
+          err = SNTP_ERR_KOD;
+          LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_recv: Received Kiss-of-Death\n"));
+        } else {
+#if SNTP_CHECK_RESPONSE >= 2
+          /* check originate_timestamp against sntp_last_timestamp_sent */
+          u32_t originate_timestamp[2];
+          pbuf_copy_partial(p, &originate_timestamp, 8, SNTP_OFFSET_ORIGINATE_TIME);
+          if ((originate_timestamp[0] != sntp_last_timestamp_sent[0]) ||
+              (originate_timestamp[1] != sntp_last_timestamp_sent[1]))
+          {
+            LWIP_DEBUGF(SNTP_DEBUG_WARN, ("sntp_recv: Invalid originate timestamp in response\n"));
+          } else
+#endif /* SNTP_CHECK_RESPONSE >= 2 */
+          /* @todo: add code for SNTP_CHECK_RESPONSE >= 3 and >= 4 here */
+          {
+            /* correct answer */
+            err = ERR_OK;
+            pbuf_copy_partial(p, &receive_timestamp, SNTP_RECEIVE_TIME_SIZE * 4, SNTP_OFFSET_TRANSMIT_TIME);
+          }
+        }
+      } else {
+        LWIP_DEBUGF(SNTP_DEBUG_WARN, ("sntp_recv: Invalid mode in response: %"U16_F"\n", (u16_t)mode));
+        /* wait for correct response */
+        err = ERR_TIMEOUT;
+      }
+    } else {
+      LWIP_DEBUGF(SNTP_DEBUG_WARN, ("sntp_recv: Invalid packet length: %"U16_F"\n", p->tot_len));
+    }
+  }
+#if SNTP_CHECK_RESPONSE >= 1
+  else {
+    /* packet from wrong remote address or port, wait for correct response */
+    err = ERR_TIMEOUT;
+  }
+#endif /* SNTP_CHECK_RESPONSE >= 1 */
+  pbuf_free(p);
+  if (err == ERR_OK) {
+    sntp_process(receive_timestamp);
+
+    /* Set up timeout for next request (only if poll response was received) */
+    if (sntp_opmode == SNTP_OPMODE_POLL) {
+      /* Correct response, reset retry timeout */
+      SNTP_RESET_RETRY_TIMEOUT();
+
+      sys_timeout((u32_t)SNTP_UPDATE_DELAY, sntp_request, NULL);
+      LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_recv: Scheduled next time request: %"U32_F" ms\n",
+        (u32_t)SNTP_UPDATE_DELAY));
+    }
+  } else if (err != ERR_TIMEOUT) {
+    /* Errors are only processed in case of an explicit poll response */
+    if (sntp_opmode == SNTP_OPMODE_POLL) {
+      if (err == SNTP_ERR_KOD) {
+        /* Kiss-of-death packet. Use another server or increase UPDATE_DELAY. */
+        sntp_try_next_server(NULL);
+      } else {
+        /* another error, try the same server again */
+        sntp_retry(NULL);
+      }
+    }
+  }
+}
+
+/** Actually send an sntp request to a server.
+ *
+ * @param server_addr resolved IP address of the SNTP server
+ */
+static void
+sntp_send_request(const ip_addr_t *server_addr)
+{
+  struct pbuf* p;
+  p = pbuf_alloc(PBUF_TRANSPORT, SNTP_MSG_LEN, PBUF_RAM);
+  if (p != NULL) {
+    struct sntp_msg *sntpmsg = (struct sntp_msg *)p->payload;
+    LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_send_request: Sending request to server\n"));
+    /* initialize request message */
+    sntp_initialize_request(sntpmsg);
+    /* send request */
+    udp_sendto(sntp_pcb, p, server_addr, SNTP_PORT);
+    /* free the pbuf after sending it */
+    pbuf_free(p);
+    /* set up receive timeout: try next server or retry on timeout */
+    sys_timeout((u32_t)SNTP_RECV_TIMEOUT, sntp_try_next_server, NULL);
+#if SNTP_CHECK_RESPONSE >= 1
+    /* save server address to verify it in sntp_recv */
+    ip_addr_set(&sntp_last_server_address, server_addr);
+#endif /* SNTP_CHECK_RESPONSE >= 1 */
+  } else {
+    LWIP_DEBUGF(SNTP_DEBUG_SERIOUS, ("sntp_send_request: Out of memory, trying again in %"U32_F" ms\n",
+      (u32_t)SNTP_RETRY_TIMEOUT));
+    /* out of memory: set up a timer to send a retry */
+    sys_timeout((u32_t)SNTP_RETRY_TIMEOUT, sntp_request, NULL);
+  }
+}
+
+#if SNTP_SERVER_DNS
+/**
+ * DNS found callback when using DNS names as server address.
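+ *
+ *  Note that the configured name is re-resolved for every request (see
+ *  sntp_request() below); caching and expiry are left to the DNS module.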
+ */
+static void
+sntp_dns_found(const char* hostname, const ip_addr_t *ipaddr, void *arg)
+{
+  LWIP_UNUSED_ARG(hostname);
+  LWIP_UNUSED_ARG(arg);
+
+  if (ipaddr != NULL) {
+    /* Address resolved, send request */
+    LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_dns_found: Server address resolved, sending request\n"));
+    sntp_send_request(ipaddr);
+  } else {
+    /* DNS resolving failed -> try another server */
+    LWIP_DEBUGF(SNTP_DEBUG_WARN_STATE, ("sntp_dns_found: Failed to resolve server address, trying next server\n"));
+    sntp_try_next_server(NULL);
+  }
+}
+#endif /* SNTP_SERVER_DNS */
+
+/**
+ * Send out an sntp request.
+ *
+ * @param arg is unused (only necessary to conform to sys_timeout)
+ */
+static void
+sntp_request(void *arg)
+{
+  ip_addr_t sntp_server_address;
+  err_t err;
+
+  LWIP_UNUSED_ARG(arg);
+
+  /* initialize SNTP server address */
+#if SNTP_SERVER_DNS
+  if (sntp_servers[sntp_current_server].name) {
+    /* always resolve the name and rely on dns-internal caching & timeout */
+    ip_addr_set_zero(&sntp_servers[sntp_current_server].addr);
+    err = dns_gethostbyname(sntp_servers[sntp_current_server].name, &sntp_server_address,
+      sntp_dns_found, NULL);
+    if (err == ERR_INPROGRESS) {
+      /* DNS request sent, wait for sntp_dns_found being called */
+      LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_request: Waiting for server address to be resolved.\n"));
+      return;
+    } else if (err == ERR_OK) {
+      sntp_servers[sntp_current_server].addr = sntp_server_address;
+    }
+  } else
+#endif /* SNTP_SERVER_DNS */
+  {
+    sntp_server_address = sntp_servers[sntp_current_server].addr;
+    err = (ip_addr_isany_val(sntp_server_address)) ? ERR_ARG : ERR_OK;
+  }
+
+  if (err == ERR_OK) {
+    LWIP_DEBUGF(SNTP_DEBUG_TRACE, ("sntp_request: current server address is %s\n",
+      ipaddr_ntoa(&sntp_server_address)));
+    sntp_send_request(&sntp_server_address);
+  } else {
+    /* address conversion failed, try another server */
+    LWIP_DEBUGF(SNTP_DEBUG_WARN_STATE, ("sntp_request: Invalid server address, trying next server.\n"));
+    sys_timeout((u32_t)SNTP_RETRY_TIMEOUT, sntp_try_next_server, NULL);
+  }
+}
+
+/**
+ * @ingroup sntp
+ * Initialize this module.
+ * Send out request instantly or after SNTP_STARTUP_DELAY(_FUNC).
+ */
+void
+sntp_init(void)
+{
+#ifdef SNTP_SERVER_ADDRESS
+#if SNTP_SERVER_DNS
+  sntp_setservername(0, SNTP_SERVER_ADDRESS);
+#else
+#error SNTP_SERVER_ADDRESS string not supported if SNTP_SERVER_DNS==0
+#endif
+#endif /* SNTP_SERVER_ADDRESS */
+
+  if (sntp_pcb == NULL) {
+    sntp_pcb = udp_new_ip_type(IPADDR_TYPE_ANY);
+    LWIP_ASSERT("Failed to allocate udp pcb for sntp client", sntp_pcb != NULL);
+    if (sntp_pcb != NULL) {
+      udp_recv(sntp_pcb, sntp_recv, NULL);
+
+      if (sntp_opmode == SNTP_OPMODE_POLL) {
+        SNTP_RESET_RETRY_TIMEOUT();
+#if SNTP_STARTUP_DELAY
+        sys_timeout((u32_t)SNTP_STARTUP_DELAY_FUNC, sntp_request, NULL);
+#else
+        sntp_request(NULL);
+#endif
+      } else if (sntp_opmode == SNTP_OPMODE_LISTENONLY) {
+        ip_set_option(sntp_pcb, SOF_BROADCAST);
+        udp_bind(sntp_pcb, IP_ANY_TYPE, SNTP_PORT);
+      }
+    }
+  }
+}
+
+/**
+ * @ingroup sntp
+ * Stop this module.
+ */
+void
+sntp_stop(void)
+{
+  if (sntp_pcb != NULL) {
+    sys_untimeout(sntp_request, NULL);
+    sys_untimeout(sntp_try_next_server, NULL);
+    udp_remove(sntp_pcb);
+    sntp_pcb = NULL;
+  }
+}
+
+/**
+ * @ingroup sntp
+ * Get enabled state.
+ */
+u8_t sntp_enabled(void)
+{
+  return (sntp_pcb != NULL) ? 1 : 0;
+}
+
+/**
+ * @ingroup sntp
+ * Sets the operating mode.
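+ *
+ *  Typical start-up sketch (illustrative; the address is a placeholder and
+ *  must be configured before sntp_init() is called):
+ *
+ *    ip_addr_t ntp;
+ *    IP_ADDR4(&ntp, 192, 168, 1, 1);
+ *    sntp_setoperatingmode(SNTP_OPMODE_POLL);
+ *    sntp_setserver(0, &ntp);
+ *    sntp_init();
+ *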
+ * @param operating_mode one of the available operating modes + */ +void +sntp_setoperatingmode(u8_t operating_mode) +{ + LWIP_ASSERT("Invalid operating mode", operating_mode <= SNTP_OPMODE_LISTENONLY); + LWIP_ASSERT("Operating mode must not be set while SNTP client is running", sntp_pcb == NULL); + sntp_opmode = operating_mode; +} + +/** + * @ingroup sntp + * Gets the operating mode. + */ +u8_t +sntp_getoperatingmode(void) +{ + return sntp_opmode; +} + +#if SNTP_GET_SERVERS_FROM_DHCP +/** + * Config SNTP server handling by IP address, name, or DHCP; clear table + * @param set_servers_from_dhcp enable or disable getting server addresses from dhcp + */ +void +sntp_servermode_dhcp(int set_servers_from_dhcp) +{ + u8_t new_mode = set_servers_from_dhcp ? 1 : 0; + if (sntp_set_servers_from_dhcp != new_mode) { + sntp_set_servers_from_dhcp = new_mode; + } +} +#endif /* SNTP_GET_SERVERS_FROM_DHCP */ + +/** + * @ingroup sntp + * Initialize one of the NTP servers by IP address + * + * @param idx the index of the NTP server to set must be < SNTP_MAX_SERVERS + * @param server IP address of the NTP server to set + */ +void +sntp_setserver(u8_t idx, const ip_addr_t *server) +{ + if (idx < SNTP_MAX_SERVERS) { + if (server != NULL) { + sntp_servers[idx].addr = (*server); + } else { + ip_addr_set_zero(&sntp_servers[idx].addr); + } +#if SNTP_SERVER_DNS + sntp_servers[idx].name = NULL; +#endif + } +} + +#if LWIP_DHCP && SNTP_GET_SERVERS_FROM_DHCP +/** + * Initialize one of the NTP servers by IP address, required by DHCP + * + * @param numdns the index of the NTP server to set must be < SNTP_MAX_SERVERS + * @param dnsserver IP address of the NTP server to set + */ +void +dhcp_set_ntp_servers(u8_t num, const ip4_addr_t *server) +{ + LWIP_DEBUGF(SNTP_DEBUG_TRACE, ("sntp: %s %u.%u.%u.%u as NTP server #%u via DHCP\n", + (sntp_set_servers_from_dhcp ? "Got" : "Rejected"), + ip4_addr1(server), ip4_addr2(server), ip4_addr3(server), ip4_addr4(server), num)); + if (sntp_set_servers_from_dhcp && num) { + u8_t i; + for (i = 0; (i < num) && (i < SNTP_MAX_SERVERS); i++) { + ip_addr_t addr; + ip_addr_copy_from_ip4(addr, server[i]); + sntp_setserver(i, &addr); + } + for (i = num; i < SNTP_MAX_SERVERS; i++) { + sntp_setserver(i, NULL); + } + } +} +#endif /* LWIP_DHCP && SNTP_GET_SERVERS_FROM_DHCP */ + +/** + * @ingroup sntp + * Obtain one of the currently configured by IP address (or DHCP) NTP servers + * + * @param idx the index of the NTP server + * @return IP address of the indexed NTP server or "ip_addr_any" if the NTP + * server has not been configured by address (or at all). + */ +const ip_addr_t* +sntp_getserver(u8_t idx) +{ + if (idx < SNTP_MAX_SERVERS) { + return &sntp_servers[idx].addr; + } + return IP_ADDR_ANY; +} + +#if SNTP_SERVER_DNS +/** + * Initialize one of the NTP servers by name + * + * @param numdns the index of the NTP server to set must be < SNTP_MAX_SERVERS + * @param dnsserver DNS name of the NTP server to set, to be resolved at contact time + */ +void +sntp_setservername(u8_t idx, char *server) +{ + if (idx < SNTP_MAX_SERVERS) { + sntp_servers[idx].name = server; + } +} + +/** + * Obtain one of the currently configured by name NTP servers. 
+ *
+ * @param idx the index of the NTP server
+ * @return pointer to the configured name of the indexed NTP server, or NULL
+ *         if the NTP server has not been configured by name (or at all)
+ */
+char *
+sntp_getservername(u8_t idx)
+{
+  if (idx < SNTP_MAX_SERVERS) {
+    return sntp_servers[idx].name;
+  }
+  return NULL;
+}
+#endif /* SNTP_SERVER_DNS */
+
+#endif /* LWIP_UDP */
diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/def.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/def.c
new file mode 100644
index 000000000..63cb74b5f
--- /dev/null
+++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/def.c
@@ -0,0 +1,222 @@
+/**
+ * @file
+ * Common functions used throughout the stack.
+ *
+ * These are reference implementations of the byte swapping functions,
+ * written with the aim of being simple, correct and fully portable.
+ * Byte swapping is the second thing you would want to optimize. You will
+ * need to port it to your architecture and in your cc.h:
+ *
+ * \#define lwip_htons(x) your_htons
+ * \#define lwip_htonl(x) your_htonl
+ *
+ * Note lwip_ntohs() and lwip_ntohl() are merely references to the htonx counterparts.
+ *
+ * If you \#define them to htons() and htonl(), you should
+ * \#define LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS to prevent lwIP from
+ * defining htonx/ntohx compatibility macros.
+
+ * @defgroup sys_nonstandard Non-standard functions
+ * @ingroup sys_layer
+ * lwIP provides default implementations for non-standard functions.
+ * These can be mapped to OS functions to reduce code footprint if desired.
+ * All defines related to this section must not be placed in lwipopts.h,
+ * but in arch/cc.h!
+ * These options cannot be \#defined in lwipopts.h since they are not options
+ * of lwIP itself, but options of the lwIP port to your system.
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Simon Goldschmidt
+ *
+ */
+
+#include "lwip/opt.h"
+#include "lwip/def.h"
+
+#include <string.h>
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+
+#if !defined(lwip_htons)
+/**
+ * Convert a u16_t from host- to network byte order.
+ *
+ * @param n u16_t in host byte order
+ * @return n in network byte order
+ */
+u16_t
+lwip_htons(u16_t n)
+{
+  return (u16_t)PP_HTONS(n);
+}
+#endif /* lwip_htons */
+
+#if !defined(lwip_htonl)
+/**
+ * Convert a u32_t from host- to network byte order.
+ *
+ * @param n u32_t in host byte order
+ * @return n in network byte order
+ */
+u32_t
+lwip_htonl(u32_t n)
+{
+  return (u32_t)PP_HTONL(n);
+}
+#endif /* lwip_htonl */
+
+#endif /* BYTE_ORDER == LITTLE_ENDIAN */
+
+#ifndef lwip_strnstr
+/**
+ * @ingroup sys_nonstandard
+ * lwIP default implementation for strnstr() non-standard function.
+ * This can be \#defined to strnstr() depending on your platform port.
+ */
+char*
+lwip_strnstr(const char* buffer, const char* token, size_t n)
+{
+  const char* p;
+  size_t tokenlen = strlen(token);
+  if (tokenlen == 0) {
+    return LWIP_CONST_CAST(char *, buffer);
+  }
+  for (p = buffer; *p && (p + tokenlen <= buffer + n); p++) {
+    if ((*p == *token) && (strncmp(p, token, tokenlen) == 0)) {
+      return LWIP_CONST_CAST(char *, p);
+    }
+  }
+  return NULL;
+}
+#endif
+
+#ifndef lwip_stricmp
+/**
+ * @ingroup sys_nonstandard
+ * lwIP default implementation for stricmp() non-standard function.
+ * This can be \#defined to stricmp() depending on your platform port.
+ */
+int
+lwip_stricmp(const char* str1, const char* str2)
+{
+  char c1, c2;
+
+  do {
+    c1 = *str1++;
+    c2 = *str2++;
+    if (c1 != c2) {
+      char c1_upc = c1 | 0x20;
+      if ((c1_upc >= 'a') && (c1_upc <= 'z')) {
+        /* characters are not equal and one is in the alphabet range:
+           downcase both chars and check again */
+        char c2_upc = c2 | 0x20;
+        if (c1_upc != c2_upc) {
+          /* still not equal */
+          /* don't care for < or > */
+          return 1;
+        }
+      } else {
+        /* characters are not equal but none is in the alphabet range */
+        return 1;
+      }
+    }
+  } while (c1 != 0);
+  return 0;
+}
+#endif
+
+#ifndef lwip_strnicmp
+/**
+ * @ingroup sys_nonstandard
+ * lwIP default implementation for strnicmp() non-standard function.
+ * This can be \#defined to strnicmp() depending on your platform port.
+ */
+int
+lwip_strnicmp(const char* str1, const char* str2, size_t len)
+{
+  char c1, c2;
+
+  do {
+    c1 = *str1++;
+    c2 = *str2++;
+    if (c1 != c2) {
+      char c1_upc = c1 | 0x20;
+      if ((c1_upc >= 'a') && (c1_upc <= 'z')) {
+        /* characters are not equal and one is in the alphabet range:
+           downcase both chars and check again */
+        char c2_upc = c2 | 0x20;
+        if (c1_upc != c2_upc) {
+          /* still not equal */
+          /* don't care for < or > */
+          return 1;
+        }
+      } else {
+        /* characters are not equal but none is in the alphabet range */
+        return 1;
+      }
+    }
+  } while (len-- && c1 != 0);
+  return 0;
+}
+#endif
+
+#ifndef lwip_itoa
+/**
+ * @ingroup sys_nonstandard
+ * lwIP default implementation for itoa() non-standard function.
+ * This can be \#defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform port.
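+ *
+ *  Usage sketch (illustrative) for the implementation below:
+ *
+ *    char buf[12];
+ *    lwip_itoa(buf, sizeof(buf), -42);   => buf now holds "-42"
+ *
+ *  Note that bufsize is not actually checked below, so callers must provide
+ *  a worst-case buffer (12 bytes covers any 32-bit int with sign and
+ *  terminating zero).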
+ */ +void +lwip_itoa(char* result, size_t bufsize, int number) +{ + const int base = 10; + char* ptr = result, *ptr1 = result, tmp_char; + int tmp_value; + LWIP_UNUSED_ARG(bufsize); + + do { + tmp_value = number; + number /= base; + *ptr++ = "zyxwvutsrqponmlkjihgfedcba9876543210123456789abcdefghijklmnopqrstuvwxyz"[35 + (tmp_value - number * base)]; + } while(number); + + /* Apply negative sign */ + if (tmp_value < 0) { + *ptr++ = '-'; + } + *ptr-- = '\0'; + while(ptr1 < ptr) { + tmp_char = *ptr; + *ptr--= *ptr1; + *ptr1++ = tmp_char; + } +} +#endif diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/dns.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/dns.c new file mode 100644 index 000000000..bf6306f8b --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/dns.c @@ -0,0 +1,1573 @@ +/** + * @file + * DNS - host name to IP address resolver. + * + * @defgroup dns DNS + * @ingroup callbackstyle_api + * + * Implements a DNS host name to IP address resolver. + * + * The lwIP DNS resolver functions are used to lookup a host name and + * map it to a numerical IP address. It maintains a list of resolved + * hostnames that can be queried with the dns_lookup() function. + * New hostnames can be resolved using the dns_query() function. + * + * The lwIP version of the resolver also adds a non-blocking version of + * gethostbyname() that will work with a raw API application. This function + * checks for an IP address string first and converts it if it is valid. + * gethostbyname() then does a dns_lookup() to see if the name is + * already in the table. If so, the IP is returned. If not, a query is + * issued and the function returns with a ERR_INPROGRESS status. The app + * using the dns client must then go into a waiting state. + * + * Once a hostname has been resolved (or found to be non-existent), + * the resolver code calls a specified callback function (which + * must be implemented by the module that uses the resolver). + * + * Multicast DNS queries are supported for names ending on ".local". + * However, only "One-Shot Multicast DNS Queries" are supported (RFC 6762 + * chapter 5.1), this is not a fully compliant implementation of continuous + * mDNS querying! + * + * All functions must be called from TCPIP thread. + * + * @see @ref netconn_common for thread-safe access. + */ + +/* + * Port to lwIP from uIP + * by Jim Pettinato April 2007 + * + * security fixes and more by Simon Goldschmidt + * + * uIP version Copyright (c) 2002-2003, Adam Dunkels. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-----------------------------------------------------------------------------
+ * RFC 1035 - Domain names - implementation and specification
+ * RFC 2181 - Clarifications to the DNS Specification
+ *----------------------------------------------------------------------------*/
+
+/** @todo: define good default values (rfc compliance) */
+/** @todo: improve answer parsing, more checkings... */
+/** @todo: check RFC1035 - 7.3. Processing responses */
+/** @todo: one-shot mDNS: dual-stack fallback to another IP version */
+
+/*-----------------------------------------------------------------------------
+ * Includes
+ *----------------------------------------------------------------------------*/
+
+#include "lwip/opt.h"
+
+#if LWIP_DNS /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/def.h"
+#include "lwip/udp.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/dns.h"
+#include "lwip/prot/dns.h"
+
+#include <string.h>
+
+/** Random generator function to create random TXIDs and source ports for queries */
+#ifndef DNS_RAND_TXID
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_XID) != 0)
+#define DNS_RAND_TXID LWIP_RAND
+#else
+static u16_t dns_txid;
+#define DNS_RAND_TXID() (++dns_txid)
+#endif
+#endif
+
+/** Limits the source port to be >= 1024 by default */
+#ifndef DNS_PORT_ALLOWED
+#define DNS_PORT_ALLOWED(port) ((port) >= 1024)
+#endif
+
+/** DNS maximum number of retries when asking for a name, before "timeout". */
+#ifndef DNS_MAX_RETRIES
+#define DNS_MAX_RETRIES 4
+#endif
+
+/** DNS resource record max. TTL (one week as default) */
+#ifndef DNS_MAX_TTL
+#define DNS_MAX_TTL 604800
+#elif DNS_MAX_TTL > 0x7FFFFFFF
+#error DNS_MAX_TTL must be a positive 32-bit value
+#endif
+
+#if DNS_TABLE_SIZE > 255
+#error DNS_TABLE_SIZE must fit into an u8_t
+#endif
+#if DNS_MAX_SERVERS > 255
+#error DNS_MAX_SERVERS must fit into an u8_t
+#endif
+
+/* The number of parallel requests (i.e. calls to dns_gethostbyname
+ * that cannot be answered from the DNS table).
+ * This is set to the table size by default.
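+ * For example, an illustrative lwipopts.h override (assuming the secure
+ * option named below is enabled) could be:
+ *
+ *   #define LWIP_DNS_SECURE      (LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING)
+ *   #define DNS_TABLE_SIZE       8
+ *   #define DNS_MAX_REQUESTS     4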
+ */
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0)
+#ifndef DNS_MAX_REQUESTS
+#define DNS_MAX_REQUESTS          DNS_TABLE_SIZE
+#else
+#if DNS_MAX_REQUESTS > 255
+#error DNS_MAX_REQUESTS must fit into an u8_t
+#endif
+#endif
+#else
+/* In this configuration, both arrays have to have the same size and are used
+ * like one entry (used/free) */
+#define DNS_MAX_REQUESTS          DNS_TABLE_SIZE
+#endif
+
+/* The number of UDP source ports used in parallel */
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0)
+#ifndef DNS_MAX_SOURCE_PORTS
+#define DNS_MAX_SOURCE_PORTS      DNS_MAX_REQUESTS
+#else
+#if DNS_MAX_SOURCE_PORTS > 255
+#error DNS_MAX_SOURCE_PORTS must fit into an u8_t
+#endif
+#endif
+#else
+#ifdef DNS_MAX_SOURCE_PORTS
+#undef DNS_MAX_SOURCE_PORTS
+#endif
+#define DNS_MAX_SOURCE_PORTS      1
+#endif
+
+#if LWIP_IPV4 && LWIP_IPV6
+#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) (((t) == LWIP_DNS_ADDRTYPE_IPV6_IPV4) || ((t) == LWIP_DNS_ADDRTYPE_IPV6))
+#define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) (IP_IS_V6_VAL(ip) ? LWIP_DNS_ADDRTYPE_IS_IPV6(t) : (!LWIP_DNS_ADDRTYPE_IS_IPV6(t)))
+#define LWIP_DNS_ADDRTYPE_ARG(x) , x
+#define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) x
+#define LWIP_DNS_SET_ADDRTYPE(x, y) do { x = y; } while(0)
+#else
+#if LWIP_IPV6
+#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 1
+#else
+#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 0
+#endif
+#define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) 1
+#define LWIP_DNS_ADDRTYPE_ARG(x)
+#define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) 0
+#define LWIP_DNS_SET_ADDRTYPE(x, y)
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+#define LWIP_DNS_ISMDNS_ARG(x) , x
+#else
+#define LWIP_DNS_ISMDNS_ARG(x)
+#endif
+
+/** DNS query message structure.
+    No packing needed: only used locally on the stack. */
+struct dns_query {
+  /* DNS query record starts with either a domain name or a pointer
+     to a name already present somewhere in the packet. */
+  u16_t type;
+  u16_t cls;
+};
+#define SIZEOF_DNS_QUERY 4
+
+/** DNS answer message structure.
+    No packing needed: only used locally on the stack. */
+struct dns_answer {
+  /* DNS answer record starts with either a domain name or a pointer
+     to a name already present somewhere in the packet. */
+  u16_t type;
+  u16_t cls;
+  u32_t ttl;
+  u16_t len;
+};
+#define SIZEOF_DNS_ANSWER 10
+/* maximum allowed size for the struct due to non-packed */
+#define SIZEOF_DNS_ANSWER_ASSERT 12
+
+/* DNS table entry states */
+typedef enum {
+  DNS_STATE_UNUSED = 0,
+  DNS_STATE_NEW    = 1,
+  DNS_STATE_ASKING = 2,
+  DNS_STATE_DONE   = 3
+} dns_state_enum_t;
+
+/** DNS table entry */
+struct dns_table_entry {
+  u32_t ttl;
+  ip_addr_t ipaddr;
+  u16_t txid;
+  u8_t state;
+  u8_t server_idx;
+  u8_t tmr;
+  u8_t retries;
+  u8_t seqno;
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0)
+  u8_t pcb_idx;
+#endif
+  char name[DNS_MAX_NAME_LENGTH];
+#if LWIP_IPV4 && LWIP_IPV6
+  u8_t reqaddrtype;
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+  u8_t is_mdns;
+#endif
+};
+
+/** DNS request table entry: used when dns_gethostbyname cannot answer the
+ * request from the DNS table */
+struct dns_req_entry {
+  /* pointer to callback on DNS query done */
+  dns_found_callback found;
+  /* argument passed to the callback function */
+  void *arg;
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0)
+  u8_t dns_table_idx;
+#endif
+#if LWIP_IPV4 && LWIP_IPV6
+  u8_t reqaddrtype;
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+};
+
+#if DNS_LOCAL_HOSTLIST
+
+#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC
+/** Local host-list.
For hostnames in this list, no + * external name resolution is performed */ +static struct local_hostlist_entry *local_hostlist_dynamic; +#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +/** Defining this allows the local_hostlist_static to be placed in a different + * linker section (e.g. FLASH) */ +#ifndef DNS_LOCAL_HOSTLIST_STORAGE_PRE +#define DNS_LOCAL_HOSTLIST_STORAGE_PRE static +#endif /* DNS_LOCAL_HOSTLIST_STORAGE_PRE */ +/** Defining this allows the local_hostlist_static to be placed in a different + * linker section (e.g. FLASH) */ +#ifndef DNS_LOCAL_HOSTLIST_STORAGE_POST +#define DNS_LOCAL_HOSTLIST_STORAGE_POST +#endif /* DNS_LOCAL_HOSTLIST_STORAGE_POST */ +DNS_LOCAL_HOSTLIST_STORAGE_PRE struct local_hostlist_entry local_hostlist_static[] + DNS_LOCAL_HOSTLIST_STORAGE_POST = DNS_LOCAL_HOSTLIST_INIT; + +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +static void dns_init_local(void); +static err_t dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)); +#endif /* DNS_LOCAL_HOSTLIST */ + + +/* forward declarations */ +static void dns_recv(void *s, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); +static void dns_check_entries(void); +static void dns_call_found(u8_t idx, ip_addr_t* addr); + +/*----------------------------------------------------------------------------- + * Globals + *----------------------------------------------------------------------------*/ + +/* DNS variables */ +static struct udp_pcb *dns_pcbs[DNS_MAX_SOURCE_PORTS]; +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) +static u8_t dns_last_pcb_idx; +#endif +static u8_t dns_seqno; +static struct dns_table_entry dns_table[DNS_TABLE_SIZE]; +static struct dns_req_entry dns_requests[DNS_MAX_REQUESTS]; +static ip_addr_t dns_servers[DNS_MAX_SERVERS]; + +#if LWIP_IPV4 +const ip_addr_t dns_mquery_v4group = DNS_MQUERY_IPV4_GROUP_INIT; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +const ip_addr_t dns_mquery_v6group = DNS_MQUERY_IPV6_GROUP_INIT; +#endif /* LWIP_IPV6 */ + +/** + * Initialize the resolver: set up the UDP pcb and configure the default server + * (if DNS_SERVER_ADDRESS is set). + */ +void +dns_init(void) +{ +#ifdef DNS_SERVER_ADDRESS + /* initialize default DNS server address */ + ip_addr_t dnsserver; + DNS_SERVER_ADDRESS(&dnsserver); + dns_setserver(0, &dnsserver); +#endif /* DNS_SERVER_ADDRESS */ + + LWIP_ASSERT("sanity check SIZEOF_DNS_QUERY", + sizeof(struct dns_query) == SIZEOF_DNS_QUERY); + LWIP_ASSERT("sanity check SIZEOF_DNS_ANSWER", + sizeof(struct dns_answer) <= SIZEOF_DNS_ANSWER_ASSERT); + + LWIP_DEBUGF(DNS_DEBUG, ("dns_init: initializing\n")); + + /* if dns client not yet initialized... */ +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0) + if (dns_pcbs[0] == NULL) { + dns_pcbs[0] = udp_new_ip_type(IPADDR_TYPE_ANY); + LWIP_ASSERT("dns_pcbs[0] != NULL", dns_pcbs[0] != NULL); + + /* initialize DNS table not needed (initialized to zero since it is a + * global variable) */ + LWIP_ASSERT("For implicit initialization to work, DNS_STATE_UNUSED needs to be 0", + DNS_STATE_UNUSED == 0); + + /* initialize DNS client */ + udp_bind(dns_pcbs[0], IP_ANY_TYPE, 0); + udp_recv(dns_pcbs[0], dns_recv, NULL); + } +#endif + +#if DNS_LOCAL_HOSTLIST + dns_init_local(); +#endif +} + +/** + * @ingroup dns + * Initialize one of the DNS servers. 
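+ *
+ *  Usage sketch (illustrative; 8.8.8.8 is only an example resolver):
+ *
+ *    ip_addr_t d;
+ *    IP_ADDR4(&d, 8, 8, 8, 8);
+ *    dns_setserver(0, &d);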
+ * + * @param numdns the index of the DNS server to set must be < DNS_MAX_SERVERS + * @param dnsserver IP address of the DNS server to set + */ +void +dns_setserver(u8_t numdns, const ip_addr_t *dnsserver) +{ + if (numdns < DNS_MAX_SERVERS) { + if (dnsserver != NULL) { + dns_servers[numdns] = (*dnsserver); + } else { + dns_servers[numdns] = *IP_ADDR_ANY; + } + } +} + +/** + * @ingroup dns + * Obtain one of the currently configured DNS server. + * + * @param numdns the index of the DNS server + * @return IP address of the indexed DNS server or "ip_addr_any" if the DNS + * server has not been configured. + */ +const ip_addr_t* +dns_getserver(u8_t numdns) +{ + if (numdns < DNS_MAX_SERVERS) { + return &dns_servers[numdns]; + } else { + return IP_ADDR_ANY; + } +} + +/** + * The DNS resolver client timer - handle retries and timeouts and should + * be called every DNS_TMR_INTERVAL milliseconds (every second by default). + */ +void +dns_tmr(void) +{ + LWIP_DEBUGF(DNS_DEBUG, ("dns_tmr: dns_check_entries\n")); + dns_check_entries(); +} + +#if DNS_LOCAL_HOSTLIST +static void +dns_init_local(void) +{ +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) + size_t i; + struct local_hostlist_entry *entry; + /* Dynamic: copy entries from DNS_LOCAL_HOSTLIST_INIT to list */ + struct local_hostlist_entry local_hostlist_init[] = DNS_LOCAL_HOSTLIST_INIT; + size_t namelen; + for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_init); i++) { + struct local_hostlist_entry *init_entry = &local_hostlist_init[i]; + LWIP_ASSERT("invalid host name (NULL)", init_entry->name != NULL); + namelen = strlen(init_entry->name); + LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN); + entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST); + LWIP_ASSERT("mem-error in dns_init_local", entry != NULL); + if (entry != NULL) { + char* entry_name = (char*)entry + sizeof(struct local_hostlist_entry); + MEMCPY(entry_name, init_entry->name, namelen); + entry_name[namelen] = 0; + entry->name = entry_name; + entry->addr = init_entry->addr; + entry->next = local_hostlist_dynamic; + local_hostlist_dynamic = entry; + } + } +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) */ +} + +/** + * @ingroup dns + * Iterate the local host-list for a hostname. + * + * @param iterator_fn a function that is called for every entry in the local host-list + * @param iterator_arg 3rd argument passed to iterator_fn + * @return the number of entries in the local host-list + */ +size_t +dns_local_iterate(dns_found_callback iterator_fn, void *iterator_arg) +{ + size_t i; +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC + struct local_hostlist_entry *entry = local_hostlist_dynamic; + i = 0; + while (entry != NULL) { + if (iterator_fn != NULL) { + iterator_fn(entry->name, &entry->addr, iterator_arg); + } + i++; + entry = entry->next; + } +#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) { + if (iterator_fn != NULL) { + iterator_fn(local_hostlist_static[i].name, &local_hostlist_static[i].addr, iterator_arg); + } + } +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + return i; +} + +/** + * @ingroup dns + * Scans the local host-list for a hostname. + * + * @param hostname Hostname to look for in the local host-list + * @param addr the first IP address for the hostname in the local host-list or + * IPADDR_NONE if not found. + * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 (ATTENTION: no fallback here!) 
+ * - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 (ATTENTION: no fallback here!) + * - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only + * - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only + * @return ERR_OK if found, ERR_ARG if not found + */ +err_t +dns_local_lookup(const char *hostname, ip_addr_t *addr, u8_t dns_addrtype) +{ + LWIP_UNUSED_ARG(dns_addrtype); + return dns_lookup_local(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)); +} + +/* Internal implementation for dns_local_lookup and dns_lookup */ +static err_t +dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)) +{ +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC + struct local_hostlist_entry *entry = local_hostlist_dynamic; + while (entry != NULL) { + if ((lwip_stricmp(entry->name, hostname) == 0) && + LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, entry->addr)) { + if (addr) { + ip_addr_copy(*addr, entry->addr); + } + return ERR_OK; + } + entry = entry->next; + } +#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + size_t i; + for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) { + if ((lwip_stricmp(local_hostlist_static[i].name, hostname) == 0) && + LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, local_hostlist_static[i].addr)) { + if (addr) { + ip_addr_copy(*addr, local_hostlist_static[i].addr); + } + return ERR_OK; + } + } +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + return ERR_ARG; +} + +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +/** + * @ingroup dns + * Remove all entries from the local host-list for a specific hostname + * and/or IP address + * + * @param hostname hostname for which entries shall be removed from the local + * host-list + * @param addr address for which entries shall be removed from the local host-list + * @return the number of removed entries + */ +int +dns_local_removehost(const char *hostname, const ip_addr_t *addr) +{ + int removed = 0; + struct local_hostlist_entry *entry = local_hostlist_dynamic; + struct local_hostlist_entry *last_entry = NULL; + while (entry != NULL) { + if (((hostname == NULL) || !lwip_stricmp(entry->name, hostname)) && + ((addr == NULL) || ip_addr_cmp(&entry->addr, addr))) { + struct local_hostlist_entry *free_entry; + if (last_entry != NULL) { + last_entry->next = entry->next; + } else { + local_hostlist_dynamic = entry->next; + } + free_entry = entry; + entry = entry->next; + memp_free(MEMP_LOCALHOSTLIST, free_entry); + removed++; + } else { + last_entry = entry; + entry = entry->next; + } + } + return removed; +} + +/** + * @ingroup dns + * Add a hostname/IP address pair to the local host-list. + * Duplicates are not checked. 
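+ *  (Illustrative call, assuming 'addr' is an ip_addr_t that already holds
+ *  the address: dns_local_addhost("printer.local", &addr);)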
+ * + * @param hostname hostname of the new entry + * @param addr IP address of the new entry + * @return ERR_OK if succeeded or ERR_MEM on memory error + */ +err_t +dns_local_addhost(const char *hostname, const ip_addr_t *addr) +{ + struct local_hostlist_entry *entry; + size_t namelen; + char* entry_name; + LWIP_ASSERT("invalid host name (NULL)", hostname != NULL); + namelen = strlen(hostname); + LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN); + entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST); + if (entry == NULL) { + return ERR_MEM; + } + entry_name = (char*)entry + sizeof(struct local_hostlist_entry); + MEMCPY(entry_name, hostname, namelen); + entry_name[namelen] = 0; + entry->name = entry_name; + ip_addr_copy(entry->addr, *addr); + entry->next = local_hostlist_dynamic; + local_hostlist_dynamic = entry; + return ERR_OK; +} +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC*/ +#endif /* DNS_LOCAL_HOSTLIST */ + +/** + * @ingroup dns + * Look up a hostname in the array of known hostnames. + * + * @note This function only looks in the internal array of known + * hostnames, it does not send out a query for the hostname if none + * was found. The function dns_enqueue() can be used to send a query + * for a hostname. + * + * @param name the hostname to look up + * @param addr the hostname's IP address, as u32_t (instead of ip_addr_t to + * better check for failure: != IPADDR_NONE) or IPADDR_NONE if the hostname + * was not found in the cached dns_table. + * @return ERR_OK if found, ERR_ARG if not found + */ +static err_t +dns_lookup(const char *name, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)) +{ + u8_t i; +#if DNS_LOCAL_HOSTLIST || defined(DNS_LOOKUP_LOCAL_EXTERN) +#endif /* DNS_LOCAL_HOSTLIST || defined(DNS_LOOKUP_LOCAL_EXTERN) */ +#if DNS_LOCAL_HOSTLIST + if (dns_lookup_local(name, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) { + return ERR_OK; + } +#endif /* DNS_LOCAL_HOSTLIST */ +#ifdef DNS_LOOKUP_LOCAL_EXTERN + if (DNS_LOOKUP_LOCAL_EXTERN(name, addr, LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(dns_addrtype)) == ERR_OK) { + return ERR_OK; + } +#endif /* DNS_LOOKUP_LOCAL_EXTERN */ + + /* Walk through name list, return entry if found. If not, return NULL. */ + for (i = 0; i < DNS_TABLE_SIZE; ++i) { + if ((dns_table[i].state == DNS_STATE_DONE) && + (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0) && + LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, dns_table[i].ipaddr)) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_lookup: \"%s\": found = ", name)); + ip_addr_debug_print(DNS_DEBUG, &(dns_table[i].ipaddr)); + LWIP_DEBUGF(DNS_DEBUG, ("\n")); + if (addr) { + ip_addr_copy(*addr, dns_table[i].ipaddr); + } + return ERR_OK; + } + } + + return ERR_ARG; +} + +/** + * Compare the "dotted" name "query" with the encoded name "response" + * to make sure an answer from the DNS server matches the current dns_table + * entry (otherwise, answers might arrive late for hostname not on the list + * any more). + * + * @param query hostname (not encoded) from the dns_table + * @param p pbuf containing the encoded hostname in the DNS response + * @param start_offset offset into p where the name starts + * @return 0xFFFF: names differ, other: names equal -> offset behind name + */ +static u16_t +dns_compare_name(const char *query, struct pbuf* p, u16_t start_offset) +{ + int n; + u16_t response_offset = start_offset; + + do { + n = pbuf_try_get_at(p, response_offset++); + if (n < 0) { + return 0xFFFF; + } + /** @see RFC 1035 - 4.1.4. 
Message compression */ + if ((n & 0xc0) == 0xc0) { + /* Compressed name: cannot be equal since we don't send them */ + return 0xFFFF; + } else { + /* Not compressed name */ + while (n > 0) { + int c = pbuf_try_get_at(p, response_offset); + if (c < 0) { + return 0xFFFF; + } + if ((*query) != (u8_t)c) { + return 0xFFFF; + } + ++response_offset; + ++query; + --n; + } + ++query; + } + n = pbuf_try_get_at(p, response_offset); + if (n < 0) { + return 0xFFFF; + } + } while (n != 0); + + return response_offset + 1; +} + +/** + * Walk through a compact encoded DNS name and return the end of the name. + * + * @param p pbuf containing the name + * @param query_idx start index into p pointing to encoded DNS name in the DNS server response + * @return index to end of the name + */ +static u16_t +dns_skip_name(struct pbuf* p, u16_t query_idx) +{ + int n; + u16_t offset = query_idx; + + do { + n = pbuf_try_get_at(p, offset++); + if (n < 0) { + return 0xFFFF; + } + /** @see RFC 1035 - 4.1.4. Message compression */ + if ((n & 0xc0) == 0xc0) { + /* Compressed name: since we only want to skip it (not check it), stop here */ + break; + } else { + /* Not compressed name */ + if (offset + n >= p->tot_len) { + return 0xFFFF; + } + offset = (u16_t)(offset + n); + } + n = pbuf_try_get_at(p, offset); + if (n < 0) { + return 0xFFFF; + } + } while (n != 0); + + return offset + 1; +} + +/** + * Send a DNS query packet. + * + * @param idx the DNS table entry index for which to send a request + * @return ERR_OK if packet is sent; an err_t indicating the problem otherwise + */ +static err_t +dns_send(u8_t idx) +{ + err_t err; + struct dns_hdr hdr; + struct dns_query qry; + struct pbuf *p; + u16_t query_idx, copy_len; + const char *hostname, *hostname_part; + u8_t n; + u8_t pcb_idx; + struct dns_table_entry* entry = &dns_table[idx]; + + LWIP_DEBUGF(DNS_DEBUG, ("dns_send: dns_servers[%"U16_F"] \"%s\": request\n", + (u16_t)(entry->server_idx), entry->name)); + LWIP_ASSERT("dns server out of array", entry->server_idx < DNS_MAX_SERVERS); + if (ip_addr_isany_val(dns_servers[entry->server_idx]) +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + && !entry->is_mdns +#endif + ) { + /* DNS server not valid anymore, e.g. PPP netif has been shut down */ + /* call specified callback function if provided */ + dns_call_found(idx, NULL); + /* flush this entry */ + entry->state = DNS_STATE_UNUSED; + return ERR_OK; + } + + /* if here, we have either a new query or a retry on a previous query to process */ + p = pbuf_alloc(PBUF_TRANSPORT, (u16_t)(SIZEOF_DNS_HDR + strlen(entry->name) + 2 + + SIZEOF_DNS_QUERY), PBUF_RAM); + if (p != NULL) { + const ip_addr_t* dst; + u16_t dst_port; + /* fill dns header */ + memset(&hdr, 0, SIZEOF_DNS_HDR); + hdr.id = lwip_htons(entry->txid); + hdr.flags1 = DNS_FLAG1_RD; + hdr.numquestions = PP_HTONS(1); + pbuf_take(p, &hdr, SIZEOF_DNS_HDR); + hostname = entry->name; + --hostname; + + /* convert hostname into suitable query format. */ + query_idx = SIZEOF_DNS_HDR; + do { + ++hostname; + hostname_part = hostname; + for (n = 0; *hostname != '.' 
&& *hostname != 0; ++hostname) { + ++n; + } + copy_len = (u16_t)(hostname - hostname_part); + pbuf_put_at(p, query_idx, n); + pbuf_take_at(p, hostname_part, copy_len, query_idx + 1); + query_idx += n + 1; + } while (*hostname != 0); + pbuf_put_at(p, query_idx, 0); + query_idx++; + + /* fill dns query */ + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) { + qry.type = PP_HTONS(DNS_RRTYPE_AAAA); + } else { + qry.type = PP_HTONS(DNS_RRTYPE_A); + } + qry.cls = PP_HTONS(DNS_RRCLASS_IN); + pbuf_take_at(p, &qry, SIZEOF_DNS_QUERY, query_idx); + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + pcb_idx = entry->pcb_idx; +#else + pcb_idx = 0; +#endif + /* send dns packet */ + LWIP_DEBUGF(DNS_DEBUG, ("sending DNS request ID %d for name \"%s\" to server %d\r\n", + entry->txid, entry->name, entry->server_idx)); +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (entry->is_mdns) { + dst_port = DNS_MQUERY_PORT; +#if LWIP_IPV6 + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) + { + dst = &dns_mquery_v6group; + } +#endif +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif +#if LWIP_IPV4 + { + dst = &dns_mquery_v4group; + } +#endif + } else +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + dst_port = DNS_SERVER_PORT; + dst = &dns_servers[entry->server_idx]; + } + err = udp_sendto(dns_pcbs[pcb_idx], p, dst, dst_port); + + /* free pbuf */ + pbuf_free(p); + } else { + err = ERR_MEM; + } + + return err; +} + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) +static struct udp_pcb* +dns_alloc_random_port(void) +{ + err_t err; + struct udp_pcb* ret; + + ret = udp_new_ip_type(IPADDR_TYPE_ANY); + if (ret == NULL) { + /* out of memory, have to reuse an existing pcb */ + return NULL; + } + do { + u16_t port = (u16_t)DNS_RAND_TXID(); + if (!DNS_PORT_ALLOWED(port)) { + /* this port is not allowed, try again */ + err = ERR_USE; + continue; + } + err = udp_bind(ret, IP_ANY_TYPE, port); + } while (err == ERR_USE); + if (err != ERR_OK) { + udp_remove(ret); + return NULL; + } + udp_recv(ret, dns_recv, NULL); + return ret; +} + +/** + * dns_alloc_pcb() - allocates a new pcb (or reuses an existing one) to be used + * for sending a request + * + * @return an index into dns_pcbs + */ +static u8_t +dns_alloc_pcb(void) +{ + u8_t i; + u8_t idx; + + for (i = 0; i < DNS_MAX_SOURCE_PORTS; i++) { + if (dns_pcbs[i] == NULL) { + break; + } + } + if (i < DNS_MAX_SOURCE_PORTS) { + dns_pcbs[i] = dns_alloc_random_port(); + if (dns_pcbs[i] != NULL) { + /* succeeded */ + dns_last_pcb_idx = i; + return i; + } + } + /* if we come here, creating a new UDP pcb failed, so we have to use + an already existing one */ + for (i = 0, idx = dns_last_pcb_idx + 1; i < DNS_MAX_SOURCE_PORTS; i++, idx++) { + if (idx >= DNS_MAX_SOURCE_PORTS) { + idx = 0; + } + if (dns_pcbs[idx] != NULL) { + dns_last_pcb_idx = idx; + return idx; + } + } + return DNS_MAX_SOURCE_PORTS; +} +#endif /* ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) */ + +/** + * dns_call_found() - call the found callback and check if there are duplicate + * entries for the given hostname. If there are any, their found callback will + * be called and they will be removed. 
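+ *
+ *  The callbacks stored here have the dns_found_callback signature; an
+ *  illustrative handler (printf is just a stand-in for application logic,
+ *  and a NULL ipaddr signals failure):
+ *
+ *    static void my_found(const char *name, const ip_addr_t *ipaddr, void *arg)
+ *    {
+ *      if (ipaddr != NULL) {
+ *        printf("%s resolved\n", name);
+ *      }
+ *    }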
+ * + * @param idx dns table index of the entry that is resolved or removed + * @param addr IP address for the hostname (or NULL on error or memory shortage) + */ +static void +dns_call_found(u8_t idx, ip_addr_t* addr) +{ +#if ((LWIP_DNS_SECURE & (LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT)) != 0) + u8_t i; +#endif + +#if LWIP_IPV4 && LWIP_IPV6 + if (addr != NULL) { + /* check that address type matches the request and adapt the table entry */ + if (IP_IS_V6_VAL(*addr)) { + LWIP_ASSERT("invalid response", LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype)); + dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6; + } else { + LWIP_ASSERT("invalid response", !LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype)); + dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4; + } + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + for (i = 0; i < DNS_MAX_REQUESTS; i++) { + if (dns_requests[i].found && (dns_requests[i].dns_table_idx == idx)) { + (*dns_requests[i].found)(dns_table[idx].name, addr, dns_requests[i].arg); + /* flush this entry */ + dns_requests[i].found = NULL; + } + } +#else + if (dns_requests[idx].found) { + (*dns_requests[idx].found)(dns_table[idx].name, addr, dns_requests[idx].arg); + } + dns_requests[idx].found = NULL; +#endif +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + /* close the pcb used unless other request are using it */ + for (i = 0; i < DNS_MAX_REQUESTS; i++) { + if (i == idx) { + continue; /* only check other requests */ + } + if (dns_table[i].state == DNS_STATE_ASKING) { + if (dns_table[i].pcb_idx == dns_table[idx].pcb_idx) { + /* another request is still using the same pcb */ + dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS; + break; + } + } + } + if (dns_table[idx].pcb_idx < DNS_MAX_SOURCE_PORTS) { + /* if we come here, the pcb is not used any more and can be removed */ + udp_remove(dns_pcbs[dns_table[idx].pcb_idx]); + dns_pcbs[dns_table[idx].pcb_idx] = NULL; + dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS; + } +#endif +} + +/* Create a query transmission ID that is unique for all outstanding queries */ +static u16_t +dns_create_txid(void) +{ + u16_t txid; + u8_t i; + +again: + txid = (u16_t)DNS_RAND_TXID(); + + /* check whether the ID is unique */ + for (i = 0; i < DNS_TABLE_SIZE; i++) { + if ((dns_table[i].state == DNS_STATE_ASKING) && + (dns_table[i].txid == txid)) { + /* ID already used by another pending query */ + goto again; + } + } + + return txid; +} + +/** + * dns_check_entry() - see if entry has not yet been queried and, if so, sends out a query. 
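+ *  (Illustrative timing: with the default DNS_TMR_INTERVAL of 1000 ms and
+ *  DNS_MAX_RETRIES of 4, the waits between sends are roughly 1 s, 1 s, 2 s
+ *  and 3 s before the code below fails over to the next server or gives up.)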
+ * Check an entry in the dns_table: + * - send out query for new entries + * - retry old pending entries on timeout (also with different servers) + * - remove completed entries from the table if their TTL has expired + * + * @param i index of the dns_table entry to check + */ +static void +dns_check_entry(u8_t i) +{ + err_t err; + struct dns_table_entry *entry = &dns_table[i]; + + LWIP_ASSERT("array index out of bounds", i < DNS_TABLE_SIZE); + + switch (entry->state) { + case DNS_STATE_NEW: + /* initialize new entry */ + entry->txid = dns_create_txid(); + entry->state = DNS_STATE_ASKING; + entry->server_idx = 0; + entry->tmr = 1; + entry->retries = 0; + + /* send DNS packet for this entry */ + err = dns_send(i); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING, + ("dns_send returned error: %s\n", lwip_strerr(err))); + } + break; + case DNS_STATE_ASKING: + if (--entry->tmr == 0) { + if (++entry->retries == DNS_MAX_RETRIES) { + if ((entry->server_idx + 1 < DNS_MAX_SERVERS) && !ip_addr_isany_val(dns_servers[entry->server_idx + 1]) +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + && !entry->is_mdns +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + ) { + /* change of server */ + entry->server_idx++; + entry->tmr = 1; + entry->retries = 0; + } else { + LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": timeout\n", entry->name)); + /* call specified callback function if provided */ + dns_call_found(i, NULL); + /* flush this entry */ + entry->state = DNS_STATE_UNUSED; + break; + } + } else { + /* wait longer for the next retry */ + entry->tmr = entry->retries; + } + + /* send DNS packet for this entry */ + err = dns_send(i); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING, + ("dns_send returned error: %s\n", lwip_strerr(err))); + } + } + break; + case DNS_STATE_DONE: + /* if the time to live is nul */ + if ((entry->ttl == 0) || (--entry->ttl == 0)) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": flush\n", entry->name)); + /* flush this entry, there cannot be any related pending entries in this state */ + entry->state = DNS_STATE_UNUSED; + } + break; + case DNS_STATE_UNUSED: + /* nothing to do */ + break; + default: + LWIP_ASSERT("unknown dns_table entry state:", 0); + break; + } +} + +/** + * Call dns_check_entry for each entry in dns_table - check all entries. + */ +static void +dns_check_entries(void) +{ + u8_t i; + + for (i = 0; i < DNS_TABLE_SIZE; ++i) { + dns_check_entry(i); + } +} + +/** + * Save TTL and call dns_call_found for correct response. + */ +static void +dns_correct_response(u8_t idx, u32_t ttl) +{ + struct dns_table_entry *entry = &dns_table[idx]; + + entry->state = DNS_STATE_DONE; + + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response = ", entry->name)); + ip_addr_debug_print(DNS_DEBUG, (&(entry->ipaddr))); + LWIP_DEBUGF(DNS_DEBUG, ("\n")); + + /* read the answer resource record's TTL, and maximize it if needed */ + entry->ttl = ttl; + if (entry->ttl > DNS_MAX_TTL) { + entry->ttl = DNS_MAX_TTL; + } + dns_call_found(idx, &entry->ipaddr); + + if (entry->ttl == 0) { + /* RFC 883, page 29: "Zero values are + interpreted to mean that the RR can only be used for the + transaction in progress, and should not be cached." + -> flush this entry now */ + /* entry reused during callback? */ + if (entry->state == DNS_STATE_DONE) { + entry->state = DNS_STATE_UNUSED; + } + } +} +/** + * Receive input function for DNS response packets arriving for the dns UDP pcb. 
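+ *  A response is only accepted after it has passed, in order: a transaction-ID
+ *  match against a pending entry, a source-address check against the queried
+ *  server (RFC 5452), a comparison of the echoed question name and type, and
+ *  finally parsing of the answer records themselves.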
+ */
+static void
+dns_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port)
+{
+  u8_t i;
+  u16_t txid;
+  u16_t res_idx;
+  struct dns_hdr hdr;
+  struct dns_answer ans;
+  struct dns_query qry;
+  u16_t nquestions, nanswers;
+
+  LWIP_UNUSED_ARG(arg);
+  LWIP_UNUSED_ARG(pcb);
+  LWIP_UNUSED_ARG(port);
+
+  /* is the dns message big enough? */
+  if (p->tot_len < (SIZEOF_DNS_HDR + SIZEOF_DNS_QUERY)) {
+    LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: pbuf too small\n"));
+    /* free pbuf and return */
+    goto memerr;
+  }
+
+  /* copy dns payload inside static buffer for processing */
+  if (pbuf_copy_partial(p, &hdr, SIZEOF_DNS_HDR, 0) == SIZEOF_DNS_HDR) {
+    /* Match the ID in the DNS header with the name table. */
+    txid = lwip_htons(hdr.id);
+    for (i = 0; i < DNS_TABLE_SIZE; i++) {
+      const struct dns_table_entry *entry = &dns_table[i];
+      if ((entry->state == DNS_STATE_ASKING) &&
+          (entry->txid == txid)) {
+
+        /* We only care about the question(s) and the answers. The authrr
+           and the extrarr are simply discarded. */
+        nquestions = lwip_htons(hdr.numquestions);
+        nanswers = lwip_htons(hdr.numanswers);
+
+        /* Check for correct response. */
+        if ((hdr.flags1 & DNS_FLAG1_RESPONSE) == 0) {
+          LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": not a response\n", entry->name));
+          goto memerr; /* ignore this packet */
+        }
+        if (nquestions != 1) {
+          LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response does not match the query\n", entry->name));
+          goto memerr; /* ignore this packet */
+        }
+
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+        if (!entry->is_mdns)
+#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */
+        {
+          /* Check whether the response comes from the same network address to which the
+             question was sent. (RFC 5452) */
+          if (!ip_addr_cmp(addr, &dns_servers[entry->server_idx])) {
+            goto memerr; /* ignore this packet */
+          }
+        }
+
+        /* Check whether the name in the "question" part matches the name in the entry,
+           and skip it if so. */
+        res_idx = dns_compare_name(entry->name, p, SIZEOF_DNS_HDR);
+        if (res_idx == 0xFFFF) {
+          LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response does not match the query\n", entry->name));
+          goto memerr; /* ignore this packet */
+        }
+
+        /* check if "question" part matches the request */
+        if (pbuf_copy_partial(p, &qry, SIZEOF_DNS_QUERY, res_idx) != SIZEOF_DNS_QUERY) {
+          goto memerr; /* ignore this packet */
+        }
+        if ((qry.cls != PP_HTONS(DNS_RRCLASS_IN)) ||
+            (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_AAAA))) ||
+            (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_A)))) {
+          LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response does not match the query\n", entry->name));
+          goto memerr; /* ignore this packet */
+        }
+        /* skip the rest of the "question" part */
+        res_idx += SIZEOF_DNS_QUERY;
+
+        /* Check for error. If so, call callback to inform. */
+        if (hdr.flags2 & DNS_FLAG2_ERR_MASK) {
+          LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in flags\n", entry->name));
+        } else {
+          while ((nanswers > 0) && (res_idx < p->tot_len)) {
+            /* skip answer resource record's host name */
+            res_idx = dns_skip_name(p, res_idx);
+            if (res_idx == 0xFFFF) {
+              goto memerr; /* ignore this packet */
+            }
+
+            /* Check for IP address type and Internet class. Others are discarded.
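+               On the wire each answer continues with type(2), class(2), TTL(4)
+               and rdlength(2) (RFC 1035 section 4.1.3); for an A record the
+               rdlength is 4 and the rdata is the IPv4 address itself.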
+            */
+            if (pbuf_copy_partial(p, &ans, SIZEOF_DNS_ANSWER, res_idx) != SIZEOF_DNS_ANSWER) {
+              goto memerr; /* ignore this packet */
+            }
+            res_idx += SIZEOF_DNS_ANSWER;
+
+            if (ans.cls == PP_HTONS(DNS_RRCLASS_IN)) {
+#if LWIP_IPV4
+              if ((ans.type == PP_HTONS(DNS_RRTYPE_A)) && (ans.len == PP_HTONS(sizeof(ip4_addr_t)))) {
+#if LWIP_IPV4 && LWIP_IPV6
+                if (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype))
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+                {
+                  ip4_addr_t ip4addr;
+                  /* read the IP address after answer resource record's header */
+                  if (pbuf_copy_partial(p, &ip4addr, sizeof(ip4_addr_t), res_idx) != sizeof(ip4_addr_t)) {
+                    goto memerr; /* ignore this packet */
+                  }
+                  ip_addr_copy_from_ip4(dns_table[i].ipaddr, ip4addr);
+                  pbuf_free(p);
+                  /* handle correct response */
+                  dns_correct_response(i, lwip_ntohl(ans.ttl));
+                  return;
+                }
+              }
+#endif /* LWIP_IPV4 */
+#if LWIP_IPV6
+              if ((ans.type == PP_HTONS(DNS_RRTYPE_AAAA)) && (ans.len == PP_HTONS(sizeof(ip6_addr_t)))) {
+#if LWIP_IPV4 && LWIP_IPV6
+                if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype))
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+                {
+                  ip6_addr_t ip6addr;
+                  /* read the IP address after answer resource record's header */
+                  if (pbuf_copy_partial(p, &ip6addr, sizeof(ip6_addr_t), res_idx) != sizeof(ip6_addr_t)) {
+                    goto memerr; /* ignore this packet */
+                  }
+                  ip_addr_copy_from_ip6(dns_table[i].ipaddr, ip6addr);
+                  pbuf_free(p);
+                  /* handle correct response */
+                  dns_correct_response(i, lwip_ntohl(ans.ttl));
+                  return;
+                }
+              }
+#endif /* LWIP_IPV6 */
+            }
+            /* skip this answer */
+            if ((int)(res_idx + lwip_htons(ans.len)) > 0xFFFF) {
+              goto memerr; /* ignore this packet */
+            }
+            res_idx += lwip_htons(ans.len);
+            --nanswers;
+          }
+#if LWIP_IPV4 && LWIP_IPV6
+          if ((entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) ||
+              (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) {
+            if (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) {
+              /* IPv4 failed, try IPv6 */
+              dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6;
+            } else {
+              /* IPv6 failed, try IPv4 */
+              dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4;
+            }
+            pbuf_free(p);
+            dns_table[i].state = DNS_STATE_NEW;
+            dns_check_entry(i);
+            return;
+          }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+          LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in response\n", entry->name));
+        }
+        /* call callback to indicate error, clean up memory and return */
+        pbuf_free(p);
+        dns_call_found(i, NULL);
+        dns_table[i].state = DNS_STATE_UNUSED;
+        return;
+      }
+    }
+  }
+
+memerr:
+  /* deallocate memory and return */
+  pbuf_free(p);
+  return;
+}
+
+/**
+ * Queues a new hostname to resolve and sends out a DNS query for that hostname
+ *
+ * @param name the hostname that is to be queried
+ * @param hostnamelen length of the hostname
+ * @param found a callback function to be called on success, failure or timeout
+ * @param callback_arg argument to pass to the callback function
+ * @return err_t return code.
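+ *
+ * The found callback has the lwIP signature
+ *   void (*dns_found_callback)(const char *name, const ip_addr_t *ipaddr, void *callback_arg);
+ * where ipaddr is NULL if the query failed or timed out.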
+ */
+static err_t
+dns_enqueue(const char *name, size_t hostnamelen, dns_found_callback found,
+            void *callback_arg LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype) LWIP_DNS_ISMDNS_ARG(u8_t is_mdns))
+{
+  u8_t i;
+  u8_t lseq, lseqi;
+  struct dns_table_entry *entry = NULL;
+  size_t namelen;
+  struct dns_req_entry* req;
+
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0)
+  u8_t r;
+  /* check for duplicate entries */
+  for (i = 0; i < DNS_TABLE_SIZE; i++) {
+    if ((dns_table[i].state == DNS_STATE_ASKING) &&
+        (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0)) {
+#if LWIP_IPV4 && LWIP_IPV6
+      if (dns_table[i].reqaddrtype != dns_addrtype) {
+        /* requested address types don't match; this can lead to 2 concurrent
+           requests, but mixing the address types for the same host should
+           not be that common */
+        continue;
+      }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+      /* this is a duplicate entry, find a free request entry */
+      for (r = 0; r < DNS_MAX_REQUESTS; r++) {
+        if (dns_requests[r].found == 0) {
+          dns_requests[r].found = found;
+          dns_requests[r].arg = callback_arg;
+          dns_requests[r].dns_table_idx = i;
+          LWIP_DNS_SET_ADDRTYPE(dns_requests[r].reqaddrtype, dns_addrtype);
+          LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": duplicate request\n", name));
+          return ERR_INPROGRESS;
+        }
+      }
+    }
+  }
+  /* no duplicate entries found */
+#endif
+
+  /* search for an unused entry, or the oldest one */
+  lseq = 0;
+  lseqi = DNS_TABLE_SIZE;
+  for (i = 0; i < DNS_TABLE_SIZE; ++i) {
+    entry = &dns_table[i];
+    /* is it an unused entry? */
+    if (entry->state == DNS_STATE_UNUSED) {
+      break;
+    }
+    /* check if this is the oldest completed entry */
+    if (entry->state == DNS_STATE_DONE) {
+      u8_t age = dns_seqno - entry->seqno;
+      if (age > lseq) {
+        lseq = age;
+        lseqi = i;
+      }
+    }
+  }
+
+  /* if we haven't found an unused entry, use the oldest completed one */
+  if (i == DNS_TABLE_SIZE) {
+    if ((lseqi >= DNS_TABLE_SIZE) || (dns_table[lseqi].state != DNS_STATE_DONE)) {
+      /* no entry can be used now, table is full */
+      LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS entries table is full\n", name));
+      return ERR_MEM;
+    } else {
+      /* use the oldest completed one */
+      i = lseqi;
+      entry = &dns_table[i];
+    }
+  }
+
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0)
+  /* find a free request entry */
+  req = NULL;
+  for (r = 0; r < DNS_MAX_REQUESTS; r++) {
+    if (dns_requests[r].found == NULL) {
+      req = &dns_requests[r];
+      break;
+    }
+  }
+  if (req == NULL) {
+    /* no request entry can be used now, table is full */
+    LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS request entries table is full\n", name));
+    return ERR_MEM;
+  }
+  req->dns_table_idx = i;
+#else
+  /* in this configuration, the entry index is the same as the request index */
+  req = &dns_requests[i];
+#endif
+
+  /* use this entry */
+  LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS entry %"U16_F"\n", name, (u16_t)(i)));
+
+  /* fill the entry */
+  entry->state = DNS_STATE_NEW;
+  entry->seqno = dns_seqno;
+  LWIP_DNS_SET_ADDRTYPE(entry->reqaddrtype, dns_addrtype);
+  LWIP_DNS_SET_ADDRTYPE(req->reqaddrtype, dns_addrtype);
+  req->found = found;
+  req->arg = callback_arg;
+  namelen = LWIP_MIN(hostnamelen, DNS_MAX_NAME_LENGTH-1);
+  MEMCPY(entry->name, name, namelen);
+  entry->name[namelen] = 0;
+
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0)
+  entry->pcb_idx = dns_alloc_pcb();
+  if (entry->pcb_idx >= DNS_MAX_SOURCE_PORTS) {
+    /* failed to get a UDP pcb */
+    LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": failed to allocate a pcb\n", name));
+    entry->state = DNS_STATE_UNUSED;
+    req->found = NULL;
+    return ERR_MEM;
+  }
+  LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS pcb %"U16_F"\n", name, (u16_t)(entry->pcb_idx)));
+#endif
+
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+  entry->is_mdns = is_mdns;
+#endif
+
+  dns_seqno++;
+
+  /* send the query right away instead of waiting for the next timer tick */
+  dns_check_entry(i);
+
+  /* dns query is enqueued */
+  return ERR_INPROGRESS;
+}
+
+/**
+ * @ingroup dns
+ * Resolve a hostname (string) into an IP address.
+ * NON-BLOCKING callback version for use with raw API!!!
+ *
+ * Returns immediately with one of the following err_t return codes:
+ * - ERR_OK if the hostname is a valid IP address string or the host
+ *   name is already in the local names table.
+ * - ERR_INPROGRESS if a request has been enqueued to be sent to the DNS server
+ *   for resolution.
+ * - ERR_ARG: dns client not initialized or invalid hostname
+ *
+ * @param hostname the hostname that is to be queried
+ * @param addr pointer to a ip_addr_t where to store the address if it is already
+ *             cached in the dns_table (only valid if ERR_OK is returned!)
+ * @param found a callback function to be called on success, failure or timeout (only if
+ *              ERR_INPROGRESS is returned!)
+ * @param callback_arg argument to pass to the callback function
+ * @return an err_t return code.
+ */
+err_t
+dns_gethostbyname(const char *hostname, ip_addr_t *addr, dns_found_callback found,
+                  void *callback_arg)
+{
+  return dns_gethostbyname_addrtype(hostname, addr, found, callback_arg, LWIP_DNS_ADDRTYPE_DEFAULT);
+}
+
+/**
+ * @ingroup dns
+ * Like dns_gethostbyname, but the returned address type can be controlled:
+ * @param hostname the hostname that is to be queried
+ * @param addr pointer to a ip_addr_t where to store the address if it is already
+ *             cached in the dns_table (only valid if ERR_OK is returned!)
+ * @param found a callback function to be called on success, failure or timeout (only if
+ *              ERR_INPROGRESS is returned!)
+ * @param callback_arg argument to pass to the callback function
+ * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 first, try IPv6 only if IPv4 fails
+ *                     - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 first, try IPv4 only if IPv6 fails
+ *                     - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only
+ *                     - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only
+ */
+err_t
+dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, dns_found_callback found,
+                           void *callback_arg, u8_t dns_addrtype)
+{
+  size_t hostnamelen;
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+  u8_t is_mdns;
+#endif
+  /* not initialized or no valid server yet, or invalid addr pointer
+   * or invalid hostname or invalid hostname length */
+  if ((addr == NULL) ||
+      (!hostname) || (!hostname[0])) {
+    return ERR_ARG;
+  }
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0)
+  if (dns_pcbs[0] == NULL) {
+    return ERR_ARG;
+  }
+#endif
+  hostnamelen = strlen(hostname);
+  if (hostnamelen >= DNS_MAX_NAME_LENGTH) {
+    LWIP_DEBUGF(DNS_DEBUG, ("dns_gethostbyname: name too long to resolve"));
+    return ERR_ARG;
+  }
+
+#if LWIP_HAVE_LOOPIF
+  if (strcmp(hostname, "localhost") == 0) {
+    ip_addr_set_loopback(LWIP_DNS_ADDRTYPE_IS_IPV6(dns_addrtype), addr);
+    return ERR_OK;
+  }
+#endif /* LWIP_HAVE_LOOPIF */
+
+  /* host name already in octet notation?
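+     (e.g. "192.0.2.1" or "2001:db8::1")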
+     set ip addr and return ERR_OK */
+  if (ipaddr_aton(hostname, addr)) {
+#if LWIP_IPV4 && LWIP_IPV6
+    if ((IP_IS_V6(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV4)) ||
+        (IP_IS_V4(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV6)))
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+    {
+      return ERR_OK;
+    }
+  }
+  /* already have this address cached? */
+  if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) {
+    return ERR_OK;
+  }
+#if LWIP_IPV4 && LWIP_IPV6
+  if ((dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) || (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) {
+    /* fall back to the 2nd IP type and try the lookup again */
+    u8_t fallback;
+    if (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) {
+      fallback = LWIP_DNS_ADDRTYPE_IPV6;
+    } else {
+      fallback = LWIP_DNS_ADDRTYPE_IPV4;
+    }
+    if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(fallback)) == ERR_OK) {
+      return ERR_OK;
+    }
+  }
+#else /* LWIP_IPV4 && LWIP_IPV6 */
+  LWIP_UNUSED_ARG(dns_addrtype);
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+  if (strstr(hostname, ".local") == &hostname[hostnamelen] - 6) {
+    is_mdns = 1;
+  } else {
+    is_mdns = 0;
+  }
+
+  if (!is_mdns)
+#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */
+  {
+    /* prevent calling found callback if no server is set, return error instead */
+    if (ip_addr_isany_val(dns_servers[0])) {
+      return ERR_VAL;
+    }
+  }
+
+  /* queue query with specified callback */
+  return dns_enqueue(hostname, hostnamelen, found, callback_arg LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)
+                     LWIP_DNS_ISMDNS_ARG(is_mdns));
+}
+
+#endif /* LWIP_DNS */
diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c
new file mode 100644
index 000000000..589157b0d
--- /dev/null
+++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c
@@ -0,0 +1,609 @@
+/**
+ * @file
+ * Internet checksum functions.
+ *
+ * These are some reference implementations of the checksum algorithm, with the
+ * aim of being simple, correct and fully portable. Checksumming is the
+ * first thing you would want to optimize for your platform. If you create
+ * your own version, link it in and in your cc.h put:
+ *
+ * \#define LWIP_CHKSUM your_checksum_routine
+ *
+ * Or you can select from the implementations below by defining
+ * LWIP_CHKSUM_ALGORITHM to 1, 2 or 3.
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels
+ *
+ */
+
+#include "lwip/opt.h"
+
+#include "lwip/inet_chksum.h"
+#include "lwip/def.h"
+#include "lwip/ip_addr.h"
+
+#include <string.h>
+
+#ifndef LWIP_CHKSUM
+# define LWIP_CHKSUM lwip_standard_chksum
+# ifndef LWIP_CHKSUM_ALGORITHM
+#  define LWIP_CHKSUM_ALGORITHM 2
+# endif
+u16_t lwip_standard_chksum(const void *dataptr, int len);
+#endif
+/* If none set: */
+#ifndef LWIP_CHKSUM_ALGORITHM
+# define LWIP_CHKSUM_ALGORITHM 0
+#endif
+
+#if (LWIP_CHKSUM_ALGORITHM == 1) /* Version #1 */
+/**
+ * lwip checksum
+ *
+ * @param dataptr points to start of data to be summed at any boundary
+ * @param len length of data to be summed
+ * @return host order (!) lwip checksum (non-inverted Internet sum)
+ *
+ * @note accumulator size limits summable length to 64k
+ * @note host endianness is irrelevant (p3 RFC1071)
+ */
+u16_t
+lwip_standard_chksum(const void *dataptr, int len)
+{
+  u32_t acc;
+  u16_t src;
+  const u8_t *octetptr;
+
+  acc = 0;
+  /* dataptr may be at odd or even addresses */
+  octetptr = (const u8_t*)dataptr;
+  while (len > 1) {
+    /* declare first octet as most significant
+       thus assume network order, ignoring host order */
+    src = (*octetptr) << 8;
+    octetptr++;
+    /* declare second octet as least significant */
+    src |= (*octetptr);
+    octetptr++;
+    acc += src;
+    len -= 2;
+  }
+  if (len > 0) {
+    /* accumulate remaining octet */
+    src = (*octetptr) << 8;
+    acc += src;
+  }
+  /* add deferred carry bits */
+  acc = (acc >> 16) + (acc & 0x0000ffffUL);
+  if ((acc & 0xffff0000UL) != 0) {
+    acc = (acc >> 16) + (acc & 0x0000ffffUL);
+  }
+  /* This may be a little confusing: reorder sum using lwip_htons()
+     instead of lwip_ntohs() since it has a little less call overhead.
+     The caller must invert bits for Internet sum! */
+  return lwip_htons((u16_t)acc);
+}
+#endif
+
+#if (LWIP_CHKSUM_ALGORITHM == 2) /* Alternative version #2 */
+/*
+ * Curt McDowell
+ * Broadcom Corp.
+ * csm@broadcom.com
+ *
+ * IP checksum two bytes at a time with support for
+ * unaligned buffer.
+ * Works for len up to and including 0x20000.
+ * by Curt McDowell, Broadcom Corp. 12/08/2005
+ *
+ * @param dataptr points to start of data to be summed at any boundary
+ * @param len length of data to be summed
+ * @return host order (!) lwip checksum (non-inverted Internet sum)
+ */
+u16_t
+lwip_standard_chksum(const void *dataptr, int len)
+{
+  const u8_t *pb = (const u8_t *)dataptr;
+  const u16_t *ps;
+  u16_t t = 0;
+  u32_t sum = 0;
+  int odd = ((mem_ptr_t)pb & 1);
+
+  /* Get aligned to u16_t */
+  if (odd && len > 0) {
+    ((u8_t *)&t)[1] = *pb++;
+    len--;
+  }
+
+  /* Add the bulk of the data */
+  ps = (const u16_t *)(const void *)pb;
+  while (len > 1) {
+    sum += *ps++;
+    len -= 2;
+  }
+
+  /* Consume left-over byte, if any */
+  if (len > 0) {
+    ((u8_t *)&t)[0] = *(const u8_t *)ps;
+  }
+
+  /* Add end bytes */
+  sum += t;
+
+  /* Fold 32-bit sum to 16 bits
+     calling this twice is probably faster than if statements...
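+     (FOLD_U32T(u) from def.h adds the upper 16 bits into the lower 16 bits:
+     ((u) >> 16) + ((u) & 0x0000ffffUL). The first fold can itself produce a
+     carry into bit 16, which is why it is applied twice.)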
+     */
+  sum = FOLD_U32T(sum);
+  sum = FOLD_U32T(sum);
+
+  /* Swap if alignment was odd */
+  if (odd) {
+    sum = SWAP_BYTES_IN_WORD(sum);
+  }
+
+  return (u16_t)sum;
+}
+#endif
+
+#if (LWIP_CHKSUM_ALGORITHM == 3) /* Alternative version #3 */
+/**
+ * An optimized checksum routine. Basically, it uses loop-unrolling on
+ * the checksum loop, treating the head and tail bytes specially, whereas
+ * the inner loop acts on 8 bytes at a time.
+ *
+ * @arg start of buffer to be checksummed. May be an odd byte address.
+ * @len number of bytes in the buffer to be checksummed.
+ * @return host order (!) lwip checksum (non-inverted Internet sum)
+ *
+ * by Curt McDowell, Broadcom Corp. December 8th, 2005
+ */
+u16_t
+lwip_standard_chksum(const void *dataptr, int len)
+{
+  const u8_t *pb = (const u8_t *)dataptr;
+  const u16_t *ps;
+  u16_t t = 0;
+  const u32_t *pl;
+  u32_t sum = 0, tmp;
+  /* starts at odd byte address? */
+  int odd = ((mem_ptr_t)pb & 1);
+
+  if (odd && len > 0) {
+    ((u8_t *)&t)[1] = *pb++;
+    len--;
+  }
+
+  ps = (const u16_t *)(const void*)pb;
+
+  if (((mem_ptr_t)ps & 3) && len > 1) {
+    sum += *ps++;
+    len -= 2;
+  }
+
+  pl = (const u32_t *)(const void*)ps;
+
+  while (len > 7) {
+    tmp = sum + *pl++;          /* ping */
+    if (tmp < sum) {
+      tmp++;                    /* add back carry */
+    }
+
+    sum = tmp + *pl++;          /* pong */
+    if (sum < tmp) {
+      sum++;                    /* add back carry */
+    }
+
+    len -= 8;
+  }
+
+  /* make room in upper bits */
+  sum = FOLD_U32T(sum);
+
+  ps = (const u16_t *)pl;
+
+  /* 16-bit aligned word remaining? */
+  while (len > 1) {
+    sum += *ps++;
+    len -= 2;
+  }
+
+  /* dangling tail byte remaining? */
+  if (len > 0) {                /* include odd byte */
+    ((u8_t *)&t)[0] = *(const u8_t *)ps;
+  }
+
+  sum += t;                     /* add end bytes */
+
+  /* Fold 32-bit sum to 16 bits
+     calling this twice is probably faster than if statements... */
+  sum = FOLD_U32T(sum);
+  sum = FOLD_U32T(sum);
+
+  if (odd) {
+    sum = SWAP_BYTES_IN_WORD(sum);
+  }
+
+  return (u16_t)sum;
+}
+#endif
+
+/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */
+static u16_t
+inet_cksum_pseudo_base(struct pbuf *p, u8_t proto, u16_t proto_len, u32_t acc)
+{
+  struct pbuf *q;
+  u8_t swapped = 0;
+
+  /* iterate through all pbuf in chain */
+  for (q = p; q != NULL; q = q->next) {
+    LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n",
+                             (void *)q, (void *)q->next));
+    acc += LWIP_CHKSUM(q->payload, q->len);
+    /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/
+    /* just executing this next line is probably faster than the if statement needed
+       to check whether we really need to execute it, and does no harm */
+    acc = FOLD_U32T(acc);
+    if (q->len % 2 != 0) {
+      swapped = 1 - swapped;
+      acc = SWAP_BYTES_IN_WORD(acc);
+    }
+    /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/
+  }
+
+  if (swapped) {
+    acc = SWAP_BYTES_IN_WORD(acc);
+  }
+
+  acc += (u32_t)lwip_htons((u16_t)proto);
+  acc += (u32_t)lwip_htons(proto_len);
+
+  /* Fold 32-bit sum to 16 bits
+     calling this twice is probably faster than if statements... */
+  acc = FOLD_U32T(acc);
+  acc = FOLD_U32T(acc);
+  LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc));
+  return (u16_t)~(acc & 0xffffUL);
+}
+
+#if LWIP_IPV4
+/* inet_chksum_pseudo:
+ *
+ * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain.
+ * IP addresses are expected to be in network byte order.
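+ * The IPv4 pseudo header summed in here consists of src(4), dst(4), a zero
+ * byte, the protocol(1) and the TCP/UDP length(2), as required by RFC 768/793.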
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip4_addr_t *src, const ip4_addr_t *dest) +{ + u32_t acc; + u32_t addr; + + addr = ip4_addr_get_u32(src); + acc = (addr & 0xffffUL); + acc += ((addr >> 16) & 0xffffUL); + addr = ip4_addr_get_u32(dest); + acc += (addr & 0xffffUL); + acc += ((addr >> 16) & 0xffffUL); + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_base(p, proto, proto_len, acc); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain. + * IPv6 addresses are expected to be in network byte order. + * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param proto ipv6 protocol/next header (used for checksum of pseudo header) + * @param proto_len length of the ipv6 payload (used for checksum of pseudo header) + * @param src source ipv6 address (used for checksum of pseudo header) + * @param dest destination ipv6 address (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip6_addr_t *src, const ip6_addr_t *dest) +{ + u32_t acc = 0; + u32_t addr; + u8_t addr_part; + + for (addr_part = 0; addr_part < 4; addr_part++) { + addr = src->addr[addr_part]; + acc += (addr & 0xffffUL); + acc += ((addr >> 16) & 0xffffUL); + addr = dest->addr[addr_part]; + acc += (addr & 0xffffUL); + acc += ((addr >> 16) & 0xffffUL); + } + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_base(p, proto, proto_len, acc); +} +#endif /* LWIP_IPV6 */ + +/* ip_chksum_pseudo: + * + * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
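+ * A typical call, roughly as the UDP output code uses it (sketch only):
+ *   udphdr->chksum = ip_chksum_pseudo(q, IP_PROTO_UDP, q->tot_len, src_ip, dst_ip);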
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip_addr_t *src, const ip_addr_t *dest) +{ +#if LWIP_IPV6 + if (IP_IS_V6(dest)) { + return ip6_chksum_pseudo(p, proto, proto_len, ip_2_ip6(src), ip_2_ip6(dest)); + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + { + return inet_chksum_pseudo(p, proto, proto_len, ip_2_ip4(src), ip_2_ip4(dest)); + } +#endif /* LWIP_IPV4 */ +} + +/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */ +static u16_t +inet_cksum_pseudo_partial_base(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, u32_t acc) +{ + struct pbuf *q; + u8_t swapped = 0; + u16_t chklen; + + /* iterate through all pbuf in chain */ + for (q = p; (q != NULL) && (chksum_len > 0); q = q->next) { + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n", + (void *)q, (void *)q->next)); + chklen = q->len; + if (chklen > chksum_len) { + chklen = chksum_len; + } + acc += LWIP_CHKSUM(q->payload, chklen); + chksum_len -= chklen; + LWIP_ASSERT("delete me", chksum_len < 0x7fff); + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/ + /* fold the upper bit down */ + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = 1 - swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/ + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + + acc += (u32_t)lwip_htons((u16_t)proto); + acc += (u32_t)lwip_htons(proto_len); + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc)); + return (u16_t)~(acc & 0xffffUL); +} + +#if LWIP_IPV4 +/* inet_chksum_pseudo_partial: + * + * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
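+ * The _partial variants exist mainly for UDP-Lite (RFC 3828), where the
+ * checksum may deliberately cover only the first chksum_len payload bytes.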
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest) +{ + u32_t acc; + u32_t addr; + + addr = ip4_addr_get_u32(src); + acc = (addr & 0xffffUL); + acc += ((addr >> 16) & 0xffffUL); + addr = ip4_addr_get_u32(dest); + acc += (addr & 0xffffUL); + acc += ((addr >> 16) & 0xffffUL); + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain. + * IPv6 addresses are expected to be in network byte order. Will only compute for a + * portion of the payload. + * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param proto ipv6 protocol/next header (used for checksum of pseudo header) + * @param proto_len length of the ipv6 payload (used for checksum of pseudo header) + * @param chksum_len number of payload bytes used to compute chksum + * @param src source ipv6 address (used for checksum of pseudo header) + * @param dest destination ipv6 address (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest) +{ + u32_t acc = 0; + u32_t addr; + u8_t addr_part; + + for (addr_part = 0; addr_part < 4; addr_part++) { + addr = src->addr[addr_part]; + acc += (addr & 0xffffUL); + acc += ((addr >> 16) & 0xffffUL); + addr = dest->addr[addr_part]; + acc += (addr & 0xffffUL); + acc += ((addr >> 16) & 0xffffUL); + } + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc); +} +#endif /* LWIP_IPV6 */ + +/* ip_chksum_pseudo_partial: + * + * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain. 
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest) +{ +#if LWIP_IPV6 + if (IP_IS_V6(dest)) { + return ip6_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip6(src), ip_2_ip6(dest)); + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + { + return inet_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip4(src), ip_2_ip4(dest)); + } +#endif /* LWIP_IPV4 */ +} + +/* inet_chksum: + * + * Calculates the Internet checksum over a portion of memory. Used primarily for IP + * and ICMP. + * + * @param dataptr start of the buffer to calculate the checksum (no alignment needed) + * @param len length of the buffer to calculate the checksum + * @return checksum (as u16_t) to be saved directly in the protocol header + */ + +u16_t +inet_chksum(const void *dataptr, u16_t len) +{ + return (u16_t)~(unsigned int)LWIP_CHKSUM(dataptr, len); +} + +/** + * Calculate a checksum over a chain of pbufs (without pseudo-header, much like + * inet_chksum only pbufs are used). + * + * @param p pbuf chain over that the checksum should be calculated + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pbuf(struct pbuf *p) +{ + u32_t acc; + struct pbuf *q; + u8_t swapped; + + acc = 0; + swapped = 0; + for (q = p; q != NULL; q = q->next) { + acc += LWIP_CHKSUM(q->payload, q->len); + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = 1 - swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + return (u16_t)~(acc & 0xffffUL); +} + +/* These are some implementations for LWIP_CHKSUM_COPY, which copies data + * like MEMCPY but generates a checksum at the same time. Since this is a + * performance-sensitive function, you might want to create your own version + * in assembly targeted at your hardware by defining it in lwipopts.h: + * #define LWIP_CHKSUM_COPY(dst, src, len) your_chksum_copy(dst, src, len) + */ + +#if (LWIP_CHKSUM_COPY_ALGORITHM == 1) /* Version #1 */ +/** Safe but slow: first call MEMCPY, then call LWIP_CHKSUM. + * For architectures with big caches, data might still be in cache when + * generating the checksum after copying. + */ +u16_t +lwip_chksum_copy(void *dst, const void *src, u16_t len) +{ + MEMCPY(dst, src, len); + return LWIP_CHKSUM(dst, len); +} +#endif /* (LWIP_CHKSUM_COPY_ALGORITHM == 1) */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/init.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/init.c new file mode 100644 index 000000000..5e50858c0 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/init.c @@ -0,0 +1,385 @@ +/** + * @file + * Modules initialization + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + */ + +#include "lwip/opt.h" + +#include "lwip/init.h" +#include "lwip/stats.h" +#include "lwip/sys.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/sockets.h" +#include "lwip/ip.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/timeouts.h" +#include "lwip/etharp.h" +#include "lwip/ip6.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/api.h" + +#include "netif/ppp/ppp_opts.h" +#include "netif/ppp/ppp_impl.h" + +#ifndef LWIP_SKIP_PACKING_CHECK + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct packed_struct_test +{ + PACK_STRUCT_FLD_8(u8_t dummy1); + PACK_STRUCT_FIELD(u32_t dummy2); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define PACKED_STRUCT_TEST_EXPECTED_SIZE 5 + +#endif + +/* Compile-time sanity checks for configuration errors. + * These can be done independently of LWIP_DEBUG, without penalty. 
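+ * For example, enabling LWIP_DNS while LWIP_UDP is 0 in lwipopts.h stops the
+ * build at the matching #error below instead of failing at run time.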
+ */ +#ifndef BYTE_ORDER + #error "BYTE_ORDER is not defined, you have to define it in your cc.h" +#endif +#if (!IP_SOF_BROADCAST && IP_SOF_BROADCAST_RECV) + #error "If you want to use broadcast filter per pcb on recv operations, you have to define IP_SOF_BROADCAST=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_UDPLITE) + #error "If you want to use UDP Lite, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_DHCP) + #error "If you want to use DHCP, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_MULTICAST_TX_OPTIONS) + #error "If you want to use IGMP/LWIP_MULTICAST_TX_OPTIONS, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_DNS) + #error "If you want to use DNS, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if !MEMP_MEM_MALLOC /* MEMP_NUM_* checks are disabled when not using the pool allocator */ +#if (LWIP_ARP && ARP_QUEUEING && (MEMP_NUM_ARP_QUEUE<=0)) + #error "If you want to use ARP Queueing, you have to define MEMP_NUM_ARP_QUEUE>=1 in your lwipopts.h" +#endif +#if (LWIP_RAW && (MEMP_NUM_RAW_PCB<=0)) + #error "If you want to use RAW, you have to define MEMP_NUM_RAW_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_UDP && (MEMP_NUM_UDP_PCB<=0)) + #error "If you want to use UDP, you have to define MEMP_NUM_UDP_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_TCP && (MEMP_NUM_TCP_PCB<=0)) + #error "If you want to use TCP, you have to define MEMP_NUM_TCP_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && (MEMP_NUM_IGMP_GROUP<=1)) + #error "If you want to use IGMP, you have to define MEMP_NUM_IGMP_GROUP>1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && !LWIP_MULTICAST_TX_OPTIONS) + #error "If you want to use IGMP, you have to define LWIP_MULTICAST_TX_OPTIONS==1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && !LWIP_IPV4) + #error "IGMP needs LWIP_IPV4 enabled in your lwipopts.h" +#endif +#if (LWIP_MULTICAST_TX_OPTIONS && !LWIP_IPV4) + #error "LWIP_MULTICAST_TX_OPTIONS needs LWIP_IPV4 enabled in your lwipopts.h" +#endif +#if ((LWIP_NETCONN || LWIP_SOCKET) && (MEMP_NUM_TCPIP_MSG_API<=0)) + #error "If you want to use Sequential API, you have to define MEMP_NUM_TCPIP_MSG_API>=1 in your lwipopts.h" +#endif +/* There must be sufficient timeouts, taking into account requirements of the subsystems. */ +#if LWIP_TIMERS && (MEMP_NUM_SYS_TIMEOUT < (LWIP_TCP + IP_REASSEMBLY + LWIP_ARP + (2*LWIP_DHCP) + LWIP_AUTOIP + LWIP_IGMP + LWIP_DNS + PPP_SUPPORT + (LWIP_IPV6 ? (1 + LWIP_IPV6_REASS + LWIP_IPV6_MLD) : 0))) + #error "MEMP_NUM_SYS_TIMEOUT is too low to accomodate all required timeouts" +#endif +#if (IP_REASSEMBLY && (MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS)) + #error "MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS doesn't make sense since each struct ip_reassdata must hold 2 pbufs at least!" +#endif +#endif /* !MEMP_MEM_MALLOC */ +#if LWIP_WND_SCALE +#if (LWIP_TCP && (TCP_WND > 0xffffffff)) + #error "If you want to use TCP, TCP_WND must fit in an u32_t, so, you have to reduce it in your lwipopts.h" +#endif +#if (LWIP_TCP && (TCP_RCV_SCALE > 14)) + #error "The maximum valid window scale value is 14!" +#endif +#if (LWIP_TCP && (TCP_WND > (0xFFFFU << TCP_RCV_SCALE))) + #error "TCP_WND is bigger than the configured LWIP_WND_SCALE allows!" +#endif +#if (LWIP_TCP && ((TCP_WND >> TCP_RCV_SCALE) == 0)) + #error "TCP_WND is too small for the configured LWIP_WND_SCALE (results in zero window)!" 
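+/* Worked example: with TCP_RCV_SCALE == 2, the window value is transmitted
+   right-shifted by 2, so TCP_WND may be at most (0xFFFF << 2) == 262140 and
+   must be at least (1 << 2) so that the advertised window is not zero. */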
+#endif +#else /* LWIP_WND_SCALE */ +#if (LWIP_TCP && (TCP_WND > 0xffff)) + #error "If you want to use TCP, TCP_WND must fit in an u16_t, so, you have to reduce it in your lwipopts.h (or enable window scaling)" +#endif +#endif /* LWIP_WND_SCALE */ +#if (LWIP_TCP && (TCP_SND_QUEUELEN > 0xffff)) + #error "If you want to use TCP, TCP_SND_QUEUELEN must fit in an u16_t, so, you have to reduce it in your lwipopts.h" +#endif +#if (LWIP_TCP && (TCP_SND_QUEUELEN < 2)) + #error "TCP_SND_QUEUELEN must be at least 2 for no-copy TCP writes to work" +#endif +#if (LWIP_TCP && ((TCP_MAXRTX > 12) || (TCP_SYNMAXRTX > 12))) + #error "If you want to use TCP, TCP_MAXRTX and TCP_SYNMAXRTX must less or equal to 12 (due to tcp_backoff table), so, you have to reduce them in your lwipopts.h" +#endif +#if (LWIP_TCP && TCP_LISTEN_BACKLOG && ((TCP_DEFAULT_LISTEN_BACKLOG < 0) || (TCP_DEFAULT_LISTEN_BACKLOG > 0xff))) + #error "If you want to use TCP backlog, TCP_DEFAULT_LISTEN_BACKLOG must fit into an u8_t" +#endif +#if (LWIP_NETIF_API && (NO_SYS==1)) + #error "If you want to use NETIF API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if ((LWIP_SOCKET || LWIP_NETCONN) && (NO_SYS==1)) + #error "If you want to use Sequential API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if (LWIP_PPP_API && (NO_SYS==1)) + #error "If you want to use PPP API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if (LWIP_PPP_API && (PPP_SUPPORT==0)) + #error "If you want to use PPP API, you have to enable PPP_SUPPORT in your lwipopts.h" +#endif +#if (((!LWIP_DHCP) || (!LWIP_AUTOIP)) && LWIP_DHCP_AUTOIP_COOP) + #error "If you want to use DHCP/AUTOIP cooperation mode, you have to define LWIP_DHCP=1 and LWIP_AUTOIP=1 in your lwipopts.h" +#endif +#if (((!LWIP_DHCP) || (!LWIP_ARP)) && DHCP_DOES_ARP_CHECK) + #error "If you want to use DHCP ARP checking, you have to define LWIP_DHCP=1 and LWIP_ARP=1 in your lwipopts.h" +#endif +#if (!LWIP_ARP && LWIP_AUTOIP) + #error "If you want to use AUTOIP, you have to define LWIP_ARP=1 in your lwipopts.h" +#endif +#if (LWIP_TCP && ((LWIP_EVENT_API && LWIP_CALLBACK_API) || (!LWIP_EVENT_API && !LWIP_CALLBACK_API))) + #error "One and exactly one of LWIP_EVENT_API and LWIP_CALLBACK_API has to be enabled in your lwipopts.h" +#endif +#if (MEM_LIBC_MALLOC && MEM_USE_POOLS) + #error "MEM_LIBC_MALLOC and MEM_USE_POOLS may not both be simultaneously enabled in your lwipopts.h" +#endif +#if (MEM_USE_POOLS && !MEMP_USE_CUSTOM_POOLS) + #error "MEM_USE_POOLS requires custom pools (MEMP_USE_CUSTOM_POOLS) to be enabled in your lwipopts.h" +#endif +#if (PBUF_POOL_BUFSIZE <= MEM_ALIGNMENT) + #error "PBUF_POOL_BUFSIZE must be greater than MEM_ALIGNMENT or the offset may take the full first pbuf" +#endif +#if (DNS_LOCAL_HOSTLIST && !DNS_LOCAL_HOSTLIST_IS_DYNAMIC && !(defined(DNS_LOCAL_HOSTLIST_INIT))) + #error "you have to define define DNS_LOCAL_HOSTLIST_INIT {{'host1', 0x123}, {'host2', 0x234}} to initialize DNS_LOCAL_HOSTLIST" +#endif +#if PPP_SUPPORT && !PPPOS_SUPPORT && !PPPOE_SUPPORT && !PPPOL2TP_SUPPORT + #error "PPP_SUPPORT needs at least one of PPPOS_SUPPORT, PPPOE_SUPPORT or PPPOL2TP_SUPPORT turned on" +#endif +#if PPP_SUPPORT && !PPP_IPV4_SUPPORT && !PPP_IPV6_SUPPORT + #error "PPP_SUPPORT needs PPP_IPV4_SUPPORT and/or PPP_IPV6_SUPPORT turned on" +#endif +#if PPP_SUPPORT && PPP_IPV4_SUPPORT && !LWIP_IPV4 + #error "PPP_IPV4_SUPPORT needs LWIP_IPV4 turned on" +#endif +#if PPP_SUPPORT && PPP_IPV6_SUPPORT && !LWIP_IPV6 + #error "PPP_IPV6_SUPPORT needs LWIP_IPV6 turned on" +#endif +#if 
!LWIP_ETHERNET && (LWIP_ARP || PPPOE_SUPPORT) + #error "LWIP_ETHERNET needs to be turned on for LWIP_ARP or PPPOE_SUPPORT" +#endif +#if LWIP_TCPIP_CORE_LOCKING_INPUT && !LWIP_TCPIP_CORE_LOCKING + #error "When using LWIP_TCPIP_CORE_LOCKING_INPUT, LWIP_TCPIP_CORE_LOCKING must be enabled, too" +#endif +#if LWIP_TCP && LWIP_NETIF_TX_SINGLE_PBUF && !TCP_OVERSIZE + #error "LWIP_NETIF_TX_SINGLE_PBUF needs TCP_OVERSIZE enabled to create single-pbuf TCP packets" +#endif +#if LWIP_NETCONN && LWIP_TCP +#if NETCONN_COPY != TCP_WRITE_FLAG_COPY + #error "NETCONN_COPY != TCP_WRITE_FLAG_COPY" +#endif +#if NETCONN_MORE != TCP_WRITE_FLAG_MORE + #error "NETCONN_MORE != TCP_WRITE_FLAG_MORE" +#endif +#endif /* LWIP_NETCONN && LWIP_TCP */ +#if LWIP_SOCKET +/* Check that the SO_* socket options and SOF_* lwIP-internal flags match */ +#if SO_REUSEADDR != SOF_REUSEADDR + #error "WARNING: SO_REUSEADDR != SOF_REUSEADDR" +#endif +#if SO_KEEPALIVE != SOF_KEEPALIVE + #error "WARNING: SO_KEEPALIVE != SOF_KEEPALIVE" +#endif +#if SO_BROADCAST != SOF_BROADCAST + #error "WARNING: SO_BROADCAST != SOF_BROADCAST" +#endif +#endif /* LWIP_SOCKET */ + + +/* Compile-time checks for deprecated options. + */ +#ifdef MEMP_NUM_TCPIP_MSG + #error "MEMP_NUM_TCPIP_MSG option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef TCP_REXMIT_DEBUG + #error "TCP_REXMIT_DEBUG option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef RAW_STATS + #error "RAW_STATS option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef ETHARP_QUEUE_FIRST + #error "ETHARP_QUEUE_FIRST option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef ETHARP_ALWAYS_INSERT + #error "ETHARP_ALWAYS_INSERT option is deprecated. Remove it from your lwipopts.h." +#endif +#if !NO_SYS && LWIP_TCPIP_CORE_LOCKING && LWIP_COMPAT_MUTEX && !defined(LWIP_COMPAT_MUTEX_ALLOWED) + #error "LWIP_COMPAT_MUTEX cannot prevent priority inversion. It is recommended to implement priority-aware mutexes. (Define LWIP_COMPAT_MUTEX_ALLOWED to disable this error.)" +#endif + +#ifndef LWIP_DISABLE_TCP_SANITY_CHECKS +#define LWIP_DISABLE_TCP_SANITY_CHECKS 0 +#endif +#ifndef LWIP_DISABLE_MEMP_SANITY_CHECKS +#define LWIP_DISABLE_MEMP_SANITY_CHECKS 0 +#endif + +/* MEMP sanity checks */ +#if MEMP_MEM_MALLOC +#if !LWIP_DISABLE_MEMP_SANITY_CHECKS +#if LWIP_NETCONN || LWIP_SOCKET +#if !MEMP_NUM_NETCONN && LWIP_SOCKET +#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN cannot be 0 when using sockets!" +#endif +#else /* MEMP_MEM_MALLOC */ +#if MEMP_NUM_NETCONN > (MEMP_NUM_TCP_PCB+MEMP_NUM_TCP_PCB_LISTEN+MEMP_NUM_UDP_PCB+MEMP_NUM_RAW_PCB) +#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN should be less than the sum of MEMP_NUM_{TCP,RAW,UDP}_PCB+MEMP_NUM_TCP_PCB_LISTEN. If you know what you are doing, define LWIP_DISABLE_MEMP_SANITY_CHECKS to 1 to disable this error." +#endif +#endif /* LWIP_NETCONN || LWIP_SOCKET */ +#endif /* !LWIP_DISABLE_MEMP_SANITY_CHECKS */ +#if MEM_USE_POOLS +#error "MEMP_MEM_MALLOC and MEM_USE_POOLS cannot be enabled at the same time" +#endif +#ifdef LWIP_HOOK_MEMP_AVAILABLE +#error "LWIP_HOOK_MEMP_AVAILABLE doesn't make sense with MEMP_MEM_MALLOC" +#endif +#endif /* MEMP_MEM_MALLOC */ + +/* TCP sanity checks */ +#if !LWIP_DISABLE_TCP_SANITY_CHECKS +#if LWIP_TCP +#if !MEMP_MEM_MALLOC && (MEMP_NUM_TCP_SEG < TCP_SND_QUEUELEN) + #error "lwip_sanity_check: WARNING: MEMP_NUM_TCP_SEG should be at least as big as TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." 
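+/* Worked example: with TCP_MSS == 1460 and TCP_SND_BUF == 4 * TCP_MSS, the
+   surrounding checks require TCP_SND_QUEUELEN >= 8 and therefore
+   MEMP_NUM_TCP_SEG >= 8 as well. */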
+#endif +#if TCP_SND_BUF < (2 * TCP_MSS) + #error "lwip_sanity_check: WARNING: TCP_SND_BUF must be at least as much as (2 * TCP_MSS) for things to work smoothly. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SND_QUEUELEN < (2 * (TCP_SND_BUF / TCP_MSS)) + #error "lwip_sanity_check: WARNING: TCP_SND_QUEUELEN must be at least as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SNDLOWAT >= TCP_SND_BUF + #error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must be less than TCP_SND_BUF. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SNDLOWAT >= (0xFFFF - (4 * TCP_MSS)) + #error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must at least be 4*MSS below u16_t overflow!" +#endif +#if TCP_SNDQUEUELOWAT >= TCP_SND_QUEUELEN + #error "lwip_sanity_check: WARNING: TCP_SNDQUEUELOWAT must be less than TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (PBUF_POOL_BUFSIZE <= (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)) + #error "lwip_sanity_check: WARNING: PBUF_POOL_BUFSIZE does not provide enough space for protocol headers. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (TCP_WND > (PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)))) + #error "lwip_sanity_check: WARNING: TCP_WND is larger than space provided by PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - protocol headers). If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_WND < TCP_MSS + #error "lwip_sanity_check: WARNING: TCP_WND is smaller than MSS. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#endif /* LWIP_TCP */ +#endif /* !LWIP_DISABLE_TCP_SANITY_CHECKS */ + +/** + * @ingroup lwip_nosys + * Initialize all modules. + * Use this in NO_SYS mode. Use tcpip_init() otherwise. + */ +void +lwip_init(void) +{ +#ifndef LWIP_SKIP_CONST_CHECK + int a = 0; + LWIP_UNUSED_ARG(a); + LWIP_ASSERT("LWIP_CONST_CAST not implemented correctly. Check your lwIP port.", LWIP_CONST_CAST(void*, &a) == &a); +#endif +#ifndef LWIP_SKIP_PACKING_CHECK + LWIP_ASSERT("Struct packing not implemented correctly. 
Check your lwIP port.", sizeof(struct packed_struct_test) == PACKED_STRUCT_TEST_EXPECTED_SIZE); +#endif + + /* Modules initialization */ + stats_init(); +#if !NO_SYS + sys_init(); +#endif /* !NO_SYS */ + mem_init(); + memp_init(); + pbuf_init(); + netif_init(); +#if LWIP_IPV4 + ip_init(); +#if LWIP_ARP + etharp_init(); +#endif /* LWIP_ARP */ +#endif /* LWIP_IPV4 */ +#if LWIP_RAW + raw_init(); +#endif /* LWIP_RAW */ +#if LWIP_UDP + udp_init(); +#endif /* LWIP_UDP */ +#if LWIP_TCP + tcp_init(); +#endif /* LWIP_TCP */ +#if LWIP_IGMP + igmp_init(); +#endif /* LWIP_IGMP */ +#if LWIP_DNS + dns_init(); +#endif /* LWIP_DNS */ +#if PPP_SUPPORT + ppp_init(); +#endif + +#if LWIP_TIMERS + sys_timeouts_init(); +#endif /* LWIP_TIMERS */ +} diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ip.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ip.c new file mode 100644 index 000000000..1d65cf29b --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ip.c @@ -0,0 +1,124 @@ +/** + * @file + * Common IPv4 and IPv6 code + * + * @defgroup ip IP + * @ingroup callbackstyle_api + * + * @defgroup ip4 IPv4 + * @ingroup ip + * + * @defgroup ip6 IPv6 + * @ingroup ip + * + * @defgroup ipaddr IP address handling + * @ingroup infrastructure + * + * @defgroup ip4addr IPv4 only + * @ingroup ipaddr + * + * @defgroup ip6addr IPv6 only + * @ingroup ipaddr + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 || LWIP_IPV6 + +#include "lwip/ip_addr.h" +#include "lwip/ip.h" + +/** Global data for both IPv4 and IPv6 */ +struct ip_globals ip_data; + +#if LWIP_IPV4 && LWIP_IPV6 + +const ip_addr_t ip_addr_any_type = IPADDR_ANY_TYPE_INIT; + +/** + * @ingroup ipaddr + * Convert IP address string (both versions) to numeric. + * The version is auto-detected from the string. 
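+ *
+ * For example, "192.0.2.1" yields an IPADDR_TYPE_V4 result, while anything
+ * containing a ':', such as "2001:db8::1", is parsed as IPADDR_TYPE_V6.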
+ * + * @param cp IP address string to convert + * @param addr conversion result is stored here + * @return 1 on success, 0 on error + */ +int +ipaddr_aton(const char *cp, ip_addr_t *addr) +{ + if (cp != NULL) { + const char* c; + for (c = cp; *c != 0; c++) { + if (*c == ':') { + /* contains a colon: IPv6 address */ + if (addr) { + IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V6); + } + return ip6addr_aton(cp, ip_2_ip6(addr)); + } else if (*c == '.') { + /* contains a dot: IPv4 address */ + break; + } + } + /* call ip4addr_aton as fallback or if IPv4 was found */ + if (addr) { + IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V4); + } + return ip4addr_aton(cp, ip_2_ip4(addr)); + } + return 0; +} + +/** + * @ingroup lwip_nosys + * If both IP versions are enabled, this function can dispatch packets to the correct one. + * Don't call directly, pass to netif_add() and call netif->input(). + */ +err_t +ip_input(struct pbuf *p, struct netif *inp) +{ + if (p != NULL) { + if (IP_HDR_GET_VERSION(p->payload) == 6) { + return ip6_input(p, inp); + } + return ip4_input(p, inp); + } + return ERR_VAL; +} + +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#endif /* LWIP_IPV4 || LWIP_IPV6 */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c new file mode 100644 index 000000000..939d7f62d --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c @@ -0,0 +1,527 @@ +/** + * @file + * AutoIP Automatic LinkLocal IP Configuration + * + * This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927. + * + * @defgroup autoip AUTOIP + * @ingroup ip4 + * AUTOIP related functions + * USAGE: + * + * define @ref LWIP_AUTOIP 1 in your lwipopts.h + * Options: + * AUTOIP_TMR_INTERVAL msecs, + * I recommend a value of 100. The value must divide 1000 with a remainder almost 0. + * Possible values are 1000, 500, 333, 250, 200, 166, 142, 125, 111, 100 .... + * + * Without DHCP: + * - Call autoip_start() after netif_add(). + * + * With DHCP: + * - define @ref LWIP_DHCP_AUTOIP_COOP 1 in your lwipopts.h. + * - Configure your DHCP Client. + * + * @see netifapi_autoip + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * Author: Dominik Spies
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/mem.h"
+/* #include "lwip/udp.h" */
+#include "lwip/ip_addr.h"
+#include "lwip/netif.h"
+#include "lwip/autoip.h"
+#include "lwip/etharp.h"
+#include "lwip/prot/autoip.h"
+
+#include <string.h>
+
+/** Pseudo-random macro based on netif information.
+ * You could use "rand()" from the C Library if you define LWIP_AUTOIP_RAND in lwipopts.h */
+#ifndef LWIP_AUTOIP_RAND
+#define LWIP_AUTOIP_RAND(netif) ( (((u32_t)((netif->hwaddr[5]) & 0xff) << 24) | \
+                                   ((u32_t)((netif->hwaddr[3]) & 0xff) << 16) | \
+                                   ((u32_t)((netif->hwaddr[2]) & 0xff) << 8) | \
+                                   ((u32_t)((netif->hwaddr[4]) & 0xff))) + \
+                                  (netif_autoip_data(netif)? netif_autoip_data(netif)->tried_llipaddr : 0))
+#endif /* LWIP_AUTOIP_RAND */
+
+/**
+ * Macro that generates the initial IP address to be tried by AUTOIP.
+ * If you want to override this, define it to something else in lwipopts.h.
+ */
+#ifndef LWIP_AUTOIP_CREATE_SEED_ADDR
+#define LWIP_AUTOIP_CREATE_SEED_ADDR(netif) \
+  lwip_htonl(AUTOIP_RANGE_START + ((u32_t)(((u8_t)(netif->hwaddr[4])) | \
+                 ((u32_t)((u8_t)(netif->hwaddr[5]))) << 8)))
+#endif /* LWIP_AUTOIP_CREATE_SEED_ADDR */
+
+/* static functions */
+static err_t autoip_arp_announce(struct netif *netif);
+static void autoip_start_probing(struct netif *netif);
+
+/**
+ * @ingroup autoip
+ * Set a statically allocated struct autoip to work with.
+ * Using this prevents autoip_start from allocating one with mem_malloc.
+ *
+ * @param netif the netif for which to set the struct autoip
+ * @param autoip (uninitialised) autoip struct allocated by the application
+ */
+void
+autoip_set_struct(struct netif *netif, struct autoip *autoip)
+{
+  LWIP_ASSERT("netif != NULL", netif != NULL);
+  LWIP_ASSERT("autoip != NULL", autoip != NULL);
+  LWIP_ASSERT("netif already has a struct autoip set",
+              netif_autoip_data(netif) == NULL);
+
+  /* clear data structure */
+  memset(autoip, 0, sizeof(struct autoip));
+  /* autoip->state = AUTOIP_STATE_OFF; */
+  netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip);
+}
+
+/** Restart the AutoIP client and check the next address (conflict detected)
+ *
+ * @param netif The netif under AutoIP control
+ */
+static void
+autoip_restart(struct netif *netif)
+{
+  struct autoip* autoip = netif_autoip_data(netif);
+  autoip->tried_llipaddr++;
+  autoip_start(netif);
+}
+
+/**
+ * Handle an IP address conflict after an ARP conflict detection
+ */
+static void
+autoip_handle_arp_conflict(struct netif *netif)
+{
+  struct autoip* autoip = netif_autoip_data(netif);
+
+  /* RFC3927, 2.5 "Conflict Detection and Defense" allows two options where
+     a) means retreat on the first conflict and
+     b) allows to keep an already configured address when having only one
+        conflict in 10 seconds
+     We use option b) since it helps to improve the chance that one of the two
+     conflicting hosts may be able to retain its address.
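+     For example: a first conflicting ARP only triggers a defensive announce,
+     but a second conflict within DEFEND_INTERVAL (10 s per RFC 3927) makes
+     the client retreat to a new address via autoip_restart().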
 */
+
+  if (autoip->lastconflict > 0) {
+    /* retreat, there was a conflicting ARP in the last DEFEND_INTERVAL seconds */
+    LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
+                ("autoip_handle_arp_conflict(): we are defending, but in DEFEND_INTERVAL, retreating\n"));
+
+    /* Active TCP sessions are aborted when removing the IP address */
+    autoip_restart(netif);
+  } else {
+    LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
+                ("autoip_handle_arp_conflict(): we are defending, send ARP announce\n"));
+    autoip_arp_announce(netif);
+    autoip->lastconflict = DEFEND_INTERVAL * AUTOIP_TICKS_PER_SECOND;
+  }
+}
+
+/**
+ * Create an IP address in the range 169.254.1.0 to 169.254.254.255
+ *
+ * @param netif network interface on which to create the IP address
+ * @param ipaddr ip address to initialize
+ */
+static void
+autoip_create_addr(struct netif *netif, ip4_addr_t *ipaddr)
+{
+  struct autoip* autoip = netif_autoip_data(netif);
+
+  /* Here we create an IP address in the range 169.254.1.0 to 169.254.254.255,
+   * compliant with RFC 3927 Section 2.1.
+   * We have 254 * 256 possibilities */
+
+  u32_t addr = lwip_ntohl(LWIP_AUTOIP_CREATE_SEED_ADDR(netif));
+  addr += autoip->tried_llipaddr;
+  addr = AUTOIP_NET | (addr & 0xffff);
+  /* Now, 169.254.0.0 <= addr <= 169.254.255.255 */
+
+  if (addr < AUTOIP_RANGE_START) {
+    addr += AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1;
+  }
+  if (addr > AUTOIP_RANGE_END) {
+    addr -= AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1;
+  }
+  LWIP_ASSERT("AUTOIP address not in range", (addr >= AUTOIP_RANGE_START) &&
+              (addr <= AUTOIP_RANGE_END));
+  ip4_addr_set_u32(ipaddr, lwip_htonl(addr));
+
+  LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
+              ("autoip_create_addr(): tried_llipaddr=%"U16_F", %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
+               (u16_t)(autoip->tried_llipaddr), ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr),
+               ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr)));
+}
+
+/**
+ * Sends an ARP probe from a network interface
+ *
+ * @param netif network interface used to send the probe
+ */
+static err_t
+autoip_arp_probe(struct netif *netif)
+{
+  struct autoip* autoip = netif_autoip_data(netif);
+  /* this works because netif->ip_addr is ANY */
+  return etharp_request(netif, &autoip->llipaddr);
+}
+
+/**
+ * Sends an ARP announce from a network interface
+ *
+ * @param netif network interface used to send the announce
+ */
+static err_t
+autoip_arp_announce(struct netif *netif)
+{
+  return etharp_gratuitous(netif);
+}
+
+/**
+ * Configure interface for use with the current LL IP address
+ *
+ * @param netif network interface to configure with the current LL IP address
+ */
+static err_t
+autoip_bind(struct netif *netif)
+{
+  struct autoip* autoip = netif_autoip_data(netif);
+  ip4_addr_t sn_mask, gw_addr;
+
+  LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE,
+              ("autoip_bind(netif=%p) %c%c%"U16_F" %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
+               (void*)netif, netif->name[0], netif->name[1], (u16_t)netif->num,
+               ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr),
+               ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr)));
+
+  IP4_ADDR(&sn_mask, 255, 255, 0, 0);
+  IP4_ADDR(&gw_addr, 0, 0, 0, 0);
+
+  netif_set_addr(netif, &autoip->llipaddr, &sn_mask, &gw_addr);
+  /* interface is used by routing now that an address is set */
+
+  return ERR_OK;
+}
+
+/**
+ * @ingroup autoip
+ * Start the AutoIP client
+ *
+ * @param netif network interface on which to start the AutoIP client
+ */
+err_t
+autoip_start(struct netif *netif)
+{
+  struct autoip* autoip = netif_autoip_data(netif);
+  err_t
result = ERR_OK; + + LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;); + + /* Set IP-Address, Netmask and Gateway to 0 to make sure that + * ARP Packets are formed correctly + */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_start(netif=%p) %c%c%"U16_F"\n", (void*)netif, netif->name[0], + netif->name[1], (u16_t)netif->num)); + if (autoip == NULL) { + /* no AutoIP client attached yet? */ + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_start(): starting new AUTOIP client\n")); + autoip = (struct autoip *)mem_malloc(sizeof(struct autoip)); + if (autoip == NULL) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_start(): could not allocate autoip\n")); + return ERR_MEM; + } + memset(autoip, 0, sizeof(struct autoip)); + /* store this AutoIP client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_start(): allocated autoip")); + } else { + autoip->state = AUTOIP_STATE_OFF; + autoip->ttw = 0; + autoip->sent_num = 0; + ip4_addr_set_zero(&autoip->llipaddr); + autoip->lastconflict = 0; + } + + autoip_create_addr(netif, &(autoip->llipaddr)); + autoip_start_probing(netif); + + return result; +} + +static void +autoip_start_probing(struct netif *netif) +{ + struct autoip* autoip = netif_autoip_data(netif); + + autoip->state = AUTOIP_STATE_PROBING; + autoip->sent_num = 0; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_start_probing(): changing state to PROBING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + + /* time to wait to first probe, this is randomly + * chosen out of 0 to PROBE_WAIT seconds. + * compliant to RFC 3927 Section 2.2.1 + */ + autoip->ttw = (u16_t)(LWIP_AUTOIP_RAND(netif) % (PROBE_WAIT * AUTOIP_TICKS_PER_SECOND)); + + /* + * if we tried more then MAX_CONFLICTS we must limit our rate for + * acquiring and probing address + * compliant to RFC 3927 Section 2.2.1 + */ + if (autoip->tried_llipaddr > MAX_CONFLICTS) { + autoip->ttw = RATE_LIMIT_INTERVAL * AUTOIP_TICKS_PER_SECOND; + } +} + +/** + * Handle a possible change in the network configuration. + * + * If there is an AutoIP address configured, take the interface down + * and begin probing with the same address. 
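+ *
+ * A typical caller is the port's link-change hook; a minimal sketch
+ * (the callback name and its registration are port-specific, shown for
+ * illustration only):
+ *
+ *   static void link_changed_cb(struct netif *netif)
+ *   {
+ *     if (netif_is_link_up(netif)) {
+ *       autoip_network_changed(netif);
+ *     }
+ *   }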
+ */ +void +autoip_network_changed(struct netif *netif) +{ + struct autoip* autoip = netif_autoip_data(netif); + + if (autoip && (autoip->state != AUTOIP_STATE_OFF)) { + autoip_start_probing(netif); + } +} + +/** + * @ingroup autoip + * Stop AutoIP client + * + * @param netif network interface on which stop the AutoIP client + */ +err_t +autoip_stop(struct netif *netif) +{ + struct autoip* autoip = netif_autoip_data(netif); + + if (autoip != NULL) { + autoip->state = AUTOIP_STATE_OFF; + if (ip4_addr_islinklocal(netif_ip4_addr(netif))) { + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + } + } + return ERR_OK; +} + +/** + * Has to be called in loop every AUTOIP_TMR_INTERVAL milliseconds + */ +void +autoip_tmr(void) +{ + struct netif *netif = netif_list; + /* loop through netif's */ + while (netif != NULL) { + struct autoip* autoip = netif_autoip_data(netif); + /* only act on AutoIP configured interfaces */ + if (autoip != NULL) { + if (autoip->lastconflict > 0) { + autoip->lastconflict--; + } + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_tmr() AutoIP-State: %"U16_F", ttw=%"U16_F"\n", + (u16_t)(autoip->state), autoip->ttw)); + + if (autoip->ttw > 0) { + autoip->ttw--; + } + + switch(autoip->state) { + case AUTOIP_STATE_PROBING: + if (autoip->ttw == 0) { + if (autoip->sent_num >= PROBE_NUM) { + /* Switch to ANNOUNCING: now we can bind to an IP address and use it */ + autoip->state = AUTOIP_STATE_ANNOUNCING; + autoip_bind(netif); + /* autoip_bind() calls netif_set_addr(): this triggers a gratuitous ARP + which counts as an announcement */ + autoip->sent_num = 1; + autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_tmr(): changing state to ANNOUNCING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + } else { + autoip_arp_probe(netif); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() PROBING Sent Probe\n")); + autoip->sent_num++; + if (autoip->sent_num == PROBE_NUM) { + /* calculate time to wait to for announce */ + autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND; + } else { + /* calculate time to wait to next probe */ + autoip->ttw = (u16_t)((LWIP_AUTOIP_RAND(netif) % + ((PROBE_MAX - PROBE_MIN) * AUTOIP_TICKS_PER_SECOND) ) + + PROBE_MIN * AUTOIP_TICKS_PER_SECOND); + } + } + } + break; + + case AUTOIP_STATE_ANNOUNCING: + if (autoip->ttw == 0) { + autoip_arp_announce(netif); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() ANNOUNCING Sent Announce\n")); + autoip->ttw = ANNOUNCE_INTERVAL * AUTOIP_TICKS_PER_SECOND; + autoip->sent_num++; + + if (autoip->sent_num >= ANNOUNCE_NUM) { + autoip->state = AUTOIP_STATE_BOUND; + autoip->sent_num = 0; + autoip->ttw = 0; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_tmr(): changing state to BOUND: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + } + } + break; + + default: + /* nothing to do in other states */ + break; + } + } + /* proceed to next network interface */ + netif = netif->next; + } +} + +/** + * Handles every incoming ARP Packet, called by etharp_input(). 
+ * + * @param netif network interface to use for autoip processing + * @param hdr Incoming ARP packet + */ +void +autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr) +{ + struct autoip* autoip = netif_autoip_data(netif); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_arp_reply()\n")); + if ((autoip != NULL) && (autoip->state != AUTOIP_STATE_OFF)) { + /* when ip.src == llipaddr && hw.src != netif->hwaddr + * + * when probing ip.dst == llipaddr && hw.src != netif->hwaddr + * we have a conflict and must solve it + */ + ip4_addr_t sipaddr, dipaddr; + struct eth_addr netifaddr; + ETHADDR16_COPY(netifaddr.addr, netif->hwaddr); + + /* Copy struct ip4_addr2 to aligned ip4_addr, to support compilers without + * structure packing (not using structure copy which breaks strict-aliasing rules). + */ + IPADDR2_COPY(&sipaddr, &hdr->sipaddr); + IPADDR2_COPY(&dipaddr, &hdr->dipaddr); + + if (autoip->state == AUTOIP_STATE_PROBING) { + /* RFC 3927 Section 2.2.1: + * from beginning to after ANNOUNCE_WAIT + * seconds we have a conflict if + * ip.src == llipaddr OR + * ip.dst == llipaddr && hw.src != own hwaddr + */ + if ((ip4_addr_cmp(&sipaddr, &autoip->llipaddr)) || + (ip4_addr_isany_val(sipaddr) && + ip4_addr_cmp(&dipaddr, &autoip->llipaddr) && + !eth_addr_cmp(&netifaddr, &hdr->shwaddr))) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("autoip_arp_reply(): Probe Conflict detected\n")); + autoip_restart(netif); + } + } else { + /* RFC 3927 Section 2.5: + * in any state we have a conflict if + * ip.src == llipaddr && hw.src != own hwaddr + */ + if (ip4_addr_cmp(&sipaddr, &autoip->llipaddr) && + !eth_addr_cmp(&netifaddr, &hdr->shwaddr)) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("autoip_arp_reply(): Conflicting ARP-Packet detected\n")); + autoip_handle_arp_conflict(netif); + } + } + } +} + +/** check if AutoIP supplied netif->ip_addr + * + * @param netif the netif to check + * @return 1 if AutoIP supplied netif->ip_addr (state BOUND or ANNOUNCING), + * 0 otherwise + */ +u8_t +autoip_supplied_address(const struct netif *netif) +{ + if ((netif != NULL) && (netif_autoip_data(netif) != NULL)) { + struct autoip* autoip = netif_autoip_data(netif); + return (autoip->state == AUTOIP_STATE_BOUND) || (autoip->state == AUTOIP_STATE_ANNOUNCING); + } + return 0; +} + +u8_t +autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr) +{ + struct autoip* autoip = netif_autoip_data(netif); + return (autoip != NULL) && ip4_addr_cmp(addr, &(autoip->llipaddr)); +} + +#endif /* LWIP_IPV4 && LWIP_AUTOIP */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c new file mode 100644 index 000000000..74f6b5826 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c @@ -0,0 +1,1951 @@ +/** + * @file + * Dynamic Host Configuration Protocol client + * + * @defgroup dhcp4 DHCPv4 + * @ingroup ip4 + * DHCP (IPv4) related functions + * This is a DHCP client for the lwIP TCP/IP stack. It aims to conform + * with RFC 2131 and RFC 2132. + * + * @todo: + * - Support for interfaces other than Ethernet (SLIP, PPP, ...) 
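+ *
+ * A minimal usage sketch (the netif setup is application-specific and
+ * shown for illustration only; see dhcp_start() for the exact preconditions):
+ *
+ *   netif_set_up(&my_netif);        // the netif must be up first
+ *   dhcp_start(&my_netif);          // begin lease negotiation
+ *   // ... run the lwIP timers; once dhcp_supplied_address(&my_netif)
+ *   // returns 1, the leased address has been bound to the netif.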
+ *
+ * Options:
+ * @ref DHCP_COARSE_TIMER_SECS (recommended 60, i.e. one minute)
+ * @ref DHCP_FINE_TIMER_MSECS (recommended 500, which equals the TCP coarse timer)
+ *
+ * dhcp_start() starts a DHCP client instance which
+ * configures the interface by obtaining an IP address lease and maintaining it.
+ *
+ * Use dhcp_release() to end the lease and use dhcp_stop()
+ * to remove the DHCP client.
+ *
+ * @see netifapi_dhcp4
+ */
+
+/*
+ * Copyright (c) 2001-2004 Leon Woestenberg
+ * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ * The Swedish Institute of Computer Science and Adam Dunkels
+ * are specifically granted permission to redistribute this
+ * source code.
+ *
+ * Author: Leon Woestenberg
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4 && LWIP_DHCP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/stats.h"
+#include "lwip/mem.h"
+#include "lwip/udp.h"
+#include "lwip/ip_addr.h"
+#include "lwip/netif.h"
+#include "lwip/def.h"
+#include "lwip/dhcp.h"
+#include "lwip/autoip.h"
+#include "lwip/dns.h"
+#include "lwip/etharp.h"
+#include "lwip/prot/dhcp.h"
+
+#include <string.h>
+
+/** DHCP_CREATE_RAND_XID: if this is set to 1, the xid is created using
+ * LWIP_RAND() (this overrides DHCP_GLOBAL_XID)
+ */
+#ifndef DHCP_CREATE_RAND_XID
+#define DHCP_CREATE_RAND_XID        1
+#endif
+
+/** Default for DHCP_GLOBAL_XID is 0xABCD0000
+ * This can be changed by defining DHCP_GLOBAL_XID and DHCP_GLOBAL_XID_HEADER, e.g.
+ * \#define DHCP_GLOBAL_XID_HEADER "stdlib.h" + * \#define DHCP_GLOBAL_XID rand() + */ +#ifdef DHCP_GLOBAL_XID_HEADER +#include DHCP_GLOBAL_XID_HEADER /* include optional starting XID generation prototypes */ +#endif + +/** DHCP_OPTION_MAX_MSG_SIZE is set to the MTU + * MTU is checked to be big enough in dhcp_start */ +#define DHCP_MAX_MSG_LEN(netif) (netif->mtu) +#define DHCP_MAX_MSG_LEN_MIN_REQUIRED 576 +/** Minimum length for reply before packet is parsed */ +#define DHCP_MIN_REPLY_LEN 44 + +#define REBOOT_TRIES 2 + +#if LWIP_DNS && LWIP_DHCP_MAX_DNS_SERVERS +#if DNS_MAX_SERVERS > LWIP_DHCP_MAX_DNS_SERVERS +#define LWIP_DHCP_PROVIDE_DNS_SERVERS LWIP_DHCP_MAX_DNS_SERVERS +#else +#define LWIP_DHCP_PROVIDE_DNS_SERVERS DNS_MAX_SERVERS +#endif +#else +#define LWIP_DHCP_PROVIDE_DNS_SERVERS 0 +#endif + +/** Option handling: options are parsed in dhcp_parse_reply + * and saved in an array where other functions can load them from. + * This might be moved into the struct dhcp (not necessarily since + * lwIP is single-threaded and the array is only used while in recv + * callback). */ +enum dhcp_option_idx { + DHCP_OPTION_IDX_OVERLOAD = 0, + DHCP_OPTION_IDX_MSG_TYPE, + DHCP_OPTION_IDX_SERVER_ID, + DHCP_OPTION_IDX_LEASE_TIME, + DHCP_OPTION_IDX_T1, + DHCP_OPTION_IDX_T2, + DHCP_OPTION_IDX_SUBNET_MASK, + DHCP_OPTION_IDX_ROUTER, +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + DHCP_OPTION_IDX_DNS_SERVER, + DHCP_OPTION_IDX_DNS_SERVER_LAST = DHCP_OPTION_IDX_DNS_SERVER + LWIP_DHCP_PROVIDE_DNS_SERVERS - 1, +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP_GET_NTP_SRV + DHCP_OPTION_IDX_NTP_SERVER, + DHCP_OPTION_IDX_NTP_SERVER_LAST = DHCP_OPTION_IDX_NTP_SERVER + LWIP_DHCP_MAX_NTP_SERVERS - 1, +#endif /* LWIP_DHCP_GET_NTP_SRV */ + DHCP_OPTION_IDX_MAX +}; + +/** Holds the decoded option values, only valid while in dhcp_recv. + @todo: move this into struct dhcp? */ +u32_t dhcp_rx_options_val[DHCP_OPTION_IDX_MAX]; +/** Holds a flag which option was received and is contained in dhcp_rx_options_val, + only valid while in dhcp_recv. + @todo: move this into struct dhcp? 
*/ +u8_t dhcp_rx_options_given[DHCP_OPTION_IDX_MAX]; + +static u8_t dhcp_discover_request_options[] = { + DHCP_OPTION_SUBNET_MASK, + DHCP_OPTION_ROUTER, + DHCP_OPTION_BROADCAST +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + , DHCP_OPTION_DNS_SERVER +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP_GET_NTP_SRV + , DHCP_OPTION_NTP +#endif /* LWIP_DHCP_GET_NTP_SRV */ + }; + +#ifdef DHCP_GLOBAL_XID +static u32_t xid; +static u8_t xid_initialised; +#endif /* DHCP_GLOBAL_XID */ + +#define dhcp_option_given(dhcp, idx) (dhcp_rx_options_given[idx] != 0) +#define dhcp_got_option(dhcp, idx) (dhcp_rx_options_given[idx] = 1) +#define dhcp_clear_option(dhcp, idx) (dhcp_rx_options_given[idx] = 0) +#define dhcp_clear_all_options(dhcp) (memset(dhcp_rx_options_given, 0, sizeof(dhcp_rx_options_given))) +#define dhcp_get_option_value(dhcp, idx) (dhcp_rx_options_val[idx]) +#define dhcp_set_option_value(dhcp, idx, val) (dhcp_rx_options_val[idx] = (val)) + +static struct udp_pcb *dhcp_pcb; +static u8_t dhcp_pcb_refcount; + +/* DHCP client state machine functions */ +static err_t dhcp_discover(struct netif *netif); +static err_t dhcp_select(struct netif *netif); +static void dhcp_bind(struct netif *netif); +#if DHCP_DOES_ARP_CHECK +static err_t dhcp_decline(struct netif *netif); +#endif /* DHCP_DOES_ARP_CHECK */ +static err_t dhcp_rebind(struct netif *netif); +static err_t dhcp_reboot(struct netif *netif); +static void dhcp_set_state(struct dhcp *dhcp, u8_t new_state); + +/* receive, unfold, parse and free incoming messages */ +static void dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); + +/* set the DHCP timers */ +static void dhcp_timeout(struct netif *netif); +static void dhcp_t1_timeout(struct netif *netif); +static void dhcp_t2_timeout(struct netif *netif); + +/* build outgoing messages */ +/* create a DHCP message, fill in common headers */ +static err_t dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type); +/* free a DHCP request */ +static void dhcp_delete_msg(struct dhcp *dhcp); +/* add a DHCP option (type, then length in bytes) */ +static void dhcp_option(struct dhcp *dhcp, u8_t option_type, u8_t option_len); +/* add option values */ +static void dhcp_option_byte(struct dhcp *dhcp, u8_t value); +static void dhcp_option_short(struct dhcp *dhcp, u16_t value); +static void dhcp_option_long(struct dhcp *dhcp, u32_t value); +#if LWIP_NETIF_HOSTNAME +static void dhcp_option_hostname(struct dhcp *dhcp, struct netif *netif); +#endif /* LWIP_NETIF_HOSTNAME */ +/* always add the DHCP options trailer to end and pad */ +static void dhcp_option_trailer(struct dhcp *dhcp); + +/** Ensure DHCP PCB is allocated and bound */ +static err_t +dhcp_inc_pcb_refcount(void) +{ + if (dhcp_pcb_refcount == 0) { + LWIP_ASSERT("dhcp_inc_pcb_refcount(): memory leak", dhcp_pcb == NULL); + + /* allocate UDP PCB */ + dhcp_pcb = udp_new(); + + if (dhcp_pcb == NULL) { + return ERR_MEM; + } + + ip_set_option(dhcp_pcb, SOF_BROADCAST); + + /* set up local and remote port for the pcb -> listen on all interfaces on all src/dest IPs */ + udp_bind(dhcp_pcb, IP4_ADDR_ANY, DHCP_CLIENT_PORT); + udp_connect(dhcp_pcb, IP4_ADDR_ANY, DHCP_SERVER_PORT); + udp_recv(dhcp_pcb, dhcp_recv, NULL); + } + + dhcp_pcb_refcount++; + + return ERR_OK; +} + +/** Free DHCP PCB if the last netif stops using it */ +static void +dhcp_dec_pcb_refcount(void) +{ + LWIP_ASSERT("dhcp_pcb_refcount(): refcount error", (dhcp_pcb_refcount > 0)); + dhcp_pcb_refcount--; + + if (dhcp_pcb_refcount == 0) { + 
udp_remove(dhcp_pcb); + dhcp_pcb = NULL; + } +} + +/** + * Back-off the DHCP client (because of a received NAK response). + * + * Back-off the DHCP client because of a received NAK. Receiving a + * NAK means the client asked for something non-sensible, for + * example when it tries to renew a lease obtained on another network. + * + * We clear any existing set IP address and restart DHCP negotiation + * afresh (as per RFC2131 3.2.3). + * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_nak(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_nak(netif=%p) %c%c%"U16_F"\n", + (void*)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + /* Change to a defined state - set this before assigning the address + to ensure the callback can use dhcp_supplied_address() */ + dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF); + /* remove IP address from interface (must no longer be used, as per RFC2131) */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + /* We can immediately restart discovery */ + dhcp_discover(netif); +} + +#if DHCP_DOES_ARP_CHECK +/** + * Checks if the offered IP address is already in use. + * + * It does so by sending an ARP request for the offered address and + * entering CHECKING state. If no ARP reply is received within a small + * interval, the address is assumed to be free for use by us. + * + * @param netif the netif under DHCP control + */ +static void +dhcp_check(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_check(netif=%p) %c%c\n", (void *)netif, (s16_t)netif->name[0], + (s16_t)netif->name[1])); + dhcp_set_state(dhcp, DHCP_STATE_CHECKING); + /* create an ARP query for the offered IP address, expecting that no host + responds, as the IP address should not be in use. */ + result = etharp_query(netif, &dhcp->offered_ip_addr, NULL); + if (result != ERR_OK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_check: could not perform ARP query\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = 500; + dhcp->request_timeout = (msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_check(): set request timeout %"U16_F" msecs\n", msecs)); +} +#endif /* DHCP_DOES_ARP_CHECK */ + +/** + * Remember the configuration offered by a DHCP server. 
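+ *
+ * In the normal client flow this is the second step:
+ * DISCOVER (dhcp_discover) -> OFFER (handled here) ->
+ * REQUEST (dhcp_select) -> ACK (dhcp_handle_ack/dhcp_bind).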
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_offer(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_offer(netif=%p) %c%c%"U16_F"\n", + (void*)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + /* obtain the server address */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SERVER_ID)) { + ip_addr_set_ip4_u32(&dhcp->server_ip_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SERVER_ID))); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): server 0x%08"X32_F"\n", + ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr)))); + /* remember offered address */ + ip4_addr_copy(dhcp->offered_ip_addr, dhcp->msg_in->yiaddr); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): offer for 0x%08"X32_F"\n", + ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + dhcp_select(netif); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_handle_offer(netif=%p) did not get server ID!\n", (void*)netif)); + } +} + +/** + * Select a DHCP server offer out of all offers. + * + * Simply select the first offer received. + * + * @param netif the netif under DHCP control + * @return lwIP specific error (see error.h) + */ +static err_t +dhcp_select(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_select(netif=%p) %c%c%"U16_F"\n", (void*)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + dhcp_set_state(dhcp, DHCP_STATE_REQUESTING); + + /* create and initialize the DHCP message header */ + result = dhcp_create_msg(netif, dhcp, DHCP_REQUEST); + if (result == ERR_OK) { + dhcp_option(dhcp, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + dhcp_option_short(dhcp, DHCP_MAX_MSG_LEN(netif)); + + /* MUST request the offered IP address */ + dhcp_option(dhcp, DHCP_OPTION_REQUESTED_IP, 4); + dhcp_option_long(dhcp, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + dhcp_option(dhcp, DHCP_OPTION_SERVER_ID, 4); + dhcp_option_long(dhcp, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr)))); + + dhcp_option(dhcp, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + dhcp_option_byte(dhcp, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + dhcp_option_hostname(dhcp, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + dhcp_option_trailer(dhcp); + /* shrink the pbuf to the actual content length */ + pbuf_realloc(dhcp->p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp->options_out_len); + + /* send broadcast to any DHCP server */ + udp_sendto_if_src(dhcp_pcb, dhcp->p_out, IP_ADDR_BROADCAST, DHCP_SERVER_PORT, netif, IP4_ADDR_ANY); + dhcp_delete_msg(dhcp); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_select: REQUESTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_select: could not allocate DHCP request\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (dhcp->tries < 6 ? 1 << dhcp->tries : 60) * 1000; + dhcp->request_timeout = (msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_select(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * The DHCP timer that checks for lease renewal/rebind timeouts. 
+ * Must be called once a minute (see @ref DHCP_COARSE_TIMER_SECS). + */ +void +dhcp_coarse_tmr(void) +{ + struct netif *netif = netif_list; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_coarse_tmr()\n")); + /* iterate through all network interfaces */ + while (netif != NULL) { + /* only act on DHCP configured interfaces */ + struct dhcp *dhcp = netif_dhcp_data(netif); + if ((dhcp != NULL) && (dhcp->state != DHCP_STATE_OFF)) { + /* compare lease time to expire timeout */ + if (dhcp->t0_timeout && (++dhcp->lease_used == dhcp->t0_timeout)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t0 timeout\n")); + /* this clients' lease time has expired */ + dhcp_release(netif); + dhcp_discover(netif); + /* timer is active (non zero), and triggers (zeroes) now? */ + } else if (dhcp->t2_rebind_time && (dhcp->t2_rebind_time-- == 1)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t2 timeout\n")); + /* this clients' rebind timeout triggered */ + dhcp_t2_timeout(netif); + /* timer is active (non zero), and triggers (zeroes) now */ + } else if (dhcp->t1_renew_time && (dhcp->t1_renew_time-- == 1)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t1 timeout\n")); + /* this clients' renewal timeout triggered */ + dhcp_t1_timeout(netif); + } + } + /* proceed to next netif */ + netif = netif->next; + } +} + +/** + * DHCP transaction timeout handling (this function must be called every 500ms, + * see @ref DHCP_FINE_TIMER_MSECS). + * + * A DHCP server is expected to respond within a short period of time. + * This timer checks whether an outstanding DHCP request is timed out. + */ +void +dhcp_fine_tmr(void) +{ + struct netif *netif = netif_list; + /* loop through netif's */ + while (netif != NULL) { + struct dhcp *dhcp = netif_dhcp_data(netif); + /* only act on DHCP configured interfaces */ + if (dhcp != NULL) { + /* timer is active (non zero), and is about to trigger now */ + if (dhcp->request_timeout > 1) { + dhcp->request_timeout--; + } + else if (dhcp->request_timeout == 1) { + dhcp->request_timeout--; + /* { netif->dhcp->request_timeout == 0 } */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_fine_tmr(): request timeout\n")); + /* this client's request timeout triggered */ + dhcp_timeout(netif); + } + } + /* proceed to next network interface */ + netif = netif->next; + } +} + +/** + * A DHCP negotiation transaction, or ARP request, has timed out. + * + * The timer that was started with the DHCP or ARP request has + * timed out, indicating no response was received in time. 
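+ *
+ * The retransmission schedule set up by dhcp_discover() and dhcp_select(),
+ * msecs = (tries < 6 ? 1 << tries : 60) * 1000, doubles per try:
+ * 2 s, 4 s, 8 s, 16 s, 32 s, then capped at 60 s per attempt.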
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout()\n")); + /* back-off period has passed, or server selection timed out */ + if ((dhcp->state == DHCP_STATE_BACKING_OFF) || (dhcp->state == DHCP_STATE_SELECTING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout(): restarting discovery\n")); + dhcp_discover(netif); + /* receiving the requested lease timed out */ + } else if (dhcp->state == DHCP_STATE_REQUESTING) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, DHCP request timed out\n")); + if (dhcp->tries <= 5) { + dhcp_select(netif); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, releasing, restarting\n")); + dhcp_release(netif); + dhcp_discover(netif); + } +#if DHCP_DOES_ARP_CHECK + /* received no ARP reply for the offered address (which is good) */ + } else if (dhcp->state == DHCP_STATE_CHECKING) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): CHECKING, ARP request timed out\n")); + if (dhcp->tries <= 1) { + dhcp_check(netif); + /* no ARP replies on the offered address, + looks like the IP address is indeed free */ + } else { + /* bind the interface to the offered address */ + dhcp_bind(netif); + } +#endif /* DHCP_DOES_ARP_CHECK */ + } else if (dhcp->state == DHCP_STATE_REBOOTING) { + if (dhcp->tries < REBOOT_TRIES) { + dhcp_reboot(netif); + } else { + dhcp_discover(netif); + } + } +} + +/** + * The renewal period has timed out. + * + * @param netif the netif under DHCP control + */ +static void +dhcp_t1_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_t1_timeout()\n")); + if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) || + (dhcp->state == DHCP_STATE_RENEWING)) { + /* just retry to renew - note that the rebind timer (t2) will + * eventually time-out if renew tries fail. */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("dhcp_t1_timeout(): must renew\n")); + /* This slightly different to RFC2131: DHCPREQUEST will be sent from state + DHCP_STATE_RENEWING, not DHCP_STATE_BOUND */ + dhcp_renew(netif); + /* Calculate next timeout */ + if (((dhcp->t2_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) + { + dhcp->t1_renew_time = ((dhcp->t2_timeout - dhcp->lease_used) / 2); + } + } +} + +/** + * The rebind period has timed out. 
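+ *
+ * When the server supplies no explicit T1/T2 options, dhcp_handle_ack()
+ * below derives them from the lease time: T1 = lease / 2 and
+ * T2 = lease * 7 / 8. For a 24 h lease that means renewing after 12 h
+ * and rebinding after 21 h.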
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_t2_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_t2_timeout()\n")); + if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) || + (dhcp->state == DHCP_STATE_RENEWING) || (dhcp->state == DHCP_STATE_REBINDING)) { + /* just retry to rebind */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("dhcp_t2_timeout(): must rebind\n")); + /* This slightly different to RFC2131: DHCPREQUEST will be sent from state + DHCP_STATE_REBINDING, not DHCP_STATE_BOUND */ + dhcp_rebind(netif); + /* Calculate next timeout */ + if (((dhcp->t0_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) + { + dhcp->t2_rebind_time = ((dhcp->t0_timeout - dhcp->lease_used) / 2); + } + } +} + +/** + * Handle a DHCP ACK packet + * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_ack(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + +#if LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV + u8_t n; +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV */ +#if LWIP_DHCP_GET_NTP_SRV + ip4_addr_t ntp_server_addrs[LWIP_DHCP_MAX_NTP_SERVERS]; +#endif + + /* clear options we might not get from the ACK */ + ip4_addr_set_zero(&dhcp->offered_sn_mask); + ip4_addr_set_zero(&dhcp->offered_gw_addr); +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_set_zero(&dhcp->offered_si_addr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* lease time given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_LEASE_TIME)) { + /* remember offered lease time */ + dhcp->offered_t0_lease = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_LEASE_TIME); + } + /* renewal period given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T1)) { + /* remember given renewal period */ + dhcp->offered_t1_renew = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T1); + } else { + /* calculate safe periods for renewal */ + dhcp->offered_t1_renew = dhcp->offered_t0_lease / 2; + } + + /* renewal period given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T2)) { + /* remember given rebind period */ + dhcp->offered_t2_rebind = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T2); + } else { + /* calculate safe periods for rebinding (offered_t0_lease * 0.875 -> 87.5%)*/ + dhcp->offered_t2_rebind = (dhcp->offered_t0_lease * 7U) / 8U; + } + + /* (y)our internet address */ + ip4_addr_copy(dhcp->offered_ip_addr, dhcp->msg_in->yiaddr); + +#if LWIP_DHCP_BOOTP_FILE + /* copy boot server address, + boot file name copied in dhcp_parse_reply if not overloaded */ + ip4_addr_copy(dhcp->offered_si_addr, dhcp->msg_in->siaddr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* subnet mask given? 
*/ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SUBNET_MASK)) { + /* remember given subnet mask */ + ip4_addr_set_u32(&dhcp->offered_sn_mask, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SUBNET_MASK))); + dhcp->subnet_mask_given = 1; + } else { + dhcp->subnet_mask_given = 0; + } + + /* gateway router */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_ROUTER)) { + ip4_addr_set_u32(&dhcp->offered_gw_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_ROUTER))); + } + +#if LWIP_DHCP_GET_NTP_SRV + /* NTP servers */ + for (n = 0; (n < LWIP_DHCP_MAX_NTP_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n); n++) { + ip4_addr_set_u32(&ntp_server_addrs[n], lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n))); + } + dhcp_set_ntp_servers(n, ntp_server_addrs); +#endif /* LWIP_DHCP_GET_NTP_SRV */ + +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + /* DNS servers */ + for (n = 0; (n < LWIP_DHCP_PROVIDE_DNS_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n); n++) { + ip_addr_t dns_addr; + ip_addr_set_ip4_u32(&dns_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n))); + dns_setserver(n, &dns_addr); + } +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +} + +/** + * @ingroup dhcp4 + * Set a statically allocated struct dhcp to work with. + * Using this prevents dhcp_start to allocate it using mem_malloc. + * + * @param netif the netif for which to set the struct dhcp + * @param dhcp (uninitialised) dhcp struct allocated by the application + */ +void +dhcp_set_struct(struct netif *netif, struct dhcp *dhcp) +{ + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("dhcp != NULL", dhcp != NULL); + LWIP_ASSERT("netif already has a struct dhcp set", netif_dhcp_data(netif) == NULL); + + /* clear data structure */ + memset(dhcp, 0, sizeof(struct dhcp)); + /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp); +} + +/** + * @ingroup dhcp4 + * Removes a struct dhcp from a netif. + * + * ATTENTION: Only use this when not using dhcp_set_struct() to allocate the + * struct dhcp since the memory is passed back to the heap. + * + * @param netif the netif from which to remove the struct dhcp + */ +void dhcp_cleanup(struct netif *netif) +{ + LWIP_ASSERT("netif != NULL", netif != NULL); + + if (netif_dhcp_data(netif) != NULL) { + mem_free(netif_dhcp_data(netif)); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, NULL); + } +} + +/** + * @ingroup dhcp4 + * Start DHCP negotiation for a network interface. + * + * If no DHCP client instance was attached to this interface, + * a new client is created first. If a DHCP client instance + * was already present, it restarts negotiation. 
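+ *
+ * The netif must already be up and its MTU must be at least 576 bytes
+ * (DHCP_MAX_MSG_LEN_MIN_REQUIRED); otherwise this function fails
+ * without starting a client.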
+ * + * @param netif The lwIP network interface + * @return lwIP error code + * - ERR_OK - No error + * - ERR_MEM - Out of memory + */ +err_t +dhcp_start(struct netif *netif) +{ + struct dhcp *dhcp; + err_t result; + + LWIP_ERROR("netif != NULL", (netif != NULL), return ERR_ARG;); + LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;); + dhcp = netif_dhcp_data(netif); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(netif=%p) %c%c%"U16_F"\n", (void*)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + /* check MTU of the netif */ + if (netif->mtu < DHCP_MAX_MSG_LEN_MIN_REQUIRED) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): Cannot use this netif with DHCP: MTU is too small\n")); + return ERR_MEM; + } + + /* no DHCP client attached yet? */ + if (dhcp == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): mallocing new DHCP client\n")); + dhcp = (struct dhcp *)mem_malloc(sizeof(struct dhcp)); + if (dhcp == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): could not allocate dhcp\n")); + return ERR_MEM; + } + + /* store this dhcp client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): allocated dhcp")); + /* already has DHCP client attached */ + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(): restarting DHCP configuration\n")); + LWIP_ASSERT("pbuf p_out wasn't freed", dhcp->p_out == NULL); + LWIP_ASSERT("reply wasn't freed", dhcp->msg_in == NULL ); + + if (dhcp->pcb_allocated != 0) { + dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */ + } + /* dhcp is cleared below, no need to reset flag*/ + } + + /* clear data structure */ + memset(dhcp, 0, sizeof(struct dhcp)); + /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */ + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): starting DHCP configuration\n")); + + if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */ + return ERR_MEM; + } + dhcp->pcb_allocated = 1; + +#if LWIP_DHCP_CHECK_LINK_UP + if (!netif_is_link_up(netif)) { + /* set state INIT and wait for dhcp_network_changed() to call dhcp_discover() */ + dhcp_set_state(dhcp, DHCP_STATE_INIT); + return ERR_OK; + } +#endif /* LWIP_DHCP_CHECK_LINK_UP */ + + + /* (re)start the DHCP negotiation */ + result = dhcp_discover(netif); + if (result != ERR_OK) { + /* free resources allocated above */ + dhcp_stop(netif); + return ERR_MEM; + } + return result; +} + +/** + * @ingroup dhcp4 + * Inform a DHCP server of our manual configuration. + * + * This informs DHCP servers of our fixed IP address configuration + * by sending an INFORM message. It does not involve DHCP address + * configuration, it is just here to be nice to the network. 
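+ *
+ * A minimal sketch (the address values are placeholders):
+ *
+ *   ip4_addr_t ip, mask, gw;
+ *   IP4_ADDR(&ip,   192, 168, 1, 50);
+ *   IP4_ADDR(&mask, 255, 255, 255, 0);
+ *   IP4_ADDR(&gw,   192, 168, 1, 1);
+ *   netif_set_addr(&my_netif, &ip, &mask, &gw);
+ *   dhcp_inform(&my_netif);  // announce the static configuration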
+ * + * @param netif The lwIP network interface + */ +void +dhcp_inform(struct netif *netif) +{ + struct dhcp dhcp; + err_t result = ERR_OK; + + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + + if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */ + return; + } + + memset(&dhcp, 0, sizeof(struct dhcp)); + dhcp_set_state(&dhcp, DHCP_STATE_INFORMING); + + /* create and initialize the DHCP message header */ + result = dhcp_create_msg(netif, &dhcp, DHCP_INFORM); + if (result == ERR_OK) { + dhcp_option(&dhcp, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + dhcp_option_short(&dhcp, DHCP_MAX_MSG_LEN(netif)); + + dhcp_option_trailer(&dhcp); + + pbuf_realloc(dhcp.p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp.options_out_len); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_inform: INFORMING\n")); + + udp_sendto_if(dhcp_pcb, dhcp.p_out, IP_ADDR_BROADCAST, DHCP_SERVER_PORT, netif); + + dhcp_delete_msg(&dhcp); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_inform: could not allocate DHCP request\n")); + } + + dhcp_dec_pcb_refcount(); /* delete DHCP PCB if not needed any more */ +} + +/** Handle a possible change in the network configuration. + * + * This enters the REBOOTING state to verify that the currently bound + * address is still valid. + */ +void +dhcp_network_changed(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + if (!dhcp) + return; + switch (dhcp->state) { + case DHCP_STATE_REBINDING: + case DHCP_STATE_RENEWING: + case DHCP_STATE_BOUND: + case DHCP_STATE_REBOOTING: + dhcp->tries = 0; + dhcp_reboot(netif); + break; + case DHCP_STATE_OFF: + /* stay off */ + break; + default: + /* INIT/REQUESTING/CHECKING/BACKING_OFF restart with new 'rid' because the + state changes, SELECTING: continue with current 'rid' as we stay in the + same state */ +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + /* ensure we start with short timeouts, even if already discovering */ + dhcp->tries = 0; + dhcp_discover(netif); + break; + } +} + +#if DHCP_DOES_ARP_CHECK +/** + * Match an ARP reply with the offered IP address: + * check whether the offered IP address is not in use using ARP + * + * @param netif the network interface on which the reply was received + * @param addr The IP address we received a reply from + */ +void +dhcp_arp_reply(struct netif *netif, const ip4_addr_t *addr) +{ + struct dhcp *dhcp; + + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + dhcp = netif_dhcp_data(netif); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_arp_reply()\n")); + /* is a DHCP client doing an ARP check? */ + if ((dhcp != NULL) && (dhcp->state == DHCP_STATE_CHECKING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_arp_reply(): CHECKING, arp reply for 0x%08"X32_F"\n", + ip4_addr_get_u32(addr))); + /* did a host respond with the address we + were offered by the DHCP server? */ + if (ip4_addr_cmp(addr, &dhcp->offered_ip_addr)) { + /* we will not accept the offered address */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("dhcp_arp_reply(): arp reply matched with offered address, declining\n")); + dhcp_decline(netif); + } + } +} + +/** + * Decline an offered lease. + * + * Tell the DHCP server we do not accept the offered address. 
+ * One reason to decline the lease is when we find out the address + * is already in use by another host (through ARP). + * + * @param netif the netif under DHCP control + */ +static err_t +dhcp_decline(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result = ERR_OK; + u16_t msecs; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline()\n")); + dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF); + /* create and initialize the DHCP message header */ + result = dhcp_create_msg(netif, dhcp, DHCP_DECLINE); + if (result == ERR_OK) { + dhcp_option(dhcp, DHCP_OPTION_REQUESTED_IP, 4); + dhcp_option_long(dhcp, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + dhcp_option_trailer(dhcp); + /* resize pbuf to reflect true size of options */ + pbuf_realloc(dhcp->p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp->options_out_len); + + /* per section 4.4.4, broadcast DECLINE messages */ + udp_sendto_if_src(dhcp_pcb, dhcp->p_out, IP_ADDR_BROADCAST, DHCP_SERVER_PORT, netif, IP4_ADDR_ANY); + dhcp_delete_msg(dhcp); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_decline: BACKING OFF\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_decline: could not allocate DHCP request\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = 10*1000; + dhcp->request_timeout = (msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} +#endif /* DHCP_DOES_ARP_CHECK */ + + +/** + * Start the DHCP process, discover a DHCP server. + * + * @param netif the netif under DHCP control + */ +static err_t +dhcp_discover(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result = ERR_OK; + u16_t msecs; + u8_t i; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover()\n")); + ip4_addr_set_any(&dhcp->offered_ip_addr); + dhcp_set_state(dhcp, DHCP_STATE_SELECTING); + /* create and initialize the DHCP message header */ + result = dhcp_create_msg(netif, dhcp, DHCP_DISCOVER); + if (result == ERR_OK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: making request\n")); + + dhcp_option(dhcp, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + dhcp_option_short(dhcp, DHCP_MAX_MSG_LEN(netif)); + + dhcp_option(dhcp, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + dhcp_option_byte(dhcp, dhcp_discover_request_options[i]); + } + dhcp_option_trailer(dhcp); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: realloc()ing\n")); + pbuf_realloc(dhcp->p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp->options_out_len); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: sendto(DISCOVER, IP_ADDR_BROADCAST, DHCP_SERVER_PORT)\n")); + udp_sendto_if_src(dhcp_pcb, dhcp->p_out, IP_ADDR_BROADCAST, DHCP_SERVER_PORT, netif, IP4_ADDR_ANY); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: deleting()ing\n")); + dhcp_delete_msg(dhcp); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover: SELECTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_discover: could not allocate DHCP request\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->tries >= LWIP_DHCP_AUTOIP_COOP_TRIES && dhcp->autoip_coop_state == 
DHCP_AUTOIP_COOP_STATE_OFF) { + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_ON; + autoip_start(netif); + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + msecs = (dhcp->tries < 6 ? 1 << dhcp->tries : 60) * 1000; + dhcp->request_timeout = (msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + + +/** + * Bind the interface to the offered IP address. + * + * @param netif network interface to bind to the offered address + */ +static void +dhcp_bind(struct netif *netif) +{ + u32_t timeout; + struct dhcp *dhcp; + ip4_addr_t sn_mask, gw_addr; + LWIP_ERROR("dhcp_bind: netif != NULL", (netif != NULL), return;); + dhcp = netif_dhcp_data(netif); + LWIP_ERROR("dhcp_bind: dhcp != NULL", (dhcp != NULL), return;); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(netif=%p) %c%c%"U16_F"\n", (void*)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + /* reset time used of lease */ + dhcp->lease_used = 0; + + if (dhcp->offered_t0_lease != 0xffffffffUL) { + /* set renewal period timer */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t0 renewal timer %"U32_F" secs\n", dhcp->offered_t0_lease)); + timeout = (dhcp->offered_t0_lease + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t0_timeout = (u16_t)timeout; + if (dhcp->t0_timeout == 0) { + dhcp->t0_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t0_lease*1000)); + } + + /* temporary DHCP lease? */ + if (dhcp->offered_t1_renew != 0xffffffffUL) { + /* set renewal period timer */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t1 renewal timer %"U32_F" secs\n", dhcp->offered_t1_renew)); + timeout = (dhcp->offered_t1_renew + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t1_timeout = (u16_t)timeout; + if (dhcp->t1_timeout == 0) { + dhcp->t1_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t1_renew*1000)); + dhcp->t1_renew_time = dhcp->t1_timeout; + } + /* set renewal period timer */ + if (dhcp->offered_t2_rebind != 0xffffffffUL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t2 rebind timer %"U32_F" secs\n", dhcp->offered_t2_rebind)); + timeout = (dhcp->offered_t2_rebind + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t2_timeout = (u16_t)timeout; + if (dhcp->t2_timeout == 0) { + dhcp->t2_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t2_rebind*1000)); + dhcp->t2_rebind_time = dhcp->t2_timeout; + } + + /* If we have sub 1 minute lease, t2 and t1 will kick in at the same time. 
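+     (With DHCP_COARSE_TIMER_SECS = 60, a lease shorter than a minute
+     rounds both timers to a single coarse tick; t1_timeout is therefore
+     zeroed below so that only the rebind timer fires.)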
*/ + if ((dhcp->t1_timeout >= dhcp->t2_timeout) && (dhcp->t2_timeout > 0)) { + dhcp->t1_timeout = 0; + } + + if (dhcp->subnet_mask_given) { + /* copy offered network mask */ + ip4_addr_copy(sn_mask, dhcp->offered_sn_mask); + } else { + /* subnet mask not given, choose a safe subnet mask given the network class */ + u8_t first_octet = ip4_addr1(&dhcp->offered_ip_addr); + if (first_octet <= 127) { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xff000000UL)); + } else if (first_octet >= 192) { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffffff00UL)); + } else { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffff0000UL)); + } + } + + ip4_addr_copy(gw_addr, dhcp->offered_gw_addr); + /* gateway address not given? */ + if (ip4_addr_isany_val(gw_addr)) { + /* copy network address */ + ip4_addr_get_network(&gw_addr, &dhcp->offered_ip_addr, &sn_mask); + /* use first host address on network as gateway */ + ip4_addr_set_u32(&gw_addr, ip4_addr_get_u32(&gw_addr) | PP_HTONL(0x00000001UL)); + } + +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_bind(): IP: 0x%08"X32_F" SN: 0x%08"X32_F" GW: 0x%08"X32_F"\n", + ip4_addr_get_u32(&dhcp->offered_ip_addr), ip4_addr_get_u32(&sn_mask), ip4_addr_get_u32(&gw_addr))); + /* netif is now bound to DHCP leased address - set this before assigning the address + to ensure the callback can use dhcp_supplied_address() */ + dhcp_set_state(dhcp, DHCP_STATE_BOUND); + + netif_set_addr(netif, &dhcp->offered_ip_addr, &sn_mask, &gw_addr); + /* interface is used by routing now that an address is set */ +} + +/** + * @ingroup dhcp4 + * Renew an existing DHCP lease at the involved DHCP server. + * + * @param netif network interface which must renew its lease + */ +err_t +dhcp_renew(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_renew()\n")); + dhcp_set_state(dhcp, DHCP_STATE_RENEWING); + + /* create and initialize the DHCP message header */ + result = dhcp_create_msg(netif, dhcp, DHCP_REQUEST); + if (result == ERR_OK) { + dhcp_option(dhcp, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + dhcp_option_short(dhcp, DHCP_MAX_MSG_LEN(netif)); + + dhcp_option(dhcp, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + dhcp_option_byte(dhcp, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + dhcp_option_hostname(dhcp, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + /* append DHCP message trailer */ + dhcp_option_trailer(dhcp); + + pbuf_realloc(dhcp->p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp->options_out_len); + + udp_sendto_if(dhcp_pcb, dhcp->p_out, &dhcp->server_ip_addr, DHCP_SERVER_PORT, netif); + dhcp_delete_msg(dhcp); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew: RENEWING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_renew: could not allocate DHCP request\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + /* back-off on retries, but to a maximum of 20 seconds */ + msecs = dhcp->tries < 10 ? 
dhcp->tries * 2000 : 20 * 1000; + dhcp->request_timeout = (msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * Rebind with a DHCP server for an existing DHCP lease. + * + * @param netif network interface which must rebind with a DHCP server + */ +static err_t +dhcp_rebind(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind()\n")); + dhcp_set_state(dhcp, DHCP_STATE_REBINDING); + + /* create and initialize the DHCP message header */ + result = dhcp_create_msg(netif, dhcp, DHCP_REQUEST); + if (result == ERR_OK) { + dhcp_option(dhcp, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + dhcp_option_short(dhcp, DHCP_MAX_MSG_LEN(netif)); + + dhcp_option(dhcp, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + dhcp_option_byte(dhcp, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + dhcp_option_hostname(dhcp, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + dhcp_option_trailer(dhcp); + + pbuf_realloc(dhcp->p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp->options_out_len); + + /* broadcast to server */ + udp_sendto_if(dhcp_pcb, dhcp->p_out, IP_ADDR_BROADCAST, DHCP_SERVER_PORT, netif); + dhcp_delete_msg(dhcp); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind: REBINDING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_rebind: could not allocate DHCP request\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = dhcp->tries < 10 ? 
dhcp->tries * 1000 : 10 * 1000; + dhcp->request_timeout = (msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * Enter REBOOTING state to verify an existing lease + * + * @param netif network interface which must reboot + */ +static err_t +dhcp_reboot(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot()\n")); + dhcp_set_state(dhcp, DHCP_STATE_REBOOTING); + + /* create and initialize the DHCP message header */ + result = dhcp_create_msg(netif, dhcp, DHCP_REQUEST); + if (result == ERR_OK) { + dhcp_option(dhcp, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + dhcp_option_short(dhcp, DHCP_MAX_MSG_LEN_MIN_REQUIRED); + + dhcp_option(dhcp, DHCP_OPTION_REQUESTED_IP, 4); + dhcp_option_long(dhcp, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + dhcp_option(dhcp, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + dhcp_option_byte(dhcp, dhcp_discover_request_options[i]); + } + + dhcp_option_trailer(dhcp); + + pbuf_realloc(dhcp->p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp->options_out_len); + + /* broadcast to server */ + udp_sendto_if(dhcp_pcb, dhcp->p_out, IP_ADDR_BROADCAST, DHCP_SERVER_PORT, netif); + dhcp_delete_msg(dhcp); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot: REBOOTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_reboot: could not allocate DHCP request\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = dhcp->tries < 10 ? dhcp->tries * 1000 : 10 * 1000; + dhcp->request_timeout = (msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + + +/** + * @ingroup dhcp4 + * Release a DHCP lease (usually called before @ref dhcp_stop). 
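+ *
+ * Typical teardown order (illustrative):
+ *
+ *   dhcp_release(&my_netif);  // send RELEASE and drop the leased address
+ *   dhcp_stop(&my_netif);     // then remove the client from the netif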
+ * + * @param netif network interface which must release its lease + */ +err_t +dhcp_release(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + ip_addr_t server_ip_addr; + u8_t is_dhcp_supplied_address; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_release()\n")); + if (dhcp == NULL) { + return ERR_ARG; + } + ip_addr_copy(server_ip_addr, dhcp->server_ip_addr); + + is_dhcp_supplied_address = dhcp_supplied_address(netif); + + /* idle DHCP client */ + dhcp_set_state(dhcp, DHCP_STATE_OFF); + /* clean old DHCP offer */ + ip_addr_set_zero_ip4(&dhcp->server_ip_addr); + ip4_addr_set_zero(&dhcp->offered_ip_addr); + ip4_addr_set_zero(&dhcp->offered_sn_mask); + ip4_addr_set_zero(&dhcp->offered_gw_addr); +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_set_zero(&dhcp->offered_si_addr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + dhcp->offered_t0_lease = dhcp->offered_t1_renew = dhcp->offered_t2_rebind = 0; + dhcp->t1_renew_time = dhcp->t2_rebind_time = dhcp->lease_used = dhcp->t0_timeout = 0; + + if (!is_dhcp_supplied_address) { + /* don't issue release message when address is not dhcp-assigned */ + return ERR_OK; + } + + /* create and initialize the DHCP message header */ + result = dhcp_create_msg(netif, dhcp, DHCP_RELEASE); + if (result == ERR_OK) { + dhcp_option(dhcp, DHCP_OPTION_SERVER_ID, 4); + dhcp_option_long(dhcp, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&server_ip_addr)))); + + dhcp_option_trailer(dhcp); + + pbuf_realloc(dhcp->p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp->options_out_len); + + udp_sendto_if(dhcp_pcb, dhcp->p_out, &server_ip_addr, DHCP_SERVER_PORT, netif); + dhcp_delete_msg(dhcp); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_release: RELEASED, DHCP_STATE_OFF\n")); + } else { + /* sending release failed, but that's not a problem since the correct behaviour of dhcp does not rely on release */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_release: could not allocate DHCP request\n")); + } + /* remove IP address from interface (prevents routing from selecting this interface) */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + + return result; +} + +/** + * @ingroup dhcp4 + * Remove the DHCP client from the interface. + * + * @param netif The network interface to stop DHCP on + */ +void +dhcp_stop(struct netif *netif) +{ + struct dhcp *dhcp; + LWIP_ERROR("dhcp_stop: netif != NULL", (netif != NULL), return;); + dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_stop()\n")); + /* netif is DHCP configured? */ + if (dhcp != NULL) { +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + + LWIP_ASSERT("reply wasn't freed", dhcp->msg_in == NULL); + dhcp_set_state(dhcp, DHCP_STATE_OFF); + + if (dhcp->pcb_allocated != 0) { + dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */ + dhcp->pcb_allocated = 0; + } + } +} + +/* + * Set the DHCP state of a DHCP client. + * + * If the state changed, reset the number of tries. + */ +static void +dhcp_set_state(struct dhcp *dhcp, u8_t new_state) +{ + if (new_state != dhcp->state) { + dhcp->state = new_state; + dhcp->tries = 0; + dhcp->request_timeout = 0; + } +} + +/* + * Concatenate an option type and length field to the outgoing + * DHCP message. 
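As the dhcp_release() comment above notes, release is normally paired with dhcp_stop(). A usage sketch under that reading; my_netif is a hypothetical, already-started interface, not part of the patch:

/* Hypothetical teardown: release the lease first (sends DHCPRELEASE when the
 * address was DHCP-assigned, then clears it), then remove the client. */
err_t err = dhcp_release(&my_netif);
dhcp_stop(&my_netif);
(void)err; /* ERR_OK is also returned when no release message was needed */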
+ * + */ +static void +dhcp_option(struct dhcp *dhcp, u8_t option_type, u8_t option_len) +{ + LWIP_ASSERT("dhcp_option: dhcp->options_out_len + 2 + option_len <= DHCP_OPTIONS_LEN", dhcp->options_out_len + 2U + option_len <= DHCP_OPTIONS_LEN); + dhcp->msg_out->options[dhcp->options_out_len++] = option_type; + dhcp->msg_out->options[dhcp->options_out_len++] = option_len; +} +/* + * Concatenate a single byte to the outgoing DHCP message. + * + */ +static void +dhcp_option_byte(struct dhcp *dhcp, u8_t value) +{ + LWIP_ASSERT("dhcp_option_byte: dhcp->options_out_len < DHCP_OPTIONS_LEN", dhcp->options_out_len < DHCP_OPTIONS_LEN); + dhcp->msg_out->options[dhcp->options_out_len++] = value; +} + +static void +dhcp_option_short(struct dhcp *dhcp, u16_t value) +{ + LWIP_ASSERT("dhcp_option_short: dhcp->options_out_len + 2 <= DHCP_OPTIONS_LEN", dhcp->options_out_len + 2U <= DHCP_OPTIONS_LEN); + dhcp->msg_out->options[dhcp->options_out_len++] = (u8_t)((value & 0xff00U) >> 8); + dhcp->msg_out->options[dhcp->options_out_len++] = (u8_t) (value & 0x00ffU); +} + +static void +dhcp_option_long(struct dhcp *dhcp, u32_t value) +{ + LWIP_ASSERT("dhcp_option_long: dhcp->options_out_len + 4 <= DHCP_OPTIONS_LEN", dhcp->options_out_len + 4U <= DHCP_OPTIONS_LEN); + dhcp->msg_out->options[dhcp->options_out_len++] = (u8_t)((value & 0xff000000UL) >> 24); + dhcp->msg_out->options[dhcp->options_out_len++] = (u8_t)((value & 0x00ff0000UL) >> 16); + dhcp->msg_out->options[dhcp->options_out_len++] = (u8_t)((value & 0x0000ff00UL) >> 8); + dhcp->msg_out->options[dhcp->options_out_len++] = (u8_t)((value & 0x000000ffUL)); +} + +#if LWIP_NETIF_HOSTNAME +static void +dhcp_option_hostname(struct dhcp *dhcp, struct netif *netif) +{ + if (netif->hostname != NULL) { + size_t namelen = strlen(netif->hostname); + if (namelen > 0) { + size_t len; + const char *p = netif->hostname; + /* Shrink len to available bytes (need 2 bytes for OPTION_HOSTNAME + and 1 byte for trailer) */ + size_t available = DHCP_OPTIONS_LEN - dhcp->options_out_len - 3; + LWIP_ASSERT("DHCP: hostname is too long!", namelen <= available); + len = LWIP_MIN(namelen, available); + LWIP_ASSERT("DHCP: hostname is too long!", len <= 0xFF); + dhcp_option(dhcp, DHCP_OPTION_HOSTNAME, (u8_t)len); + while (len--) { + dhcp_option_byte(dhcp, *p++); + } + } + } +} +#endif /* LWIP_NETIF_HOSTNAME */ + +/** + * Extract the DHCP message and the DHCP options. + * + * Extract the DHCP message and the DHCP options, each into a contiguous + * piece of memory. As a DHCP message is variable sized by its options, + * and also allows overriding some fields for options, the easy approach + * is to first unfold the options into a contiguous piece of memory, and + * use that further on. 
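The writers above emit classic TLV options in network byte order: dhcp_option() appends the type and length bytes, then dhcp_option_short()/dhcp_option_long() append the value MSB-first. A standalone mirror of that layout (opt_hdr/opt_u32 are illustrative stand-ins, not lwIP API), encoding a Requested-IP option (code 50) for 192.168.1.50:

#include <stdio.h>
#include <stdint.h>

static uint8_t opts[16];
static int len;
static void opt_hdr(uint8_t t, uint8_t l) { opts[len++] = t; opts[len++] = l; }
static void opt_u32(uint32_t v) { /* big-endian, like dhcp_option_long() */
  opts[len++] = (uint8_t)(v >> 24); opts[len++] = (uint8_t)(v >> 16);
  opts[len++] = (uint8_t)(v >> 8);  opts[len++] = (uint8_t)v;
}

int main(void) {
  int i;
  opt_hdr(50, 4);        /* DHCP_OPTION_REQUESTED_IP, 4 value bytes */
  opt_u32(0xC0A80132UL); /* 192.168.1.50 as a host-order u32 */
  for (i = 0; i < len; i++) printf("%02X ", opts[i]); /* 32 04 C0 A8 01 32 */
  printf("\n");
  return 0;
}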
+ * + */ +static err_t +dhcp_parse_reply(struct dhcp *dhcp, struct pbuf *p) +{ + u8_t *options; + u16_t offset; + u16_t offset_max; + u16_t options_idx; + u16_t options_idx_max; + struct pbuf *q; + int parse_file_as_options = 0; + int parse_sname_as_options = 0; + + /* clear received options */ + dhcp_clear_all_options(dhcp); + /* check that beginning of dhcp_msg (up to and including chaddr) is in first pbuf */ + if (p->len < DHCP_SNAME_OFS) { + return ERR_BUF; + } + dhcp->msg_in = (struct dhcp_msg *)p->payload; +#if LWIP_DHCP_BOOTP_FILE + /* clear boot file name */ + dhcp->boot_file_name[0] = 0; +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* parse options */ + + /* start with options field */ + options_idx = DHCP_OPTIONS_OFS; + /* parse options to the end of the received packet */ + options_idx_max = p->tot_len; +again: + q = p; + while ((q != NULL) && (options_idx >= q->len)) { + options_idx -= q->len; + options_idx_max -= q->len; + q = q->next; + } + if (q == NULL) { + return ERR_BUF; + } + offset = options_idx; + offset_max = options_idx_max; + options = (u8_t*)q->payload; + /* at least 1 byte to read and no end marker, then at least 3 bytes to read? */ + while ((q != NULL) && (offset < offset_max) && (options[offset] != DHCP_OPTION_END)) { + u8_t op = options[offset]; + u8_t len; + u8_t decode_len = 0; + int decode_idx = -1; + u16_t val_offset = offset + 2; + /* len byte might be in the next pbuf */ + if ((offset + 1) < q->len) { + len = options[offset + 1]; + } else { + len = (q->next != NULL ? ((u8_t*)q->next->payload)[0] : 0); + } + /* LWIP_DEBUGF(DHCP_DEBUG, ("msg_offset=%"U16_F", q->len=%"U16_F, msg_offset, q->len)); */ + decode_len = len; + switch(op) { + /* case(DHCP_OPTION_END): handled above */ + case(DHCP_OPTION_PAD): + /* special option: no len encoded */ + decode_len = len = 0; + /* will be increased below */ + offset--; + break; + case(DHCP_OPTION_SUBNET_MASK): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_SUBNET_MASK; + break; + case(DHCP_OPTION_ROUTER): + decode_len = 4; /* only copy the first given router */ + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_ROUTER; + break; +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + case(DHCP_OPTION_DNS_SERVER): + /* special case: there might be more than one server */ + LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;); + /* limit number of DNS servers */ + decode_len = LWIP_MIN(len, 4 * DNS_MAX_SERVERS); + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_DNS_SERVER; + break; +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ + case(DHCP_OPTION_LEASE_TIME): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_LEASE_TIME; + break; +#if LWIP_DHCP_GET_NTP_SRV + case(DHCP_OPTION_NTP): + /* special case: there might be more than one server */ + LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;); + /* limit number of NTP servers */ + decode_len = LWIP_MIN(len, 4 * LWIP_DHCP_MAX_NTP_SERVERS); + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_NTP_SERVER; + break; +#endif /* LWIP_DHCP_GET_NTP_SRV*/ + case(DHCP_OPTION_OVERLOAD): + LWIP_ERROR("len == 1", len == 1, return ERR_VAL;); + /* decode overload only in options, not in file/sname: invalid packet */ + LWIP_ERROR("overload in file/sname", options_idx == DHCP_OPTIONS_OFS, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_OVERLOAD; + break; + case(DHCP_OPTION_MESSAGE_TYPE): 
+ LWIP_ERROR("len == 1", len == 1, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_MSG_TYPE; + break; + case(DHCP_OPTION_SERVER_ID): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_SERVER_ID; + break; + case(DHCP_OPTION_T1): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_T1; + break; + case(DHCP_OPTION_T2): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_T2; + break; + default: + decode_len = 0; + LWIP_DEBUGF(DHCP_DEBUG, ("skipping option %"U16_F" in options\n", (u16_t)op)); + break; + } + offset += len + 2; + if (decode_len > 0) { + u32_t value = 0; + u16_t copy_len; +decode_next: + LWIP_ASSERT("check decode_idx", decode_idx >= 0 && decode_idx < DHCP_OPTION_IDX_MAX); + if (!dhcp_option_given(dhcp, decode_idx)) { + copy_len = LWIP_MIN(decode_len, 4); + if (pbuf_copy_partial(q, &value, copy_len, val_offset) != copy_len) { + return ERR_BUF; + } + if (decode_len > 4) { + /* decode more than one u32_t */ + LWIP_ERROR("decode_len %% 4 == 0", decode_len % 4 == 0, return ERR_VAL;); + dhcp_got_option(dhcp, decode_idx); + dhcp_set_option_value(dhcp, decode_idx, lwip_htonl(value)); + decode_len -= 4; + val_offset += 4; + decode_idx++; + goto decode_next; + } else if (decode_len == 4) { + value = lwip_ntohl(value); + } else { + LWIP_ERROR("invalid decode_len", decode_len == 1, return ERR_VAL;); + value = ((u8_t*)&value)[0]; + } + dhcp_got_option(dhcp, decode_idx); + dhcp_set_option_value(dhcp, decode_idx, value); + } + } + if (offset >= q->len) { + offset -= q->len; + offset_max -= q->len; + if ((offset < offset_max) && offset_max) { + q = q->next; + LWIP_ERROR("next pbuf was null", q != NULL, return ERR_VAL;); + options = (u8_t*)q->payload; + } else { + /* We've run out of bytes, probably no end marker. Don't proceed. */ + break; + } + } + } + /* is this an overloaded message? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_OVERLOAD)) { + u32_t overload = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_OVERLOAD); + dhcp_clear_option(dhcp, DHCP_OPTION_IDX_OVERLOAD); + if (overload == DHCP_OVERLOAD_FILE) { + parse_file_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded file field\n")); + } else if (overload == DHCP_OVERLOAD_SNAME) { + parse_sname_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname field\n")); + } else if (overload == DHCP_OVERLOAD_SNAME_FILE) { + parse_sname_as_options = 1; + parse_file_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname and file field\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("invalid overload option: %d\n", (int)overload)); + } +#if LWIP_DHCP_BOOTP_FILE + if (!parse_file_as_options) { + /* only do this for ACK messages */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE) && + (dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE) == DHCP_ACK)) + /* copy bootp file name, don't care for sname (server hostname) */ + if (pbuf_copy_partial(p, dhcp->boot_file_name, DHCP_FILE_LEN-1, DHCP_FILE_OFS) != (DHCP_FILE_LEN-1)) { + return ERR_BUF; + } + /* make sure the string is really NULL-terminated */ + dhcp->boot_file_name[DHCP_FILE_LEN-1] = 0; + } +#endif /* LWIP_DHCP_BOOTP_FILE */ + } + if (parse_file_as_options) { + /* if both are overloaded, parse file first and then sname (RFC 2131 ch. 
4.1) */ + parse_file_as_options = 0; + options_idx = DHCP_FILE_OFS; + options_idx_max = DHCP_FILE_OFS + DHCP_FILE_LEN; + goto again; + } else if (parse_sname_as_options) { + parse_sname_as_options = 0; + options_idx = DHCP_SNAME_OFS; + options_idx_max = DHCP_SNAME_OFS + DHCP_SNAME_LEN; + goto again; + } + return ERR_OK; +} + +/** + * If an incoming DHCP message is in response to us, then trigger the state machine + */ +static void +dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + struct netif *netif = ip_current_input_netif(); + struct dhcp *dhcp = netif_dhcp_data(netif); + struct dhcp_msg *reply_msg = (struct dhcp_msg *)p->payload; + u8_t msg_type; + u8_t i; + + LWIP_UNUSED_ARG(arg); + + /* Caught DHCP message from netif that does not have DHCP enabled? -> not interested */ + if ((dhcp == NULL) || (dhcp->pcb_allocated == 0)) { + goto free_pbuf_and_return; + } + + LWIP_ASSERT("invalid server address type", IP_IS_V4(addr)); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_recv(pbuf = %p) from DHCP server %"U16_F".%"U16_F".%"U16_F".%"U16_F" port %"U16_F"\n", (void*)p, + ip4_addr1_16(ip_2_ip4(addr)), ip4_addr2_16(ip_2_ip4(addr)), ip4_addr3_16(ip_2_ip4(addr)), ip4_addr4_16(ip_2_ip4(addr)), port)); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->len = %"U16_F"\n", p->len)); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->tot_len = %"U16_F"\n", p->tot_len)); + /* prevent warnings about unused arguments */ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); + + LWIP_ASSERT("reply wasn't freed", dhcp->msg_in == NULL); + + if (p->len < DHCP_MIN_REPLY_LEN) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP reply message or pbuf too short\n")); + goto free_pbuf_and_return; + } + + if (reply_msg->op != DHCP_BOOTREPLY) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("not a DHCP reply message, but type %"U16_F"\n", (u16_t)reply_msg->op)); + goto free_pbuf_and_return; + } + /* iterate through hardware address and match against DHCP message */ + for (i = 0; i < netif->hwaddr_len && i < NETIF_MAX_HWADDR_LEN && i < DHCP_CHADDR_LEN; i++) { + if (netif->hwaddr[i] != reply_msg->chaddr[i]) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("netif->hwaddr[%"U16_F"]==%02"X16_F" != reply_msg->chaddr[%"U16_F"]==%02"X16_F"\n", + (u16_t)i, (u16_t)netif->hwaddr[i], (u16_t)i, (u16_t)reply_msg->chaddr[i])); + goto free_pbuf_and_return; + } + } + /* match transaction ID against what we expected */ + if (lwip_ntohl(reply_msg->xid) != dhcp->xid) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("transaction id mismatch reply_msg->xid(%"X32_F")!=dhcp->xid(%"X32_F")\n",lwip_ntohl(reply_msg->xid),dhcp->xid)); + goto free_pbuf_and_return; + } + /* option fields could be unfold? 
*/ + if (dhcp_parse_reply(dhcp, p) != ERR_OK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("problem unfolding DHCP message - too short on memory?\n")); + goto free_pbuf_and_return; + } + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("searching DHCP_OPTION_MESSAGE_TYPE\n")); + /* obtain pointer to DHCP message type */ + if (!dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP_OPTION_MESSAGE_TYPE option not found\n")); + goto free_pbuf_and_return; + } + + /* read DHCP message type */ + msg_type = (u8_t)dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE); + /* message type is DHCP ACK? */ + if (msg_type == DHCP_ACK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_ACK received\n")); + /* in requesting state? */ + if (dhcp->state == DHCP_STATE_REQUESTING) { + dhcp_handle_ack(netif); +#if DHCP_DOES_ARP_CHECK + if ((netif->flags & NETIF_FLAG_ETHARP) != 0) { + /* check if the acknowledged lease address is already in use */ + dhcp_check(netif); + } else { + /* bind interface to the acknowledged lease address */ + dhcp_bind(netif); + } +#else + /* bind interface to the acknowledged lease address */ + dhcp_bind(netif); +#endif + } + /* already bound to the given lease address? */ + else if ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REBINDING) || + (dhcp->state == DHCP_STATE_RENEWING)) { + dhcp_handle_ack(netif); + dhcp_bind(netif); + } + } + /* received a DHCP_NAK in appropriate state? */ + else if ((msg_type == DHCP_NAK) && + ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REQUESTING) || + (dhcp->state == DHCP_STATE_REBINDING) || (dhcp->state == DHCP_STATE_RENEWING ))) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_NAK received\n")); + dhcp_handle_nak(netif); + } + /* received a DHCP_OFFER in DHCP_STATE_SELECTING state? */ + else if ((msg_type == DHCP_OFFER) && (dhcp->state == DHCP_STATE_SELECTING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_OFFER received in DHCP_STATE_SELECTING state\n")); + dhcp->request_timeout = 0; + /* remember offered lease */ + dhcp_handle_offer(netif); + } + +free_pbuf_and_return: + if (dhcp != NULL) { + dhcp->msg_in = NULL; + } + pbuf_free(p); +} + +/** + * Create a DHCP request, fill in common headers + * + * @param netif the netif under DHCP control + * @param dhcp dhcp control struct + * @param message_type message type of the request + */ +static err_t +dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type) +{ + u16_t i; +#ifndef DHCP_GLOBAL_XID + /** default global transaction identifier starting value (easy to match + * with a packet analyser). We simply increment for each new request. 
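For orientation, dhcp_recv() above accepts a reply only after a minimum-length check plus three identity checks, before any option parsing. Condensed into one predicate (a sketch compiled against lwIP's own types, not lwIP API; reply_is_for_us is a hypothetical name):

#include <string.h> /* memcmp */

/* Condensed restatement of the dhcp_recv() acceptance checks above. */
static int reply_is_for_us(const struct dhcp *dhcp, const struct dhcp_msg *reply,
                           const struct netif *netif) {
  return (reply->op == DHCP_BOOTREPLY) &&            /* server-to-client message */
         (memcmp(reply->chaddr, netif->hwaddr,
                 netif->hwaddr_len) == 0) &&          /* our hardware address echoed */
         (lwip_ntohl(reply->xid) == dhcp->xid);       /* our transaction id echoed */
}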
+ * Predefine DHCP_GLOBAL_XID to a better value or a function call to generate one + * at runtime, any supporting function prototypes can be defined in DHCP_GLOBAL_XID_HEADER */ +#if DHCP_CREATE_RAND_XID && defined(LWIP_RAND) + static u32_t xid; +#else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + static u32_t xid = 0xABCD0000; +#endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ +#else + if (!xid_initialised) { + xid = DHCP_GLOBAL_XID; + xid_initialised = !xid_initialised; + } +#endif + LWIP_ERROR("dhcp_create_msg: netif != NULL", (netif != NULL), return ERR_ARG;); + LWIP_ERROR("dhcp_create_msg: dhcp != NULL", (dhcp != NULL), return ERR_VAL;); + LWIP_ASSERT("dhcp_create_msg: dhcp->p_out == NULL", dhcp->p_out == NULL); + LWIP_ASSERT("dhcp_create_msg: dhcp->msg_out == NULL", dhcp->msg_out == NULL); + dhcp->p_out = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct dhcp_msg), PBUF_RAM); + if (dhcp->p_out == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_create_msg(): could not allocate pbuf\n")); + return ERR_MEM; + } + LWIP_ASSERT("dhcp_create_msg: check that first pbuf can hold struct dhcp_msg", + (dhcp->p_out->len >= sizeof(struct dhcp_msg))); + + /* DHCP_REQUEST should reuse 'xid' from DHCPOFFER */ + if ((message_type != DHCP_REQUEST) || (dhcp->state == DHCP_STATE_REBOOTING)) { + /* reuse transaction identifier in retransmissions */ + if (dhcp->tries == 0) { +#if DHCP_CREATE_RAND_XID && defined(LWIP_RAND) + xid = LWIP_RAND(); +#else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + xid++; +#endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + } + dhcp->xid = xid; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, + ("transaction id xid(%"X32_F")\n", xid)); + + dhcp->msg_out = (struct dhcp_msg *)dhcp->p_out->payload; + + dhcp->msg_out->op = DHCP_BOOTREQUEST; + /* @todo: make link layer independent */ + dhcp->msg_out->htype = DHCP_HTYPE_ETH; + dhcp->msg_out->hlen = netif->hwaddr_len; + dhcp->msg_out->hops = 0; + dhcp->msg_out->xid = lwip_htonl(dhcp->xid); + dhcp->msg_out->secs = 0; + /* we don't need the broadcast flag since we can receive unicast traffic + before being fully configured! */ + dhcp->msg_out->flags = 0; + ip4_addr_set_zero(&dhcp->msg_out->ciaddr); + /* set ciaddr to netif->ip_addr based on message_type and state */ + if ((message_type == DHCP_INFORM) || (message_type == DHCP_DECLINE) || (message_type == DHCP_RELEASE) || + ((message_type == DHCP_REQUEST) && /* DHCP_STATE_BOUND not used for sending! */ + ((dhcp->state== DHCP_STATE_RENEWING) || dhcp->state== DHCP_STATE_REBINDING))) { + ip4_addr_copy(dhcp->msg_out->ciaddr, *netif_ip4_addr(netif)); + } + ip4_addr_set_zero(&dhcp->msg_out->yiaddr); + ip4_addr_set_zero(&dhcp->msg_out->siaddr); + ip4_addr_set_zero(&dhcp->msg_out->giaddr); + for (i = 0; i < DHCP_CHADDR_LEN; i++) { + /* copy netif hardware address, pad with zeroes */ + dhcp->msg_out->chaddr[i] = (i < netif->hwaddr_len && i < NETIF_MAX_HWADDR_LEN) ? 
netif->hwaddr[i] : 0/* pad byte*/; + } + for (i = 0; i < DHCP_SNAME_LEN; i++) { + dhcp->msg_out->sname[i] = 0; + } + for (i = 0; i < DHCP_FILE_LEN; i++) { + dhcp->msg_out->file[i] = 0; + } + dhcp->msg_out->cookie = PP_HTONL(DHCP_MAGIC_COOKIE); + dhcp->options_out_len = 0; + /* fill options field with an incrementing array (for debugging purposes) */ + for (i = 0; i < DHCP_OPTIONS_LEN; i++) { + dhcp->msg_out->options[i] = (u8_t)i; /* for debugging only, no matter if truncated */ + } + /* Add option MESSAGE_TYPE */ + dhcp_option(dhcp, DHCP_OPTION_MESSAGE_TYPE, DHCP_OPTION_MESSAGE_TYPE_LEN); + dhcp_option_byte(dhcp, message_type); + return ERR_OK; +} + +/** + * Free previously allocated memory used to send a DHCP request. + * + * @param dhcp the dhcp struct to free the request from + */ +static void +dhcp_delete_msg(struct dhcp *dhcp) +{ + LWIP_ERROR("dhcp_delete_msg: dhcp != NULL", (dhcp != NULL), return;); + LWIP_ASSERT("dhcp_delete_msg: dhcp->p_out != NULL", dhcp->p_out != NULL); + LWIP_ASSERT("dhcp_delete_msg: dhcp->msg_out != NULL", dhcp->msg_out != NULL); + if (dhcp->p_out != NULL) { + pbuf_free(dhcp->p_out); + } + dhcp->p_out = NULL; + dhcp->msg_out = NULL; +} + +/** + * Add a DHCP message trailer + * + * Adds the END option to the DHCP message, and if + * necessary, up to three padding bytes. + * + * @param dhcp DHCP state structure + */ +static void +dhcp_option_trailer(struct dhcp *dhcp) +{ + LWIP_ERROR("dhcp_option_trailer: dhcp != NULL", (dhcp != NULL), return;); + LWIP_ASSERT("dhcp_option_trailer: dhcp->msg_out != NULL\n", dhcp->msg_out != NULL); + LWIP_ASSERT("dhcp_option_trailer: dhcp->options_out_len < DHCP_OPTIONS_LEN\n", dhcp->options_out_len < DHCP_OPTIONS_LEN); + dhcp->msg_out->options[dhcp->options_out_len++] = DHCP_OPTION_END; + /* packet is too small, or not 4 byte aligned? */ + while (((dhcp->options_out_len < DHCP_MIN_OPTIONS_LEN) || (dhcp->options_out_len & 3)) && + (dhcp->options_out_len < DHCP_OPTIONS_LEN)) { + /* add a fill/padding byte */ + dhcp->msg_out->options[dhcp->options_out_len++] = 0; + } +} + +/** check if DHCP supplied netif->ip_addr + * + * @param netif the netif to check + * @return 1 if DHCP supplied netif->ip_addr (states BOUND, RENEWING or REBINDING), + * 0 otherwise + */ +u8_t +dhcp_supplied_address(const struct netif *netif) +{ + if ((netif != NULL) && (netif_dhcp_data(netif) != NULL)) { + struct dhcp* dhcp = netif_dhcp_data(netif); + return (dhcp->state == DHCP_STATE_BOUND) || (dhcp->state == DHCP_STATE_RENEWING) || + (dhcp->state == DHCP_STATE_REBINDING); + } + return 0; +} + +#endif /* LWIP_IPV4 && LWIP_DHCP */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c new file mode 100644 index 000000000..a5df46b9d --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c @@ -0,0 +1,1206 @@ +/** + * @file + * Address Resolution Protocol module for IP over Ethernet + * + * Functionally, ARP is divided into two parts. The first maps an IP address + * to a physical address when sending a packet, and the second part answers + * requests from other machines for our physical address. + * + * This implementation complies with RFC 826 (Ethernet ARP). It supports + * Gratuitous ARP from RFC3220 (IP Mobility Support for IPv4) section 4.6 + * if an interface calls etharp_gratuitous(our_netif) upon address change. + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. 
+ * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/opt.h" + +#if LWIP_ARP || LWIP_ETHERNET + +#include "lwip/etharp.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "netif/ethernet.h" + +#include <string.h> + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#if LWIP_IPV4 && LWIP_ARP /* don't build if not configured for use in lwipopts.h */ + +/** Re-request a used ARP entry 1 minute before it would expire to prevent + * breaking a steadily used connection because the ARP entry timed out. */ +#define ARP_AGE_REREQUEST_USED_UNICAST (ARP_MAXAGE - 30) +#define ARP_AGE_REREQUEST_USED_BROADCAST (ARP_MAXAGE - 15) + +/** the time an ARP entry stays pending after first request, + * for ARP_TMR_INTERVAL = 1000, this is + * 5 seconds. + * + * @internal Keep this number at least 2, otherwise it might + * run out instantly if the timeout occurs directly after a request. + */ +#define ARP_MAXPENDING 5 + +/** ARP states */ +enum etharp_state { + ETHARP_STATE_EMPTY = 0, + ETHARP_STATE_PENDING, + ETHARP_STATE_STABLE, + ETHARP_STATE_STABLE_REREQUESTING_1, + ETHARP_STATE_STABLE_REREQUESTING_2 +#if ETHARP_SUPPORT_STATIC_ENTRIES + ,ETHARP_STATE_STATIC +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ +}; + +struct etharp_entry { +#if ARP_QUEUEING + /** Pointer to queue of pending outgoing packets on this ARP entry. */ + struct etharp_q_entry *q; +#else /* ARP_QUEUEING */ + /** Pointer to a single pending outgoing packet on this ARP entry. */ + struct pbuf *q; +#endif /* ARP_QUEUEING */ + ip4_addr_t ipaddr; + struct netif *netif; + struct eth_addr ethaddr; + u16_t ctime; + u8_t state; +}; + +static struct etharp_entry arp_table[ARP_TABLE_SIZE]; + +#if !LWIP_NETIF_HWADDRHINT +static u8_t etharp_cached_entry; +#endif /* !LWIP_NETIF_HWADDRHINT */ + +/** Try hard to create a new entry - we want the IP address to appear in + the cache (even if this means removing an active entry or so). 
*/ +#define ETHARP_FLAG_TRY_HARD 1 +#define ETHARP_FLAG_FIND_ONLY 2 +#if ETHARP_SUPPORT_STATIC_ENTRIES +#define ETHARP_FLAG_STATIC_ENTRY 4 +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +#if LWIP_NETIF_HWADDRHINT +#define ETHARP_SET_HINT(netif, hint) if (((netif) != NULL) && ((netif)->addr_hint != NULL)) \ + *((netif)->addr_hint) = (hint); +#else /* LWIP_NETIF_HWADDRHINT */ +#define ETHARP_SET_HINT(netif, hint) (etharp_cached_entry = (hint)) +#endif /* LWIP_NETIF_HWADDRHINT */ + + +/* Some checks, instead of etharp_init(): */ +#if (LWIP_ARP && (ARP_TABLE_SIZE > 0x7f)) + #error "ARP_TABLE_SIZE must fit in an s8_t, you have to reduce it in your lwipopts.h" +#endif + + +static err_t etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr* hw_dst_addr); +static err_t etharp_raw(struct netif *netif, + const struct eth_addr *ethsrc_addr, const struct eth_addr *ethdst_addr, + const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr, + const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr, + const u16_t opcode); + +#if ARP_QUEUEING +/** + * Free a complete queue of etharp entries + * + * @param q a queue of etharp_q_entry's to free + */ +static void +free_etharp_q(struct etharp_q_entry *q) +{ + struct etharp_q_entry *r; + LWIP_ASSERT("q != NULL", q != NULL); + LWIP_ASSERT("q->p != NULL", q->p != NULL); + while (q) { + r = q; + q = q->next; + LWIP_ASSERT("r->p != NULL", (r->p != NULL)); + pbuf_free(r->p); + memp_free(MEMP_ARP_QUEUE, r); + } +} +#else /* ARP_QUEUEING */ + +/** Compatibility define: free the queued pbuf */ +#define free_etharp_q(q) pbuf_free(q) + +#endif /* ARP_QUEUEING */ + +/** Clean up ARP table entries */ +static void +etharp_free_entry(int i) +{ + /* remove from SNMP ARP index tree */ + mib2_remove_arp_entry(arp_table[i].netif, &arp_table[i].ipaddr); + /* and empty packet queue */ + if (arp_table[i].q != NULL) { + /* remove all queued packets */ + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_free_entry: freeing entry %"U16_F", packet queue %p.\n", (u16_t)i, (void *)(arp_table[i].q))); + free_etharp_q(arp_table[i].q); + arp_table[i].q = NULL; + } + /* recycle entry for re-use */ + arp_table[i].state = ETHARP_STATE_EMPTY; +#ifdef LWIP_DEBUG + /* for debugging, clean out the complete entry */ + arp_table[i].ctime = 0; + arp_table[i].netif = NULL; + ip4_addr_set_zero(&arp_table[i].ipaddr); + arp_table[i].ethaddr = ethzero; +#endif /* LWIP_DEBUG */ +} + +/** + * Clears expired entries in the ARP table. + * + * This function should be called every ARP_TMR_INTERVAL milliseconds (1 second), + * in order to expire entries in the ARP table. + */ +void +etharp_tmr(void) +{ + u8_t i; + + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer\n")); + /* remove expired entries from the ARP table */ + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + if (state != ETHARP_STATE_EMPTY +#if ETHARP_SUPPORT_STATIC_ENTRIES + && (state != ETHARP_STATE_STATIC) +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + ) { + arp_table[i].ctime++; + if ((arp_table[i].ctime >= ARP_MAXAGE) || + ((arp_table[i].state == ETHARP_STATE_PENDING) && + (arp_table[i].ctime >= ARP_MAXPENDING))) { + /* pending or stable entry has become old! */ + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer: expired %s entry %"U16_F".\n", + arp_table[i].state >= ETHARP_STATE_STABLE ? 
"stable" : "pending", (u16_t)i)); + /* clean up entries that have just been expired */ + etharp_free_entry(i); + } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_1) { + /* Don't send more than one request every 2 seconds. */ + arp_table[i].state = ETHARP_STATE_STABLE_REREQUESTING_2; + } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_2) { + /* Reset state to stable, so that the next transmitted packet will + re-send an ARP request. */ + arp_table[i].state = ETHARP_STATE_STABLE; + } else if (arp_table[i].state == ETHARP_STATE_PENDING) { + /* still pending, resend an ARP query */ + etharp_request(arp_table[i].netif, &arp_table[i].ipaddr); + } + } + } +} + +/** + * Search the ARP table for a matching or new entry. + * + * If an IP address is given, return a pending or stable ARP entry that matches + * the address. If no match is found, create a new entry with this address set, + * but in state ETHARP_EMPTY. The caller must check and possibly change the + * state of the returned entry. + * + * If ipaddr is NULL, return a initialized new entry in state ETHARP_EMPTY. + * + * In all cases, attempt to create new entries from an empty entry. If no + * empty entries are available and ETHARP_FLAG_TRY_HARD flag is set, recycle + * old entries. Heuristic choose the least important entry for recycling. + * + * @param ipaddr IP address to find in ARP cache, or to add if not found. + * @param flags See @ref etharp_state + * @param netif netif related to this address (used for NETIF_HWADDRHINT) + * + * @return The ARP entry index that matched or is created, ERR_MEM if no + * entry is found or could be recycled. + */ +static s8_t +etharp_find_entry(const ip4_addr_t *ipaddr, u8_t flags, struct netif* netif) +{ + s8_t old_pending = ARP_TABLE_SIZE, old_stable = ARP_TABLE_SIZE; + s8_t empty = ARP_TABLE_SIZE; + u8_t i = 0; + /* oldest entry with packets on queue */ + s8_t old_queue = ARP_TABLE_SIZE; + /* its age */ + u16_t age_queue = 0, age_pending = 0, age_stable = 0; + + LWIP_UNUSED_ARG(netif); + + /** + * a) do a search through the cache, remember candidates + * b) select candidate entry + * c) create new entry + */ + + /* a) in a single search sweep, do all of this + * 1) remember the first empty entry (if any) + * 2) remember the oldest stable entry (if any) + * 3) remember the oldest pending entry without queued packets (if any) + * 4) remember the oldest pending entry with queued packets (if any) + * 5) search for a matching IP entry, either pending or stable + * until 5 matches, or all entries are searched for. + */ + + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + /* no empty entry found yet and now we do find one? */ + if ((empty == ARP_TABLE_SIZE) && (state == ETHARP_STATE_EMPTY)) { + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_find_entry: found empty entry %"U16_F"\n", (u16_t)i)); + /* remember first empty entry */ + empty = i; + } else if (state != ETHARP_STATE_EMPTY) { + LWIP_ASSERT("state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE", + state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE); + /* if given, does IP address match IP address in ARP entry? 
*/ + if (ipaddr && ip4_addr_cmp(ipaddr, &arp_table[i].ipaddr) +#if ETHARP_TABLE_MATCH_NETIF + && ((netif == NULL) || (netif == arp_table[i].netif)) +#endif /* ETHARP_TABLE_MATCH_NETIF */ + ) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: found matching entry %"U16_F"\n", (u16_t)i)); + /* found exact IP address match, simply bail out */ + return i; + } + /* pending entry? */ + if (state == ETHARP_STATE_PENDING) { + /* pending with queued packets? */ + if (arp_table[i].q != NULL) { + if (arp_table[i].ctime >= age_queue) { + old_queue = i; + age_queue = arp_table[i].ctime; + } + } else + /* pending without queued packets? */ + { + if (arp_table[i].ctime >= age_pending) { + old_pending = i; + age_pending = arp_table[i].ctime; + } + } + /* stable entry? */ + } else if (state >= ETHARP_STATE_STABLE) { +#if ETHARP_SUPPORT_STATIC_ENTRIES + /* don't record old_stable for static entries since they never expire */ + if (state < ETHARP_STATE_STATIC) +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + { + /* remember entry with oldest stable entry in oldest, its age in maxtime */ + if (arp_table[i].ctime >= age_stable) { + old_stable = i; + age_stable = arp_table[i].ctime; + } + } + } + } + } + /* { we have no match } => try to create a new entry */ + + /* don't create new entry, only search? */ + if (((flags & ETHARP_FLAG_FIND_ONLY) != 0) || + /* or no empty entry found and not allowed to recycle? */ + ((empty == ARP_TABLE_SIZE) && ((flags & ETHARP_FLAG_TRY_HARD) == 0))) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty entry found and not allowed to recycle\n")); + return (s8_t)ERR_MEM; + } + + /* b) choose the least destructive entry to recycle: + * 1) empty entry + * 2) oldest stable entry + * 3) oldest pending entry without queued packets + * 4) oldest pending entry with queued packets + * + * { ETHARP_FLAG_TRY_HARD is set at this point } + */ + + /* 1) empty entry available? */ + if (empty < ARP_TABLE_SIZE) { + i = empty; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting empty entry %"U16_F"\n", (u16_t)i)); + } else { + /* 2) found recyclable stable entry? */ + if (old_stable < ARP_TABLE_SIZE) { + /* recycle oldest stable*/ + i = old_stable; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest stable entry %"U16_F"\n", (u16_t)i)); + /* no queued packets should exist on stable entries */ + LWIP_ASSERT("arp_table[i].q == NULL", arp_table[i].q == NULL); + /* 3) found recyclable pending entry without queued packets? */ + } else if (old_pending < ARP_TABLE_SIZE) { + /* recycle oldest pending */ + i = old_pending; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %"U16_F" (without queue)\n", (u16_t)i)); + /* 4) found recyclable pending entry with queued packets? 
*/ + } else if (old_queue < ARP_TABLE_SIZE) { + /* recycle oldest pending (queued packets are free in etharp_free_entry) */ + i = old_queue; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %"U16_F", freeing packet queue %p\n", (u16_t)i, (void *)(arp_table[i].q))); + /* no empty or recyclable entries found */ + } else { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty or recyclable entries found\n")); + return (s8_t)ERR_MEM; + } + + /* { empty or recyclable entry found } */ + LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE); + etharp_free_entry(i); + } + + LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE); + LWIP_ASSERT("arp_table[i].state == ETHARP_STATE_EMPTY", + arp_table[i].state == ETHARP_STATE_EMPTY); + + /* IP address given? */ + if (ipaddr != NULL) { + /* set IP address */ + ip4_addr_copy(arp_table[i].ipaddr, *ipaddr); + } + arp_table[i].ctime = 0; +#if ETHARP_TABLE_MATCH_NETIF + arp_table[i].netif = netif; +#endif /* ETHARP_TABLE_MATCH_NETIF*/ + return (err_t)i; +} + +/** + * Update (or insert) a IP/MAC address pair in the ARP cache. + * + * If a pending entry is resolved, any queued packets will be sent + * at this point. + * + * @param netif netif related to this entry (used for NETIF_ADDRHINT) + * @param ipaddr IP address of the inserted ARP entry. + * @param ethaddr Ethernet address of the inserted ARP entry. + * @param flags See @ref etharp_state + * + * @return + * - ERR_OK Successfully updated ARP cache. + * - ERR_MEM If we could not add a new ARP entry when ETHARP_FLAG_TRY_HARD was set. + * - ERR_ARG Non-unicast address given, those will not appear in ARP cache. + * + * @see pbuf_free() + */ +static err_t +etharp_update_arp_entry(struct netif *netif, const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, u8_t flags) +{ + s8_t i; + LWIP_ASSERT("netif->hwaddr_len == ETH_HWADDR_LEN", netif->hwaddr_len == ETH_HWADDR_LEN); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr), + (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2], + (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5])); + /* non-unicast address? 
*/ + if (ip4_addr_isany(ipaddr) || + ip4_addr_isbroadcast(ipaddr, netif) || + ip4_addr_ismulticast(ipaddr)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: will not add non-unicast IP address to ARP cache\n")); + return ERR_ARG; + } + /* find or create ARP entry */ + i = etharp_find_entry(ipaddr, flags, netif); + /* bail out if no entry could be found */ + if (i < 0) { + return (err_t)i; + } + +#if ETHARP_SUPPORT_STATIC_ENTRIES + if (flags & ETHARP_FLAG_STATIC_ENTRY) { + /* record static type */ + arp_table[i].state = ETHARP_STATE_STATIC; + } else if (arp_table[i].state == ETHARP_STATE_STATIC) { + /* found entry is a static type, don't overwrite it */ + return ERR_VAL; + } else +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + { + /* mark it stable */ + arp_table[i].state = ETHARP_STATE_STABLE; + } + + /* record network interface */ + arp_table[i].netif = netif; + /* insert in SNMP ARP index tree */ + mib2_add_arp_entry(netif, &arp_table[i].ipaddr); + + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: updating stable entry %"S16_F"\n", (s16_t)i)); + /* update address */ + ETHADDR32_COPY(&arp_table[i].ethaddr, ethaddr); + /* reset time stamp */ + arp_table[i].ctime = 0; + /* this is where we will send out queued packets! */ +#if ARP_QUEUEING + while (arp_table[i].q != NULL) { + struct pbuf *p; + /* remember remainder of queue */ + struct etharp_q_entry *q = arp_table[i].q; + /* pop first item off the queue */ + arp_table[i].q = q->next; + /* get the packet pointer */ + p = q->p; + /* now queue entry can be freed */ + memp_free(MEMP_ARP_QUEUE, q); +#else /* ARP_QUEUEING */ + if (arp_table[i].q != NULL) { + struct pbuf *p = arp_table[i].q; + arp_table[i].q = NULL; +#endif /* ARP_QUEUEING */ + /* send the queued IP packet */ + ethernet_output(netif, p, (struct eth_addr*)(netif->hwaddr), ethaddr, ETHTYPE_IP); + /* free the queued IP packet */ + pbuf_free(p); + } + return ERR_OK; +} + +#if ETHARP_SUPPORT_STATIC_ENTRIES +/** Add a new static entry to the ARP table. If an entry exists for the + * specified IP address, this entry is overwritten. + * If packets are queued for the specified IP address, they are sent out. + * + * @param ipaddr IP address for the new static entry + * @param ethaddr ethernet address for the new static entry + * @return See return values of etharp_add_static_entry + */ +err_t +etharp_add_static_entry(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr) +{ + struct netif *netif; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_add_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr), + (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2], + (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5])); + + netif = ip4_route(ipaddr); + if (netif == NULL) { + return ERR_RTE; + } + + return etharp_update_arp_entry(netif, ipaddr, ethaddr, ETHARP_FLAG_TRY_HARD | ETHARP_FLAG_STATIC_ENTRY); +} + +/** Remove a static entry from the ARP table previously added with a call to + * etharp_add_static_entry. 
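A usage sketch for the static-entry API above, with purely illustrative address values; etharp_add_static_entry() relies on ip4_route() finding an interface for the address, as shown in its body:

ip4_addr_t ip;
struct eth_addr mac = {{0x00, 0x1A, 0x2B, 0x3C, 0x4D, 0x5E}}; /* example MAC */
IP4_ADDR(&ip, 192, 168, 1, 77);                               /* example IP  */
if (etharp_add_static_entry(&ip, &mac) == ERR_OK) {
  /* entry is now ETHARP_STATE_STATIC: etharp_tmr() never ages it out */
}
/* ... later, when the mapping is no longer wanted ... */
etharp_remove_static_entry(&ip);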
+ * + * @param ipaddr IP address of the static entry to remove + * @return ERR_OK: entry removed + * ERR_MEM: entry wasn't found + * ERR_ARG: entry wasn't a static entry but a dynamic one + */ +err_t +etharp_remove_static_entry(const ip4_addr_t *ipaddr) +{ + s8_t i; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_remove_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr))); + + /* find or create ARP entry */ + i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, NULL); + /* bail out if no entry could be found */ + if (i < 0) { + return (err_t)i; + } + + if (arp_table[i].state != ETHARP_STATE_STATIC) { + /* entry wasn't a static entry, cannot remove it */ + return ERR_ARG; + } + /* entry found, free it */ + etharp_free_entry(i); + return ERR_OK; +} +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +/** + * Remove all ARP table entries of the specified netif. + * + * @param netif points to a network interface + */ +void +etharp_cleanup_netif(struct netif *netif) +{ + u8_t i; + + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + if ((state != ETHARP_STATE_EMPTY) && (arp_table[i].netif == netif)) { + etharp_free_entry(i); + } + } +} + +/** + * Finds (stable) ethernet/IP address pair from ARP table + * using interface and IP address index. + * @note the addresses in the ARP table are in network order! + * + * @param netif points to interface index + * @param ipaddr points to the (network order) IP address index + * @param eth_ret points to return pointer + * @param ip_ret points to return pointer + * @return table index if found, -1 otherwise + */ +s8_t +etharp_find_addr(struct netif *netif, const ip4_addr_t *ipaddr, + struct eth_addr **eth_ret, const ip4_addr_t **ip_ret) +{ + s8_t i; + + LWIP_ASSERT("eth_ret != NULL && ip_ret != NULL", + eth_ret != NULL && ip_ret != NULL); + + LWIP_UNUSED_ARG(netif); + + i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, netif); + if ((i >= 0) && (arp_table[i].state >= ETHARP_STATE_STABLE)) { + *eth_ret = &arp_table[i].ethaddr; + *ip_ret = &arp_table[i].ipaddr; + return i; + } + return -1; +} + +/** + * Possibility to iterate over stable ARP table entries + * + * @param i entry number, 0 to ARP_TABLE_SIZE + * @param ipaddr return value: IP address + * @param netif return value: points to interface + * @param eth_ret return value: ETH address + * @return 1 on valid index, 0 otherwise + */ +u8_t +etharp_get_entry(u8_t i, ip4_addr_t **ipaddr, struct netif **netif, struct eth_addr **eth_ret) +{ + LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("eth_ret != NULL", eth_ret != NULL); + + if((i < ARP_TABLE_SIZE) && (arp_table[i].state >= ETHARP_STATE_STABLE)) { + *ipaddr = &arp_table[i].ipaddr; + *netif = arp_table[i].netif; + *eth_ret = &arp_table[i].ethaddr; + return 1; + } else { + return 0; + } +} + +/** + * Responds to ARP requests to us. Upon ARP replies to us, add entry to cache + * send out queued IP packets. Updates cache with snooped address pairs. + * + * Should be called for incoming ARP packets. The pbuf in the argument + * is freed by this function. + * + * @param p The ARP packet that arrived on netif. Is freed by this function. + * @param netif The lwIP network interface on which the ARP packet pbuf arrived. 
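etharp_get_entry() above supports a read-only walk over the stable part of the table; a sketch of the iteration pattern its signature implies:

u8_t idx;
ip4_addr_t *ip;
struct netif *nif;
struct eth_addr *eth;
for (idx = 0; idx < ARP_TABLE_SIZE; idx++) {
  if (etharp_get_entry(idx, &ip, &nif, &eth)) {
    /* entry idx is at least STABLE; the pointers reference arp_table
       directly, so don't hold them across etharp_tmr() ticks */
  }
}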
+ * + * @see pbuf_free() + */ +void +etharp_input(struct pbuf *p, struct netif *netif) +{ + struct etharp_hdr *hdr; + /* these are aligned properly, whereas the ARP header fields might not be */ + ip4_addr_t sipaddr, dipaddr; + u8_t for_us; + + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + + hdr = (struct etharp_hdr *)p->payload; + + /* RFC 826 "Packet Reception": */ + if ((hdr->hwtype != PP_HTONS(HWTYPE_ETHERNET)) || + (hdr->hwlen != ETH_HWADDR_LEN) || + (hdr->protolen != sizeof(ip4_addr_t)) || + (hdr->proto != PP_HTONS(ETHTYPE_IP))) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("etharp_input: packet dropped, wrong hw type, hwlen, proto, protolen or ethernet type (%"U16_F"/%"U16_F"/%"U16_F"/%"U16_F")\n", + hdr->hwtype, (u16_t)hdr->hwlen, hdr->proto, (u16_t)hdr->protolen)); + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + pbuf_free(p); + return; + } + ETHARP_STATS_INC(etharp.recv); + +#if LWIP_AUTOIP + /* We have to check if a host already has configured our random + * created link local address and continuously check if there is + * a host with this IP-address so we can detect collisions */ + autoip_arp_reply(netif, hdr); +#endif /* LWIP_AUTOIP */ + + /* Copy struct ip4_addr2 to aligned ip4_addr, to support compilers without + * structure packing (not using structure copy which breaks strict-aliasing rules). */ + IPADDR2_COPY(&sipaddr, &hdr->sipaddr); + IPADDR2_COPY(&dipaddr, &hdr->dipaddr); + + /* this interface is not configured? */ + if (ip4_addr_isany_val(*netif_ip4_addr(netif))) { + for_us = 0; + } else { + /* ARP packet directed to us? */ + for_us = (u8_t)ip4_addr_cmp(&dipaddr, netif_ip4_addr(netif)); + } + + /* ARP message directed to us? + -> add IP address in ARP cache; assume requester wants to talk to us, + can result in directly sending the queued packets for this host. + ARP message not directed to us? + -> update the source IP address in the cache, if present */ + etharp_update_arp_entry(netif, &sipaddr, &(hdr->shwaddr), + for_us ? ETHARP_FLAG_TRY_HARD : ETHARP_FLAG_FIND_ONLY); + + /* now act on the message itself */ + switch (hdr->opcode) { + /* ARP request? */ + case PP_HTONS(ARP_REQUEST): + /* ARP request. If it asked for our address, we send out a + * reply. In any case, we time-stamp any existing ARP entry, + * and possibly send out an IP packet that was queued on it. */ + + LWIP_DEBUGF (ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP request\n")); + /* ARP request for our address? */ + if (for_us) { + /* send ARP response */ + etharp_raw(netif, + (struct eth_addr *)netif->hwaddr, &hdr->shwaddr, + (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif), + &hdr->shwaddr, &sipaddr, + ARP_REPLY); + /* we are not configured? */ + } else if (ip4_addr_isany_val(*netif_ip4_addr(netif))) { + /* { for_us == 0 and netif->ip_addr.addr == 0 } */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: we are unconfigured, ARP request ignored.\n")); + /* request was not directed to us */ + } else { + /* { for_us == 0 and netif->ip_addr.addr != 0 } */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP request was not for us.\n")); + } + break; + case PP_HTONS(ARP_REPLY): + /* ARP reply. We already updated the ARP cache earlier. */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP reply\n")); +#if (LWIP_DHCP && DHCP_DOES_ARP_CHECK) + /* DHCP wants to know about ARP replies from any host with an + * IP address also offered to us by the DHCP server. 
We do not + * want to take a duplicate IP address on a single network. + * @todo How should we handle redundant (fail-over) interfaces? */ + dhcp_arp_reply(netif, &sipaddr); +#endif /* (LWIP_DHCP && DHCP_DOES_ARP_CHECK) */ + break; + default: + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP unknown opcode type %"S16_F"\n", lwip_htons(hdr->opcode))); + ETHARP_STATS_INC(etharp.err); + break; + } + /* free ARP packet */ + pbuf_free(p); +} + +/** Just a small helper function that sends a pbuf to an ethernet address + * in the arp_table specified by the index 'arp_idx'. + */ +static err_t +etharp_output_to_arp_index(struct netif *netif, struct pbuf *q, u8_t arp_idx) +{ + LWIP_ASSERT("arp_table[arp_idx].state >= ETHARP_STATE_STABLE", + arp_table[arp_idx].state >= ETHARP_STATE_STABLE); + /* if arp table entry is about to expire: re-request it, + but only if its state is ETHARP_STATE_STABLE to prevent flooding the + network with ARP requests if this address is used frequently. */ + if (arp_table[arp_idx].state == ETHARP_STATE_STABLE) { + if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_BROADCAST) { + /* issue a standard request using broadcast */ + if (etharp_request(netif, &arp_table[arp_idx].ipaddr) == ERR_OK) { + arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1; + } + } else if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_UNICAST) { + /* issue a unicast request (for 15 seconds) to prevent unnecessary broadcast */ + if (etharp_request_dst(netif, &arp_table[arp_idx].ipaddr, &arp_table[arp_idx].ethaddr) == ERR_OK) { + arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1; + } + } + } + + return ethernet_output(netif, q, (struct eth_addr*)(netif->hwaddr), &arp_table[arp_idx].ethaddr, ETHTYPE_IP); +} + +/** + * Resolve and fill-in Ethernet address header for outgoing IP packet. + * + * For IP multicast and broadcast, corresponding Ethernet addresses + * are selected and the packet is transmitted on the link. + * + * For unicast addresses, the packet is submitted to etharp_query(). In + * case the IP address is outside the local network, the IP address of + * the gateway is used. + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ipaddr The IP address of the packet destination. + * + * @return + * - ERR_RTE No route to destination (no gateway to external networks), + * or the return type of either etharp_query() or ethernet_output(). + */ +err_t +etharp_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr) +{ + const struct eth_addr *dest; + struct eth_addr mcastaddr; + const ip4_addr_t *dst_addr = ipaddr; + + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("q != NULL", q != NULL); + LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL); + + /* Determine the destination hardware address. Broadcasts and multicasts + * are special, other IP addresses are looked up in the ARP table. */ + + /* broadcast destination IP address? */ + if (ip4_addr_isbroadcast(ipaddr, netif)) { + /* broadcast on Ethernet also */ + dest = (const struct eth_addr *)&ethbroadcast; + /* multicast destination IP address? 
*/ + } else if (ip4_addr_ismulticast(ipaddr)) { + /* Hash IP multicast address to MAC address.*/ + mcastaddr.addr[0] = LL_IP4_MULTICAST_ADDR_0; + mcastaddr.addr[1] = LL_IP4_MULTICAST_ADDR_1; + mcastaddr.addr[2] = LL_IP4_MULTICAST_ADDR_2; + mcastaddr.addr[3] = ip4_addr2(ipaddr) & 0x7f; + mcastaddr.addr[4] = ip4_addr3(ipaddr); + mcastaddr.addr[5] = ip4_addr4(ipaddr); + /* destination Ethernet address is multicast */ + dest = &mcastaddr; + /* unicast destination IP address? */ + } else { + s8_t i; + /* outside local network? if so, this can neither be a global broadcast nor + a subnet broadcast. */ + if (!ip4_addr_netcmp(ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif)) && + !ip4_addr_islinklocal(ipaddr)) { +#if LWIP_AUTOIP + struct ip_hdr *iphdr = LWIP_ALIGNMENT_CAST(struct ip_hdr*, q->payload); + /* According to RFC 3297, chapter 2.6.2 (Forwarding Rules), a packet with + a link-local source address must always be "directly to its destination + on the same physical link. The host MUST NOT send the packet to any + router for forwarding". */ + if (!ip4_addr_islinklocal(&iphdr->src)) +#endif /* LWIP_AUTOIP */ + { +#ifdef LWIP_HOOK_ETHARP_GET_GW + /* For advanced routing, a single default gateway might not be enough, so get + the IP address of the gateway to handle the current destination address. */ + dst_addr = LWIP_HOOK_ETHARP_GET_GW(netif, ipaddr); + if (dst_addr == NULL) +#endif /* LWIP_HOOK_ETHARP_GET_GW */ + { + /* interface has default gateway? */ + if (!ip4_addr_isany_val(*netif_ip4_gw(netif))) { + /* send to hardware address of default gateway IP address */ + dst_addr = netif_ip4_gw(netif); + /* no default gateway available */ + } else { + /* no route to destination error (default gateway missing) */ + return ERR_RTE; + } + } + } + } +#if LWIP_NETIF_HWADDRHINT + if (netif->addr_hint != NULL) { + /* per-pcb cached entry was given */ + u8_t etharp_cached_entry = *(netif->addr_hint); + if (etharp_cached_entry < ARP_TABLE_SIZE) { +#endif /* LWIP_NETIF_HWADDRHINT */ + if ((arp_table[etharp_cached_entry].state >= ETHARP_STATE_STABLE) && +#if ETHARP_TABLE_MATCH_NETIF + (arp_table[etharp_cached_entry].netif == netif) && +#endif + (ip4_addr_cmp(dst_addr, &arp_table[etharp_cached_entry].ipaddr))) { + /* the per-pcb-cached entry is stable and the right one! */ + ETHARP_STATS_INC(etharp.cachehit); + return etharp_output_to_arp_index(netif, q, etharp_cached_entry); + } +#if LWIP_NETIF_HWADDRHINT + } + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* find stable entry: do this here since this is a critical path for + throughput and etharp_find_entry() is kind of slow */ + for (i = 0; i < ARP_TABLE_SIZE; i++) { + if ((arp_table[i].state >= ETHARP_STATE_STABLE) && +#if ETHARP_TABLE_MATCH_NETIF + (arp_table[i].netif == netif) && +#endif + (ip4_addr_cmp(dst_addr, &arp_table[i].ipaddr))) { + /* found an existing, stable entry */ + ETHARP_SET_HINT(netif, i); + return etharp_output_to_arp_index(netif, q, i); + } + } + /* no stable entry found, use the (slower) query function: + queue on destination Ethernet address belonging to ipaddr */ + return etharp_query(netif, dst_addr, q); + } + + /* continuation for multicast/broadcast destinations */ + /* obtain source Ethernet address of the given interface */ + /* send packet directly on the link */ + return ethernet_output(netif, q, (struct eth_addr*)(netif->hwaddr), dest, ETHTYPE_IP); +} + +/** + * Send an ARP request for the given IP address and/or queue a packet. 
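A worked instance of the multicast mapping at the top of etharp_output() above: only the low 23 bits of the group address survive (the second octet is masked with 0x7f), under the fixed 01:00:5E prefix. A standalone sketch (ip4_mcast_mac is an illustrative name, not lwIP API):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the mcastaddr assignments in etharp_output() above. */
static void ip4_mcast_mac(uint8_t a, uint8_t b, uint8_t c, uint8_t d, uint8_t mac[6]) {
  mac[0] = 0x01; mac[1] = 0x00; mac[2] = 0x5E; /* LL_IP4_MULTICAST_ADDR_0..2 */
  mac[3] = b & 0x7f;                           /* only 23 IP bits are encoded */
  mac[4] = c;
  mac[5] = d;
  (void)a; /* the first octet (224..239) does not appear in the MAC at all */
}

int main(void) {
  uint8_t mac[6];
  ip4_mcast_mac(224, 0, 0, 251, mac); /* mDNS group -> 01:00:5E:00:00:FB */
  printf("%02X:%02X:%02X:%02X:%02X:%02X\n",
         mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
  return 0;
}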
+ * + * If the IP address was not yet in the cache, a pending ARP cache entry + * is added and an ARP request is sent for the given address. The packet + * is queued on this entry. + * + * If the IP address was already pending in the cache, a new ARP request + * is sent for the given address. The packet is queued on this entry. + * + * If the IP address was already stable in the cache, and a packet is + * given, it is directly sent and no ARP request is sent out. + * + * If the IP address was already stable in the cache, and no packet is + * given, an ARP request is sent out. + * + * @param netif The lwIP network interface on which ipaddr + * must be queried for. + * @param ipaddr The IP address to be resolved. + * @param q If non-NULL, a pbuf that must be delivered to the IP address. + * q is not freed by this function. + * + * @note q must only be ONE packet, not a packet queue! + * + * @return + * - ERR_BUF Could not make room for Ethernet header. + * - ERR_MEM Hardware address unknown, and no more ARP entries available + * to query for address or queue the packet. + * - ERR_MEM Could not queue packet due to memory shortage. + * - ERR_RTE No route to destination (no gateway to external networks). + * - ERR_ARG Non-unicast address given, those will not appear in ARP cache. + * + */ +err_t +etharp_query(struct netif *netif, const ip4_addr_t *ipaddr, struct pbuf *q) +{ + struct eth_addr * srcaddr = (struct eth_addr *)netif->hwaddr; + err_t result = ERR_MEM; + int is_new_entry = 0; + s8_t i; /* ARP entry index */ + + /* non-unicast address? */ + if (ip4_addr_isbroadcast(ipaddr, netif) || + ip4_addr_ismulticast(ipaddr) || + ip4_addr_isany(ipaddr)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: will not add non-unicast IP address to ARP cache\n")); + return ERR_ARG; + } + + /* find entry in ARP cache, ask to create entry if queueing packet */ + i = etharp_find_entry(ipaddr, ETHARP_FLAG_TRY_HARD, netif); + + /* could not find or create entry? */ + if (i < 0) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not create ARP entry\n")); + if (q) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: packet dropped\n")); + ETHARP_STATS_INC(etharp.memerr); + } + return (err_t)i; + } + + /* mark a fresh entry as pending (we just sent a request) */ + if (arp_table[i].state == ETHARP_STATE_EMPTY) { + is_new_entry = 1; + arp_table[i].state = ETHARP_STATE_PENDING; + /* record network interface for re-sending arp request in etharp_tmr */ + arp_table[i].netif = netif; + } + + /* { i is either a STABLE or (new or existing) PENDING entry } */ + LWIP_ASSERT("arp_table[i].state == PENDING or STABLE", + ((arp_table[i].state == ETHARP_STATE_PENDING) || + (arp_table[i].state >= ETHARP_STATE_STABLE))); + + /* do we have a new entry? or an implicit query request? */ + if (is_new_entry || (q == NULL)) { + /* try to resolve it; send out ARP request */ + result = etharp_request(netif, ipaddr); + if (result != ERR_OK) { + /* ARP request couldn't be sent */ + /* We don't re-send arp request in etharp_tmr, but we still queue packets, + since this failure could be temporary, and the next packet calling + etharp_query again could lead to sending the queued packets. */ + } + if (q == NULL) { + return result; + } + } + + /* packet given? */ + LWIP_ASSERT("q != NULL", q != NULL); + /* stable entry? 
*/ + if (arp_table[i].state >= ETHARP_STATE_STABLE) { + /* we have a valid IP->Ethernet address mapping */ + ETHARP_SET_HINT(netif, i); + /* send the packet */ + result = ethernet_output(netif, q, srcaddr, &(arp_table[i].ethaddr), ETHTYPE_IP); + /* pending entry? (either just created or already pending) */ + } else if (arp_table[i].state == ETHARP_STATE_PENDING) { + /* entry is still pending, queue the given packet 'q' */ + struct pbuf *p; + int copy_needed = 0; + /* If q includes a PBUF_REF, PBUF_POOL or PBUF_RAM, we have no choice but + * to copy the whole queue into a new PBUF_RAM (see bug #11400) + * PBUF_ROMs can be left as they are, since ROM must not get changed. */ + p = q; + while (p) { + LWIP_ASSERT("no packet queues allowed!", (p->len != p->tot_len) || (p->next == 0)); + if (p->type != PBUF_ROM) { + copy_needed = 1; + break; + } + p = p->next; + } + if (copy_needed) { + /* copy the whole packet into new pbufs */ + p = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM); + if (p != NULL) { + if (pbuf_copy(p, q) != ERR_OK) { + pbuf_free(p); + p = NULL; + } + } + } else { + /* referencing the old pbuf is enough */ + p = q; + pbuf_ref(p); + } + /* packet could be taken over? */ + if (p != NULL) { + /* queue packet ... */ +#if ARP_QUEUEING + struct etharp_q_entry *new_entry; + /* allocate a new arp queue entry */ + new_entry = (struct etharp_q_entry *)memp_malloc(MEMP_ARP_QUEUE); + if (new_entry != NULL) { + unsigned int qlen = 0; + new_entry->next = 0; + new_entry->p = p; + if (arp_table[i].q != NULL) { + /* queue already exists, append the new entry to the end */ + struct etharp_q_entry *r; + r = arp_table[i].q; + qlen++; + while (r->next != NULL) { + r = r->next; + qlen++; + } + r->next = new_entry; + } else { + /* queue did not exist, first item in queue */ + arp_table[i].q = new_entry; + } +#if ARP_QUEUE_LEN + if (qlen >= ARP_QUEUE_LEN) { + struct etharp_q_entry *old; + old = arp_table[i].q; + arp_table[i].q = arp_table[i].q->next; + pbuf_free(old->p); + memp_free(MEMP_ARP_QUEUE, old); + } +#endif + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"S16_F"\n", (void *)q, (s16_t)i)); + result = ERR_OK; + } else { + /* the pool MEMP_ARP_QUEUE is empty */ + pbuf_free(p); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q)); + result = ERR_MEM; + } +#else /* ARP_QUEUEING */ + /* always queue one packet per ARP request only, freeing a previously queued packet */ + if (arp_table[i].q != NULL) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: dropped previously queued packet %p for ARP entry %"S16_F"\n", (void *)q, (s16_t)i)); + pbuf_free(arp_table[i].q); + } + arp_table[i].q = p; + result = ERR_OK; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"S16_F"\n", (void *)q, (s16_t)i)); +#endif /* ARP_QUEUEING */ + } else { + ETHARP_STATS_INC(etharp.memerr); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q)); + result = ERR_MEM; + } + } + return result; +} + +/** + * Send a raw ARP packet (opcode and all addresses can be modified) + * + * @param netif the lwip network interface on which to send the ARP packet + * @param ethsrc_addr the source MAC address for the ethernet header + * @param ethdst_addr the destination MAC address for the ethernet header + * @param hwsrc_addr the source MAC address for the ARP protocol header + *
@param ipsrc_addr the source IP address for the ARP protocol header + * @param hwdst_addr the destination MAC address for the ARP protocol header + * @param ipdst_addr the destination IP address for the ARP protocol header + * @param opcode the type of the ARP packet + * @return ERR_OK if the ARP packet has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +static err_t +etharp_raw(struct netif *netif, const struct eth_addr *ethsrc_addr, + const struct eth_addr *ethdst_addr, + const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr, + const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr, + const u16_t opcode) +{ + struct pbuf *p; + err_t result = ERR_OK; + struct etharp_hdr *hdr; + + LWIP_ASSERT("netif != NULL", netif != NULL); + + /* allocate a pbuf for the outgoing ARP request packet */ + p = pbuf_alloc(PBUF_LINK, SIZEOF_ETHARP_HDR, PBUF_RAM); + /* could we allocate a pbuf for an ARP request? */ + if (p == NULL) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("etharp_raw: could not allocate pbuf for ARP request.\n")); + ETHARP_STATS_INC(etharp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("check that first pbuf can hold struct etharp_hdr", + (p->len >= SIZEOF_ETHARP_HDR)); + + hdr = (struct etharp_hdr *)p->payload; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_raw: sending raw ARP packet.\n")); + hdr->opcode = lwip_htons(opcode); + + LWIP_ASSERT("netif->hwaddr_len must be the same as ETH_HWADDR_LEN for etharp!", + (netif->hwaddr_len == ETH_HWADDR_LEN)); + + /* Write the ARP MAC-Addresses */ + ETHADDR16_COPY(&hdr->shwaddr, hwsrc_addr); + ETHADDR16_COPY(&hdr->dhwaddr, hwdst_addr); + /* Copy struct ip4_addr2 to aligned ip4_addr, to support compilers without + * structure packing. */ + IPADDR2_COPY(&hdr->sipaddr, ipsrc_addr); + IPADDR2_COPY(&hdr->dipaddr, ipdst_addr); + + hdr->hwtype = PP_HTONS(HWTYPE_ETHERNET); + hdr->proto = PP_HTONS(ETHTYPE_IP); + /* set hwlen and protolen */ + hdr->hwlen = ETH_HWADDR_LEN; + hdr->protolen = sizeof(ip4_addr_t); + + /* send ARP query */ +#if LWIP_AUTOIP + /* If we are using Link-Local, all ARP packets that contain a Link-Local + * 'sender IP address' MUST be sent using link-layer broadcast instead of + * link-layer unicast. (See RFC3927 Section 2.5, last paragraph) */ + if(ip4_addr_islinklocal(ipsrc_addr)) { + ethernet_output(netif, p, ethsrc_addr, &ethbroadcast, ETHTYPE_ARP); + } else +#endif /* LWIP_AUTOIP */ + { + ethernet_output(netif, p, ethsrc_addr, ethdst_addr, ETHTYPE_ARP); + } + + ETHARP_STATS_INC(etharp.xmit); + /* free ARP query packet */ + pbuf_free(p); + p = NULL; + + return result; +} + +/** + * Send an ARP request packet asking for ipaddr to a specific eth address.
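etharp_raw() above fills in every field of the on-wire ARP packet. For orientation, here is the RFC 826 layout it produces, written as a flat C struct (this struct and its name are illustrative only; lwIP's real definition is struct etharp_hdr in prot/etharp.h, which splits the IP addresses into 16-bit halves so the header needs no packing beyond 2-byte alignment):

    #include <stdint.h>

    /* Illustrative on-wire ARP packet for Ethernet/IPv4 (RFC 826), 28 bytes.
     * All multi-byte fields travel in network byte order. */
    #pragma pack(push, 1)
    struct arp_wire {
        uint16_t hwtype;      /* 1 = Ethernet (HWTYPE_ETHERNET) */
        uint16_t proto;       /* 0x0800 = IPv4 (ETHTYPE_IP) */
        uint8_t  hwlen;       /* 6 = ETH_HWADDR_LEN */
        uint8_t  protolen;    /* 4 = sizeof(ip4_addr_t) */
        uint16_t opcode;      /* 1 = request, 2 = reply */
        uint8_t  shwaddr[6];  /* sender MAC */
        uint8_t  sipaddr[4];  /* sender IP */
        uint8_t  dhwaddr[6];  /* target MAC (ethzero in a request) */
        uint8_t  dipaddr[4];  /* target IP */
    };
    #pragma pack(pop)

    /* C11 sanity check: the wire format is exactly 28 bytes */
    _Static_assert(sizeof(struct arp_wire) == 28, "ARP packet is 28 bytes");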
+ * + * Used to send a unicast request to refresh the ARP table just before an entry + * times out. + * + * @param netif the lwip network interface on which to send the request + * @param ipaddr the IP address for which to ask + * @param hw_dst_addr the ethernet address to send this packet to + * @return ERR_OK if the request has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +static err_t +etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr* hw_dst_addr) +{ + return etharp_raw(netif, (struct eth_addr *)netif->hwaddr, hw_dst_addr, + (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif), &ethzero, + ipaddr, ARP_REQUEST); +} + +/** + * Send an ARP request packet asking for ipaddr. + * + * @param netif the lwip network interface on which to send the request + * @param ipaddr the IP address for which to ask + * @return ERR_OK if the request has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +err_t +etharp_request(struct netif *netif, const ip4_addr_t *ipaddr) +{ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_request: sending ARP request.\n")); + return etharp_request_dst(netif, ipaddr, &ethbroadcast); +} +#endif /* LWIP_IPV4 && LWIP_ARP */ + +#endif /* LWIP_ARP || LWIP_ETHERNET */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c new file mode 100644 index 000000000..774b72bdd --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c @@ -0,0 +1,397 @@ +/** + * @file + * ICMP - Internet Control Message Protocol + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* Some ICMP messages should be passed to the transport protocols. This + is not implemented.
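A closing note on the ARP API above: etharp_request() is exported, so application code can pre-warm the cache for a peer before latency-sensitive traffic, avoiding the first packet being parked on a PENDING entry. A sketch under the assumption that netif_default is the interface of interest (the peer address is an example):

    #include "lwip/etharp.h"
    #include "lwip/ip4_addr.h"
    #include "lwip/netif.h"

    /* Resolve a peer ahead of time so the first real packet is not queued
     * behind an ARP exchange. netif_default and the address are examples. */
    static void prime_arp_cache(void)
    {
        ip4_addr_t peer;
        IP4_ADDR(&peer, 192, 168, 1, 42);
        if (etharp_request(netif_default, &peer) != ERR_OK) {
            /* out of pbufs; the next etharp_query() will retry anyway */
        }
    }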
*/ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/icmp.h" +#include "lwip/inet_chksum.h" +#include "lwip/ip.h" +#include "lwip/def.h" +#include "lwip/stats.h" + +#include <string.h> + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Small optimization: set to 0 if incoming PBUF_POOL pbuf can always be + * used to modify and send a response packet (and to 1 if this is not the case, + * e.g. when link header is stripped off when receiving) */ +#ifndef LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN +#define LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN 1 +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */ + +/* The amount of data from the original packet to return in a dest-unreachable */ +#define ICMP_DEST_UNREACH_DATASIZE 8 + +static void icmp_send_response(struct pbuf *p, u8_t type, u8_t code); + +/** + * Processes ICMP input packets, called from ip_input(). + * + * Currently only processes icmp echo requests and sends + * out the echo response. + * + * @param p the icmp echo request packet, p->payload pointing to the icmp header + * @param inp the netif on which this packet was received + */ +void +icmp_input(struct pbuf *p, struct netif *inp) +{ + u8_t type; +#ifdef LWIP_DEBUG + u8_t code; +#endif /* LWIP_DEBUG */ + struct icmp_echo_hdr *iecho; + const struct ip_hdr *iphdr_in; + u16_t hlen; + const ip4_addr_t* src; + + ICMP_STATS_INC(icmp.recv); + MIB2_STATS_INC(mib2.icmpinmsgs); + + iphdr_in = ip4_current_header(); + hlen = IPH_HL(iphdr_in) * 4; + if (hlen < IP_HLEN) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short IP header (%"S16_F" bytes) received\n", hlen)); + goto lenerr; + } + if (p->len < sizeof(u16_t)*2) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short ICMP (%"U16_F" bytes) received\n", p->tot_len)); + goto lenerr; + } + + type = *((u8_t *)p->payload); +#ifdef LWIP_DEBUG + code = *(((u8_t *)p->payload)+1); +#endif /* LWIP_DEBUG */ + switch (type) { + case ICMP_ER: + /* This is OK, echo reply might have been parsed by a raw PCB + (as obviously, an echo request has been sent, too). */ + MIB2_STATS_INC(mib2.icmpinechoreps); + break; + case ICMP_ECHO: + MIB2_STATS_INC(mib2.icmpinechos); + src = ip4_current_dest_addr(); + /* multicast destination address? */ + if (ip4_addr_ismulticast(ip4_current_dest_addr())) { +#if LWIP_MULTICAST_PING + /* For multicast, use address of receiving interface as source address */ + src = netif_ip4_addr(inp); +#else /* LWIP_MULTICAST_PING */ + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to multicast pings\n")); + goto icmperr; +#endif /* LWIP_MULTICAST_PING */ + } + /* broadcast destination address?
*/ + if (ip4_addr_isbroadcast(ip4_current_dest_addr(), ip_current_netif())) { +#if LWIP_BROADCAST_PING + /* For broadcast, use address of receiving interface as source address */ + src = netif_ip4_addr(inp); +#else /* LWIP_BROADCAST_PING */ + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to broadcast pings\n")); + goto icmperr; +#endif /* LWIP_BROADCAST_PING */ + } + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ping\n")); + if (p->tot_len < sizeof(struct icmp_echo_hdr)) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: bad ICMP echo received\n")); + goto lenerr; + } +#if CHECKSUM_CHECK_ICMP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP) { + if (inet_chksum_pbuf(p) != 0) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: checksum failed for received ICMP echo\n")); + pbuf_free(p); + ICMP_STATS_INC(icmp.chkerr); + MIB2_STATS_INC(mib2.icmpinerrors); + return; + } + } +#endif +#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN + if (pbuf_header(p, (s16_t)(hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN))) { + /* p is not big enough to contain link headers + * allocate a new one and copy p into it + */ + struct pbuf *r; + /* allocate new packet buffer with space for link headers */ + r = pbuf_alloc(PBUF_LINK, p->tot_len + hlen, PBUF_RAM); + if (r == NULL) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed\n")); + goto icmperr; + } + if (r->len < hlen + sizeof(struct icmp_echo_hdr)) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("first pbuf cannot hold the ICMP header")); + pbuf_free(r); + goto icmperr; + } + /* copy the ip header */ + MEMCPY(r->payload, iphdr_in, hlen); + /* switch r->payload back to icmp header (cannot fail) */ + if (pbuf_header(r, (s16_t)-hlen)) { + LWIP_ASSERT("icmp_input: moving r->payload to icmp header failed\n", 0); + pbuf_free(r); + goto icmperr; + } + /* copy the rest of the packet without ip header */ + if (pbuf_copy(r, p) != ERR_OK) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("icmp_input: copying to new pbuf failed")); + pbuf_free(r); + goto icmperr; + } + /* free the original p */ + pbuf_free(p); + /* we now have an identical copy of p that has room for link headers */ + p = r; + } else { + /* restore p->payload to point to icmp header (cannot fail) */ + if (pbuf_header(p, -(s16_t)(hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN))) { + LWIP_ASSERT("icmp_input: restoring original p->payload failed\n", 0); + goto icmperr; + } + } +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */ + /* At this point, all checks are OK. */ + /* We generate an answer by switching the dest and src ip addresses, + * setting the icmp type to ECHO_RESPONSE and updating the checksum. 
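The checksum is patched in place rather than recomputed: clearing the type byte (ICMP_ECHO = 8 to ICMP_ER = 0) lowers the 16-bit word containing type/code by 0x0800, so the one's-complement checksum must rise by 0x0800 with an end-around carry, which is exactly what the iecho->chksum adjustment below implements. A self-contained demonstration of the arithmetic (RFC 1071/1624; this program is ours, not lwIP code):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* RFC 1071 one's-complement checksum over an even-length buffer */
    static uint16_t chksum(const uint8_t *b, size_t len)
    {
        uint32_t sum = 0;
        for (size_t i = 0; i < len; i += 2)
            sum += (uint32_t)((b[i] << 8) | b[i + 1]);
        while (sum >> 16)
            sum = (sum & 0xffffu) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        /* ICMP echo request header: type=8, code=0, chksum=0, id, seqno */
        uint8_t pkt[8] = { 8, 0, 0, 0, 0x12, 0x34, 0x00, 0x01 };
        uint16_t c = chksum(pkt, sizeof pkt);
        pkt[2] = (uint8_t)(c >> 8);
        pkt[3] = (uint8_t)c;

        pkt[0] = 0;  /* ICMP_ECHO -> ICMP_ER */

        /* incremental update: the word fell by 0x0800, so the checksum
         * rises by 0x0800, folding the carry back in (end-around carry) */
        uint32_t adj = (uint32_t)((pkt[2] << 8) | pkt[3]) + 0x0800u;
        adj = (adj & 0xffffu) + (adj >> 16);
        pkt[2] = (uint8_t)(adj >> 8);
        pkt[3] = (uint8_t)adj;

        /* a packet carrying a correct checksum sums to zero */
        printf("verify: 0x%04x (expect 0)\n", chksum(pkt, sizeof pkt));
        return 0;
    }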
*/ + iecho = (struct icmp_echo_hdr *)p->payload; + if (pbuf_header(p, (s16_t)hlen)) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Can't move over header in packet")); + } else { + err_t ret; + struct ip_hdr *iphdr = (struct ip_hdr*)p->payload; + ip4_addr_copy(iphdr->src, *src); + ip4_addr_copy(iphdr->dest, *ip4_current_src_addr()); + ICMPH_TYPE_SET(iecho, ICMP_ER); +#if CHECKSUM_GEN_ICMP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP) { + /* adjust the checksum */ + if (iecho->chksum > PP_HTONS(0xffffU - (ICMP_ECHO << 8))) { + iecho->chksum += PP_HTONS(ICMP_ECHO << 8) + 1; + } else { + iecho->chksum += PP_HTONS(ICMP_ECHO << 8); + } + } +#if LWIP_CHECKSUM_CTRL_PER_NETIF + else { + iecho->chksum = 0; + } +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#else /* CHECKSUM_GEN_ICMP */ + iecho->chksum = 0; +#endif /* CHECKSUM_GEN_ICMP */ + + /* Set the correct TTL and recalculate the header checksum. */ + IPH_TTL_SET(iphdr, ICMP_TTL); + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, hlen)); + } +#endif /* CHECKSUM_GEN_IP */ + + ICMP_STATS_INC(icmp.xmit); + /* increase number of messages attempted to send */ + MIB2_STATS_INC(mib2.icmpoutmsgs); + /* increase number of echo replies attempted to send */ + MIB2_STATS_INC(mib2.icmpoutechoreps); + + /* send an ICMP packet */ + ret = ip4_output_if(p, src, LWIP_IP_HDRINCL, + ICMP_TTL, 0, IP_PROTO_ICMP, inp); + if (ret != ERR_OK) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ip_output_if returned an error: %s\n", lwip_strerr(ret))); + } + } + break; + default: + if (type == ICMP_DUR) { + MIB2_STATS_INC(mib2.icmpindestunreachs); + } else if (type == ICMP_TE) { + MIB2_STATS_INC(mib2.icmpintimeexcds); + } else if (type == ICMP_PP) { + MIB2_STATS_INC(mib2.icmpinparmprobs); + } else if (type == ICMP_SQ) { + MIB2_STATS_INC(mib2.icmpinsrcquenchs); + } else if (type == ICMP_RD) { + MIB2_STATS_INC(mib2.icmpinredirects); + } else if (type == ICMP_TS) { + MIB2_STATS_INC(mib2.icmpintimestamps); + } else if (type == ICMP_TSR) { + MIB2_STATS_INC(mib2.icmpintimestampreps); + } else if (type == ICMP_AM) { + MIB2_STATS_INC(mib2.icmpinaddrmasks); + } else if (type == ICMP_AMR) { + MIB2_STATS_INC(mib2.icmpinaddrmaskreps); + } + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ICMP type %"S16_F" code %"S16_F" not supported.\n", + (s16_t)type, (s16_t)code)); + ICMP_STATS_INC(icmp.proterr); + ICMP_STATS_INC(icmp.drop); + } + pbuf_free(p); + return; +lenerr: + pbuf_free(p); + ICMP_STATS_INC(icmp.lenerr); + MIB2_STATS_INC(mib2.icmpinerrors); + return; +#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING +icmperr: + pbuf_free(p); + ICMP_STATS_INC(icmp.err); + MIB2_STATS_INC(mib2.icmpinerrors); + return; +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING */ +} + +/** + * Send an icmp 'destination unreachable' packet, called from ip_input() if + * the transport layer protocol is unknown and from udp_input() if the local + * port is not bound. + * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IP header + * @param t type of the 'unreachable' packet + */ +void +icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t) +{ + MIB2_STATS_INC(mib2.icmpoutdestunreachs); + icmp_send_response(p, ICMP_DUR, t); +} + +#if IP_FORWARD || IP_REASSEMBLY +/** + * Send a 'time exceeded' packet, called from ip_forward() if TTL is 0. 
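Both this function and icmp_dest_unreach() delegate to icmp_send_response() (below), which assembles every ICMPv4 error the same way: an 8-byte fixed header followed by the offending datagram's IP header plus the first ICMP_DEST_UNREACH_DATASIZE (8) bytes of its payload, enough for the receiver to match the error to a socket. A byte-layout sketch (this struct and its name are illustrative; lwIP reuses struct icmp_echo_hdr for the fixed part, with id/seqno serving as the 4 unused bytes):

    #include <stdint.h>
    #include <stdio.h>

    /* Shape of an ICMPv4 error message as assembled by icmp_send_response() */
    struct icmp_error_msg {
        uint8_t  type;         /* ICMP_DUR (3) or ICMP_TE (11) */
        uint8_t  code;         /* e.g. the icmp_dur_type for dest-unreach */
        uint16_t chksum;       /* over the whole ICMP message */
        uint32_t unused;       /* id + seqno fields, sent as zero */
        uint8_t  orig[20 + 8]; /* original IP header (IP_HLEN) + 8 data bytes */
    };

    int main(void)
    {
        /* 36 bytes on common ABIs; all members are naturally aligned */
        printf("%zu\n", sizeof(struct icmp_error_msg));
        return 0;
    }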
+ * + * @param p the input packet for which the 'time exceeded' should be sent, + * p->payload pointing to the IP header + * @param t type of the 'time exceeded' packet + */ +void +icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t) +{ + MIB2_STATS_INC(mib2.icmpouttimeexcds); + icmp_send_response(p, ICMP_TE, t); +} + +#endif /* IP_FORWARD || IP_REASSEMBLY */ + +/** + * Send an icmp packet in response to an incoming packet. + * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IP header + * @param type Type of the ICMP header + * @param code Code of the ICMP header + */ +static void +icmp_send_response(struct pbuf *p, u8_t type, u8_t code) +{ + struct pbuf *q; + struct ip_hdr *iphdr; + /* we can use the echo header here */ + struct icmp_echo_hdr *icmphdr; + ip4_addr_t iphdr_src; + struct netif *netif; + + /* increase number of messages attempted to send */ + MIB2_STATS_INC(mib2.icmpoutmsgs); + + /* ICMP header + IP header + 8 bytes of data */ + q = pbuf_alloc(PBUF_IP, sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE, + PBUF_RAM); + if (q == NULL) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMP packet.\n")); + MIB2_STATS_INC(mib2.icmpouterrors); + return; + } + LWIP_ASSERT("check that first pbuf can hold icmp message", + (q->len >= (sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE))); + + iphdr = (struct ip_hdr *)p->payload; + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded from ")); + ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->src); + LWIP_DEBUGF(ICMP_DEBUG, (" to ")); + ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->dest); + LWIP_DEBUGF(ICMP_DEBUG, ("\n")); + + icmphdr = (struct icmp_echo_hdr *)q->payload; + icmphdr->type = type; + icmphdr->code = code; + icmphdr->id = 0; + icmphdr->seqno = 0; + + /* copy fields from original packet */ + SMEMCPY((u8_t *)q->payload + sizeof(struct icmp_echo_hdr), (u8_t *)p->payload, + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE); + + ip4_addr_copy(iphdr_src, iphdr->src); +#ifdef LWIP_HOOK_IP4_ROUTE_SRC + { + ip4_addr_t iphdr_dst; + ip4_addr_copy(iphdr_dst, iphdr->dest); + netif = ip4_route_src(&iphdr_src, &iphdr_dst); + } +#else + netif = ip4_route(&iphdr_src); +#endif + if (netif != NULL) { + /* calculate checksum */ + icmphdr->chksum = 0; +#if CHECKSUM_GEN_ICMP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP) { + icmphdr->chksum = inet_chksum(icmphdr, q->len); + } +#endif + ICMP_STATS_INC(icmp.xmit); + ip4_output_if(q, NULL, &iphdr_src, ICMP_TTL, 0, IP_PROTO_ICMP, netif); + } + pbuf_free(q); +} + +#endif /* LWIP_IPV4 && LWIP_ICMP */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c new file mode 100644 index 000000000..ddadbac49 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c @@ -0,0 +1,800 @@ +/** + * @file + * IGMP - Internet Group Management Protocol + * + * @defgroup igmp IGMP + * @ingroup ip4 + * To be called from TCPIP thread + */ + +/* + * Copyright (c) 2002 CITEL Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is a contribution to the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. +*/ + +/*------------------------------------------------------------- +Note 1) +Although the rfc requires V1 AND V2 capability +we will only support v2 since now V1 is very old (August 1989) +V1 can be added if required + +a debug print and statistic have been implemented to +show this up. +------------------------------------------------------------- +------------------------------------------------------------- +Note 2) +A query for a specific group address (as opposed to ALLHOSTS) +has now been implemented as I am unsure if it is required + +a debug print and statistic have been implemented to +show this up. +------------------------------------------------------------- +------------------------------------------------------------- +Note 3) +The router alert rfc 2113 is implemented in outgoing packets +but not checked rigorously incoming +------------------------------------------------------------- +Steve Reynolds +------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------- + * RFC 988 - Host extensions for IP multicasting - V0 + * RFC 1054 - Host extensions for IP multicasting - + * RFC 1112 - Host extensions for IP multicasting - V1 + * RFC 2236 - Internet Group Management Protocol, Version 2 - V2 <- this code is based on this RFC (it's the "de facto" standard) + * RFC 3376 - Internet Group Management Protocol, Version 3 - V3 + * RFC 4604 - Using Internet Group Management Protocol Version 3... 
- V3+ + * RFC 2113 - IP Router Alert Option - + *----------------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------- + * Includes + *----------------------------------------------------------------------------*/ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/igmp.h" +#include "lwip/debug.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/ip.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/stats.h" +#include "lwip/prot/igmp.h" + +#include "string.h" + +static struct igmp_group *igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr); +static err_t igmp_remove_group(struct netif* netif, struct igmp_group *group); +static void igmp_timeout(struct netif *netif, struct igmp_group *group); +static void igmp_start_timer(struct igmp_group *group, u8_t max_time); +static void igmp_delaying_member(struct igmp_group *group, u8_t maxresp); +static err_t igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif); +static void igmp_send(struct netif *netif, struct igmp_group *group, u8_t type); + +static ip4_addr_t allsystems; +static ip4_addr_t allrouters; + +/** + * Initialize the IGMP module + */ +void +igmp_init(void) +{ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_init: initializing\n")); + + IP4_ADDR(&allsystems, 224, 0, 0, 1); + IP4_ADDR(&allrouters, 224, 0, 0, 2); +} + +/** + * Start IGMP processing on interface + * + * @param netif network interface on which start IGMP processing + */ +err_t +igmp_start(struct netif *netif) +{ + struct igmp_group* group; + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: starting IGMP processing on if %p\n", (void*)netif)); + + group = igmp_lookup_group(netif, &allsystems); + + if (group != NULL) { + group->group_state = IGMP_GROUP_IDLE_MEMBER; + group->use++; + + /* Allow the igmp messages at the MAC level */ + if (netif->igmp_mac_filter != NULL) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: igmp_mac_filter(ADD ")); + ip4_addr_debug_print_val(IGMP_DEBUG, allsystems); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void*)netif)); + netif->igmp_mac_filter(netif, &allsystems, NETIF_ADD_MAC_FILTER); + } + + return ERR_OK; + } + + return ERR_MEM; +} + +/** + * Stop IGMP processing on interface + * + * @param netif network interface on which stop IGMP processing + */ +err_t +igmp_stop(struct netif *netif) +{ + struct igmp_group *group = netif_igmp_data(netif); + + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, NULL); + + while (group != NULL) { + struct igmp_group *next = group->next; /* avoid use-after-free below */ + + /* disable the group at the MAC level */ + if (netif->igmp_mac_filter != NULL) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_stop: igmp_mac_filter(DEL ")); + ip4_addr_debug_print(IGMP_DEBUG, &group->group_address); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void*)netif)); + netif->igmp_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER); + } + + /* free group */ + memp_free(MEMP_IGMP_GROUP, group); + + /* move to "next" */ + group = next; + } + return ERR_OK; +} + +/** + * Report IGMP memberships for this interface + * + * @param netif network interface on which report IGMP memberships + */ +void +igmp_report_groups(struct netif *netif) +{ + struct igmp_group *group = netif_igmp_data(netif); + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_report_groups: sending IGMP reports on if %p\n", (void*)netif)); + + /* 
Skip the first group in the list, it is always the allsystems group added in igmp_start() */ + if(group != NULL) { + group = group->next; + } + + while (group != NULL) { + igmp_delaying_member(group, IGMP_JOIN_DELAYING_MEMBER_TMR); + group = group->next; + } +} + +/** + * Search for a group in the global igmp_group_list + * + * @param ifp the network interface for which to look + * @param addr the group ip address to search for + * @return a struct igmp_group* if the group has been found, + * NULL if the group wasn't found. + */ +struct igmp_group * +igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr) +{ + struct igmp_group *group = netif_igmp_data(ifp); + + while (group != NULL) { + if (ip4_addr_cmp(&(group->group_address), addr)) { + return group; + } + group = group->next; + } + + /* to be clearer, we return NULL here instead of + * 'group' (which is also NULL at this point). + */ + return NULL; +} + +/** + * Search for a specific igmp group and create a new one if not found- + * + * @param ifp the network interface for which to look + * @param addr the group ip address to search + * @return a struct igmp_group*, + * NULL on memory error. + */ +static struct igmp_group * +igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr) +{ + struct igmp_group *group; + struct igmp_group *list_head = netif_igmp_data(ifp); + + /* Search if the group already exists */ + group = igmp_lookfor_group(ifp, addr); + if (group != NULL) { + /* Group already exists. */ + return group; + } + + /* Group doesn't exist yet, create a new one */ + group = (struct igmp_group *)memp_malloc(MEMP_IGMP_GROUP); + if (group != NULL) { + ip4_addr_set(&(group->group_address), addr); + group->timer = 0; /* Not running */ + group->group_state = IGMP_GROUP_NON_MEMBER; + group->last_reporter_flag = 0; + group->use = 0; + + /* Ensure allsystems group is always first in list */ + if (list_head == NULL) { + /* this is the first entry in linked list */ + LWIP_ASSERT("igmp_lookup_group: first group must be allsystems", + (ip4_addr_cmp(addr, &allsystems) != 0)); + group->next = NULL; + netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, group); + } else { + /* append _after_ first entry */ + LWIP_ASSERT("igmp_lookup_group: all except first group must not be allsystems", + (ip4_addr_cmp(addr, &allsystems) == 0)); + group->next = list_head->next; + list_head->next = group; + } + } + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_lookup_group: %sallocated a new group with address ", (group?"":"impossible to "))); + ip4_addr_debug_print(IGMP_DEBUG, addr); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void*)ifp)); + + return group; +} + +/** + * Remove a group in the global igmp_group_list, but don't free it yet + * + * @param group the group to remove from the global igmp_group_list + * @return ERR_OK if group was removed from the list, an err_t otherwise + */ +static err_t +igmp_remove_group(struct netif* netif, struct igmp_group *group) +{ + err_t err = ERR_OK; + struct igmp_group *tmp_group; + + /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */ + for (tmp_group = netif_igmp_data(netif); tmp_group != NULL; tmp_group = tmp_group->next) { + if (tmp_group->next == group) { + tmp_group->next = group->next; + break; + } + } + /* Group not found in the global igmp_group_list */ + if (tmp_group == NULL) { + err = ERR_ARG; + } + + return err; +} + +/** + * Called from ip_input() if a new IGMP packet is received. 
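The igmp_mac_filter calls seen in igmp_start() and igmp_stop() are how a port's Ethernet driver learns which multicast MACs to accept. A sketch of such a hook for a hypothetical driver (mac_hw_filter() is a placeholder for programming the MAC peripheral's multicast filter; the hook signature is taken from lwIP's netif.h for this version), reusing the same 23-bit IP-to-MAC mapping the ARP layer applies on output:

    #include <stdint.h>
    #include "lwip/netif.h"
    #include "lwip/ip4_addr.h"

    /* Placeholder for the MAC peripheral's multicast hash/perfect filter. */
    extern void mac_hw_filter(const uint8_t mac[6], int add);

    /* Hook matching netif->igmp_mac_filter: translate the group address to
     * its Ethernet multicast MAC and program the hardware filter. */
    static err_t my_igmp_mac_filter(struct netif *netif, const ip4_addr_t *group,
                                    enum netif_mac_filter_action action)
    {
        uint8_t mac[6];
        LWIP_UNUSED_ARG(netif);
        mac[0] = 0x01; mac[1] = 0x00; mac[2] = 0x5e;
        mac[3] = ip4_addr2(group) & 0x7f;
        mac[4] = ip4_addr3(group);
        mac[5] = ip4_addr4(group);
        mac_hw_filter(mac, action == NETIF_ADD_MAC_FILTER);
        return ERR_OK;
    }

    /* registered once at interface setup, e.g. in the netif init callback:
     *   netif->igmp_mac_filter = my_igmp_mac_filter;                      */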
+ * + * @param p received igmp packet, p->payload pointing to the igmp header + * @param inp network interface on which the packet was received + * @param dest destination ip address of the igmp packet + */ +void +igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest) +{ + struct igmp_msg* igmp; + struct igmp_group* group; + struct igmp_group* groupref; + + IGMP_STATS_INC(igmp.recv); + + /* Note that the length CAN be greater than 8 but only 8 are used - All are included in the checksum */ + if (p->len < IGMP_MINLEN) { + pbuf_free(p); + IGMP_STATS_INC(igmp.lenerr); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: length error\n")); + return; + } + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: message from ")); + ip4_addr_debug_print(IGMP_DEBUG, &(ip4_current_header()->src)); + LWIP_DEBUGF(IGMP_DEBUG, (" to address ")); + ip4_addr_debug_print(IGMP_DEBUG, &(ip4_current_header()->dest)); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void*)inp)); + + /* Now calculate and check the checksum */ + igmp = (struct igmp_msg *)p->payload; + if (inet_chksum(igmp, p->len)) { + pbuf_free(p); + IGMP_STATS_INC(igmp.chkerr); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: checksum error\n")); + return; + } + + /* Packet is ok so find an existing group */ + group = igmp_lookfor_group(inp, dest); /* use the destination IP address of incoming packet */ + + /* If group can be found or create... */ + if (!group) { + pbuf_free(p); + IGMP_STATS_INC(igmp.drop); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP frame not for us\n")); + return; + } + + /* NOW ACT ON THE INCOMING MESSAGE TYPE... */ + switch (igmp->igmp_msgtype) { + case IGMP_MEMB_QUERY: + /* IGMP_MEMB_QUERY to the "all systems" address ? */ + if ((ip4_addr_cmp(dest, &allsystems)) && ip4_addr_isany(&igmp->igmp_group_address)) { + /* THIS IS THE GENERAL QUERY */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: General IGMP_MEMB_QUERY on \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); + + if (igmp->igmp_maxresp == 0) { + IGMP_STATS_INC(igmp.rx_v1); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: got an all hosts query with time== 0 - this is V1 and not implemented - treat as v2\n")); + igmp->igmp_maxresp = IGMP_V1_DELAYING_MEMBER_TMR; + } else { + IGMP_STATS_INC(igmp.rx_general); + } + + groupref = netif_igmp_data(inp); + + /* Do not send messages on the all systems group address! */ + /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */ + if(groupref != NULL) { + groupref = groupref->next; + } + + while (groupref) { + igmp_delaying_member(groupref, igmp->igmp_maxresp); + groupref = groupref->next; + } + } else { + /* IGMP_MEMB_QUERY to a specific group ? 
*/ + if (!ip4_addr_isany(&igmp->igmp_group_address)) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_MEMB_QUERY to a specific group ")); + ip4_addr_debug_print(IGMP_DEBUG, &igmp->igmp_group_address); + if (ip4_addr_cmp(dest, &allsystems)) { + ip4_addr_t groupaddr; + LWIP_DEBUGF(IGMP_DEBUG, (" using \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); + /* we first need to re-look for the group since we used dest last time */ + ip4_addr_copy(groupaddr, igmp->igmp_group_address); + group = igmp_lookfor_group(inp, &groupaddr); + } else { + LWIP_DEBUGF(IGMP_DEBUG, (" with the group address as destination [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); + } + + if (group != NULL) { + IGMP_STATS_INC(igmp.rx_group); + igmp_delaying_member(group, igmp->igmp_maxresp); + } else { + IGMP_STATS_INC(igmp.drop); + } + } else { + IGMP_STATS_INC(igmp.proterr); + } + } + break; + case IGMP_V2_MEMB_REPORT: + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_V2_MEMB_REPORT\n")); + IGMP_STATS_INC(igmp.rx_report); + if (group->group_state == IGMP_GROUP_DELAYING_MEMBER) { + /* This is on a specific group we have already looked up */ + group->timer = 0; /* stopped */ + group->group_state = IGMP_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + } + break; + default: + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: unexpected msg %d in state %d on group %p on if %p\n", + igmp->igmp_msgtype, group->group_state, (void*)&group, (void*)inp)); + IGMP_STATS_INC(igmp.proterr); + break; + } + + pbuf_free(p); + return; +} + +/** + * @ingroup igmp + * Join a group on one network interface. + * + * @param ifaddr ip address of the network interface which should join a new group + * @param groupaddr the ip address of the group which to join + * @return ERR_OK if group was joined on the netif(s), an err_t otherwise + */ +err_t +igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_joingroup: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_joingroup: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* loop through netif's */ + netif = netif_list; + while (netif != NULL) { + /* Should we join this interface ? */ + if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) { + err = igmp_joingroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Return an error even if some network interfaces are joined */ + /** @todo undo any other netif already joined */ + return err; + } + } + /* proceed to next network interface */ + netif = netif->next; + } + + return err; +} + +/** + * @ingroup igmp + * Join a group on one network interface. 
+ * + * @param netif the network interface which should join a new group + * @param groupaddr the ip address of the group which to join + * @return ERR_OK if group was joined on the netif, an err_t otherwise + */ +err_t +igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr) +{ + struct igmp_group *group; + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_joingroup_netif: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_joingroup_netif: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* make sure it is an igmp-enabled netif */ + LWIP_ERROR("igmp_joingroup_netif: attempt to join on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;); + + /* find group or create a new one if not found */ + group = igmp_lookup_group(netif, groupaddr); + + if (group != NULL) { + /* This should create a new group, check the state to make sure */ + if (group->group_state != IGMP_GROUP_NON_MEMBER) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to group not in state IGMP_GROUP_NON_MEMBER\n")); + } else { + /* OK - it was new group */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to new group: ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, ("\n")); + + /* If first use of the group, allow the group at the MAC level */ + if ((group->use==0) && (netif->igmp_mac_filter != NULL)) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: igmp_mac_filter(ADD ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void*)netif)); + netif->igmp_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER); + } + + IGMP_STATS_INC(igmp.tx_join); + igmp_send(netif, group, IGMP_V2_MEMB_REPORT); + + igmp_start_timer(group, IGMP_JOIN_DELAYING_MEMBER_TMR); + + /* Need to work out where this timer comes from */ + group->group_state = IGMP_GROUP_DELAYING_MEMBER; + } + /* Increment group use */ + group->use++; + /* Join on this interface */ + return ERR_OK; + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: Not enough memory to join to group\n")); + return ERR_MEM; + } +} + +/** + * @ingroup igmp + * Leave a group on one network interface. + * + * @param ifaddr ip address of the network interface which should leave a group + * @param groupaddr the ip address of the group which to leave + * @return ERR_OK if group was left on the netif(s), an err_t otherwise + */ +err_t +igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_leavegroup: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_leavegroup: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* loop through netif's */ + netif = netif_list; + while (netif != NULL) { + /* Should we leave this interface ? */ + if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) { + err_t res = igmp_leavegroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Store this result if we have not yet gotten a success */ + err = res; + } + } + /* proceed to next network interface */ + netif = netif->next; + } + + return err; +} + +/** + * @ingroup igmp + * Leave a group on one network interface. 
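From application code, the two netif-agnostic wrappers above are the usual entry points. A sketch of subscribing to a group on every IGMP-capable interface and dropping it again (the group address is an example; this assumes LWIP_IGMP is enabled and the interface was added with NETIF_FLAG_IGMP set, so igmp_start() has already run):

    #include "lwip/igmp.h"
    #include "lwip/ip_addr.h"

    /* Join 239.1.2.3 on all matching netifs (IP4_ADDR_ANY4 as the
     * interface wildcard), then leave again. */
    static void demo_group_membership(void)
    {
        ip4_addr_t group;
        IP4_ADDR(&group, 239, 1, 2, 3);

        if (igmp_joingroup(IP4_ADDR_ANY4, &group) == ERR_OK) {
            /* ... receive UDP traffic addressed to 239.1.2.3 ... */
            igmp_leavegroup(IP4_ADDR_ANY4, &group);
        }
    }

Note the reference counting visible in the code above: joining the same group twice only bumps group->use, and the IGMP leave message goes out only when the last user leaves.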
+ * + * @param netif the network interface which should leave a group + * @param groupaddr the ip address of the group which to leave + * @return ERR_OK if group was left on the netif, an err_t otherwise + */ +err_t +igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr) +{ + struct igmp_group *group; + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* make sure it is an igmp-enabled netif */ + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;); + + /* find group */ + group = igmp_lookfor_group(netif, groupaddr); + + if (group != NULL) { + /* Only send a leave if the flag is set according to the state diagram */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: Leaving group: ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, ("\n")); + + /* If there is no other use of the group */ + if (group->use <= 1) { + /* Remove the group from the list */ + igmp_remove_group(netif, group); + + /* If we are the last reporter for this group */ + if (group->last_reporter_flag) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: sending leaving group\n")); + IGMP_STATS_INC(igmp.tx_leave); + igmp_send(netif, group, IGMP_LEAVE_GROUP); + } + + /* Disable the group at the MAC level */ + if (netif->igmp_mac_filter != NULL) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: igmp_mac_filter(DEL ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void*)netif)); + netif->igmp_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER); + } + + /* Free group struct */ + memp_free(MEMP_IGMP_GROUP, group); + } else { + /* Decrement group use */ + group->use--; + } + return ERR_OK; + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: not member of group\n")); + return ERR_VAL; + } +} + +/** + * The igmp timer function (both for NO_SYS=1 and =0) + * Should be called every IGMP_TMR_INTERVAL milliseconds (100 ms is default). + */ +void +igmp_tmr(void) +{ + struct netif *netif = netif_list; + + while (netif != NULL) { + struct igmp_group *group = netif_igmp_data(netif); + + while (group != NULL) { + if (group->timer > 0) { + group->timer--; + if (group->timer == 0) { + igmp_timeout(netif, group); + } + } + group = group->next; + } + netif = netif->next; + } +} + +/** + * Called if a timeout for one group is reached. + * Sends a report for this group. 
+ * + * @param group an igmp_group for which a timeout is reached + */ +static void +igmp_timeout(struct netif *netif, struct igmp_group *group) +{ + /* If the state is IGMP_GROUP_DELAYING_MEMBER then we send a report for this group + (unless it is the allsystems group) */ + if ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) && + (!(ip4_addr_cmp(&(group->group_address), &allsystems)))) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_timeout: report membership for group with address ")); + ip4_addr_debug_print(IGMP_DEBUG, &(group->group_address)); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void*)netif)); + + group->group_state = IGMP_GROUP_IDLE_MEMBER; + + IGMP_STATS_INC(igmp.tx_report); + igmp_send(netif, group, IGMP_V2_MEMB_REPORT); + } +} + +/** + * Start a timer for an igmp group + * + * @param group the igmp_group for which to start a timer + * @param max_time the time in multiples of IGMP_TMR_INTERVAL (decrease with + * every call to igmp_tmr()) + */ +static void +igmp_start_timer(struct igmp_group *group, u8_t max_time) +{ +#ifdef LWIP_RAND + group->timer = max_time > 2 ? (LWIP_RAND() % max_time) : 1; +#else /* LWIP_RAND */ + /* ATTENTION: use this only if absolutely necessary! */ + group->timer = max_time / 2; +#endif /* LWIP_RAND */ + + if (group->timer == 0) { + group->timer = 1; + } +} + +/** + * Delaying membership report for a group if necessary + * + * @param group the igmp_group for which "delaying" membership report + * @param maxresp query delay + */ +static void +igmp_delaying_member(struct igmp_group *group, u8_t maxresp) +{ + if ((group->group_state == IGMP_GROUP_IDLE_MEMBER) || + ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) && + ((group->timer == 0) || (maxresp < group->timer)))) { + igmp_start_timer(group, maxresp); + group->group_state = IGMP_GROUP_DELAYING_MEMBER; + } +} + + +/** + * Sends an IP packet on a network interface. This function constructs the IP header + * and calculates the IP header checksum. If the source IP address is NULL, + * the IP address of the outgoing network interface is filled in as source address. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IP/LINK headers + * returns errors returned by netif->output + */ +static err_t +igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif) +{ + /* This is the "router alert" option */ + u16_t ra[2]; + ra[0] = PP_HTONS(ROUTER_ALERT); + ra[1] = 0x0000; /* Router shall examine packet */ + IGMP_STATS_INC(igmp.xmit); + return ip4_output_if_opt(p, src, dest, IGMP_TTL, 0, IP_PROTO_IGMP, netif, ra, ROUTER_ALERTLEN); +} + +/** + * Send an igmp packet to a specific group. 
+ * + * @param group the group to which to send the packet + * @param type the type of igmp packet to send + */ +static void +igmp_send(struct netif *netif, struct igmp_group *group, u8_t type) +{ + struct pbuf* p = NULL; + struct igmp_msg* igmp = NULL; + ip4_addr_t src = *IP4_ADDR_ANY4; + ip4_addr_t* dest = NULL; + + /* IP header + "router alert" option + IGMP header */ + p = pbuf_alloc(PBUF_TRANSPORT, IGMP_MINLEN, PBUF_RAM); + + if (p) { + igmp = (struct igmp_msg *)p->payload; + LWIP_ASSERT("igmp_send: check that first pbuf can hold struct igmp_msg", + (p->len >= sizeof(struct igmp_msg))); + ip4_addr_copy(src, *netif_ip4_addr(netif)); + + if (type == IGMP_V2_MEMB_REPORT) { + dest = &(group->group_address); + ip4_addr_copy(igmp->igmp_group_address, group->group_address); + group->last_reporter_flag = 1; /* Remember we were the last to report */ + } else { + if (type == IGMP_LEAVE_GROUP) { + dest = &allrouters; + ip4_addr_copy(igmp->igmp_group_address, group->group_address); + } + } + + if ((type == IGMP_V2_MEMB_REPORT) || (type == IGMP_LEAVE_GROUP)) { + igmp->igmp_msgtype = type; + igmp->igmp_maxresp = 0; + igmp->igmp_checksum = 0; + igmp->igmp_checksum = inet_chksum(igmp, IGMP_MINLEN); + + igmp_ip_output_if(p, &src, dest, netif); + } + + pbuf_free(p); + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_send: not enough memory for igmp_send\n")); + IGMP_STATS_INC(igmp.memerr); + } +} + +#endif /* LWIP_IPV4 && LWIP_IGMP */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c new file mode 100644 index 000000000..5789712ab --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c @@ -0,0 +1,1086 @@ +/** + * @file + * This is the IPv4 layer implementation for incoming and outgoing IP traffic. + * + * @see ip_frag.c + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/ip4_frag.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/icmp.h" +#include "lwip/igmp.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/autoip.h" +#include "lwip/stats.h" +#include "lwip/prot/dhcp.h" + +#include <string.h> + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Set this to 0 in the rare case of wanting to call an extra function to + * generate the IP checksum (in contrast to calculating it on-the-fly). */ +#ifndef LWIP_INLINE_IP_CHKSUM +#if LWIP_CHECKSUM_CTRL_PER_NETIF +#define LWIP_INLINE_IP_CHKSUM 0 +#else /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#define LWIP_INLINE_IP_CHKSUM 1 +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#endif + +#if LWIP_INLINE_IP_CHKSUM && CHECKSUM_GEN_IP +#define CHECKSUM_GEN_IP_INLINE 1 +#else +#define CHECKSUM_GEN_IP_INLINE 0 +#endif + +#if LWIP_DHCP || defined(LWIP_IP_ACCEPT_UDP_PORT) +#define IP_ACCEPT_LINK_LAYER_ADDRESSING 1 + +/** Some defines for DHCP to let link-layer-addressed packets through while the + * netif is down. + * To use this in your own application/protocol, define LWIP_IP_ACCEPT_UDP_PORT(port) + * to return 1 if the port is accepted and 0 if the port is not accepted. + */ +#if LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) +/* accept DHCP client port and custom port */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (((port) == PP_NTOHS(DHCP_CLIENT_PORT)) \ + || (LWIP_IP_ACCEPT_UDP_PORT(port))) +#elif defined(LWIP_IP_ACCEPT_UDP_PORT) /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ +/* accept custom port only */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (LWIP_IP_ACCEPT_UDP_PORT(port)) +#else /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ +/* accept DHCP client port only */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) ((port) == PP_NTOHS(DHCP_CLIENT_PORT)) +#endif /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ + +#else /* LWIP_DHCP */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSING 0 +#endif /* LWIP_DHCP */ + +/** The IP header ID of the next outgoing IP packet */ +static u16_t ip_id; + +#if LWIP_MULTICAST_TX_OPTIONS +/** The default netif used for multicast */ +static struct netif* ip4_default_multicast_netif; + +/** + * @ingroup ip4 + * Set a default netif for IPv4 multicast. */ +void +ip4_set_default_multicast_netif(struct netif* default_multicast_netif) +{ + ip4_default_multicast_netif = default_multicast_netif; +} +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC +/** + * Source based IPv4 routing must be fully implemented in + * LWIP_HOOK_IP4_ROUTE_SRC(). This function only provides the parameters. + */ +struct netif * +ip4_route_src(const ip4_addr_t *dest, const ip4_addr_t *src) +{ + if (src != NULL) { + /* when src==NULL, the hook is called from ip4_route(dest) */ + struct netif *netif = LWIP_HOOK_IP4_ROUTE_SRC(dest, src); + if (netif != NULL) { + return netif; + } + } + return ip4_route(dest); +} +#endif /* LWIP_HOOK_IP4_ROUTE_SRC */ + +/** + * Finds the appropriate network interface for a given IP address. It + * searches the list of network interfaces linearly. A match is found + * if the masked IP address of the network interface equals the masked + * IP address given to the function.
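The masked comparison this doc comment describes is ip4_addr_netcmp(), a plain 32-bit AND-and-compare. The whole route decision reduces to the following standalone illustration (host-order values; the function name is ours):

    #include <stdint.h>
    #include <stdio.h>

    /* two addresses are on the same subnet iff they agree under the mask */
    static int same_subnet(uint32_t a, uint32_t b, uint32_t mask)
    {
        return (a & mask) == (b & mask);
    }

    int main(void)
    {
        uint32_t ifaddr = 0xC0A8010Au;  /* 192.168.1.10 */
        uint32_t mask   = 0xFFFFFF00u;  /* 255.255.255.0 */
        printf("%d\n", same_subnet(ifaddr, 0xC0A80163u, mask)); /* 192.168.1.99 -> 1 */
        printf("%d\n", same_subnet(ifaddr, 0xC0A80263u, mask)); /* 192.168.2.99 -> 0 */
        return 0;
    }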
+ * + * @param dest the destination IP address for which to find the route + * @return the netif on which to send to reach dest + */ +struct netif * +ip4_route(const ip4_addr_t *dest) +{ + struct netif *netif; + +#if LWIP_MULTICAST_TX_OPTIONS + /* Use administratively selected interface for multicast by default */ + if (ip4_addr_ismulticast(dest) && ip4_default_multicast_netif) { + return ip4_default_multicast_netif; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + /* iterate through netifs */ + for (netif = netif_list; netif != NULL; netif = netif->next) { + /* is the netif up, does it have a link and a valid address? */ + if (netif_is_up(netif) && netif_is_link_up(netif) && !ip4_addr_isany_val(*netif_ip4_addr(netif))) { + /* network mask matches? */ + if (ip4_addr_netcmp(dest, netif_ip4_addr(netif), netif_ip4_netmask(netif))) { + /* return netif on which to forward IP packet */ + return netif; + } + /* gateway matches on a non-broadcast interface? (i.e. peer in a point-to-point interface) */ + if (((netif->flags & NETIF_FLAG_BROADCAST) == 0) && ip4_addr_cmp(dest, netif_ip4_gw(netif))) { + /* return netif on which to forward IP packet */ + return netif; + } + } + } + +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + /* loopif is disabled, loopback traffic is passed through any netif */ + if (ip4_addr_isloopback(dest)) { + /* don't check for link on loopback traffic */ + if (netif_default != NULL && netif_is_up(netif_default)) { + return netif_default; + } + /* default netif is not up, just use any netif for loopback traffic */ + for (netif = netif_list; netif != NULL; netif = netif->next) { + if (netif_is_up(netif)) { + return netif; + } + } + return NULL; + } +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC + netif = LWIP_HOOK_IP4_ROUTE_SRC(dest, NULL); + if (netif != NULL) { + return netif; + } +#elif defined(LWIP_HOOK_IP4_ROUTE) + netif = LWIP_HOOK_IP4_ROUTE(dest); + if (netif != NULL) { + return netif; + } +#endif + + if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default) || + ip4_addr_isany_val(*netif_ip4_addr(netif_default))) { + /* No matching netif found and default netif is not usable. + If this is not good enough for you, use LWIP_HOOK_IP4_ROUTE() */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_route: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + MIB2_STATS_INC(mib2.ipoutnoroutes); + return NULL; + } + + return netif_default; +} + +#if IP_FORWARD +/** + * Determine whether an IP address is in a reserved set of addresses + * that may not be forwarded, or whether datagrams to that destination + * may be forwarded. + * @param p the packet to forward + * @return 1: can forward 0: discard + */ +static int +ip4_canforward(struct pbuf *p) +{ + u32_t addr = lwip_htonl(ip4_addr_get_u32(ip4_current_dest_addr())); + + if (p->flags & PBUF_FLAG_LLBCAST) { + /* don't route link-layer broadcasts */ + return 0; + } + if ((p->flags & PBUF_FLAG_LLMCAST) && !IP_MULTICAST(addr)) { + /* don't route link-layer multicasts unless the destination address is an IP + multicast address */ + return 0; + } + if (IP_EXPERIMENTAL(addr)) { + return 0; + } + if (IP_CLASSA(addr)) { + u32_t net = addr & IP_CLASSA_NET; + if ((net == 0) || (net == ((u32_t)IP_LOOPBACKNET << IP_CLASSA_NSHIFT))) { + /* don't route loopback packets */ + return 0; + } + } + return 1; +} + +/** + * Forwards an IP packet.
It finds an appropriate route for the + * packet, decrements the TTL value of the packet, adjusts the + * checksum and outputs the packet on the appropriate interface. + * + * @param p the packet to forward (p->payload points to IP header) + * @param iphdr the IP header of the input packet + * @param inp the netif on which this packet was received + */ +static void +ip4_forward(struct pbuf *p, struct ip_hdr *iphdr, struct netif *inp) +{ + struct netif *netif; + + PERF_START; + LWIP_UNUSED_ARG(inp); + + if (!ip4_canforward(p)) { + goto return_noroute; + } + + /* RFC3927 2.7: do not forward link-local addresses */ + if (ip4_addr_islinklocal(ip4_current_dest_addr())) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not forwarding LLA %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + goto return_noroute; + } + + /* Find network interface where to forward this IP packet to. */ + netif = ip4_route_src(ip4_current_dest_addr(), ip4_current_src_addr()); + if (netif == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: no forwarding route for %"U16_F".%"U16_F".%"U16_F".%"U16_F" found\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + /* @todo: send ICMP_DUR_NET? */ + goto return_noroute; + } +#if !IP_FORWARD_ALLOW_TX_ON_RX_NETIF + /* Do not forward packets onto the same network interface on which + * they arrived. */ + if (netif == inp) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not bouncing packets back on incoming interface.\n")); + goto return_noroute; + } +#endif /* IP_FORWARD_ALLOW_TX_ON_RX_NETIF */ + + /* decrement TTL */ + IPH_TTL_SET(iphdr, IPH_TTL(iphdr) - 1); + /* send ICMP if TTL == 0 */ + if (IPH_TTL(iphdr) == 0) { + MIB2_STATS_INC(mib2.ipinhdrerrors); +#if LWIP_ICMP + /* Don't send ICMP messages in response to ICMP messages */ + if (IPH_PROTO(iphdr) != IP_PROTO_ICMP) { + icmp_time_exceeded(p, ICMP_TE_TTL); + } +#endif /* LWIP_ICMP */ + return; + } + + /* Incrementally update the IP checksum. */ + if (IPH_CHKSUM(iphdr) >= PP_HTONS(0xffffU - 0x100)) { + IPH_CHKSUM_SET(iphdr, IPH_CHKSUM(iphdr) + PP_HTONS(0x100) + 1); + } else { + IPH_CHKSUM_SET(iphdr, IPH_CHKSUM(iphdr) + PP_HTONS(0x100)); + } + + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: forwarding packet to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + + IP_STATS_INC(ip.fw); + MIB2_STATS_INC(mib2.ipforwdatagrams); + IP_STATS_INC(ip.xmit); + + PERF_STOP("ip4_forward"); + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif->mtu && (p->tot_len > netif->mtu)) { + if ((IPH_OFFSET(iphdr) & PP_NTOHS(IP_DF)) == 0) { +#if IP_FRAG + ip4_frag(p, netif, ip4_current_dest_addr()); +#else /* IP_FRAG */ + /* @todo: send ICMP Destination Unreachable code 13 "Communication administratively prohibited"? 
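The two-branch checksum update above is the standard incremental adjustment for a TTL decrement: TTL sits in the high byte of a 16-bit header word, so that word drops by 0x100 and the one's complement checksum must grow by 0x100, with the end-around carry folded back in when the addition would overflow 16 bits. A self-contained sanity check of one sample value (illustrative only; the byte-order handling via PP_HTONS is elided here):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint16_t hc = 0xFF80;                       /* old checksum, >= 0xfeff: carry branch */
        uint16_t hc2 = (uint16_t)(hc + 0x100 + 1);  /* add 0x100, fold the end-around carry */
        assert(hc2 == 0x0081);                      /* 0xFF80 + 0x0101 = 0x10081 -> 0x0081 */
        return 0;
    }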
*/ +#endif /* IP_FRAG */ + } else { +#if LWIP_ICMP + /* send ICMP Destination Unreachable code 4: "Fragmentation Needed and DF Set" */ + icmp_dest_unreach(p, ICMP_DUR_FRAG); +#endif /* LWIP_ICMP */ + } + return; + } + /* transmit pbuf on chosen interface */ + netif->output(netif, p, ip4_current_dest_addr()); + return; +return_noroute: + MIB2_STATS_INC(mib2.ipoutnoroutes); +} +#endif /* IP_FORWARD */ + +/** + * This function is called by the network interface device driver when + * an IP packet is received. The function does the basic checks of the + * IP header such as packet size being at least larger than the header + * size etc. If the packet was not destined for us, the packet is + * forwarded (using ip_forward). The IP checksum is always checked. + * + * Finally, the packet is sent to the upper layer protocol input function. + * + * @param p the received IP packet (p->payload points to IP header) + * @param inp the netif on which this packet was received + * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't + * processed, but currently always returns ERR_OK) + */ +err_t +ip4_input(struct pbuf *p, struct netif *inp) +{ + struct ip_hdr *iphdr; + struct netif *netif; + u16_t iphdr_hlen; + u16_t iphdr_len; +#if IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP + int check_ip_src = 1; +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP */ + + IP_STATS_INC(ip.recv); + MIB2_STATS_INC(mib2.ipinreceives); + + /* identify the IP header */ + iphdr = (struct ip_hdr *)p->payload; + if (IPH_V(iphdr) != 4) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IP packet dropped due to bad version number %"U16_F"\n", (u16_t)IPH_V(iphdr))); + ip4_debug_print(p); + pbuf_free(p); + IP_STATS_INC(ip.err); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinhdrerrors); + return ERR_OK; + } + +#ifdef LWIP_HOOK_IP4_INPUT + if (LWIP_HOOK_IP4_INPUT(p, inp)) { + /* the packet has been eaten */ + return ERR_OK; + } +#endif + + /* obtain IP header length in number of 32-bit words */ + iphdr_hlen = IPH_HL(iphdr); + /* calculate IP header length in bytes */ + iphdr_hlen *= 4; + /* obtain ip length in bytes */ + iphdr_len = lwip_ntohs(IPH_LEN(iphdr)); + + /* Trim pbuf. This is especially required for packets < 60 bytes. */ + if (iphdr_len < p->tot_len) { + pbuf_realloc(p, iphdr_len); + } + + /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? 
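For context on the length checks that follow: IPH_HL() holds the IHL field in 32-bit words, so the multiplication by 4 above yields the header size in bytes, and an option-free header has IHL 5, i.e. 20 bytes (IP_HLEN). A tiny illustration (not new behavior):

    u16_t ihl_words = 6;              /* header carrying one 4-byte option word */
    u16_t ihl_bytes = ihl_words * 4;  /* 24 bytes; anything below 20 is invalid */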
*/ + if ((iphdr_hlen > p->len) || (iphdr_len > p->tot_len) || (iphdr_hlen < IP_HLEN)) { + if (iphdr_hlen < IP_HLEN) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("ip4_input: short IP header (%"U16_F" bytes) received, IP packet dropped\n", iphdr_hlen)); + } + if (iphdr_hlen > p->len) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IP header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n", + iphdr_hlen, p->len)); + } + if (iphdr_len > p->tot_len) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IP (len %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n", + iphdr_len, p->tot_len)); + } + /* free (drop) packet pbufs */ + pbuf_free(p); + IP_STATS_INC(ip.lenerr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipindiscards); + return ERR_OK; + } + + /* verify checksum */ +#if CHECKSUM_CHECK_IP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_IP) { + if (inet_chksum(iphdr, iphdr_hlen) != 0) { + + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("Checksum (0x%"X16_F") failed, IP packet dropped.\n", inet_chksum(iphdr, iphdr_hlen))); + ip4_debug_print(p); + pbuf_free(p); + IP_STATS_INC(ip.chkerr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinhdrerrors); + return ERR_OK; + } + } +#endif + + /* copy IP addresses to aligned ip_addr_t */ + ip_addr_copy_from_ip4(ip_data.current_iphdr_dest, iphdr->dest); + ip_addr_copy_from_ip4(ip_data.current_iphdr_src, iphdr->src); + + /* match packet against an interface, i.e. is this packet for us? */ + if (ip4_addr_ismulticast(ip4_current_dest_addr())) { +#if LWIP_IGMP + if ((inp->flags & NETIF_FLAG_IGMP) && (igmp_lookfor_group(inp, ip4_current_dest_addr()))) { + /* IGMP snooping switches need 0.0.0.0 to be allowed as source address (RFC 4541) */ + ip4_addr_t allsystems; + IP4_ADDR(&allsystems, 224, 0, 0, 1); + if (ip4_addr_cmp(ip4_current_dest_addr(), &allsystems) && + ip4_addr_isany(ip4_current_src_addr())) { + check_ip_src = 0; + } + netif = inp; + } else { + netif = NULL; + } +#else /* LWIP_IGMP */ + if ((netif_is_up(inp)) && (!ip4_addr_isany_val(*netif_ip4_addr(inp)))) { + netif = inp; + } else { + netif = NULL; + } +#endif /* LWIP_IGMP */ + } else { + /* start trying with inp. if that's not acceptable, start walking the + list of configured netifs. + 'first' is used as a boolean to mark whether we started walking the list */ + int first = 1; + netif = inp; + do { + LWIP_DEBUGF(IP_DEBUG, ("ip_input: iphdr->dest 0x%"X32_F" netif->ip_addr 0x%"X32_F" (0x%"X32_F", 0x%"X32_F", 0x%"X32_F")\n", + ip4_addr_get_u32(&iphdr->dest), ip4_addr_get_u32(netif_ip4_addr(netif)), + ip4_addr_get_u32(&iphdr->dest) & ip4_addr_get_u32(netif_ip4_netmask(netif)), + ip4_addr_get_u32(netif_ip4_addr(netif)) & ip4_addr_get_u32(netif_ip4_netmask(netif)), + ip4_addr_get_u32(&iphdr->dest) & ~ip4_addr_get_u32(netif_ip4_netmask(netif)))); + + /* interface is up and configured? */ + if ((netif_is_up(netif)) && (!ip4_addr_isany_val(*netif_ip4_addr(netif)))) { + /* unicast to this interface address? */ + if (ip4_addr_cmp(ip4_current_dest_addr(), netif_ip4_addr(netif)) || + /* or broadcast on this interface network address? 
*/ + ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + || (ip4_addr_get_u32(ip4_current_dest_addr()) == PP_HTONL(IPADDR_LOOPBACK)) +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ + ) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + /* break out of for loop */ + break; + } +#if LWIP_AUTOIP + /* connections to link-local addresses must persist after changing + the netif's address (RFC3927 ch. 1.9) */ + if (autoip_accept_packet(netif, ip4_current_dest_addr())) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: LLA packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + /* break out of for loop */ + break; + } +#endif /* LWIP_AUTOIP */ + } + if (first) { +#if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF + /* Packets sent to the loopback address must not be accepted on an + * interface that does not have the loopback address assigned to it, + * unless a non-loopback interface is used for loopback traffic. */ + if (ip4_addr_isloopback(ip4_current_dest_addr())) { + netif = NULL; + break; + } +#endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */ + first = 0; + netif = netif_list; + } else { + netif = netif->next; + } + if (netif == inp) { + netif = netif->next; + } + } while (netif != NULL); + } + +#if IP_ACCEPT_LINK_LAYER_ADDRESSING + /* Pass DHCP messages regardless of destination address. DHCP traffic is addressed + * using link layer addressing (such as Ethernet MAC) so we must not filter on IP. + * According to RFC 1542 section 3.1.1, as referred to by RFC 2131. + * + * If you want to accept private broadcast communication while a netif is down, + * define LWIP_IP_ACCEPT_UDP_PORT(dst_port), e.g.: + * + * #define LWIP_IP_ACCEPT_UDP_PORT(dst_port) ((dst_port) == PP_NTOHS(12345)) + */ + if (netif == NULL) { + /* remote port is DHCP server? */ + if (IPH_PROTO(iphdr) == IP_PROTO_UDP) { + struct udp_hdr *udphdr = (struct udp_hdr *)((u8_t *)iphdr + iphdr_hlen); + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: UDP packet to DHCP client port %"U16_F"\n", + lwip_ntohs(udphdr->dest))); + if (IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(udphdr->dest)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: DHCP packet accepted.\n")); + netif = inp; + check_ip_src = 0; + } + } + } +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ + + /* broadcast or multicast packet source address? Compliant with RFC 1122: 3.2.1.3 */ +#if LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING + if (check_ip_src +#if IP_ACCEPT_LINK_LAYER_ADDRESSING + /* DHCP servers need 0.0.0.0 to be allowed as source address (RFC 1122 section 3.2.1.3/a) */ + && !ip4_addr_isany_val(*ip4_current_src_addr()) +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ + ) +#endif /* LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING */ + { + if ((ip4_addr_isbroadcast(ip4_current_src_addr(), inp)) || + (ip4_addr_ismulticast(ip4_current_src_addr()))) { + /* packet source is not valid */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("ip4_input: packet source is not valid.\n")); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinaddrerrors); + MIB2_STATS_INC(mib2.ipindiscards); + return ERR_OK; + } + } + + /* packet not for us? */ + if (netif == NULL) { + /* packet not for us, route or discard */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: packet not for us.\n")); +#if IP_FORWARD + /* non-broadcast packet?
*/ + if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), inp)) { + /* try to forward IP packet on (other) interfaces */ + ip4_forward(p, iphdr, inp); + } else +#endif /* IP_FORWARD */ + { + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinaddrerrors); + MIB2_STATS_INC(mib2.ipindiscards); + } + pbuf_free(p); + return ERR_OK; + } + /* packet consists of multiple fragments? */ + if ((IPH_OFFSET(iphdr) & PP_HTONS(IP_OFFMASK | IP_MF)) != 0) { +#if IP_REASSEMBLY /* packet fragment reassembly code present? */ + LWIP_DEBUGF(IP_DEBUG, ("IP packet is a fragment (id=0x%04"X16_F" tot_len=%"U16_F" len=%"U16_F" MF=%"U16_F" offset=%"U16_F"), calling ip4_reass()\n", + lwip_ntohs(IPH_ID(iphdr)), p->tot_len, lwip_ntohs(IPH_LEN(iphdr)), (u16_t)!!(IPH_OFFSET(iphdr) & PP_HTONS(IP_MF)), (u16_t)((lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK)*8))); + /* reassemble the packet*/ + p = ip4_reass(p); + /* packet not fully reassembled yet? */ + if (p == NULL) { + return ERR_OK; + } + iphdr = (struct ip_hdr *)p->payload; +#else /* IP_REASSEMBLY == 0, no packet fragment reassembly code present */ + pbuf_free(p); + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since it was fragmented (0x%"X16_F") (while IP_REASSEMBLY == 0).\n", + lwip_ntohs(IPH_OFFSET(iphdr)))); + IP_STATS_INC(ip.opterr); + IP_STATS_INC(ip.drop); + /* unsupported protocol feature */ + MIB2_STATS_INC(mib2.ipinunknownprotos); + return ERR_OK; +#endif /* IP_REASSEMBLY */ + } + +#if IP_OPTIONS_ALLOWED == 0 /* no support for IP options in the IP header? */ + +#if LWIP_IGMP + /* there is an extra "router alert" option in IGMP messages which we allow for but do not police */ + if ((iphdr_hlen > IP_HLEN) && (IPH_PROTO(iphdr) != IP_PROTO_IGMP)) { +#else + if (iphdr_hlen > IP_HLEN) { +#endif /* LWIP_IGMP */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since there were IP options (while IP_OPTIONS_ALLOWED == 0).\n")); + pbuf_free(p); + IP_STATS_INC(ip.opterr); + IP_STATS_INC(ip.drop); + /* unsupported protocol feature */ + MIB2_STATS_INC(mib2.ipinunknownprotos); + return ERR_OK; + } +#endif /* IP_OPTIONS_ALLOWED == 0 */ + + /* send to upper layers */ + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: \n")); + ip4_debug_print(p); + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len)); + + ip_data.current_netif = netif; + ip_data.current_input_netif = inp; + ip_data.current_ip4_header = iphdr; + ip_data.current_ip_header_tot_len = IPH_HL(iphdr) * 4; + +#if LWIP_RAW + /* raw input did not eat the packet? */ + if (raw_input(p, inp) == 0) +#endif /* LWIP_RAW */ + { + pbuf_header(p, -(s16_t)iphdr_hlen); /* Move to payload, no check necessary. 
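The pbuf_header() call above uses a negative increment to advance p->payload past the IP header before the pbuf is handed to the transport layer; the default branch of the switch below undoes this with pbuf_header_force() so ICMP can see the header again. A minimal sketch of the pointer movement (illustrative only; return-value checks omitted):

    /* assume p->payload points at a 20-byte, option-free IP header */
    pbuf_header(p, -20);  /* hide the header: payload now starts at the L4 data */
    pbuf_header(p, 20);   /* expose it again: payload points back at the header */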
*/ + + switch (IPH_PROTO(iphdr)) { +#if LWIP_UDP + case IP_PROTO_UDP: +#if LWIP_UDPLITE + case IP_PROTO_UDPLITE: +#endif /* LWIP_UDPLITE */ + MIB2_STATS_INC(mib2.ipindelivers); + udp_input(p, inp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case IP_PROTO_TCP: + MIB2_STATS_INC(mib2.ipindelivers); + tcp_input(p, inp); + break; +#endif /* LWIP_TCP */ +#if LWIP_ICMP + case IP_PROTO_ICMP: + MIB2_STATS_INC(mib2.ipindelivers); + icmp_input(p, inp); + break; +#endif /* LWIP_ICMP */ +#if LWIP_IGMP + case IP_PROTO_IGMP: + igmp_input(p, inp, ip4_current_dest_addr()); + break; +#endif /* LWIP_IGMP */ + default: +#if LWIP_ICMP + /* send ICMP destination protocol unreachable unless it was a broadcast */ + if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) && + !ip4_addr_ismulticast(ip4_current_dest_addr())) { + pbuf_header_force(p, iphdr_hlen); /* Move to ip header, no check necessary. */ + p->payload = iphdr; + icmp_dest_unreach(p, ICMP_DUR_PROTO); + } +#endif /* LWIP_ICMP */ + pbuf_free(p); + + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Unsupported transport protocol %"U16_F"\n", (u16_t)IPH_PROTO(iphdr))); + + IP_STATS_INC(ip.proterr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinunknownprotos); + } + } + + /* @todo: this is not really necessary... */ + ip_data.current_netif = NULL; + ip_data.current_input_netif = NULL; + ip_data.current_ip4_header = NULL; + ip_data.current_ip_header_tot_len = 0; + ip4_addr_set_any(ip4_current_src_addr()); + ip4_addr_set_any(ip4_current_dest_addr()); + + return ERR_OK; +} + +/** + * Sends an IP packet on a network interface. This function constructs + * the IP header and calculates the IP header checksum. If the source + * IP address is NULL, the IP address of the outgoing network + * interface is filled in as source address. + * If the destination IP address is LWIP_IP_HDRINCL, p is assumed to already + * include an IP header and p->payload points to it instead of the data. + * + * @param p the packet to send (p->payload points to the data, e.g.
next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IP/LINK headers + * returns errors returned by netif->output + * + * @note ip_id: RFC791 "some host may be able to simply use + * unique identifiers independent of destination" + */ +err_t +ip4_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, + u8_t proto, struct netif *netif) +{ +#if IP_OPTIONS_SEND + return ip4_output_if_opt(p, src, dest, ttl, tos, proto, netif, NULL, 0); +} + +/** + * Same as ip_output_if() but with the possibility to include IP options: + * + * @param ip_options pointer to the IP options, copied into the IP header + * @param optlen length of ip_options + */ +err_t +ip4_output_if_opt(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen) +{ +#endif /* IP_OPTIONS_SEND */ + const ip4_addr_t *src_used = src; + if (dest != LWIP_IP_HDRINCL) { + if (ip4_addr_isany(src)) { + src_used = netif_ip4_addr(netif); + } + } + +#if IP_OPTIONS_SEND + return ip4_output_if_opt_src(p, src_used, dest, ttl, tos, proto, netif, + ip_options, optlen); +#else /* IP_OPTIONS_SEND */ + return ip4_output_if_src(p, src_used, dest, ttl, tos, proto, netif); +#endif /* IP_OPTIONS_SEND */ +} + +/** + * Same as ip_output_if() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip4_output_if_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, + u8_t proto, struct netif *netif) +{ +#if IP_OPTIONS_SEND + return ip4_output_if_opt_src(p, src, dest, ttl, tos, proto, netif, NULL, 0); +} + +/** + * Same as ip_output_if_opt() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip4_output_if_opt_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen) +{ +#endif /* IP_OPTIONS_SEND */ + struct ip_hdr *iphdr; + ip4_addr_t dest_addr; +#if CHECKSUM_GEN_IP_INLINE + u32_t chk_sum = 0; +#endif /* CHECKSUM_GEN_IP_INLINE */ + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + MIB2_STATS_INC(mib2.ipoutrequests); + + /* Should the IP header be generated or is it already included in p?
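The two call patterns side by side (a sketch under assumed setup; p, dest_ip and netif are prepared by a hypothetical caller and are not part of this diff):

    /* normal mode: the stack prepends and fills in the IP header */
    ip4_output_if(p, IP4_ADDR_ANY4, &dest_ip, 64, 0, IP_PROTO_UDP, netif);

    /* raw mode: p->payload already holds a finished IP header */
    ip4_output_if(p, NULL, LWIP_IP_HDRINCL, 0, 0, 0, netif);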
*/ + if (dest != LWIP_IP_HDRINCL) { + u16_t ip_hlen = IP_HLEN; +#if IP_OPTIONS_SEND + u16_t optlen_aligned = 0; + if (optlen != 0) { +#if CHECKSUM_GEN_IP_INLINE + int i; +#endif /* CHECKSUM_GEN_IP_INLINE */ + /* round up to a multiple of 4 */ + optlen_aligned = ((optlen + 3) & ~3); + ip_hlen += optlen_aligned; + /* First write in the IP options */ + if (pbuf_header(p, optlen_aligned)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output_if_opt: not enough room for IP options in pbuf\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + MEMCPY(p->payload, ip_options, optlen); + if (optlen < optlen_aligned) { + /* zero the remaining bytes */ + memset(((char*)p->payload) + optlen, 0, optlen_aligned - optlen); + } +#if CHECKSUM_GEN_IP_INLINE + for (i = 0; i < optlen_aligned/2; i++) { + chk_sum += ((u16_t*)p->payload)[i]; + } +#endif /* CHECKSUM_GEN_IP_INLINE */ + } +#endif /* IP_OPTIONS_SEND */ + /* generate IP header */ + if (pbuf_header(p, IP_HLEN)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output: not enough room for IP header in pbuf\n")); + + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + + iphdr = (struct ip_hdr *)p->payload; + LWIP_ASSERT("check that first pbuf can hold struct ip_hdr", + (p->len >= sizeof(struct ip_hdr))); + + IPH_TTL_SET(iphdr, ttl); + IPH_PROTO_SET(iphdr, proto); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += PP_NTOHS(proto | (ttl << 8)); +#endif /* CHECKSUM_GEN_IP_INLINE */ + + /* dest cannot be NULL here */ + ip4_addr_copy(iphdr->dest, *dest); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += ip4_addr_get_u32(&iphdr->dest) & 0xFFFF; + chk_sum += ip4_addr_get_u32(&iphdr->dest) >> 16; +#endif /* CHECKSUM_GEN_IP_INLINE */ + + IPH_VHL_SET(iphdr, 4, ip_hlen / 4); + IPH_TOS_SET(iphdr, tos); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += PP_NTOHS(tos | (iphdr->_v_hl << 8)); +#endif /* CHECKSUM_GEN_IP_INLINE */ + IPH_LEN_SET(iphdr, lwip_htons(p->tot_len)); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += iphdr->_len; +#endif /* CHECKSUM_GEN_IP_INLINE */ + IPH_OFFSET_SET(iphdr, 0); + IPH_ID_SET(iphdr, lwip_htons(ip_id)); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += iphdr->_id; +#endif /* CHECKSUM_GEN_IP_INLINE */ + ++ip_id; + + if (src == NULL) { + ip4_addr_copy(iphdr->src, *IP4_ADDR_ANY4); + } else { + /* src cannot be NULL here */ + ip4_addr_copy(iphdr->src, *src); + } + +#if CHECKSUM_GEN_IP_INLINE + chk_sum += ip4_addr_get_u32(&iphdr->src) & 0xFFFF; + chk_sum += ip4_addr_get_u32(&iphdr->src) >> 16; + chk_sum = (chk_sum >> 16) + (chk_sum & 0xFFFF); + chk_sum = (chk_sum >> 16) + chk_sum; + chk_sum = ~chk_sum; + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + iphdr->_chksum = (u16_t)chk_sum; /* network order */ + } +#if LWIP_CHECKSUM_CTRL_PER_NETIF + else { + IPH_CHKSUM_SET(iphdr, 0); + } +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF*/ +#else /* CHECKSUM_GEN_IP_INLINE */ + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, ip_hlen)); + } +#endif /* CHECKSUM_GEN_IP */ +#endif /* CHECKSUM_GEN_IP_INLINE */ + } else { + /* IP header already included in p */ + iphdr = (struct ip_hdr *)p->payload; + ip4_addr_copy(dest_addr, iphdr->dest); + dest = &dest_addr; + } + + IP_STATS_INC(ip.xmit); + + LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num)); + ip4_debug_print(p); + +#if ENABLE_LOOPBACK + if (ip4_addr_cmp(dest, netif_ip4_addr(netif)) +#if 
!LWIP_HAVE_LOOPIF + || ip4_addr_isloopback(dest) +#endif /* !LWIP_HAVE_LOOPIF */ + ) { + /* Packet to self, enqueue it for loopback */ + LWIP_DEBUGF(IP_DEBUG, ("netif_loop_output()")); + return netif_loop_output(netif, p); + } +#if LWIP_MULTICAST_TX_OPTIONS + if ((p->flags & PBUF_FLAG_MCASTLOOP) != 0) { + netif_loop_output(netif, p); + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ +#endif /* ENABLE_LOOPBACK */ +#if IP_FRAG + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif->mtu && (p->tot_len > netif->mtu)) { + return ip4_frag(p, netif, dest); + } +#endif /* IP_FRAG */ + + LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: call netif->output()\n")); + return netif->output(netif, p, dest); +} + +/** + * Simple interface to ip_output_if. It finds the outgoing network + * interface and calls upon ip_output_if to do the actual work. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip4_output(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto) +{ + struct netif *netif; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if ((netif = ip4_route_src(dest, src)) == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + return ERR_RTE; + } + + return ip4_output_if(p, src, dest, ttl, tos, proto, netif); +} + +#if LWIP_NETIF_HWADDRHINT +/** Like ip_output, but takes an addr_hint pointer that is passed on to netif->addr_hint + * before calling ip_output_if. + * + * @param p the packet to send (p->payload points to the data, e.g.
next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * @param addr_hint address hint pointer set to netif->addr_hint before + * calling ip_output_if() + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip4_output_hinted(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, u8_t *addr_hint) +{ + struct netif *netif; + err_t err; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if ((netif = ip4_route_src(dest, src)) == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + return ERR_RTE; + } + + NETIF_SET_HWADDRHINT(netif, addr_hint); + err = ip4_output_if(p, src, dest, ttl, tos, proto, netif); + NETIF_SET_HWADDRHINT(netif, NULL); + + return err; +} +#endif /* LWIP_NETIF_HWADDRHINT*/ + +#if IP_DEBUG +/* Print an IP header by using LWIP_DEBUGF + * @param p an IP packet, p->payload pointing to the IP header + */ +void +ip4_debug_print(struct pbuf *p) +{ + struct ip_hdr *iphdr = (struct ip_hdr *)p->payload; + + LWIP_DEBUGF(IP_DEBUG, ("IP header:\n")); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("|%2"S16_F" |%2"S16_F" | 0x%02"X16_F" | %5"U16_F" | (v, hl, tos, len)\n", + (u16_t)IPH_V(iphdr), + (u16_t)IPH_HL(iphdr), + (u16_t)IPH_TOS(iphdr), + lwip_ntohs(IPH_LEN(iphdr)))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %5"U16_F" |%"U16_F"%"U16_F"%"U16_F"| %4"U16_F" | (id, flags, offset)\n", + lwip_ntohs(IPH_ID(iphdr)), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 15 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 14 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 13 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | 0x%04"X16_F" | (ttl, proto, chksum)\n", + (u16_t)IPH_TTL(iphdr), + (u16_t)IPH_PROTO(iphdr), + lwip_ntohs(IPH_CHKSUM(iphdr)))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (src)\n", + ip4_addr1_16(&iphdr->src), + ip4_addr2_16(&iphdr->src), + ip4_addr3_16(&iphdr->src), + ip4_addr4_16(&iphdr->src))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (dest)\n", + ip4_addr1_16(&iphdr->dest), + ip4_addr2_16(&iphdr->dest), + ip4_addr3_16(&iphdr->dest), + ip4_addr4_16(&iphdr->dest))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); +} +#endif /* IP_DEBUG */ + +#endif /* LWIP_IPV4 */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c new file mode 100644 index 000000000..40e9ffacf --- /dev/null +++ 
b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c @@ -0,0 +1,331 @@ +/** + * @file + * This is the IPv4 address tools implementation. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip_addr.h" +#include "lwip/netif.h" + +/* used by IP4_ADDR_ANY and IP_ADDR_BROADCAST in ip_addr.h */ +const ip_addr_t ip_addr_any = IPADDR4_INIT(IPADDR_ANY); +const ip_addr_t ip_addr_broadcast = IPADDR4_INIT(IPADDR_BROADCAST); + +/** + * Determine if an address is a broadcast address on a network interface + * + * @param addr address to be checked + * @param netif the network interface against which the address is checked + * @return returns non-zero if the address is a broadcast address + */ +u8_t +ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif) +{ + ip4_addr_t ipaddr; + ip4_addr_set_u32(&ipaddr, addr); + + /* all ones (broadcast) or all zeroes (old skool broadcast) */ + if ((~addr == IPADDR_ANY) || + (addr == IPADDR_ANY)) { + return 1; + /* no broadcast support on this network interface? */ + } else if ((netif->flags & NETIF_FLAG_BROADCAST) == 0) { + /* the given address cannot be a broadcast address + * nor can we check against any broadcast addresses */ + return 0; + /* address matches network interface address exactly? => no broadcast */ + } else if (addr == ip4_addr_get_u32(netif_ip4_addr(netif))) { + return 0; + /* on the same (sub) network... */ + } else if (ip4_addr_netcmp(&ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif)) + /* ...and host identifier bits are all ones? =>... */ + && ((addr & ~ip4_addr_get_u32(netif_ip4_netmask(netif))) == + (IPADDR_BROADCAST & ~ip4_addr_get_u32(netif_ip4_netmask(netif))))) { + /* => network broadcast address */ + return 1; + } else { + return 0; + } +} + +/** Checks if a netmask is valid (starting with ones, then only zeros) + * + * @param netmask the IPv4 netmask to check (in network byte order!) 
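Concretely, the loop below accepts only masks whose binary form is an unbroken run of ones followed by zeros, for example (values illustrative, not from this diff):

    ip4_addr_t m;
    IP4_ADDR(&m, 255, 255, 240, 0);  /* 11111111 11111111 11110000 00000000 */
    u8_t ok = ip4_addr_netmask_valid(ip4_addr_get_u32(&m));   /* 1: contiguous */
    IP4_ADDR(&m, 255, 0, 255, 0);
    u8_t bad = ip4_addr_netmask_valid(ip4_addr_get_u32(&m));  /* 0: a one after the first zero */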
+ * @return 1 if the netmask is valid, 0 if it is not + */ +u8_t +ip4_addr_netmask_valid(u32_t netmask) +{ + u32_t mask; + u32_t nm_hostorder = lwip_htonl(netmask); + + /* first, check for the first zero */ + for (mask = 1UL << 31 ; mask != 0; mask >>= 1) { + if ((nm_hostorder & mask) == 0) { + break; + } + } + /* then check that there is no one */ + for (; mask != 0; mask >>= 1) { + if ((nm_hostorder & mask) != 0) { + /* there is a one after the first zero -> invalid */ + return 0; + } + } + /* no one after the first zero -> valid */ + return 1; +} + +/* Here for now until needed in other places in lwIP */ +#ifndef isprint +#define in_range(c, lo, up) ((u8_t)c >= lo && (u8_t)c <= up) +#define isprint(c) in_range(c, 0x20, 0x7f) +#define isdigit(c) in_range(c, '0', '9') +#define isxdigit(c) (isdigit(c) || in_range(c, 'a', 'f') || in_range(c, 'A', 'F')) +#define islower(c) in_range(c, 'a', 'z') +#define isspace(c) (c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t' || c == '\v') +#endif + +/** + * Ascii internet address interpretation routine. + * The value returned is in network order. + * + * @param cp IP address in ascii representation (e.g. "127.0.0.1") + * @return ip address in network order + */ +u32_t +ipaddr_addr(const char *cp) +{ + ip4_addr_t val; + + if (ip4addr_aton(cp, &val)) { + return ip4_addr_get_u32(&val); + } + return (IPADDR_NONE); +} + +/** + * Check whether "cp" is a valid ascii representation + * of an Internet address and convert to a binary address. + * Returns 1 if the address is valid, 0 if not. + * This replaces inet_addr, the return value from which + * cannot distinguish between failure and a local broadcast address. + * + * @param cp IP address in ascii representation (e.g. "127.0.0.1") + * @param addr pointer to which to save the ip address in network order + * @return 1 if cp could be converted to addr, 0 on failure + */ +int +ip4addr_aton(const char *cp, ip4_addr_t *addr) +{ + u32_t val; + u8_t base; + char c; + u32_t parts[4]; + u32_t *pp = parts; + + c = *cp; + for (;;) { + /* + * Collect number up to ``.''. + * Values are specified as for C: + * 0x=hex, 0=octal, 1-9=decimal. + */ + if (!isdigit(c)) { + return 0; + } + val = 0; + base = 10; + if (c == '0') { + c = *++cp; + if (c == 'x' || c == 'X') { + base = 16; + c = *++cp; + } else { + base = 8; + } + } + for (;;) { + if (isdigit(c)) { + val = (val * base) + (u32_t)(c - '0'); + c = *++cp; + } else if (base == 16 && isxdigit(c)) { + val = (val << 4) | (u32_t)(c + 10 - (islower(c) ? 'a' : 'A')); + c = *++cp; + } else { + break; + } + } + if (c == '.') { + /* + * Internet format: + * a.b.c.d + * a.b.c (with c treated as 16 bits) + * a.b (with b treated as 24 bits) + */ + if (pp >= parts + 3) { + return 0; + } + *pp++ = val; + c = *++cp; + } else { + break; + } + } + /* + * Check for trailing characters. + */ + if (c != '\0' && !isspace(c)) { + return 0; + } + /* + * Concoct the address according to + * the number of parts specified. 
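The switch that follows implements the classic inet_aton() forms; a few inputs and the addresses they produce (illustrative only):

    ip4_addr_t a;
    ip4addr_aton("127.0.0.1", &a);   /* 4 parts: 8.8.8.8 bits                */
    ip4addr_aton("10.1", &a);        /* 2 parts: 10.0.0.1 (b fills 24 bits)  */
    ip4addr_aton("0x7f000001", &a);  /* 1 part: 127.0.0.1 (full 32-bit hex)  */
    ip4addr_aton("010.0.0.1", &a);   /* leading zero means octal: 8.0.0.1    */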
+ */ + switch (pp - parts + 1) { + + case 0: + return 0; /* initial nondigit */ + + case 1: /* a -- 32 bits */ + break; + + case 2: /* a.b -- 8.24 bits */ + if (val > 0xffffffUL) { + return 0; + } + if (parts[0] > 0xff) { + return 0; + } + val |= parts[0] << 24; + break; + + case 3: /* a.b.c -- 8.8.16 bits */ + if (val > 0xffff) { + return 0; + } + if ((parts[0] > 0xff) || (parts[1] > 0xff)) { + return 0; + } + val |= (parts[0] << 24) | (parts[1] << 16); + break; + + case 4: /* a.b.c.d -- 8.8.8.8 bits */ + if (val > 0xff) { + return 0; + } + if ((parts[0] > 0xff) || (parts[1] > 0xff) || (parts[2] > 0xff)) { + return 0; + } + val |= (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8); + break; + default: + LWIP_ASSERT("unhandled", 0); + break; + } + if (addr) { + ip4_addr_set_u32(addr, lwip_htonl(val)); + } + return 1; +} + +/** + * Convert numeric IP address into decimal dotted ASCII representation. + * returns ptr to static buffer; not reentrant! + * + * @param addr ip address in network order to convert + * @return pointer to a global static (!) buffer that holds the ASCII + * representation of addr + */ +char* +ip4addr_ntoa(const ip4_addr_t *addr) +{ + static char str[IP4ADDR_STRLEN_MAX]; + return ip4addr_ntoa_r(addr, str, IP4ADDR_STRLEN_MAX); +} + +/** + * Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char* +ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen) +{ + u32_t s_addr; + char inv[3]; + char *rp; + u8_t *ap; + u8_t rem; + u8_t n; + u8_t i; + int len = 0; + + s_addr = ip4_addr_get_u32(addr); + + rp = buf; + ap = (u8_t *)&s_addr; + for (n = 0; n < 4; n++) { + i = 0; + do { + rem = *ap % (u8_t)10; + *ap /= (u8_t)10; + inv[i++] = (char)('0' + rem); + } while (*ap); + while (i--) { + if (len++ >= buflen) { + return NULL; + } + *rp++ = inv[i]; + } + if (len++ >= buflen) { + return NULL; + } + *rp++ = '.'; + ap++; + } + *--rp = 0; + return buf; +} + +#endif /* LWIP_IPV4 */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c new file mode 100644 index 000000000..996c8befe --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c @@ -0,0 +1,864 @@ +/** + * @file + * This is the IPv4 packet segmentation and reassembly implementation. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Jani Monoses + * Simon Goldschmidt + * original reassembly code by Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip4_frag.h" +#include "lwip/def.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/stats.h" +#include "lwip/icmp.h" + +#include <string.h> + +#if IP_REASSEMBLY +/** + * The IP reassembly code currently has the following limitations: + * - IP header options are not supported + * - fragments must not overlap (e.g. due to different routes), + * currently, overlapping or duplicate fragments are thrown away + * if IP_REASS_CHECK_OVERLAP=1 (the default)! + * + * @todo: work with IP header options + */ + +/** Setting this to 0, you can turn off checking the fragments for overlapping + * regions. The code gets a little smaller. Only use this if you know that + * overlapping won't occur on your network! */ +#ifndef IP_REASS_CHECK_OVERLAP +#define IP_REASS_CHECK_OVERLAP 1 +#endif /* IP_REASS_CHECK_OVERLAP */ + +/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is + * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller. + * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA + * is set to 1, so one datagram can be reassembled at a time, only. */ +#ifndef IP_REASS_FREE_OLDEST +#define IP_REASS_FREE_OLDEST 1 +#endif /* IP_REASS_FREE_OLDEST */ + +#define IP_REASS_FLAG_LASTFRAG 0x01 + +#define IP_REASS_VALIDATE_TELEGRAM_FINISHED 1 +#define IP_REASS_VALIDATE_PBUF_QUEUED 0 +#define IP_REASS_VALIDATE_PBUF_DROPPED -1 + +/** This is a helper struct which holds the starting + * offset and the ending offset of this fragment to + * easily chain the fragments. + * It has the same packing requirements as the IP header, since it replaces + * the IP header in memory in incoming fragments (after copying it) to keep + * track of the various fragments. (-> If the IP header doesn't need packing, + * this struct doesn't need packing, either.) + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip_reass_helper { + PACK_STRUCT_FIELD(struct pbuf *next_pbuf); + PACK_STRUCT_FIELD(u16_t start); + PACK_STRUCT_FIELD(u16_t end); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define IP_ADDRESSES_AND_ID_MATCH(iphdrA, iphdrB) \ + (ip4_addr_cmp(&(iphdrA)->src, &(iphdrB)->src) && \ + ip4_addr_cmp(&(iphdrA)->dest, &(iphdrB)->dest) && \ + IPH_ID(iphdrA) == IPH_ID(iphdrB)) ?
1 : 0 + +/* global variables */ +static struct ip_reassdata *reassdatagrams; +static u16_t ip_reass_pbufcount; + +/* function prototypes */ +static void ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev); +static int ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev); + +/** + * Reassembly timer base function + * for both NO_SYS == 0 and 1 (!). + * + * Should be called every 1000 msec (defined by IP_TMR_INTERVAL). + */ +void +ip_reass_tmr(void) +{ + struct ip_reassdata *r, *prev = NULL; + + r = reassdatagrams; + while (r != NULL) { + /* Decrement the timer. Once it reaches 0, + * clean up the incomplete fragment assembly */ + if (r->timer > 0) { + r->timer--; + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer dec %"U16_F"\n",(u16_t)r->timer)); + prev = r; + r = r->next; + } else { + /* reassembly timed out */ + struct ip_reassdata *tmp; + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer timed out\n")); + tmp = r; + /* get the next pointer before freeing */ + r = r->next; + /* free the helper struct and all enqueued pbufs */ + ip_reass_free_complete_datagram(tmp, prev); + } + } +} + +/** + * Free a datagram (struct ip_reassdata) and all its pbufs. + * Updates the total count of enqueued pbufs (ip_reass_pbufcount), + * SNMP counters and sends an ICMP time exceeded packet. + * + * @param ipr datagram to free + * @param prev the previous datagram in the linked list + * @return the number of pbufs freed + */ +static int +ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev) +{ + u16_t pbufs_freed = 0; + u16_t clen; + struct pbuf *p; + struct ip_reass_helper *iprh; + + LWIP_ASSERT("prev != ipr", prev != ipr); + if (prev != NULL) { + LWIP_ASSERT("prev->next == ipr", prev->next == ipr); + } + + MIB2_STATS_INC(mib2.ipreasmfails); +#if LWIP_ICMP + iprh = (struct ip_reass_helper *)ipr->p->payload; + if (iprh->start == 0) { + /* The first fragment was received, send ICMP time exceeded. */ + /* First, de-queue the first pbuf from r->p. */ + p = ipr->p; + ipr->p = iprh->next_pbuf; + /* Then, copy the original header into it. */ + SMEMCPY(p->payload, &ipr->iphdr, IP_HLEN); + icmp_time_exceeded(p, ICMP_TE_FRAG); + clen = pbuf_clen(p); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed += clen; + pbuf_free(p); + } +#endif /* LWIP_ICMP */ + + /* First, free all received pbufs. The individual pbufs need to be released + separately as they have not yet been chained */ + p = ipr->p; + while (p != NULL) { + struct pbuf *pcur; + iprh = (struct ip_reass_helper *)p->payload; + pcur = p; + /* get the next pointer before freeing */ + p = iprh->next_pbuf; + clen = pbuf_clen(pcur); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed += clen; + pbuf_free(pcur); + } + /* Then, unchain the struct ip_reassdata from the list and free it. */ + ip_reass_dequeue_datagram(ipr, prev); + LWIP_ASSERT("ip_reass_pbufcount >= clen", ip_reass_pbufcount >= pbufs_freed); + ip_reass_pbufcount -= pbufs_freed; + + return pbufs_freed; +} + +#if IP_REASS_FREE_OLDEST +/** + * Free the oldest datagram to make room for enqueueing new fragments. + * The datagram 'fraghdr' belongs to is not freed! 
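How the ip_reass_tmr() above is typically driven: one call per second, matching IP_TMR_INTERVAL. A sketch for a bare-metal main loop (the hook name is hypothetical; builds using lwIP's regular timeout machinery get this wired up automatically):

    /* hypothetical 1 Hz hook called from the application's main loop */
    void app_one_second_tick(void)
    {
        ip_reass_tmr();  /* ages partial datagrams, frees them when their timer reaches 0 */
    }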
+ * + * @param fraghdr IP header of the current fragment + * @param pbufs_needed number of pbufs needed to enqueue + * (used for freeing other datagrams if not enough space) + * @return the number of pbufs freed + */ +static int +ip_reass_remove_oldest_datagram(struct ip_hdr *fraghdr, int pbufs_needed) +{ + /* @todo Can't we simply remove the last datagram in the + * linked list behind reassdatagrams? + */ + struct ip_reassdata *r, *oldest, *prev, *oldest_prev; + int pbufs_freed = 0, pbufs_freed_current; + int other_datagrams; + + /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs, + * but don't free the datagram that 'fraghdr' belongs to! */ + do { + oldest = NULL; + prev = NULL; + oldest_prev = NULL; + other_datagrams = 0; + r = reassdatagrams; + while (r != NULL) { + if (!IP_ADDRESSES_AND_ID_MATCH(&r->iphdr, fraghdr)) { + /* Not the same datagram as fraghdr */ + other_datagrams++; + if (oldest == NULL) { + oldest = r; + oldest_prev = prev; + } else if (r->timer <= oldest->timer) { + /* older than the previous oldest */ + oldest = r; + oldest_prev = prev; + } + } + if (r->next != NULL) { + prev = r; + } + r = r->next; + } + if (oldest != NULL) { + pbufs_freed_current = ip_reass_free_complete_datagram(oldest, oldest_prev); + pbufs_freed += pbufs_freed_current; + } + } while ((pbufs_freed < pbufs_needed) && (other_datagrams > 1)); + return pbufs_freed; +} +#endif /* IP_REASS_FREE_OLDEST */ + +/** + * Enqueues a new fragment into the fragment queue + * @param fraghdr points to the new fragments IP hdr + * @param clen number of pbufs needed to enqueue (used for freeing other datagrams if not enough space) + * @return A pointer to the queue location into which the fragment was enqueued + */ +static struct ip_reassdata* +ip_reass_enqueue_new_datagram(struct ip_hdr *fraghdr, int clen) +{ + struct ip_reassdata* ipr; +#if ! IP_REASS_FREE_OLDEST + LWIP_UNUSED_ARG(clen); +#endif + + /* No matching previous fragment found, allocate a new reassdata struct */ + ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA); + if (ipr == NULL) { +#if IP_REASS_FREE_OLDEST + if (ip_reass_remove_oldest_datagram(fraghdr, clen) >= clen) { + ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA); + } + if (ipr == NULL) +#endif /* IP_REASS_FREE_OLDEST */ + { + IPFRAG_STATS_INC(ip_frag.memerr); + LWIP_DEBUGF(IP_REASS_DEBUG,("Failed to alloc reassdata struct\n")); + return NULL; + } + } + memset(ipr, 0, sizeof(struct ip_reassdata)); + ipr->timer = IP_REASS_MAXAGE; + + /* enqueue the new structure to the front of the list */ + ipr->next = reassdatagrams; + reassdatagrams = ipr; + /* copy the ip header for later tests and input */ + /* @todo: no ip options supported? */ + SMEMCPY(&(ipr->iphdr), fraghdr, IP_HLEN); + return ipr; +} + +/** + * Dequeues a datagram from the datagram queue. Doesn't deallocate the pbufs. + * @param ipr points to the queue entry to dequeue + */ +static void +ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev) +{ + /* dequeue the reass struct */ + if (reassdatagrams == ipr) { + /* it was the first in the list */ + reassdatagrams = ipr->next; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", prev != NULL); + prev->next = ipr->next; + } + + /* now we can free the ip_reassdata struct */ + memp_free(MEMP_REASSDATA, ipr); +} + +/** + * Chain a new pbuf into the pbuf list that composes the datagram. The pbuf list + * will grow over time as new pbufs are rx. 
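For reference while reading the chaining logic below: each fragment's byte range within the datagram is derived from its header before that header is overwritten by struct ip_reass_helper. This restates the code that follows and adds no new behavior:

    /* the offset field counts 8-byte units; payload length excludes the header */
    u16_t start = (lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8;
    u16_t len   = lwip_ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4;
    u16_t end   = start + len;   /* fragment covers [start, end) of the payload */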
+ * Also checks that the datagram passes basic continuity checks (if the last + * fragment was received at least once). + * @param ipr points to the reassembly state + * @param new_p points to the pbuf for the current fragment + * @param is_last is 1 if this pbuf has MF==0 (ipr->flags not updated yet) + * @return see IP_REASS_VALIDATE_* defines + */ +static int +ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p, int is_last) +{ + struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL; + struct pbuf *q; + u16_t offset, len; + struct ip_hdr *fraghdr; + int valid = 1; + + /* Extract length and fragment offset from current fragment */ + fraghdr = (struct ip_hdr*)new_p->payload; + len = lwip_ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4; + offset = (lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8; + + /* overwrite the fragment's ip header from the pbuf with our helper struct, + * and setup the embedded helper structure. */ + /* make sure the struct ip_reass_helper fits into the IP header */ + LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN", + sizeof(struct ip_reass_helper) <= IP_HLEN); + iprh = (struct ip_reass_helper*)new_p->payload; + iprh->next_pbuf = NULL; + iprh->start = offset; + iprh->end = offset + len; + + /* Iterate through until we either get to the end of the list (append), + * or we find one with a larger offset (insert). */ + for (q = ipr->p; q != NULL;) { + iprh_tmp = (struct ip_reass_helper*)q->payload; + if (iprh->start < iprh_tmp->start) { + /* the new pbuf should be inserted before this */ + iprh->next_pbuf = q; + if (iprh_prev != NULL) { + /* not the fragment with the lowest offset */ +#if IP_REASS_CHECK_OVERLAP + if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) { + /* fragment overlaps with previous or following, throw away */ + goto freepbuf; + } +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = new_p; + if (iprh_prev->end != iprh->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + if (iprh->end > iprh_tmp->start) { + /* fragment overlaps with following, throw away */ + goto freepbuf; + } +#endif /* IP_REASS_CHECK_OVERLAP */ + /* fragment with the lowest offset */ + ipr->p = new_p; + } + break; + } else if (iprh->start == iprh_tmp->start) { + /* received the same datagram twice: no need to keep the datagram */ + goto freepbuf; +#if IP_REASS_CHECK_OVERLAP + } else if (iprh->start < iprh_tmp->end) { + /* overlap: no need to keep the new datagram */ + goto freepbuf; +#endif /* IP_REASS_CHECK_OVERLAP */ + } else { + /* Check if the fragments received so far have no holes. */ + if (iprh_prev != NULL) { + if (iprh_prev->end != iprh_tmp->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } + } + q = iprh_tmp->next_pbuf; + iprh_prev = iprh_tmp; + } + + /* If q is NULL, then we made it to the end of the list. 
Determine what to do now */ + if (q == NULL) { + if (iprh_prev != NULL) { + /* this is (for now), the fragment with the highest offset: + * chain it to the last fragment */ +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start); +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = new_p; + if (iprh_prev->end != iprh->start) { + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("no previous fragment, this must be the first fragment!", + ipr->p == NULL); +#endif /* IP_REASS_CHECK_OVERLAP */ + /* this is the first fragment we ever received for this ip datagram */ + ipr->p = new_p; + } + } + + /* At this point, the validation part begins: */ + /* If we already received the last fragment */ + if (is_last || ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0)) { + /* and had no holes so far */ + if (valid) { + /* then check if the rest of the fragments are here */ + /* Check if the queue starts with the first datagram */ + if ((ipr->p == NULL) || (((struct ip_reass_helper*)ipr->p->payload)->start != 0)) { + valid = 0; + } else { + /* and check that there are no holes after this datagram */ + iprh_prev = iprh; + q = iprh->next_pbuf; + while (q != NULL) { + iprh = (struct ip_reass_helper*)q->payload; + if (iprh_prev->end != iprh->start) { + valid = 0; + break; + } + iprh_prev = iprh; + q = iprh->next_pbuf; + } + /* if still valid, all fragments are received + * (because the fragment with MF==0 has already arrived) */ + if (valid) { + LWIP_ASSERT("sanity check", ipr->p != NULL); + LWIP_ASSERT("sanity check", + ((struct ip_reass_helper*)ipr->p->payload) != iprh); + LWIP_ASSERT("validate_datagram:next_pbuf!=NULL", + iprh->next_pbuf == NULL); + } + } + } + /* If valid is 0 here, there are some fragments missing in the middle + * (since MF == 0 has already arrived). Such datagrams simply time out if + * no more fragments are received... */ + return valid ? IP_REASS_VALIDATE_TELEGRAM_FINISHED : IP_REASS_VALIDATE_PBUF_QUEUED; + } + /* If we come here, not all fragments were received, yet! */ + return IP_REASS_VALIDATE_PBUF_QUEUED; /* not yet valid! */ +#if IP_REASS_CHECK_OVERLAP +freepbuf: + ip_reass_pbufcount -= pbuf_clen(new_p); + pbuf_free(new_p); + return IP_REASS_VALIDATE_PBUF_DROPPED; +#endif /* IP_REASS_CHECK_OVERLAP */ +} + +/** + * Reassembles incoming IP fragments into an IP datagram. + * + * @param p points to a pbuf chain of the fragment + * @return NULL if reassembly is incomplete, a pbuf holding the reassembled datagram otherwise + */ +struct pbuf * +ip4_reass(struct pbuf *p) +{ + struct pbuf *r; + struct ip_hdr *fraghdr; + struct ip_reassdata *ipr; + struct ip_reass_helper *iprh; + u16_t offset, len, clen; + int valid; + int is_last; + + IPFRAG_STATS_INC(ip_frag.recv); + MIB2_STATS_INC(mib2.ipreasmreqds); + + fraghdr = (struct ip_hdr*)p->payload; + + if ((IPH_HL(fraghdr) * 4) != IP_HLEN) { + LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: IP options currently not supported!\n")); + IPFRAG_STATS_INC(ip_frag.err); + goto nullreturn; + } + + offset = (lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8; + len = lwip_ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4; + + /* Check if we are allowed to enqueue more datagrams.
*/ + clen = pbuf_clen(p); + if ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) { +#if IP_REASS_FREE_OLDEST + if (!ip_reass_remove_oldest_datagram(fraghdr, clen) || + ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS)) +#endif /* IP_REASS_FREE_OLDEST */ + { + /* No datagram could be freed and still too many pbufs enqueued */ + LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: Overflow condition: pbufct=%d, clen=%d, MAX=%d\n", + ip_reass_pbufcount, clen, IP_REASS_MAX_PBUFS)); + IPFRAG_STATS_INC(ip_frag.memerr); + /* @todo: send ICMP time exceeded here? */ + /* drop this pbuf */ + goto nullreturn; + } + } + + /* Look for the datagram the fragment belongs to in the current datagram queue, + * remembering the previous in the queue for later dequeueing. */ + for (ipr = reassdatagrams; ipr != NULL; ipr = ipr->next) { + /* Check if the incoming fragment matches the one currently present + in the reassembly buffer. If so, we proceed with copying the + fragment into the buffer. */ + if (IP_ADDRESSES_AND_ID_MATCH(&ipr->iphdr, fraghdr)) { + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: matching previous fragment ID=%"X16_F"\n", + lwip_ntohs(IPH_ID(fraghdr)))); + IPFRAG_STATS_INC(ip_frag.cachehit); + break; + } + } + + if (ipr == NULL) { + /* Enqueue a new datagram into the datagram queue */ + ipr = ip_reass_enqueue_new_datagram(fraghdr, clen); + /* Bail if unable to enqueue */ + if (ipr == NULL) { + goto nullreturn; + } + } else { + if (((lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) == 0) && + ((lwip_ntohs(IPH_OFFSET(&ipr->iphdr)) & IP_OFFMASK) != 0)) { + /* ipr->iphdr is not the header from the first fragment, but fraghdr is + * -> copy fraghdr into ipr->iphdr since we want to have the header + * of the first fragment (for ICMP time exceeded and later, for copying + * all options, if supported)*/ + SMEMCPY(&ipr->iphdr, fraghdr, IP_HLEN); + } + } + + /* At this point, we have either created a new entry or pointing + * to an existing one */ + + /* check for 'no more fragments', and update queue entry*/ + is_last = (IPH_OFFSET(fraghdr) & PP_NTOHS(IP_MF)) == 0; + if (is_last) { + u16_t datagram_len = (u16_t)(offset + len); + if ((datagram_len < offset) || (datagram_len > (0xFFFF - IP_HLEN))) { + /* u16_t overflow, cannot handle this */ + goto nullreturn; + } + } + /* find the right place to insert this pbuf */ + /* @todo: trim pbufs if fragments are overlapping */ + valid = ip_reass_chain_frag_into_datagram_and_validate(ipr, p, is_last); + if (valid == IP_REASS_VALIDATE_PBUF_DROPPED) { + goto nullreturn; + } + /* if we come here, the pbuf has been enqueued */ + + /* Track the current number of pbufs current 'in-flight', in order to limit + the number of fragments that may be enqueued at any one time + (overflow checked by testing against IP_REASS_MAX_PBUFS) */ + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount + clen); + if (is_last) { + u16_t datagram_len = (u16_t)(offset + len); + ipr->datagram_len = datagram_len; + ipr->flags |= IP_REASS_FLAG_LASTFRAG; + LWIP_DEBUGF(IP_REASS_DEBUG, + ("ip4_reass: last fragment seen, total len %"S16_F"\n", + ipr->datagram_len)); + } + + if (valid == IP_REASS_VALIDATE_TELEGRAM_FINISHED) { + struct ip_reassdata *ipr_prev; + /* the totally last fragment (flag more fragments = 0) was received at least + * once AND all fragments are received */ + ipr->datagram_len += IP_HLEN; + + /* save the second pbuf before copying the header over the pointer */ + r = ((struct ip_reass_helper*)ipr->p->payload)->next_pbuf; + + /* copy the original ip header back to the first pbuf */ + fraghdr = 
(struct ip_hdr*)(ipr->p->payload);
+    SMEMCPY(fraghdr, &ipr->iphdr, IP_HLEN);
+    IPH_LEN_SET(fraghdr, lwip_htons(ipr->datagram_len));
+    IPH_OFFSET_SET(fraghdr, 0);
+    IPH_CHKSUM_SET(fraghdr, 0);
+    /* @todo: do we need to set/calculate the correct checksum? */
+#if CHECKSUM_GEN_IP
+    IF__NETIF_CHECKSUM_ENABLED(ip_current_input_netif(), NETIF_CHECKSUM_GEN_IP) {
+      IPH_CHKSUM_SET(fraghdr, inet_chksum(fraghdr, IP_HLEN));
+    }
+#endif /* CHECKSUM_GEN_IP */
+
+    p = ipr->p;
+
+    /* chain together the pbufs contained within the reass_data list. */
+    while (r != NULL) {
+      iprh = (struct ip_reass_helper*)r->payload;
+
+      /* hide the ip header for every succeeding fragment */
+      pbuf_header(r, -IP_HLEN);
+      pbuf_cat(p, r);
+      r = iprh->next_pbuf;
+    }
+
+    /* find the previous entry in the linked list */
+    if (ipr == reassdatagrams) {
+      ipr_prev = NULL;
+    } else {
+      for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) {
+        if (ipr_prev->next == ipr) {
+          break;
+        }
+      }
+    }
+
+    /* release the resources allocated for the fragment queue entry */
+    ip_reass_dequeue_datagram(ipr, ipr_prev);
+
+    /* and adjust the number of pbufs currently queued for reassembly. */
+    ip_reass_pbufcount -= pbuf_clen(p);
+
+    MIB2_STATS_INC(mib2.ipreasmoks);
+
+    /* Return the pbuf chain */
+    return p;
+  }
+  /* the datagram is not (yet?) reassembled completely */
+  LWIP_DEBUGF(IP_REASS_DEBUG,("ip_reass_pbufcount: %d out\n", ip_reass_pbufcount));
+  return NULL;
+
+nullreturn:
+  LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: nullreturn\n"));
+  IPFRAG_STATS_INC(ip_frag.drop);
+  pbuf_free(p);
+  return NULL;
+}
+#endif /* IP_REASSEMBLY */
+
+#if IP_FRAG
+#if !LWIP_NETIF_TX_SINGLE_PBUF
+/** Allocate a new struct pbuf_custom_ref */
+static struct pbuf_custom_ref*
+ip_frag_alloc_pbuf_custom_ref(void)
+{
+  return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF);
+}
+
+/** Free a struct pbuf_custom_ref */
+static void
+ip_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p)
+{
+  LWIP_ASSERT("p != NULL", p != NULL);
+  memp_free(MEMP_FRAG_PBUF, p);
+}
+
+/** Free-callback function to free a 'struct pbuf_custom_ref', called by
+ * pbuf_free. */
+static void
+ipfrag_free_pbuf_custom(struct pbuf *p)
+{
+  struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p;
+  LWIP_ASSERT("pcr != NULL", pcr != NULL);
+  LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p);
+  if (pcr->original != NULL) {
+    pbuf_free(pcr->original);
+  }
+  ip_frag_free_pbuf_custom_ref(pcr);
+}
+#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */
+
+/**
+ * Fragment an IP datagram if too large for the netif.
+ *
+ * Chop the datagram in MTU sized chunks and send them in order
+ * by pointing PBUF_REFs into p.
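+ *
+ * For example, assuming a typical Ethernet MTU of 1500: nfb = (1500 - 20) / 8
+ * = 185, so each full fragment carries 185 * 8 = 1480 bytes of payload, and a
+ * 4000-byte payload leaves as fragments of 1480, 1480 and 1040 bytes.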
+ * + * @param p ip packet to send + * @param netif the netif on which to send + * @param dest destination ip address to which to send + * + * @return ERR_OK if sent successfully, err_t otherwise + */ +err_t +ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest) +{ + struct pbuf *rambuf; +#if !LWIP_NETIF_TX_SINGLE_PBUF + struct pbuf *newpbuf; + u16_t newpbuflen = 0; + u16_t left_to_copy; +#endif + struct ip_hdr *original_iphdr; + struct ip_hdr *iphdr; + const u16_t nfb = (netif->mtu - IP_HLEN) / 8; + u16_t left, fragsize; + u16_t ofo; + int last; + u16_t poff = IP_HLEN; + u16_t tmp; + + original_iphdr = (struct ip_hdr *)p->payload; + iphdr = original_iphdr; + LWIP_ERROR("ip4_frag() does not support IP options", IPH_HL(iphdr) * 4 == IP_HLEN, return ERR_VAL); + + /* Save original offset */ + tmp = lwip_ntohs(IPH_OFFSET(iphdr)); + ofo = tmp & IP_OFFMASK; + LWIP_ERROR("ip_frag(): MF already set", (tmp & IP_MF) == 0, return ERR_VAL); + + left = p->tot_len - IP_HLEN; + + while (left) { + /* Fill this fragment */ + fragsize = LWIP_MIN(left, nfb * 8); + +#if LWIP_NETIF_TX_SINGLE_PBUF + rambuf = pbuf_alloc(PBUF_IP, fragsize, PBUF_RAM); + if (rambuf == NULL) { + goto memerr; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL)); + poff += pbuf_copy_partial(p, rambuf->payload, fragsize, poff); + /* make room for the IP header */ + if (pbuf_header(rambuf, IP_HLEN)) { + pbuf_free(rambuf); + goto memerr; + } + /* fill in the IP header */ + SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN); + iphdr = (struct ip_hdr*)rambuf->payload; +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + /* When not using a static buffer, create a chain of pbufs. + * The first will be a PBUF_RAM holding the link and IP header. + * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged, + * but limited to the size of an mtu. + */ + rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM); + if (rambuf == NULL) { + goto memerr; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (p->len >= (IP_HLEN))); + SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN); + iphdr = (struct ip_hdr *)rambuf->payload; + + left_to_copy = fragsize; + while (left_to_copy) { + struct pbuf_custom_ref *pcr; + u16_t plen = p->len - poff; + newpbuflen = LWIP_MIN(left_to_copy, plen); + /* Is this pbuf already empty? */ + if (!newpbuflen) { + poff = 0; + p = p->next; + continue; + } + pcr = ip_frag_alloc_pbuf_custom_ref(); + if (pcr == NULL) { + pbuf_free(rambuf); + goto memerr; + } + /* Mirror this pbuf, although we might not need all of it. */ + newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, + (u8_t*)p->payload + poff, newpbuflen); + if (newpbuf == NULL) { + ip_frag_free_pbuf_custom_ref(pcr); + pbuf_free(rambuf); + goto memerr; + } + pbuf_ref(p); + pcr->original = p; + pcr->pc.custom_free_function = ipfrag_free_pbuf_custom; + + /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain + * so that it is removed when pbuf_dechain is later called on rambuf. 
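+       * (pbuf_cat transfers ownership of newpbuf to the chain instead of
+       * taking an extra reference the way pbuf_chain does, so the single
+       * pbuf_free(rambuf) after transmission releases the whole chain.)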
+ */ + pbuf_cat(rambuf, newpbuf); + left_to_copy -= newpbuflen; + if (left_to_copy) { + poff = 0; + p = p->next; + } + } + poff += newpbuflen; +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + /* Correct header */ + last = (left <= netif->mtu - IP_HLEN); + + /* Set new offset and MF flag */ + tmp = (IP_OFFMASK & (ofo)); + if (!last) { + tmp = tmp | IP_MF; + } + IPH_OFFSET_SET(iphdr, lwip_htons(tmp)); + IPH_LEN_SET(iphdr, lwip_htons(fragsize + IP_HLEN)); + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN)); + } +#endif /* CHECKSUM_GEN_IP */ + + /* No need for separate header pbuf - we allowed room for it in rambuf + * when allocated. + */ + netif->output(netif, rambuf, dest); + IPFRAG_STATS_INC(ip_frag.xmit); + + /* Unfortunately we can't reuse rambuf - the hardware may still be + * using the buffer. Instead we free it (and the ensuing chain) and + * recreate it next time round the loop. If we're lucky the hardware + * will have already sent the packet, the free will really free, and + * there will be zero memory penalty. + */ + + pbuf_free(rambuf); + left -= fragsize; + ofo += nfb; + } + MIB2_STATS_INC(mib2.ipfragoks); + return ERR_OK; +memerr: + MIB2_STATS_INC(mib2.ipfragfails); + return ERR_MEM; +} +#endif /* IP_FRAG */ + +#endif /* LWIP_IPV4 */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c new file mode 100644 index 000000000..015ced1fd --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c @@ -0,0 +1,50 @@ +/** + * @file + * + * DHCPv6. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ *
+ * Author: Ivan Delamer
+ *
+ *
+ * Please coordinate changes and requests with Ivan Delamer
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV6 && LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/ip6_addr.h"
+#include "lwip/def.h"
+
+
+#endif /* LWIP_IPV6 && LWIP_IPV6_DHCP6 */
diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c
new file mode 100644
index 000000000..1d3ff9ed7
--- /dev/null
+++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c
@@ -0,0 +1,118 @@
+/**
+ * @file
+ *
+ * Ethernet output for IPv6. Uses ND tables for link-layer addressing.
+ */
+
+/*
+ * Copyright (c) 2010 Inico Technologies Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Ivan Delamer
+ *
+ *
+ * Please coordinate changes and requests with Ivan Delamer
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV6 && LWIP_ETHERNET
+
+#include "lwip/ethip6.h"
+#include "lwip/nd6.h"
+#include "lwip/pbuf.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/netif.h"
+#include "lwip/icmp6.h"
+#include "lwip/prot/ethernet.h"
+#include "netif/ethernet.h"
+
+#include <string.h>
+
+/**
+ * Resolve and fill-in Ethernet address header for outgoing IPv6 packet.
+ *
+ * For IPv6 multicast, corresponding Ethernet addresses
+ * are selected and the packet is transmitted on the link.
+ *
+ * For unicast addresses, ask the ND6 module what to do. It will either let us
+ * send the packet right away, or queue the packet for later itself, unless
+ * an error occurs.
+ *
+ * @todo anycast addresses
+ *
+ * @param netif The lwIP network interface which the IP packet will be sent on.
+ * @param q The pbuf(s) containing the IP packet to be sent.
+ * @param ip6addr The IP address of the packet destination.
+ *
+ * @return
+ * - ERR_OK or the return value of @ref nd6_get_next_hop_addr_or_queue.
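+ *
+ * As an illustration of the multicast hash used below: ff02::1 (all-nodes)
+ * maps to the Ethernet address 33:33:00:00:00:01, i.e. the fixed 33:33
+ * prefix followed by the last four bytes of the IPv6 address.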
+ */ +err_t +ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) +{ + struct eth_addr dest; + const u8_t *hwaddr; + err_t result; + + /* multicast destination IP address? */ + if (ip6_addr_ismulticast(ip6addr)) { + /* Hash IP multicast address to MAC address.*/ + dest.addr[0] = 0x33; + dest.addr[1] = 0x33; + dest.addr[2] = ((const u8_t *)(&(ip6addr->addr[3])))[0]; + dest.addr[3] = ((const u8_t *)(&(ip6addr->addr[3])))[1]; + dest.addr[4] = ((const u8_t *)(&(ip6addr->addr[3])))[2]; + dest.addr[5] = ((const u8_t *)(&(ip6addr->addr[3])))[3]; + + /* Send out. */ + return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6); + } + + /* We have a unicast destination IP address */ + /* @todo anycast? */ + + /* Ask ND6 what to do with the packet. */ + result = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr); + if (result != ERR_OK) { + return result; + } + + /* If no hardware address is returned, nd6 has queued the packet for later. */ + if (hwaddr == NULL) { + return ERR_OK; + } + + /* Send out the packet using the returned hardware address. */ + SMEMCPY(dest.addr, hwaddr, 6); + return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6); +} + +#endif /* LWIP_IPV6 && LWIP_ETHERNET */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c new file mode 100644 index 000000000..c8b1664b0 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c @@ -0,0 +1,350 @@ +/** + * @file + * + * IPv6 version of ICMP, as per RFC 4443. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ *
+ * Author: Ivan Delamer
+ *
+ *
+ * Please coordinate changes and requests with Ivan Delamer
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/icmp6.h"
+#include "lwip/prot/icmp6.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/pbuf.h"
+#include "lwip/netif.h"
+#include "lwip/nd6.h"
+#include "lwip/mld6.h"
+#include "lwip/ip.h"
+#include "lwip/stats.h"
+
+#include <string.h>
+
+#ifndef LWIP_ICMP6_DATASIZE
+#define LWIP_ICMP6_DATASIZE 8
+#endif
+#if LWIP_ICMP6_DATASIZE == 0
+#define LWIP_ICMP6_DATASIZE 8
+#endif
+
+/* Forward declarations */
+static void icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type);
+
+
+/**
+ * Process an input ICMPv6 message. Called by ip6_input.
+ *
+ * Will generate a reply for echo requests. Other messages are forwarded
+ * to nd6_input, or mld6_input.
+ *
+ * @param p the mld packet, p->payload pointing to the icmpv6 header
+ * @param inp the netif on which this packet was received
+ */
+void
+icmp6_input(struct pbuf *p, struct netif *inp)
+{
+  struct icmp6_hdr *icmp6hdr;
+  struct pbuf *r;
+  const ip6_addr_t *reply_src;
+
+  ICMP6_STATS_INC(icmp6.recv);
+
+  /* Check that ICMPv6 header fits in payload */
+  if (p->len < sizeof(struct icmp6_hdr)) {
+    /* drop short packets */
+    pbuf_free(p);
+    ICMP6_STATS_INC(icmp6.lenerr);
+    ICMP6_STATS_INC(icmp6.drop);
+    return;
+  }
+
+  icmp6hdr = (struct icmp6_hdr *)p->payload;
+
+#if CHECKSUM_CHECK_ICMP6
+  IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP6) {
+    if (ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->tot_len, ip6_current_src_addr(),
+                          ip6_current_dest_addr()) != 0) {
+      /* Checksum failed */
+      pbuf_free(p);
+      ICMP6_STATS_INC(icmp6.chkerr);
+      ICMP6_STATS_INC(icmp6.drop);
+      return;
+    }
+  }
+#endif /* CHECKSUM_CHECK_ICMP6 */
+
+  switch (icmp6hdr->type) {
+  case ICMP6_TYPE_NA: /* Neighbor advertisement */
+  case ICMP6_TYPE_NS: /* Neighbor solicitation */
+  case ICMP6_TYPE_RA: /* Router advertisement */
+  case ICMP6_TYPE_RD: /* Redirect */
+  case ICMP6_TYPE_PTB: /* Packet too big */
+    nd6_input(p, inp);
+    return;
+    break;
+  case ICMP6_TYPE_RS:
+#if LWIP_IPV6_FORWARD
+    /* @todo implement router functionality */
+#endif
+    break;
+#if LWIP_IPV6_MLD
+  case ICMP6_TYPE_MLQ:
+  case ICMP6_TYPE_MLR:
+  case ICMP6_TYPE_MLD:
+    mld6_input(p, inp);
+    return;
+    break;
+#endif
+  case ICMP6_TYPE_EREQ:
+#if !LWIP_MULTICAST_PING
+    /* multicast destination address? */
+    if (ip6_addr_ismulticast(ip6_current_dest_addr())) {
+      /* drop */
+      pbuf_free(p);
+      ICMP6_STATS_INC(icmp6.drop);
+      return;
+    }
+#endif /* LWIP_MULTICAST_PING */
+
+    /* Allocate reply. */
+    r = pbuf_alloc(PBUF_IP, p->tot_len, PBUF_RAM);
+    if (r == NULL) {
+      /* drop */
+      pbuf_free(p);
+      ICMP6_STATS_INC(icmp6.memerr);
+      return;
+    }
+
+    /* Copy echo request. */
+    if (pbuf_copy(r, p) != ERR_OK) {
+      /* drop */
+      pbuf_free(p);
+      pbuf_free(r);
+      ICMP6_STATS_INC(icmp6.err);
+      return;
+    }
+
+    /* Determine reply source IPv6 address. */
+#if LWIP_MULTICAST_PING
+    if (ip6_addr_ismulticast(ip6_current_dest_addr())) {
+      reply_src = ip_2_ip6(ip6_select_source_address(inp, ip6_current_src_addr()));
+      if (reply_src == NULL) {
+        /* drop */
+        pbuf_free(p);
+        pbuf_free(r);
+        ICMP6_STATS_INC(icmp6.rterr);
+        return;
+      }
+    }
+    else
+#endif /* LWIP_MULTICAST_PING */
+    {
+      reply_src = ip6_current_dest_addr();
+    }
+
+    /* Set fields in reply.
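+     * The identifier, sequence number and echo payload were already
+     * duplicated from the request by pbuf_copy() above, so only the type
+     * and checksum fields need to change.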
*/ + ((struct icmp6_echo_hdr *)(r->payload))->type = ICMP6_TYPE_EREP; + ((struct icmp6_echo_hdr *)(r->payload))->chksum = 0; +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP6) { + ((struct icmp6_echo_hdr *)(r->payload))->chksum = ip6_chksum_pseudo(r, + IP6_NEXTH_ICMP6, r->tot_len, reply_src, ip6_current_src_addr()); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send reply. */ + ICMP6_STATS_INC(icmp6.xmit); + ip6_output_if(r, reply_src, ip6_current_src_addr(), + LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, inp); + pbuf_free(r); + + break; + default: + ICMP6_STATS_INC(icmp6.proterr); + ICMP6_STATS_INC(icmp6.drop); + break; + } + + pbuf_free(p); +} + + +/** + * Send an icmpv6 'destination unreachable' packet. + * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the unreachable type + */ +void +icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c) +{ + icmp6_send_response(p, c, 0, ICMP6_TYPE_DUR); +} + +/** + * Send an icmpv6 'packet too big' packet. + * + * @param p the input packet for which the 'packet too big' should be sent, + * p->payload pointing to the IPv6 header + * @param mtu the maximum mtu that we can accept + */ +void +icmp6_packet_too_big(struct pbuf *p, u32_t mtu) +{ + icmp6_send_response(p, 0, mtu, ICMP6_TYPE_PTB); +} + +/** + * Send an icmpv6 'time exceeded' packet. + * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the time exceeded type + */ +void +icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c) +{ + icmp6_send_response(p, c, 0, ICMP6_TYPE_TE); +} + +/** + * Send an icmpv6 'parameter problem' packet. + * + * @param p the input packet for which the 'param problem' should be sent, + * p->payload pointing to the IP header + * @param c ICMPv6 code for the param problem type + * @param pointer the pointer to the byte where the parameter is found + */ +void +icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, u32_t pointer) +{ + icmp6_send_response(p, c, pointer, ICMP6_TYPE_PP); +} + +/** + * Send an ICMPv6 packet in response to an incoming packet. 
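+ *
+ * For example, the wrappers above invoke this as
+ * icmp6_send_response(p, c, 0, ICMP6_TYPE_DUR) for 'destination unreachable'
+ * or icmp6_send_response(p, 0, mtu, ICMP6_TYPE_PTB) for 'packet too big';
+ * the 32-bit 'data' word is only meaningful for some types (e.g. the MTU).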
+ *
+ * @param p the input packet for which the response should be sent,
+ *          p->payload pointing to the IPv6 header
+ * @param code Code of the ICMPv6 header
+ * @param data Additional 32-bit parameter in the ICMPv6 header
+ * @param type Type of the ICMPv6 header
+ */
+static void
+icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type)
+{
+  struct pbuf *q;
+  struct icmp6_hdr *icmp6hdr;
+  const ip6_addr_t *reply_src;
+  ip6_addr_t *reply_dest;
+  ip6_addr_t reply_src_local, reply_dest_local;
+  struct ip6_hdr *ip6hdr;
+  struct netif *netif;
+
+  /* ICMPv6 header + IPv6 header + data */
+  q = pbuf_alloc(PBUF_IP, sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE,
+                 PBUF_RAM);
+  if (q == NULL) {
+    LWIP_DEBUGF(ICMP_DEBUG, ("icmp6_send_response: failed to allocate pbuf for ICMPv6 packet.\n"));
+    ICMP6_STATS_INC(icmp6.memerr);
+    return;
+  }
+  LWIP_ASSERT("check that first pbuf can hold icmp6 message",
+             (q->len >= (sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE)));
+
+  icmp6hdr = (struct icmp6_hdr *)q->payload;
+  icmp6hdr->type = type;
+  icmp6hdr->code = code;
+  icmp6hdr->data = data;
+
+  /* copy fields from original packet */
+  SMEMCPY((u8_t *)q->payload + sizeof(struct icmp6_hdr), (u8_t *)p->payload,
+          IP6_HLEN + LWIP_ICMP6_DATASIZE);
+
+  /* Get the destination address and netif for this ICMP message. */
+  if ((ip_current_netif() == NULL) ||
+      ((code == ICMP6_TE_FRAG) && (type == ICMP6_TYPE_TE))) {
+    /* Special case, as ip6_current_xxx is either NULL, or points
+     * to a different packet than the one that expired.
+     * We must use the addresses that are stored in the expired packet. */
+    ip6hdr = (struct ip6_hdr *)p->payload;
+    /* copy from packed address to aligned address */
+    ip6_addr_copy(reply_dest_local, ip6hdr->src);
+    ip6_addr_copy(reply_src_local, ip6hdr->dest);
+    reply_dest = &reply_dest_local;
+    reply_src = &reply_src_local;
+    netif = ip6_route(reply_src, reply_dest);
+    if (netif == NULL) {
+      /* drop */
+      pbuf_free(q);
+      ICMP6_STATS_INC(icmp6.rterr);
+      return;
+    }
+  }
+  else {
+    netif = ip_current_netif();
+    reply_dest = ip6_current_src_addr();
+
+    /* Select an address to use as source. */
+    reply_src = ip_2_ip6(ip6_select_source_address(netif, reply_dest));
+    if (reply_src == NULL) {
+      /* drop */
+      pbuf_free(q);
+      ICMP6_STATS_INC(icmp6.rterr);
+      return;
+    }
+  }
+
+  /* calculate checksum */
+  icmp6hdr->chksum = 0;
+#if CHECKSUM_GEN_ICMP6
+  IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) {
+    icmp6hdr->chksum = ip6_chksum_pseudo(q, IP6_NEXTH_ICMP6, q->tot_len,
+      reply_src, reply_dest);
+  }
+#endif /* CHECKSUM_GEN_ICMP6 */
+
+  ICMP6_STATS_INC(icmp6.xmit);
+  ip6_output_if(q, reply_src, reply_dest, LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif);
+  pbuf_free(q);
+}
+
+#endif /* LWIP_ICMP6 && LWIP_IPV6 */
diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c
new file mode 100644
index 000000000..7cb16aad2
--- /dev/null
+++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c
@@ -0,0 +1,53 @@
+/**
+ * @file
+ *
+ * INET v6 addresses.
+ */
+
+/*
+ * Copyright (c) 2010 Inico Technologies Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2.
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/inet.h" + +/** This variable is initialized by the system to contain the wildcard IPv6 address. + */ +const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; + +#endif /* LWIP_IPV6 */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c new file mode 100644 index 000000000..e354087e0 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c @@ -0,0 +1,1122 @@ +/** + * @file + * + * IPv6 layer. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/netif.h" +#include "lwip/ip.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip6_frag.h" +#include "lwip/icmp6.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/dhcp6.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/debug.h" +#include "lwip/stats.h" + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** + * Finds the appropriate network interface for a given IPv6 address. It tries to select + * a netif following a sequence of heuristics: + * 1) if there is only 1 netif, return it + * 2) if the destination is a link-local address, try to match the src address to a netif. + * this is a tricky case because with multiple netifs, link-local addresses only have + * meaning within a particular subnet/link. + * 3) tries to match the destination subnet to a configured address + * 4) tries to find a router + * 5) tries to match the source address to the netif + * 6) returns the default netif, if configured + * + * @param src the source IPv6 address, if known + * @param dest the destination IPv6 address for which to find the route + * @return the netif on which to send to reach dest + */ +struct netif * +ip6_route(const ip6_addr_t *src, const ip6_addr_t *dest) +{ + struct netif *netif; + s8_t i; + + /* If single netif configuration, fast return. */ + if ((netif_list != NULL) && (netif_list->next == NULL)) { + if (!netif_is_up(netif_list) || !netif_is_link_up(netif_list)) { + return NULL; + } + return netif_list; + } + + /* Special processing for link-local addresses. */ + if (ip6_addr_islinklocal(dest)) { + if (ip6_addr_isany(src)) { + /* Use default netif, if Up. */ + if (netif_default == NULL || !netif_is_up(netif_default) || + !netif_is_link_up(netif_default)) { + return NULL; + } + return netif_default; + } + + /* Try to find the netif for the source address, checking that link is up. */ + for (netif = netif_list; netif != NULL; netif = netif->next) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(src, netif_ip6_addr(netif, i))) { + return netif; + } + } + } + + /* netif not found, use default netif, if up */ + if (netif_default == NULL || !netif_is_up(netif_default) || + !netif_is_link_up(netif_default)) { + return NULL; + } + return netif_default; + } + + /* we come here for non-link-local addresses */ +#ifdef LWIP_HOOK_IP6_ROUTE + netif = LWIP_HOOK_IP6_ROUTE(src, dest); + if (netif != NULL) { + return netif; + } +#endif + + /* See if the destination subnet matches a configured address. */ + for (netif = netif_list; netif != NULL; netif = netif->next) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_netcmp(dest, netif_ip6_addr(netif, i))) { + return netif; + } + } + } + + /* Get the netif for a suitable router. */ + netif = nd6_find_route(dest); + if ((netif != NULL) && netif_is_up(netif) && netif_is_link_up(netif)) { + return netif; + } + + /* try with the netif that matches the source address. 
*/ + if (!ip6_addr_isany(src)) { + for (netif = netif_list; netif != NULL; netif = netif->next) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(src, netif_ip6_addr(netif, i))) { + return netif; + } + } + } + } + +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + /* loopif is disabled, loopback traffic is passed through any netif */ + if (ip6_addr_isloopback(dest)) { + /* don't check for link on loopback traffic */ + if (netif_default != NULL && netif_is_up(netif_default)) { + return netif_default; + } + /* default netif is not up, just use any netif for loopback traffic */ + for (netif = netif_list; netif != NULL; netif = netif->next) { + if (netif_is_up(netif)) { + return netif; + } + } + return NULL; + } +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ + + /* no matching netif found, use default netif, if up */ + if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default)) { + return NULL; + } + return netif_default; +} + +/** + * @ingroup ip6 + * Select the best IPv6 source address for a given destination + * IPv6 address. Loosely follows RFC 3484. "Strong host" behavior + * is assumed. + * + * @param netif the netif on which to send a packet + * @param dest the destination we are trying to reach + * @return the most suitable source address to use, or NULL if no suitable + * source address is found + */ +const ip_addr_t * +ip6_select_source_address(struct netif *netif, const ip6_addr_t *dest) +{ + const ip_addr_t *src = NULL; + u8_t i; + + /* If dest is link-local, choose a link-local source. */ + if (ip6_addr_islinklocal(dest) || ip6_addr_ismulticast_linklocal(dest) || ip6_addr_ismulticast_iflocal(dest)) { + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_islinklocal(netif_ip6_addr(netif, i))) { + return netif_ip_addr6(netif, i); + } + } + } + + /* Choose a site-local with matching prefix. */ + if (ip6_addr_issitelocal(dest) || ip6_addr_ismulticast_sitelocal(dest)) { + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_issitelocal(netif_ip6_addr(netif, i)) && + ip6_addr_netcmp(dest, netif_ip6_addr(netif, i))) { + return netif_ip_addr6(netif, i); + } + } + } + + /* Choose a unique-local with matching prefix. */ + if (ip6_addr_isuniquelocal(dest) || ip6_addr_ismulticast_orglocal(dest)) { + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_isuniquelocal(netif_ip6_addr(netif, i)) && + ip6_addr_netcmp(dest, netif_ip6_addr(netif, i))) { + return netif_ip_addr6(netif, i); + } + } + } + + /* Choose a global with best matching prefix. */ + if (ip6_addr_isglobal(dest) || ip6_addr_ismulticast_global(dest)) { + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_isglobal(netif_ip6_addr(netif, i))) { + if (src == NULL) { + src = netif_ip_addr6(netif, i); + } + else { + /* Replace src only if we find a prefix match. */ + /* @todo find longest matching prefix. */ + if ((!(ip6_addr_netcmp(ip_2_ip6(src), dest))) && + ip6_addr_netcmp(netif_ip6_addr(netif, i), dest)) { + src = netif_ip_addr6(netif, i); + } + } + } + } + if (src != NULL) { + return src; + } + } + + /* Last resort: see if arbitrary prefix matches. 
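+   * (ip6_addr_netcmp() compares the leading 64 bits, so e.g. a destination
+   * and a configured address both under 2001:db8:0:1::/64 match here
+   * regardless of their scope.)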
*/ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_netcmp(dest, netif_ip6_addr(netif, i))) { + return netif_ip_addr6(netif, i); + } + } + + return NULL; +} + +#if LWIP_IPV6_FORWARD +/** + * Forwards an IPv6 packet. It finds an appropriate route for the + * packet, decrements the HL value of the packet, and outputs + * the packet on the appropriate interface. + * + * @param p the packet to forward (p->payload points to IP header) + * @param iphdr the IPv6 header of the input packet + * @param inp the netif on which this packet was received + */ +static void +ip6_forward(struct pbuf *p, struct ip6_hdr *iphdr, struct netif *inp) +{ + struct netif *netif; + + /* do not forward link-local or loopback addresses */ + if (ip6_addr_islinklocal(ip6_current_dest_addr()) || + ip6_addr_isloopback(ip6_current_dest_addr())) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not forwarding link-local address.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } + + /* Find network interface where to forward this IP packet to. */ + netif = ip6_route(IP6_ADDR_ANY6, ip6_current_dest_addr()); + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(ip6_current_dest_addr()), + IP6_ADDR_BLOCK2(ip6_current_dest_addr()), + IP6_ADDR_BLOCK3(ip6_current_dest_addr()), + IP6_ADDR_BLOCK4(ip6_current_dest_addr()), + IP6_ADDR_BLOCK5(ip6_current_dest_addr()), + IP6_ADDR_BLOCK6(ip6_current_dest_addr()), + IP6_ADDR_BLOCK7(ip6_current_dest_addr()), + IP6_ADDR_BLOCK8(ip6_current_dest_addr()))); +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_dest_unreach(p, ICMP6_DUR_NO_ROUTE); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } + /* Do not forward packets onto the same network interface on which + * they arrived. 
*/ + if (netif == inp) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not bouncing packets back on incoming interface.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } + + /* decrement HL */ + IP6H_HOPLIM_SET(iphdr, IP6H_HOPLIM(iphdr) - 1); + /* send ICMP6 if HL == 0 */ + if (IP6H_HOPLIM(iphdr) == 0) { +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_time_exceeded(p, ICMP6_TE_HL); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.drop); + return; + } + + if (netif->mtu && (p->tot_len > netif->mtu)) { +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_packet_too_big(p, netif->mtu); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.drop); + return; + } + + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: forwarding packet to %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(ip6_current_dest_addr()), + IP6_ADDR_BLOCK2(ip6_current_dest_addr()), + IP6_ADDR_BLOCK3(ip6_current_dest_addr()), + IP6_ADDR_BLOCK4(ip6_current_dest_addr()), + IP6_ADDR_BLOCK5(ip6_current_dest_addr()), + IP6_ADDR_BLOCK6(ip6_current_dest_addr()), + IP6_ADDR_BLOCK7(ip6_current_dest_addr()), + IP6_ADDR_BLOCK8(ip6_current_dest_addr()))); + + /* transmit pbuf on chosen interface */ + netif->output_ip6(netif, p, ip6_current_dest_addr()); + IP6_STATS_INC(ip6.fw); + IP6_STATS_INC(ip6.xmit); + return; +} +#endif /* LWIP_IPV6_FORWARD */ + +/** + * This function is called by the network interface device driver when + * an IPv6 packet is received. The function does the basic checks of the + * IP header such as packet size being at least larger than the header + * size etc. If the packet was not destined for us, the packet is + * forwarded (using ip6_forward). + * + * Finally, the packet is sent to the upper layer protocol input function. + * + * @param p the received IPv6 packet (p->payload points to IPv6 header) + * @param inp the netif on which this packet was received + * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't + * processed, but currently always returns ERR_OK) + */ +err_t +ip6_input(struct pbuf *p, struct netif *inp) +{ + struct ip6_hdr *ip6hdr; + struct netif *netif; + u8_t nexth; + u16_t hlen; /* the current header length */ + u8_t i; +#if 0 /*IP_ACCEPT_LINK_LAYER_ADDRESSING*/ + @todo + int check_ip_src=1; +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ + + IP6_STATS_INC(ip6.recv); + + /* identify the IP header */ + ip6hdr = (struct ip6_hdr *)p->payload; + if (IP6H_V(ip6hdr) != 6) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IPv6 packet dropped due to bad version number %"U32_F"\n", + IP6H_V(ip6hdr))); + pbuf_free(p); + IP6_STATS_INC(ip6.err); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + +#ifdef LWIP_HOOK_IP6_INPUT + if (LWIP_HOOK_IP6_INPUT(p, inp)) { + /* the packet has been eaten */ + return ERR_OK; + } +#endif + + /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? 
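+   * (IP6_HLEN is 40, so a first pbuf shorter than 40 bytes, or a payload
+   * length field claiming more data than the pbuf chain actually holds,
+   * is rejected here.)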
*/ + if ((IP6_HLEN > p->len) || ((IP6H_PLEN(ip6hdr) + IP6_HLEN) > p->tot_len)) { + if (IP6_HLEN > p->len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n", + (u16_t)IP6_HLEN, p->len)); + } + if ((IP6H_PLEN(ip6hdr) + IP6_HLEN) > p->tot_len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 (plen %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n", + (u16_t)(IP6H_PLEN(ip6hdr) + IP6_HLEN), p->tot_len)); + } + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + + /* Trim pbuf. This should have been done at the netif layer, + * but we'll do it anyway just to be sure that its done. */ + pbuf_realloc(p, IP6_HLEN + IP6H_PLEN(ip6hdr)); + + /* copy IP addresses to aligned ip6_addr_t */ + ip_addr_copy_from_ip6(ip_data.current_iphdr_dest, ip6hdr->dest); + ip_addr_copy_from_ip6(ip_data.current_iphdr_src, ip6hdr->src); + + /* Don't accept virtual IPv4 mapped IPv6 addresses. + * Don't accept multicast source addresses. */ + if (ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_dest)) || + ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_src)) || + ip6_addr_ismulticast(ip_2_ip6(&ip_data.current_iphdr_src))) { + IP6_STATS_INC(ip6.err); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + + /* current header pointer. */ + ip_data.current_ip6_header = ip6hdr; + + /* In netif, used in case we need to send ICMPv6 packets back. */ + ip_data.current_netif = inp; + ip_data.current_input_netif = inp; + + /* match packet against an interface, i.e. is this packet for us? */ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* Always joined to multicast if-local and link-local all-nodes group. */ + if (ip6_addr_isallnodes_iflocal(ip6_current_dest_addr()) || + ip6_addr_isallnodes_linklocal(ip6_current_dest_addr())) { + netif = inp; + } +#if LWIP_IPV6_MLD + else if (mld6_lookfor_group(inp, ip6_current_dest_addr())) { + netif = inp; + } +#else /* LWIP_IPV6_MLD */ + else if (ip6_addr_issolicitednode(ip6_current_dest_addr())) { + /* Filter solicited node packets when MLD is not enabled + * (for Neighbor discovery). */ + netif = NULL; + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp_solicitednode(ip6_current_dest_addr(), netif_ip6_addr(inp, i))) { + netif = inp; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: solicited node packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + break; + } + } + } +#endif /* LWIP_IPV6_MLD */ + else { + netif = NULL; + } + } else { + /* start trying with inp. if that's not acceptable, start walking the + list of configured netifs. + 'first' is used as a boolean to mark whether we started walking the list */ + int first = 1; + netif = inp; + do { + /* interface is up? */ + if (netif_is_up(netif)) { + /* unicast to this interface address? address configured? */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(ip6_current_dest_addr(), netif_ip6_addr(netif, i))) { + /* exit outer loop */ + goto netif_found; + } + } + } + if (first) { + if (ip6_addr_islinklocal(ip6_current_dest_addr()) +#if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF + || ip6_addr_isloopback(ip6_current_dest_addr()) +#endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */ + ) { + /* Do not match link-local addresses to other netifs. 
The loopback + * address is to be considered link-local and packets to it should be + * dropped on other interfaces, as per RFC 4291 Sec. 2.5.3. This + * requirement cannot be implemented in the case that loopback + * traffic is sent across a non-loopback interface, however. + */ + netif = NULL; + break; + } + first = 0; + netif = netif_list; + } else { + netif = netif->next; + } + if (netif == inp) { + netif = netif->next; + } + } while (netif != NULL); +netif_found: + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet accepted on interface %c%c\n", + netif ? netif->name[0] : 'X', netif? netif->name[1] : 'X')); + } + + /* "::" packet source address? (used in duplicate address detection) */ + if (ip6_addr_isany(ip6_current_src_addr()) && + (!ip6_addr_issolicitednode(ip6_current_dest_addr()))) { + /* packet source is not valid */ + /* free (drop) packet pbufs */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with src ANY_ADDRESS dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* packet not for us? */ + if (netif == NULL) { + /* packet not for us, route or discard */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_TRACE, ("ip6_input: packet not for us.\n")); +#if LWIP_IPV6_FORWARD + /* non-multicast packet? */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* try to forward IP packet on (other) interfaces */ + ip6_forward(p, ip6hdr, inp); + } +#endif /* LWIP_IPV6_FORWARD */ + pbuf_free(p); + goto ip6_input_cleanup; + } + + /* current netif pointer. */ + ip_data.current_netif = netif; + + /* Save next header type. */ + nexth = IP6H_NEXTH(ip6hdr); + + /* Init header length. */ + hlen = ip_data.current_ip_header_tot_len = IP6_HLEN; + + /* Move to payload. */ + pbuf_header(p, -IP6_HLEN); + + /* Process known option extension headers, if present. */ + while (nexth != IP6_NEXTH_NONE) + { + switch (nexth) { + case IP6_NEXTH_HOPBYHOP: + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Hop-by-Hop options header\n")); + /* Get next header type. */ + nexth = *((u8_t *)p->payload); + + /* Get the header length. */ + hlen = 8 * (1 + *((u8_t *)p->payload + 1)); + ip_data.current_ip_header_tot_len += hlen; + + /* Skip over this header. */ + if (hlen > p->len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + pbuf_header(p, -(s16_t)hlen); + break; + case IP6_NEXTH_DESTOPTS: + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Destination options header\n")); + /* Get next header type. */ + nexth = *((u8_t *)p->payload); + + /* Get the header length. */ + hlen = 8 * (1 + *((u8_t *)p->payload + 1)); + ip_data.current_ip_header_tot_len += hlen; + + /* Skip over this header. */ + if (hlen > p->len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + pbuf_header(p, -(s16_t)hlen); + break; + case IP6_NEXTH_ROUTING: + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Routing header\n")); + /* Get next header type. */ + nexth = *((u8_t *)p->payload); + + /* Get the header length. 
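+       * (The length field counts 8-octet units beyond the first, so a
+       * field value of 0 gives hlen == 8 and a value of 2 gives hlen == 24.)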
*/
+      hlen = 8 * (1 + *((u8_t *)p->payload + 1));
+      ip_data.current_ip_header_tot_len += hlen;
+
+      /* Skip over this header. */
+      if (hlen > p->len) {
+        LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+          ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n",
+            hlen, p->len));
+        /* free (drop) packet pbufs */
+        pbuf_free(p);
+        IP6_STATS_INC(ip6.lenerr);
+        IP6_STATS_INC(ip6.drop);
+        goto ip6_input_cleanup;
+      }
+
+      pbuf_header(p, -(s16_t)hlen);
+      break;
+
+    case IP6_NEXTH_FRAGMENT:
+    {
+      struct ip6_frag_hdr *frag_hdr;
+      LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header\n"));
+
+      frag_hdr = (struct ip6_frag_hdr *)p->payload;
+
+      /* Get next header type. */
+      nexth = frag_hdr->_nexth;
+
+      /* Fragment Header length. */
+      hlen = 8;
+      ip_data.current_ip_header_tot_len += hlen;
+
+      /* Make sure this header fits in current pbuf. */
+      if (hlen > p->len) {
+        LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+          ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n",
+            hlen, p->len));
+        /* free (drop) packet pbufs */
+        pbuf_free(p);
+        IP6_FRAG_STATS_INC(ip6_frag.lenerr);
+        IP6_FRAG_STATS_INC(ip6_frag.drop);
+        goto ip6_input_cleanup;
+      }
+
+      /* Offset == 0 and more_fragments == 0? */
+      if ((frag_hdr->_fragment_offset &
+           PP_HTONS(IP6_FRAG_OFFSET_MASK | IP6_FRAG_MORE_FLAG)) == 0) {
+        /* This is a 1-fragment packet, usually a packet that we have
+         * already reassembled. Skip this header and continue. */
+        pbuf_header(p, -(s16_t)hlen);
+      } else {
+#if LWIP_IPV6_REASS
+
+        /* reassemble the packet */
+        p = ip6_reass(p);
+        /* packet not fully reassembled yet? */
+        if (p == NULL) {
+          goto ip6_input_cleanup;
+        }
+
+        /* Returned p points to the IPv6 header.
+         * Update all our variables and pointers and continue. */
+        ip6hdr = (struct ip6_hdr *)p->payload;
+        nexth = IP6H_NEXTH(ip6hdr);
+        hlen = ip_data.current_ip_header_tot_len = IP6_HLEN;
+        pbuf_header(p, -IP6_HLEN);
+
+#else /* LWIP_IPV6_REASS */
+        /* free (drop) packet pbufs */
+        LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header dropped (with LWIP_IPV6_REASS==0)\n"));
+        pbuf_free(p);
+        IP6_STATS_INC(ip6.opterr);
+        IP6_STATS_INC(ip6.drop);
+        goto ip6_input_cleanup;
+#endif /* LWIP_IPV6_REASS */
+      }
+      break;
+    }
+    default:
+      goto options_done;
+      break;
+    }
+  }
+options_done:
+
+  /* p points to IPv6 header again. */
+  pbuf_header_force(p, (s16_t)ip_data.current_ip_header_tot_len);
+
+  /* send to upper layers */
+  LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: \n"));
+  ip6_debug_print(p);
+  LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len));
+
+#if LWIP_RAW
+  /* raw input did not eat the packet? */
+  if (raw_input(p, inp) == 0)
+#endif /* LWIP_RAW */
+  {
+    switch (nexth) {
+    case IP6_NEXTH_NONE:
+      pbuf_free(p);
+      break;
+#if LWIP_UDP
+    case IP6_NEXTH_UDP:
+#if LWIP_UDPLITE
+    case IP6_NEXTH_UDPLITE:
+#endif /* LWIP_UDPLITE */
+      /* Point to payload. */
+      pbuf_header(p, -(s16_t)ip_data.current_ip_header_tot_len);
+      udp_input(p, inp);
+      break;
+#endif /* LWIP_UDP */
+#if LWIP_TCP
+    case IP6_NEXTH_TCP:
+      /* Point to payload. */
+      pbuf_header(p, -(s16_t)ip_data.current_ip_header_tot_len);
+      tcp_input(p, inp);
+      break;
+#endif /* LWIP_TCP */
+#if LWIP_ICMP6
+    case IP6_NEXTH_ICMP6:
+      /* Point to payload.
*/
+      pbuf_header(p, -(s16_t)ip_data.current_ip_header_tot_len);
+      icmp6_input(p, inp);
+      break;
+#endif /* LWIP_ICMP6 */
+    default:
+#if LWIP_ICMP6
+      /* send ICMP parameter problem unless it was a multicast or ICMPv6 */
+      if ((!ip6_addr_ismulticast(ip6_current_dest_addr())) &&
+          (IP6H_NEXTH(ip6hdr) != IP6_NEXTH_ICMP6)) {
+        icmp6_param_problem(p, ICMP6_PP_HEADER, ip_data.current_ip_header_tot_len - hlen);
+      }
+#endif /* LWIP_ICMP6 */
+      LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_input: Unsupported transport protocol %"U16_F"\n", (u16_t)IP6H_NEXTH(ip6hdr)));
+      pbuf_free(p);
+      IP6_STATS_INC(ip6.proterr);
+      IP6_STATS_INC(ip6.drop);
+      break;
+    }
+  }
+
+ip6_input_cleanup:
+  ip_data.current_netif = NULL;
+  ip_data.current_input_netif = NULL;
+  ip_data.current_ip6_header = NULL;
+  ip_data.current_ip_header_tot_len = 0;
+  ip6_addr_set_zero(ip6_current_src_addr());
+  ip6_addr_set_zero(ip6_current_dest_addr());
+
+  return ERR_OK;
+}
+
+
+/**
+ * Sends an IPv6 packet on a network interface. This function constructs
+ * the IPv6 header. If the source IPv6 address is NULL, the IPv6 "ANY" address is
+ * used as source (usually during network startup). If the source IPv6 address is
+ * IP6_ADDR_ANY, the most appropriate IPv6 address of the outgoing network
+ * interface is filled in as source address. If the destination IPv6 address is
+ * LWIP_IP_HDRINCL, p is assumed to already include an IPv6 header and
+ * p->payload points to it instead of the data.
+ *
+ * @param p the packet to send (p->payload points to the data, e.g. next
+            protocol header; if dest == LWIP_IP_HDRINCL, p already includes an
+            IPv6 header and p->payload points to that IPv6 header)
+ * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an
+ *         IP address of the netif is selected and used as source address.
+ *         if src == NULL, IP6_ADDR_ANY is used as source)
+ * @param dest the destination IPv6 address to send the packet to
+ * @param hl the Hop Limit value to be set in the IPv6 header
+ * @param tc the Traffic Class value to be set in the IPv6 header
+ * @param nexth the Next Header to be set in the IPv6 header
+ * @param netif the netif on which to send this packet
+ * @return ERR_OK if the packet was sent OK
+ *         ERR_BUF if p doesn't have enough space for IPv6/LINK headers
+ *         returns errors returned by netif->output
+ */
+err_t
+ip6_output_if(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest,
+              u8_t hl, u8_t tc,
+              u8_t nexth, struct netif *netif)
+{
+  const ip6_addr_t *src_used = src;
+  if (dest != LWIP_IP_HDRINCL) {
+    if (src != NULL && ip6_addr_isany(src)) {
+      src_used = ip_2_ip6(ip6_select_source_address(netif, dest));
+      if ((src_used == NULL) || ip6_addr_isany(src_used)) {
+        /* No appropriate source address was found for this packet. */
+        LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: No suitable source address for packet.\n"));
+        IP6_STATS_INC(ip6.rterr);
+        return ERR_RTE;
+      }
+    }
+  }
+  return ip6_output_if_src(p, src_used, dest, hl, tc, nexth, netif);
+}
+
+/**
+ * Same as ip6_output_if() but 'src' address is not replaced by netif address
+ * when it is 'any'.
+ */
+err_t
+ip6_output_if_src(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest,
+                  u8_t hl, u8_t tc,
+                  u8_t nexth, struct netif *netif)
+{
+  struct ip6_hdr *ip6hdr;
+  ip6_addr_t dest_addr;
+
+  LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p);
+
+  /* Should the IPv6 header be generated or is it already included in p?
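+   * (Callers signal "header already included" by passing LWIP_IP_HDRINCL as
+   * dest; p->payload must then already point at a complete IPv6 header, and
+   * only the destination address is read back out of it below.)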
*/ + if (dest != LWIP_IP_HDRINCL) { + /* generate IPv6 header */ + if (pbuf_header(p, IP6_HLEN)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: not enough room for IPv6 header in pbuf\n")); + IP6_STATS_INC(ip6.err); + return ERR_BUF; + } + + ip6hdr = (struct ip6_hdr *)p->payload; + LWIP_ASSERT("check that first pbuf can hold struct ip6_hdr", + (p->len >= sizeof(struct ip6_hdr))); + + IP6H_HOPLIM_SET(ip6hdr, hl); + IP6H_NEXTH_SET(ip6hdr, nexth); + + /* dest cannot be NULL here */ + ip6_addr_copy(ip6hdr->dest, *dest); + + IP6H_VTCFL_SET(ip6hdr, 6, tc, 0); + IP6H_PLEN_SET(ip6hdr, p->tot_len - IP6_HLEN); + + if (src == NULL) { + src = IP6_ADDR_ANY6; + } + /* src cannot be NULL here */ + ip6_addr_copy(ip6hdr->src, *src); + + } else { + /* IP header already included in p */ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy(dest_addr, ip6hdr->dest); + dest = &dest_addr; + } + + IP6_STATS_INC(ip6.xmit); + + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num)); + ip6_debug_print(p); + +#if ENABLE_LOOPBACK + { + int i; +#if !LWIP_HAVE_LOOPIF + if (ip6_addr_isloopback(dest)) { + return netif_loop_output(netif, p); + } +#endif /* !LWIP_HAVE_LOOPIF */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(dest, netif_ip6_addr(netif, i))) { + /* Packet to self, enqueue it for loopback */ + LWIP_DEBUGF(IP6_DEBUG, ("netif_loop_output()\n")); + return netif_loop_output(netif, p); + } + } + } +#endif /* ENABLE_LOOPBACK */ +#if LWIP_IPV6_FRAG + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif->mtu && (p->tot_len > nd6_get_destination_mtu(dest, netif))) { + return ip6_frag(p, netif, dest); + } +#endif /* LWIP_IPV6_FRAG */ + + LWIP_DEBUGF(IP6_DEBUG, ("netif->output_ip6()\n")); + return netif->output_ip6(netif, p, dest); +} + +/** + * Simple interface to ip6_output_if. It finds the outgoing network + * interface and calls upon ip6_output_if to do the actual work. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. + * if src == NULL, IP6_ADDR_ANY is used as source) + * @param dest the destination IPv6 address to send the packet to + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip6_output(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth) +{ + struct netif *netif; + struct ip6_hdr *ip6hdr; + ip6_addr_t src_addr, dest_addr; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if (dest != LWIP_IP_HDRINCL) { + netif = ip6_route(src, dest); + } else { + /* IP header included in p, read addresses. 
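+     * (Copied into ip6_addr_t locals so ip6_route() gets properly aligned
+     * addresses, independent of the pbuf payload alignment.)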
*/ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy(src_addr, ip6hdr->src); + ip6_addr_copy(dest_addr, ip6hdr->dest); + netif = ip6_route(&src_addr, &dest_addr); + } + + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(dest), + IP6_ADDR_BLOCK2(dest), + IP6_ADDR_BLOCK3(dest), + IP6_ADDR_BLOCK4(dest), + IP6_ADDR_BLOCK5(dest), + IP6_ADDR_BLOCK6(dest), + IP6_ADDR_BLOCK7(dest), + IP6_ADDR_BLOCK8(dest))); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + + return ip6_output_if(p, src, dest, hl, tc, nexth, netif); +} + + +#if LWIP_NETIF_HWADDRHINT +/** Like ip6_output, but takes and addr_hint pointer that is passed on to netif->addr_hint + * before calling ip6_output_if. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. + * if src == NULL, IP6_ADDR_ANY is used as source) + * @param dest the destination IPv6 address to send the packet to + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * @param addr_hint address hint pointer set to netif->addr_hint before + * calling ip_output_if() + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip6_output_hinted(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, u8_t *addr_hint) +{ + struct netif *netif; + struct ip6_hdr *ip6hdr; + ip6_addr_t src_addr, dest_addr; + err_t err; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if (dest != LWIP_IP_HDRINCL) { + netif = ip6_route(src, dest); + } else { + /* IP header included in p, read addresses. */ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy(src_addr, ip6hdr->src); + ip6_addr_copy(dest_addr, ip6hdr->dest); + netif = ip6_route(&src_addr, &dest_addr); + } + + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(dest), + IP6_ADDR_BLOCK2(dest), + IP6_ADDR_BLOCK3(dest), + IP6_ADDR_BLOCK4(dest), + IP6_ADDR_BLOCK5(dest), + IP6_ADDR_BLOCK6(dest), + IP6_ADDR_BLOCK7(dest), + IP6_ADDR_BLOCK8(dest))); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + + NETIF_SET_HWADDRHINT(netif, addr_hint); + err = ip6_output_if(p, src, dest, hl, tc, nexth, netif); + NETIF_SET_HWADDRHINT(netif, NULL); + + return err; +} +#endif /* LWIP_NETIF_HWADDRHINT*/ + +#if LWIP_IPV6_MLD +/** + * Add a hop-by-hop options header with a router alert option and padding. + * + * Used by MLD when sending a Multicast listener report/done message. + * + * @param p the packet to which we will prepend the options header + * @param nexth the next header protocol number (e.g. IP6_NEXTH_ICMP6) + * @param value the value of the router alert option data (e.g. IP6_ROUTER_ALERT_VALUE_MLD) + * @return ERR_OK if hop-by-hop header was added, ERR_* otherwise + */ +err_t +ip6_options_add_hbh_ra(struct pbuf *p, u8_t nexth, u8_t value) +{ + struct ip6_hbh_hdr *hbh_hdr; + + /* Move pointer to make room for hop-by-hop options header. 
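+   * The header built below is exactly 8 bytes: next-header and length
+   * octets, a 4-byte router alert option and a 2-byte PadN option, keeping
+   * the extension header a multiple of 8 octets as IPv6 requires.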
*/ + if (pbuf_header(p, sizeof(struct ip6_hbh_hdr))) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_options: no space for options header\n")); + IP6_STATS_INC(ip6.err); + return ERR_BUF; + } + + hbh_hdr = (struct ip6_hbh_hdr *)p->payload; + + /* Set fields. */ + hbh_hdr->_nexth = nexth; + hbh_hdr->_hlen = 0; + hbh_hdr->_ra_opt_type = IP6_ROUTER_ALERT_OPTION; + hbh_hdr->_ra_opt_dlen = 2; + hbh_hdr->_ra_opt_data = value; + hbh_hdr->_padn_opt_type = IP6_PADN_ALERT_OPTION; + hbh_hdr->_padn_opt_dlen = 0; + + return ERR_OK; +} +#endif /* LWIP_IPV6_MLD */ + +#if IP6_DEBUG +/* Print an IPv6 header by using LWIP_DEBUGF + * @param p an IPv6 packet, p->payload pointing to the IPv6 header + */ +void +ip6_debug_print(struct pbuf *p) +{ + struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload; + + LWIP_DEBUGF(IP6_DEBUG, ("IPv6 header:\n")); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %2"U16_F" | %3"U16_F" | %7"U32_F" | (ver, class, flow)\n", + IP6H_V(ip6hdr), + IP6H_TC(ip6hdr), + IP6H_FL(ip6hdr))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %5"U16_F" | %3"U16_F" | %3"U16_F" | (plen, nexth, hopl)\n", + IP6H_PLEN(ip6hdr), + IP6H_NEXTH(ip6hdr), + IP6H_HOPLIM(ip6hdr))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (src)\n", + IP6_ADDR_BLOCK1(&(ip6hdr->src)), + IP6_ADDR_BLOCK2(&(ip6hdr->src)), + IP6_ADDR_BLOCK3(&(ip6hdr->src)), + IP6_ADDR_BLOCK4(&(ip6hdr->src)))); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n", + IP6_ADDR_BLOCK5(&(ip6hdr->src)), + IP6_ADDR_BLOCK6(&(ip6hdr->src)), + IP6_ADDR_BLOCK7(&(ip6hdr->src)), + IP6_ADDR_BLOCK8(&(ip6hdr->src)))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (dest)\n", + IP6_ADDR_BLOCK1(&(ip6hdr->dest)), + IP6_ADDR_BLOCK2(&(ip6hdr->dest)), + IP6_ADDR_BLOCK3(&(ip6hdr->dest)), + IP6_ADDR_BLOCK4(&(ip6hdr->dest)))); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n", + IP6_ADDR_BLOCK5(&(ip6hdr->dest)), + IP6_ADDR_BLOCK6(&(ip6hdr->dest)), + IP6_ADDR_BLOCK7(&(ip6hdr->dest)), + IP6_ADDR_BLOCK8(&(ip6hdr->dest)))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); +} +#endif /* IP6_DEBUG */ + +#endif /* LWIP_IPV6 */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c new file mode 100644 index 000000000..2c685b2e5 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c @@ -0,0 +1,292 @@ +/** + * @file + * + * IPv6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * Functions for handling IPv6 addresses. + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/def.h" + +/* used by IP6_ADDR_ANY(6) in ip6_addr.h */ +const ip_addr_t ip6_addr_any = IPADDR6_INIT(0ul, 0ul, 0ul, 0ul); + +#ifndef isprint +#define in_range(c, lo, up) ((u8_t)c >= lo && (u8_t)c <= up) +#define isprint(c) in_range(c, 0x20, 0x7f) +#define isdigit(c) in_range(c, '0', '9') +#define isxdigit(c) (isdigit(c) || in_range(c, 'a', 'f') || in_range(c, 'A', 'F')) +#define islower(c) in_range(c, 'a', 'z') +#define isspace(c) (c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t' || c == '\v') +#define xchar(i) ((i) < 10 ? '0' + (i) : 'A' + (i) - 10) +#endif + +/** + * Check whether "cp" is a valid ascii representation + * of an IPv6 address and convert to a binary address. + * Returns 1 if the address is valid, 0 if not. + * + * @param cp IPv6 address in ascii representation (e.g. "FF01::1") + * @param addr pointer to which to save the ip address in network order + * @return 1 if cp could be converted to addr, 0 on failure + */ +int +ip6addr_aton(const char *cp, ip6_addr_t *addr) +{ + u32_t addr_index, zero_blocks, current_block_index, current_block_value; + const char *s; + + /* Count the number of colons, to count the number of blocks in a "::" sequence + zero_blocks may be 1 even if there are no :: sequences */ + zero_blocks = 8; + for (s = cp; *s != 0; s++) { + if (*s == ':') { + zero_blocks--; + } else if (!isxdigit(*s)) { + break; + } + } + + /* parse each block */ + addr_index = 0; + current_block_index = 0; + current_block_value = 0; + for (s = cp; *s != 0; s++) { + if (*s == ':') { + if (addr) { + if (current_block_index & 0x1) { + addr->addr[addr_index++] |= current_block_value; + } + else { + addr->addr[addr_index] = current_block_value << 16; + } + } + current_block_index++; + current_block_value = 0; + if (current_block_index > 7) { + /* address too long! */ + return 0; + } + if (s[1] == ':') { + if (s[2] == ':') { + /* invalid format: three successive colons */ + return 0; + } + s++; + /* "::" found, set zeros */ + while (zero_blocks > 0) { + zero_blocks--; + if (current_block_index & 0x1) { + addr_index++; + } else { + if (addr) { + addr->addr[addr_index] = 0; + } + } + current_block_index++; + if (current_block_index > 7) { + /* address too long! 
*/ + return 0; + } + } + } + } else if (isxdigit(*s)) { + /* add current digit */ + current_block_value = (current_block_value << 4) + + (isdigit(*s) ? (u32_t)(*s - '0') : + (u32_t)(10 + (islower(*s) ? *s - 'a' : *s - 'A'))); + } else { + /* unexpected digit, space? CRLF? */ + break; + } + } + + if (addr) { + if (current_block_index & 0x1) { + addr->addr[addr_index++] |= current_block_value; + } + else { + addr->addr[addr_index] = current_block_value << 16; + } + } + + /* convert to network byte order. */ + if (addr) { + for (addr_index = 0; addr_index < 4; addr_index++) { + addr->addr[addr_index] = lwip_htonl(addr->addr[addr_index]); + } + } + + if (current_block_index != 7) { + return 0; + } + + return 1; +} + +/** + * Convert numeric IPv6 address into ASCII representation. + * returns ptr to static buffer; not reentrant! + * + * @param addr ip6 address in network order to convert + * @return pointer to a global static (!) buffer that holds the ASCII + * representation of addr + */ +char * +ip6addr_ntoa(const ip6_addr_t *addr) +{ + static char str[40]; + return ip6addr_ntoa_r(addr, str, 40); +} + +/** + * Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip6 address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char * +ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen) +{ + u32_t current_block_index, current_block_value, next_block_value; + s32_t i; + u8_t zero_flag, empty_block_flag; + + i = 0; + empty_block_flag = 0; /* used to indicate a zero chain for "::' */ + + for (current_block_index = 0; current_block_index < 8; current_block_index++) { + /* get the current 16-bit block */ + current_block_value = lwip_htonl(addr->addr[current_block_index >> 1]); + if ((current_block_index & 0x1) == 0) { + current_block_value = current_block_value >> 16; + } + current_block_value &= 0xffff; + + /* Check for empty block. */ + if (current_block_value == 0) { + if (current_block_index == 7 && empty_block_flag == 1) { + /* special case, we must render a ':' for the last block. */ + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + break; + } + if (empty_block_flag == 0) { + /* generate empty block "::", but only if more than one contiguous zero block, + * according to current formatting suggestions RFC 5952. */ + next_block_value = lwip_htonl(addr->addr[(current_block_index + 1) >> 1]); + if ((current_block_index & 0x1) == 0x01) { + next_block_value = next_block_value >> 16; + } + next_block_value &= 0xffff; + if (next_block_value == 0) { + empty_block_flag = 1; + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + continue; /* move on to next block. */ + } + } else if (empty_block_flag == 1) { + /* move on to next block. */ + continue; + } + } else if (empty_block_flag == 1) { + /* Set this flag value so we don't produce multiple empty blocks. 
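+       * RFC 5952 permits "::" at most once per address, so any later runs
+       * of zero blocks are printed in full.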
*/
+      empty_block_flag = 2;
+    }
+
+    if (current_block_index > 0) {
+      buf[i++] = ':';
+      if (i >= buflen) {
+        return NULL;
+      }
+    }
+
+    if ((current_block_value & 0xf000) == 0) {
+      zero_flag = 1;
+    } else {
+      buf[i++] = xchar(((current_block_value & 0xf000) >> 12));
+      zero_flag = 0;
+      if (i >= buflen) {
+        return NULL;
+      }
+    }
+
+    if (((current_block_value & 0xf00) == 0) && (zero_flag)) {
+      /* do nothing */
+    } else {
+      buf[i++] = xchar(((current_block_value & 0xf00) >> 8));
+      zero_flag = 0;
+      if (i >= buflen) {
+        return NULL;
+      }
+    }
+
+    if (((current_block_value & 0xf0) == 0) && (zero_flag)) {
+      /* do nothing */
+    }
+    else {
+      buf[i++] = xchar(((current_block_value & 0xf0) >> 4));
+      zero_flag = 0;
+      if (i >= buflen) {
+        return NULL;
+      }
+    }
+
+    buf[i++] = xchar((current_block_value & 0xf));
+    if (i >= buflen) {
+      return NULL;
+    }
+  }
+
+  buf[i] = 0;
+
+  return buf;
+}
+
+#endif /* LWIP_IPV6 */
diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c
new file mode 100644
index 000000000..e20f7cfeb
--- /dev/null
+++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c
@@ -0,0 +1,805 @@
+/**
+ * @file
+ *
+ * IPv6 fragmentation and reassembly.
+ */
+
+/*
+ * Copyright (c) 2010 Inico Technologies Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Ivan Delamer <delamer@inicotech.com>
+ *
+ *
+ * Please coordinate changes and requests with Ivan Delamer
+ * <delamer@inicotech.com>
+ */
+
+#include "lwip/opt.h"
+#include "lwip/ip6_frag.h"
+#include "lwip/ip6.h"
+#include "lwip/icmp6.h"
+#include "lwip/nd6.h"
+#include "lwip/ip.h"
+
+#include "lwip/pbuf.h"
+#include "lwip/memp.h"
+#include "lwip/stats.h"
+
+#include <string.h>
+
+#if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */
+
+
+/** Setting this to 0, you can turn off checking the fragments for overlapping
+ * regions. The code gets a little smaller. Only use this if you know that
+ * overlapping won't occur on your network!
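+ * (When the check is enabled, an offending fragment is dropped; RFC 5722
+ * goes further and requires discarding the whole datagram under
+ * reassembly.)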
*/ +#ifndef IP_REASS_CHECK_OVERLAP +#define IP_REASS_CHECK_OVERLAP 1 +#endif /* IP_REASS_CHECK_OVERLAP */ + +/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is + * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller. + * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA + * is set to 1, so one datagram can be reassembled at a time, only. */ +#ifndef IP_REASS_FREE_OLDEST +#define IP_REASS_FREE_OLDEST 1 +#endif /* IP_REASS_FREE_OLDEST */ + +#if IPV6_FRAG_COPYHEADER +#define IPV6_FRAG_REQROOM ((s16_t)(sizeof(struct ip6_reass_helper) - IP6_FRAG_HLEN)) +#endif + +#define IP_REASS_FLAG_LASTFRAG 0x01 + +/** This is a helper struct which holds the starting + * offset and the ending offset of this fragment to + * easily chain the fragments. + * It has the same packing requirements as the IPv6 header, since it replaces + * the Fragment Header in memory in incoming fragments to keep + * track of the various fragments. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_reass_helper { + PACK_STRUCT_FIELD(struct pbuf *next_pbuf); + PACK_STRUCT_FIELD(u16_t start); + PACK_STRUCT_FIELD(u16_t end); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* static variables */ +static struct ip6_reassdata *reassdatagrams; +static u16_t ip6_reass_pbufcount; + +/* Forward declarations. */ +static void ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr); +#if IP_REASS_FREE_OLDEST +static void ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed); +#endif /* IP_REASS_FREE_OLDEST */ + +void +ip6_reass_tmr(void) +{ + struct ip6_reassdata *r, *tmp; + +#if !IPV6_FRAG_COPYHEADER + LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1", + sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN); +#endif /* !IPV6_FRAG_COPYHEADER */ + + r = reassdatagrams; + while (r != NULL) { + /* Decrement the timer. Once it reaches 0, + * clean up the incomplete fragment assembly */ + if (r->timer > 0) { + r->timer--; + r = r->next; + } else { + /* reassembly timed out */ + tmp = r; + /* get the next pointer before freeing */ + r = r->next; + /* free the helper struct and all enqueued pbufs */ + ip6_reass_free_complete_datagram(tmp); + } + } +} + +/** + * Free a datagram (struct ip6_reassdata) and all its pbufs. + * Updates the total count of enqueued pbufs (ip6_reass_pbufcount), + * sends an ICMP time exceeded packet. + * + * @param ipr datagram to free + */ +static void +ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr) +{ + struct ip6_reassdata *prev; + u16_t pbufs_freed = 0; + u16_t clen; + struct pbuf *p; + struct ip6_reass_helper *iprh; + +#if LWIP_ICMP6 + iprh = (struct ip6_reass_helper *)ipr->p->payload; + if (iprh->start == 0) { + /* The first fragment was received, send ICMP time exceeded. */ + /* First, de-queue the first pbuf from r->p. */ + p = ipr->p; + ipr->p = iprh->next_pbuf; + /* Then, move back to the original ipv6 header (we are now pointing to Fragment header). + This cannot fail since we already checked when receiving this fragment. 
*/ + if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)IPV6_FRAG_HDRREF(ipr->iphdr)))) { + LWIP_ASSERT("ip6_reass_free: moving p->payload to ip6 header failed\n", 0); + } + else { + icmp6_time_exceeded(p, ICMP6_TE_FRAG); + } + clen = pbuf_clen(p); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed += clen; + pbuf_free(p); + } +#endif /* LWIP_ICMP6 */ + + /* First, free all received pbufs. The individual pbufs need to be released + separately as they have not yet been chained */ + p = ipr->p; + while (p != NULL) { + struct pbuf *pcur; + iprh = (struct ip6_reass_helper *)p->payload; + pcur = p; + /* get the next pointer before freeing */ + p = iprh->next_pbuf; + clen = pbuf_clen(pcur); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed += clen; + pbuf_free(pcur); + } + + /* Then, unchain the struct ip6_reassdata from the list and free it. */ + if (ipr == reassdatagrams) { + reassdatagrams = ipr->next; + } else { + prev = reassdatagrams; + while (prev != NULL) { + if (prev->next == ipr) { + break; + } + prev = prev->next; + } + if (prev != NULL) { + prev->next = ipr->next; + } + } + memp_free(MEMP_IP6_REASSDATA, ipr); + + /* Finally, update number of pbufs in reassembly queue */ + LWIP_ASSERT("ip_reass_pbufcount >= clen", ip6_reass_pbufcount >= pbufs_freed); + ip6_reass_pbufcount -= pbufs_freed; +} + +#if IP_REASS_FREE_OLDEST +/** + * Free the oldest datagram to make room for enqueueing new fragments. + * The datagram ipr is not freed! + * + * @param ipr ip6_reassdata for the current fragment + * @param pbufs_needed number of pbufs needed to enqueue + * (used for freeing other datagrams if not enough space) + */ +static void +ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed) +{ + struct ip6_reassdata *r, *oldest; + + /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs, + * but don't free the current datagram! */ + do { + r = oldest = reassdatagrams; + while (r != NULL) { + if (r != ipr) { + if (r->timer <= oldest->timer) { + /* older than the previous oldest */ + oldest = r; + } + } + r = r->next; + } + if (oldest == ipr) { + /* nothing to free, ipr is the only element on the list */ + return; + } + if (oldest != NULL) { + ip6_reass_free_complete_datagram(oldest); + } + } while (((ip6_reass_pbufcount + pbufs_needed) > IP_REASS_MAX_PBUFS) && (reassdatagrams != NULL)); +} +#endif /* IP_REASS_FREE_OLDEST */ + +/** + * Reassembles incoming IPv6 fragments into an IPv6 datagram. + * + * @param p points to the IPv6 Fragment Header + * @return NULL if reassembly is incomplete, pbuf pointing to + * IPv6 Header if reassembly is complete + */ +struct pbuf * +ip6_reass(struct pbuf *p) +{ + struct ip6_reassdata *ipr, *ipr_prev; + struct ip6_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL; + struct ip6_frag_hdr *frag_hdr; + u16_t offset, len; + u16_t clen; + u8_t valid = 1; + struct pbuf *q; + + IP6_FRAG_STATS_INC(ip6_frag.recv); + + if ((const void*)ip6_current_header() != ((u8_t*)p->payload) - IP6_HLEN) { + /* ip6_frag_hdr must be in the first pbuf, not chained */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + IP6_FRAG_STATS_INC(ip6_frag.drop); + goto nullreturn; + } + + frag_hdr = (struct ip6_frag_hdr *) p->payload; + + clen = pbuf_clen(p); + + offset = lwip_ntohs(frag_hdr->_fragment_offset); + + /* Calculate fragment length from IPv6 payload length. + * Adjust for headers before Fragment Header. + * And finally adjust by Fragment Header length. 
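+   * That is: len = plen - (extension header bytes between the fixed IPv6
+   * header and the Fragment Header) - IP6_FRAG_HLEN (8 bytes).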
*/ + len = lwip_ntohs(ip6_current_header()->_plen); + len -= (u16_t)(((u8_t*)p->payload - (const u8_t*)ip6_current_header()) - IP6_HLEN); + len -= IP6_FRAG_HLEN; + + /* Look for the datagram the fragment belongs to in the current datagram queue, + * remembering the previous in the queue for later dequeueing. */ + for (ipr = reassdatagrams, ipr_prev = NULL; ipr != NULL; ipr = ipr->next) { + /* Check if the incoming fragment matches the one currently present + in the reassembly buffer. If so, we proceed with copying the + fragment into the buffer. */ + if ((frag_hdr->_identification == ipr->identification) && + ip6_addr_cmp(ip6_current_src_addr(), &(IPV6_FRAG_HDRREF(ipr->iphdr)->src)) && + ip6_addr_cmp(ip6_current_dest_addr(), &(IPV6_FRAG_HDRREF(ipr->iphdr)->dest))) { + IP6_FRAG_STATS_INC(ip6_frag.cachehit); + break; + } + ipr_prev = ipr; + } + + if (ipr == NULL) { + /* Enqueue a new datagram into the datagram queue */ + ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA); + if (ipr == NULL) { +#if IP_REASS_FREE_OLDEST + /* Make room and try again. */ + ip6_reass_remove_oldest_datagram(ipr, clen); + ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA); + if (ipr != NULL) { + /* re-search ipr_prev since it might have been removed */ + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } else +#endif /* IP_REASS_FREE_OLDEST */ + { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + IP6_FRAG_STATS_INC(ip6_frag.drop); + goto nullreturn; + } + } + + memset(ipr, 0, sizeof(struct ip6_reassdata)); + ipr->timer = IP_REASS_MAXAGE; + + /* enqueue the new structure to the front of the list */ + ipr->next = reassdatagrams; + reassdatagrams = ipr; + + /* Use the current IPv6 header for src/dest address reference. + * Eventually, we will replace it when we get the first fragment + * (it might be this one, in any case, it is done later). */ +#if IPV6_FRAG_COPYHEADER + MEMCPY(&ipr->iphdr, ip6_current_header(), IP6_HLEN); +#else /* IPV6_FRAG_COPYHEADER */ + /* need to use the none-const pointer here: */ + ipr->iphdr = ip_data.current_ip6_header; +#endif /* IPV6_FRAG_COPYHEADER */ + + /* copy the fragmented packet id. */ + ipr->identification = frag_hdr->_identification; + + /* copy the nexth field */ + ipr->nexth = frag_hdr->_nexth; + } + + /* Check if we are allowed to enqueue more datagrams. */ + if ((ip6_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) { +#if IP_REASS_FREE_OLDEST + ip6_reass_remove_oldest_datagram(ipr, clen); + if ((ip6_reass_pbufcount + clen) <= IP_REASS_MAX_PBUFS) { + /* re-search ipr_prev since it might have been removed */ + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } else +#endif /* IP_REASS_FREE_OLDEST */ + { + /* @todo: send ICMPv6 time exceeded here? */ + /* drop this pbuf */ + IP6_FRAG_STATS_INC(ip6_frag.memerr); + IP6_FRAG_STATS_INC(ip6_frag.drop); + goto nullreturn; + } + } + + /* Overwrite Fragment Header with our own helper struct. */ +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + /* Make room for struct ip6_reass_helper (only required if sizeof(void*) > 4). + This cannot fail since we already checked when receiving this fragment. 
*/ + u8_t hdrerr = pbuf_header_force(p, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#else /* IPV6_FRAG_COPYHEADER */ + LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1", + sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN); +#endif /* IPV6_FRAG_COPYHEADER */ + iprh = (struct ip6_reass_helper *)p->payload; + iprh->next_pbuf = NULL; + iprh->start = (offset & IP6_FRAG_OFFSET_MASK); + iprh->end = (offset & IP6_FRAG_OFFSET_MASK) + len; + + /* find the right place to insert this pbuf */ + /* Iterate through until we either get to the end of the list (append), + * or we find on with a larger offset (insert). */ + for (q = ipr->p; q != NULL;) { + iprh_tmp = (struct ip6_reass_helper*)q->payload; + if (iprh->start < iprh_tmp->start) { +#if IP_REASS_CHECK_OVERLAP + if (iprh->end > iprh_tmp->start) { + /* fragment overlaps with following, throw away */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + IP6_FRAG_STATS_INC(ip6_frag.drop); + goto nullreturn; + } + if (iprh_prev != NULL) { + if (iprh->start < iprh_prev->end) { + /* fragment overlaps with previous, throw away */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + IP6_FRAG_STATS_INC(ip6_frag.drop); + goto nullreturn; + } + } +#endif /* IP_REASS_CHECK_OVERLAP */ + /* the new pbuf should be inserted before this */ + iprh->next_pbuf = q; + if (iprh_prev != NULL) { + /* not the fragment with the lowest offset */ + iprh_prev->next_pbuf = p; + } else { + /* fragment with the lowest offset */ + ipr->p = p; + } + break; + } else if (iprh->start == iprh_tmp->start) { + /* received the same datagram twice: no need to keep the datagram */ + IP6_FRAG_STATS_INC(ip6_frag.drop); + goto nullreturn; +#if IP_REASS_CHECK_OVERLAP + } else if (iprh->start < iprh_tmp->end) { + /* overlap: no need to keep the new datagram */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + IP6_FRAG_STATS_INC(ip6_frag.drop); + goto nullreturn; +#endif /* IP_REASS_CHECK_OVERLAP */ + } else { + /* Check if the fragments received so far have no gaps. */ + if (iprh_prev != NULL) { + if (iprh_prev->end != iprh_tmp->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } + } + q = iprh_tmp->next_pbuf; + iprh_prev = iprh_tmp; + } + + /* If q is NULL, then we made it to the end of the list. Determine what to do now */ + if (q == NULL) { + if (iprh_prev != NULL) { + /* this is (for now), the fragment with the highest offset: + * chain it to the last fragment */ +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start); +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = p; + if (iprh_prev->end != iprh->start) { + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("no previous fragment, this must be the first fragment!", + ipr->p == NULL); +#endif /* IP_REASS_CHECK_OVERLAP */ + /* this is the first fragment we ever received for this ip datagram */ + ipr->p = p; + } + } + + /* Track the current number of pbufs current 'in-flight', in order to limit + the number of fragments that may be enqueued at any one time */ + ip6_reass_pbufcount += clen; + + /* Remember IPv6 header if this is the first fragment. 
*/ + if (iprh->start == 0) { +#if IPV6_FRAG_COPYHEADER + if (iprh->next_pbuf != NULL) { + MEMCPY(&ipr->iphdr, ip6_current_header(), IP6_HLEN); + } +#else /* IPV6_FRAG_COPYHEADER */ + /* need to use the none-const pointer here: */ + ipr->iphdr = ip_data.current_ip6_header; +#endif /* IPV6_FRAG_COPYHEADER */ + } + + /* If this is the last fragment, calculate total packet length. */ + if ((offset & IP6_FRAG_MORE_FLAG) == 0) { + ipr->datagram_len = iprh->end; + } + + /* Additional validity tests: we have received first and last fragment. */ + iprh_tmp = (struct ip6_reass_helper*)ipr->p->payload; + if (iprh_tmp->start != 0) { + valid = 0; + } + if (ipr->datagram_len == 0) { + valid = 0; + } + + /* Final validity test: no gaps between current and last fragment. */ + iprh_prev = iprh; + q = iprh->next_pbuf; + while ((q != NULL) && valid) { + iprh = (struct ip6_reass_helper*)q->payload; + if (iprh_prev->end != iprh->start) { + valid = 0; + break; + } + iprh_prev = iprh; + q = iprh->next_pbuf; + } + + if (valid) { + /* All fragments have been received */ + struct ip6_hdr* iphdr_ptr; + + /* chain together the pbufs contained within the ip6_reassdata list. */ + iprh = (struct ip6_reass_helper*) ipr->p->payload; + while (iprh != NULL) { + struct pbuf* next_pbuf = iprh->next_pbuf; + if (next_pbuf != NULL) { + /* Save next helper struct (will be hidden in next step). */ + iprh_tmp = (struct ip6_reass_helper*)next_pbuf->payload; + + /* hide the fragment header for every succeeding fragment */ + pbuf_header(next_pbuf, -IP6_FRAG_HLEN); +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + /* hide the extra bytes borrowed from ip6_hdr for struct ip6_reass_helper */ + u8_t hdrerr = pbuf_header(next_pbuf, -(s16_t)(IPV6_FRAG_REQROOM)); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#endif + pbuf_cat(ipr->p, next_pbuf); + } + else { + iprh_tmp = NULL; + } + + iprh = iprh_tmp; + } + +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + /* get back room for struct ip6_reass_helper (only required if sizeof(void*) > 4) */ + u8_t hdrerr = pbuf_header(ipr->p, -(s16_t)(IPV6_FRAG_REQROOM)); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } + iphdr_ptr = (struct ip6_hdr*)((u8_t*)ipr->p->payload - IP6_HLEN); + MEMCPY(iphdr_ptr, &ipr->iphdr, IP6_HLEN); +#else + iphdr_ptr = ipr->iphdr; +#endif + + /* Adjust datagram length by adding header lengths. */ + ipr->datagram_len += (u16_t)(((u8_t*)ipr->p->payload - (u8_t*)iphdr_ptr) + + IP6_FRAG_HLEN + - IP6_HLEN); + + /* Set payload length in ip header. */ + iphdr_ptr->_plen = lwip_htons(ipr->datagram_len); + + /* Get the first pbuf. */ + p = ipr->p; + + /* Restore Fragment Header in first pbuf. Mark as "single fragment" + * packet. Restore nexth. */ + frag_hdr = (struct ip6_frag_hdr *) p->payload; + frag_hdr->_nexth = ipr->nexth; + frag_hdr->reserved = 0; + frag_hdr->_fragment_offset = 0; + frag_hdr->_identification = 0; + + /* release the sources allocate for the fragment queue entry */ + if (reassdatagrams == ipr) { + /* it was the first in the list */ + reassdatagrams = ipr->next; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", ipr_prev != NULL); + ipr_prev->next = ipr->next; + } + memp_free(MEMP_IP6_REASSDATA, ipr); + + /* adjust the number of pbufs currently queued for reassembly. 
*/ + ip6_reass_pbufcount -= pbuf_clen(p); + + /* Move pbuf back to IPv6 header. + This cannot fail since we already checked when receiving this fragment. */ + if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)iphdr_ptr))) { + LWIP_ASSERT("ip6_reass: moving p->payload to ip6 header failed\n", 0); + pbuf_free(p); + return NULL; + } + + /* Return the pbuf chain */ + return p; + } + /* the datagram is not (yet?) reassembled completely */ + return NULL; + +nullreturn: + pbuf_free(p); + return NULL; +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ + +#if LWIP_IPV6 && LWIP_IPV6_FRAG + +#if !LWIP_NETIF_TX_SINGLE_PBUF +/** Allocate a new struct pbuf_custom_ref */ +static struct pbuf_custom_ref* +ip6_frag_alloc_pbuf_custom_ref(void) +{ + return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF); +} + +/** Free a struct pbuf_custom_ref */ +static void +ip6_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p) +{ + LWIP_ASSERT("p != NULL", p != NULL); + memp_free(MEMP_FRAG_PBUF, p); +} + +/** Free-callback function to free a 'struct pbuf_custom_ref', called by + * pbuf_free. */ +static void +ip6_frag_free_pbuf_custom(struct pbuf *p) +{ + struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p; + LWIP_ASSERT("pcr != NULL", pcr != NULL); + LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p); + if (pcr->original != NULL) { + pbuf_free(pcr->original); + } + ip6_frag_free_pbuf_custom_ref(pcr); +} +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * Fragment an IPv6 datagram if too large for the netif or path MTU. + * + * Chop the datagram in MTU sized chunks and send them in order + * by pointing PBUF_REFs into p + * + * @param p ipv6 packet to send + * @param netif the netif on which to send + * @param dest destination ipv6 address to which to send + * + * @return ERR_OK if sent successfully, err_t otherwise + */ +err_t +ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest) +{ + struct ip6_hdr *original_ip6hdr; + struct ip6_hdr *ip6hdr; + struct ip6_frag_hdr *frag_hdr; + struct pbuf *rambuf; +#if !LWIP_NETIF_TX_SINGLE_PBUF + struct pbuf *newpbuf; + u16_t newpbuflen = 0; + u16_t left_to_copy; +#endif + static u32_t identification; + u16_t nfb; + u16_t left, cop; + u16_t mtu; + u16_t fragment_offset = 0; + u16_t last; + u16_t poff = IP6_HLEN; + + identification++; + + original_ip6hdr = (struct ip6_hdr *)p->payload; + + mtu = nd6_get_destination_mtu(dest, netif); + + /* @todo we assume there are no options in the unfragmentable part (IPv6 header). */ + left = p->tot_len - IP6_HLEN; + + nfb = (mtu - (IP6_HLEN + IP6_FRAG_HLEN)) & IP6_FRAG_OFFSET_MASK; + + while (left) { + last = (left <= nfb); + + /* Fill this fragment */ + cop = last ? left : nfb; + +#if LWIP_NETIF_TX_SINGLE_PBUF + rambuf = pbuf_alloc(PBUF_IP, cop + IP6_FRAG_HLEN, PBUF_RAM); + if (rambuf == NULL) { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL)); + poff += pbuf_copy_partial(p, (u8_t*)rambuf->payload + IP6_FRAG_HLEN, cop, poff); + /* make room for the IP header */ + if (pbuf_header(rambuf, IP6_HLEN)) { + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + /* fill in the IP header */ + SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN); + ip6hdr = (struct ip6_hdr *)rambuf->payload; + frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN); +#else + /* When not using a static buffer, create a chain of pbufs. 
+ * The first will be a PBUF_RAM holding the link, IPv6, and Fragment header. + * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged, + * but limited to the size of an mtu. + */ + rambuf = pbuf_alloc(PBUF_LINK, IP6_HLEN + IP6_FRAG_HLEN, PBUF_RAM); + if (rambuf == NULL) { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (p->len >= (IP6_HLEN))); + SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN); + ip6hdr = (struct ip6_hdr *)rambuf->payload; + frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN); + + /* Can just adjust p directly for needed offset. */ + p->payload = (u8_t *)p->payload + poff; + p->len -= poff; + p->tot_len -= poff; + + left_to_copy = cop; + while (left_to_copy) { + struct pbuf_custom_ref *pcr; + newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len; + /* Is this pbuf already empty? */ + if (!newpbuflen) { + p = p->next; + continue; + } + pcr = ip6_frag_alloc_pbuf_custom_ref(); + if (pcr == NULL) { + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + /* Mirror this pbuf, although we might not need all of it. */ + newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen); + if (newpbuf == NULL) { + ip6_frag_free_pbuf_custom_ref(pcr); + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + pbuf_ref(p); + pcr->original = p; + pcr->pc.custom_free_function = ip6_frag_free_pbuf_custom; + + /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain + * so that it is removed when pbuf_dechain is later called on rambuf. + */ + pbuf_cat(rambuf, newpbuf); + left_to_copy -= newpbuflen; + if (left_to_copy) { + p = p->next; + } + } + poff = newpbuflen; +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + /* Set headers */ + frag_hdr->_nexth = original_ip6hdr->_nexth; + frag_hdr->reserved = 0; + frag_hdr->_fragment_offset = lwip_htons((fragment_offset & IP6_FRAG_OFFSET_MASK) | (last ? 0 : IP6_FRAG_MORE_FLAG)); + frag_hdr->_identification = lwip_htonl(identification); + + IP6H_NEXTH_SET(ip6hdr, IP6_NEXTH_FRAGMENT); + IP6H_PLEN_SET(ip6hdr, cop + IP6_FRAG_HLEN); + + /* No need for separate header pbuf - we allowed room for it in rambuf + * when allocated. + */ + IP6_FRAG_STATS_INC(ip6_frag.xmit); + netif->output_ip6(netif, rambuf, dest); + + /* Unfortunately we can't reuse rambuf - the hardware may still be + * using the buffer. Instead we free it (and the ensuing chain) and + * recreate it next time round the loop. If we're lucky the hardware + * will have already sent the packet, the free will really free, and + * there will be zero memory penalty. + */ + + pbuf_free(rambuf); + left -= cop; + fragment_offset += cop; + } + return ERR_OK; +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c new file mode 100644 index 000000000..90bcd36d5 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c @@ -0,0 +1,588 @@ +/** + * @file + * Multicast listener discovery + * + * @defgroup mld6 MLD6 + * @ingroup ip6 + * Multicast listener discovery for IPv6. Aims to be compliant with RFC 2710. + * No support for MLDv2.\n + * To be called from TCPIP thread + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Ivan Delamer <delamer@inicotech.com>
+ *
+ *
+ * Please coordinate changes and requests with Ivan Delamer
+ * <delamer@inicotech.com>
+ */
+
+/* Based on igmp.c implementation of igmp v2 protocol */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV6 && LWIP_IPV6_MLD /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/mld6.h"
+#include "lwip/prot/mld6.h"
+#include "lwip/icmp6.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/ip.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/pbuf.h"
+#include "lwip/netif.h"
+#include "lwip/memp.h"
+#include "lwip/stats.h"
+
+#include <string.h>
+
+
+/*
+ * MLD constants
+ */
+#define MLD6_HL                           1
+#define MLD6_JOIN_DELAYING_MEMBER_TMR_MS  (500)
+
+#define MLD6_GROUP_NON_MEMBER             0
+#define MLD6_GROUP_DELAYING_MEMBER        1
+#define MLD6_GROUP_IDLE_MEMBER            2
+
+/* Forward declarations.
*/
+static struct mld_group *mld6_new_group(struct netif *ifp, const ip6_addr_t *addr);
+static err_t mld6_remove_group(struct netif *netif, struct mld_group *group);
+static void mld6_delayed_report(struct mld_group *group, u16_t maxresp);
+static void mld6_send(struct netif *netif, struct mld_group *group, u8_t type);
+
+
+/**
+ * Stop MLD processing on interface
+ *
+ * @param netif network interface on which to stop MLD processing
+ */
+err_t
+mld6_stop(struct netif *netif)
+{
+  struct mld_group *group = netif_mld6_data(netif);
+
+  netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, NULL);
+
+  while (group != NULL) {
+    struct mld_group *next = group->next; /* avoid use-after-free below */
+
+    /* disable the group at the MAC level */
+    if (netif->mld_mac_filter != NULL) {
+      netif->mld_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER);
+    }
+
+    /* free group */
+    memp_free(MEMP_MLD6_GROUP, group);
+
+    /* move to "next" */
+    group = next;
+  }
+  return ERR_OK;
+}
+
+/**
+ * Report MLD memberships for this interface
+ *
+ * @param netif network interface on which to report MLD memberships
+ */
+void
+mld6_report_groups(struct netif *netif)
+{
+  struct mld_group *group = netif_mld6_data(netif);
+
+  while (group != NULL) {
+    mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS);
+    group = group->next;
+  }
+}
+
+/**
+ * Search for a group that is joined on a netif
+ *
+ * @param ifp the network interface for which to look
+ * @param addr the group ipv6 address to search for
+ * @return a struct mld_group* if the group has been found,
+ *         NULL if the group wasn't found.
+ */
+struct mld_group *
+mld6_lookfor_group(struct netif *ifp, const ip6_addr_t *addr)
+{
+  struct mld_group *group = netif_mld6_data(ifp);
+
+  while (group != NULL) {
+    if (ip6_addr_cmp(&(group->group_address), addr)) {
+      return group;
+    }
+    group = group->next;
+  }
+
+  return NULL;
+}
+
+
+/**
+ * create a new group
+ *
+ * @param ifp the network interface for which to create
+ * @param addr the new group ipv6
+ * @return a struct mld_group*,
+ *         NULL on memory error.
+ */
+static struct mld_group *
+mld6_new_group(struct netif *ifp, const ip6_addr_t *addr)
+{
+  struct mld_group *group;
+
+  group = (struct mld_group *)memp_malloc(MEMP_MLD6_GROUP);
+  if (group != NULL) {
+    ip6_addr_set(&(group->group_address), addr);
+    group->timer              = 0; /* Not running */
+    group->group_state        = MLD6_GROUP_IDLE_MEMBER;
+    group->last_reporter_flag = 0;
+    group->use                = 0;
+    group->next               = netif_mld6_data(ifp);
+
+    netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group);
+  }
+
+  return group;
+}
+
+/**
+ * Remove a group from the mld_group_list, but do not free it yet
+ *
+ * @param group the group to remove
+ * @return ERR_OK if group was removed from the list, an err_t otherwise
+ */
+static err_t
+mld6_remove_group(struct netif *netif, struct mld_group *group)
+{
+  err_t err = ERR_OK;
+
+  /* Is it the first group? */
+  if (netif_mld6_data(netif) == group) {
+    netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group->next);
+  } else {
+    /* look for group further down the list */
+    struct mld_group *tmpGroup;
+    for (tmpGroup = netif_mld6_data(netif); tmpGroup != NULL; tmpGroup = tmpGroup->next) {
+      if (tmpGroup->next == group) {
+        tmpGroup->next = group->next;
+        break;
+      }
+    }
+    /* Group not found in the list */
+    if (tmpGroup == NULL) {
+      err = ERR_ARG;
+    }
+  }
+
+  return err;
+}
+
+
+/**
+ * Process an input MLD message. Called by icmp6_input.
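+ * Queries schedule a delayed report for the matching group(s), reports
+ * from other listeners cancel our own pending report, and "done"
+ * messages are ignored (the router will query again).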
+ * + * @param p the mld packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +mld6_input(struct pbuf *p, struct netif *inp) +{ + struct mld_header *mld_hdr; + struct mld_group *group; + + MLD6_STATS_INC(mld6.recv); + + /* Check that mld header fits in packet. */ + if (p->len < sizeof(struct mld_header)) { + /* @todo debug message */ + pbuf_free(p); + MLD6_STATS_INC(mld6.lenerr); + MLD6_STATS_INC(mld6.drop); + return; + } + + mld_hdr = (struct mld_header *)p->payload; + + switch (mld_hdr->type) { + case ICMP6_TYPE_MLQ: /* Multicast listener query. */ + /* Is it a general query? */ + if (ip6_addr_isallnodes_linklocal(ip6_current_dest_addr()) && + ip6_addr_isany(&(mld_hdr->multicast_address))) { + MLD6_STATS_INC(mld6.rx_general); + /* Report all groups, except all nodes group, and if-local groups. */ + group = netif_mld6_data(inp); + while (group != NULL) { + if ((!(ip6_addr_ismulticast_iflocal(&(group->group_address)))) && + (!(ip6_addr_isallnodes_linklocal(&(group->group_address))))) { + mld6_delayed_report(group, mld_hdr->max_resp_delay); + } + group = group->next; + } + } else { + /* Have we joined this group? + * We use IP6 destination address to have a memory aligned copy. + * mld_hdr->multicast_address should be the same. */ + MLD6_STATS_INC(mld6.rx_group); + group = mld6_lookfor_group(inp, ip6_current_dest_addr()); + if (group != NULL) { + /* Schedule a report. */ + mld6_delayed_report(group, mld_hdr->max_resp_delay); + } + } + break; /* ICMP6_TYPE_MLQ */ + case ICMP6_TYPE_MLR: /* Multicast listener report. */ + /* Have we joined this group? + * We use IP6 destination address to have a memory aligned copy. + * mld_hdr->multicast_address should be the same. */ + MLD6_STATS_INC(mld6.rx_report); + group = mld6_lookfor_group(inp, ip6_current_dest_addr()); + if (group != NULL) { + /* If we are waiting to report, cancel it. */ + if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) { + group->timer = 0; /* stopped */ + group->group_state = MLD6_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + } + } + break; /* ICMP6_TYPE_MLR */ + case ICMP6_TYPE_MLD: /* Multicast listener done. */ + /* Do nothing, router will query us. */ + break; /* ICMP6_TYPE_MLD */ + default: + MLD6_STATS_INC(mld6.proterr); + MLD6_STATS_INC(mld6.drop); + break; + } + + pbuf_free(p); +} + +/** + * @ingroup mld6 + * Join a group on a network interface. + * + * @param srcaddr ipv6 address of the network interface which should + * join a new group. If IP6_ADDR_ANY, join on all netifs + * @param groupaddr the ipv6 address of the group to join + * @return ERR_OK if group was joined on the netif(s), an err_t otherwise + */ +err_t +mld6_joingroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + /* loop through netif's */ + netif = netif_list; + while (netif != NULL) { + /* Should we join this interface ? */ + if (ip6_addr_isany(srcaddr) || + netif_get_ip6_addr_match(netif, srcaddr) >= 0) { + err = mld6_joingroup_netif(netif, groupaddr); + if (err != ERR_OK) { + return err; + } + } + + /* proceed to next network interface */ + netif = netif->next; + } + + return err; +} + +/** + * @ingroup mld6 + * Join a group on a network interface. + * + * @param netif the network interface which should join a new group. 
+ * @param groupaddr the ipv6 address of the group to join + * @return ERR_OK if group was joined on the netif, an err_t otherwise + */ +err_t +mld6_joingroup_netif(struct netif *netif, const ip6_addr_t *groupaddr) +{ + struct mld_group *group; + + /* find group or create a new one if not found */ + group = mld6_lookfor_group(netif, groupaddr); + + if (group == NULL) { + /* Joining a new group. Create a new group entry. */ + group = mld6_new_group(netif, groupaddr); + if (group == NULL) { + return ERR_MEM; + } + + /* Activate this address on the MAC layer. */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER); + } + + /* Report our membership. */ + MLD6_STATS_INC(mld6.tx_report); + mld6_send(netif, group, ICMP6_TYPE_MLR); + mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS); + } + + /* Increment group use */ + group->use++; + return ERR_OK; +} + +/** + * @ingroup mld6 + * Leave a group on a network interface. + * + * @param srcaddr ipv6 address of the network interface which should + * leave the group. If IP6_ISANY, leave on all netifs + * @param groupaddr the ipv6 address of the group to leave + * @return ERR_OK if group was left on the netif(s), an err_t otherwise + */ +err_t +mld6_leavegroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + /* loop through netif's */ + netif = netif_list; + while (netif != NULL) { + /* Should we leave this interface ? */ + if (ip6_addr_isany(srcaddr) || + netif_get_ip6_addr_match(netif, srcaddr) >= 0) { + err_t res = mld6_leavegroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Store this result if we have not yet gotten a success */ + err = res; + } + } + /* proceed to next network interface */ + netif = netif->next; + } + + return err; +} + +/** + * @ingroup mld6 + * Leave a group on a network interface. + * + * @param netif the network interface which should leave the group. + * @param groupaddr the ipv6 address of the group to leave + * @return ERR_OK if group was left on the netif, an err_t otherwise + */ +err_t +mld6_leavegroup_netif(struct netif *netif, const ip6_addr_t *groupaddr) +{ + struct mld_group *group; + + /* find group */ + group = mld6_lookfor_group(netif, groupaddr); + + if (group != NULL) { + /* Leave if there is no other use of the group */ + if (group->use <= 1) { + /* Remove the group from the list */ + mld6_remove_group(netif, group); + + /* If we are the last reporter for this group */ + if (group->last_reporter_flag) { + MLD6_STATS_INC(mld6.tx_leave); + mld6_send(netif, group, ICMP6_TYPE_MLD); + } + + /* Disable the group at the MAC level */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER); + } + + /* free group struct */ + memp_free(MEMP_MLD6_GROUP, group); + } else { + /* Decrement group use */ + group->use--; + } + + /* Left group */ + return ERR_OK; + } + + /* Group not found */ + return ERR_VAL; +} + + +/** + * Periodic timer for mld processing. Must be called every + * MLD6_TMR_INTERVAL milliseconds (100). + * + * When a delaying member expires, a membership report is sent. 
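+ *
+ * A minimal wiring sketch (assumes the sys_timeout() API from
+ * lwip/timeouts.h is available; the callback name is illustrative only):
+ *
+ *   static void my_mld6_timer(void *arg)
+ *   {
+ *     LWIP_UNUSED_ARG(arg);
+ *     mld6_tmr();
+ *     sys_timeout(MLD6_TMR_INTERVAL, my_mld6_timer, NULL);
+ *   }
+ *
+ * (Stock lwIP already drives this from its internal cyclic timer list, so
+ * a sketch like this is only needed for custom integrations.)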
+ */ +void +mld6_tmr(void) +{ + struct netif *netif = netif_list; + + while (netif != NULL) { + struct mld_group *group = netif_mld6_data(netif); + + while (group != NULL) { + if (group->timer > 0) { + group->timer--; + if (group->timer == 0) { + /* If the state is MLD6_GROUP_DELAYING_MEMBER then we send a report for this group */ + if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) { + MLD6_STATS_INC(mld6.tx_report); + mld6_send(netif, group, ICMP6_TYPE_MLR); + group->group_state = MLD6_GROUP_IDLE_MEMBER; + } + } + } + group = group->next; + } + netif = netif->next; + } +} + +/** + * Schedule a delayed membership report for a group + * + * @param group the mld_group for which "delaying" membership report + * should be sent + * @param maxresp the max resp delay provided in the query + */ +static void +mld6_delayed_report(struct mld_group *group, u16_t maxresp) +{ + /* Convert maxresp from milliseconds to tmr ticks */ + maxresp = maxresp / MLD6_TMR_INTERVAL; + if (maxresp == 0) { + maxresp = 1; + } + +#ifdef LWIP_RAND + /* Randomize maxresp. (if LWIP_RAND is supported) */ + maxresp = LWIP_RAND() % maxresp; + if (maxresp == 0) { + maxresp = 1; + } +#endif /* LWIP_RAND */ + + /* Apply timer value if no report has been scheduled already. */ + if ((group->group_state == MLD6_GROUP_IDLE_MEMBER) || + ((group->group_state == MLD6_GROUP_DELAYING_MEMBER) && + ((group->timer == 0) || (maxresp < group->timer)))) { + group->timer = maxresp; + group->group_state = MLD6_GROUP_DELAYING_MEMBER; + } +} + +/** + * Send a MLD message (report or done). + * + * An IPv6 hop-by-hop options header with a router alert option + * is prepended. + * + * @param group the group to report or quit + * @param type ICMP6_TYPE_MLR (report) or ICMP6_TYPE_MLD (done) + */ +static void +mld6_send(struct netif *netif, struct mld_group *group, u8_t type) +{ + struct mld_header *mld_hdr; + struct pbuf *p; + const ip6_addr_t *src_addr; + + /* Allocate a packet. Size is MLD header + IPv6 Hop-by-hop options header. */ + p = pbuf_alloc(PBUF_IP, sizeof(struct mld_header) + sizeof(struct ip6_hbh_hdr), PBUF_RAM); + if (p == NULL) { + MLD6_STATS_INC(mld6.memerr); + return; + } + + /* Move to make room for Hop-by-hop options header. */ + if (pbuf_header(p, -IP6_HBH_HLEN)) { + pbuf_free(p); + MLD6_STATS_INC(mld6.lenerr); + return; + } + + /* Select our source address. */ + if (!ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) { + /* This is a special case, when we are performing duplicate address detection. + * We must join the multicast group, but we don't have a valid address yet. */ + src_addr = IP6_ADDR_ANY6; + } else { + /* Use link-local address as source address. */ + src_addr = netif_ip6_addr(netif, 0); + } + + /* MLD message header pointer. */ + mld_hdr = (struct mld_header *)p->payload; + + /* Set fields. */ + mld_hdr->type = type; + mld_hdr->code = 0; + mld_hdr->chksum = 0; + mld_hdr->max_resp_delay = 0; + mld_hdr->reserved = 0; + ip6_addr_set(&(mld_hdr->multicast_address), &(group->group_address)); + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + mld_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, + src_addr, &(group->group_address)); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Add hop-by-hop headers options: router alert with MLD value. */ + ip6_options_add_hbh_ra(p, IP6_NEXTH_ICMP6, IP6_ROUTER_ALERT_VALUE_MLD); + + if (type == ICMP6_TYPE_MLR) { + /* Remember we were the last to report */ + group->last_reporter_flag = 1; + } + + /* Send the packet out. 
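+   * (Hop limit is MLD6_HL (1) and the router alert hop-by-hop header was
+   * prepended above, as RFC 2710 requires for MLD messages.)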
*/ + MLD6_STATS_INC(mld6.xmit); + ip6_output_if(p, (ip6_addr_isany(src_addr)) ? NULL : src_addr, &(group->group_address), + MLD6_HL, 0, IP6_NEXTH_HOPBYHOP, netif); + pbuf_free(p); +} + +#endif /* LWIP_IPV6 */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c new file mode 100644 index 000000000..cb02ad912 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c @@ -0,0 +1,2102 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer <delamer@inicotech.com> + * + * + * Please coordinate changes and requests with Ivan Delamer + * <delamer@inicotech.com> + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/nd6.h" +#include "lwip/priv/nd6_priv.h" +#include "lwip/prot/nd6.h" +#include "lwip/prot/icmp6.h" +#include "lwip/pbuf.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/icmp6.h" +#include "lwip/mld6.h" +#include "lwip/ip.h" +#include "lwip/stats.h" +#include "lwip/dns.h" + +#include <string.h> + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK +#error LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK +#endif + +/* Router tables. */ +struct nd6_neighbor_cache_entry neighbor_cache[LWIP_ND6_NUM_NEIGHBORS]; +struct nd6_destination_cache_entry destination_cache[LWIP_ND6_NUM_DESTINATIONS]; +struct nd6_prefix_list_entry prefix_list[LWIP_ND6_NUM_PREFIXES]; +struct nd6_router_list_entry default_router_list[LWIP_ND6_NUM_ROUTERS]; + +/* Default values, can be updated by a RA message. 
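All four tables above are fixed-size arrays, so their RAM footprint is decided entirely at compile time. On small targets they are tuned through lwipopts.h; the values below match the lwIP defaults and are shown only to illustrate the knobs, not as this project's actual settings:

/* lwipopts.h (illustrative; these match the lwIP defaults) */
#define LWIP_ND6_NUM_NEIGHBORS    10   /* neighbor cache entries */
#define LWIP_ND6_NUM_DESTINATIONS 10   /* destination (route + PMTU) cache */
#define LWIP_ND6_NUM_PREFIXES      5   /* on-link prefixes learned from RAs */
#define LWIP_ND6_NUM_ROUTERS       3   /* default router list */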
*/ +u32_t reachable_time = LWIP_ND6_REACHABLE_TIME; +u32_t retrans_timer = LWIP_ND6_RETRANS_TIMER; /* @todo implement this value in timer */ + +/* Index for cache entries. */ +static u8_t nd6_cached_neighbor_index; +static u8_t nd6_cached_destination_index; + +/* Multicast address holder. */ +static ip6_addr_t multicast_address; + +/* Static buffer to parse RA packet options (size of a prefix option, biggest option) */ +static u8_t nd6_ra_buffer[sizeof(struct prefix_option)]; + +/* Forward declarations. */ +static s8_t nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr); +static s8_t nd6_new_neighbor_cache_entry(void); +static void nd6_free_neighbor_cache_entry(s8_t i); +static s8_t nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr); +static s8_t nd6_new_destination_cache_entry(void); +static s8_t nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif); +static s8_t nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif); +static s8_t nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif); +static s8_t nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif); +static s8_t nd6_get_onlink_prefix(ip6_addr_t *prefix, struct netif *netif); +static s8_t nd6_new_onlink_prefix(ip6_addr_t *prefix, struct netif *netif); +static s8_t nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif); +static err_t nd6_queue_packet(s8_t neighbor_index, struct pbuf *q); + +#define ND6_SEND_FLAG_MULTICAST_DEST 0x01 +#define ND6_SEND_FLAG_ALLNODES_DEST 0x02 +static void nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags); +static void nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags); +static void nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags); +#if LWIP_IPV6_SEND_ROUTER_SOLICIT +static err_t nd6_send_rs(struct netif *netif); +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +#if LWIP_ND6_QUEUEING +static void nd6_free_q(struct nd6_q_entry *q); +#else /* LWIP_ND6_QUEUEING */ +#define nd6_free_q(q) pbuf_free(q) +#endif /* LWIP_ND6_QUEUEING */ +static void nd6_send_q(s8_t i); + + +/** + * Process an incoming neighbor discovery message + * + * @param p the nd packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +nd6_input(struct pbuf *p, struct netif *inp) +{ + u8_t msg_type; + s8_t i; + + ND6_STATS_INC(nd6.recv); + + msg_type = *((u8_t *)p->payload); + switch (msg_type) { + case ICMP6_TYPE_NA: /* Neighbor Advertisement. */ + { + struct na_header *na_hdr; + struct lladdr_option *lladdr_opt; + + /* Check that na header fits in packet. */ + if (p->len < (sizeof(struct na_header))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + na_hdr = (struct na_header *)p->payload; + + /* Unsolicited NA?*/ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + ip6_addr_t target_address; + + /* This is an unsolicited NA. + * link-layer changed? + * part of DAD mechanism? */ + + /* Create an aligned copy. */ + ip6_addr_set(&target_address, &(na_hdr->target_address)); + +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS + /* If the target address matches this netif, it is a DAD response. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + /* We are using a duplicate address. 
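When a conflicting NA like the one handled above arrives, the address is simply switched to IP6_ADDR_INVALID; nothing notifies the application directly. A hedged sketch of checking the outcome from application code (the helper is invented, the state macros are lwIP's):

#include "lwip/netif.h"

/* Returns 1 once DAD has passed, -1 if the address was found duplicated
 * (or never configured), 0 while NS probes are still outstanding. */
static int example_dad_result(struct netif *netif, s8_t addr_idx)
{
  u8_t state = netif_ip6_addr_state(netif, addr_idx);

  if (ip6_addr_istentative(state)) {
    return 0;                          /* still probing */
  }
  return ip6_addr_isvalid(state) ? 1 : -1;
}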
*/ + netif_ip6_addr_set_state(inp, i, IP6_ADDR_INVALID); + +#if LWIP_IPV6_AUTOCONFIG + /* Check to see if this address was autoconfigured. */ + if (!ip6_addr_islinklocal(&target_address)) { + i = nd6_get_onlink_prefix(&target_address, inp); + if (i >= 0) { + /* Mark this prefix as duplicate, so that we don't use it + * to generate this address again. */ + prefix_list[i].flags |= ND6_PREFIX_AUTOCONFIG_ADDRESS_DUPLICATE; + } + } +#endif /* LWIP_IPV6_AUTOCONFIG */ + + pbuf_free(p); + return; + } + } +#endif /* LWIP_IPV6_DUP_DETECT_ATTEMPTS */ + + /* Check that link-layer address option also fits in packet. */ + if (p->len < (sizeof(struct na_header) + 2)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* This is an unsolicited NA, most likely there was a LLADDR change. */ + i = nd6_find_neighbor_cache_entry(&target_address); + if (i >= 0) { + if (na_hdr->flags & ND6_FLAG_OVERRIDE) { + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + } + } + } else { + ip6_addr_t target_address; + + /* This is a solicited NA. + * neighbor address resolution response? + * neighbor unreachability detection response? */ + + /* Create an aligned copy. */ + ip6_addr_set(&target_address, &(na_hdr->target_address)); + + /* Find the cache entry corresponding to this na. */ + i = nd6_find_neighbor_cache_entry(&target_address); + if (i < 0) { + /* We no longer care about this target address. drop it. */ + pbuf_free(p); + return; + } + + /* Update cache entry. */ + if ((na_hdr->flags & ND6_FLAG_OVERRIDE) || + (neighbor_cache[i].state == ND6_INCOMPLETE)) { + /* Check that link-layer address option also fits in packet. */ + if (p->len < (sizeof(struct na_header) + 2)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + } + + neighbor_cache[i].netif = inp; + neighbor_cache[i].state = ND6_REACHABLE; + neighbor_cache[i].counter.reachable_time = reachable_time; + + /* Send queued packets, if any. */ + if (neighbor_cache[i].q != NULL) { + nd6_send_q(i); + } + } + + break; /* ICMP6_TYPE_NA */ + } + case ICMP6_TYPE_NS: /* Neighbor solicitation. */ + { + struct ns_header *ns_hdr; + struct lladdr_option *lladdr_opt; + u8_t accepted; + + /* Check that ns header fits in packet. */ + if (p->len < sizeof(struct ns_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + ns_hdr = (struct ns_header *)p->payload; + + /* Check if there is a link-layer address provided. Only point to it if in this buffer. 
*/ + if (p->len >= (sizeof(struct ns_header) + 2)) { + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header)); + if (p->len < (sizeof(struct ns_header) + (lladdr_opt->length << 3))) { + lladdr_opt = NULL; + } + } else { + lladdr_opt = NULL; + } + + /* Check if the target address is configured on the receiving netif. */ + accepted = 0; + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + if ((ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) || + (ip6_addr_istentative(netif_ip6_addr_state(inp, i)) && + ip6_addr_isany(ip6_current_src_addr()))) && + ip6_addr_cmp(&(ns_hdr->target_address), netif_ip6_addr(inp, i))) { + accepted = 1; + break; + } + } + + /* NS not for us? */ + if (!accepted) { + pbuf_free(p); + return; + } + + /* Check for ANY address in src (DAD algorithm). */ + if (ip6_addr_isany(ip6_current_src_addr())) { + /* Sender is validating this address. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp(&(ns_hdr->target_address), netif_ip6_addr(inp, i))) { + /* Send a NA back so that the sender does not use this address. */ + nd6_send_na(inp, netif_ip6_addr(inp, i), ND6_FLAG_OVERRIDE | ND6_SEND_FLAG_ALLNODES_DEST); + if (ip6_addr_istentative(netif_ip6_addr_state(inp, i))) { + /* We shouldn't use this address either. */ + netif_ip6_addr_set_state(inp, i, IP6_ADDR_INVALID); + } + } + } + } else { + ip6_addr_t target_address; + + /* Sender is trying to resolve our address. */ + /* Verify that they included their own link-layer address. */ + if (lladdr_opt == NULL) { + /* Not a valid message. */ + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + i = nd6_find_neighbor_cache_entry(ip6_current_src_addr()); + if (i>= 0) { + /* We already have a record for the solicitor. */ + if (neighbor_cache[i].state == ND6_INCOMPLETE) { + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + + /* Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } else { + /* Add their IPv6 address and link-layer address to neighbor cache. + * We will need it at least to send a unicast NA message, but most + * likely we will also be communicating with this node soon. */ + i = nd6_new_neighbor_cache_entry(); + if (i < 0) { + /* We couldn't assign a cache entry for this neighbor. + * we won't be able to reply. drop it. */ + pbuf_free(p); + ND6_STATS_INC(nd6.memerr); + return; + } + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + ip6_addr_set(&(neighbor_cache[i].next_hop_address), ip6_current_src_addr()); + + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + + /* Create an aligned copy. */ + ip6_addr_set(&target_address, &(ns_hdr->target_address)); + + /* Send back a NA for us. Allocate the reply pbuf. */ + nd6_send_na(inp, &target_address, ND6_FLAG_SOLICITED | ND6_FLAG_OVERRIDE); + } + + break; /* ICMP6_TYPE_NS */ + } + case ICMP6_TYPE_RA: /* Router Advertisement. */ + { + struct ra_header *ra_hdr; + u8_t *buffer; /* Used to copy options. 
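Everything above is triggered simply by an address being in a tentative state: nd6_tmr() sends the NS probes, and the handlers above either defend the address or withdraw it. A sketch of installing a static address so that it goes through this machinery (the address literal is from the documentation range and purely illustrative):

#include "lwip/netif.h"

static void example_add_static_address(struct netif *netif)
{
  ip6_addr_t addr;
  s8_t idx;

  ip6addr_aton("2001:DB8::5", &addr);
  if (netif_add_ip6_address(netif, &addr, &idx) == ERR_OK) {
    /* The address starts out IP6_ADDR_TENTATIVE; nd6_tmr() will send
     * LWIP_IPV6_DUP_DETECT_ATTEMPTS solicitations before marking it
     * preferred, or invalidate it if a defending NA arrives. */
  }
}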
*/ + u16_t offset; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + /* There can be multiple RDNSS options per RA */ + u8_t rdnss_server_idx = 0; +#endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */ + + /* Check that RA header fits in packet. */ + if (p->len < sizeof(struct ra_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + ra_hdr = (struct ra_header *)p->payload; + + /* If we are sending RS messages, stop. */ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* ensure at least one solicitation is sent */ + if ((inp->rs_count < LWIP_ND6_MAX_MULTICAST_SOLICIT) || + (nd6_send_rs(inp) == ERR_OK)) { + inp->rs_count = 0; + } +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + + /* Get the matching default router entry. */ + i = nd6_get_router(ip6_current_src_addr(), inp); + if (i < 0) { + /* Create a new router entry. */ + i = nd6_new_router(ip6_current_src_addr(), inp); + } + + if (i < 0) { + /* Could not create a new router entry. */ + pbuf_free(p); + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Re-set invalidation timer. */ + default_router_list[i].invalidation_timer = lwip_htons(ra_hdr->router_lifetime); + + /* Re-set default timer values. */ +#if LWIP_ND6_ALLOW_RA_UPDATES + if (ra_hdr->retrans_timer > 0) { + retrans_timer = lwip_htonl(ra_hdr->retrans_timer); + } + if (ra_hdr->reachable_time > 0) { + reachable_time = lwip_htonl(ra_hdr->reachable_time); + } +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ + + /* @todo set default hop limit... */ + /* ra_hdr->current_hop_limit;*/ + + /* Update flags in local entry (incl. preference). */ + default_router_list[i].flags = ra_hdr->flags; + + /* Offset to options. */ + offset = sizeof(struct ra_header); + + /* Process each option. */ + while ((p->tot_len - offset) > 0) { + if (p->len == p->tot_len) { + /* no need to copy from contiguous pbuf */ + buffer = &((u8_t*)p->payload)[offset]; + } else { + buffer = nd6_ra_buffer; + if (pbuf_copy_partial(p, buffer, sizeof(struct prefix_option), offset) != sizeof(struct prefix_option)) { + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + } + if (buffer[1] == 0) { + /* zero-length extension. drop packet */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + switch (buffer[0]) { + case ND6_OPTION_TYPE_SOURCE_LLADDR: + { + struct lladdr_option *lladdr_opt; + lladdr_opt = (struct lladdr_option *)buffer; + if ((default_router_list[i].neighbor_entry != NULL) && + (default_router_list[i].neighbor_entry->state == ND6_INCOMPLETE)) { + SMEMCPY(default_router_list[i].neighbor_entry->lladdr, lladdr_opt->addr, inp->hwaddr_len); + default_router_list[i].neighbor_entry->state = ND6_REACHABLE; + default_router_list[i].neighbor_entry->counter.reachable_time = reachable_time; + } + break; + } + case ND6_OPTION_TYPE_MTU: + { + struct mtu_option *mtu_opt; + mtu_opt = (struct mtu_option *)buffer; + if (lwip_htonl(mtu_opt->mtu) >= 1280) { +#if LWIP_ND6_ALLOW_RA_UPDATES + inp->mtu = (u16_t)lwip_htonl(mtu_opt->mtu); +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ + } + break; + } + case ND6_OPTION_TYPE_PREFIX_INFO: + { + struct prefix_option *prefix_opt; + prefix_opt = (struct prefix_option *)buffer; + + if ((prefix_opt->flags & ND6_PREFIX_FLAG_ON_LINK) && + (prefix_opt->prefix_length == 64) && + !ip6_addr_islinklocal(&(prefix_opt->prefix))) { + /* Add to on-link prefix list. */ + s8_t prefix; + ip6_addr_t prefix_addr; + + /* Get a memory-aligned copy of the prefix. 
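The loop above is the standard ND option walk: type in byte 0, length in byte 1 counted in 8-octet units, advance by 8 * length, and reject a zero length so the loop is guaranteed to terminate. The same control flow in isolation (illustrative, with explicit bounds checks):

/* Skeleton of the RA option walk, stripped of lwIP specifics. */
static void example_walk_nd_options(const unsigned char *opts, unsigned len)
{
  unsigned offset = 0;

  while (offset + 2 <= len) {            /* need at least type + length */
    unsigned char type = opts[offset];
    unsigned olen = 8u * opts[offset + 1];

    if ((olen == 0) || (offset + olen > len)) {
      return;                            /* malformed: zero-length or truncated */
    }
    switch (type) {
      /* case ND6_OPTION_TYPE_...: handle recognized options here */
      default:
        break;                           /* unknown options are skipped */
    }
    offset += olen;
  }
}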
*/ + ip6_addr_set(&prefix_addr, &(prefix_opt->prefix)); + + /* find cache entry for this prefix. */ + prefix = nd6_get_onlink_prefix(&prefix_addr, inp); + if (prefix < 0) { + /* Create a new cache entry. */ + prefix = nd6_new_onlink_prefix(&prefix_addr, inp); + } + if (prefix >= 0) { + prefix_list[prefix].invalidation_timer = lwip_htonl(prefix_opt->valid_lifetime); + +#if LWIP_IPV6_AUTOCONFIG + if (prefix_opt->flags & ND6_PREFIX_FLAG_AUTONOMOUS) { + /* Mark prefix as autonomous, so that address autoconfiguration can take place. + * Only OR flag, so that we don't over-write other flags (such as ADDRESS_DUPLICATE)*/ + prefix_list[prefix].flags |= ND6_PREFIX_AUTOCONFIG_AUTONOMOUS; + } +#endif /* LWIP_IPV6_AUTOCONFIG */ + } + } + + break; + } + case ND6_OPTION_TYPE_ROUTE_INFO: + /* @todo implement preferred routes. + struct route_option * route_opt; + route_opt = (struct route_option *)buffer;*/ + + break; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + case ND6_OPTION_TYPE_RDNSS: + { + u8_t num, n; + struct rdnss_option * rdnss_opt; + + rdnss_opt = (struct rdnss_option *)buffer; + num = (rdnss_opt->length - 1) / 2; + for (n = 0; (rdnss_server_idx < DNS_MAX_SERVERS) && (n < num); n++) { + ip_addr_t rdnss_address; + + /* Get a memory-aligned copy of the prefix. */ + ip_addr_copy_from_ip6(rdnss_address, rdnss_opt->rdnss_address[n]); + + if (htonl(rdnss_opt->lifetime) > 0) { + /* TODO implement Lifetime > 0 */ + dns_setserver(rdnss_server_idx++, &rdnss_address); + } else { + /* TODO implement DNS removal in dns.c */ + u8_t s; + for (s = 0; s < DNS_MAX_SERVERS; s++) { + const ip_addr_t *addr = dns_getserver(s); + if(ip_addr_cmp(addr, &rdnss_address)) { + dns_setserver(s, NULL); + } + } + } + } + break; + } +#endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */ + default: + /* Unrecognized option, abort. */ + ND6_STATS_INC(nd6.proterr); + break; + } + /* option length is checked earlier to be non-zero to make sure loop ends */ + offset += 8 * ((u16_t)buffer[1]); + } + + break; /* ICMP6_TYPE_RA */ + } + case ICMP6_TYPE_RD: /* Redirect */ + { + struct redirect_header *redir_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t tmp; + + /* Check that Redir header fits in packet. */ + if (p->len < sizeof(struct redirect_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + redir_hdr = (struct redirect_header *)p->payload; + + if (p->len >= (sizeof(struct redirect_header) + 2)) { + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct redirect_header)); + if (p->len < (sizeof(struct redirect_header) + (lladdr_opt->length << 3))) { + lladdr_opt = NULL; + } + } else { + lladdr_opt = NULL; + } + + /* Copy original destination address to current source address, to have an aligned copy. */ + ip6_addr_set(&tmp, &(redir_hdr->destination_address)); + + /* Find dest address in cache */ + i = nd6_find_destination_cache_entry(&tmp); + if (i < 0) { + /* Destination not in cache, drop packet. */ + pbuf_free(p); + return; + } + + /* Set the new target address. */ + ip6_addr_set(&(destination_cache[i].next_hop_addr), &(redir_hdr->target_address)); + + /* If Link-layer address of other router is given, try to add to neighbor cache. */ + if (lladdr_opt != NULL) { + if (lladdr_opt->type == ND6_OPTION_TYPE_TARGET_LLADDR) { + /* Copy target address to current source address, to have an aligned copy. 
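In the RDNSS branch above, each advertised server with a non-zero lifetime is installed with dns_setserver(), and a zero lifetime revokes it. Reduced to a single server, the effect is roughly this (the wrapper is illustrative; the DNS calls are lwIP's client API):

#include "lwip/dns.h"

static void example_learn_dns_server(const ip_addr_t *server, u32_t lifetime, u8_t slot)
{
  if (lifetime > 0) {
    dns_setserver(slot, server);         /* install or refresh */
  } else {
    u8_t s;
    for (s = 0; s < DNS_MAX_SERVERS; s++) {
      if (ip_addr_cmp(dns_getserver(s), server)) {
        dns_setserver(s, NULL);          /* lifetime 0: withdraw the server */
      }
    }
  }
}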
*/ + ip6_addr_set(&tmp, &(redir_hdr->target_address)); + + i = nd6_find_neighbor_cache_entry(&tmp); + if (i < 0) { + i = nd6_new_neighbor_cache_entry(); + if (i >= 0) { + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + ip6_addr_set(&(neighbor_cache[i].next_hop_address), &tmp); + + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } + if (i >= 0) { + if (neighbor_cache[i].state == ND6_INCOMPLETE) { + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } + } + } + break; /* ICMP6_TYPE_RD */ + } + case ICMP6_TYPE_PTB: /* Packet too big */ + { + struct icmp6_hdr *icmp6hdr; /* Packet too big message */ + struct ip6_hdr *ip6hdr; /* IPv6 header of the packet which caused the error */ + u32_t pmtu; + ip6_addr_t tmp; + + /* Check that ICMPv6 header + IPv6 header fit in payload */ + if (p->len < (sizeof(struct icmp6_hdr) + IP6_HLEN)) { + /* drop short packets */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + icmp6hdr = (struct icmp6_hdr *)p->payload; + ip6hdr = (struct ip6_hdr *)((u8_t*)p->payload + sizeof(struct icmp6_hdr)); + + /* Copy original destination address to current source address, to have an aligned copy. */ + ip6_addr_set(&tmp, &(ip6hdr->dest)); + + /* Look for entry in destination cache. */ + i = nd6_find_destination_cache_entry(&tmp); + if (i < 0) { + /* Destination not in cache, drop packet. */ + pbuf_free(p); + return; + } + + /* Change the Path MTU. */ + pmtu = lwip_htonl(icmp6hdr->data); + destination_cache[i].pmtu = (u16_t)LWIP_MIN(pmtu, 0xFFFF); + + break; /* ICMP6_TYPE_PTB */ + } + + default: + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + break; /* default */ + } + + pbuf_free(p); +} + + +/** + * Periodic timer for Neighbor discovery functions: + * + * - Update neighbor reachability states + * - Update destination cache entries age + * - Update invalidation timers of default routers and on-link prefixes + * - Perform duplicate address detection (DAD) for our addresses + * - Send router solicitations + */ +void +nd6_tmr(void) +{ + s8_t i; + struct netif *netif; + + /* Process neighbor entries. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + switch (neighbor_cache[i].state) { + case ND6_INCOMPLETE: + if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) && + (!neighbor_cache[i].isrouter)) { + /* Retries exceeded. */ + nd6_free_neighbor_cache_entry(i); + } else { + /* Send a NS for this entry. */ + neighbor_cache[i].counter.probes_sent++; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST); + } + break; + case ND6_REACHABLE: + /* Send queued packets, if any are left. Should have been sent already. */ + if (neighbor_cache[i].q != NULL) { + nd6_send_q(i); + } + if (neighbor_cache[i].counter.reachable_time <= ND6_TMR_INTERVAL) { + /* Change to stale state. 
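nd6_tmr() above must run every ND6_TMR_INTERVAL milliseconds (1000 by default); lwIP's timeouts core normally schedules it, but when driving lwIP from a bare main loop it can be polled by hand. A sketch under that assumption:

#include "lwip/nd6.h"
#include "lwip/sys.h"

/* Call from the main loop; fires nd6_tmr() once per ND6_TMR_INTERVAL. */
static void example_poll_nd6(void)
{
  static u32_t last_ms;
  u32_t now_ms = sys_now();

  if ((u32_t)(now_ms - last_ms) >= ND6_TMR_INTERVAL) {
    last_ms = now_ms;
    nd6_tmr();   /* ages caches, retries probes, runs DAD, sends RS */
  }
}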
*/ + neighbor_cache[i].state = ND6_STALE; + neighbor_cache[i].counter.stale_time = 0; + } else { + neighbor_cache[i].counter.reachable_time -= ND6_TMR_INTERVAL; + } + break; + case ND6_STALE: + neighbor_cache[i].counter.stale_time++; + break; + case ND6_DELAY: + if (neighbor_cache[i].counter.delay_time <= 1) { + /* Change to PROBE state. */ + neighbor_cache[i].state = ND6_PROBE; + neighbor_cache[i].counter.probes_sent = 0; + } else { + neighbor_cache[i].counter.delay_time--; + } + break; + case ND6_PROBE: + if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) && + (!neighbor_cache[i].isrouter)) { + /* Retries exceeded. */ + nd6_free_neighbor_cache_entry(i); + } else { + /* Send a NS for this entry. */ + neighbor_cache[i].counter.probes_sent++; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], 0); + } + break; + case ND6_NO_ENTRY: + default: + /* Do nothing. */ + break; + } + } + + /* Process destination entries. */ + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + destination_cache[i].age++; + } + + /* Process router entries. */ + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + if (default_router_list[i].neighbor_entry != NULL) { + /* Active entry. */ + if (default_router_list[i].invalidation_timer > 0) { + default_router_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000; + } + if (default_router_list[i].invalidation_timer < ND6_TMR_INTERVAL / 1000) { + /* Less than 1 second remaining. Clear this entry. */ + default_router_list[i].neighbor_entry->isrouter = 0; + default_router_list[i].neighbor_entry = NULL; + default_router_list[i].invalidation_timer = 0; + default_router_list[i].flags = 0; + } + } + } + + /* Process prefix entries. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if (prefix_list[i].netif != NULL) { + if (prefix_list[i].invalidation_timer < ND6_TMR_INTERVAL / 1000) { + /* Entry timed out, remove it */ + prefix_list[i].invalidation_timer = 0; + +#if LWIP_IPV6_AUTOCONFIG + /* If any addresses were configured with this prefix, remove them */ + if (prefix_list[i].flags & ND6_PREFIX_AUTOCONFIG_ADDRESS_GENERATED) { + s8_t j; + + for (j = 1; j < LWIP_IPV6_NUM_ADDRESSES; j++) { + if ((netif_ip6_addr_state(prefix_list[i].netif, j) != IP6_ADDR_INVALID) && + ip6_addr_netcmp(&prefix_list[i].prefix, netif_ip6_addr(prefix_list[i].netif, j))) { + netif_ip6_addr_set_state(prefix_list[i].netif, j, IP6_ADDR_INVALID); + prefix_list[i].flags = 0; + + /* Exit loop. */ + break; + } + } + } +#endif /* LWIP_IPV6_AUTOCONFIG */ + + prefix_list[i].netif = NULL; + prefix_list[i].flags = 0; + } else { + prefix_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000; + +#if LWIP_IPV6_AUTOCONFIG + /* Initiate address autoconfiguration for this prefix, if conditions are met. */ + if (prefix_list[i].netif->ip6_autoconfig_enabled && + (prefix_list[i].flags & ND6_PREFIX_AUTOCONFIG_AUTONOMOUS) && + !(prefix_list[i].flags & ND6_PREFIX_AUTOCONFIG_ADDRESS_GENERATED)) { + s8_t j; + /* Try to get an address on this netif that is invalid. + * Skip 0 index (link-local address) */ + for (j = 1; j < LWIP_IPV6_NUM_ADDRESSES; j++) { + if (netif_ip6_addr_state(prefix_list[i].netif, j) == IP6_ADDR_INVALID) { + /* Generate an address using this prefix and interface ID from link-local address. */ + netif_ip6_addr_set_parts(prefix_list[i].netif, j, + prefix_list[i].prefix.addr[0], prefix_list[i].prefix.addr[1], + netif_ip6_addr(prefix_list[i].netif, 0)->addr[2], netif_ip6_addr(prefix_list[i].netif, 0)->addr[3]); + + /* Mark it as tentative (DAD will be performed if configured). 
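The autoconfigured address is nothing more than the RA prefix's upper 64 bits concatenated with the interface identifier already present in the link-local address, exactly as the netif_ip6_addr_set_parts() call above spells out. The same construction in isolation (illustrative; ignores address zones/scopes):

#include "lwip/ip6_addr.h"

/* SLAAC: prefix (64 bits) + interface ID from the link-local address. */
static void example_slaac_address(ip6_addr_t *out,
                                  const ip6_addr_t *prefix,     /* from the RA */
                                  const ip6_addr_t *linklocal)  /* netif addr 0 */
{
  out->addr[0] = prefix->addr[0];
  out->addr[1] = prefix->addr[1];
  out->addr[2] = linklocal->addr[2];
  out->addr[3] = linklocal->addr[3];
}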
*/ + netif_ip6_addr_set_state(prefix_list[i].netif, j, IP6_ADDR_TENTATIVE); + + /* Mark this prefix with ADDRESS_GENERATED, so that we don't try again. */ + prefix_list[i].flags |= ND6_PREFIX_AUTOCONFIG_ADDRESS_GENERATED; + + /* Exit loop. */ + break; + } + } + } +#endif /* LWIP_IPV6_AUTOCONFIG */ + } + } + } + + + /* Process our own addresses, if DAD configured. */ + for (netif = netif_list; netif != NULL; netif = netif->next) { + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + u8_t addr_state = netif_ip6_addr_state(netif, i); + if (ip6_addr_istentative(addr_state)) { + if ((addr_state & IP6_ADDR_TENTATIVE_COUNT_MASK) >= LWIP_IPV6_DUP_DETECT_ATTEMPTS) { + /* No NA received in response. Mark address as valid. */ + netif_ip6_addr_set_state(netif, i, IP6_ADDR_PREFERRED); + /* @todo implement preferred and valid lifetimes. */ + } else if (netif->flags & NETIF_FLAG_UP) { + /* Send a NS for this address. */ + nd6_send_ns(netif, netif_ip6_addr(netif, i), ND6_SEND_FLAG_MULTICAST_DEST); + /* tentative: set next state by increasing by one */ + netif_ip6_addr_set_state(netif, i, addr_state + 1); + /* @todo send max 1 NS per tmr call? enable return*/ + /*return;*/ + } + } + } + } + +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* Send router solicitation messages, if necessary. */ + for (netif = netif_list; netif != NULL; netif = netif->next) { + if ((netif->rs_count > 0) && (netif->flags & NETIF_FLAG_UP) && + (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, 0)))) { + if (nd6_send_rs(netif) == ERR_OK) { + netif->rs_count--; + } + } + } +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +} + +/** Send a neighbor solicitation message for a specific neighbor cache entry + * + * @param entry the neighbor cache entry for which to send the message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags) +{ + nd6_send_ns(entry->netif, &entry->next_hop_address, flags); +} + +/** + * Send a neighbor solicitation message + * + * @param netif the netif on which to send the message + * @param target_addr the IPv6 target address for the ND message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags) +{ + struct ns_header *ns_hdr; + struct pbuf *p; + const ip6_addr_t *src_addr; + u16_t lladdr_opt_len; + + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) { + /* Use link-local address as source address. */ + src_addr = netif_ip6_addr(netif, 0); + /* calculate option length (in 8-byte-blocks) */ + lladdr_opt_len = ((netif->hwaddr_len + 2) + 7) >> 3; + } else { + src_addr = IP6_ADDR_ANY6; + /* Option "MUST NOT be included when the source IP address is the unspecified address." */ + lladdr_opt_len = 0; + } + + /* Allocate a packet. */ + p = pbuf_alloc(PBUF_IP, sizeof(struct ns_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Set fields. */ + ns_hdr = (struct ns_header *)p->payload; + + ns_hdr->type = ICMP6_TYPE_NS; + ns_hdr->code = 0; + ns_hdr->chksum = 0; + ns_hdr->reserved = 0; + ip6_addr_set(&(ns_hdr->target_address), target_addr); + + if (lladdr_opt_len != 0) { + struct lladdr_option *lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header)); + lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + } + + /* Generate the solicited node address for the target address. 
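The option length computed in nd6_send_ns() above, ((hwaddr_len + 2) + 7) >> 3, is the usual round-up-to-8 idiom: two bytes of option type/length plus the hardware address, expressed in the 8-octet units RFC 4861 requires. A checked restatement:

#include <assert.h>

static unsigned example_lladdr_option_units(unsigned hwaddr_len)
{
  return (hwaddr_len + 2 + 7) >> 3;    /* +2: option type and length bytes */
}

static void example_lladdr_option_units_check(void)
{
  assert(example_lladdr_option_units(6) == 1);  /* Ethernet MAC -> 8 bytes  */
  assert(example_lladdr_option_units(8) == 2);  /* EUI-64       -> 16 bytes */
}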
*/ + if (flags & ND6_SEND_FLAG_MULTICAST_DEST) { + ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]); + target_addr = &multicast_address; + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + ns_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + target_addr); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ? NULL : src_addr, target_addr, + LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); +} + +/** + * Send a neighbor advertisement message + * + * @param netif the netif on which to send the message + * @param target_addr the IPv6 target address for the ND message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags) +{ + struct na_header *na_hdr; + struct lladdr_option *lladdr_opt; + struct pbuf *p; + const ip6_addr_t *src_addr; + const ip6_addr_t *dest_addr; + u16_t lladdr_opt_len; + + /* Use link-local address as source address. */ + /* src_addr = netif_ip6_addr(netif, 0); */ + /* Use target address as source address. */ + src_addr = target_addr; + + /* Allocate a packet. */ + lladdr_opt_len = ((netif->hwaddr_len + 2) >> 3) + (((netif->hwaddr_len + 2) & 0x07) ? 1 : 0); + p = pbuf_alloc(PBUF_IP, sizeof(struct na_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Set fields. */ + na_hdr = (struct na_header *)p->payload; + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + na_hdr->type = ICMP6_TYPE_NA; + na_hdr->code = 0; + na_hdr->chksum = 0; + na_hdr->flags = flags & 0xf0; + na_hdr->reserved[0] = 0; + na_hdr->reserved[1] = 0; + na_hdr->reserved[2] = 0; + ip6_addr_set(&(na_hdr->target_address), target_addr); + + lladdr_opt->type = ND6_OPTION_TYPE_TARGET_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + + /* Generate the solicited node address for the target address. */ + if (flags & ND6_SEND_FLAG_MULTICAST_DEST) { + ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]); + dest_addr = &multicast_address; + } else if (flags & ND6_SEND_FLAG_ALLNODES_DEST) { + ip6_addr_set_allnodes_linklocal(&multicast_address); + dest_addr = &multicast_address; + } else { + dest_addr = ip6_current_src_addr(); + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + na_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + dest_addr); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + ip6_output_if(p, src_addr, dest_addr, + LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); +} + +#if LWIP_IPV6_SEND_ROUTER_SOLICIT +/** + * Send a router solicitation message + * + * @param netif the netif on which to send the message + */ +static err_t +nd6_send_rs(struct netif *netif) +{ + struct rs_header *rs_hdr; + struct lladdr_option *lladdr_opt; + struct pbuf *p; + const ip6_addr_t *src_addr; + err_t err; + u16_t lladdr_opt_len = 0; + + /* Link-local source address, or unspecified address? */ + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) { + src_addr = netif_ip6_addr(netif, 0); + } else { + src_addr = IP6_ADDR_ANY6; + } + + /* Generate the all routers target address. */ + ip6_addr_set_allrouters_linklocal(&multicast_address); + + /* Allocate a packet. 
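ip6_addr_set_solicitednode(), used above with only the address's last 32-bit word, produces the RFC 4291 solicited-node group ff02::1:ffXX:XXXX by keeping just the low 24 bits of the target. Written out (this mirrors lwIP's macro; the function wrapper is illustrative):

#include "lwip/ip6_addr.h"

/* ff02:0000:0000:0000:0000:0001:ffXX:XXXX from the target's last word. */
static void example_solicited_node(ip6_addr_t *mcast, u32_t target_word3)
{
  mcast->addr[0] = PP_HTONL(0xff020000UL);
  mcast->addr[1] = 0;
  mcast->addr[2] = PP_HTONL(0x00000001UL);
  mcast->addr[3] = PP_HTONL(0xff000000UL) | (target_word3 & PP_HTONL(0x00ffffffUL));
}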
*/ + if (src_addr != IP6_ADDR_ANY6) { + lladdr_opt_len = ((netif->hwaddr_len + 2) >> 3) + (((netif->hwaddr_len + 2) & 0x07) ? 1 : 0); + } + p = pbuf_alloc(PBUF_IP, sizeof(struct rs_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return ERR_BUF; + } + + /* Set fields. */ + rs_hdr = (struct rs_header *)p->payload; + + rs_hdr->type = ICMP6_TYPE_RS; + rs_hdr->code = 0; + rs_hdr->chksum = 0; + rs_hdr->reserved = 0; + + if (src_addr != IP6_ADDR_ANY6) { + /* Include our hw address. */ + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct rs_header)); + lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + rs_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + &multicast_address); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + + err = ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ? NULL : src_addr, &multicast_address, + LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); + + return err; +} +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +/** + * Search for a neighbor cache entry + * + * @param ip6addr the IPv6 address of the neighbor + * @return The neighbor cache entry index that matched, -1 if no + * entry is found + */ +static s8_t +nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr) +{ + s8_t i; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (ip6_addr_cmp(ip6addr, &(neighbor_cache[i].next_hop_address))) { + return i; + } + } + return -1; +} + +/** + * Create a new neighbor cache entry. + * + * If no unused entry is found, will try to recycle an old entry + * according to ad-hoc "age" heuristic. + * + * @return The neighbor cache entry index that was created, -1 if no + * entry could be created + */ +static s8_t +nd6_new_neighbor_cache_entry(void) +{ + s8_t i; + s8_t j; + u32_t time; + + + /* First, try to find an empty entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (neighbor_cache[i].state == ND6_NO_ENTRY) { + return i; + } + } + + /* We need to recycle an entry. in general, do not recycle if it is a router. */ + + /* Next, try to find a Stale entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_STALE) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find a Probe entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_PROBE) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find a Delayed entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_DELAY) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find the oldest reachable entry. */ + time = 0xfffffffful; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_REACHABLE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.reachable_time < time) { + j = i; + time = neighbor_cache[i].counter.reachable_time; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* Next, find oldest incomplete entry without queued packets. 
*/ + time = 0; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ( + (neighbor_cache[i].q == NULL) && + (neighbor_cache[i].state == ND6_INCOMPLETE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.probes_sent >= time) { + j = i; + time = neighbor_cache[i].counter.probes_sent; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* Next, find oldest incomplete entry with queued packets. */ + time = 0; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_INCOMPLETE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.probes_sent >= time) { + j = i; + time = neighbor_cache[i].counter.probes_sent; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* No more entries to try. */ + return -1; +} + +/** + * Will free any resources associated with a neighbor cache + * entry, and will mark it as unused. + * + * @param i the neighbor cache entry index to free + */ +static void +nd6_free_neighbor_cache_entry(s8_t i) +{ + if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) { + return; + } + if (neighbor_cache[i].isrouter) { + /* isrouter needs to be cleared before deleting a neighbor cache entry */ + return; + } + + /* Free any queued packets. */ + if (neighbor_cache[i].q != NULL) { + nd6_free_q(neighbor_cache[i].q); + neighbor_cache[i].q = NULL; + } + + neighbor_cache[i].state = ND6_NO_ENTRY; + neighbor_cache[i].isrouter = 0; + neighbor_cache[i].netif = NULL; + neighbor_cache[i].counter.reachable_time = 0; + ip6_addr_set_zero(&(neighbor_cache[i].next_hop_address)); +} + +/** + * Search for a destination cache entry + * + * @param ip6addr the IPv6 address of the destination + * @return The destination cache entry index that matched, -1 if no + * entry is found + */ +static s8_t +nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr) +{ + s8_t i; + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (ip6_addr_cmp(ip6addr, &(destination_cache[i].destination_addr))) { + return i; + } + } + return -1; +} + +/** + * Create a new destination cache entry. If no unused entry is found, + * will recycle oldest entry. + * + * @return The destination cache entry index that was created, -1 if no + * entry was created + */ +static s8_t +nd6_new_destination_cache_entry(void) +{ + s8_t i, j; + u32_t age; + + /* Find an empty entry. */ + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (ip6_addr_isany(&(destination_cache[i].destination_addr))) { + return i; + } + } + + /* Find oldest entry. */ + age = 0; + j = LWIP_ND6_NUM_DESTINATIONS - 1; + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (destination_cache[i].age > age) { + j = i; + age = destination_cache[i].age; /* track the running maximum so the oldest entry really wins */ + } + } + + return j; +} + +/** + * Clear the destination cache. + * + * This operation may be necessary for consistency in the light of changing + * local addresses and/or use of the gateway hook. + */ +void +nd6_clear_destination_cache(void) +{ + int i; + + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + ip6_addr_set_any(&destination_cache[i].destination_addr); + } +} + +/** + * Determine whether an address matches an on-link prefix. 
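Because the destination cache keys on addresses chosen under the old configuration, it is simplest to flush it wholesale whenever local addressing changes; nd6_clear_destination_cache() above only zeroes the keys, so this is cheap. A sketch using the optional netif status callback (requires LWIP_NETIF_STATUS_CALLBACK; the callback body is illustrative):

#include "lwip/netif.h"
#include "lwip/nd6.h"

static void example_status_callback(struct netif *netif)
{
  LWIP_UNUSED_ARG(netif);          /* the cache is global, not per-netif */
  nd6_clear_destination_cache();
}

/* During init: netif_set_status_callback(netif, example_status_callback); */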
+ * + * @param ip6addr the IPv6 address to match + * @return 1 if the address is on-link, 0 otherwise + */ +static s8_t +nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif) +{ + s8_t i; + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if ((prefix_list[i].netif == netif) && + (prefix_list[i].invalidation_timer > 0) && + ip6_addr_netcmp(ip6addr, &(prefix_list[i].prefix))) { + return 1; + } + } + /* Check to see if address prefix matches a (manually?) configured address. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_netcmp(ip6addr, netif_ip6_addr(netif, i))) { + return 1; + } + } + return 0; +} + +/** + * Select a default router for a destination. + * + * @param ip6addr the destination address + * @param netif the netif for the outgoing packet, if known + * @return the default router entry index, or -1 if no suitable + * router is found + */ +static s8_t +nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif) +{ + s8_t i; + /* last_router is used for round-robin router selection (as recommended + * in RFC). This is more robust in case one router is not reachable, + * we are not stuck trying to resolve it. */ + static s8_t last_router; + (void)ip6addr; /* @todo match preferred routes!! (must implement ND6_OPTION_TYPE_ROUTE_INFO) */ + + /* @todo: implement default router preference */ + + /* Look for reachable routers. */ + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + if (++last_router >= LWIP_ND6_NUM_ROUTERS) { + last_router = 0; + } + if ((default_router_list[i].neighbor_entry != NULL) && + (netif != NULL ? netif == default_router_list[i].neighbor_entry->netif : 1) && + (default_router_list[i].invalidation_timer > 0) && + (default_router_list[i].neighbor_entry->state == ND6_REACHABLE)) { + return i; + } + } + + /* Look for router in other reachability states, but still valid according to timer. */ + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + if (++last_router >= LWIP_ND6_NUM_ROUTERS) { + last_router = 0; + } + if ((default_router_list[i].neighbor_entry != NULL) && + (netif != NULL ? netif == default_router_list[i].neighbor_entry->netif : 1) && + (default_router_list[i].invalidation_timer > 0)) { + return i; + } + } + + /* Look for any router for which we have any information at all. */ + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + if (++last_router >= LWIP_ND6_NUM_ROUTERS) { + last_router = 0; + } + if (default_router_list[i].neighbor_entry != NULL && + (netif != NULL ? netif == default_router_list[i].neighbor_entry->netif : 1)) { + return i; + } + } + + /* no suitable router found. */ + return -1; +} + +/** + * Find a router-announced route to the given destination. + * + * The caller is responsible for checking whether the returned netif, if any, + * is in a suitable state (up, link up) to be used for packet transmission. + * + * @param ip6addr the destination IPv6 address + * @return the netif to use for the destination, or NULL if none found + */ +struct netif * +nd6_find_route(const ip6_addr_t *ip6addr) +{ + s8_t i; + + i = nd6_select_router(ip6addr, NULL); + if (i >= 0) { + if (default_router_list[i].neighbor_entry != NULL) { + return default_router_list[i].neighbor_entry->netif; /* may be NULL */ + } + } + + return NULL; +} + +/** + * Find an entry for a default router. 
+ * + * @param router_addr the IPv6 address of the router + * @param netif the netif on which the router is found, if known + * @return the index of the router entry, or -1 if not found + */ +static s8_t +nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif) +{ + s8_t i; + + /* Look for router. */ + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + if ((default_router_list[i].neighbor_entry != NULL) && + ((netif != NULL) ? netif == default_router_list[i].neighbor_entry->netif : 1) && + ip6_addr_cmp(router_addr, &(default_router_list[i].neighbor_entry->next_hop_address))) { + return i; + } + } + + /* router not found. */ + return -1; +} + +/** + * Create a new entry for a default router. + * + * @param router_addr the IPv6 address of the router + * @param netif the netif on which the router is connected, if known + * @return the index on the router table, or -1 if could not be created + */ +static s8_t +nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif) +{ + s8_t router_index; + s8_t free_router_index; + s8_t neighbor_index; + + /* Do we have a neighbor entry for this router? */ + neighbor_index = nd6_find_neighbor_cache_entry(router_addr); + if (neighbor_index < 0) { + /* Create a neighbor entry for this router. */ + neighbor_index = nd6_new_neighbor_cache_entry(); + if (neighbor_index < 0) { + /* Could not create neighbor entry for this router. */ + return -1; + } + ip6_addr_set(&(neighbor_cache[neighbor_index].next_hop_address), router_addr); + neighbor_cache[neighbor_index].netif = netif; + neighbor_cache[neighbor_index].q = NULL; + neighbor_cache[neighbor_index].state = ND6_INCOMPLETE; + neighbor_cache[neighbor_index].counter.probes_sent = 1; + nd6_send_neighbor_cache_probe(&neighbor_cache[neighbor_index], ND6_SEND_FLAG_MULTICAST_DEST); + } + + /* Mark neighbor as router. */ + neighbor_cache[neighbor_index].isrouter = 1; + + /* Look for empty entry. */ + free_router_index = LWIP_ND6_NUM_ROUTERS; + for (router_index = LWIP_ND6_NUM_ROUTERS - 1; router_index >= 0; router_index--) { + /* check if router already exists (this is a special case for 2 netifs on the same subnet + - e.g. wifi and cable) */ + if(default_router_list[router_index].neighbor_entry == &(neighbor_cache[neighbor_index])){ + return router_index; + } + if (default_router_list[router_index].neighbor_entry == NULL) { + /* remember lowest free index to create a new entry */ + free_router_index = router_index; + } + } + if (free_router_index < LWIP_ND6_NUM_ROUTERS) { + default_router_list[free_router_index].neighbor_entry = &(neighbor_cache[neighbor_index]); + return free_router_index; + } + + /* Could not create a router entry. */ + + /* Mark neighbor entry as not-router. Entry might be useful as neighbor still. */ + neighbor_cache[neighbor_index].isrouter = 0; + + /* router not found. */ + return -1; +} + +/** + * Find the cached entry for an on-link prefix. + * + * @param prefix the IPv6 prefix that is on-link + * @param netif the netif on which the prefix is on-link + * @return the index on the prefix table, or -1 if not found + */ +static s8_t +nd6_get_onlink_prefix(ip6_addr_t *prefix, struct netif *netif) +{ + s8_t i; + + /* Look for prefix in list. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + if ((ip6_addr_netcmp(&(prefix_list[i].prefix), prefix)) && + (prefix_list[i].netif == netif)) { + return i; + } + } + + /* Entry not available. */ + return -1; +} + +/** + * Creates a new entry for an on-link prefix. 
+ * + * @param prefix the IPv6 prefix that is on-link + * @param netif the netif on which the prefix is on-link + * @return the index on the prefix table, or -1 if not created + */ +static s8_t +nd6_new_onlink_prefix(ip6_addr_t *prefix, struct netif *netif) +{ + s8_t i; + + /* Create new entry. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + if ((prefix_list[i].netif == NULL) || + (prefix_list[i].invalidation_timer == 0)) { + /* Found empty prefix entry. */ + prefix_list[i].netif = netif; + ip6_addr_set(&(prefix_list[i].prefix), prefix); +#if LWIP_IPV6_AUTOCONFIG + prefix_list[i].flags = 0; +#endif /* LWIP_IPV6_AUTOCONFIG */ + return i; + } + } + + /* Entry not available. */ + return -1; +} + +/** + * Determine the next hop for a destination. Will determine if the + * destination is on-link, else a suitable on-link router is selected. + * + * The last entry index is cached for fast entry search. + * + * @param ip6addr the destination address + * @param netif the netif on which the packet will be sent + * @return the neighbor cache entry for the next hop, ERR_RTE if no + * suitable next hop was found, ERR_MEM if no cache entry + * could be created + */ +static s8_t +nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif) +{ +#ifdef LWIP_HOOK_ND6_GET_GW + const ip6_addr_t *next_hop_addr; +#endif /* LWIP_HOOK_ND6_GET_GW */ + s8_t i; + +#if LWIP_NETIF_HWADDRHINT + if (netif->addr_hint != NULL) { + /* per-pcb cached entry was given */ + u8_t addr_hint = *(netif->addr_hint); + if (addr_hint < LWIP_ND6_NUM_DESTINATIONS) { + nd6_cached_destination_index = addr_hint; + } + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* Look for ip6addr in destination cache. */ + if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) { + /* the cached entry index is the right one! */ + /* do nothing. */ + ND6_STATS_INC(nd6.cachehit); + } else { + /* Search destination cache. */ + i = nd6_find_destination_cache_entry(ip6addr); + if (i >= 0) { + /* found destination entry. make it our new cached index. */ + nd6_cached_destination_index = i; + } else { + /* Not found. Create a new destination entry. */ + i = nd6_new_destination_cache_entry(); + if (i >= 0) { + /* got new destination entry. make it our new cached index. */ + nd6_cached_destination_index = i; + } else { + /* Could not create a destination cache entry. */ + return ERR_MEM; + } + + /* Copy dest address to destination cache. */ + ip6_addr_set(&(destination_cache[nd6_cached_destination_index].destination_addr), ip6addr); + + /* Now find the next hop. is it a neighbor? */ + if (ip6_addr_islinklocal(ip6addr) || + nd6_is_prefix_in_netif(ip6addr, netif)) { + /* Destination in local link. */ + destination_cache[nd6_cached_destination_index].pmtu = netif->mtu; + ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, destination_cache[nd6_cached_destination_index].destination_addr); +#ifdef LWIP_HOOK_ND6_GET_GW + } else if ((next_hop_addr = LWIP_HOOK_ND6_GET_GW(netif, ip6addr)) != NULL) { + /* Next hop for destination provided by hook function. */ + destination_cache[nd6_cached_destination_index].pmtu = netif->mtu; + ip6_addr_set(&destination_cache[nd6_cached_destination_index].next_hop_addr, next_hop_addr); +#endif /* LWIP_HOOK_ND6_GET_GW */ + } else { + /* We need to select a router. */ + i = nd6_select_router(ip6addr, netif); + if (i < 0) { + /* No router found. 
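When no router has been learned from RAs, the LWIP_HOOK_ND6_GET_GW hook consulted above can supply a statically configured gateway before router selection gives up. A hedged sketch of wiring that up (the hook point is lwIP's; the storage and per-interface policy are invented):

/* lwipopts.h:
 *   #define LWIP_HOOK_ND6_GET_GW(netif, dest) example_nd6_get_gw((netif), (dest))
 */
static ip6_addr_t example_static_gw;   /* configured elsewhere (illustrative) */

const ip6_addr_t *example_nd6_get_gw(struct netif *netif, const ip6_addr_t *dest)
{
  LWIP_UNUSED_ARG(dest);
  if ((netif->num == 0) && !ip6_addr_isany(&example_static_gw)) {
    return &example_static_gw;         /* static router for this interface */
  }
  return NULL;                         /* fall back to the default router list */
}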
*/ + ip6_addr_set_any(&(destination_cache[nd6_cached_destination_index].destination_addr)); + return ERR_RTE; + } + destination_cache[nd6_cached_destination_index].pmtu = netif->mtu; /* Start with netif mtu, correct through ICMPv6 if necessary */ + ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, default_router_list[i].neighbor_entry->next_hop_address); + } + } + } + +#if LWIP_NETIF_HWADDRHINT + if (netif->addr_hint != NULL) { + /* per-pcb cached entry was given */ + *(netif->addr_hint) = nd6_cached_destination_index; + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* Look in neighbor cache for the next-hop address. */ + if (ip6_addr_cmp(&(destination_cache[nd6_cached_destination_index].next_hop_addr), + &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) { + /* Cache hit. */ + /* Do nothing. */ + ND6_STATS_INC(nd6.cachehit); + } else { + i = nd6_find_neighbor_cache_entry(&(destination_cache[nd6_cached_destination_index].next_hop_addr)); + if (i >= 0) { + /* Found a matching record, make it new cached entry. */ + nd6_cached_neighbor_index = i; + } else { + /* Neighbor not in cache. Make a new entry. */ + i = nd6_new_neighbor_cache_entry(); + if (i >= 0) { + /* got new neighbor entry. make it our new cached index. */ + nd6_cached_neighbor_index = i; + } else { + /* Could not create a neighbor cache entry. */ + return ERR_MEM; + } + + /* Initialize fields. */ + ip6_addr_copy(neighbor_cache[i].next_hop_address, + destination_cache[nd6_cached_destination_index].next_hop_addr); + neighbor_cache[i].isrouter = 0; + neighbor_cache[i].netif = netif; + neighbor_cache[i].state = ND6_INCOMPLETE; + neighbor_cache[i].counter.probes_sent = 1; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST); + } + } + + /* Reset this destination's age. */ + destination_cache[nd6_cached_destination_index].age = 0; + + return nd6_cached_neighbor_index; +} + +/** + * Queue a packet for a neighbor. + * + * @param neighbor_index the index in the neighbor cache table + * @param q packet to be queued + * @return ERR_OK if succeeded, ERR_MEM if out of memory + */ +static err_t +nd6_queue_packet(s8_t neighbor_index, struct pbuf *q) +{ + err_t result = ERR_MEM; + struct pbuf *p; + int copy_needed = 0; +#if LWIP_ND6_QUEUEING + struct nd6_q_entry *new_entry, *r; +#endif /* LWIP_ND6_QUEUEING */ + + if ((neighbor_index < 0) || (neighbor_index >= LWIP_ND6_NUM_NEIGHBORS)) { + return ERR_ARG; + } + + /* IF q includes a PBUF_REF, PBUF_POOL or PBUF_RAM, we have no choice but + * to copy the whole queue into a new PBUF_RAM (see bug #11400) + * PBUF_ROMs can be left as they are, since ROM must not get changed. 
*/ + p = q; + while (p) { + if (p->type != PBUF_ROM) { + copy_needed = 1; + break; + } + p = p->next; + } + if (copy_needed) { + /* copy the whole packet into new pbufs */ + p = pbuf_alloc(PBUF_LINK, q->tot_len, PBUF_RAM); + while ((p == NULL) && (neighbor_cache[neighbor_index].q != NULL)) { + /* Free oldest packet (as per RFC recommendation) */ +#if LWIP_ND6_QUEUEING + r = neighbor_cache[neighbor_index].q; + neighbor_cache[neighbor_index].q = r->next; + r->next = NULL; + nd6_free_q(r); +#else /* LWIP_ND6_QUEUEING */ + pbuf_free(neighbor_cache[neighbor_index].q); + neighbor_cache[neighbor_index].q = NULL; +#endif /* LWIP_ND6_QUEUEING */ + p = pbuf_alloc(PBUF_LINK, q->tot_len, PBUF_RAM); + } + if (p != NULL) { + if (pbuf_copy(p, q) != ERR_OK) { + pbuf_free(p); + p = NULL; + } + } + } else { + /* referencing the old pbuf is enough */ + p = q; + pbuf_ref(p); + } + /* packet was copied/ref'd? */ + if (p != NULL) { + /* queue packet ... */ +#if LWIP_ND6_QUEUEING + /* allocate a new nd6 queue entry */ + new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE); + if ((new_entry == NULL) && (neighbor_cache[neighbor_index].q != NULL)) { + /* Free oldest packet (as per RFC recommendation) */ + r = neighbor_cache[neighbor_index].q; + neighbor_cache[neighbor_index].q = r->next; + r->next = NULL; + nd6_free_q(r); + new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE); + } + if (new_entry != NULL) { + new_entry->next = NULL; + new_entry->p = p; + if (neighbor_cache[neighbor_index].q != NULL) { + /* queue was already existent, append the new entry to the end */ + r = neighbor_cache[neighbor_index].q; + while (r->next != NULL) { + r = r->next; + } + r->next = new_entry; + } else { + /* queue did not exist, first item in queue */ + neighbor_cache[neighbor_index].q = new_entry; + } + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index)); + result = ERR_OK; + } else { + /* the pool MEMP_ND6_QUEUE is empty */ + pbuf_free(p); + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)p)); + /* { result == ERR_MEM } through initialization */ + } +#else /* LWIP_ND6_QUEUEING */ + /* Queue a single packet. If an older packet is already queued, free it as per RFC. 
*/ + if (neighbor_cache[neighbor_index].q != NULL) { + pbuf_free(neighbor_cache[neighbor_index].q); + } + neighbor_cache[neighbor_index].q = p; + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index)); + result = ERR_OK; +#endif /* LWIP_ND6_QUEUEING */ + } else { + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)q)); + /* { result == ERR_MEM } through initialization */ + } + + return result; +} + +#if LWIP_ND6_QUEUEING +/** + * Free a complete queue of nd6 q entries + * + * @param q a queue of nd6_q_entry to free + */ +static void +nd6_free_q(struct nd6_q_entry *q) +{ + struct nd6_q_entry *r; + LWIP_ASSERT("q != NULL", q != NULL); + LWIP_ASSERT("q->p != NULL", q->p != NULL); + while (q) { + r = q; + q = q->next; + LWIP_ASSERT("r->p != NULL", (r->p != NULL)); + pbuf_free(r->p); + memp_free(MEMP_ND6_QUEUE, r); + } +} +#endif /* LWIP_ND6_QUEUEING */ + +/** + * Send queued packets for a neighbor + * + * @param i the neighbor to send packets to + */ +static void +nd6_send_q(s8_t i) +{ + struct ip6_hdr *ip6hdr; + ip6_addr_t dest; +#if LWIP_ND6_QUEUEING + struct nd6_q_entry *q; +#endif /* LWIP_ND6_QUEUEING */ + + if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) { + return; + } + +#if LWIP_ND6_QUEUEING + while (neighbor_cache[i].q != NULL) { + /* remember first in queue */ + q = neighbor_cache[i].q; + /* pop first item off the queue */ + neighbor_cache[i].q = q->next; + /* Get ipv6 header. */ + ip6hdr = (struct ip6_hdr *)(q->p->payload); + /* Create an aligned copy. */ + ip6_addr_set(&dest, &(ip6hdr->dest)); + /* send the queued IPv6 packet */ + (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, q->p, &dest); + /* free the queued IP packet */ + pbuf_free(q->p); + /* now queue entry can be freed */ + memp_free(MEMP_ND6_QUEUE, q); + } +#else /* LWIP_ND6_QUEUEING */ + if (neighbor_cache[i].q != NULL) { + /* Get ipv6 header. */ + ip6hdr = (struct ip6_hdr *)(neighbor_cache[i].q->payload); + /* Create an aligned copy. */ + ip6_addr_set(&dest, &(ip6hdr->dest)); + /* send the queued IPv6 packet */ + (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, neighbor_cache[i].q, &dest); + /* free the queued IP packet */ + pbuf_free(neighbor_cache[i].q); + neighbor_cache[i].q = NULL; + } +#endif /* LWIP_ND6_QUEUEING */ +} + +/** + * A packet is to be transmitted to a specific IPv6 destination on a specific + * interface. Check if we can find the hardware address of the next hop to use + * for the packet. If so, give the hardware address to the caller, which should + * use it to send the packet right away. Otherwise, enqueue the packet for + * later transmission while looking up the hardware address, if possible. + * + * As such, this function returns one of three different possible results: + * + * - ERR_OK with a non-NULL 'hwaddrp': the caller should send the packet now. + * - ERR_OK with a NULL 'hwaddrp': the packet has been enqueued for later. + * - not ERR_OK: something went wrong; forward the error upward in the stack. + * + * @param netif The lwIP network interface on which the IP packet will be sent. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The destination IPv6 address of the packet. + * @param hwaddrp On success, filled with a pointer to a HW address or NULL (meaning + * the packet has been queued). 
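The three-way contract described here is exactly what lwIP's Ethernet glue relies on. A sketch of the caller side, modeled on ethip6_output(); the frame transmission is left as a comment since the link layer is target-specific:

static err_t example_output_ip6(struct netif *netif, struct pbuf *q,
                                const ip6_addr_t *ip6addr)
{
  const u8_t *hwaddr;
  err_t err = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr);

  if (err != ERR_OK) {
    return err;        /* ERR_RTE / ERR_MEM: report upward */
  }
  if (hwaddr == NULL) {
    return ERR_OK;     /* queued; nd6_send_q() transmits it after resolution */
  }
  /* hwaddr is valid: build and send the link-layer frame here, e.g.
   * ethernet_output(netif, q, (const struct eth_addr *)netif->hwaddr,
   *                 (const struct eth_addr *)hwaddr, ETHTYPE_IPV6); */
  return ERR_OK;
}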
+ * @return + * - ERR_OK on success, ERR_RTE if no route was found for the packet, + * or ERR_MEM if low memory conditions prohibit sending the packet at all. + */ +err_t +nd6_get_next_hop_addr_or_queue(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr, const u8_t **hwaddrp) +{ + s8_t i; + + /* Get next hop record. */ + i = nd6_get_next_hop_entry(ip6addr, netif); + if (i < 0) { + /* failed to get a next hop neighbor record. */ + return i; + } + + /* Now that we have a destination record, send or queue the packet. */ + if (neighbor_cache[i].state == ND6_STALE) { + /* Switch to delay state. */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + /* @todo should we send or queue if PROBE? send for now, to let unicast NS pass. */ + if ((neighbor_cache[i].state == ND6_REACHABLE) || + (neighbor_cache[i].state == ND6_DELAY) || + (neighbor_cache[i].state == ND6_PROBE)) { + + /* Tell the caller to send out the packet now. */ + *hwaddrp = neighbor_cache[i].lladdr; + return ERR_OK; + } + + /* We should queue packet on this interface. */ + *hwaddrp = NULL; + return nd6_queue_packet(i, q); +} + + +/** + * Get the Path MTU for a destination. + * + * @param ip6addr the destination address + * @param netif the netif on which the packet will be sent + * @return the Path MTU, if known, or the netif default MTU + */ +u16_t +nd6_get_destination_mtu(const ip6_addr_t *ip6addr, struct netif *netif) +{ + s8_t i; + + i = nd6_find_destination_cache_entry(ip6addr); + if (i >= 0) { + if (destination_cache[i].pmtu > 0) { + return destination_cache[i].pmtu; + } + } + + if (netif != NULL) { + return netif->mtu; + } + + return 1280; /* Minimum MTU */ +} + + +#if LWIP_ND6_TCP_REACHABILITY_HINTS +/** + * Provide the Neighbor discovery process with a hint that a + * destination is reachable. Called by tcp_receive when ACKs are + * received or sent (as per RFC). This is useful to avoid sending + * NS messages every 30 seconds. + * + * @param ip6addr the destination address which is know to be reachable + * by an upper layer protocol (TCP) + */ +void +nd6_reachability_hint(const ip6_addr_t *ip6addr) +{ + s8_t i; + + /* Find destination in cache. */ + if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) { + i = nd6_cached_destination_index; + ND6_STATS_INC(nd6.cachehit); + } else { + i = nd6_find_destination_cache_entry(ip6addr); + } + if (i < 0) { + return; + } + + /* Find next hop neighbor in cache. */ + if (ip6_addr_cmp(&(destination_cache[i].next_hop_addr), &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) { + i = nd6_cached_neighbor_index; + ND6_STATS_INC(nd6.cachehit); + } else { + i = nd6_find_neighbor_cache_entry(&(destination_cache[i].next_hop_addr)); + } + if (i < 0) { + return; + } + + /* For safety: don't set as reachable if we don't have a LL address yet. Misuse protection. */ + if (neighbor_cache[i].state == ND6_INCOMPLETE || neighbor_cache[i].state == ND6_NO_ENTRY) { + return; + } + + /* Set reachability state. */ + neighbor_cache[i].state = ND6_REACHABLE; + neighbor_cache[i].counter.reachable_time = reachable_time; +} +#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ + +/** + * Remove all prefix, neighbor_cache and router entries of the specified netif. 
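+ * (Editor's note: typically called from interface teardown, alongside
+ * netif_remove(), so no cache entry is left pointing at a dead netif.)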
+ * + * @param netif points to a network interface + */ +void +nd6_cleanup_netif(struct netif *netif) +{ + u8_t i; + s8_t router_index; + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if (prefix_list[i].netif == netif) { + prefix_list[i].netif = NULL; + prefix_list[i].flags = 0; + } + } + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (neighbor_cache[i].netif == netif) { + for (router_index = 0; router_index < LWIP_ND6_NUM_ROUTERS; router_index++) { + if (default_router_list[router_index].neighbor_entry == &neighbor_cache[i]) { + default_router_list[router_index].neighbor_entry = NULL; + default_router_list[router_index].flags = 0; + } + } + neighbor_cache[i].isrouter = 0; + nd6_free_neighbor_cache_entry(i); + } + } +} + +#if LWIP_IPV6_MLD +/** + * The state of a local IPv6 address entry is about to change. If needed, join + * or leave the solicited-node multicast group for the address. + * + * @param netif The netif that owns the address. + * @param addr_idx The index of the address. + * @param new_state The new (IP6_ADDR_) state for the address. + */ +void +nd6_adjust_mld_membership(struct netif *netif, s8_t addr_idx, u8_t new_state) +{ + u8_t old_state, old_member, new_member; + + old_state = netif_ip6_addr_state(netif, addr_idx); + + /* Determine whether we were, and should be, a member of the solicited-node + * multicast group for this address. For tentative addresses, the group is + * not joined until the address enters the TENTATIVE_1 (or VALID) state. */ + old_member = (old_state != IP6_ADDR_INVALID && old_state != IP6_ADDR_TENTATIVE); + new_member = (new_state != IP6_ADDR_INVALID && new_state != IP6_ADDR_TENTATIVE); + + if (old_member != new_member) { + ip6_addr_set_solicitednode(&multicast_address, netif_ip6_addr(netif, addr_idx)->addr[3]); + + if (new_member) { + mld6_joingroup_netif(netif, &multicast_address); + } else { + mld6_leavegroup_netif(netif, &multicast_address); + } + } +} +#endif /* LWIP_IPV6_MLD */ + +#endif /* LWIP_IPV6 */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/mem.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/mem.c new file mode 100644 index 000000000..0df6d282d --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/mem.c @@ -0,0 +1,777 @@ +/** + * @file + * Dynamic memory manager + * + * This is a lightweight replacement for the standard C library malloc(). + * + * If you want to use the standard C library malloc() instead, define + * MEM_LIBC_MALLOC to 1 in your lwipopts.h + * + * To let mem_malloc() use pools (prevents fragmentation and is much faster than + * a heap but might waste some memory), define MEM_USE_POOLS to 1, define + * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list + * of pools like this (more pools can be added between _START and _END): + * + * Define three pools with sizes 256, 512, and 1512 bytes + * LWIP_MALLOC_MEMPOOL_START + * LWIP_MALLOC_MEMPOOL(20, 256) + * LWIP_MALLOC_MEMPOOL(10, 512) + * LWIP_MALLOC_MEMPOOL(5, 1512) + * LWIP_MALLOC_MEMPOOL_END + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ + +#include "lwip/opt.h" +#include "lwip/mem.h" +#include "lwip/def.h" +#include "lwip/sys.h" +#include "lwip/stats.h" +#include "lwip/err.h" + +#include <string.h> + +#if MEM_LIBC_MALLOC +#include <stdlib.h> /* for malloc()/free() */ +#endif + +#if MEM_LIBC_MALLOC || MEM_USE_POOLS + +/** mem_init is not used when using pools instead of a heap or using + * C library malloc(). + */ +void +mem_init(void) +{ +} + +/** mem_trim is not used when using pools instead of a heap or using + * C library malloc(): we can't free part of a pool element, and the rest of + * the stack does not expect mem_trim() to return a different pointer. + */ +void* +mem_trim(void *mem, mem_size_t size) +{ + LWIP_UNUSED_ARG(size); + return mem; +} +#endif /* MEM_LIBC_MALLOC || MEM_USE_POOLS */ + +#if MEM_LIBC_MALLOC +/* lwIP heap implemented using C library malloc() */ + +/* in case C library malloc() needs extra protection, + * allow these defines to be overridden. + */ +#ifndef mem_clib_free +#define mem_clib_free free +#endif +#ifndef mem_clib_malloc +#define mem_clib_malloc malloc +#endif +#ifndef mem_clib_calloc +#define mem_clib_calloc calloc +#endif + +#if LWIP_STATS && MEM_STATS +#define MEM_LIBC_STATSHELPER_SIZE LWIP_MEM_ALIGN_SIZE(sizeof(mem_size_t)) +#else +#define MEM_LIBC_STATSHELPER_SIZE 0 +#endif + +/** + * Allocate a block of memory with a minimum of 'size' bytes. + * + * @param size is the minimum size of the requested block in bytes. + * @return pointer to allocated memory or NULL if no free memory was found. + * + * Note that the returned value must always be aligned (as defined by MEM_ALIGNMENT).
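+ * (Editor's note: when MEM_STATS is enabled, the pointer handed out lies
+ * MEM_LIBC_STATSHELPER_SIZE bytes past the block returned by
+ * mem_clib_malloc(); the hidden prefix stores the requested size so that
+ * mem_free() can update the usage counters.)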
+ */ +void * +mem_malloc(mem_size_t size) +{ + void* ret = mem_clib_malloc(size + MEM_LIBC_STATSHELPER_SIZE); + if (ret == NULL) { + MEM_STATS_INC(err); + } else { + LWIP_ASSERT("malloc() must return aligned memory", LWIP_MEM_ALIGN(ret) == ret); +#if LWIP_STATS && MEM_STATS + *(mem_size_t*)ret = size; + ret = (u8_t*)ret + MEM_LIBC_STATSHELPER_SIZE; + MEM_STATS_INC_USED(used, size); +#endif + } + return ret; +} + +/** Put memory back on the heap + * + * @param rmem is the pointer as returned by a previous call to mem_malloc() + */ +void +mem_free(void *rmem) +{ + LWIP_ASSERT("rmem != NULL", (rmem != NULL)); + LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); +#if LWIP_STATS && MEM_STATS + rmem = (u8_t*)rmem - MEM_LIBC_STATSHELPER_SIZE; + MEM_STATS_DEC_USED(used, *(mem_size_t*)rmem); +#endif + mem_clib_free(rmem); +} + +#elif MEM_USE_POOLS + +/* lwIP heap implemented with different sized pools */ + +/** + * Allocate memory: determine the smallest pool that is big enough + * to contain an element of 'size' and get an element from that pool. + * + * @param size the size in bytes of the memory needed + * @return a pointer to the allocated memory or NULL if the pool is empty + */ +void * +mem_malloc(mem_size_t size) +{ + void *ret; + struct memp_malloc_helper *element = NULL; + memp_t poolnr; + mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); + + for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) { + /* is this pool big enough to hold an element of the required size + plus a struct memp_malloc_helper that saves the pool this element came from? */ + if (required_size <= memp_pools[poolnr]->size) { + element = (struct memp_malloc_helper*)memp_malloc(poolnr); + if (element == NULL) { + /* No need to DEBUGF or ASSERT: This error is already taken care of in memp.c */ +#if MEM_USE_POOLS_TRY_BIGGER_POOL + /** Try a bigger pool if this one is empty! */ + if (poolnr < MEMP_POOL_LAST) { + continue; + } +#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */ + MEM_STATS_INC(err); + return NULL; + } + break; + } + } + if (poolnr > MEMP_POOL_LAST) { + LWIP_ASSERT("mem_malloc(): no pool is that big!", 0); + MEM_STATS_INC(err); + return NULL; + } + + /* save the pool number this element came from */ + element->poolnr = poolnr; + /* and return a pointer to the memory directly after the struct memp_malloc_helper */ + ret = (u8_t*)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); + +#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) + /* truncating to u16_t is safe because struct memp_desc::size is u16_t */ + element->size = (u16_t)size; + MEM_STATS_INC_USED(used, element->size); +#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */ +#if MEMP_OVERFLOW_CHECK + /* initialize unused memory (diff between requested size and selected pool's size) */ + memset((u8_t*)ret + size, 0xcd, memp_pools[poolnr]->size - size); +#endif /* MEMP_OVERFLOW_CHECK */ + return ret; +} + +/** + * Free memory previously allocated by mem_malloc. 
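(Editor's note: this pools-backed variant is compiled when lwipopts.h sets MEM_USE_POOLS and MEMP_USE_CUSTOM_POOLS to 1 and supplies a lwippools.h like the sample at the top of this file.)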
Loads the pool number + * and calls memp_free with that pool number to put the element back into + * its pool + * + * @param rmem the memory element to free + */ +void +mem_free(void *rmem) +{ + struct memp_malloc_helper *hmem; + + LWIP_ASSERT("rmem != NULL", (rmem != NULL)); + LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); + + /* get the original struct memp_malloc_helper */ + /* cast through void* to get rid of alignment warnings */ + hmem = (struct memp_malloc_helper*)(void*)((u8_t*)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper))); + + LWIP_ASSERT("hmem != NULL", (hmem != NULL)); + LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem))); + LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX)); + + MEM_STATS_DEC_USED(used, hmem->size); +#if MEMP_OVERFLOW_CHECK + { + u16_t i; + LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size", + hmem->size <= memp_pools[hmem->poolnr]->size); + /* check that unused memory remained untouched (diff between requested size and selected pool's size) */ + for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) { + u8_t data = *((u8_t*)rmem + i); + LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd); + } + } +#endif /* MEMP_OVERFLOW_CHECK */ + + /* and put it in the pool we saved earlier */ + memp_free(hmem->poolnr, hmem); +} + +#else /* MEM_USE_POOLS */ +/* lwIP replacement for your libc malloc() */ + +/** + * The heap is made up as a list of structs of this type. + * This does not have to be aligned since for getting its size, + * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns. + */ +struct mem { + /** index (-> ram[next]) of the next struct */ + mem_size_t next; + /** index (-> ram[prev]) of the previous struct */ + mem_size_t prev; + /** 1: this area is used; 0: this area is unused */ + u8_t used; +}; + +/** All allocated blocks will be MIN_SIZE bytes big, at least! + * MIN_SIZE can be overridden to suit your needs. Smaller values save space, + * larger values could prevent too small blocks to fragment the RAM too much. */ +#ifndef MIN_SIZE +#define MIN_SIZE 12 +#endif /* MIN_SIZE */ +/* some alignment macros: we define them here for better source code layout */ +#define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE) +#define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem)) +#define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE) + +/** If you want to relocate the heap to external memory, simply define + * LWIP_RAM_HEAP_POINTER as a void-pointer to that location. + * If so, make sure the memory at that location is big enough (see below on + * how that space is calculated). */ +#ifndef LWIP_RAM_HEAP_POINTER +/** the heap. we need one struct mem at the end and some room for alignment */ +LWIP_DECLARE_MEMORY_ALIGNED(ram_heap, MEM_SIZE_ALIGNED + (2U*SIZEOF_STRUCT_MEM)); +#define LWIP_RAM_HEAP_POINTER ram_heap +#endif /* LWIP_RAM_HEAP_POINTER */ + +/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */ +static u8_t *ram; +/** the last entry, always unused! */ +static struct mem *ram_end; +/** pointer to the lowest free block, this is used for faster search */ +static struct mem *lfree; + +/** concurrent access protection */ +#if !NO_SYS +static sys_mutex_t mem_mutex; +#endif + +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + +static volatile u8_t mem_free_count; + +/* Allow mem_free from other (e.g. 
interrupt) context */ +#define LWIP_MEM_FREE_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_free) +#define LWIP_MEM_FREE_PROTECT() SYS_ARCH_PROTECT(lev_free) +#define LWIP_MEM_FREE_UNPROTECT() SYS_ARCH_UNPROTECT(lev_free) +#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc) +#define LWIP_MEM_ALLOC_PROTECT() SYS_ARCH_PROTECT(lev_alloc) +#define LWIP_MEM_ALLOC_UNPROTECT() SYS_ARCH_UNPROTECT(lev_alloc) + +#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + +/* Protect the heap only by using a semaphore */ +#define LWIP_MEM_FREE_DECL_PROTECT() +#define LWIP_MEM_FREE_PROTECT() sys_mutex_lock(&mem_mutex) +#define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex) +/* mem_malloc is protected using semaphore AND LWIP_MEM_ALLOC_PROTECT */ +#define LWIP_MEM_ALLOC_DECL_PROTECT() +#define LWIP_MEM_ALLOC_PROTECT() +#define LWIP_MEM_ALLOC_UNPROTECT() + +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + + +/** + * "Plug holes" by combining adjacent empty struct mems. + * After this function is through, there should not exist + * one empty struct mem pointing to another empty struct mem. + * + * @param mem this points to a struct mem which just has been freed + * @internal this function is only called by mem_free() and mem_trim() + * + * This assumes access to the heap is protected by the calling function + * already. + */ +static void +plug_holes(struct mem *mem) +{ + struct mem *nmem; + struct mem *pmem; + + LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram); + LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end); + LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0); + + /* plug hole forward */ + LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED); + + nmem = (struct mem *)(void *)&ram[mem->next]; + if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) { + /* if mem->next is unused and not end of ram, combine mem and mem->next */ + if (lfree == nmem) { + lfree = mem; + } + mem->next = nmem->next; + ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram); + } + + /* plug hole backward */ + pmem = (struct mem *)(void *)&ram[mem->prev]; + if (pmem != mem && pmem->used == 0) { + /* if mem->prev is unused, combine mem and mem->prev */ + if (lfree == mem) { + lfree = pmem; + } + pmem->next = mem->next; + ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram); + } +} + +/** + * Zero the heap and initialize start, end and lowest-free + */ +void +mem_init(void) +{ + struct mem *mem; + + LWIP_ASSERT("Sanity check alignment", + (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0); + + /* align the heap */ + ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER); + /* initialize the start of the heap */ + mem = (struct mem *)(void *)ram; + mem->next = MEM_SIZE_ALIGNED; + mem->prev = 0; + mem->used = 0; + /* initialize the end of the heap */ + ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED]; + ram_end->used = 1; + ram_end->next = MEM_SIZE_ALIGNED; + ram_end->prev = MEM_SIZE_ALIGNED; + + /* initialize the lowest-free pointer to the start of the heap */ + lfree = (struct mem *)(void *)ram; + + MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED); + + if (sys_mutex_new(&mem_mutex) != ERR_OK) { + LWIP_ASSERT("failed to create mem_mutex", 0); + } +} + +/** + * Put a struct mem back on the heap + * + * @param rmem is the data portion of a struct mem as returned by a previous + * call to mem_malloc() + */ +void +mem_free(void *rmem) +{ + struct mem *mem; + 
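/* editor's note: rmem is the application pointer; the bookkeeping struct mem sits SIZEOF_STRUCT_MEM bytes below it and is recovered by the cast further down */ +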
LWIP_MEM_FREE_DECL_PROTECT(); + + if (rmem == NULL) { + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n")); + return; + } + LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0); + + LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram && + (u8_t *)rmem < (u8_t *)ram_end); + + if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) { + SYS_ARCH_DECL_PROTECT(lev); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n")); + /* protect mem stats from concurrent access */ + SYS_ARCH_PROTECT(lev); + MEM_STATS_INC(illegal); + SYS_ARCH_UNPROTECT(lev); + return; + } + /* protect the heap from concurrent access */ + LWIP_MEM_FREE_PROTECT(); + /* Get the corresponding struct mem ... */ + /* cast through void* to get rid of alignment warnings */ + mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM); + /* ... which has to be in a used state ... */ + LWIP_ASSERT("mem_free: mem->used", mem->used); + /* ... and is now unused. */ + mem->used = 0; + + if (mem < lfree) { + /* the newly freed struct is now the lowest */ + lfree = mem; + } + + MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram))); + + /* finally, see if prev or next are free also */ + plug_holes(mem); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 1; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_MEM_FREE_UNPROTECT(); +} + +/** + * Shrink memory returned by mem_malloc(). + * + * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk + * @param newsize required size after shrinking (needs to be smaller than or + * equal to the previous size) + * @return for compatibility reasons: is always == rmem, at the moment + * or NULL if newsize is > old size, in which case rmem is NOT touched + * or freed! + */ +void * +mem_trim(void *rmem, mem_size_t newsize) +{ + mem_size_t size; + mem_size_t ptr, ptr2; + struct mem *mem, *mem2; + /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */ + LWIP_MEM_FREE_DECL_PROTECT(); + + /* Expand the size of the allocated memory region so that we can + adjust for alignment. */ + newsize = LWIP_MEM_ALIGN_SIZE(newsize); + + if (newsize < MIN_SIZE_ALIGNED) { + /* every data block must be at least MIN_SIZE_ALIGNED long */ + newsize = MIN_SIZE_ALIGNED; + } + + if (newsize > MEM_SIZE_ALIGNED) { + return NULL; + } + + LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram && + (u8_t *)rmem < (u8_t *)ram_end); + + if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) { + SYS_ARCH_DECL_PROTECT(lev); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n")); + /* protect mem stats from concurrent access */ + SYS_ARCH_PROTECT(lev); + MEM_STATS_INC(illegal); + SYS_ARCH_UNPROTECT(lev); + return rmem; + } + /* Get the corresponding struct mem ... */ + /* cast through void* to get rid of alignment warnings */ + mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM); + /* ...
and its offset pointer */ + ptr = (mem_size_t)((u8_t *)mem - ram); + + size = mem->next - ptr - SIZEOF_STRUCT_MEM; + LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size); + if (newsize > size) { + /* not supported */ + return NULL; + } + if (newsize == size) { + /* No change in size, simply return */ + return rmem; + } + + /* protect the heap from concurrent access */ + LWIP_MEM_FREE_PROTECT(); + + mem2 = (struct mem *)(void *)&ram[mem->next]; + if (mem2->used == 0) { + /* The next struct is unused, we can simply move it a little */ + mem_size_t next; + /* remember the old next pointer */ + next = mem2->next; + /* create new struct mem which is moved directly after the shrunk mem */ + ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize; + if (lfree == mem2) { + lfree = (struct mem *)(void *)&ram[ptr2]; + } + mem2 = (struct mem *)(void *)&ram[ptr2]; + mem2->used = 0; + /* restore the next pointer */ + mem2->next = next; + /* link it back to mem */ + mem2->prev = ptr; + /* link mem to it */ + mem->next = ptr2; + /* last thing to restore linked list: as we have moved mem2, + * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not + * the end of the heap */ + if (mem2->next != MEM_SIZE_ALIGNED) { + ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2; + } + MEM_STATS_DEC_USED(used, (size - newsize)); + /* no need to plug holes, we've already done that */ + } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) { + /* Next struct is used but there's room for another struct mem with + * at least MIN_SIZE_ALIGNED of data. + * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem + * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED'). + * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty + * region that couldn't hold data, but when mem->next gets freed, + * the 2 regions would be combined, resulting in more free memory */ + ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize; + mem2 = (struct mem *)(void *)&ram[ptr2]; + if (mem2 < lfree) { + lfree = mem2; + } + mem2->used = 0; + mem2->next = mem->next; + mem2->prev = ptr; + mem->next = ptr2; + if (mem2->next != MEM_SIZE_ALIGNED) { + ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2; + } + MEM_STATS_DEC_USED(used, (size - newsize)); + /* the original mem->next is used, so no need to plug holes! */ + } + /* else { + next struct mem is used but size between mem and mem2 is not big enough + to create another struct mem + -> don't do anything. + -> the remaining space stays unused since it is too small + } */ +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 1; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_MEM_FREE_UNPROTECT(); + return rmem; +} + +/** + * Allocate a block of memory with a minimum of 'size' bytes. + * + * @param size is the minimum size of the requested block in bytes. + * @return pointer to allocated memory or NULL if no free memory was found. + * + * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT). + */ +void * +mem_malloc(mem_size_t size) +{ + mem_size_t ptr, ptr2; + struct mem *mem, *mem2; +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + u8_t local_mem_free_count = 0; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_MEM_ALLOC_DECL_PROTECT(); + + if (size == 0) { + return NULL; + } + + /* Expand the size of the allocated memory region so that we can + adjust for alignment.
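(LWIP_MEM_ALIGN_SIZE() rounds up to the next multiple of MEM_ALIGNMENT, e.g. a 13-byte request becomes 16 bytes when MEM_ALIGNMENT is 4.)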
*/ + size = LWIP_MEM_ALIGN_SIZE(size); + + if (size < MIN_SIZE_ALIGNED) { + /* every data block must be at least MIN_SIZE_ALIGNED long */ + size = MIN_SIZE_ALIGNED; + } + + if (size > MEM_SIZE_ALIGNED) { + return NULL; + } + + /* protect the heap from concurrent access */ + sys_mutex_lock(&mem_mutex); + LWIP_MEM_ALLOC_PROTECT(); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + /* run as long as a mem_free disturbed mem_malloc or mem_trim */ + do { + local_mem_free_count = 0; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + + /* Scan through the heap searching for a free block that is big enough, + * beginning with the lowest free block. + */ + for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size; + ptr = ((struct mem *)(void *)&ram[ptr])->next) { + mem = (struct mem *)(void *)&ram[ptr]; +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 0; + LWIP_MEM_ALLOC_UNPROTECT(); + /* allow mem_free or mem_trim to run */ + LWIP_MEM_ALLOC_PROTECT(); + if (mem_free_count != 0) { + /* If mem_free or mem_trim have run, we have to restart since they + could have altered our current struct mem. */ + local_mem_free_count = 1; + break; + } +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + + if ((!mem->used) && + (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) { + /* mem is not used and at least perfect fit is possible: + * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */ + + if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) { + /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing + * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem') + * -> split large block, create empty remainder, + * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if + * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size, + * struct mem would fit in but no data between mem2 and mem2->next + * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty + * region that couldn't hold data, but when mem->next gets freed, + * the 2 regions would be combined, resulting in more free memory + */ + ptr2 = ptr + SIZEOF_STRUCT_MEM + size; + /* create mem2 struct */ + mem2 = (struct mem *)(void *)&ram[ptr2]; + mem2->used = 0; + mem2->next = mem->next; + mem2->prev = ptr; + /* and insert it between mem and mem->next */ + mem->next = ptr2; + mem->used = 1; + + if (mem2->next != MEM_SIZE_ALIGNED) { + ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2; + } + MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM)); + } else { + /* (a mem2 struct does no fit into the user data space of mem and mem->next will always + * be used at this point: if not we have 2 unused structs in a row, plug_holes should have + * take care of this). + * -> near fit or exact fit: do not split, no mem2 creation + * also can't move mem->next directly behind mem, since mem->next + * will always be used at this point! + */ + mem->used = 1; + MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram)); + } +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT +mem_malloc_adjust_lfree: +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + if (mem == lfree) { + struct mem *cur = lfree; + /* Find next free block after mem and update lowest free pointer */ + while (cur->used && cur != ram_end) { +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 0; + LWIP_MEM_ALLOC_UNPROTECT(); + /* prevent high interrupt latency... 
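by dropping the allocation protection for an instant; if an ISR-context mem_free() ran meanwhile, the mem_free_count check below restarts the scan for the lowest free block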
*/ + LWIP_MEM_ALLOC_PROTECT(); + if (mem_free_count != 0) { + /* If mem_free or mem_trim have run, we have to restart since they + could have altered our current struct mem or lfree. */ + goto mem_malloc_adjust_lfree; + } +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + cur = (struct mem *)(void *)&ram[cur->next]; + } + lfree = cur; + LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used))); + } + LWIP_MEM_ALLOC_UNPROTECT(); + sys_mutex_unlock(&mem_mutex); + LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.", + (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end); + LWIP_ASSERT("mem_malloc: allocated memory properly aligned.", + ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0); + LWIP_ASSERT("mem_malloc: sanity check alignment", + (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0); + + return (u8_t *)mem + SIZEOF_STRUCT_MEM; + } + } +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + /* if we got interrupted by a mem_free, try again */ + } while (local_mem_free_count != 0); +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size)); + MEM_STATS_INC(err); + LWIP_MEM_ALLOC_UNPROTECT(); + sys_mutex_unlock(&mem_mutex); + return NULL; +} + +#endif /* MEM_USE_POOLS */ + +#if MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) +void * +mem_calloc(mem_size_t count, mem_size_t size) +{ + return mem_clib_calloc(count, size); +} + +#else /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */ +/** + * Contiguously allocates enough space for count objects that are size bytes + * of memory each and returns a pointer to the allocated memory. + * + * The allocated memory is filled with bytes of value zero. + * + * @param count number of objects to allocate + * @param size size of the objects to allocate + * @return pointer to allocated memory / NULL pointer if there is an error + */ +void * +mem_calloc(mem_size_t count, mem_size_t size) +{ + void *p; + + /* allocate 'count' objects of size 'size' */ + p = mem_malloc(count * size); + if (p) { + /* zero the memory */ + memset(p, 0, (size_t)count * (size_t)size); + } + return p; +} +#endif /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/memp.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/memp.c new file mode 100644 index 000000000..727a0c2a5 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/memp.c @@ -0,0 +1,496 @@ +/** + * @file + * Dynamic pool memory manager + * + * lwIP has dedicated pools for many structures (netconn, protocol control blocks, + * packet buffers, ...). All these pools are managed here. + * + * @defgroup mempool Memory pools + * @ingroup infrastructure + * Custom memory pools + + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#include "lwip/memp.h" +#include "lwip/sys.h" +#include "lwip/stats.h" + +#include <string.h> + +/* Make sure we include everything we need for size calculation required by memp_std.h */ +#include "lwip/pbuf.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/tcp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/ip4_frag.h" +#include "lwip/netbuf.h" +#include "lwip/api.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/priv/api_msg.h" +#include "lwip/sockets.h" +#include "lwip/netifapi.h" +#include "lwip/etharp.h" +#include "lwip/igmp.h" +#include "lwip/timeouts.h" +/* needed by default MEMP_NUM_SYS_TIMEOUT */ +#include "netif/ppp/ppp_opts.h" +#include "lwip/netdb.h" +#include "lwip/dns.h" +#include "lwip/priv/nd6_priv.h" +#include "lwip/ip6_frag.h" +#include "lwip/mld6.h" + +#define LWIP_MEMPOOL(name,num,size,desc) LWIP_MEMPOOL_DECLARE(name,num,size,desc) +#include "lwip/priv/memp_std.h" + +const struct memp_desc* const memp_pools[MEMP_MAX] = { +#define LWIP_MEMPOOL(name,num,size,desc) &memp_ ## name, +#include "lwip/priv/memp_std.h" +}; + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#if MEMP_MEM_MALLOC && MEMP_OVERFLOW_CHECK >= 2 +#undef MEMP_OVERFLOW_CHECK +/* MEMP_OVERFLOW_CHECK >= 2 does not work with MEMP_MEM_MALLOC, use 1 instead */ +#define MEMP_OVERFLOW_CHECK 1 +#endif + +#if MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC +/** + * Check that memp-lists don't form a circle, using "Floyd's cycle-finding algorithm". + */ +static int +memp_sanity(const struct memp_desc *desc) +{ + struct memp *t, *h; + + t = *desc->tab; + if (t != NULL) { + for (h = t->next; (t != NULL) && (h != NULL); t = t->next, + h = ((h->next != NULL) ? h->next->next : NULL)) { + if (t == h) { + return 0; + } + } + } + + return 1; +} +#endif /* MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC */ + +#if MEMP_OVERFLOW_CHECK +/** + * Check if a memp element was victim of an overflow + * (e.g.
the restricted area after it has been altered) + * + * @param p the memp element to check + * @param desc the pool p comes from + */ +static void +memp_overflow_check_element_overflow(struct memp *p, const struct memp_desc *desc) +{ +#if MEMP_SANITY_REGION_AFTER_ALIGNED > 0 + u16_t k; + u8_t *m; + m = (u8_t*)p + MEMP_SIZE + desc->size; + for (k = 0; k < MEMP_SANITY_REGION_AFTER_ALIGNED; k++) { + if (m[k] != 0xcd) { + char errstr[128] = "detected memp overflow in pool "; + strcat(errstr, desc->desc); + LWIP_ASSERT(errstr, 0); + } + } +#else /* MEMP_SANITY_REGION_AFTER_ALIGNED > 0 */ + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(desc); +#endif /* MEMP_SANITY_REGION_AFTER_ALIGNED > 0 */ +} + +/** + * Check if a memp element was victim of an underflow + * (e.g. the restricted area before it has been altered) + * + * @param p the memp element to check + * @param desc the pool p comes from + */ +static void +memp_overflow_check_element_underflow(struct memp *p, const struct memp_desc *desc) +{ +#if MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 + u16_t k; + u8_t *m; + m = (u8_t*)p + MEMP_SIZE - MEMP_SANITY_REGION_BEFORE_ALIGNED; + for (k = 0; k < MEMP_SANITY_REGION_BEFORE_ALIGNED; k++) { + if (m[k] != 0xcd) { + char errstr[128] = "detected memp underflow in pool "; + strcat(errstr, desc->desc); + LWIP_ASSERT(errstr, 0); + } + } +#else /* MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 */ + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(desc); +#endif /* MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 */ +} + +/** + * Initialize the restricted area of on memp element. + */ +static void +memp_overflow_init_element(struct memp *p, const struct memp_desc *desc) +{ +#if MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 || MEMP_SANITY_REGION_AFTER_ALIGNED > 0 + u8_t *m; +#if MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 + m = (u8_t*)p + MEMP_SIZE - MEMP_SANITY_REGION_BEFORE_ALIGNED; + memset(m, 0xcd, MEMP_SANITY_REGION_BEFORE_ALIGNED); +#endif +#if MEMP_SANITY_REGION_AFTER_ALIGNED > 0 + m = (u8_t*)p + MEMP_SIZE + desc->size; + memset(m, 0xcd, MEMP_SANITY_REGION_AFTER_ALIGNED); +#endif +#else /* MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 || MEMP_SANITY_REGION_AFTER_ALIGNED > 0 */ + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(desc); +#endif /* MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 || MEMP_SANITY_REGION_AFTER_ALIGNED > 0 */ +} + +#if MEMP_OVERFLOW_CHECK >= 2 +/** + * Do an overflow check for all elements in every pool. + * + * @see memp_overflow_check_element for a description of the check + */ +static void +memp_overflow_check_all(void) +{ + u16_t i, j; + struct memp *p; + SYS_ARCH_DECL_PROTECT(old_level); + SYS_ARCH_PROTECT(old_level); + + for (i = 0; i < MEMP_MAX; ++i) { + p = (struct memp*)LWIP_MEM_ALIGN(memp_pools[i]->base); + for (j = 0; j < memp_pools[i]->num; ++j) { + memp_overflow_check_element_overflow(p, memp_pools[i]); + memp_overflow_check_element_underflow(p, memp_pools[i]); + p = LWIP_ALIGNMENT_CAST(struct memp*, ((u8_t*)p + MEMP_SIZE + memp_pools[i]->size + MEMP_SANITY_REGION_AFTER_ALIGNED)); + } + } + SYS_ARCH_UNPROTECT(old_level); +} +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ +#endif /* MEMP_OVERFLOW_CHECK */ + +/** + * Initialize custom memory pool. 
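+ *
+ * Example usage (editor's sketch; struct my_obj and the pool name are
+ * hypothetical, LWIP_MEMPOOL_DECLARE() generates the matching
+ * memp_my_objs descriptor):
+ *
+ * LWIP_MEMPOOL_DECLARE(my_objs, 10, sizeof(struct my_obj), "MY_OBJS")
+ *
+ * memp_init_pool(&memp_my_objs);
+ * struct my_obj *o = (struct my_obj *)memp_malloc_pool(&memp_my_objs);
+ * if (o != NULL) {
+ * // ... use o, then hand it back to its pool ...
+ * memp_free_pool(&memp_my_objs, o);
+ * }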
+ * Related functions: memp_malloc_pool, memp_free_pool + * + * @param desc pool to initialize + */ +void +memp_init_pool(const struct memp_desc *desc) +{ +#if MEMP_MEM_MALLOC + LWIP_UNUSED_ARG(desc); +#else + int i; + struct memp *memp; + + *desc->tab = NULL; + memp = (struct memp*)LWIP_MEM_ALIGN(desc->base); + /* create a linked list of memp elements */ + for (i = 0; i < desc->num; ++i) { + memp->next = *desc->tab; + *desc->tab = memp; +#if MEMP_OVERFLOW_CHECK + memp_overflow_init_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + /* cast through void* to get rid of alignment warnings */ + memp = (struct memp *)(void *)((u8_t *)memp + MEMP_SIZE + desc->size +#if MEMP_OVERFLOW_CHECK + + MEMP_SANITY_REGION_AFTER_ALIGNED +#endif + ); + } +#if MEMP_STATS + desc->stats->avail = desc->num; +#endif /* MEMP_STATS */ +#endif /* !MEMP_MEM_MALLOC */ + +#if MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) + desc->stats->name = desc->desc; +#endif /* MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) */ +} + +/** + * Initializes lwIP built-in pools. + * Related functions: memp_malloc, memp_free + * + * Carves out memp_memory into linked lists for each pool-type. + */ +void +memp_init(void) +{ + u16_t i; + + /* for every pool: */ + for (i = 0; i < LWIP_ARRAYSIZE(memp_pools); i++) { + memp_init_pool(memp_pools[i]); + +#if LWIP_STATS && MEMP_STATS + lwip_stats.memp[i] = memp_pools[i]->stats; +#endif + } + +#if MEMP_OVERFLOW_CHECK >= 2 + /* check everything a first time to see if it worked */ + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ +} + +static void* +#if !MEMP_OVERFLOW_CHECK +do_memp_malloc_pool(const struct memp_desc *desc) +#else +do_memp_malloc_pool_fn(const struct memp_desc *desc, const char* file, const int line) +#endif +{ + struct memp *memp; + SYS_ARCH_DECL_PROTECT(old_level); + +#if MEMP_MEM_MALLOC + memp = (struct memp *)mem_malloc(MEMP_SIZE + MEMP_ALIGN_SIZE(desc->size)); + SYS_ARCH_PROTECT(old_level); +#else /* MEMP_MEM_MALLOC */ + SYS_ARCH_PROTECT(old_level); + + memp = *desc->tab; +#endif /* MEMP_MEM_MALLOC */ + + if (memp != NULL) { +#if !MEMP_MEM_MALLOC +#if MEMP_OVERFLOW_CHECK == 1 + memp_overflow_check_element_overflow(memp, desc); + memp_overflow_check_element_underflow(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + + *desc->tab = memp->next; +#if MEMP_OVERFLOW_CHECK + memp->next = NULL; +#endif /* MEMP_OVERFLOW_CHECK */ +#endif /* !MEMP_MEM_MALLOC */ +#if MEMP_OVERFLOW_CHECK + memp->file = file; + memp->line = line; +#if MEMP_MEM_MALLOC + memp_overflow_init_element(memp, desc); +#endif /* MEMP_MEM_MALLOC */ +#endif /* MEMP_OVERFLOW_CHECK */ + LWIP_ASSERT("memp_malloc: memp properly aligned", + ((mem_ptr_t)memp % MEM_ALIGNMENT) == 0); +#if MEMP_STATS + desc->stats->used++; + if (desc->stats->used > desc->stats->max) { + desc->stats->max = desc->stats->used; + } +#endif + SYS_ARCH_UNPROTECT(old_level); + /* cast through u8_t* to get rid of alignment warnings */ + return ((u8_t*)memp + MEMP_SIZE); + } else { + LWIP_DEBUGF(MEMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("memp_malloc: out of memory in pool %s\n", desc->desc)); +#if MEMP_STATS + desc->stats->err++; +#endif + } + + SYS_ARCH_UNPROTECT(old_level); + return NULL; +} + +/** + * Get an element from a custom pool. 
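+ * (Editor's note: with MEMP_OVERFLOW_CHECK enabled, the *_fn variants
+ * record the __FILE__/__LINE__ of every allocation; the corresponding
+ * macros in memp.h pass these automatically.)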
+ * + * @param desc the pool to get an element from + * + * @return a pointer to the allocated memory or a NULL pointer on error + */ +void * +#if !MEMP_OVERFLOW_CHECK +memp_malloc_pool(const struct memp_desc *desc) +#else +memp_malloc_pool_fn(const struct memp_desc *desc, const char* file, const int line) +#endif +{ + LWIP_ASSERT("invalid pool desc", desc != NULL); + if (desc == NULL) { + return NULL; + } + +#if !MEMP_OVERFLOW_CHECK + return do_memp_malloc_pool(desc); +#else + return do_memp_malloc_pool_fn(desc, file, line); +#endif +} + +/** + * Get an element from a specific pool. + * + * @param type the pool to get an element from + * + * @return a pointer to the allocated memory or a NULL pointer on error + */ +void * +#if !MEMP_OVERFLOW_CHECK +memp_malloc(memp_t type) +#else +memp_malloc_fn(memp_t type, const char* file, const int line) +#endif +{ + void *memp; + LWIP_ERROR("memp_malloc: type < MEMP_MAX", (type < MEMP_MAX), return NULL;); + +#if MEMP_OVERFLOW_CHECK >= 2 + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ + +#if !MEMP_OVERFLOW_CHECK + memp = do_memp_malloc_pool(memp_pools[type]); +#else + memp = do_memp_malloc_pool_fn(memp_pools[type], file, line); +#endif + + return memp; +} + +static void +do_memp_free_pool(const struct memp_desc* desc, void *mem) +{ + struct memp *memp; + SYS_ARCH_DECL_PROTECT(old_level); + + LWIP_ASSERT("memp_free: mem properly aligned", + ((mem_ptr_t)mem % MEM_ALIGNMENT) == 0); + + /* cast through void* to get rid of alignment warnings */ + memp = (struct memp *)(void *)((u8_t*)mem - MEMP_SIZE); + + SYS_ARCH_PROTECT(old_level); + +#if MEMP_OVERFLOW_CHECK == 1 + memp_overflow_check_element_overflow(memp, desc); + memp_overflow_check_element_underflow(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + +#if MEMP_STATS + desc->stats->used--; +#endif + +#if MEMP_MEM_MALLOC + LWIP_UNUSED_ARG(desc); + SYS_ARCH_UNPROTECT(old_level); + mem_free(memp); +#else /* MEMP_MEM_MALLOC */ + memp->next = *desc->tab; + *desc->tab = memp; + +#if MEMP_SANITY_CHECK + LWIP_ASSERT("memp sanity", memp_sanity(desc)); +#endif /* MEMP_SANITY_CHECK */ + + SYS_ARCH_UNPROTECT(old_level); +#endif /* !MEMP_MEM_MALLOC */ +} + +/** + * Put a custom pool element back into its pool. + * + * @param desc the pool where to put mem + * @param mem the memp element to free + */ +void +memp_free_pool(const struct memp_desc* desc, void *mem) +{ + LWIP_ASSERT("invalid pool desc", desc != NULL); + if ((desc == NULL) || (mem == NULL)) { + return; + } + + do_memp_free_pool(desc, mem); +} + +/** + * Put an element back into its pool. 
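+ *
+ * (Editor's sketch of the typed pair for built-in pools, mirroring how
+ * netbuf_new() allocates from MEMP_NETBUF:)
+ *
+ * struct netbuf *buf = (struct netbuf *)memp_malloc(MEMP_NETBUF);
+ * if (buf != NULL) {
+ * memset(buf, 0, sizeof(struct netbuf));
+ * // ... use buf ...
+ * memp_free(MEMP_NETBUF, buf);
+ * }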
+ * + * @param type the pool where to put mem + * @param mem the memp element to free + */ +void +memp_free(memp_t type, void *mem) +{ +#ifdef LWIP_HOOK_MEMP_AVAILABLE + struct memp *old_first; +#endif + + LWIP_ERROR("memp_free: type < MEMP_MAX", (type < MEMP_MAX), return;); + + if (mem == NULL) { + return; + } + +#if MEMP_OVERFLOW_CHECK >= 2 + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ + +#ifdef LWIP_HOOK_MEMP_AVAILABLE + old_first = *memp_pools[type]->tab; +#endif + + do_memp_free_pool(memp_pools[type], mem); + +#ifdef LWIP_HOOK_MEMP_AVAILABLE + if (old_first == NULL) { + LWIP_HOOK_MEMP_AVAILABLE(type); + } +#endif +} diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/netif.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/netif.c new file mode 100644 index 000000000..f16a0ff88 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/netif.c @@ -0,0 +1,1267 @@ +/** + * @file + * lwIP network interface abstraction + * + * @defgroup netif Network interface (NETIF) + * @ingroup callbackstyle_api + * + * @defgroup netif_ip4 IPv4 address handling + * @ingroup netif + * + * @defgroup netif_ip6 IPv6 address handling + * @ingroup netif + * + * @defgroup netif_cd Client data handling + * Store data (void*) on a netif for application usage. + * @see @ref LWIP_NUM_NETIF_CLIENT_DATA + * @ingroup netif + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + */ + +#include "lwip/opt.h" + +#include <string.h> + +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/udp.h" +#include "lwip/raw.h" +#include "lwip/snmp.h" +#include "lwip/igmp.h" +#include "lwip/etharp.h" +#include "lwip/stats.h" +#include "lwip/sys.h" +#include "lwip/ip.h" +#if ENABLE_LOOPBACK +#if LWIP_NETIF_LOOPBACK_MULTITHREADING +#include "lwip/tcpip.h" +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ +#endif /* ENABLE_LOOPBACK */ + +#include "netif/ethernet.h" + +#if LWIP_AUTOIP +#include "lwip/autoip.h" +#endif /* LWIP_AUTOIP */ +#if LWIP_DHCP +#include "lwip/dhcp.h" +#endif /* LWIP_DHCP */ +#if LWIP_IPV6_DHCP6 +#include "lwip/dhcp6.h" +#endif /* LWIP_IPV6_DHCP6 */ +#if LWIP_IPV6_MLD +#include "lwip/mld6.h" +#endif /* LWIP_IPV6_MLD */ +#if LWIP_IPV6 +#include "lwip/nd6.h" +#endif + +#if LWIP_NETIF_STATUS_CALLBACK +#define NETIF_STATUS_CALLBACK(n) do{ if (n->status_callback) { (n->status_callback)(n); }}while(0) +#else +#define NETIF_STATUS_CALLBACK(n) +#endif /* LWIP_NETIF_STATUS_CALLBACK */ + +#if LWIP_NETIF_LINK_CALLBACK +#define NETIF_LINK_CALLBACK(n) do{ if (n->link_callback) { (n->link_callback)(n); }}while(0) +#else +#define NETIF_LINK_CALLBACK(n) +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +struct netif *netif_list; +struct netif *netif_default; + +static u8_t netif_num; + +#if LWIP_NUM_NETIF_CLIENT_DATA > 0 +static u8_t netif_client_id; +#endif + +#define NETIF_REPORT_TYPE_IPV4 0x01 +#define NETIF_REPORT_TYPE_IPV6 0x02 +static void netif_issue_reports(struct netif* netif, u8_t report_type); + +#if LWIP_IPV6 +static err_t netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr); +#endif /* LWIP_IPV6 */ + +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +static err_t netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t* addr); +#endif +#if LWIP_IPV6 +static err_t netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t* addr); +#endif + + +static struct netif loop_netif; + +/** + * Initialize a lwip network interface structure for a loopback interface + * + * @param netif the lwip network interface structure for this loopif + * @return ERR_OK if the loopif is initialized + * ERR_MEM if private data couldn't be allocated + */ +static err_t +netif_loopif_init(struct netif *netif) +{ + /* initialize the snmp variables and counters inside the struct netif + * ifSpeed: no assumption can be made!
+ */ + MIB2_INIT_NETIF(netif, snmp_ifType_softwareLoopback, 0); + + netif->name[0] = 'l'; + netif->name[1] = 'o'; +#if LWIP_IPV4 + netif->output = netif_loop_output_ipv4; +#endif +#if LWIP_IPV6 + netif->output_ip6 = netif_loop_output_ipv6; +#endif +#if LWIP_LOOPIF_MULTICAST + netif->flags |= NETIF_FLAG_IGMP; +#endif + return ERR_OK; +} +#endif /* LWIP_HAVE_LOOPIF */ + +void +netif_init(void) +{ +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +#define LOOPIF_ADDRINIT &loop_ipaddr, &loop_netmask, &loop_gw, + ip4_addr_t loop_ipaddr, loop_netmask, loop_gw; + IP4_ADDR(&loop_gw, 127,0,0,1); + IP4_ADDR(&loop_ipaddr, 127,0,0,1); + IP4_ADDR(&loop_netmask, 255,0,0,0); +#else /* LWIP_IPV4 */ +#define LOOPIF_ADDRINIT +#endif /* LWIP_IPV4 */ + +#if NO_SYS + netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, ip_input); +#else /* NO_SYS */ + netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, tcpip_input); +#endif /* NO_SYS */ + +#if LWIP_IPV6 + IP_ADDR6_HOST(loop_netif.ip6_addr, 0, 0, 0, 0x00000001UL); + loop_netif.ip6_addr_state[0] = IP6_ADDR_VALID; +#endif /* LWIP_IPV6 */ + + netif_set_link_up(&loop_netif); + netif_set_up(&loop_netif); + +#endif /* LWIP_HAVE_LOOPIF */ +} + +/** + * @ingroup lwip_nosys + * Forwards a received packet for input processing with + * ethernet_input() or ip_input() depending on netif flags. + * Don't call directly, pass to netif_add() and call + * netif->input(). + * Only works if the netif driver correctly sets + * NETIF_FLAG_ETHARP and/or NETIF_FLAG_ETHERNET flag! + */ +err_t +netif_input(struct pbuf *p, struct netif *inp) +{ +#if LWIP_ETHERNET + if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { + return ethernet_input(p, inp); + } else +#endif /* LWIP_ETHERNET */ + return ip_input(p, inp); +} + +/** + * @ingroup netif + * Add a network interface to the list of lwIP netifs. + * + * @param netif a pre-allocated netif structure + * @param ipaddr IP address for the new netif + * @param netmask network mask for the new netif + * @param gw default gateway IP address for the new netif + * @param state opaque data passed to the new netif + * @param init callback function that initializes the interface + * @param input callback function that is called to pass + * ingress packets up in the protocol layer stack.\n + * It is recommended to use a function that passes the input directly + * to the stack (netif_input(), NO_SYS=1 mode) or via sending a + * message to TCPIP thread (tcpip_input(), NO_SYS=0 mode).\n + * These functions use netif flags NETIF_FLAG_ETHARP and NETIF_FLAG_ETHERNET + * to decide whether to forward to ethernet_input() or ip_input(). + * In other words, the functions only work when the netif + * driver is implemented correctly!\n + * Most members of struct netif should be be initialized by the + * netif init function = netif driver (init parameter of this function).\n + * IPv6: Don't forget to call netif_create_ip6_linklocal_address() after + * setting the MAC address in struct netif.hwaddr + * (IPv6 requires a link-local address). + * + * @return netif, or NULL if failed. 
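+ *
+ * Example usage (editor's sketch; ethernetif_init is the application's
+ * own driver init function, e.g. the STM32Cube ethernetif.c glue):
+ *
+ * static struct netif eth0;
+ * ip4_addr_t ip, mask, gw;
+ * IP4_ADDR(&ip, 192,168,1,10);
+ * IP4_ADDR(&mask, 255,255,255,0);
+ * IP4_ADDR(&gw, 192,168,1,1);
+ * netif_add(&eth0, &ip, &mask, &gw, NULL, ethernetif_init, tcpip_input);
+ * netif_set_default(&eth0);
+ * netif_set_up(&eth0);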
+ */ +struct netif * +netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input) +{ +#if LWIP_IPV6 + s8_t i; +#endif + + LWIP_ASSERT("No init function given", init != NULL); + + /* reset new interface configuration state */ +#if LWIP_IPV4 + ip_addr_set_zero_ip4(&netif->ip_addr); + ip_addr_set_zero_ip4(&netif->netmask); + ip_addr_set_zero_ip4(&netif->gw); +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + ip_addr_set_zero_ip6(&netif->ip6_addr[i]); + netif->ip6_addr_state[i] = IP6_ADDR_INVALID; + } + netif->output_ip6 = netif_null_output_ip6; +#endif /* LWIP_IPV6 */ + NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_ENABLE_ALL); + netif->flags = 0; +#ifdef netif_get_client_data + memset(netif->client_data, 0, sizeof(netif->client_data)); +#endif /* LWIP_NUM_NETIF_CLIENT_DATA */ +#if LWIP_IPV6_AUTOCONFIG + /* IPv6 address autoconfiguration not enabled by default */ + netif->ip6_autoconfig_enabled = 0; +#endif /* LWIP_IPV6_AUTOCONFIG */ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + netif->rs_count = LWIP_ND6_MAX_MULTICAST_SOLICIT; +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ +#if LWIP_NETIF_STATUS_CALLBACK + netif->status_callback = NULL; +#endif /* LWIP_NETIF_STATUS_CALLBACK */ +#if LWIP_NETIF_LINK_CALLBACK + netif->link_callback = NULL; +#endif /* LWIP_NETIF_LINK_CALLBACK */ +#if LWIP_IGMP + netif->igmp_mac_filter = NULL; +#endif /* LWIP_IGMP */ +#if LWIP_IPV6 && LWIP_IPV6_MLD + netif->mld_mac_filter = NULL; +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ +#if ENABLE_LOOPBACK + netif->loop_first = NULL; + netif->loop_last = NULL; +#endif /* ENABLE_LOOPBACK */ + + /* remember netif specific state information data */ + netif->state = state; + netif->num = netif_num++; + netif->input = input; + + NETIF_SET_HWADDRHINT(netif, NULL); +#if ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS + netif->loop_cnt_current = 0; +#endif /* ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS */ + +#if LWIP_IPV4 + netif_set_addr(netif, ipaddr, netmask, gw); +#endif /* LWIP_IPV4 */ + + /* call user specified initialization function for netif */ + if (init(netif) != ERR_OK) { + return NULL; + } + + /* add this netif to the list */ + netif->next = netif_list; + netif_list = netif; + mib2_netif_added(netif); + +#if LWIP_IGMP + /* start IGMP processing */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_start(netif); + } +#endif /* LWIP_IGMP */ + + LWIP_DEBUGF(NETIF_DEBUG, ("netif: added interface %c%c IP", + netif->name[0], netif->name[1])); +#if LWIP_IPV4 + LWIP_DEBUGF(NETIF_DEBUG, (" addr ")); + ip4_addr_debug_print(NETIF_DEBUG, ipaddr); + LWIP_DEBUGF(NETIF_DEBUG, (" netmask ")); + ip4_addr_debug_print(NETIF_DEBUG, netmask); + LWIP_DEBUGF(NETIF_DEBUG, (" gw ")); + ip4_addr_debug_print(NETIF_DEBUG, gw); +#endif /* LWIP_IPV4 */ + LWIP_DEBUGF(NETIF_DEBUG, ("\n")); + return netif; +} + +#if LWIP_IPV4 +/** + * @ingroup netif_ip4 + * Change IP address configuration for a network interface (including netmask + * and default gateway). 
+ * + * @param netif the network interface to change + * @param ipaddr the new IP address + * @param netmask the new netmask + * @param gw the new default gateway + */ +void +netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, + const ip4_addr_t *gw) +{ + if (ip4_addr_isany(ipaddr)) { + /* when removing an address, we have to remove it *before* changing netmask/gw + to ensure that tcp RST segment can be sent correctly */ + netif_set_ipaddr(netif, ipaddr); + netif_set_netmask(netif, netmask); + netif_set_gw(netif, gw); + } else { + netif_set_netmask(netif, netmask); + netif_set_gw(netif, gw); + /* set ipaddr last to ensure netmask/gw have been set when status callback is called */ + netif_set_ipaddr(netif, ipaddr); + } +} +#endif /* LWIP_IPV4*/ + +/** + * @ingroup netif + * Remove a network interface from the list of lwIP netifs. + * + * @param netif the network interface to remove + */ +void +netif_remove(struct netif *netif) +{ +#if LWIP_IPV6 + int i; +#endif + + if (netif == NULL) { + return; + } + +#if LWIP_IPV4 + if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) { +#if LWIP_TCP + tcp_netif_ip_addr_changed(netif_ip_addr4(netif), NULL); +#endif /* LWIP_TCP */ +#if LWIP_UDP + udp_netif_ip_addr_changed(netif_ip_addr4(netif), NULL); +#endif /* LWIP_UDP */ +#if LWIP_RAW + raw_netif_ip_addr_changed(netif_ip_addr4(netif), NULL); +#endif /* LWIP_RAW */ + } + +#if LWIP_IGMP + /* stop IGMP processing */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_stop(netif); + } +#endif /* LWIP_IGMP */ +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { +#if LWIP_TCP + tcp_netif_ip_addr_changed(netif_ip_addr6(netif, i), NULL); +#endif /* LWIP_TCP */ +#if LWIP_UDP + udp_netif_ip_addr_changed(netif_ip_addr6(netif, i), NULL); +#endif /* LWIP_UDP */ +#if LWIP_RAW + raw_netif_ip_addr_changed(netif_ip_addr6(netif, i), NULL); +#endif /* LWIP_RAW */ + } + } +#if LWIP_IPV6_MLD + /* stop MLD processing */ + mld6_stop(netif); +#endif /* LWIP_IPV6_MLD */ +#endif /* LWIP_IPV6 */ + if (netif_is_up(netif)) { + /* set netif down before removing (call callback function) */ + netif_set_down(netif); + } + + mib2_remove_ip4(netif); + + /* this netif is default? */ + if (netif_default == netif) { + /* reset default netif */ + netif_set_default(NULL); + } + /* is it the first netif? */ + if (netif_list == netif) { + netif_list = netif->next; + } else { + /* look for netif further down the list */ + struct netif * tmp_netif; + for (tmp_netif = netif_list; tmp_netif != NULL; tmp_netif = tmp_netif->next) { + if (tmp_netif->next == netif) { + tmp_netif->next = netif->next; + break; + } + } + if (tmp_netif == NULL) { + return; /* netif is not on the list */ + } + } + mib2_netif_removed(netif); +#if LWIP_NETIF_REMOVE_CALLBACK + if (netif->remove_callback) { + netif->remove_callback(netif); + } +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + LWIP_DEBUGF( NETIF_DEBUG, ("netif_remove: removed netif\n") ); +} + +/** + * @ingroup netif + * Find a network interface by searching for its name + * + * @param name the name of the netif (like netif->name) plus concatenated number + * in ascii representation (e.g. 
'en0') + */ +struct netif * +netif_find(const char *name) +{ + struct netif *netif; + u8_t num; + + if (name == NULL) { + return NULL; + } + + num = (u8_t)(name[2] - '0'); + + for (netif = netif_list; netif != NULL; netif = netif->next) { + if (num == netif->num && + name[0] == netif->name[0] && + name[1] == netif->name[1]) { + LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: found %c%c\n", name[0], name[1])); + return netif; + } + } + LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: didn't find %c%c\n", name[0], name[1])); + return NULL; +} + +#if LWIP_IPV4 +/** + * @ingroup netif_ip4 + * Change the IP address of a network interface + * + * @param netif the network interface to change + * @param ipaddr the new IP address + * + * @note call netif_set_addr() if you also want to change netmask and + * default gateway + */ +void +netif_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr) +{ + ip_addr_t new_addr; + *ip_2_ip4(&new_addr) = (ipaddr ? *ipaddr : *IP4_ADDR_ANY4); + IP_SET_TYPE_VAL(new_addr, IPADDR_TYPE_V4); + + /* address is actually being changed? */ + if (ip4_addr_cmp(ip_2_ip4(&new_addr), netif_ip4_addr(netif)) == 0) { + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: netif address being changed\n")); +#if LWIP_TCP + tcp_netif_ip_addr_changed(netif_ip_addr4(netif), &new_addr); +#endif /* LWIP_TCP */ +#if LWIP_UDP + udp_netif_ip_addr_changed(netif_ip_addr4(netif), &new_addr); +#endif /* LWIP_UDP */ +#if LWIP_RAW + raw_netif_ip_addr_changed(netif_ip_addr4(netif), &new_addr); +#endif /* LWIP_RAW */ + + mib2_remove_ip4(netif); + mib2_remove_route_ip4(0, netif); + /* set new IP address to netif */ + ip4_addr_set(ip_2_ip4(&netif->ip_addr), ipaddr); + IP_SET_TYPE_VAL(netif->ip_addr, IPADDR_TYPE_V4); + mib2_add_ip4(netif); + mib2_add_route_ip4(0, netif); + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4); + + NETIF_STATUS_CALLBACK(netif); + } + + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IP address of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + netif->name[0], netif->name[1], + ip4_addr1_16(netif_ip4_addr(netif)), + ip4_addr2_16(netif_ip4_addr(netif)), + ip4_addr3_16(netif_ip4_addr(netif)), + ip4_addr4_16(netif_ip4_addr(netif)))); +} + +/** + * @ingroup netif_ip4 + * Change the default gateway for a network interface + * + * @param netif the network interface to change + * @param gw the new default gateway + * + * @note call netif_set_addr() if you also want to change ip address and netmask + */ +void +netif_set_gw(struct netif *netif, const ip4_addr_t *gw) +{ + ip4_addr_set(ip_2_ip4(&netif->gw), gw); + IP_SET_TYPE_VAL(netif->gw, IPADDR_TYPE_V4); + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: GW address of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + netif->name[0], netif->name[1], + ip4_addr1_16(netif_ip4_gw(netif)), + ip4_addr2_16(netif_ip4_gw(netif)), + ip4_addr3_16(netif_ip4_gw(netif)), + ip4_addr4_16(netif_ip4_gw(netif)))); +} + +/** + * @ingroup netif_ip4 + * Change the netmask of a network interface + * + * @param netif the network interface to change + * @param netmask the new netmask + * + * @note call netif_set_addr() if you also want to change ip address and + * default gateway + */ +void +netif_set_netmask(struct netif *netif, const ip4_addr_t *netmask) +{ + mib2_remove_route_ip4(0, netif); + /* set new netmask to netif */ + ip4_addr_set(ip_2_ip4(&netif->netmask), netmask); + IP_SET_TYPE_VAL(netif->netmask, IPADDR_TYPE_V4); + mib2_add_route_ip4(0, netif); + LWIP_DEBUGF(NETIF_DEBUG | 
LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: netmask of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + netif->name[0], netif->name[1], + ip4_addr1_16(netif_ip4_netmask(netif)), + ip4_addr2_16(netif_ip4_netmask(netif)), + ip4_addr3_16(netif_ip4_netmask(netif)), + ip4_addr4_16(netif_ip4_netmask(netif)))); +} +#endif /* LWIP_IPV4 */ + +/** + * @ingroup netif + * Set a network interface as the default network interface + * (used to output all packets for which no specific route is found) + * + * @param netif the default network interface + */ +void +netif_set_default(struct netif *netif) +{ + if (netif == NULL) { + /* remove default route */ + mib2_remove_route_ip4(1, netif); + } else { + /* install default route */ + mib2_add_route_ip4(1, netif); + } + netif_default = netif; + LWIP_DEBUGF(NETIF_DEBUG, ("netif: setting default interface %c%c\n", + netif ? netif->name[0] : '\'', netif ? netif->name[1] : '\'')); +} + +/** + * @ingroup netif + * Bring an interface up, available for processing + * traffic. + */ +void +netif_set_up(struct netif *netif) +{ + if (!(netif->flags & NETIF_FLAG_UP)) { + netif->flags |= NETIF_FLAG_UP; + + MIB2_COPY_SYSUPTIME_TO(&netif->ts); + + NETIF_STATUS_CALLBACK(netif); + + if (netif->flags & NETIF_FLAG_LINK_UP) { + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4|NETIF_REPORT_TYPE_IPV6); + } + } +} + +/** Send ARP/IGMP/MLD/RS events, e.g. on link-up/netif-up or addr-change + */ +static void +netif_issue_reports(struct netif* netif, u8_t report_type) +{ +#if LWIP_IPV4 + if ((report_type & NETIF_REPORT_TYPE_IPV4) && + !ip4_addr_isany_val(*netif_ip4_addr(netif))) { +#if LWIP_ARP + /* For Ethernet network interfaces, we would like to send a "gratuitous ARP" */ + if (netif->flags & (NETIF_FLAG_ETHARP)) { + etharp_gratuitous(netif); + } +#endif /* LWIP_ARP */ + +#if LWIP_IGMP + /* resend IGMP memberships */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_report_groups(netif); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 + if (report_type & NETIF_REPORT_TYPE_IPV6) { +#if LWIP_IPV6_MLD + /* send mld memberships */ + mld6_report_groups(netif); +#endif /* LWIP_IPV6_MLD */ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* Send Router Solicitation messages. */ + netif->rs_count = LWIP_ND6_MAX_MULTICAST_SOLICIT; +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + } +#endif /* LWIP_IPV6 */ +} + +/** + * @ingroup netif + * Bring an interface down, disabling any traffic processing. 
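+ *
+ * One possible administrative sequence (a sketch; 'netif' is assumed to be
+ * an already added and configured interface):
+ * @code{.c}
+ * netif_set_down(netif);  // stop traffic processing while reconfiguring
+ * // ... change addresses, options, etc. ...
+ * netif_set_up(netif);    // resume; reports are re-issued if the link is up
+ * @endcode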
+ */ +void +netif_set_down(struct netif *netif) +{ + if (netif->flags & NETIF_FLAG_UP) { + netif->flags &= ~NETIF_FLAG_UP; + MIB2_COPY_SYSUPTIME_TO(&netif->ts); + +#if LWIP_IPV4 && LWIP_ARP + if (netif->flags & NETIF_FLAG_ETHARP) { + etharp_cleanup_netif(netif); + } +#endif /* LWIP_IPV4 && LWIP_ARP */ + +#if LWIP_IPV6 + nd6_cleanup_netif(netif); +#endif /* LWIP_IPV6 */ + + NETIF_STATUS_CALLBACK(netif); + } +} + +#if LWIP_NETIF_STATUS_CALLBACK +/** + * @ingroup netif + * Set callback to be called when interface is brought up/down or address is changed while up + */ +void +netif_set_status_callback(struct netif *netif, netif_status_callback_fn status_callback) +{ + if (netif) { + netif->status_callback = status_callback; + } +} +#endif /* LWIP_NETIF_STATUS_CALLBACK */ + +#if LWIP_NETIF_REMOVE_CALLBACK +/** + * @ingroup netif + * Set callback to be called when the interface has been removed + */ +void +netif_set_remove_callback(struct netif *netif, netif_status_callback_fn remove_callback) +{ + if (netif) { + netif->remove_callback = remove_callback; + } +} +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + +/** + * @ingroup netif + * Called by a driver when its link goes up + */ +void +netif_set_link_up(struct netif *netif) +{ + if (!(netif->flags & NETIF_FLAG_LINK_UP)) { + netif->flags |= NETIF_FLAG_LINK_UP; + +#if LWIP_DHCP + dhcp_network_changed(netif); +#endif /* LWIP_DHCP */ + +#if LWIP_AUTOIP + autoip_network_changed(netif); +#endif /* LWIP_AUTOIP */ + + if (netif->flags & NETIF_FLAG_UP) { + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4|NETIF_REPORT_TYPE_IPV6); + } + NETIF_LINK_CALLBACK(netif); + } +} + +/** + * @ingroup netif + * Called by a driver when its link goes down + */ +void +netif_set_link_down(struct netif *netif ) +{ + if (netif->flags & NETIF_FLAG_LINK_UP) { + netif->flags &= ~NETIF_FLAG_LINK_UP; + NETIF_LINK_CALLBACK(netif); + } +} + +#if LWIP_NETIF_LINK_CALLBACK +/** + * @ingroup netif + * Set callback to be called when link is brought up/down + */ +void +netif_set_link_callback(struct netif *netif, netif_status_callback_fn link_callback) +{ + if (netif) { + netif->link_callback = link_callback; + } +} +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +#if ENABLE_LOOPBACK +/** + * @ingroup netif + * Send an IP packet to be received on the same netif (loopif-like). + * The pbuf is simply copied and handed back to netif->input. + * In multithreaded mode, this is done directly since netif->input must put + * the packet on a queue. + * In callback mode, the packet is put on an internal queue and is fed to + * netif->input by netif_poll(). + * + * @param netif the lwip network interface structure + * @param p the (IP) packet to 'send' + * @return ERR_OK if the packet has been sent + * ERR_MEM if the pbuf used to copy the packet couldn't be allocated + */ +err_t +netif_loop_output(struct netif *netif, struct pbuf *p) +{ + struct pbuf *r; + err_t err; + struct pbuf *last; +#if LWIP_LOOPBACK_MAX_PBUFS + u16_t clen = 0; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + /* If we have a loopif, SNMP counters are adjusted for it, + * if not they are adjusted for 'netif'. 
*/ +#if MIB2_STATS +#if LWIP_HAVE_LOOPIF + struct netif *stats_if = &loop_netif; +#else /* LWIP_HAVE_LOOPIF */ + struct netif *stats_if = netif; +#endif /* LWIP_HAVE_LOOPIF */ +#endif /* MIB2_STATS */ + SYS_ARCH_DECL_PROTECT(lev); + + /* Allocate a new pbuf */ + r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM); + if (r == NULL) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return ERR_MEM; + } +#if LWIP_LOOPBACK_MAX_PBUFS + clen = pbuf_clen(r); + /* check for overflow or too many pbuf on queue */ + if (((netif->loop_cnt_current + clen) < netif->loop_cnt_current) || + ((netif->loop_cnt_current + clen) > LWIP_LOOPBACK_MAX_PBUFS)) { + pbuf_free(r); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return ERR_MEM; + } + netif->loop_cnt_current += clen; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + /* Copy the whole pbuf queue p into the single pbuf r */ + if ((err = pbuf_copy(r, p)) != ERR_OK) { + pbuf_free(r); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return err; + } + + /* Put the packet on a linked list which gets emptied through calling + netif_poll(). */ + + /* let last point to the last pbuf in chain r */ + for (last = r; last->next != NULL; last = last->next) { + /* nothing to do here, just get to the last pbuf */ + } + + SYS_ARCH_PROTECT(lev); + if (netif->loop_first != NULL) { + LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL); + netif->loop_last->next = r; + netif->loop_last = last; + } else { + netif->loop_first = r; + netif->loop_last = last; + } + SYS_ARCH_UNPROTECT(lev); + + LINK_STATS_INC(link.xmit); + MIB2_STATS_NETIF_ADD(stats_if, ifoutoctets, p->tot_len); + MIB2_STATS_NETIF_INC(stats_if, ifoutucastpkts); + +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + /* For multithreading environment, schedule a call to netif_poll */ + tcpip_callback_with_block((tcpip_callback_fn)netif_poll, netif, 0); +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + + return ERR_OK; +} + +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +static err_t +netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t* addr) +{ + LWIP_UNUSED_ARG(addr); + return netif_loop_output(netif, p); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +static err_t +netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t* addr) +{ + LWIP_UNUSED_ARG(addr); + return netif_loop_output(netif, p); +} +#endif /* LWIP_IPV6 */ +#endif /* LWIP_HAVE_LOOPIF */ + + +/** + * Call netif_poll() in the main loop of your application. This is to prevent + * reentering non-reentrant functions like tcp_input(). Packets passed to + * netif_loop_output() are put on a list that is passed to netif->input() by + * netif_poll(). + */ +void +netif_poll(struct netif *netif) +{ + /* If we have a loopif, SNMP counters are adjusted for it, + * if not they are adjusted for 'netif'. */ +#if MIB2_STATS +#if LWIP_HAVE_LOOPIF + struct netif *stats_if = &loop_netif; +#else /* LWIP_HAVE_LOOPIF */ + struct netif *stats_if = netif; +#endif /* LWIP_HAVE_LOOPIF */ +#endif /* MIB2_STATS */ + SYS_ARCH_DECL_PROTECT(lev); + + /* Get a packet from the list. 
With SYS_LIGHTWEIGHT_PROT=1, this is protected */ + SYS_ARCH_PROTECT(lev); + while (netif->loop_first != NULL) { + struct pbuf *in, *in_end; +#if LWIP_LOOPBACK_MAX_PBUFS + u8_t clen = 1; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + in = in_end = netif->loop_first; + while (in_end->len != in_end->tot_len) { + LWIP_ASSERT("bogus pbuf: len != tot_len but next == NULL!", in_end->next != NULL); + in_end = in_end->next; +#if LWIP_LOOPBACK_MAX_PBUFS + clen++; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + } +#if LWIP_LOOPBACK_MAX_PBUFS + /* adjust the number of pbufs on queue */ + LWIP_ASSERT("netif->loop_cnt_current underflow", + ((netif->loop_cnt_current - clen) < netif->loop_cnt_current)); + netif->loop_cnt_current -= clen; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + /* 'in_end' now points to the last pbuf from 'in' */ + if (in_end == netif->loop_last) { + /* this was the last pbuf in the list */ + netif->loop_first = netif->loop_last = NULL; + } else { + /* pop the pbuf off the list */ + netif->loop_first = in_end->next; + LWIP_ASSERT("should not be null since first != last!", netif->loop_first != NULL); + } + /* De-queue the pbuf from its successors on the 'loop_' list. */ + in_end->next = NULL; + SYS_ARCH_UNPROTECT(lev); + + LINK_STATS_INC(link.recv); + MIB2_STATS_NETIF_ADD(stats_if, ifinoctets, in->tot_len); + MIB2_STATS_NETIF_INC(stats_if, ifinucastpkts); + /* loopback packets are always IP packets! */ + if (ip_input(in, netif) != ERR_OK) { + pbuf_free(in); + } + SYS_ARCH_PROTECT(lev); + } + SYS_ARCH_UNPROTECT(lev); +} + +#if !LWIP_NETIF_LOOPBACK_MULTITHREADING +/** + * Calls netif_poll() for every netif on the netif_list. + */ +void +netif_poll_all(void) +{ + struct netif *netif = netif_list; + /* loop through netifs */ + while (netif != NULL) { + netif_poll(netif); + /* proceed to next network interface */ + netif = netif->next; + } +} +#endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */ +#endif /* ENABLE_LOOPBACK */ + +#if LWIP_NUM_NETIF_CLIENT_DATA > 0 +/** + * @ingroup netif_cd + * Allocate an index to store data in client_data member of struct netif. + * Returned value is an index in mentioned array. 
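+ *
+ * A sketch of the intended pattern (the identifiers are illustrative):
+ * @code{.c}
+ * static u8_t my_data_id;                               // allocated once
+ * my_data_id = netif_alloc_client_data_id();
+ * netif_set_client_data(netif, my_data_id, my_state);   // attach a pointer
+ * my_state = netif_get_client_data(netif, my_data_id);  // read it back later
+ * @endcode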
+ * @see LWIP_NUM_NETIF_CLIENT_DATA + */ +u8_t +netif_alloc_client_data_id(void) +{ + u8_t result = netif_client_id; + netif_client_id++; + + LWIP_ASSERT("Increase LWIP_NUM_NETIF_CLIENT_DATA in lwipopts.h", result < LWIP_NUM_NETIF_CLIENT_DATA); + return result + LWIP_NETIF_CLIENT_DATA_INDEX_MAX; +} +#endif + +#if LWIP_IPV6 +/** + * @ingroup netif_ip6 + * Change an IPv6 address of a network interface + * + * @param netif the network interface to change + * @param addr_idx index of the IPv6 address + * @param addr6 the new IPv6 address + * + * @note call netif_ip6_addr_set_state() to set the address valid/temptative + */ +void +netif_ip6_addr_set(struct netif *netif, s8_t addr_idx, const ip6_addr_t *addr6) +{ + LWIP_ASSERT("addr6 != NULL", addr6 != NULL); + netif_ip6_addr_set_parts(netif, addr_idx, addr6->addr[0], addr6->addr[1], + addr6->addr[2], addr6->addr[3]); +} + +/* + * Change an IPv6 address of a network interface (internal version taking 4 * u32_t) + * + * @param netif the network interface to change + * @param addr_idx index of the IPv6 address + * @param i0 word0 of the new IPv6 address + * @param i1 word1 of the new IPv6 address + * @param i2 word2 of the new IPv6 address + * @param i3 word3 of the new IPv6 address + */ +void +netif_ip6_addr_set_parts(struct netif *netif, s8_t addr_idx, u32_t i0, u32_t i1, u32_t i2, u32_t i3) +{ + const ip6_addr_t *old_addr; + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); + + old_addr = netif_ip6_addr(netif, addr_idx); + /* address is actually being changed? */ + if ((old_addr->addr[0] != i0) || (old_addr->addr[1] != i1) || + (old_addr->addr[2] != i2) || (old_addr->addr[3] != i3)) { + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set: netif address being changed\n")); + + if (netif_ip6_addr_state(netif, addr_idx) & IP6_ADDR_VALID) { +#if LWIP_TCP || LWIP_UDP + ip_addr_t new_ipaddr; + IP_ADDR6(&new_ipaddr, i0, i1, i2, i3); +#endif /* LWIP_TCP || LWIP_UDP */ +#if LWIP_TCP + tcp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); +#endif /* LWIP_TCP */ +#if LWIP_UDP + udp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); +#endif /* LWIP_UDP */ +#if LWIP_RAW + raw_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); +#endif /* LWIP_RAW */ + } + /* @todo: remove/readd mib2 ip6 entries? 
*/ + + IP6_ADDR(ip_2_ip6(&(netif->ip6_addr[addr_idx])), i0, i1, i2, i3); + IP_SET_TYPE_VAL(netif->ip6_addr[addr_idx], IPADDR_TYPE_V6); + + if (netif_ip6_addr_state(netif, addr_idx) & IP6_ADDR_VALID) { + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); + NETIF_STATUS_CALLBACK(netif); + } + } + + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", + addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), + netif_ip6_addr_state(netif, addr_idx))); +} + +/** + * @ingroup netif_ip6 + * Change the state of an IPv6 address of a network interface + * (INVALID, TEMPTATIVE, PREFERRED, DEPRECATED, where TEMPTATIVE + * includes the number of checks done, see ip6_addr.h) + * + * @param netif the network interface to change + * @param addr_idx index of the IPv6 address + * @param state the new IPv6 address state + */ +void +netif_ip6_addr_set_state(struct netif* netif, s8_t addr_idx, u8_t state) +{ + u8_t old_state; + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); + + old_state = netif_ip6_addr_state(netif, addr_idx); + /* state is actually being changed? */ + if (old_state != state) { + u8_t old_valid = old_state & IP6_ADDR_VALID; + u8_t new_valid = state & IP6_ADDR_VALID; + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set_state: netif address state being changed\n")); + +#if LWIP_IPV6_MLD + /* Reevaluate solicited-node multicast group membership. */ + if (netif->flags & NETIF_FLAG_MLD6) { + nd6_adjust_mld_membership(netif, addr_idx, state); + } +#endif /* LWIP_IPV6_MLD */ + + if (old_valid && !new_valid) { + /* address about to be removed by setting invalid */ +#if LWIP_TCP + tcp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); +#endif /* LWIP_TCP */ +#if LWIP_UDP + udp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); +#endif /* LWIP_UDP */ +#if LWIP_RAW + raw_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); +#endif /* LWIP_RAW */ + /* @todo: remove mib2 ip6 entries? */ + } + netif->ip6_addr_state[addr_idx] = state; + + if (!old_valid && new_valid) { + /* address added by setting valid */ + /* @todo: add mib2 ip6 entries? */ + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); + } + if ((old_state & IP6_ADDR_PREFERRED) != (state & IP6_ADDR_PREFERRED)) { + /* address state has changed (valid flag changed or switched between + preferred and deprecated) -> call the callback function */ + NETIF_STATUS_CALLBACK(netif); + } + } + + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", + addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), + netif_ip6_addr_state(netif, addr_idx))); +} + +/** + * Checks if a specific address is assigned to the netif and returns its + * index. 
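+ *
+ * For example (a sketch; 'ip6addr' is an address to look up):
+ * @code{.c}
+ * s8_t idx = netif_get_ip6_addr_match(netif, &ip6addr);
+ * if (idx >= 0) {
+ *   // the address is assigned to this netif in slot 'idx'
+ * }
+ * @endcode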
+ * + * @param netif the netif to check + * @param ip6addr the IPv6 address to find + * @return >= 0: address found, this is its index + * -1: address not found on this netif + */ +s8_t +netif_get_ip6_addr_match(struct netif *netif, const ip6_addr_t *ip6addr) +{ + s8_t i; + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(netif_ip6_addr(netif, i), ip6addr)) { + return i; + } + } + return -1; +} + +/** + * @ingroup netif_ip6 + * Create a link-local IPv6 address on a netif (stored in slot 0) + * + * @param netif the netif to create the address on + * @param from_mac_48bit if != 0, assume hwadr is a 48-bit MAC address (std conversion) + * if == 0, use hwaddr directly as interface ID + */ +void +netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit) +{ + u8_t i, addr_index; + + /* Link-local prefix. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[0] = PP_HTONL(0xfe800000ul); + ip_2_ip6(&netif->ip6_addr[0])->addr[1] = 0; + + /* Generate interface ID. */ + if (from_mac_48bit) { + /* Assume hwaddr is a 48-bit IEEE 802 MAC. Convert to EUI-64 address. Complement Group bit. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[2] = lwip_htonl((((u32_t)(netif->hwaddr[0] ^ 0x02)) << 24) | + ((u32_t)(netif->hwaddr[1]) << 16) | + ((u32_t)(netif->hwaddr[2]) << 8) | + (0xff)); + ip_2_ip6(&netif->ip6_addr[0])->addr[3] = lwip_htonl((0xfeul << 24) | + ((u32_t)(netif->hwaddr[3]) << 16) | + ((u32_t)(netif->hwaddr[4]) << 8) | + (netif->hwaddr[5])); + } else { + /* Use hwaddr directly as interface ID. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[2] = 0; + ip_2_ip6(&netif->ip6_addr[0])->addr[3] = 0; + + addr_index = 3; + for (i = 0; (i < 8) && (i < netif->hwaddr_len); i++) { + if (i == 4) { + addr_index--; + } + ip_2_ip6(&netif->ip6_addr[0])->addr[addr_index] |= ((u32_t)(netif->hwaddr[netif->hwaddr_len - i - 1])) << (8 * (i & 0x03)); + } + } + + /* Set address state. */ +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS + /* Will perform duplicate address detection (DAD). */ + netif_ip6_addr_set_state(netif, 0, IP6_ADDR_TENTATIVE); +#else + /* Consider address valid. */ + netif_ip6_addr_set_state(netif, 0, IP6_ADDR_PREFERRED); +#endif /* LWIP_IPV6_AUTOCONFIG */ +} + +/** + * @ingroup netif_ip6 + * This function allows for the easy addition of a new IPv6 address to an interface. + * It takes care of finding an empty slot and then sets the address tentative + * (to make sure that all the subsequent processing happens). 
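+ *
+ * A usage sketch (the literal address is illustrative only):
+ * @code{.c}
+ * ip6_addr_t ip6;
+ * s8_t chosen_idx;
+ * ip6addr_aton("2001:db8::1", &ip6);
+ * if (netif_add_ip6_address(netif, &ip6, &chosen_idx) == ERR_OK) {
+ *   // stored at slot 'chosen_idx'; tentative until DAD completes
+ * }
+ * @endcode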
+ *
+ * @param netif netif to add the address on
+ * @param ip6addr address to add
+ * @param chosen_idx if != NULL, the chosen IPv6 address index will be stored here
+ */
+err_t
+netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chosen_idx)
+{
+  s8_t i;
+
+  i = netif_get_ip6_addr_match(netif, ip6addr);
+  if (i >= 0) {
+    /* Address already added */
+    if (chosen_idx != NULL) {
+      *chosen_idx = i;
+    }
+    return ERR_OK;
+  }
+
+  /* Find a free slot -- mustn't be the first one (reserved for link local) */
+  for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+    if (ip6_addr_isinvalid(netif_ip6_addr_state(netif, i))) {
+      ip_addr_copy_from_ip6(netif->ip6_addr[i], *ip6addr);
+      netif_ip6_addr_set_state(netif, i, IP6_ADDR_TENTATIVE);
+      if (chosen_idx != NULL) {
+        *chosen_idx = i;
+      }
+      return ERR_OK;
+    }
+  }
+
+  if (chosen_idx != NULL) {
+    *chosen_idx = -1;
+  }
+  return ERR_VAL;
+}
+
+/** Dummy IPv6 output function for netifs not supporting IPv6
+ */
+static err_t
+netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr)
+{
+  LWIP_UNUSED_ARG(netif);
+  LWIP_UNUSED_ARG(p);
+  LWIP_UNUSED_ARG(ipaddr);
+
+  return ERR_IF;
+}
+#endif /* LWIP_IPV6 */
diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/pbuf.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/pbuf.c
new file mode 100644
index 000000000..95e762f62
--- /dev/null
+++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/pbuf.c
@@ -0,0 +1,1442 @@
+/**
+ * @file
+ * Packet buffer management
+ */
+
+/**
+ * @defgroup pbuf Packet buffers (PBUF)
+ * @ingroup infrastructure
+ *
+ * Packets are built from the pbuf data structure. It supports dynamic
+ * memory allocation for packet contents or can reference externally
+ * managed packet contents both in RAM and ROM. Quick allocation for
+ * incoming packets is provided through pools with fixed sized pbufs.
+ *
+ * A packet may span over multiple pbufs, chained as a singly linked
+ * list. This is called a "pbuf chain".
+ *
+ * Multiple packets may be queued, also using this singly linked list.
+ * This is called a "packet queue".
+ *
+ * So, a packet queue consists of one or more pbuf chains, each of
+ * which consists of one or more pbufs. CURRENTLY, PACKET QUEUES ARE
+ * NOT SUPPORTED!!! Use helper structs to queue multiple packets.
+ *
+ * The differences between a pbuf chain and a packet queue are very
+ * precise but subtle.
+ *
+ * The last pbuf of a packet has a ->tot_len field that equals the
+ * ->len field. It can be found by traversing the list. If the last
+ * pbuf of a packet has a ->next field other than NULL, more packets
+ * are on the queue.
+ *
+ * Therefore, looping through the pbufs of a single packet has a
+ * loop end condition (tot_len == p->len), NOT (next == NULL).
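+ *
+ * For example, visiting every pbuf of one packet (a sketch; 'p' is the
+ * packet's first pbuf):
+ * @code{.c}
+ * struct pbuf *q;
+ * for (q = p; q != NULL; q = q->next) {
+ *   // process q->len bytes at q->payload
+ *   if (q->len == q->tot_len) {
+ *     break;  // last pbuf of this packet
+ *   }
+ * }
+ * @endcode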
+ *
+ * Example of custom pbuf usage for zero-copy RX:
+ @code{.c}
+typedef struct my_custom_pbuf
+{
+  struct pbuf_custom p;
+  void* dma_descriptor;
+} my_custom_pbuf_t;
+
+LWIP_MEMPOOL_DECLARE(RX_POOL, 10, sizeof(my_custom_pbuf_t), "Zero-copy RX PBUF pool");
+
+void my_pbuf_free_custom(void* p)
+{
+  my_custom_pbuf_t* my_pbuf = (my_custom_pbuf_t*)p;
+
+  LOCK_INTERRUPTS();
+  free_rx_dma_descriptor(my_pbuf->dma_descriptor);
+  LWIP_MEMPOOL_FREE(RX_POOL, my_pbuf);
+  UNLOCK_INTERRUPTS();
+}
+
+void eth_rx_irq()
+{
+  dma_descriptor* dma_desc = get_RX_DMA_descriptor_from_ethernet();
+  my_custom_pbuf_t* my_pbuf = (my_custom_pbuf_t*)LWIP_MEMPOOL_ALLOC(RX_POOL);
+
+  my_pbuf->p.custom_free_function = my_pbuf_free_custom;
+  my_pbuf->dma_descriptor = dma_desc;
+
+  invalidate_cpu_cache(dma_desc->rx_data, dma_desc->rx_length);
+
+  struct pbuf* p = pbuf_alloced_custom(PBUF_RAW,
+    dma_desc->rx_length,
+    PBUF_REF,
+    &my_pbuf->p,
+    dma_desc->rx_data,
+    dma_desc->max_buffer_size);
+
+  if(netif->input(p, netif) != ERR_OK) {
+    pbuf_free(p);
+  }
+}
+ @endcode
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#include "lwip/stats.h"
+#include "lwip/def.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/pbuf.h"
+#include "lwip/sys.h"
+#if LWIP_TCP && TCP_QUEUE_OOSEQ
+#include "lwip/priv/tcp_priv.h"
+#endif
+#if LWIP_CHECKSUM_ON_COPY
+#include "lwip/inet_chksum.h"
+#endif
+
+#include <string.h>
+
+#define SIZEOF_STRUCT_PBUF        LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf))
+/* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically
+   aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here.
*/ +#define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE) + +#if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ +#define PBUF_POOL_IS_EMPTY() +#else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ + +#if !NO_SYS +#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL +#include "lwip/tcpip.h" +#define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL() do { \ + if (tcpip_callback_with_block(pbuf_free_ooseq_callback, NULL, 0) != ERR_OK) { \ + SYS_ARCH_PROTECT(old_level); \ + pbuf_free_ooseq_pending = 0; \ + SYS_ARCH_UNPROTECT(old_level); \ + } } while(0) +#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ +#endif /* !NO_SYS */ + +volatile u8_t pbuf_free_ooseq_pending; +#define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty() + +/** + * Attempt to reclaim some memory from queued out-of-sequence TCP segments + * if we run out of pool pbufs. It's better to give priority to new packets + * if we're running out. + * + * This must be done in the correct thread context therefore this function + * can only be used with NO_SYS=0 and through tcpip_callback. + */ +#if !NO_SYS +static +#endif /* !NO_SYS */ +void +pbuf_free_ooseq(void) +{ + struct tcp_pcb* pcb; + SYS_ARCH_SET(pbuf_free_ooseq_pending, 0); + + for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) { + if (NULL != pcb->ooseq) { + /** Free the ooseq pbufs of one PCB only */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n")); + tcp_segs_free(pcb->ooseq); + pcb->ooseq = NULL; + return; + } + } +} + +#if !NO_SYS +/** + * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq(). + */ +static void +pbuf_free_ooseq_callback(void *arg) +{ + LWIP_UNUSED_ARG(arg); + pbuf_free_ooseq(); +} +#endif /* !NO_SYS */ + +/** Queue a call to pbuf_free_ooseq if not already queued. */ +static void +pbuf_pool_is_empty(void) +{ +#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL + SYS_ARCH_SET(pbuf_free_ooseq_pending, 1); +#else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ + u8_t queued; + SYS_ARCH_DECL_PROTECT(old_level); + SYS_ARCH_PROTECT(old_level); + queued = pbuf_free_ooseq_pending; + pbuf_free_ooseq_pending = 1; + SYS_ARCH_UNPROTECT(old_level); + + if (!queued) { + /* queue a call to pbuf_free_ooseq if not already queued */ + PBUF_POOL_FREE_OOSEQ_QUEUE_CALL(); + } +#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ +} +#endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ + +/** + * @ingroup pbuf + * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type). + * + * The actual memory allocated for the pbuf is determined by the + * layer at which the pbuf is allocated and the requested size + * (from the size parameter). + * + * @param layer flag to define header size + * @param length size of the pbuf's payload + * @param type this parameter decides how and where the pbuf + * should be allocated as follows: + * + * - PBUF_RAM: buffer memory for pbuf is allocated as one large + * chunk. This includes protocol headers as well. + * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for + * protocol headers. Additional headers must be prepended + * by allocating another pbuf and chain in to the front of + * the ROM pbuf. It is assumed that the memory used is really + * similar to ROM in that it is immutable and will not be + * changed. Memory which is dynamic should generally not + * be attached to PBUF_ROM pbufs. Use PBUF_REF instead. + * - PBUF_REF: no buffer memory is allocated for the pbuf, even for + * protocol headers. 
It is assumed that the pbuf is only + * being used in a single thread. If the pbuf gets queued, + * then pbuf_take should be called to copy the buffer. + * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from + * the pbuf pool that is allocated during pbuf_init(). + * + * @return the allocated pbuf. If multiple pbufs where allocated, this + * is the first pbuf of a pbuf chain. + */ +struct pbuf * +pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type) +{ + struct pbuf *p, *q, *r; + u16_t offset; + s32_t rem_len; /* remaining length */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length)); + + /* determine header offset */ + switch (layer) { + case PBUF_TRANSPORT: + /* add room for transport (often TCP) layer header */ + offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN; + break; + case PBUF_IP: + /* add room for IP layer header */ + offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN; + break; + case PBUF_LINK: + /* add room for link layer header */ + offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN; + break; + case PBUF_RAW_TX: + /* add room for encapsulating link layer headers (e.g. 802.11) */ + offset = PBUF_LINK_ENCAPSULATION_HLEN; + break; + case PBUF_RAW: + /* no offset (e.g. RX buffers or chain successors) */ + offset = 0; + break; + default: + LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0); + return NULL; + } + + switch (type) { + case PBUF_POOL: + /* allocate head of pbuf chain into p */ + p = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL); + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc: allocated pbuf %p\n", (void *)p)); + if (p == NULL) { + PBUF_POOL_IS_EMPTY(); + return NULL; + } + p->type = type; + p->next = NULL; + + /* make the payload pointer point 'offset' bytes into pbuf data memory */ + p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + (SIZEOF_STRUCT_PBUF + offset))); + LWIP_ASSERT("pbuf_alloc: pbuf p->payload properly aligned", + ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0); + /* the total length of the pbuf chain is the requested size */ + p->tot_len = length; + /* set the length of the first pbuf in the chain */ + p->len = LWIP_MIN(length, PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)); + LWIP_ASSERT("check p->payload + p->len does not overflow pbuf", + ((u8_t*)p->payload + p->len <= + (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED)); + LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT", + (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 ); + /* set reference count (needed here in case we fail) */ + p->ref = 1; + + /* now allocate the tail of the pbuf chain */ + + /* remember first pbuf for linkage in next iteration */ + r = p; + /* remaining length to be allocated */ + rem_len = length - p->len; + /* any remaining pbufs to be allocated? 
*/ + while (rem_len > 0) { + q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL); + if (q == NULL) { + PBUF_POOL_IS_EMPTY(); + /* free chain so far allocated */ + pbuf_free(p); + /* bail out unsuccessfully */ + return NULL; + } + q->type = type; + q->flags = 0; + q->next = NULL; + /* make previous pbuf point to this pbuf */ + r->next = q; + /* set total length of this pbuf and next in chain */ + LWIP_ASSERT("rem_len < max_u16_t", rem_len < 0xffff); + q->tot_len = (u16_t)rem_len; + /* this pbuf length is pool size, unless smaller sized tail */ + q->len = LWIP_MIN((u16_t)rem_len, PBUF_POOL_BUFSIZE_ALIGNED); + q->payload = (void *)((u8_t *)q + SIZEOF_STRUCT_PBUF); + LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned", + ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0); + LWIP_ASSERT("check p->payload + p->len does not overflow pbuf", + ((u8_t*)p->payload + p->len <= + (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED)); + q->ref = 1; + /* calculate remaining length to be allocated */ + rem_len -= q->len; + /* remember this pbuf for linkage in next iteration */ + r = q; + } + /* end of chain */ + /*r->next = NULL;*/ + + break; + case PBUF_RAM: + { + mem_size_t alloc_len = LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF + offset) + LWIP_MEM_ALIGN_SIZE(length); + + /* bug #50040: Check for integer overflow when calculating alloc_len */ + if (alloc_len < LWIP_MEM_ALIGN_SIZE(length)) { + return NULL; + } + + /* If pbuf is to be allocated in RAM, allocate memory for it. */ + p = (struct pbuf*)mem_malloc(alloc_len); + } + + if (p == NULL) { + return NULL; + } + /* Set up internal structure of the pbuf. */ + p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset)); + p->len = p->tot_len = length; + p->next = NULL; + p->type = type; + + LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned", + ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0); + break; + /* pbuf references existing (non-volatile static constant) ROM payload? */ + case PBUF_ROM: + /* pbuf references existing (externally allocated) RAM payload? */ + case PBUF_REF: + /* only allocate memory for the pbuf structure */ + p = (struct pbuf *)memp_malloc(MEMP_PBUF); + if (p == NULL) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("pbuf_alloc: Could not allocate MEMP_PBUF for PBUF_%s.\n", + (type == PBUF_ROM) ? "ROM" : "REF")); + return NULL; + } + /* caller must set this field properly, afterwards */ + p->payload = NULL; + p->len = p->tot_len = length; + p->next = NULL; + p->type = type; + break; + default: + LWIP_ASSERT("pbuf_alloc: erroneous type", 0); + return NULL; + } + /* set reference count */ + p->ref = 1; + /* set flags */ + p->flags = 0; + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p)); + return p; +} + +#if LWIP_SUPPORT_CUSTOM_PBUF +/** + * @ingroup pbuf + * Initialize a custom pbuf (already allocated). + * + * @param l flag to define header size + * @param length size of the pbuf's payload + * @param type type of the pbuf (only used to treat the pbuf accordingly, as + * this function allocates no memory) + * @param p pointer to the custom pbuf to initialize (already allocated) + * @param payload_mem pointer to the buffer that is used for payload and headers, + * must be at least big enough to hold 'length' plus the header size, + * may be NULL if set later. + * ATTENTION: The caller is responsible for correct alignment of this buffer!! 
+ * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least + * big enough to hold 'length' plus the header size + */ +struct pbuf* +pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p, + void *payload_mem, u16_t payload_mem_len) +{ + u16_t offset; + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length)); + + /* determine header offset */ + switch (l) { + case PBUF_TRANSPORT: + /* add room for transport (often TCP) layer header */ + offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN; + break; + case PBUF_IP: + /* add room for IP layer header */ + offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN; + break; + case PBUF_LINK: + /* add room for link layer header */ + offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN; + break; + case PBUF_RAW_TX: + /* add room for encapsulating link layer headers (e.g. 802.11) */ + offset = PBUF_LINK_ENCAPSULATION_HLEN; + break; + case PBUF_RAW: + offset = 0; + break; + default: + LWIP_ASSERT("pbuf_alloced_custom: bad pbuf layer", 0); + return NULL; + } + + if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length)); + return NULL; + } + + p->pbuf.next = NULL; + if (payload_mem != NULL) { + p->pbuf.payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset); + } else { + p->pbuf.payload = NULL; + } + p->pbuf.flags = PBUF_FLAG_IS_CUSTOM; + p->pbuf.len = p->pbuf.tot_len = length; + p->pbuf.type = type; + p->pbuf.ref = 1; + return &p->pbuf; +} +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + +/** + * @ingroup pbuf + * Shrink a pbuf chain to a desired length. + * + * @param p pbuf to shrink. + * @param new_len desired new length of pbuf chain + * + * Depending on the desired length, the first few pbufs in a chain might + * be skipped and left unchanged. The new last pbuf in the chain will be + * resized, and any remaining pbufs will be freed. + * + * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted. + * @note May not be called on a packet queue. + * + * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain). + */ +void +pbuf_realloc(struct pbuf *p, u16_t new_len) +{ + struct pbuf *q; + u16_t rem_len; /* remaining length */ + s32_t grow; + + LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL); + LWIP_ASSERT("pbuf_realloc: sane p->type", p->type == PBUF_POOL || + p->type == PBUF_ROM || + p->type == PBUF_RAM || + p->type == PBUF_REF); + + /* desired length larger than current length? */ + if (new_len >= p->tot_len) { + /* enlarging not yet supported */ + return; + } + + /* the pbuf chain grows by (new_len - p->tot_len) bytes + * (which may be negative in case of shrinking) */ + grow = new_len - p->tot_len; + + /* first, step over any pbufs that should remain in the chain */ + rem_len = new_len; + q = p; + /* should this pbuf be kept? 
*/ + while (rem_len > q->len) { + /* decrease remaining length by pbuf length */ + rem_len -= q->len; + /* decrease total length indicator */ + LWIP_ASSERT("grow < max_u16_t", grow < 0xffff); + q->tot_len += (u16_t)grow; + /* proceed to next pbuf in chain */ + q = q->next; + LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL); + } + /* we have now reached the new last pbuf (in q) */ + /* rem_len == desired length for pbuf q */ + + /* shrink allocated memory for PBUF_RAM */ + /* (other types merely adjust their length fields */ + if ((q->type == PBUF_RAM) && (rem_len != q->len) +#if LWIP_SUPPORT_CUSTOM_PBUF + && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0) +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + ) { + /* reallocate and adjust the length of the pbuf that will be split */ + q = (struct pbuf *)mem_trim(q, (u16_t)((u8_t *)q->payload - (u8_t *)q) + rem_len); + LWIP_ASSERT("mem_trim returned q == NULL", q != NULL); + } + /* adjust length fields for new last pbuf */ + q->len = rem_len; + q->tot_len = q->len; + + /* any remaining pbufs in chain? */ + if (q->next != NULL) { + /* free remaining pbufs in chain */ + pbuf_free(q->next); + } + /* q is last packet in chain */ + q->next = NULL; + +} + +/** + * Adjusts the payload pointer to hide or reveal headers in the payload. + * @see pbuf_header. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size. + * @param force Allow 'header_size_increment > 0' for PBUF_REF/PBUF_ROM types + * + * @return non-zero on failure, zero on success. + * + */ +static u8_t +pbuf_header_impl(struct pbuf *p, s16_t header_size_increment, u8_t force) +{ + u16_t type; + void *payload; + u16_t increment_magnitude; + + LWIP_ASSERT("p != NULL", p != NULL); + if ((header_size_increment == 0) || (p == NULL)) { + return 0; + } + + if (header_size_increment < 0) { + increment_magnitude = (u16_t)-header_size_increment; + /* Check that we aren't going to move off the end of the pbuf */ + LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;); + } else { + increment_magnitude = (u16_t)header_size_increment; +#if 0 + /* Can't assert these as some callers speculatively call + pbuf_header() to see if it's OK. Will return 1 below instead. */ + /* Check that we've got the correct type of pbuf to work with */ + LWIP_ASSERT("p->type == PBUF_RAM || p->type == PBUF_POOL", + p->type == PBUF_RAM || p->type == PBUF_POOL); + /* Check that we aren't going to move off the beginning of the pbuf */ + LWIP_ASSERT("p->payload - increment_magnitude >= p + SIZEOF_STRUCT_PBUF", + (u8_t *)p->payload - increment_magnitude >= (u8_t *)p + SIZEOF_STRUCT_PBUF); +#endif + } + + type = p->type; + /* remember current payload pointer */ + payload = p->payload; + + /* pbuf types containing payloads? */ + if (type == PBUF_RAM || type == PBUF_POOL) { + /* set new payload pointer */ + p->payload = (u8_t *)p->payload - header_size_increment; + /* boundary check fails? */ + if ((u8_t *)p->payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) { + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, + ("pbuf_header: failed as %p < %p (not enough space for new header size)\n", + (void *)p->payload, (void *)((u8_t *)p + SIZEOF_STRUCT_PBUF))); + /* restore old payload pointer */ + p->payload = payload; + /* bail out unsuccessfully */ + return 1; + } + /* pbuf types referring to external payloads? */ + } else if (type == PBUF_REF || type == PBUF_ROM) { + /* hide a header in the payload? 
*/ + if ((header_size_increment < 0) && (increment_magnitude <= p->len)) { + /* increase payload pointer */ + p->payload = (u8_t *)p->payload - header_size_increment; + } else if ((header_size_increment > 0) && force) { + p->payload = (u8_t *)p->payload - header_size_increment; + } else { + /* cannot expand payload to front (yet!) + * bail out unsuccessfully */ + return 1; + } + } else { + /* Unknown type */ + LWIP_ASSERT("bad pbuf type", 0); + return 1; + } + /* modify pbuf length fields */ + p->len += header_size_increment; + p->tot_len += header_size_increment; + + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_header: old %p new %p (%"S16_F")\n", + (void *)payload, (void *)p->payload, header_size_increment)); + + return 0; +} + +/** + * Adjusts the payload pointer to hide or reveal headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * (dis)appears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size which + * increases the size of the pbuf. New space is on the front. + * (Using a negative value decreases the header size.) + * If hdr_size_inc is 0, this function does nothing and returns successful. + * + * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so + * the call will fail. A check is made that the increase in header size does + * not move the payload pointer in front of the start of the buffer. + * @return non-zero on failure, zero on success. + * + */ +u8_t +pbuf_header(struct pbuf *p, s16_t header_size_increment) +{ + return pbuf_header_impl(p, header_size_increment, 0); +} + +/** + * Same as pbuf_header but does not check if 'header_size > 0' is allowed. + * This is used internally only, to allow PBUF_REF for RX. + */ +u8_t +pbuf_header_force(struct pbuf *p, s16_t header_size_increment) +{ + return pbuf_header_impl(p, header_size_increment, 1); +} + +/** + * @ingroup pbuf + * Dereference a pbuf chain or queue and deallocate any no-longer-used + * pbufs at the head of this chain or queue. + * + * Decrements the pbuf reference count. If it reaches zero, the pbuf is + * deallocated. + * + * For a pbuf chain, this is repeated for each pbuf in the chain, + * up to the first pbuf which has a non-zero reference count after + * decrementing. So, when all reference counts are one, the whole + * chain is free'd. + * + * @param p The pbuf (chain) to be dereferenced. + * + * @return the number of pbufs that were de-allocated + * from the head of the chain. + * + * @note MUST NOT be called on a packet queue (Not verified to work yet). + * @note the reference counter of a pbuf equals the number of pointers + * that refer to the pbuf (or into the pbuf). + * + * @internal examples: + * + * Assuming existing chains a->b->c with the following reference + * counts, calling pbuf_free(a) results in: + * + * 1->2->3 becomes ...1->3 + * 3->3->3 becomes 2->3->3 + * 1->1->2 becomes ......1 + * 2->1->1 becomes 1->1->1 + * 1->1->1 becomes ....... 
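+ *
+ * A typical RX-path sketch, mirroring the zero-copy example at the top of
+ * this file ('p' holds the only reference):
+ * @code{.c}
+ * if (netif->input(p, netif) != ERR_OK) {
+ *   pbuf_free(p);  // ref drops to zero, so the whole chain is deallocated
+ * }
+ * @endcode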
+ * + */ +u8_t +pbuf_free(struct pbuf *p) +{ + u16_t type; + struct pbuf *q; + u8_t count; + + if (p == NULL) { + LWIP_ASSERT("p != NULL", p != NULL); + /* if assertions are disabled, proceed with debug output */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("pbuf_free(p == NULL) was called.\n")); + return 0; + } + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p)); + + PERF_START; + + LWIP_ASSERT("pbuf_free: sane type", + p->type == PBUF_RAM || p->type == PBUF_ROM || + p->type == PBUF_REF || p->type == PBUF_POOL); + + count = 0; + /* de-allocate all consecutive pbufs from the head of the chain that + * obtain a zero reference count after decrementing*/ + while (p != NULL) { + u16_t ref; + SYS_ARCH_DECL_PROTECT(old_level); + /* Since decrementing ref cannot be guaranteed to be a single machine operation + * we must protect it. We put the new ref into a local variable to prevent + * further protection. */ + SYS_ARCH_PROTECT(old_level); + /* all pbufs in a chain are referenced at least once */ + LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0); + /* decrease reference count (number of pointers to pbuf) */ + ref = --(p->ref); + SYS_ARCH_UNPROTECT(old_level); + /* this pbuf is no longer referenced to? */ + if (ref == 0) { + /* remember next pbuf in chain for next iteration */ + q = p->next; + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p)); + type = p->type; +#if LWIP_SUPPORT_CUSTOM_PBUF + /* is this a custom pbuf? */ + if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) { + struct pbuf_custom *pc = (struct pbuf_custom*)p; + LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL); + pc->custom_free_function(p); + } else +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + { + /* is this a pbuf from the pool? */ + if (type == PBUF_POOL) { + memp_free(MEMP_PBUF_POOL, p); + /* is this a ROM or RAM referencing pbuf? */ + } else if (type == PBUF_ROM || type == PBUF_REF) { + memp_free(MEMP_PBUF, p); + /* type == PBUF_RAM */ + } else { + mem_free(p); + } + } + count++; + /* proceed to next pbuf */ + p = q; + /* p->ref > 0, this pbuf is still referenced to */ + /* (and so the remaining pbufs in chain as well) */ + } else { + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, ref)); + /* stop walking through the chain */ + p = NULL; + } + } + PERF_STOP("pbuf_free"); + /* return number of de-allocated pbufs */ + return count; +} + +/** + * Count number of pbufs in a chain + * + * @param p first pbuf of chain + * @return the number of pbufs in a chain + */ +u16_t +pbuf_clen(const struct pbuf *p) +{ + u16_t len; + + len = 0; + while (p != NULL) { + ++len; + p = p->next; + } + return len; +} + +/** + * @ingroup pbuf + * Increment the reference count of the pbuf. + * + * @param p pbuf to increase reference counter of + * + */ +void +pbuf_ref(struct pbuf *p) +{ + /* pbuf given? */ + if (p != NULL) { + SYS_ARCH_INC(p->ref, 1); + LWIP_ASSERT("pbuf ref overflow", p->ref > 0); + } +} + +/** + * @ingroup pbuf + * Concatenate two pbufs (each may be a pbuf chain) and take over + * the caller's reference of the tail pbuf. + * + * @note The caller MAY NOT reference the tail pbuf afterwards. + * Use pbuf_chain() for that purpose. 
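+ *
+ * For example (a sketch):
+ * @code{.c}
+ * pbuf_cat(h, t);  // 'h' takes over the caller's reference to 't'
+ * t = NULL;        // defensive: 't' may no longer be used by the caller
+ * @endcode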
+ * + * @see pbuf_chain() + */ +void +pbuf_cat(struct pbuf *h, struct pbuf *t) +{ + struct pbuf *p; + + LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)", + ((h != NULL) && (t != NULL)), return;); + + /* proceed to last pbuf of chain */ + for (p = h; p->next != NULL; p = p->next) { + /* add total length of second chain to all totals of first chain */ + p->tot_len += t->tot_len; + } + /* { p is last pbuf of first h chain, p->next == NULL } */ + LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len); + LWIP_ASSERT("p->next == NULL", p->next == NULL); + /* add total length of second chain to last pbuf total of first chain */ + p->tot_len += t->tot_len; + /* chain last pbuf of head (p) with first of tail (t) */ + p->next = t; + /* p->next now references t, but the caller will drop its reference to t, + * so netto there is no change to the reference count of t. + */ +} + +/** + * @ingroup pbuf + * Chain two pbufs (or pbuf chains) together. + * + * The caller MUST call pbuf_free(t) once it has stopped + * using it. Use pbuf_cat() instead if you no longer use t. + * + * @param h head pbuf (chain) + * @param t tail pbuf (chain) + * @note The pbufs MUST belong to the same packet. + * @note MAY NOT be called on a packet queue. + * + * The ->tot_len fields of all pbufs of the head chain are adjusted. + * The ->next field of the last pbuf of the head chain is adjusted. + * The ->ref field of the first pbuf of the tail chain is adjusted. + * + */ +void +pbuf_chain(struct pbuf *h, struct pbuf *t) +{ + pbuf_cat(h, t); + /* t is now referenced by h */ + pbuf_ref(t); + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t)); +} + +/** + * Dechains the first pbuf from its succeeding pbufs in the chain. + * + * Makes p->tot_len field equal to p->len. + * @param p pbuf to dechain + * @return remainder of the pbuf chain, or NULL if it was de-allocated. + * @note May not be called on a packet queue. + */ +struct pbuf * +pbuf_dechain(struct pbuf *p) +{ + struct pbuf *q; + u8_t tail_gone = 1; + /* tail */ + q = p->next; + /* pbuf has successor in chain? */ + if (q != NULL) { + /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ + LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len); + /* enforce invariant if assertion is disabled */ + q->tot_len = p->tot_len - p->len; + /* decouple pbuf from remainder */ + p->next = NULL; + /* total length of pbuf p is its own length only */ + p->tot_len = p->len; + /* q is no longer referenced by p, free it */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q)); + tail_gone = pbuf_free(q); + if (tail_gone > 0) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, + ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q)); + } + /* return remaining tail or NULL if deallocated */ + } + /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ + LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len); + return ((tail_gone > 0) ? NULL : q); +} + +/** + * @ingroup pbuf + * Create PBUF_RAM copies of pbufs. + * + * Used to queue packets on behalf of the lwIP stack, such as + * ARP based queueing. + * + * @note You MUST explicitly use p = pbuf_take(p); + * + * @note Only one packet is copied, no packet queue! 
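+ *
+ * A sketch of making an independent PBUF_RAM copy of a (possibly chained)
+ * packet 'p':
+ * @code{.c}
+ * struct pbuf *q = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
+ * if ((q != NULL) && (pbuf_copy(q, p) == ERR_OK)) {
+ *   // 'q' now holds a contiguous copy of 'p' and may be queued
+ * }
+ * @endcode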
+ * + * @param p_to pbuf destination of the copy + * @param p_from pbuf source of the copy + * + * @return ERR_OK if pbuf was copied + * ERR_ARG if one of the pbufs is NULL or p_to is not big + * enough to hold p_from + */ +err_t +pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from) +{ + u16_t offset_to=0, offset_from=0, len; + + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n", + (const void*)p_to, (const void*)p_from)); + + /* is the target big enough to hold the source? */ + LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) && + (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;); + + /* iterate through pbuf chain */ + do + { + /* copy one part of the original chain */ + if ((p_to->len - offset_to) >= (p_from->len - offset_from)) { + /* complete current p_from fits into current p_to */ + len = p_from->len - offset_from; + } else { + /* current p_from does not fit into current p_to */ + len = p_to->len - offset_to; + } + MEMCPY((u8_t*)p_to->payload + offset_to, (u8_t*)p_from->payload + offset_from, len); + offset_to += len; + offset_from += len; + LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len); + LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len); + if (offset_from >= p_from->len) { + /* on to next p_from (if any) */ + offset_from = 0; + p_from = p_from->next; + } + if (offset_to == p_to->len) { + /* on to next p_to (if any) */ + offset_to = 0; + p_to = p_to->next; + LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL) , return ERR_ARG;); + } + + if ((p_from != NULL) && (p_from->len == p_from->tot_len)) { + /* don't copy more than one packet! */ + LWIP_ERROR("pbuf_copy() does not allow packet queues!", + (p_from->next == NULL), return ERR_VAL;); + } + if ((p_to != NULL) && (p_to->len == p_to->tot_len)) { + /* don't copy more than one packet! */ + LWIP_ERROR("pbuf_copy() does not allow packet queues!", + (p_to->next == NULL), return ERR_VAL;); + } + } while (p_from); + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n")); + return ERR_OK; +} + +/** + * @ingroup pbuf + * Copy (part of) the contents of a packet buffer + * to an application supplied buffer. + * + * @param buf the pbuf from which to copy data + * @param dataptr the application supplied buffer + * @param len length of data to copy (dataptr must be big enough). No more + * than buf->tot_len will be copied, irrespective of len + * @param offset offset into the packet buffer from where to begin copying len bytes + * @return the number of bytes copied, or 0 on failure + */ +u16_t +pbuf_copy_partial(const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset) +{ + const struct pbuf *p; + u16_t left; + u16_t buf_copy_len; + u16_t copied_total = 0; + + LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;); + LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;); + + left = 0; + + if ((buf == NULL) || (dataptr == NULL)) { + return 0; + } + + /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */ + for (p = buf; len != 0 && p != NULL; p = p->next) { + if ((offset != 0) && (offset >= p->len)) { + /* don't copy from this buffer -> on to the next */ + offset -= p->len; + } else { + /* copy from this buffer. maybe only partially. 
*/ + buf_copy_len = p->len - offset; + if (buf_copy_len > len) { + buf_copy_len = len; + } + /* copy the necessary parts of the buffer */ + MEMCPY(&((char*)dataptr)[left], &((char*)p->payload)[offset], buf_copy_len); + copied_total += buf_copy_len; + left += buf_copy_len; + len -= buf_copy_len; + offset = 0; + } + } + return copied_total; +} + +#if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE +/** + * This method modifies a 'pbuf chain', so that its total length is + * smaller than 64K. The remainder of the original pbuf chain is stored + * in *rest. + * This function never creates new pbufs, but splits an existing chain + * in two parts. The tot_len of the modified packet queue will likely be + * smaller than 64K. + * 'packet queues' are not supported by this function. + * + * @param p the pbuf queue to be split + * @param rest pointer to store the remainder (after the first 64K) + */ +void pbuf_split_64k(struct pbuf *p, struct pbuf **rest) +{ + *rest = NULL; + if ((p != NULL) && (p->next != NULL)) { + u16_t tot_len_front = p->len; + struct pbuf *i = p; + struct pbuf *r = p->next; + + /* continue until the total length (summed up as u16_t) overflows */ + while ((r != NULL) && ((u16_t)(tot_len_front + r->len) > tot_len_front)) { + tot_len_front += r->len; + i = r; + r = r->next; + } + /* i now points to last packet of the first segment. Set next + pointer to NULL */ + i->next = NULL; + + if (r != NULL) { + /* Update the tot_len field in the first part */ + for (i = p; i != NULL; i = i->next) { + i->tot_len -= r->tot_len; + LWIP_ASSERT("tot_len/len mismatch in last pbuf", + (i->next != NULL) || (i->tot_len == i->len)); + } + if (p->flags & PBUF_FLAG_TCP_FIN) { + r->flags |= PBUF_FLAG_TCP_FIN; + } + + /* tot_len field in rest does not need modifications */ + /* reference counters do not need modifications */ + *rest = r; + } + } +} +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + +/* Actual implementation of pbuf_skip() but returning const pointer... */ +static const struct pbuf* +pbuf_skip_const(const struct pbuf* in, u16_t in_offset, u16_t* out_offset) +{ + u16_t offset_left = in_offset; + const struct pbuf* q = in; + + /* get the correct pbuf */ + while ((q != NULL) && (q->len <= offset_left)) { + offset_left -= q->len; + q = q->next; + } + if (out_offset != NULL) { + *out_offset = offset_left; + } + return q; +} + +/** + * @ingroup pbuf + * Skip a number of bytes at the start of a pbuf + * + * @param in input pbuf + * @param in_offset offset to skip + * @param out_offset resulting offset in the returned pbuf + * @return the pbuf in the queue where the offset is + */ +struct pbuf* +pbuf_skip(struct pbuf* in, u16_t in_offset, u16_t* out_offset) +{ + const struct pbuf* out = pbuf_skip_const(in, in_offset, out_offset); + return LWIP_CONST_CAST(struct pbuf*, out); +} + +/** + * @ingroup pbuf + * Copy application supplied data into a pbuf. + * This function can only be used to copy the equivalent of buf->tot_len data. 
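As a usage sketch (editor's illustration; p is assumed to be a received, possibly chained pbuf), pbuf_copy_partial() flattens a region of the chain into contiguous application memory:

  u8_t hdr[8];
  /* copy 8 bytes starting at offset 2; returns fewer if p is shorter */
  u16_t copied = pbuf_copy_partial(p, hdr, sizeof(hdr), 2);
  if (copied == sizeof(hdr)) {
    /* hdr now holds bytes 2..9 of the packet, even across pbuf borders */
  }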
+ * + * @param buf pbuf to fill with data + * @param dataptr application supplied data buffer + * @param len length of the application supplied data buffer + * + * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough + */ +err_t +pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len) +{ + struct pbuf *p; + u16_t buf_copy_len; + u16_t total_copy_len = len; + u16_t copied_total = 0; + + LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;); + LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;); + LWIP_ERROR("pbuf_take: buf not large enough", (buf->tot_len >= len), return ERR_MEM;); + + if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) { + return ERR_ARG; + } + + /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */ + for (p = buf; total_copy_len != 0; p = p->next) { + LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL); + buf_copy_len = total_copy_len; + if (buf_copy_len > p->len) { + /* this pbuf cannot hold all remaining data */ + buf_copy_len = p->len; + } + /* copy the necessary parts of the buffer */ + MEMCPY(p->payload, &((const char*)dataptr)[copied_total], buf_copy_len); + total_copy_len -= buf_copy_len; + copied_total += buf_copy_len; + } + LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len); + return ERR_OK; +} + +/** + * @ingroup pbuf + * Same as pbuf_take() but puts data at an offset + * + * @param buf pbuf to fill with data + * @param dataptr application supplied data buffer + * @param len length of the application supplied data buffer + * @param offset offset in pbuf where to copy dataptr to + * + * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough + */ +err_t +pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset) +{ + u16_t target_offset; + struct pbuf* q = pbuf_skip(buf, offset, &target_offset); + + /* return requested data if pbuf is OK */ + if ((q != NULL) && (q->tot_len >= target_offset + len)) { + u16_t remaining_len = len; + const u8_t* src_ptr = (const u8_t*)dataptr; + /* copy the part that goes into the first pbuf */ + u16_t first_copy_len = LWIP_MIN(q->len - target_offset, len); + MEMCPY(((u8_t*)q->payload) + target_offset, dataptr, first_copy_len); + remaining_len -= first_copy_len; + src_ptr += first_copy_len; + if (remaining_len > 0) { + return pbuf_take(q->next, src_ptr, remaining_len); + } + return ERR_OK; + } + return ERR_MEM; +} + +/** + * @ingroup pbuf + * Creates a single pbuf out of a queue of pbufs. + * + * @remark: Either the source pbuf 'p' is freed by this function or the original + * pbuf 'p' is returned, therefore the caller has to check the result! + * + * @param p the source pbuf + * @param layer pbuf_layer of the new pbuf + * + * @return a new, single pbuf (p->next is NULL) + * or the old pbuf if allocation fails + */ +struct pbuf* +pbuf_coalesce(struct pbuf *p, pbuf_layer layer) +{ + struct pbuf *q; + err_t err; + if (p->next == NULL) { + return p; + } + q = pbuf_alloc(layer, p->tot_len, PBUF_RAM); + if (q == NULL) { + /* @todo: what do we do now? */ + return p; + } + err = pbuf_copy(q, p); + LWIP_UNUSED_ARG(err); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("pbuf_copy failed", err == ERR_OK); + pbuf_free(p); + return q; +} + +#if LWIP_CHECKSUM_ON_COPY +/** + * Copies data into a single pbuf (*not* into a pbuf queue!) 
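A typical producer-side pattern for pbuf_take() looks like this (editor's sketch; data and len come from the application, error handling abbreviated):

  struct pbuf *p = pbuf_alloc(PBUF_RAW, len, PBUF_RAM);
  if (p != NULL) {
    pbuf_take(p, data, len);  /* cannot fail here: p->tot_len == len */
    /* ... hand p to the stack, then drop our own reference ... */
    pbuf_free(p);
  }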
and updates + * the checksum while copying + * + * @param p the pbuf to copy data into + * @param start_offset offset of p->payload where to copy the data to + * @param dataptr data to copy into the pbuf + * @param len length of data to copy into the pbuf + * @param chksum pointer to the checksum which is updated + * @return ERR_OK if successful, another error if the data does not fit + * within the (first) pbuf (no pbuf queues!) + */ +err_t +pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr, + u16_t len, u16_t *chksum) +{ + u32_t acc; + u16_t copy_chksum; + char *dst_ptr; + LWIP_ASSERT("p != NULL", p != NULL); + LWIP_ASSERT("dataptr != NULL", dataptr != NULL); + LWIP_ASSERT("chksum != NULL", chksum != NULL); + LWIP_ASSERT("len != 0", len != 0); + + if ((start_offset >= p->len) || (start_offset + len > p->len)) { + return ERR_ARG; + } + + dst_ptr = ((char*)p->payload) + start_offset; + copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len); + if ((start_offset & 1) != 0) { + copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum); + } + acc = *chksum; + acc += copy_chksum; + *chksum = FOLD_U32T(acc); + return ERR_OK; +} +#endif /* LWIP_CHECKSUM_ON_COPY */ + +/** + * @ingroup pbuf + * Get one byte from the specified position in a pbuf + * WARNING: returns zero for offset >= p->tot_len + * + * @param p pbuf to parse + * @param offset offset into p of the byte to return + * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len + */ +u8_t +pbuf_get_at(const struct pbuf* p, u16_t offset) +{ + int ret = pbuf_try_get_at(p, offset); + if (ret >= 0) { + return (u8_t)ret; + } + return 0; +} + +/** + * @ingroup pbuf + * Get one byte from the specified position in a pbuf + * + * @param p pbuf to parse + * @param offset offset into p of the byte to return + * @return byte at an offset into p [0..0xFF] OR negative if 'offset' >= p->tot_len + */ +int +pbuf_try_get_at(const struct pbuf* p, u16_t offset) +{ + u16_t q_idx; + const struct pbuf* q = pbuf_skip_const(p, offset, &q_idx); + + /* return requested data if pbuf is OK */ + if ((q != NULL) && (q->len > q_idx)) { + return ((u8_t*)q->payload)[q_idx]; + } + return -1; +} + +/** + * @ingroup pbuf + * Put one byte to the specified position in a pbuf + * WARNING: silently ignores offset >= p->tot_len + * + * @param p pbuf to fill + * @param offset offset into p of the byte to write + * @param data byte to write at an offset into p + */ +void +pbuf_put_at(struct pbuf* p, u16_t offset, u8_t data) +{ + u16_t q_idx; + struct pbuf* q = pbuf_skip(p, offset, &q_idx); + + /* write requested data if pbuf is OK */ + if ((q != NULL) && (q->len > q_idx)) { + ((u8_t*)q->payload)[q_idx] = data; + } +} + +/** + * @ingroup pbuf + * Compare pbuf contents at specified offset with memory s2, both of length n + * + * @param p pbuf to compare + * @param offset offset into p at which to start comparing + * @param s2 buffer to compare + * @param n length of buffer to compare + * @return zero if equal, nonzero otherwise + * (0xffff if p is too short, diffoffset+1 otherwise) + */ +u16_t +pbuf_memcmp(const struct pbuf* p, u16_t offset, const void* s2, u16_t n) +{ + u16_t start = offset; + const struct pbuf* q = p; + u16_t i; + + /* pbuf long enough to perform check? */ + if(p->tot_len < (offset + n)) { + return 0xffff; + } + + /* get the correct pbuf from chain. We know it succeeds because of p->tot_len check above. 
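Because pbuf_get_at() cannot distinguish a stored zero byte from an out-of-range offset, the try variant is preferable whenever the offset is not known to be valid (editor's sketch; handle_byte() is a hypothetical handler):

  int b = pbuf_try_get_at(p, offset);
  if (b < 0) {
    return;  /* offset was beyond p->tot_len */
  }
  handle_byte((u8_t)b);  /* b is guaranteed to be 0x00..0xFF here */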
*/ + while ((q != NULL) && (q->len <= start)) { + start -= q->len; + q = q->next; + } + + /* return requested data if pbuf is OK */ + for (i = 0; i < n; i++) { + /* We know pbuf_get_at() succeeds because of p->tot_len check above. */ + u8_t a = pbuf_get_at(q, start + i); + u8_t b = ((const u8_t*)s2)[i]; + if (a != b) { + return i+1; + } + } + return 0; +} + +/** + * @ingroup pbuf + * Find occurrence of mem (with length mem_len) in pbuf p, starting at offset + * start_offset. + * + * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as + * return value 'not found' + * @param mem search for the contents of this buffer + * @param mem_len length of 'mem' + * @param start_offset offset into p at which to start searching + * @return 0xFFFF if substr was not found in p or the index where it was found + */ +u16_t +pbuf_memfind(const struct pbuf* p, const void* mem, u16_t mem_len, u16_t start_offset) +{ + u16_t i; + u16_t max = p->tot_len - mem_len; + if (p->tot_len >= mem_len + start_offset) { + for (i = start_offset; i <= max; i++) { + u16_t plus = pbuf_memcmp(p, i, mem, mem_len); + if (plus == 0) { + return i; + } + } + } + return 0xFFFF; +} + +/** + * Find occurrence of substr with length substr_len in pbuf p, start at offset + * start_offset + * WARNING: in contrast to strstr(), this one does not stop at the first \0 in + * the pbuf/source string! + * + * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as + * return value 'not found' + * @param substr string to search for in p, maximum length is 0xFFFE + * @return 0xFFFF if substr was not found in p or the index where it was found + */ +u16_t +pbuf_strstr(const struct pbuf* p, const char* substr) +{ + size_t substr_len; + if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) { + return 0xFFFF; + } + substr_len = strlen(substr); + if (substr_len >= 0xFFFF) { + return 0xFFFF; + } + return pbuf_memfind(p, substr, (u16_t)substr_len, 0); +} diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/raw.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/raw.c new file mode 100644 index 000000000..4ee745712 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/raw.c @@ -0,0 +1,521 @@ +/** + * @file + * Implementation of raw protocol PCBs for low-level handling of + * different types of protocols besides (or overriding) those + * already available in lwIP.\n + * See also @ref raw_raw + * + * @defgroup raw_raw RAW + * @ingroup callbackstyle_api + * Implementation of raw protocol PCBs for low-level handling of + * different types of protocols besides (or overriding) those + * already available in lwIP.\n + * @see @ref raw_api + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
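For example (editor's sketch), locating the end of an HTTP request header in a possibly chained pbuf:

  u16_t end = pbuf_strstr(p, "\r\n\r\n");
  if (end != 0xFFFF) {
    /* the header block ends at byte offset 'end' into the chain */
  }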
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/def.h"
+#include "lwip/memp.h"
+#include "lwip/ip_addr.h"
+#include "lwip/netif.h"
+#include "lwip/raw.h"
+#include "lwip/stats.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/inet_chksum.h"
+
+#include <string.h>
+
+/** The list of RAW PCBs */
+static struct raw_pcb *raw_pcbs;
+
+static u8_t
+raw_input_match(struct raw_pcb *pcb, u8_t broadcast)
+{
+  LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */
+
+#if LWIP_IPV4 && LWIP_IPV6
+  /* Dual-stack: PCBs listening to any IP type also listen to any IP address */
+  if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) {
+#if IP_SOF_BROADCAST_RECV
+    if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) {
+      return 0;
+    }
+#endif /* IP_SOF_BROADCAST_RECV */
+    return 1;
+  }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+  /* Only need to check PCB if incoming IP version matches PCB IP version */
+  if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) {
+#if LWIP_IPV4
+    /* Special case: IPv4 broadcast: receive all broadcasts
+     * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */
+    if (broadcast != 0) {
+#if IP_SOF_BROADCAST_RECV
+      if (ip_get_option(pcb, SOF_BROADCAST))
+#endif /* IP_SOF_BROADCAST_RECV */
+      {
+        if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip))) {
+          return 1;
+        }
+      }
+    } else
+#endif /* LWIP_IPV4 */
+    /* Handle IPv4 and IPv6: catch all or exact match */
+    if (ip_addr_isany(&pcb->local_ip) ||
+        ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) {
+      return 1;
+    }
+  }
+
+  return 0;
+}
+
+/**
+ * Determine if an incoming IP packet is covered by a RAW PCB
+ * and if so, pass it to a user-provided receive callback function.
+ *
+ * Given an incoming IP datagram (as a chain of pbufs) this function
+ * finds a corresponding RAW PCB and calls the corresponding receive
+ * callback function.
+ *
+ * @param p pbuf to be demultiplexed to a RAW PCB.
+ * @param inp network interface on which the datagram was received.
+ * @return - 1 if the packet has been eaten by a RAW PCB receive
+ *           callback function. The caller MAY NOT reference the
+ *           packet any longer, and MAY NOT call pbuf_free().
+ * @return - 0 if packet is not eaten (pbuf is still referenced by the
+ *           caller).
+ * + */ +u8_t +raw_input(struct pbuf *p, struct netif *inp) +{ + struct raw_pcb *pcb, *prev; + s16_t proto; + u8_t eaten = 0; + u8_t broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()); + + LWIP_UNUSED_ARG(inp); + +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_HDR_GET_VERSION(p->payload) == 6) +#endif /* LWIP_IPV4 */ + { + struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload; + proto = IP6H_NEXTH(ip6hdr); + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + proto = IPH_PROTO((struct ip_hdr *)p->payload); + } +#endif /* LWIP_IPV4 */ + + prev = NULL; + pcb = raw_pcbs; + /* loop through all raw pcbs until the packet is eaten by one */ + /* this allows multiple pcbs to match against the packet by design */ + while ((eaten == 0) && (pcb != NULL)) { + if ((pcb->protocol == proto) && raw_input_match(pcb, broadcast)) { + /* receive callback function available? */ + if (pcb->recv != NULL) { +#ifndef LWIP_NOASSERT + void* old_payload = p->payload; +#endif + /* the receive callback function did not eat the packet? */ + eaten = pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr()); + if (eaten != 0) { + /* receive function ate the packet */ + p = NULL; + eaten = 1; + if (prev != NULL) { + /* move the pcb to the front of raw_pcbs so that is + found faster next time */ + prev->next = pcb->next; + pcb->next = raw_pcbs; + raw_pcbs = pcb; + } + } else { + /* sanity-check that the receive callback did not alter the pbuf */ + LWIP_ASSERT("raw pcb recv callback altered pbuf payload pointer without eating packet", + p->payload == old_payload); + } + } + /* no receive callback function was set for this raw PCB */ + } + /* drop the packet */ + prev = pcb; + pcb = pcb->next; + } + return eaten; +} + +/** + * @ingroup raw_raw + * Bind a RAW PCB. + * + * @param pcb RAW PCB to be bound with a local address ipaddr. + * @param ipaddr local IP address to bind with. Use IP4_ADDR_ANY to + * bind to all local interfaces. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_USE. The specified IP address is already bound to by + * another RAW PCB. + * + * @see raw_disconnect() + */ +err_t +raw_bind(struct raw_pcb *pcb, const ip_addr_t *ipaddr) +{ + if ((pcb == NULL) || (ipaddr == NULL)) { + return ERR_VAL; + } + ip_addr_set_ipaddr(&pcb->local_ip, ipaddr); + return ERR_OK; +} + +/** + * @ingroup raw_raw + * Connect an RAW PCB. This function is required by upper layers + * of lwip. Using the raw api you could use raw_sendto() instead + * + * This will associate the RAW PCB with the remote address. + * + * @param pcb RAW PCB to be connected with remote address ipaddr and port. + * @param ipaddr remote IP address to connect with. + * + * @return lwIP error code + * + * @see raw_disconnect() and raw_sendto() + */ +err_t +raw_connect(struct raw_pcb *pcb, const ip_addr_t *ipaddr) +{ + if ((pcb == NULL) || (ipaddr == NULL)) { + return ERR_VAL; + } + ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr); + return ERR_OK; +} + +/** + * @ingroup raw_raw + * Set the callback function for received packets that match the + * raw PCB's protocol and binding. + * + * The callback function MUST either + * - eat the packet by calling pbuf_free() and returning non-zero. The + * packet will not be passed to other raw PCBs or other protocol layers. + * - not free the packet, and return zero. The packet will be matched + * against further PCBs and/or forwarded to another protocol layers. 
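A conforming callback therefore has exactly two legal shapes, sketched below (editor's illustration; icmp_recv_cb and is_wanted_packet are hypothetical names):

static u8_t
icmp_recv_cb(void *arg, struct raw_pcb *pcb, struct pbuf *p,
             const ip_addr_t *addr)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(pcb);
  LWIP_UNUSED_ARG(addr);
  if (is_wanted_packet(p)) {  /* hypothetical filter */
    pbuf_free(p);             /* eat the packet ... */
    return 1;                 /* ... and tell raw_input() so */
  }
  return 0;                   /* untouched; other PCBs may still match */
}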
+ */ +void +raw_recv(struct raw_pcb *pcb, raw_recv_fn recv, void *recv_arg) +{ + /* remember recv() callback and user data */ + pcb->recv = recv; + pcb->recv_arg = recv_arg; +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the given address. Note that actually you cannot + * modify the IP headers (this is inconsistent with the receive callback where + * you actually get the IP headers), you can only specify the IP payload here. + * It requires some more changes in lwIP. (there will be a raw_send() function + * then.) + * + * @param pcb the raw pcb which to send + * @param p the IP payload to send + * @param ipaddr the destination address of the IP packet + * + */ +err_t +raw_sendto(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *ipaddr) +{ + err_t err; + struct netif *netif; + const ip_addr_t *src_ip; + struct pbuf *q; /* q will be sent down the stack */ + s16_t header_size; + + if ((pcb == NULL) || (ipaddr == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, ipaddr)) { + return ERR_VAL; + } + + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_sendto\n")); + + header_size = ( +#if LWIP_IPV4 && LWIP_IPV6 + IP_IS_V6(ipaddr) ? IP6_HLEN : IP_HLEN); +#elif LWIP_IPV4 + IP_HLEN); +#else + IP6_HLEN); +#endif + + /* not enough space to add an IP header to first pbuf in given p chain? */ + if (pbuf_header(p, header_size)) { + /* allocate header in new pbuf */ + q = pbuf_alloc(PBUF_IP, 0, PBUF_RAM); + /* new header pbuf could not be allocated? */ + if (q == NULL) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("raw_sendto: could not allocate header\n")); + return ERR_MEM; + } + if (p->tot_len != 0) { + /* chain header q in front of given pbuf p */ + pbuf_chain(q, p); + } + /* { first pbuf q points to header pbuf } */ + LWIP_DEBUGF(RAW_DEBUG, ("raw_sendto: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p)); + } else { + /* first pbuf q equals given pbuf */ + q = p; + if (pbuf_header(q, -header_size)) { + LWIP_ASSERT("Can't restore header we just removed!", 0); + return ERR_MEM; + } + } + + if(IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { + /* Don't call ip_route() with IP_ANY_TYPE */ + netif = ip_route(IP46_ADDR_ANY(IP_GET_TYPE(ipaddr)), ipaddr); + } else { + netif = ip_route(&pcb->local_ip, ipaddr); + } + + if (netif == NULL) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: No route to ")); + ip_addr_debug_print(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ipaddr); + /* free any temporary header pbuf allocated by pbuf_header() */ + if (q != p) { + pbuf_free(q); + } + return ERR_RTE; + } + +#if IP_SOF_BROADCAST + if (IP_IS_V4(ipaddr)) + { + /* broadcast filter? */ + if (!ip_get_option(pcb, SOF_BROADCAST) && ip_addr_isbroadcast(ipaddr, netif)) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb)); + /* free any temporary header pbuf allocated by pbuf_header() */ + if (q != p) { + pbuf_free(q); + } + return ERR_VAL; + } + } +#endif /* IP_SOF_BROADCAST */ + + if (ip_addr_isany(&pcb->local_ip)) { + /* use outgoing network interface IP address as source address */ + src_ip = ip_netif_get_local_ip(netif, ipaddr); +#if LWIP_IPV6 + if (src_ip == NULL) { + if (q != p) { + pbuf_free(q); + } + return ERR_RTE; + } +#endif /* LWIP_IPV6 */ + } else { + /* use RAW PCB local IP address as source address */ + src_ip = &pcb->local_ip; + } + +#if LWIP_IPV6 + /* If requested, based on the IPV6_CHECKSUM socket option per RFC3542, + compute the checksum and update the checksum in the payload. 
*/ + if (IP_IS_V6(ipaddr) && pcb->chksum_reqd) { + u16_t chksum = ip6_chksum_pseudo(p, pcb->protocol, p->tot_len, ip_2_ip6(src_ip), ip_2_ip6(ipaddr)); + LWIP_ASSERT("Checksum must fit into first pbuf", p->len >= (pcb->chksum_offset + 2)); + SMEMCPY(((u8_t *)p->payload) + pcb->chksum_offset, &chksum, sizeof(u16_t)); + } +#endif + + NETIF_SET_HWADDRHINT(netif, &pcb->addr_hint); + err = ip_output_if(q, src_ip, ipaddr, pcb->ttl, pcb->tos, pcb->protocol, netif); + NETIF_SET_HWADDRHINT(netif, NULL); + + /* did we chain a header earlier? */ + if (q != p) { + /* free the header */ + pbuf_free(q); + } + return err; +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the address given by raw_connect() + * + * @param pcb the raw pcb which to send + * @param p the IP payload to send + * + */ +err_t +raw_send(struct raw_pcb *pcb, struct pbuf *p) +{ + return raw_sendto(pcb, p, &pcb->remote_ip); +} + +/** + * @ingroup raw_raw + * Remove an RAW PCB. + * + * @param pcb RAW PCB to be removed. The PCB is removed from the list of + * RAW PCB's and the data structure is freed from memory. + * + * @see raw_new() + */ +void +raw_remove(struct raw_pcb *pcb) +{ + struct raw_pcb *pcb2; + /* pcb to be removed is first in list? */ + if (raw_pcbs == pcb) { + /* make list start at 2nd pcb */ + raw_pcbs = raw_pcbs->next; + /* pcb not 1st in list */ + } else { + for (pcb2 = raw_pcbs; pcb2 != NULL; pcb2 = pcb2->next) { + /* find pcb in raw_pcbs list */ + if (pcb2->next != NULL && pcb2->next == pcb) { + /* remove pcb from list */ + pcb2->next = pcb->next; + break; + } + } + } + memp_free(MEMP_RAW_PCB, pcb); +} + +/** + * @ingroup raw_raw + * Create a RAW PCB. + * + * @return The RAW PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @param proto the protocol number of the IPs payload (e.g. IP_PROTO_ICMP) + * + * @see raw_remove() + */ +struct raw_pcb * +raw_new(u8_t proto) +{ + struct raw_pcb *pcb; + + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_new\n")); + + pcb = (struct raw_pcb *)memp_malloc(MEMP_RAW_PCB); + /* could allocate RAW PCB? */ + if (pcb != NULL) { + /* initialize PCB to all zeroes */ + memset(pcb, 0, sizeof(struct raw_pcb)); + pcb->protocol = proto; + pcb->ttl = RAW_TTL; + pcb->next = raw_pcbs; + raw_pcbs = pcb; + } + return pcb; +} + +/** + * @ingroup raw_raw + * Create a RAW PCB for specific IP type. + * + * @return The RAW PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) packets, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. + * @param proto the protocol number (next header) of the IPv6 packet payload + * (e.g. 
IP6_NEXTH_ICMP6) + * + * @see raw_remove() + */ +struct raw_pcb * +raw_new_ip_type(u8_t type, u8_t proto) +{ + struct raw_pcb *pcb; + pcb = raw_new(proto); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** This function is called from netif.c when address is changed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change + */ +void raw_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr) +{ + struct raw_pcb* rpcb; + + if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) { + for (rpcb = raw_pcbs; rpcb != NULL; rpcb = rpcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&rpcb->local_ip, old_addr)) { + /* The PCB is bound to the old ipaddr and + * is set to bound to the new one instead */ + ip_addr_copy(rpcb->local_ip, *new_addr); + } + } + } +} + +#endif /* LWIP_RAW */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/stats.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/stats.c new file mode 100644 index 000000000..32981a612 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/stats.c @@ -0,0 +1,169 @@ +/** + * @file + * Statistics module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
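Putting the raw API calls together (editor's sketch; dest and the payload pbuf p are assumed to exist, icmp_recv_cb is the hypothetical callback sketched earlier, error handling abbreviated):

  struct raw_pcb *pcb = raw_new(IP_PROTO_ICMP);
  if (pcb != NULL) {
    raw_recv(pcb, icmp_recv_cb, NULL);
    raw_bind(pcb, IP4_ADDR_ANY);
    raw_sendto(pcb, p, &dest);  /* p carries the IP payload only */
    raw_remove(pcb);
  }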
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_STATS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/stats.h" +#include "lwip/mem.h" +#include "lwip/debug.h" + +#include + +struct stats_ lwip_stats; + +void +stats_init(void) +{ +#ifdef LWIP_DEBUG +#if MEM_STATS + lwip_stats.mem.name = "MEM"; +#endif /* MEM_STATS */ +#endif /* LWIP_DEBUG */ +} + +#if LWIP_STATS_DISPLAY +void +stats_display_proto(struct stats_proto *proto, const char *name) +{ + LWIP_PLATFORM_DIAG(("\n%s\n\t", name)); + LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", proto->xmit)); + LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", proto->recv)); + LWIP_PLATFORM_DIAG(("fw: %"STAT_COUNTER_F"\n\t", proto->fw)); + LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", proto->drop)); + LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", proto->chkerr)); + LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", proto->lenerr)); + LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", proto->memerr)); + LWIP_PLATFORM_DIAG(("rterr: %"STAT_COUNTER_F"\n\t", proto->rterr)); + LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", proto->proterr)); + LWIP_PLATFORM_DIAG(("opterr: %"STAT_COUNTER_F"\n\t", proto->opterr)); + LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n\t", proto->err)); + LWIP_PLATFORM_DIAG(("cachehit: %"STAT_COUNTER_F"\n", proto->cachehit)); +} + +#if IGMP_STATS || MLD6_STATS +void +stats_display_igmp(struct stats_igmp *igmp, const char *name) +{ + LWIP_PLATFORM_DIAG(("\n%s\n\t", name)); + LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", igmp->xmit)); + LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", igmp->recv)); + LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", igmp->drop)); + LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", igmp->chkerr)); + LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", igmp->lenerr)); + LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", igmp->memerr)); + LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", igmp->proterr)); + LWIP_PLATFORM_DIAG(("rx_v1: %"STAT_COUNTER_F"\n\t", igmp->rx_v1)); + LWIP_PLATFORM_DIAG(("rx_group: %"STAT_COUNTER_F"\n\t", igmp->rx_group)); + LWIP_PLATFORM_DIAG(("rx_general: %"STAT_COUNTER_F"\n\t", igmp->rx_general)); + LWIP_PLATFORM_DIAG(("rx_report: %"STAT_COUNTER_F"\n\t", igmp->rx_report)); + LWIP_PLATFORM_DIAG(("tx_join: %"STAT_COUNTER_F"\n\t", igmp->tx_join)); + LWIP_PLATFORM_DIAG(("tx_leave: %"STAT_COUNTER_F"\n\t", igmp->tx_leave)); + LWIP_PLATFORM_DIAG(("tx_report: %"STAT_COUNTER_F"\n", igmp->tx_report)); +} +#endif /* IGMP_STATS || MLD6_STATS */ + +#if MEM_STATS || MEMP_STATS +void +stats_display_mem(struct stats_mem *mem, const char *name) +{ + LWIP_PLATFORM_DIAG(("\nMEM %s\n\t", name)); + LWIP_PLATFORM_DIAG(("avail: %"U32_F"\n\t", (u32_t)mem->avail)); + LWIP_PLATFORM_DIAG(("used: %"U32_F"\n\t", (u32_t)mem->used)); + LWIP_PLATFORM_DIAG(("max: %"U32_F"\n\t", (u32_t)mem->max)); + LWIP_PLATFORM_DIAG(("err: %"U32_F"\n", (u32_t)mem->err)); +} + +#if MEMP_STATS +void +stats_display_memp(struct stats_mem *mem, int index) +{ + if (index < MEMP_MAX) { + stats_display_mem(mem, mem->name); + } +} +#endif /* MEMP_STATS */ +#endif /* MEM_STATS || MEMP_STATS */ + +#if SYS_STATS +void +stats_display_sys(struct stats_sys *sys) +{ + LWIP_PLATFORM_DIAG(("\nSYS\n\t")); + LWIP_PLATFORM_DIAG(("sem.used: %"U32_F"\n\t", (u32_t)sys->sem.used)); + LWIP_PLATFORM_DIAG(("sem.max: %"U32_F"\n\t", (u32_t)sys->sem.max)); + LWIP_PLATFORM_DIAG(("sem.err: %"U32_F"\n\t", (u32_t)sys->sem.err)); + 
LWIP_PLATFORM_DIAG(("mutex.used: %"U32_F"\n\t", (u32_t)sys->mutex.used)); + LWIP_PLATFORM_DIAG(("mutex.max: %"U32_F"\n\t", (u32_t)sys->mutex.max)); + LWIP_PLATFORM_DIAG(("mutex.err: %"U32_F"\n\t", (u32_t)sys->mutex.err)); + LWIP_PLATFORM_DIAG(("mbox.used: %"U32_F"\n\t", (u32_t)sys->mbox.used)); + LWIP_PLATFORM_DIAG(("mbox.max: %"U32_F"\n\t", (u32_t)sys->mbox.max)); + LWIP_PLATFORM_DIAG(("mbox.err: %"U32_F"\n", (u32_t)sys->mbox.err)); +} +#endif /* SYS_STATS */ + +void +stats_display(void) +{ + s16_t i; + + LINK_STATS_DISPLAY(); + ETHARP_STATS_DISPLAY(); + IPFRAG_STATS_DISPLAY(); + IP6_FRAG_STATS_DISPLAY(); + IP_STATS_DISPLAY(); + ND6_STATS_DISPLAY(); + IP6_STATS_DISPLAY(); + IGMP_STATS_DISPLAY(); + MLD6_STATS_DISPLAY(); + ICMP_STATS_DISPLAY(); + ICMP6_STATS_DISPLAY(); + UDP_STATS_DISPLAY(); + TCP_STATS_DISPLAY(); + MEM_STATS_DISPLAY(); + for (i = 0; i < MEMP_MAX; i++) { + MEMP_STATS_DISPLAY(i); + } + SYS_STATS_DISPLAY(); +} +#endif /* LWIP_STATS_DISPLAY */ + +#endif /* LWIP_STATS */ + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/sys.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/sys.c new file mode 100644 index 000000000..4d059a44a --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/sys.c @@ -0,0 +1,106 @@ +/** + * @file + * lwIP Operating System abstraction + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/** + * @defgroup sys_layer Porting (system abstraction layer) + * @ingroup lwip + * @verbinclude "sys_arch.txt" + * + * @defgroup sys_os OS abstraction layer + * @ingroup sys_layer + * No need to implement functions in this section in NO_SYS mode. + * + * @defgroup sys_sem Semaphores + * @ingroup sys_os + * + * @defgroup sys_mutex Mutexes + * @ingroup sys_os + * Mutexes are recommended to correctly handle priority inversion, + * especially if you use LWIP_CORE_LOCKING . 
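stats_display() is only compiled in when both LWIP_STATS and LWIP_STATS_DISPLAY are enabled in lwipopts.h; a debug build can then dump every enabled counter group from any convenient hook (editor's sketch; the wrapper name is hypothetical):

  void debug_dump_lwip_stats(void)
  {
    stats_display();  /* prints each group via LWIP_PLATFORM_DIAG */
  }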
+ *
+ * @defgroup sys_mbox Mailboxes
+ * @ingroup sys_os
+ *
+ * @defgroup sys_time Time
+ * @ingroup sys_layer
+ *
+ * @defgroup sys_prot Critical sections
+ * @ingroup sys_layer
+ * Used to protect short regions of code against concurrent access.
+ * - Your system is a bare-metal system (probably with an RTOS)
+ *   and interrupts are under your control:
+ *   Implement this as LockInterrupts() / UnlockInterrupts()
+ * - Your system uses an RTOS with deferred interrupt handling from a
+ *   worker thread: Implement as a global mutex or lock/unlock scheduler
+ * - Your system uses a high-level OS with e.g. POSIX signals:
+ *   Implement as a global mutex
+ *
+ * @defgroup sys_misc Misc
+ * @ingroup sys_os
+ */
+
+#include "lwip/opt.h"
+
+#include "lwip/sys.h"
+
+/* Most of the functions defined in sys.h must be implemented in the
+ * architecture-dependent file sys_arch.c */
+
+#if !NO_SYS
+
+#ifndef sys_msleep
+/**
+ * Sleep for some ms. Timeouts are NOT processed while sleeping.
+ *
+ * @param ms number of milliseconds to sleep
+ */
+void
+sys_msleep(u32_t ms)
+{
+  if (ms > 0) {
+    sys_sem_t delaysem;
+    err_t err = sys_sem_new(&delaysem, 0);
+    if (err == ERR_OK) {
+      sys_arch_sem_wait(&delaysem, ms);
+      sys_sem_free(&delaysem);
+    }
+  }
+}
+#endif /* sys_msleep */
+
+#endif /* !NO_SYS */
diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/tcp.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/tcp.c
new file mode 100644
index 000000000..7103a527a
--- /dev/null
+++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/tcp.c
@@ -0,0 +1,2169 @@
+/**
+ * @file
+ * Transmission Control Protocol for IP
+ * See also @ref tcp_raw
+ *
+ * @defgroup tcp_raw TCP
+ * @ingroup callbackstyle_api
+ * Transmission Control Protocol for IP\n
+ * @see @ref raw_api and @ref netconn
+ *
+ * Common functions for the TCP implementation, such as functions
+ * for manipulating the data structures and the TCP timer functions. TCP functions
+ * related to input and output are found in tcp_in.c and tcp_out.c, respectively.\n
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/tcp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/debug.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/nd6.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#ifndef TCP_LOCAL_PORT_RANGE_START +/* From http://www.iana.org/assignments/port-numbers: + "The Dynamic and/or Private Ports are those from 49152 through 65535" */ +#define TCP_LOCAL_PORT_RANGE_START 0xc000 +#define TCP_LOCAL_PORT_RANGE_END 0xffff +#define TCP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & ~TCP_LOCAL_PORT_RANGE_START) + TCP_LOCAL_PORT_RANGE_START)) +#endif + +#if LWIP_TCP_KEEPALIVE +#define TCP_KEEP_DUR(pcb) ((pcb)->keep_cnt * (pcb)->keep_intvl) +#define TCP_KEEP_INTVL(pcb) ((pcb)->keep_intvl) +#else /* LWIP_TCP_KEEPALIVE */ +#define TCP_KEEP_DUR(pcb) TCP_MAXIDLE +#define TCP_KEEP_INTVL(pcb) TCP_KEEPINTVL_DEFAULT +#endif /* LWIP_TCP_KEEPALIVE */ + +/* As initial send MSS, we use TCP_MSS but limit it to 536. */ +#if TCP_MSS > 536 +#define INITIAL_MSS 536 +#else +#define INITIAL_MSS TCP_MSS +#endif + +static const char * const tcp_state_str[] = { + "CLOSED", + "LISTEN", + "SYN_SENT", + "SYN_RCVD", + "ESTABLISHED", + "FIN_WAIT_1", + "FIN_WAIT_2", + "CLOSE_WAIT", + "CLOSING", + "LAST_ACK", + "TIME_WAIT" +}; + +/* last local TCP port */ +static u16_t tcp_port = TCP_LOCAL_PORT_RANGE_START; + +/* Incremented every coarse grained timer shot (typically every 500 ms). */ +u32_t tcp_ticks; +static const u8_t tcp_backoff[13] = + { 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7}; + /* Times per slowtmr hits */ +static const u8_t tcp_persist_backoff[7] = { 3, 6, 12, 24, 48, 96, 120 }; + +/* The TCP PCB lists. */ + +/** List of all TCP PCBs bound but not yet (connected || listening) */ +struct tcp_pcb *tcp_bound_pcbs; +/** List of all TCP PCBs in LISTEN state */ +union tcp_listen_pcbs_t tcp_listen_pcbs; +/** List of all TCP PCBs that are in a state in which + * they accept or send data. */ +struct tcp_pcb *tcp_active_pcbs; +/** List of all TCP PCBs in TIME-WAIT state */ +struct tcp_pcb *tcp_tw_pcbs; + +/** An array with all (non-temporary) PCB lists, mainly used for smaller code size */ +struct tcp_pcb ** const tcp_pcb_lists[] = {&tcp_listen_pcbs.pcbs, &tcp_bound_pcbs, + &tcp_active_pcbs, &tcp_tw_pcbs}; + +u8_t tcp_active_pcbs_changed; + +/** Timer counter to handle calling slow-timer from tcp_tmr() */ +static u8_t tcp_timer; +static u8_t tcp_timer_ctr; +static u16_t tcp_new_port(void); + +static err_t tcp_close_shutdown_fin(struct tcp_pcb *pcb); + +/** + * Initialize this module. 
+ */ +void +tcp_init(void) +{ +#if LWIP_RANDOMIZE_INITIAL_LOCAL_PORTS && defined(LWIP_RAND) + tcp_port = TCP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND()); +#endif /* LWIP_RANDOMIZE_INITIAL_LOCAL_PORTS && defined(LWIP_RAND) */ +} + +/** + * Called periodically to dispatch TCP timers. + */ +void +tcp_tmr(void) +{ + /* Call tcp_fasttmr() every 250 ms */ + tcp_fasttmr(); + + if (++tcp_timer & 1) { + /* Call tcp_slowtmr() every 500 ms, i.e., every other timer + tcp_tmr() is called. */ + tcp_slowtmr(); + } +} + +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG +/** Called when a listen pcb is closed. Iterates one pcb list and removes the + * closed listener pcb from pcb->listener if matching. + */ +static void +tcp_remove_listener(struct tcp_pcb *list, struct tcp_pcb_listen *lpcb) +{ + struct tcp_pcb *pcb; + for (pcb = list; pcb != NULL; pcb = pcb->next) { + if (pcb->listener == lpcb) { + pcb->listener = NULL; + } + } +} +#endif + +/** Called when a listen pcb is closed. Iterates all pcb lists and removes the + * closed listener pcb from pcb->listener if matching. + */ +static void +tcp_listen_closed(struct tcp_pcb *pcb) +{ +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + size_t i; + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("pcb->state == LISTEN", pcb->state == LISTEN); + for (i = 1; i < LWIP_ARRAYSIZE(tcp_pcb_lists); i++) { + tcp_remove_listener(*tcp_pcb_lists[i], (struct tcp_pcb_listen*)pcb); + } +#endif + LWIP_UNUSED_ARG(pcb); +} + +#if TCP_LISTEN_BACKLOG +/** @ingroup tcp_raw + * Delay accepting a connection in respect to the listen backlog: + * the number of outstanding connections is increased until + * tcp_backlog_accepted() is called. + * + * ATTENTION: the caller is responsible for calling tcp_backlog_accepted() + * or else the backlog feature will get out of sync! + * + * @param pcb the connection pcb which is not fully accepted yet + */ +void +tcp_backlog_delayed(struct tcp_pcb* pcb) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + if ((pcb->flags & TF_BACKLOGPEND) == 0) { + if (pcb->listener != NULL) { + pcb->listener->accepts_pending++; + LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0); + pcb->flags |= TF_BACKLOGPEND; + } + } +} + +/** @ingroup tcp_raw + * A delayed-accept a connection is accepted (or closed/aborted): decreases + * the number of outstanding connections after calling tcp_backlog_delayed(). + * + * ATTENTION: the caller is responsible for calling tcp_backlog_accepted() + * or else the backlog feature will get out of sync! + * + * @param pcb the connection pcb which is now fully accepted (or closed/aborted) + */ +void +tcp_backlog_accepted(struct tcp_pcb* pcb) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + if ((pcb->flags & TF_BACKLOGPEND) != 0) { + if (pcb->listener != NULL) { + LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0); + pcb->listener->accepts_pending--; + pcb->flags &= ~TF_BACKLOGPEND; + } + } +} +#endif /* TCP_LISTEN_BACKLOG */ + +/** + * Closes the TX side of a connection held by the PCB. + * For tcp_close(), a RST is sent if the application didn't receive all data + * (tcp_recved() not called for all data passed to recv callback). + * + * Listening pcbs are freed and may not be referenced any more. + * Connection pcbs are freed if not yet connected and may not be referenced + * any more. If a connection is established (at least SYN received or in + * a closing state), the connection is closed, and put in a closing state. + * The pcb is then automatically freed in tcp_slowtmr(). 
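The delayed/accepted pairing typically appears in an accept callback that hands the connection to a worker (editor's sketch; my_accept and enqueue_for_worker are hypothetical):

static err_t
my_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  if (err != ERR_OK) {
    return err;
  }
  tcp_backlog_delayed(newpcb);  /* keep the backlog slot occupied */
  enqueue_for_worker(newpcb);   /* hypothetical hand-off */
  return ERR_OK;
}

/* later, once the worker has really taken over the connection:
 *   tcp_backlog_accepted(newpcb); */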
It is therefore + * unsafe to reference it. + * + * @param pcb the tcp_pcb to close + * @return ERR_OK if connection has been closed + * another err_t if closing failed and pcb is not freed + */ +static err_t +tcp_close_shutdown(struct tcp_pcb *pcb, u8_t rst_on_unacked_data) +{ + if (rst_on_unacked_data && ((pcb->state == ESTABLISHED) || (pcb->state == CLOSE_WAIT))) { + if ((pcb->refused_data != NULL) || (pcb->rcv_wnd != TCP_WND_MAX(pcb))) { + /* Not all data received by application, send RST to tell the remote + side about this. */ + LWIP_ASSERT("pcb->flags & TF_RXCLOSED", pcb->flags & TF_RXCLOSED); + + /* don't call tcp_abort here: we must not deallocate the pcb since + that might not be expected when calling tcp_close */ + tcp_rst(pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip, + pcb->local_port, pcb->remote_port); + + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + if (pcb->state == ESTABLISHED) { + /* move to TIME_WAIT since we close actively */ + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } else { + /* CLOSE_WAIT: deallocate the pcb since we already sent a RST for it */ + if (tcp_input_pcb == pcb) { + /* prevent using a deallocated pcb: free it from tcp_input later */ + tcp_trigger_input_pcb_close(); + } else { + memp_free(MEMP_TCP_PCB, pcb); + } + } + return ERR_OK; + } + } + + /* - states which free the pcb are handled here, + - states which send FIN and change state are handled in tcp_close_shutdown_fin() */ + switch (pcb->state) { + case CLOSED: + /* Closing a pcb in the CLOSED state might seem erroneous, + * however, it is in this state once allocated and as yet unused + * and the user needs some way to free it should the need arise. + * Calling tcp_close() with a pcb that has already been closed, (i.e. twice) + * or for a pcb that has been used and then entered the CLOSED state + * is erroneous, but this should never happen as the pcb has in those cases + * been freed, and so any remaining handles are bogus. */ + if (pcb->local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } + memp_free(MEMP_TCP_PCB, pcb); + break; + case LISTEN: + tcp_listen_closed(pcb); + tcp_pcb_remove(&tcp_listen_pcbs.pcbs, pcb); + memp_free(MEMP_TCP_PCB_LISTEN, pcb); + break; + case SYN_SENT: + TCP_PCB_REMOVE_ACTIVE(pcb); + memp_free(MEMP_TCP_PCB, pcb); + MIB2_STATS_INC(mib2.tcpattemptfails); + break; + default: + return tcp_close_shutdown_fin(pcb); + } + return ERR_OK; +} + +static err_t +tcp_close_shutdown_fin(struct tcp_pcb *pcb) +{ + err_t err; + LWIP_ASSERT("pcb != NULL", pcb != NULL); + + switch (pcb->state) { + case SYN_RCVD: + err = tcp_send_fin(pcb); + if (err == ERR_OK) { + tcp_backlog_accepted(pcb); + MIB2_STATS_INC(mib2.tcpattemptfails); + pcb->state = FIN_WAIT_1; + } + break; + case ESTABLISHED: + err = tcp_send_fin(pcb); + if (err == ERR_OK) { + MIB2_STATS_INC(mib2.tcpestabresets); + pcb->state = FIN_WAIT_1; + } + break; + case CLOSE_WAIT: + err = tcp_send_fin(pcb); + if (err == ERR_OK) { + MIB2_STATS_INC(mib2.tcpestabresets); + pcb->state = LAST_ACK; + } + break; + default: + /* Has already been closed, do nothing. */ + return ERR_OK; + } + + if (err == ERR_OK) { + /* To ensure all data has been sent when tcp_close returns, we have + to make sure tcp_output doesn't fail. + Since we don't really have to ensure all data has been sent when tcp_close + returns (unsent data is sent from tcp timer functions, also), we don't care + for the return value of tcp_output for now. */ + tcp_output(pcb); + } else if (err == ERR_MEM) { + /* Mark this pcb for closing. 
Closing is retried from tcp_tmr. */
+    pcb->flags |= TF_CLOSEPEND;
+    /* We have to return ERR_OK from here to indicate to the callers that this
+       pcb should not be used any more as it will be freed soon via tcp_tmr.
+       This is OK here since sending FIN does not guarantee a time frame for
+       actually freeing the pcb, either (it is left in closure states for
+       remote ACK or timeout) */
+    return ERR_OK;
+  }
+  return err;
+}
+
+/**
+ * @ingroup tcp_raw
+ * Closes the connection held by the PCB.
+ *
+ * Listening pcbs are freed and may not be referenced any more.
+ * Connection pcbs are freed if not yet connected and may not be referenced
+ * any more. If a connection is established (at least SYN received or in
+ * a closing state), the connection is closed, and put in a closing state.
+ * The pcb is then automatically freed in tcp_slowtmr(). It is therefore
+ * unsafe to reference it (unless an error is returned).
+ *
+ * @param pcb the tcp_pcb to close
+ * @return ERR_OK if connection has been closed
+ *         another err_t if closing failed and pcb is not freed
+ */
+err_t
+tcp_close(struct tcp_pcb *pcb)
+{
+  LWIP_DEBUGF(TCP_DEBUG, ("tcp_close: closing in "));
+  tcp_debug_print_state(pcb->state);
+
+  if (pcb->state != LISTEN) {
+    /* Set a flag not to receive any more data... */
+    pcb->flags |= TF_RXCLOSED;
+  }
+  /* ... and close */
+  return tcp_close_shutdown(pcb, 1);
+}
+
+/**
+ * @ingroup tcp_raw
+ * Causes all or part of a full-duplex connection of this PCB to be shut down.
+ * This doesn't deallocate the PCB unless shutting down both sides!
+ * Shutting down both sides is the same as calling tcp_close, so if it succeeds
+ * (i.e. returns ERR_OK), the PCB must not be referenced any more!
+ *
+ * @param pcb PCB to shutdown
+ * @param shut_rx shut down receive side if this is != 0
+ * @param shut_tx shut down send side if this is != 0
+ * @return ERR_OK if shutdown succeeded (or the PCB has already been shut down)
+ *         another err_t on error.
+ */
+err_t
+tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx)
+{
+  if (pcb->state == LISTEN) {
+    return ERR_CONN;
+  }
+  if (shut_rx) {
+    /* shut down the receive side: set a flag not to receive any more data... */
+    pcb->flags |= TF_RXCLOSED;
+    if (shut_tx) {
+      /* shutting down the tx AND rx side is the same as closing for the raw API */
+      return tcp_close_shutdown(pcb, 1);
+    }
+    /* ... and free buffered data */
+    if (pcb->refused_data != NULL) {
+      pbuf_free(pcb->refused_data);
+      pcb->refused_data = NULL;
+    }
+  }
+  if (shut_tx) {
+    /* This can't happen twice since if it succeeds, the pcb's state is changed.
+       Only close in these states as the others directly deallocate the PCB */
+    switch (pcb->state) {
+      case SYN_RCVD:
+      case ESTABLISHED:
+      case CLOSE_WAIT:
+        return tcp_close_shutdown(pcb, (u8_t)shut_rx);
+      default:
+        /* Not (yet?) connected, cannot shutdown the TX side as that would bring us
+           into CLOSED state, where the PCB is deallocated. */
+        return ERR_CONN;
+    }
+  }
+  return ERR_OK;
+}
+
+/**
+ * Abandons a connection and optionally sends a RST to the remote
+ * host. Deletes the local protocol control block. This is done when
+ * a connection is killed because of shortage of memory.
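A common use of tcp_shutdown() is a TX-side half-close once all request data has been queued, while the peer's response is still being received (editor's sketch):

  err_t err = tcp_shutdown(pcb, 0, 1);  /* send FIN, keep the RX side open */
  if (err != ERR_OK) {
    /* FIN could not be queued; the pcb is still valid, retry or abort */
  }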
+ * + * @param pcb the tcp_pcb to abort + * @param reset boolean to indicate whether a reset should be sent + */ +void +tcp_abandon(struct tcp_pcb *pcb, int reset) +{ + u32_t seqno, ackno; +#if LWIP_CALLBACK_API + tcp_err_fn errf; +#endif /* LWIP_CALLBACK_API */ + void *errf_arg; + + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_abort/tcp_abandon for listen-pcbs", + pcb->state != LISTEN); + /* Figure out on which TCP PCB list we are, and remove us. If we + are in an active state, call the receive function associated with + the PCB with a NULL argument, and send an RST to the remote end. */ + if (pcb->state == TIME_WAIT) { + tcp_pcb_remove(&tcp_tw_pcbs, pcb); + memp_free(MEMP_TCP_PCB, pcb); + } else { + int send_rst = 0; + u16_t local_port = 0; + enum tcp_state last_state; + seqno = pcb->snd_nxt; + ackno = pcb->rcv_nxt; +#if LWIP_CALLBACK_API + errf = pcb->errf; +#endif /* LWIP_CALLBACK_API */ + errf_arg = pcb->callback_arg; + if (pcb->state == CLOSED) { + if (pcb->local_port != 0) { + /* bound, not yet opened */ + TCP_RMV(&tcp_bound_pcbs, pcb); + } + } else { + send_rst = reset; + local_port = pcb->local_port; + TCP_PCB_REMOVE_ACTIVE(pcb); + } + if (pcb->unacked != NULL) { + tcp_segs_free(pcb->unacked); + } + if (pcb->unsent != NULL) { + tcp_segs_free(pcb->unsent); + } +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL) { + tcp_segs_free(pcb->ooseq); + } +#endif /* TCP_QUEUE_OOSEQ */ + tcp_backlog_accepted(pcb); + if (send_rst) { + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_abandon: sending RST\n")); + tcp_rst(seqno, ackno, &pcb->local_ip, &pcb->remote_ip, local_port, pcb->remote_port); + } + last_state = pcb->state; + memp_free(MEMP_TCP_PCB, pcb); + TCP_EVENT_ERR(last_state, errf, errf_arg, ERR_ABRT); + } +} + +/** + * @ingroup tcp_raw + * Aborts the connection by sending a RST (reset) segment to the remote + * host. The pcb is deallocated. This function never fails. + * + * ATTENTION: When calling this from one of the TCP callbacks, make + * sure you always return ERR_ABRT (and never return ERR_ABRT otherwise + * or you will risk accessing deallocated memory or memory leaks! + * + * @param pcb the tcp pcb to abort + */ +void +tcp_abort(struct tcp_pcb *pcb) +{ + tcp_abandon(pcb, 1); +} + +/** + * @ingroup tcp_raw + * Binds the connection to a local port number and IP address. If the + * IP address is not given (i.e., ipaddr == NULL), the IP address of + * the outgoing network interface is used instead. + * + * @param pcb the tcp_pcb to bind (no check is done whether this pcb is + * already bound!) + * @param ipaddr the local ip address to bind to (use IP4_ADDR_ANY to bind + * to any local address + * @param port the local port to bind to + * @return ERR_USE if the port is already in use + * ERR_VAL if bind failed because the PCB is not in a valid state + * ERR_OK if bound + */ +err_t +tcp_bind(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + int i; + int max_pcb_list = NUM_TCP_PCB_LISTS; + struct tcp_pcb *cpcb; + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + + /* still need to check for ipaddr == NULL in IPv6 only case */ + if ((pcb == NULL) || (ipaddr == NULL)) { + return ERR_VAL; + } + + LWIP_ERROR("tcp_bind: can only bind in state CLOSED", pcb->state == CLOSED, return ERR_VAL); + +#if SO_REUSE + /* Unless the REUSEADDR flag is set, + we have to check the pcbs in TIME-WAIT state, also. 
+ We do not dump TIME_WAIT pcb's; they can still be matched by incoming + packets using both local and remote IP addresses and ports to distinguish. + */ + if (ip_get_option(pcb, SOF_REUSEADDR)) { + max_pcb_list = NUM_TCP_PCB_LISTS_NO_TIME_WAIT; + } +#endif /* SO_REUSE */ + + if (port == 0) { + port = tcp_new_port(); + if (port == 0) { + return ERR_BUF; + } + } else { + /* Check if the address already is in use (on all lists) */ + for (i = 0; i < max_pcb_list; i++) { + for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) { + if (cpcb->local_port == port) { +#if SO_REUSE + /* Omit checking for the same port if both pcbs have REUSEADDR set. + For SO_REUSEADDR, the duplicate-check for a 5-tuple is done in + tcp_connect. */ + if (!ip_get_option(pcb, SOF_REUSEADDR) || + !ip_get_option(cpcb, SOF_REUSEADDR)) +#endif /* SO_REUSE */ + { + /* @todo: check accept_any_ip_version */ + if ((IP_IS_V6(ipaddr) == IP_IS_V6_VAL(cpcb->local_ip)) && + (ip_addr_isany(&cpcb->local_ip) || + ip_addr_isany(ipaddr) || + ip_addr_cmp(&cpcb->local_ip, ipaddr))) { + return ERR_USE; + } + } + } + } + } + } + + if (!ip_addr_isany(ipaddr)) { + ip_addr_set(&pcb->local_ip, ipaddr); + } + pcb->local_port = port; + TCP_REG(&tcp_bound_pcbs, pcb); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_bind: bind to port %"U16_F"\n", port)); + return ERR_OK; +} +#if LWIP_CALLBACK_API +/** + * Default accept callback if no accept callback is specified by the user. + */ +static err_t +tcp_accept_null(void *arg, struct tcp_pcb *pcb, err_t err) +{ + LWIP_UNUSED_ARG(arg); + LWIP_UNUSED_ARG(err); + + tcp_abort(pcb); + + return ERR_ABRT; +} +#endif /* LWIP_CALLBACK_API */ + +/** + * @ingroup tcp_raw + * Set the state of the connection to be LISTEN, which means that it + * is able to accept incoming connections. The protocol control block + * is reallocated in order to consume less memory. Setting the + * connection to LISTEN is an irreversible process. + * + * @param pcb the original tcp_pcb + * @param backlog the incoming connections queue limit + * @return tcp_pcb used for listening, consumes less memory. + * + * @note The original tcp_pcb is freed. This function therefore has to be + * called like this: + * tpcb = tcp_listen_with_backlog(tpcb, backlog); + */ +struct tcp_pcb * +tcp_listen_with_backlog(struct tcp_pcb *pcb, u8_t backlog) +{ + return tcp_listen_with_backlog_and_err(pcb, backlog, NULL); +} + +/** + * @ingroup tcp_raw + * Set the state of the connection to be LISTEN, which means that it + * is able to accept incoming connections. The protocol control block + * is reallocated in order to consume less memory. Setting the + * connection to LISTEN is an irreversible process. + * + * @param pcb the original tcp_pcb + * @param backlog the incoming connections queue limit + * @param err when NULL is returned, this contains the error reason + * @return tcp_pcb used for listening, consumes less memory. + * + * @note The original tcp_pcb is freed. This function therefore has to be + * called like this: + * tpcb = tcp_listen_with_backlog_and_err(tpcb, backlog, &err); + */ +struct tcp_pcb * +tcp_listen_with_backlog_and_err(struct tcp_pcb *pcb, u8_t backlog, err_t *err) +{ + struct tcp_pcb_listen *lpcb = NULL; + err_t res; + + LWIP_UNUSED_ARG(backlog); + LWIP_ERROR("tcp_listen: pcb already connected", pcb->state == CLOSED, res = ERR_CLSD; goto done); + + /* already listening? 
*/ + if (pcb->state == LISTEN) { + lpcb = (struct tcp_pcb_listen*)pcb; + res = ERR_ALREADY; + goto done; + } +#if SO_REUSE + if (ip_get_option(pcb, SOF_REUSEADDR)) { + /* Since SOF_REUSEADDR allows reusing a local address before the pcb's usage + is declared (listen-/connection-pcb), we have to make sure now that + this port is only used once for every local IP. */ + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + if ((lpcb->local_port == pcb->local_port) && + ip_addr_cmp(&lpcb->local_ip, &pcb->local_ip)) { + /* this address/port is already used */ + lpcb = NULL; + res = ERR_USE; + goto done; + } + } + } +#endif /* SO_REUSE */ + lpcb = (struct tcp_pcb_listen *)memp_malloc(MEMP_TCP_PCB_LISTEN); + if (lpcb == NULL) { + res = ERR_MEM; + goto done; + } + lpcb->callback_arg = pcb->callback_arg; + lpcb->local_port = pcb->local_port; + lpcb->state = LISTEN; + lpcb->prio = pcb->prio; + lpcb->so_options = pcb->so_options; + lpcb->ttl = pcb->ttl; + lpcb->tos = pcb->tos; +#if LWIP_IPV4 && LWIP_IPV6 + IP_SET_TYPE_VAL(lpcb->remote_ip, pcb->local_ip.type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + ip_addr_copy(lpcb->local_ip, pcb->local_ip); + if (pcb->local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } + memp_free(MEMP_TCP_PCB, pcb); +#if LWIP_CALLBACK_API + lpcb->accept = tcp_accept_null; +#endif /* LWIP_CALLBACK_API */ +#if TCP_LISTEN_BACKLOG + lpcb->accepts_pending = 0; + tcp_backlog_set(lpcb, backlog); +#endif /* TCP_LISTEN_BACKLOG */ + TCP_REG(&tcp_listen_pcbs.pcbs, (struct tcp_pcb *)lpcb); + res = ERR_OK; +done: + if (err != NULL) { + *err = res; + } + return (struct tcp_pcb *)lpcb; +} + +/** + * Update the state that tracks the available window space to advertise. + * + * Returns how much extra window would be advertised if we sent an + * update now. + */ +u32_t +tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb) +{ + u32_t new_right_edge = pcb->rcv_nxt + pcb->rcv_wnd; + + if (TCP_SEQ_GEQ(new_right_edge, pcb->rcv_ann_right_edge + LWIP_MIN((TCP_WND / 2), pcb->mss))) { + /* we can advertise more window */ + pcb->rcv_ann_wnd = pcb->rcv_wnd; + return new_right_edge - pcb->rcv_ann_right_edge; + } else { + if (TCP_SEQ_GT(pcb->rcv_nxt, pcb->rcv_ann_right_edge)) { + /* Can happen due to other end sending out of advertised window, + * but within actual available (but not yet advertised) window */ + pcb->rcv_ann_wnd = 0; + } else { + /* keep the right edge of window constant */ + u32_t new_rcv_ann_wnd = pcb->rcv_ann_right_edge - pcb->rcv_nxt; +#if !LWIP_WND_SCALE + LWIP_ASSERT("new_rcv_ann_wnd <= 0xffff", new_rcv_ann_wnd <= 0xffff); +#endif + pcb->rcv_ann_wnd = (tcpwnd_size_t)new_rcv_ann_wnd; + } + return 0; + } +} + +/** + * @ingroup tcp_raw + * This function should be called by the application when it has + * processed the data. The purpose is to advertise a larger window + * when the data has been processed. 
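+ *
+ * A minimal sketch of a recv callback using it (the my_recv name and the
+ * trivial processing step are illustrative only, not part of this API):
+ *
+ *   static err_t my_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
+ *   {
+ *     if (p == NULL) {
+ *       return tcp_close(tpcb);     // remote end closed the connection
+ *     }
+ *     // ... process p->payload here ...
+ *     tcp_recved(tpcb, p->tot_len); // re-open the receive window
+ *     pbuf_free(p);
+ *     return ERR_OK;
+ *   }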
+ * + * @param pcb the tcp_pcb for which data is read + * @param len the amount of bytes that have been read by the application + */ +void +tcp_recved(struct tcp_pcb *pcb, u16_t len) +{ + int wnd_inflation; + + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_recved for listen-pcbs", + pcb->state != LISTEN); + + pcb->rcv_wnd += len; + if (pcb->rcv_wnd > TCP_WND_MAX(pcb)) { + pcb->rcv_wnd = TCP_WND_MAX(pcb); + } else if (pcb->rcv_wnd == 0) { + /* rcv_wnd overflowed */ + if ((pcb->state == CLOSE_WAIT) || (pcb->state == LAST_ACK)) { + /* In passive close, we allow this, since the FIN bit is added to rcv_wnd + by the stack itself, since it is not mandatory for an application + to call tcp_recved() for the FIN bit, but e.g. the netconn API does so. */ + pcb->rcv_wnd = TCP_WND_MAX(pcb); + } else { + LWIP_ASSERT("tcp_recved: len wrapped rcv_wnd\n", 0); + } + } + + wnd_inflation = tcp_update_rcv_ann_wnd(pcb); + + /* If the change in the right edge of window is significant (default + * watermark is TCP_WND/4), then send an explicit update now. + * Otherwise wait for a packet to be sent in the normal course of + * events (or more window to be available later) */ + if (wnd_inflation >= TCP_WND_UPDATE_THRESHOLD) { + tcp_ack_now(pcb); + tcp_output(pcb); + } + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: received %"U16_F" bytes, wnd %"TCPWNDSIZE_F" (%"TCPWNDSIZE_F").\n", + len, pcb->rcv_wnd, (u16_t)(TCP_WND_MAX(pcb) - pcb->rcv_wnd))); +} + +/** + * Allocate a new local TCP port. + * + * @return a new (free) local TCP port number + */ +static u16_t +tcp_new_port(void) +{ + u8_t i; + u16_t n = 0; + struct tcp_pcb *pcb; + +again: + if (tcp_port++ == TCP_LOCAL_PORT_RANGE_END) { + tcp_port = TCP_LOCAL_PORT_RANGE_START; + } + /* Check all PCB lists. */ + for (i = 0; i < NUM_TCP_PCB_LISTS; i++) { + for (pcb = *tcp_pcb_lists[i]; pcb != NULL; pcb = pcb->next) { + if (pcb->local_port == tcp_port) { + if (++n > (TCP_LOCAL_PORT_RANGE_END - TCP_LOCAL_PORT_RANGE_START)) { + return 0; + } + goto again; + } + } + } + return tcp_port; +} + +/** + * @ingroup tcp_raw + * Connects to another host. The function given as the "connected" + * argument will be called when the connection has been established. + * + * @param pcb the tcp_pcb used to establish the connection + * @param ipaddr the remote ip address to connect to + * @param port the remote tcp port to connect to + * @param connected callback function to call when connected (on error, + the err calback will be called) + * @return ERR_VAL if invalid arguments are given + * ERR_OK if connect request has been sent + * other err_t values if connect request couldn't be sent + */ +err_t +tcp_connect(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port, + tcp_connected_fn connected) +{ + err_t ret; + u32_t iss; + u16_t old_local_port; + + if ((pcb == NULL) || (ipaddr == NULL)) { + return ERR_VAL; + } + + LWIP_ERROR("tcp_connect: can only connect from state CLOSED", pcb->state == CLOSED, return ERR_ISCONN); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_connect to port %"U16_F"\n", port)); + ip_addr_set(&pcb->remote_ip, ipaddr); + pcb->remote_port = port; + + /* check if we have a route to the remote host */ + if (ip_addr_isany(&pcb->local_ip)) { + /* no local IP address set, yet. */ + struct netif *netif; + const ip_addr_t *local_ip; + ip_route_get_local_ip(&pcb->local_ip, &pcb->remote_ip, netif, local_ip); + if ((netif == NULL) || (local_ip == NULL)) { + /* Don't even try to send a SYN packet if we have no route + since that will fail. 
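+       (The same route lookup supplies the local IP address that is copied
+       into the pcb below.)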
*/ + return ERR_RTE; + } + /* Use the address as local address of the pcb. */ + ip_addr_copy(pcb->local_ip, *local_ip); + } + + old_local_port = pcb->local_port; + if (pcb->local_port == 0) { + pcb->local_port = tcp_new_port(); + if (pcb->local_port == 0) { + return ERR_BUF; + } + } else { +#if SO_REUSE + if (ip_get_option(pcb, SOF_REUSEADDR)) { + /* Since SOF_REUSEADDR allows reusing a local address, we have to make sure + now that the 5-tuple is unique. */ + struct tcp_pcb *cpcb; + int i; + /* Don't check listen- and bound-PCBs, check active- and TIME-WAIT PCBs. */ + for (i = 2; i < NUM_TCP_PCB_LISTS; i++) { + for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) { + if ((cpcb->local_port == pcb->local_port) && + (cpcb->remote_port == port) && + ip_addr_cmp(&cpcb->local_ip, &pcb->local_ip) && + ip_addr_cmp(&cpcb->remote_ip, ipaddr)) { + /* linux returns EISCONN here, but ERR_USE should be OK for us */ + return ERR_USE; + } + } + } + } +#endif /* SO_REUSE */ + } + + iss = tcp_next_iss(pcb); + pcb->rcv_nxt = 0; + pcb->snd_nxt = iss; + pcb->lastack = iss - 1; + pcb->snd_wl2 = iss - 1; + pcb->snd_lbb = iss - 1; + /* Start with a window that does not need scaling. When window scaling is + enabled and used, the window is enlarged when both sides agree on scaling. */ + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); + pcb->rcv_ann_right_edge = pcb->rcv_nxt; + pcb->snd_wnd = TCP_WND; + /* As initial send MSS, we use TCP_MSS but limit it to 536. + The send MSS is updated when an MSS option is received. */ + pcb->mss = INITIAL_MSS; +#if TCP_CALCULATE_EFF_SEND_MSS + pcb->mss = tcp_eff_send_mss(pcb->mss, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + pcb->cwnd = 1; +#if LWIP_CALLBACK_API + pcb->connected = connected; +#else /* LWIP_CALLBACK_API */ + LWIP_UNUSED_ARG(connected); +#endif /* LWIP_CALLBACK_API */ + + /* Send a SYN together with the MSS option. */ + ret = tcp_enqueue_flags(pcb, TCP_SYN); + if (ret == ERR_OK) { + /* SYN segment was enqueued, changed the pcbs state now */ + pcb->state = SYN_SENT; + if (old_local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } + TCP_REG_ACTIVE(pcb); + MIB2_STATS_INC(mib2.tcpactiveopens); + + tcp_output(pcb); + } + return ret; +} + +/** + * Called every 500 ms and implements the retransmission timer and the timer that + * removes PCBs that have been in TIME-WAIT for enough time. It also increments + * various timers such as the inactivity timer in each PCB. + * + * Automatically called from tcp_tmr(). + */ +void +tcp_slowtmr(void) +{ + struct tcp_pcb *pcb, *prev; + tcpwnd_size_t eff_wnd; + u8_t pcb_remove; /* flag if a PCB should be removed */ + u8_t pcb_reset; /* flag if a RST should be sent when removing */ + err_t err; + + err = ERR_OK; + + ++tcp_ticks; + ++tcp_timer_ctr; + +tcp_slowtmr_start: + /* Steps through all of the active PCBs. 
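+     Each pcb is handled at most once per round: pcb->last_timer guards
+     against double processing when the loop is restarted after a callback
+     has changed the pcb list.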
*/ + prev = NULL; + pcb = tcp_active_pcbs; + if (pcb == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: no active pcbs\n")); + } + while (pcb != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: processing active pcb\n")); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != CLOSED\n", pcb->state != CLOSED); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != LISTEN\n", pcb->state != LISTEN); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != TIME-WAIT\n", pcb->state != TIME_WAIT); + if (pcb->last_timer == tcp_timer_ctr) { + /* skip this pcb, we have already processed it */ + pcb = pcb->next; + continue; + } + pcb->last_timer = tcp_timer_ctr; + + pcb_remove = 0; + pcb_reset = 0; + + if (pcb->state == SYN_SENT && pcb->nrtx >= TCP_SYNMAXRTX) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max SYN retries reached\n")); + } + else if (pcb->nrtx >= TCP_MAXRTX) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max DATA retries reached\n")); + } else { + if (pcb->persist_backoff > 0) { + /* If snd_wnd is zero, use persist timer to send 1 byte probes + * instead of using the standard retransmission mechanism. */ + u8_t backoff_cnt = tcp_persist_backoff[pcb->persist_backoff-1]; + if (pcb->persist_cnt < backoff_cnt) { + pcb->persist_cnt++; + } + if (pcb->persist_cnt >= backoff_cnt) { + if (tcp_zero_window_probe(pcb) == ERR_OK) { + pcb->persist_cnt = 0; + if (pcb->persist_backoff < sizeof(tcp_persist_backoff)) { + pcb->persist_backoff++; + } + } + } + } else { + /* Increase the retransmission timer if it is running */ + if (pcb->rtime >= 0) { + ++pcb->rtime; + } + + if (pcb->unacked != NULL && pcb->rtime >= pcb->rto) { + /* Time for a retransmission. */ + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_slowtmr: rtime %"S16_F + " pcb->rto %"S16_F"\n", + pcb->rtime, pcb->rto)); + + /* Double retransmission time-out unless we are trying to + * connect to somebody (i.e., we are in SYN_SENT). */ + if (pcb->state != SYN_SENT) { + u8_t backoff_idx = LWIP_MIN(pcb->nrtx, sizeof(tcp_backoff)-1); + pcb->rto = ((pcb->sa >> 3) + pcb->sv) << tcp_backoff[backoff_idx]; + } + + /* Reset the retransmission timer. */ + pcb->rtime = 0; + + /* Reduce congestion window and ssthresh. */ + eff_wnd = LWIP_MIN(pcb->cwnd, pcb->snd_wnd); + pcb->ssthresh = eff_wnd >> 1; + if (pcb->ssthresh < (tcpwnd_size_t)(pcb->mss << 1)) { + pcb->ssthresh = (pcb->mss << 1); + } + pcb->cwnd = pcb->mss; + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + + /* The following needs to be called AFTER cwnd is set to one + mss - STJ */ + tcp_rexmit_rto(pcb); + } + } + } + /* Check if this PCB has stayed too long in FIN-WAIT-2 */ + if (pcb->state == FIN_WAIT_2) { + /* If this PCB is in FIN_WAIT_2 because of SHUT_WR don't let it time out. */ + if (pcb->flags & TF_RXCLOSED) { + /* PCB was fully closed (either through close() or SHUT_RDWR): + normal FIN-WAIT timeout handling. */ + if ((u32_t)(tcp_ticks - pcb->tmr) > + TCP_FIN_WAIT_TIMEOUT / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in FIN-WAIT-2\n")); + } + } + } + + /* Check if KEEPALIVE should be sent */ + if (ip_get_option(pcb, SOF_KEEPALIVE) && + ((pcb->state == ESTABLISHED) || + (pcb->state == CLOSE_WAIT))) { + if ((u32_t)(tcp_ticks - pcb->tmr) > + (pcb->keep_idle + TCP_KEEP_DUR(pcb)) / TCP_SLOW_INTERVAL) + { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: KEEPALIVE timeout. 
Aborting connection to ")); + ip_addr_debug_print(TCP_DEBUG, &pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + ++pcb_remove; + ++pcb_reset; + } else if ((u32_t)(tcp_ticks - pcb->tmr) > + (pcb->keep_idle + pcb->keep_cnt_sent * TCP_KEEP_INTVL(pcb)) + / TCP_SLOW_INTERVAL) + { + err = tcp_keepalive(pcb); + if (err == ERR_OK) { + pcb->keep_cnt_sent++; + } + } + } + + /* If this PCB has queued out of sequence data, but has been + inactive for too long, will drop the data (it will eventually + be retransmitted). */ +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL && + (u32_t)tcp_ticks - pcb->tmr >= pcb->rto * TCP_OOSEQ_TIMEOUT) { + tcp_segs_free(pcb->ooseq); + pcb->ooseq = NULL; + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: dropping OOSEQ queued data\n")); + } +#endif /* TCP_QUEUE_OOSEQ */ + + /* Check if this PCB has stayed too long in SYN-RCVD */ + if (pcb->state == SYN_RCVD) { + if ((u32_t)(tcp_ticks - pcb->tmr) > + TCP_SYN_RCVD_TIMEOUT / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in SYN-RCVD\n")); + } + } + + /* Check if this PCB has stayed too long in LAST-ACK */ + if (pcb->state == LAST_ACK) { + if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in LAST-ACK\n")); + } + } + + /* If the PCB should be removed, do it. */ + if (pcb_remove) { + struct tcp_pcb *pcb2; +#if LWIP_CALLBACK_API + tcp_err_fn err_fn = pcb->errf; +#endif /* LWIP_CALLBACK_API */ + void *err_arg; + enum tcp_state last_state; + tcp_pcb_purge(pcb); + /* Remove PCB from tcp_active_pcbs list. */ + if (prev != NULL) { + LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_active_pcbs", pcb != tcp_active_pcbs); + prev->next = pcb->next; + } else { + /* This PCB was the first. */ + LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_active_pcbs", tcp_active_pcbs == pcb); + tcp_active_pcbs = pcb->next; + } + + if (pcb_reset) { + tcp_rst(pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip, + pcb->local_port, pcb->remote_port); + } + + err_arg = pcb->callback_arg; + last_state = pcb->state; + pcb2 = pcb; + pcb = pcb->next; + memp_free(MEMP_TCP_PCB, pcb2); + + tcp_active_pcbs_changed = 0; + TCP_EVENT_ERR(last_state, err_fn, err_arg, ERR_ABRT); + if (tcp_active_pcbs_changed) { + goto tcp_slowtmr_start; + } + } else { + /* get the 'next' element now and work with 'prev' below (in case of abort) */ + prev = pcb; + pcb = pcb->next; + + /* We check if we should poll the connection. */ + ++prev->polltmr; + if (prev->polltmr >= prev->pollinterval) { + prev->polltmr = 0; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: polling application\n")); + tcp_active_pcbs_changed = 0; + TCP_EVENT_POLL(prev, err); + if (tcp_active_pcbs_changed) { + goto tcp_slowtmr_start; + } + /* if err == ERR_ABRT, 'prev' is already deallocated */ + if (err == ERR_OK) { + tcp_output(prev); + } + } + } + } + + + /* Steps through all of the TIME-WAIT PCBs. */ + prev = NULL; + pcb = tcp_tw_pcbs; + while (pcb != NULL) { + LWIP_ASSERT("tcp_slowtmr: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + pcb_remove = 0; + + /* Check if this PCB has stayed long enough in TIME-WAIT */ + if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) { + ++pcb_remove; + } + + /* If the PCB should be removed, do it. */ + if (pcb_remove) { + struct tcp_pcb *pcb2; + tcp_pcb_purge(pcb); + /* Remove PCB from tcp_tw_pcbs list. 
*/ + if (prev != NULL) { + LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_tw_pcbs", pcb != tcp_tw_pcbs); + prev->next = pcb->next; + } else { + /* This PCB was the first. */ + LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_tw_pcbs", tcp_tw_pcbs == pcb); + tcp_tw_pcbs = pcb->next; + } + pcb2 = pcb; + pcb = pcb->next; + memp_free(MEMP_TCP_PCB, pcb2); + } else { + prev = pcb; + pcb = pcb->next; + } + } +} + +/** + * Is called every TCP_FAST_INTERVAL (250 ms) and process data previously + * "refused" by upper layer (application) and sends delayed ACKs. + * + * Automatically called from tcp_tmr(). + */ +void +tcp_fasttmr(void) +{ + struct tcp_pcb *pcb; + + ++tcp_timer_ctr; + +tcp_fasttmr_start: + pcb = tcp_active_pcbs; + + while (pcb != NULL) { + if (pcb->last_timer != tcp_timer_ctr) { + struct tcp_pcb *next; + pcb->last_timer = tcp_timer_ctr; + /* send delayed ACKs */ + if (pcb->flags & TF_ACK_DELAY) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: delayed ACK\n")); + tcp_ack_now(pcb); + tcp_output(pcb); + pcb->flags &= ~(TF_ACK_DELAY | TF_ACK_NOW); + } + /* send pending FIN */ + if (pcb->flags & TF_CLOSEPEND) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: pending FIN\n")); + pcb->flags &= ~(TF_CLOSEPEND); + tcp_close_shutdown_fin(pcb); + } + + next = pcb->next; + + /* If there is data which was previously "refused" by upper layer */ + if (pcb->refused_data != NULL) { + tcp_active_pcbs_changed = 0; + tcp_process_refused_data(pcb); + if (tcp_active_pcbs_changed) { + /* application callback has changed the pcb list: restart the loop */ + goto tcp_fasttmr_start; + } + } + pcb = next; + } else { + pcb = pcb->next; + } + } +} + +/** Call tcp_output for all active pcbs that have TF_NAGLEMEMERR set */ +void +tcp_txnow(void) +{ + struct tcp_pcb *pcb; + + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->flags & TF_NAGLEMEMERR) { + tcp_output(pcb); + } + } +} + +/** Pass pcb->refused_data to the recv callback */ +err_t +tcp_process_refused_data(struct tcp_pcb *pcb) +{ +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + struct pbuf *rest; + while (pcb->refused_data != NULL) +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + { + err_t err; + u8_t refused_flags = pcb->refused_data->flags; + /* set pcb->refused_data to NULL in case the callback frees it and then + closes the pcb */ + struct pbuf *refused_data = pcb->refused_data; +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + pbuf_split_64k(refused_data, &rest); + pcb->refused_data = rest; +#else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = NULL; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + /* Notify again application with data previously received. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: notify kept packet\n")); + TCP_EVENT_RECV(pcb, refused_data, ERR_OK, err); + if (err == ERR_OK) { + /* did refused_data include a FIN? */ + if (refused_flags & PBUF_FLAG_TCP_FIN +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + && (rest == NULL) +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + ) { + /* correct rcv_wnd as the application won't call tcp_recved() + for the FIN's seqno */ + if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) { + pcb->rcv_wnd++; + } + TCP_EVENT_CLOSED(pcb, err); + if (err == ERR_ABRT) { + return ERR_ABRT; + } + } + } else if (err == ERR_ABRT) { + /* if err == ERR_ABRT, 'pcb' is already deallocated */ + /* Drop incoming packets because pcb is "full" (only if the incoming + segment contains data). 
*/ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: drop incoming packets, because pcb is \"full\"\n")); + return ERR_ABRT; + } else { + /* data is still refused, pbuf is still valid (go on for ACK-only packets) */ +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_cat(refused_data, rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = refused_data; + return ERR_INPROGRESS; + } + } + return ERR_OK; +} + +/** + * Deallocates a list of TCP segments (tcp_seg structures). + * + * @param seg tcp_seg list of TCP segments to free + */ +void +tcp_segs_free(struct tcp_seg *seg) +{ + while (seg != NULL) { + struct tcp_seg *next = seg->next; + tcp_seg_free(seg); + seg = next; + } +} + +/** + * Frees a TCP segment (tcp_seg structure). + * + * @param seg single tcp_seg to free + */ +void +tcp_seg_free(struct tcp_seg *seg) +{ + if (seg != NULL) { + if (seg->p != NULL) { + pbuf_free(seg->p); +#if TCP_DEBUG + seg->p = NULL; +#endif /* TCP_DEBUG */ + } + memp_free(MEMP_TCP_SEG, seg); + } +} + +/** + * Sets the priority of a connection. + * + * @param pcb the tcp_pcb to manipulate + * @param prio new priority + */ +void +tcp_setprio(struct tcp_pcb *pcb, u8_t prio) +{ + pcb->prio = prio; +} + +#if TCP_QUEUE_OOSEQ +/** + * Returns a copy of the given TCP segment. + * The pbuf and data are not copied, only the pointers + * + * @param seg the old tcp_seg + * @return a copy of seg + */ +struct tcp_seg * +tcp_seg_copy(struct tcp_seg *seg) +{ + struct tcp_seg *cseg; + + cseg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG); + if (cseg == NULL) { + return NULL; + } + SMEMCPY((u8_t *)cseg, (const u8_t *)seg, sizeof(struct tcp_seg)); + pbuf_ref(cseg->p); + return cseg; +} +#endif /* TCP_QUEUE_OOSEQ */ + +#if LWIP_CALLBACK_API +/** + * Default receive callback that is called if the user didn't register + * a recv callback for the pcb. + */ +err_t +tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err) +{ + LWIP_UNUSED_ARG(arg); + if (p != NULL) { + tcp_recved(pcb, p->tot_len); + pbuf_free(p); + } else if (err == ERR_OK) { + return tcp_close(pcb); + } + return ERR_OK; +} +#endif /* LWIP_CALLBACK_API */ + +/** + * Kills the oldest active connection that has the same or lower priority than + * 'prio'. + * + * @param prio minimum priority + */ +static void +tcp_kill_prio(u8_t prio) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + u8_t mprio; + + mprio = LWIP_MIN(TCP_PRIO_MAX, prio); + + /* We kill the oldest active connection that has lower priority than prio. */ + inactivity = 0; + inactive = NULL; + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->prio <= mprio && + (u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + mprio = pcb->prio; + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_prio: killing oldest PCB %p (%"S32_F")\n", + (void *)inactive, inactivity)); + tcp_abort(inactive); + } +} + +/** + * Kills the oldest connection that is in specific state. + * Called from tcp_alloc() for LAST_ACK and CLOSING if no more connections are available. + */ +static void +tcp_kill_state(enum tcp_state state) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + + LWIP_ASSERT("invalid state", (state == CLOSING) || (state == LAST_ACK)); + + inactivity = 0; + inactive = NULL; + /* Go through the list of active pcbs and get the oldest pcb that is in state + CLOSING/LAST_ACK. 
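+     In both states the connection is already being shut down, so no data
+     is lost by abandoning it.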
*/ + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->state == state) { + if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + } + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_closing: killing oldest %s PCB %p (%"S32_F")\n", + tcp_state_str[state], (void *)inactive, inactivity)); + /* Don't send a RST, since no data is lost. */ + tcp_abandon(inactive, 0); + } +} + +/** + * Kills the oldest connection that is in TIME_WAIT state. + * Called from tcp_alloc() if no more connections are available. + */ +static void +tcp_kill_timewait(void) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + + inactivity = 0; + inactive = NULL; + /* Go through the list of TIME_WAIT pcbs and get the oldest pcb. */ + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_timewait: killing oldest TIME-WAIT PCB %p (%"S32_F")\n", + (void *)inactive, inactivity)); + tcp_abort(inactive); + } +} + +/** + * Allocate a new tcp_pcb structure. + * + * @param prio priority for the new pcb + * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_alloc(u8_t prio) +{ + struct tcp_pcb *pcb; + + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest connection in TIME-WAIT. */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest TIME-WAIT connection\n")); + tcp_kill_timewait(); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest connection in LAST-ACK (these wouldn't go to TIME-WAIT). */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest LAST-ACK connection\n")); + tcp_kill_state(LAST_ACK); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest connection in CLOSING. */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest CLOSING connection\n")); + tcp_kill_state(CLOSING); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing active connections with lower priority than the new one. */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing connection with prio lower than %d\n", prio)); + tcp_kill_prio(prio); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed above */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* zero out the whole pcb, so there is no need to initialize members to zero */ + memset(pcb, 0, sizeof(struct tcp_pcb)); + pcb->prio = prio; + pcb->snd_buf = TCP_SND_BUF; + /* Start with a window that does not need scaling. When window scaling is + enabled and used, the window is enlarged when both sides agree on scaling. 
*/ + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); + pcb->ttl = TCP_TTL; + /* As initial send MSS, we use TCP_MSS but limit it to 536. + The send MSS is updated when an MSS option is received. */ + pcb->mss = INITIAL_MSS; + pcb->rto = 3000 / TCP_SLOW_INTERVAL; + pcb->sv = 3000 / TCP_SLOW_INTERVAL; + pcb->rtime = -1; + pcb->cwnd = 1; + pcb->tmr = tcp_ticks; + pcb->last_timer = tcp_timer_ctr; + + /* RFC 5681 recommends setting ssthresh abritrarily high and gives an example + of using the largest advertised receive window. We've seen complications with + receiving TCPs that use window scaling and/or window auto-tuning where the + initial advertised window is very small and then grows rapidly once the + connection is established. To avoid these complications, we set ssthresh to the + largest effective cwnd (amount of in-flight data) that the sender can have. */ + pcb->ssthresh = TCP_SND_BUF; + +#if LWIP_CALLBACK_API + pcb->recv = tcp_recv_null; +#endif /* LWIP_CALLBACK_API */ + + /* Init KEEPALIVE timer */ + pcb->keep_idle = TCP_KEEPIDLE_DEFAULT; + +#if LWIP_TCP_KEEPALIVE + pcb->keep_intvl = TCP_KEEPINTVL_DEFAULT; + pcb->keep_cnt = TCP_KEEPCNT_DEFAULT; +#endif /* LWIP_TCP_KEEPALIVE */ + } + return pcb; +} + +/** + * @ingroup tcp_raw + * Creates a new TCP protocol control block but doesn't place it on + * any of the TCP PCB lists. + * The pcb is not put on any list until binding using tcp_bind(). + * + * @internal: Maybe there should be a idle TCP PCB list where these + * PCBs are put on. Port reservation using tcp_bind() is implemented but + * allocated pcbs that are not bound can't be killed automatically if wanting + * to allocate a pcb with higher prio (@see tcp_kill_prio()) + * + * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_new(void) +{ + return tcp_alloc(TCP_PRIO_NORMAL); +} + +/** + * @ingroup tcp_raw + * Creates a new TCP protocol control block but doesn't + * place it on any of the TCP PCB lists. + * The pcb is not put on any list until binding using tcp_bind(). + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) connections, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. + * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_new_ip_type(u8_t type) +{ + struct tcp_pcb * pcb; + pcb = tcp_alloc(TCP_PRIO_NORMAL); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** + * @ingroup tcp_raw + * Used to specify the argument that should be passed callback + * functions. + * + * @param pcb tcp_pcb to set the callback argument + * @param arg void pointer argument to pass to callback functions + */ +void +tcp_arg(struct tcp_pcb *pcb, void *arg) +{ + /* This function is allowed to be called for both listen pcbs and + connection pcbs. */ + if (pcb != NULL) { + pcb->callback_arg = arg; + } +} +#if LWIP_CALLBACK_API + +/** + * @ingroup tcp_raw + * Used to specify the function that should be called when a TCP + * connection receives data. 
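+ *
+ * A typical setup, sketched (state and my_recv are application-defined
+ * placeholders):
+ *
+ *   struct tcp_pcb *pcb = tcp_new();
+ *   tcp_arg(pcb, state);
+ *   tcp_recv(pcb, my_recv);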
+ * + * @param pcb tcp_pcb to set the recv callback + * @param recv callback function to call for this pcb when data is received + */ +void +tcp_recv(struct tcp_pcb *pcb, tcp_recv_fn recv) +{ + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for recv callback", pcb->state != LISTEN); + pcb->recv = recv; + } +} + +/** + * @ingroup tcp_raw + * Used to specify the function that should be called when TCP data + * has been successfully delivered to the remote host. + * + * @param pcb tcp_pcb to set the sent callback + * @param sent callback function to call for this pcb when data is successfully sent + */ +void +tcp_sent(struct tcp_pcb *pcb, tcp_sent_fn sent) +{ + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for sent callback", pcb->state != LISTEN); + pcb->sent = sent; + } +} + +/** + * @ingroup tcp_raw + * Used to specify the function that should be called when a fatal error + * has occurred on the connection. + * + * @note The corresponding pcb is already freed when this callback is called! + * + * @param pcb tcp_pcb to set the err callback + * @param err callback function to call for this pcb when a fatal error + * has occurred on the connection + */ +void +tcp_err(struct tcp_pcb *pcb, tcp_err_fn err) +{ + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for err callback", pcb->state != LISTEN); + pcb->errf = err; + } +} + +/** + * @ingroup tcp_raw + * Used for specifying the function that should be called when a + * LISTENing connection has been connected to another host. + * + * @param pcb tcp_pcb to set the accept callback + * @param accept callback function to call for this pcb when LISTENing + * connection has been connected to another host + */ +void +tcp_accept(struct tcp_pcb *pcb, tcp_accept_fn accept) +{ + if ((pcb != NULL) && (pcb->state == LISTEN)) { + struct tcp_pcb_listen *lpcb = (struct tcp_pcb_listen*)pcb; + lpcb->accept = accept; + } +} +#endif /* LWIP_CALLBACK_API */ + + +/** + * @ingroup tcp_raw + * Used to specify the function that should be called periodically + * from TCP. The interval is specified in terms of the TCP coarse + * timer interval, which is called twice a second. + * + */ +void +tcp_poll(struct tcp_pcb *pcb, tcp_poll_fn poll, u8_t interval) +{ + LWIP_ASSERT("invalid socket state for poll", pcb->state != LISTEN); +#if LWIP_CALLBACK_API + pcb->poll = poll; +#else /* LWIP_CALLBACK_API */ + LWIP_UNUSED_ARG(poll); +#endif /* LWIP_CALLBACK_API */ + pcb->pollinterval = interval; +} + +/** + * Purges a TCP PCB. Removes any buffered data and frees the buffer memory + * (pcb->ooseq, pcb->unsent and pcb->unacked are freed). + * + * @param pcb tcp_pcb to purge. The pcb itself is not deallocated! 
+ */ +void +tcp_pcb_purge(struct tcp_pcb *pcb) +{ + if (pcb->state != CLOSED && + pcb->state != TIME_WAIT && + pcb->state != LISTEN) { + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge\n")); + + tcp_backlog_accepted(pcb); + + if (pcb->refused_data != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->refused_data\n")); + pbuf_free(pcb->refused_data); + pcb->refused_data = NULL; + } + if (pcb->unsent != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: not all data sent\n")); + } + if (pcb->unacked != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->unacked\n")); + } +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->ooseq\n")); + } + tcp_segs_free(pcb->ooseq); + pcb->ooseq = NULL; +#endif /* TCP_QUEUE_OOSEQ */ + + /* Stop the retransmission timer as it will expect data on unacked + queue if it fires */ + pcb->rtime = -1; + + tcp_segs_free(pcb->unsent); + tcp_segs_free(pcb->unacked); + pcb->unacked = pcb->unsent = NULL; +#if TCP_OVERSIZE + pcb->unsent_oversize = 0; +#endif /* TCP_OVERSIZE */ + } +} + +/** + * Purges the PCB and removes it from a PCB list. Any delayed ACKs are sent first. + * + * @param pcblist PCB list to purge. + * @param pcb tcp_pcb to purge. The pcb itself is NOT deallocated! + */ +void +tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb) +{ + TCP_RMV(pcblist, pcb); + + tcp_pcb_purge(pcb); + + /* if there is an outstanding delayed ACKs, send it */ + if (pcb->state != TIME_WAIT && + pcb->state != LISTEN && + pcb->flags & TF_ACK_DELAY) { + pcb->flags |= TF_ACK_NOW; + tcp_output(pcb); + } + + if (pcb->state != LISTEN) { + LWIP_ASSERT("unsent segments leaking", pcb->unsent == NULL); + LWIP_ASSERT("unacked segments leaking", pcb->unacked == NULL); +#if TCP_QUEUE_OOSEQ + LWIP_ASSERT("ooseq segments leaking", pcb->ooseq == NULL); +#endif /* TCP_QUEUE_OOSEQ */ + } + + pcb->state = CLOSED; + /* reset the local port to prevent the pcb from being 'bound' */ + pcb->local_port = 0; + + LWIP_ASSERT("tcp_pcb_remove: tcp_pcbs_sane()", tcp_pcbs_sane()); +} + +/** + * Calculates a new initial sequence number for new connections. + * + * @return u32_t pseudo random sequence number + */ +u32_t +tcp_next_iss(struct tcp_pcb *pcb) +{ +#ifdef LWIP_HOOK_TCP_ISN + return LWIP_HOOK_TCP_ISN(&pcb->local_ip, pcb->local_port, &pcb->remote_ip, pcb->remote_port); +#else /* LWIP_HOOK_TCP_ISN */ + static u32_t iss = 6510; + + LWIP_UNUSED_ARG(pcb); + + iss += tcp_ticks; /* XXX */ + return iss; +#endif /* LWIP_HOOK_TCP_ISN */ +} + +#if TCP_CALCULATE_EFF_SEND_MSS +/** + * Calculates the effective send mss that can be used for a specific IP address + * by using ip_route to determine the netif used to send to the address and + * calculating the minimum of TCP_MSS and that netif's mtu (if set). + */ +u16_t +tcp_eff_send_mss_impl(u16_t sendmss, const ip_addr_t *dest +#if LWIP_IPV6 || LWIP_IPV4_SRC_ROUTING + , const ip_addr_t *src +#endif /* LWIP_IPV6 || LWIP_IPV4_SRC_ROUTING */ + ) +{ + u16_t mss_s; + struct netif *outif; + s16_t mtu; + + outif = ip_route(src, dest); +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_IS_V6(dest)) +#endif /* LWIP_IPV4 */ + { + /* First look in destination cache, to see if there is a Path MTU. 
*/ + mtu = nd6_get_destination_mtu(ip_2_ip6(dest), outif); + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + if (outif == NULL) { + return sendmss; + } + mtu = outif->mtu; + } +#endif /* LWIP_IPV4 */ + + if (mtu != 0) { +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_IS_V6(dest)) +#endif /* LWIP_IPV4 */ + { + mss_s = mtu - IP6_HLEN - TCP_HLEN; + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + mss_s = mtu - IP_HLEN - TCP_HLEN; + } +#endif /* LWIP_IPV4 */ + /* RFC 1122, chap 4.2.2.6: + * Eff.snd.MSS = min(SendMSS+20, MMS_S) - TCPhdrsize - IPoptionsize + * We correct for TCP options in tcp_write(), and don't support IP options. + */ + sendmss = LWIP_MIN(sendmss, mss_s); + } + return sendmss; +} +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + +/** Helper function for tcp_netif_ip_addr_changed() that iterates a pcb list */ +static void +tcp_netif_ip_addr_changed_pcblist(const ip_addr_t* old_addr, struct tcp_pcb* pcb_list) +{ + struct tcp_pcb *pcb; + pcb = pcb_list; + while (pcb != NULL) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&pcb->local_ip, old_addr) +#if LWIP_AUTOIP + /* connections to link-local addresses must persist (RFC3927 ch. 1.9) */ + && (!IP_IS_V4_VAL(pcb->local_ip) || !ip4_addr_islinklocal(ip_2_ip4(&pcb->local_ip))) +#endif /* LWIP_AUTOIP */ + ) { + /* this connection must be aborted */ + struct tcp_pcb *next = pcb->next; + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: aborting TCP pcb %p\n", (void *)pcb)); + tcp_abort(pcb); + pcb = next; + } else { + pcb = pcb->next; + } + } +} + +/** This function is called from netif.c when address is changed or netif is removed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change or NULL if netif has been removed + */ +void +tcp_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr) +{ + struct tcp_pcb_listen *lpcb, *next; + + if (!ip_addr_isany(old_addr)) { + tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_active_pcbs); + tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_bound_pcbs); + + if (!ip_addr_isany(new_addr)) { + /* PCB bound to current local interface address? */ + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = next) { + next = lpcb->next; + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&lpcb->local_ip, old_addr)) { + /* The PCB is listening to the old ipaddr and + * is set to listen to the new one instead */ + ip_addr_copy(lpcb->local_ip, *new_addr); + } + } + } + } +} + +const char* +tcp_debug_state_str(enum tcp_state s) +{ + return tcp_state_str[s]; +} + +#if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG +/** + * Print a tcp header for debugging purposes. 
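+ * tcp_input() calls this for every segment when TCP_INPUT_DEBUG is enabled.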
+ * + * @param tcphdr pointer to a struct tcp_hdr + */ +void +tcp_debug_print(struct tcp_hdr *tcphdr) +{ + LWIP_DEBUGF(TCP_DEBUG, ("TCP header:\n")); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n", + lwip_ntohs(tcphdr->src), lwip_ntohs(tcphdr->dest))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (seq no)\n", + lwip_ntohl(tcphdr->seqno))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (ack no)\n", + lwip_ntohl(tcphdr->ackno))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %2"U16_F" | |%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"| %5"U16_F" | (hdrlen, flags (", + TCPH_HDRLEN(tcphdr), + (u16_t)(TCPH_FLAGS(tcphdr) >> 5 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 4 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 3 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 2 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 1 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) & 1), + lwip_ntohs(tcphdr->wnd))); + tcp_debug_print_flags(TCPH_FLAGS(tcphdr)); + LWIP_DEBUGF(TCP_DEBUG, ("), win)\n")); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| 0x%04"X16_F" | %5"U16_F" | (chksum, urgp)\n", + lwip_ntohs(tcphdr->chksum), lwip_ntohs(tcphdr->urgp))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); +} + +/** + * Print a tcp state for debugging purposes. + * + * @param s enum tcp_state to print + */ +void +tcp_debug_print_state(enum tcp_state s) +{ + LWIP_DEBUGF(TCP_DEBUG, ("State: %s\n", tcp_state_str[s])); +} + +/** + * Print tcp flags for debugging purposes. + * + * @param flags tcp flags, all active flags are printed + */ +void +tcp_debug_print_flags(u8_t flags) +{ + if (flags & TCP_FIN) { + LWIP_DEBUGF(TCP_DEBUG, ("FIN ")); + } + if (flags & TCP_SYN) { + LWIP_DEBUGF(TCP_DEBUG, ("SYN ")); + } + if (flags & TCP_RST) { + LWIP_DEBUGF(TCP_DEBUG, ("RST ")); + } + if (flags & TCP_PSH) { + LWIP_DEBUGF(TCP_DEBUG, ("PSH ")); + } + if (flags & TCP_ACK) { + LWIP_DEBUGF(TCP_DEBUG, ("ACK ")); + } + if (flags & TCP_URG) { + LWIP_DEBUGF(TCP_DEBUG, ("URG ")); + } + if (flags & TCP_ECE) { + LWIP_DEBUGF(TCP_DEBUG, ("ECE ")); + } + if (flags & TCP_CWR) { + LWIP_DEBUGF(TCP_DEBUG, ("CWR ")); + } + LWIP_DEBUGF(TCP_DEBUG, ("\n")); +} + +/** + * Print all tcp_pcbs in every list for debugging purposes. 
+ */ +void +tcp_debug_print_pcbs(void) +{ + struct tcp_pcb *pcb; + struct tcp_pcb_listen *pcbl; + + LWIP_DEBUGF(TCP_DEBUG, ("Active PCB states:\n")); + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ", + pcb->local_port, pcb->remote_port, + pcb->snd_nxt, pcb->rcv_nxt)); + tcp_debug_print_state(pcb->state); + } + + LWIP_DEBUGF(TCP_DEBUG, ("Listen PCB states:\n")); + for (pcbl = tcp_listen_pcbs.listen_pcbs; pcbl != NULL; pcbl = pcbl->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F" ", pcbl->local_port)); + tcp_debug_print_state(pcbl->state); + } + + LWIP_DEBUGF(TCP_DEBUG, ("TIME-WAIT PCB states:\n")); + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ", + pcb->local_port, pcb->remote_port, + pcb->snd_nxt, pcb->rcv_nxt)); + tcp_debug_print_state(pcb->state); + } +} + +/** + * Check state consistency of the tcp_pcb lists. + */ +s16_t +tcp_pcbs_sane(void) +{ + struct tcp_pcb *pcb; + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != CLOSED", pcb->state != CLOSED); + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != LISTEN", pcb->state != LISTEN); + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT); + } + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_pcbs_sane: tw pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + } + return 1; +} +#endif /* TCP_DEBUG */ + +#endif /* LWIP_TCP */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/tcp_in.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/tcp_in.c new file mode 100644 index 000000000..17ceaffd4 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/tcp_in.c @@ -0,0 +1,1837 @@ +/** + * @file + * Transmission Control Protocol, incoming traffic + * + * The input processing functions of the TCP layer. + * + * These functions are generally called in the order (ip_input() ->) + * tcp_input() -> * tcp_process() -> tcp_receive() (-> application). + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcp_priv.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/inet_chksum.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#if LWIP_ND6_TCP_REACHABILITY_HINTS +#include "lwip/nd6.h" +#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ + +/** Initial CWND calculation as defined RFC 2581 */ +#define LWIP_TCP_CALC_INITIAL_CWND(mss) LWIP_MIN((4U * (mss)), LWIP_MAX((2U * (mss)), 4380U)); + +/* These variables are global to all functions involved in the input + processing of TCP segments. They are set by the tcp_input() + function. */ +static struct tcp_seg inseg; +static struct tcp_hdr *tcphdr; +static u16_t tcphdr_optlen; +static u16_t tcphdr_opt1len; +static u8_t* tcphdr_opt2; +static u16_t tcp_optidx; +static u32_t seqno, ackno; +static tcpwnd_size_t recv_acked; +static u16_t tcplen; +static u8_t flags; + +static u8_t recv_flags; +static struct pbuf *recv_data; + +struct tcp_pcb *tcp_input_pcb; + +/* Forward declarations. */ +static err_t tcp_process(struct tcp_pcb *pcb); +static void tcp_receive(struct tcp_pcb *pcb); +static void tcp_parseopt(struct tcp_pcb *pcb); + +static void tcp_listen_input(struct tcp_pcb_listen *pcb); +static void tcp_timewait_input(struct tcp_pcb *pcb); + +static int tcp_input_delayed_close(struct tcp_pcb *pcb); + +/** + * The initial input processing of TCP. It verifies the TCP header, demultiplexes + * the segment between the PCBs and passes it on to tcp_process(), which implements + * the TCP finite state machine. This function is called by the IP layer (in + * ip_input()). + * + * @param p received TCP segment to process (p->payload pointing to the TCP header) + * @param inp network interface on which this segment was received + */ +void +tcp_input(struct pbuf *p, struct netif *inp) +{ + struct tcp_pcb *pcb, *prev; + struct tcp_pcb_listen *lpcb; +#if SO_REUSE + struct tcp_pcb *lpcb_prev = NULL; + struct tcp_pcb_listen *lpcb_any = NULL; +#endif /* SO_REUSE */ + u8_t hdrlen_bytes; + err_t err; + + LWIP_UNUSED_ARG(inp); + + PERF_START; + + TCP_STATS_INC(tcp.recv); + MIB2_STATS_INC(mib2.tcpinsegs); + + tcphdr = (struct tcp_hdr *)p->payload; + +#if TCP_INPUT_DEBUG + tcp_debug_print(tcphdr); +#endif + + /* Check that TCP header fits in payload */ + if (p->len < TCP_HLEN) { + /* drop short packets */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: short packet (%"U16_F" bytes) discarded\n", p->tot_len)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* Don't even process incoming broadcasts/multicasts. 
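+     A TCP connection always runs between two unicast endpoints.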
*/ + if (ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()) || + ip_addr_ismulticast(ip_current_dest_addr())) { + TCP_STATS_INC(tcp.proterr); + goto dropped; + } + +#if CHECKSUM_CHECK_TCP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_TCP) { + /* Verify TCP checksum. */ + u16_t chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, + ip_current_src_addr(), ip_current_dest_addr()); + if (chksum != 0) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packet discarded due to failing checksum 0x%04"X16_F"\n", + chksum)); + tcp_debug_print(tcphdr); + TCP_STATS_INC(tcp.chkerr); + goto dropped; + } + } +#endif /* CHECKSUM_CHECK_TCP */ + + /* sanity-check header length */ + hdrlen_bytes = TCPH_HDRLEN(tcphdr) * 4; + if ((hdrlen_bytes < TCP_HLEN) || (hdrlen_bytes > p->tot_len)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: invalid header length (%"U16_F")\n", (u16_t)hdrlen_bytes)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* Move the payload pointer in the pbuf so that it points to the + TCP data instead of the TCP header. */ + tcphdr_optlen = hdrlen_bytes - TCP_HLEN; + tcphdr_opt2 = NULL; + if (p->len >= hdrlen_bytes) { + /* all options are in the first pbuf */ + tcphdr_opt1len = tcphdr_optlen; + pbuf_header(p, -(s16_t)hdrlen_bytes); /* cannot fail */ + } else { + u16_t opt2len; + /* TCP header fits into first pbuf, options don't - data is in the next pbuf */ + /* there must be a next pbuf, due to hdrlen_bytes sanity check above */ + LWIP_ASSERT("p->next != NULL", p->next != NULL); + + /* advance over the TCP header (cannot fail) */ + pbuf_header(p, -TCP_HLEN); + + /* determine how long the first and second parts of the options are */ + tcphdr_opt1len = p->len; + opt2len = tcphdr_optlen - tcphdr_opt1len; + + /* options continue in the next pbuf: set p to zero length and hide the + options in the next pbuf (adjusting p->tot_len) */ + pbuf_header(p, -(s16_t)tcphdr_opt1len); + + /* check that the options fit in the second pbuf */ + if (opt2len > p->next->len) { + /* drop short packets */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: options overflow second pbuf (%"U16_F" bytes)\n", p->next->len)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* remember the pointer to the second part of the options */ + tcphdr_opt2 = (u8_t*)p->next->payload; + + /* advance p->next to point after the options, and manually + adjust p->tot_len to keep it consistent with the changed p->next */ + pbuf_header(p->next, -(s16_t)opt2len); + p->tot_len -= opt2len; + + LWIP_ASSERT("p->len == 0", p->len == 0); + LWIP_ASSERT("p->tot_len == p->next->tot_len", p->tot_len == p->next->tot_len); + } + + /* Convert fields in TCP header to host byte order. */ + tcphdr->src = lwip_ntohs(tcphdr->src); + tcphdr->dest = lwip_ntohs(tcphdr->dest); + seqno = tcphdr->seqno = lwip_ntohl(tcphdr->seqno); + ackno = tcphdr->ackno = lwip_ntohl(tcphdr->ackno); + tcphdr->wnd = lwip_ntohs(tcphdr->wnd); + + flags = TCPH_FLAGS(tcphdr); + tcplen = p->tot_len + ((flags & (TCP_FIN | TCP_SYN)) ? 1 : 0); + + /* Demultiplex an incoming segment. First, we check if it is destined + for an active connection. 
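+     If not, the TIME-WAIT list and finally the LISTEN pcbs are searched.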
*/ + prev = NULL; + + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_input: active pcb->state != CLOSED", pcb->state != CLOSED); + LWIP_ASSERT("tcp_input: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT); + LWIP_ASSERT("tcp_input: active pcb->state != LISTEN", pcb->state != LISTEN); + if (pcb->remote_port == tcphdr->src && + pcb->local_port == tcphdr->dest && + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) && + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + /* Move this PCB to the front of the list so that subsequent + lookups will be faster (we exploit locality in TCP segment + arrivals). */ + LWIP_ASSERT("tcp_input: pcb->next != pcb (before cache)", pcb->next != pcb); + if (prev != NULL) { + prev->next = pcb->next; + pcb->next = tcp_active_pcbs; + tcp_active_pcbs = pcb; + } else { + TCP_STATS_INC(tcp.cachehit); + } + LWIP_ASSERT("tcp_input: pcb->next != pcb (after cache)", pcb->next != pcb); + break; + } + prev = pcb; + } + + if (pcb == NULL) { + /* If it did not go to an active connection, we check the connections + in the TIME-WAIT state. */ + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_input: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + if (pcb->remote_port == tcphdr->src && + pcb->local_port == tcphdr->dest && + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) && + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + /* We don't really care enough to move this PCB to the front + of the list since we are not very likely to receive that + many segments for connections in TIME-WAIT. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for TIME_WAITing connection.\n")); + tcp_timewait_input(pcb); + pbuf_free(p); + return; + } + } + + /* Finally, if we still did not get a match, we check all PCBs that + are LISTENing for incoming connections. */ + prev = NULL; + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + if (lpcb->local_port == tcphdr->dest) { + if (IP_IS_ANY_TYPE_VAL(lpcb->local_ip)) { + /* found an ANY TYPE (IPv4/IPv6) match */ +#if SO_REUSE + lpcb_any = lpcb; + lpcb_prev = prev; +#else /* SO_REUSE */ + break; +#endif /* SO_REUSE */ + } else if (IP_ADDR_PCB_VERSION_MATCH_EXACT(lpcb, ip_current_dest_addr())) { + if (ip_addr_cmp(&lpcb->local_ip, ip_current_dest_addr())) { + /* found an exact match */ + break; + } else if (ip_addr_isany(&lpcb->local_ip)) { + /* found an ANY-match */ +#if SO_REUSE + lpcb_any = lpcb; + lpcb_prev = prev; +#else /* SO_REUSE */ + break; + #endif /* SO_REUSE */ + } + } + } + prev = (struct tcp_pcb *)lpcb; + } +#if SO_REUSE + /* first try specific local IP */ + if (lpcb == NULL) { + /* only pass to ANY if no specific local IP has been found */ + lpcb = lpcb_any; + prev = lpcb_prev; + } +#endif /* SO_REUSE */ + if (lpcb != NULL) { + /* Move this PCB to the front of the list so that subsequent + lookups will be faster (we exploit locality in TCP segment + arrivals). 
*/ + if (prev != NULL) { + ((struct tcp_pcb_listen *)prev)->next = lpcb->next; + /* our successor is the remainder of the listening list */ + lpcb->next = tcp_listen_pcbs.listen_pcbs; + /* put this listening pcb at the head of the listening list */ + tcp_listen_pcbs.listen_pcbs = lpcb; + } else { + TCP_STATS_INC(tcp.cachehit); + } + + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for LISTENing connection.\n")); + tcp_listen_input(lpcb); + pbuf_free(p); + return; + } + } + +#if TCP_INPUT_DEBUG + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("+-+-+-+-+-+-+-+-+-+-+-+-+-+- tcp_input: flags ")); + tcp_debug_print_flags(TCPH_FLAGS(tcphdr)); + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n")); +#endif /* TCP_INPUT_DEBUG */ + + + if (pcb != NULL) { + /* The incoming segment belongs to a connection. */ +#if TCP_INPUT_DEBUG + tcp_debug_print_state(pcb->state); +#endif /* TCP_INPUT_DEBUG */ + + /* Set up a tcp_seg structure. */ + inseg.next = NULL; + inseg.len = p->tot_len; + inseg.p = p; + inseg.tcphdr = tcphdr; + + recv_data = NULL; + recv_flags = 0; + recv_acked = 0; + + if (flags & TCP_PSH) { + p->flags |= PBUF_FLAG_PUSH; + } + + /* If there is data which was previously "refused" by upper layer */ + if (pcb->refused_data != NULL) { + if ((tcp_process_refused_data(pcb) == ERR_ABRT) || + ((pcb->refused_data != NULL) && (tcplen > 0))) { + /* pcb has been aborted or refused data is still refused and the new + segment contains data */ + if (pcb->rcv_ann_wnd == 0) { + /* this is a zero-window probe, we respond to it with current RCV.NXT + and drop the data segment */ + tcp_send_empty_ack(pcb); + } + TCP_STATS_INC(tcp.drop); + MIB2_STATS_INC(mib2.tcpinerrs); + goto aborted; + } + } + tcp_input_pcb = pcb; + err = tcp_process(pcb); + /* A return value of ERR_ABRT means that tcp_abort() was called + and that the pcb has been freed. If so, we don't do anything. */ + if (err != ERR_ABRT) { + if (recv_flags & TF_RESET) { + /* TF_RESET means that the connection was reset by the other + end. We then call the error callback to inform the + application that the connection is dead before we + deallocate the PCB. */ + TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_RST); + tcp_pcb_remove(&tcp_active_pcbs, pcb); + memp_free(MEMP_TCP_PCB, pcb); + } else { + err = ERR_OK; + /* If the application has registered a "sent" function to be + called when new send buffer space is available, we call it + now. */ + if (recv_acked > 0) { + u16_t acked16; +#if LWIP_WND_SCALE + /* recv_acked is u32_t but the sent callback only takes a u16_t, + so we might have to call it multiple times. 
*/
*/ + u32_t acked = recv_acked; + while (acked > 0) { + acked16 = (u16_t)LWIP_MIN(acked, 0xffffu); + acked -= acked16; +#else + { + acked16 = recv_acked; +#endif + TCP_EVENT_SENT(pcb, (u16_t)acked16, err); + if (err == ERR_ABRT) { + goto aborted; + } + } + recv_acked = 0; + } + if (tcp_input_delayed_close(pcb)) { + goto aborted; + } +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + while (recv_data != NULL) { + struct pbuf *rest = NULL; + pbuf_split_64k(recv_data, &rest); +#else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + if (recv_data != NULL) { +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + + LWIP_ASSERT("pcb->refused_data == NULL", pcb->refused_data == NULL); + if (pcb->flags & TF_RXCLOSED) { + /* received data although already closed -> abort (send RST) to + notify the remote host that not all data has been processed */ + pbuf_free(recv_data); +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_free(rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + tcp_abort(pcb); + goto aborted; + } + + /* Notify application that data has been received. */ + TCP_EVENT_RECV(pcb, recv_data, ERR_OK, err); + if (err == ERR_ABRT) { +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_free(rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + goto aborted; + } + + /* If the upper layer can't receive this data, store it */ + if (err != ERR_OK) { +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_cat(recv_data, rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = recv_data; + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: keep incoming packet, because pcb is \"full\"\n")); +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + break; + } else { + /* Upper layer received the data, go on with the rest if > 64K */ + recv_data = rest; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + } + } + + /* If a FIN segment was received, we call the callback + function with a NULL buffer to indicate EOF. */ + if (recv_flags & TF_GOT_FIN) { + if (pcb->refused_data != NULL) { + /* Delay this if we have refused data. */ + pcb->refused_data->flags |= PBUF_FLAG_TCP_FIN; + } else { + /* correct rcv_wnd as the application won't call tcp_recved() + for the FIN's seqno */ + if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) { + pcb->rcv_wnd++; + } + TCP_EVENT_CLOSED(pcb, err); + if (err == ERR_ABRT) { + goto aborted; + } + } + } + + tcp_input_pcb = NULL; + if (tcp_input_delayed_close(pcb)) { + goto aborted; + } + /* Try to send something out. */ + tcp_output(pcb); +#if TCP_INPUT_DEBUG +#if TCP_DEBUG + tcp_debug_print_state(pcb->state); +#endif /* TCP_DEBUG */ +#endif /* TCP_INPUT_DEBUG */ + } + } + /* Jump target if pcb has been aborted in a callback (by calling tcp_abort()). + Below this line, 'pcb' may not be dereferenced! */ +aborted: + tcp_input_pcb = NULL; + recv_data = NULL; + + /* give up our reference to inseg.p */ + if (inseg.p != NULL) + { + pbuf_free(inseg.p); + inseg.p = NULL; + } + } else { + + /* If no matching PCB was found, send a TCP RST (reset) to the + sender. 
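+     No reset is sent in reply to a segment that itself carries RST.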
*/ + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_input: no PCB match found, resetting.\n")); + if (!(TCPH_FLAGS(tcphdr) & TCP_RST)) { + TCP_STATS_INC(tcp.proterr); + TCP_STATS_INC(tcp.drop); + tcp_rst(ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } + pbuf_free(p); + } + + LWIP_ASSERT("tcp_input: tcp_pcbs_sane()", tcp_pcbs_sane()); + PERF_STOP("tcp_input"); + return; +dropped: + TCP_STATS_INC(tcp.drop); + MIB2_STATS_INC(mib2.tcpinerrs); + pbuf_free(p); +} + +/** Called from tcp_input to check for the TF_CLOSED flag. This results in closing + * and deallocating a pcb at the correct place to ensure no one references it + * any more. + * @returns 1 if the pcb has been closed and deallocated, 0 otherwise + */ +static int +tcp_input_delayed_close(struct tcp_pcb *pcb) +{ + if (recv_flags & TF_CLOSED) { + /* The connection has been closed and we will deallocate the + PCB. */ + if (!(pcb->flags & TF_RXCLOSED)) { + /* Connection closed although the application has only shut down the + tx side: call the PCB's err callback and indicate the closure to + ensure the application doesn't continue using the PCB. */ + TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_CLSD); + } + tcp_pcb_remove(&tcp_active_pcbs, pcb); + memp_free(MEMP_TCP_PCB, pcb); + return 1; + } + return 0; +} + +/** + * Called by tcp_input() when a segment arrives for a listening + * connection. + * + * @param pcb the tcp_pcb_listen for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static void +tcp_listen_input(struct tcp_pcb_listen *pcb) +{ + struct tcp_pcb *npcb; + u32_t iss; + err_t rc; + + if (flags & TCP_RST) { + /* An incoming RST should be ignored. Return. */ + return; + } + + /* In the LISTEN state, we check for incoming SYN segments, + create a new PCB, and respond with a SYN|ACK. */ + if (flags & TCP_ACK) { + /* For incoming segments with the ACK flag set, respond with a + RST. */ + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_listen_input: ACK in LISTEN, sending reset\n")); + tcp_rst(ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } else if (flags & TCP_SYN) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection request %"U16_F" -> %"U16_F".\n", tcphdr->src, tcphdr->dest)); +#if TCP_LISTEN_BACKLOG + if (pcb->accepts_pending >= pcb->backlog) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: listen backlog exceeded for port %"U16_F"\n", tcphdr->dest)); + return; + } +#endif /* TCP_LISTEN_BACKLOG */ + npcb = tcp_alloc(pcb->prio); + /* If a new PCB could not be created (probably due to lack of memory), + we don't do anything, but rely on the sender retransmitting the + SYN at a time when we have more memory available. */ + if (npcb == NULL) { + err_t err; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: could not allocate PCB\n")); + TCP_STATS_INC(tcp.memerr); + TCP_EVENT_ACCEPT(pcb, NULL, pcb->callback_arg, ERR_MEM, err); + LWIP_UNUSED_ARG(err); /* err not useful here */ + return; + } +#if TCP_LISTEN_BACKLOG + pcb->accepts_pending++; + npcb->flags |= TF_BACKLOGPEND; +#endif /* TCP_LISTEN_BACKLOG */ + /* Set up the new PCB. 
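*/

/* tcp_listen_input() above runs for segments that matched a listening
   pcb. On the application side such a pcb comes from the raw API,
   roughly as in this sketch (error handling trimmed; port 7 and the
   backlog of 4 are example values): */
static err_t example_accept_cb(void *arg, struct tcp_pcb *newpcb, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  if ((err != ERR_OK) || (newpcb == NULL)) {
    return ERR_VAL;
  }
  tcp_recv(newpcb, example_recv_cb);   /* see the recv sketch above */
  return ERR_OK;
}

static void example_start_listener(void)
{
  struct tcp_pcb *lpcb = tcp_new();
  if (lpcb == NULL) {
    return;
  }
  if (tcp_bind(lpcb, IP_ADDR_ANY, 7) != ERR_OK) {
    return;
  }
  /* the backlog passed here bounds accepts_pending as checked above */
  lpcb = tcp_listen_with_backlog(lpcb, 4);
  tcp_accept(lpcb, example_accept_cb);
}

/*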
*/ + ip_addr_copy(npcb->local_ip, *ip_current_dest_addr()); + ip_addr_copy(npcb->remote_ip, *ip_current_src_addr()); + npcb->local_port = pcb->local_port; + npcb->remote_port = tcphdr->src; + npcb->state = SYN_RCVD; + npcb->rcv_nxt = seqno + 1; + npcb->rcv_ann_right_edge = npcb->rcv_nxt; + iss = tcp_next_iss(npcb); + npcb->snd_wl2 = iss; + npcb->snd_nxt = iss; + npcb->lastack = iss; + npcb->snd_lbb = iss; + npcb->snd_wl1 = seqno - 1;/* initialise to seqno-1 to force window update */ + npcb->callback_arg = pcb->callback_arg; +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + npcb->listener = pcb; +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + /* inherit socket options */ + npcb->so_options = pcb->so_options & SOF_INHERITED; + /* Register the new PCB so that we can begin receiving segments + for it. */ + TCP_REG_ACTIVE(npcb); + + /* Parse any options in the SYN. */ + tcp_parseopt(npcb); + npcb->snd_wnd = tcphdr->wnd; + npcb->snd_wnd_max = npcb->snd_wnd; + +#if TCP_CALCULATE_EFF_SEND_MSS + npcb->mss = tcp_eff_send_mss(npcb->mss, &npcb->local_ip, &npcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + + MIB2_STATS_INC(mib2.tcppassiveopens); + + /* Send a SYN|ACK together with the MSS option. */ + rc = tcp_enqueue_flags(npcb, TCP_SYN | TCP_ACK); + if (rc != ERR_OK) { + tcp_abandon(npcb, 0); + return; + } + tcp_output(npcb); + } + return; +} + +/** + * Called by tcp_input() when a segment arrives for a connection in + * TIME_WAIT. + * + * @param pcb the tcp_pcb for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static void +tcp_timewait_input(struct tcp_pcb *pcb) +{ + /* RFC 1337: in TIME_WAIT, ignore RST and ACK FINs + any 'acceptable' segments */ + /* RFC 793 3.9 Event Processing - Segment Arrives: + * - first check sequence number - we skip that one in TIME_WAIT (always + * acceptable since we only send ACKs) + * - second check the RST bit (... return) */ + if (flags & TCP_RST) { + return; + } + /* - fourth, check the SYN bit, */ + if (flags & TCP_SYN) { + /* If an incoming segment is not acceptable, an acknowledgment + should be sent in reply */ + if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd)) { + /* If the SYN is in the window it is an error, send a reset */ + tcp_rst(ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + return; + } + } else if (flags & TCP_FIN) { + /* - eighth, check the FIN bit: Remain in the TIME-WAIT state. + Restart the 2 MSL time-wait timeout.*/ + pcb->tmr = tcp_ticks; + } + + if ((tcplen > 0)) { + /* Acknowledge data, FIN or out-of-window SYN */ + pcb->flags |= TF_ACK_NOW; + tcp_output(pcb); + } + return; +} + +/** + * Implements the TCP state machine. Called by tcp_input. In some + * states tcp_receive() is called to receive data. The tcp_seg + * argument will be freed by the caller (tcp_input()) unless the + * recv_data pointer in the pcb is set. + * + * @param pcb the tcp_pcb for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static err_t +tcp_process(struct tcp_pcb *pcb) +{ + struct tcp_seg *rseg; + u8_t acceptable = 0; + err_t err; + + err = ERR_OK; + + /* Process incoming RST segments. */ + if (flags & TCP_RST) { + /* First, determine if the reset is acceptable. 
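*/

/* The RST-acceptability rules applied below reduce to a small
   predicate; hedged restatement using lwIP types (in_syn_sent stands
   for pcb->state == SYN_SENT). A sequence number that is in-window
   but not an exact match is answered with a challenge ACK instead,
   as the code below shows. */
static int rst_acceptable(int in_syn_sent, u32_t ackno, u32_t snd_nxt,
                          u32_t seqno, u32_t rcv_nxt)
{
  if (in_syn_sent) {
    return ackno == snd_nxt;   /* the RST must acknowledge our SYN */
  }
  return seqno == rcv_nxt;     /* exact sequence match required */
}

/*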
*/ + if (pcb->state == SYN_SENT) { + /* "In the SYN-SENT state (a RST received in response to an initial SYN), + the RST is acceptable if the ACK field acknowledges the SYN." */ + if (ackno == pcb->snd_nxt) { + acceptable = 1; + } + } else { + /* "In all states except SYN-SENT, all reset (RST) segments are validated + by checking their SEQ-fields." */ + if (seqno == pcb->rcv_nxt) { + acceptable = 1; + } else if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, + pcb->rcv_nxt + pcb->rcv_wnd)) { + /* If the sequence number is inside the window, we only send an ACK + and wait for a re-send with matching sequence number. + This violates RFC 793, but is required to protection against + CVE-2004-0230 (RST spoofing attack). */ + tcp_ack_now(pcb); + } + } + + if (acceptable) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: Connection RESET\n")); + LWIP_ASSERT("tcp_input: pcb->state != CLOSED", pcb->state != CLOSED); + recv_flags |= TF_RESET; + pcb->flags &= ~TF_ACK_DELAY; + return ERR_RST; + } else { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n", + seqno, pcb->rcv_nxt)); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n", + seqno, pcb->rcv_nxt)); + return ERR_OK; + } + } + + if ((flags & TCP_SYN) && (pcb->state != SYN_SENT && pcb->state != SYN_RCVD)) { + /* Cope with new connection attempt after remote end crashed */ + tcp_ack_now(pcb); + return ERR_OK; + } + + if ((pcb->flags & TF_RXCLOSED) == 0) { + /* Update the PCB (in)activity timer unless rx is closed (see tcp_shutdown) */ + pcb->tmr = tcp_ticks; + } + pcb->keep_cnt_sent = 0; + + tcp_parseopt(pcb); + + /* Do different things depending on the TCP state. */ + switch (pcb->state) { + case SYN_SENT: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("SYN-SENT: ackno %"U32_F" pcb->snd_nxt %"U32_F" unacked %"U32_F"\n", ackno, + pcb->snd_nxt, lwip_ntohl(pcb->unacked->tcphdr->seqno))); + /* received SYN ACK with expected sequence number? */ + if ((flags & TCP_ACK) && (flags & TCP_SYN) + && (ackno == pcb->lastack + 1)) { + pcb->rcv_nxt = seqno + 1; + pcb->rcv_ann_right_edge = pcb->rcv_nxt; + pcb->lastack = ackno; + pcb->snd_wnd = tcphdr->wnd; + pcb->snd_wnd_max = pcb->snd_wnd; + pcb->snd_wl1 = seqno - 1; /* initialise to seqno - 1 to force window update */ + pcb->state = ESTABLISHED; + +#if TCP_CALCULATE_EFF_SEND_MSS + pcb->mss = tcp_eff_send_mss(pcb->mss, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + + pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SENT): cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + LWIP_ASSERT("pcb->snd_queuelen > 0", (pcb->snd_queuelen > 0)); + --pcb->snd_queuelen; + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_process: SYN-SENT --queuelen %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen)); + rseg = pcb->unacked; + if (rseg == NULL) { + /* might happen if tcp_output fails in tcp_rexmit_rto() + in which case the segment is on the unsent list */ + rseg = pcb->unsent; + LWIP_ASSERT("no segment to free", rseg != NULL); + pcb->unsent = rseg->next; + } else { + pcb->unacked = rseg->next; + } + tcp_seg_free(rseg); + + /* If there's nothing left to acknowledge, stop the retransmit + timer, otherwise reset it to start again */ + if (pcb->unacked == NULL) { + pcb->rtime = -1; + } else { + pcb->rtime = 0; + pcb->nrtx = 0; + } + + /* Call the user specified function to call when successfully + * connected. 
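*/

/* The TCP_EVENT_CONNECTED hook invoked below completes an active open
   that the application started with tcp_connect(). A minimal raw-API
   sketch (port 80 is an example value): */
static err_t example_connected_cb(void *arg, struct tcp_pcb *tpcb, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(tpcb);
  if (err != ERR_OK) {
    return err;
  }
  /* the pcb is now ESTABLISHED; it is safe to tcp_write() from here */
  return ERR_OK;
}

static err_t example_start_connect(struct tcp_pcb *pcb, const ip_addr_t *peer)
{
  return tcp_connect(pcb, peer, 80, example_connected_cb);
}

/*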
*/ + TCP_EVENT_CONNECTED(pcb, ERR_OK, err); + if (err == ERR_ABRT) { + return ERR_ABRT; + } + tcp_ack_now(pcb); + } + /* received ACK? possibly a half-open connection */ + else if (flags & TCP_ACK) { + /* send a RST to bring the other side in a non-synchronized state. */ + tcp_rst(ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + /* Resend SYN immediately (don't wait for rto timeout) to establish + connection faster, but do not send more SYNs than we otherwise would + have, or we might get caught in a loop on loopback interfaces. */ + if (pcb->nrtx < TCP_SYNMAXRTX) { + pcb->rtime = 0; + tcp_rexmit_rto(pcb); + } + } + break; + case SYN_RCVD: + if (flags & TCP_ACK) { + /* expected ACK number? */ + if (TCP_SEQ_BETWEEN(ackno, pcb->lastack+1, pcb->snd_nxt)) { + pcb->state = ESTABLISHED; + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection established %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG +#if LWIP_CALLBACK_API + LWIP_ASSERT("pcb->listener->accept != NULL", + (pcb->listener == NULL) || (pcb->listener->accept != NULL)); +#endif + if (pcb->listener == NULL) { + /* listen pcb might be closed by now */ + err = ERR_VAL; + } else +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + { + tcp_backlog_accepted(pcb); + /* Call the accept function. */ + TCP_EVENT_ACCEPT(pcb->listener, pcb, pcb->callback_arg, ERR_OK, err); + } + if (err != ERR_OK) { + /* If the accept function returns with an error, we abort + * the connection. */ + /* Already aborted? */ + if (err != ERR_ABRT) { + tcp_abort(pcb); + } + return ERR_ABRT; + } + /* If there was any data contained within this ACK, + * we'd better pass it on to the application as well. */ + tcp_receive(pcb); + + /* Prevent ACK for SYN to generate a sent event */ + if (recv_acked != 0) { + recv_acked--; + } + + pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SYN_RCVD): cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + + if (recv_flags & TF_GOT_FIN) { + tcp_ack_now(pcb); + pcb->state = CLOSE_WAIT; + } + } else { + /* incorrect ACK number, send RST */ + tcp_rst(ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } + } else if ((flags & TCP_SYN) && (seqno == pcb->rcv_nxt - 1)) { + /* Looks like another copy of the SYN - retransmit our SYN-ACK */ + tcp_rexmit(pcb); + } + break; + case CLOSE_WAIT: + /* FALLTHROUGH */ + case ESTABLISHED: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { /* passive close */ + tcp_ack_now(pcb); + pcb->state = CLOSE_WAIT; + } + break; + case FIN_WAIT_1: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { + if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) && + pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, + ("TCP connection closed: FIN_WAIT_1 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_ack_now(pcb); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } else { + tcp_ack_now(pcb); + pcb->state = CLOSING; + } + } else if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) && + pcb->unsent == NULL) { + pcb->state = FIN_WAIT_2; + } + break; + case FIN_WAIT_2: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: FIN_WAIT_2 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_ack_now(pcb); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = 
TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } + break; + case CLOSING: + tcp_receive(pcb); + if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: CLOSING %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } + break; + case LAST_ACK: + tcp_receive(pcb); + if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: LAST_ACK %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + /* bugfix #21699: don't set pcb->state to CLOSED here or we risk leaking segments */ + recv_flags |= TF_CLOSED; + } + break; + default: + break; + } + return ERR_OK; +} + +#if TCP_QUEUE_OOSEQ +/** + * Insert segment into the list (segments covered with new one will be deleted) + * + * Called from tcp_receive() + */ +static void +tcp_oos_insert_segment(struct tcp_seg *cseg, struct tcp_seg *next) +{ + struct tcp_seg *old_seg; + + if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) { + /* received segment overlaps all following segments */ + tcp_segs_free(next); + next = NULL; + } else { + /* delete some following segments + oos queue may have segments with FIN flag */ + while (next && + TCP_SEQ_GEQ((seqno + cseg->len), + (next->tcphdr->seqno + next->len))) { + /* cseg with FIN already processed */ + if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) { + TCPH_SET_FLAG(cseg->tcphdr, TCP_FIN); + } + old_seg = next; + next = next->next; + tcp_seg_free(old_seg); + } + if (next && + TCP_SEQ_GT(seqno + cseg->len, next->tcphdr->seqno)) { + /* We need to trim the incoming segment. */ + cseg->len = (u16_t)(next->tcphdr->seqno - seqno); + pbuf_realloc(cseg->p, cseg->len); + } + } + cseg->next = next; +} +#endif /* TCP_QUEUE_OOSEQ */ + +/** + * Called by tcp_process. Checks if the given segment is an ACK for outstanding + * data, and if so frees the memory of the buffered data. Next, it places the + * segment on any of the receive queues (pcb->recved or pcb->ooseq). If the segment + * is buffered, the pbuf is referenced by pbuf_ref so that it will not be freed until + * it has been removed from the buffer. + * + * If the incoming segment constitutes an ACK for a segment that was used for RTT + * estimation, the RTT is estimated here as well. + * + * Called from tcp_process(). + */ +static void +tcp_receive(struct tcp_pcb *pcb) +{ + struct tcp_seg *next; +#if TCP_QUEUE_OOSEQ + struct tcp_seg *prev, *cseg; +#endif /* TCP_QUEUE_OOSEQ */ + s32_t off; + s16_t m; + u32_t right_wnd_edge; + u16_t new_tot_len; + int found_dupack = 0; +#if TCP_OOSEQ_MAX_BYTES || TCP_OOSEQ_MAX_PBUFS + u32_t ooseq_blen; + u16_t ooseq_qlen; +#endif /* TCP_OOSEQ_MAX_BYTES || TCP_OOSEQ_MAX_PBUFS */ + + LWIP_ASSERT("tcp_receive: wrong state", pcb->state >= ESTABLISHED); + + if (flags & TCP_ACK) { + right_wnd_edge = pcb->snd_wnd + pcb->snd_wl2; + + /* Update window. 
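*/

/* The "Update window" step below implements the RFC 793/1122 rule:
   accept the advertised window if this segment is newer (wl1 < seq),
   or equally new and acknowledging more (wl1 == seq and wl2 < ack),
   or a pure enlargement on the same ack. TCP_SEQ_LT() from tcp_priv.h
   compares modulo 2^32 by treating the unsigned difference as signed,
   essentially (s32_t)((u32_t)a - (u32_t)b) < 0. Hedged restatement of
   the gate: */
static int wnd_update_ok(u32_t wl1, u32_t wl2, u32_t seqno, u32_t ackno,
                         tcpwnd_size_t new_wnd, tcpwnd_size_t cur_wnd)
{
  return TCP_SEQ_LT(wl1, seqno) ||
         ((wl1 == seqno) && TCP_SEQ_LT(wl2, ackno)) ||
         ((wl2 == ackno) && (new_wnd > cur_wnd));
}

/*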
*/ + if (TCP_SEQ_LT(pcb->snd_wl1, seqno) || + (pcb->snd_wl1 == seqno && TCP_SEQ_LT(pcb->snd_wl2, ackno)) || + (pcb->snd_wl2 == ackno && (u32_t)SND_WND_SCALE(pcb, tcphdr->wnd) > pcb->snd_wnd)) { + pcb->snd_wnd = SND_WND_SCALE(pcb, tcphdr->wnd); + /* keep track of the biggest window announced by the remote host to calculate + the maximum segment size */ + if (pcb->snd_wnd_max < pcb->snd_wnd) { + pcb->snd_wnd_max = pcb->snd_wnd; + } + pcb->snd_wl1 = seqno; + pcb->snd_wl2 = ackno; + if (pcb->snd_wnd == 0) { + if (pcb->persist_backoff == 0) { + /* start persist timer */ + pcb->persist_cnt = 0; + pcb->persist_backoff = 1; + } + } else if (pcb->persist_backoff > 0) { + /* stop persist timer */ + pcb->persist_backoff = 0; + } + LWIP_DEBUGF(TCP_WND_DEBUG, ("tcp_receive: window update %"TCPWNDSIZE_F"\n", pcb->snd_wnd)); +#if TCP_WND_DEBUG + } else { + if (pcb->snd_wnd != (tcpwnd_size_t)SND_WND_SCALE(pcb, tcphdr->wnd)) { + LWIP_DEBUGF(TCP_WND_DEBUG, + ("tcp_receive: no window update lastack %"U32_F" ackno %" + U32_F" wl1 %"U32_F" seqno %"U32_F" wl2 %"U32_F"\n", + pcb->lastack, ackno, pcb->snd_wl1, seqno, pcb->snd_wl2)); + } +#endif /* TCP_WND_DEBUG */ + } + + /* (From Stevens TCP/IP Illustrated Vol II, p970.) Its only a + * duplicate ack if: + * 1) It doesn't ACK new data + * 2) length of received packet is zero (i.e. no payload) + * 3) the advertised window hasn't changed + * 4) There is outstanding unacknowledged data (retransmission timer running) + * 5) The ACK is == biggest ACK sequence number so far seen (snd_una) + * + * If it passes all five, should process as a dupack: + * a) dupacks < 3: do nothing + * b) dupacks == 3: fast retransmit + * c) dupacks > 3: increase cwnd + * + * If it only passes 1-3, should reset dupack counter (and add to + * stats, which we don't do in lwIP) + * + * If it only passes 1, should reset dupack counter + * + */ + + /* Clause 1 */ + if (TCP_SEQ_LEQ(ackno, pcb->lastack)) { + /* Clause 2 */ + if (tcplen == 0) { + /* Clause 3 */ + if (pcb->snd_wl2 + pcb->snd_wnd == right_wnd_edge) { + /* Clause 4 */ + if (pcb->rtime >= 0) { + /* Clause 5 */ + if (pcb->lastack == ackno) { + found_dupack = 1; + if ((u8_t)(pcb->dupacks + 1) > pcb->dupacks) { + ++pcb->dupacks; + } + if (pcb->dupacks > 3) { + /* Inflate the congestion window, but not if it means that + the value overflows. */ + if ((tcpwnd_size_t)(pcb->cwnd + pcb->mss) > pcb->cwnd) { + pcb->cwnd += pcb->mss; + } + } else if (pcb->dupacks == 3) { + /* Do fast retransmit */ + tcp_rexmit_fast(pcb); + } + } + } + } + } + /* If Clause (1) or more is true, but not a duplicate ack, reset + * count of consecutive duplicate acks */ + if (!found_dupack) { + pcb->dupacks = 0; + } + } else if (TCP_SEQ_BETWEEN(ackno, pcb->lastack+1, pcb->snd_nxt)) { + /* We come here when the ACK acknowledges new data. */ + + /* Reset the "IN Fast Retransmit" flag, since we are no longer + in fast retransmit. Also reset the congestion window to the + slow start threshold. */ + if (pcb->flags & TF_INFR) { + pcb->flags &= ~TF_INFR; + pcb->cwnd = pcb->ssthresh; + } + + /* Reset the number of retransmissions. */ + pcb->nrtx = 0; + + /* Reset the retransmission time-out. */ + pcb->rto = (pcb->sa >> 3) + pcb->sv; + + /* Reset the fast retransmit variables. */ + pcb->dupacks = 0; + pcb->lastack = ackno; + + /* Update the congestion control variables (cwnd and + ssthresh). 
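*/

/* The block below grows cwnd per RFC 5681: exponential slow start
   while cwnd < ssthresh, then roughly one mss per round trip in
   congestion avoidance. Distilled sketch (the real code additionally
   guards against tcpwnd_size_t overflow before each increase): */
static tcpwnd_size_t next_cwnd(tcpwnd_size_t cwnd, tcpwnd_size_t ssthresh,
                               u16_t mss)
{
  if (cwnd < ssthresh) {
    return (tcpwnd_size_t)(cwnd + mss);   /* slow start */
  }
  /* congestion avoidance: cwnd += mss*mss/cwnd per ACK */
  return (tcpwnd_size_t)(cwnd + ((tcpwnd_size_t)mss * mss) / cwnd);
}

/*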
*/ + if (pcb->state >= ESTABLISHED) { + if (pcb->cwnd < pcb->ssthresh) { + if ((tcpwnd_size_t)(pcb->cwnd + pcb->mss) > pcb->cwnd) { + pcb->cwnd += pcb->mss; + } + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: slow start cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd)); + } else { + tcpwnd_size_t new_cwnd = (pcb->cwnd + pcb->mss * pcb->mss / pcb->cwnd); + if (new_cwnd > pcb->cwnd) { + pcb->cwnd = new_cwnd; + } + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: congestion avoidance cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd)); + } + } + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: ACK for %"U32_F", unacked->seqno %"U32_F":%"U32_F"\n", + ackno, + pcb->unacked != NULL? + lwip_ntohl(pcb->unacked->tcphdr->seqno): 0, + pcb->unacked != NULL? + lwip_ntohl(pcb->unacked->tcphdr->seqno) + TCP_TCPLEN(pcb->unacked): 0)); + + /* Remove segment from the unacknowledged list if the incoming + ACK acknowledges them. */ + while (pcb->unacked != NULL && + TCP_SEQ_LEQ(lwip_ntohl(pcb->unacked->tcphdr->seqno) + + TCP_TCPLEN(pcb->unacked), ackno)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: removing %"U32_F":%"U32_F" from pcb->unacked\n", + lwip_ntohl(pcb->unacked->tcphdr->seqno), + lwip_ntohl(pcb->unacked->tcphdr->seqno) + + TCP_TCPLEN(pcb->unacked))); + + next = pcb->unacked; + pcb->unacked = pcb->unacked->next; + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_receive: queuelen %"TCPWNDSIZE_F" ... ", (tcpwnd_size_t)pcb->snd_queuelen)); + LWIP_ASSERT("pcb->snd_queuelen >= pbuf_clen(next->p)", (pcb->snd_queuelen >= pbuf_clen(next->p))); + + pcb->snd_queuelen -= pbuf_clen(next->p); + recv_acked += next->len; + tcp_seg_free(next); + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("%"TCPWNDSIZE_F" (after freeing unacked)\n", (tcpwnd_size_t)pcb->snd_queuelen)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_receive: valid queue length", pcb->unacked != NULL || + pcb->unsent != NULL); + } + } + + /* If there's nothing left to acknowledge, stop the retransmit + timer, otherwise reset it to start again */ + if (pcb->unacked == NULL) { + pcb->rtime = -1; + } else { + pcb->rtime = 0; + } + + pcb->polltmr = 0; + +#if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS + if (ip_current_is_v6()) { + /* Inform neighbor reachability of forward progress. */ + nd6_reachability_hint(ip6_current_src_addr()); + } +#endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/ + } else { + /* Out of sequence ACK, didn't really ack anything */ + tcp_send_empty_ack(pcb); + } + + /* We go through the ->unsent list to see if any of the segments + on the list are acknowledged by the ACK. This may seem + strange since an "unsent" segment shouldn't be acked. The + rationale is that lwIP puts all outstanding segments on the + ->unsent list after a retransmission, so these segments may + in fact have been sent once. */ + while (pcb->unsent != NULL && + TCP_SEQ_BETWEEN(ackno, lwip_ntohl(pcb->unsent->tcphdr->seqno) + + TCP_TCPLEN(pcb->unsent), pcb->snd_nxt)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: removing %"U32_F":%"U32_F" from pcb->unsent\n", + lwip_ntohl(pcb->unsent->tcphdr->seqno), lwip_ntohl(pcb->unsent->tcphdr->seqno) + + TCP_TCPLEN(pcb->unsent))); + + next = pcb->unsent; + pcb->unsent = pcb->unsent->next; +#if TCP_OVERSIZE + if (pcb->unsent == NULL) { + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_receive: queuelen %"TCPWNDSIZE_F" ... 
", (tcpwnd_size_t)pcb->snd_queuelen)); + LWIP_ASSERT("pcb->snd_queuelen >= pbuf_clen(next->p)", (pcb->snd_queuelen >= pbuf_clen(next->p))); + /* Prevent ACK for FIN to generate a sent event */ + pcb->snd_queuelen -= pbuf_clen(next->p); + recv_acked += next->len; + tcp_seg_free(next); + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("%"TCPWNDSIZE_F" (after freeing unsent)\n", (tcpwnd_size_t)pcb->snd_queuelen)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_receive: valid queue length", + pcb->unacked != NULL || pcb->unsent != NULL); + } + } + pcb->snd_buf += recv_acked; + /* End of ACK for new data processing. */ + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: pcb->rttest %"U32_F" rtseq %"U32_F" ackno %"U32_F"\n", + pcb->rttest, pcb->rtseq, ackno)); + + /* RTT estimation calculations. This is done by checking if the + incoming segment acknowledges the segment we use to take a + round-trip time measurement. */ + if (pcb->rttest && TCP_SEQ_LT(pcb->rtseq, ackno)) { + /* diff between this shouldn't exceed 32K since this are tcp timer ticks + and a round-trip shouldn't be that long... */ + m = (s16_t)(tcp_ticks - pcb->rttest); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: experienced rtt %"U16_F" ticks (%"U16_F" msec).\n", + m, (u16_t)(m * TCP_SLOW_INTERVAL))); + + /* This is taken directly from VJs original code in his paper */ + m = m - (pcb->sa >> 3); + pcb->sa += m; + if (m < 0) { + m = -m; + } + m = m - (pcb->sv >> 2); + pcb->sv += m; + pcb->rto = (pcb->sa >> 3) + pcb->sv; + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: RTO %"U16_F" (%"U16_F" milliseconds)\n", + pcb->rto, (u16_t)(pcb->rto * TCP_SLOW_INTERVAL))); + + pcb->rttest = 0; + } + } + + /* If the incoming segment contains data, we must process it + further unless the pcb already received a FIN. + (RFC 793, chapter 3.9, "SEGMENT ARRIVES" in states CLOSE-WAIT, CLOSING, + LAST-ACK and TIME-WAIT: "Ignore the segment text.") */ + if ((tcplen > 0) && (pcb->state < CLOSE_WAIT)) { + /* This code basically does three things: + + +) If the incoming segment contains data that is the next + in-sequence data, this data is passed to the application. This + might involve trimming the first edge of the data. The rcv_nxt + variable and the advertised window are adjusted. + + +) If the incoming segment has data that is above the next + sequence number expected (->rcv_nxt), the segment is placed on + the ->ooseq queue. This is done by finding the appropriate + place in the ->ooseq queue (which is ordered by sequence + number) and trim the segment in both ends if needed. An + immediate ACK is sent to indicate that we received an + out-of-sequence segment. + + +) Finally, we check if the first segment on the ->ooseq queue + now is in sequence (i.e., if rcv_nxt >= ooseq->seqno). If + rcv_nxt > ooseq->seqno, we must trim the first edge of the + segment on ->ooseq before we adjust rcv_nxt. The data in the + segments that are now on sequence are chained onto the + incoming segment so that we only need to call the application + once. + */ + + /* First, we check if we must trim the first edge. We have to do + this if the sequence number of the incoming segment is less + than rcv_nxt, and the sequence number plus the length of the + segment is larger than rcv_nxt. */ + /* if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) { + if (TCP_SEQ_LT(pcb->rcv_nxt, seqno + tcplen)) {*/ + if (TCP_SEQ_BETWEEN(pcb->rcv_nxt, seqno + 1, seqno + tcplen - 1)) { + /* Trimming the first edge is done by pushing the payload + pointer in the pbuf downwards. 
This is somewhat tricky since + we do not want to discard the full contents of the pbuf up to + the new starting point of the data since we have to keep the + TCP header which is present in the first pbuf in the chain. + + What is done is really quite a nasty hack: the first pbuf in + the pbuf chain is pointed to by inseg.p. Since we need to be + able to deallocate the whole pbuf, we cannot change this + inseg.p pointer to point to any of the later pbufs in the + chain. Instead, we point the ->payload pointer in the first + pbuf to data in one of the later pbufs. We also set the + inseg.data pointer to point to the right place. This way, the + ->p pointer will still point to the first pbuf, but the + ->p->payload pointer will point to data in another pbuf. + + After we are done with adjusting the pbuf pointers we must + adjust the ->data pointer in the seg and the segment + length.*/ + + struct pbuf *p = inseg.p; + off = pcb->rcv_nxt - seqno; + LWIP_ASSERT("inseg.p != NULL", inseg.p); + LWIP_ASSERT("insane offset!", (off < 0x7fff)); + if (inseg.p->len < off) { + LWIP_ASSERT("pbuf too short!", (((s32_t)inseg.p->tot_len) >= off)); + new_tot_len = (u16_t)(inseg.p->tot_len - off); + while (p->len < off) { + off -= p->len; + /* KJM following line changed (with addition of new_tot_len var) + to fix bug #9076 + inseg.p->tot_len -= p->len; */ + p->tot_len = new_tot_len; + p->len = 0; + p = p->next; + } + if (pbuf_header(p, (s16_t)-off)) { + /* Do we need to cope with this failing? Assert for now */ + LWIP_ASSERT("pbuf_header failed", 0); + } + } else { + if (pbuf_header(inseg.p, (s16_t)-off)) { + /* Do we need to cope with this failing? Assert for now */ + LWIP_ASSERT("pbuf_header failed", 0); + } + } + inseg.len -= (u16_t)(pcb->rcv_nxt - seqno); + inseg.tcphdr->seqno = seqno = pcb->rcv_nxt; + } + else { + if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) { + /* the whole segment is < rcv_nxt */ + /* must be a duplicate of a packet that has already been correctly handled */ + + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: duplicate seqno %"U32_F"\n", seqno)); + tcp_ack_now(pcb); + } + } + + /* The sequence number must be within the window (above rcv_nxt + and below rcv_nxt + rcv_wnd) in order to be further + processed. */ + if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, + pcb->rcv_nxt + pcb->rcv_wnd - 1)) { + if (pcb->rcv_nxt == seqno) { + /* The incoming segment is the next in sequence. We check if + we have to trim the end of the segment and update rcv_nxt + and pass the data to the application. */ + tcplen = TCP_TCPLEN(&inseg); + + if (tcplen > pcb->rcv_wnd) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: other end overran receive window" + "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n", + seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd)); + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + /* Must remove the FIN from the header as we're trimming + * that byte of sequence-space from the packet */ + TCPH_FLAGS_SET(inseg.tcphdr, TCPH_FLAGS(inseg.tcphdr) & ~(unsigned int)TCP_FIN); + } + /* Adjust length of segment to fit in the window. 
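*/

/* The first-edge trim above leans on pbuf_header() moving the payload
   pointer inside the buffer instead of copying. Toy sketch of hiding
   'off' leading bytes (lwIP 2.0-era call; newer trees spell this
   pbuf_remove_header()): */
static void trim_front(struct pbuf *p, u16_t off)
{
  if (pbuf_header(p, (s16_t)-(s16_t)off) != 0) {
    /* 'off' did not fit inside this pbuf; the code above handles that
       case by walking the chain and zeroing the lengths it skips */
  }
}

/*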
*/ + TCPWND_CHECK16(pcb->rcv_wnd); + inseg.len = (u16_t)pcb->rcv_wnd; + if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) { + inseg.len -= 1; + } + pbuf_realloc(inseg.p, inseg.len); + tcplen = TCP_TCPLEN(&inseg); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n", + (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd)); + } +#if TCP_QUEUE_OOSEQ + /* Received in-sequence data, adjust ooseq data if: + - FIN has been received or + - inseq overlaps with ooseq */ + if (pcb->ooseq != NULL) { + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: received in-order FIN, binning ooseq queue\n")); + /* Received in-order FIN means anything that was received + * out of order must now have been received in-order, so + * bin the ooseq queue */ + while (pcb->ooseq != NULL) { + struct tcp_seg *old_ooseq = pcb->ooseq; + pcb->ooseq = pcb->ooseq->next; + tcp_seg_free(old_ooseq); + } + } else { + next = pcb->ooseq; + /* Remove all segments on ooseq that are covered by inseg already. + * FIN is copied from ooseq to inseg if present. */ + while (next && + TCP_SEQ_GEQ(seqno + tcplen, + next->tcphdr->seqno + next->len)) { + /* inseg cannot have FIN here (already processed above) */ + if ((TCPH_FLAGS(next->tcphdr) & TCP_FIN) != 0 && + (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) == 0) { + TCPH_SET_FLAG(inseg.tcphdr, TCP_FIN); + tcplen = TCP_TCPLEN(&inseg); + } + prev = next; + next = next->next; + tcp_seg_free(prev); + } + /* Now trim right side of inseg if it overlaps with the first + * segment on ooseq */ + if (next && + TCP_SEQ_GT(seqno + tcplen, + next->tcphdr->seqno)) { + /* inseg cannot have FIN here (already processed above) */ + inseg.len = (u16_t)(next->tcphdr->seqno - seqno); + if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) { + inseg.len -= 1; + } + pbuf_realloc(inseg.p, inseg.len); + tcplen = TCP_TCPLEN(&inseg); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to ooseq queue\n", + (seqno + tcplen) == next->tcphdr->seqno); + } + pcb->ooseq = next; + } + } +#endif /* TCP_QUEUE_OOSEQ */ + + pcb->rcv_nxt = seqno + tcplen; + + /* Update the receiver's (our) window. */ + LWIP_ASSERT("tcp_receive: tcplen > rcv_wnd\n", pcb->rcv_wnd >= tcplen); + pcb->rcv_wnd -= tcplen; + + tcp_update_rcv_ann_wnd(pcb); + + /* If there is data in the segment, we make preparations to + pass this up to the application. The ->recv_data variable + is used for holding the pbuf that goes to the + application. The code for reassembling out-of-sequence data + chains its data on this pbuf as well. + + If the segment was a FIN, we set the TF_GOT_FIN flag that will + be used to indicate to the application that the remote side has + closed its end of the connection. */ + if (inseg.p->tot_len > 0) { + recv_data = inseg.p; + /* Since this pbuf now is the responsibility of the + application, we delete our reference to it so that we won't + (mistakingly) deallocate it. */ + inseg.p = NULL; + } + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: received FIN.\n")); + recv_flags |= TF_GOT_FIN; + } + +#if TCP_QUEUE_OOSEQ + /* We now check if we have segments on the ->ooseq queue that + are now in sequence. 
*/ + while (pcb->ooseq != NULL && + pcb->ooseq->tcphdr->seqno == pcb->rcv_nxt) { + + cseg = pcb->ooseq; + seqno = pcb->ooseq->tcphdr->seqno; + + pcb->rcv_nxt += TCP_TCPLEN(cseg); + LWIP_ASSERT("tcp_receive: ooseq tcplen > rcv_wnd\n", + pcb->rcv_wnd >= TCP_TCPLEN(cseg)); + pcb->rcv_wnd -= TCP_TCPLEN(cseg); + + tcp_update_rcv_ann_wnd(pcb); + + if (cseg->p->tot_len > 0) { + /* Chain this pbuf onto the pbuf that we will pass to + the application. */ + /* With window scaling, this can overflow recv_data->tot_len, but + that's not a problem since we explicitly fix that before passing + recv_data to the application. */ + if (recv_data) { + pbuf_cat(recv_data, cseg->p); + } else { + recv_data = cseg->p; + } + cseg->p = NULL; + } + if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: dequeued FIN.\n")); + recv_flags |= TF_GOT_FIN; + if (pcb->state == ESTABLISHED) { /* force passive close or we can move to active close */ + pcb->state = CLOSE_WAIT; + } + } + + pcb->ooseq = cseg->next; + tcp_seg_free(cseg); + } +#endif /* TCP_QUEUE_OOSEQ */ + + + /* Acknowledge the segment(s). */ + tcp_ack(pcb); + +#if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS + if (ip_current_is_v6()) { + /* Inform neighbor reachability of forward progress. */ + nd6_reachability_hint(ip6_current_src_addr()); + } +#endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/ + + } else { + /* We get here if the incoming segment is out-of-sequence. */ + tcp_send_empty_ack(pcb); +#if TCP_QUEUE_OOSEQ + /* We queue the segment on the ->ooseq queue. */ + if (pcb->ooseq == NULL) { + pcb->ooseq = tcp_seg_copy(&inseg); + } else { + /* If the queue is not empty, we walk through the queue and + try to find a place where the sequence number of the + incoming segment is between the sequence numbers of the + previous and the next segment on the ->ooseq queue. That is + the place where we put the incoming segment. If needed, we + trim the second edges of the previous and the incoming + segment so that it will fit into the sequence. + + If the incoming segment has the same sequence number as a + segment on the ->ooseq queue, we discard the segment that + contains less data. */ + + prev = NULL; + for (next = pcb->ooseq; next != NULL; next = next->next) { + if (seqno == next->tcphdr->seqno) { + /* The sequence number of the incoming segment is the + same as the sequence number of the segment on + ->ooseq. We check the lengths to see which one to + discard. */ + if (inseg.len > next->len) { + /* The incoming segment is larger than the old + segment. We replace some segments with the new + one. */ + cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + if (prev != NULL) { + prev->next = cseg; + } else { + pcb->ooseq = cseg; + } + tcp_oos_insert_segment(cseg, next); + } + break; + } else { + /* Either the lengths are the same or the incoming + segment was smaller than the old one; in either + case, we ditch the incoming segment. */ + break; + } + } else { + if (prev == NULL) { + if (TCP_SEQ_LT(seqno, next->tcphdr->seqno)) { + /* The sequence number of the incoming segment is lower + than the sequence number of the first segment on the + queue. We put the incoming segment first on the + queue. 
*/ + cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + pcb->ooseq = cseg; + tcp_oos_insert_segment(cseg, next); + } + break; + } + } else { + /*if (TCP_SEQ_LT(prev->tcphdr->seqno, seqno) && + TCP_SEQ_LT(seqno, next->tcphdr->seqno)) {*/ + if (TCP_SEQ_BETWEEN(seqno, prev->tcphdr->seqno+1, next->tcphdr->seqno-1)) { + /* The sequence number of the incoming segment is in + between the sequence numbers of the previous and + the next segment on ->ooseq. We trim trim the previous + segment, delete next segments that included in received segment + and trim received, if needed. */ + cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + if (TCP_SEQ_GT(prev->tcphdr->seqno + prev->len, seqno)) { + /* We need to trim the prev segment. */ + prev->len = (u16_t)(seqno - prev->tcphdr->seqno); + pbuf_realloc(prev->p, prev->len); + } + prev->next = cseg; + tcp_oos_insert_segment(cseg, next); + } + break; + } + } + /* If the "next" segment is the last segment on the + ooseq queue, we add the incoming segment to the end + of the list. */ + if (next->next == NULL && + TCP_SEQ_GT(seqno, next->tcphdr->seqno)) { + if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) { + /* segment "next" already contains all data */ + break; + } + next->next = tcp_seg_copy(&inseg); + if (next->next != NULL) { + if (TCP_SEQ_GT(next->tcphdr->seqno + next->len, seqno)) { + /* We need to trim the last segment. */ + next->len = (u16_t)(seqno - next->tcphdr->seqno); + pbuf_realloc(next->p, next->len); + } + /* check if the remote side overruns our receive window */ + if (TCP_SEQ_GT((u32_t)tcplen + seqno, pcb->rcv_nxt + (u32_t)pcb->rcv_wnd)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: other end overran receive window" + "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n", + seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd)); + if (TCPH_FLAGS(next->next->tcphdr) & TCP_FIN) { + /* Must remove the FIN from the header as we're trimming + * that byte of sequence-space from the packet */ + TCPH_FLAGS_SET(next->next->tcphdr, TCPH_FLAGS(next->next->tcphdr) & ~TCP_FIN); + } + /* Adjust length of segment to fit in the window. */ + next->next->len = (u16_t)(pcb->rcv_nxt + pcb->rcv_wnd - seqno); + pbuf_realloc(next->next->p, next->next->len); + tcplen = TCP_TCPLEN(next->next); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n", + (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd)); + } + } + break; + } + } + prev = next; + } + } +#if TCP_OOSEQ_MAX_BYTES || TCP_OOSEQ_MAX_PBUFS + /* Check that the data on ooseq doesn't exceed one of the limits + and throw away everything above that limit. */ + ooseq_blen = 0; + ooseq_qlen = 0; + prev = NULL; + for (next = pcb->ooseq; next != NULL; prev = next, next = next->next) { + struct pbuf *p = next->p; + ooseq_blen += p->tot_len; + ooseq_qlen += pbuf_clen(p); + if ((ooseq_blen > TCP_OOSEQ_MAX_BYTES) || + (ooseq_qlen > TCP_OOSEQ_MAX_PBUFS)) { + /* too much ooseq data, dump this and everything after it */ + tcp_segs_free(next); + if (prev == NULL) { + /* first ooseq segment is too much, dump the whole queue */ + pcb->ooseq = NULL; + } else { + /* just dump 'next' and everything after it */ + prev->next = NULL; + } + break; + } + } +#endif /* TCP_OOSEQ_MAX_BYTES || TCP_OOSEQ_MAX_PBUFS */ +#endif /* TCP_QUEUE_OOSEQ */ + } + } else { + /* The incoming segment is not within the window. */ + tcp_send_empty_ack(pcb); + } + } else { + /* Segments with length 0 is taken care of here. Segments that + fall out of the window are ACKed. 
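*/

/* The cap enforced above is configured in lwipopts.h. Illustrative
   values only; size them to the target's RAM budget:

     #define TCP_QUEUE_OOSEQ      1     keep out-of-order segments
     #define TCP_OOSEQ_MAX_BYTES  2048  cap on buffered ooseq bytes
     #define TCP_OOSEQ_MAX_PBUFS  8     cap on buffered ooseq pbufs

   Anything beyond the cap is simply dropped; the peer retransmits it
   once rcv_nxt catches up, so only throughput is affected. */

/*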
*/ + if (!TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd - 1)) { + tcp_ack_now(pcb); + } + } +} + +static u8_t +tcp_getoptbyte(void) +{ + if ((tcphdr_opt2 == NULL) || (tcp_optidx < tcphdr_opt1len)) { + u8_t* opts = (u8_t *)tcphdr + TCP_HLEN; + return opts[tcp_optidx++]; + } else { + u8_t idx = (u8_t)(tcp_optidx++ - tcphdr_opt1len); + return tcphdr_opt2[idx]; + } +} + +/** + * Parses the options contained in the incoming segment. + * + * Called from tcp_listen_input() and tcp_process(). + * Currently, only the MSS option is supported! + * + * @param pcb the tcp_pcb for which a segment arrived + */ +static void +tcp_parseopt(struct tcp_pcb *pcb) +{ + u8_t data; + u16_t mss; +#if LWIP_TCP_TIMESTAMPS + u32_t tsval; +#endif + + /* Parse the TCP MSS option, if present. */ + if (tcphdr_optlen != 0) { + for (tcp_optidx = 0; tcp_optidx < tcphdr_optlen; ) { + u8_t opt = tcp_getoptbyte(); + switch (opt) { + case LWIP_TCP_OPT_EOL: + /* End of options. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: EOL\n")); + return; + case LWIP_TCP_OPT_NOP: + /* NOP option. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: NOP\n")); + break; + case LWIP_TCP_OPT_MSS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: MSS\n")); + if (tcp_getoptbyte() != LWIP_TCP_OPT_LEN_MSS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_MSS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* An MSS option with the right option length. */ + mss = (tcp_getoptbyte() << 8); + mss |= tcp_getoptbyte(); + /* Limit the mss to the configured TCP_MSS and prevent division by zero */ + pcb->mss = ((mss > TCP_MSS) || (mss == 0)) ? TCP_MSS : mss; + break; +#if LWIP_WND_SCALE + case LWIP_TCP_OPT_WS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: WND_SCALE\n")); + if (tcp_getoptbyte() != LWIP_TCP_OPT_LEN_WS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_WS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* An WND_SCALE option with the right option length. */ + data = tcp_getoptbyte(); + /* If syn was received with wnd scale option, + activate wnd scale opt, but only if this is not a retransmission */ + if ((flags & TCP_SYN) && !(pcb->flags & TF_WND_SCALE)) { + pcb->snd_scale = data; + if (pcb->snd_scale > 14U) { + pcb->snd_scale = 14U; + } + pcb->rcv_scale = TCP_RCV_SCALE; + pcb->flags |= TF_WND_SCALE; + /* window scaling is enabled, we can use the full receive window */ + LWIP_ASSERT("window not at default value", pcb->rcv_wnd == TCPWND_MIN16(TCP_WND)); + LWIP_ASSERT("window not at default value", pcb->rcv_ann_wnd == TCPWND_MIN16(TCP_WND)); + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCP_WND; + } + break; +#endif +#if LWIP_TCP_TIMESTAMPS + case LWIP_TCP_OPT_TS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: TS\n")); + if (tcp_getoptbyte() != LWIP_TCP_OPT_LEN_TS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_TS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* TCP timestamp option with valid length */ + tsval = tcp_getoptbyte(); + tsval |= (tcp_getoptbyte() << 8); + tsval |= (tcp_getoptbyte() << 16); + tsval |= (tcp_getoptbyte() << 24); + if (flags & TCP_SYN) { + pcb->ts_recent = lwip_ntohl(tsval); + /* Enable sending timestamps in every segment now that we know + the remote host supports it. 
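*/

/* tcp_parseopt() walks the standard TCP option encoding: kind 0 (EOL)
   and kind 1 (NOP) are single bytes; every other option is kind,
   length, then length-2 data bytes. Self-contained sketch of that
   walk over a flat buffer: */
static void walk_tcp_options(const u8_t *opts, u16_t optlen)
{
  u16_t i = 0;
  while (i < optlen) {
    u8_t kind = opts[i];
    if (kind == 0) {                 /* EOL: end of option list */
      return;
    }
    if (kind == 1) {                 /* NOP: one byte of padding */
      i++;
      continue;
    }
    if (((u16_t)(i + 1) >= optlen) || (opts[i + 1] < 2)) {
      return;                        /* truncated or malformed length */
    }
    i = (u16_t)(i + opts[i + 1]);    /* skip kind + len + data */
  }
}

/*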
*/ + pcb->flags |= TF_TIMESTAMP; + } else if (TCP_SEQ_BETWEEN(pcb->ts_lastacksent, seqno, seqno+tcplen)) { + pcb->ts_recent = lwip_ntohl(tsval); + } + /* Advance to next option (6 bytes already read) */ + tcp_optidx += LWIP_TCP_OPT_LEN_TS - 6; + break; +#endif + default: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: other\n")); + data = tcp_getoptbyte(); + if (data < 2) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + /* If the length field is zero, the options are malformed + and we don't process them further. */ + return; + } + /* All other options have a length field, so that we easily + can skip past them. */ + tcp_optidx += data - 2; + } + } + } +} + +void +tcp_trigger_input_pcb_close(void) +{ + recv_flags |= TF_CLOSED; +} + +#endif /* LWIP_TCP */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/tcp_out.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/tcp_out.c new file mode 100644 index 000000000..babc67fb9 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/tcp_out.c @@ -0,0 +1,1671 @@ +/** + * @file + * Transmission Control Protocol, outgoing traffic + * + * The output functions of TCP. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcp_priv.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/inet_chksum.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#if LWIP_TCP_TIMESTAMPS +#include "lwip/sys.h" +#endif + +#include + +/* Define some copy-macros for checksum-on-copy so that the code looks + nicer by preventing too many ifdef's. 
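*/

/* The copy macros below feed incremental checksums into
   tcp_seg_add_chksum(). The core operation is the one's-complement
   fold that lwIP spells FOLD_U32T; sketch with an illustrative name: */
static u16_t fold_u32_sketch(u32_t sum)
{
  sum = (sum >> 16) + (sum & 0xffffUL);  /* fold carries back in */
  sum = (sum >> 16) + (sum & 0xffffUL);  /* second pass for the last carry */
  return (u16_t)sum;
}

/*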
*/ +#if TCP_CHECKSUM_ON_COPY +#define TCP_DATA_COPY(dst, src, len, seg) do { \ + tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), \ + len, &seg->chksum, &seg->chksum_swapped); \ + seg->flags |= TF_SEG_DATA_CHECKSUMMED; } while(0) +#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) \ + tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), len, chksum, chksum_swapped); +#else /* TCP_CHECKSUM_ON_COPY*/ +#define TCP_DATA_COPY(dst, src, len, seg) MEMCPY(dst, src, len) +#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) MEMCPY(dst, src, len) +#endif /* TCP_CHECKSUM_ON_COPY*/ + +/** Define this to 1 for an extra check that the output checksum is valid + * (useful when the checksum is generated by the application, not the stack) */ +#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK +#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK 0 +#endif +/* Allows overriding the sanity-check failure from a warning to e.g. a hard failure */ +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK +#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL +#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL(msg) LWIP_DEBUGF(TCP_DEBUG | LWIP_DBG_LEVEL_WARNING, msg) +#endif +#endif + +#if TCP_OVERSIZE +/** The size of segment pbufs created when TCP_OVERSIZE is enabled */ +#ifndef TCP_OVERSIZE_CALC_LENGTH +#define TCP_OVERSIZE_CALC_LENGTH(length) ((length) + TCP_OVERSIZE) +#endif +#endif + +/* Forward declarations. */ +static err_t tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif); + +/** Allocate a pbuf and create a tcphdr at p->payload, used for output + * functions other than the default tcp_output -> tcp_output_segment + * (e.g. tcp_send_empty_ack, etc.) + * + * @param pcb tcp pcb for which to send a packet (used to initialize tcp_hdr) + * @param optlen length of header-options + * @param datalen length of tcp data to reserve in pbuf + * @param seqno_be seqno in network byte order (big-endian) + * @return pbuf with p->payload being the tcp_hdr + */ +static struct pbuf * +tcp_output_alloc_header(struct tcp_pcb *pcb, u16_t optlen, u16_t datalen, + u32_t seqno_be /* already in network byte order */) +{ + struct tcp_hdr *tcphdr; + struct pbuf *p = pbuf_alloc(PBUF_IP, TCP_HLEN + optlen + datalen, PBUF_RAM); + if (p != NULL) { + LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr", + (p->len >= TCP_HLEN + optlen)); + tcphdr = (struct tcp_hdr *)p->payload; + tcphdr->src = lwip_htons(pcb->local_port); + tcphdr->dest = lwip_htons(pcb->remote_port); + tcphdr->seqno = seqno_be; + tcphdr->ackno = lwip_htonl(pcb->rcv_nxt); + TCPH_HDRLEN_FLAGS_SET(tcphdr, (5 + optlen / 4), TCP_ACK); + tcphdr->wnd = lwip_htons(TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd))); + tcphdr->chksum = 0; + tcphdr->urgp = 0; + + /* If we're sending a packet, update the announced right window edge */ + pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd; + } + return p; +} + +/** + * Called by tcp_close() to send a segment including the FIN flag but no data. 
+ * + * @param pcb the tcp_pcb over which to send a segment + * @return ERR_OK if sent, another err_t otherwise + */ +err_t +tcp_send_fin(struct tcp_pcb *pcb) +{ + /* first, try to add the fin to the last unsent segment */ + if (pcb->unsent != NULL) { + struct tcp_seg *last_unsent; + for (last_unsent = pcb->unsent; last_unsent->next != NULL; + last_unsent = last_unsent->next); + + if ((TCPH_FLAGS(last_unsent->tcphdr) & (TCP_SYN | TCP_FIN | TCP_RST)) == 0) { + /* no SYN/FIN/RST flag in the header, we can add the FIN flag */ + TCPH_SET_FLAG(last_unsent->tcphdr, TCP_FIN); + pcb->flags |= TF_FIN; + return ERR_OK; + } + } + /* no data, no length, flags, copy=1, no optdata */ + return tcp_enqueue_flags(pcb, TCP_FIN); +} + +/** + * Create a TCP segment with prefilled header. + * + * Called by tcp_write and tcp_enqueue_flags. + * + * @param pcb Protocol control block for the TCP connection. + * @param p pbuf that is used to hold the TCP header. + * @param flags TCP flags for header. + * @param seqno TCP sequence number of this packet + * @param optflags options to include in TCP header + * @return a new tcp_seg pointing to p, or NULL. + * The TCP header is filled in except ackno and wnd. + * p is freed on failure. + */ +static struct tcp_seg * +tcp_create_segment(struct tcp_pcb *pcb, struct pbuf *p, u8_t flags, u32_t seqno, u8_t optflags) +{ + struct tcp_seg *seg; + u8_t optlen = LWIP_TCP_OPT_LENGTH(optflags); + + if ((seg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no memory.\n")); + pbuf_free(p); + return NULL; + } + seg->flags = optflags; + seg->next = NULL; + seg->p = p; + LWIP_ASSERT("p->tot_len >= optlen", p->tot_len >= optlen); + seg->len = p->tot_len - optlen; +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + seg->chksum = 0; + seg->chksum_swapped = 0; + /* check optflags */ + LWIP_ASSERT("invalid optflags passed: TF_SEG_DATA_CHECKSUMMED", + (optflags & TF_SEG_DATA_CHECKSUMMED) == 0); +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* build TCP header */ + if (pbuf_header(p, TCP_HLEN)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no room for TCP header in pbuf.\n")); + TCP_STATS_INC(tcp.err); + tcp_seg_free(seg); + return NULL; + } + seg->tcphdr = (struct tcp_hdr *)seg->p->payload; + seg->tcphdr->src = lwip_htons(pcb->local_port); + seg->tcphdr->dest = lwip_htons(pcb->remote_port); + seg->tcphdr->seqno = lwip_htonl(seqno); + /* ackno is set in tcp_output */ + TCPH_HDRLEN_FLAGS_SET(seg->tcphdr, (5 + optlen / 4), flags); + /* wnd and chksum are set in tcp_output */ + seg->tcphdr->urgp = 0; + return seg; +} + +/** + * Allocate a PBUF_RAM pbuf, perhaps with extra space at the end. + * + * This function is like pbuf_alloc(layer, length, PBUF_RAM) except + * there may be extra bytes available at the end. + * + * @param layer flag to define header size. + * @param length size of the pbuf's payload. + * @param max_length maximum usable size of payload+oversize. + * @param oversize pointer to a u16_t that will receive the number of usable tail bytes. + * @param pcb The TCP connection that will enqueue the pbuf. + * @param apiflags API flags given to tcp_write. + * @param first_seg true when this pbuf will be used in the first enqueued segment. 
+ */ +#if TCP_OVERSIZE +static struct pbuf * +tcp_pbuf_prealloc(pbuf_layer layer, u16_t length, u16_t max_length, + u16_t *oversize, struct tcp_pcb *pcb, u8_t apiflags, + u8_t first_seg) +{ + struct pbuf *p; + u16_t alloc = length; + +#if LWIP_NETIF_TX_SINGLE_PBUF + LWIP_UNUSED_ARG(max_length); + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(apiflags); + LWIP_UNUSED_ARG(first_seg); + alloc = max_length; +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + if (length < max_length) { + /* Should we allocate an oversized pbuf, or just the minimum + * length required? If tcp_write is going to be called again + * before this segment is transmitted, we want the oversized + * buffer. If the segment will be transmitted immediately, we can + * save memory by allocating only length. We use a simple + * heuristic based on the following information: + * + * Did the user set TCP_WRITE_FLAG_MORE? + * + * Will the Nagle algorithm defer transmission of this segment? + */ + if ((apiflags & TCP_WRITE_FLAG_MORE) || + (!(pcb->flags & TF_NODELAY) && + (!first_seg || + pcb->unsent != NULL || + pcb->unacked != NULL))) { + alloc = LWIP_MIN(max_length, LWIP_MEM_ALIGN_SIZE(TCP_OVERSIZE_CALC_LENGTH(length))); + } + } +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + p = pbuf_alloc(layer, alloc, PBUF_RAM); + if (p == NULL) { + return NULL; + } + LWIP_ASSERT("need unchained pbuf", p->next == NULL); + *oversize = p->len - length; + /* trim p->len to the currently used size */ + p->len = p->tot_len = length; + return p; +} +#else /* TCP_OVERSIZE */ +#define tcp_pbuf_prealloc(layer, length, mx, os, pcb, api, fst) pbuf_alloc((layer), (length), PBUF_RAM) +#endif /* TCP_OVERSIZE */ + +#if TCP_CHECKSUM_ON_COPY +/** Add a checksum of newly added data to the segment */ +static void +tcp_seg_add_chksum(u16_t chksum, u16_t len, u16_t *seg_chksum, + u8_t *seg_chksum_swapped) +{ + u32_t helper; + /* add chksum to old chksum and fold to u16_t */ + helper = chksum + *seg_chksum; + chksum = FOLD_U32T(helper); + if ((len & 1) != 0) { + *seg_chksum_swapped = 1 - *seg_chksum_swapped; + chksum = SWAP_BYTES_IN_WORD(chksum); + } + *seg_chksum = chksum; +} +#endif /* TCP_CHECKSUM_ON_COPY */ + +/** Checks if tcp_write is allowed or not (checks state, snd_buf and snd_queuelen). + * + * @param pcb the tcp pcb to check for + * @param len length of data to send (checked agains snd_buf) + * @return ERR_OK if tcp_write is allowed to proceed, another err_t otherwise + */ +static err_t +tcp_write_checks(struct tcp_pcb *pcb, u16_t len) +{ + /* connection is in invalid state for data transmission? 
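*/

/* tcp_write_checks() below guards tcp_write(), whose typical raw-API
   use looks like this sketch (error handling trimmed).
   TCP_WRITE_FLAG_COPY makes the stack copy the payload, so the buffer
   may live on the caller's stack: */
static void example_send(struct tcp_pcb *tpcb)
{
  const char msg[] = "hello";
  if (tcp_write(tpcb, msg, sizeof(msg) - 1, TCP_WRITE_FLAG_COPY) == ERR_OK) {
    tcp_output(tpcb);   /* push it out now rather than waiting */
  }
}

/*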
*/ + if ((pcb->state != ESTABLISHED) && + (pcb->state != CLOSE_WAIT) && + (pcb->state != SYN_SENT) && + (pcb->state != SYN_RCVD)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_STATE | LWIP_DBG_LEVEL_SEVERE, ("tcp_write() called in invalid state\n")); + return ERR_CONN; + } else if (len == 0) { + return ERR_OK; + } + + /* fail on too much data */ + if (len > pcb->snd_buf) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too much data (len=%"U16_F" > snd_buf=%"TCPWNDSIZE_F")\n", + len, pcb->snd_buf)); + pcb->flags |= TF_NAGLEMEMERR; + return ERR_MEM; + } + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: queuelen: %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen)); + + /* If total number of pbufs on the unsent/unacked queues exceeds the + * configured maximum, return an error */ + /* check for configured max queuelen and possible overflow */ + if ((pcb->snd_queuelen >= TCP_SND_QUEUELEN) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too long queue %"U16_F" (max %"U16_F")\n", + pcb->snd_queuelen, (u16_t)TCP_SND_QUEUELEN)); + TCP_STATS_INC(tcp.memerr); + pcb->flags |= TF_NAGLEMEMERR; + return ERR_MEM; + } + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: pbufs on queue => at least one queue non-empty", + pcb->unacked != NULL || pcb->unsent != NULL); + } else { + LWIP_ASSERT("tcp_write: no pbufs on queue => both queues empty", + pcb->unacked == NULL && pcb->unsent == NULL); + } + return ERR_OK; +} + +/** + * @ingroup tcp_raw + * Write data for sending (but does not send it immediately). + * + * It waits in the expectation of more data being sent soon (as + * it can send them more efficiently by combining them together). + * To prompt the system to send data now, call tcp_output() after + * calling tcp_write(). + * + * @param pcb Protocol control block for the TCP connection to enqueue data for. + * @param arg Pointer to the data to be enqueued for sending. + * @param len Data length in bytes + * @param apiflags combination of following flags : + * - TCP_WRITE_FLAG_COPY (0x01) data will be copied into memory belonging to the stack + * - TCP_WRITE_FLAG_MORE (0x02) for TCP connection, PSH flag will not be set on last segment sent, + * @return ERR_OK if enqueued, another err_t on error + */ +err_t +tcp_write(struct tcp_pcb *pcb, const void *arg, u16_t len, u8_t apiflags) +{ + struct pbuf *concat_p = NULL; + struct tcp_seg *last_unsent = NULL, *seg = NULL, *prev_seg = NULL, *queue = NULL; + u16_t pos = 0; /* position in 'arg' data */ + u16_t queuelen; + u8_t optlen = 0; + u8_t optflags = 0; +#if TCP_OVERSIZE + u16_t oversize = 0; + u16_t oversize_used = 0; +#if TCP_OVERSIZE_DBGCHECK + u16_t oversize_add = 0; +#endif /* TCP_OVERSIZE_DBGCHECK*/ +#endif /* TCP_OVERSIZE */ + u16_t extendlen = 0; +#if TCP_CHECKSUM_ON_COPY + u16_t concat_chksum = 0; + u8_t concat_chksum_swapped = 0; + u16_t concat_chksummed = 0; +#endif /* TCP_CHECKSUM_ON_COPY */ + err_t err; + /* don't allocate segments bigger than half the maximum window we ever received */ + u16_t mss_local = LWIP_MIN(pcb->mss, TCPWND_MIN16(pcb->snd_wnd_max/2)); + mss_local = mss_local ? 
mss_local : pcb->mss; + +#if LWIP_NETIF_TX_SINGLE_PBUF + /* Always copy to try to create single pbufs for TX */ + apiflags |= TCP_WRITE_FLAG_COPY; +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_write(pcb=%p, data=%p, len=%"U16_F", apiflags=%"U16_F")\n", + (void *)pcb, arg, len, (u16_t)apiflags)); + LWIP_ERROR("tcp_write: arg == NULL (programmer violates API)", + arg != NULL, return ERR_ARG;); + + err = tcp_write_checks(pcb, len); + if (err != ERR_OK) { + return err; + } + queuelen = pcb->snd_queuelen; + +#if LWIP_TCP_TIMESTAMPS + if ((pcb->flags & TF_TIMESTAMP)) { + /* Make sure the timestamp option is only included in data segments if we + agreed about it with the remote host. */ + optflags = TF_SEG_OPTS_TS; + optlen = LWIP_TCP_OPT_LENGTH(TF_SEG_OPTS_TS); + /* ensure that segments can hold at least one data byte... */ + mss_local = LWIP_MAX(mss_local, LWIP_TCP_OPT_LEN_TS + 1); + } +#endif /* LWIP_TCP_TIMESTAMPS */ + + + /* + * TCP segmentation is done in three phases with increasing complexity: + * + * 1. Copy data directly into an oversized pbuf. + * 2. Chain a new pbuf to the end of pcb->unsent. + * 3. Create new segments. + * + * We may run out of memory at any point. In that case we must + * return ERR_MEM and not change anything in pcb. Therefore, all + * changes are recorded in local variables and committed at the end + * of the function. Some pcb fields are maintained in local copies: + * + * queuelen = pcb->snd_queuelen + * oversize = pcb->unsent_oversize + * + * These variables are set consistently by the phases: + * + * seg points to the last segment tampered with. + * + * pos records progress as data is segmented. + */ + + /* Find the tail of the unsent queue. */ + if (pcb->unsent != NULL) { + u16_t space; + u16_t unsent_optlen; + + /* @todo: this could be sped up by keeping last_unsent in the pcb */ + for (last_unsent = pcb->unsent; last_unsent->next != NULL; + last_unsent = last_unsent->next); + + /* Usable space at the end of the last unsent segment */ + unsent_optlen = LWIP_TCP_OPT_LENGTH(last_unsent->flags); + LWIP_ASSERT("mss_local is too small", mss_local >= last_unsent->len + unsent_optlen); + space = mss_local - (last_unsent->len + unsent_optlen); + + /* + * Phase 1: Copy data directly into an oversized pbuf. + * + * The number of bytes copied is recorded in the oversize_used + * variable. The actual copying is done at the bottom of the + * function. + */ +#if TCP_OVERSIZE +#if TCP_OVERSIZE_DBGCHECK + /* check that pcb->unsent_oversize matches last_unsent->oversize_left */ + LWIP_ASSERT("unsent_oversize mismatch (pcb vs. last_unsent)", + pcb->unsent_oversize == last_unsent->oversize_left); +#endif /* TCP_OVERSIZE_DBGCHECK */ + oversize = pcb->unsent_oversize; + if (oversize > 0) { + LWIP_ASSERT("inconsistent oversize vs. space", oversize <= space); + seg = last_unsent; + oversize_used = LWIP_MIN(space, LWIP_MIN(oversize, len)); + pos += oversize_used; + oversize -= oversize_used; + space -= oversize_used; + } + /* now we are either finished or oversize is zero */ + LWIP_ASSERT("inconsistent oversize vs. len", (oversize == 0) || (pos == len)); +#endif /* TCP_OVERSIZE */ + + /* + * Phase 2: Chain a new pbuf to the end of pcb->unsent. + * + * As an exception when NOT copying the data, if the given data buffer + * directly follows the last unsent data buffer in memory, extend the last + * ROM pbuf reference to the buffer, thus saving a ROM pbuf allocation. + * + * We don't extend segments containing SYN/FIN flags or options + * (len==0). 
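+   * (A segment with len==0 exists only to carry its flags and options;
+   * in particular, data must never be queued behind a FIN.)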
The new pbuf is kept in concat_p and pbuf_cat'ed at + * the end. + */ + if ((pos < len) && (space > 0) && (last_unsent->len > 0)) { + u16_t seglen = LWIP_MIN(space, len - pos); + seg = last_unsent; + + /* Create a pbuf with a copy or reference to seglen bytes. We + * can use PBUF_RAW here since the data appears in the middle of + * a segment. A header will never be prepended. */ + if (apiflags & TCP_WRITE_FLAG_COPY) { + /* Data is copied */ + if ((concat_p = tcp_pbuf_prealloc(PBUF_RAW, seglen, space, &oversize, pcb, apiflags, 1)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", + seglen)); + goto memerr; + } +#if TCP_OVERSIZE_DBGCHECK + oversize_add = oversize; +#endif /* TCP_OVERSIZE_DBGCHECK */ + TCP_DATA_COPY2(concat_p->payload, (const u8_t*)arg + pos, seglen, &concat_chksum, &concat_chksum_swapped); +#if TCP_CHECKSUM_ON_COPY + concat_chksummed += seglen; +#endif /* TCP_CHECKSUM_ON_COPY */ + queuelen += pbuf_clen(concat_p); + } else { + /* Data is not copied */ + /* If the last unsent pbuf is of type PBUF_ROM, try to extend it. */ + struct pbuf *p; + for (p = last_unsent->p; p->next != NULL; p = p->next); + if (p->type == PBUF_ROM && (const u8_t *)p->payload + p->len == (const u8_t *)arg) { + LWIP_ASSERT("tcp_write: ROM pbufs cannot be oversized", pos == 0); + extendlen = seglen; + } else { + if ((concat_p = pbuf_alloc(PBUF_RAW, seglen, PBUF_ROM)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_write: could not allocate memory for zero-copy pbuf\n")); + goto memerr; + } + /* reference the non-volatile payload data */ + ((struct pbuf_rom*)concat_p)->payload = (const u8_t*)arg + pos; + queuelen += pbuf_clen(concat_p); + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum of nocopy-data */ + tcp_seg_add_chksum(~inet_chksum((const u8_t*)arg + pos, seglen), seglen, + &concat_chksum, &concat_chksum_swapped); + concat_chksummed += seglen; +#endif /* TCP_CHECKSUM_ON_COPY */ + } + + pos += seglen; + } + } else { +#if TCP_OVERSIZE + LWIP_ASSERT("unsent_oversize mismatch (pcb->unsent is NULL)", + pcb->unsent_oversize == 0); +#endif /* TCP_OVERSIZE */ + } + + /* + * Phase 3: Create new segments. + * + * The new segments are chained together in the local 'queue' + * variable, ready to be appended to pcb->unsent. + */ + while (pos < len) { + struct pbuf *p; + u16_t left = len - pos; + u16_t max_len = mss_local - optlen; + u16_t seglen = LWIP_MIN(left, max_len); +#if TCP_CHECKSUM_ON_COPY + u16_t chksum = 0; + u8_t chksum_swapped = 0; +#endif /* TCP_CHECKSUM_ON_COPY */ + + if (apiflags & TCP_WRITE_FLAG_COPY) { + /* If copy is set, memory should be allocated and data copied + * into pbuf */ + if ((p = tcp_pbuf_prealloc(PBUF_TRANSPORT, seglen + optlen, mss_local, &oversize, pcb, apiflags, queue == NULL)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", seglen)); + goto memerr; + } + LWIP_ASSERT("tcp_write: check that first pbuf can hold the complete seglen", + (p->len >= seglen)); + TCP_DATA_COPY2((char *)p->payload + optlen, (const u8_t*)arg + pos, seglen, &chksum, &chksum_swapped); + } else { + /* Copy is not set: First allocate a pbuf for holding the data. + * Since the referenced data is available at least until it is + * sent out on the link (as it has to be ACKed by the remote + * party) we can safely use PBUF_ROM instead of PBUF_REF here. 
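+       * (PBUF_REF marks payload that may still change and would have
+       * to be copied before it can sit on a queue; PBUF_ROM merely
+       * stores the pointer, which is fine here because the caller must
+       * keep the data valid until it has been ACKed.)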
+ */ + struct pbuf *p2; +#if TCP_OVERSIZE + LWIP_ASSERT("oversize == 0", oversize == 0); +#endif /* TCP_OVERSIZE */ + if ((p2 = pbuf_alloc(PBUF_TRANSPORT, seglen, PBUF_ROM)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for zero-copy pbuf\n")); + goto memerr; + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum of nocopy-data */ + chksum = ~inet_chksum((const u8_t*)arg + pos, seglen); + if (seglen & 1) { + chksum_swapped = 1; + chksum = SWAP_BYTES_IN_WORD(chksum); + } +#endif /* TCP_CHECKSUM_ON_COPY */ + /* reference the non-volatile payload data */ + ((struct pbuf_rom*)p2)->payload = (const u8_t*)arg + pos; + + /* Second, allocate a pbuf for the headers. */ + if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) { + /* If allocation fails, we have to deallocate the data pbuf as + * well. */ + pbuf_free(p2); + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for header pbuf\n")); + goto memerr; + } + /* Concatenate the headers and data pbufs together. */ + pbuf_cat(p/*header*/, p2/*data*/); + } + + queuelen += pbuf_clen(p); + + /* Now that there are more segments queued, we check again if the + * length of the queue exceeds the configured maximum or + * overflows. */ + if ((queuelen > TCP_SND_QUEUELEN) || (queuelen > TCP_SNDQUEUELEN_OVERFLOW)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: queue too long %"U16_F" (%d)\n", + queuelen, (int)TCP_SND_QUEUELEN)); + pbuf_free(p); + goto memerr; + } + + if ((seg = tcp_create_segment(pcb, p, 0, pcb->snd_lbb + pos, optflags)) == NULL) { + goto memerr; + } +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = oversize; +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + seg->chksum = chksum; + seg->chksum_swapped = chksum_swapped; + seg->flags |= TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* first segment of to-be-queued data? */ + if (queue == NULL) { + queue = seg; + } else { + /* Attach the segment to the end of the queued segments */ + LWIP_ASSERT("prev_seg != NULL", prev_seg != NULL); + prev_seg->next = seg; + } + /* remember last segment of to-be-queued data for next iteration */ + prev_seg = seg; + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, ("tcp_write: queueing %"U32_F":%"U32_F"\n", + lwip_ntohl(seg->tcphdr->seqno), + lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg))); + + pos += seglen; + } + + /* + * All three segmentation phases were successful. We can commit the + * transaction. + */ +#if TCP_OVERSIZE_DBGCHECK + if ((last_unsent != NULL) && (oversize_add != 0)) { + last_unsent->oversize_left += oversize_add; + } +#endif /* TCP_OVERSIZE_DBGCHECK */ + + /* + * Phase 1: If data has been added to the preallocated tail of + * last_unsent, we update the length fields of the pbuf chain. 
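+   * For example, with oversize_used == 10 the tot_len of every pbuf in
+   * the chain, the len of the tail pbuf and the segment length all
+   * grow by 10 below.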
+ */ +#if TCP_OVERSIZE + if (oversize_used > 0) { + struct pbuf *p; + /* Bump tot_len of whole chain, len of tail */ + for (p = last_unsent->p; p; p = p->next) { + p->tot_len += oversize_used; + if (p->next == NULL) { + TCP_DATA_COPY((char *)p->payload + p->len, arg, oversize_used, last_unsent); + p->len += oversize_used; + } + } + last_unsent->len += oversize_used; +#if TCP_OVERSIZE_DBGCHECK + LWIP_ASSERT("last_unsent->oversize_left >= oversize_used", + last_unsent->oversize_left >= oversize_used); + last_unsent->oversize_left -= oversize_used; +#endif /* TCP_OVERSIZE_DBGCHECK */ + } + pcb->unsent_oversize = oversize; +#endif /* TCP_OVERSIZE */ + + /* + * Phase 2: concat_p can be concatenated onto last_unsent->p, unless we + * determined that the last ROM pbuf can be extended to include the new data. + */ + if (concat_p != NULL) { + LWIP_ASSERT("tcp_write: cannot concatenate when pcb->unsent is empty", + (last_unsent != NULL)); + pbuf_cat(last_unsent->p, concat_p); + last_unsent->len += concat_p->tot_len; + } else if (extendlen > 0) { + struct pbuf *p; + LWIP_ASSERT("tcp_write: extension of reference requires reference", + last_unsent != NULL && last_unsent->p != NULL); + for (p = last_unsent->p; p->next != NULL; p = p->next) { + p->tot_len += extendlen; + } + p->tot_len += extendlen; + p->len += extendlen; + last_unsent->len += extendlen; + } + +#if TCP_CHECKSUM_ON_COPY + if (concat_chksummed) { + LWIP_ASSERT("tcp_write: concat checksum needs concatenated data", + concat_p != NULL || extendlen > 0); + /*if concat checksumm swapped - swap it back */ + if (concat_chksum_swapped) { + concat_chksum = SWAP_BYTES_IN_WORD(concat_chksum); + } + tcp_seg_add_chksum(concat_chksum, concat_chksummed, &last_unsent->chksum, + &last_unsent->chksum_swapped); + last_unsent->flags |= TF_SEG_DATA_CHECKSUMMED; + } +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* + * Phase 3: Append queue to pcb->unsent. Queue may be NULL, but that + * is harmless + */ + if (last_unsent == NULL) { + pcb->unsent = queue; + } else { + last_unsent->next = queue; + } + + /* + * Finally update the pcb state. + */ + pcb->snd_lbb += len; + pcb->snd_buf -= len; + pcb->snd_queuelen = queuelen; + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: %"S16_F" (after enqueued)\n", + pcb->snd_queuelen)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: valid queue length", + pcb->unacked != NULL || pcb->unsent != NULL); + } + + /* Set the PSH flag in the last segment that we enqueued. */ + if (seg != NULL && seg->tcphdr != NULL && ((apiflags & TCP_WRITE_FLAG_MORE)==0)) { + TCPH_SET_FLAG(seg->tcphdr, TCP_PSH); + } + + return ERR_OK; +memerr: + pcb->flags |= TF_NAGLEMEMERR; + TCP_STATS_INC(tcp.memerr); + + if (concat_p != NULL) { + pbuf_free(concat_p); + } + if (queue != NULL) { + tcp_segs_free(queue); + } + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: valid queue length", pcb->unacked != NULL || + pcb->unsent != NULL); + } + LWIP_DEBUGF(TCP_QLEN_DEBUG | LWIP_DBG_STATE, ("tcp_write: %"S16_F" (with mem err)\n", pcb->snd_queuelen)); + return ERR_MEM; +} + +/** + * Enqueue TCP options for transmission. + * + * Called by tcp_connect(), tcp_listen_input(), and tcp_send_ctrl(). + * + * @param pcb Protocol control block for the TCP connection. + * @param flags TCP header flags to set in the outgoing segment. 
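+ *
+ * Illustrative calls (the SYN case is what tcp_connect() issues; the
+ * assert below requires TCP_SYN or TCP_FIN to be present):
+ *   tcp_enqueue_flags(pcb, TCP_SYN);   enqueue a SYN segment
+ *   tcp_enqueue_flags(pcb, TCP_FIN);   enqueue a FIN on close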
+ */
+err_t
+tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags)
+{
+  struct pbuf *p;
+  struct tcp_seg *seg;
+  u8_t optflags = 0;
+  u8_t optlen = 0;
+
+  LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: queuelen: %"U16_F"\n", (u16_t)pcb->snd_queuelen));
+
+  LWIP_ASSERT("tcp_enqueue_flags: need either TCP_SYN or TCP_FIN in flags (programmer violates API)",
+              (flags & (TCP_SYN | TCP_FIN)) != 0);
+
+  /* check for configured max queuelen and possible overflow (FIN flag should always come through!) */
+  if (((pcb->snd_queuelen >= TCP_SND_QUEUELEN) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) &&
+      ((flags & TCP_FIN) == 0)) {
+    LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_enqueue_flags: too long queue %"U16_F" (max %"U16_F")\n",
+                                                           pcb->snd_queuelen, (u16_t)TCP_SND_QUEUELEN));
+    TCP_STATS_INC(tcp.memerr);
+    pcb->flags |= TF_NAGLEMEMERR;
+    return ERR_MEM;
+  }
+
+  if (flags & TCP_SYN) {
+    optflags = TF_SEG_OPTS_MSS;
+#if LWIP_WND_SCALE
+    if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_WND_SCALE)) {
+      /* In a <SYN,ACK> (sent in state SYN_RCVD), the window scale option may only
+         be sent if we received a window scale option from the remote host. */
+      optflags |= TF_SEG_OPTS_WND_SCALE;
+    }
+#endif /* LWIP_WND_SCALE */
+  }
+#if LWIP_TCP_TIMESTAMPS
+  if ((pcb->flags & TF_TIMESTAMP)) {
+    /* Make sure the timestamp option is only included in data segments if we
+       agreed about it with the remote host. */
+    optflags |= TF_SEG_OPTS_TS;
+  }
+#endif /* LWIP_TCP_TIMESTAMPS */
+  optlen = LWIP_TCP_OPT_LENGTH(optflags);
+
+  /* Allocate pbuf with room for TCP header + options */
+  if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) {
+    pcb->flags |= TF_NAGLEMEMERR;
+    TCP_STATS_INC(tcp.memerr);
+    return ERR_MEM;
+  }
+  LWIP_ASSERT("tcp_enqueue_flags: check that first pbuf can hold optlen",
+              (p->len >= optlen));
+
+  /* Allocate memory for tcp_seg, and fill in fields.
*/ + if ((seg = tcp_create_segment(pcb, p, flags, pcb->snd_lbb, optflags)) == NULL) { + pcb->flags |= TF_NAGLEMEMERR; + TCP_STATS_INC(tcp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("seg->tcphdr not aligned", ((mem_ptr_t)seg->tcphdr % LWIP_MIN(MEM_ALIGNMENT, 4)) == 0); + LWIP_ASSERT("tcp_enqueue_flags: invalid segment length", seg->len == 0); + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, + ("tcp_enqueue_flags: queueing %"U32_F":%"U32_F" (0x%"X16_F")\n", + lwip_ntohl(seg->tcphdr->seqno), + lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg), + (u16_t)flags)); + + /* Now append seg to pcb->unsent queue */ + if (pcb->unsent == NULL) { + pcb->unsent = seg; + } else { + struct tcp_seg *useg; + for (useg = pcb->unsent; useg->next != NULL; useg = useg->next); + useg->next = seg; + } +#if TCP_OVERSIZE + /* The new unsent tail has no space */ + pcb->unsent_oversize = 0; +#endif /* TCP_OVERSIZE */ + + /* SYN and FIN bump the sequence number */ + if ((flags & TCP_SYN) || (flags & TCP_FIN)) { + pcb->snd_lbb++; + /* optlen does not influence snd_buf */ + } + if (flags & TCP_FIN) { + pcb->flags |= TF_FIN; + } + + /* update number of segments on the queues */ + pcb->snd_queuelen += pbuf_clen(seg->p); + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: %"S16_F" (after enqueued)\n", pcb->snd_queuelen)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_enqueue_flags: invalid queue length", + pcb->unacked != NULL || pcb->unsent != NULL); + } + + return ERR_OK; +} + +#if LWIP_TCP_TIMESTAMPS +/* Build a timestamp option (12 bytes long) at the specified options pointer) + * + * @param pcb tcp_pcb + * @param opts option pointer where to store the timestamp option + */ +static void +tcp_build_timestamp_option(struct tcp_pcb *pcb, u32_t *opts) +{ + /* Pad with two NOP options to make everything nicely aligned */ + opts[0] = PP_HTONL(0x0101080A); + opts[1] = lwip_htonl(sys_now()); + opts[2] = lwip_htonl(pcb->ts_recent); +} +#endif + +#if LWIP_WND_SCALE +/** Build a window scale option (3 bytes long) at the specified options pointer) + * + * @param opts option pointer where to store the window scale option + */ +static void +tcp_build_wnd_scale_option(u32_t *opts) +{ + /* Pad with one NOP option to make everything nicely aligned */ + opts[0] = PP_HTONL(0x01030300 | TCP_RCV_SCALE); +} +#endif + +/** + * Send an ACK without data. + * + * @param pcb Protocol control block for the TCP connection to send the ACK + */ +err_t +tcp_send_empty_ack(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + u8_t optlen = 0; + struct netif *netif; +#if LWIP_TCP_TIMESTAMPS || CHECKSUM_GEN_TCP + struct tcp_hdr *tcphdr; +#endif /* LWIP_TCP_TIMESTAMPS || CHECKSUM_GEN_TCP */ + +#if LWIP_TCP_TIMESTAMPS + if (pcb->flags & TF_TIMESTAMP) { + optlen = LWIP_TCP_OPT_LENGTH(TF_SEG_OPTS_TS); + } +#endif + + p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt)); + if (p == NULL) { + /* let tcp_fasttmr retry sending this ACK */ + pcb->flags |= (TF_ACK_DELAY | TF_ACK_NOW); + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: (ACK) could not allocate pbuf\n")); + return ERR_BUF; + } +#if LWIP_TCP_TIMESTAMPS || CHECKSUM_GEN_TCP + tcphdr = (struct tcp_hdr *)p->payload; +#endif /* LWIP_TCP_TIMESTAMPS || CHECKSUM_GEN_TCP */ + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, + ("tcp_output: sending ACK for %"U32_F"\n", pcb->rcv_nxt)); + + /* NB. 
MSS and window scale options are only sent on SYNs, so ignore them here */ +#if LWIP_TCP_TIMESTAMPS + pcb->ts_lastacksent = pcb->rcv_nxt; + + if (pcb->flags & TF_TIMESTAMP) { + tcp_build_timestamp_option(pcb, (u32_t *)(tcphdr + 1)); + } +#endif + + netif = ip_route(&pcb->local_ip, &pcb->remote_ip); + if (netif == NULL) { + err = ERR_RTE; + } else { +#if CHECKSUM_GEN_TCP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { + tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, + &pcb->local_ip, &pcb->remote_ip); + } +#endif + NETIF_SET_HWADDRHINT(netif, &(pcb->addr_hint)); + err = ip_output_if(p, &pcb->local_ip, &pcb->remote_ip, + pcb->ttl, pcb->tos, IP_PROTO_TCP, netif); + NETIF_SET_HWADDRHINT(netif, NULL); + } + pbuf_free(p); + + if (err != ERR_OK) { + /* let tcp_fasttmr retry sending this ACK */ + pcb->flags |= (TF_ACK_DELAY | TF_ACK_NOW); + } else { + /* remove ACK flags from the PCB, as we sent an empty ACK now */ + pcb->flags &= ~(TF_ACK_DELAY | TF_ACK_NOW); + } + + return err; +} + +/** + * @ingroup tcp_raw + * Find out what we can send and send it + * + * @param pcb Protocol control block for the TCP connection to send data + * @return ERR_OK if data has been sent or nothing to send + * another err_t on error + */ +err_t +tcp_output(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg, *useg; + u32_t wnd, snd_nxt; + err_t err; + struct netif *netif; +#if TCP_CWND_DEBUG + s16_t i = 0; +#endif /* TCP_CWND_DEBUG */ + + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_output for listen-pcbs", + pcb->state != LISTEN); + + /* First, check if we are invoked by the TCP input processing + code. If so, we do not output anything. Instead, we rely on the + input processing code to call us when input processing is done + with. */ + if (tcp_input_pcb == pcb) { + return ERR_OK; + } + + wnd = LWIP_MIN(pcb->snd_wnd, pcb->cwnd); + + seg = pcb->unsent; + + /* If the TF_ACK_NOW flag is set and no data will be sent (either + * because the ->unsent queue is empty or because the window does + * not allow it), construct an empty ACK segment and send it. + * + * If data is to be sent, we will just piggyback the ACK (see below). 
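+   *
+   * Concretely: seqno - lastack + len > wnd means even the first
+   * unsent segment does not fit into the effective window (the minimum
+   * of snd_wnd and cwnd), so nothing can carry the ACK.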
+ */ + if (pcb->flags & TF_ACK_NOW && + (seg == NULL || + lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > wnd)) { + return tcp_send_empty_ack(pcb); + } + + /* useg should point to last segment on unacked queue */ + useg = pcb->unacked; + if (useg != NULL) { + for (; useg->next != NULL; useg = useg->next); + } + + netif = ip_route(&pcb->local_ip, &pcb->remote_ip); + if (netif == NULL) { + return ERR_RTE; + } + + /* If we don't have a local IP address, we get one from netif */ + if (ip_addr_isany(&pcb->local_ip)) { + const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, &pcb->remote_ip); + if (local_ip == NULL) { + return ERR_RTE; + } + ip_addr_copy(pcb->local_ip, *local_ip); + } + +#if TCP_OUTPUT_DEBUG + if (seg == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: nothing to send (%p)\n", + (void*)pcb->unsent)); + } +#endif /* TCP_OUTPUT_DEBUG */ +#if TCP_CWND_DEBUG + if (seg == NULL) { + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F + ", cwnd %"TCPWNDSIZE_F", wnd %"U32_F + ", seg == NULL, ack %"U32_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, pcb->lastack)); + } else { + LWIP_DEBUGF(TCP_CWND_DEBUG, + ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F + ", effwnd %"U32_F", seq %"U32_F", ack %"U32_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, + lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len, + lwip_ntohl(seg->tcphdr->seqno), pcb->lastack)); + } +#endif /* TCP_CWND_DEBUG */ + /* Check if we need to start the persistent timer when the next unsent segment + * does not fit within the remaining send window and RTO timer is not running (we + * have no in-flight data). A traditional approach would fill the remaining window + * with part of the unsent segment (which will engage zero-window probing upon + * reception of the zero window update from the receiver). This ensures the + * subsequent window update is reliably received. With the goal of being lightweight, + * we avoid splitting the unsent segment and treat the window as already zero. + */ + if (seg != NULL && + lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > wnd && + wnd > 0 && wnd == pcb->snd_wnd && pcb->unacked == NULL) { + /* Start the persist timer */ + if (pcb->persist_backoff == 0) { + pcb->persist_cnt = 0; + pcb->persist_backoff = 1; + } + goto output_done; + } + /* data available and window allows it to be sent? */ + while (seg != NULL && + lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len <= wnd) { + LWIP_ASSERT("RST not expected here!", + (TCPH_FLAGS(seg->tcphdr) & TCP_RST) == 0); + /* Stop sending if the nagle algorithm would prevent it + * Don't stop: + * - if tcp_write had a memory error before (prevent delayed ACK timeout) or + * - if FIN was already enqueued for this PCB (SYN is always alone in a segment - + * either seg->next != NULL or pcb->unacked == NULL; + * RST is no sent using tcp_write/tcp_output. 
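+     * In code terms: the loop only breaks (stops sending) when
+     * tcp_do_output_nagle() returns 0 and neither TF_NAGLEMEMERR nor
+     * TF_FIN is set on the pcb.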
+ */ + if ((tcp_do_output_nagle(pcb) == 0) && + ((pcb->flags & (TF_NAGLEMEMERR | TF_FIN)) == 0)) { + break; + } +#if TCP_CWND_DEBUG + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F", effwnd %"U32_F", seq %"U32_F", ack %"U32_F", i %"S16_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, + lwip_ntohl(seg->tcphdr->seqno) + seg->len - + pcb->lastack, + lwip_ntohl(seg->tcphdr->seqno), pcb->lastack, i)); + ++i; +#endif /* TCP_CWND_DEBUG */ + + if (pcb->state != SYN_SENT) { + TCPH_SET_FLAG(seg->tcphdr, TCP_ACK); + } + +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ + err = tcp_output_segment(seg, pcb, netif); + if (err != ERR_OK) { + /* segment could not be sent, for whatever reason */ + pcb->flags |= TF_NAGLEMEMERR; + return err; + } + pcb->unsent = seg->next; + if (pcb->state != SYN_SENT) { + pcb->flags &= ~(TF_ACK_DELAY | TF_ACK_NOW); + } + snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg); + if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) { + pcb->snd_nxt = snd_nxt; + } + /* put segment on unacknowledged list if length > 0 */ + if (TCP_TCPLEN(seg) > 0) { + seg->next = NULL; + /* unacked list is empty? */ + if (pcb->unacked == NULL) { + pcb->unacked = seg; + useg = seg; + /* unacked list is not empty? */ + } else { + /* In the case of fast retransmit, the packet should not go to the tail + * of the unacked queue, but rather somewhere before it. We need to check for + * this case. -STJ Jul 27, 2004 */ + if (TCP_SEQ_LT(lwip_ntohl(seg->tcphdr->seqno), lwip_ntohl(useg->tcphdr->seqno))) { + /* add segment to before tail of unacked list, keeping the list sorted */ + struct tcp_seg **cur_seg = &(pcb->unacked); + while (*cur_seg && + TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) { + cur_seg = &((*cur_seg)->next ); + } + seg->next = (*cur_seg); + (*cur_seg) = seg; + } else { + /* add segment to tail of unacked list */ + useg->next = seg; + useg = useg->next; + } + } + /* do not queue empty segments on the unacked list */ + } else { + tcp_seg_free(seg); + } + seg = pcb->unsent; + } +output_done: +#if TCP_OVERSIZE + if (pcb->unsent == NULL) { + /* last unsent has been removed, reset unsent_oversize */ + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + + pcb->flags &= ~TF_NAGLEMEMERR; + return ERR_OK; +} + +/** + * Called by tcp_output() to actually send a TCP segment over IP. + * + * @param seg the tcp_seg to send + * @param pcb the tcp_pcb for the TCP connection used to send the segment + * @param netif the netif used to send the segment + */ +static err_t +tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif) +{ + err_t err; + u16_t len; + u32_t *opts; + + if (seg->p->ref != 1) { + /* This can happen if the pbuf of this segment is still referenced by the + netif driver due to deferred transmission. Since this function modifies + p->len, we must not continue in this case. */ + return ERR_OK; + } + + /* The TCP header has already been constructed, but the ackno and + wnd fields remain. */ + seg->tcphdr->ackno = lwip_htonl(pcb->rcv_nxt); + + /* advertise our receive window size in this TCP segment */ +#if LWIP_WND_SCALE + if (seg->flags & TF_SEG_OPTS_WND_SCALE) { + /* The Window field in a SYN segment itself (the only type where we send + the window scale option) is never scaled. 
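+       Per RFC 1323/7323, the window field of a segment with the SYN
+       bit set must not be scaled; scaling takes effect only after the
+       handshake has agreed on the option.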
*/ + seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(pcb->rcv_ann_wnd)); + } else +#endif /* LWIP_WND_SCALE */ + { + seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd))); + } + + pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd; + + /* Add any requested options. NB MSS option is only set on SYN + packets, so ignore it here */ + /* cast through void* to get rid of alignment warnings */ + opts = (u32_t *)(void *)(seg->tcphdr + 1); + if (seg->flags & TF_SEG_OPTS_MSS) { + u16_t mss; +#if TCP_CALCULATE_EFF_SEND_MSS + mss = tcp_eff_send_mss(TCP_MSS, &pcb->local_ip, &pcb->remote_ip); +#else /* TCP_CALCULATE_EFF_SEND_MSS */ + mss = TCP_MSS; +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + *opts = TCP_BUILD_MSS_OPTION(mss); + opts += 1; + } +#if LWIP_TCP_TIMESTAMPS + pcb->ts_lastacksent = pcb->rcv_nxt; + + if (seg->flags & TF_SEG_OPTS_TS) { + tcp_build_timestamp_option(pcb, opts); + opts += 3; + } +#endif +#if LWIP_WND_SCALE + if (seg->flags & TF_SEG_OPTS_WND_SCALE) { + tcp_build_wnd_scale_option(opts); + opts += 1; + } +#endif + + /* Set retransmission timer running if it is not currently enabled + This must be set before checking the route. */ + if (pcb->rtime < 0) { + pcb->rtime = 0; + } + + if (pcb->rttest == 0) { + pcb->rttest = tcp_ticks; + pcb->rtseq = lwip_ntohl(seg->tcphdr->seqno); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_output_segment: rtseq %"U32_F"\n", pcb->rtseq)); + } + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output_segment: %"U32_F":%"U32_F"\n", + lwip_htonl(seg->tcphdr->seqno), lwip_htonl(seg->tcphdr->seqno) + + seg->len)); + + len = (u16_t)((u8_t *)seg->tcphdr - (u8_t *)seg->p->payload); + if (len == 0) { + /** Exclude retransmitted segments from this count. */ + MIB2_STATS_INC(mib2.tcpoutsegs); + } + + seg->p->len -= len; + seg->p->tot_len -= len; + + seg->p->payload = seg->tcphdr; + + seg->tcphdr->chksum = 0; +#if CHECKSUM_GEN_TCP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { +#if TCP_CHECKSUM_ON_COPY + u32_t acc; +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK + u16_t chksum_slow = ip_chksum_pseudo(seg->p, IP_PROTO_TCP, + seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */ + if ((seg->flags & TF_SEG_DATA_CHECKSUMMED) == 0) { + LWIP_ASSERT("data included but not checksummed", + seg->p->tot_len == (TCPH_HDRLEN(seg->tcphdr) * 4)); + } + + /* rebuild TCP header checksum (TCP header changes for retransmissions!) 
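+       The payload checksum was accumulated while the data was copied
+       into the segment (TCP_CHECKSUM_ON_COPY), so only the pseudo-
+       header and the TCP header are summed here; the stored payload
+       checksum is folded in just below.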
*/ + acc = ip_chksum_pseudo_partial(seg->p, IP_PROTO_TCP, + seg->p->tot_len, TCPH_HDRLEN(seg->tcphdr) * 4, &pcb->local_ip, &pcb->remote_ip); + /* add payload checksum */ + if (seg->chksum_swapped) { + seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum); + seg->chksum_swapped = 0; + } + acc += (u16_t)~(seg->chksum); + seg->tcphdr->chksum = FOLD_U32T(acc); +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK + if (chksum_slow != seg->tcphdr->chksum) { + TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL( + ("tcp_output_segment: calculated checksum is %"X16_F" instead of %"X16_F"\n", + seg->tcphdr->chksum, chksum_slow)); + seg->tcphdr->chksum = chksum_slow; + } +#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */ +#else /* TCP_CHECKSUM_ON_COPY */ + seg->tcphdr->chksum = ip_chksum_pseudo(seg->p, IP_PROTO_TCP, + seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CHECKSUM_ON_COPY */ + } +#endif /* CHECKSUM_GEN_TCP */ + TCP_STATS_INC(tcp.xmit); + + NETIF_SET_HWADDRHINT(netif, &(pcb->addr_hint)); + err = ip_output_if(seg->p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, + pcb->tos, IP_PROTO_TCP, netif); + NETIF_SET_HWADDRHINT(netif, NULL); + return err; +} + +/** + * Send a TCP RESET packet (empty segment with RST flag set) either to + * abort a connection or to show that there is no matching local connection + * for a received segment. + * + * Called by tcp_abort() (to abort a local connection), tcp_input() (if no + * matching local pcb was found), tcp_listen_input() (if incoming segment + * has ACK flag set) and tcp_process() (received segment in the wrong state) + * + * Since a RST segment is in most cases not sent for an active connection, + * tcp_rst() has a number of arguments that are taken from a tcp_pcb for + * most other segment output functions. + * + * @param seqno the sequence number to use for the outgoing segment + * @param ackno the acknowledge number to use for the outgoing segment + * @param local_ip the local IP address to send the segment from + * @param remote_ip the remote IP address to send the segment to + * @param local_port the local TCP port to send the segment from + * @param remote_port the remote TCP port to send the segment to + */ +void +tcp_rst(u32_t seqno, u32_t ackno, + const ip_addr_t *local_ip, const ip_addr_t *remote_ip, + u16_t local_port, u16_t remote_port) +{ + struct pbuf *p; + struct tcp_hdr *tcphdr; + struct netif *netif; + p = pbuf_alloc(PBUF_IP, TCP_HLEN, PBUF_RAM); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_rst: could not allocate memory for pbuf\n")); + return; + } + LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr", + (p->len >= sizeof(struct tcp_hdr))); + + tcphdr = (struct tcp_hdr *)p->payload; + tcphdr->src = lwip_htons(local_port); + tcphdr->dest = lwip_htons(remote_port); + tcphdr->seqno = lwip_htonl(seqno); + tcphdr->ackno = lwip_htonl(ackno); + TCPH_HDRLEN_FLAGS_SET(tcphdr, TCP_HLEN/4, TCP_RST | TCP_ACK); +#if LWIP_WND_SCALE + tcphdr->wnd = PP_HTONS(((TCP_WND >> TCP_RCV_SCALE) & 0xFFFF)); +#else + tcphdr->wnd = PP_HTONS(TCP_WND); +#endif + tcphdr->chksum = 0; + tcphdr->urgp = 0; + + TCP_STATS_INC(tcp.xmit); + MIB2_STATS_INC(mib2.tcpoutrsts); + + netif = ip_route(local_ip, remote_ip); + if (netif != NULL) { +#if CHECKSUM_GEN_TCP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { + tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, + local_ip, remote_ip); + } +#endif + /* Send output with hardcoded TTL/HL since we have no access to the pcb */ + ip_output_if(p, local_ip, remote_ip, TCP_TTL, 0, IP_PROTO_TCP, netif); + } + 
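+  /* the RST segment is freed here whether or not a route was found */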
pbuf_free(p); + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_rst: seqno %"U32_F" ackno %"U32_F".\n", seqno, ackno)); +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_slowtmr() for slow retransmission. + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +void +tcp_rexmit_rto(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg; + + if (pcb->unacked == NULL) { + return; + } + + /* Move all unacked segments to the head of the unsent queue */ + for (seg = pcb->unacked; seg->next != NULL; seg = seg->next); + /* concatenate unsent queue after unacked queue */ + seg->next = pcb->unsent; +#if TCP_OVERSIZE_DBGCHECK + /* if last unsent changed, we need to update unsent_oversize */ + if (pcb->unsent == NULL) { + pcb->unsent_oversize = seg->oversize_left; + } +#endif /* TCP_OVERSIZE_DBGCHECK */ + /* unsent queue is the concatenated queue (of unacked, unsent) */ + pcb->unsent = pcb->unacked; + /* unacked queue is now empty */ + pcb->unacked = NULL; + + /* increment number of retransmissions */ + if (pcb->nrtx < 0xFF) { + ++pcb->nrtx; + } + + /* Don't take any RTT measurements after retransmitting. */ + pcb->rttest = 0; + + /* Do the actual retransmission */ + tcp_output(pcb); +} + +/** + * Requeue the first unacked segment for retransmission + * + * Called by tcp_receive() for fast retransmit. + * + * @param pcb the tcp_pcb for which to retransmit the first unacked segment + */ +void +tcp_rexmit(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg; + struct tcp_seg **cur_seg; + + if (pcb->unacked == NULL) { + return; + } + + /* Move the first unacked segment to the unsent queue */ + /* Keep the unsent queue sorted. */ + seg = pcb->unacked; + pcb->unacked = seg->next; + + cur_seg = &(pcb->unsent); + while (*cur_seg && + TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) { + cur_seg = &((*cur_seg)->next ); + } + seg->next = *cur_seg; + *cur_seg = seg; +#if TCP_OVERSIZE + if (seg->next == NULL) { + /* the retransmitted segment is last in unsent, so reset unsent_oversize */ + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + + if (pcb->nrtx < 0xFF) { + ++pcb->nrtx; + } + + /* Don't take any rtt measurements after retransmitting. */ + pcb->rttest = 0; + + /* Do the actual retransmission. */ + MIB2_STATS_INC(mib2.tcpretranssegs); + /* No need to call tcp_output: we are always called from tcp_input() + and thus tcp_output directly returns. */ +} + + +/** + * Handle retransmission after three dupacks received + * + * @param pcb the tcp_pcb for which to retransmit the first unacked segment + */ +void +tcp_rexmit_fast(struct tcp_pcb *pcb) +{ + if (pcb->unacked != NULL && !(pcb->flags & TF_INFR)) { + /* This is fast retransmit. Retransmit the first unacked segment. 
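+       The window reduction below follows RFC 5681 fast recovery:
+       ssthresh becomes half the effective window (clamped to at least
+       2*MSS) and cwnd is inflated by three segments, one per duplicate
+       ACK that triggered the retransmit.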
*/ + LWIP_DEBUGF(TCP_FR_DEBUG, + ("tcp_receive: dupacks %"U16_F" (%"U32_F + "), fast retransmit %"U32_F"\n", + (u16_t)pcb->dupacks, pcb->lastack, + lwip_ntohl(pcb->unacked->tcphdr->seqno))); + tcp_rexmit(pcb); + + /* Set ssthresh to half of the minimum of the current + * cwnd and the advertised window */ + pcb->ssthresh = LWIP_MIN(pcb->cwnd, pcb->snd_wnd) / 2; + + /* The minimum value for ssthresh should be 2 MSS */ + if (pcb->ssthresh < (2U * pcb->mss)) { + LWIP_DEBUGF(TCP_FR_DEBUG, + ("tcp_receive: The minimum value for ssthresh %"TCPWNDSIZE_F + " should be min 2 mss %"U16_F"...\n", + pcb->ssthresh, (u16_t)(2*pcb->mss))); + pcb->ssthresh = 2*pcb->mss; + } + + pcb->cwnd = pcb->ssthresh + 3 * pcb->mss; + pcb->flags |= TF_INFR; + + /* Reset the retransmission timer to prevent immediate rto retransmissions */ + pcb->rtime = 0; + } +} + + +/** + * Send keepalive packets to keep a connection active although + * no data is sent over it. + * + * Called by tcp_slowtmr() + * + * @param pcb the tcp_pcb for which to send a keepalive packet + */ +err_t +tcp_keepalive(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + struct netif *netif; + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: sending KEEPALIVE probe to ")); + ip_addr_debug_print(TCP_DEBUG, &pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: tcp_ticks %"U32_F" pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n", + tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent)); + + p = tcp_output_alloc_header(pcb, 0, 0, lwip_htonl(pcb->snd_nxt - 1)); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, + ("tcp_keepalive: could not allocate memory for pbuf\n")); + return ERR_MEM; + } + netif = ip_route(&pcb->local_ip, &pcb->remote_ip); + if (netif == NULL) { + err = ERR_RTE; + } else { +#if CHECKSUM_GEN_TCP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { + struct tcp_hdr *tcphdr = (struct tcp_hdr *)p->payload; + tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, + &pcb->local_ip, &pcb->remote_ip); + } +#endif /* CHECKSUM_GEN_TCP */ + TCP_STATS_INC(tcp.xmit); + + /* Send output to IP */ + NETIF_SET_HWADDRHINT(netif, &(pcb->addr_hint)); + err = ip_output_if(p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, 0, IP_PROTO_TCP, netif); + NETIF_SET_HWADDRHINT(netif, NULL); + } + pbuf_free(p); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: seqno %"U32_F" ackno %"U32_F" err %d.\n", + pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err)); + return err; +} + + +/** + * Send persist timer zero-window probes to keep a connection active + * when a window update is lost. 
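+ * A probe consumes exactly one sequence number: either one byte of
+ * queued data or a pending FIN. The peer then answers with an ACK
+ * that carries its current receive window.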
+ * + * Called by tcp_slowtmr() + * + * @param pcb the tcp_pcb for which to send a zero-window probe packet + */ +err_t +tcp_zero_window_probe(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + struct tcp_hdr *tcphdr; + struct tcp_seg *seg; + u16_t len; + u8_t is_fin; + u32_t snd_nxt; + struct netif *netif; + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: sending ZERO WINDOW probe to ")); + ip_addr_debug_print(TCP_DEBUG, &pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + LWIP_DEBUGF(TCP_DEBUG, + ("tcp_zero_window_probe: tcp_ticks %"U32_F + " pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n", + tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent)); + + seg = pcb->unacked; + + if (seg == NULL) { + seg = pcb->unsent; + } + if (seg == NULL) { + /* nothing to send, zero window probe not needed */ + return ERR_OK; + } + + is_fin = ((TCPH_FLAGS(seg->tcphdr) & TCP_FIN) != 0) && (seg->len == 0); + /* we want to send one seqno: either FIN or data (no options) */ + len = is_fin ? 0 : 1; + + p = tcp_output_alloc_header(pcb, 0, len, seg->tcphdr->seqno); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: no memory for pbuf\n")); + return ERR_MEM; + } + tcphdr = (struct tcp_hdr *)p->payload; + + if (is_fin) { + /* FIN segment, no data */ + TCPH_FLAGS_SET(tcphdr, TCP_ACK | TCP_FIN); + } else { + /* Data segment, copy in one byte from the head of the unacked queue */ + char *d = ((char *)p->payload + TCP_HLEN); + /* Depending on whether the segment has already been sent (unacked) or not + (unsent), seg->p->payload points to the IP header or TCP header. + Ensure we copy the first TCP data byte: */ + pbuf_copy_partial(seg->p, d, 1, seg->p->tot_len - seg->len); + } + + /* The byte may be acknowledged without the window being opened. */ + snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + 1; + if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) { + pcb->snd_nxt = snd_nxt; + } + + netif = ip_route(&pcb->local_ip, &pcb->remote_ip); + if (netif == NULL) { + err = ERR_RTE; + } else { +#if CHECKSUM_GEN_TCP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { + tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, + &pcb->local_ip, &pcb->remote_ip); + } +#endif + TCP_STATS_INC(tcp.xmit); + + /* Send output to IP */ + NETIF_SET_HWADDRHINT(netif, &(pcb->addr_hint)); + err = ip_output_if(p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, + 0, IP_PROTO_TCP, netif); + NETIF_SET_HWADDRHINT(netif, NULL); + } + + pbuf_free(p); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: seqno %"U32_F + " ackno %"U32_F" err %d.\n", + pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err)); + return err; +} +#endif /* LWIP_TCP */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/timeouts.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/timeouts.c new file mode 100644 index 000000000..5c9b29d4f --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/timeouts.c @@ -0,0 +1,433 @@ +/** + * @file + * Stack-internal timers implementation. + * This file includes timer callbacks for stack-internal timers as well as + * functions to set up or stop timers and check for expired timers. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#include "lwip/timeouts.h" +#include "lwip/priv/tcp_priv.h" + +#include "lwip/def.h" +#include "lwip/memp.h" +#include "lwip/priv/tcpip_priv.h" + +#include "lwip/ip4_frag.h" +#include "lwip/etharp.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/nd6.h" +#include "lwip/ip6_frag.h" +#include "lwip/mld6.h" +#include "lwip/sys.h" +#include "lwip/pbuf.h" + +#if LWIP_DEBUG_TIMERNAMES +#define HANDLER(x) x, #x +#else /* LWIP_DEBUG_TIMERNAMES */ +#define HANDLER(x) x +#endif /* LWIP_DEBUG_TIMERNAMES */ + +/** This array contains all stack-internal cyclic timers. 
To get the number of + * timers, use LWIP_ARRAYSIZE() */ +const struct lwip_cyclic_timer lwip_cyclic_timers[] = { +#if LWIP_TCP + /* The TCP timer is a special case: it does not have to run always and + is triggered to start from TCP using tcp_timer_needed() */ + {TCP_TMR_INTERVAL, HANDLER(tcp_tmr)}, +#endif /* LWIP_TCP */ +#if LWIP_IPV4 +#if IP_REASSEMBLY + {IP_TMR_INTERVAL, HANDLER(ip_reass_tmr)}, +#endif /* IP_REASSEMBLY */ +#if LWIP_ARP + {ARP_TMR_INTERVAL, HANDLER(etharp_tmr)}, +#endif /* LWIP_ARP */ +#if LWIP_DHCP + {DHCP_COARSE_TIMER_MSECS, HANDLER(dhcp_coarse_tmr)}, + {DHCP_FINE_TIMER_MSECS, HANDLER(dhcp_fine_tmr)}, +#endif /* LWIP_DHCP */ +#if LWIP_AUTOIP + {AUTOIP_TMR_INTERVAL, HANDLER(autoip_tmr)}, +#endif /* LWIP_AUTOIP */ +#if LWIP_IGMP + {IGMP_TMR_INTERVAL, HANDLER(igmp_tmr)}, +#endif /* LWIP_IGMP */ +#endif /* LWIP_IPV4 */ +#if LWIP_DNS + {DNS_TMR_INTERVAL, HANDLER(dns_tmr)}, +#endif /* LWIP_DNS */ +#if LWIP_IPV6 + {ND6_TMR_INTERVAL, HANDLER(nd6_tmr)}, +#if LWIP_IPV6_REASS + {IP6_REASS_TMR_INTERVAL, HANDLER(ip6_reass_tmr)}, +#endif /* LWIP_IPV6_REASS */ +#if LWIP_IPV6_MLD + {MLD6_TMR_INTERVAL, HANDLER(mld6_tmr)}, +#endif /* LWIP_IPV6_MLD */ +#endif /* LWIP_IPV6 */ +}; + +#if LWIP_TIMERS && !LWIP_TIMERS_CUSTOM + +/** The one and only timeout list */ +static struct sys_timeo *next_timeout; +static u32_t timeouts_last_time; + +#if LWIP_TCP +/** global variable that shows if the tcp timer is currently scheduled or not */ +static int tcpip_tcp_timer_active; + +/** + * Timer callback function that calls tcp_tmr() and reschedules itself. + * + * @param arg unused argument + */ +static void +tcpip_tcp_timer(void *arg) +{ + LWIP_UNUSED_ARG(arg); + + /* call TCP timer handler */ + tcp_tmr(); + /* timer still needed? */ + if (tcp_active_pcbs || tcp_tw_pcbs) { + /* restart timer */ + sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL); + } else { + /* disable timer */ + tcpip_tcp_timer_active = 0; + } +} + +/** + * Called from TCP_REG when registering a new PCB: + * the reason is to have the TCP timer only running when + * there are active (or time-wait) PCBs. + */ +void +tcp_timer_needed(void) +{ + /* timer is off but needed again? */ + if (!tcpip_tcp_timer_active && (tcp_active_pcbs || tcp_tw_pcbs)) { + /* enable and start timer */ + tcpip_tcp_timer_active = 1; + sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL); + } +} +#endif /* LWIP_TCP */ + +/** + * Timer callback function that calls mld6_tmr() and reschedules itself. + * + * @param arg unused argument + */ +static void +cyclic_timer(void *arg) +{ + const struct lwip_cyclic_timer* cyclic = (const struct lwip_cyclic_timer*)arg; +#if LWIP_DEBUG_TIMERNAMES + LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: %s()\n", cyclic->handler_name)); +#endif + cyclic->handler(); + sys_timeout(cyclic->interval_ms, cyclic_timer, arg); +} + +/** Initialize this module */ +void sys_timeouts_init(void) +{ + size_t i; + /* tcp_tmr() at index 0 is started on demand */ + for (i = (LWIP_TCP ? 1 : 0); i < LWIP_ARRAYSIZE(lwip_cyclic_timers); i++) { + /* we have to cast via size_t to get rid of const warning + (this is OK as cyclic_timer() casts back to const* */ + sys_timeout(lwip_cyclic_timers[i].interval_ms, cyclic_timer, LWIP_CONST_CAST(void*, &lwip_cyclic_timers[i])); + } + + /* Initialise timestamp for sys_check_timeouts */ + timeouts_last_time = sys_now(); +} + +/** + * Create a one-shot timer (aka timeout). 
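+ * For example (illustrative only; my_handler is an application-
+ * supplied callback matching sys_timeout_handler):
+ *   sys_timeout(1000, my_handler, NULL);
+ * arms my_handler(NULL) to run roughly 1000 ms from now.
+ *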
Timeouts are processed in the + * following cases: + * - while waiting for a message using sys_timeouts_mbox_fetch() + * - by calling sys_check_timeouts() (NO_SYS==1 only) + * + * @param msecs time in milliseconds after that the timer should expire + * @param handler callback function to call when msecs have elapsed + * @param arg argument to pass to the callback function + */ +#if LWIP_DEBUG_TIMERNAMES +void +sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char* handler_name) +#else /* LWIP_DEBUG_TIMERNAMES */ +void +sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg) +#endif /* LWIP_DEBUG_TIMERNAMES */ +{ + struct sys_timeo *timeout, *t; + u32_t now, diff; + + timeout = (struct sys_timeo *)memp_malloc(MEMP_SYS_TIMEOUT); + if (timeout == NULL) { + LWIP_ASSERT("sys_timeout: timeout != NULL, pool MEMP_SYS_TIMEOUT is empty", timeout != NULL); + return; + } + + now = sys_now(); + if (next_timeout == NULL) { + diff = 0; + timeouts_last_time = now; + } else { + diff = now - timeouts_last_time; + } + + timeout->next = NULL; + timeout->h = handler; + timeout->arg = arg; + timeout->time = msecs + diff; +#if LWIP_DEBUG_TIMERNAMES + timeout->handler_name = handler_name; + LWIP_DEBUGF(TIMERS_DEBUG, ("sys_timeout: %p msecs=%"U32_F" handler=%s arg=%p\n", + (void *)timeout, msecs, handler_name, (void *)arg)); +#endif /* LWIP_DEBUG_TIMERNAMES */ + + if (next_timeout == NULL) { + next_timeout = timeout; + return; + } + + if (next_timeout->time > msecs) { + next_timeout->time -= msecs; + timeout->next = next_timeout; + next_timeout = timeout; + } else { + for (t = next_timeout; t != NULL; t = t->next) { + timeout->time -= t->time; + if (t->next == NULL || t->next->time > timeout->time) { + if (t->next != NULL) { + t->next->time -= timeout->time; + } else if (timeout->time > msecs) { + /* If this is the case, 'timeouts_last_time' and 'now' differs too much. + This can be due to sys_check_timeouts() not being called at the right + times, but also when stopping in a breakpoint. Anyway, let's assume + this is not wanted, so add the first timer's time instead of 'diff' */ + timeout->time = msecs + next_timeout->time; + } + timeout->next = t->next; + t->next = timeout; + break; + } + } + } +} + +/** + * Go through timeout list (for this task only) and remove the first matching + * entry (subsequent entries remain untouched), even though the timeout has not + * triggered yet. + * + * @param handler callback function that would be called by the timeout + * @param arg callback argument that would be passed to handler +*/ +void +sys_untimeout(sys_timeout_handler handler, void *arg) +{ + struct sys_timeo *prev_t, *t; + + if (next_timeout == NULL) { + return; + } + + for (t = next_timeout, prev_t = NULL; t != NULL; prev_t = t, t = t->next) { + if ((t->h == handler) && (t->arg == arg)) { + /* We have a match */ + /* Unlink from previous in list */ + if (prev_t == NULL) { + next_timeout = t->next; + } else { + prev_t->next = t->next; + } + /* If not the last one, add time of this one back to next */ + if (t->next != NULL) { + t->next->time += t->time; + } + memp_free(MEMP_SYS_TIMEOUT, t); + return; + } + } + return; +} + +/** + * @ingroup lwip_nosys + * Handle timeouts for NO_SYS==1 (i.e. without using + * tcpip_thread/sys_timeouts_mbox_fetch(). Uses sys_now() to call timeout + * handler functions when timeouts expire. + * + * Must be called periodically from your main loop. 
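+ * A minimal NO_SYS==1 main loop might look like this (sketch only;
+ * my_netif_poll is an assumed driver function, not part of lwIP):
+ *
+ *   for (;;) {
+ *     my_netif_poll(&netif);    // feed received frames to the stack
+ *     sys_check_timeouts();     // run any timers that have expired
+ *   }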
+ */ +#if !NO_SYS && !defined __DOXYGEN__ +static +#endif /* !NO_SYS */ +void +sys_check_timeouts(void) +{ + if (next_timeout) { + struct sys_timeo *tmptimeout; + u32_t diff; + sys_timeout_handler handler; + void *arg; + u8_t had_one; + u32_t now; + + now = sys_now(); + /* this cares for wraparounds */ + diff = now - timeouts_last_time; + do { + PBUF_CHECK_FREE_OOSEQ(); + had_one = 0; + tmptimeout = next_timeout; + if (tmptimeout && (tmptimeout->time <= diff)) { + /* timeout has expired */ + had_one = 1; + timeouts_last_time += tmptimeout->time; + diff -= tmptimeout->time; + next_timeout = tmptimeout->next; + handler = tmptimeout->h; + arg = tmptimeout->arg; +#if LWIP_DEBUG_TIMERNAMES + if (handler != NULL) { + LWIP_DEBUGF(TIMERS_DEBUG, ("sct calling h=%s arg=%p\n", + tmptimeout->handler_name, arg)); + } +#endif /* LWIP_DEBUG_TIMERNAMES */ + memp_free(MEMP_SYS_TIMEOUT, tmptimeout); + if (handler != NULL) { +#if !NO_SYS + /* For LWIP_TCPIP_CORE_LOCKING, lock the core before calling the + timeout handler function. */ + LOCK_TCPIP_CORE(); +#endif /* !NO_SYS */ + handler(arg); +#if !NO_SYS + UNLOCK_TCPIP_CORE(); +#endif /* !NO_SYS */ + } + LWIP_TCPIP_THREAD_ALIVE(); + } + /* repeat until all expired timers have been called */ + } while (had_one); + } +} + +/** Set back the timestamp of the last call to sys_check_timeouts() + * This is necessary if sys_check_timeouts() hasn't been called for a long + * time (e.g. while saving energy) to prevent all timer functions of that + * period being called. + */ +void +sys_restart_timeouts(void) +{ + timeouts_last_time = sys_now(); +} + +/** Return the time left before the next timeout is due. If no timeouts are + * enqueued, returns 0xffffffff + */ +#if !NO_SYS +static +#endif /* !NO_SYS */ +u32_t +sys_timeouts_sleeptime(void) +{ + u32_t diff; + if (next_timeout == NULL) { + return 0xffffffff; + } + diff = sys_now() - timeouts_last_time; + if (diff > next_timeout->time) { + return 0; + } else { + return next_timeout->time - diff; + } +} + +#if !NO_SYS + +/** + * Wait (forever) for a message to arrive in an mbox. + * While waiting, timeouts are processed. + * + * @param mbox the mbox to fetch the message from + * @param msg the place to store the message + */ +void +sys_timeouts_mbox_fetch(sys_mbox_t *mbox, void **msg) +{ + u32_t sleeptime; + +again: + if (!next_timeout) { + sys_arch_mbox_fetch(mbox, msg, 0); + return; + } + + sleeptime = sys_timeouts_sleeptime(); + if (sleeptime == 0 || sys_arch_mbox_fetch(mbox, msg, sleeptime) == SYS_ARCH_TIMEOUT) { + /* If a SYS_ARCH_TIMEOUT value is returned, a timeout occurred + before a message could be fetched. */ + sys_check_timeouts(); + /* We try again to fetch a message from the mbox. 
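+       The timeout list may have changed while the handlers ran, so
+       the sleep time is recomputed on the next pass.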
*/ + goto again; + } +} + +#endif /* NO_SYS */ + +#else /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ +/* Satisfy the TCP code which calls this function */ +void +tcp_timer_needed(void) +{ +} +#endif /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/udp.c b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/udp.c new file mode 100644 index 000000000..00592590e --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/core/udp.c @@ -0,0 +1,1191 @@ +/** + * @file + * User Datagram Protocol module\n + * The code for the User Datagram Protocol UDP & UDPLite (RFC 3828).\n + * See also @ref udp_raw + * + * @defgroup udp_raw UDP + * @ingroup callbackstyle_api + * User Datagram Protocol module\n + * @see @ref raw_api and @ref netconn + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* @todo Check the use of '(struct udp_pcb).chksum_len_rx'! + */ + +#include "lwip/opt.h" + +#if LWIP_UDP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/udp.h" +#include "lwip/def.h" +#include "lwip/memp.h" +#include "lwip/inet_chksum.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/icmp.h" +#include "lwip/icmp6.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" +#include "lwip/dhcp.h" + +#include + +#ifndef UDP_LOCAL_PORT_RANGE_START +/* From http://www.iana.org/assignments/port-numbers: + "The Dynamic and/or Private Ports are those from 49152 through 65535" */ +#define UDP_LOCAL_PORT_RANGE_START 0xc000 +#define UDP_LOCAL_PORT_RANGE_END 0xffff +#define UDP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & ~UDP_LOCAL_PORT_RANGE_START) + UDP_LOCAL_PORT_RANGE_START)) +#endif + +/* last local UDP port */ +static u16_t udp_port = UDP_LOCAL_PORT_RANGE_START; + +/* The list of UDP PCBs */ +/* exported in udp.h (was static) */ +struct udp_pcb *udp_pcbs; + +/** + * Initialize this module. 
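+ *
+ * If LWIP_RANDOMIZE_INITIAL_LOCAL_PORTS is enabled, the ephemeral port
+ * counter is seeded via UDP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND()),
+ * which maps any u16_t into 0xC000..0xFFFF, e.g.
+ * (0x1234 & ~0xC000) + 0xC000 = 0xD234.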
+ */ +void +udp_init(void) +{ +#if LWIP_RANDOMIZE_INITIAL_LOCAL_PORTS && defined(LWIP_RAND) + udp_port = UDP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND()); +#endif /* LWIP_RANDOMIZE_INITIAL_LOCAL_PORTS && defined(LWIP_RAND) */ +} + +/** + * Allocate a new local UDP port. + * + * @return a new (free) local UDP port number + */ +static u16_t +udp_new_port(void) +{ + u16_t n = 0; + struct udp_pcb *pcb; + +again: + if (udp_port++ == UDP_LOCAL_PORT_RANGE_END) { + udp_port = UDP_LOCAL_PORT_RANGE_START; + } + /* Check all PCBs. */ + for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->local_port == udp_port) { + if (++n > (UDP_LOCAL_PORT_RANGE_END - UDP_LOCAL_PORT_RANGE_START)) { + return 0; + } + goto again; + } + } + return udp_port; +} + +/** Common code to see if the current input packet matches the pcb + * (current input packet is accessed via ip(4/6)_current_* macros) + * + * @param pcb pcb to check + * @param inp network interface on which the datagram was received (only used for IPv4) + * @param broadcast 1 if his is an IPv4 broadcast (global or subnet-only), 0 otherwise (only used for IPv4) + * @return 1 on match, 0 otherwise + */ +static u8_t +udp_input_local_match(struct udp_pcb *pcb, struct netif *inp, u8_t broadcast) +{ + LWIP_UNUSED_ARG(inp); /* in IPv6 only case */ + LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */ + + /* Dual-stack: PCBs listening to any IP type also listen to any IP address */ + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { +#if LWIP_IPV4 && IP_SOF_BROADCAST_RECV + if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) { + return 0; + } +#endif /* LWIP_IPV4 && IP_SOF_BROADCAST_RECV */ + return 1; + } + + /* Only need to check PCB if incoming IP version matches PCB IP version */ + if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) { +#if LWIP_IPV4 + /* Special case: IPv4 broadcast: all or broadcasts in my subnet + * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */ + if (broadcast != 0) { +#if IP_SOF_BROADCAST_RECV + if (ip_get_option(pcb, SOF_BROADCAST)) +#endif /* IP_SOF_BROADCAST_RECV */ + { + if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) || + ((ip4_current_dest_addr()->addr == IPADDR_BROADCAST)) || + ip4_addr_netcmp(ip_2_ip4(&pcb->local_ip), ip4_current_dest_addr(), netif_ip4_netmask(inp))) { + return 1; + } + } + } else +#endif /* LWIP_IPV4 */ + /* Handle IPv4 and IPv6: all or exact match */ + if (ip_addr_isany(&pcb->local_ip) || ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + return 1; + } + } + + return 0; +} + +/** + * Process an incoming UDP datagram. + * + * Given an incoming UDP datagram (as a chain of pbufs) this function + * finds a corresponding UDP PCB and hands over the pbuf to the pcbs + * recv function. If no pcb is found or the datagram is incorrect, the + * pbuf is freed. + * + * @param p pbuf to be demultiplexed to a UDP PCB (p->payload pointing to the UDP header) + * @param inp network interface on which the datagram was received. 
+ *
+ */
+void
+udp_input(struct pbuf *p, struct netif *inp)
+{
+  struct udp_hdr *udphdr;
+  struct udp_pcb *pcb, *prev;
+  struct udp_pcb *uncon_pcb;
+  u16_t src, dest;
+  u8_t broadcast;
+  u8_t for_us = 0;
+
+  LWIP_UNUSED_ARG(inp);
+
+  PERF_START;
+
+  UDP_STATS_INC(udp.recv);
+
+  /* Check minimum length (UDP header) */
+  if (p->len < UDP_HLEN) {
+    /* drop short packets */
+    LWIP_DEBUGF(UDP_DEBUG,
+                ("udp_input: short UDP datagram (%"U16_F" bytes) discarded\n", p->tot_len));
+    UDP_STATS_INC(udp.lenerr);
+    UDP_STATS_INC(udp.drop);
+    MIB2_STATS_INC(mib2.udpinerrors);
+    pbuf_free(p);
+    goto end;
+  }
+
+  udphdr = (struct udp_hdr *)p->payload;
+
+  /* is broadcast packet ? */
+  broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif());
+
+  LWIP_DEBUGF(UDP_DEBUG, ("udp_input: received datagram of length %"U16_F"\n", p->tot_len));
+
+  /* convert src and dest ports to host byte order */
+  src = lwip_ntohs(udphdr->src);
+  dest = lwip_ntohs(udphdr->dest);
+
+  udp_debug_print(udphdr);
+
+  /* print the UDP source and destination */
+  LWIP_DEBUGF(UDP_DEBUG, ("udp ("));
+  ip_addr_debug_print(UDP_DEBUG, ip_current_dest_addr());
+  LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", lwip_ntohs(udphdr->dest)));
+  ip_addr_debug_print(UDP_DEBUG, ip_current_src_addr());
+  LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", lwip_ntohs(udphdr->src)));
+
+  pcb = NULL;
+  prev = NULL;
+  uncon_pcb = NULL;
+  /* Iterate through the UDP pcb list for a matching pcb.
+   * 'Perfect match' pcbs (connected to the remote port & ip address) are
+   * preferred. If no perfect match is found, the first unconnected pcb that
+   * matches the local port and ip address gets the datagram. */
+  for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) {
+    /* print the PCB local and remote address */
+    LWIP_DEBUGF(UDP_DEBUG, ("pcb ("));
+    ip_addr_debug_print(UDP_DEBUG, &pcb->local_ip);
+    LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", pcb->local_port));
+    ip_addr_debug_print(UDP_DEBUG, &pcb->remote_ip);
+    LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", pcb->remote_port));
+
+    /* compare PCB local addr+port to UDP destination addr+port */
+    if ((pcb->local_port == dest) &&
+        (udp_input_local_match(pcb, inp, broadcast) != 0)) {
+      if (((pcb->flags & UDP_FLAGS_CONNECTED) == 0) &&
+          ((uncon_pcb == NULL)
+#if SO_REUSE
+           /* prefer specific IPs over catch-all */
+           || !ip_addr_isany(&pcb->local_ip)
+#endif /* SO_REUSE */
+          )) {
+        /* the first unconnected matching PCB */
+        uncon_pcb = pcb;
+      }
+
+      /* compare PCB remote addr+port to UDP source addr+port */
+      if ((pcb->remote_port == src) &&
+          (ip_addr_isany_val(pcb->remote_ip) ||
+           ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()))) {
+        /* the first fully matching PCB */
+        if (prev != NULL) {
+          /* move the pcb to the front of udp_pcbs so that it is
+             found faster next time */
+          prev->next = pcb->next;
+          pcb->next = udp_pcbs;
+          udp_pcbs = pcb;
+        } else {
+          UDP_STATS_INC(udp.cachehit);
+        }
+        break;
+      }
+    }
+
+    prev = pcb;
+  }
+  /* no fully matching pcb found? then look for an unconnected pcb */
+  if (pcb == NULL) {
+    pcb = uncon_pcb;
+  }
+
+  /* Check checksum if this is a match or if it was directed at us.
*/ + if (pcb != NULL) { + for_us = 1; + } else { +#if LWIP_IPV6 + if (ip_current_is_v6()) { + for_us = netif_get_ip6_addr_match(inp, ip6_current_dest_addr()) >= 0; + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + if (!ip_current_is_v6()) { + for_us = ip4_addr_cmp(netif_ip4_addr(inp), ip4_current_dest_addr()); + } +#endif /* LWIP_IPV4 */ + } + + if (for_us) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: calculating checksum\n")); +#if CHECKSUM_CHECK_UDP + IF__NETIF_CHECKSUM_ENABLED(inp, CHECKSUM_CHECK_UDP) { +#if LWIP_UDPLITE + if (ip_current_header_proto() == IP_PROTO_UDPLITE) { + /* Do the UDP Lite checksum */ + u16_t chklen = lwip_ntohs(udphdr->len); + if (chklen < sizeof(struct udp_hdr)) { + if (chklen == 0) { + /* For UDP-Lite, checksum length of 0 means checksum + over the complete packet (See RFC 3828 chap. 3.1) */ + chklen = p->tot_len; + } else { + /* At least the UDP-Lite header must be covered by the + checksum! (Again, see RFC 3828 chap. 3.1) */ + goto chkerr; + } + } + if (ip_chksum_pseudo_partial(p, IP_PROTO_UDPLITE, + p->tot_len, chklen, + ip_current_src_addr(), ip_current_dest_addr()) != 0) { + goto chkerr; + } + } else +#endif /* LWIP_UDPLITE */ + { + if (udphdr->chksum != 0) { + if (ip_chksum_pseudo(p, IP_PROTO_UDP, p->tot_len, + ip_current_src_addr(), + ip_current_dest_addr()) != 0) { + goto chkerr; + } + } + } + } +#endif /* CHECKSUM_CHECK_UDP */ + if (pbuf_header(p, -UDP_HLEN)) { + /* Can we cope with this failing? Just assert for now */ + LWIP_ASSERT("pbuf_header failed\n", 0); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + goto end; + } + + if (pcb != NULL) { + MIB2_STATS_INC(mib2.udpindatagrams); +#if SO_REUSE && SO_REUSE_RXTOALL + if (ip_get_option(pcb, SOF_REUSEADDR) && + (broadcast || ip_addr_ismulticast(ip_current_dest_addr()))) { + /* pass broadcast- or multicast packets to all multicast pcbs + if SOF_REUSEADDR is set on the first match */ + struct udp_pcb *mpcb; + u8_t p_header_changed = 0; + s16_t hdrs_len = (s16_t)(ip_current_header_tot_len() + UDP_HLEN); + for (mpcb = udp_pcbs; mpcb != NULL; mpcb = mpcb->next) { + if (mpcb != pcb) { + /* compare PCB local addr+port to UDP destination addr+port */ + if ((mpcb->local_port == dest) && + (udp_input_local_match(mpcb, inp, broadcast) != 0)) { + /* pass a copy of the packet to all local matches */ + if (mpcb->recv != NULL) { + struct pbuf *q; + /* for that, move payload to IP header again */ + if (p_header_changed == 0) { + pbuf_header_force(p, hdrs_len); + p_header_changed = 1; + } + q = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM); + if (q != NULL) { + err_t err = pbuf_copy(q, p); + if (err == ERR_OK) { + /* move payload to UDP data */ + pbuf_header(q, -hdrs_len); + mpcb->recv(mpcb->recv_arg, mpcb, q, ip_current_src_addr(), src); + } + } + } + } + } + } + if (p_header_changed) { + /* and move payload to UDP data again */ + pbuf_header(p, -hdrs_len); + } + } +#endif /* SO_REUSE && SO_REUSE_RXTOALL */ + /* callback */ + if (pcb->recv != NULL) { + /* now the recv function is responsible for freeing p */ + pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr(), src); + } else { + /* no recv function registered? then we have to free the pbuf! */ + pbuf_free(p); + goto end; + } + } else { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: not for us.\n")); + +#if LWIP_ICMP || LWIP_ICMP6 + /* No match was found, send ICMP destination port unreachable unless + destination address was broadcast/multicast. 
*/
+    if (!broadcast && !ip_addr_ismulticast(ip_current_dest_addr())) {
+      /* move payload pointer back to ip header */
+      pbuf_header_force(p, (s16_t)(ip_current_header_tot_len() + UDP_HLEN));
+      icmp_port_unreach(ip_current_is_v6(), p);
+    }
+#endif /* LWIP_ICMP || LWIP_ICMP6 */
+    UDP_STATS_INC(udp.proterr);
+    UDP_STATS_INC(udp.drop);
+    MIB2_STATS_INC(mib2.udpnoports);
+    pbuf_free(p);
+  } else {
+    pbuf_free(p);
+  }
+end:
+  PERF_STOP("udp_input");
+  return;
+#if CHECKSUM_CHECK_UDP
+chkerr:
+  LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+              ("udp_input: UDP (or UDP Lite) datagram discarded due to failing checksum\n"));
+  UDP_STATS_INC(udp.chkerr);
+  UDP_STATS_INC(udp.drop);
+  MIB2_STATS_INC(mib2.udpinerrors);
+  pbuf_free(p);
+  PERF_STOP("udp_input");
+#endif /* CHECKSUM_CHECK_UDP */
+}
+
+/**
+ * @ingroup udp_raw
+ * Send data using UDP.
+ *
+ * @param pcb UDP PCB used to send the data.
+ * @param p chain of pbuf's to be sent.
+ *
+ * The datagram will be sent to the current remote_ip & remote_port
+ * stored in pcb. If the pcb is not bound to a port, it will
+ * automatically be bound to a random port.
+ *
+ * @return lwIP error code.
+ * - ERR_OK. Successful. No error occurred.
+ * - ERR_MEM. Out of memory.
+ * - ERR_RTE. Could not find route to destination address.
+ * - ERR_VAL. No PCB or PCB is dual-stack
+ * - More errors could be returned by lower protocol layers.
+ *
+ * @see udp_disconnect() udp_sendto()
+ */
+err_t
+udp_send(struct udp_pcb *pcb, struct pbuf *p)
+{
+  if ((pcb == NULL) || IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) {
+    return ERR_VAL;
+  }
+
+  /* send the packet using the remote ip and port stored in the pcb */
+  return udp_sendto(pcb, p, &pcb->remote_ip, pcb->remote_port);
+}
+
+#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP
+/** @ingroup udp_raw
+ * Same as udp_send() but with checksum
+ */
+err_t
+udp_send_chksum(struct udp_pcb *pcb, struct pbuf *p,
+                u8_t have_chksum, u16_t chksum)
+{
+  if ((pcb == NULL) || IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) {
+    return ERR_VAL;
+  }
+
+  /* send the packet using the remote ip and port stored in the pcb */
+  return udp_sendto_chksum(pcb, p, &pcb->remote_ip, pcb->remote_port,
+                           have_chksum, chksum);
+}
+#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */
+
+/**
+ * @ingroup udp_raw
+ * Send data to a specified address using UDP.
+ *
+ * @param pcb UDP PCB used to send the data.
+ * @param p chain of pbuf's to be sent.
+ * @param dst_ip Destination IP address.
+ * @param dst_port Destination UDP port.
+ *
+ * dst_ip & dst_port are expected to be in the same byte order as in the pcb.
+ *
+ * If the PCB already has a remote address association, it will
+ * be restored after the data is sent.
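+ *
+ * Example (illustrative sketch, not part of the lwIP sources; 'pcb',
+ * 'dest_ip', 'data' and 'len' are assumed application variables, and
+ * port 1234 is chosen arbitrarily):
+ *
+ *   struct pbuf *q = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM);
+ *   if (q != NULL) {
+ *     memcpy(q->payload, data, len);
+ *     udp_sendto(pcb, q, &dest_ip, 1234);
+ *     pbuf_free(q);                        // the caller still owns the pbuf
+ *   }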
+ * + * @return lwIP error code (@see udp_send for possible error codes) + * + * @see udp_disconnect() udp_send() + */ +err_t +udp_sendto(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_chksum(pcb, p, dst_ip, dst_port, 0, 0); +} + +/** @ingroup udp_raw + * Same as udp_sendto(), but with checksum */ +err_t +udp_sendto_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, u8_t have_chksum, u16_t chksum) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + struct netif *netif; + const ip_addr_t *dst_ip_route = dst_ip; + + if ((pcb == NULL) || (dst_ip == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send\n")); + +#if LWIP_IPV6 || (LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS) + if (ip_addr_ismulticast(dst_ip_route)) { +#if LWIP_IPV6 + if (IP_IS_V6(dst_ip)) { + /* For multicast, find a netif based on source address. */ + dst_ip_route = &pcb->local_ip; + } else +#endif /* LWIP_IPV6 */ + { +#if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS + /* IPv4 does not use source-based routing by default, so we use an + administratively selected interface for multicast by default. + However, this can be overridden by setting an interface address + in pcb->multicast_ip that is used for routing. */ + if (!ip_addr_isany_val(pcb->multicast_ip) && + !ip4_addr_cmp(ip_2_ip4(&pcb->multicast_ip), IP4_ADDR_BROADCAST)) { + dst_ip_route = &pcb->multicast_ip; + } +#endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS */ + } + } +#endif /* LWIP_IPV6 || (LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS) */ + + /* find the outgoing network interface for this packet */ + if(IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { + /* Don't call ip_route() with IP_ANY_TYPE */ + netif = ip_route(IP46_ADDR_ANY(IP_GET_TYPE(dst_ip_route)), dst_ip_route); + } else { + netif = ip_route(&pcb->local_ip, dst_ip_route); + } + + /* no outgoing network interface could be found? */ + if (netif == NULL) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: No route to ")); + ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, dst_ip); + LWIP_DEBUGF(UDP_DEBUG, ("\n")); + UDP_STATS_INC(udp.rterr); + return ERR_RTE; + } +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum); +#else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + return udp_sendto_if(pcb, p, dst_ip, dst_port, netif); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ +} + +/** + * @ingroup udp_raw + * Send data to a specified address using UDP. + * The netif used for sending can be specified. + * + * This function exists mainly for DHCP, to be able to send UDP packets + * on a netif that is still down. + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * @param dst_ip Destination IP address. + * @param dst_port Destination UDP port. + * @param netif the netif used for sending. + * + * dst_ip & dst_port are expected to be in the same byte order as in the pcb. 
+ * + * @return lwIP error code (@see udp_send for possible error codes) + * + * @see udp_disconnect() udp_send() + */ +err_t +udp_sendto_if(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0); +} + +/** Same as udp_sendto_if(), but with checksum */ +err_t +udp_sendto_if_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, struct netif *netif, u8_t have_chksum, + u16_t chksum) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + const ip_addr_t *src_ip; + + if ((pcb == NULL) || (dst_ip == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + /* PCB local address is IP_ANY_ADDR? */ +#if LWIP_IPV6 + if (IP_IS_V6(dst_ip)) { + if (ip6_addr_isany(ip_2_ip6(&pcb->local_ip))) { + src_ip = ip6_select_source_address(netif, ip_2_ip6(dst_ip)); + if (src_ip == NULL) { + /* No suitable source address was found. */ + return ERR_RTE; + } + } else { + /* use UDP PCB local IPv6 address as source address, if still valid. */ + if (netif_get_ip6_addr_match(netif, ip_2_ip6(&pcb->local_ip)) < 0) { + /* Address isn't valid anymore. */ + return ERR_RTE; + } + src_ip = &pcb->local_ip; + } + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) || + ip4_addr_ismulticast(ip_2_ip4(&pcb->local_ip))) { + /* if the local_ip is any or multicast + * use the outgoing network interface IP address as source address */ + src_ip = netif_ip_addr4(netif); + } else { + /* check if UDP PCB local IP address is correct + * this could be an old address if netif->ip_addr has changed */ + if (!ip4_addr_cmp(ip_2_ip4(&(pcb->local_ip)), netif_ip4_addr(netif))) { + /* local_ip doesn't match, drop the packet */ + return ERR_RTE; + } + /* use UDP PCB local IP address as source address */ + src_ip = &pcb->local_ip; + } +#endif /* LWIP_IPV4 */ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum, src_ip); +#else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + return udp_sendto_if_src(pcb, p, dst_ip, dst_port, netif, src_ip); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ +} + +/** @ingroup udp_raw + * Same as @ref udp_sendto_if, but with source address */ +err_t +udp_sendto_if_src(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif, const ip_addr_t *src_ip) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0, src_ip); +} + +/** Same as udp_sendto_if_src(), but with checksum */ +err_t +udp_sendto_if_src_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, struct netif *netif, u8_t have_chksum, + u16_t chksum, const ip_addr_t *src_ip) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + struct udp_hdr *udphdr; + err_t err; + struct pbuf *q; /* q will be sent down the stack */ + u8_t ip_proto; + u8_t ttl; + + if ((pcb == NULL) || (dst_ip == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, src_ip) || + !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + +#if LWIP_IPV4 && IP_SOF_BROADCAST + /* broadcast filter? 
*/ + if (!ip_get_option(pcb, SOF_BROADCAST) && +#if LWIP_IPV6 + IP_IS_V4(dst_ip) && +#endif /* LWIP_IPV6 */ + ip_addr_isbroadcast(dst_ip, netif)) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("udp_sendto_if: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb)); + return ERR_VAL; + } +#endif /* LWIP_IPV4 && IP_SOF_BROADCAST */ + + /* if the PCB is not yet bound to a port, bind it here */ + if (pcb->local_port == 0) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send: not yet bound to a port, binding now\n")); + err = udp_bind(pcb, &pcb->local_ip, pcb->local_port); + if (err != ERR_OK) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: forced port bind failed\n")); + return err; + } + } + + /* not enough space to add an UDP header to first pbuf in given p chain? */ + if (pbuf_header(p, UDP_HLEN)) { + /* allocate header in a separate new pbuf */ + q = pbuf_alloc(PBUF_IP, UDP_HLEN, PBUF_RAM); + /* new header pbuf could not be allocated? */ + if (q == NULL) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: could not allocate header\n")); + return ERR_MEM; + } + if (p->tot_len != 0) { + /* chain header q in front of given pbuf p (only if p contains data) */ + pbuf_chain(q, p); + } + /* first pbuf q points to header pbuf */ + LWIP_DEBUGF(UDP_DEBUG, + ("udp_send: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p)); + } else { + /* adding space for header within p succeeded */ + /* first pbuf q equals given pbuf */ + q = p; + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: added header in given pbuf %p\n", (void *)p)); + } + LWIP_ASSERT("check that first pbuf can hold struct udp_hdr", + (q->len >= sizeof(struct udp_hdr))); + /* q now represents the packet to be sent */ + udphdr = (struct udp_hdr *)q->payload; + udphdr->src = lwip_htons(pcb->local_port); + udphdr->dest = lwip_htons(dst_port); + /* in UDP, 0 checksum means 'no checksum' */ + udphdr->chksum = 0x0000; + + /* Multicast Loop? */ +#if (LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS) || (LWIP_IPV6 && LWIP_IPV6_MLD) + if (((pcb->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) && ip_addr_ismulticast(dst_ip)) { + q->flags |= PBUF_FLAG_MCASTLOOP; + } +#endif /* (LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS) || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: sending datagram of length %"U16_F"\n", q->tot_len)); + +#if LWIP_UDPLITE + /* UDP Lite protocol? */ + if (pcb->flags & UDP_FLAGS_UDPLITE) { + u16_t chklen, chklen_hdr; + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE packet length %"U16_F"\n", q->tot_len)); + /* set UDP message length in UDP header */ + chklen_hdr = chklen = pcb->chksum_len_tx; + if ((chklen < sizeof(struct udp_hdr)) || (chklen > q->tot_len)) { + if (chklen != 0) { + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE pcb->chksum_len is illegal: %"U16_F"\n", chklen)); + } + /* For UDP-Lite, checksum length of 0 means checksum + over the complete packet. (See RFC 3828 chap. 3.1) + At least the UDP-Lite header must be covered by the + checksum, therefore, if chksum_len has an illegal + value, we generate the checksum over the complete + packet to be safe. 
*/ + chklen_hdr = 0; + chklen = q->tot_len; + } + udphdr->len = lwip_htons(chklen_hdr); + /* calculate checksum */ +#if CHECKSUM_GEN_UDP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) { +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + chklen = UDP_HLEN; + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + udphdr->chksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDPLITE, + q->tot_len, chklen, src_ip, dst_ip); +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + u32_t acc; + acc = udphdr->chksum + (u16_t)~(chksum); + udphdr->chksum = FOLD_U32T(acc); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + + /* chksum zero must become 0xffff, as zero means 'no checksum' */ + if (udphdr->chksum == 0x0000) { + udphdr->chksum = 0xffff; + } + } +#endif /* CHECKSUM_GEN_UDP */ + + ip_proto = IP_PROTO_UDPLITE; + } else +#endif /* LWIP_UDPLITE */ + { /* UDP */ + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP packet length %"U16_F"\n", q->tot_len)); + udphdr->len = lwip_htons(q->tot_len); + /* calculate checksum */ +#if CHECKSUM_GEN_UDP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) { + /* Checksum is mandatory over IPv6. */ + if (IP_IS_V6(dst_ip) || (pcb->flags & UDP_FLAGS_NOCHKSUM) == 0) { + u16_t udpchksum; +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + u32_t acc; + udpchksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDP, + q->tot_len, UDP_HLEN, src_ip, dst_ip); + acc = udpchksum + (u16_t)~(chksum); + udpchksum = FOLD_U32T(acc); + } else +#endif /* LWIP_CHECKSUM_ON_COPY */ + { + udpchksum = ip_chksum_pseudo(q, IP_PROTO_UDP, q->tot_len, + src_ip, dst_ip); + } + + /* chksum zero must become 0xffff, as zero means 'no checksum' */ + if (udpchksum == 0x0000) { + udpchksum = 0xffff; + } + udphdr->chksum = udpchksum; + } + } +#endif /* CHECKSUM_GEN_UDP */ + ip_proto = IP_PROTO_UDP; + } + + /* Determine TTL to use */ +#if LWIP_MULTICAST_TX_OPTIONS + ttl = (ip_addr_ismulticast(dst_ip) ? udp_get_multicast_ttl(pcb) : pcb->ttl); +#else /* LWIP_MULTICAST_TX_OPTIONS */ + ttl = pcb->ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP checksum 0x%04"X16_F"\n", udphdr->chksum)); + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: ip_output_if (,,,,0x%02"X16_F",)\n", (u16_t)ip_proto)); + /* output to IP */ + NETIF_SET_HWADDRHINT(netif, &(pcb->addr_hint)); + err = ip_output_if_src(q, src_ip, dst_ip, ttl, pcb->tos, ip_proto, netif); + NETIF_SET_HWADDRHINT(netif, NULL); + + /* @todo: must this be increased even if error occurred? */ + MIB2_STATS_INC(mib2.udpoutdatagrams); + + /* did we chain a separate header pbuf earlier? */ + if (q != p) { + /* free the header pbuf */ + pbuf_free(q); + q = NULL; + /* p is still referenced by the caller, and will live on */ + } + + UDP_STATS_INC(udp.xmit); + return err; +} + +/** + * @ingroup udp_raw + * Bind an UDP PCB. + * + * @param pcb UDP PCB to be bound with a local address ipaddr and port. + * @param ipaddr local IP address to bind with. Use IP4_ADDR_ANY to + * bind to all local interfaces. + * @param port local UDP port to bind with. Use 0 to automatically bind + * to a random port between UDP_LOCAL_PORT_RANGE_START and + * UDP_LOCAL_PORT_RANGE_END. + * + * ipaddr & port are expected to be in the same byte order as in the pcb. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_USE. The specified ipaddr and port are already bound to by + * another UDP PCB. 
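+ *
+ * Example (illustrative sketch, not part of the lwIP sources):
+ *
+ *   struct udp_pcb *pcb = udp_new();
+ *   if ((pcb != NULL) && (udp_bind(pcb, IP4_ADDR_ANY, 7) == ERR_OK)) {
+ *     // pcb now receives datagrams sent to local port 7 on any interface
+ *   }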
+ *
+ * @see udp_disconnect()
+ */
+err_t
+udp_bind(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port)
+{
+  struct udp_pcb *ipcb;
+  u8_t rebind;
+
+#if LWIP_IPV4
+  /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */
+  if (ipaddr == NULL) {
+    ipaddr = IP4_ADDR_ANY;
+  }
+#endif /* LWIP_IPV4 */
+
+  /* still need to check for ipaddr == NULL in IPv6 only case */
+  if ((pcb == NULL) || (ipaddr == NULL)) {
+    return ERR_VAL;
+  }
+
+  LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_bind(ipaddr = "));
+  ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_TRACE, ipaddr);
+  LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, (", port = %"U16_F")\n", port));
+
+  rebind = 0;
+  /* Check for double bind and rebind of the same pcb */
+  for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) {
+    /* is this UDP PCB already on active list? */
+    if (pcb == ipcb) {
+      rebind = 1;
+      break;
+    }
+  }
+
+  /* no port specified? */
+  if (port == 0) {
+    port = udp_new_port();
+    if (port == 0) {
+      /* no more ports available in local range */
+      LWIP_DEBUGF(UDP_DEBUG, ("udp_bind: out of free UDP ports\n"));
+      return ERR_USE;
+    }
+  } else {
+    for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) {
+      if (pcb != ipcb) {
+        /* By default, we don't allow binding to a port that any other udp
+           PCB is already bound to, unless *all* PCBs with that port have the
+           REUSEADDR flag set. */
+#if SO_REUSE
+        if (!ip_get_option(pcb, SOF_REUSEADDR) ||
+            !ip_get_option(ipcb, SOF_REUSEADDR))
+#endif /* SO_REUSE */
+        {
+          /* port matches that of PCB in list and REUSEADDR not set -> reject */
+          if ((ipcb->local_port == port) &&
+              /* IP address matches? */
+              ip_addr_cmp(&ipcb->local_ip, ipaddr)) {
+            /* other PCB already binds to this local IP and port */
+            LWIP_DEBUGF(UDP_DEBUG,
+                        ("udp_bind: local port %"U16_F" already bound by another pcb\n", port));
+            return ERR_USE;
+          }
+        }
+      }
+    }
+  }
+
+  ip_addr_set_ipaddr(&pcb->local_ip, ipaddr);
+
+  pcb->local_port = port;
+  mib2_udp_bind(pcb);
+  /* pcb not active yet? */
+  if (rebind == 0) {
+    /* place the PCB on the active list if not already there */
+    pcb->next = udp_pcbs;
+    udp_pcbs = pcb;
+  }
+  LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_bind: bound to "));
+  ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, &pcb->local_ip);
+  LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->local_port));
+  return ERR_OK;
+}
+
+/**
+ * @ingroup udp_raw
+ * Connect an UDP PCB.
+ *
+ * This will associate the UDP PCB with the remote address.
+ *
+ * @param pcb UDP PCB to be connected with remote address ipaddr and port.
+ * @param ipaddr remote IP address to connect with.
+ * @param port remote UDP port to connect with.
+ *
+ * @return lwIP error code
+ *
+ * ipaddr & port are expected to be in the same byte order as in the pcb.
+ *
+ * The udp pcb is bound to a random local port if not already bound.
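+ *
+ * Example (illustrative sketch, not part of the lwIP sources; the peer
+ * address 192.168.1.10 and the pbuf 'q' are hypothetical):
+ *
+ *   ip_addr_t server;
+ *   IP_ADDR4(&server, 192, 168, 1, 10);
+ *   if (udp_connect(pcb, &server, 5000) == ERR_OK) {
+ *     udp_send(pcb, q);                 // uses the stored remote ip/port
+ *   }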
+ * + * @see udp_disconnect() + */ +err_t +udp_connect(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + struct udp_pcb *ipcb; + + if ((pcb == NULL) || (ipaddr == NULL)) { + return ERR_VAL; + } + + if (pcb->local_port == 0) { + err_t err = udp_bind(pcb, &pcb->local_ip, pcb->local_port); + if (err != ERR_OK) { + return err; + } + } + + ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr); + pcb->remote_port = port; + pcb->flags |= UDP_FLAGS_CONNECTED; + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_connect: connected to ")); + ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + &pcb->remote_ip); + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->remote_port)); + + /* Insert UDP PCB into the list of active UDP PCBs. */ + for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { + if (pcb == ipcb) { + /* already on the list, just return */ + return ERR_OK; + } + } + /* PCB not yet on the list, add PCB now */ + pcb->next = udp_pcbs; + udp_pcbs = pcb; + return ERR_OK; +} + +/** + * @ingroup udp_raw + * Disconnect a UDP PCB + * + * @param pcb the udp pcb to disconnect. + */ +void +udp_disconnect(struct udp_pcb *pcb) +{ + /* reset remote address association */ +#if LWIP_IPV4 && LWIP_IPV6 + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { + ip_addr_copy(pcb->remote_ip, *IP_ANY_TYPE); + } else { +#endif + ip_addr_set_any(IP_IS_V6_VAL(pcb->remote_ip), &pcb->remote_ip); +#if LWIP_IPV4 && LWIP_IPV6 + } +#endif + pcb->remote_port = 0; + /* mark PCB as unconnected */ + pcb->flags &= ~UDP_FLAGS_CONNECTED; +} + +/** + * @ingroup udp_raw + * Set a receive callback for a UDP PCB + * + * This callback will be called when receiving a datagram for the pcb. + * + * @param pcb the pcb for which to set the recv callback + * @param recv function pointer of the callback function + * @param recv_arg additional argument to pass to the callback function + */ +void +udp_recv(struct udp_pcb *pcb, udp_recv_fn recv, void *recv_arg) +{ + /* remember recv() callback and user data */ + pcb->recv = recv; + pcb->recv_arg = recv_arg; +} + +/** + * @ingroup udp_raw + * Remove an UDP PCB. + * + * @param pcb UDP PCB to be removed. The PCB is removed from the list of + * UDP PCB's and the data structure is freed from memory. + * + * @see udp_new() + */ +void +udp_remove(struct udp_pcb *pcb) +{ + struct udp_pcb *pcb2; + + mib2_udp_unbind(pcb); + /* pcb to be removed is first in list? */ + if (udp_pcbs == pcb) { + /* make list start at 2nd pcb */ + udp_pcbs = udp_pcbs->next; + /* pcb not 1st in list */ + } else { + for (pcb2 = udp_pcbs; pcb2 != NULL; pcb2 = pcb2->next) { + /* find pcb in udp_pcbs list */ + if (pcb2->next != NULL && pcb2->next == pcb) { + /* remove pcb from list */ + pcb2->next = pcb->next; + break; + } + } + } + memp_free(MEMP_UDP_PCB, pcb); +} + +/** + * @ingroup udp_raw + * Create a UDP PCB. + * + * @return The UDP PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @see udp_remove() + */ +struct udp_pcb * +udp_new(void) +{ + struct udp_pcb *pcb; + pcb = (struct udp_pcb *)memp_malloc(MEMP_UDP_PCB); + /* could allocate UDP PCB? */ + if (pcb != NULL) { + /* UDP Lite: by initializing to all zeroes, chksum_len is set to 0 + * which means checksum is generated over the whole datagram per default + * (recommended as default by RFC 3828). 
*/ + /* initialize PCB to all zeroes */ + memset(pcb, 0, sizeof(struct udp_pcb)); + pcb->ttl = UDP_TTL; +#if LWIP_MULTICAST_TX_OPTIONS + udp_set_multicast_ttl(pcb, UDP_TTL); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + } + return pcb; +} + +/** + * @ingroup udp_raw + * Create a UDP PCB for specific IP type. + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) packets, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. + * @return The UDP PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @see udp_remove() + */ +struct udp_pcb * +udp_new_ip_type(u8_t type) +{ + struct udp_pcb *pcb; + pcb = udp_new(); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** This function is called from netif.c when address is changed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change + */ +void udp_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr) +{ + struct udp_pcb* upcb; + + if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) { + for (upcb = udp_pcbs; upcb != NULL; upcb = upcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&upcb->local_ip, old_addr)) { + /* The PCB is bound to the old ipaddr and + * is set to bound to the new one instead */ + ip_addr_copy(upcb->local_ip, *new_addr); + } + } + } +} + +#if UDP_DEBUG +/** + * Print UDP header information for debug purposes. + * + * @param udphdr pointer to the udp header in memory. + */ +void +udp_debug_print(struct udp_hdr *udphdr) +{ + LWIP_DEBUGF(UDP_DEBUG, ("UDP header:\n")); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n", + lwip_ntohs(udphdr->src), lwip_ntohs(udphdr->dest))); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | 0x%04"X16_F" | (len, chksum)\n", + lwip_ntohs(udphdr->len), lwip_ntohs(udphdr->chksum))); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); +} +#endif /* UDP_DEBUG */ + +#endif /* LWIP_UDP */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/api.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/api.h new file mode 100644 index 000000000..4b658c668 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/api.h @@ -0,0 +1,400 @@ +/** + * @file + * netconn API (to be used from non-TCPIP threads) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels
+ *
+ */
+#ifndef LWIP_HDR_API_H
+#define LWIP_HDR_API_H
+
+#include "lwip/opt.h"
+
+#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
+/* Note: Netconn API is always available when sockets are enabled -
+ * sockets are implemented on top of them */
+
+#include "lwip/arch.h"
+#include "lwip/netbuf.h"
+#include "lwip/sys.h"
+#include "lwip/ip_addr.h"
+#include "lwip/err.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Throughout this file, IP addresses and port numbers are expected to be in
+ * the same byte order as in the corresponding pcb.
+ */
+
+/* Flags for netconn_write (u8_t) */
+#define NETCONN_NOFLAG    0x00
+#define NETCONN_NOCOPY    0x00 /* Only for source code compatibility */
+#define NETCONN_COPY      0x01
+#define NETCONN_MORE      0x02
+#define NETCONN_DONTBLOCK 0x04
+
+/* Flags for struct netconn.flags (u8_t) */
+/** Should this netconn avoid blocking? */
+#define NETCONN_FLAG_NON_BLOCKING           0x02
+/** Was the last connect action a non-blocking one? */
+#define NETCONN_FLAG_IN_NONBLOCKING_CONNECT 0x04
+/** If a nonblocking write has been rejected before, poll_tcp needs to
+    check if the netconn is writable again */
+#define NETCONN_FLAG_CHECK_WRITESPACE       0x10
+#if LWIP_IPV6
+/** If this flag is set then only IPv6 communication is allowed on the
+    netconn. As per RFC#3493 this feature defaults to OFF allowing
+    dual-stack usage by default.
*/
+#define NETCONN_FLAG_IPV6_V6ONLY            0x20
+#endif /* LWIP_IPV6 */
+
+
+/* Helpers to process several netconn_types by the same code */
+#define NETCONNTYPE_GROUP(t)         ((t)&0xF0)
+#define NETCONNTYPE_DATAGRAM(t)      ((t)&0xE0)
+#if LWIP_IPV6
+#define NETCONN_TYPE_IPV6            0x08
+#define NETCONNTYPE_ISIPV6(t)        (((t)&NETCONN_TYPE_IPV6) != 0)
+#define NETCONNTYPE_ISUDPLITE(t)     (((t)&0xF3) == NETCONN_UDPLITE)
+#define NETCONNTYPE_ISUDPNOCHKSUM(t) (((t)&0xF3) == NETCONN_UDPNOCHKSUM)
+#else /* LWIP_IPV6 */
+#define NETCONNTYPE_ISIPV6(t)        (0)
+#define NETCONNTYPE_ISUDPLITE(t)     ((t) == NETCONN_UDPLITE)
+#define NETCONNTYPE_ISUDPNOCHKSUM(t) ((t) == NETCONN_UDPNOCHKSUM)
+#endif /* LWIP_IPV6 */
+
+/** @ingroup netconn_common
+ * Protocol family and type of the netconn
+ */
+enum netconn_type {
+  NETCONN_INVALID     = 0,
+  /** TCP IPv4 */
+  NETCONN_TCP         = 0x10,
+#if LWIP_IPV6
+  /** TCP IPv6 */
+  NETCONN_TCP_IPV6    = NETCONN_TCP | NETCONN_TYPE_IPV6 /* 0x18 */,
+#endif /* LWIP_IPV6 */
+  /** UDP IPv4 */
+  NETCONN_UDP         = 0x20,
+  /** UDP IPv4 lite */
+  NETCONN_UDPLITE     = 0x21,
+  /** UDP IPv4 no checksum */
+  NETCONN_UDPNOCHKSUM = 0x22,
+
+#if LWIP_IPV6
+  /** UDP IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */
+  NETCONN_UDP_IPV6         = NETCONN_UDP | NETCONN_TYPE_IPV6 /* 0x28 */,
+  /** UDP IPv6 lite (dual-stack by default, unless you call @ref netconn_set_ipv6only) */
+  NETCONN_UDPLITE_IPV6     = NETCONN_UDPLITE | NETCONN_TYPE_IPV6 /* 0x29 */,
+  /** UDP IPv6 no checksum (dual-stack by default, unless you call @ref netconn_set_ipv6only) */
+  NETCONN_UDPNOCHKSUM_IPV6 = NETCONN_UDPNOCHKSUM | NETCONN_TYPE_IPV6 /* 0x2a */,
+#endif /* LWIP_IPV6 */
+
+  /** Raw connection IPv4 */
+  NETCONN_RAW         = 0x40
+#if LWIP_IPV6
+  /** Raw connection IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */
+  , NETCONN_RAW_IPV6  = NETCONN_RAW | NETCONN_TYPE_IPV6 /* 0x48 */
+#endif /* LWIP_IPV6 */
+};
+
+/** Current state of the netconn. Non-TCP netconns are always
+ * in state NETCONN_NONE! */
+enum netconn_state {
+  NETCONN_NONE,
+  NETCONN_WRITE,
+  NETCONN_LISTEN,
+  NETCONN_CONNECT,
+  NETCONN_CLOSE
+};
+
+/** Used to inform the callback function about changes
+ *
+ * Event explanation:
+ *
+ * In the netconn implementation, there are three ways to block a client:
+ *
+ * - accept mbox (sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, 0); in netconn_accept())
+ * - receive mbox (sys_arch_mbox_fetch(&conn->recvmbox, &buf, 0); in netconn_recv_data())
+ * - send queue is full (sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); in lwip_netconn_do_write())
+ *
+ * The events have to be seen as events signaling the state of these mboxes/semaphores. For non-blocking
+ * connections, you need to know in advance whether a call to a netconn function would block or not,
+ * and these events tell you about that.
+ *
+ * RCVPLUS events say: Safe to perform a potentially blocking call once more.
+ * They are counted in sockets - three RCVPLUS events for the accept mbox mean you are safe
+ * to call netconn_accept 3 times without being blocked.
+ * Same thing for the receive mbox.
+ *
+ * RCVMINUS events say: Your call to a possibly blocking function is "acknowledged".
+ * The socket implementation decrements the counter.
+ *
+ * For TX, there is no need to count, it's merely a flag. SENDPLUS means you may send something.
+ * SENDPLUS occurs when enough data was delivered to the peer so netconn_send() can be called again.
+ * A SENDMINUS event occurs when the next call to a netconn_send() would be blocking.
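+ *
+ * Example (illustrative sketch, not part of the lwIP sources): a callback
+ * that tracks how many receive operations are currently safe to issue:
+ *
+ *   static volatile int rcv_ready;
+ *   static void evt_cb(struct netconn *conn, enum netconn_evt evt, u16_t len)
+ *   {
+ *     if (evt == NETCONN_EVT_RCVPLUS)       { rcv_ready++; }
+ *     else if (evt == NETCONN_EVT_RCVMINUS) { rcv_ready--; }
+ *   }
+ *   // conn = netconn_new_with_callback(NETCONN_UDP, evt_cb);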
+ *
+ */
+enum netconn_evt {
+  NETCONN_EVT_RCVPLUS,
+  NETCONN_EVT_RCVMINUS,
+  NETCONN_EVT_SENDPLUS,
+  NETCONN_EVT_SENDMINUS,
+  NETCONN_EVT_ERROR
+};
+
+#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD)
+/** Used for netconn_join_leave_group() */
+enum netconn_igmp {
+  NETCONN_JOIN,
+  NETCONN_LEAVE
+};
+#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */
+
+#if LWIP_DNS
+/* Used for netconn_gethostbyname_addrtype(), these should match the DNS_ADDRTYPE defines in dns.h */
+#define NETCONN_DNS_DEFAULT   NETCONN_DNS_IPV4_IPV6
+#define NETCONN_DNS_IPV4      0
+#define NETCONN_DNS_IPV6      1
+#define NETCONN_DNS_IPV4_IPV6 2 /* try to resolve IPv4 first, try IPv6 if IPv4 fails only */
+#define NETCONN_DNS_IPV6_IPV4 3 /* try to resolve IPv6 first, try IPv4 if IPv6 fails only */
+#endif /* LWIP_DNS */
+
+/* forward-declare some structs to avoid including their headers */
+struct ip_pcb;
+struct tcp_pcb;
+struct udp_pcb;
+struct raw_pcb;
+struct netconn;
+struct api_msg;
+
+/** A callback prototype to inform about events for a netconn */
+typedef void (* netconn_callback)(struct netconn *, enum netconn_evt, u16_t len);
+
+/** A netconn descriptor */
+struct netconn {
+  /** type of the netconn (TCP, UDP or RAW) */
+  enum netconn_type type;
+  /** current state of the netconn */
+  enum netconn_state state;
+  /** the lwIP internal protocol control block */
+  union {
+    struct ip_pcb  *ip;
+    struct tcp_pcb *tcp;
+    struct udp_pcb *udp;
+    struct raw_pcb *raw;
+  } pcb;
+  /** the last error this netconn had */
+  err_t last_err;
+#if !LWIP_NETCONN_SEM_PER_THREAD
+  /** sem that is used to synchronously execute functions in the core context */
+  sys_sem_t op_completed;
+#endif
+  /** mbox where received packets are stored until they are fetched
+      by the netconn application thread (can grow quite big) */
+  sys_mbox_t recvmbox;
+#if LWIP_TCP
+  /** mbox where new connections are stored until processed
+      by the application thread */
+  sys_mbox_t acceptmbox;
+#endif /* LWIP_TCP */
+  /** only used for socket layer */
+#if LWIP_SOCKET
+  int socket;
+#endif /* LWIP_SOCKET */
+#if LWIP_SO_SNDTIMEO
+  /** timeout to wait for sending data (which means enqueueing data for sending
+      in internal buffers) in milliseconds */
+  s32_t send_timeout;
+#endif /* LWIP_SO_SNDTIMEO */
+#if LWIP_SO_RCVTIMEO
+  /** timeout in milliseconds to wait for new data to be received
+      (or connections to arrive for listening netconns) */
+  int recv_timeout;
+#endif /* LWIP_SO_RCVTIMEO */
+#if LWIP_SO_RCVBUF
+  /** maximum amount of bytes queued in recvmbox
+      not used for TCP: adjust TCP_WND instead! */
+  int recv_bufsize;
+  /** number of bytes currently in recvmbox to be received,
+      tested against recv_bufsize to limit bytes on recvmbox
+      for UDP and RAW, used for FIONREAD */
+  int recv_avail;
+#endif /* LWIP_SO_RCVBUF */
+#if LWIP_SO_LINGER
+  /** values <0 mean linger is disabled, values > 0 are seconds to linger */
+  s16_t linger;
+#endif /* LWIP_SO_LINGER */
+  /** flags holding more netconn-internal state, see NETCONN_FLAG_* defines */
+  u8_t flags;
+#if LWIP_TCP
+  /** TCP: when data passed to netconn_write doesn't fit into the send buffer,
+      this temporarily stores how much is already sent. */
+  size_t write_offset;
+  /** TCP: when data passed to netconn_write doesn't fit into the send buffer,
+      this temporarily stores the message.
+      Also used during connect and close.
*/
+  struct api_msg *current_msg;
+#endif /* LWIP_TCP */
+  /** A callback function that is informed about events for this netconn */
+  netconn_callback callback;
+};
+
+/** Register a network connection event */
+#define API_EVENT(c,e,l) if (c->callback) {         \
+                           (*c->callback)(c, e, l); \
+                         }
+
+/** Set conn->last_err to err but don't overwrite fatal errors */
+#define NETCONN_SET_SAFE_ERR(conn, err) do { if ((conn) != NULL) { \
+  SYS_ARCH_DECL_PROTECT(netconn_set_safe_err_lev); \
+  SYS_ARCH_PROTECT(netconn_set_safe_err_lev); \
+  if (!ERR_IS_FATAL((conn)->last_err)) { \
+    (conn)->last_err = err; \
+  } \
+  SYS_ARCH_UNPROTECT(netconn_set_safe_err_lev); \
+}} while(0);
+
+/* Network connection functions: */
+
+/** @ingroup netconn_common
+ * Create new netconn connection
+ * @param t @ref netconn_type */
+#define netconn_new(t)                  netconn_new_with_proto_and_callback(t, 0, NULL)
+#define netconn_new_with_callback(t, c) netconn_new_with_proto_and_callback(t, 0, c)
+struct netconn *netconn_new_with_proto_and_callback(enum netconn_type t, u8_t proto,
+                                                    netconn_callback callback);
+err_t   netconn_delete(struct netconn *conn);
+/** Get the type of a netconn (as enum netconn_type). */
+#define netconn_type(conn) (conn->type)
+
+err_t   netconn_getaddr(struct netconn *conn, ip_addr_t *addr,
+                        u16_t *port, u8_t local);
+/** @ingroup netconn_common */
+#define netconn_peer(c,i,p) netconn_getaddr(c,i,p,0)
+/** @ingroup netconn_common */
+#define netconn_addr(c,i,p) netconn_getaddr(c,i,p,1)
+
+err_t   netconn_bind(struct netconn *conn, const ip_addr_t *addr, u16_t port);
+err_t   netconn_connect(struct netconn *conn, const ip_addr_t *addr, u16_t port);
+err_t   netconn_disconnect (struct netconn *conn);
+err_t   netconn_listen_with_backlog(struct netconn *conn, u8_t backlog);
+/** @ingroup netconn_tcp */
+#define netconn_listen(conn) netconn_listen_with_backlog(conn, TCP_DEFAULT_LISTEN_BACKLOG)
+err_t   netconn_accept(struct netconn *conn, struct netconn **new_conn);
+err_t   netconn_recv(struct netconn *conn, struct netbuf **new_buf);
+err_t   netconn_recv_tcp_pbuf(struct netconn *conn, struct pbuf **new_buf);
+err_t   netconn_sendto(struct netconn *conn, struct netbuf *buf,
+                       const ip_addr_t *addr, u16_t port);
+err_t   netconn_send(struct netconn *conn, struct netbuf *buf);
+err_t   netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size,
+                             u8_t apiflags, size_t *bytes_written);
+/** @ingroup netconn_tcp */
+#define netconn_write(conn, dataptr, size, apiflags) \
+        netconn_write_partly(conn, dataptr, size, apiflags, NULL)
+err_t   netconn_close(struct netconn *conn);
+err_t   netconn_shutdown(struct netconn *conn, u8_t shut_rx, u8_t shut_tx);
+
+#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD)
+err_t   netconn_join_leave_group(struct netconn *conn, const ip_addr_t *multiaddr,
+                                 const ip_addr_t *netif_addr, enum netconn_igmp join_or_leave);
+#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */
+#if LWIP_DNS
+#if LWIP_IPV4 && LWIP_IPV6
+err_t   netconn_gethostbyname_addrtype(const char *name, ip_addr_t *addr, u8_t dns_addrtype);
+#define netconn_gethostbyname(name, addr) netconn_gethostbyname_addrtype(name, addr, NETCONN_DNS_DEFAULT)
+#else /* LWIP_IPV4 && LWIP_IPV6 */
+err_t   netconn_gethostbyname(const char *name, ip_addr_t *addr);
+#define netconn_gethostbyname_addrtype(name, addr, dns_addrtype) netconn_gethostbyname(name, addr)
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+#endif /* LWIP_DNS */
+
+#define netconn_err(conn)          ((conn)->last_err)
+#define netconn_recv_bufsize(conn) ((conn)->recv_bufsize)
+
+/** Set the blocking
status of netconn calls (@todo: write/send is missing) */ +#define netconn_set_nonblocking(conn, val) do { if(val) { \ + (conn)->flags |= NETCONN_FLAG_NON_BLOCKING; \ +} else { \ + (conn)->flags &= ~ NETCONN_FLAG_NON_BLOCKING; }} while(0) +/** Get the blocking status of netconn calls (@todo: write/send is missing) */ +#define netconn_is_nonblocking(conn) (((conn)->flags & NETCONN_FLAG_NON_BLOCKING) != 0) + +#if LWIP_IPV6 +/** @ingroup netconn_common + * TCP: Set the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY) + */ +#define netconn_set_ipv6only(conn, val) do { if(val) { \ + (conn)->flags |= NETCONN_FLAG_IPV6_V6ONLY; \ +} else { \ + (conn)->flags &= ~ NETCONN_FLAG_IPV6_V6ONLY; }} while(0) +/** @ingroup netconn_common + * TCP: Get the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY) + */ +#define netconn_get_ipv6only(conn) (((conn)->flags & NETCONN_FLAG_IPV6_V6ONLY) != 0) +#endif /* LWIP_IPV6 */ + +#if LWIP_SO_SNDTIMEO +/** Set the send timeout in milliseconds */ +#define netconn_set_sendtimeout(conn, timeout) ((conn)->send_timeout = (timeout)) +/** Get the send timeout in milliseconds */ +#define netconn_get_sendtimeout(conn) ((conn)->send_timeout) +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO +/** Set the receive timeout in milliseconds */ +#define netconn_set_recvtimeout(conn, timeout) ((conn)->recv_timeout = (timeout)) +/** Get the receive timeout in milliseconds */ +#define netconn_get_recvtimeout(conn) ((conn)->recv_timeout) +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF +/** Set the receive buffer in bytes */ +#define netconn_set_recvbufsize(conn, recvbufsize) ((conn)->recv_bufsize = (recvbufsize)) +/** Get the receive buffer in bytes */ +#define netconn_get_recvbufsize(conn) ((conn)->recv_bufsize) +#endif /* LWIP_SO_RCVBUF*/ + +#if LWIP_NETCONN_SEM_PER_THREAD +void netconn_thread_init(void); +void netconn_thread_cleanup(void); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define netconn_thread_init() +#define netconn_thread_cleanup() +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#endif /* LWIP_HDR_API_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h new file mode 100644 index 000000000..0283a0b99 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_APPS_FS_H +#define LWIP_HDR_APPS_FS_H + +#include "httpd_opts.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define FS_READ_EOF -1 +#define FS_READ_DELAYED -2 + +#if HTTPD_PRECALCULATED_CHECKSUM +struct fsdata_chksum { + u32_t offset; + u16_t chksum; + u16_t len; +}; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ + +#define FS_FILE_FLAGS_HEADER_INCLUDED 0x01 +#define FS_FILE_FLAGS_HEADER_PERSISTENT 0x02 + +struct fs_file { + const char *data; + int len; + int index; + void *pextension; +#if HTTPD_PRECALCULATED_CHECKSUM + const struct fsdata_chksum *chksum; + u16_t chksum_count; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ + u8_t flags; +#if LWIP_HTTPD_CUSTOM_FILES + u8_t is_custom_file; +#endif /* LWIP_HTTPD_CUSTOM_FILES */ +#if LWIP_HTTPD_FILE_STATE + void *state; +#endif /* LWIP_HTTPD_FILE_STATE */ +}; + +#if LWIP_HTTPD_FS_ASYNC_READ +typedef void (*fs_wait_cb)(void *arg); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ + +err_t fs_open(struct fs_file *file, const char *name); +void fs_close(struct fs_file *file); +#if LWIP_HTTPD_DYNAMIC_FILE_READ +#if LWIP_HTTPD_FS_ASYNC_READ +int fs_read_async(struct fs_file *file, char *buffer, int count, fs_wait_cb callback_fn, void *callback_arg); +#else /* LWIP_HTTPD_FS_ASYNC_READ */ +int fs_read(struct fs_file *file, char *buffer, int count); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ +#endif /* LWIP_HTTPD_DYNAMIC_FILE_READ */ +#if LWIP_HTTPD_FS_ASYNC_READ +int fs_is_file_ready(struct fs_file *file, fs_wait_cb callback_fn, void *callback_arg); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ +int fs_bytes_left(struct fs_file *file); + +#if LWIP_HTTPD_FILE_STATE +/** This user-defined function is called when a file is opened. */ +void *fs_state_init(struct fs_file *file, const char *name); +/** This user-defined function is called when a file is closed. */ +void fs_state_free(struct fs_file *file, void *state); +#endif /* #if LWIP_HTTPD_FILE_STATE */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_FS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h new file mode 100644 index 000000000..57831dd95 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h @@ -0,0 +1,236 @@ +/** + * @file + * HTTP server + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + * This version of the file has been modified by Texas Instruments to offer + * simple server-side-include (SSI) and Common Gateway Interface (CGI) + * capability. + */ + +#ifndef LWIP_HDR_APPS_HTTPD_H +#define LWIP_HDR_APPS_HTTPD_H + +#include "httpd_opts.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_HTTPD_CGI + +/* + * Function pointer for a CGI script handler. + * + * This function is called each time the HTTPD server is asked for a file + * whose name was previously registered as a CGI function using a call to + * http_set_cgi_handler. The iIndex parameter provides the index of the + * CGI within the ppcURLs array passed to http_set_cgi_handler. Parameters + * pcParam and pcValue provide access to the parameters provided along with + * the URI. iNumParams provides a count of the entries in the pcParam and + * pcValue arrays. Each entry in the pcParam array contains the name of a + * parameter with the corresponding entry in the pcValue array containing the + * value for that parameter. Note that pcParam may contain multiple elements + * with the same name if, for example, a multi-selection list control is used + * in the form generating the data. + * + * The function should return a pointer to a character string which is the + * path and filename of the response that is to be sent to the connected + * browser, for example "/thanks.htm" or "/response/error.ssi". + * + * The maximum number of parameters that will be passed to this function via + * iNumParams is defined by LWIP_HTTPD_MAX_CGI_PARAMETERS. Any parameters in the incoming + * HTTP request above this number will be discarded. + * + * Requests intended for use by this CGI mechanism must be sent using the GET + * method (which encodes all parameters within the URI rather than in a block + * later in the request). Attempts to use the POST method will result in the + * request being ignored. + * + */ +typedef const char *(*tCGIHandler)(int iIndex, int iNumParams, char *pcParam[], + char *pcValue[]); + +/* + * Structure defining the base filename (URL) of a CGI and the associated + * function which is to be called when that URL is requested. 
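+ *
+ * Example (illustrative sketch, not part of the lwIP sources; the URL and
+ * handler name are hypothetical):
+ *
+ *   static const char *leds_cgi(int iIndex, int iNumParams,
+ *                               char *pcParam[], char *pcValue[]);
+ *   static const tCGI cgi_table[] = { { "/leds.cgi", leds_cgi } };
+ *   // http_set_cgi_handlers(cgi_table, 1);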
+ */
+typedef struct
+{
+  const char *pcCGIName;
+  tCGIHandler pfnCGIHandler;
+} tCGI;
+
+void http_set_cgi_handlers(const tCGI *pCGIs, int iNumHandlers);
+
+#endif /* LWIP_HTTPD_CGI */
+
+#if LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI
+
+#if LWIP_HTTPD_CGI_SSI
+/** Define this generic CGI handler in your application.
+ * It is called once for every URI with parameters.
+ * The parameters can be stored to
+ */
+extern void httpd_cgi_handler(const char* uri, int iNumParams, char **pcParam, char **pcValue
+#if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE
+                              , void *connection_state
+#endif /* LWIP_HTTPD_FILE_STATE */
+                              );
+#endif /* LWIP_HTTPD_CGI_SSI */
+
+#endif /* LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI */
+
+#if LWIP_HTTPD_SSI
+
+/*
+ * Function pointer for the SSI tag handler callback.
+ *
+ * This function will be called each time the HTTPD server detects a tag of the
+ * form <!--#name--> in a .shtml, .ssi or .shtm file where "name" appears as
+ * one of the tags supplied to http_set_ssi_handler in the ppcTags array. The
+ * returned insert string, which will be appended after the string
+ * "<!--#name-->" in the file sent back to the client, should be written to
+ * pointer pcInsert. iInsertLen contains the size of the buffer pointed to by
+ * pcInsert. The iIndex parameter provides the zero-based index of the tag as
+ * found in the ppcTags array and identifies the tag that is to be processed.
+ *
+ * The handler returns the number of characters written to pcInsert excluding
+ * any terminating NULL or a negative number to indicate a failure (tag not
+ * recognized, for example).
+ *
+ * Note that the behavior of this SSI mechanism is somewhat different from the
+ * "normal" SSI processing as found in, for example, the Apache web server. In
+ * this case, the inserted text is appended following the SSI tag rather than
+ * replacing the tag entirely. This allows for an implementation that does not
+ * require significant additional buffering of output data yet which will still
+ * offer usable SSI functionality. One downside to this approach is when
+ * attempting to use SSI within JavaScript. The SSI tag is structured to
+ * resemble an HTML comment but this syntax does not constitute a comment
+ * within JavaScript and, hence, leaving the tag in place will result in
+ * problems in these cases. To work around this, any SSI tag which needs to
+ * output JavaScript code must do so in an encapsulated way, sending the whole
+ * HTML section as a single include.
+ */
+typedef u16_t (*tSSIHandler)(
+#if LWIP_HTTPD_SSI_RAW
+                             const char* ssi_tag_name,
+#else /* LWIP_HTTPD_SSI_RAW */
+                             int iIndex,
+#endif /* LWIP_HTTPD_SSI_RAW */
+                             char *pcInsert, int iInsertLen
+#if LWIP_HTTPD_SSI_MULTIPART
+                             , u16_t current_tag_part, u16_t *next_tag_part
+#endif /* LWIP_HTTPD_SSI_MULTIPART */
+#if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE
+                             , void *connection_state
+#endif /* LWIP_HTTPD_FILE_STATE */
+                             );
+
+/** Set the SSI handler function
+ * (if LWIP_HTTPD_SSI_RAW==1, only the first argument is used)
+ */
+void http_set_ssi_handler(tSSIHandler pfnSSIHandler,
+                          const char **ppcTags, int iNumTags);
+
+/** For LWIP_HTTPD_SSI_RAW==1, return this to indicate the tag is unknown.
+ * In this case, the webserver writes a warning into the page.
+ * You can also just return 0 to write nothing for unknown tags.
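+ *
+ * Example (illustrative sketch, not part of the lwIP sources), for
+ * LWIP_HTTPD_SSI_RAW==1 with a hypothetical "uptime" tag (assumes
+ * <stdio.h> and <string.h>):
+ *
+ *   static u16_t ssi_raw_handler(const char *tag, char *pcInsert, int iInsertLen)
+ *   {
+ *     if (!strcmp(tag, "uptime")) {
+ *       return (u16_t)snprintf(pcInsert, iInsertLen, "%lu",
+ *                              (unsigned long)sys_now());
+ *     }
+ *     return HTTPD_SSI_TAG_UNKNOWN;   // let the server emit a warning
+ *   }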
+ */ +#define HTTPD_SSI_TAG_UNKNOWN 0xFFFF + +#endif /* LWIP_HTTPD_SSI */ + +#if LWIP_HTTPD_SUPPORT_POST + +/* These functions must be implemented by the application */ + +/** Called when a POST request has been received. The application can decide + * whether to accept it or not. + * + * @param connection Unique connection identifier, valid until httpd_post_end + * is called. + * @param uri The HTTP header URI receiving the POST request. + * @param http_request The raw HTTP request (the first packet, normally). + * @param http_request_len Size of 'http_request'. + * @param content_len Content-Length from HTTP header. + * @param response_uri Filename of response file, to be filled when denying the + * request + * @param response_uri_len Size of the 'response_uri' buffer. + * @param post_auto_wnd Set this to 0 to let the callback code handle window + * updates by calling 'httpd_post_data_recved' (to throttle rx speed) + * default is 1 (httpd handles window updates automatically) + * @return ERR_OK: Accept the POST request, data may be passed in + * another err_t: Deny the POST request, send back 'bad request'. + */ +err_t httpd_post_begin(void *connection, const char *uri, const char *http_request, + u16_t http_request_len, int content_len, char *response_uri, + u16_t response_uri_len, u8_t *post_auto_wnd); + +/** Called for each pbuf of data that has been received for a POST. + * ATTENTION: The application is responsible for freeing the pbufs passed in! + * + * @param connection Unique connection identifier. + * @param p Received data. + * @return ERR_OK: Data accepted. + * another err_t: Data denied, http_post_get_response_uri will be called. + */ +err_t httpd_post_receive_data(void *connection, struct pbuf *p); + +/** Called when all data is received or when the connection is closed. + * The application must return the filename/URI of a file to send in response + * to this POST request. If the response_uri buffer is untouched, a 404 + * response is returned. + * + * @param connection Unique connection identifier. + * @param response_uri Filename of response file, to be filled when denying the request + * @param response_uri_len Size of the 'response_uri' buffer. + */ +void httpd_post_finished(void *connection, char *response_uri, u16_t response_uri_len); + +#if LWIP_HTTPD_POST_MANUAL_WND +void httpd_post_data_recved(void *connection, u16_t recved_len); +#endif /* LWIP_HTTPD_POST_MANUAL_WND */ + +#endif /* LWIP_HTTPD_SUPPORT_POST */ + +void httpd_init(void); + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HTTPD_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h new file mode 100644 index 000000000..7d085c437 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h @@ -0,0 +1,322 @@ +/** + * @file + * HTTP server options list + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + * This version of the file has been modified by Texas Instruments to offer + * simple server-side-include (SSI) and Common Gateway Interface (CGI) + * capability. + */ + +#ifndef LWIP_HDR_APPS_HTTPD_OPTS_H +#define LWIP_HDR_APPS_HTTPD_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup httpd_opts Options + * @ingroup httpd + * @{ + */ + +/** Set this to 1 to support CGI (old style) */ +#if !defined LWIP_HTTPD_CGI || defined __DOXYGEN__ +#define LWIP_HTTPD_CGI 0 +#endif + +/** Set this to 1 to support CGI (new style) */ +#if !defined LWIP_HTTPD_CGI_SSI || defined __DOXYGEN__ +#define LWIP_HTTPD_CGI_SSI 0 +#endif + +/** Set this to 1 to support SSI (Server-Side-Includes) */ +#if !defined LWIP_HTTPD_SSI || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI 0 +#endif + +/** Set this to 1 to implement an SSI tag handler callback that gets a const char* + * to the tag (instead of an index into a pre-registered array of known tags) */ +#if !defined LWIP_HTTPD_SSI_RAW || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_RAW 0 +#endif + +/** Set this to 1 to support HTTP POST */ +#if !defined LWIP_HTTPD_SUPPORT_POST || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_POST 0 +#endif + +/* The maximum number of parameters that the CGI handler can be sent. */ +#if !defined LWIP_HTTPD_MAX_CGI_PARAMETERS || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_CGI_PARAMETERS 16 +#endif + +/** LWIP_HTTPD_SSI_MULTIPART==1: SSI handler function is called with 2 more + * arguments indicating a counter for insert strings that are too long to be + * inserted at once: the SSI handler function must then set 'next_tag_part' + * which will be passed back to it in the next call.
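+ *
+ * A sketch of a multipart-aware handler (compiled with
+ * LWIP_HTTPD_SSI_MULTIPART==1; rows[] and num_rows are assumed application
+ * data, not lwIP symbols):
+ *
+ *   static u16_t TableTagHandler(int iIndex, char *pcInsert, int iInsertLen,
+ *                                u16_t current_tag_part, u16_t *next_tag_part)
+ *   {
+ *     // Emit one table row per invocation to stay inside iInsertLen.
+ *     u16_t len = (u16_t)snprintf(pcInsert, (size_t)iInsertLen,
+ *                                 "<tr><td>%s</td></tr>", rows[current_tag_part]);
+ *     if ((u16_t)(current_tag_part + 1) < num_rows) {
+ *       *next_tag_part = current_tag_part + 1; // ask to be called again
+ *     }
+ *     return len;
+ *   }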
*/ +#if !defined LWIP_HTTPD_SSI_MULTIPART || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_MULTIPART 0 +#endif + +/* The maximum length of the string comprising the tag name */ +#if !defined LWIP_HTTPD_MAX_TAG_NAME_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_TAG_NAME_LEN 8 +#endif + +/* The maximum length of string that can be returned to replace any given tag */ +#if !defined LWIP_HTTPD_MAX_TAG_INSERT_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_TAG_INSERT_LEN 192 +#endif + +#if !defined LWIP_HTTPD_POST_MANUAL_WND || defined __DOXYGEN__ +#define LWIP_HTTPD_POST_MANUAL_WND 0 +#endif + +/** This string is passed in the HTTP header as "Server: " */ +#if !defined HTTPD_SERVER_AGENT || defined __DOXYGEN__ +#define HTTPD_SERVER_AGENT "lwIP/" LWIP_VERSION_STRING " (http://savannah.nongnu.org/projects/lwip)" +#endif + +/** Set this to 1 if you want to include code that creates HTTP headers + * at runtime. Default is off: HTTP headers are then created statically + * by the makefsdata tool. Static headers mean smaller code size, but + * the (readonly) fsdata will grow a bit as every file includes the HTTP + * header. */ +#if !defined LWIP_HTTPD_DYNAMIC_HEADERS || defined __DOXYGEN__ +#define LWIP_HTTPD_DYNAMIC_HEADERS 0 +#endif + +#if !defined HTTPD_DEBUG || defined __DOXYGEN__ +#define HTTPD_DEBUG LWIP_DBG_OFF +#endif + +/** Set this to 1 to use a memp pool for allocating + * struct http_state instead of the heap. + */ +#if !defined HTTPD_USE_MEM_POOL || defined __DOXYGEN__ +#define HTTPD_USE_MEM_POOL 0 +#endif + +/** The server port for HTTPD to use */ +#if !defined HTTPD_SERVER_PORT || defined __DOXYGEN__ +#define HTTPD_SERVER_PORT 80 +#endif + +/** Maximum retries before the connection is aborted/closed. + * - number of times pcb->poll is called -> default is 4*500ms = 2s; + * - reset when pcb->sent is called + */ +#if !defined HTTPD_MAX_RETRIES || defined __DOXYGEN__ +#define HTTPD_MAX_RETRIES 4 +#endif + +/** The poll delay is X*500ms */ +#if !defined HTTPD_POLL_INTERVAL || defined __DOXYGEN__ +#define HTTPD_POLL_INTERVAL 4 +#endif + +/** Priority for tcp pcbs created by HTTPD (very low by default). + * Lower priorities get killed first when running out of memory. + */ +#if !defined HTTPD_TCP_PRIO || defined __DOXYGEN__ +#define HTTPD_TCP_PRIO TCP_PRIO_MIN +#endif + +/** Set this to 1 to enable timing each file sent */ +#if !defined LWIP_HTTPD_TIMING || defined __DOXYGEN__ +#define LWIP_HTTPD_TIMING 0 +#endif +/** Set this to LWIP_DBG_ON to enable debug output of the timing measurements */ +#if !defined HTTPD_DEBUG_TIMING || defined __DOXYGEN__ +#define HTTPD_DEBUG_TIMING LWIP_DBG_OFF +#endif + +/** Set this to 1 to show error pages when parsing a request fails instead + of simply closing the connection. */ +#if !defined LWIP_HTTPD_SUPPORT_EXTSTATUS || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_EXTSTATUS 0 +#endif + +/** Set this to 0 to drop support for HTTP/0.9 clients (to save some bytes) */ +#if !defined LWIP_HTTPD_SUPPORT_V09 || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_V09 1 +#endif + +/** Set this to 1 to enable HTTP/1.1 persistent connections. + * ATTENTION: If the generated file system includes HTTP headers, these must + * include the "Connection: keep-alive" header (pass argument "-11" to makefsdata).
+ */ +#if !defined LWIP_HTTPD_SUPPORT_11_KEEPALIVE || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_11_KEEPALIVE 0 +#endif + +/** Set this to 1 to support HTTP requests coming in multiple packets/pbufs */ +#if !defined LWIP_HTTPD_SUPPORT_REQUESTLIST || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_REQUESTLIST 1 +#endif + +#if LWIP_HTTPD_SUPPORT_REQUESTLIST +/** Number of rx pbufs to enqueue to parse an incoming request (up to the first + newline) */ +#if !defined LWIP_HTTPD_REQ_QUEUELEN || defined __DOXYGEN__ +#define LWIP_HTTPD_REQ_QUEUELEN 5 +#endif + +/** Number of (TCP payload-) bytes (in pbufs) to enqueue to parse an incoming + request (up to the first double-newline) */ +#if !defined LWIP_HTTPD_REQ_BUFSIZE || defined __DOXYGEN__ +#define LWIP_HTTPD_REQ_BUFSIZE LWIP_HTTPD_MAX_REQ_LENGTH +#endif + +/** Defines the maximum length of an HTTP request line (up to the first CRLF, + copied from pbuf into a global buffer when pbuf or packet queues + are received - otherwise the input pbuf is used directly) */ +#if !defined LWIP_HTTPD_MAX_REQ_LENGTH || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_REQ_LENGTH LWIP_MIN(1023, (LWIP_HTTPD_REQ_QUEUELEN * PBUF_POOL_BUFSIZE)) +#endif +#endif /* LWIP_HTTPD_SUPPORT_REQUESTLIST */ + +/** This is the size of a static buffer used when URIs end with '/'. + * In this buffer, the directory requested is concatenated with all the + * configured default file names. + * Set to 0 to disable checking default filenames on non-root directories. + */ +#if !defined LWIP_HTTPD_MAX_REQUEST_URI_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_REQUEST_URI_LEN 63 +#endif + +/** Maximum length of the filename to send as response to a POST request, + * filled in by the application when a POST is finished. + */ +#if !defined LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN 63 +#endif + +/** Set this to 0 to not send the SSI tag (default is on, so the tag will + * be sent in the HTML page) */ +#if !defined LWIP_HTTPD_SSI_INCLUDE_TAG || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_INCLUDE_TAG 1 +#endif + +/** Set this to 1 to call tcp_abort when tcp_close fails with memory error. + * This can be used to prevent consuming all memory in situations where the + * HTTP server has low priority compared to other communication. */ +#if !defined LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR || defined __DOXYGEN__ +#define LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR 0 +#endif + +/** Set this to 1 to kill the oldest connection when running out of + * memory for 'struct http_state' or 'struct http_ssi_state'. + * ATTENTION: This puts all connections on a linked list, so may be kind of slow. + */ +#if !defined LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED || defined __DOXYGEN__ +#define LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED 0 +#endif + +/** Set this to 1 to send URIs without extension without headers */ +#if !defined LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI || defined __DOXYGEN__ +#define LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI 0 +#endif + +/** Default: Tags are sent from struct http_state and are therefore volatile */ +#if !defined HTTP_IS_TAG_VOLATILE || defined __DOXYGEN__ +#define HTTP_IS_TAG_VOLATILE(ptr) TCP_WRITE_FLAG_COPY +#endif + +/* By default, the httpd is limited to send 2*pcb->mss to keep resource usage low + when http is not an important protocol in the device.
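+
+   For example, to raise this cap on a well-provisioned device, an application
+   could put something like the following in its lwipopts.h (a sketch; 4*MSS
+   is an arbitrary choice, not an lwIP default):
+
+     #define HTTPD_MAX_WRITE_LEN(pcb)  (4 * tcp_mss(pcb))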
*/ +#if !defined HTTPD_LIMIT_SENDING_TO_2MSS || defined __DOXYGEN__ +#define HTTPD_LIMIT_SENDING_TO_2MSS 1 +#endif + +/* Define this to a function that returns the maximum amount of data to enqueue. + The function must have this signature: u16_t fn(struct tcp_pcb* pcb); */ +#if !defined HTTPD_MAX_WRITE_LEN || defined __DOXYGEN__ +#if HTTPD_LIMIT_SENDING_TO_2MSS +#define HTTPD_MAX_WRITE_LEN(pcb) (2 * tcp_mss(pcb)) +#endif +#endif + +/*------------------- FS OPTIONS -------------------*/ + +/** Set this to 1 and provide the functions: + * - "int fs_open_custom(struct fs_file *file, const char *name)" + * Called first for every opened file to allow opening files + * that are not included in fsdata(_custom).c + * - "void fs_close_custom(struct fs_file *file)" + * Called to free resources allocated by fs_open_custom(). + */ +#if !defined LWIP_HTTPD_CUSTOM_FILES || defined __DOXYGEN__ +#define LWIP_HTTPD_CUSTOM_FILES 0 +#endif + +/** Set this to 1 to support fs_read() to dynamically read file data. + * Without this (default=off), only one-block files are supported, + * and the contents must be ready after fs_open(). + */ +#if !defined LWIP_HTTPD_DYNAMIC_FILE_READ || defined __DOXYGEN__ +#define LWIP_HTTPD_DYNAMIC_FILE_READ 0 +#endif + +/** Set this to 1 to include an application state argument per file + * that is opened. This allows keeping state per connection/file. + */ +#if !defined LWIP_HTTPD_FILE_STATE || defined __DOXYGEN__ +#define LWIP_HTTPD_FILE_STATE 0 +#endif + +/** HTTPD_PRECALCULATED_CHECKSUM==1: include precompiled checksums for + * predefined (MSS-sized) chunks of the files to prevent having to calculate + * the checksums at runtime. */ +#if !defined HTTPD_PRECALCULATED_CHECKSUM || defined __DOXYGEN__ +#define HTTPD_PRECALCULATED_CHECKSUM 0 +#endif + +/** LWIP_HTTPD_FS_ASYNC_READ==1: support asynchronous read operations + * (fs_read_async returns FS_READ_DELAYED and calls a callback when finished). + */ +#if !defined LWIP_HTTPD_FS_ASYNC_READ || defined __DOXYGEN__ +#define LWIP_HTTPD_FS_ASYNC_READ 0 +#endif + +/** Set this to 1 to include "fsdata_custom.c" instead of "fsdata.c" for the + * file system (to prevent changing the file included in CVS) */ +#if !defined HTTPD_USE_CUSTOM_FSDATA || defined __DOXYGEN__ +#define HTTPD_USE_CUSTOM_FSDATA 0 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_HTTPD_OPTS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h new file mode 100644 index 000000000..63a0c0c47 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h @@ -0,0 +1,84 @@ +/** + * @file + * lwIP iPerf server implementation + */ + +/* + * Copyright (c) 2014 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission.
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_LWIPERF_H +#define LWIP_HDR_APPS_LWIPERF_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define LWIPERF_TCP_PORT_DEFAULT 5001 + +/** lwIPerf test results */ +enum lwiperf_report_type +{ + /** The server side test is done */ + LWIPERF_TCP_DONE_SERVER, + /** The client side test is done */ + LWIPERF_TCP_DONE_CLIENT, + /** A local error led to test abort */ + LWIPERF_TCP_ABORTED_LOCAL, + /** A data check error led to test abort */ + LWIPERF_TCP_ABORTED_LOCAL_DATAERROR, + /** A transmit error led to test abort */ + LWIPERF_TCP_ABORTED_LOCAL_TXERROR, + /** Remote side aborted the test */ + LWIPERF_TCP_ABORTED_REMOTE +}; + +/** Prototype of a report function that is called when a session is finished. + This report function can show the test results. + @param report_type contains the test result */ +typedef void (*lwiperf_report_fn)(void *arg, enum lwiperf_report_type report_type, + const ip_addr_t* local_addr, u16_t local_port, const ip_addr_t* remote_addr, u16_t remote_port, + u32_t bytes_transferred, u32_t ms_duration, u32_t bandwidth_kbitpsec); + + +void* lwiperf_start_tcp_server(const ip_addr_t* local_addr, u16_t local_port, + lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_server_default(lwiperf_report_fn report_fn, void* report_arg); +void lwiperf_abort(void* lwiperf_session); + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_LWIPERF_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h new file mode 100644 index 000000000..b19f80c0a --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h @@ -0,0 +1,69 @@ +/** + * @file + * MDNS responder + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission.
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ +#ifndef LWIP_HDR_MDNS_H +#define LWIP_HDR_MDNS_H + +#include "lwip/apps/mdns_opts.h" +#include "lwip/netif.h" + +#if LWIP_MDNS_RESPONDER + +enum mdns_sd_proto { + DNSSD_PROTO_UDP = 0, + DNSSD_PROTO_TCP = 1 +}; + +#define MDNS_LABEL_MAXLEN 63 + +struct mdns_host; +struct mdns_service; + +/** Callback function to add text to a reply, called when generating the reply */ +typedef void (*service_get_txt_fn_t)(struct mdns_service *service, void *txt_userdata); + +void mdns_resp_init(void); + +err_t mdns_resp_add_netif(struct netif *netif, const char *hostname, u32_t dns_ttl); +err_t mdns_resp_remove_netif(struct netif *netif); + +err_t mdns_resp_add_service(struct netif *netif, const char *name, const char *service, enum mdns_sd_proto proto, u16_t port, u32_t dns_ttl, service_get_txt_fn_t txt_fn, void *txt_userdata); +err_t mdns_resp_add_service_txtitem(struct mdns_service *service, const char *txt, u8_t txt_len); +void mdns_resp_netif_settings_changed(struct netif *netif); + +#endif /* LWIP_MDNS_RESPONDER */ + +#endif /* LWIP_HDR_MDNS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h new file mode 100644 index 000000000..9f8e5f545 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h @@ -0,0 +1,74 @@ +/** + * @file + * MDNS responder + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ + +#ifndef LWIP_HDR_APPS_MDNS_OPTS_H +#define LWIP_HDR_APPS_MDNS_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup mdns_opts Options + * @ingroup mdns + * @{ + */ + +/** + * LWIP_MDNS_RESPONDER==1: Turn on multicast DNS module. UDP must be available for MDNS + * transport. IGMP is needed for IPv4 multicast. + */ +#ifndef LWIP_MDNS_RESPONDER +#define LWIP_MDNS_RESPONDER 0 +#endif /* LWIP_MDNS_RESPONDER */ + +/** The maximum number of services per netif */ +#ifndef MDNS_MAX_SERVICES +#define MDNS_MAX_SERVICES 1 +#endif + +/** + * MDNS_DEBUG: Enable debugging for multicast DNS. + */ +#ifndef MDNS_DEBUG +#define MDNS_DEBUG LWIP_DBG_OFF +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_MDNS_OPTS_H */ + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h new file mode 100644 index 000000000..9a48b5f32 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h @@ -0,0 +1,66 @@ +/** + * @file + * MDNS responder private definitions + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Erik Ekman + * + */ +#ifndef LWIP_HDR_MDNS_PRIV_H +#define LWIP_HDR_MDNS_PRIV_H + +#include "lwip/apps/mdns_opts.h" +#include "lwip/pbuf.h" + +#if LWIP_MDNS_RESPONDER + +/* Domain struct and methods - visible for unit tests */ + +#define MDNS_DOMAIN_MAXLEN 256 +#define MDNS_READNAME_ERROR 0xFFFF + +struct mdns_domain { + /* Encoded domain name */ + u8_t name[MDNS_DOMAIN_MAXLEN]; + /* Total length of domain name, including zero */ + u16_t length; + /* Set if compression of this domain is not allowed */ + u8_t skip_compression; +}; + +err_t mdns_domain_add_label(struct mdns_domain *domain, const char *label, u8_t len); +u16_t mdns_readname(struct pbuf *p, u16_t offset, struct mdns_domain *domain); +int mdns_domain_eq(struct mdns_domain *a, struct mdns_domain *b); +u16_t mdns_compress_domain(struct pbuf *pbuf, u16_t *offset, struct mdns_domain *domain); + +#endif /* LWIP_MDNS_RESPONDER */ + +#endif /* LWIP_HDR_MDNS_PRIV_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h new file mode 100644 index 000000000..f0f5bab24 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h @@ -0,0 +1,244 @@ +/** + * @file + * MQTT client + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_CLIENT_H +#define LWIP_HDR_APPS_MQTT_CLIENT_H + +#include "lwip/apps/mqtt_opts.h" +#include "lwip/err.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct mqtt_client_t mqtt_client_t; + +/** @ingroup mqtt + * Default MQTT port */ +#define MQTT_PORT 1883 + +/*---------------------------------------------------------------------------------------------- */ +/* Connection with server */ + +/** + * @ingroup mqtt + * Client information and connection parameters */ +struct mqtt_connect_client_info_t { + /** Client identifier, must be set by caller */ + const char *client_id; + /** User name and password, set to NULL if not used */ + const char* client_user; + const char* client_pass; + /** keep alive time in seconds, 0 to disable keep alive functionality*/ + u16_t keep_alive; + /** will topic, set to NULL if will is not to be used, + will_msg, will_qos and will_retain are then ignored */ + const char* will_topic; + const char* will_msg; + u8_t will_qos; + u8_t will_retain; +}; + +/** + * @ingroup mqtt + * Connection status codes */ +typedef enum +{ + MQTT_CONNECT_ACCEPTED = 0, + MQTT_CONNECT_REFUSED_PROTOCOL_VERSION = 1, + MQTT_CONNECT_REFUSED_IDENTIFIER = 2, + MQTT_CONNECT_REFUSED_SERVER = 3, + MQTT_CONNECT_REFUSED_USERNAME_PASS = 4, + MQTT_CONNECT_REFUSED_NOT_AUTHORIZED_ = 5, + MQTT_CONNECT_DISCONNECTED = 256, + MQTT_CONNECT_TIMEOUT = 257 +} mqtt_connection_status_t; + +/** + * @ingroup mqtt + * Function prototype for MQTT connection status callback. Called when the + * client has connected to the server after initiating an MQTT connection attempt by + * calling mqtt_client_connect(), or when the connection is closed by the server or an error occurs + * + * @param client MQTT client itself + * @param arg Additional argument to pass to the callback function + * @param status Connect result code or disconnection notification @see mqtt_connection_status_t + * + */ +typedef void (*mqtt_connection_cb_t)(mqtt_client_t *client, void *arg, mqtt_connection_status_t status); + + +/** + * @ingroup mqtt + * Data callback flags */ +enum { + /** Flag set when last fragment of data arrives in data callback */ + MQTT_DATA_FLAG_LAST = 1 +}; + +/** + * @ingroup mqtt + * Function prototype for MQTT incoming publish data callback function. Called when data + * arrives for a subscribed topic @see mqtt_subscribe + * + * @param arg Additional argument to pass to the callback function + * @param data User data, pointed object, data may not be referenced after callback return; + NULL is passed when all publish data has been delivered + * @param len Length of publish data fragment + * @param flags MQTT_DATA_FLAG_LAST set when this call contains the last part of data from publish message + * + */ +typedef void (*mqtt_incoming_data_cb_t)(void *arg, const u8_t *data, u16_t len, u8_t flags); + + +/** + * @ingroup mqtt + * Function prototype for MQTT incoming publish function. Called when an incoming publish + * arrives for a subscribed topic @see mqtt_subscribe + * + * @param arg Additional argument to pass to the callback function + * @param topic Zero terminated topic text string, topic may not be referenced after callback return + * @param tot_len Total length of publish data, if set to 0 (no publish payload) data callback will not be invoked + */ +typedef void (*mqtt_incoming_publish_cb_t)(void *arg, const char *topic, u32_t tot_len); + + +/** + * @ingroup mqtt + * Function prototype for MQTT request callback.
Called when a subscribe, unsubscribe + * or publish request has completed + * @param arg Pointer to user data supplied when invoking request + * @param err ERR_OK on success + * ERR_TIMEOUT if no response was received within timeout, + * ERR_ABRT if (un)subscribe was denied + */ +typedef void (*mqtt_request_cb_t)(void *arg, err_t err); + + +/** + * Pending request item, binds application callback to pending server requests + */ +struct mqtt_request_t +{ + /** Next item in list, NULL means this is the last in chain, + next pointing at itself means request is unallocated */ + struct mqtt_request_t *next; + /** Callback to upper layer */ + mqtt_request_cb_t cb; + void *arg; + /** MQTT packet identifier */ + u16_t pkt_id; + /** Expire time relative to element before this */ + u16_t timeout_diff; +}; + +/** Ring buffer */ +struct mqtt_ringbuf_t { + u16_t put; + u16_t get; + u8_t buf[MQTT_OUTPUT_RINGBUF_SIZE]; +}; + +/** MQTT client */ +struct mqtt_client_t +{ + /** Timers and timeouts */ + u16_t cyclic_tick; + u16_t keep_alive; + u16_t server_watchdog; + /** Packet identifier generator */ + u16_t pkt_id_seq; + /** Packet identifier of pending incoming publish */ + u16_t inpub_pkt_id; + /** Connection state */ + u8_t conn_state; + struct tcp_pcb *conn; + /** Connection callback */ + void *connect_arg; + mqtt_connection_cb_t connect_cb; + /** Pending requests to server */ + struct mqtt_request_t *pend_req_queue; + struct mqtt_request_t req_list[MQTT_REQ_MAX_IN_FLIGHT]; + void *inpub_arg; + /** Incoming data callback */ + mqtt_incoming_data_cb_t data_cb; + mqtt_incoming_publish_cb_t pub_cb; + /** Input */ + u32_t msg_idx; + u8_t rx_buffer[MQTT_VAR_HEADER_BUFFER_LEN]; + /** Output ring-buffer */ + struct mqtt_ringbuf_t output; +}; + + +/** Connect to server */ +err_t mqtt_client_connect(mqtt_client_t *client, const ip_addr_t *ipaddr, u16_t port, mqtt_connection_cb_t cb, void *arg, + const struct mqtt_connect_client_info_t *client_info); + +/** Disconnect from server */ +void mqtt_disconnect(mqtt_client_t *client); + +/** Create new client */ +mqtt_client_t *mqtt_client_new(void); + +/** Check connection status */ +u8_t mqtt_client_is_connected(mqtt_client_t *client); + +/** Set callback to call for incoming publish */ +void mqtt_set_inpub_callback(mqtt_client_t *client, mqtt_incoming_publish_cb_t, + mqtt_incoming_data_cb_t data_cb, void *arg); + +/** Common function for subscribe and unsubscribe */ +err_t mqtt_sub_unsub(mqtt_client_t *client, const char *topic, u8_t qos, mqtt_request_cb_t cb, void *arg, u8_t sub); + +/** @ingroup mqtt + * Subscribe to a topic */ +#define mqtt_subscribe(client, topic, qos, cb, arg) mqtt_sub_unsub(client, topic, qos, cb, arg, 1) +/** @ingroup mqtt + * Unsubscribe from a topic */ +#define mqtt_unsubscribe(client, topic, cb, arg) mqtt_sub_unsub(client, topic, 0, cb, arg, 0) + + +/** Publish data to topic */ +err_t mqtt_publish(mqtt_client_t *client, const char *topic, const void *payload, u16_t payload_length, u8_t qos, u8_t retain, + mqtt_request_cb_t cb, void *arg); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_CLIENT_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h new file mode 100644 index 000000000..7d078299e --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h @@ -0,0 +1,103 @@ +/** + * @file + * MQTT client options + */ + +/* + * Copyright (c)
2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_OPTS_H +#define LWIP_HDR_APPS_MQTT_OPTS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup mqtt_opts Options + * @ingroup mqtt + * @{ + */ + +/** + * Output ring-buffer size, must be able to fit the largest outgoing publish message (topic + payload) + */ +#ifndef MQTT_OUTPUT_RINGBUF_SIZE +#define MQTT_OUTPUT_RINGBUF_SIZE 256 +#endif + +/** + * Number of bytes in receive buffer, must be at least the size of the longest incoming topic + 8 + * If one wants to avoid fragmented incoming publish, set length to max incoming topic length + max payload length + 8 + */ +#ifndef MQTT_VAR_HEADER_BUFFER_LEN +#define MQTT_VAR_HEADER_BUFFER_LEN 128 +#endif + +/** + * Maximum number of pending subscribe, unsubscribe and publish requests to the server. + */ +#ifndef MQTT_REQ_MAX_IN_FLIGHT +#define MQTT_REQ_MAX_IN_FLIGHT 4 +#endif + +/** + * Seconds between each cyclic timer call. + */ +#ifndef MQTT_CYCLIC_TIMER_INTERVAL +#define MQTT_CYCLIC_TIMER_INTERVAL 5 +#endif + +/** + * Publish, subscribe and unsubscribe request timeout in seconds.
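+ *
+ * Like the other options in this header, this is only a default; it can be
+ * overridden from lwipopts.h, e.g. (a sketch, 10 s being an arbitrary value):
+ *
+ *   #define MQTT_REQ_TIMEOUT 10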
+ */ +#ifndef MQTT_REQ_TIMEOUT +#define MQTT_REQ_TIMEOUT 30 +#endif + +/** + * Seconds for MQTT connect response timeout after sending connect request + */ +#ifndef MQTT_CONNECT_TIMOUT +#define MQTT_CONNECT_TIMOUT 100 +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_OPTS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h new file mode 100644 index 000000000..412660617 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h @@ -0,0 +1,43 @@ +/** + * @file + * NETBIOS name service responder + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ +#ifndef LWIP_HDR_APPS_NETBIOS_H +#define LWIP_HDR_APPS_NETBIOS_H + +#include "lwip/apps/netbiosns_opts.h" + +void netbiosns_init(void); +#ifndef NETBIOS_LWIP_NAME +void netbiosns_set_name(const char* hostname); +#endif +void netbiosns_stop(void); + +#endif /* LWIP_HDR_APPS_NETBIOS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h new file mode 100644 index 000000000..f4d203953 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h @@ -0,0 +1,59 @@ +/** + * @file + * NETBIOS name service responder options + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ +#ifndef LWIP_HDR_APPS_NETBIOS_OPTS_H +#define LWIP_HDR_APPS_NETBIOS_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup netbiosns_opts Options + * @ingroup netbiosns + * @{ + */ + +/** NetBIOS name of lwip device + * This must be uppercase until NETBIOS_STRCMP() is defined to a string + * comparison function that is case insensitive. + * If you want to use the netif's hostname, use this (with LWIP_NETIF_HOSTNAME): + * (ip_current_netif() != NULL ? ip_current_netif()->hostname != NULL ? ip_current_netif()->hostname : "" : "") + * + * If this is not defined, netbiosns_set_name() can be called at runtime to change the name. + */ +#ifdef __DOXYGEN__ +#define NETBIOS_LWIP_NAME "NETBIOSLWIPDEV" +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_NETBIOS_OPTS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h new file mode 100644 index 000000000..5f267be7e --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h @@ -0,0 +1,128 @@ +/** + * @file + * SNMP server main API - start and basic configuration + */ + +/* + * Copyright (c) 2001, 2002 Leon Woestenberg + * Copyright (c) 2001, 2002 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Leon Woestenberg + * Martin Hentschel + * + */ +#ifndef LWIP_HDR_APPS_SNMP_H +#define LWIP_HDR_APPS_SNMP_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/apps/snmp_core.h" + +/** SNMP variable binding descriptor (publicly needed for traps) */ +struct snmp_varbind +{ + /** pointer to next varbind, NULL for last in list */ + struct snmp_varbind *next; + /** pointer to previous varbind, NULL for first in list */ + struct snmp_varbind *prev; + + /** object identifier */ + struct snmp_obj_id oid; + + /** value ASN1 type */ + u8_t type; + /** object value length */ + u16_t value_len; + /** object value */ + void *value; +}; + +/** + * @ingroup snmp_core + * Agent setup, start listening to port 161. + */ +void snmp_init(void); +void snmp_set_mibs(const struct snmp_mib **mibs, u8_t num_mibs); + +void snmp_set_device_enterprise_oid(const struct snmp_obj_id* device_enterprise_oid); +const struct snmp_obj_id* snmp_get_device_enterprise_oid(void); + +void snmp_trap_dst_enable(u8_t dst_idx, u8_t enable); +void snmp_trap_dst_ip_set(u8_t dst_idx, const ip_addr_t *dst); + +/** Generic trap: cold start */ +#define SNMP_GENTRAP_COLDSTART 0 +/** Generic trap: warm start */ +#define SNMP_GENTRAP_WARMSTART 1 +/** Generic trap: link down */ +#define SNMP_GENTRAP_LINKDOWN 2 +/** Generic trap: link up */ +#define SNMP_GENTRAP_LINKUP 3 +/** Generic trap: authentication failure */ +#define SNMP_GENTRAP_AUTH_FAILURE 4 +/** Generic trap: EGP neighbor lost */ +#define SNMP_GENTRAP_EGP_NEIGHBOR_LOSS 5 +/** Generic trap: enterprise specific */ +#define SNMP_GENTRAP_ENTERPRISE_SPECIFIC 6 + +err_t snmp_send_trap_generic(s32_t generic_trap); +err_t snmp_send_trap_specific(s32_t specific_trap, struct snmp_varbind *varbinds); +err_t snmp_send_trap(const struct snmp_obj_id* oid, s32_t generic_trap, s32_t specific_trap, struct snmp_varbind *varbinds); + +#define SNMP_AUTH_TRAPS_DISABLED 0 +#define SNMP_AUTH_TRAPS_ENABLED 1 +void snmp_set_auth_traps_enabled(u8_t enable); +u8_t snmp_get_auth_traps_enabled(void); + +const char * snmp_get_community(void); +const char * snmp_get_community_write(void); +const char * snmp_get_community_trap(void); +void snmp_set_community(const char * const community); +void snmp_set_community_write(const char * const community); +void snmp_set_community_trap(const char * const community); + +void snmp_coldstart_trap(void); +void snmp_authfail_trap(void); + +typedef void (*snmp_write_callback_fct)(const u32_t* oid, u8_t oid_len, void* callback_arg); +void snmp_set_write_callback(snmp_write_callback_fct write_callback, void* callback_arg); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h
b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h new file mode 100644 index 000000000..a8325725c --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h @@ -0,0 +1,364 @@ +/** + * @file + * SNMP core API for implementing MIBs + */ + +/* + * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Christiaan Simons + * Martin Hentschel + */ + +#ifndef LWIP_HDR_APPS_SNMP_CORE_H +#define LWIP_HDR_APPS_SNMP_CORE_H + +#include "lwip/apps/snmp_opts.h" + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* basic ASN1 defines */ +#define SNMP_ASN1_CLASS_UNIVERSAL 0x00 +#define SNMP_ASN1_CLASS_APPLICATION 0x40 +#define SNMP_ASN1_CLASS_CONTEXT 0x80 +#define SNMP_ASN1_CLASS_PRIVATE 0xC0 + +#define SNMP_ASN1_CONTENTTYPE_PRIMITIVE 0x00 +#define SNMP_ASN1_CONTENTTYPE_CONSTRUCTED 0x20 + +/* universal tags (from ASN.1 spec.) 
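+
+   The "full ASN1 type defines" further below are built by OR-ing a class, a
+   content type and one of these tags, e.g. SNMP_ASN1_TYPE_INTEGER =
+   SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE |
+   SNMP_ASN1_UNIVERSAL_INTEGER = 0x00 | 0x00 | 2 = 0x02.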
*/ +#define SNMP_ASN1_UNIVERSAL_END_OF_CONTENT 0 +#define SNMP_ASN1_UNIVERSAL_INTEGER 2 +#define SNMP_ASN1_UNIVERSAL_OCTET_STRING 4 +#define SNMP_ASN1_UNIVERSAL_NULL 5 +#define SNMP_ASN1_UNIVERSAL_OBJECT_ID 6 +#define SNMP_ASN1_UNIVERSAL_SEQUENCE_OF 16 + +/* application specific (SNMP) tags (from SNMPv2-SMI) */ +#define SNMP_ASN1_APPLICATION_IPADDR 0 /* [APPLICATION 0] IMPLICIT OCTET STRING (SIZE (4)) */ +#define SNMP_ASN1_APPLICATION_COUNTER 1 /* [APPLICATION 1] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_GAUGE 2 /* [APPLICATION 2] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_TIMETICKS 3 /* [APPLICATION 3] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_OPAQUE 4 /* [APPLICATION 4] IMPLICIT OCTET STRING */ +#define SNMP_ASN1_APPLICATION_COUNTER64 6 /* [APPLICATION 6] IMPLICIT INTEGER (0..18446744073709551615) */ + +/* context specific (SNMP) tags (from RFC 1905) */ +#define SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_INSTANCE 1 + +/* full ASN1 type defines */ +#define SNMP_ASN1_TYPE_END_OF_CONTENT (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_END_OF_CONTENT) +#define SNMP_ASN1_TYPE_INTEGER (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_INTEGER) +#define SNMP_ASN1_TYPE_OCTET_STRING (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_OCTET_STRING) +#define SNMP_ASN1_TYPE_NULL (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_NULL) +#define SNMP_ASN1_TYPE_OBJECT_ID (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_OBJECT_ID) +#define SNMP_ASN1_TYPE_SEQUENCE (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_CONSTRUCTED | SNMP_ASN1_UNIVERSAL_SEQUENCE_OF) +#define SNMP_ASN1_TYPE_IPADDR (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_IPADDR) +#define SNMP_ASN1_TYPE_IPADDRESS SNMP_ASN1_TYPE_IPADDR +#define SNMP_ASN1_TYPE_COUNTER (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_COUNTER) +#define SNMP_ASN1_TYPE_COUNTER32 SNMP_ASN1_TYPE_COUNTER +#define SNMP_ASN1_TYPE_GAUGE (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_GAUGE) +#define SNMP_ASN1_TYPE_GAUGE32 SNMP_ASN1_TYPE_GAUGE +#define SNMP_ASN1_TYPE_UNSIGNED32 SNMP_ASN1_TYPE_GAUGE +#define SNMP_ASN1_TYPE_TIMETICKS (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_TIMETICKS) +#define SNMP_ASN1_TYPE_OPAQUE (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_OPAQUE) +#define SNMP_ASN1_TYPE_COUNTER64 (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_COUNTER64) + +#define SNMP_VARBIND_EXCEPTION_OFFSET 0xF0 +#define SNMP_VARBIND_EXCEPTION_MASK 0x0F + +/** error codes predefined by the SNMP protocol */ +typedef enum { + SNMP_ERR_NOERROR = 0, +/* +outdated v1 error codes. do not use anymore!
+#define SNMP_ERR_NOSUCHNAME 2 use SNMP_ERR_NOSUCHINSTANCE instead +#define SNMP_ERR_BADVALUE 3 use SNMP_ERR_WRONGTYPE,SNMP_ERR_WRONGLENGTH,SNMP_ERR_WRONGENCODING or SNMP_ERR_WRONGVALUE instead +#define SNMP_ERR_READONLY 4 use SNMP_ERR_NOTWRITABLE instead +*/ + SNMP_ERR_GENERROR = 5, + SNMP_ERR_NOACCESS = 6, + SNMP_ERR_WRONGTYPE = 7, + SNMP_ERR_WRONGLENGTH = 8, + SNMP_ERR_WRONGENCODING = 9, + SNMP_ERR_WRONGVALUE = 10, + SNMP_ERR_NOCREATION = 11, + SNMP_ERR_INCONSISTENTVALUE = 12, + SNMP_ERR_RESOURCEUNAVAILABLE = 13, + SNMP_ERR_COMMITFAILED = 14, + SNMP_ERR_UNDOFAILED = 15, + SNMP_ERR_NOTWRITABLE = 17, + SNMP_ERR_INCONSISTENTNAME = 18, + + SNMP_ERR_NOSUCHINSTANCE = SNMP_VARBIND_EXCEPTION_OFFSET + SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_INSTANCE +} snmp_err_t; + +/** internal object identifier representation */ +struct snmp_obj_id +{ + u8_t len; + u32_t id[SNMP_MAX_OBJ_ID_LEN]; +}; + +struct snmp_obj_id_const_ref +{ + u8_t len; + const u32_t* id; +}; + +extern const struct snmp_obj_id_const_ref snmp_zero_dot_zero; /* administrative identifier from SNMPv2-SMI */ + +/** SNMP variant value, used as reference in struct snmp_node_instance and table implementation */ +union snmp_variant_value +{ + void* ptr; + const void* const_ptr; + u32_t u32; + s32_t s32; +}; + + +/** +SNMP MIB node types + tree node is the only node the stack can process in order to walk the tree, + all other nodes are assumed to be leaf nodes. + This cannot be an enum because users may want to define their own node types. +*/ +#define SNMP_NODE_TREE 0x00 +/* predefined leaf node types */ +#define SNMP_NODE_SCALAR 0x01 +#define SNMP_NODE_SCALAR_ARRAY 0x02 +#define SNMP_NODE_TABLE 0x03 +#define SNMP_NODE_THREADSYNC 0x04 + +/** node "base class" layout, the mandatory fields for a node */ +struct snmp_node +{ + /** one out of SNMP_NODE_TREE or any leaf node type (like SNMP_NODE_SCALAR) */ + u8_t node_type; + /** the number assigned to this node which is used as part of the full OID */ + u32_t oid; +}; + +/** SNMP node instance access types */ +typedef enum { + SNMP_NODE_INSTANCE_ACCESS_READ = 1, + SNMP_NODE_INSTANCE_ACCESS_WRITE = 2, + SNMP_NODE_INSTANCE_READ_ONLY = SNMP_NODE_INSTANCE_ACCESS_READ, + SNMP_NODE_INSTANCE_READ_WRITE = (SNMP_NODE_INSTANCE_ACCESS_READ | SNMP_NODE_INSTANCE_ACCESS_WRITE), + SNMP_NODE_INSTANCE_WRITE_ONLY = SNMP_NODE_INSTANCE_ACCESS_WRITE, + SNMP_NODE_INSTANCE_NOT_ACCESSIBLE = 0 +} snmp_access_t; + +struct snmp_node_instance; + +typedef s16_t (*node_instance_get_value_method)(struct snmp_node_instance*, void*); +typedef snmp_err_t (*node_instance_set_test_method)(struct snmp_node_instance*, u16_t, void*); +typedef snmp_err_t (*node_instance_set_value_method)(struct snmp_node_instance*, u16_t, void*); +typedef void (*node_instance_release_method)(struct snmp_node_instance*); + +#define SNMP_GET_VALUE_RAW_DATA 0x8000 + +/** SNMP node instance */ +struct snmp_node_instance +{ + /** prefilled with the node, get_instance() is called on; may be changed by user to any value to pass an arbitrary node between calls to get_instance() and get_value/test_value/set_value */ + const struct snmp_node* node; + /** prefilled with the instance id requested; for get_instance() this is the exact oid requested; for get_next_instance() this is the relative starting point, stack expects relative oid of next node here */ + struct snmp_obj_id instance_oid; + + /** ASN type for this object (see snmp_asn1.h for definitions) */ + u8_t asn1_type; + /** one out of instance access types defined above (SNMP_NODE_INSTANCE_READ_ONLY,...)
*/ + snmp_access_t access; + + /** returns object value for the given object identifier. Return values <0 to indicate an error */ + node_instance_get_value_method get_value; + /** tests length and/or range BEFORE setting */ + node_instance_set_test_method set_test; + /** sets object value, only called when set_test() was successful */ + node_instance_set_value_method set_value; + /** called in any case when the instance is not required anymore by stack (useful for freeing memory allocated in get_instance/get_next_instance methods) */ + node_instance_release_method release_instance; + + /** reference to pass arbitrary value between calls to get_instance() and get_value/test_value/set_value */ + union snmp_variant_value reference; + /** see reference (if reference is a pointer, the length of underlying data may be stored here or anything else) */ + u32_t reference_len; +}; + + +/** SNMP tree node */ +struct snmp_tree_node +{ + /** inherited "base class" members */ + struct snmp_node node; + u16_t subnode_count; + const struct snmp_node* const *subnodes; +}; + +#define SNMP_CREATE_TREE_NODE(oid, subnodes) \ + {{ SNMP_NODE_TREE, (oid) }, \ + (u16_t)LWIP_ARRAYSIZE(subnodes), (subnodes) } + +#define SNMP_CREATE_EMPTY_TREE_NODE(oid) \ + {{ SNMP_NODE_TREE, (oid) }, \ + 0, NULL } + +/** SNMP leaf node */ +struct snmp_leaf_node +{ + /** inherited "base class" members */ + struct snmp_node node; + snmp_err_t (*get_instance)(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + snmp_err_t (*get_next_instance)(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +}; + +/** represents a single mib with its base oid and root node */ +struct snmp_mib +{ + const u32_t *base_oid; + u8_t base_oid_len; + const struct snmp_node *root_node; +}; + +#define SNMP_MIB_CREATE(oid_list, root_node) { (oid_list), (u8_t)LWIP_ARRAYSIZE(oid_list), root_node } + +/** OID range structure */ +struct snmp_oid_range +{ + u32_t min; + u32_t max; +}; + +/** checks if incoming OID length and values are in allowed ranges */ +u8_t snmp_oid_in_range(const u32_t *oid_in, u8_t oid_len, const struct snmp_oid_range *oid_ranges, u8_t oid_ranges_len); + +typedef enum { + SNMP_NEXT_OID_STATUS_SUCCESS, + SNMP_NEXT_OID_STATUS_NO_MATCH, + SNMP_NEXT_OID_STATUS_BUF_TO_SMALL +} snmp_next_oid_status_t; + +/** state for next_oid_init / next_oid_check functions */ +struct snmp_next_oid_state +{ + const u32_t* start_oid; + u8_t start_oid_len; + + u32_t* next_oid; + u8_t next_oid_len; + u8_t next_oid_max_len; + + snmp_next_oid_status_t status; + void* reference; +}; + +void snmp_next_oid_init(struct snmp_next_oid_state *state, + const u32_t *start_oid, u8_t start_oid_len, + u32_t *next_oid_buf, u8_t next_oid_max_len); +u8_t snmp_next_oid_precheck(struct snmp_next_oid_state *state, const u32_t *oid, const u8_t oid_len); +u8_t snmp_next_oid_check(struct snmp_next_oid_state *state, const u32_t *oid, const u8_t oid_len, void* reference); + +void snmp_oid_assign(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +void snmp_oid_combine(struct snmp_obj_id* target, const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); +void snmp_oid_prefix(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +void snmp_oid_append(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +u8_t snmp_oid_equal(const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); +s8_t snmp_oid_compare(const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); + +#if LWIP_IPV4 
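 /* (Illustration for the OID helper functions above. A sketch of comparing two
  * OIDs the way a GetNext walk orders them -- snmp_oid_compare() returns <0, 0
  * or >0 for lexicographic order, snmp_oid_equal() is the equality shortcut:
  *
  *   static const u32_t mib2_oid[]        = { 1, 3, 6, 1, 2, 1 };
  *   static const u32_t enterprises_oid[] = { 1, 3, 6, 1, 4, 1 };
  *   s8_t r = snmp_oid_compare(mib2_oid, LWIP_ARRAYSIZE(mib2_oid),
  *                             enterprises_oid, LWIP_ARRAYSIZE(enterprises_oid));
  *   // r < 0 here: the mib-2 subtree sorts before the enterprises subtree
  * )
  */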
+u8_t snmp_oid_to_ip4(const u32_t *oid, ip4_addr_t *ip); +void snmp_ip4_to_oid(const ip4_addr_t *ip, u32_t *oid); +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +u8_t snmp_oid_to_ip6(const u32_t *oid, ip6_addr_t *ip); +void snmp_ip6_to_oid(const ip6_addr_t *ip, u32_t *oid); +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 || LWIP_IPV6 +u8_t snmp_ip_to_oid(const ip_addr_t *ip, u32_t *oid); +u8_t snmp_ip_port_to_oid(const ip_addr_t *ip, u16_t port, u32_t *oid); + +u8_t snmp_oid_to_ip(const u32_t *oid, u8_t oid_len, ip_addr_t *ip); +u8_t snmp_oid_to_ip_port(const u32_t *oid, u8_t oid_len, ip_addr_t *ip, u16_t *port); +#endif /* LWIP_IPV4 || LWIP_IPV6 */ + +struct netif; +u8_t netif_to_num(const struct netif *netif); + +snmp_err_t snmp_set_test_ok(struct snmp_node_instance* instance, u16_t value_len, void* value); /* generic function which can be used if test is always successful */ + +err_t snmp_decode_bits(const u8_t *buf, u32_t buf_len, u32_t *bit_value); +err_t snmp_decode_truthvalue(const s32_t *asn1_value, u8_t *bool_value); +u8_t snmp_encode_bits(u8_t *buf, u32_t buf_len, u32_t bit_value, u8_t bit_count); +u8_t snmp_encode_truthvalue(s32_t *asn1_value, u32_t bool_value); + +struct snmp_statistics +{ + u32_t inpkts; + u32_t outpkts; + u32_t inbadversions; + u32_t inbadcommunitynames; + u32_t inbadcommunityuses; + u32_t inasnparseerrs; + u32_t intoobigs; + u32_t innosuchnames; + u32_t inbadvalues; + u32_t inreadonlys; + u32_t ingenerrs; + u32_t intotalreqvars; + u32_t intotalsetvars; + u32_t ingetrequests; + u32_t ingetnexts; + u32_t insetrequests; + u32_t ingetresponses; + u32_t intraps; + u32_t outtoobigs; + u32_t outnosuchnames; + u32_t outbadvalues; + u32_t outgenerrs; + u32_t outgetrequests; + u32_t outgetnexts; + u32_t outsetrequests; + u32_t outgetresponses; + u32_t outtraps; +}; + +extern struct snmp_statistics snmp_stats; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SNMP */ + +#endif /* LWIP_HDR_APPS_SNMP_CORE_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h new file mode 100644 index 000000000..392ee2577 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h @@ -0,0 +1,78 @@ +/** + * @file + * SNMP MIB2 API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
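 *
 * (Illustration for the OID <-> IP address helpers declared above: packing an
 * IPv4 endpoint into table-index OID components, as row OIDs for connection
 * tables are typically built. `ep_ip` and `ep_port` are hypothetical values,
 * and the u8_t return is read here as the number of components written:
 *
 *   u32_t row_oid[5];   // 4 components for the IPv4 address + 1 for the port
 *   u8_t  len = snmp_ip_port_to_oid(&ep_ip, ep_port, row_oid);
 *
 * The matching snmp_oid_to_ip_port() decodes such an index again.)
 *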
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_APPS_SNMP_MIB2_H +#define LWIP_HDR_APPS_SNMP_MIB2_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ +#if SNMP_LWIP_MIB2 + +#include "lwip/apps/snmp_core.h" + +extern const struct snmp_mib mib2; + +#if SNMP_USE_NETCONN +#include "lwip/apps/snmp_threadsync.h" +void snmp_mib2_lwip_synchronizer(snmp_threadsync_called_fn fn, void* arg); +extern struct snmp_threadsync_instance snmp_mib2_lwip_locks; +#endif + +#ifndef SNMP_SYSSERVICES +#define SNMP_SYSSERVICES ((1 << 6) | (1 << 3) | ((IP_FORWARD) << 2)) +#endif + +void snmp_mib2_set_sysdescr(const u8_t* str, const u16_t* len); /* read-only be defintion */ +void snmp_mib2_set_syscontact(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); +void snmp_mib2_set_syscontact_readonly(const u8_t *ocstr, const u16_t *ocstrlen); +void snmp_mib2_set_sysname(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); +void snmp_mib2_set_sysname_readonly(const u8_t *ocstr, const u16_t *ocstrlen); +void snmp_mib2_set_syslocation(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); +void snmp_mib2_set_syslocation_readonly(const u8_t *ocstr, const u16_t *ocstrlen); + +#endif /* SNMP_LWIP_MIB2 */ +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_MIB2_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h new file mode 100644 index 000000000..d4ae06820 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h @@ -0,0 +1,293 @@ +/** + * @file + * SNMP server options list + */ + +/* + * Copyright (c) 2015 Dirk Ziegelmeier + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
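 *
 * (Illustration for the snmp_mib2_set_* setters declared above: they take
 * caller-owned buffers that must stay valid. A minimal sketch making sysName
 * writable, assuming a static buffer is acceptable; the name and the helper
 * function are hypothetical:
 *
 *   static u8_t  sysname[32] = "eez-psu";
 *   static u16_t sysname_len = 7;
 *
 *   void app_snmp_setup(void)
 *   {
 *     snmp_mib2_set_sysname(sysname, &sysname_len, sizeof(sysname));
 *   }
 *
 * The *_readonly variants take const pointers and keep the field read-only.)
 *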
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_SNMP_OPTS_H +#define LWIP_HDR_SNMP_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup snmp_opts Options + * @ingroup snmp + * @{ + */ + +/** + * LWIP_SNMP==1: This enables the lwIP SNMP agent. UDP must be available + * for SNMP transport. + * If you want to use your own SNMP agent, leave this disabled. + * To integrate MIB2 of an external agent, you need to enable + * LWIP_MIB2_CALLBACKS and MIB2_STATS. This will give you the callbacks + * and statistics counters you need to get MIB2 working. + */ +#if !defined LWIP_SNMP || defined __DOXYGEN__ +#define LWIP_SNMP 0 +#endif + +/** + * SNMP_USE_NETCONN: Use netconn API instead of raw API. + * Makes SNMP agent run in a worker thread, so blocking operations + * can be done in MIB calls. + */ +#if !defined SNMP_USE_NETCONN || defined __DOXYGEN__ +#define SNMP_USE_NETCONN 0 +#endif + +/** + * SNMP_USE_RAW: Use raw API. + * SNMP agent does not run in a worker thread, so blocking operations + * should not be done in MIB calls. + */ +#if !defined SNMP_USE_RAW || defined __DOXYGEN__ +#define SNMP_USE_RAW 1 +#endif + +#if SNMP_USE_NETCONN && SNMP_USE_RAW +#error SNMP stack can use only one of the APIs {raw, netconn} +#endif + +#if LWIP_SNMP && !SNMP_USE_NETCONN && !SNMP_USE_RAW +#error SNMP stack needs a receive API and UDP {raw, netconn} +#endif + +#if SNMP_USE_NETCONN +/** + * SNMP_STACK_SIZE: Stack size of SNMP netconn worker thread + */ +#if !defined SNMP_STACK_SIZE || defined __DOXYGEN__ +#define SNMP_STACK_SIZE DEFAULT_THREAD_STACKSIZE +#endif + +/** + * SNMP_THREAD_PRIO: SNMP netconn worker thread priority + */ +#if !defined SNMP_THREAD_PRIO || defined __DOXYGEN__ +#define SNMP_THREAD_PRIO DEFAULT_THREAD_PRIO +#endif +#endif /* SNMP_USE_NETCONN */ + +/** + * SNMP_TRAP_DESTINATIONS: Number of trap destinations. At least one trap + * destination is required + */ +#if !defined SNMP_TRAP_DESTINATIONS || defined __DOXYGEN__ +#define SNMP_TRAP_DESTINATIONS 1 +#endif + +/** + * Only allow SNMP write actions that are 'safe' (e.g. disabling netifs is not + * a safe action and disabled when SNMP_SAFE_REQUESTS = 1). + * Unsafe requests are disabled by default! + */ +#if !defined SNMP_SAFE_REQUESTS || defined __DOXYGEN__ +#define SNMP_SAFE_REQUESTS 1 +#endif + +/** + * The maximum length of strings used. + */ +#if !defined SNMP_MAX_OCTET_STRING_LEN || defined __DOXYGEN__ +#define SNMP_MAX_OCTET_STRING_LEN 127 +#endif + +/** + * The maximum number of Sub ID's inside an object identifier. + * Indirectly this also limits the maximum depth of SNMP tree. + */ +#if !defined SNMP_MAX_OBJ_ID_LEN || defined __DOXYGEN__ +#define SNMP_MAX_OBJ_ID_LEN 50 +#endif + +#if !defined SNMP_MAX_VALUE_SIZE || defined __DOXYGEN__ +/** + * The maximum size of a value. + */ +#define SNMP_MIN_VALUE_SIZE (2 * sizeof(u32_t*)) /* size required to store the basic types (8 bytes for counter64) */ +/** + * The minimum size of a value. 
+ */ +#define SNMP_MAX_VALUE_SIZE LWIP_MAX(LWIP_MAX((SNMP_MAX_OCTET_STRING_LEN), sizeof(u32_t)*(SNMP_MAX_OBJ_ID_LEN)), SNMP_MIN_VALUE_SIZE) +#endif + +/** + * The snmp read-access community. Used for write-access and traps, too + * unless SNMP_COMMUNITY_WRITE or SNMP_COMMUNITY_TRAP are enabled, respectively. + */ +#if !defined SNMP_COMMUNITY || defined __DOXYGEN__ +#define SNMP_COMMUNITY "public" +#endif + +/** + * The snmp write-access community. + * Set this community to "" in order to disallow any write access. + */ +#if !defined SNMP_COMMUNITY_WRITE || defined __DOXYGEN__ +#define SNMP_COMMUNITY_WRITE "private" +#endif + +/** + * The snmp community used for sending traps. + */ +#if !defined SNMP_COMMUNITY_TRAP || defined __DOXYGEN__ +#define SNMP_COMMUNITY_TRAP "public" +#endif + +/** + * The maximum length of community string. + * If community names shall be adjusted at runtime via snmp_set_community() calls, + * enter here the possible maximum length (+1 for terminating null character). + */ +#if !defined SNMP_MAX_COMMUNITY_STR_LEN || defined __DOXYGEN__ +#define SNMP_MAX_COMMUNITY_STR_LEN LWIP_MAX(LWIP_MAX(sizeof(SNMP_COMMUNITY), sizeof(SNMP_COMMUNITY_WRITE)), sizeof(SNMP_COMMUNITY_TRAP)) +#endif + +/** + * The OID identifiying the device. This may be the enterprise OID itself or any OID located below it in tree. + */ +#if !defined SNMP_DEVICE_ENTERPRISE_OID || defined __DOXYGEN__ +#define SNMP_LWIP_ENTERPRISE_OID 26381 +/** + * IANA assigned enterprise ID for lwIP is 26381 + * @see http://www.iana.org/assignments/enterprise-numbers + * + * @note this enterprise ID is assigned to the lwIP project, + * all object identifiers living under this ID are assigned + * by the lwIP maintainers! + * @note don't change this define, use snmp_set_device_enterprise_oid() + * + * If you need to create your own private MIB you'll need + * to apply for your own enterprise ID with IANA: + * http://www.iana.org/numbers.html + */ +#define SNMP_DEVICE_ENTERPRISE_OID {1, 3, 6, 1, 4, 1, SNMP_LWIP_ENTERPRISE_OID} +/** + * Length of SNMP_DEVICE_ENTERPRISE_OID + */ +#define SNMP_DEVICE_ENTERPRISE_OID_LEN 7 +#endif + +/** + * SNMP_DEBUG: Enable debugging for SNMP messages. + */ +#if !defined SNMP_DEBUG || defined __DOXYGEN__ +#define SNMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SNMP_MIB_DEBUG: Enable debugging for SNMP MIBs. + */ +#if !defined SNMP_MIB_DEBUG || defined __DOXYGEN__ +#define SNMP_MIB_DEBUG LWIP_DBG_OFF +#endif + +/** + * Indicates if the MIB2 implementation of LWIP SNMP stack is used. + */ +#if !defined SNMP_LWIP_MIB2 || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2 LWIP_SNMP +#endif + +/** + * Value return for sysDesc field of MIB2. + */ +#if !defined SNMP_LWIP_MIB2_SYSDESC || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSDESC "lwIP" +#endif + +/** + * Value return for sysName field of MIB2. + * To make sysName field settable, call snmp_mib2_set_sysname() to provide the necessary buffers. + */ +#if !defined SNMP_LWIP_MIB2_SYSNAME || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSNAME "FQDN-unk" +#endif + +/** + * Value return for sysContact field of MIB2. + * To make sysContact field settable, call snmp_mib2_set_syscontact() to provide the necessary buffers. + */ +#if !defined SNMP_LWIP_MIB2_SYSCONTACT || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSCONTACT "" +#endif + +/** + * Value return for sysLocation field of MIB2. + * To make sysLocation field settable, call snmp_mib2_set_syslocation() to provide the necessary buffers. 
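 *
 * (Illustration: the community options above are meant to be overridden from
 * lwipopts.h. A sketch that keeps the default read community but disallows all
 * writes -- the values are examples, not project settings:
 *
 *   #define SNMP_COMMUNITY       "public"
 *   #define SNMP_COMMUNITY_WRITE ""        // empty string disallows write access
 *   #define SNMP_COMMUNITY_TRAP  "public"
 * )
 *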
+ */ +#if !defined SNMP_LWIP_MIB2_SYSLOCATION || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSLOCATION "" +#endif + +/** + * This value is used to limit the repetitions processed in GetBulk requests (value == 0 means no limitation). + * This may be useful to limit the load for a single request. + * According to SNMP RFC 1905 it is allowed to not return all requested variables from a GetBulk request if system load would be too high. + * so the effect is that the client will do more requests to gather all data. + * For the stack this could be useful in case that SNMP processing is done in TCP/IP thread. In this situation a request with many + * repetitions could block the thread for a longer time. Setting limit here will keep the stack more responsive. + */ +#if !defined SNMP_LWIP_GETBULK_MAX_REPETITIONS || defined __DOXYGEN__ +#define SNMP_LWIP_GETBULK_MAX_REPETITIONS 0 +#endif + +/** + * @} + */ + +/* + ------------------------------------ + ---------- SNMPv3 options ---------- + ------------------------------------ +*/ + +/** + * LWIP_SNMP_V3==1: This enables EXPERIMENTAL SNMPv3 support. LWIP_SNMP must + * also be enabled. + * THIS IS UNDER DEVELOPMENT AND SHOULD NOT BE ENABLED IN PRODUCTS. + */ +#ifndef LWIP_SNMP_V3 +#define LWIP_SNMP_V3 0 +#endif + +#ifndef LWIP_SNMP_V3_CRYPTO +#define LWIP_SNMP_V3_CRYPTO LWIP_SNMP_V3 +#endif + +#ifndef LWIP_SNMP_V3_MBEDTLS +#define LWIP_SNMP_V3_MBEDTLS LWIP_SNMP_V3 +#endif + +#endif /* LWIP_HDR_SNMP_OPTS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h new file mode 100644 index 000000000..99bcdf95f --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h @@ -0,0 +1,113 @@ +/** + * @file + * SNMP server MIB API to implement scalar nodes + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Martin Hentschel + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_SCALAR_H +#define LWIP_HDR_APPS_SNMP_SCALAR_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/apps/snmp_core.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +/** basic scalar node */ +struct snmp_scalar_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u8_t asn1_type; + snmp_access_t access; + node_instance_get_value_method get_value; + node_instance_set_test_method set_test; + node_instance_set_value_method set_value; +}; + + +snmp_err_t snmp_scalar_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_scalar_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_SCALAR_CREATE_NODE(oid, access, asn1_type, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_SCALAR, (oid) }, \ + snmp_scalar_get_instance, \ + snmp_scalar_get_next_instance }, \ + (asn1_type), (access), (get_value_method), (set_test_method), (set_value_method) } + +#define SNMP_SCALAR_CREATE_NODE_READONLY(oid, asn1_type, get_value_method) SNMP_SCALAR_CREATE_NODE(oid, SNMP_NODE_INSTANCE_READ_ONLY, asn1_type, get_value_method, NULL, NULL) + +/** scalar array node - a tree node which contains scalars only as children */ +struct snmp_scalar_array_node_def +{ + u32_t oid; + u8_t asn1_type; + snmp_access_t access; +}; + +typedef s16_t (*snmp_scalar_array_get_value_method)(const struct snmp_scalar_array_node_def*, void*); +typedef snmp_err_t (*snmp_scalar_array_set_test_method)(const struct snmp_scalar_array_node_def*, u16_t, void*); +typedef snmp_err_t (*snmp_scalar_array_set_value_method)(const struct snmp_scalar_array_node_def*, u16_t, void*); + +/** basic scalar array node */ +struct snmp_scalar_array_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u16_t array_node_count; + const struct snmp_scalar_array_node_def* array_nodes; + snmp_scalar_array_get_value_method get_value; + snmp_scalar_array_set_test_method set_test; + snmp_scalar_array_set_value_method set_value; +}; + +snmp_err_t snmp_scalar_array_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_scalar_array_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_SCALAR_CREATE_ARRAY_NODE(oid, array_nodes, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_SCALAR_ARRAY, (oid) }, \ + snmp_scalar_array_get_instance, \ + snmp_scalar_array_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(array_nodes), (array_nodes), (get_value_method), (set_test_method), (set_value_method) } + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_SCALAR_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h new file mode 100644 index 000000000..6930d0b4f --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h @@ -0,0 +1,134 @@ +/** + * @file + * SNMP server MIB API to implement table nodes + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
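 *
 * (Illustration for the scalar API above: a read-only TimeTicks scalar wired
 * into a private MIB subtree. The OID values, names and uptime source are
 * hypothetical; registering the finished MIB goes through snmp_set_mibs()
 * from lwip/apps/snmp.h:
 *
 *   static s16_t my_uptime_get(struct snmp_node_instance* instance, void* value)
 *   {
 *     LWIP_UNUSED_ARG(instance);
 *     *(u32_t*)value = sys_now() / 10;  // assumed: sys_now() ms -> hundredths of s
 *     return sizeof(u32_t);             // get_value returns the value length
 *   }
 *
 *   static const struct snmp_scalar_node my_uptime =
 *     SNMP_SCALAR_CREATE_NODE_READONLY(1, SNMP_ASN1_TYPE_TIMETICKS, my_uptime_get);
 *
 *   static const struct snmp_node* const my_nodes[] = { &my_uptime.node.node };
 *   static const struct snmp_tree_node my_root = SNMP_CREATE_TREE_NODE(1, my_nodes);
 *   static const u32_t my_base_oid[] = { 1, 3, 6, 1, 4, 1, 99999 };  // hypothetical enterprise
 *   static const struct snmp_mib my_mib = SNMP_MIB_CREATE(my_base_oid, &my_root.node);
 * )
 *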
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Martin Hentschel + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_TABLE_H +#define LWIP_HDR_APPS_SNMP_TABLE_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/apps/snmp_core.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +/** default (customizable) read/write table */ +struct snmp_table_col_def +{ + u32_t index; + u8_t asn1_type; + snmp_access_t access; +}; + +/** table node */ +struct snmp_table_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u16_t column_count; + const struct snmp_table_col_def* columns; + snmp_err_t (*get_cell_instance)(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, struct snmp_node_instance* cell_instance); + snmp_err_t (*get_next_cell_instance)(const u32_t* column, struct snmp_obj_id* row_oid, struct snmp_node_instance* cell_instance); + /** returns object value for the given object identifier */ + node_instance_get_value_method get_value; + /** tests length and/or range BEFORE setting */ + node_instance_set_test_method set_test; + /** sets object value, only called when set_test() was successful */ + node_instance_set_value_method set_value; +}; + +snmp_err_t snmp_table_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_table_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_TABLE_CREATE(oid, columns, get_cell_instance_method, get_next_cell_instance_method, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_TABLE, (oid) }, \ + snmp_table_get_instance, \ + snmp_table_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(columns), (columns), \ + (get_cell_instance_method), (get_next_cell_instance_method), \ + (get_value_method), (set_test_method), (set_value_method)} + +#define SNMP_TABLE_GET_COLUMN_FROM_OID(oid) ((oid)[1]) /* first array value is (fixed) row entry (fixed to 1) and 2nd value is column, follow3ed by instance */ + + +/** simple read-only table */ +typedef 
enum { + SNMP_VARIANT_VALUE_TYPE_U32, + SNMP_VARIANT_VALUE_TYPE_S32, + SNMP_VARIANT_VALUE_TYPE_PTR, + SNMP_VARIANT_VALUE_TYPE_CONST_PTR +} snmp_table_column_data_type_t; + +struct snmp_table_simple_col_def +{ + u32_t index; + u8_t asn1_type; + snmp_table_column_data_type_t data_type; /* depending of what union member is used to store the value*/ +}; + +/** simple read-only table node */ +struct snmp_table_simple_node +{ + /* inherited "base class" members */ + struct snmp_leaf_node node; + u16_t column_count; + const struct snmp_table_simple_col_def* columns; + snmp_err_t (*get_cell_value)(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, union snmp_variant_value* value, u32_t* value_len); + snmp_err_t (*get_next_cell_instance_and_value)(const u32_t* column, struct snmp_obj_id* row_oid, union snmp_variant_value* value, u32_t* value_len); +}; + +snmp_err_t snmp_table_simple_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_table_simple_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_TABLE_CREATE_SIMPLE(oid, columns, get_cell_value_method, get_next_cell_instance_and_value_method) \ + {{{ SNMP_NODE_TABLE, (oid) }, \ + snmp_table_simple_get_instance, \ + snmp_table_simple_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(columns), (columns), (get_cell_value_method), (get_next_cell_instance_and_value_method) } + +s16_t snmp_table_extract_value_from_s32ref(struct snmp_node_instance* instance, void* value); +s16_t snmp_table_extract_value_from_u32ref(struct snmp_node_instance* instance, void* value); +s16_t snmp_table_extract_value_from_refconstptr(struct snmp_node_instance* instance, void* value); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_TABLE_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h new file mode 100644 index 000000000..5eb353870 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h @@ -0,0 +1,114 @@ +/** + * @file + * SNMP server MIB API to implement thread synchronization + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
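 *
 * (Illustration for the simple read-only table above: a get_cell_value
 * callback backed by a hypothetical fixed array `my_data` with `MY_ROWS`
 * entries, indexed by a single-component row OID; a sketch under the
 * signature declared above:
 *
 *   static snmp_err_t my_cell_get(const u32_t* column, const u32_t* row_oid,
 *                                 u8_t row_oid_len, union snmp_variant_value* value,
 *                                 u32_t* value_len)
 *   {
 *     u32_t row;
 *     if ((row_oid_len != 1) || (row_oid[0] < 1) || (row_oid[0] > MY_ROWS)) {
 *       return SNMP_ERR_NOSUCHINSTANCE;
 *     }
 *     row = row_oid[0] - 1;
 *     *value_len = sizeof(u32_t);  // length of the value stored in the union
 *     switch (*column) {
 *       case 1: value->u32 = my_data[row].id;    return SNMP_ERR_NOERROR;
 *       case 2: value->s32 = my_data[row].level; return SNMP_ERR_NOERROR;
 *       default: return SNMP_ERR_NOSUCHINSTANCE;
 *     }
 *   }
 * )
 *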
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_THREADSYNC_H +#define LWIP_HDR_APPS_SNMP_THREADSYNC_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/apps/snmp_core.h" +#include "lwip/sys.h" + +typedef void (*snmp_threadsync_called_fn)(void* arg); +typedef void (*snmp_threadsync_synchronizer_fn)(snmp_threadsync_called_fn fn, void* arg); + + +/** Thread sync runtime data. For internal usage only. */ +struct threadsync_data +{ + union { + snmp_err_t err; + s16_t s16; + } retval; + union { + const u32_t *root_oid; + void *value; + } arg1; + union { + u8_t root_oid_len; + u16_t len; + } arg2; + const struct snmp_threadsync_node *threadsync_node; + struct snmp_node_instance proxy_instance; +}; + +/** Thread sync instance. Needed EXCATLY once for every thread to be synced into. */ +struct snmp_threadsync_instance +{ + sys_sem_t sem; + sys_mutex_t sem_usage_mutex; + snmp_threadsync_synchronizer_fn sync_fn; + struct threadsync_data data; +}; + +/** SNMP thread sync proxy leaf node */ +struct snmp_threadsync_node +{ + /* inherited "base class" members */ + struct snmp_leaf_node node; + + const struct snmp_leaf_node *target; + struct snmp_threadsync_instance *instance; +}; + +snmp_err_t snmp_threadsync_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_threadsync_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +/** Create thread sync proxy node */ +#define SNMP_CREATE_THREAD_SYNC_NODE(oid, target_leaf_node, threadsync_instance) \ + {{{ SNMP_NODE_THREADSYNC, (oid) }, \ + snmp_threadsync_get_instance, \ + snmp_threadsync_get_next_instance }, \ + (target_leaf_node), \ + (threadsync_instance) } + +/** Create thread sync instance data */ +void snmp_threadsync_init(struct snmp_threadsync_instance *instance, snmp_threadsync_synchronizer_fn sync_fn); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_THREADSYNC_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h new file mode 100644 index 000000000..99a7fd266 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h @@ -0,0 +1,90 @@ +/** + * @file + * Additional SNMPv3 functionality RFC3414 and RFC3826. + */ + +/* + * Copyright (c) 2016 Elias Oenal. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Elias Oenal + */ + +#ifndef LWIP_HDR_APPS_SNMP_V3_H +#define LWIP_HDR_APPS_SNMP_V3_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/err.h" + +#if LWIP_SNMP && LWIP_SNMP_V3 + +#define SNMP_V3_AUTH_ALGO_INVAL 0 +#define SNMP_V3_AUTH_ALGO_MD5 1 +#define SNMP_V3_AUTH_ALGO_SHA 2 + +#define SNMP_V3_PRIV_ALGO_INVAL 0 +#define SNMP_V3_PRIV_ALGO_DES 1 +#define SNMP_V3_PRIV_ALGO_AES 2 + +#define SNMP_V3_PRIV_MODE_DECRYPT 0 +#define SNMP_V3_PRIV_MODE_ENCRYPT 1 + +/* + * The following callback functions must be implemented by the application. + * There is a dummy implementation in snmpv3_dummy.c. + */ + +void snmpv3_get_engine_id(const char **id, u8_t *len); +err_t snmpv3_set_engine_id(const char* id, u8_t len); + +u32_t snmpv3_get_engine_boots(void); +void snmpv3_set_engine_boots(u32_t boots); + +u32_t snmpv3_get_engine_time(void); +void snmpv3_reset_engine_time(void); + +err_t snmpv3_get_user(const char* username, u8_t *auth_algo, u8_t *auth_key, u8_t *priv_algo, u8_t *priv_key); + +/* The following functions are provided by the SNMPv3 agent */ + +void snmpv3_engine_id_changed(void); + +void snmpv3_password_to_key_md5( + const u8_t *password, /* IN */ + u8_t passwordlen, /* IN */ + const u8_t *engineID, /* IN - pointer to snmpEngineID */ + u8_t engineLength, /* IN - length of snmpEngineID */ + u8_t *key); /* OUT - pointer to caller 16-octet buffer */ + +void snmpv3_password_to_key_sha( + const u8_t *password, /* IN */ + u8_t passwordlen, /* IN */ + const u8_t *engineID, /* IN - pointer to snmpEngineID */ + u8_t engineLength, /* IN - length of snmpEngineID */ + u8_t *key); /* OUT - pointer to caller 20-octet buffer */ + +#endif + +#endif /* LWIP_HDR_APPS_SNMP_V3_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h new file mode 100644 index 000000000..3910917b5 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h @@ -0,0 +1,76 @@ +/** + * @file + * SNTP client API + */ + +/* + * Copyright (c) 2007-2009 Frédéric Bernon, Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Frédéric Bernon, Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_SNTP_H +#define LWIP_HDR_APPS_SNTP_H + +#include "lwip/apps/sntp_opts.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* SNTP operating modes: default is to poll using unicast. + The mode has to be set before calling sntp_init(). */ +#define SNTP_OPMODE_POLL 0 +#define SNTP_OPMODE_LISTENONLY 1 +void sntp_setoperatingmode(u8_t operating_mode); +u8_t sntp_getoperatingmode(void); + +void sntp_init(void); +void sntp_stop(void); +u8_t sntp_enabled(void); + +void sntp_setserver(u8_t idx, const ip_addr_t *addr); +const ip_addr_t* sntp_getserver(u8_t idx); + +#if SNTP_SERVER_DNS +void sntp_setservername(u8_t idx, char *server); +char *sntp_getservername(u8_t idx); +#endif /* SNTP_SERVER_DNS */ + +#if SNTP_GET_SERVERS_FROM_DHCP +void sntp_servermode_dhcp(int set_servers_from_dhcp); +#else /* SNTP_GET_SERVERS_FROM_DHCP */ +#define sntp_servermode_dhcp(x) +#endif /* SNTP_GET_SERVERS_FROM_DHCP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNTP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h new file mode 100644 index 000000000..c28b86418 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h @@ -0,0 +1,173 @@ +/** + * @file + * SNTP client options list + */ + +/* + * Copyright (c) 2007-2009 Frédéric Bernon, Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Frédéric Bernon, Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_SNTP_OPTS_H +#define LWIP_HDR_APPS_SNTP_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup sntp_opts Options + * @ingroup sntp + * @{ + */ + +/** SNTP macro to change system time in seconds + * Define SNTP_SET_SYSTEM_TIME_US(sec, us) to set the time in microseconds instead of this one + * if you need the additional precision. + */ +#if !defined SNTP_SET_SYSTEM_TIME || defined __DOXYGEN__ +#define SNTP_SET_SYSTEM_TIME(sec) LWIP_UNUSED_ARG(sec) +#endif + +/** The maximum number of SNTP servers that can be set */ +#if !defined SNTP_MAX_SERVERS || defined __DOXYGEN__ +#define SNTP_MAX_SERVERS LWIP_DHCP_MAX_NTP_SERVERS +#endif + +/** Set this to 1 to implement the callback function called by dhcp when + * NTP servers are received. */ +#if !defined SNTP_GET_SERVERS_FROM_DHCP || defined __DOXYGEN__ +#define SNTP_GET_SERVERS_FROM_DHCP LWIP_DHCP_GET_NTP_SRV +#endif + +/** Set this to 1 to support DNS names (or IP address strings) to set sntp servers + * One server address/name can be defined as default if SNTP_SERVER_DNS == 1: + * \#define SNTP_SERVER_ADDRESS "pool.ntp.org" + */ +#if !defined SNTP_SERVER_DNS || defined __DOXYGEN__ +#define SNTP_SERVER_DNS 0 +#endif + +/** + * SNTP_DEBUG: Enable debugging for SNTP. + */ +#if !defined SNTP_DEBUG || defined __DOXYGEN__ +#define SNTP_DEBUG LWIP_DBG_OFF +#endif + +/** SNTP server port */ +#if !defined SNTP_PORT || defined __DOXYGEN__ +#define SNTP_PORT 123 +#endif + +/** Set this to 1 to allow config of SNTP server(s) by DNS name */ +#if !defined SNTP_SERVER_DNS || defined __DOXYGEN__ +#define SNTP_SERVER_DNS 0 +#endif + +/** Sanity check: + * Define this to + * - 0 to turn off sanity checks (default; smaller code) + * - >= 1 to check address and port of the response packet to ensure the + * response comes from the server we sent the request to. + * - >= 2 to check returned Originate Timestamp against Transmit Timestamp + * sent to the server (to ensure response to older request). + * - >= 3 @todo: discard reply if any of the LI, Stratum, or Transmit Timestamp + * fields is 0 or the Mode field is not 4 (unicast) or 5 (broadcast). + * - >= 4 @todo: to check that the Root Delay and Root Dispersion fields are each + * greater than or equal to 0 and less than infinity, where infinity is + * currently a cozy number like one second. This check avoids using a + * server whose synchronization source has expired for a very long time. 
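 *
 * (Illustration tying the options here to the client API declared in sntp.h
 * above; the server address and the init hook are examples only:
 *
 *   void app_sntp_start(void)
 *   {
 *     ip_addr_t ntp_server;
 *     IP_ADDR4(&ntp_server, 192, 168, 1, 1);
 *     sntp_setoperatingmode(SNTP_OPMODE_POLL);  // must be set before sntp_init()
 *     sntp_setserver(0, &ntp_server);
 *     sntp_init();
 *   }
 *
 * For the received time actually to be applied, the port has to define
 * SNTP_SET_SYSTEM_TIME(sec); the default above discards the value.)
 *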
+ */ +#if !defined SNTP_CHECK_RESPONSE || defined __DOXYGEN__ +#define SNTP_CHECK_RESPONSE 0 +#endif + +/** According to the RFC, this shall be a random delay + * between 1 and 5 minutes (in milliseconds) to prevent load peaks. + * This can be defined to a random generation function, + * which must return the delay in milliseconds as u32_t. + * Turned off by default. + */ +#if !defined SNTP_STARTUP_DELAY || defined __DOXYGEN__ +#define SNTP_STARTUP_DELAY 0 +#endif + +/** If you want the startup delay to be a function, define this + * to a function (including the brackets) and define SNTP_STARTUP_DELAY to 1. + */ +#if !defined SNTP_STARTUP_DELAY_FUNC || defined __DOXYGEN__ +#define SNTP_STARTUP_DELAY_FUNC SNTP_STARTUP_DELAY +#endif + +/** SNTP receive timeout - in milliseconds + * Also used as retry timeout - this shouldn't be too low. + * Default is 3 seconds. + */ +#if !defined SNTP_RECV_TIMEOUT || defined __DOXYGEN__ +#define SNTP_RECV_TIMEOUT 3000 +#endif + +/** SNTP update delay - in milliseconds + * Default is 1 hour. Must not be beolw 15 seconds by specification (i.e. 15000) + */ +#if !defined SNTP_UPDATE_DELAY || defined __DOXYGEN__ +#define SNTP_UPDATE_DELAY 3600000 +#endif + +/** SNTP macro to get system time, used with SNTP_CHECK_RESPONSE >= 2 + * to send in request and compare in response. + */ +#if !defined SNTP_GET_SYSTEM_TIME || defined __DOXYGEN__ +#define SNTP_GET_SYSTEM_TIME(sec, us) do { (sec) = 0; (us) = 0; } while(0) +#endif + +/** Default retry timeout (in milliseconds) if the response + * received is invalid. + * This is doubled with each retry until SNTP_RETRY_TIMEOUT_MAX is reached. + */ +#if !defined SNTP_RETRY_TIMEOUT || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT SNTP_RECV_TIMEOUT +#endif + +/** Maximum retry timeout (in milliseconds). */ +#if !defined SNTP_RETRY_TIMEOUT_MAX || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT_MAX (SNTP_RETRY_TIMEOUT * 10) +#endif + +/** Increase retry timeout with every retry sent + * Default is on to conform to RFC. + */ +#if !defined SNTP_RETRY_TIMEOUT_EXP || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT_EXP 1 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_SNTP_OPTS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h new file mode 100644 index 000000000..bf115dcf7 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h @@ -0,0 +1,105 @@ +/****************************************************************//** + * + * @file tftp_opts.h + * + * @author Logan Gunthorpe + * + * @brief Trivial File Transfer Protocol (RFC 1350) implementation options + * + * Copyright (c) Deltatee Enterprises Ltd. 2013 + * All rights reserved. + * + ********************************************************************/ + +/* + * Redistribution and use in source and binary forms, with or without + * modification,are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Logan Gunthorpe + * + */ + +#ifndef LWIP_HDR_APPS_TFTP_OPTS_H +#define LWIP_HDR_APPS_TFTP_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup tftp_opts Options + * @ingroup tftp + * @{ + */ + +/** + * Enable TFTP debug messages + */ +#if !defined TFTP_DEBUG || defined __DOXYGEN__ +#define TFTP_DEBUG LWIP_DBG_ON +#endif + +/** + * TFTP server port + */ +#if !defined TFTP_PORT || defined __DOXYGEN__ +#define TFTP_PORT 69 +#endif + +/** + * TFTP timeout + */ +#if !defined TFTP_TIMEOUT_MSECS || defined __DOXYGEN__ +#define TFTP_TIMEOUT_MSECS 10000 +#endif + +/** + * Max. number of retries when a file is read from server + */ +#if !defined TFTP_MAX_RETRIES || defined __DOXYGEN__ +#define TFTP_MAX_RETRIES 5 +#endif + +/** + * TFTP timer cyclic interval + */ +#if !defined TFTP_TIMER_MSECS || defined __DOXYGEN__ +#define TFTP_TIMER_MSECS 50 +#endif + +/** + * Max. length of TFTP filename + */ +#if !defined TFTP_MAX_FILENAME_LEN || defined __DOXYGEN__ +#define TFTP_MAX_FILENAME_LEN 20 +#endif + +/** + * Max. length of TFTP mode + */ +#if !defined TFTP_MAX_MODE_LEN || defined __DOXYGEN__ +#define TFTP_MAX_MODE_LEN 7 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_TFTP_OPTS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h new file mode 100644 index 000000000..1ad91dc0d --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h @@ -0,0 +1,94 @@ +/****************************************************************//** + * + * @file tftp_server.h + * + * @author Logan Gunthorpe + * + * @brief Trivial File Transfer Protocol (RFC 1350) + * + * Copyright (c) Deltatee Enterprises Ltd. 2013 + * All rights reserved. + * + ********************************************************************/ + +/* + * Redistribution and use in source and binary forms, with or without + * modification,are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Logan Gunthorpe + * + */ + +#ifndef LWIP_HDR_APPS_TFTP_SERVER_H +#define LWIP_HDR_APPS_TFTP_SERVER_H + +#include "lwip/apps/tftp_opts.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup tftp + * TFTP context containing callback functions for TFTP transfers + */ +struct tftp_context { + /** + * Open file for read/write. + * @param fname Filename + * @param mode Mode string from TFTP RFC 1350 (netascii, octet, mail) + * @param write Flag indicating read (0) or write (!= 0) access + * @returns File handle supplied to other functions + */ + void* (*open)(const char* fname, const char* mode, u8_t write); + /** + * Close file handle + * @param handle File handle returned by open() + */ + void (*close)(void* handle); + /** + * Read from file + * @param handle File handle returned by open() + * @param buf Target buffer to copy read data to + * @param bytes Number of bytes to copy to buf + * @returns >= 0: Success; < 0: Error + */ + int (*read)(void* handle, void* buf, int bytes); + /** + * Write to file + * @param handle File handle returned by open() + * @param pbuf PBUF adjusted such that payload pointer points + * to the beginning of write data. In other words, + * TFTP headers are stripped off. + * @returns >= 0: Success; < 0: Error + */ + int (*write)(void* handle, struct pbuf* p); +}; + +err_t tftp_init(const struct tftp_context* ctx); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_TFTP_SERVER_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h new file mode 100644 index 000000000..f366ab5c8 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h @@ -0,0 +1,319 @@ +/** + * @file + * Support for different processor and compiler architectures + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
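 *
 * (Illustration for the tftp_context callbacks declared above: a write-only
 * receiver that funnels an incoming transfer into a hypothetical
 * flash_writer_* backend:
 *
 *   static void* my_open(const char* fname, const char* mode, u8_t write)
 *   {
 *     LWIP_UNUSED_ARG(fname); LWIP_UNUSED_ARG(mode);
 *     return write ? flash_writer_begin() : NULL;  // NULL rejects the request
 *   }
 *   static void my_close(void* handle) { flash_writer_end(handle); }
 *   static int my_read(void* handle, void* buf, int bytes)
 *   {
 *     LWIP_UNUSED_ARG(handle); LWIP_UNUSED_ARG(buf); LWIP_UNUSED_ARG(bytes);
 *     return -1;  // reads unsupported in this sketch
 *   }
 *   static int my_write(void* handle, struct pbuf* p)
 *   {
 *     return flash_writer_append(handle, p);  // consume the pbuf chain
 *   }
 *
 *   static const struct tftp_context my_tftp = { my_open, my_close, my_read, my_write };
 *   // ... once lwIP is up:
 *   tftp_init(&my_tftp);
 * )
 *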
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ARCH_H +#define LWIP_HDR_ARCH_H + +#ifndef LITTLE_ENDIAN +#define LITTLE_ENDIAN 1234 +#endif + +#ifndef BIG_ENDIAN +#define BIG_ENDIAN 4321 +#endif + +#include "arch/cc.h" + +/** + * @defgroup compiler_abstraction Compiler/platform abstraction + * @ingroup sys_layer + * All defines related to this section must not be placed in lwipopts.h, + * but in arch/cc.h! + * These options cannot be \#defined in lwipopts.h since they are not options + * of lwIP itself, but options of the lwIP port to your system. + * @{ + */ + +/** Define the byte order of the system. + * Needed for conversion of network data to host byte order. + * Allowed values: LITTLE_ENDIAN and BIG_ENDIAN + */ +#ifndef BYTE_ORDER +#define BYTE_ORDER LITTLE_ENDIAN +#endif + +/** Define random number generator function of your system */ +#ifdef __DOXYGEN__ +#define LWIP_RAND() ((u32_t)rand()) +#endif + +/** Platform specific diagnostic output.\n + * Note the default implementation pulls in printf, which may + * in turn pull in a lot of standard libary code. In resource-constrained + * systems, this should be defined to something less resource-consuming. + */ +#ifndef LWIP_PLATFORM_DIAG +#define LWIP_PLATFORM_DIAG(x) do {printf x;} while(0) +#include +#include +#endif + +/** Platform specific assertion handling.\n + * Note the default implementation pulls in printf, fflush and abort, which may + * in turn pull in a lot of standard libary code. In resource-constrained + * systems, this should be defined to something less resource-consuming. + */ +#ifndef LWIP_PLATFORM_ASSERT +#define LWIP_PLATFORM_ASSERT(x) do {printf("Assertion \"%s\" failed at line %d in %s\n", \ + x, __LINE__, __FILE__); fflush(NULL); abort();} while(0) +#include +#include +#endif + +/** Define this to 1 in arch/cc.h of your port if you do not want to + * include stddef.h header to get size_t. You need to typedef size_t + * by yourself in this case. + */ +#ifndef LWIP_NO_STDDEF_H +#define LWIP_NO_STDDEF_H 0 +#endif + +#if !LWIP_NO_STDDEF_H +#include /* for size_t */ +#endif + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the stdint.h header. You need to typedef the generic types listed in + * lwip/arch.h yourself in this case (u8_t, u16_t...). + */ +#ifndef LWIP_NO_STDINT_H +#define LWIP_NO_STDINT_H 0 +#endif + +/* Define generic types used in lwIP */ +#if !LWIP_NO_STDINT_H +#include +typedef uint8_t u8_t; +typedef int8_t s8_t; +typedef uint16_t u16_t; +typedef int16_t s16_t; +typedef uint32_t u32_t; +typedef int32_t s32_t; +typedef uintptr_t mem_ptr_t; +#endif + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the inttypes.h header. 
You need to define the format strings listed in + * lwip/arch.h yourself in this case (X8_F, U16_F...). + */ +#ifndef LWIP_NO_INTTYPES_H +#define LWIP_NO_INTTYPES_H 0 +#endif + +/* Define (sn)printf formatters for these lwIP types */ +#if !LWIP_NO_INTTYPES_H +#include <inttypes.h> +#ifndef X8_F +#define X8_F "02" PRIx8 +#endif +#ifndef U16_F +#define U16_F PRIu16 +#endif +#ifndef S16_F +#define S16_F PRId16 +#endif +#ifndef X16_F +#define X16_F PRIx16 +#endif +#ifndef U32_F +#define U32_F PRIu32 +#endif +#ifndef S32_F +#define S32_F PRId32 +#endif +#ifndef X32_F +#define X32_F PRIx32 +#endif +#ifndef SZT_F +#define SZT_F PRIuPTR +#endif +#endif + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the limits.h header. You need to define the type limits yourself in this case + * (e.g. INT_MAX). + */ +#ifndef LWIP_NO_LIMITS_H +#define LWIP_NO_LIMITS_H 0 +#endif + +/* Include limits.h? */ +#if !LWIP_NO_LIMITS_H +#include <limits.h> +#endif + +/** C++ const_cast<target_type>(val) equivalent to remove constness from a value (GCC -Wcast-qual) */ +#ifndef LWIP_CONST_CAST +#define LWIP_CONST_CAST(target_type, val) ((target_type)((ptrdiff_t)val)) +#endif + +/** Get rid of alignment cast warnings (GCC -Wcast-align) */ +#ifndef LWIP_ALIGNMENT_CAST +#define LWIP_ALIGNMENT_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) +#endif + +/** Get rid of warnings related to pointer-to-numeric and vice-versa casts, + * e.g. "conversion from 'u8_t' to 'void *' of greater size" + */ +#ifndef LWIP_PTR_NUMERIC_CAST +#define LWIP_PTR_NUMERIC_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) +#endif + +/** Allocates a memory buffer of specified size that is of sufficient size to align + * its start address using LWIP_MEM_ALIGN. + * You can declare your own version here e.g. to enforce alignment without adding + * trailing padding bytes (see LWIP_MEM_ALIGN_BUFFER) or your own section placement + * requirements.\n + * e.g. if you use gcc and need 32 bit alignment:\n + * \#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u8_t variable_name[size] \_\_attribute\_\_((aligned(4)))\n + * or more portable:\n + * \#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u32_t variable_name[(size + sizeof(u32_t) - 1) / sizeof(u32_t)] + */ +#ifndef LWIP_DECLARE_MEMORY_ALIGNED +#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u8_t variable_name[LWIP_MEM_ALIGN_BUFFER(size)] +#endif + +/** Calculate memory size for an aligned buffer - returns the next highest + * multiple of MEM_ALIGNMENT (e.g. LWIP_MEM_ALIGN_SIZE(3) and + * LWIP_MEM_ALIGN_SIZE(4) will both yield 4 for MEM_ALIGNMENT == 4). + */ +#ifndef LWIP_MEM_ALIGN_SIZE +#define LWIP_MEM_ALIGN_SIZE(size) (((size) + MEM_ALIGNMENT - 1U) & ~(MEM_ALIGNMENT-1U)) +#endif + +/** Calculate safe memory size for an aligned buffer when using an unaligned + * type as storage. This includes a safety-margin on (MEM_ALIGNMENT - 1) at the + * start (e.g. if buffer is u8_t[] and actual data will be u32_t*) + */ +#ifndef LWIP_MEM_ALIGN_BUFFER +#define LWIP_MEM_ALIGN_BUFFER(size) (((size) + MEM_ALIGNMENT - 1U)) +#endif + +/** Align a memory pointer to the alignment defined by MEM_ALIGNMENT + * so that ADDR % MEM_ALIGNMENT == 0 + */ +#ifndef LWIP_MEM_ALIGN +#define LWIP_MEM_ALIGN(addr) ((void *)(((mem_ptr_t)(addr) + MEM_ALIGNMENT - 1) & ~(mem_ptr_t)(MEM_ALIGNMENT-1))) +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** Packed structs support. 
+ * Placed BEFORE declaration of a packed struct.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_BEGIN +#define PACK_STRUCT_BEGIN +#endif /* PACK_STRUCT_BEGIN */ + +/** Packed structs support. + * Placed AFTER declaration of a packed struct.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_END +#define PACK_STRUCT_END +#endif /* PACK_STRUCT_END */ + +/** Packed structs support. + * Placed between end of declaration of a packed struct and trailing semicolon.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_STRUCT +#if defined(__GNUC__) || defined(__clang__) +#define PACK_STRUCT_STRUCT __attribute__((packed)) +#else +#define PACK_STRUCT_STRUCT +#endif +#endif /* PACK_STRUCT_STRUCT */ + +/** Packed structs support. + * Wraps u32_t and u16_t members.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_FIELD +#define PACK_STRUCT_FIELD(x) x +#endif /* PACK_STRUCT_FIELD */ + +/** Packed structs support. + * Wraps u8_t members, where some compilers warn that packing is not necessary.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_FLD_8 +#define PACK_STRUCT_FLD_8(x) PACK_STRUCT_FIELD(x) +#endif /* PACK_STRUCT_FLD_8 */ + +/** Packed structs support. + * Wraps members that are packed structs themselves, where some compilers warn that packing is not necessary.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_FLD_S +#define PACK_STRUCT_FLD_S(x) PACK_STRUCT_FIELD(x) +#endif /* PACK_STRUCT_FLD_S */ + +/** Packed structs support using \#include files before and after struct to be packed.\n + * The file included BEFORE the struct is "arch/bpstruct.h".\n + * The file included AFTER the struct is "arch/epstruct.h".\n + * This can be used to implement struct packing on MS Visual C compilers, see + * the Win32 port in the lwIP contrib repository for reference. + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifdef __DOXYGEN__ +#define PACK_STRUCT_USE_INCLUDES +#endif + +/** Eliminates compiler warning about unused arguments (GCC -Wextra -Wunused). 
*/ +#ifndef LWIP_UNUSED_ARG +#define LWIP_UNUSED_ARG(x) (void)x +#endif /* LWIP_UNUSED_ARG */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ARCH_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h new file mode 100644 index 000000000..5c6d13e7d --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h @@ -0,0 +1,99 @@ +/** + * @file + * + * AutoIP Automatic LinkLocal IP Configuration + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Dominik Spies + * + * This is an AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927. 
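Stepping back to arch.h above: a sketch of how its PACK_STRUCT_* hooks are meant to be combined when declaring an on-wire header. The struct itself is invented for illustration; the pattern mirrors the include/lwip/prot/ headers the comments reference.

#include "lwip/arch.h"

#ifdef PACK_STRUCT_USE_INCLUDES
#include "arch/bpstruct.h"        /* ports may inject e.g. #pragma pack here */
#endif
PACK_STRUCT_BEGIN
struct example_hdr {
  PACK_STRUCT_FLD_8(u8_t type);   /* u8_t fields: packing often unnecessary */
  PACK_STRUCT_FLD_8(u8_t code);
  PACK_STRUCT_FIELD(u16_t len);   /* multi-byte fields get the full wrapper */
  PACK_STRUCT_FIELD(u32_t seq);
} PACK_STRUCT_STRUCT;             /* GCC/clang: __attribute__((packed)) */
PACK_STRUCT_END
#ifdef PACK_STRUCT_USE_INCLUDES
#include "arch/epstruct.h"
#endif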
+ * + */ + +#ifndef LWIP_HDR_AUTOIP_H +#define LWIP_HDR_AUTOIP_H + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" +/* #include "lwip/udp.h" */ +#include "lwip/etharp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** AutoIP Timing */ +#define AUTOIP_TMR_INTERVAL 100 +#define AUTOIP_TICKS_PER_SECOND (1000 / AUTOIP_TMR_INTERVAL) + +/** AutoIP state information per netif */ +struct autoip +{ + /** the currently selected, probed, announced or used LL IP-Address */ + ip4_addr_t llipaddr; + /** current AutoIP state machine state */ + u8_t state; + /** sent number of probes or announces, dependent on state */ + u8_t sent_num; + /** ticks to wait, tick is AUTOIP_TMR_INTERVAL long */ + u16_t ttw; + /** ticks until a conflict can be solved by defending */ + u8_t lastconflict; + /** total number of probed/used Link Local IP-Addresses */ + u8_t tried_llipaddr; +}; + + +void autoip_set_struct(struct netif *netif, struct autoip *autoip); +/** Remove a struct autoip previously set to the netif using autoip_set_struct() */ +#define autoip_remove_struct(netif) do { (netif)->autoip = NULL; } while (0) +err_t autoip_start(struct netif *netif); +err_t autoip_stop(struct netif *netif); +void autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr); +void autoip_tmr(void); +void autoip_network_changed(struct netif *netif); +u8_t autoip_supplied_address(const struct netif *netif); + +/* for lwIP internal use by ip4.c */ +u8_t autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr); + +#define netif_autoip_data(netif) ((struct autoip*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_AUTOIP */ + +#endif /* LWIP_HDR_AUTOIP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h new file mode 100644 index 000000000..d19e613cb --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h @@ -0,0 +1,167 @@ +/** + * @file + * Debug messages infrastructure + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
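A short sketch of the autoip calls above in use, assuming a netif that has already been added and brought up elsewhere:

#include "lwip/autoip.h"
#include "lwip/err.h"

static struct autoip autoip_state;   /* caller-provided storage, avoids heap use */

void example_start_linklocal(struct netif *nif) {
  autoip_set_struct(nif, &autoip_state);   /* optional: otherwise lwIP allocates */
  if (autoip_start(nif) == ERR_OK) {
    /* A 169.254.x.y address is usable once autoip_supplied_address(nif)
       returns 1; autoip_tmr() must be driven every AUTOIP_TMR_INTERVAL ms
       (the stack's timer framework normally does this). */
  }
}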
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_DEBUG_H +#define LWIP_HDR_DEBUG_H + +#include "lwip/arch.h" +#include "lwip/opt.h" + +/** + * @defgroup debugging_levels LWIP_DBG_MIN_LEVEL and LWIP_DBG_TYPES_ON values + * @ingroup lwip_opts_debugmsg + * @{ + */ + +/** @name Debug level (LWIP_DBG_MIN_LEVEL) + * @{ + */ +/** Debug level: ALL messages*/ +#define LWIP_DBG_LEVEL_ALL 0x00 +/** Debug level: Warnings. bad checksums, dropped packets, ... */ +#define LWIP_DBG_LEVEL_WARNING 0x01 +/** Debug level: Serious. memory allocation failures, ... */ +#define LWIP_DBG_LEVEL_SERIOUS 0x02 +/** Debug level: Severe */ +#define LWIP_DBG_LEVEL_SEVERE 0x03 +/** + * @} + */ + +#define LWIP_DBG_MASK_LEVEL 0x03 +/* compatibility define only */ +#define LWIP_DBG_LEVEL_OFF LWIP_DBG_LEVEL_ALL + +/** @name Enable/disable debug messages completely (LWIP_DBG_TYPES_ON) + * @{ + */ +/** flag for LWIP_DEBUGF to enable that debug message */ +#define LWIP_DBG_ON 0x80U +/** flag for LWIP_DEBUGF to disable that debug message */ +#define LWIP_DBG_OFF 0x00U +/** + * @} + */ + +/** @name Debug message types (LWIP_DBG_TYPES_ON) + * @{ + */ +/** flag for LWIP_DEBUGF indicating a tracing message (to follow program flow) */ +#define LWIP_DBG_TRACE 0x40U +/** flag for LWIP_DEBUGF indicating a state debug message (to follow module states) */ +#define LWIP_DBG_STATE 0x20U +/** flag for LWIP_DEBUGF indicating newly added code, not thoroughly tested yet */ +#define LWIP_DBG_FRESH 0x10U +/** flag for LWIP_DEBUGF to halt after printing this debug message */ +#define LWIP_DBG_HALT 0x08U +/** + * @} + */ + +/** + * @} + */ + +/** + * @defgroup lwip_assertions Assertion handling + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_NOASSERT: Disable LWIP_ASSERT checks: + * To disable assertions define LWIP_NOASSERT in arch/cc.h. + */ +#ifdef __DOXYGEN__ +#define LWIP_NOASSERT +#undef LWIP_NOASSERT +#endif +/** + * @} + */ + +#ifndef LWIP_NOASSERT +#define LWIP_ASSERT(message, assertion) do { if (!(assertion)) { \ + LWIP_PLATFORM_ASSERT(message); }} while(0) +#ifndef LWIP_PLATFORM_ASSERT +#error "If you want to use LWIP_ASSERT, LWIP_PLATFORM_ASSERT(message) needs to be defined in your arch/cc.h" +#endif +#else /* LWIP_NOASSERT */ +#define LWIP_ASSERT(message, assertion) +#endif /* LWIP_NOASSERT */ + +#ifndef LWIP_ERROR +#ifndef LWIP_NOASSERT +#define LWIP_PLATFORM_ERROR(message) LWIP_PLATFORM_ASSERT(message) +#elif defined LWIP_DEBUG +#define LWIP_PLATFORM_ERROR(message) LWIP_PLATFORM_DIAG((message)) +#else +#define LWIP_PLATFORM_ERROR(message) +#endif + +/* if "expression" isn't true, then print "message" and execute "handler" expression */ +#define LWIP_ERROR(message, expression, handler) do { if (!(expression)) { \ + LWIP_PLATFORM_ERROR(message); handler;}} while(0) +#endif /* LWIP_ERROR */ + +/** Enable debug message printing, but only if debug message type is enabled + * AND is of correct type AND is at least LWIP_DBG_LEVEL. 
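A sketch of how the assertion/error macros above (and LWIP_DEBUGF, defined just below) are typically used inside a module; MYMOD_DEBUG is a hypothetical per-module flag of the kind normally configured in lwipopts.h:

#include "lwip/debug.h"
#include "lwip/err.h"

#ifndef MYMOD_DEBUG
#define MYMOD_DEBUG LWIP_DBG_ON      /* hypothetical module switch */
#endif

static err_t mymod_send(void *conn, const void *data) {
  /* Invariant that indicates a bug if violated: halts via LWIP_PLATFORM_ASSERT. */
  LWIP_ASSERT("mymod_send: conn initialized", conn != NULL);

  /* Recoverable misuse: print and run the handler expression (early return). */
  LWIP_ERROR("mymod_send: data != NULL", data != NULL, return ERR_ARG;);

  /* Trace output, fully compiled out unless LWIP_DEBUG is defined. */
  LWIP_DEBUGF(MYMOD_DEBUG | LWIP_DBG_TRACE, ("mymod_send: %p\n", data));
  return ERR_OK;
}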
+ */ +#ifdef __DOXYGEN__ +#define LWIP_DEBUG +#undef LWIP_DEBUG +#endif + +#ifdef LWIP_DEBUG +#ifndef LWIP_PLATFORM_DIAG +#error "If you want to use LWIP_DEBUG, LWIP_PLATFORM_DIAG(message) needs to be defined in your arch/cc.h" +#endif +#define LWIP_DEBUGF(debug, message) do { \ + if ( \ + ((debug) & LWIP_DBG_ON) && \ + ((debug) & LWIP_DBG_TYPES_ON) && \ + ((s16_t)((debug) & LWIP_DBG_MASK_LEVEL) >= LWIP_DBG_MIN_LEVEL)) { \ + LWIP_PLATFORM_DIAG(message); \ + if ((debug) & LWIP_DBG_HALT) { \ + while(1); \ + } \ + } \ + } while(0) + +#else /* LWIP_DEBUG */ +#define LWIP_DEBUGF(debug, message) +#endif /* LWIP_DEBUG */ + +#endif /* LWIP_HDR_DEBUG_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/def.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/def.h new file mode 100644 index 000000000..2db7522e9 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/def.h @@ -0,0 +1,141 @@ +/** + * @file + * various utility macros + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_DEF_H +#define LWIP_HDR_DEF_H + +/* arch.h might define NULL already */ +#include "lwip/arch.h" +#include "lwip/opt.h" +#if LWIP_PERF +#include "arch/perf.h" +#else /* LWIP_PERF */ +#define PERF_START /* null definition */ +#define PERF_STOP(x) /* null definition */ +#endif /* LWIP_PERF */ + +#ifdef __cplusplus +extern "C" { +#endif + +#define LWIP_MAX(x , y) (((x) > (y)) ? (x) : (y)) +#define LWIP_MIN(x , y) (((x) < (y)) ? (x) : (y)) + +/* Get the number of entries in an array ('x' must NOT be a pointer!) 
*/ +#define LWIP_ARRAYSIZE(x) (sizeof(x)/sizeof((x)[0])) + +/** Create u32_t value from bytes */ +#define LWIP_MAKEU32(a,b,c,d) (((u32_t)((a) & 0xff) << 24) | \ + ((u32_t)((b) & 0xff) << 16) | \ + ((u32_t)((c) & 0xff) << 8) | \ + (u32_t)((d) & 0xff)) + +#ifndef NULL +#ifdef __cplusplus +#define NULL 0 +#else +#define NULL ((void *)0) +#endif +#endif + +#if BYTE_ORDER == BIG_ENDIAN +#define lwip_htons(x) (x) +#define lwip_ntohs(x) (x) +#define lwip_htonl(x) (x) +#define lwip_ntohl(x) (x) +#define PP_HTONS(x) (x) +#define PP_NTOHS(x) (x) +#define PP_HTONL(x) (x) +#define PP_NTOHL(x) (x) +#else /* BYTE_ORDER != BIG_ENDIAN */ +#ifndef lwip_htons +u16_t lwip_htons(u16_t x); +#endif +#define lwip_ntohs(x) lwip_htons(x) + +#ifndef lwip_htonl +u32_t lwip_htonl(u32_t x); +#endif +#define lwip_ntohl(x) lwip_htonl(x) + +/* These macros should be calculated by the preprocessor and are used + with compile-time constants only (so that there is no little-endian + overhead at runtime). */ +#define PP_HTONS(x) ((((x) & 0x00ffUL) << 8) | (((x) & 0xff00UL) >> 8)) +#define PP_NTOHS(x) PP_HTONS(x) +#define PP_HTONL(x) ((((x) & 0x000000ffUL) << 24) | \ + (((x) & 0x0000ff00UL) << 8) | \ + (((x) & 0x00ff0000UL) >> 8) | \ + (((x) & 0xff000000UL) >> 24)) +#define PP_NTOHL(x) PP_HTONL(x) +#endif /* BYTE_ORDER == BIG_ENDIAN */ + +/* Provide usual function names as macros for users, but this can be turned off */ +#ifndef LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS +#define htons(x) lwip_htons(x) +#define ntohs(x) lwip_ntohs(x) +#define htonl(x) lwip_htonl(x) +#define ntohl(x) lwip_ntohl(x) +#endif + +/* Functions that are not available as standard implementations. + * In cc.h, you can #define these to implementations available on + * your platform to save some code bytes if you use these functions + * in your application, too. + */ + +#ifndef lwip_itoa +/* This can be #defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform */ +void lwip_itoa(char* result, size_t bufsize, int number); +#endif +#ifndef lwip_strnicmp +/* This can be #defined to strnicmp() or strncasecmp() depending on your platform */ +int lwip_strnicmp(const char* str1, const char* str2, size_t len); +#endif +#ifndef lwip_stricmp +/* This can be #defined to stricmp() or strcasecmp() depending on your platform */ +int lwip_stricmp(const char* str1, const char* str2); +#endif +#ifndef lwip_strnstr +/* This can be #defined to strnstr() depending on your platform */ +char* lwip_strnstr(const char* buffer, const char* token, size_t n); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_DEF_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h new file mode 100644 index 000000000..97504919c --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h @@ -0,0 +1,143 @@ +/** + * @file + * DHCP client API + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Leon Woestenberg + * + */ +#ifndef LWIP_HDR_DHCP_H +#define LWIP_HDR_DHCP_H + +#include "lwip/opt.h" + +#if LWIP_DHCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" +#include "lwip/udp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** period (in seconds) of the application calling dhcp_coarse_tmr() */ +#define DHCP_COARSE_TIMER_SECS 60 +/** period (in milliseconds) of the application calling dhcp_coarse_tmr() */ +#define DHCP_COARSE_TIMER_MSECS (DHCP_COARSE_TIMER_SECS * 1000UL) +/** period (in milliseconds) of the application calling dhcp_fine_tmr() */ +#define DHCP_FINE_TIMER_MSECS 500 + +#define DHCP_BOOT_FILE_LEN 128U + +/* AutoIP cooperation flags (struct dhcp.autoip_coop_state) */ +typedef enum { + DHCP_AUTOIP_COOP_STATE_OFF = 0, + DHCP_AUTOIP_COOP_STATE_ON = 1 +} dhcp_autoip_coop_state_enum_t; + +struct dhcp +{ + /** transaction identifier of last sent request */ + u32_t xid; + /** incoming msg */ + struct dhcp_msg *msg_in; + /** track PCB allocation state */ + u8_t pcb_allocated; + /** current DHCP state machine state */ + u8_t state; + /** retries of current request */ + u8_t tries; +#if LWIP_DHCP_AUTOIP_COOP + u8_t autoip_coop_state; +#endif + u8_t subnet_mask_given; + + struct pbuf *p_out; /* pbuf of outcoming msg */ + struct dhcp_msg *msg_out; /* outgoing msg */ + u16_t options_out_len; /* outgoing msg options length */ + u16_t request_timeout; /* #ticks with period DHCP_FINE_TIMER_SECS for request timeout */ + u16_t t1_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for renewal time */ + u16_t t2_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for rebind time */ + u16_t t1_renew_time; /* #ticks with period DHCP_COARSE_TIMER_SECS until next renew try */ + u16_t t2_rebind_time; /* #ticks with period DHCP_COARSE_TIMER_SECS until next rebind try */ + u16_t lease_used; /* #ticks with period DHCP_COARSE_TIMER_SECS since last received DHCP ack */ + u16_t t0_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for lease time */ + ip_addr_t server_ip_addr; /* dhcp server address that offered this lease (ip_addr_t because passed to UDP) */ + ip4_addr_t offered_ip_addr; + ip4_addr_t offered_sn_mask; + ip4_addr_t offered_gw_addr; + + u32_t offered_t0_lease; /* lease period (in seconds) */ + u32_t offered_t1_renew; /* recommended renew time (usually 50% of lease period) */ + u32_t 
offered_t2_rebind; /* recommended rebind time (usually 87.5 % of lease period) */ +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_t offered_si_addr; + char boot_file_name[DHCP_BOOT_FILE_LEN]; +#endif /* LWIP_DHCP_BOOTP_FILE */ +}; + + +void dhcp_set_struct(struct netif *netif, struct dhcp *dhcp); +/** Remove a struct dhcp previously set to the netif using dhcp_set_struct() */ +#define dhcp_remove_struct(netif) netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, NULL) +void dhcp_cleanup(struct netif *netif); +err_t dhcp_start(struct netif *netif); +err_t dhcp_renew(struct netif *netif); +err_t dhcp_release(struct netif *netif); +void dhcp_stop(struct netif *netif); +void dhcp_inform(struct netif *netif); +void dhcp_network_changed(struct netif *netif); +#if DHCP_DOES_ARP_CHECK +void dhcp_arp_reply(struct netif *netif, const ip4_addr_t *addr); +#endif +u8_t dhcp_supplied_address(const struct netif *netif); +/* to be called every minute */ +void dhcp_coarse_tmr(void); +/* to be called every half second */ +void dhcp_fine_tmr(void); + +#if LWIP_DHCP_GET_NTP_SRV +/** This function must exist, in order to add offered NTP servers to + * the NTP (or SNTP) engine. + * See LWIP_DHCP_MAX_NTP_SERVERS */ +extern void dhcp_set_ntp_servers(u8_t num_ntp_servers, const ip4_addr_t* ntp_server_addrs); +#endif /* LWIP_DHCP_GET_NTP_SRV */ + +#define netif_dhcp_data(netif) ((struct dhcp*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DHCP */ + +#endif /*LWIP_HDR_DHCP_H*/ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h new file mode 100644 index 000000000..a6a44126f --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h @@ -0,0 +1,58 @@ +/** + * @file + * + * IPv6 address autoconfiguration as per RFC 4862. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
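For reference, a sketch of the typical client-side call sequence for the DHCP API above (netif setup itself is out of scope of this header):

#include "lwip/dhcp.h"
#include "lwip/netif.h"

void example_start_dhcp(struct netif *nif) {
  /* Kicks off DISCOVER/REQUEST; retries and T1/T2 handling are driven by
     dhcp_fine_tmr() every 500 ms and dhcp_coarse_tmr() every 60 s. */
  if (dhcp_start(nif) == ERR_OK) {
    /* Later, e.g. from a netif status callback:
       if (dhcp_supplied_address(nif)) { the lease is bound and usable } */
  }
}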
+ * + * Author: Ivan Delamer + * + * IPv6 address autoconfiguration as per RFC 4862. + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_IP6_DHCP6_H +#define LWIP_HDR_IP6_DHCP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */ + + +struct dhcp6 +{ + /*@todo: implement DHCP6*/ +}; + +#endif /* LWIP_IPV6_DHCP6 */ + +#endif /* LWIP_HDR_IP6_DHCP6_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h new file mode 100644 index 000000000..23fd4f55e --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h @@ -0,0 +1,130 @@ +/** + * @file + * DNS API + */ + +/** + * lwip DNS resolver header file. + + * Author: Jim Pettinato + * April 2007 + + * ported from uIP resolv.c Copyright (c) 2002-2003, Adam Dunkels. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef LWIP_HDR_DNS_H +#define LWIP_HDR_DNS_H + +#include "lwip/opt.h" + +#if LWIP_DNS + +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** DNS timer period */ +#define DNS_TMR_INTERVAL 1000 + +/* DNS resolve types: */ +#define LWIP_DNS_ADDRTYPE_IPV4 0 +#define LWIP_DNS_ADDRTYPE_IPV6 1 +#define LWIP_DNS_ADDRTYPE_IPV4_IPV6 2 /* try to resolve IPv4 first, try IPv6 if IPv4 fails only */ +#define LWIP_DNS_ADDRTYPE_IPV6_IPV4 3 /* try to resolve IPv6 first, try IPv4 if IPv6 fails only */ +#if LWIP_IPV4 && LWIP_IPV6 +#ifndef LWIP_DNS_ADDRTYPE_DEFAULT +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV4_IPV6 +#endif +#elif LWIP_IPV4 +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV4 +#else +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV6 +#endif + +#if DNS_LOCAL_HOSTLIST +/** struct used for local host-list */ +struct local_hostlist_entry { + /** static hostname */ + const char *name; + /** static host address in network byteorder */ + ip_addr_t addr; + struct local_hostlist_entry *next; +}; +#define DNS_LOCAL_HOSTLIST_ELEM(name, addr_init) {name, addr_init, NULL} +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +#ifndef DNS_LOCAL_HOSTLIST_MAX_NAMELEN +#define DNS_LOCAL_HOSTLIST_MAX_NAMELEN DNS_MAX_NAME_LENGTH +#endif +#define LOCALHOSTLIST_ELEM_SIZE ((sizeof(struct local_hostlist_entry) + DNS_LOCAL_HOSTLIST_MAX_NAMELEN + 1)) +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ +#endif /* DNS_LOCAL_HOSTLIST */ + +#if LWIP_IPV4 +extern const ip_addr_t dns_mquery_v4group; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +extern const ip_addr_t dns_mquery_v6group; +#endif /* LWIP_IPV6 */ + +/** Callback which is invoked when a hostname is found. + * A function of this type must be implemented by the application using the DNS resolver. + * @param name pointer to the name that was looked up. + * @param ipaddr pointer to an ip_addr_t containing the IP address of the hostname, + * or NULL if the name could not be found (or on any other error). 
+ * @param callback_arg a user-specified callback argument passed to dns_gethostbyname +*/ +typedef void (*dns_found_callback)(const char *name, const ip_addr_t *ipaddr, void *callback_arg); + +void dns_init(void); +void dns_tmr(void); +void dns_setserver(u8_t numdns, const ip_addr_t *dnsserver); +const ip_addr_t* dns_getserver(u8_t numdns); +err_t dns_gethostbyname(const char *hostname, ip_addr_t *addr, + dns_found_callback found, void *callback_arg); +err_t dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, + dns_found_callback found, void *callback_arg, + u8_t dns_addrtype); + + +#if DNS_LOCAL_HOSTLIST +size_t dns_local_iterate(dns_found_callback iterator_fn, void *iterator_arg); +err_t dns_local_lookup(const char *hostname, ip_addr_t *addr, u8_t dns_addrtype); +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +int dns_local_removehost(const char *hostname, const ip_addr_t *addr); +err_t dns_local_addhost(const char *hostname, const ip_addr_t *addr); +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ +#endif /* DNS_LOCAL_HOSTLIST */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DNS */ + +#endif /* LWIP_HDR_DNS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/err.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/err.h new file mode 100644 index 000000000..4b07ac89b --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/err.h @@ -0,0 +1,119 @@ +/** + * @file + * lwIP Error codes + */ +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ERR_H +#define LWIP_HDR_ERR_H + +#include "lwip/opt.h" +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup infrastructure_errors Error codes + * @ingroup infrastructure + * @{ + */ + +/** Define LWIP_ERR_T in cc.h if you want to use + * a different type for your platform (must be signed). 
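A sketch of the asynchronous lookup flow for the DNS API above; the hostname is illustrative only:

#include "lwip/dns.h"

static void example_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg) {
  LWIP_UNUSED_ARG(name); LWIP_UNUSED_ARG(arg);
  if (ipaddr != NULL) {
    /* resolved: use *ipaddr */
  } else {
    /* lookup failed or timed out */
  }
}

void example_lookup(void) {
  ip_addr_t addr;
  err_t err = dns_gethostbyname("example.com", &addr, example_dns_found, NULL);
  if (err == ERR_OK) {
    /* answer came from cache/local list: 'addr' is valid immediately and
       the callback will NOT be invoked */
  } else if (err == ERR_INPROGRESS) {
    /* query sent; example_dns_found() fires when the answer arrives */
  }
}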
*/ +#ifdef LWIP_ERR_T +typedef LWIP_ERR_T err_t; +#else /* LWIP_ERR_T */ +typedef s8_t err_t; +#endif /* LWIP_ERR_T*/ + +/** Definitions for error constants. */ +typedef enum { +/** No error, everything OK. */ + ERR_OK = 0, +/** Out of memory error. */ + ERR_MEM = -1, +/** Buffer error. */ + ERR_BUF = -2, +/** Timeout. */ + ERR_TIMEOUT = -3, +/** Routing problem. */ + ERR_RTE = -4, +/** Operation in progress */ + ERR_INPROGRESS = -5, +/** Illegal value. */ + ERR_VAL = -6, +/** Operation would block. */ + ERR_WOULDBLOCK = -7, +/** Address in use. */ + ERR_USE = -8, +/** Already connecting. */ + ERR_ALREADY = -9, +/** Conn already established.*/ + ERR_ISCONN = -10, +/** Not connected. */ + ERR_CONN = -11, +/** Low-level netif error */ + ERR_IF = -12, + +/** Connection aborted. */ + ERR_ABRT = -13, +/** Connection reset. */ + ERR_RST = -14, +/** Connection closed. */ + ERR_CLSD = -15, +/** Illegal argument. */ + ERR_ARG = -16 +} err_enum_t; + +#define ERR_IS_FATAL(e) ((e) <= ERR_ABRT) + +/** + * @} + */ + +#ifdef LWIP_DEBUG +extern const char *lwip_strerr(err_t err); +#else +#define lwip_strerr(x) "" +#endif /* LWIP_DEBUG */ + +#if !NO_SYS +int err_to_errno(err_t err); +#endif /* !NO_SYS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ERR_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h new file mode 100644 index 000000000..2d326cbb5 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h @@ -0,0 +1,193 @@ +/** + * @file + * Posix Errno defines + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
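One way to consume these codes, as a sketch: ERR_IS_FATAL above splits the enum into errors where the pcb/connection is gone versus transient conditions worth retrying. The helper is illustrative, not part of lwIP.

#include "lwip/err.h"

static int example_classify(err_t err) {
  if (err == ERR_OK) {
    return 0;            /* success */
  }
  if (ERR_IS_FATAL(err)) {
    return -1;           /* ERR_ABRT/ERR_RST/ERR_CLSD/ERR_ARG: give up */
  }
  return 1;              /* e.g. ERR_MEM or ERR_WOULDBLOCK: retry later */
}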
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ERRNO_H +#define LWIP_HDR_ERRNO_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef LWIP_PROVIDE_ERRNO + +#define EPERM 1 /* Operation not permitted */ +#define ENOENT 2 /* No such file or directory */ +#define ESRCH 3 /* No such process */ +#define EINTR 4 /* Interrupted system call */ +#define EIO 5 /* I/O error */ +#define ENXIO 6 /* No such device or address */ +#define E2BIG 7 /* Arg list too long */ +#define ENOEXEC 8 /* Exec format error */ +#define EBADF 9 /* Bad file number */ +#define ECHILD 10 /* No child processes */ +#define EAGAIN 11 /* Try again */ +#define ENOMEM 12 /* Out of memory */ +#define EACCES 13 /* Permission denied */ +#define EFAULT 14 /* Bad address */ +#define ENOTBLK 15 /* Block device required */ +#define EBUSY 16 /* Device or resource busy */ +#define EEXIST 17 /* File exists */ +#define EXDEV 18 /* Cross-device link */ +#define ENODEV 19 /* No such device */ +#define ENOTDIR 20 /* Not a directory */ +#define EISDIR 21 /* Is a directory */ +#define EINVAL 22 /* Invalid argument */ +#define ENFILE 23 /* File table overflow */ +#define EMFILE 24 /* Too many open files */ +#define ENOTTY 25 /* Not a typewriter */ +#define ETXTBSY 26 /* Text file busy */ +#define EFBIG 27 /* File too large */ +#define ENOSPC 28 /* No space left on device */ +#define ESPIPE 29 /* Illegal seek */ +#define EROFS 30 /* Read-only file system */ +#define EMLINK 31 /* Too many links */ +#define EPIPE 32 /* Broken pipe */ +#define EDOM 33 /* Math argument out of domain of func */ +#define ERANGE 34 /* Math result not representable */ +#define EDEADLK 35 /* Resource deadlock would occur */ +#define ENAMETOOLONG 36 /* File name too long */ +#define ENOLCK 37 /* No record locks available */ +#define ENOSYS 38 /* Function not implemented */ +#define ENOTEMPTY 39 /* Directory not empty */ +#define ELOOP 40 /* Too many symbolic links encountered */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define ENOMSG 42 /* No message of desired type */ +#define EIDRM 43 /* Identifier removed */ +#define ECHRNG 44 /* Channel number out of range */ +#define EL2NSYNC 45 /* Level 2 not synchronized */ +#define EL3HLT 46 /* Level 3 halted */ +#define EL3RST 47 /* Level 3 reset */ +#define ELNRNG 48 /* Link number out of range */ +#define EUNATCH 49 /* Protocol driver not attached */ +#define ENOCSI 50 /* No CSI structure available */ +#define EL2HLT 51 /* Level 2 halted */ +#define EBADE 52 /* Invalid exchange */ +#define EBADR 53 /* Invalid request descriptor */ +#define EXFULL 54 /* Exchange full */ +#define ENOANO 55 /* No anode */ +#define EBADRQC 56 /* Invalid request code */ +#define EBADSLT 57 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 59 /* Bad font file format */ +#define ENOSTR 60 /* Device not a stream */ +#define ENODATA 61 /* No data available */ +#define ETIME 62 /* Timer expired */ +#define ENOSR 63 /* Out of streams resources */ +#define ENONET 64 /* Machine is not on the network */ +#define ENOPKG 65 /* Package not installed */ +#define EREMOTE 66 /* Object is remote */ +#define ENOLINK 67 /* Link has been severed */ +#define EADV 68 /* Advertise error */ +#define ESRMNT 69 /* Srmount error */ +#define ECOMM 70 /* Communication error on send */ +#define EPROTO 71 /* Protocol error */ +#define EMULTIHOP 72 /* Multihop attempted */ +#define EDOTDOT 73 /* RFS specific error */ +#define EBADMSG 74 /* Not a data message */ +#define EOVERFLOW 75 /* Value too large for defined 
data type */ +#define ENOTUNIQ 76 /* Name not unique on network */ +#define EBADFD 77 /* File descriptor in bad state */ +#define EREMCHG 78 /* Remote address changed */ +#define ELIBACC 79 /* Can not access a needed shared library */ +#define ELIBBAD 80 /* Accessing a corrupted shared library */ +#define ELIBSCN 81 /* .lib section in a.out corrupted */ +#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 83 /* Cannot exec a shared library directly */ +#define EILSEQ 84 /* Illegal byte sequence */ +#define ERESTART 85 /* Interrupted system call should be restarted */ +#define ESTRPIPE 86 /* Streams pipe error */ +#define EUSERS 87 /* Too many users */ +#define ENOTSOCK 88 /* Socket operation on non-socket */ +#define EDESTADDRREQ 89 /* Destination address required */ +#define EMSGSIZE 90 /* Message too long */ +#define EPROTOTYPE 91 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 92 /* Protocol not available */ +#define EPROTONOSUPPORT 93 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ +#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 96 /* Protocol family not supported */ +#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ +#define EADDRINUSE 98 /* Address already in use */ +#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ +#define ENETDOWN 100 /* Network is down */ +#define ENETUNREACH 101 /* Network is unreachable */ +#define ENETRESET 102 /* Network dropped connection because of reset */ +#define ECONNABORTED 103 /* Software caused connection abort */ +#define ECONNRESET 104 /* Connection reset by peer */ +#define ENOBUFS 105 /* No buffer space available */ +#define EISCONN 106 /* Transport endpoint is already connected */ +#define ENOTCONN 107 /* Transport endpoint is not connected */ +#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 109 /* Too many references: cannot splice */ +#define ETIMEDOUT 110 /* Connection timed out */ +#define ECONNREFUSED 111 /* Connection refused */ +#define EHOSTDOWN 112 /* Host is down */ +#define EHOSTUNREACH 113 /* No route to host */ +#define EALREADY 114 /* Operation already in progress */ +#define EINPROGRESS 115 /* Operation now in progress */ +#define ESTALE 116 /* Stale NFS file handle */ +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ +#define EDQUOT 122 /* Quota exceeded */ + +#define ENOMEDIUM 123 /* No medium found */ +#define EMEDIUMTYPE 124 /* Wrong medium type */ + +#ifndef errno +extern int errno; +#endif + +#else /* LWIP_PROVIDE_ERRNO */ + +/* Define LWIP_ERRNO_INCLUDE to <errno.h> to include the error defines here */ +#ifdef LWIP_ERRNO_INCLUDE +#include LWIP_ERRNO_INCLUDE +#endif /* LWIP_ERRNO_INCLUDE */ + +#endif /* LWIP_PROVIDE_ERRNO */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ERRNO_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h new file mode 100644 index 000000000..be5badfc5 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h @@ -0,0 +1,106 @@ +/** + * @file + * Ethernet output function - handles OUTGOING ethernet level traffic, 
implements + * ARP resolving. + * To be used in most low-level netif implementations + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_NETIF_ETHARP_H +#define LWIP_HDR_NETIF_ETHARP_H + +#include "lwip/opt.h" + +#if LWIP_ARP || LWIP_ETHERNET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip4_addr.h" +#include "lwip/netif.h" +#include "lwip/ip4.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_IPV4 && LWIP_ARP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/prot/etharp.h" + +/** 1 second period */ +#define ARP_TMR_INTERVAL 1000 + +#if ARP_QUEUEING +/** struct for queueing outgoing packets for unknown address + * defined here to be accessed by memp.h + */ +struct etharp_q_entry { + struct etharp_q_entry *next; + struct pbuf *p; +}; +#endif /* ARP_QUEUEING */ + +#define etharp_init() /* Compatibility define, no init needed. */ +void etharp_tmr(void); +s8_t etharp_find_addr(struct netif *netif, const ip4_addr_t *ipaddr, + struct eth_addr **eth_ret, const ip4_addr_t **ip_ret); +u8_t etharp_get_entry(u8_t i, ip4_addr_t **ipaddr, struct netif **netif, struct eth_addr **eth_ret); +err_t etharp_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr); +err_t etharp_query(struct netif *netif, const ip4_addr_t *ipaddr, struct pbuf *q); +err_t etharp_request(struct netif *netif, const ip4_addr_t *ipaddr); +/** For Ethernet network interfaces, we might want to send "gratuitous ARP"; + * this is an ARP packet sent by a node in order to spontaneously cause other + * nodes to update an entry in their ARP cache. + * From RFC 3220 "IP Mobility Support for IPv4" section 4.6. 
*/ +#define etharp_gratuitous(netif) etharp_request((netif), netif_ip4_addr(netif)) +void etharp_cleanup_netif(struct netif *netif); + +#if ETHARP_SUPPORT_STATIC_ENTRIES +err_t etharp_add_static_entry(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr); +err_t etharp_remove_static_entry(const ip4_addr_t *ipaddr); +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +#endif /* LWIP_IPV4 && LWIP_ARP */ + +void etharp_input(struct pbuf *p, struct netif *netif); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ARP || LWIP_ETHERNET */ + +#endif /* LWIP_HDR_NETIF_ETHARP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h new file mode 100644 index 000000000..f393c9cda --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h @@ -0,0 +1,68 @@ +/** + * @file + * + * Ethernet output for IPv6. Uses ND tables for link-layer addressing. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
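A sketch of two common uses of the etharp API above, assuming an interface that is already up with a valid IPv4 address:

#include "lwip/etharp.h"

void example_announce(struct netif *nif) {
  /* Gratuitous ARP so neighbours refresh their caches after an address change. */
  etharp_gratuitous(nif);
}

int example_is_resolved(struct netif *nif, const ip4_addr_t *dst) {
  struct eth_addr *ethaddr;
  const ip4_addr_t *ipaddr;
  /* etharp_find_addr() returns the table index (>= 0) if 'dst' is already
     resolved, -1 otherwise. */
  return etharp_find_addr(nif, dst, &ethaddr, &ipaddr) >= 0;
}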
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ETHIP6_H +#define LWIP_HDR_ETHIP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_ETHERNET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" + + +#ifdef __cplusplus +extern "C" { +#endif + + +err_t ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 && LWIP_ETHERNET */ + +#endif /* LWIP_HDR_ETHIP6_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h new file mode 100644 index 000000000..d0d67b690 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h @@ -0,0 +1,110 @@ +/** + * @file + * ICMP API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ICMP_H +#define LWIP_HDR_ICMP_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/prot/icmp.h" + +#if LWIP_IPV6 && LWIP_ICMP6 +#include "lwip/icmp6.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** ICMP destination unreachable codes */ +enum icmp_dur_type { + /** net unreachable */ + ICMP_DUR_NET = 0, + /** host unreachable */ + ICMP_DUR_HOST = 1, + /** protocol unreachable */ + ICMP_DUR_PROTO = 2, + /** port unreachable */ + ICMP_DUR_PORT = 3, + /** fragmentation needed and DF set */ + ICMP_DUR_FRAG = 4, + /** source route failed */ + ICMP_DUR_SR = 5 +}; + +/** ICMP time exceeded codes */ +enum icmp_te_type { + /** time to live exceeded in transit */ + ICMP_TE_TTL = 0, + /** fragment reassembly time exceeded */ + ICMP_TE_FRAG = 1 +}; + +#if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */ + +void icmp_input(struct pbuf *p, struct netif *inp); +void icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t); +void icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t); + +#endif /* LWIP_IPV4 && LWIP_ICMP */ + +#if LWIP_IPV4 && LWIP_IPV6 +#if LWIP_ICMP && LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) ((isipv6) ? \ + icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT) : \ + icmp_dest_unreach(pbuf, ICMP_DUR_PORT)) +#elif LWIP_ICMP +#define icmp_port_unreach(isipv6, pbuf) do{ if(!(isipv6)) { icmp_dest_unreach(pbuf, ICMP_DUR_PORT);}}while(0) +#elif LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) do{ if(isipv6) { icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT);}}while(0) +#else +#define icmp_port_unreach(isipv6, pbuf) +#endif +#elif LWIP_IPV6 && LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT) +#elif LWIP_IPV4 && LWIP_ICMP +#define icmp_port_unreach(isipv6, pbuf) icmp_dest_unreach(pbuf, ICMP_DUR_PORT) +#else /* (LWIP_IPV6 && LWIP_ICMP6) || (LWIP_IPV4 && LWIP_ICMP) */ +#define icmp_port_unreach(isipv6, pbuf) +#endif /* (LWIP_IPV6 && LWIP_ICMP6) || (LWIP_IPV4 && LWIP_ICMP) LWIP_IPV4*/ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ICMP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h new file mode 100644 index 000000000..a5f812f34 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h @@ -0,0 +1,70 @@ +/** + * @file + * + * IPv6 version of ICMP, as per RFC 4443. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
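A sketch of where the ICMP helpers above get called; the surrounding demux function is hypothetical (inside lwIP itself, the UDP input path does something similar when no pcb matches):

#include "lwip/icmp.h"
#include "lwip/pbuf.h"

/* 'p' must still point at the offending IP header when the ICMP
   error is generated. */
static void example_no_listener(struct pbuf *p) {
#if LWIP_ICMP
  icmp_dest_unreach(p, ICMP_DUR_PORT);   /* "port unreachable" back to sender */
#endif
  pbuf_free(p);
}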
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_ICMP6_H +#define LWIP_HDR_ICMP6_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/prot/icmp6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +void icmp6_input(struct pbuf *p, struct netif *inp); +void icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c); +void icmp6_packet_too_big(struct pbuf *p, u32_t mtu); +void icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c); +void icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, u32_t pointer); + +#endif /* LWIP_ICMP6 && LWIP_IPV6 */ + + +#ifdef __cplusplus +} +#endif + + +#endif /* LWIP_HDR_ICMP6_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h new file mode 100644 index 000000000..ca52a2d32 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h @@ -0,0 +1,115 @@ +/** + * @file + * IGMP API + */ + +/* + * Copyright (c) 2002 CITEL Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
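These ICMPv6 error generators take the offending packet as-is and build the error message around it. A sketch of the classic use, assuming a hypothetical forwarding-path helper; ICMP6_TE_HL (hop limit exceeded in transit) comes from lwip/prot/icmp6.h:

#include "lwip/icmp6.h"
#include "lwip/prot/ip6.h"

static void check_hop_limit(struct pbuf *p, struct ip6_hdr *hdr)
{
  if (IP6H_HOPLIM(hdr) <= 1) {
    icmp6_time_exceeded(p, ICMP6_TE_HL); /* sends the error message */
    pbuf_free(p);                        /* caller still owns the pbuf */
    return;
  }
  /* ... otherwise decrement the hop limit and forward ... */
}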
IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is a contribution to the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. +*/ + +#ifndef LWIP_HDR_IGMP_H +#define LWIP_HDR_IGMP_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/pbuf.h" + +#if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* IGMP timer */ +#define IGMP_TMR_INTERVAL 100 /* Milliseconds */ +#define IGMP_V1_DELAYING_MEMBER_TMR (1000/IGMP_TMR_INTERVAL) +#define IGMP_JOIN_DELAYING_MEMBER_TMR (500 /IGMP_TMR_INTERVAL) + +/* Compatibility defines (don't use for new code) */ +#define IGMP_DEL_MAC_FILTER NETIF_DEL_MAC_FILTER +#define IGMP_ADD_MAC_FILTER NETIF_ADD_MAC_FILTER + +/** + * igmp group structure - there is + * a list of groups for each interface + * these should really be linked from the interface, but + * if we keep them separate we will not affect the lwip original code + * too much + * + * There will be a group for the all systems group address but this + * will not run the state machine as it is used to kick off reports + * from all the other groups + */ +struct igmp_group { + /** next link */ + struct igmp_group *next; + /** multicast address */ + ip4_addr_t group_address; + /** signifies we were the last person to report */ + u8_t last_reporter_flag; + /** current state of the group */ + u8_t group_state; + /** timer for reporting, negative is OFF */ + u16_t timer; + /** counter of simultaneous uses */ + u8_t use; +}; + +/* Prototypes */ +void igmp_init(void); +err_t igmp_start(struct netif *netif); +err_t igmp_stop(struct netif *netif); +void igmp_report_groups(struct netif *netif); +struct igmp_group *igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr); +void igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest); +err_t igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr); +err_t igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr); +err_t igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr); +err_t igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr); +void igmp_tmr(void); + +/** @ingroup igmp + * Get list head of IGMP groups for netif. + * Note: The allsystems group IP is contained in the list as first entry. 
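A sketch of the join API in use, assuming LWIP_IGMP is enabled and the interface advertises NETIF_FLAG_IGMP; the group address 239.255.0.1 and the helper name are illustrative only:

#include "lwip/igmp.h"

err_t join_status_group(const ip4_addr_t *local_addr)
{
  ip4_addr_t group;
  IP4_ADDR(&group, 239, 255, 0, 1);
  /* joins on the netif that owns local_addr and sends the IGMP report */
  return igmp_joingroup(local_addr, &group);
}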
+ * @see @ref netif_set_igmp_mac_filter() + */ +#define netif_igmp_data(netif) ((struct igmp_group *)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_IGMP */ + +#endif /* LWIP_HDR_IGMP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h new file mode 100644 index 000000000..8d306fc1e --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h @@ -0,0 +1,172 @@ +/** + * @file + * This file (together with sockets.h) aims to provide structs and functions from + * - arpa/inet.h + * - netinet/in.h + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INET_H +#define LWIP_HDR_INET_H + +#include "lwip/opt.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* If your port already typedef's in_addr_t, define IN_ADDR_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(in_addr_t) && !defined(IN_ADDR_T_DEFINED) +typedef u32_t in_addr_t; +#endif + +struct in_addr { + in_addr_t s_addr; +}; + +struct in6_addr { + union { + u32_t u32_addr[4]; + u8_t u8_addr[16]; + } un; +#define s6_addr un.u8_addr +}; + +/** 255.255.255.255 */ +#define INADDR_NONE IPADDR_NONE +/** 127.0.0.1 */ +#define INADDR_LOOPBACK IPADDR_LOOPBACK +/** 0.0.0.0 */ +#define INADDR_ANY IPADDR_ANY +/** 255.255.255.255 */ +#define INADDR_BROADCAST IPADDR_BROADCAST + +/** This macro can be used to initialize a variable of type struct in6_addr + to the IPv6 wildcard address. */ +#define IN6ADDR_ANY_INIT {{{0,0,0,0}}} +/** This macro can be used to initialize a variable of type struct in6_addr + to the IPv6 loopback address. 
*/ +#define IN6ADDR_LOOPBACK_INIT {{{0,0,0,PP_HTONL(1)}}} +/** This variable is initialized by the system to contain the wildcard IPv6 address. */ +extern const struct in6_addr in6addr_any; + +/* Definitions of the bits in an (IPv4) Internet address integer. + + On subnets, host and network parts are found according to + the subnet mask, not these masks. */ +#define IN_CLASSA(a) IP_CLASSA(a) +#define IN_CLASSA_NET IP_CLASSA_NET +#define IN_CLASSA_NSHIFT IP_CLASSA_NSHIFT +#define IN_CLASSA_HOST IP_CLASSA_HOST +#define IN_CLASSA_MAX IP_CLASSA_MAX + +#define IN_CLASSB(b) IP_CLASSB(b) +#define IN_CLASSB_NET IP_CLASSB_NET +#define IN_CLASSB_NSHIFT IP_CLASSB_NSHIFT +#define IN_CLASSB_HOST IP_CLASSB_HOST +#define IN_CLASSB_MAX IP_CLASSB_MAX + +#define IN_CLASSC(c) IP_CLASSC(c) +#define IN_CLASSC_NET IP_CLASSC_NET +#define IN_CLASSC_NSHIFT IP_CLASSC_NSHIFT +#define IN_CLASSC_HOST IP_CLASSC_HOST +#define IN_CLASSC_MAX IP_CLASSC_MAX + +#define IN_CLASSD(d) IP_CLASSD(d) +#define IN_CLASSD_NET IP_CLASSD_NET /* These ones aren't really */ +#define IN_CLASSD_NSHIFT IP_CLASSD_NSHIFT /* net and host fields, but */ +#define IN_CLASSD_HOST IP_CLASSD_HOST /* routing needn't know. */ +#define IN_CLASSD_MAX IP_CLASSD_MAX + +#define IN_MULTICAST(a) IP_MULTICAST(a) + +#define IN_EXPERIMENTAL(a) IP_EXPERIMENTAL(a) +#define IN_BADCLASS(a) IP_BADCLASS(a) + +#define IN_LOOPBACKNET IP_LOOPBACKNET + + +#ifndef INET_ADDRSTRLEN +#define INET_ADDRSTRLEN IP4ADDR_STRLEN_MAX +#endif +#if LWIP_IPV6 +#ifndef INET6_ADDRSTRLEN +#define INET6_ADDRSTRLEN IP6ADDR_STRLEN_MAX +#endif +#endif + +#if LWIP_IPV4 + +#define inet_addr_from_ip4addr(target_inaddr, source_ipaddr) ((target_inaddr)->s_addr = ip4_addr_get_u32(source_ipaddr)) +#define inet_addr_to_ip4addr(target_ipaddr, source_inaddr) (ip4_addr_set_u32(target_ipaddr, (source_inaddr)->s_addr)) +/* ATTENTION: the next define only works because both s_addr and ip4_addr_t are an u32_t effectively! */ +#define inet_addr_to_ip4addr_p(target_ip4addr_p, source_inaddr) ((target_ip4addr_p) = (ip4_addr_t*)&((source_inaddr)->s_addr)) + +/* directly map this to the lwip internal functions */ +#define inet_addr(cp) ipaddr_addr(cp) +#define inet_aton(cp, addr) ip4addr_aton(cp, (ip4_addr_t*)addr) +#define inet_ntoa(addr) ip4addr_ntoa((const ip4_addr_t*)&(addr)) +#define inet_ntoa_r(addr, buf, buflen) ip4addr_ntoa_r((const ip4_addr_t*)&(addr), buf, buflen) + +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +#define inet6_addr_from_ip6addr(target_in6addr, source_ip6addr) {(target_in6addr)->un.u32_addr[0] = (source_ip6addr)->addr[0]; \ + (target_in6addr)->un.u32_addr[1] = (source_ip6addr)->addr[1]; \ + (target_in6addr)->un.u32_addr[2] = (source_ip6addr)->addr[2]; \ + (target_in6addr)->un.u32_addr[3] = (source_ip6addr)->addr[3];} +#define inet6_addr_to_ip6addr(target_ip6addr, source_in6addr) {(target_ip6addr)->addr[0] = (source_in6addr)->un.u32_addr[0]; \ + (target_ip6addr)->addr[1] = (source_in6addr)->un.u32_addr[1]; \ + (target_ip6addr)->addr[2] = (source_in6addr)->un.u32_addr[2]; \ + (target_ip6addr)->addr[3] = (source_in6addr)->un.u32_addr[3];} +/* ATTENTION: the next define only works because both in6_addr and ip6_addr_t are an u32_t[4] effectively! 
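Since these wrappers map straight onto ip4addr_aton()/ip4addr_ntoa(), the usual BSD-style idiom works unchanged. A small sketch (note that inet_ntoa() returns a static buffer and is not reentrant, so the _r variant is preferred):

#include "lwip/inet.h"
#include <stdio.h>

void show_addr(void)
{
  struct in_addr a;
  char buf[INET_ADDRSTRLEN];
  if (inet_aton("192.0.2.7", &a)) {         /* maps to ip4addr_aton() */
    printf("parsed: %s\n", inet_ntoa_r(a, buf, sizeof(buf)));
  }
}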
*/ +#define inet6_addr_to_ip6addr_p(target_ip6addr_p, source_in6addr) ((target_ip6addr_p) = (ip6_addr_t*)(source_in6addr)) + +/* directly map this to the lwip internal functions */ +#define inet6_aton(cp, addr) ip6addr_aton(cp, (ip6_addr_t*)addr) +#define inet6_ntoa(addr) ip6addr_ntoa((const ip6_addr_t*)&(addr)) +#define inet6_ntoa_r(addr, buf, buflen) ip6addr_ntoa_r((const ip6_addr_t*)&(addr), buf, buflen) + +#endif /* LWIP_IPV6 */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INET_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h new file mode 100644 index 000000000..799cab53b --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h @@ -0,0 +1,105 @@ +/** + * @file + * IP checksum calculation functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INET_CHKSUM_H +#define LWIP_HDR_INET_CHKSUM_H + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" + +/** Swap the bytes in an u16_t: much like lwip_htons() for little-endian */ +#ifndef SWAP_BYTES_IN_WORD +#define SWAP_BYTES_IN_WORD(w) (((w) & 0xff) << 8) | (((w) & 0xff00) >> 8) +#endif /* SWAP_BYTES_IN_WORD */ + +/** Split an u32_t in two u16_ts and add them up */ +#ifndef FOLD_U32T +#define FOLD_U32T(u) (((u) >> 16) + ((u) & 0x0000ffffUL)) +#endif + +#if LWIP_CHECKSUM_ON_COPY +/** Function-like macro: same as MEMCPY but returns the checksum of copied data + as u16_t */ +# ifndef LWIP_CHKSUM_COPY +# define LWIP_CHKSUM_COPY(dst, src, len) lwip_chksum_copy(dst, src, len) +# ifndef LWIP_CHKSUM_COPY_ALGORITHM +# define LWIP_CHKSUM_COPY_ALGORITHM 1 +# endif /* LWIP_CHKSUM_COPY_ALGORITHM */ +# else /* LWIP_CHKSUM_COPY */ +# define LWIP_CHKSUM_COPY_ALGORITHM 0 +# endif /* LWIP_CHKSUM_COPY */ +#else /* LWIP_CHECKSUM_ON_COPY */ +# define LWIP_CHKSUM_COPY_ALGORITHM 0 +#endif /* LWIP_CHECKSUM_ON_COPY */ + +#ifdef __cplusplus +extern "C" { +#endif + +u16_t inet_chksum(const void *dataptr, u16_t len); +u16_t inet_chksum_pbuf(struct pbuf *p); +#if LWIP_CHKSUM_COPY_ALGORITHM +u16_t lwip_chksum_copy(void *dst, const void *src, u16_t len); +#endif /* LWIP_CHKSUM_COPY_ALGORITHM */ + +#if LWIP_IPV4 +u16_t inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip4_addr_t *src, const ip4_addr_t *dest); +u16_t inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, + u16_t proto_len, u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest); +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +u16_t ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip6_addr_t *src, const ip6_addr_t *dest); +u16_t ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest); +#endif /* LWIP_IPV6 */ + + +u16_t ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip_addr_t *src, const ip_addr_t *dest); +u16_t ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INET_H */ + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/init.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/init.h new file mode 100644 index 000000000..721bb57f9 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/init.h @@ -0,0 +1,100 @@ +/** + * @file + * lwIP initialization API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
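inet_chksum() returns the already-complemented 16-bit Internet checksum, so the result can be stored directly into a protocol header. A sketch over an ICMP echo header held in contiguous memory (struct icmp_echo_hdr comes from lwip/prot/icmp.h; zeroing the field before summing is required):

#include "lwip/inet_chksum.h"
#include "lwip/prot/icmp.h"

void fill_icmp_checksum(struct icmp_echo_hdr *hdr, u16_t total_len)
{
  hdr->chksum = 0;                          /* field must count as zero */
  hdr->chksum = inet_chksum(hdr, total_len);
}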
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INIT_H +#define LWIP_HDR_INIT_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup lwip_version Version + * @ingroup lwip + * @{ + */ + +/** X.x.x: Major version of the stack */ +#define LWIP_VERSION_MAJOR 2 +/** x.X.x: Minor version of the stack */ +#define LWIP_VERSION_MINOR 0 +/** x.x.X: Revision of the stack */ +#define LWIP_VERSION_REVISION 3 +/** For release candidates, this is set to 1..254 + * For official releases, this is set to 255 (LWIP_RC_RELEASE) + * For development versions (Git), this is set to 0 (LWIP_RC_DEVELOPMENT) */ +#define LWIP_VERSION_RC LWIP_RC_RELEASE + +/** LWIP_VERSION_RC is set to LWIP_RC_RELEASE for official releases */ +#define LWIP_RC_RELEASE 255 +/** LWIP_VERSION_RC is set to LWIP_RC_DEVELOPMENT for Git versions */ +#define LWIP_RC_DEVELOPMENT 0 + +#define LWIP_VERSION_IS_RELEASE (LWIP_VERSION_RC == LWIP_RC_RELEASE) +#define LWIP_VERSION_IS_DEVELOPMENT (LWIP_VERSION_RC == LWIP_RC_DEVELOPMENT) +#define LWIP_VERSION_IS_RC ((LWIP_VERSION_RC != LWIP_RC_RELEASE) && (LWIP_VERSION_RC != LWIP_RC_DEVELOPMENT)) + +/* Some helper defines to get a version string */ +#define LWIP_VERSTR2(x) #x +#define LWIP_VERSTR(x) LWIP_VERSTR2(x) +#if LWIP_VERSION_IS_RELEASE +#define LWIP_VERSION_STRING_SUFFIX "" +#elif LWIP_VERSION_IS_DEVELOPMENT +#define LWIP_VERSION_STRING_SUFFIX "d" +#else +#define LWIP_VERSION_STRING_SUFFIX "rc" LWIP_VERSTR(LWIP_VERSION_RC) +#endif + +/** Provides the version of the stack */ +#define LWIP_VERSION (((u32_t)LWIP_VERSION_MAJOR) << 24 | ((u32_t)LWIP_VERSION_MINOR) << 16 | \ + ((u32_t)LWIP_VERSION_REVISION) << 8 | ((u32_t)LWIP_VERSION_RC)) +/** Provides the version of the stack as string */ +#define LWIP_VERSION_STRING LWIP_VERSTR(LWIP_VERSION_MAJOR) "." LWIP_VERSTR(LWIP_VERSION_MINOR) "." LWIP_VERSTR(LWIP_VERSION_REVISION) LWIP_VERSION_STRING_SUFFIX + +/** + * @} + */ + +/* Modules initialization */ +void lwip_init(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INIT_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h new file mode 100644 index 000000000..356dd749c --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h @@ -0,0 +1,319 @@ +/** + * @file + * IP API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
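Because LWIP_VERSION packs major/minor/revision/rc into a single u32_t, it supports numeric comparisons as well as display. A small sketch, assuming a hypothetical banner helper:

#include "lwip/init.h"
#include <stdio.h>

void print_stack_banner(void)
{
  lwip_init();  /* one-time initialization of all enabled modules */
  printf("lwIP %s (0x%08lx)\n",
         LWIP_VERSION_STRING, (unsigned long)LWIP_VERSION);
#if LWIP_VERSION >= 0x02000000UL
  /* code relying on 2.0.0+ APIs can be guarded like this */
#endif
}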
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP_H +#define LWIP_HDR_IP_H + +#include "lwip/opt.h" + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/netif.h" +#include "lwip/ip4.h" +#include "lwip/ip6.h" +#include "lwip/prot/ip.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is passed as the destination address to ip_output_if (not + to ip_output), meaning that an IP header already is constructed + in the pbuf. This is used when TCP retransmits. */ +#define LWIP_IP_HDRINCL NULL + +/** pbufs passed to IP must have a ref-count of 1 as their payload pointer + gets altered as the packet is passed down the stack */ +#ifndef LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX +#define LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p) LWIP_ASSERT("p->ref == 1", (p)->ref == 1) +#endif + +#if LWIP_NETIF_HWADDRHINT +#define IP_PCB_ADDRHINT ;u8_t addr_hint +#else +#define IP_PCB_ADDRHINT +#endif /* LWIP_NETIF_HWADDRHINT */ + +/** This is the common part of all PCB types. It needs to be at the + beginning of a PCB type definition. It is located here so that + changes to this common part are made in one location instead of + having to change all PCB structs. */ +#define IP_PCB \ + /* ip addresses in network byte order */ \ + ip_addr_t local_ip; \ + ip_addr_t remote_ip; \ + /* Socket options */ \ + u8_t so_options; \ + /* Type Of Service */ \ + u8_t tos; \ + /* Time To Live */ \ + u8_t ttl \ + /* link layer address resolution hint */ \ + IP_PCB_ADDRHINT + +struct ip_pcb { +/* Common members of all PCB types */ + IP_PCB; +}; + +/* + * Option flags per-socket. These are the same like SO_XXX in sockets.h + */ +#define SOF_REUSEADDR 0x04U /* allow local address reuse */ +#define SOF_KEEPALIVE 0x08U /* keep connections alive */ +#define SOF_BROADCAST 0x20U /* permit to send and to receive broadcast messages (see IP_SOF_BROADCAST option) */ + +/* These flags are inherited (e.g. from a listen-pcb to a connection-pcb): */ +#define SOF_INHERITED (SOF_REUSEADDR|SOF_KEEPALIVE) + +/** Global variables of this module, kept in a struct for efficient access using base+index. */ +struct ip_globals +{ + /** The interface that accepted the packet for the current callback invocation. 
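Because every pcb type starts with this IP_PCB common part, the SOF_* flags can be manipulated uniformly through the ip_get_option()/ip_set_option() accessors defined further down in this header. A sketch on a UDP pcb (actually sending to broadcast addresses additionally requires the IP_SOF_BROADCAST option in lwipopts.h):

#include "lwip/ip.h"
#include "lwip/udp.h"

void allow_broadcast(struct udp_pcb *pcb)
{
  ip_set_option(pcb, SOF_BROADCAST);        /* set the per-pcb flag */
  if (ip_get_option(pcb, SOF_BROADCAST)) {
    /* broadcast traffic is now permitted on this pcb */
  }
}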
*/ + struct netif *current_netif; + /** The interface that received the packet for the current callback invocation. */ + struct netif *current_input_netif; +#if LWIP_IPV4 + /** Header of the input packet currently being processed. */ + struct ip_hdr *current_ip4_header; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + /** Header of the input IPv6 packet currently being processed. */ + struct ip6_hdr *current_ip6_header; +#endif /* LWIP_IPV6 */ + /** Total header length of current_ip4/6_header (i.e. after this, the UDP/TCP header starts) */ + u16_t current_ip_header_tot_len; + /** Source IP address of current_header */ + ip_addr_t current_iphdr_src; + /** Destination IP address of current_header */ + ip_addr_t current_iphdr_dest; +}; +extern struct ip_globals ip_data; + + +/** Get the interface that accepted the current packet. + * This may or may not be the receiving netif, depending on your netif/network setup. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip_current_netif() (ip_data.current_netif) +/** Get the interface that received the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip_current_input_netif() (ip_data.current_input_netif) +/** Total header length of ip(6)_current_header() (i.e. after this, the UDP/TCP header starts) */ +#define ip_current_header_tot_len() (ip_data.current_ip_header_tot_len) +/** Source IP address of current_header */ +#define ip_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP address of current_header */ +#define ip_current_dest_addr() (&ip_data.current_iphdr_dest) + +#if LWIP_IPV4 && LWIP_IPV6 +/** Get the IPv4 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip4_current_header() ((const struct ip_hdr*)(ip_data.current_ip4_header)) +/** Get the IPv6 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip6_current_header() ((const struct ip6_hdr*)(ip_data.current_ip6_header)) +/** Returns TRUE if the current IP input packet is IPv6, FALSE if it is IPv4 */ +#define ip_current_is_v6() (ip6_current_header() != NULL) +/** Source IPv6 address of current_header */ +#define ip6_current_src_addr() (ip_2_ip6(&ip_data.current_iphdr_src)) +/** Destination IPv6 address of current_header */ +#define ip6_current_dest_addr() (ip_2_ip6(&ip_data.current_iphdr_dest)) +/** Get the transport layer protocol */ +#define ip_current_header_proto() (ip_current_is_v6() ? \ + IP6H_NEXTH(ip6_current_header()) :\ + IPH_PROTO(ip4_current_header())) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)((ip_current_is_v6() ? \ + (const u8_t*)ip6_current_header() : (const u8_t*)ip4_current_header()) + ip_current_header_tot_len())) + +/** Source IP4 address of current_header */ +#define ip4_current_src_addr() (ip_2_ip4(&ip_data.current_iphdr_src)) +/** Destination IP4 address of current_header */ +#define ip4_current_dest_addr() (ip_2_ip4(&ip_data.current_iphdr_dest)) + +#elif LWIP_IPV4 /* LWIP_IPV4 && LWIP_IPV6 */ + +/** Get the IPv4 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. 
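These accessors read the ip_data globals and are therefore only meaningful while a receive callback is running. A sketch of a udp_recv() handler using them, with hypothetical names:

#include "lwip/ip.h"
#include "lwip/udp.h"

static void my_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p,
                    const ip_addr_t *addr, u16_t port)
{
  struct netif *in_if = ip_current_input_netif(); /* interface that received p */
  u16_t ip_hdr_len = ip_current_header_tot_len(); /* bytes before UDP header */
  LWIP_UNUSED_ARG(arg);   LWIP_UNUSED_ARG(pcb);
  LWIP_UNUSED_ARG(addr);  LWIP_UNUSED_ARG(port);
  LWIP_UNUSED_ARG(in_if); LWIP_UNUSED_ARG(ip_hdr_len);
  if (p != NULL) {
    pbuf_free(p); /* the callback owns the pbuf */
  }
}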
*/ +#define ip4_current_header() ((const struct ip_hdr*)(ip_data.current_ip4_header)) +/** Always returns FALSE when only supporting IPv4 only */ +#define ip_current_is_v6() 0 +/** Get the transport layer protocol */ +#define ip_current_header_proto() IPH_PROTO(ip4_current_header()) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)((const u8_t*)ip4_current_header() + ip_current_header_tot_len())) +/** Source IP4 address of current_header */ +#define ip4_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP4 address of current_header */ +#define ip4_current_dest_addr() (&ip_data.current_iphdr_dest) + +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ + +/** Get the IPv6 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip6_current_header() ((const struct ip6_hdr*)(ip_data.current_ip6_header)) +/** Always returns TRUE when only supporting IPv6 only */ +#define ip_current_is_v6() 1 +/** Get the transport layer protocol */ +#define ip_current_header_proto() IP6H_NEXTH(ip6_current_header()) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)((const u8_t*)ip6_current_header())) +/** Source IP6 address of current_header */ +#define ip6_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP6 address of current_header */ +#define ip6_current_dest_addr() (&ip_data.current_iphdr_dest) + +#endif /* LWIP_IPV6 */ + +/** Union source address of current_header */ +#define ip_current_src_addr() (&ip_data.current_iphdr_src) +/** Union destination address of current_header */ +#define ip_current_dest_addr() (&ip_data.current_iphdr_dest) + +/** Gets an IP pcb option (SOF_* flags) */ +#define ip_get_option(pcb, opt) ((pcb)->so_options & (opt)) +/** Sets an IP pcb option (SOF_* flags) */ +#define ip_set_option(pcb, opt) ((pcb)->so_options |= (opt)) +/** Resets an IP pcb option (SOF_* flags) */ +#define ip_reset_option(pcb, opt) ((pcb)->so_options &= ~(opt)) + +#if LWIP_IPV4 && LWIP_IPV6 +/** + * @ingroup ip + * Output IP packet, netif is selected by source address + */ +#define ip_output(p, src, dest, ttl, tos, proto) \ + (IP_IS_V6(dest) ? \ + ip6_output(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto) : \ + ip4_output(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto)) +/** + * @ingroup ip + * Output IP packet to specified interface + */ +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif) : \ + ip4_output_if(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif)) +/** + * @ingroup ip + * Output IP packet to interface specifying source address + */ +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if_src(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif) : \ + ip4_output_if_src(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif)) +/** Output IP packet with addr_hint */ +#define ip_output_hinted(p, src, dest, ttl, tos, proto, addr_hint) \ + (IP_IS_V6(dest) ? \ + ip6_output_hinted(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, addr_hint) : \ + ip4_output_hinted(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, addr_hint)) +/** + * @ingroup ip + * Get netif for address combination. See \ref ip6_route and \ref ip4_route + */ +#define ip_route(src, dest) \ + (IP_IS_V6(dest) ? 
\ + ip6_route(ip_2_ip6(src), ip_2_ip6(dest)) : \ + ip4_route_src(ip_2_ip4(dest), ip_2_ip4(src))) +/** + * @ingroup ip + * Get netif for IP. + */ +#define ip_netif_get_local_ip(netif, dest) (IP_IS_V6(dest) ? \ + ip6_netif_get_local_ip(netif, ip_2_ip6(dest)) : \ + ip4_netif_get_local_ip(netif)) +#define ip_debug_print(is_ipv6, p) ((is_ipv6) ? ip6_debug_print(p) : ip4_debug_print(p)) + +err_t ip_input(struct pbuf *p, struct netif *inp); + +#elif LWIP_IPV4 /* LWIP_IPV4 && LWIP_IPV6 */ + +#define ip_output(p, src, dest, ttl, tos, proto) \ + ip4_output(p, src, dest, ttl, tos, proto) +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + ip4_output_if(p, src, dest, ttl, tos, proto, netif) +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + ip4_output_if_src(p, src, dest, ttl, tos, proto, netif) +#define ip_output_hinted(p, src, dest, ttl, tos, proto, addr_hint) \ + ip4_output_hinted(p, src, dest, ttl, tos, proto, addr_hint) +#define ip_route(src, dest) \ + ip4_route_src(dest, src) +#define ip_netif_get_local_ip(netif, dest) \ + ip4_netif_get_local_ip(netif) +#define ip_debug_print(is_ipv6, p) ip4_debug_print(p) + +#define ip_input ip4_input + +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ + +#define ip_output(p, src, dest, ttl, tos, proto) \ + ip6_output(p, src, dest, ttl, tos, proto) +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + ip6_output_if(p, src, dest, ttl, tos, proto, netif) +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + ip6_output_if_src(p, src, dest, ttl, tos, proto, netif) +#define ip_output_hinted(p, src, dest, ttl, tos, proto, addr_hint) \ + ip6_output_hinted(p, src, dest, ttl, tos, proto, addr_hint) +#define ip_route(src, dest) \ + ip6_route(src, dest) +#define ip_netif_get_local_ip(netif, dest) \ + ip6_netif_get_local_ip(netif, dest) +#define ip_debug_print(is_ipv6, p) ip6_debug_print(p) + +#define ip_input ip6_input + +#endif /* LWIP_IPV6 */ + +#define ip_route_get_local_ip(src, dest, netif, ipaddr) do { \ + (netif) = ip_route(src, dest); \ + (ipaddr) = ip_netif_get_local_ip(netif, dest); \ +}while(0) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP_H */ + + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h new file mode 100644 index 000000000..e8ac2bc8c --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h @@ -0,0 +1,111 @@ +/** + * @file + * IPv4 API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
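ip_route_get_local_ip() (defined near the end of this header) combines the two selector macros above: it resolves the outgoing netif and then the local address lwIP would use as source. A sketch, assuming the caller supplies a source hint (possibly the unspecified address) and a destination:

#include "lwip/ip.h"

const ip_addr_t *pick_source(const ip_addr_t *src_hint, const ip_addr_t *dest)
{
  struct netif *netif;
  const ip_addr_t *local;
  ip_route_get_local_ip(src_hint, dest, netif, local);
  return (netif != NULL) ? local : NULL;   /* NULL: no route found */
}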
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP4_H +#define LWIP_HDR_IP4_H + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/ip4_addr.h" +#include "lwip/err.h" +#include "lwip/netif.h" +#include "lwip/prot/ip4.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC +#define LWIP_IPV4_SRC_ROUTING 1 +#else +#define LWIP_IPV4_SRC_ROUTING 0 +#endif + +/** Currently, the function ip_output_if_opt() is only used with IGMP */ +#define IP_OPTIONS_SEND (LWIP_IPV4 && LWIP_IGMP) + +#define ip_init() /* Compatibility define, no init needed. */ +struct netif *ip4_route(const ip4_addr_t *dest); +#if LWIP_IPV4_SRC_ROUTING +struct netif *ip4_route_src(const ip4_addr_t *dest, const ip4_addr_t *src); +#else /* LWIP_IPV4_SRC_ROUTING */ +#define ip4_route_src(dest, src) ip4_route(dest) +#endif /* LWIP_IPV4_SRC_ROUTING */ +err_t ip4_input(struct pbuf *p, struct netif *inp); +err_t ip4_output(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto); +err_t ip4_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif); +err_t ip4_output_if_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif); +#if LWIP_NETIF_HWADDRHINT +err_t ip4_output_hinted(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, u8_t *addr_hint); +#endif /* LWIP_NETIF_HWADDRHINT */ +#if IP_OPTIONS_SEND +err_t ip4_output_if_opt(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen); +err_t ip4_output_if_opt_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen); +#endif /* IP_OPTIONS_SEND */ + +#if LWIP_MULTICAST_TX_OPTIONS +void ip4_set_default_multicast_netif(struct netif* default_multicast_netif); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#define ip4_netif_get_local_ip(netif) (((netif) != NULL) ? netif_ip_addr4(netif) : NULL) + +#if IP_DEBUG +void ip4_debug_print(struct pbuf *p); +#else +#define ip4_debug_print(p) +#endif /* IP_DEBUG */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP_H */ + + diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h new file mode 100644 index 000000000..106a261c4 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h @@ -0,0 +1,227 @@ +/** + * @file + * IPv4 address API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
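A small sketch of the routing entry point: ip4_route() walks the netif list for a matching subnet and falls back to the default netif, so it can answer questions like the local MTU toward a destination (helper name is illustrative):

#include "lwip/ip4.h"

u16_t mtu_toward(const ip4_addr_t *dest)
{
  struct netif *netif = ip4_route(dest);
  return (netif != NULL) ? netif->mtu : 0;  /* 0: no route found */
}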
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP4_ADDR_H +#define LWIP_HDR_IP4_ADDR_H + +#include "lwip/opt.h" +#include "lwip/def.h" + +#if LWIP_IPV4 + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the aligned version of ip4_addr_t, + used as local variable, on the stack, etc. */ +struct ip4_addr { + u32_t addr; +}; + +/** ip4_addr_t uses a struct for convenience only, so that the same defines can + * operate both on ip4_addr_t as well as on ip4_addr_p_t. */ +typedef struct ip4_addr ip4_addr_t; + +/** + * struct ipaddr2 is used in the definition of the ARP packet format in + * order to support compilers that don't have structure packing. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip4_addr2 { + PACK_STRUCT_FIELD(u16_t addrw[2]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Forward declaration to not include netif.h */ +struct netif; + +/** 255.255.255.255 */ +#define IPADDR_NONE ((u32_t)0xffffffffUL) +/** 127.0.0.1 */ +#define IPADDR_LOOPBACK ((u32_t)0x7f000001UL) +/** 0.0.0.0 */ +#define IPADDR_ANY ((u32_t)0x00000000UL) +/** 255.255.255.255 */ +#define IPADDR_BROADCAST ((u32_t)0xffffffffUL) + +/* Definitions of the bits in an Internet address integer. + + On subnets, host and network parts are found according to + the subnet mask, not these masks. 
*/ +#define IP_CLASSA(a) ((((u32_t)(a)) & 0x80000000UL) == 0) +#define IP_CLASSA_NET 0xff000000 +#define IP_CLASSA_NSHIFT 24 +#define IP_CLASSA_HOST (0xffffffff & ~IP_CLASSA_NET) +#define IP_CLASSA_MAX 128 + +#define IP_CLASSB(a) ((((u32_t)(a)) & 0xc0000000UL) == 0x80000000UL) +#define IP_CLASSB_NET 0xffff0000 +#define IP_CLASSB_NSHIFT 16 +#define IP_CLASSB_HOST (0xffffffff & ~IP_CLASSB_NET) +#define IP_CLASSB_MAX 65536 + +#define IP_CLASSC(a) ((((u32_t)(a)) & 0xe0000000UL) == 0xc0000000UL) +#define IP_CLASSC_NET 0xffffff00 +#define IP_CLASSC_NSHIFT 8 +#define IP_CLASSC_HOST (0xffffffff & ~IP_CLASSC_NET) + +#define IP_CLASSD(a) (((u32_t)(a) & 0xf0000000UL) == 0xe0000000UL) +#define IP_CLASSD_NET 0xf0000000 /* These ones aren't really */ +#define IP_CLASSD_NSHIFT 28 /* net and host fields, but */ +#define IP_CLASSD_HOST 0x0fffffff /* routing needn't know. */ +#define IP_MULTICAST(a) IP_CLASSD(a) + +#define IP_EXPERIMENTAL(a) (((u32_t)(a) & 0xf0000000UL) == 0xf0000000UL) +#define IP_BADCLASS(a) (((u32_t)(a) & 0xf0000000UL) == 0xf0000000UL) + +#define IP_LOOPBACKNET 127 /* official! */ + +/** Set an IP address given by the four byte-parts */ +#define IP4_ADDR(ipaddr, a,b,c,d) (ipaddr)->addr = PP_HTONL(LWIP_MAKEU32(a,b,c,d)) + +/** MEMCPY-like copying of IP addresses where addresses are known to be + * 16-bit-aligned if the port is correctly configured (so a port could define + * this to copying 2 u16_t's) - no NULL-pointer-checking needed. */ +#ifndef IPADDR2_COPY +#define IPADDR2_COPY(dest, src) SMEMCPY(dest, src, sizeof(ip4_addr_t)) +#endif + +/** Copy IP address - faster than ip4_addr_set: no NULL check */ +#define ip4_addr_copy(dest, src) ((dest).addr = (src).addr) +/** Safely copy one IP address to another (src may be NULL) */ +#define ip4_addr_set(dest, src) ((dest)->addr = \ + ((src) == NULL ? 0 : \ + (src)->addr)) +/** Set complete address to zero */ +#define ip4_addr_set_zero(ipaddr) ((ipaddr)->addr = 0) +/** Set address to IPADDR_ANY (no need for lwip_htonl()) */ +#define ip4_addr_set_any(ipaddr) ((ipaddr)->addr = IPADDR_ANY) +/** Set address to loopback address */ +#define ip4_addr_set_loopback(ipaddr) ((ipaddr)->addr = PP_HTONL(IPADDR_LOOPBACK)) +/** Check if an address is in the loopback region */ +#define ip4_addr_isloopback(ipaddr) (((ipaddr)->addr & PP_HTONL(IP_CLASSA_NET)) == PP_HTONL(((u32_t)IP_LOOPBACKNET) << 24)) +/** Safely copy one IP address to another and change byte order + * from host- to network-order. */ +#define ip4_addr_set_hton(dest, src) ((dest)->addr = \ + ((src) == NULL ? 0:\ + lwip_htonl((src)->addr))) +/** IPv4 only: set the IP address given as an u32_t */ +#define ip4_addr_set_u32(dest_ipaddr, src_u32) ((dest_ipaddr)->addr = (src_u32)) +/** IPv4 only: get the IP address as an u32_t */ +#define ip4_addr_get_u32(src_ipaddr) ((src_ipaddr)->addr) + +/** Get the network address by combining host address with netmask */ +#define ip4_addr_get_network(target, host, netmask) do { ((target)->addr = ((host)->addr) & ((netmask)->addr)); } while(0) + +/** + * Determine if two address are on the same network. 
+ * + * @arg addr1 IP address 1 + * @arg addr2 IP address 2 + * @arg mask network identifier mask + * @return !0 if the network identifiers of both address match + */ +#define ip4_addr_netcmp(addr1, addr2, mask) (((addr1)->addr & \ + (mask)->addr) == \ + ((addr2)->addr & \ + (mask)->addr)) +#define ip4_addr_cmp(addr1, addr2) ((addr1)->addr == (addr2)->addr) + +#define ip4_addr_isany_val(addr1) ((addr1).addr == IPADDR_ANY) +#define ip4_addr_isany(addr1) ((addr1) == NULL || ip4_addr_isany_val(*(addr1))) + +#define ip4_addr_isbroadcast(addr1, netif) ip4_addr_isbroadcast_u32((addr1)->addr, netif) +u8_t ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif); + +#define ip_addr_netmask_valid(netmask) ip4_addr_netmask_valid((netmask)->addr) +u8_t ip4_addr_netmask_valid(u32_t netmask); + +#define ip4_addr_ismulticast(addr1) (((addr1)->addr & PP_HTONL(0xf0000000UL)) == PP_HTONL(0xe0000000UL)) + +#define ip4_addr_islinklocal(addr1) (((addr1)->addr & PP_HTONL(0xffff0000UL)) == PP_HTONL(0xa9fe0000UL)) + +#define ip4_addr_debug_print_parts(debug, a, b, c, d) \ + LWIP_DEBUGF(debug, ("%" U16_F ".%" U16_F ".%" U16_F ".%" U16_F, a, b, c, d)) +#define ip4_addr_debug_print(debug, ipaddr) \ + ip4_addr_debug_print_parts(debug, \ + (u16_t)((ipaddr) != NULL ? ip4_addr1_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? ip4_addr2_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? ip4_addr3_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? ip4_addr4_16(ipaddr) : 0)) +#define ip4_addr_debug_print_val(debug, ipaddr) \ + ip4_addr_debug_print_parts(debug, \ + ip4_addr1_16(&(ipaddr)), \ + ip4_addr2_16(&(ipaddr)), \ + ip4_addr3_16(&(ipaddr)), \ + ip4_addr4_16(&(ipaddr))) + +/* Get one byte from the 4-byte address */ +#define ip4_addr1(ipaddr) (((const u8_t*)(&(ipaddr)->addr))[0]) +#define ip4_addr2(ipaddr) (((const u8_t*)(&(ipaddr)->addr))[1]) +#define ip4_addr3(ipaddr) (((const u8_t*)(&(ipaddr)->addr))[2]) +#define ip4_addr4(ipaddr) (((const u8_t*)(&(ipaddr)->addr))[3]) +/* These are cast to u16_t, with the intent that they are often arguments + * to printf using the U16_F format from cc.h. */ +#define ip4_addr1_16(ipaddr) ((u16_t)ip4_addr1(ipaddr)) +#define ip4_addr2_16(ipaddr) ((u16_t)ip4_addr2(ipaddr)) +#define ip4_addr3_16(ipaddr) ((u16_t)ip4_addr3(ipaddr)) +#define ip4_addr4_16(ipaddr) ((u16_t)ip4_addr4(ipaddr)) + +#define IP4ADDR_STRLEN_MAX 16 + +/** For backwards compatibility */ +#define ip_ntoa(ipaddr) ipaddr_ntoa(ipaddr) + +u32_t ipaddr_addr(const char *cp); +int ip4addr_aton(const char *cp, ip4_addr_t *addr); +/** returns ptr to static buffer; not reentrant! */ +char *ip4addr_ntoa(const ip4_addr_t *addr); +char *ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP_ADDR_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h new file mode 100644 index 000000000..18cd2d05f --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h @@ -0,0 +1,100 @@ +/** + * @file + * IP fragmentation/reassembly + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
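A sketch tying a few of these macros together: build addresses with IP4_ADDR() and test subnet membership with ip4_addr_netcmp(); the addresses are illustrative:

#include "lwip/ip4_addr.h"

int same_subnet_demo(void)
{
  ip4_addr_t a, b, mask;
  IP4_ADDR(&a,    192, 168, 1, 10);
  IP4_ADDR(&b,    192, 168, 1, 200);
  IP4_ADDR(&mask, 255, 255, 255, 0);
  return ip4_addr_netcmp(&a, &b, &mask);    /* != 0: same network */
}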
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Jani Monoses + * + */ + +#ifndef LWIP_HDR_IP4_FRAG_H +#define LWIP_HDR_IP4_FRAG_H + +#include "lwip/opt.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/ip_addr.h" +#include "lwip/ip.h" + +#if LWIP_IPV4 + +#ifdef __cplusplus +extern "C" { +#endif + +#if IP_REASSEMBLY +/* The IP reassembly timer interval in milliseconds. */ +#define IP_TMR_INTERVAL 1000 + +/** IP reassembly helper struct. + * This is exported because memp needs to know the size. + */ +struct ip_reassdata { + struct ip_reassdata *next; + struct pbuf *p; + struct ip_hdr iphdr; + u16_t datagram_len; + u8_t flags; + u8_t timer; +}; + +void ip_reass_init(void); +void ip_reass_tmr(void); +struct pbuf * ip4_reass(struct pbuf *p); +#endif /* IP_REASSEMBLY */ + +#if IP_FRAG +#if !LWIP_NETIF_TX_SINGLE_PBUF +#ifndef LWIP_PBUF_CUSTOM_REF_DEFINED +#define LWIP_PBUF_CUSTOM_REF_DEFINED +/** A custom pbuf that holds a reference to another pbuf, which is freed + * when this custom pbuf is freed. This is used to create a custom PBUF_REF + * that points into the original pbuf. */ +struct pbuf_custom_ref { + /** 'base class' */ + struct pbuf_custom pc; + /** pointer to the original pbuf that is referenced */ + struct pbuf *original; +}; +#endif /* LWIP_PBUF_CUSTOM_REF_DEFINED */ +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +err_t ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest); +#endif /* IP_FRAG */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP4_FRAG_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h new file mode 100644 index 000000000..9c6d5936c --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h @@ -0,0 +1,93 @@ +/** + * @file + * + * IPv6 layer. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
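ip4_frag() splits an oversized datagram and transmits the resulting fragments itself via netif->output. A sketch of the MTU check that ip4_output_if() performs internally, wrapped in a hypothetical helper:

#include "lwip/ip4_frag.h"

err_t send_maybe_fragmented(struct pbuf *p, struct netif *netif,
                            const ip4_addr_t *dest)
{
#if IP_FRAG
  if ((netif->mtu != 0) && (p->tot_len > netif->mtu)) {
    return ip4_frag(p, netif, dest);  /* sends each fragment via netif->output */
  }
#endif
  return netif->output(netif, p, dest);
}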
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_H +#define LWIP_HDR_IP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_addr.h" +#include "lwip/prot/ip6.h" +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" + +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct netif *ip6_route(const ip6_addr_t *src, const ip6_addr_t *dest); +const ip_addr_t *ip6_select_source_address(struct netif *netif, const ip6_addr_t * dest); +err_t ip6_input(struct pbuf *p, struct netif *inp); +err_t ip6_output(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth); +err_t ip6_output_if(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif *netif); +err_t ip6_output_if_src(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif *netif); +#if LWIP_NETIF_HWADDRHINT +err_t ip6_output_hinted(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, u8_t *addr_hint); +#endif /* LWIP_NETIF_HWADDRHINT */ +#if LWIP_IPV6_MLD +err_t ip6_options_add_hbh_ra(struct pbuf * p, u8_t nexth, u8_t value); +#endif /* LWIP_IPV6_MLD */ + +#define ip6_netif_get_local_ip(netif, dest) (((netif) != NULL) ? \ + ip6_select_source_address(netif, dest) : NULL) + +#if IP6_DEBUG +void ip6_debug_print(struct pbuf *p); +#else +#define ip6_debug_print(p) +#endif /* IP6_DEBUG */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_IP6_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h new file mode 100644 index 000000000..8d6ff2956 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h @@ -0,0 +1,285 @@ +/** + * @file + * + * IPv6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. 
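ip6_route() plus ip6_select_source_address() reproduce what ip6_output() does when handed the unspecified source address. A sketch with a hypothetical helper name:

#include "lwip/ip6.h"
#include "lwip/ip6_addr.h"

const ip_addr_t *pick_v6_source(const ip6_addr_t *dest)
{
  ip6_addr_t any;
  struct netif *netif;
  ip6_addr_set_any(&any);
  netif = ip6_route(&any, dest);
  return (netif != NULL) ? ip6_select_source_address(netif, dest) : NULL;
}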
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * Structs and macros for handling IPv6 addresses. + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_ADDR_H +#define LWIP_HDR_IP6_ADDR_H + +#include "lwip/opt.h" +#include "def.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + + +#ifdef __cplusplus +extern "C" { +#endif + + +/** This is the aligned version of ip6_addr_t, + used as local variable, on the stack, etc. 
*/ +struct ip6_addr { + u32_t addr[4]; +}; + +/** IPv6 address */ +typedef struct ip6_addr ip6_addr_t; + +/** Set an IPv6 partial address given by byte-parts */ +#define IP6_ADDR_PART(ip6addr, index, a,b,c,d) \ + (ip6addr)->addr[index] = PP_HTONL(LWIP_MAKEU32(a,b,c,d)) + +/** Set a full IPv6 address by passing the 4 u32_t indices in network byte order + (use PP_HTONL() for constants) */ +#define IP6_ADDR(ip6addr, idx0, idx1, idx2, idx3) do { \ + (ip6addr)->addr[0] = idx0; \ + (ip6addr)->addr[1] = idx1; \ + (ip6addr)->addr[2] = idx2; \ + (ip6addr)->addr[3] = idx3; } while(0) + +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK1(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[0]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK2(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[0])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK3(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[1]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK4(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[1])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK5(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[2]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK6(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[2])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK7(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[3]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK8(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[3])) & 0xffff)) + +/** Copy IPv6 address - faster than ip6_addr_set: no NULL check */ +#define ip6_addr_copy(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = (src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3];}while(0) +/** Safely copy one IPv6 address to another (src may be NULL) */ +#define ip6_addr_set(dest, src) do{(dest)->addr[0] = (src) == NULL ? 0 : (src)->addr[0]; \ + (dest)->addr[1] = (src) == NULL ? 0 : (src)->addr[1]; \ + (dest)->addr[2] = (src) == NULL ? 0 : (src)->addr[2]; \ + (dest)->addr[3] = (src) == NULL ? 0 : (src)->addr[3];}while(0) + +/** Set complete address to zero */ +#define ip6_addr_set_zero(ip6addr) do{(ip6addr)->addr[0] = 0; \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = 0;}while(0) + +/** Set address to ipv6 'any' (no need for lwip_htonl()) */ +#define ip6_addr_set_any(ip6addr) ip6_addr_set_zero(ip6addr) +/** Set address to ipv6 loopback address */ +#define ip6_addr_set_loopback(ip6addr) do{(ip6addr)->addr[0] = 0; \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000001UL);}while(0) +/** Safely copy one IPv6 address to another and change byte order + * from host- to network-order. */ +#define ip6_addr_set_hton(dest, src) do{(dest)->addr[0] = (src) == NULL ? 0 : lwip_htonl((src)->addr[0]); \ + (dest)->addr[1] = (src) == NULL ? 0 : lwip_htonl((src)->addr[1]); \ + (dest)->addr[2] = (src) == NULL ? 0 : lwip_htonl((src)->addr[2]); \ + (dest)->addr[3] = (src) == NULL ? 0 : lwip_htonl((src)->addr[3]);}while(0) + + +/** + * Determine if two IPv6 addresses are on the same network.
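+ * Only the upper 64 bits (addr[0] and addr[1], i.e. the /64 prefix) take
+ * part in the comparison; the interface identifier is ignored. A minimal
+ * usage sketch (illustrative only, with hypothetical link-local addresses):
+ *
+ *   ip6_addr_t a, b;
+ *   IP6_ADDR(&a, PP_HTONL(0xfe800000UL), 0, 0, PP_HTONL(0x00000001UL)); // fe80::1
+ *   IP6_ADDR(&b, PP_HTONL(0xfe800000UL), 0, 0, PP_HTONL(0x00000002UL)); // fe80::2
+ *   // ip6_addr_netcmp(&a, &b) != 0, since both share the fe80::/64 prefix
+ *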
+ * + * @arg addr1 IPv6 address 1 + * @arg addr2 IPv6 address 2 + * @return !0 if the network identifiers of both addresses match + */ +#define ip6_addr_netcmp(addr1, addr2) (((addr1)->addr[0] == (addr2)->addr[0]) && \ + ((addr1)->addr[1] == (addr2)->addr[1])) + +#define ip6_addr_cmp(addr1, addr2) (((addr1)->addr[0] == (addr2)->addr[0]) && \ + ((addr1)->addr[1] == (addr2)->addr[1]) && \ + ((addr1)->addr[2] == (addr2)->addr[2]) && \ + ((addr1)->addr[3] == (addr2)->addr[3])) + +#define ip6_get_subnet_id(ip6addr) (lwip_htonl((ip6addr)->addr[2]) & 0x0000ffffUL) + +#define ip6_addr_isany_val(ip6addr) (((ip6addr).addr[0] == 0) && \ + ((ip6addr).addr[1] == 0) && \ + ((ip6addr).addr[2] == 0) && \ + ((ip6addr).addr[3] == 0)) +#define ip6_addr_isany(ip6addr) (((ip6addr) == NULL) || ip6_addr_isany_val(*(ip6addr))) + +#define ip6_addr_isloopback(ip6addr) (((ip6addr)->addr[0] == 0UL) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) + +#define ip6_addr_isglobal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xe0000000UL)) == PP_HTONL(0x20000000UL)) + +#define ip6_addr_islinklocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xffc00000UL)) == PP_HTONL(0xfe800000UL)) + +#define ip6_addr_issitelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xffc00000UL)) == PP_HTONL(0xfec00000UL)) + +#define ip6_addr_isuniquelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xfe000000UL)) == PP_HTONL(0xfc000000UL)) + +#define ip6_addr_isipv4mappedipv6(ip6addr) (((ip6addr)->addr[0] == 0) && ((ip6addr)->addr[1] == 0) && (((ip6addr)->addr[2]) == PP_HTONL(0x0000FFFFUL))) + +#define ip6_addr_ismulticast(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff000000UL)) == PP_HTONL(0xff000000UL)) +#define ip6_addr_multicast_transient_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00100000UL)) +#define ip6_addr_multicast_prefix_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00200000UL)) +#define ip6_addr_multicast_rendezvous_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00400000UL)) +#define ip6_addr_multicast_scope(ip6addr) ((lwip_htonl((ip6addr)->addr[0]) >> 16) & 0xf) +#define IP6_MULTICAST_SCOPE_RESERVED 0x0 +#define IP6_MULTICAST_SCOPE_RESERVED0 0x0 +#define IP6_MULTICAST_SCOPE_INTERFACE_LOCAL 0x1 +#define IP6_MULTICAST_SCOPE_LINK_LOCAL 0x2 +#define IP6_MULTICAST_SCOPE_RESERVED3 0x3 +#define IP6_MULTICAST_SCOPE_ADMIN_LOCAL 0x4 +#define IP6_MULTICAST_SCOPE_SITE_LOCAL 0x5 +#define IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL 0x8 +#define IP6_MULTICAST_SCOPE_GLOBAL 0xe +#define IP6_MULTICAST_SCOPE_RESERVEDF 0xf +#define ip6_addr_ismulticast_iflocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff010000UL)) +#define ip6_addr_ismulticast_linklocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff020000UL)) +#define ip6_addr_ismulticast_adminlocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff040000UL)) +#define ip6_addr_ismulticast_sitelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff050000UL)) +#define ip6_addr_ismulticast_orglocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff080000UL)) +#define ip6_addr_ismulticast_global(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff0e0000UL)) + +/* @todo define get/set for well-known multicast addresses, e.g.
ff02::1 */ +#define ip6_addr_isallnodes_iflocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff010000UL)) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) + +#define ip6_addr_isallnodes_linklocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) +#define ip6_addr_set_allnodes_linklocal(ip6addr) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000001UL);}while(0) + +#define ip6_addr_isallrouters_linklocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000002UL))) +#define ip6_addr_set_allrouters_linklocal(ip6addr) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000002UL);}while(0) + +#define ip6_addr_issolicitednode(ip6addr) ( ((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[2] == PP_HTONL(0x00000001UL)) && \ + (((ip6addr)->addr[3] & PP_HTONL(0xff000000UL)) == PP_HTONL(0xff000000UL)) ) + +#define ip6_addr_set_solicitednode(ip6addr, if_id) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = PP_HTONL(0x00000001UL); \ + (ip6addr)->addr[3] = (PP_HTONL(0xff000000UL) | (if_id));}while(0) + +#define ip6_addr_cmp_solicitednode(ip6addr, sn_addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[1] == 0) && \ + ((ip6addr)->addr[2] == PP_HTONL(0x00000001UL)) && \ + ((ip6addr)->addr[3] == (PP_HTONL(0xff000000UL) | (sn_addr)->addr[3]))) + +/* IPv6 address states. */ +#define IP6_ADDR_INVALID 0x00 +#define IP6_ADDR_TENTATIVE 0x08 +#define IP6_ADDR_TENTATIVE_1 0x09 /* 1 probe sent */ +#define IP6_ADDR_TENTATIVE_2 0x0a /* 2 probes sent */ +#define IP6_ADDR_TENTATIVE_3 0x0b /* 3 probes sent */ +#define IP6_ADDR_TENTATIVE_4 0x0c /* 4 probes sent */ +#define IP6_ADDR_TENTATIVE_5 0x0d /* 5 probes sent */ +#define IP6_ADDR_TENTATIVE_6 0x0e /* 6 probes sent */ +#define IP6_ADDR_TENTATIVE_7 0x0f /* 7 probes sent */ +#define IP6_ADDR_VALID 0x10 /* This bit marks an address as valid (preferred or deprecated) */ +#define IP6_ADDR_PREFERRED 0x30 +#define IP6_ADDR_DEPRECATED 0x10 /* Same as VALID (valid but not preferred) */ + +#define IP6_ADDR_TENTATIVE_COUNT_MASK 0x07 /* 1-7 probes sent */ + +#define ip6_addr_isinvalid(addr_state) (addr_state == IP6_ADDR_INVALID) +#define ip6_addr_istentative(addr_state) (addr_state & IP6_ADDR_TENTATIVE) +#define ip6_addr_isvalid(addr_state) (addr_state & IP6_ADDR_VALID) /* Include valid, preferred, and deprecated. */ +#define ip6_addr_ispreferred(addr_state) (addr_state == IP6_ADDR_PREFERRED) +#define ip6_addr_isdeprecated(addr_state) (addr_state == IP6_ADDR_DEPRECATED) + +#define ip6_addr_debug_print_parts(debug, a, b, c, d, e, f, g, h) \ + LWIP_DEBUGF(debug, ("%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F, \ + a, b, c, d, e, f, g, h)) +#define ip6_addr_debug_print(debug, ipaddr) \ + ip6_addr_debug_print_parts(debug, \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK1(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK2(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK3(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? 
IP6_ADDR_BLOCK4(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK5(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK6(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK7(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK8(ipaddr) : 0)) +#define ip6_addr_debug_print_val(debug, ipaddr) \ + ip6_addr_debug_print_parts(debug, \ + IP6_ADDR_BLOCK1(&(ipaddr)), \ + IP6_ADDR_BLOCK2(&(ipaddr)), \ + IP6_ADDR_BLOCK3(&(ipaddr)), \ + IP6_ADDR_BLOCK4(&(ipaddr)), \ + IP6_ADDR_BLOCK5(&(ipaddr)), \ + IP6_ADDR_BLOCK6(&(ipaddr)), \ + IP6_ADDR_BLOCK7(&(ipaddr)), \ + IP6_ADDR_BLOCK8(&(ipaddr))) + +#define IP6ADDR_STRLEN_MAX 46 + +int ip6addr_aton(const char *cp, ip6_addr_t *addr); +/** returns ptr to static buffer; not reentrant! */ +char *ip6addr_ntoa(const ip6_addr_t *addr); +char *ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen); + + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_IP6_ADDR_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h new file mode 100644 index 000000000..2f94ee6e7 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h @@ -0,0 +1,120 @@ +/** + * @file + * + * IPv6 fragmentation and reassembly. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_FRAG_H +#define LWIP_HDR_IP6_FRAG_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip6.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +#if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */ + +/** IPV6_FRAG_COPYHEADER==1: for platforms where sizeof(void*) > 4, this needs to + * be enabled (to not overwrite part of the data).
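+ * (The helper struct is written over the in-pbuf IPv6 header area, so
+ * pointers wider than 32 bits make it spill into the data that follows.)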
When enabled, the IPv6 header + * is copied instead of referencing it, which gives more room for struct ip6_reass_helper */ +#ifndef IPV6_FRAG_COPYHEADER +#define IPV6_FRAG_COPYHEADER 0 +#endif + +/** The IPv6 reassembly timer interval in milliseconds. */ +#define IP6_REASS_TMR_INTERVAL 1000 + +/* Copy the complete header of the first fragment to struct ip6_reassdata + or just point to its original location in the first pbuf? */ +#if IPV6_FRAG_COPYHEADER +#define IPV6_FRAG_HDRPTR +#define IPV6_FRAG_HDRREF(hdr) (&(hdr)) +#else /* IPV6_FRAG_COPYHEADER */ +#define IPV6_FRAG_HDRPTR * +#define IPV6_FRAG_HDRREF(hdr) (hdr) +#endif /* IPV6_FRAG_COPYHEADER */ + +/** IPv6 reassembly helper struct. + * This is exported because memp needs to know the size. + */ +struct ip6_reassdata { + struct ip6_reassdata *next; + struct pbuf *p; + struct ip6_hdr IPV6_FRAG_HDRPTR iphdr; + u32_t identification; + u16_t datagram_len; + u8_t nexth; + u8_t timer; +}; + +#define ip6_reass_init() /* Compatibility define */ +void ip6_reass_tmr(void); +struct pbuf *ip6_reass(struct pbuf *p); + +#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ + +#if LWIP_IPV6 && LWIP_IPV6_FRAG /* don't build if not configured for use in lwipopts.h */ + +#ifndef LWIP_PBUF_CUSTOM_REF_DEFINED +#define LWIP_PBUF_CUSTOM_REF_DEFINED +/** A custom pbuf that holds a reference to another pbuf, which is freed + * when this custom pbuf is freed. This is used to create a custom PBUF_REF + * that points into the original pbuf. */ +struct pbuf_custom_ref { + /** 'base class' */ + struct pbuf_custom pc; + /** pointer to the original pbuf that is referenced */ + struct pbuf *original; +}; +#endif /* LWIP_PBUF_CUSTOM_REF_DEFINED */ + +err_t ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest); + +#endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP6_FRAG_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h new file mode 100644 index 000000000..4e989bfc5 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h @@ -0,0 +1,407 @@ +/** + * @file + * IP address API (common IPv4 and IPv6) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP_ADDR_H +#define LWIP_HDR_IP_ADDR_H + +#include "lwip/opt.h" +#include "lwip/def.h" + +#include "lwip/ip4_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup ipaddr + * IP address types for use in ip_addr_t.type member. + * @see tcp_new_ip_type(), udp_new_ip_type(), raw_new_ip_type(). + */ +enum lwip_ip_addr_type { + /** IPv4 */ + IPADDR_TYPE_V4 = 0U, + /** IPv6 */ + IPADDR_TYPE_V6 = 6U, + /** IPv4+IPv6 ("dual-stack") */ + IPADDR_TYPE_ANY = 46U +}; + +#if LWIP_IPV4 && LWIP_IPV6 +/** + * @ingroup ipaddr + * A union struct for both IP version's addresses. + * ATTENTION: watch out for its size when adding IPv6 address scope! + */ +typedef struct ip_addr { + union { + ip6_addr_t ip6; + ip4_addr_t ip4; + } u_addr; + /** @ref lwip_ip_addr_type */ + u8_t type; +} ip_addr_t; + +extern const ip_addr_t ip_addr_any_type; + +/** @ingroup ip4addr */ +#define IPADDR4_INIT(u32val) { { { { u32val, 0ul, 0ul, 0ul } } }, IPADDR_TYPE_V4 } +/** @ingroup ip4addr */ +#define IPADDR4_INIT_BYTES(a,b,c,d) IPADDR4_INIT(PP_HTONL(LWIP_MAKEU32(a,b,c,d))) +/** @ingroup ip6addr */ +#define IPADDR6_INIT(a, b, c, d) { { { { a, b, c, d } } }, IPADDR_TYPE_V6 } +/** @ingroup ip6addr */ +#define IPADDR6_INIT_HOST(a, b, c, d) { { { { PP_HTONL(a), PP_HTONL(b), PP_HTONL(c), PP_HTONL(d) } } }, IPADDR_TYPE_V6 } + +/** @ingroup ipaddr */ +#define IP_IS_ANY_TYPE_VAL(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_ANY) +/** @ingroup ipaddr */ +#define IPADDR_ANY_TYPE_INIT { { { { 0ul, 0ul, 0ul, 0ul } } }, IPADDR_TYPE_ANY } + +/** @ingroup ip4addr */ +#define IP_IS_V4_VAL(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_V4) +/** @ingroup ip6addr */ +#define IP_IS_V6_VAL(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_V6) +/** @ingroup ip4addr */ +#define IP_IS_V4(ipaddr) (((ipaddr) == NULL) || IP_IS_V4_VAL(*(ipaddr))) +/** @ingroup ip6addr */ +#define IP_IS_V6(ipaddr) (((ipaddr) != NULL) && IP_IS_V6_VAL(*(ipaddr))) + +#define IP_SET_TYPE_VAL(ipaddr, iptype) do { (ipaddr).type = (iptype); }while(0) +#define IP_SET_TYPE(ipaddr, iptype) do { if((ipaddr) != NULL) { IP_SET_TYPE_VAL(*(ipaddr), iptype); }}while(0) +#define IP_GET_TYPE(ipaddr) ((ipaddr)->type) + +#define IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr) (IP_GET_TYPE(&pcb->local_ip) == IP_GET_TYPE(ipaddr)) +#define IP_ADDR_PCB_VERSION_MATCH(pcb, ipaddr) (IP_IS_ANY_TYPE_VAL(pcb->local_ip) || IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr)) + +/** @ingroup ip6addr + * Convert generic ip address to specific protocol version + */ +#define ip_2_ip6(ipaddr) (&((ipaddr)->u_addr.ip6)) +/** @ingroup ip4addr + * Convert generic ip address to specific protocol version + */ +#define ip_2_ip4(ipaddr) (&((ipaddr)->u_addr.ip4)) + +/** @ingroup ip4addr */ +#define IP_ADDR4(ipaddr,a,b,c,d) do { IP4_ADDR(ip_2_ip4(ipaddr),a,b,c,d); \ + IP_SET_TYPE_VAL(*(ipaddr), IPADDR_TYPE_V4); } while(0) +/** @ingroup ip6addr */ +#define IP_ADDR6(ipaddr,i0,i1,i2,i3) do { 
IP6_ADDR(ip_2_ip6(ipaddr),i0,i1,i2,i3); \ + IP_SET_TYPE_VAL(*(ipaddr), IPADDR_TYPE_V6); } while(0) +/** @ingroup ip6addr */ +#define IP_ADDR6_HOST(ipaddr,i0,i1,i2,i3) IP_ADDR6(ipaddr,PP_HTONL(i0),PP_HTONL(i1),PP_HTONL(i2),PP_HTONL(i3)) + +/** @ingroup ipaddr */ +#define ip_addr_copy(dest, src) do{ IP_SET_TYPE_VAL(dest, IP_GET_TYPE(&src)); if(IP_IS_V6_VAL(src)){ \ + ip6_addr_copy(*ip_2_ip6(&(dest)), *ip_2_ip6(&(src))); }else{ \ + ip4_addr_copy(*ip_2_ip4(&(dest)), *ip_2_ip4(&(src))); }}while(0) +/** @ingroup ip6addr */ +#define ip_addr_copy_from_ip6(dest, src) do{ \ + ip6_addr_copy(*ip_2_ip6(&(dest)), src); IP_SET_TYPE_VAL(dest, IPADDR_TYPE_V6); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_copy_from_ip4(dest, src) do{ \ + ip4_addr_copy(*ip_2_ip4(&(dest)), src); IP_SET_TYPE_VAL(dest, IPADDR_TYPE_V4); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_set_ip4_u32(ipaddr, val) do{if(ipaddr){ip4_addr_set_u32(ip_2_ip4(ipaddr), val); \ + IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); }}while(0) +/** @ingroup ip4addr */ +#define ip_addr_get_ip4_u32(ipaddr) (((ipaddr) && IP_IS_V4(ipaddr)) ? \ + ip4_addr_get_u32(ip_2_ip4(ipaddr)) : 0) +/** @ingroup ipaddr */ +#define ip_addr_set(dest, src) do{ IP_SET_TYPE(dest, IP_GET_TYPE(src)); if(IP_IS_V6(src)){ \ + ip6_addr_set(ip_2_ip6(dest), ip_2_ip6(src)); }else{ \ + ip4_addr_set(ip_2_ip4(dest), ip_2_ip4(src)); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_ipaddr(dest, src) ip_addr_set(dest, src) +/** @ingroup ipaddr */ +#define ip_addr_set_zero(ipaddr) do{ \ + ip6_addr_set_zero(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, 0); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_set_zero_ip4(ipaddr) do{ \ + ip6_addr_set_zero(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); }while(0) +/** @ingroup ip6addr */ +#define ip_addr_set_zero_ip6(ipaddr) do{ \ + ip6_addr_set_zero(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V6); }while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_any(is_ipv6, ipaddr) do{if(is_ipv6){ \ + ip6_addr_set_any(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_any(ip_2_ip4(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_loopback(is_ipv6, ipaddr) do{if(is_ipv6){ \ + ip6_addr_set_loopback(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_loopback(ip_2_ip4(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_hton(dest, src) do{if(IP_IS_V6(src)){ \ + ip6_addr_set_hton(ip_2_ip6(dest), ip_2_ip6(src)); IP_SET_TYPE(dest, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_hton(ip_2_ip4(dest), ip_2_ip4(src)); IP_SET_TYPE(dest, IPADDR_TYPE_V4); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_get_network(target, host, netmask) do{if(IP_IS_V6(host)){ \ + ip4_addr_set_zero(ip_2_ip4(target)); IP_SET_TYPE(target, IPADDR_TYPE_V6); } else { \ + ip4_addr_get_network(ip_2_ip4(target), ip_2_ip4(host), ip_2_ip4(netmask)); IP_SET_TYPE(target, IPADDR_TYPE_V4); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_netcmp(addr1, addr2, mask) ((IP_IS_V6(addr1) && IP_IS_V6(addr2)) ? \ + 0 : \ + ip4_addr_netcmp(ip_2_ip4(addr1), ip_2_ip4(addr2), mask)) +/** @ingroup ipaddr */ +#define ip_addr_cmp(addr1, addr2) ((IP_GET_TYPE(addr1) != IP_GET_TYPE(addr2)) ? 0 : (IP_IS_V6_VAL(*(addr1)) ? \ + ip6_addr_cmp(ip_2_ip6(addr1), ip_2_ip6(addr2)) : \ + ip4_addr_cmp(ip_2_ip4(addr1), ip_2_ip4(addr2)))) +/** @ingroup ipaddr */ +#define ip_addr_isany(ipaddr) ((IP_IS_V6(ipaddr)) ?
\ + ip6_addr_isany(ip_2_ip6(ipaddr)) : \ + ip4_addr_isany(ip_2_ip4(ipaddr))) +/** @ingroup ipaddr */ +#define ip_addr_isany_val(ipaddr) ((IP_IS_V6_VAL(ipaddr)) ? \ + ip6_addr_isany_val(*ip_2_ip6(&(ipaddr))) : \ + ip4_addr_isany_val(*ip_2_ip4(&(ipaddr)))) +/** @ingroup ipaddr */ +#define ip_addr_isbroadcast(ipaddr, netif) ((IP_IS_V6(ipaddr)) ? \ + 0 : \ + ip4_addr_isbroadcast(ip_2_ip4(ipaddr), netif)) +/** @ingroup ipaddr */ +#define ip_addr_ismulticast(ipaddr) ((IP_IS_V6(ipaddr)) ? \ + ip6_addr_ismulticast(ip_2_ip6(ipaddr)) : \ + ip4_addr_ismulticast(ip_2_ip4(ipaddr))) +/** @ingroup ipaddr */ +#define ip_addr_isloopback(ipaddr) ((IP_IS_V6(ipaddr)) ? \ + ip6_addr_isloopback(ip_2_ip6(ipaddr)) : \ + ip4_addr_isloopback(ip_2_ip4(ipaddr))) +/** @ingroup ipaddr */ +#define ip_addr_islinklocal(ipaddr) ((IP_IS_V6(ipaddr)) ? \ + ip6_addr_islinklocal(ip_2_ip6(ipaddr)) : \ + ip4_addr_islinklocal(ip_2_ip4(ipaddr))) +#define ip_addr_debug_print(debug, ipaddr) do { if(IP_IS_V6(ipaddr)) { \ + ip6_addr_debug_print(debug, ip_2_ip6(ipaddr)); } else { \ + ip4_addr_debug_print(debug, ip_2_ip4(ipaddr)); }}while(0) +#define ip_addr_debug_print_val(debug, ipaddr) do { if(IP_IS_V6_VAL(ipaddr)) { \ + ip6_addr_debug_print_val(debug, *ip_2_ip6(&(ipaddr))); } else { \ + ip4_addr_debug_print_val(debug, *ip_2_ip4(&(ipaddr))); }}while(0) +/** @ingroup ipaddr */ +#define ipaddr_ntoa(addr) (((addr) == NULL) ? "NULL" : \ + ((IP_IS_V6(addr)) ? ip6addr_ntoa(ip_2_ip6(addr)) : ip4addr_ntoa(ip_2_ip4(addr)))) +/** @ingroup ipaddr */ +#define ipaddr_ntoa_r(addr, buf, buflen) (((addr) == NULL) ? "NULL" : \ + ((IP_IS_V6(addr)) ? ip6addr_ntoa_r(ip_2_ip6(addr), buf, buflen) : ip4addr_ntoa_r(ip_2_ip4(addr), buf, buflen))) +int ipaddr_aton(const char *cp, ip_addr_t *addr); + +/** @ingroup ipaddr */ +#define IPADDR_STRLEN_MAX IP6ADDR_STRLEN_MAX + +/** @ingroup ipaddr */ +#define ip4_2_ipv4_mapped_ipv6(ip6addr, ip4addr) do { \ + (ip6addr)->addr[3] = (ip4addr)->addr; \ + (ip6addr)->addr[2] = PP_HTONL(0x0000FFFFUL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[0] = 0; } while(0); + +/** @ingroup ipaddr */ +#define unmap_ipv4_mapped_ipv6(ip4addr, ip6addr) \ + (ip4addr)->addr = (ip6addr)->addr[3]; + +#define IP46_ADDR_ANY(type) (((type) == IPADDR_TYPE_V6)? 
IP6_ADDR_ANY : IP4_ADDR_ANY) + +#else /* LWIP_IPV4 && LWIP_IPV6 */ + +#define IP_ADDR_PCB_VERSION_MATCH(addr, pcb) 1 +#define IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr) 1 + +#if LWIP_IPV4 + +typedef ip4_addr_t ip_addr_t; +#define IPADDR4_INIT(u32val) { u32val } +#define IPADDR4_INIT_BYTES(a,b,c,d) IPADDR4_INIT(PP_HTONL(LWIP_MAKEU32(a,b,c,d))) +#define IP_IS_V4_VAL(ipaddr) 1 +#define IP_IS_V6_VAL(ipaddr) 0 +#define IP_IS_V4(ipaddr) 1 +#define IP_IS_V6(ipaddr) 0 +#define IP_IS_ANY_TYPE_VAL(ipaddr) 0 +#define IP_SET_TYPE_VAL(ipaddr, iptype) +#define IP_SET_TYPE(ipaddr, iptype) +#define IP_GET_TYPE(ipaddr) IPADDR_TYPE_V4 +#define ip_2_ip4(ipaddr) (ipaddr) +#define IP_ADDR4(ipaddr,a,b,c,d) IP4_ADDR(ipaddr,a,b,c,d) + +#define ip_addr_copy(dest, src) ip4_addr_copy(dest, src) +#define ip_addr_copy_from_ip4(dest, src) ip4_addr_copy(dest, src) +#define ip_addr_set_ip4_u32(ipaddr, val) ip4_addr_set_u32(ip_2_ip4(ipaddr), val) +#define ip_addr_get_ip4_u32(ipaddr) ip4_addr_get_u32(ip_2_ip4(ipaddr)) +#define ip_addr_set(dest, src) ip4_addr_set(dest, src) +#define ip_addr_set_ipaddr(dest, src) ip4_addr_set(dest, src) +#define ip_addr_set_zero(ipaddr) ip4_addr_set_zero(ipaddr) +#define ip_addr_set_zero_ip4(ipaddr) ip4_addr_set_zero(ipaddr) +#define ip_addr_set_any(is_ipv6, ipaddr) ip4_addr_set_any(ipaddr) +#define ip_addr_set_loopback(is_ipv6, ipaddr) ip4_addr_set_loopback(ipaddr) +#define ip_addr_set_hton(dest, src) ip4_addr_set_hton(dest, src) +#define ip_addr_get_network(target, host, mask) ip4_addr_get_network(target, host, mask) +#define ip_addr_netcmp(addr1, addr2, mask) ip4_addr_netcmp(addr1, addr2, mask) +#define ip_addr_cmp(addr1, addr2) ip4_addr_cmp(addr1, addr2) +#define ip_addr_isany(ipaddr) ip4_addr_isany(ipaddr) +#define ip_addr_isany_val(ipaddr) ip4_addr_isany_val(ipaddr) +#define ip_addr_isloopback(ipaddr) ip4_addr_isloopback(ipaddr) +#define ip_addr_islinklocal(ipaddr) ip4_addr_islinklocal(ipaddr) +#define ip_addr_isbroadcast(addr, netif) ip4_addr_isbroadcast(addr, netif) +#define ip_addr_ismulticast(ipaddr) ip4_addr_ismulticast(ipaddr) +#define ip_addr_debug_print(debug, ipaddr) ip4_addr_debug_print(debug, ipaddr) +#define ip_addr_debug_print_val(debug, ipaddr) ip4_addr_debug_print_val(debug, ipaddr) +#define ipaddr_ntoa(ipaddr) ip4addr_ntoa(ipaddr) +#define ipaddr_ntoa_r(ipaddr, buf, buflen) ip4addr_ntoa_r(ipaddr, buf, buflen) +#define ipaddr_aton(cp, addr) ip4addr_aton(cp, addr) + +#define IPADDR_STRLEN_MAX IP4ADDR_STRLEN_MAX + +#define IP46_ADDR_ANY(type) (IP4_ADDR_ANY) + +#else /* LWIP_IPV4 */ + +typedef ip6_addr_t ip_addr_t; +#define IPADDR6_INIT(a, b, c, d) { { a, b, c, d } } +#define IPADDR6_INIT_HOST(a, b, c, d) { { PP_HTONL(a), PP_HTONL(b), PP_HTONL(c), PP_HTONL(d) } } +#define IP_IS_V4_VAL(ipaddr) 0 +#define IP_IS_V6_VAL(ipaddr) 1 +#define IP_IS_V4(ipaddr) 0 +#define IP_IS_V6(ipaddr) 1 +#define IP_IS_ANY_TYPE_VAL(ipaddr) 0 +#define IP_SET_TYPE_VAL(ipaddr, iptype) +#define IP_SET_TYPE(ipaddr, iptype) +#define IP_GET_TYPE(ipaddr) IPADDR_TYPE_V6 +#define ip_2_ip6(ipaddr) (ipaddr) +#define IP_ADDR6(ipaddr,i0,i1,i2,i3) IP6_ADDR(ipaddr,i0,i1,i2,i3) +#define IP_ADDR6_HOST(ipaddr,i0,i1,i2,i3) IP_ADDR6(ipaddr,PP_HTONL(i0),PP_HTONL(i1),PP_HTONL(i2),PP_HTONL(i3)) + +#define ip_addr_copy(dest, src) ip6_addr_copy(dest, src) +#define ip_addr_copy_from_ip6(dest, src) ip6_addr_copy(dest, src) +#define ip_addr_set(dest, src) ip6_addr_set(dest, src) +#define ip_addr_set_ipaddr(dest, src) ip6_addr_set(dest, src) +#define ip_addr_set_zero(ipaddr) ip6_addr_set_zero(ipaddr) +#define 
ip_addr_set_zero_ip6(ipaddr) ip6_addr_set_zero(ipaddr) +#define ip_addr_set_any(is_ipv6, ipaddr) ip6_addr_set_any(ipaddr) +#define ip_addr_set_loopback(is_ipv6, ipaddr) ip6_addr_set_loopback(ipaddr) +#define ip_addr_set_hton(dest, src) ip6_addr_set_hton(dest, src) +#define ip_addr_get_network(target, host, mask) ip6_addr_set_zero(target) +#define ip_addr_netcmp(addr1, addr2, mask) 0 +#define ip_addr_cmp(addr1, addr2) ip6_addr_cmp(addr1, addr2) +#define ip_addr_isany(ipaddr) ip6_addr_isany(ipaddr) +#define ip_addr_isany_val(ipaddr) ip6_addr_isany_val(ipaddr) +#define ip_addr_isloopback(ipaddr) ip6_addr_isloopback(ipaddr) +#define ip_addr_islinklocal(ipaddr) ip6_addr_islinklocal(ipaddr) +#define ip_addr_isbroadcast(addr, netif) 0 +#define ip_addr_ismulticast(ipaddr) ip6_addr_ismulticast(ipaddr) +#define ip_addr_debug_print(debug, ipaddr) ip6_addr_debug_print(debug, ipaddr) +#define ip_addr_debug_print_val(debug, ipaddr) ip6_addr_debug_print_val(debug, ipaddr) +#define ipaddr_ntoa(ipaddr) ip6addr_ntoa(ipaddr) +#define ipaddr_ntoa_r(ipaddr, buf, buflen) ip6addr_ntoa_r(ipaddr, buf, buflen) +#define ipaddr_aton(cp, addr) ip6addr_aton(cp, addr) + +#define IPADDR_STRLEN_MAX IP6ADDR_STRLEN_MAX + +#define IP46_ADDR_ANY(type) (IP6_ADDR_ANY) + +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if LWIP_IPV4 + +extern const ip_addr_t ip_addr_any; +extern const ip_addr_t ip_addr_broadcast; + +/** + * @ingroup ip4addr + * Can be used as a fixed/const ip_addr_t + * for the IP wildcard. + * Defined to @ref IP4_ADDR_ANY when IPv4 is enabled. + * Defined to @ref IP6_ADDR_ANY in IPv6 only systems. + * Use this if you can handle IPv4 _AND_ IPv6 addresses. + * Use @ref IP4_ADDR_ANY or @ref IP6_ADDR_ANY when the IP + * type matters. + */ +#define IP_ADDR_ANY IP4_ADDR_ANY +/** + * @ingroup ip4addr + * Can be used as a fixed/const ip_addr_t + * for the IPv4 wildcard and the broadcast address + */ +#define IP4_ADDR_ANY (&ip_addr_any) +/** + * @ingroup ip4addr + * Can be used as a fixed/const ip4_addr_t + * for the wildcard and the broadcast address + */ +#define IP4_ADDR_ANY4 (ip_2_ip4(&ip_addr_any)) + +/** @ingroup ip4addr */ +#define IP_ADDR_BROADCAST (&ip_addr_broadcast) +/** @ingroup ip4addr */ +#define IP4_ADDR_BROADCAST (ip_2_ip4(&ip_addr_broadcast)) + +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 + +extern const ip_addr_t ip6_addr_any; + +/** + * @ingroup ip6addr + * IP6_ADDR_ANY can be used as a fixed ip_addr_t + * for the IPv6 wildcard address + */ +#define IP6_ADDR_ANY (&ip6_addr_any) +/** + * @ingroup ip6addr + * IP6_ADDR_ANY6 can be used as a fixed ip6_addr_t + * for the IPv6 wildcard address + */ +#define IP6_ADDR_ANY6 (ip_2_ip6(&ip6_addr_any)) + +#if !LWIP_IPV4 +/** IPv6-only configurations */ +#define IP_ADDR_ANY IP6_ADDR_ANY +#endif /* !LWIP_IPV4 */ + +#endif + +#if LWIP_IPV4 && LWIP_IPV6 +/** @ingroup ipaddr */ +#define IP_ANY_TYPE (&ip_addr_any_type) +#else +#define IP_ANY_TYPE IP_ADDR_ANY +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP_ADDR_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/mem.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/mem.h new file mode 100644 index 000000000..424de91b8 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/mem.h @@ -0,0 +1,82 @@ +/** + * @file + * Heap API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_MEM_H +#define LWIP_HDR_MEM_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if MEM_LIBC_MALLOC + +#include "lwip/arch.h" + +typedef size_t mem_size_t; +#define MEM_SIZE_F SZT_F + +#elif MEM_USE_POOLS + +typedef u16_t mem_size_t; +#define MEM_SIZE_F U16_F + +#else + +/* MEM_SIZE would have to be aligned, but using 64000 here instead of + * 65535 leaves some room for alignment... + */ +#if MEM_SIZE > 64000L +typedef u32_t mem_size_t; +#define MEM_SIZE_F U32_F +#else +typedef u16_t mem_size_t; +#define MEM_SIZE_F U16_F +#endif /* MEM_SIZE > 64000 */ +#endif + +void mem_init(void); +void *mem_trim(void *mem, mem_size_t size); +void *mem_malloc(mem_size_t size); +void *mem_calloc(mem_size_t count, mem_size_t size); +void mem_free(void *mem); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEM_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/memp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/memp.h new file mode 100644 index 000000000..c45aca838 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/memp.h @@ -0,0 +1,155 @@ +/** + * @file + * Memory pool API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_MEMP_H +#define LWIP_HDR_MEMP_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* run once with empty definition to handle all custom includes in lwippools.h */ +#define LWIP_MEMPOOL(name,num,size,desc) +#include "lwip/priv/memp_std.h" + +/** Create the list of all memory pools managed by memp. MEMP_MAX represents a NULL pool at the end */ +typedef enum { +#define LWIP_MEMPOOL(name,num,size,desc) MEMP_##name, +#include "lwip/priv/memp_std.h" + MEMP_MAX +} memp_t; + +#include "lwip/priv/memp_priv.h" +#include "lwip/stats.h" + +extern const struct memp_desc* const memp_pools[MEMP_MAX]; + +/** + * @ingroup mempool + * Declare prototype for private memory pool if it is used in multiple files + */ +#define LWIP_MEMPOOL_PROTOTYPE(name) extern const struct memp_desc memp_ ## name + +#if MEMP_MEM_MALLOC + +#define LWIP_MEMPOOL_DECLARE(name,num,size,desc) \ + LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(memp_stats_ ## name) \ + const struct memp_desc memp_ ## name = { \ + DECLARE_LWIP_MEMPOOL_DESC(desc) \ + LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(memp_stats_ ## name) \ + LWIP_MEM_ALIGN_SIZE(size) \ + }; + +#else /* MEMP_MEM_MALLOC */ + +/** + * @ingroup mempool + * Declare a private memory pool + * Private mempools example: + * .h: only when pool is used in multiple .c files: LWIP_MEMPOOL_PROTOTYPE(my_private_pool); + * .c: + * - in global variables section: LWIP_MEMPOOL_DECLARE(my_private_pool, 10, sizeof(foo), "Some description") + * - call ONCE before using pool (e.g. in some init() function): LWIP_MEMPOOL_INIT(my_private_pool); + * - allocate: void* my_new_mem = LWIP_MEMPOOL_ALLOC(my_private_pool); + * - free: LWIP_MEMPOOL_FREE(my_private_pool, my_new_mem); + * + * To relocate a pool, declare it as extern in cc.h. 
Example for GCC: + * extern u8_t __attribute__((section(".onchip_mem"))) memp_memory_my_private_pool[]; + */ +#define LWIP_MEMPOOL_DECLARE(name,num,size,desc) \ + LWIP_DECLARE_MEMORY_ALIGNED(memp_memory_ ## name ## _base, ((num) * (MEMP_SIZE + MEMP_ALIGN_SIZE(size)))); \ + \ + LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(memp_stats_ ## name) \ + \ + static struct memp *memp_tab_ ## name; \ + \ + const struct memp_desc memp_ ## name = { \ + DECLARE_LWIP_MEMPOOL_DESC(desc) \ + LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(memp_stats_ ## name) \ + LWIP_MEM_ALIGN_SIZE(size), \ + (num), \ + memp_memory_ ## name ## _base, \ + &memp_tab_ ## name \ + }; + +#endif /* MEMP_MEM_MALLOC */ + +/** + * @ingroup mempool + * Initialize a private memory pool + */ +#define LWIP_MEMPOOL_INIT(name) memp_init_pool(&memp_ ## name) +/** + * @ingroup mempool + * Allocate from a private memory pool + */ +#define LWIP_MEMPOOL_ALLOC(name) memp_malloc_pool(&memp_ ## name) +/** + * @ingroup mempool + * Free element from a private memory pool + */ +#define LWIP_MEMPOOL_FREE(name, x) memp_free_pool(&memp_ ## name, (x)) + +#if MEM_USE_POOLS +/** This structure is used to save the pool one element came from. + * This has to be defined here as it is required for pool size calculation. */ +struct memp_malloc_helper +{ + memp_t poolnr; +#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) + u16_t size; +#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */ +}; +#endif /* MEM_USE_POOLS */ + +void memp_init(void); + +#if MEMP_OVERFLOW_CHECK +void *memp_malloc_fn(memp_t type, const char* file, const int line); +#define memp_malloc(t) memp_malloc_fn((t), __FILE__, __LINE__) +#else +void *memp_malloc(memp_t type); +#endif +void memp_free(memp_t type, void *mem); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEMP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h new file mode 100644 index 000000000..f8c99d9d9 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h @@ -0,0 +1,99 @@ +/** + * @file + * + * Multicast listener discovery for IPv6. Aims to be compliant with RFC 2710. + * No support for MLDv2. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_MLD6_H +#define LWIP_HDR_MLD6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6_MLD && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** MLD group */ +struct mld_group { + /** next link */ + struct mld_group *next; + /** multicast address */ + ip6_addr_t group_address; + /** signifies we were the last person to report */ + u8_t last_reporter_flag; + /** current state of the group */ + u8_t group_state; + /** timer for reporting */ + u16_t timer; + /** counter of simultaneous uses */ + u8_t use; +}; + +#define MLD6_TMR_INTERVAL 100 /* Milliseconds */ + +err_t mld6_stop(struct netif *netif); +void mld6_report_groups(struct netif *netif); +void mld6_tmr(void); +struct mld_group *mld6_lookfor_group(struct netif *ifp, const ip6_addr_t *addr); +void mld6_input(struct pbuf *p, struct netif *inp); +err_t mld6_joingroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr); +err_t mld6_joingroup_netif(struct netif *netif, const ip6_addr_t *groupaddr); +err_t mld6_leavegroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr); +err_t mld6_leavegroup_netif(struct netif *netif, const ip6_addr_t *groupaddr); + +/** @ingroup mld6 + * Get list head of MLD6 groups for netif. + * Note: The allnodes group IP is NOT in the list, since it must always + * be received for correct IPv6 operation. + * @see @ref netif_set_mld_mac_filter() + */ +#define netif_mld6_data(netif) ((struct mld_group *)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6_MLD && LWIP_IPV6 */ + +#endif /* LWIP_HDR_MLD6_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h new file mode 100644 index 000000000..3dc0278f9 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h @@ -0,0 +1,84 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ND6_H +#define LWIP_HDR_ND6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** 1 second period */ +#define ND6_TMR_INTERVAL 1000 + +struct pbuf; +struct netif; + +void nd6_tmr(void); +void nd6_input(struct pbuf *p, struct netif *inp); +void nd6_clear_destination_cache(void); +struct netif *nd6_find_route(const ip6_addr_t *ip6addr); +err_t nd6_get_next_hop_addr_or_queue(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr, const u8_t **hwaddrp); +u16_t nd6_get_destination_mtu(const ip6_addr_t *ip6addr, struct netif *netif); +#if LWIP_ND6_TCP_REACHABILITY_HINTS +void nd6_reachability_hint(const ip6_addr_t *ip6addr); +#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ +void nd6_cleanup_netif(struct netif *netif); +#if LWIP_IPV6_MLD +void nd6_adjust_mld_membership(struct netif *netif, s8_t addr_idx, u8_t new_state); +#endif /* LWIP_IPV6_MLD */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_ND6_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h new file mode 100644 index 000000000..bfd7242c9 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h @@ -0,0 +1,118 @@ +/** + * @file + * netbuf API (for netconn API) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_NETBUF_H +#define LWIP_HDR_NETBUF_H + +#include "lwip/opt.h" + +#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ +/* Note: Netconn API is always available when sockets are enabled - + * sockets are implemented on top of them */ + +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** This netbuf has dest-addr/port set */ +#define NETBUF_FLAG_DESTADDR 0x01 +/** This netbuf includes a checksum */ +#define NETBUF_FLAG_CHKSUM 0x02 + +/** "Network buffer" - contains data and addressing info */ +struct netbuf { + struct pbuf *p, *ptr; + ip_addr_t addr; + u16_t port; +#if LWIP_NETBUF_RECVINFO || LWIP_CHECKSUM_ON_COPY +#if LWIP_CHECKSUM_ON_COPY + u8_t flags; +#endif /* LWIP_CHECKSUM_ON_COPY */ + u16_t toport_chksum; +#if LWIP_NETBUF_RECVINFO + ip_addr_t toaddr; +#endif /* LWIP_NETBUF_RECVINFO */ +#endif /* LWIP_NETBUF_RECVINFO || LWIP_CHECKSUM_ON_COPY */ +}; + +/* Network buffer functions: */ +struct netbuf * netbuf_new (void); +void netbuf_delete (struct netbuf *buf); +void * netbuf_alloc (struct netbuf *buf, u16_t size); +void netbuf_free (struct netbuf *buf); +err_t netbuf_ref (struct netbuf *buf, + const void *dataptr, u16_t size); +void netbuf_chain (struct netbuf *head, struct netbuf *tail); + +err_t netbuf_data (struct netbuf *buf, + void **dataptr, u16_t *len); +s8_t netbuf_next (struct netbuf *buf); +void netbuf_first (struct netbuf *buf); + + +#define netbuf_copy_partial(buf, dataptr, len, offset) \ + pbuf_copy_partial((buf)->p, (dataptr), (len), (offset)) +#define netbuf_copy(buf,dataptr,len) netbuf_copy_partial(buf, dataptr, len, 0) +#define netbuf_take(buf, dataptr, len) pbuf_take((buf)->p, dataptr, len) +#define netbuf_len(buf) ((buf)->p->tot_len) +#define netbuf_fromaddr(buf) (&((buf)->addr)) +#define netbuf_set_fromaddr(buf, fromaddr) ip_addr_set(&((buf)->addr), fromaddr) +#define netbuf_fromport(buf) ((buf)->port) +#if LWIP_NETBUF_RECVINFO +#define netbuf_destaddr(buf) (&((buf)->toaddr)) +#define netbuf_set_destaddr(buf, destaddr) ip_addr_set(&((buf)->toaddr), destaddr) +#if LWIP_CHECKSUM_ON_COPY +#define netbuf_destport(buf) (((buf)->flags & NETBUF_FLAG_DESTADDR) ? 
(buf)->toport_chksum : 0) +#else /* LWIP_CHECKSUM_ON_COPY */ +#define netbuf_destport(buf) ((buf)->toport_chksum) +#endif /* LWIP_CHECKSUM_ON_COPY */ +#endif /* LWIP_NETBUF_RECVINFO */ +#if LWIP_CHECKSUM_ON_COPY +#define netbuf_set_chksum(buf, chksum) do { (buf)->flags = NETBUF_FLAG_CHKSUM; \ + (buf)->toport_chksum = chksum; } while(0) +#endif /* LWIP_CHECKSUM_ON_COPY */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#endif /* LWIP_HDR_NETBUF_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h new file mode 100644 index 000000000..6aae914be --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h @@ -0,0 +1,150 @@ +/** + * @file + * NETDB API (sockets) + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_NETDB_H +#define LWIP_HDR_NETDB_H + +#include "lwip/opt.h" + +#if LWIP_DNS && LWIP_SOCKET + +#include "lwip/arch.h" +#include "lwip/inet.h" +#include "lwip/sockets.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* some rarely used options */ +#ifndef LWIP_DNS_API_DECLARE_H_ERRNO +#define LWIP_DNS_API_DECLARE_H_ERRNO 1 +#endif + +#ifndef LWIP_DNS_API_DEFINE_ERRORS +#define LWIP_DNS_API_DEFINE_ERRORS 1 +#endif + +#ifndef LWIP_DNS_API_DEFINE_FLAGS +#define LWIP_DNS_API_DEFINE_FLAGS 1 +#endif + +#ifndef LWIP_DNS_API_DECLARE_STRUCTS +#define LWIP_DNS_API_DECLARE_STRUCTS 1 +#endif + +#if LWIP_DNS_API_DEFINE_ERRORS +/** Errors used by the DNS API functions, h_errno can be one of them */ +#define EAI_NONAME 200 +#define EAI_SERVICE 201 +#define EAI_FAIL 202 +#define EAI_MEMORY 203 +#define EAI_FAMILY 204 + +#define HOST_NOT_FOUND 210 +#define NO_DATA 211 +#define NO_RECOVERY 212 +#define TRY_AGAIN 213 +#endif /* LWIP_DNS_API_DEFINE_ERRORS */ + +#if LWIP_DNS_API_DEFINE_FLAGS +/* input flags for struct addrinfo */ +#define AI_PASSIVE 0x01 +#define AI_CANONNAME 0x02 +#define AI_NUMERICHOST 0x04 +#define AI_NUMERICSERV 0x08 +#define AI_V4MAPPED 0x10 +#define AI_ALL 0x20 +#define AI_ADDRCONFIG 0x40 +#endif /* LWIP_DNS_API_DEFINE_FLAGS */ + +#if LWIP_DNS_API_DECLARE_STRUCTS +struct hostent { + char *h_name; /* Official name of the host. */ + char **h_aliases; /* A pointer to an array of pointers to alternative host names, + terminated by a null pointer. */ + int h_addrtype; /* Address type. */ + int h_length; /* The length, in bytes, of the address. */ + char **h_addr_list; /* A pointer to an array of pointers to network addresses (in + network byte order) for the host, terminated by a null pointer. */ +#define h_addr h_addr_list[0] /* for backward compatibility */ +}; + +struct addrinfo { + int ai_flags; /* Input flags. */ + int ai_family; /* Address family of socket. */ + int ai_socktype; /* Socket type. */ + int ai_protocol; /* Protocol of socket. */ + socklen_t ai_addrlen; /* Length of socket address. */ + struct sockaddr *ai_addr; /* Socket address of socket. */ + char *ai_canonname; /* Canonical name of service location. */ + struct addrinfo *ai_next; /* Pointer to next in list. 
*/ +}; +#endif /* LWIP_DNS_API_DECLARE_STRUCTS */ + +#define NETDB_ELEM_SIZE (sizeof(struct addrinfo) + sizeof(struct sockaddr_storage) + DNS_MAX_NAME_LENGTH + 1) + +#if LWIP_DNS_API_DECLARE_H_ERRNO +/* application accessible error code set by the DNS API functions */ +extern int h_errno; +#endif /* LWIP_DNS_API_DECLARE_H_ERRNO*/ + +struct hostent *lwip_gethostbyname(const char *name); +int lwip_gethostbyname_r(const char *name, struct hostent *ret, char *buf, + size_t buflen, struct hostent **result, int *h_errnop); +void lwip_freeaddrinfo(struct addrinfo *ai); +int lwip_getaddrinfo(const char *nodename, + const char *servname, + const struct addrinfo *hints, + struct addrinfo **res); + +#if LWIP_COMPAT_SOCKETS +/** @ingroup netdbapi */ +#define gethostbyname(name) lwip_gethostbyname(name) +/** @ingroup netdbapi */ +#define gethostbyname_r(name, ret, buf, buflen, result, h_errnop) \ + lwip_gethostbyname_r(name, ret, buf, buflen, result, h_errnop) +/** @ingroup netdbapi */ +#define freeaddrinfo(addrinfo) lwip_freeaddrinfo(addrinfo) +/** @ingroup netdbapi */ +#define getaddrinfo(nodname, servname, hints, res) \ + lwip_getaddrinfo(nodname, servname, hints, res) +#endif /* LWIP_COMPAT_SOCKETS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DNS && LWIP_SOCKET */ + +#endif /* LWIP_HDR_NETDB_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/netif.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/netif.h new file mode 100644 index 000000000..61bac9fbc --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/netif.h @@ -0,0 +1,474 @@ +/** + * @file + * netif API (to be used from TCPIP thread) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_NETIF_H +#define LWIP_HDR_NETIF_H + +#include "lwip/opt.h" + +#define ENABLE_LOOPBACK (LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF) + +#include "lwip/err.h" + +#include "lwip/ip_addr.h" + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/stats.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Throughout this file, IP addresses are expected to be in + * the same byte order as in IP_PCB. */ + +/** Must be the maximum of all used hardware address lengths + across all types of interfaces in use. + This does not have to be changed, normally. */ +#ifndef NETIF_MAX_HWADDR_LEN +#define NETIF_MAX_HWADDR_LEN 6U +#endif + +/** + * @defgroup netif_flags Flags + * @ingroup netif + * @{ + */ + +/** Whether the network interface is 'up'. This is + * a software flag used to control whether this network + * interface is enabled and processes traffic. + * It must be set by the startup code before this netif can be used + * (also for dhcp/autoip). + */ +#define NETIF_FLAG_UP 0x01U +/** If set, the netif has broadcast capability. + * Set by the netif driver in its init function. */ +#define NETIF_FLAG_BROADCAST 0x02U +/** If set, the interface has an active link + * (set by the network interface driver). + * Either set by the netif driver in its init function (if the link + * is up at that time) or at a later point once the link comes up + * (if link detection is supported by the hardware). */ +#define NETIF_FLAG_LINK_UP 0x04U +/** If set, the netif is an ethernet device using ARP. + * Set by the netif driver in its init function. + * Used to check input packet types and use of DHCP. */ +#define NETIF_FLAG_ETHARP 0x08U +/** If set, the netif is an ethernet device. It might not use + * ARP or TCP/IP if it is used for PPPoE only. + */ +#define NETIF_FLAG_ETHERNET 0x10U +/** If set, the netif has IGMP capability. + * Set by the netif driver in its init function. */ +#define NETIF_FLAG_IGMP 0x20U +/** If set, the netif has MLD6 capability. + * Set by the netif driver in its init function. */ +#define NETIF_FLAG_MLD6 0x40U + +/** + * @} + */ + +enum lwip_internal_netif_client_data_index +{ +#if LWIP_DHCP + LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, +#endif +#if LWIP_AUTOIP + LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, +#endif +#if LWIP_IGMP + LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, +#endif +#if LWIP_IPV6_MLD + LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, +#endif + LWIP_NETIF_CLIENT_DATA_INDEX_MAX +}; + +#if LWIP_CHECKSUM_CTRL_PER_NETIF +#define NETIF_CHECKSUM_GEN_IP 0x0001 +#define NETIF_CHECKSUM_GEN_UDP 0x0002 +#define NETIF_CHECKSUM_GEN_TCP 0x0004 +#define NETIF_CHECKSUM_GEN_ICMP 0x0008 +#define NETIF_CHECKSUM_GEN_ICMP6 0x0010 +#define NETIF_CHECKSUM_CHECK_IP 0x0100 +#define NETIF_CHECKSUM_CHECK_UDP 0x0200 +#define NETIF_CHECKSUM_CHECK_TCP 0x0400 +#define NETIF_CHECKSUM_CHECK_ICMP 0x0800 +#define NETIF_CHECKSUM_CHECK_ICMP6 0x1000 +#define NETIF_CHECKSUM_ENABLE_ALL 0xFFFF +#define NETIF_CHECKSUM_DISABLE_ALL 0x0000 +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ + +struct netif; + +/** MAC Filter Actions, these are passed to a netif's igmp_mac_filter or + * mld_mac_filter callback function. */ +enum netif_mac_filter_action { + /** Delete a filter entry */ + NETIF_DEL_MAC_FILTER = 0, + /** Add a filter entry */ + NETIF_ADD_MAC_FILTER = 1 +}; + +/** Function prototype for netif init functions. Set up flags and output/linkoutput + * callback functions in this function. 
+ * + * @param netif The netif to initialize + */ +typedef err_t (*netif_init_fn)(struct netif *netif); +/** Function prototype for netif->input functions. This function is saved as 'input' + * callback function in the netif struct. Call it when a packet has been received. + * + * @param p The received packet, copied into a pbuf + * @param inp The netif which received the packet + */ +typedef err_t (*netif_input_fn)(struct pbuf *p, struct netif *inp); + +#if LWIP_IPV4 +/** Function prototype for netif->output functions. Called by lwIP when a packet + * shall be sent. For ethernet netif, set this to 'etharp_output' and set + * 'linkoutput'. + * + * @param netif The netif which shall send a packet + * @param p The packet to send (p->payload points to IP header) + * @param ipaddr The IP address to which the packet shall be sent + */ +typedef err_t (*netif_output_fn)(struct netif *netif, struct pbuf *p, + const ip4_addr_t *ipaddr); +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 +/** Function prototype for netif->output_ip6 functions. Called by lwIP when a packet + * shall be sent. For ethernet netif, set this to 'ethip6_output' and set + * 'linkoutput'. + * + * @param netif The netif which shall send a packet + * @param p The packet to send (p->payload points to IP header) + * @param ipaddr The IPv6 address to which the packet shall be sent + */ +typedef err_t (*netif_output_ip6_fn)(struct netif *netif, struct pbuf *p, + const ip6_addr_t *ipaddr); +#endif /* LWIP_IPV6 */ + +/** Function prototype for netif->linkoutput functions. Only used for ethernet + * netifs. This function is called by ARP when a packet shall be sent. + * + * @param netif The netif which shall send a packet + * @param p The packet to send (raw ethernet packet) + */ +typedef err_t (*netif_linkoutput_fn)(struct netif *netif, struct pbuf *p); +/** Function prototype for netif status- or link-callback functions. */ +typedef void (*netif_status_callback_fn)(struct netif *netif); +#if LWIP_IPV4 && LWIP_IGMP +/** Function prototype for netif igmp_mac_filter functions */ +typedef err_t (*netif_igmp_mac_filter_fn)(struct netif *netif, + const ip4_addr_t *group, enum netif_mac_filter_action action); +#endif /* LWIP_IPV4 && LWIP_IGMP */ +#if LWIP_IPV6 && LWIP_IPV6_MLD +/** Function prototype for netif mld_mac_filter functions */ +typedef err_t (*netif_mld_mac_filter_fn)(struct netif *netif, + const ip6_addr_t *group, enum netif_mac_filter_action action); +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + +#if LWIP_DHCP || LWIP_AUTOIP || LWIP_IGMP || LWIP_IPV6_MLD || (LWIP_NUM_NETIF_CLIENT_DATA > 0) +u8_t netif_alloc_client_data_id(void); +/** @ingroup netif_cd + * Set client data. Obtain ID from netif_alloc_client_data_id(). + */ +#define netif_set_client_data(netif, id, data) netif_get_client_data(netif, id) = (data) +/** @ingroup netif_cd + * Get client data. Obtain ID from netif_alloc_client_data_id(). + */ +#define netif_get_client_data(netif, id) (netif)->client_data[(id)] +#endif /* LWIP_DHCP || LWIP_AUTOIP || (LWIP_NUM_NETIF_CLIENT_DATA > 0) */ + +/** Generic data structure used for all lwIP network interfaces. + * The following fields should be filled in by the initialization + * function for the device driver: hwaddr_len, hwaddr[], mtu, flags */ +struct netif { + /** pointer to next in linked list */ + struct netif *next; + +#if LWIP_IPV4 + /** IP address configuration in network byte order */ + ip_addr_t ip_addr; + ip_addr_t netmask; + ip_addr_t gw; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + /** Array of IPv6 addresses for this netif. 
*/ + ip_addr_t ip6_addr[LWIP_IPV6_NUM_ADDRESSES]; + /** The state of each IPv6 address (Tentative, Preferred, etc). + * @see ip6_addr.h */ + u8_t ip6_addr_state[LWIP_IPV6_NUM_ADDRESSES]; +#endif /* LWIP_IPV6 */ + /** This function is called by the network device driver + * to pass a packet up the TCP/IP stack. */ + netif_input_fn input; +#if LWIP_IPV4 + /** This function is called by the IP module when it wants + * to send a packet on the interface. This function typically + * first resolves the hardware address, then sends the packet. + * For ethernet physical layer, this is usually etharp_output() */ + netif_output_fn output; +#endif /* LWIP_IPV4 */ + /** This function is called by ethernet_output() when it wants + * to send a packet on the interface. This function outputs + * the pbuf as-is on the link medium. */ + netif_linkoutput_fn linkoutput; +#if LWIP_IPV6 + /** This function is called by the IPv6 module when it wants + * to send a packet on the interface. This function typically + * first resolves the hardware address, then sends the packet. + * For ethernet physical layer, this is usually ethip6_output() */ + netif_output_ip6_fn output_ip6; +#endif /* LWIP_IPV6 */ +#if LWIP_NETIF_STATUS_CALLBACK + /** This function is called when the netif state is set to up or down + */ + netif_status_callback_fn status_callback; +#endif /* LWIP_NETIF_STATUS_CALLBACK */ +#if LWIP_NETIF_LINK_CALLBACK + /** This function is called when the netif link is set to up or down + */ + netif_status_callback_fn link_callback; +#endif /* LWIP_NETIF_LINK_CALLBACK */ +#if LWIP_NETIF_REMOVE_CALLBACK + /** This function is called when the netif has been removed */ + netif_status_callback_fn remove_callback; +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + /** This field can be set by the device driver and could point + * to state information for the device. */ + void *state; +#ifdef netif_get_client_data + void* client_data[LWIP_NETIF_CLIENT_DATA_INDEX_MAX + LWIP_NUM_NETIF_CLIENT_DATA]; +#endif +#if LWIP_IPV6_AUTOCONFIG + /** is this netif enabled for IPv6 autoconfiguration */ + u8_t ip6_autoconfig_enabled; +#endif /* LWIP_IPV6_AUTOCONFIG */ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /** Number of Router Solicitation messages that remain to be sent. 
*/ + u8_t rs_count; +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ +#if LWIP_NETIF_HOSTNAME + /* the hostname for this netif, NULL is a valid value */ + const char* hostname; +#endif /* LWIP_NETIF_HOSTNAME */ +#if LWIP_CHECKSUM_CTRL_PER_NETIF + u16_t chksum_flags; +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF*/ + /** maximum transfer unit (in bytes) */ + u16_t mtu; + /** number of bytes used in hwaddr */ + u8_t hwaddr_len; + /** link level hardware address of this interface */ + u8_t hwaddr[NETIF_MAX_HWADDR_LEN]; + /** flags (@see @ref netif_flags) */ + u8_t flags; + /** descriptive abbreviation */ + char name[2]; + /** number of this interface */ + u8_t num; +#if MIB2_STATS + /** link type (from "snmp_ifType" enum from snmp_mib2.h) */ + u8_t link_type; + /** (estimate) link speed */ + u32_t link_speed; + /** timestamp at last change made (up/down) */ + u32_t ts; + /** counters */ + struct stats_mib2_netif_ctrs mib2_counters; +#endif /* MIB2_STATS */ +#if LWIP_IPV4 && LWIP_IGMP + /** This function could be called to add or delete an entry in the multicast + filter table of the ethernet MAC.*/ + netif_igmp_mac_filter_fn igmp_mac_filter; +#endif /* LWIP_IPV4 && LWIP_IGMP */ +#if LWIP_IPV6 && LWIP_IPV6_MLD + /** This function could be called to add or delete an entry in the IPv6 multicast + filter table of the ethernet MAC. */ + netif_mld_mac_filter_fn mld_mac_filter; +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ +#if LWIP_NETIF_HWADDRHINT + u8_t *addr_hint; +#endif /* LWIP_NETIF_HWADDRHINT */ +#if ENABLE_LOOPBACK + /* List of packets to be queued for ourselves. */ + struct pbuf *loop_first; + struct pbuf *loop_last; +#if LWIP_LOOPBACK_MAX_PBUFS + u16_t loop_cnt_current; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ +#endif /* ENABLE_LOOPBACK */ +}; + +#if LWIP_CHECKSUM_CTRL_PER_NETIF +#define NETIF_SET_CHECKSUM_CTRL(netif, chksumflags) do { \ + (netif)->chksum_flags = chksumflags; } while(0) +#define IF__NETIF_CHECKSUM_ENABLED(netif, chksumflag) if (((netif) == NULL) || (((netif)->chksum_flags & (chksumflag)) != 0)) +#else /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#define NETIF_SET_CHECKSUM_CTRL(netif, chksumflags) +#define IF__NETIF_CHECKSUM_ENABLED(netif, chksumflag) +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ + +/** The list of network interfaces. */ +extern struct netif *netif_list; +/** The default network interface. */ +extern struct netif *netif_default; + +void netif_init(void); + +struct netif *netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input); +#if LWIP_IPV4 +void netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, + const ip4_addr_t *gw); +#endif /* LWIP_IPV4 */ +void netif_remove(struct netif * netif); + +/* Returns a network interface given its name. The name is of the form + "et0", where the first two letters are the "name" field in the + netif structure, and the digit is in the num field in the same + structure. 
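+ *
+ * Example usage (an illustrative sketch; the "st0" name and the fallback
+ * behaviour are placeholders, not part of this header):
+ *
+ *   struct netif *n = netif_find("st0");
+ *   if (n != NULL) {
+ *     netif_set_default(n);
+ *   }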
*/ +struct netif *netif_find(const char *name); + +void netif_set_default(struct netif *netif); + +#if LWIP_IPV4 +void netif_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr); +void netif_set_netmask(struct netif *netif, const ip4_addr_t *netmask); +void netif_set_gw(struct netif *netif, const ip4_addr_t *gw); +/** @ingroup netif_ip4 */ +#define netif_ip4_addr(netif) ((const ip4_addr_t*)ip_2_ip4(&((netif)->ip_addr))) +/** @ingroup netif_ip4 */ +#define netif_ip4_netmask(netif) ((const ip4_addr_t*)ip_2_ip4(&((netif)->netmask))) +/** @ingroup netif_ip4 */ +#define netif_ip4_gw(netif) ((const ip4_addr_t*)ip_2_ip4(&((netif)->gw))) +/** @ingroup netif_ip4 */ +#define netif_ip_addr4(netif) ((const ip_addr_t*)&((netif)->ip_addr)) +/** @ingroup netif_ip4 */ +#define netif_ip_netmask4(netif) ((const ip_addr_t*)&((netif)->netmask)) +/** @ingroup netif_ip4 */ +#define netif_ip_gw4(netif) ((const ip_addr_t*)&((netif)->gw)) +#endif /* LWIP_IPV4 */ + +void netif_set_up(struct netif *netif); +void netif_set_down(struct netif *netif); +/** @ingroup netif + * Ask if an interface is up + */ +#define netif_is_up(netif) (((netif)->flags & NETIF_FLAG_UP) ? (u8_t)1 : (u8_t)0) + +#if LWIP_NETIF_STATUS_CALLBACK +void netif_set_status_callback(struct netif *netif, netif_status_callback_fn status_callback); +#endif /* LWIP_NETIF_STATUS_CALLBACK */ +#if LWIP_NETIF_REMOVE_CALLBACK +void netif_set_remove_callback(struct netif *netif, netif_status_callback_fn remove_callback); +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + +void netif_set_link_up(struct netif *netif); +void netif_set_link_down(struct netif *netif); +/** Ask if a link is up */ +#define netif_is_link_up(netif) (((netif)->flags & NETIF_FLAG_LINK_UP) ? (u8_t)1 : (u8_t)0) + +#if LWIP_NETIF_LINK_CALLBACK +void netif_set_link_callback(struct netif *netif, netif_status_callback_fn link_callback); +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +#if LWIP_NETIF_HOSTNAME +/** @ingroup netif */ +#define netif_set_hostname(netif, name) do { if((netif) != NULL) { (netif)->hostname = name; }}while(0) +/** @ingroup netif */ +#define netif_get_hostname(netif) (((netif) != NULL) ? ((netif)->hostname) : NULL) +#endif /* LWIP_NETIF_HOSTNAME */ + +#if LWIP_IGMP +/** @ingroup netif */ +#define netif_set_igmp_mac_filter(netif, function) do { if((netif) != NULL) { (netif)->igmp_mac_filter = function; }}while(0) +#define netif_get_igmp_mac_filter(netif) (((netif) != NULL) ? ((netif)->igmp_mac_filter) : NULL) +#endif /* LWIP_IGMP */ + +#if LWIP_IPV6 && LWIP_IPV6_MLD +/** @ingroup netif */ +#define netif_set_mld_mac_filter(netif, function) do { if((netif) != NULL) { (netif)->mld_mac_filter = function; }}while(0) +#define netif_get_mld_mac_filter(netif) (((netif) != NULL) ? 
((netif)->mld_mac_filter) : NULL) +#define netif_mld_mac_filter(netif, addr, action) do { if((netif) && (netif)->mld_mac_filter) { (netif)->mld_mac_filter((netif), (addr), (action)); }}while(0) +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + +#if ENABLE_LOOPBACK +err_t netif_loop_output(struct netif *netif, struct pbuf *p); +void netif_poll(struct netif *netif); +#if !LWIP_NETIF_LOOPBACK_MULTITHREADING +void netif_poll_all(void); +#endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */ +#endif /* ENABLE_LOOPBACK */ + +err_t netif_input(struct pbuf *p, struct netif *inp); + +#if LWIP_IPV6 +/** @ingroup netif_ip6 */ +#define netif_ip_addr6(netif, i) ((const ip_addr_t*)(&((netif)->ip6_addr[i]))) +/** @ingroup netif_ip6 */ +#define netif_ip6_addr(netif, i) ((const ip6_addr_t*)ip_2_ip6(&((netif)->ip6_addr[i]))) +void netif_ip6_addr_set(struct netif *netif, s8_t addr_idx, const ip6_addr_t *addr6); +void netif_ip6_addr_set_parts(struct netif *netif, s8_t addr_idx, u32_t i0, u32_t i1, u32_t i2, u32_t i3); +#define netif_ip6_addr_state(netif, i) ((netif)->ip6_addr_state[i]) +void netif_ip6_addr_set_state(struct netif* netif, s8_t addr_idx, u8_t state); +s8_t netif_get_ip6_addr_match(struct netif *netif, const ip6_addr_t *ip6addr); +void netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit); +err_t netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chosen_idx); +#define netif_set_ip6_autoconfig_enabled(netif, action) do { if(netif) { (netif)->ip6_autoconfig_enabled = (action); }}while(0) +#endif /* LWIP_IPV6 */ + +#if LWIP_NETIF_HWADDRHINT +#define NETIF_SET_HWADDRHINT(netif, hint) ((netif)->addr_hint = (hint)) +#else /* LWIP_NETIF_HWADDRHINT */ +#define NETIF_SET_HWADDRHINT(netif, hint) +#endif /* LWIP_NETIF_HWADDRHINT */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h new file mode 100644 index 000000000..6c1b56bd7 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h @@ -0,0 +1,140 @@ +/** + * @file + * netif API (to be used from non-TCPIP threads) + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ +#ifndef LWIP_HDR_NETIFAPI_H +#define LWIP_HDR_NETIFAPI_H + +#include "lwip/opt.h" + +#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/sys.h" +#include "lwip/netif.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/priv/tcpip_priv.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_MPU_COMPATIBLE +#define NETIFAPI_IPADDR_DEF(type, m) type m +#else /* LWIP_MPU_COMPATIBLE */ +#define NETIFAPI_IPADDR_DEF(type, m) const type * m +#endif /* LWIP_MPU_COMPATIBLE */ + +typedef void (*netifapi_void_fn)(struct netif *netif); +typedef err_t (*netifapi_errt_fn)(struct netif *netif); + +struct netifapi_msg { + struct tcpip_api_call_data call; + struct netif *netif; + union { + struct { +#if LWIP_IPV4 + NETIFAPI_IPADDR_DEF(ip4_addr_t, ipaddr); + NETIFAPI_IPADDR_DEF(ip4_addr_t, netmask); + NETIFAPI_IPADDR_DEF(ip4_addr_t, gw); +#endif /* LWIP_IPV4 */ + void *state; + netif_init_fn init; + netif_input_fn input; + } add; + struct { + netifapi_void_fn voidfunc; + netifapi_errt_fn errtfunc; + } common; + } msg; +}; + + +/* API for application */ +err_t netifapi_netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input); + +#if LWIP_IPV4 +err_t netifapi_netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, + const ip4_addr_t *netmask, const ip4_addr_t *gw); +#endif /* LWIP_IPV4*/ + +err_t netifapi_netif_common(struct netif *netif, netifapi_void_fn voidfunc, + netifapi_errt_fn errtfunc); + +/** @ingroup netifapi_netif */ +#define netifapi_netif_remove(n) netifapi_netif_common(n, netif_remove, NULL) +/** @ingroup netifapi_netif */ +#define netifapi_netif_set_up(n) netifapi_netif_common(n, netif_set_up, NULL) +/** @ingroup netifapi_netif */ +#define netifapi_netif_set_down(n) netifapi_netif_common(n, netif_set_down, NULL) +/** @ingroup netifapi_netif */ +#define netifapi_netif_set_default(n) netifapi_netif_common(n, netif_set_default, NULL) +/** @ingroup netifapi_netif */ +#define netifapi_netif_set_link_up(n) netifapi_netif_common(n, netif_set_link_up, NULL) +/** @ingroup netifapi_netif */ +#define netifapi_netif_set_link_down(n) netifapi_netif_common(n, netif_set_link_down, NULL) + +/** + * @defgroup netifapi_dhcp4 DHCPv4 + * @ingroup netifapi + * To be called from non-TCPIP threads + */ +/** @ingroup netifapi_dhcp4 */ +#define netifapi_dhcp_start(n) netifapi_netif_common(n, NULL, dhcp_start) +/** @ingroup netifapi_dhcp4 */ +#define netifapi_dhcp_stop(n) netifapi_netif_common(n, dhcp_stop, NULL) +/** @ingroup netifapi_dhcp4 */ +#define netifapi_dhcp_inform(n) netifapi_netif_common(n, dhcp_inform, NULL) +/** @ingroup netifapi_dhcp4 */ +#define netifapi_dhcp_renew(n) netifapi_netif_common(n, NULL, dhcp_renew) +/** @ingroup netifapi_dhcp4 */ +#define netifapi_dhcp_release(n) netifapi_netif_common(n, NULL, dhcp_release) + +/** + 
* @defgroup netifapi_autoip AUTOIP + * @ingroup netifapi + * To be called from non-TCPIP threads + */ +/** @ingroup netifapi_autoip */ +#define netifapi_autoip_start(n) netifapi_netif_common(n, NULL, autoip_start) +/** @ingroup netifapi_autoip */ +#define netifapi_autoip_stop(n) netifapi_netif_common(n, NULL, autoip_stop) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETIF_API */ + +#endif /* LWIP_HDR_NETIFAPI_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/opt.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/opt.h new file mode 100644 index 000000000..c04dd5bc9 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/opt.h @@ -0,0 +1,2876 @@ +/** + * @file + * + * lwIP Options Configuration + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* + * NOTE: || defined __DOXYGEN__ is a workaround for doxygen bug - + * without this, doxygen does not see the actual #define + */ + +#if !defined LWIP_HDR_OPT_H +#define LWIP_HDR_OPT_H + +/* + * Include user defined options first. Anything not defined in these files + * will be set to standard values. Override anything you don't like! + */ +#include "lwipopts.h" +#include "lwip/debug.h" + +/** + * @defgroup lwip_opts Options (lwipopts.h) + * @ingroup lwip + * + * @defgroup lwip_opts_debug Debugging + * @ingroup lwip_opts + * + * @defgroup lwip_opts_infrastructure Infrastructure + * @ingroup lwip_opts + * + * @defgroup lwip_opts_callback Callback-style APIs + * @ingroup lwip_opts + * + * @defgroup lwip_opts_threadsafe_apis Thread-safe APIs + * @ingroup lwip_opts + */ + + /* + ------------------------------------ + -------------- NO SYS -------------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_nosys NO_SYS + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * NO_SYS==1: Use lwIP without OS-awareness (no thread, semaphores, mutexes or + * mboxes). 
This means threaded APIs cannot be used (socket, netconn, + * i.e. everything in the 'api' folder), only the callback-style raw API is + * available (and you have to watch out for yourself that you don't access + * lwIP functions/structures from more than one context at a time!) + */ +#if !defined NO_SYS || defined __DOXYGEN__ +#define NO_SYS 0 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_timers Timers + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_TIMERS==0: Drop support for sys_timeout and lwip-internal cyclic timers. + * (the array of lwip-internal cyclic timers is still provided) + * (check NO_SYS_NO_TIMERS for compatibility to old versions) + */ +#if !defined LWIP_TIMERS || defined __DOXYGEN__ +#ifdef NO_SYS_NO_TIMERS +#define LWIP_TIMERS (!NO_SYS || (NO_SYS && !NO_SYS_NO_TIMERS)) +#else +#define LWIP_TIMERS 1 +#endif +#endif + +/** + * LWIP_TIMERS_CUSTOM==1: Provide your own timer implementation. + * Function prototypes in timeouts.h and the array of lwip-internal cyclic timers + * are still included, but the implementation is not. The following functions + * will be required: sys_timeouts_init(), sys_timeout(), sys_untimeout(), + * sys_timeouts_mbox_fetch() + */ +#if !defined LWIP_TIMERS_CUSTOM || defined __DOXYGEN__ +#define LWIP_TIMERS_CUSTOM 0 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_memcpy memcpy + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * MEMCPY: override this if you have a faster implementation at hand than the + * one included in your C library + */ +#if !defined MEMCPY || defined __DOXYGEN__ +#define MEMCPY(dst,src,len) memcpy(dst,src,len) +#endif + +/** + * SMEMCPY: override this with care! Some compilers (e.g. gcc) can inline a + * call to memcpy() if the length is known at compile time and is small. + */ +#if !defined SMEMCPY || defined __DOXYGEN__ +#define SMEMCPY(dst,src,len) memcpy(dst,src,len) +#endif +/** + * @} + */ + +/* + ------------------------------------ + ----------- Core locking ----------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_lock Core locking and MPU + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_MPU_COMPATIBLE: enables special memory management mechanism + * which makes lwip able to work on MPU (Memory Protection Unit) system + * by not passing stack-pointers to other threads + * (this decreases performance as memory is allocated from pools instead + * of keeping it on the stack) + */ +#if !defined LWIP_MPU_COMPATIBLE || defined __DOXYGEN__ +#define LWIP_MPU_COMPATIBLE 0 +#endif + +/** + * LWIP_TCPIP_CORE_LOCKING + * Creates a global mutex that is held during TCPIP thread operations. + * Can be locked by client code to perform lwIP operations without changing + * into TCPIP thread using callbacks. See LOCK_TCPIP_CORE() and + * UNLOCK_TCPIP_CORE(). + * Your system should provide mutexes supporting priority inversion to use this. + */ +#if !defined LWIP_TCPIP_CORE_LOCKING || defined __DOXYGEN__ +#define LWIP_TCPIP_CORE_LOCKING 1 +#endif + +/** + * LWIP_TCPIP_CORE_LOCKING_INPUT: when LWIP_TCPIP_CORE_LOCKING is enabled, + * this lets tcpip_input() grab the mutex for input packets as well, + * instead of allocating a message and passing it to tcpip_thread. + * + * ATTENTION: this does not work when tcpip_input() is called from + * interrupt context! 
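+ *
+ * Example of core locking from client code (an illustrative sketch assuming
+ * LWIP_TCPIP_CORE_LOCKING==1; 'pcb' is a placeholder for a pcb created
+ * elsewhere):
+ *
+ *   LOCK_TCPIP_CORE();
+ *   tcp_abort(pcb);      // raw-API calls are safe while the core is locked
+ *   UNLOCK_TCPIP_CORE();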
+ */ +#if !defined LWIP_TCPIP_CORE_LOCKING_INPUT || defined __DOXYGEN__ +#define LWIP_TCPIP_CORE_LOCKING_INPUT 0 +#endif + +/** + * SYS_LIGHTWEIGHT_PROT==1: enable inter-task protection (and task-vs-interrupt + * protection) for certain critical regions during buffer allocation, deallocation + * and memory allocation and deallocation. + * ATTENTION: This is required when using lwIP from more than one context! If + * you disable this, you must be sure what you are doing! + */ +#if !defined SYS_LIGHTWEIGHT_PROT || defined __DOXYGEN__ +#define SYS_LIGHTWEIGHT_PROT 1 +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- Memory options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_mem Heap and memory pools + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * MEM_LIBC_MALLOC==1: Use malloc/free/realloc provided by your C-library + * instead of the lwip internal allocator. Can save code size if you + * already use it. + */ +#if !defined MEM_LIBC_MALLOC || defined __DOXYGEN__ +#define MEM_LIBC_MALLOC 0 +#endif + +/** + * MEMP_MEM_MALLOC==1: Use mem_malloc/mem_free instead of the lwip pool allocator. + * Especially useful with MEM_LIBC_MALLOC but handle with care regarding execution + * speed (heap alloc can be much slower than pool alloc) and usage from interrupts + * (especially if your netif driver allocates PBUF_POOL pbufs for received frames + * from interrupt)! + * ATTENTION: Currently, this uses the heap for ALL pools (also for private pools, + * not only for internal pools defined in memp_std.h)! + */ +#if !defined MEMP_MEM_MALLOC || defined __DOXYGEN__ +#define MEMP_MEM_MALLOC 0 +#endif + +/** + * MEM_ALIGNMENT: should be set to the alignment of the CPU + * 4 byte alignment -> \#define MEM_ALIGNMENT 4 + * 2 byte alignment -> \#define MEM_ALIGNMENT 2 + */ +#if !defined MEM_ALIGNMENT || defined __DOXYGEN__ +#define MEM_ALIGNMENT 1 +#endif + +/** + * MEM_SIZE: the size of the heap memory. If the application will send + * a lot of data that needs to be copied, this should be set high. + */ +#if !defined MEM_SIZE || defined __DOXYGEN__ +#define MEM_SIZE 1600 +#endif + +/** + * MEMP_OVERFLOW_CHECK: memp overflow protection reserves a configurable + * amount of bytes before and after each memp element in every pool and fills + * it with a prominent default value. + * MEMP_OVERFLOW_CHECK == 0 no checking + * MEMP_OVERFLOW_CHECK == 1 checks each element when it is freed + * MEMP_OVERFLOW_CHECK >= 2 checks each element in every pool every time + * memp_malloc() or memp_free() is called (useful but slow!) + */ +#if !defined MEMP_OVERFLOW_CHECK || defined __DOXYGEN__ +#define MEMP_OVERFLOW_CHECK 0 +#endif + +/** + * MEMP_SANITY_CHECK==1: run a sanity check after each memp_free() to make + * sure that there are no cycles in the linked lists. + */ +#if !defined MEMP_SANITY_CHECK || defined __DOXYGEN__ +#define MEMP_SANITY_CHECK 0 +#endif + +/** + * MEM_USE_POOLS==1: Use an alternative to malloc() by allocating from a set + * of memory pools of various sizes. When mem_malloc is called, an element of + * the smallest pool that can provide the length needed is returned. + * To use this, MEMP_USE_CUSTOM_POOLS also has to be enabled. + */ +#if !defined MEM_USE_POOLS || defined __DOXYGEN__ +#define MEM_USE_POOLS 0 +#endif + +/** + * MEM_USE_POOLS_TRY_BIGGER_POOL==1: if one malloc-pool is empty, try the next + * bigger pool - WARNING: THIS MIGHT WASTE MEMORY but it can make a system more + * reliable. 
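+ *
+ * Example lwipopts.h fragment for malloc-from-pools (an illustrative sketch;
+ * the pool layout itself comes from a port-supplied lwippools.h):
+ *
+ *   #define MEM_USE_POOLS                 1
+ *   #define MEM_USE_POOLS_TRY_BIGGER_POOL 1
+ *   #define MEMP_USE_CUSTOM_POOLS         1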
*/ +#if !defined MEM_USE_POOLS_TRY_BIGGER_POOL || defined __DOXYGEN__ +#define MEM_USE_POOLS_TRY_BIGGER_POOL 0 +#endif + +/** + * MEMP_USE_CUSTOM_POOLS==1: whether to include a user file lwippools.h + * that defines additional pools beyond the "standard" ones required + * by lwIP. If you set this to 1, you must have lwippools.h in your + * include path somewhere. + */ +#if !defined MEMP_USE_CUSTOM_POOLS || defined __DOXYGEN__ +#define MEMP_USE_CUSTOM_POOLS 0 +#endif + +/** + * Set this to 1 if you want to free PBUF_RAM pbufs (or call mem_free()) from + * interrupt context (or another context that doesn't allow waiting for a + * semaphore). + * If set to 1, mem_malloc will be protected by a semaphore and SYS_ARCH_PROTECT, + * while mem_free will only use SYS_ARCH_PROTECT. mem_malloc SYS_ARCH_UNPROTECTs + * with each loop so that mem_free can run. + * + * ATTENTION: As you can see from the above description, this leads to dis-/ + * enabling interrupts often, which can be slow! Also, on low memory, mem_malloc + * can need longer. + * + * If you don't want that, at least for NO_SYS=0, you can still use the following + * functions to enqueue a deallocation call which then runs in the tcpip_thread + * context: + * - pbuf_free_callback(p); + * - mem_free_callback(m); + */ +#if !defined LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT || defined __DOXYGEN__ +#define LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT 0 +#endif +/** + * @} + */ + +/* + ------------------------------------------------ + ---------- Internal Memory Pool Sizes ---------- + ------------------------------------------------ +*/ +/** + * @defgroup lwip_opts_memp Internal memory pools + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * MEMP_NUM_PBUF: the number of memp struct pbufs (used for PBUF_ROM and PBUF_REF). + * If the application sends a lot of data out of ROM (or other static memory), + * this should be set high. + */ +#if !defined MEMP_NUM_PBUF || defined __DOXYGEN__ +#define MEMP_NUM_PBUF 16 +#endif + +/** + * MEMP_NUM_RAW_PCB: Number of raw connection PCBs + * (requires the LWIP_RAW option) + */ +#if !defined MEMP_NUM_RAW_PCB || defined __DOXYGEN__ +#define MEMP_NUM_RAW_PCB 4 +#endif + +/** + * MEMP_NUM_UDP_PCB: the number of UDP protocol control blocks. One + * per active UDP "connection". + * (requires the LWIP_UDP option) + */ +#if !defined MEMP_NUM_UDP_PCB || defined __DOXYGEN__ +#define MEMP_NUM_UDP_PCB 4 +#endif + +/** + * MEMP_NUM_TCP_PCB: the number of simultaneously active TCP connections. + * (requires the LWIP_TCP option) + */ +#if !defined MEMP_NUM_TCP_PCB || defined __DOXYGEN__ +#define MEMP_NUM_TCP_PCB 5 +#endif + +/** + * MEMP_NUM_TCP_PCB_LISTEN: the number of listening TCP connections. + * (requires the LWIP_TCP option) + */ +#if !defined MEMP_NUM_TCP_PCB_LISTEN || defined __DOXYGEN__ +#define MEMP_NUM_TCP_PCB_LISTEN 8 +#endif + +/** + * MEMP_NUM_TCP_SEG: the number of simultaneously queued TCP segments. + * (requires the LWIP_TCP option) + */ +#if !defined MEMP_NUM_TCP_SEG || defined __DOXYGEN__ +#define MEMP_NUM_TCP_SEG 16 +#endif + +/** + * MEMP_NUM_REASSDATA: the number of IP packets simultaneously queued for + * reassembly (whole packets, not fragments!) + */ +#if !defined MEMP_NUM_REASSDATA || defined __DOXYGEN__ +#define MEMP_NUM_REASSDATA 5 +#endif + +/** + * MEMP_NUM_FRAG_PBUF: the number of IP fragments simultaneously sent + * (fragments, not whole packets!). 
+ * This is only used with LWIP_NETIF_TX_SINGLE_PBUF==0 and only has to be > 1 + * with DMA-enabled MACs where the packet is not yet sent when netif->output + * returns. + */ +#if !defined MEMP_NUM_FRAG_PBUF || defined __DOXYGEN__ +#define MEMP_NUM_FRAG_PBUF 15 +#endif + +/** + * MEMP_NUM_ARP_QUEUE: the number of simultaneously queued outgoing + * packets (pbufs) that are waiting for an ARP request (to resolve + * their destination address) to finish. + * (requires the ARP_QUEUEING option) + */ +#if !defined MEMP_NUM_ARP_QUEUE || defined __DOXYGEN__ +#define MEMP_NUM_ARP_QUEUE 30 +#endif + +/** + * MEMP_NUM_IGMP_GROUP: The number of multicast groups whose network interfaces + * can be members at the same time (one per netif - allsystems group -, plus one + * per netif membership). + * (requires the LWIP_IGMP option) + */ +#if !defined MEMP_NUM_IGMP_GROUP || defined __DOXYGEN__ +#define MEMP_NUM_IGMP_GROUP 8 +#endif + +/** + * MEMP_NUM_SYS_TIMEOUT: the number of simultaneously active timeouts. + * The default number of timeouts is calculated here for all enabled modules. + * The formula expects settings to be either '0' or '1'. + */ +#if !defined MEMP_NUM_SYS_TIMEOUT || defined __DOXYGEN__ +#define MEMP_NUM_SYS_TIMEOUT (LWIP_TCP + IP_REASSEMBLY + LWIP_ARP + (2*LWIP_DHCP) + LWIP_AUTOIP + LWIP_IGMP + LWIP_DNS + (PPP_SUPPORT*6*MEMP_NUM_PPP_PCB) + (LWIP_IPV6 ? (1 + LWIP_IPV6_REASS + LWIP_IPV6_MLD) : 0)) +#endif + +/** + * MEMP_NUM_NETBUF: the number of struct netbufs. + * (only needed if you use the sequential API, like api_lib.c) + */ +#if !defined MEMP_NUM_NETBUF || defined __DOXYGEN__ +#define MEMP_NUM_NETBUF 2 +#endif + +/** + * MEMP_NUM_NETCONN: the number of struct netconns. + * (only needed if you use the sequential API, like api_lib.c) + */ +#if !defined MEMP_NUM_NETCONN || defined __DOXYGEN__ +#define MEMP_NUM_NETCONN 4 +#endif + +/** + * MEMP_NUM_TCPIP_MSG_API: the number of struct tcpip_msg, which are used + * for callback/timeout API communication. + * (only needed if you use tcpip.c) + */ +#if !defined MEMP_NUM_TCPIP_MSG_API || defined __DOXYGEN__ +#define MEMP_NUM_TCPIP_MSG_API 8 +#endif + +/** + * MEMP_NUM_TCPIP_MSG_INPKT: the number of struct tcpip_msg, which are used + * for incoming packets. + * (only needed if you use tcpip.c) + */ +#if !defined MEMP_NUM_TCPIP_MSG_INPKT || defined __DOXYGEN__ +#define MEMP_NUM_TCPIP_MSG_INPKT 8 +#endif + +/** + * MEMP_NUM_NETDB: the number of concurrently running lwip_addrinfo() calls + * (before freeing the corresponding memory using lwip_freeaddrinfo()). + */ +#if !defined MEMP_NUM_NETDB || defined __DOXYGEN__ +#define MEMP_NUM_NETDB 1 +#endif + +/** + * MEMP_NUM_LOCALHOSTLIST: the number of host entries in the local host list + * if DNS_LOCAL_HOSTLIST_IS_DYNAMIC==1. + */ +#if !defined MEMP_NUM_LOCALHOSTLIST || defined __DOXYGEN__ +#define MEMP_NUM_LOCALHOSTLIST 1 +#endif + +/** + * PBUF_POOL_SIZE: the number of buffers in the pbuf pool. 
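+ * PBUF_POOL pbufs are typically allocated by netif drivers for received
+ * frames, e.g. (an illustrative sketch; frame_data/frame_len are
+ * placeholders):
+ *
+ *   struct pbuf *p = pbuf_alloc(PBUF_RAW, frame_len, PBUF_POOL);
+ *   if (p != NULL) {
+ *     pbuf_take(p, frame_data, frame_len);  // copy the frame into the chain
+ *     // hand p to netif->input(); on error, pbuf_free(p)
+ *   }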
+ */ +#if !defined PBUF_POOL_SIZE || defined __DOXYGEN__ +#define PBUF_POOL_SIZE 16 +#endif + +/** MEMP_NUM_API_MSG: the number of concurrently active calls to various + * socket, netconn, and tcpip functions + */ +#if !defined MEMP_NUM_API_MSG || defined __DOXYGEN__ +#define MEMP_NUM_API_MSG MEMP_NUM_TCPIP_MSG_API +#endif + +/** MEMP_NUM_DNS_API_MSG: the number of concurrently active calls to netconn_gethostbyname + */ +#if !defined MEMP_NUM_DNS_API_MSG || defined __DOXYGEN__ +#define MEMP_NUM_DNS_API_MSG MEMP_NUM_TCPIP_MSG_API +#endif + +/** MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA: the number of concurrently active calls + * to getsockopt/setsockopt + */ +#if !defined MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA || defined __DOXYGEN__ +#define MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA MEMP_NUM_TCPIP_MSG_API +#endif + +/** MEMP_NUM_NETIFAPI_MSG: the number of concurrently active calls to the + * netifapi functions + */ +#if !defined MEMP_NUM_NETIFAPI_MSG || defined __DOXYGEN__ +#define MEMP_NUM_NETIFAPI_MSG MEMP_NUM_TCPIP_MSG_API +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- ARP options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_arp ARP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_ARP==1: Enable ARP functionality. + */ +#if !defined LWIP_ARP || defined __DOXYGEN__ +#define LWIP_ARP 1 +#endif + +/** + * ARP_TABLE_SIZE: Number of active MAC-IP address pairs cached. + */ +#if !defined ARP_TABLE_SIZE || defined __DOXYGEN__ +#define ARP_TABLE_SIZE 10 +#endif + +/** the time an ARP entry stays valid after its last update, + * for ARP_TMR_INTERVAL = 1000, this is + * (60 * 5) seconds = 5 minutes. + */ +#if !defined ARP_MAXAGE || defined __DOXYGEN__ +#define ARP_MAXAGE 300 +#endif + +/** + * ARP_QUEUEING==1: Multiple outgoing packets are queued during hardware address + * resolution. By default, only the most recent packet is queued per IP address. + * This is sufficient for most protocols and mainly reduces TCP connection + * startup time. Set this to 1 if you know your application sends more than one + * packet in a row to an IP address that is not in the ARP cache. + */ +#if !defined ARP_QUEUEING || defined __DOXYGEN__ +#define ARP_QUEUEING 0 +#endif + +/** The maximum number of packets which may be queued for each + * unresolved address by other network layers. Defaults to 3, 0 means disabled. + * Old packets are dropped, new packets are queued. + */ +#if !defined ARP_QUEUE_LEN || defined __DOXYGEN__ +#define ARP_QUEUE_LEN 3 +#endif + +/** + * ETHARP_SUPPORT_VLAN==1: support receiving and sending ethernet packets with + * VLAN header. See the description of LWIP_HOOK_VLAN_CHECK and + * LWIP_HOOK_VLAN_SET hooks to check/set VLAN headers. + * Additionally, you can define ETHARP_VLAN_CHECK to an u16_t VLAN ID to check. + * If ETHARP_VLAN_CHECK is defined, only VLAN-traffic for this VLAN is accepted. + * If ETHARP_VLAN_CHECK is not defined, all traffic is accepted. + * Alternatively, define a function/define ETHARP_VLAN_CHECK_FN(eth_hdr, vlan) + * that returns 1 to accept a packet or 0 to drop a packet. + */ +#if !defined ETHARP_SUPPORT_VLAN || defined __DOXYGEN__ +#define ETHARP_SUPPORT_VLAN 0 +#endif + +/** LWIP_ETHERNET==1: enable ethernet support even though ARP might be disabled + */ +#if !defined LWIP_ETHERNET || defined __DOXYGEN__ +#define LWIP_ETHERNET LWIP_ARP +#endif + +/** ETH_PAD_SIZE: number of bytes added before the ethernet header to ensure + * alignment of payload after that header. 
Since the header is 14 bytes long, + * without this padding e.g. addresses in the IP header will not be aligned + * on a 32-bit boundary, so setting this to 2 can speed up 32-bit-platforms. + */ +#if !defined ETH_PAD_SIZE || defined __DOXYGEN__ +#define ETH_PAD_SIZE 0 +#endif + +/** ETHARP_SUPPORT_STATIC_ENTRIES==1: enable code to support static ARP table + * entries (using etharp_add_static_entry/etharp_remove_static_entry). + */ +#if !defined ETHARP_SUPPORT_STATIC_ENTRIES || defined __DOXYGEN__ +#define ETHARP_SUPPORT_STATIC_ENTRIES 0 +#endif + +/** ETHARP_TABLE_MATCH_NETIF==1: Match netif for ARP table entries. + * If disabled, duplicate IP address on multiple netifs are not supported + * (but this should only occur for AutoIP). + */ +#if !defined ETHARP_TABLE_MATCH_NETIF || defined __DOXYGEN__ +#define ETHARP_TABLE_MATCH_NETIF 0 +#endif +/** + * @} + */ + +/* + -------------------------------- + ---------- IP options ---------- + -------------------------------- +*/ +/** + * @defgroup lwip_opts_ipv4 IPv4 + * @ingroup lwip_opts + * @{ + */ +/** + * LWIP_IPV4==1: Enable IPv4 + */ +#if !defined LWIP_IPV4 || defined __DOXYGEN__ +#define LWIP_IPV4 1 +#endif + +/** + * IP_FORWARD==1: Enables the ability to forward IP packets across network + * interfaces. If you are going to run lwIP on a device with only one network + * interface, define this to 0. + */ +#if !defined IP_FORWARD || defined __DOXYGEN__ +#define IP_FORWARD 0 +#endif + +/** + * IP_REASSEMBLY==1: Reassemble incoming fragmented IP packets. Note that + * this option does not affect outgoing packet sizes, which can be controlled + * via IP_FRAG. + */ +#if !defined IP_REASSEMBLY || defined __DOXYGEN__ +#define IP_REASSEMBLY 1 +#endif + +/** + * IP_FRAG==1: Fragment outgoing IP packets if their size exceeds MTU. Note + * that this option does not affect incoming packet sizes, which can be + * controlled via IP_REASSEMBLY. + */ +#if !defined IP_FRAG || defined __DOXYGEN__ +#define IP_FRAG 1 +#endif + +#if !LWIP_IPV4 +/* disable IPv4 extensions when IPv4 is disabled */ +#undef IP_FORWARD +#define IP_FORWARD 0 +#undef IP_REASSEMBLY +#define IP_REASSEMBLY 0 +#undef IP_FRAG +#define IP_FRAG 0 +#endif /* !LWIP_IPV4 */ + +/** + * IP_OPTIONS_ALLOWED: Defines the behavior for IP options. + * IP_OPTIONS_ALLOWED==0: All packets with IP options are dropped. + * IP_OPTIONS_ALLOWED==1: IP options are allowed (but not parsed). + */ +#if !defined IP_OPTIONS_ALLOWED || defined __DOXYGEN__ +#define IP_OPTIONS_ALLOWED 1 +#endif + +/** + * IP_REASS_MAXAGE: Maximum time (in multiples of IP_TMR_INTERVAL - so seconds, normally) + * a fragmented IP packet waits for all fragments to arrive. If not all fragments arrived + * in this time, the whole packet is discarded. + */ +#if !defined IP_REASS_MAXAGE || defined __DOXYGEN__ +#define IP_REASS_MAXAGE 3 +#endif + +/** + * IP_REASS_MAX_PBUFS: Total maximum amount of pbufs waiting to be reassembled. + * Since the received pbufs are enqueued, be sure to configure + * PBUF_POOL_SIZE > IP_REASS_MAX_PBUFS so that the stack is still able to receive + * packets even if the maximum amount of fragments is enqueued for reassembly! + */ +#if !defined IP_REASS_MAX_PBUFS || defined __DOXYGEN__ +#define IP_REASS_MAX_PBUFS 10 +#endif + +/** + * IP_DEFAULT_TTL: Default value for Time-To-Live used by transport layers. 
+ */
+#if !defined IP_DEFAULT_TTL || defined __DOXYGEN__
+#define IP_DEFAULT_TTL 255
+#endif
+
+/**
+ * IP_SOF_BROADCAST=1: Use the SOF_BROADCAST field to enable broadcast
+ * filter per pcb on udp and raw send operations. To enable broadcast filter
+ * on recv operations, you also have to set IP_SOF_BROADCAST_RECV=1.
+ */
+#if !defined IP_SOF_BROADCAST || defined __DOXYGEN__
+#define IP_SOF_BROADCAST 0
+#endif
+
+/**
+ * IP_SOF_BROADCAST_RECV (requires IP_SOF_BROADCAST=1) enables the broadcast
+ * filter on recv operations.
+ */
+#if !defined IP_SOF_BROADCAST_RECV || defined __DOXYGEN__
+#define IP_SOF_BROADCAST_RECV 0
+#endif
+
+/**
+ * IP_FORWARD_ALLOW_TX_ON_RX_NETIF==1: allow ip_forward() to send packets back
+ * out on the netif where it was received. This should only be used for
+ * wireless networks.
+ * ATTENTION: When this is 1, make sure your netif driver correctly marks incoming
+ * link-layer-broadcast/multicast packets as such using the corresponding pbuf flags!
+ */
+#if !defined IP_FORWARD_ALLOW_TX_ON_RX_NETIF || defined __DOXYGEN__
+#define IP_FORWARD_ALLOW_TX_ON_RX_NETIF 0
+#endif
+
+/**
+ * LWIP_RANDOMIZE_INITIAL_LOCAL_PORTS==1: randomize the local port for the first
+ * local TCP/UDP pcb (default==0). This can prevent creating predictable port
+ * numbers after booting a device.
+ */
+#if !defined LWIP_RANDOMIZE_INITIAL_LOCAL_PORTS || defined __DOXYGEN__
+#define LWIP_RANDOMIZE_INITIAL_LOCAL_PORTS 0
+#endif
+/**
+ * @}
+ */
+
+/*
+ ----------------------------------
+ ---------- ICMP options ----------
+ ----------------------------------
+*/
+/**
+ * @defgroup lwip_opts_icmp ICMP
+ * @ingroup lwip_opts_ipv4
+ * @{
+ */
+/**
+ * LWIP_ICMP==1: Enable ICMP module inside the IP stack.
+ * Be careful: disabling this makes your product non-compliant with RFC 1122.
+ */
+#if !defined LWIP_ICMP || defined __DOXYGEN__
+#define LWIP_ICMP 1
+#endif
+
+/**
+ * ICMP_TTL: Default value for Time-To-Live used by ICMP packets.
+ */
+#if !defined ICMP_TTL || defined __DOXYGEN__
+#define ICMP_TTL (IP_DEFAULT_TTL)
+#endif
+
+/**
+ * LWIP_BROADCAST_PING==1: respond to broadcast pings (default is unicast only)
+ */
+#if !defined LWIP_BROADCAST_PING || defined __DOXYGEN__
+#define LWIP_BROADCAST_PING 0
+#endif
+
+/**
+ * LWIP_MULTICAST_PING==1: respond to multicast pings (default is unicast only)
+ */
+#if !defined LWIP_MULTICAST_PING || defined __DOXYGEN__
+#define LWIP_MULTICAST_PING 0
+#endif
+/**
+ * @}
+ */
+
+/*
+ ---------------------------------
+ ---------- RAW options ----------
+ ---------------------------------
+*/
+/**
+ * @defgroup lwip_opts_raw RAW
+ * @ingroup lwip_opts_callback
+ * @{
+ */
+/**
+ * LWIP_RAW==1: Enable application layer to hook into the IP layer itself.
+ */
+#if !defined LWIP_RAW || defined __DOXYGEN__
+#define LWIP_RAW 0
+#endif
+
+/**
+ * RAW_TTL: Default value for Time-To-Live used by raw packets.
+ */
+#if !defined RAW_TTL || defined __DOXYGEN__
+#define RAW_TTL (IP_DEFAULT_TTL)
+#endif
+/**
+ * @}
+ */
+
+/*
+ ----------------------------------
+ ---------- DHCP options ----------
+ ----------------------------------
+*/
+/**
+ * @defgroup lwip_opts_dhcp DHCP
+ * @ingroup lwip_opts_ipv4
+ * @{
+ */
+/**
+ * LWIP_DHCP==1: Enable DHCP module.
+ */
+#if !defined LWIP_DHCP || defined __DOXYGEN__
+#define LWIP_DHCP 0
+#endif
+#if !LWIP_IPV4
+/* disable DHCP when IPv4 is disabled */
+#undef LWIP_DHCP
+#define LWIP_DHCP 0
+#endif /* !LWIP_IPV4 */
+
+/**
+ * DHCP_DOES_ARP_CHECK==1: Do an ARP check on the offered address.
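+ *
+ * Typical DHCP bring-up (an illustrative sketch; 'netif0' is a placeholder
+ * for an already-added netif, and the calls must come from the tcpip thread -
+ * from other threads use netifapi_dhcp_start()):
+ *
+ *   netif_set_up(&netif0);
+ *   dhcp_start(&netif0);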
+ */
+#if !defined DHCP_DOES_ARP_CHECK || defined __DOXYGEN__
+#define DHCP_DOES_ARP_CHECK ((LWIP_DHCP) && (LWIP_ARP))
+#endif
+
+/**
+ * LWIP_DHCP_CHECK_LINK_UP==1: dhcp_start() only really starts if the netif has
+ * NETIF_FLAG_LINK_UP set in its flags. As this is only an optimization and
+ * netif drivers might not set this flag, the default is off. If enabled,
+ * netif_set_link_up() must be called to continue dhcp starting.
+ */
+#if !defined LWIP_DHCP_CHECK_LINK_UP
+#define LWIP_DHCP_CHECK_LINK_UP 0
+#endif
+
+/**
+ * LWIP_DHCP_BOOTP_FILE==1: Store offered_si_addr and boot_file_name.
+ */
+#if !defined LWIP_DHCP_BOOTP_FILE || defined __DOXYGEN__
+#define LWIP_DHCP_BOOTP_FILE 0
+#endif
+
+/**
+ * LWIP_DHCP_GET_NTP_SRV==1: Request NTP servers with discover/select. For each
+ * response packet, a callback is called, which has to be provided by the port:
+ * void dhcp_set_ntp_servers(u8_t num_ntp_servers, ip_addr_t* ntp_server_addrs);
+*/
+#if !defined LWIP_DHCP_GET_NTP_SRV || defined __DOXYGEN__
+#define LWIP_DHCP_GET_NTP_SRV 0
+#endif
+
+/**
+ * The maximum number of NTP servers requested
+ */
+#if !defined LWIP_DHCP_MAX_NTP_SERVERS || defined __DOXYGEN__
+#define LWIP_DHCP_MAX_NTP_SERVERS 1
+#endif
+
+/**
+ * LWIP_DHCP_MAX_DNS_SERVERS > 0: Request DNS servers with discover/select.
+ * DNS servers received in the response are passed to DNS via @ref dns_setserver()
+ * (up to the maximum limit defined here).
+ */
+#if !defined LWIP_DHCP_MAX_DNS_SERVERS || defined __DOXYGEN__
+#define LWIP_DHCP_MAX_DNS_SERVERS DNS_MAX_SERVERS
+#endif
+/**
+ * @}
+ */
+
+/*
+ ------------------------------------
+ ---------- AUTOIP options ----------
+ ------------------------------------
+*/
+/**
+ * @defgroup lwip_opts_autoip AUTOIP
+ * @ingroup lwip_opts_ipv4
+ * @{
+ */
+/**
+ * LWIP_AUTOIP==1: Enable AUTOIP module.
+ */
+#if !defined LWIP_AUTOIP || defined __DOXYGEN__
+#define LWIP_AUTOIP 0
+#endif
+#if !LWIP_IPV4
+/* disable AUTOIP when IPv4 is disabled */
+#undef LWIP_AUTOIP
+#define LWIP_AUTOIP 0
+#endif /* !LWIP_IPV4 */
+
+/**
+ * LWIP_DHCP_AUTOIP_COOP==1: Allow DHCP and AUTOIP to be both enabled on
+ * the same interface at the same time.
+ */
+#if !defined LWIP_DHCP_AUTOIP_COOP || defined __DOXYGEN__
+#define LWIP_DHCP_AUTOIP_COOP 0
+#endif
+
+/**
+ * LWIP_DHCP_AUTOIP_COOP_TRIES: Set to the number of DHCP DISCOVER probes
+ * that should be sent before falling back on AUTOIP (the DHCP client keeps
+ * running in this case). This can be set as low as 1 to get an AutoIP address
+ * very quickly, but you should be prepared to handle a changing IP address
+ * when DHCP overrides AutoIP.
+ */
+#if !defined LWIP_DHCP_AUTOIP_COOP_TRIES || defined __DOXYGEN__
+#define LWIP_DHCP_AUTOIP_COOP_TRIES 9
+#endif
+/**
+ * @}
+ */
+
+/*
+ ----------------------------------
+ ----- SNMP MIB2 support -----
+ ----------------------------------
+*/
+/**
+ * @defgroup lwip_opts_mib2 SNMP MIB2 callbacks
+ * @ingroup lwip_opts_infrastructure
+ * @{
+ */
+/**
+ * LWIP_MIB2_CALLBACKS==1: Turn on SNMP MIB2 callbacks.
+ * Turn this on to get callbacks needed to implement MIB2.
+ * Usually MIB2_STATS should be enabled, too.
+ */
+#if !defined LWIP_MIB2_CALLBACKS || defined __DOXYGEN__
+#define LWIP_MIB2_CALLBACKS 0
+#endif
+/**
+ * @}
+ */
+
+/*
+ ----------------------------------
+ ----- Multicast/IGMP options -----
+ ----------------------------------
+*/
+/**
+ * @defgroup lwip_opts_igmp IGMP
+ * @ingroup lwip_opts_ipv4
+ * @{
+ */
+/**
+ * LWIP_IGMP==1: Turn on IGMP module.
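+ *
+ * With IGMP enabled, an application can join a multicast group, e.g.
+ * (an illustrative sketch; the group address is a placeholder):
+ *
+ *   ip4_addr_t group;
+ *   IP4_ADDR(&group, 239, 0, 0, 1);
+ *   igmp_joingroup(netif_ip4_addr(netif_default), &group);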
+ */
+#if !defined LWIP_IGMP || defined __DOXYGEN__
+#define LWIP_IGMP 0
+#endif
+#if !LWIP_IPV4
+#undef LWIP_IGMP
+#define LWIP_IGMP 0
+#endif
+
+/**
+ * LWIP_MULTICAST_TX_OPTIONS==1: Enable multicast TX support like the socket options
+ * IP_MULTICAST_TTL/IP_MULTICAST_IF/IP_MULTICAST_LOOP
+ */
+#if !defined LWIP_MULTICAST_TX_OPTIONS || defined __DOXYGEN__
+#define LWIP_MULTICAST_TX_OPTIONS (LWIP_IGMP && LWIP_UDP)
+#endif
+/**
+ * @}
+ */
+
+/*
+ ----------------------------------
+ ---------- DNS options -----------
+ ----------------------------------
+*/
+/**
+ * @defgroup lwip_opts_dns DNS
+ * @ingroup lwip_opts_callback
+ * @{
+ */
+/**
+ * LWIP_DNS==1: Turn on DNS module. UDP must be available for DNS
+ * transport.
+ */
+#if !defined LWIP_DNS || defined __DOXYGEN__
+#define LWIP_DNS 0
+#endif
+
+/** DNS maximum number of entries to maintain locally. */
+#if !defined DNS_TABLE_SIZE || defined __DOXYGEN__
+#define DNS_TABLE_SIZE 4
+#endif
+
+/** DNS maximum host name length supported in the name table. */
+#if !defined DNS_MAX_NAME_LENGTH || defined __DOXYGEN__
+#define DNS_MAX_NAME_LENGTH 256
+#endif
+
+/** The maximum number of DNS servers.
+ * The first server can be initialized automatically by defining
+ * DNS_SERVER_ADDRESS(ipaddr), where 'ipaddr' is an 'ip_addr_t*'
+ */
+#if !defined DNS_MAX_SERVERS || defined __DOXYGEN__
+#define DNS_MAX_SERVERS 2
+#endif
+
+/** DNS_DOES_NAME_CHECK==1: do a name check between the query and the response. */
+#if !defined DNS_DOES_NAME_CHECK || defined __DOXYGEN__
+#define DNS_DOES_NAME_CHECK 1
+#endif
+
+/** LWIP_DNS_SECURE: controls the security level of the DNS implementation
+ * Use all DNS security features by default.
+ * This is overridable but should only be needed by very small targets
+ * or when using against non standard DNS servers. */
+#if !defined LWIP_DNS_SECURE || defined __DOXYGEN__
+#define LWIP_DNS_SECURE (LWIP_DNS_SECURE_RAND_XID | LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT)
+#endif
+
+/* A list of DNS security features follows */
+#define LWIP_DNS_SECURE_RAND_XID 1
+#define LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING 2
+#define LWIP_DNS_SECURE_RAND_SRC_PORT 4
+
+/** DNS_LOCAL_HOSTLIST: Implements a local host-to-address list. If enabled, you have to define an initializer:
+ * \#define DNS_LOCAL_HOSTLIST_INIT {DNS_LOCAL_HOSTLIST_ELEM("host_ip4", IPADDR4_INIT_BYTES(1,2,3,4)), \
+ * DNS_LOCAL_HOSTLIST_ELEM("host_ip6", IPADDR6_INIT_HOST(123, 234, 345, 456))}
+ *
+ * Instead, you can also use an external function:
+ * \#define DNS_LOOKUP_LOCAL_EXTERN(x) extern err_t my_lookup_function(const char *name, ip_addr_t *addr, u8_t dns_addrtype)
+ * that looks up the IP address and returns ERR_OK if found (LWIP_DNS_ADDRTYPE_xxx is passed in dns_addrtype).
+ */
+#if !defined DNS_LOCAL_HOSTLIST || defined __DOXYGEN__
+#define DNS_LOCAL_HOSTLIST 0
+#endif /* DNS_LOCAL_HOSTLIST */
+
+/** If this is turned on, the local host-list can be dynamically changed
+ * at runtime.
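+ *
+ * For reference, a minimal lookup with the callback-style DNS API from dns.h
+ * (an illustrative sketch; dns_found() is a placeholder name):
+ *
+ *   static void dns_found(const char *name, const ip_addr_t *addr, void *arg)
+ *   {
+ *     // addr is NULL if the lookup failed
+ *   }
+ *
+ *   ip_addr_t resolved;
+ *   err_t err = dns_gethostbyname("example.org", &resolved, dns_found, NULL);
+ *   // ERR_OK: 'resolved' is already valid; ERR_INPROGRESS: wait for callback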
*/ +#if !defined DNS_LOCAL_HOSTLIST_IS_DYNAMIC || defined __DOXYGEN__ +#define DNS_LOCAL_HOSTLIST_IS_DYNAMIC 0 +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +/** Set this to 1 to enable querying ".local" names via mDNS + * using a One-Shot Multicast DNS Query */ +#if !defined LWIP_DNS_SUPPORT_MDNS_QUERIES || defined __DOXYGEN__ +#define LWIP_DNS_SUPPORT_MDNS_QUERIES 0 +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- UDP options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_udp UDP + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_UDP==1: Turn on UDP. + */ +#if !defined LWIP_UDP || defined __DOXYGEN__ +#define LWIP_UDP 1 +#endif + +/** + * LWIP_UDPLITE==1: Turn on UDP-Lite. (Requires LWIP_UDP) + */ +#if !defined LWIP_UDPLITE || defined __DOXYGEN__ +#define LWIP_UDPLITE 0 +#endif + +/** + * UDP_TTL: Default Time-To-Live value. + */ +#if !defined UDP_TTL || defined __DOXYGEN__ +#define UDP_TTL (IP_DEFAULT_TTL) +#endif + +/** + * LWIP_NETBUF_RECVINFO==1: append destination addr and port to every netbuf. + */ +#if !defined LWIP_NETBUF_RECVINFO || defined __DOXYGEN__ +#define LWIP_NETBUF_RECVINFO 0 +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- TCP options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_tcp TCP + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_TCP==1: Turn on TCP. + */ +#if !defined LWIP_TCP || defined __DOXYGEN__ +#define LWIP_TCP 1 +#endif + +/** + * TCP_TTL: Default Time-To-Live value. + */ +#if !defined TCP_TTL || defined __DOXYGEN__ +#define TCP_TTL (IP_DEFAULT_TTL) +#endif + +/** + * TCP_WND: The size of a TCP window. This must be at least + * (2 * TCP_MSS) for things to work well. + * ATTENTION: when using TCP_RCV_SCALE, TCP_WND is the total size + * with scaling applied. Maximum window value in the TCP header + * will be TCP_WND >> TCP_RCV_SCALE + */ +#if !defined TCP_WND || defined __DOXYGEN__ +#define TCP_WND (4 * TCP_MSS) +#endif + +/** + * TCP_MAXRTX: Maximum number of retransmissions of data segments. + */ +#if !defined TCP_MAXRTX || defined __DOXYGEN__ +#define TCP_MAXRTX 12 +#endif + +/** + * TCP_SYNMAXRTX: Maximum number of retransmissions of SYN segments. + */ +#if !defined TCP_SYNMAXRTX || defined __DOXYGEN__ +#define TCP_SYNMAXRTX 6 +#endif + +/** + * TCP_QUEUE_OOSEQ==1: TCP will queue segments that arrive out of order. + * Define to 0 if your device is low on memory. + */ +#if !defined TCP_QUEUE_OOSEQ || defined __DOXYGEN__ +#define TCP_QUEUE_OOSEQ (LWIP_TCP) +#endif + +/** + * TCP_MSS: TCP Maximum segment size. (default is 536, a conservative value; + * you might want to increase this.) + * For the receive side, this MSS is advertised to the remote side + * when opening a connection. For the transmit side, this MSS sets + * an upper limit on the MSS advertised by the remote host. + */ +#if !defined TCP_MSS || defined __DOXYGEN__ +#define TCP_MSS 536 +#endif + +/** + * TCP_CALCULATE_EFF_SEND_MSS: "The maximum size of a segment that TCP really + * sends, the 'effective send MSS,' MUST be the smaller of the send MSS (which + * reflects the available reassembly buffer size at the remote host) and the + * largest size permitted by the IP layer" (RFC 1122) + * Setting this to 1 enables code that checks TCP_MSS against the MTU of the + * netif used for a connection and limits the MSS if it would be too big otherwise. 
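+ * For example, on a typical Ethernet netif with an MTU of 1500 bytes, the + * effective send MSS is capped at 1500 - 20 (IP header) - 20 (TCP header) + * = 1460 bytes, even if TCP_MSS is configured larger.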
+ */ +#if !defined TCP_CALCULATE_EFF_SEND_MSS || defined __DOXYGEN__ +#define TCP_CALCULATE_EFF_SEND_MSS 1 +#endif + + +/** + * TCP_SND_BUF: TCP sender buffer space (bytes). + * To achieve good performance, this should be at least 2 * TCP_MSS. + */ +#if !defined TCP_SND_BUF || defined __DOXYGEN__ +#define TCP_SND_BUF (2 * TCP_MSS) +#endif + +/** + * TCP_SND_QUEUELEN: TCP sender buffer space (pbufs). This must be at least + * as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. + */ +#if !defined TCP_SND_QUEUELEN || defined __DOXYGEN__ +#define TCP_SND_QUEUELEN ((4 * (TCP_SND_BUF) + (TCP_MSS - 1))/(TCP_MSS)) +#endif + +/** + * TCP_SNDLOWAT: TCP writable space (bytes). This must be less than + * TCP_SND_BUF. It is the amount of space which must be available in the + * TCP snd_buf for select to return writable (combined with TCP_SNDQUEUELOWAT). + */ +#if !defined TCP_SNDLOWAT || defined __DOXYGEN__ +#define TCP_SNDLOWAT LWIP_MIN(LWIP_MAX(((TCP_SND_BUF)/2), (2 * TCP_MSS) + 1), (TCP_SND_BUF) - 1) +#endif + +/** + * TCP_SNDQUEUELOWAT: TCP writable bufs (pbuf count). This must be less + * than TCP_SND_QUEUELEN. If the number of pbufs queued on a pcb drops below + * this number, select returns writable (combined with TCP_SNDLOWAT). + */ +#if !defined TCP_SNDQUEUELOWAT || defined __DOXYGEN__ +#define TCP_SNDQUEUELOWAT LWIP_MAX(((TCP_SND_QUEUELEN)/2), 5) +#endif + +/** + * TCP_OOSEQ_MAX_BYTES: The maximum number of bytes queued on ooseq per pcb. + * Default is 0 (no limit). Only valid for TCP_QUEUE_OOSEQ==1. + */ +#if !defined TCP_OOSEQ_MAX_BYTES || defined __DOXYGEN__ +#define TCP_OOSEQ_MAX_BYTES 0 +#endif + +/** + * TCP_OOSEQ_MAX_PBUFS: The maximum number of pbufs queued on ooseq per pcb. + * Default is 0 (no limit). Only valid for TCP_QUEUE_OOSEQ==1. + */ +#if !defined TCP_OOSEQ_MAX_PBUFS || defined __DOXYGEN__ +#define TCP_OOSEQ_MAX_PBUFS 0 +#endif + +/** + * TCP_LISTEN_BACKLOG: Enable the backlog option for tcp listen pcb. + */ +#if !defined TCP_LISTEN_BACKLOG || defined __DOXYGEN__ +#define TCP_LISTEN_BACKLOG 0 +#endif + +/** + * The maximum allowed backlog for TCP listen netconns. + * This backlog is used unless another is explicitly specified. + * 0xff is the maximum (u8_t). + */ +#if !defined TCP_DEFAULT_LISTEN_BACKLOG || defined __DOXYGEN__ +#define TCP_DEFAULT_LISTEN_BACKLOG 0xff +#endif + +/** + * TCP_OVERSIZE: The maximum number of bytes that tcp_write may + * allocate ahead of time in an attempt to create shorter pbuf chains + * for transmission. The meaningful range is 0 to TCP_MSS. Some + * suggested values are: + * + * 0: Disable oversized allocation. Each tcp_write() allocates a new + pbuf (old behaviour). + * 1: Allocate size-aligned pbufs with minimal excess. Use this if your + * scatter-gather DMA requires aligned fragments. + * 128: Limit the pbuf/memory overhead to 20%. + * TCP_MSS: Try to create unfragmented TCP packets. + * TCP_MSS/4: Try to create 4 fragments or less per TCP packet. + */ +#if !defined TCP_OVERSIZE || defined __DOXYGEN__ +#define TCP_OVERSIZE TCP_MSS +#endif + +/** + * LWIP_TCP_TIMESTAMPS==1: support the TCP timestamp option. + * The timestamp option is currently only used to help remote hosts, it is not + * really used locally. Therefore, it is only enabled when a TS option is + * received in the initial SYN packet from a remote host. 
+ */ +#if !defined LWIP_TCP_TIMESTAMPS || defined __DOXYGEN__ +#define LWIP_TCP_TIMESTAMPS 0 +#endif + +/** + * TCP_WND_UPDATE_THRESHOLD: difference in window to trigger an + * explicit window update + */ +#if !defined TCP_WND_UPDATE_THRESHOLD || defined __DOXYGEN__ +#define TCP_WND_UPDATE_THRESHOLD LWIP_MIN((TCP_WND / 4), (TCP_MSS * 4)) +#endif + +/** + * LWIP_EVENT_API and LWIP_CALLBACK_API: Only one of these should be set to 1. + * LWIP_EVENT_API==1: The user defines lwip_tcp_event() to receive all + * events (accept, sent, etc) that happen in the system. + * LWIP_CALLBACK_API==1: The PCB callback function is called directly + * for the event. This is the default. + */ +#if !defined(LWIP_EVENT_API) && !defined(LWIP_CALLBACK_API) || defined __DOXYGEN__ +#define LWIP_EVENT_API 0 +#define LWIP_CALLBACK_API 1 +#else +#ifndef LWIP_EVENT_API +#define LWIP_EVENT_API 0 +#endif +#ifndef LWIP_CALLBACK_API +#define LWIP_CALLBACK_API 0 +#endif +#endif + +/** + * LWIP_WND_SCALE and TCP_RCV_SCALE: + * Set LWIP_WND_SCALE to 1 to enable window scaling. + * Set TCP_RCV_SCALE to the desired scaling factor (shift count in the + * range of [0..14]). + * When LWIP_WND_SCALE is enabled but TCP_RCV_SCALE is 0, we can use a large + * send window while having a small receive window only. + */ +#if !defined LWIP_WND_SCALE || defined __DOXYGEN__ +#define LWIP_WND_SCALE 0 +#define TCP_RCV_SCALE 0 +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- Pbuf options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_pbuf PBUF + * @ingroup lwip_opts + * @{ + */ +/** + * PBUF_LINK_HLEN: the number of bytes that should be allocated for a + * link level header. The default is 14, the standard value for + * Ethernet. + */ +#if !defined PBUF_LINK_HLEN || defined __DOXYGEN__ +#if defined LWIP_HOOK_VLAN_SET && !defined __DOXYGEN__ +#define PBUF_LINK_HLEN (18 + ETH_PAD_SIZE) +#else /* LWIP_HOOK_VLAN_SET */ +#define PBUF_LINK_HLEN (14 + ETH_PAD_SIZE) +#endif /* LWIP_HOOK_VLAN_SET */ +#endif + +/** + * PBUF_LINK_ENCAPSULATION_HLEN: the number of bytes that should be allocated + * for an additional encapsulation header before ethernet headers (e.g. 802.11) + */ +#if !defined PBUF_LINK_ENCAPSULATION_HLEN || defined __DOXYGEN__ +#define PBUF_LINK_ENCAPSULATION_HLEN 0u +#endif + +/** + * PBUF_POOL_BUFSIZE: the size of each pbuf in the pbuf pool. The default is + * designed to accommodate single full size TCP frame in one pbuf, including + * TCP_MSS, IP header, and link header. + */ +#if !defined PBUF_POOL_BUFSIZE || defined __DOXYGEN__ +#define PBUF_POOL_BUFSIZE LWIP_MEM_ALIGN_SIZE(TCP_MSS+40+PBUF_LINK_ENCAPSULATION_HLEN+PBUF_LINK_HLEN) +#endif +/** + * @} + */ + +/* + ------------------------------------------------ + ---------- Network Interfaces options ---------- + ------------------------------------------------ +*/ +/** + * @defgroup lwip_opts_netif NETIF + * @ingroup lwip_opts + * @{ + */ +/** + * LWIP_NETIF_HOSTNAME==1: use DHCP_OPTION_HOSTNAME with netif's hostname + * field. 
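+ * Example (a minimal sketch, assuming LWIP_NETIF_HOSTNAME==1 and LWIP_DHCP==1; + * netif variable and name string are placeholders): + * + * my_netif.hostname = "eez-psu"; // set before starting DHCP + * dhcp_start(&my_netif); // hostname is then sent with DHCP requests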
+ */ +#if !defined LWIP_NETIF_HOSTNAME || defined __DOXYGEN__ +#define LWIP_NETIF_HOSTNAME 0 +#endif + +/** + * LWIP_NETIF_API==1: Support netif api (in netifapi.c) + */ +#if !defined LWIP_NETIF_API || defined __DOXYGEN__ +#define LWIP_NETIF_API 0 +#endif + +/** + * LWIP_NETIF_STATUS_CALLBACK==1: Support a callback function whenever an interface + * changes its up/down status (i.e., due to DHCP IP acquisition) + */ +#if !defined LWIP_NETIF_STATUS_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_STATUS_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_LINK_CALLBACK==1: Support a callback function from an interface + * whenever the link changes (i.e., link down) + */ +#if !defined LWIP_NETIF_LINK_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_LINK_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_REMOVE_CALLBACK==1: Support a callback function that is called + * when a netif has been removed + */ +#if !defined LWIP_NETIF_REMOVE_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_REMOVE_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_HWADDRHINT==1: Cache link-layer-address hints (e.g. table + * indices) in struct netif. TCP and UDP can make use of this to prevent + * scanning the ARP table for every sent packet. While this is faster for big + * ARP tables or many concurrent connections, it might be counterproductive + * if you have a tiny ARP table or if there never are concurrent connections. + */ +#if !defined LWIP_NETIF_HWADDRHINT || defined __DOXYGEN__ +#define LWIP_NETIF_HWADDRHINT 0 +#endif + +/** + * LWIP_NETIF_TX_SINGLE_PBUF: if this is set to 1, lwIP tries to put all data + * to be sent into one single pbuf. This is for compatibility with DMA-enabled + * MACs that do not support scatter-gather. + * Beware that this might involve CPU-memcpy before transmitting that would not + * be needed without this flag! Use this only if you need to! + * + * @todo: TCP and IP-frag do not work with this, yet: + */ +#if !defined LWIP_NETIF_TX_SINGLE_PBUF || defined __DOXYGEN__ +#define LWIP_NETIF_TX_SINGLE_PBUF 0 +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * LWIP_NUM_NETIF_CLIENT_DATA: Number of clients that may store + * data in client_data member array of struct netif. + */ +#if !defined LWIP_NUM_NETIF_CLIENT_DATA || defined __DOXYGEN__ +#define LWIP_NUM_NETIF_CLIENT_DATA 0 +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- LOOPIF options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_loop Loopback interface + * @ingroup lwip_opts_netif + * @{ + */ +/** + * LWIP_HAVE_LOOPIF==1: Support loop interface (127.0.0.1). + * This is only needed when no real netifs are available. If at least one other + * netif is available, loopback traffic uses this netif. + */ +#if !defined LWIP_HAVE_LOOPIF || defined __DOXYGEN__ +#define LWIP_HAVE_LOOPIF LWIP_NETIF_LOOPBACK +#endif + +/** + * LWIP_LOOPIF_MULTICAST==1: Support multicast/IGMP on loop interface (127.0.0.1). + */ +#if !defined LWIP_LOOPIF_MULTICAST || defined __DOXYGEN__ +#define LWIP_LOOPIF_MULTICAST 0 +#endif + +/** + * LWIP_NETIF_LOOPBACK==1: Support sending packets with a destination IP + * address equal to the netif IP address, looping them back up the stack. 
+ */ +#if !defined LWIP_NETIF_LOOPBACK || defined __DOXYGEN__ +#define LWIP_NETIF_LOOPBACK 0 +#endif + +/** + * LWIP_LOOPBACK_MAX_PBUFS: Maximum number of pbufs on queue for loopback + * sending for each netif (0 = disabled) + */ +#if !defined LWIP_LOOPBACK_MAX_PBUFS || defined __DOXYGEN__ +#define LWIP_LOOPBACK_MAX_PBUFS 0 +#endif + +/** + * LWIP_NETIF_LOOPBACK_MULTITHREADING: Indicates whether threading is enabled in + * the system, as netifs must change how they behave depending on this setting + * for the LWIP_NETIF_LOOPBACK option to work. + * Setting this is needed to avoid reentering non-reentrant functions like + * tcp_input(). + * LWIP_NETIF_LOOPBACK_MULTITHREADING==1: Indicates that the user is using a + * multithreaded environment like tcpip.c. In this case, netif->input() + * is called directly. + * LWIP_NETIF_LOOPBACK_MULTITHREADING==0: Indicates a polling (or NO_SYS) setup. + * The packets are put on a list and netif_poll() must be called in + * the main application loop. + */ +#if !defined LWIP_NETIF_LOOPBACK_MULTITHREADING || defined __DOXYGEN__ +#define LWIP_NETIF_LOOPBACK_MULTITHREADING (!NO_SYS) +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- Thread options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_thread Threading + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * TCPIP_THREAD_NAME: The name assigned to the main tcpip thread. + */ +#if !defined TCPIP_THREAD_NAME || defined __DOXYGEN__ +#define TCPIP_THREAD_NAME "tcpip_thread" +#endif + +/** + * TCPIP_THREAD_STACKSIZE: The stack size used by the main tcpip thread. + * The stack size value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined TCPIP_THREAD_STACKSIZE || defined __DOXYGEN__ +#define TCPIP_THREAD_STACKSIZE 0 +#endif + +/** + * TCPIP_THREAD_PRIO: The priority assigned to the main tcpip thread. + * The priority value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined TCPIP_THREAD_PRIO || defined __DOXYGEN__ +#define TCPIP_THREAD_PRIO 1 +#endif + +/** + * TCPIP_MBOX_SIZE: The mailbox size for the tcpip thread messages + * The queue size value itself is platform-dependent, but is passed to + * sys_mbox_new() when tcpip_init is called. + */ +#if !defined TCPIP_MBOX_SIZE || defined __DOXYGEN__ +#define TCPIP_MBOX_SIZE 0 +#endif + +/** + * Define this to something that triggers a watchdog. This is called from + * tcpip_thread after processing a message. + */ +#if !defined LWIP_TCPIP_THREAD_ALIVE || defined __DOXYGEN__ +#define LWIP_TCPIP_THREAD_ALIVE() +#endif + +/** + * SLIPIF_THREAD_NAME: The name assigned to the slipif_loop thread. + */ +#if !defined SLIPIF_THREAD_NAME || defined __DOXYGEN__ +#define SLIPIF_THREAD_NAME "slipif_loop" +#endif + +/** + * SLIP_THREAD_STACKSIZE: The stack size used by the slipif_loop thread. + * The stack size value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined SLIPIF_THREAD_STACKSIZE || defined __DOXYGEN__ +#define SLIPIF_THREAD_STACKSIZE 0 +#endif + +/** + * SLIPIF_THREAD_PRIO: The priority assigned to the slipif_loop thread. + * The priority value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. 
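+ * Schematically, a port consumes this pair (like the other thread options + * above) through sys_thread_new(); thread_fn and arg stand for whatever the + * module passes: + * + * sys_thread_new(SLIPIF_THREAD_NAME, thread_fn, arg, + * SLIPIF_THREAD_STACKSIZE, SLIPIF_THREAD_PRIO);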
+ */ +#if !defined SLIPIF_THREAD_PRIO || defined __DOXYGEN__ +#define SLIPIF_THREAD_PRIO 1 +#endif + +/** + * DEFAULT_THREAD_NAME: The name assigned to any other lwIP thread. + */ +#if !defined DEFAULT_THREAD_NAME || defined __DOXYGEN__ +#define DEFAULT_THREAD_NAME "lwIP" +#endif + +/** + * DEFAULT_THREAD_STACKSIZE: The stack size used by any other lwIP thread. + * The stack size value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined DEFAULT_THREAD_STACKSIZE || defined __DOXYGEN__ +#define DEFAULT_THREAD_STACKSIZE 0 +#endif + +/** + * DEFAULT_THREAD_PRIO: The priority assigned to any other lwIP thread. + * The priority value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined DEFAULT_THREAD_PRIO || defined __DOXYGEN__ +#define DEFAULT_THREAD_PRIO 1 +#endif + +/** + * DEFAULT_RAW_RECVMBOX_SIZE: The mailbox size for the incoming packets on a + * NETCONN_RAW. The queue size value itself is platform-dependent, but is passed + * to sys_mbox_new() when the recvmbox is created. + */ +#if !defined DEFAULT_RAW_RECVMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_RAW_RECVMBOX_SIZE 0 +#endif + +/** + * DEFAULT_UDP_RECVMBOX_SIZE: The mailbox size for the incoming packets on a + * NETCONN_UDP. The queue size value itself is platform-dependent, but is passed + * to sys_mbox_new() when the recvmbox is created. + */ +#if !defined DEFAULT_UDP_RECVMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_UDP_RECVMBOX_SIZE 0 +#endif + +/** + * DEFAULT_TCP_RECVMBOX_SIZE: The mailbox size for the incoming packets on a + * NETCONN_TCP. The queue size value itself is platform-dependent, but is passed + * to sys_mbox_new() when the recvmbox is created. + */ +#if !defined DEFAULT_TCP_RECVMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_TCP_RECVMBOX_SIZE 0 +#endif + +/** + * DEFAULT_ACCEPTMBOX_SIZE: The mailbox size for the incoming connections. + * The queue size value itself is platform-dependent, but is passed to + * sys_mbox_new() when the acceptmbox is created. + */ +#if !defined DEFAULT_ACCEPTMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_ACCEPTMBOX_SIZE 0 +#endif +/** + * @} + */ + +/* + ---------------------------------------------- + ---------- Sequential layer options ---------- + ---------------------------------------------- +*/ +/** + * @defgroup lwip_opts_netconn Netconn + * @ingroup lwip_opts_threadsafe_apis + * @{ + */ +/** + * LWIP_NETCONN==1: Enable Netconn API (requires api_lib.c) + */ +#if !defined LWIP_NETCONN || defined __DOXYGEN__ +#define LWIP_NETCONN 1 +#endif + +/** LWIP_TCPIP_TIMEOUT==1: Enable tcpip_timeout/tcpip_untimeout to create + * timers running in tcpip_thread from another thread. + */ +#if !defined LWIP_TCPIP_TIMEOUT || defined __DOXYGEN__ +#define LWIP_TCPIP_TIMEOUT 0 +#endif + +/** LWIP_NETCONN_SEM_PER_THREAD==1: Use one (thread-local) semaphore per + * thread calling socket/netconn functions instead of allocating one + * semaphore per netconn (and per select etc.) + * ATTENTION: a thread-local semaphore for API calls is needed: + * - LWIP_NETCONN_THREAD_SEM_GET() returning a sys_sem_t* + * - LWIP_NETCONN_THREAD_SEM_ALLOC() creating the semaphore + * - LWIP_NETCONN_THREAD_SEM_FREE() freeing the semaphore + * The latter two can be invoked by calling netconn_thread_init()/netconn_thread_cleanup(). + * Ports may call these for threads created with sys_thread_new(). 
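+ * Example (a minimal sketch, assuming LWIP_NETCONN_SEM_PER_THREAD==1 and a + * port providing the three macros above; my_socket_thread is a placeholder): + * + * static void my_socket_thread(void *arg) + * { + * netconn_thread_init(); // allocate this thread's API semaphore + * // ... socket/netconn calls ... + * netconn_thread_cleanup(); // free it before the thread exits + * }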
+ */ +#if !defined LWIP_NETCONN_SEM_PER_THREAD || defined __DOXYGEN__ +#define LWIP_NETCONN_SEM_PER_THREAD 0 +#endif + +/** LWIP_NETCONN_FULLDUPLEX==1: Enable code that allows reading from one thread, + * writing from a 2nd thread and closing from a 3rd thread at the same time. + * ATTENTION: This is currently really alpha! Some requirements: + * - LWIP_NETCONN_SEM_PER_THREAD==1 is required to use one socket/netconn from + * multiple threads at once + * - sys_mbox_free() has to unblock receive tasks waiting on recvmbox/acceptmbox + * and prevent a task pending on this during/after deletion + */ +#if !defined LWIP_NETCONN_FULLDUPLEX || defined __DOXYGEN__ +#define LWIP_NETCONN_FULLDUPLEX 0 +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- Socket options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_socket Sockets + * @ingroup lwip_opts_threadsafe_apis + * @{ + */ +/** + * LWIP_SOCKET==1: Enable Socket API (requires sockets.c) + */ +#if !defined LWIP_SOCKET || defined __DOXYGEN__ +#define LWIP_SOCKET 1 +#endif + +/** LWIP_SOCKET_SET_ERRNO==1: Set errno when socket functions cannot complete + * successfully, as required by POSIX. Default is POSIX-compliant. + */ +#if !defined LWIP_SOCKET_SET_ERRNO || defined __DOXYGEN__ +#define LWIP_SOCKET_SET_ERRNO 1 +#endif + +/** + * LWIP_COMPAT_SOCKETS==1: Enable BSD-style sockets functions names through defines. + * LWIP_COMPAT_SOCKETS==2: Same as ==1 but correctly named functions are created. + * While this helps code completion, it might conflict with existing libraries. + * (only used if you use sockets.c) + */ +#if !defined LWIP_COMPAT_SOCKETS || defined __DOXYGEN__ +#define LWIP_COMPAT_SOCKETS 1 +#endif + +/** + * LWIP_POSIX_SOCKETS_IO_NAMES==1: Enable POSIX-style sockets functions names. + * Disable this option if you use a POSIX operating system that uses the same + * names (read, write & close). (only used if you use sockets.c) + */ +#if !defined LWIP_POSIX_SOCKETS_IO_NAMES || defined __DOXYGEN__ +#define LWIP_POSIX_SOCKETS_IO_NAMES 1 +#endif + +/** + * LWIP_SOCKET_OFFSET==n: Increases the file descriptor number created by LwIP by n. + * This can be useful when there are multiple APIs which create file descriptors. + * If they all start at a different offset and do not overlap, you can + * re-implement read/write/close/ioctl/fcntl to send the requested action to the right + * library (sharing select will need more work, though). + */ +#if !defined LWIP_SOCKET_OFFSET || defined __DOXYGEN__ +#define LWIP_SOCKET_OFFSET 0 +#endif + +/** + * LWIP_TCP_KEEPALIVE==1: Enable TCP_KEEPIDLE, TCP_KEEPINTVL and TCP_KEEPCNT + * options processing. Note that TCP_KEEPIDLE and TCP_KEEPINTVL have to be set + * in seconds. (does not require sockets.c, and will affect tcp.c) + */ +#if !defined LWIP_TCP_KEEPALIVE || defined __DOXYGEN__ +#define LWIP_TCP_KEEPALIVE 0 +#endif + +/** + * LWIP_SO_SNDTIMEO==1: Enable send timeout for sockets/netconns and + * SO_SNDTIMEO processing. + */ +#if !defined LWIP_SO_SNDTIMEO || defined __DOXYGEN__ +#define LWIP_SO_SNDTIMEO 0 +#endif + +/** + * LWIP_SO_RCVTIMEO==1: Enable receive timeout for sockets/netconns and + * SO_RCVTIMEO processing. + */ +#if !defined LWIP_SO_RCVTIMEO || defined __DOXYGEN__ +#define LWIP_SO_RCVTIMEO 0 +#endif + +/** + * LWIP_SO_SNDRCVTIMEO_NONSTANDARD==1: SO_RCVTIMEO/SO_SNDTIMEO take an int + * (milliseconds, much like winsock does) instead of a struct timeval (default). 
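+ * Example (a sketch of both variants; s is an open socket and LWIP_SO_RCVTIMEO + * is assumed to be enabled): + * + * // default (==0): POSIX struct timeval + * struct timeval tv = { 2, 0 }; // 2 seconds + * lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)); + * + * // nonstandard (==1): plain int in milliseconds + * int timeout_ms = 2000; + * lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &timeout_ms, sizeof(timeout_ms));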
+ */ +#if !defined LWIP_SO_SNDRCVTIMEO_NONSTANDARD || defined __DOXYGEN__ +#define LWIP_SO_SNDRCVTIMEO_NONSTANDARD 0 +#endif + +/** + * LWIP_SO_RCVBUF==1: Enable SO_RCVBUF processing. + */ +#if !defined LWIP_SO_RCVBUF || defined __DOXYGEN__ +#define LWIP_SO_RCVBUF 0 +#endif + +/** + * LWIP_SO_LINGER==1: Enable SO_LINGER processing. + */ +#if !defined LWIP_SO_LINGER || defined __DOXYGEN__ +#define LWIP_SO_LINGER 0 +#endif + +/** + * If LWIP_SO_RCVBUF is used, this is the default value for recv_bufsize. + */ +#if !defined RECV_BUFSIZE_DEFAULT || defined __DOXYGEN__ +#define RECV_BUFSIZE_DEFAULT INT_MAX +#endif + +/** + * By default, TCP socket/netconn close waits 20 seconds max to send the FIN + */ +#if !defined LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT || defined __DOXYGEN__ +#define LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT 20000 +#endif + +/** + * SO_REUSE==1: Enable SO_REUSEADDR option. + */ +#if !defined SO_REUSE || defined __DOXYGEN__ +#define SO_REUSE 0 +#endif + +/** + * SO_REUSE_RXTOALL==1: Pass a copy of incoming broadcast/multicast packets + * to all local matches if SO_REUSEADDR is turned on. + * WARNING: Adds a memcpy for every packet if passing to more than one pcb! + */ +#if !defined SO_REUSE_RXTOALL || defined __DOXYGEN__ +#define SO_REUSE_RXTOALL 0 +#endif + +/** + * LWIP_FIONREAD_LINUXMODE==0 (default): ioctl/FIONREAD returns the amount of + * pending data in the network buffer. This is the way windows does it. It's + * the default for lwIP since it is smaller. + * LWIP_FIONREAD_LINUXMODE==1: ioctl/FIONREAD returns the size of the next + * pending datagram in bytes. This is the way linux does it. This code is only + * here for compatibility. + */ +#if !defined LWIP_FIONREAD_LINUXMODE || defined __DOXYGEN__ +#define LWIP_FIONREAD_LINUXMODE 0 +#endif +/** + * @} + */ + +/* + ---------------------------------------- + ---------- Statistics options ---------- + ---------------------------------------- +*/ +/** + * @defgroup lwip_opts_stats Statistics + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_STATS==1: Enable statistics collection in lwip_stats. + */ +#if !defined LWIP_STATS || defined __DOXYGEN__ +#define LWIP_STATS 1 +#endif + +#if LWIP_STATS + +/** + * LWIP_STATS_DISPLAY==1: Compile in the statistics output functions. + */ +#if !defined LWIP_STATS_DISPLAY || defined __DOXYGEN__ +#define LWIP_STATS_DISPLAY 0 +#endif + +/** + * LINK_STATS==1: Enable link stats. + */ +#if !defined LINK_STATS || defined __DOXYGEN__ +#define LINK_STATS 1 +#endif + +/** + * ETHARP_STATS==1: Enable etharp stats. + */ +#if !defined ETHARP_STATS || defined __DOXYGEN__ +#define ETHARP_STATS (LWIP_ARP) +#endif + +/** + * IP_STATS==1: Enable IP stats. + */ +#if !defined IP_STATS || defined __DOXYGEN__ +#define IP_STATS 1 +#endif + +/** + * IPFRAG_STATS==1: Enable IP fragmentation stats. Default is + * on if using either frag or reass. + */ +#if !defined IPFRAG_STATS || defined __DOXYGEN__ +#define IPFRAG_STATS (IP_REASSEMBLY || IP_FRAG) +#endif + +/** + * ICMP_STATS==1: Enable ICMP stats. + */ +#if !defined ICMP_STATS || defined __DOXYGEN__ +#define ICMP_STATS 1 +#endif + +/** + * IGMP_STATS==1: Enable IGMP stats. + */ +#if !defined IGMP_STATS || defined __DOXYGEN__ +#define IGMP_STATS (LWIP_IGMP) +#endif + +/** + * UDP_STATS==1: Enable UDP stats. Default is on if + * UDP enabled, otherwise off. + */ +#if !defined UDP_STATS || defined __DOXYGEN__ +#define UDP_STATS (LWIP_UDP) +#endif + +/** + * TCP_STATS==1: Enable TCP stats. Default is on if TCP + * enabled, otherwise off. 
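+ * With LWIP_STATS_DISPLAY==1, all enabled counters (including these TCP stats) + * can be dumped with a single call (a sketch): + * + * stats_display(); // prints every *_STATS group compiled in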
+ */ +#if !defined TCP_STATS || defined __DOXYGEN__ +#define TCP_STATS (LWIP_TCP) +#endif + +/** + * MEM_STATS==1: Enable mem.c stats. + */ +#if !defined MEM_STATS || defined __DOXYGEN__ +#define MEM_STATS ((MEM_LIBC_MALLOC == 0) && (MEM_USE_POOLS == 0)) +#endif + +/** + * MEMP_STATS==1: Enable memp.c pool stats. + */ +#if !defined MEMP_STATS || defined __DOXYGEN__ +#define MEMP_STATS (MEMP_MEM_MALLOC == 0) +#endif + +/** + * SYS_STATS==1: Enable system stats (sem and mbox counts, etc). + */ +#if !defined SYS_STATS || defined __DOXYGEN__ +#define SYS_STATS (NO_SYS == 0) +#endif + +/** + * IP6_STATS==1: Enable IPv6 stats. + */ +#if !defined IP6_STATS || defined __DOXYGEN__ +#define IP6_STATS (LWIP_IPV6) +#endif + +/** + * ICMP6_STATS==1: Enable ICMP for IPv6 stats. + */ +#if !defined ICMP6_STATS || defined __DOXYGEN__ +#define ICMP6_STATS (LWIP_IPV6 && LWIP_ICMP6) +#endif + +/** + * IP6_FRAG_STATS==1: Enable IPv6 fragmentation stats. + */ +#if !defined IP6_FRAG_STATS || defined __DOXYGEN__ +#define IP6_FRAG_STATS (LWIP_IPV6 && (LWIP_IPV6_FRAG || LWIP_IPV6_REASS)) +#endif + +/** + * MLD6_STATS==1: Enable MLD for IPv6 stats. + */ +#if !defined MLD6_STATS || defined __DOXYGEN__ +#define MLD6_STATS (LWIP_IPV6 && LWIP_IPV6_MLD) +#endif + +/** + * ND6_STATS==1: Enable Neighbor discovery for IPv6 stats. + */ +#if !defined ND6_STATS || defined __DOXYGEN__ +#define ND6_STATS (LWIP_IPV6) +#endif + +/** + * MIB2_STATS==1: Stats for SNMP MIB2. + */ +#if !defined MIB2_STATS || defined __DOXYGEN__ +#define MIB2_STATS 0 +#endif + +#else + +#define LINK_STATS 0 +#define ETHARP_STATS 0 +#define IP_STATS 0 +#define IPFRAG_STATS 0 +#define ICMP_STATS 0 +#define IGMP_STATS 0 +#define UDP_STATS 0 +#define TCP_STATS 0 +#define MEM_STATS 0 +#define MEMP_STATS 0 +#define SYS_STATS 0 +#define LWIP_STATS_DISPLAY 0 +#define IP6_STATS 0 +#define ICMP6_STATS 0 +#define IP6_FRAG_STATS 0 +#define MLD6_STATS 0 +#define ND6_STATS 0 +#define MIB2_STATS 0 + +#endif /* LWIP_STATS */ +/** + * @} + */ + +/* + -------------------------------------- + ---------- Checksum options ---------- + -------------------------------------- +*/ +/** + * @defgroup lwip_opts_checksum Checksum + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_CHECKSUM_CTRL_PER_NETIF==1: Checksum generation/check can be enabled/disabled + * per netif. + * ATTENTION: if enabled, the CHECKSUM_GEN_* and CHECKSUM_CHECK_* defines must be enabled! + */ +#if !defined LWIP_CHECKSUM_CTRL_PER_NETIF || defined __DOXYGEN__ +#define LWIP_CHECKSUM_CTRL_PER_NETIF 0 +#endif + +/** + * CHECKSUM_GEN_IP==1: Generate checksums in software for outgoing IP packets. + */ +#if !defined CHECKSUM_GEN_IP || defined __DOXYGEN__ +#define CHECKSUM_GEN_IP 1 +#endif + +/** + * CHECKSUM_GEN_UDP==1: Generate checksums in software for outgoing UDP packets. + */ +#if !defined CHECKSUM_GEN_UDP || defined __DOXYGEN__ +#define CHECKSUM_GEN_UDP 1 +#endif + +/** + * CHECKSUM_GEN_TCP==1: Generate checksums in software for outgoing TCP packets. + */ +#if !defined CHECKSUM_GEN_TCP || defined __DOXYGEN__ +#define CHECKSUM_GEN_TCP 1 +#endif + +/** + * CHECKSUM_GEN_ICMP==1: Generate checksums in software for outgoing ICMP packets. + */ +#if !defined CHECKSUM_GEN_ICMP || defined __DOXYGEN__ +#define CHECKSUM_GEN_ICMP 1 +#endif + +/** + * CHECKSUM_GEN_ICMP6==1: Generate checksums in software for outgoing ICMP6 packets. 
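+ * The whole CHECKSUM_GEN_*/CHECKSUM_CHECK_* family follows this pattern; a + * typical lwipopts.h sketch for a MAC that computes checksums in hardware + * (as the STM32 ETH peripheral can, when configured to do so) would be: + * + * #define CHECKSUM_GEN_IP 0 + * #define CHECKSUM_GEN_UDP 0 + * #define CHECKSUM_GEN_TCP 0 + * #define CHECKSUM_CHECK_IP 0 + * #define CHECKSUM_CHECK_UDP 0 + * #define CHECKSUM_CHECK_TCP 0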
+ */ +#if !defined CHECKSUM_GEN_ICMP6 || defined __DOXYGEN__ +#define CHECKSUM_GEN_ICMP6 1 +#endif + +/** + * CHECKSUM_CHECK_IP==1: Check checksums in software for incoming IP packets. + */ +#if !defined CHECKSUM_CHECK_IP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_IP 1 +#endif + +/** + * CHECKSUM_CHECK_UDP==1: Check checksums in software for incoming UDP packets. + */ +#if !defined CHECKSUM_CHECK_UDP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_UDP 1 +#endif + +/** + * CHECKSUM_CHECK_TCP==1: Check checksums in software for incoming TCP packets. + */ +#if !defined CHECKSUM_CHECK_TCP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_TCP 1 +#endif + +/** + * CHECKSUM_CHECK_ICMP==1: Check checksums in software for incoming ICMP packets. + */ +#if !defined CHECKSUM_CHECK_ICMP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_ICMP 1 +#endif + +/** + * CHECKSUM_CHECK_ICMP6==1: Check checksums in software for incoming ICMPv6 packets. + */ +#if !defined CHECKSUM_CHECK_ICMP6 || defined __DOXYGEN__ +#define CHECKSUM_CHECK_ICMP6 1 +#endif + +/** + * LWIP_CHECKSUM_ON_COPY==1: Calculate checksum when copying data from + * application buffers to pbufs. + */ +#if !defined LWIP_CHECKSUM_ON_COPY || defined __DOXYGEN__ +#define LWIP_CHECKSUM_ON_COPY 0 +#endif +/** + * @} + */ + +/* + --------------------------------------- + ---------- IPv6 options --------------- + --------------------------------------- +*/ +/** + * @defgroup lwip_opts_ipv6 IPv6 + * @ingroup lwip_opts + * @{ + */ +/** + * LWIP_IPV6==1: Enable IPv6 + */ +#if !defined LWIP_IPV6 || defined __DOXYGEN__ +#define LWIP_IPV6 0 +#endif + +/** + * LWIP_IPV6_NUM_ADDRESSES: Number of IPv6 addresses per netif. + */ +#if !defined LWIP_IPV6_NUM_ADDRESSES || defined __DOXYGEN__ +#define LWIP_IPV6_NUM_ADDRESSES 3 +#endif + +/** + * LWIP_IPV6_FORWARD==1: Forward IPv6 packets across netifs + */ +#if !defined LWIP_IPV6_FORWARD || defined __DOXYGEN__ +#define LWIP_IPV6_FORWARD 0 +#endif + +/** + * LWIP_IPV6_FRAG==1: Fragment outgoing IPv6 packets that are too big. + */ +#if !defined LWIP_IPV6_FRAG || defined __DOXYGEN__ +#define LWIP_IPV6_FRAG 0 +#endif + +/** + * LWIP_IPV6_REASS==1: Reassemble incoming IPv6 packets that are fragmented. + */ +#if !defined LWIP_IPV6_REASS || defined __DOXYGEN__ +#define LWIP_IPV6_REASS (LWIP_IPV6) +#endif + +/** + * LWIP_IPV6_SEND_ROUTER_SOLICIT==1: Send router solicitation messages during + * network startup. + */ +#if !defined LWIP_IPV6_SEND_ROUTER_SOLICIT || defined __DOXYGEN__ +#define LWIP_IPV6_SEND_ROUTER_SOLICIT 1 +#endif + +/** + * LWIP_IPV6_AUTOCONFIG==1: Enable stateless address autoconfiguration as per RFC 4862. + */ +#if !defined LWIP_IPV6_AUTOCONFIG || defined __DOXYGEN__ +#define LWIP_IPV6_AUTOCONFIG (LWIP_IPV6) +#endif + +/** + * LWIP_IPV6_DUP_DETECT_ATTEMPTS=[0..7]: Number of duplicate address detection attempts. + */ +#if !defined LWIP_IPV6_DUP_DETECT_ATTEMPTS || defined __DOXYGEN__ +#define LWIP_IPV6_DUP_DETECT_ATTEMPTS 1 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_icmp6 ICMP6 + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_ICMP6==1: Enable ICMPv6 (mandatory per RFC) + */ +#if !defined LWIP_ICMP6 || defined __DOXYGEN__ +#define LWIP_ICMP6 (LWIP_IPV6) +#endif + +/** + * LWIP_ICMP6_DATASIZE: bytes from original packet to send back in + * ICMPv6 error messages. 
+ */ +#if !defined LWIP_ICMP6_DATASIZE || defined __DOXYGEN__ +#define LWIP_ICMP6_DATASIZE 8 +#endif + +/** + * LWIP_ICMP6_HL: default hop limit for ICMPv6 messages + */ +#if !defined LWIP_ICMP6_HL || defined __DOXYGEN__ +#define LWIP_ICMP6_HL 255 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_mld6 Multicast listener discovery + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_IPV6_MLD==1: Enable multicast listener discovery protocol. + * If LWIP_IPV6 is enabled but this setting is disabled, the MAC layer must + * indiscriminately pass all inbound IPv6 multicast traffic to lwIP. + */ +#if !defined LWIP_IPV6_MLD || defined __DOXYGEN__ +#define LWIP_IPV6_MLD (LWIP_IPV6) +#endif + +/** + * MEMP_NUM_MLD6_GROUP: Max number of IPv6 multicast groups that can be joined. + * There must be enough groups so that each netif can join the solicited-node + * multicast group for each of its local addresses, plus one for MDNS if + * applicable, plus any number of groups to be joined on UDP sockets. + */ +#if !defined MEMP_NUM_MLD6_GROUP || defined __DOXYGEN__ +#define MEMP_NUM_MLD6_GROUP 4 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_nd6 Neighbor discovery + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_ND6_QUEUEING==1: queue outgoing IPv6 packets while MAC address + * is being resolved. + */ +#if !defined LWIP_ND6_QUEUEING || defined __DOXYGEN__ +#define LWIP_ND6_QUEUEING (LWIP_IPV6) +#endif + +/** + * MEMP_NUM_ND6_QUEUE: Max number of IPv6 packets to queue during MAC resolution. + */ +#if !defined MEMP_NUM_ND6_QUEUE || defined __DOXYGEN__ +#define MEMP_NUM_ND6_QUEUE 20 +#endif + +/** + * LWIP_ND6_NUM_NEIGHBORS: Number of entries in IPv6 neighbor cache + */ +#if !defined LWIP_ND6_NUM_NEIGHBORS || defined __DOXYGEN__ +#define LWIP_ND6_NUM_NEIGHBORS 10 +#endif + +/** + * LWIP_ND6_NUM_DESTINATIONS: number of entries in IPv6 destination cache + */ +#if !defined LWIP_ND6_NUM_DESTINATIONS || defined __DOXYGEN__ +#define LWIP_ND6_NUM_DESTINATIONS 10 +#endif + +/** + * LWIP_ND6_NUM_PREFIXES: number of entries in IPv6 on-link prefixes cache + */ +#if !defined LWIP_ND6_NUM_PREFIXES || defined __DOXYGEN__ +#define LWIP_ND6_NUM_PREFIXES 5 +#endif + +/** + * LWIP_ND6_NUM_ROUTERS: number of entries in IPv6 default router cache + */ +#if !defined LWIP_ND6_NUM_ROUTERS || defined __DOXYGEN__ +#define LWIP_ND6_NUM_ROUTERS 3 +#endif + +/** + * LWIP_ND6_MAX_MULTICAST_SOLICIT: max number of multicast solicit messages to send + * (neighbor solicit and router solicit) + */ +#if !defined LWIP_ND6_MAX_MULTICAST_SOLICIT || defined __DOXYGEN__ +#define LWIP_ND6_MAX_MULTICAST_SOLICIT 3 +#endif + +/** + * LWIP_ND6_MAX_UNICAST_SOLICIT: max number of unicast neighbor solicitation messages + * to send during neighbor reachability detection. + */ +#if !defined LWIP_ND6_MAX_UNICAST_SOLICIT || defined __DOXYGEN__ +#define LWIP_ND6_MAX_UNICAST_SOLICIT 3 +#endif + +/** + * Unused: See ND RFC (time in milliseconds). + */ +#if !defined LWIP_ND6_MAX_ANYCAST_DELAY_TIME || defined __DOXYGEN__ +#define LWIP_ND6_MAX_ANYCAST_DELAY_TIME 1000 +#endif + +/** + * Unused: See ND RFC + */ +#if !defined LWIP_ND6_MAX_NEIGHBOR_ADVERTISEMENT || defined __DOXYGEN__ +#define LWIP_ND6_MAX_NEIGHBOR_ADVERTISEMENT 3 +#endif + +/** + * LWIP_ND6_REACHABLE_TIME: default neighbor reachable time (in milliseconds). + * May be updated by router advertisement messages. 
+ */ +#if !defined LWIP_ND6_REACHABLE_TIME || defined __DOXYGEN__ +#define LWIP_ND6_REACHABLE_TIME 30000 +#endif + +/** + * LWIP_ND6_RETRANS_TIMER: default retransmission timer for solicitation messages + */ +#if !defined LWIP_ND6_RETRANS_TIMER || defined __DOXYGEN__ +#define LWIP_ND6_RETRANS_TIMER 1000 +#endif + +/** + * LWIP_ND6_DELAY_FIRST_PROBE_TIME: Delay before first unicast neighbor solicitation + * message is sent, during neighbor reachability detection. + */ +#if !defined LWIP_ND6_DELAY_FIRST_PROBE_TIME || defined __DOXYGEN__ +#define LWIP_ND6_DELAY_FIRST_PROBE_TIME 5000 +#endif + +/** + * LWIP_ND6_ALLOW_RA_UPDATES==1: Allow Router Advertisement messages to update + * Reachable time and retransmission timers, and netif MTU. + */ +#if !defined LWIP_ND6_ALLOW_RA_UPDATES || defined __DOXYGEN__ +#define LWIP_ND6_ALLOW_RA_UPDATES 1 +#endif + +/** + * LWIP_ND6_TCP_REACHABILITY_HINTS==1: Allow TCP to provide Neighbor Discovery + * with reachability hints for connected destinations. This helps avoid sending + * unicast neighbor solicitation messages. + */ +#if !defined LWIP_ND6_TCP_REACHABILITY_HINTS || defined __DOXYGEN__ +#define LWIP_ND6_TCP_REACHABILITY_HINTS 1 +#endif + +/** + * LWIP_ND6_RDNSS_MAX_DNS_SERVERS > 0: Use IPv6 Router Advertisement Recursive + * DNS Server Option (as per RFC 6106) to copy a defined maximum number of DNS + * servers to the DNS module. + */ +#if !defined LWIP_ND6_RDNSS_MAX_DNS_SERVERS || defined __DOXYGEN__ +#define LWIP_ND6_RDNSS_MAX_DNS_SERVERS 0 +#endif +/** + * @} + */ + +/** + * LWIP_IPV6_DHCP6==1: Enable DHCPv6 stateful address autoconfiguration. + */ +#if !defined LWIP_IPV6_DHCP6 || defined __DOXYGEN__ +#define LWIP_IPV6_DHCP6 0 +#endif + +/* + --------------------------------------- + ---------- Hook options --------------- + --------------------------------------- +*/ + +/** + * @defgroup lwip_opts_hooks Hooks + * @ingroup lwip_opts_infrastructure + * Hooks are undefined by default, define them to a function if you need them. + * @{ + */ + +/** + * LWIP_HOOK_FILENAME: Custom filename to #include in files that provide hooks. + * Declare your hook function prototypes in there, you may also #include all headers + * providing data types that are needed in this file. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_FILENAME "path/to/my/lwip_hooks.h" +#endif + +/** + * LWIP_HOOK_TCP_ISN: + * Hook for generation of the Initial Sequence Number (ISN) for a new TCP + * connection. The default lwIP ISN generation algorithm is very basic and may + * allow for TCP spoofing attacks. This hook provides the means to implement + * the standardized ISN generation algorithm from RFC 6528 (see contrib/addons/tcp_isn), + * or any other desired algorithm as a replacement. + * Called from tcp_connect() and tcp_listen_input() when an ISN is needed for + * a new TCP connection, if TCP support (@ref LWIP_TCP) is enabled.\n + * Signature: u32_t my_hook_tcp_isn(const ip_addr_t* local_ip, u16_t local_port, const ip_addr_t* remote_ip, u16_t remote_port); + * - it may be necessary to use "struct ip_addr" (ip4_addr, ip6_addr) instead of "ip_addr_t" in function declarations\n + * Arguments: + * - local_ip: pointer to the local IP address of the connection + * - local_port: local port number of the connection (host-byte order) + * - remote_ip: pointer to the remote IP address of the connection + * - remote_port: remote port number of the connection (host-byte order)\n + * Return value: + * - the 32-bit Initial Sequence Number to use for the new TCP connection. 
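+ * Example (a sketch; my_hook_tcp_isn and the header location are placeholder + * names chosen here): + * + * // in the port's hooks header (see LWIP_HOOK_FILENAME): + * u32_t my_hook_tcp_isn(const ip_addr_t* local_ip, u16_t local_port, + * const ip_addr_t* remote_ip, u16_t remote_port); + * // in lwipopts.h: + * #define LWIP_HOOK_TCP_ISN(lip, lp, rip, rp) my_hook_tcp_isn(lip, lp, rip, rp)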
+ */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_ISN(local_ip, local_port, remote_ip, remote_port) +#endif + +/** + * LWIP_HOOK_IP4_INPUT(pbuf, input_netif): + * - called from ip_input() (IPv4) + * - pbuf: received struct pbuf passed to ip_input() + * - input_netif: struct netif on which the packet has been received + * Return values: + * - 0: Hook has not consumed the packet, packet is processed as normal + * - != 0: Hook has consumed the packet. + * If the hook consumed the packet, 'pbuf' is the responsibility of the hook + * (i.e. free it when done). + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_INPUT(pbuf, input_netif) +#endif + +/** + * LWIP_HOOK_IP4_ROUTE(dest): + * - called from ip_route() (IPv4) + * - dest: destination IPv4 address + * Returns the destination netif or NULL if no destination netif is found. In + * that case, ip_route() continues as normal. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_ROUTE(dest) +#endif + +/** + * LWIP_HOOK_IP4_ROUTE_SRC(dest, src): + * - source-based routing for IPv4 (see LWIP_HOOK_IP4_ROUTE(), src may be NULL) + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_ROUTE_SRC(dest, src) +#endif + +/** + * LWIP_HOOK_ETHARP_GET_GW(netif, dest): + * - called from etharp_output() (IPv4) + * - netif: the netif used for sending + * - dest: the destination IPv4 address + * Returns the IPv4 address of the gateway to handle the specified destination + * IPv4 address. If NULL is returned, the netif's default gateway is used. + * The returned address MUST be directly reachable on the specified netif! + * This function is meant to implement advanced IPv4 routing together with + * LWIP_HOOK_IP4_ROUTE(). The actual routing/gateway table implementation is + * not part of lwIP but can e.g. be hidden in the netif's state argument. +*/ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_ETHARP_GET_GW(netif, dest) +#endif + +/** + * LWIP_HOOK_IP6_INPUT(pbuf, input_netif): + * - called from ip6_input() (IPv6) + * - pbuf: received struct pbuf passed to ip6_input() + * - input_netif: struct netif on which the packet has been received + * Return values: + * - 0: Hook has not consumed the packet, packet is processed as normal + * - != 0: Hook has consumed the packet. + * If the hook consumed the packet, 'pbuf' is the responsibility of the hook + * (i.e. free it when done). + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP6_INPUT(pbuf, input_netif) +#endif + +/** + * LWIP_HOOK_IP6_ROUTE(src, dest): + * - called from ip6_route() (IPv6) + * - src: source IPv6 address + * - dest: destination IPv6 address + * Returns the destination netif or NULL if no destination netif is found. In + * that case, ip6_route() continues as normal. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP6_ROUTE(src, dest) +#endif + +/** + * LWIP_HOOK_ND6_GET_GW(netif, dest): + * - called from nd6_get_next_hop_entry() (IPv6) + * - netif: the netif used for sending + * - dest: the destination IPv6 address + * Returns the IPv6 address of the next hop to handle the specified destination + * IPv6 address. If NULL is returned, an NDP-discovered router is used instead. + * The returned address MUST be directly reachable on the specified netif! + * This function is meant to implement advanced IPv6 routing together with + * LWIP_HOOK_IP6_ROUTE(). The actual routing/gateway table implementation is + * not part of lwIP but can e.g. be hidden in the netif's state argument. 
+*/ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_ND6_GET_GW(netif, dest) +#endif + +/** + * LWIP_HOOK_VLAN_CHECK(netif, eth_hdr, vlan_hdr): + * - called from ethernet_input() if VLAN support is enabled + * - netif: struct netif on which the packet has been received + * - eth_hdr: struct eth_hdr of the packet + * - vlan_hdr: struct eth_vlan_hdr of the packet + * Return values: + * - 0: Packet must be dropped. + * - != 0: Packet must be accepted. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_VLAN_CHECK(netif, eth_hdr, vlan_hdr) +#endif + +/** + * LWIP_HOOK_VLAN_SET: + * This hook can be used to set the prio_vid field of vlan_hdr. If you need to store + * data on a per-netif basis to implement this callback, see @ref netif_cd. + * Called from ethernet_output() if VLAN support (@ref ETHARP_SUPPORT_VLAN) is enabled.\n + * Signature: s32_t my_hook_vlan_set(struct netif* netif, struct pbuf* pbuf, const struct eth_addr* src, const struct eth_addr* dst, u16_t eth_type);\n + * Arguments: + * - netif: struct netif that the packet will be sent through + * - p: struct pbuf packet to be sent + * - src: source eth address + * - dst: destination eth address + * - eth_type: ethernet type of the packet to be sent\n + * + * Return values: + * - <0: Packet shall not contain VLAN header. + * - 0 <= return value <= 0xFFFF: Packet shall contain VLAN header. Return value is prio_vid in host byte order. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_VLAN_SET(netif, p, src, dst, eth_type) +#endif + +/** + * LWIP_HOOK_MEMP_AVAILABLE(memp_t_type): + * - called from memp_free() when a memp pool was empty and an item is now available + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_MEMP_AVAILABLE(memp_t_type) +#endif + +/** + * LWIP_HOOK_UNKNOWN_ETH_PROTOCOL(pbuf, netif): + * Called from ethernet_input() when an unknown eth type is encountered. + * Return ERR_OK if packet is accepted, any error code otherwise. + * Payload points to ethernet header! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_UNKNOWN_ETH_PROTOCOL(pbuf, netif) +#endif +/** + * @} + */ + +/* + --------------------------------------- + ---------- Debugging options ---------- + --------------------------------------- +*/ +/** + * @defgroup lwip_opts_debugmsg Debug messages + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_DBG_MIN_LEVEL: After masking, the debug level is + * compared against this value. If it is smaller, then debugging + * messages are written. + * @see debugging_levels + */ +#if !defined LWIP_DBG_MIN_LEVEL || defined __DOXYGEN__ +#define LWIP_DBG_MIN_LEVEL LWIP_DBG_LEVEL_ALL +#endif + +/** + * LWIP_DBG_TYPES_ON: A mask that can be used to globally enable/disable + * debug messages of certain types. + * @see debugging_levels + */ +#if !defined LWIP_DBG_TYPES_ON || defined __DOXYGEN__ +#define LWIP_DBG_TYPES_ON LWIP_DBG_ON +#endif + +/** + * ETHARP_DEBUG: Enable debugging in etharp.c. + */ +#if !defined ETHARP_DEBUG || defined __DOXYGEN__ +#define ETHARP_DEBUG LWIP_DBG_OFF +#endif + +/** + * NETIF_DEBUG: Enable debugging in netif.c. + */ +#if !defined NETIF_DEBUG || defined __DOXYGEN__ +#define NETIF_DEBUG LWIP_DBG_OFF +#endif + +/** + * PBUF_DEBUG: Enable debugging in pbuf.c. + */ +#if !defined PBUF_DEBUG || defined __DOXYGEN__ +#define PBUF_DEBUG LWIP_DBG_OFF +#endif + +/** + * API_LIB_DEBUG: Enable debugging in api_lib.c. + */ +#if !defined API_LIB_DEBUG || defined __DOXYGEN__ +#define API_LIB_DEBUG LWIP_DBG_OFF +#endif + +/** + * API_MSG_DEBUG: Enable debugging in api_msg.c. 
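+ * All *_DEBUG switches in this section work the same way; debug output + * additionally requires LWIP_DEBUG to be defined. A typical lwipopts.h sketch + * to trace this module: + * + * #define LWIP_DEBUG + * #define API_MSG_DEBUG LWIP_DBG_ON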
+ */ +#if !defined API_MSG_DEBUG || defined __DOXYGEN__ +#define API_MSG_DEBUG LWIP_DBG_OFF +#endif + +/** + * SOCKETS_DEBUG: Enable debugging in sockets.c. + */ +#if !defined SOCKETS_DEBUG || defined __DOXYGEN__ +#define SOCKETS_DEBUG LWIP_DBG_OFF +#endif + +/** + * ICMP_DEBUG: Enable debugging in icmp.c. + */ +#if !defined ICMP_DEBUG || defined __DOXYGEN__ +#define ICMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * IGMP_DEBUG: Enable debugging in igmp.c. + */ +#if !defined IGMP_DEBUG || defined __DOXYGEN__ +#define IGMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * INET_DEBUG: Enable debugging in inet.c. + */ +#if !defined INET_DEBUG || defined __DOXYGEN__ +#define INET_DEBUG LWIP_DBG_OFF +#endif + +/** + * IP_DEBUG: Enable debugging for IP. + */ +#if !defined IP_DEBUG || defined __DOXYGEN__ +#define IP_DEBUG LWIP_DBG_OFF +#endif + +/** + * IP_REASS_DEBUG: Enable debugging in ip_frag.c for both frag & reass. + */ +#if !defined IP_REASS_DEBUG || defined __DOXYGEN__ +#define IP_REASS_DEBUG LWIP_DBG_OFF +#endif + +/** + * RAW_DEBUG: Enable debugging in raw.c. + */ +#if !defined RAW_DEBUG || defined __DOXYGEN__ +#define RAW_DEBUG LWIP_DBG_OFF +#endif + +/** + * MEM_DEBUG: Enable debugging in mem.c. + */ +#if !defined MEM_DEBUG || defined __DOXYGEN__ +#define MEM_DEBUG LWIP_DBG_OFF +#endif + +/** + * MEMP_DEBUG: Enable debugging in memp.c. + */ +#if !defined MEMP_DEBUG || defined __DOXYGEN__ +#define MEMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SYS_DEBUG: Enable debugging in sys.c. + */ +#if !defined SYS_DEBUG || defined __DOXYGEN__ +#define SYS_DEBUG LWIP_DBG_OFF +#endif + +/** + * TIMERS_DEBUG: Enable debugging in timers.c. + */ +#if !defined TIMERS_DEBUG || defined __DOXYGEN__ +#define TIMERS_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_DEBUG: Enable debugging for TCP. + */ +#if !defined TCP_DEBUG || defined __DOXYGEN__ +#define TCP_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_INPUT_DEBUG: Enable debugging in tcp_in.c for incoming debug. + */ +#if !defined TCP_INPUT_DEBUG || defined __DOXYGEN__ +#define TCP_INPUT_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_FR_DEBUG: Enable debugging in tcp_in.c for fast retransmit. + */ +#if !defined TCP_FR_DEBUG || defined __DOXYGEN__ +#define TCP_FR_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_RTO_DEBUG: Enable debugging in TCP for retransmit + * timeout. + */ +#if !defined TCP_RTO_DEBUG || defined __DOXYGEN__ +#define TCP_RTO_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_CWND_DEBUG: Enable debugging for TCP congestion window. + */ +#if !defined TCP_CWND_DEBUG || defined __DOXYGEN__ +#define TCP_CWND_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_WND_DEBUG: Enable debugging in tcp_in.c for window updating. + */ +#if !defined TCP_WND_DEBUG || defined __DOXYGEN__ +#define TCP_WND_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_OUTPUT_DEBUG: Enable debugging in tcp_out.c output functions. + */ +#if !defined TCP_OUTPUT_DEBUG || defined __DOXYGEN__ +#define TCP_OUTPUT_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_RST_DEBUG: Enable debugging for TCP with the RST message. + */ +#if !defined TCP_RST_DEBUG || defined __DOXYGEN__ +#define TCP_RST_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_QLEN_DEBUG: Enable debugging for TCP queue lengths. + */ +#if !defined TCP_QLEN_DEBUG || defined __DOXYGEN__ +#define TCP_QLEN_DEBUG LWIP_DBG_OFF +#endif + +/** + * UDP_DEBUG: Enable debugging in UDP. + */ +#if !defined UDP_DEBUG || defined __DOXYGEN__ +#define UDP_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCPIP_DEBUG: Enable debugging in tcpip.c. 
+ */ +#if !defined TCPIP_DEBUG || defined __DOXYGEN__ +#define TCPIP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SLIP_DEBUG: Enable debugging in slipif.c. + */ +#if !defined SLIP_DEBUG || defined __DOXYGEN__ +#define SLIP_DEBUG LWIP_DBG_OFF +#endif + +/** + * DHCP_DEBUG: Enable debugging in dhcp.c. + */ +#if !defined DHCP_DEBUG || defined __DOXYGEN__ +#define DHCP_DEBUG LWIP_DBG_OFF +#endif + +/** + * AUTOIP_DEBUG: Enable debugging in autoip.c. + */ +#if !defined AUTOIP_DEBUG || defined __DOXYGEN__ +#define AUTOIP_DEBUG LWIP_DBG_OFF +#endif + +/** + * DNS_DEBUG: Enable debugging for DNS. + */ +#if !defined DNS_DEBUG || defined __DOXYGEN__ +#define DNS_DEBUG LWIP_DBG_OFF +#endif + +/** + * IP6_DEBUG: Enable debugging for IPv6. + */ +#if !defined IP6_DEBUG || defined __DOXYGEN__ +#define IP6_DEBUG LWIP_DBG_OFF +#endif +/** + * @} + */ + +/* + -------------------------------------------------- + ---------- Performance tracking options ---------- + -------------------------------------------------- +*/ +/** + * @defgroup lwip_opts_perf Performance + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_PERF: Enable performance testing for lwIP + * (if enabled, arch/perf.h is included) + */ +#if !defined LWIP_PERF || defined __DOXYGEN__ +#define LWIP_PERF 0 +#endif +/** + * @} + */ + +#endif /* LWIP_HDR_OPT_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h new file mode 100644 index 000000000..a8b00a0fe --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h @@ -0,0 +1,263 @@ +/** + * @file + * pbuf API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_PBUF_H +#define LWIP_HDR_PBUF_H + +#include "lwip/opt.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** LWIP_SUPPORT_CUSTOM_PBUF==1: Custom pbufs behave much like their pbuf type + * but they are allocated by external code (initialised by calling + * pbuf_alloced_custom()) and when pbuf_free gives up their last reference, they + * are freed by calling pbuf_custom->custom_free_function(). + * Currently, the pbuf_custom code is only needed for one specific configuration + * of IP_FRAG, unless required by external driver/application code. */ +#ifndef LWIP_SUPPORT_CUSTOM_PBUF +#define LWIP_SUPPORT_CUSTOM_PBUF ((IP_FRAG && !LWIP_NETIF_TX_SINGLE_PBUF) || (LWIP_IPV6 && LWIP_IPV6_FRAG)) +#endif + +/* @todo: We need a mechanism to prevent wasting memory in every pbuf + (TCP vs. UDP, IPv4 vs. IPv6: UDP/IPv4 packets may waste up to 28 bytes) */ + +#define PBUF_TRANSPORT_HLEN 20 +#if LWIP_IPV6 +#define PBUF_IP_HLEN 40 +#else +#define PBUF_IP_HLEN 20 +#endif + +/** + * @ingroup pbuf + * Enumeration of pbuf layers + */ +typedef enum { + /** Includes spare room for transport layer header, e.g. UDP header. + * Use this if you intend to pass the pbuf to functions like udp_send(). + */ + PBUF_TRANSPORT, + /** Includes spare room for IP header. + * Use this if you intend to pass the pbuf to functions like raw_send(). + */ + PBUF_IP, + /** Includes spare room for link layer header (ethernet header). + * Use this if you intend to pass the pbuf to functions like ethernet_output(). + * @see PBUF_LINK_HLEN + */ + PBUF_LINK, + /** Includes spare room for additional encapsulation header before ethernet + * headers (e.g. 802.11). + * Use this if you intend to pass the pbuf to functions like netif->linkoutput(). + * @see PBUF_LINK_ENCAPSULATION_HLEN + */ + PBUF_RAW_TX, + /** Use this for input packets in a netif driver when calling netif->input() + * in the most common case - ethernet-layer netif driver. */ + PBUF_RAW +} pbuf_layer; + +/** + * @ingroup pbuf + * Enumeration of pbuf types + */ +typedef enum { + /** pbuf data is stored in RAM, used for TX mostly, struct pbuf and its payload + are allocated in one piece of contiguous memory (so the first payload byte + can be calculated from struct pbuf). + pbuf_alloc() allocates PBUF_RAM pbufs as unchained pbufs (although that might + change in future versions). + This should be used for all OUTGOING packets (TX).*/ + PBUF_RAM, + /** pbuf data is stored in ROM, i.e. struct pbuf and its payload are located in + totally different memory areas. Since it points to ROM, payload does not + have to be copied when queued for transmission. */ + PBUF_ROM, + /** pbuf comes from the pbuf pool. Much like PBUF_ROM but payload might change + so it has to be duplicated when queued before transmitting, depending on + who has a 'ref' to it. */ + PBUF_REF, + /** pbuf payload refers to RAM. This one comes from a pool and should be used + for RX. Payload can be chained (scatter-gather RX) but like PBUF_RAM, struct + pbuf and its payload are allocated in one piece of contiguous memory (so + the first payload byte can be calculated from struct pbuf). + Don't use this for TX, if the pool becomes empty e.g. because of TCP queuing, + you are unable to receive TCP acks! 
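+ * Example (a minimal sketch of the TX/RX split described above; len and + * frame_len are placeholders): + * + * // TX: one contiguous RAM pbuf with room for transport/IP/link headers + * struct pbuf *txp = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + * // RX (in a driver): pool pbuf, possibly chained for large frames + * struct pbuf *rxp = pbuf_alloc(PBUF_RAW, frame_len, PBUF_POOL);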
*/ + PBUF_POOL +} pbuf_type; + + +/** indicates this packet's data should be immediately passed to the application */ +#define PBUF_FLAG_PUSH 0x01U +/** indicates this is a custom pbuf: pbuf_free calls pbuf_custom->custom_free_function() + when the last reference is released (plus custom PBUF_RAM cannot be trimmed) */ +#define PBUF_FLAG_IS_CUSTOM 0x02U +/** indicates this pbuf is UDP multicast to be looped back */ +#define PBUF_FLAG_MCASTLOOP 0x04U +/** indicates this pbuf was received as link-level broadcast */ +#define PBUF_FLAG_LLBCAST 0x08U +/** indicates this pbuf was received as link-level multicast */ +#define PBUF_FLAG_LLMCAST 0x10U +/** indicates this pbuf includes a TCP FIN flag */ +#define PBUF_FLAG_TCP_FIN 0x20U + +/** Main packet buffer struct */ +struct pbuf { + /** next pbuf in singly linked pbuf chain */ + struct pbuf *next; + + /** pointer to the actual data in the buffer */ + void *payload; + + /** + * total length of this buffer and all next buffers in chain + * belonging to the same packet. + * + * For non-queue packet chains this is the invariant: + * p->tot_len == p->len + (p->next? p->next->tot_len: 0) + */ + u16_t tot_len; + + /** length of this buffer */ + u16_t len; + + /** pbuf_type as u8_t instead of enum to save space */ + u8_t /*pbuf_type*/ type; + + /** misc flags */ + u8_t flags; + + /** + * the reference count always equals the number of pointers + * that refer to this pbuf. This can be pointers from an application, + * the stack itself, or pbuf->next pointers from a chain. + */ + u16_t ref; +}; + + +/** Helper struct for const-correctness only. + * The only meaning of this one is to provide a const payload pointer + * for PBUF_ROM type. + */ +struct pbuf_rom { + /** next pbuf in singly linked pbuf chain */ + struct pbuf *next; + + /** pointer to the actual data in the buffer */ + const void *payload; +}; + +#if LWIP_SUPPORT_CUSTOM_PBUF +/** Prototype for a function to free a custom pbuf */ +typedef void (*pbuf_free_custom_fn)(struct pbuf *p); + +/** A custom pbuf: like a pbuf, but following a function pointer to free it. */ +struct pbuf_custom { + /** The actual pbuf */ + struct pbuf pbuf; + /** This function is called when pbuf_free deallocates this pbuf(_custom) */ + pbuf_free_custom_fn custom_free_function; +}; +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + +/** Define this to 0 to prevent freeing ooseq pbufs when the PBUF_POOL is empty */ +#ifndef PBUF_POOL_FREE_OOSEQ +#define PBUF_POOL_FREE_OOSEQ 1 +#endif /* PBUF_POOL_FREE_OOSEQ */ +#if LWIP_TCP && TCP_QUEUE_OOSEQ && NO_SYS && PBUF_POOL_FREE_OOSEQ +extern volatile u8_t pbuf_free_ooseq_pending; +void pbuf_free_ooseq(void); +/** When not using sys_check_timeouts(), call PBUF_CHECK_FREE_OOSEQ() + at regular intervals from main level to check if ooseq pbufs need to be + freed! */ +#define PBUF_CHECK_FREE_OOSEQ() do { if(pbuf_free_ooseq_pending) { \ + /* pbuf_alloc() reported PBUF_POOL to be empty -> try to free some \ + ooseq queued pbufs now */ \ + pbuf_free_ooseq(); }}while(0) +#else /* LWIP_TCP && TCP_QUEUE_OOSEQ && NO_SYS && PBUF_POOL_FREE_OOSEQ */ + /* Otherwise declare an empty PBUF_CHECK_FREE_OOSEQ */ + #define PBUF_CHECK_FREE_OOSEQ() +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && NO_SYS && PBUF_POOL_FREE_OOSEQ*/ + +/* Initializes the pbuf module. This call is empty for now, but may not be in future. 
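+ *
+ * A minimal end-to-end sketch of the pbuf API declared below (illustrative
+ * only; assumes the stack is initialized and does no error handling beyond
+ * the NULL check):
+ *
+ *   const char msg[] = "hello";
+ *   char readback[sizeof(msg)];
+ *   // contiguous RAM pbuf with headroom for transport/IP/link headers
+ *   struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, sizeof(msg), PBUF_RAM);
+ *   if (p != NULL) {
+ *     pbuf_take(p, msg, sizeof(msg));                 // copy data in
+ *     pbuf_copy_partial(p, readback, sizeof(msg), 0); // copy data out
+ *     pbuf_free(p);                                   // drop our reference
+ *   }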
*/ +#define pbuf_init() + +struct pbuf *pbuf_alloc(pbuf_layer l, u16_t length, pbuf_type type); +#if LWIP_SUPPORT_CUSTOM_PBUF +struct pbuf *pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, + struct pbuf_custom *p, void *payload_mem, + u16_t payload_mem_len); +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ +void pbuf_realloc(struct pbuf *p, u16_t size); +u8_t pbuf_header(struct pbuf *p, s16_t header_size); +u8_t pbuf_header_force(struct pbuf *p, s16_t header_size); +void pbuf_ref(struct pbuf *p); +u8_t pbuf_free(struct pbuf *p); +u16_t pbuf_clen(const struct pbuf *p); +void pbuf_cat(struct pbuf *head, struct pbuf *tail); +void pbuf_chain(struct pbuf *head, struct pbuf *tail); +struct pbuf *pbuf_dechain(struct pbuf *p); +err_t pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from); +u16_t pbuf_copy_partial(const struct pbuf *p, void *dataptr, u16_t len, u16_t offset); +err_t pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len); +err_t pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset); +struct pbuf *pbuf_skip(struct pbuf* in, u16_t in_offset, u16_t* out_offset); +struct pbuf *pbuf_coalesce(struct pbuf *p, pbuf_layer layer); +#if LWIP_CHECKSUM_ON_COPY +err_t pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr, + u16_t len, u16_t *chksum); +#endif /* LWIP_CHECKSUM_ON_COPY */ +#if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE +void pbuf_split_64k(struct pbuf *p, struct pbuf **rest); +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + +u8_t pbuf_get_at(const struct pbuf* p, u16_t offset); +int pbuf_try_get_at(const struct pbuf* p, u16_t offset); +void pbuf_put_at(struct pbuf* p, u16_t offset, u8_t data); +u16_t pbuf_memcmp(const struct pbuf* p, u16_t offset, const void* s2, u16_t n); +u16_t pbuf_memfind(const struct pbuf* p, const void* mem, u16_t mem_len, u16_t start_offset); +u16_t pbuf_strstr(const struct pbuf* p, const char* substr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PBUF_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h new file mode 100644 index 000000000..792e28818 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h @@ -0,0 +1,216 @@ +/** + * @file + * netconn API lwIP internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_API_MSG_H +#define LWIP_HDR_API_MSG_H + +#include "lwip/opt.h" + +#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ +/* Note: Netconn API is always available when sockets are enabled - + * sockets are implemented on top of them */ + +#include "lwip/arch.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/sys.h" +#include "lwip/igmp.h" +#include "lwip/api.h" +#include "lwip/priv/tcpip_priv.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_MPU_COMPATIBLE +#if LWIP_NETCONN_SEM_PER_THREAD +#define API_MSG_M_DEF_SEM(m) *m +#else +#define API_MSG_M_DEF_SEM(m) API_MSG_M_DEF(m) +#endif +#else /* LWIP_MPU_COMPATIBLE */ +#define API_MSG_M_DEF_SEM(m) API_MSG_M_DEF(m) +#endif /* LWIP_MPU_COMPATIBLE */ + +/* For the netconn API, these values are used as a bitmask! */ +#define NETCONN_SHUT_RD 1 +#define NETCONN_SHUT_WR 2 +#define NETCONN_SHUT_RDWR (NETCONN_SHUT_RD | NETCONN_SHUT_WR) + +/* IP addresses and port numbers are expected to be in + * the same byte order as in the corresponding pcb. + */ +/** This struct includes everything that is necessary to execute a function + for a netconn in another thread context (mainly used to process netconns + in the tcpip_thread context to be thread safe). */ +struct api_msg { + /** The netconn to process - always needed: it includes the semaphore + which is used to block the application thread until the function has finished. */ + struct netconn *conn; + /** The return value of the function executed in tcpip_thread.
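+ *
+ * Sketch of the round trip, simplified from the netconn front-end
+ * (api_lib.c); 'conn' and 'buf' stand in for the caller's netconn
+ * and netbuf:
+ *
+ *   struct api_msg msg;
+ *   msg.conn = conn;
+ *   msg.msg.b = buf;  // argument for lwip_netconn_do_send()
+ *   tcpip_send_msg_wait_sem(lwip_netconn_do_send, &msg,
+ *                           LWIP_API_MSG_SEM(&msg)); // blocks until done
+ *   return msg.err;   // set by lwip_netconn_do_send() in tcpip_thread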
*/ + err_t err; + /** Depending on the executed function, one of these union members is used */ + union { + /** used for lwip_netconn_do_send */ + struct netbuf *b; + /** used for lwip_netconn_do_newconn */ + struct { + u8_t proto; + } n; + /** used for lwip_netconn_do_bind and lwip_netconn_do_connect */ + struct { + API_MSG_M_DEF_C(ip_addr_t, ipaddr); + u16_t port; + } bc; + /** used for lwip_netconn_do_getaddr */ + struct { + ip_addr_t API_MSG_M_DEF(ipaddr); + u16_t API_MSG_M_DEF(port); + u8_t local; + } ad; + /** used for lwip_netconn_do_write */ + struct { + const void *dataptr; + size_t len; + u8_t apiflags; +#if LWIP_SO_SNDTIMEO + u32_t time_started; +#endif /* LWIP_SO_SNDTIMEO */ + } w; + /** used for lwip_netconn_do_recv */ + struct { + u32_t len; + } r; +#if LWIP_TCP + /** used for lwip_netconn_do_close (/shutdown) */ + struct { + u8_t shut; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + u32_t time_started; +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + u8_t polls_left; +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + } sd; +#endif /* LWIP_TCP */ +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) + /** used for lwip_netconn_do_join_leave_group */ + struct { + API_MSG_M_DEF_C(ip_addr_t, multiaddr); + API_MSG_M_DEF_C(ip_addr_t, netif_addr); + enum netconn_igmp join_or_leave; + } jl; +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ +#if TCP_LISTEN_BACKLOG + struct { + u8_t backlog; + } lb; +#endif /* TCP_LISTEN_BACKLOG */ + } msg; +#if LWIP_NETCONN_SEM_PER_THREAD + sys_sem_t* op_completed_sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ +}; + +#if LWIP_NETCONN_SEM_PER_THREAD +#define LWIP_API_MSG_SEM(msg) ((msg)->op_completed_sem) +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define LWIP_API_MSG_SEM(msg) (&(msg)->conn->op_completed) +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + +#if LWIP_DNS +/** As lwip_netconn_do_gethostbyname requires more arguments but doesn't require a netconn, + it has its own struct (to avoid struct api_msg getting bigger than necessary). + lwip_netconn_do_gethostbyname must be called using tcpip_callback instead of tcpip_apimsg + (see netconn_gethostbyname). */ +struct dns_api_msg { + /** Hostname to query or dotted IP address string */ +#if LWIP_MPU_COMPATIBLE + char name[DNS_MAX_NAME_LENGTH]; +#else /* LWIP_MPU_COMPATIBLE */ + const char *name; +#endif /* LWIP_MPU_COMPATIBLE */ + /** The resolved address is stored here */ + ip_addr_t API_MSG_M_DEF(addr); +#if LWIP_IPV4 && LWIP_IPV6 + /** Type of resolve call */ + u8_t dns_addrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + /** This semaphore is posted when the name is resolved, the application thread + should wait on it. 
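+ *
+ * Typical round trip, sketched from netconn_gethostbyname() for a non-MPU
+ * build where 'name', 'addr', 'err' and 'sem' are pointer members;
+ * 'hostname' stands in for the caller's string:
+ *
+ *   struct dns_api_msg msg;
+ *   ip_addr_t addr; err_t err; sys_sem_t sem;
+ *   sys_sem_new(&sem, 0);
+ *   msg.name = hostname; msg.addr = &addr; msg.err = &err; msg.sem = &sem;
+ *   tcpip_callback(lwip_netconn_do_gethostbyname, &msg);
+ *   sys_arch_sem_wait(&sem, 0);  // posted once addr/err are filled in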
*/ + sys_sem_t API_MSG_M_DEF_SEM(sem); + /** Errors are given back here */ + err_t API_MSG_M_DEF(err); +}; +#endif /* LWIP_DNS */ + +#if LWIP_TCP +extern u8_t netconn_aborted; +#endif /* LWIP_TCP */ + +void lwip_netconn_do_newconn (void *m); +void lwip_netconn_do_delconn (void *m); +void lwip_netconn_do_bind (void *m); +void lwip_netconn_do_connect (void *m); +void lwip_netconn_do_disconnect (void *m); +void lwip_netconn_do_listen (void *m); +void lwip_netconn_do_send (void *m); +void lwip_netconn_do_recv (void *m); +#if TCP_LISTEN_BACKLOG +void lwip_netconn_do_accepted (void *m); +#endif /* TCP_LISTEN_BACKLOG */ +void lwip_netconn_do_write (void *m); +void lwip_netconn_do_getaddr (void *m); +void lwip_netconn_do_close (void *m); +void lwip_netconn_do_shutdown (void *m); +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +void lwip_netconn_do_join_leave_group(void *m); +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +void lwip_netconn_do_gethostbyname(void *arg); +#endif /* LWIP_DNS */ + +struct netconn* netconn_alloc(enum netconn_type t, netconn_callback callback); +void netconn_free(struct netconn *conn); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#endif /* LWIP_HDR_API_MSG_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h new file mode 100644 index 000000000..e4a765501 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h @@ -0,0 +1,183 @@ +/** + * @file + * memory pools lwIP internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_MEMP_PRIV_H +#define LWIP_HDR_MEMP_PRIV_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "lwip/mem.h" + +#if MEMP_OVERFLOW_CHECK +/* if MEMP_OVERFLOW_CHECK is turned on, we reserve some bytes at the beginning + * and at the end of each element, initialize them as 0xcd and check + * them later. */ +/* If MEMP_OVERFLOW_CHECK is >= 2, on every call to memp_malloc or memp_free, + * every single element in each pool is checked! + * This is VERY SLOW but also very helpful. */ +/* MEMP_SANITY_REGION_BEFORE and MEMP_SANITY_REGION_AFTER can be overridden in + * lwipopts.h to change the amount reserved for checking. */ +#ifndef MEMP_SANITY_REGION_BEFORE +#define MEMP_SANITY_REGION_BEFORE 16 +#endif /* MEMP_SANITY_REGION_BEFORE*/ +#if MEMP_SANITY_REGION_BEFORE > 0 +#define MEMP_SANITY_REGION_BEFORE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEMP_SANITY_REGION_BEFORE) +#else +#define MEMP_SANITY_REGION_BEFORE_ALIGNED 0 +#endif /* MEMP_SANITY_REGION_BEFORE*/ +#ifndef MEMP_SANITY_REGION_AFTER +#define MEMP_SANITY_REGION_AFTER 16 +#endif /* MEMP_SANITY_REGION_AFTER*/ +#if MEMP_SANITY_REGION_AFTER > 0 +#define MEMP_SANITY_REGION_AFTER_ALIGNED LWIP_MEM_ALIGN_SIZE(MEMP_SANITY_REGION_AFTER) +#else +#define MEMP_SANITY_REGION_AFTER_ALIGNED 0 +#endif /* MEMP_SANITY_REGION_AFTER*/ + +/* MEMP_SIZE: save space for struct memp and for sanity check */ +#define MEMP_SIZE (LWIP_MEM_ALIGN_SIZE(sizeof(struct memp)) + MEMP_SANITY_REGION_BEFORE_ALIGNED) +#define MEMP_ALIGN_SIZE(x) (LWIP_MEM_ALIGN_SIZE(x) + MEMP_SANITY_REGION_AFTER_ALIGNED) + +#else /* MEMP_OVERFLOW_CHECK */ + +/* No sanity checks + * We don't need to preserve the struct memp while not allocated, so we + * can save a little space and set MEMP_SIZE to 0. 
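+ *
+ * For comparison, with MEMP_OVERFLOW_CHECK enabled each element is laid
+ * out as
+ *
+ *   [struct memp][0xcd region][user data][0xcd region]
+ *
+ * where MEMP_SIZE covers the first two parts and MEMP_ALIGN_SIZE(x) the
+ * last two, so both regions can be verified when the element is freed.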
+ */ +#define MEMP_SIZE 0 +#define MEMP_ALIGN_SIZE(x) (LWIP_MEM_ALIGN_SIZE(x)) + +#endif /* MEMP_OVERFLOW_CHECK */ + +#if !MEMP_MEM_MALLOC || MEMP_OVERFLOW_CHECK +struct memp { + struct memp *next; +#if MEMP_OVERFLOW_CHECK + const char *file; + int line; +#endif /* MEMP_OVERFLOW_CHECK */ +}; +#endif /* !MEMP_MEM_MALLOC || MEMP_OVERFLOW_CHECK */ + +#if MEM_USE_POOLS && MEMP_USE_CUSTOM_POOLS +/* Use a helper type to get the start and end of the user "memory pools" for mem_malloc */ +typedef enum { + /* Get the first (via: + MEMP_POOL_HELPER_START = ((u8_t) 1*MEMP_POOL_A + 0*MEMP_POOL_B + 0*MEMP_POOL_C + 0)*/ + MEMP_POOL_HELPER_FIRST = ((u8_t) +#define LWIP_MEMPOOL(name,num,size,desc) +#define LWIP_MALLOC_MEMPOOL_START 1 +#define LWIP_MALLOC_MEMPOOL(num, size) * MEMP_POOL_##size + 0 +#define LWIP_MALLOC_MEMPOOL_END +#include "lwip/priv/memp_std.h" + ) , + /* Get the last (via: + MEMP_POOL_HELPER_END = ((u8_t) 0 + MEMP_POOL_A*0 + MEMP_POOL_B*0 + MEMP_POOL_C*1) */ + MEMP_POOL_HELPER_LAST = ((u8_t) +#define LWIP_MEMPOOL(name,num,size,desc) +#define LWIP_MALLOC_MEMPOOL_START +#define LWIP_MALLOC_MEMPOOL(num, size) 0 + MEMP_POOL_##size * +#define LWIP_MALLOC_MEMPOOL_END 1 +#include "lwip/priv/memp_std.h" + ) +} memp_pool_helper_t; + +/* The actual start and stop values are here (cast them over) + We use this helper type and these defines so we can avoid using const memp_t values */ +#define MEMP_POOL_FIRST ((memp_t) MEMP_POOL_HELPER_FIRST) +#define MEMP_POOL_LAST ((memp_t) MEMP_POOL_HELPER_LAST) +#endif /* MEM_USE_POOLS && MEMP_USE_CUSTOM_POOLS */ + +/** Memory pool descriptor */ +struct memp_desc { +#if defined(LWIP_DEBUG) || MEMP_OVERFLOW_CHECK || LWIP_STATS_DISPLAY + /** Textual description */ + const char *desc; +#endif /* LWIP_DEBUG || MEMP_OVERFLOW_CHECK || LWIP_STATS_DISPLAY */ +#if MEMP_STATS + /** Statistics */ + struct stats_mem *stats; +#endif + + /** Element size */ + u16_t size; + +#if !MEMP_MEM_MALLOC + /** Number of elements */ + u16_t num; + + /** Base address */ + u8_t *base; + + /** First free element of each pool. Elements form a linked list. 
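+ *
+ * Allocation is a plain freelist pop; a sketch of what memp_malloc_pool()
+ * does once stats and overflow checks are stripped away:
+ *
+ *   struct memp *m = *desc->tab;
+ *   if (m != NULL) {
+ *     *desc->tab = m->next;
+ *     return (u8_t *)m + MEMP_SIZE;  // skip the bookkeeping header
+ *   }
+ *   return NULL;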
*/ + struct memp **tab; +#endif /* MEMP_MEM_MALLOC */ +}; + +#if defined(LWIP_DEBUG) || MEMP_OVERFLOW_CHECK || LWIP_STATS_DISPLAY +#define DECLARE_LWIP_MEMPOOL_DESC(desc) (desc), +#else +#define DECLARE_LWIP_MEMPOOL_DESC(desc) +#endif + +#if MEMP_STATS +#define LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(name) static struct stats_mem name; +#define LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(name) &name, +#else +#define LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(name) +#define LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(name) +#endif + +void memp_init_pool(const struct memp_desc *desc); + +#if MEMP_OVERFLOW_CHECK +void *memp_malloc_pool_fn(const struct memp_desc* desc, const char* file, const int line); +#define memp_malloc_pool(d) memp_malloc_pool_fn((d), __FILE__, __LINE__) +#else +void *memp_malloc_pool(const struct memp_desc *desc); +#endif +void memp_free_pool(const struct memp_desc* desc, void *mem); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEMP_PRIV_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h new file mode 100644 index 000000000..fb91202dc --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h @@ -0,0 +1,146 @@ +/** + * @file + * lwIP internal memory pools (do not use in application code) + * This file is deliberately included multiple times: once with empty + * definition of LWIP_MEMPOOL() to handle all includes and multiple times + * to build up various lists of mem pools. + */ + +/* + * SETUP: Make sure we define everything we will need. + * + * We create three types of pools: + * 1) MEMPOOL - standard pools + * 2) MALLOC_MEMPOOL - to be used by mem_malloc in mem.c + * 3) PBUF_MEMPOOL - a mempool of pbuf's, so include space for the pbuf struct + * + * If the includer doesn't require any special treatment of the types above, + * this file will declare #2 & #3 to be just standard mempools. + */ +#ifndef LWIP_MALLOC_MEMPOOL +/* This treats "malloc pools" just like any other pool. + The pools are a little bigger to provide 'size' as the amount of user data. */ +#define LWIP_MALLOC_MEMPOOL(num, size) LWIP_MEMPOOL(POOL_##size, num, (size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper))), "MALLOC_"#size) +#define LWIP_MALLOC_MEMPOOL_START +#define LWIP_MALLOC_MEMPOOL_END +#endif /* LWIP_MALLOC_MEMPOOL */ + +#ifndef LWIP_PBUF_MEMPOOL +/* This treats "pbuf pools" just like any other pool. + * Allocates buffers for a pbuf struct AND a payload size */ +#define LWIP_PBUF_MEMPOOL(name, num, payload, desc) LWIP_MEMPOOL(name, num, (MEMP_ALIGN_SIZE(sizeof(struct pbuf)) + MEMP_ALIGN_SIZE(payload)), desc) +#endif /* LWIP_PBUF_MEMPOOL */ + + +/* + * A list of internal pools used by LWIP. + * + * LWIP_MEMPOOL(pool_name, number_elements, element_size, pool_description) + * creates a pool named MEMP_pool_name.
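+ *
+ * Each consumer re-includes this file under its own definition of
+ * LWIP_MEMPOOL(); e.g. memp.h builds the memp_t enum roughly like this
+ * (an illustrative sketch, simplified from upstream memp.h):
+ *
+ *   typedef enum {
+ *   #define LWIP_MEMPOOL(name,num,size,desc) MEMP_##name,
+ *   #include "lwip/priv/memp_std.h"
+ *     MEMP_MAX
+ *   } memp_t;
+ *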
description is used in stats.c + */ +#if LWIP_RAW +LWIP_MEMPOOL(RAW_PCB, MEMP_NUM_RAW_PCB, sizeof(struct raw_pcb), "RAW_PCB") +#endif /* LWIP_RAW */ + +#if LWIP_UDP +LWIP_MEMPOOL(UDP_PCB, MEMP_NUM_UDP_PCB, sizeof(struct udp_pcb), "UDP_PCB") +#endif /* LWIP_UDP */ + +#if LWIP_TCP +LWIP_MEMPOOL(TCP_PCB, MEMP_NUM_TCP_PCB, sizeof(struct tcp_pcb), "TCP_PCB") +LWIP_MEMPOOL(TCP_PCB_LISTEN, MEMP_NUM_TCP_PCB_LISTEN, sizeof(struct tcp_pcb_listen), "TCP_PCB_LISTEN") +LWIP_MEMPOOL(TCP_SEG, MEMP_NUM_TCP_SEG, sizeof(struct tcp_seg), "TCP_SEG") +#endif /* LWIP_TCP */ + +#if LWIP_IPV4 && IP_REASSEMBLY +LWIP_MEMPOOL(REASSDATA, MEMP_NUM_REASSDATA, sizeof(struct ip_reassdata), "REASSDATA") +#endif /* LWIP_IPV4 && IP_REASSEMBLY */ +#if (IP_FRAG && !LWIP_NETIF_TX_SINGLE_PBUF) || (LWIP_IPV6 && LWIP_IPV6_FRAG) +LWIP_MEMPOOL(FRAG_PBUF, MEMP_NUM_FRAG_PBUF, sizeof(struct pbuf_custom_ref),"FRAG_PBUF") +#endif /* IP_FRAG && !LWIP_NETIF_TX_SINGLE_PBUF || (LWIP_IPV6 && LWIP_IPV6_FRAG) */ + +#if LWIP_NETCONN || LWIP_SOCKET +LWIP_MEMPOOL(NETBUF, MEMP_NUM_NETBUF, sizeof(struct netbuf), "NETBUF") +LWIP_MEMPOOL(NETCONN, MEMP_NUM_NETCONN, sizeof(struct netconn), "NETCONN") +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#if NO_SYS==0 +LWIP_MEMPOOL(TCPIP_MSG_API, MEMP_NUM_TCPIP_MSG_API, sizeof(struct tcpip_msg), "TCPIP_MSG_API") +#if LWIP_MPU_COMPATIBLE +LWIP_MEMPOOL(API_MSG, MEMP_NUM_API_MSG, sizeof(struct api_msg), "API_MSG") +#if LWIP_DNS +LWIP_MEMPOOL(DNS_API_MSG, MEMP_NUM_DNS_API_MSG, sizeof(struct dns_api_msg), "DNS_API_MSG") +#endif +#if LWIP_SOCKET && !LWIP_TCPIP_CORE_LOCKING +LWIP_MEMPOOL(SOCKET_SETGETSOCKOPT_DATA, MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA, sizeof(struct lwip_setgetsockopt_data), "SOCKET_SETGETSOCKOPT_DATA") +#endif +#if LWIP_NETIF_API +LWIP_MEMPOOL(NETIFAPI_MSG, MEMP_NUM_NETIFAPI_MSG, sizeof(struct netifapi_msg), "NETIFAPI_MSG") +#endif +#endif /* LWIP_MPU_COMPATIBLE */ +#if !LWIP_TCPIP_CORE_LOCKING_INPUT +LWIP_MEMPOOL(TCPIP_MSG_INPKT,MEMP_NUM_TCPIP_MSG_INPKT, sizeof(struct tcpip_msg), "TCPIP_MSG_INPKT") +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ +#endif /* NO_SYS==0 */ + +#if LWIP_IPV4 && LWIP_ARP && ARP_QUEUEING +LWIP_MEMPOOL(ARP_QUEUE, MEMP_NUM_ARP_QUEUE, sizeof(struct etharp_q_entry), "ARP_QUEUE") +#endif /* LWIP_IPV4 && LWIP_ARP && ARP_QUEUEING */ + +#if LWIP_IGMP +LWIP_MEMPOOL(IGMP_GROUP, MEMP_NUM_IGMP_GROUP, sizeof(struct igmp_group), "IGMP_GROUP") +#endif /* LWIP_IGMP */ + +#if LWIP_TIMERS && !LWIP_TIMERS_CUSTOM +LWIP_MEMPOOL(SYS_TIMEOUT, MEMP_NUM_SYS_TIMEOUT, sizeof(struct sys_timeo), "SYS_TIMEOUT") +#endif /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ + +#if LWIP_DNS && LWIP_SOCKET +LWIP_MEMPOOL(NETDB, MEMP_NUM_NETDB, NETDB_ELEM_SIZE, "NETDB") +#endif /* LWIP_DNS && LWIP_SOCKET */ +#if LWIP_DNS && DNS_LOCAL_HOSTLIST && DNS_LOCAL_HOSTLIST_IS_DYNAMIC +LWIP_MEMPOOL(LOCALHOSTLIST, MEMP_NUM_LOCALHOSTLIST, LOCALHOSTLIST_ELEM_SIZE, "LOCALHOSTLIST") +#endif /* LWIP_DNS && DNS_LOCAL_HOSTLIST && DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +#if LWIP_IPV6 && LWIP_ND6_QUEUEING +LWIP_MEMPOOL(ND6_QUEUE, MEMP_NUM_ND6_QUEUE, sizeof(struct nd6_q_entry), "ND6_QUEUE") +#endif /* LWIP_IPV6 && LWIP_ND6_QUEUEING */ + +#if LWIP_IPV6 && LWIP_IPV6_REASS +LWIP_MEMPOOL(IP6_REASSDATA, MEMP_NUM_REASSDATA, sizeof(struct ip6_reassdata), "IP6_REASSDATA") +#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ + +#if LWIP_IPV6 && LWIP_IPV6_MLD +LWIP_MEMPOOL(MLD6_GROUP, MEMP_NUM_MLD6_GROUP, sizeof(struct mld_group), "MLD6_GROUP") +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + + +/* + * A list of pools of pbuf's used by LWIP. 
+ * + * LWIP_PBUF_MEMPOOL(pool_name, number_elements, pbuf_payload_size, pool_description) + * creates a pool name MEMP_pool_name. description is used in stats.c + * This allocates enough space for the pbuf struct and a payload. + * (Example: pbuf_payload_size=0 allocates only size for the struct) + */ +LWIP_PBUF_MEMPOOL(PBUF, MEMP_NUM_PBUF, 0, "PBUF_REF/ROM") +LWIP_PBUF_MEMPOOL(PBUF_POOL, PBUF_POOL_SIZE, PBUF_POOL_BUFSIZE, "PBUF_POOL") + + +/* + * Allow for user-defined pools; this must be explicitly set in lwipopts.h + * since the default is to NOT look for lwippools.h + */ +#if MEMP_USE_CUSTOM_POOLS +#include "lwippools.h" +#endif /* MEMP_USE_CUSTOM_POOLS */ + +/* + * REQUIRED CLEANUP: Clear up so we don't get "multiply defined" error later + * (#undef is ignored for something that is not defined) + */ +#undef LWIP_MEMPOOL +#undef LWIP_MALLOC_MEMPOOL +#undef LWIP_MALLOC_MEMPOOL_START +#undef LWIP_MALLOC_MEMPOOL_END +#undef LWIP_PBUF_MEMPOOL diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h new file mode 100644 index 000000000..3377ddef0 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h @@ -0,0 +1,144 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ND6_PRIV_H +#define LWIP_HDR_ND6_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" + + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ND6_QUEUEING +/** struct for queueing outgoing packets for unknown address + * defined here to be accessed by memp.h + */ +struct nd6_q_entry { + struct nd6_q_entry *next; + struct pbuf *p; +}; +#endif /* LWIP_ND6_QUEUEING */ + +/** Struct for tables. */ +struct nd6_neighbor_cache_entry { + ip6_addr_t next_hop_address; + struct netif *netif; + u8_t lladdr[NETIF_MAX_HWADDR_LEN]; + /*u32_t pmtu;*/ +#if LWIP_ND6_QUEUEING + /** Pointer to queue of pending outgoing packets on this entry. */ + struct nd6_q_entry *q; +#else /* LWIP_ND6_QUEUEING */ + /** Pointer to a single pending outgoing packet on this entry. */ + struct pbuf *q; +#endif /* LWIP_ND6_QUEUEING */ + u8_t state; + u8_t isrouter; + union { + u32_t reachable_time; /* in ms since value may originate from network packet */ + u32_t delay_time; /* ticks (ND6_TMR_INTERVAL) */ + u32_t probes_sent; + u32_t stale_time; /* ticks (ND6_TMR_INTERVAL) */ + } counter; +}; + +struct nd6_destination_cache_entry { + ip6_addr_t destination_addr; + ip6_addr_t next_hop_addr; + u16_t pmtu; + u32_t age; +}; + +struct nd6_prefix_list_entry { + ip6_addr_t prefix; + struct netif *netif; + u32_t invalidation_timer; /* in ms since value may originate from network packet */ +#if LWIP_IPV6_AUTOCONFIG + u8_t flags; +#define ND6_PREFIX_AUTOCONFIG_AUTONOMOUS 0x01 +#define ND6_PREFIX_AUTOCONFIG_ADDRESS_GENERATED 0x02 +#define ND6_PREFIX_AUTOCONFIG_ADDRESS_DUPLICATE 0x04 +#endif /* LWIP_IPV6_AUTOCONFIG */ +}; + +struct nd6_router_list_entry { + struct nd6_neighbor_cache_entry *neighbor_entry; + u32_t invalidation_timer; /* in ms since value may originate from network packet */ + u8_t flags; +}; + +enum nd6_neighbor_cache_entry_state { + ND6_NO_ENTRY = 0, + ND6_INCOMPLETE, + ND6_REACHABLE, + ND6_STALE, + ND6_DELAY, + ND6_PROBE +}; + +/* Router tables. */ +/* @todo make these static? and entries accessible through API? */ +extern struct nd6_neighbor_cache_entry neighbor_cache[]; +extern struct nd6_destination_cache_entry destination_cache[]; +extern struct nd6_prefix_list_entry prefix_list[]; +extern struct nd6_router_list_entry default_router_list[]; + +/* Default values, can be updated by a RA message. */ +extern u32_t reachable_time; +extern u32_t retrans_timer; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_ND6_PRIV_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h new file mode 100644 index 000000000..7ae37e475 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h @@ -0,0 +1,507 @@ +/** + * @file + * TCP internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCP_PRIV_H +#define LWIP_HDR_TCP_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcp.h" +#include "lwip/mem.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/icmp.h" +#include "lwip/err.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/prot/tcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Functions for interfacing with TCP: */ + +/* Lower layer interface to TCP: */ +void tcp_init (void); /* Initialize this module. */ +void tcp_tmr (void); /* Must be called every + TCP_TMR_INTERVAL + ms. (Typically 250 ms). */ +/* It is also possible to call these two functions at the right + intervals (instead of calling tcp_tmr()). */ +void tcp_slowtmr (void); +void tcp_fasttmr (void); + +/* Call this from a netif driver (watch out for threading issues!) that has + returned a memory error on transmit and now has free buffers to send more. + This iterates all active pcbs that had an error and tries to call + tcp_output, so use this with care as it might slow down the system. */ +void tcp_txnow (void); + +/* Only used by IP to pass a TCP segment to TCP: */ +void tcp_input (struct pbuf *p, struct netif *inp); +/* Used within the TCP code only: */ +struct tcp_pcb * tcp_alloc (u8_t prio); +void tcp_abandon (struct tcp_pcb *pcb, int reset); +err_t tcp_send_empty_ack(struct tcp_pcb *pcb); +void tcp_rexmit (struct tcp_pcb *pcb); +void tcp_rexmit_rto (struct tcp_pcb *pcb); +void tcp_rexmit_fast (struct tcp_pcb *pcb); +u32_t tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb); +err_t tcp_process_refused_data(struct tcp_pcb *pcb); + +/** + * This is the Nagle algorithm: try to combine user data to send as few TCP + * segments as possible. 
Only send if + * - no previously transmitted data on the connection remains unacknowledged or + * - the TF_NODELAY flag is set (nagle algorithm turned off for this pcb) or + * - the only unsent segment is at least pcb->mss bytes long (or there is more + * than one unsent segment - with lwIP, this can happen although unsent->len < mss) + * - or if we are in fast-retransmit (TF_INFR) + */ +#define tcp_do_output_nagle(tpcb) ((((tpcb)->unacked == NULL) || \ + ((tpcb)->flags & (TF_NODELAY | TF_INFR)) || \ + (((tpcb)->unsent != NULL) && (((tpcb)->unsent->next != NULL) || \ + ((tpcb)->unsent->len >= (tpcb)->mss))) || \ + ((tcp_sndbuf(tpcb) == 0) || (tcp_sndqueuelen(tpcb) >= TCP_SND_QUEUELEN)) \ + ) ? 1 : 0) +#define tcp_output_nagle(tpcb) (tcp_do_output_nagle(tpcb) ? tcp_output(tpcb) : ERR_OK) + + +#define TCP_SEQ_LT(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) < 0) +#define TCP_SEQ_LEQ(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) <= 0) +#define TCP_SEQ_GT(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) > 0) +#define TCP_SEQ_GEQ(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) >= 0) +/* is b<=a<=c? */ +#if 0 /* see bug #10548 */ +#define TCP_SEQ_BETWEEN(a,b,c) ((c)-(b) >= (a)-(b)) +#endif +#define TCP_SEQ_BETWEEN(a,b,c) (TCP_SEQ_GEQ(a,b) && TCP_SEQ_LEQ(a,c)) + +#ifndef TCP_TMR_INTERVAL +#define TCP_TMR_INTERVAL 250 /* The TCP timer interval in milliseconds. */ +#endif /* TCP_TMR_INTERVAL */ + +#ifndef TCP_FAST_INTERVAL +#define TCP_FAST_INTERVAL TCP_TMR_INTERVAL /* the fine grained timeout in milliseconds */ +#endif /* TCP_FAST_INTERVAL */ + +#ifndef TCP_SLOW_INTERVAL +#define TCP_SLOW_INTERVAL (2*TCP_TMR_INTERVAL) /* the coarse grained timeout in milliseconds */ +#endif /* TCP_SLOW_INTERVAL */ + +#define TCP_FIN_WAIT_TIMEOUT 20000 /* milliseconds */ +#define TCP_SYN_RCVD_TIMEOUT 20000 /* milliseconds */ + +#define TCP_OOSEQ_TIMEOUT 6U /* x RTO */ + +#ifndef TCP_MSL +#define TCP_MSL 60000UL /* The maximum segment lifetime in milliseconds */ +#endif + +/* Keepalive values, compliant with RFC 1122. Don't change this unless you know what you're doing */ +#ifndef TCP_KEEPIDLE_DEFAULT +#define TCP_KEEPIDLE_DEFAULT 7200000UL /* Default KEEPALIVE timer in milliseconds */ +#endif + +#ifndef TCP_KEEPINTVL_DEFAULT +#define TCP_KEEPINTVL_DEFAULT 75000UL /* Default Time between KEEPALIVE probes in milliseconds */ +#endif + +#ifndef TCP_KEEPCNT_DEFAULT +#define TCP_KEEPCNT_DEFAULT 9U /* Default Counter for KEEPALIVE probes */ +#endif + +#define TCP_MAXIDLE TCP_KEEPCNT_DEFAULT * TCP_KEEPINTVL_DEFAULT /* Maximum KEEPALIVE probe time */ + +#define TCP_TCPLEN(seg) ((seg)->len + (((TCPH_FLAGS((seg)->tcphdr) & (TCP_FIN | TCP_SYN)) != 0) ? 1U : 0U)) + +/** Flags used on input processing, not on pcb->flags +*/ +#define TF_RESET (u8_t)0x08U /* Connection was reset. */ +#define TF_CLOSED (u8_t)0x10U /* Connection was successfully closed. */ +#define TF_GOT_FIN (u8_t)0x20U /* Connection was closed by the remote end. 
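+ *
+ * Aside: the TCP_SEQ_*() helpers above compare in modulo-2^32 arithmetic,
+ * so e.g. TCP_SEQ_LT(0xFFFFFFF0, 0x10) is true across the sequence-number
+ * wrap, because (s32_t)(0xFFFFFFF0 - 0x10) is negative.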
*/ + + +#if LWIP_EVENT_API + +#define TCP_EVENT_ACCEPT(lpcb,pcb,arg,err,ret) ret = lwip_tcp_event(arg, (pcb),\ + LWIP_EVENT_ACCEPT, NULL, 0, err) +#define TCP_EVENT_SENT(pcb,space,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_SENT, NULL, space, ERR_OK) +#define TCP_EVENT_RECV(pcb,p,err,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_RECV, (p), 0, (err)) +#define TCP_EVENT_CLOSED(pcb,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_RECV, NULL, 0, ERR_OK) +#define TCP_EVENT_CONNECTED(pcb,err,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_CONNECTED, NULL, 0, (err)) +#define TCP_EVENT_POLL(pcb,ret) do { if ((pcb)->state != SYN_RCVD) { \ + ret = lwip_tcp_event((pcb)->callback_arg, (pcb), LWIP_EVENT_POLL, NULL, 0, ERR_OK); \ + } else { \ + ret = ERR_ARG; } } while(0) +#define TCP_EVENT_ERR(last_state,errf,arg,err) do { if (last_state != SYN_RCVD) { \ + lwip_tcp_event((arg), NULL, LWIP_EVENT_ERR, NULL, 0, (err)); } } while(0) + +#else /* LWIP_EVENT_API */ + +#define TCP_EVENT_ACCEPT(lpcb,pcb,arg,err,ret) \ + do { \ + if((lpcb)->accept != NULL) \ + (ret) = (lpcb)->accept((arg),(pcb),(err)); \ + else (ret) = ERR_ARG; \ + } while (0) + +#define TCP_EVENT_SENT(pcb,space,ret) \ + do { \ + if((pcb)->sent != NULL) \ + (ret) = (pcb)->sent((pcb)->callback_arg,(pcb),(space)); \ + else (ret) = ERR_OK; \ + } while (0) + +#define TCP_EVENT_RECV(pcb,p,err,ret) \ + do { \ + if((pcb)->recv != NULL) { \ + (ret) = (pcb)->recv((pcb)->callback_arg,(pcb),(p),(err));\ + } else { \ + (ret) = tcp_recv_null(NULL, (pcb), (p), (err)); \ + } \ + } while (0) + +#define TCP_EVENT_CLOSED(pcb,ret) \ + do { \ + if(((pcb)->recv != NULL)) { \ + (ret) = (pcb)->recv((pcb)->callback_arg,(pcb),NULL,ERR_OK);\ + } else { \ + (ret) = ERR_OK; \ + } \ + } while (0) + +#define TCP_EVENT_CONNECTED(pcb,err,ret) \ + do { \ + if((pcb)->connected != NULL) \ + (ret) = (pcb)->connected((pcb)->callback_arg,(pcb),(err)); \ + else (ret) = ERR_OK; \ + } while (0) + +#define TCP_EVENT_POLL(pcb,ret) \ + do { \ + if((pcb)->poll != NULL) \ + (ret) = (pcb)->poll((pcb)->callback_arg,(pcb)); \ + else (ret) = ERR_OK; \ + } while (0) + +#define TCP_EVENT_ERR(last_state,errf,arg,err) \ + do { \ + LWIP_UNUSED_ARG(last_state); \ + if((errf) != NULL) \ + (errf)((arg),(err)); \ + } while (0) + +#endif /* LWIP_EVENT_API */ + +/** Enabled extra-check for TCP_OVERSIZE if LWIP_DEBUG is enabled */ +#if TCP_OVERSIZE && defined(LWIP_DEBUG) +#define TCP_OVERSIZE_DBGCHECK 1 +#else +#define TCP_OVERSIZE_DBGCHECK 0 +#endif + +/** Don't generate checksum on copy if CHECKSUM_GEN_TCP is disabled */ +#define TCP_CHECKSUM_ON_COPY (LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_TCP) + +/* This structure represents a TCP segment on the unsent, unacked and ooseq queues */ +struct tcp_seg { + struct tcp_seg *next; /* used when putting segments on a queue */ + struct pbuf *p; /* buffer containing data + TCP header */ + u16_t len; /* the TCP length of this segment */ +#if TCP_OVERSIZE_DBGCHECK + u16_t oversize_left; /* Extra bytes available at the end of the last + pbuf in unsent (used for asserting vs. + tcp_pcb.unsent_oversize only) */ +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + u16_t chksum; + u8_t chksum_swapped; +#endif /* TCP_CHECKSUM_ON_COPY */ + u8_t flags; +#define TF_SEG_OPTS_MSS (u8_t)0x01U /* Include MSS option. */ +#define TF_SEG_OPTS_TS (u8_t)0x02U /* Include timestamp option. 
*/ +#define TF_SEG_DATA_CHECKSUMMED (u8_t)0x04U /* ALL data (not the header) is + checksummed into 'chksum' */ +#define TF_SEG_OPTS_WND_SCALE (u8_t)0x08U /* Include WND SCALE option */ + struct tcp_hdr *tcphdr; /* the TCP header */ +}; + +#define LWIP_TCP_OPT_EOL 0 +#define LWIP_TCP_OPT_NOP 1 +#define LWIP_TCP_OPT_MSS 2 +#define LWIP_TCP_OPT_WS 3 +#define LWIP_TCP_OPT_TS 8 + +#define LWIP_TCP_OPT_LEN_MSS 4 +#if LWIP_TCP_TIMESTAMPS +#define LWIP_TCP_OPT_LEN_TS 10 +#define LWIP_TCP_OPT_LEN_TS_OUT 12 /* aligned for output (includes NOP padding) */ +#else +#define LWIP_TCP_OPT_LEN_TS_OUT 0 +#endif +#if LWIP_WND_SCALE +#define LWIP_TCP_OPT_LEN_WS 3 +#define LWIP_TCP_OPT_LEN_WS_OUT 4 /* aligned for output (includes NOP padding) */ +#else +#define LWIP_TCP_OPT_LEN_WS_OUT 0 +#endif + +#define LWIP_TCP_OPT_LENGTH(flags) \ + (flags & TF_SEG_OPTS_MSS ? LWIP_TCP_OPT_LEN_MSS : 0) + \ + (flags & TF_SEG_OPTS_TS ? LWIP_TCP_OPT_LEN_TS_OUT : 0) + \ + (flags & TF_SEG_OPTS_WND_SCALE ? LWIP_TCP_OPT_LEN_WS_OUT : 0) + +/** This returns a TCP header option for MSS in an u32_t */ +#define TCP_BUILD_MSS_OPTION(mss) lwip_htonl(0x02040000 | ((mss) & 0xFFFF)) + +#if LWIP_WND_SCALE +#define TCPWNDSIZE_F U32_F +#define TCPWND_MAX 0xFFFFFFFFU +#define TCPWND_CHECK16(x) LWIP_ASSERT("window size > 0xFFFF", (x) <= 0xFFFF) +#define TCPWND_MIN16(x) ((u16_t)LWIP_MIN((x), 0xFFFF)) +#else /* LWIP_WND_SCALE */ +#define TCPWNDSIZE_F U16_F +#define TCPWND_MAX 0xFFFFU +#define TCPWND_CHECK16(x) +#define TCPWND_MIN16(x) x +#endif /* LWIP_WND_SCALE */ + +/* Global variables: */ +extern struct tcp_pcb *tcp_input_pcb; +extern u32_t tcp_ticks; +extern u8_t tcp_active_pcbs_changed; + +/* The TCP PCB lists. */ +union tcp_listen_pcbs_t { /* List of all TCP PCBs in LISTEN state. */ + struct tcp_pcb_listen *listen_pcbs; + struct tcp_pcb *pcbs; +}; +extern struct tcp_pcb *tcp_bound_pcbs; +extern union tcp_listen_pcbs_t tcp_listen_pcbs; +extern struct tcp_pcb *tcp_active_pcbs; /* List of all TCP PCBs that are in a + state in which they accept or send + data. */ +extern struct tcp_pcb *tcp_tw_pcbs; /* List of all TCP PCBs in TIME-WAIT. */ + +#define NUM_TCP_PCB_LISTS_NO_TIME_WAIT 3 +#define NUM_TCP_PCB_LISTS 4 +extern struct tcp_pcb ** const tcp_pcb_lists[NUM_TCP_PCB_LISTS]; + +/* Axioms about the above lists: + 1) Every TCP PCB that is not CLOSED is in one of the lists. + 2) A PCB is only in one of the lists. + 3) All PCBs in the tcp_listen_pcbs list are in LISTEN state. + 4) All PCBs in the tcp_tw_pcbs list are in TIME-WAIT state. +*/ +/* Define two macros, TCP_REG and TCP_RMV, that register a TCP PCB + with a PCB list or remove a PCB from a list, respectively.
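+
+   Usage sketch (mirroring tcp.c; illustrative):
+
+     TCP_REG(&tcp_bound_pcbs, pcb);   // after binding a local port
+     ...
+     TCP_RMV(&tcp_bound_pcbs, pcb);   // before moving or freeing the pcb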
*/ +#ifndef TCP_DEBUG_PCB_LISTS +#define TCP_DEBUG_PCB_LISTS 0 +#endif +#if TCP_DEBUG_PCB_LISTS +#define TCP_REG(pcbs, npcb) do {\ + struct tcp_pcb *tcp_tmp_pcb; \ + LWIP_DEBUGF(TCP_DEBUG, ("TCP_REG %p local port %d\n", (npcb), (npcb)->local_port)); \ + for (tcp_tmp_pcb = *(pcbs); \ + tcp_tmp_pcb != NULL; \ + tcp_tmp_pcb = tcp_tmp_pcb->next) { \ + LWIP_ASSERT("TCP_REG: already registered\n", tcp_tmp_pcb != (npcb)); \ + } \ + LWIP_ASSERT("TCP_REG: pcb->state != CLOSED", ((pcbs) == &tcp_bound_pcbs) || ((npcb)->state != CLOSED)); \ + (npcb)->next = *(pcbs); \ + LWIP_ASSERT("TCP_REG: npcb->next != npcb", (npcb)->next != (npcb)); \ + *(pcbs) = (npcb); \ + LWIP_ASSERT("TCP_RMV: tcp_pcbs sane", tcp_pcbs_sane()); \ + tcp_timer_needed(); \ + } while(0) +#define TCP_RMV(pcbs, npcb) do { \ + struct tcp_pcb *tcp_tmp_pcb; \ + LWIP_ASSERT("TCP_RMV: pcbs != NULL", *(pcbs) != NULL); \ + LWIP_DEBUGF(TCP_DEBUG, ("TCP_RMV: removing %p from %p\n", (npcb), *(pcbs))); \ + if(*(pcbs) == (npcb)) { \ + *(pcbs) = (*pcbs)->next; \ + } else for (tcp_tmp_pcb = *(pcbs); tcp_tmp_pcb != NULL; tcp_tmp_pcb = tcp_tmp_pcb->next) { \ + if(tcp_tmp_pcb->next == (npcb)) { \ + tcp_tmp_pcb->next = (npcb)->next; \ + break; \ + } \ + } \ + (npcb)->next = NULL; \ + LWIP_ASSERT("TCP_RMV: tcp_pcbs sane", tcp_pcbs_sane()); \ + LWIP_DEBUGF(TCP_DEBUG, ("TCP_RMV: removed %p from %p\n", (npcb), *(pcbs))); \ + } while(0) + +#else /* LWIP_DEBUG */ + +#define TCP_REG(pcbs, npcb) \ + do { \ + (npcb)->next = *pcbs; \ + *(pcbs) = (npcb); \ + tcp_timer_needed(); \ + } while (0) + +#define TCP_RMV(pcbs, npcb) \ + do { \ + if(*(pcbs) == (npcb)) { \ + (*(pcbs)) = (*pcbs)->next; \ + } \ + else { \ + struct tcp_pcb *tcp_tmp_pcb; \ + for (tcp_tmp_pcb = *pcbs; \ + tcp_tmp_pcb != NULL; \ + tcp_tmp_pcb = tcp_tmp_pcb->next) { \ + if(tcp_tmp_pcb->next == (npcb)) { \ + tcp_tmp_pcb->next = (npcb)->next; \ + break; \ + } \ + } \ + } \ + (npcb)->next = NULL; \ + } while(0) + +#endif /* LWIP_DEBUG */ + +#define TCP_REG_ACTIVE(npcb) \ + do { \ + TCP_REG(&tcp_active_pcbs, npcb); \ + tcp_active_pcbs_changed = 1; \ + } while (0) + +#define TCP_RMV_ACTIVE(npcb) \ + do { \ + TCP_RMV(&tcp_active_pcbs, npcb); \ + tcp_active_pcbs_changed = 1; \ + } while (0) + +#define TCP_PCB_REMOVE_ACTIVE(pcb) \ + do { \ + tcp_pcb_remove(&tcp_active_pcbs, pcb); \ + tcp_active_pcbs_changed = 1; \ + } while (0) + + +/* Internal functions: */ +struct tcp_pcb *tcp_pcb_copy(struct tcp_pcb *pcb); +void tcp_pcb_purge(struct tcp_pcb *pcb); +void tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb); + +void tcp_segs_free(struct tcp_seg *seg); +void tcp_seg_free(struct tcp_seg *seg); +struct tcp_seg *tcp_seg_copy(struct tcp_seg *seg); + +#define tcp_ack(pcb) \ + do { \ + if((pcb)->flags & TF_ACK_DELAY) { \ + (pcb)->flags &= ~TF_ACK_DELAY; \ + (pcb)->flags |= TF_ACK_NOW; \ + } \ + else { \ + (pcb)->flags |= TF_ACK_DELAY; \ + } \ + } while (0) + +#define tcp_ack_now(pcb) \ + do { \ + (pcb)->flags |= TF_ACK_NOW; \ + } while (0) + +err_t tcp_send_fin(struct tcp_pcb *pcb); +err_t tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags); + +void tcp_rexmit_seg(struct tcp_pcb *pcb, struct tcp_seg *seg); + +void tcp_rst(u32_t seqno, u32_t ackno, + const ip_addr_t *local_ip, const ip_addr_t *remote_ip, + u16_t local_port, u16_t remote_port); + +u32_t tcp_next_iss(struct tcp_pcb *pcb); + +err_t tcp_keepalive(struct tcp_pcb *pcb); +err_t tcp_zero_window_probe(struct tcp_pcb *pcb); +void tcp_trigger_input_pcb_close(void); + +#if TCP_CALCULATE_EFF_SEND_MSS +u16_t tcp_eff_send_mss_impl(u16_t sendmss, const 
ip_addr_t *dest +#if LWIP_IPV6 || LWIP_IPV4_SRC_ROUTING + , const ip_addr_t *src +#endif /* LWIP_IPV6 || LWIP_IPV4_SRC_ROUTING */ + ); +#if LWIP_IPV6 || LWIP_IPV4_SRC_ROUTING +#define tcp_eff_send_mss(sendmss, src, dest) tcp_eff_send_mss_impl(sendmss, dest, src) +#else /* LWIP_IPV6 || LWIP_IPV4_SRC_ROUTING */ +#define tcp_eff_send_mss(sendmss, src, dest) tcp_eff_send_mss_impl(sendmss, dest) +#endif /* LWIP_IPV6 || LWIP_IPV4_SRC_ROUTING */ +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + +#if LWIP_CALLBACK_API +err_t tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err); +#endif /* LWIP_CALLBACK_API */ + +#if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG +void tcp_debug_print(struct tcp_hdr *tcphdr); +void tcp_debug_print_flags(u8_t flags); +void tcp_debug_print_state(enum tcp_state s); +void tcp_debug_print_pcbs(void); +s16_t tcp_pcbs_sane(void); +#else +# define tcp_debug_print(tcphdr) +# define tcp_debug_print_flags(flags) +# define tcp_debug_print_state(s) +# define tcp_debug_print_pcbs() +# define tcp_pcbs_sane() 1 +#endif /* TCP_DEBUG */ + +/** External function (implemented in timers.c), called when TCP detects + * that a timer is needed (i.e. active- or time-wait-pcb found). */ +void tcp_timer_needed(void); + +void tcp_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP */ + +#endif /* LWIP_HDR_TCP_PRIV_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h new file mode 100644 index 000000000..41d078a3d --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h @@ -0,0 +1,160 @@ +/** + * @file + * TCPIP API internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCPIP_PRIV_H +#define LWIP_HDR_TCPIP_PRIV_H + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcpip.h" +#include "lwip/sys.h" +#include "lwip/timeouts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct pbuf; +struct netif; + +#if LWIP_MPU_COMPATIBLE +#define API_VAR_REF(name) (*(name)) +#define API_VAR_DECLARE(type, name) type * name +#define API_VAR_ALLOC(type, pool, name, errorval) do { \ + name = (type *)memp_malloc(pool); \ + if (name == NULL) { \ + return errorval; \ + } \ + } while(0) +#define API_VAR_ALLOC_POOL(type, pool, name, errorval) do { \ + name = (type *)LWIP_MEMPOOL_ALLOC(pool); \ + if (name == NULL) { \ + return errorval; \ + } \ + } while(0) +#define API_VAR_FREE(pool, name) memp_free(pool, name) +#define API_VAR_FREE_POOL(pool, name) LWIP_MEMPOOL_FREE(pool, name) +#define API_EXPR_REF(expr) (&(expr)) +#if LWIP_NETCONN_SEM_PER_THREAD +#define API_EXPR_REF_SEM(expr) (expr) +#else +#define API_EXPR_REF_SEM(expr) API_EXPR_REF(expr) +#endif +#define API_EXPR_DEREF(expr) expr +#define API_MSG_M_DEF(m) m +#define API_MSG_M_DEF_C(t, m) t m +#else /* LWIP_MPU_COMPATIBLE */ +#define API_VAR_REF(name) name +#define API_VAR_DECLARE(type, name) type name +#define API_VAR_ALLOC(type, pool, name, errorval) +#define API_VAR_ALLOC_POOL(type, pool, name, errorval) +#define API_VAR_FREE(pool, name) +#define API_VAR_FREE_POOL(pool, name) +#define API_EXPR_REF(expr) expr +#define API_EXPR_REF_SEM(expr) API_EXPR_REF(expr) +#define API_EXPR_DEREF(expr) (*(expr)) +#define API_MSG_M_DEF(m) *m +#define API_MSG_M_DEF_C(t, m) const t * m +#endif /* LWIP_MPU_COMPATIBLE */ + +err_t tcpip_send_msg_wait_sem(tcpip_callback_fn fn, void *apimsg, sys_sem_t* sem); + +struct tcpip_api_call_data +{ +#if !LWIP_TCPIP_CORE_LOCKING + err_t err; +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_t sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ +#else /* !LWIP_TCPIP_CORE_LOCKING */ + u8_t dummy; /* avoid empty struct :-( */ +#endif /* !LWIP_TCPIP_CORE_LOCKING */ +}; +typedef err_t (*tcpip_api_call_fn)(struct tcpip_api_call_data* call); +err_t tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call); + +enum tcpip_msg_type { + TCPIP_MSG_API, + TCPIP_MSG_API_CALL, + TCPIP_MSG_INPKT, +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + TCPIP_MSG_TIMEOUT, + TCPIP_MSG_UNTIMEOUT, +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + TCPIP_MSG_CALLBACK, + TCPIP_MSG_CALLBACK_STATIC +}; + +struct tcpip_msg { + enum tcpip_msg_type type; + union { + struct { + tcpip_callback_fn function; + void* msg; + } api_msg; + struct { + tcpip_api_call_fn function; + struct tcpip_api_call_data *arg; + sys_sem_t *sem; + } api_call; + struct { + struct pbuf *p; + struct netif *netif; + netif_input_fn input_fn; + } inp; + struct { + tcpip_callback_fn function; + void *ctx; + } cb; +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + struct { + u32_t msecs; + sys_timeout_handler h; + void *arg; + } tmo; +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + } msg; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* !NO_SYS */ + +#endif /* LWIP_HDR_TCPIP_PRIV_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h new file mode 100644 index 000000000..3fe15327b --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h @@ -0,0 +1,78 @@ +/** + * @file + * AutoIP 
protocol definitions + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Dominik Spies + * + * This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927. + * + */ + +#ifndef LWIP_HDR_PROT_AUTOIP_H +#define LWIP_HDR_PROT_AUTOIP_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* 169.254.0.0 */ +#define AUTOIP_NET 0xA9FE0000 +/* 169.254.1.0 */ +#define AUTOIP_RANGE_START (AUTOIP_NET | 0x0100) +/* 169.254.254.255 */ +#define AUTOIP_RANGE_END (AUTOIP_NET | 0xFEFF) + +/* RFC 3927 Constants */ +#define PROBE_WAIT 1 /* second (initial random delay) */ +#define PROBE_MIN 1 /* second (minimum delay till repeated probe) */ +#define PROBE_MAX 2 /* seconds (maximum delay till repeated probe) */ +#define PROBE_NUM 3 /* (number of probe packets) */ +#define ANNOUNCE_NUM 2 /* (number of announcement packets) */ +#define ANNOUNCE_INTERVAL 2 /* seconds (time between announcement packets) */ +#define ANNOUNCE_WAIT 2 /* seconds (delay before announcing) */ +#define MAX_CONFLICTS 10 /* (max conflicts before rate limiting) */ +#define RATE_LIMIT_INTERVAL 60 /* seconds (delay between successive attempts) */ +#define DEFEND_INTERVAL 10 /* seconds (min. wait between defensive ARPs) */ + +/* AutoIP client states */ +typedef enum { + AUTOIP_STATE_OFF = 0, + AUTOIP_STATE_PROBING = 1, + AUTOIP_STATE_ANNOUNCING = 2, + AUTOIP_STATE_BOUND = 3 +} autoip_state_enum_t; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_AUTOIP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h new file mode 100644 index 000000000..30f87b94e --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h @@ -0,0 +1,183 @@ +/** + * @file + * DHCP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Leon Woestenberg + * + */ +#ifndef LWIP_HDR_PROT_DHCP_H +#define LWIP_HDR_PROT_DHCP_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define DHCP_CLIENT_PORT 68 +#define DHCP_SERVER_PORT 67 + + + /* DHCP message item offsets and length */ +#define DHCP_CHADDR_LEN 16U +#define DHCP_SNAME_OFS 44U +#define DHCP_SNAME_LEN 64U +#define DHCP_FILE_OFS 108U +#define DHCP_FILE_LEN 128U +#define DHCP_MSG_LEN 236U +#define DHCP_OPTIONS_OFS (DHCP_MSG_LEN + 4U) /* 4 byte: cookie */ + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** minimum set of fields of any DHCP message */ +struct dhcp_msg +{ + PACK_STRUCT_FLD_8(u8_t op); + PACK_STRUCT_FLD_8(u8_t htype); + PACK_STRUCT_FLD_8(u8_t hlen); + PACK_STRUCT_FLD_8(u8_t hops); + PACK_STRUCT_FIELD(u32_t xid); + PACK_STRUCT_FIELD(u16_t secs); + PACK_STRUCT_FIELD(u16_t flags); + PACK_STRUCT_FLD_S(ip4_addr_p_t ciaddr); + PACK_STRUCT_FLD_S(ip4_addr_p_t yiaddr); + PACK_STRUCT_FLD_S(ip4_addr_p_t siaddr); + PACK_STRUCT_FLD_S(ip4_addr_p_t giaddr); + PACK_STRUCT_FLD_8(u8_t chaddr[DHCP_CHADDR_LEN]); + PACK_STRUCT_FLD_8(u8_t sname[DHCP_SNAME_LEN]); + PACK_STRUCT_FLD_8(u8_t file[DHCP_FILE_LEN]); + PACK_STRUCT_FIELD(u32_t cookie); +#define DHCP_MIN_OPTIONS_LEN 68U +/** make sure user does not configure this too small */ +#if ((defined(DHCP_OPTIONS_LEN)) && (DHCP_OPTIONS_LEN < DHCP_MIN_OPTIONS_LEN)) +# undef DHCP_OPTIONS_LEN +#endif +/** allow this to be configured in lwipopts.h, but not too small */ +#if (!defined(DHCP_OPTIONS_LEN)) +/** set this to be sufficient for your options in outgoing DHCP msgs */ +# define DHCP_OPTIONS_LEN DHCP_MIN_OPTIONS_LEN +#endif + PACK_STRUCT_FLD_8(u8_t options[DHCP_OPTIONS_LEN]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + + +/* DHCP client states */ +typedef enum { + DHCP_STATE_OFF = 0, + DHCP_STATE_REQUESTING = 1, + DHCP_STATE_INIT = 2, + DHCP_STATE_REBOOTING = 3, + DHCP_STATE_REBINDING = 4, + DHCP_STATE_RENEWING = 5, + DHCP_STATE_SELECTING = 6, + 
DHCP_STATE_INFORMING = 7, + DHCP_STATE_CHECKING = 8, + DHCP_STATE_PERMANENT = 9, /* not yet implemented */ + DHCP_STATE_BOUND = 10, + DHCP_STATE_RELEASING = 11, /* not yet implemented */ + DHCP_STATE_BACKING_OFF = 12 +} dhcp_state_enum_t; + +/* DHCP op codes */ +#define DHCP_BOOTREQUEST 1 +#define DHCP_BOOTREPLY 2 + +/* DHCP message types */ +#define DHCP_DISCOVER 1 +#define DHCP_OFFER 2 +#define DHCP_REQUEST 3 +#define DHCP_DECLINE 4 +#define DHCP_ACK 5 +#define DHCP_NAK 6 +#define DHCP_RELEASE 7 +#define DHCP_INFORM 8 + +/** DHCP hardware type, currently only ethernet is supported */ +#define DHCP_HTYPE_ETH 1 + +#define DHCP_MAGIC_COOKIE 0x63825363UL + +/* This is a list of options for BOOTP and DHCP, see RFC 2132 for descriptions */ + +/* BootP options */ +#define DHCP_OPTION_PAD 0 +#define DHCP_OPTION_SUBNET_MASK 1 /* RFC 2132 3.3 */ +#define DHCP_OPTION_ROUTER 3 +#define DHCP_OPTION_DNS_SERVER 6 +#define DHCP_OPTION_HOSTNAME 12 +#define DHCP_OPTION_IP_TTL 23 +#define DHCP_OPTION_MTU 26 +#define DHCP_OPTION_BROADCAST 28 +#define DHCP_OPTION_TCP_TTL 37 +#define DHCP_OPTION_NTP 42 +#define DHCP_OPTION_END 255 + +/* DHCP options */ +#define DHCP_OPTION_REQUESTED_IP 50 /* RFC 2132 9.1, requested IP address */ +#define DHCP_OPTION_LEASE_TIME 51 /* RFC 2132 9.2, time in seconds, in 4 bytes */ +#define DHCP_OPTION_OVERLOAD 52 /* RFC2132 9.3, use file and/or sname field for options */ + +#define DHCP_OPTION_MESSAGE_TYPE 53 /* RFC 2132 9.6, important for DHCP */ +#define DHCP_OPTION_MESSAGE_TYPE_LEN 1 + +#define DHCP_OPTION_SERVER_ID 54 /* RFC 2132 9.7, server IP address */ +#define DHCP_OPTION_PARAMETER_REQUEST_LIST 55 /* RFC 2132 9.8, requested option types */ + +#define DHCP_OPTION_MAX_MSG_SIZE 57 /* RFC 2132 9.10, message size accepted >= 576 */ +#define DHCP_OPTION_MAX_MSG_SIZE_LEN 2 + +#define DHCP_OPTION_T1 58 /* T1 renewal time */ +#define DHCP_OPTION_T2 59 /* T2 rebinding time */ +#define DHCP_OPTION_US 60 +#define DHCP_OPTION_CLIENT_ID 61 +#define DHCP_OPTION_TFTP_SERVERNAME 66 +#define DHCP_OPTION_BOOTFILE 67 + +/* possible combinations of overloading the file and sname fields with options */ +#define DHCP_OVERLOAD_NONE 0 +#define DHCP_OVERLOAD_FILE 1 +#define DHCP_OVERLOAD_SNAME 2 +#define DHCP_OVERLOAD_SNAME_FILE 3 + + +#ifdef __cplusplus +} +#endif + +#endif /*LWIP_HDR_PROT_DHCP_H*/ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h new file mode 100644 index 000000000..022a32c15 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h @@ -0,0 +1,140 @@ +/** + * @file + * DNS - host name to IP address resolver. + */ + +/* + * Port to lwIP from uIP + * by Jim Pettinato April 2007 + * + * security fixes and more by Simon Goldschmidt + * + * uIP version Copyright (c) 2002-2003, Adam Dunkels. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef LWIP_HDR_PROT_DNS_H +#define LWIP_HDR_PROT_DNS_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** DNS server port address */ +#ifndef DNS_SERVER_PORT +#define DNS_SERVER_PORT 53 +#endif + +/* DNS field TYPE used for "Resource Records" */ +#define DNS_RRTYPE_A 1 /* a host address */ +#define DNS_RRTYPE_NS 2 /* an authoritative name server */ +#define DNS_RRTYPE_MD 3 /* a mail destination (Obsolete - use MX) */ +#define DNS_RRTYPE_MF 4 /* a mail forwarder (Obsolete - use MX) */ +#define DNS_RRTYPE_CNAME 5 /* the canonical name for an alias */ +#define DNS_RRTYPE_SOA 6 /* marks the start of a zone of authority */ +#define DNS_RRTYPE_MB 7 /* a mailbox domain name (EXPERIMENTAL) */ +#define DNS_RRTYPE_MG 8 /* a mail group member (EXPERIMENTAL) */ +#define DNS_RRTYPE_MR 9 /* a mail rename domain name (EXPERIMENTAL) */ +#define DNS_RRTYPE_NULL 10 /* a null RR (EXPERIMENTAL) */ +#define DNS_RRTYPE_WKS 11 /* a well known service description */ +#define DNS_RRTYPE_PTR 12 /* a domain name pointer */ +#define DNS_RRTYPE_HINFO 13 /* host information */ +#define DNS_RRTYPE_MINFO 14 /* mailbox or mail list information */ +#define DNS_RRTYPE_MX 15 /* mail exchange */ +#define DNS_RRTYPE_TXT 16 /* text strings */ +#define DNS_RRTYPE_AAAA 28 /* IPv6 address */ +#define DNS_RRTYPE_SRV 33 /* service location */ +#define DNS_RRTYPE_ANY 255 /* any type */ + +/* DNS field CLASS used for "Resource Records" */ +#define DNS_RRCLASS_IN 1 /* the Internet */ +#define DNS_RRCLASS_CS 2 /* the CSNET class (Obsolete - used only for examples in some obsolete RFCs) */ +#define DNS_RRCLASS_CH 3 /* the CHAOS class */ +#define DNS_RRCLASS_HS 4 /* Hesiod [Dyer 87] */ +#define DNS_RRCLASS_ANY 255 /* any class */ +#define DNS_RRCLASS_FLUSH 0x800 /* Flush bit */ + +/* DNS protocol flags */ +#define DNS_FLAG1_RESPONSE 0x80 +#define DNS_FLAG1_OPCODE_STATUS 0x10 +#define DNS_FLAG1_OPCODE_INVERSE 0x08 +#define DNS_FLAG1_OPCODE_STANDARD 0x00 +#define DNS_FLAG1_AUTHORATIVE 0x04 +#define DNS_FLAG1_TRUNC 0x02 +#define DNS_FLAG1_RD 0x01 +#define DNS_FLAG2_RA 0x80 +#define DNS_FLAG2_ERR_MASK 0x0f +#define DNS_FLAG2_ERR_NONE 0x00 +#define DNS_FLAG2_ERR_NAME 0x03 + +#define DNS_HDR_GET_OPCODE(hdr) ((((hdr)->flags1) >> 3) & 0xF) + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** DNS message header */ +struct dns_hdr { + PACK_STRUCT_FIELD(u16_t id); + PACK_STRUCT_FLD_8(u8_t flags1); + PACK_STRUCT_FLD_8(u8_t flags2); + PACK_STRUCT_FIELD(u16_t numquestions); + PACK_STRUCT_FIELD(u16_t numanswers); + PACK_STRUCT_FIELD(u16_t numauthrr); + PACK_STRUCT_FIELD(u16_t numextrarr); +} 
PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define SIZEOF_DNS_HDR 12 + + +/* Multicast DNS definitions */ + +/** UDP port for multicast DNS queries */ +#ifndef DNS_MQUERY_PORT +#define DNS_MQUERY_PORT 5353 +#endif + +/* IPv4 group for multicast DNS queries: 224.0.0.251 */ +#ifndef DNS_MQUERY_IPV4_GROUP_INIT +#define DNS_MQUERY_IPV4_GROUP_INIT IPADDR4_INIT_BYTES(224,0,0,251) +#endif + +/* IPv6 group for multicast DNS queries: FF02::FB */ +#ifndef DNS_MQUERY_IPV6_GROUP_INIT +#define DNS_MQUERY_IPV6_GROUP_INIT IPADDR6_INIT_HOST(0xFF020000,0,0,0xFB) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_DNS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h new file mode 100644 index 000000000..175e6bc9c --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h @@ -0,0 +1,91 @@ +/** + * @file + * ARP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
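As a usage sketch of the dns_hdr layout and flag bits just defined (the helper name is an assumption, as is taking lwip_htons() from lwip/def.h):

#include <string.h>
#include "lwip/def.h"            /* lwip_htons() */
#include "lwip/prot/dns.h"

/* Fill a header for one standard query with recursion desired; the
 * encoded question section then follows these SIZEOF_DNS_HDR bytes. */
static void dns_fill_query_hdr(struct dns_hdr *hdr, u16_t txid)
{
  memset(hdr, 0, SIZEOF_DNS_HDR);
  hdr->id           = lwip_htons(txid);
  hdr->flags1       = DNS_FLAG1_RD;     /* QR=0 (query), opcode 0, RD=1 */
  hdr->numquestions = lwip_htons(1);
}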
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ETHARP_H +#define LWIP_HDR_PROT_ETHARP_H + +#include "lwip/arch.h" +#include "lwip/prot/ethernet.h" +#include "lwip/ip4_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef ETHARP_HWADDR_LEN +#define ETHARP_HWADDR_LEN ETH_HWADDR_LEN +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** the ARP message, see RFC 826 ("Packet format") */ +struct etharp_hdr { + PACK_STRUCT_FIELD(u16_t hwtype); + PACK_STRUCT_FIELD(u16_t proto); + PACK_STRUCT_FLD_8(u8_t hwlen); + PACK_STRUCT_FLD_8(u8_t protolen); + PACK_STRUCT_FIELD(u16_t opcode); + PACK_STRUCT_FLD_S(struct eth_addr shwaddr); + PACK_STRUCT_FLD_S(struct ip4_addr2 sipaddr); + PACK_STRUCT_FLD_S(struct eth_addr dhwaddr); + PACK_STRUCT_FLD_S(struct ip4_addr2 dipaddr); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_ETHARP_HDR 28 + +/* ARP hwtype values */ +enum etharp_hwtype { + HWTYPE_ETHERNET = 1 + /* others not used */ +}; + +/* ARP message types (opcodes) */ +enum etharp_opcode { + ARP_REQUEST = 1, + ARP_REPLY = 2 +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ETHARP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h new file mode 100644 index 000000000..33dbe332a --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h @@ -0,0 +1,170 @@ +/** + * @file + * Ethernet protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
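To see the etharp_hdr fields in action, here is a hedged sketch of filling the fixed part of an ARP request (the helper name is hypothetical, and the bytewise memcpy of the IP fields is illustrative; etharp.c has its own copy macros):

#include <string.h>
#include "lwip/def.h"
#include "lwip/prot/etharp.h"

/* dhwaddr is zeroed for a request per RFC 826; sipaddr/dipaddr are
 * copied bytewise because the packed struct is only 16-bit aligned. */
static void arp_fill_request(struct etharp_hdr *hdr,
                             const u8_t src_mac[ETH_HWADDR_LEN],
                             const u8_t src_ip[4], const u8_t dst_ip[4])
{
  hdr->hwtype   = lwip_htons(HWTYPE_ETHERNET);
  hdr->proto    = lwip_htons(ETHTYPE_IP);
  hdr->hwlen    = ETH_HWADDR_LEN;
  hdr->protolen = 4;                     /* IPv4 address length */
  hdr->opcode   = lwip_htons(ARP_REQUEST);
  memcpy(&hdr->shwaddr, src_mac, ETH_HWADDR_LEN);
  memcpy(&hdr->sipaddr, src_ip, 4);
  memset(&hdr->dhwaddr, 0, ETH_HWADDR_LEN);
  memcpy(&hdr->dipaddr, dst_ip, 4);
}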
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ETHERNET_H +#define LWIP_HDR_PROT_ETHERNET_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef ETH_HWADDR_LEN +#ifdef ETHARP_HWADDR_LEN +#define ETH_HWADDR_LEN ETHARP_HWADDR_LEN /* compatibility mode */ +#else +#define ETH_HWADDR_LEN 6 +#endif +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct eth_addr { + PACK_STRUCT_FLD_8(u8_t addr[ETH_HWADDR_LEN]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** Ethernet header */ +struct eth_hdr { +#if ETH_PAD_SIZE + PACK_STRUCT_FLD_8(u8_t padding[ETH_PAD_SIZE]); +#endif + PACK_STRUCT_FLD_S(struct eth_addr dest); + PACK_STRUCT_FLD_S(struct eth_addr src); + PACK_STRUCT_FIELD(u16_t type); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_ETH_HDR (14 + ETH_PAD_SIZE) + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** VLAN header inserted between ethernet header and payload + * if 'type' in ethernet header is ETHTYPE_VLAN. + * See IEEE802.Q */ +struct eth_vlan_hdr { + PACK_STRUCT_FIELD(u16_t prio_vid); + PACK_STRUCT_FIELD(u16_t tpid); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_VLAN_HDR 4 +#define VLAN_ID(vlan_hdr) (lwip_htons((vlan_hdr)->prio_vid) & 0xFFF) + +/** + * @ingroup ethernet + * A list of often ethtypes (although lwIP does not use all of them): */ +enum eth_type { + /** Internet protocol v4 */ + ETHTYPE_IP = 0x0800U, + /** Address resolution protocol */ + ETHTYPE_ARP = 0x0806U, + /** Wake on lan */ + ETHTYPE_WOL = 0x0842U, + /** RARP */ + ETHTYPE_RARP = 0x8035U, + /** Virtual local area network */ + ETHTYPE_VLAN = 0x8100U, + /** Internet protocol v6 */ + ETHTYPE_IPV6 = 0x86DDU, + /** PPP Over Ethernet Discovery Stage */ + ETHTYPE_PPPOEDISC = 0x8863U, + /** PPP Over Ethernet Session Stage */ + ETHTYPE_PPPOE = 0x8864U, + /** Jumbo Frames */ + ETHTYPE_JUMBO = 0x8870U, + /** Process field network */ + ETHTYPE_PROFINET = 0x8892U, + /** Ethernet for control automation technology */ + ETHTYPE_ETHERCAT = 0x88A4U, + /** Link layer discovery protocol */ + ETHTYPE_LLDP = 0x88CCU, + /** Serial real-time communication system */ + ETHTYPE_SERCOS = 0x88CDU, + /** Media redundancy protocol */ + ETHTYPE_MRP = 0x88E3U, + /** Precision time protocol */ + ETHTYPE_PTP = 0x88F7U, + /** Q-in-Q, 802.1ad */ + ETHTYPE_QINQ = 0x9100U +}; + +/** The 24-bit IANA IPv4-multicast OUI is 01-00-5e: */ +#define LL_IP4_MULTICAST_ADDR_0 0x01 +#define LL_IP4_MULTICAST_ADDR_1 0x00 +#define LL_IP4_MULTICAST_ADDR_2 0x5e + +/** IPv6 multicast uses this prefix */ +#define LL_IP6_MULTICAST_ADDR_0 0x33 +#define LL_IP6_MULTICAST_ADDR_1 0x33 + +/** MEMCPY-like macro to copy to/from struct eth_addr's that are local variables + * or known to be 32-bit aligned within the protocol header. */ +#ifndef ETHADDR32_COPY +#define ETHADDR32_COPY(dst, src) SMEMCPY(dst, src, ETH_HWADDR_LEN) +#endif + +/** MEMCPY-like macro to copy to/from struct eth_addr's that are no local + * variables and known to be 16-bit aligned within the protocol header. 
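A short sketch of how the eth_hdr/eth_vlan_hdr layouts and the VLAN_ID() macro compose when classifying a received frame (the helper is hypothetical; it assumes ETH_PAD_SIZE is 0 and a contiguous buffer of at least SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR bytes):

#include "lwip/def.h"
#include "lwip/prot/ethernet.h"

/* Return the payload ethertype, skipping one 802.1Q tag if present,
 * and report the VLAN id (0 when untagged). */
static u16_t eth_effective_type(const u8_t *frame, u16_t *vlan_id)
{
  const struct eth_hdr *ethhdr = (const struct eth_hdr *)frame;
  u16_t type = lwip_ntohs(ethhdr->type);

  *vlan_id = 0;
  if (type == ETHTYPE_VLAN) {
    const struct eth_vlan_hdr *vlan =
        (const struct eth_vlan_hdr *)(frame + SIZEOF_ETH_HDR);
    *vlan_id = VLAN_ID(vlan);
    type = lwip_ntohs(vlan->tpid);   /* real ethertype follows the tag */
  }
  return type;
}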
*/ +#ifndef ETHADDR16_COPY +#define ETHADDR16_COPY(dst, src) SMEMCPY(dst, src, ETH_HWADDR_LEN) +#endif + +#define eth_addr_cmp(addr1, addr2) (memcmp((addr1)->addr, (addr2)->addr, ETH_HWADDR_LEN) == 0) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ETHERNET_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h new file mode 100644 index 000000000..d8183fd3d --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h @@ -0,0 +1,91 @@ +/** + * @file + * ICMP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ICMP_H +#define LWIP_HDR_PROT_ICMP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ICMP_ER 0 /* echo reply */ +#define ICMP_DUR 3 /* destination unreachable */ +#define ICMP_SQ 4 /* source quench */ +#define ICMP_RD 5 /* redirect */ +#define ICMP_ECHO 8 /* echo */ +#define ICMP_TE 11 /* time exceeded */ +#define ICMP_PP 12 /* parameter problem */ +#define ICMP_TS 13 /* timestamp */ +#define ICMP_TSR 14 /* timestamp reply */ +#define ICMP_IRQ 15 /* information request */ +#define ICMP_IR 16 /* information reply */ +#define ICMP_AM 17 /* address mask request */ +#define ICMP_AMR 18 /* address mask reply */ + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +/** This is the standard ICMP header only that the u32_t data + * is split to two u16_t like ICMP echo needs it. + * This header is also used for other ICMP types that do not + * use the data part. 
+ */ +PACK_STRUCT_BEGIN +struct icmp_echo_hdr { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t id); + PACK_STRUCT_FIELD(u16_t seqno); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Compatibility defines, old versions used to combine type and code to an u16_t */ +#define ICMPH_TYPE(hdr) ((hdr)->type) +#define ICMPH_CODE(hdr) ((hdr)->code) +#define ICMPH_TYPE_SET(hdr, t) ((hdr)->type = (t)) +#define ICMPH_CODE_SET(hdr, c) ((hdr)->code = (c)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ICMP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h new file mode 100644 index 000000000..b1c6d56b4 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h @@ -0,0 +1,170 @@ +/** + * @file + * ICMP6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
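The ICMPH_*_SET accessors above are how an echo request gets stamped, much as a ping implementation would do it; a minimal sketch (helper name assumed):

#include "lwip/def.h"
#include "lwip/prot/icmp.h"

static void icmp_prepare_echo(struct icmp_echo_hdr *iecho,
                              u16_t id, u16_t seqno)
{
  ICMPH_TYPE_SET(iecho, ICMP_ECHO);
  ICMPH_CODE_SET(iecho, 0);
  iecho->chksum = 0;             /* checksum is computed afterwards over
                                    the header plus the echo payload */
  iecho->id     = lwip_htons(id);
  iecho->seqno  = lwip_htons(seqno);
}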
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ICMP6_H +#define LWIP_HDR_PROT_ICMP6_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** ICMP type */ +enum icmp6_type { + /** Destination unreachable */ + ICMP6_TYPE_DUR = 1, + /** Packet too big */ + ICMP6_TYPE_PTB = 2, + /** Time exceeded */ + ICMP6_TYPE_TE = 3, + /** Parameter problem */ + ICMP6_TYPE_PP = 4, + /** Private experimentation */ + ICMP6_TYPE_PE1 = 100, + /** Private experimentation */ + ICMP6_TYPE_PE2 = 101, + /** Reserved for expansion of error messages */ + ICMP6_TYPE_RSV_ERR = 127, + + /** Echo request */ + ICMP6_TYPE_EREQ = 128, + /** Echo reply */ + ICMP6_TYPE_EREP = 129, + /** Multicast listener query */ + ICMP6_TYPE_MLQ = 130, + /** Multicast listener report */ + ICMP6_TYPE_MLR = 131, + /** Multicast listener done */ + ICMP6_TYPE_MLD = 132, + /** Router solicitation */ + ICMP6_TYPE_RS = 133, + /** Router advertisement */ + ICMP6_TYPE_RA = 134, + /** Neighbor solicitation */ + ICMP6_TYPE_NS = 135, + /** Neighbor advertisement */ + ICMP6_TYPE_NA = 136, + /** Redirect */ + ICMP6_TYPE_RD = 137, + /** Multicast router advertisement */ + ICMP6_TYPE_MRA = 151, + /** Multicast router solicitation */ + ICMP6_TYPE_MRS = 152, + /** Multicast router termination */ + ICMP6_TYPE_MRT = 153, + /** Private experimentation */ + ICMP6_TYPE_PE3 = 200, + /** Private experimentation */ + ICMP6_TYPE_PE4 = 201, + /** Reserved for expansion of informational messages */ + ICMP6_TYPE_RSV_INF = 255 +}; + +/** ICMP destination unreachable codes */ +enum icmp6_dur_code { + /** No route to destination */ + ICMP6_DUR_NO_ROUTE = 0, + /** Communication with destination administratively prohibited */ + ICMP6_DUR_PROHIBITED = 1, + /** Beyond scope of source address */ + ICMP6_DUR_SCOPE = 2, + /** Address unreachable */ + ICMP6_DUR_ADDRESS = 3, + /** Port unreachable */ + ICMP6_DUR_PORT = 4, + /** Source address failed ingress/egress policy */ + ICMP6_DUR_POLICY = 5, + /** Reject route to destination */ + ICMP6_DUR_REJECT_ROUTE = 6 +}; + +/** ICMP time exceeded codes */ +enum icmp6_te_code { + /** Hop limit exceeded in transit */ + ICMP6_TE_HL = 0, + /** Fragment reassembly time exceeded */ + ICMP6_TE_FRAG = 1 +}; + +/** ICMP parameter code */ +enum icmp6_pp_code { + /** Erroneous header field encountered */ + ICMP6_PP_FIELD = 0, + /** Unrecognized next header type encountered */ + ICMP6_PP_HEADER = 1, + /** Unrecognized IPv6 option encountered */ + ICMP6_PP_OPTION = 2 +}; + +/** This is the standard ICMP6 header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct icmp6_hdr { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t data); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** This is the ICMP6 header adapted for echo req/resp. 
*/ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct icmp6_echo_hdr { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t id); + PACK_STRUCT_FIELD(u16_t seqno); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ICMP6_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h new file mode 100644 index 000000000..73bc27c84 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h @@ -0,0 +1,90 @@ +/** + * @file + * IGMP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IGMP_H +#define LWIP_HDR_PROT_IGMP_H + +#include "lwip/arch.h" +#include "lwip/ip4_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * IGMP constants + */ +#define IGMP_TTL 1 +#define IGMP_MINLEN 8 +#define ROUTER_ALERT 0x9404U +#define ROUTER_ALERTLEN 4 + +/* + * IGMP message types, including version number. + */ +#define IGMP_MEMB_QUERY 0x11 /* Membership query */ +#define IGMP_V1_MEMB_REPORT 0x12 /* Ver. 1 membership report */ +#define IGMP_V2_MEMB_REPORT 0x16 /* Ver. 2 membership report */ +#define IGMP_LEAVE_GROUP 0x17 /* Leave-group message */ + +/* Group membership states */ +#define IGMP_GROUP_NON_MEMBER 0 +#define IGMP_GROUP_DELAYING_MEMBER 1 +#define IGMP_GROUP_IDLE_MEMBER 2 + +/** + * IGMP packet format. 
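Reading ahead to the igmp_msg struct declared just below, the constants above combine like this when a host reports membership (hypothetical helper; a real sender must also set IGMP_TTL in the enclosing IP header and attach the ROUTER_ALERT option, and ip4_addr_p_t comes from lwip/prot/ip4.h):

#include "lwip/prot/ip4.h"       /* ip4_addr_p_t */
#include "lwip/prot/igmp.h"

/* Fill a version-2 membership report for 'group'; the checksum is the
 * Internet checksum over all IGMP_MINLEN (8) bytes, computed last. */
static void igmp_fill_v2_report(struct igmp_msg *msg, ip4_addr_p_t group)
{
  msg->igmp_msgtype       = IGMP_V2_MEMB_REPORT;
  msg->igmp_maxresp       = 0;     /* unused in reports */
  msg->igmp_checksum      = 0;
  msg->igmp_group_address = group;
}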
+ */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct igmp_msg { + PACK_STRUCT_FLD_8(u8_t igmp_msgtype); + PACK_STRUCT_FLD_8(u8_t igmp_maxresp); + PACK_STRUCT_FIELD(u16_t igmp_checksum); + PACK_STRUCT_FLD_S(ip4_addr_p_t igmp_group_address); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IGMP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h new file mode 100644 index 000000000..077c93b06 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h @@ -0,0 +1,51 @@ +/** + * @file + * IP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IP_H +#define LWIP_HDR_PROT_IP_H + +#include "lwip/arch.h" + +#define IP_PROTO_ICMP 1 +#define IP_PROTO_IGMP 2 +#define IP_PROTO_UDP 17 +#define IP_PROTO_UDPLITE 136 +#define IP_PROTO_TCP 6 + +/** This operates on a void* by loading the first byte */ +#define IP_HDR_GET_VERSION(ptr) ((*(u8_t*)(ptr)) >> 4) + +#endif /* LWIP_HDR_PROT_IP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h new file mode 100644 index 000000000..e791a06bf --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h @@ -0,0 +1,127 @@ +/** + * @file + * IPv4 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IP4_H +#define LWIP_HDR_PROT_IP4_H + +#include "lwip/arch.h" +#include "lwip/ip4_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the packed version of ip4_addr_t, + used in network headers that are itself packed */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip4_addr_packed { + PACK_STRUCT_FIELD(u32_t addr); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +typedef struct ip4_addr_packed ip4_addr_p_t; + +/* Size of the IPv4 header. Same as 'sizeof(struct ip_hdr)'. 
*/ +#define IP_HLEN 20 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/* The IPv4 header */ +struct ip_hdr { + /* version / header length */ + PACK_STRUCT_FLD_8(u8_t _v_hl); + /* type of service */ + PACK_STRUCT_FLD_8(u8_t _tos); + /* total length */ + PACK_STRUCT_FIELD(u16_t _len); + /* identification */ + PACK_STRUCT_FIELD(u16_t _id); + /* fragment offset field */ + PACK_STRUCT_FIELD(u16_t _offset); +#define IP_RF 0x8000U /* reserved fragment flag */ +#define IP_DF 0x4000U /* don't fragment flag */ +#define IP_MF 0x2000U /* more fragments flag */ +#define IP_OFFMASK 0x1fffU /* mask for fragmenting bits */ + /* time to live */ + PACK_STRUCT_FLD_8(u8_t _ttl); + /* protocol*/ + PACK_STRUCT_FLD_8(u8_t _proto); + /* checksum */ + PACK_STRUCT_FIELD(u16_t _chksum); + /* source and destination IP addresses */ + PACK_STRUCT_FLD_S(ip4_addr_p_t src); + PACK_STRUCT_FLD_S(ip4_addr_p_t dest); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Macros to get struct ip_hdr fields: */ +#define IPH_V(hdr) ((hdr)->_v_hl >> 4) +#define IPH_HL(hdr) ((hdr)->_v_hl & 0x0f) +#define IPH_TOS(hdr) ((hdr)->_tos) +#define IPH_LEN(hdr) ((hdr)->_len) +#define IPH_ID(hdr) ((hdr)->_id) +#define IPH_OFFSET(hdr) ((hdr)->_offset) +#define IPH_TTL(hdr) ((hdr)->_ttl) +#define IPH_PROTO(hdr) ((hdr)->_proto) +#define IPH_CHKSUM(hdr) ((hdr)->_chksum) + +/* Macros to set struct ip_hdr fields: */ +#define IPH_VHL_SET(hdr, v, hl) (hdr)->_v_hl = (u8_t)((((v) << 4) | (hl))) +#define IPH_TOS_SET(hdr, tos) (hdr)->_tos = (tos) +#define IPH_LEN_SET(hdr, len) (hdr)->_len = (len) +#define IPH_ID_SET(hdr, id) (hdr)->_id = (id) +#define IPH_OFFSET_SET(hdr, off) (hdr)->_offset = (off) +#define IPH_TTL_SET(hdr, ttl) (hdr)->_ttl = (u8_t)(ttl) +#define IPH_PROTO_SET(hdr, proto) (hdr)->_proto = (u8_t)(proto) +#define IPH_CHKSUM_SET(hdr, chksum) (hdr)->_chksum = (chksum) + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IP4_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h new file mode 100644 index 000000000..066bdb29e --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h @@ -0,0 +1,169 @@ +/** + * @file + * IPv6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
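Taken together, the setter macros write a complete 20-byte header; a hedged sketch of what an output path does (the helper name, the TTL choice, and deferring the checksum are illustrative, not lwIP's exact ip4_output code):

#include "lwip/def.h"
#include "lwip/prot/ip.h"        /* IP_PROTO_UDP */
#include "lwip/prot/ip4.h"

static void ip4_fill_header(struct ip_hdr *iphdr, u16_t payload_len,
                            ip4_addr_p_t src, ip4_addr_p_t dest)
{
  IPH_VHL_SET(iphdr, 4, IP_HLEN / 4);        /* version 4, 5 32-bit words */
  IPH_TOS_SET(iphdr, 0);
  IPH_LEN_SET(iphdr, lwip_htons((u16_t)(IP_HLEN + payload_len)));
  IPH_ID_SET(iphdr, 0);                      /* real code uses a running id */
  IPH_OFFSET_SET(iphdr, lwip_htons(IP_DF));  /* don't fragment, offset 0 */
  IPH_TTL_SET(iphdr, 64);
  IPH_PROTO_SET(iphdr, IP_PROTO_UDP);
  iphdr->src  = src;
  iphdr->dest = dest;
  IPH_CHKSUM_SET(iphdr, 0);    /* then Internet checksum over IP_HLEN bytes */
}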
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IP6_H +#define LWIP_HDR_PROT_IP6_H + +#include "lwip/arch.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the packed version of ip6_addr_t, + used in network headers that are itself packed */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_addr_packed { + PACK_STRUCT_FIELD(u32_t addr[4]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +typedef struct ip6_addr_packed ip6_addr_p_t; + +#define IP6_HLEN 40 + +#define IP6_NEXTH_HOPBYHOP 0 +#define IP6_NEXTH_TCP 6 +#define IP6_NEXTH_UDP 17 +#define IP6_NEXTH_ENCAPS 41 +#define IP6_NEXTH_ROUTING 43 +#define IP6_NEXTH_FRAGMENT 44 +#define IP6_NEXTH_ICMP6 58 +#define IP6_NEXTH_NONE 59 +#define IP6_NEXTH_DESTOPTS 60 +#define IP6_NEXTH_UDPLITE 136 + +/** The IPv6 header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_hdr { + /** version / traffic class / flow label */ + PACK_STRUCT_FIELD(u32_t _v_tc_fl); + /** payload length */ + PACK_STRUCT_FIELD(u16_t _plen); + /** next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /** hop limit */ + PACK_STRUCT_FLD_8(u8_t _hoplim); + /** source and destination IP addresses */ + PACK_STRUCT_FLD_S(ip6_addr_p_t src); + PACK_STRUCT_FLD_S(ip6_addr_p_t dest); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Hop-by-hop router alert option. */ +#define IP6_HBH_HLEN 8 +#define IP6_PAD1_OPTION 0 +#define IP6_PADN_ALERT_OPTION 1 +#define IP6_ROUTER_ALERT_OPTION 5 +#define IP6_ROUTER_ALERT_VALUE_MLD 0 +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_hbh_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* header length */ + PACK_STRUCT_FLD_8(u8_t _hlen); + /* router alert option type */ + PACK_STRUCT_FLD_8(u8_t _ra_opt_type); + /* router alert option data len */ + PACK_STRUCT_FLD_8(u8_t _ra_opt_dlen); + /* router alert option data */ + PACK_STRUCT_FIELD(u16_t _ra_opt_data); + /* PadN option type */ + PACK_STRUCT_FLD_8(u8_t _padn_opt_type); + /* PadN option data len */ + PACK_STRUCT_FLD_8(u8_t _padn_opt_dlen); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Fragment header. 
*/ +#define IP6_FRAG_HLEN 8 +#define IP6_FRAG_OFFSET_MASK 0xfff8 +#define IP6_FRAG_MORE_FLAG 0x0001 +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_frag_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* reserved */ + PACK_STRUCT_FLD_8(u8_t reserved); + /* fragment offset */ + PACK_STRUCT_FIELD(u16_t _fragment_offset); + /* fragmented packet identification */ + PACK_STRUCT_FIELD(u32_t _identification); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define IP6H_V(hdr) ((lwip_ntohl((hdr)->_v_tc_fl) >> 28) & 0x0f) +#define IP6H_TC(hdr) ((lwip_ntohl((hdr)->_v_tc_fl) >> 20) & 0xff) +#define IP6H_FL(hdr) (lwip_ntohl((hdr)->_v_tc_fl) & 0x000fffff) +#define IP6H_PLEN(hdr) (lwip_ntohs((hdr)->_plen)) +#define IP6H_NEXTH(hdr) ((hdr)->_nexth) +#define IP6H_NEXTH_P(hdr) ((u8_t *)(hdr) + 6) +#define IP6H_HOPLIM(hdr) ((hdr)->_hoplim) + +#define IP6H_VTCFL_SET(hdr, v, tc, fl) (hdr)->_v_tc_fl = (lwip_htonl((((u32_t)(v)) << 28) | (((u32_t)(tc)) << 20) | (fl))) +#define IP6H_PLEN_SET(hdr, plen) (hdr)->_plen = lwip_htons(plen) +#define IP6H_NEXTH_SET(hdr, nexth) (hdr)->_nexth = (nexth) +#define IP6H_HOPLIM_SET(hdr, hl) (hdr)->_hoplim = (u8_t)(hl) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IP6_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h new file mode 100644 index 000000000..8989a57bb --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h @@ -0,0 +1,70 @@ +/** + * @file + * MLD6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
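The IP6H_*_SET macros fold the byte-order conversions in, so filling the fixed 40-byte IPv6 header reduces to four calls plus the two address copies (sketch; helper name assumed):

#include "lwip/prot/ip6.h"

static void ip6_fill_header(struct ip6_hdr *ip6hdr, u16_t payload_len,
                            u8_t nexth, u8_t hoplim)
{
  IP6H_VTCFL_SET(ip6hdr, 6, 0, 0);    /* version 6, traffic class 0, flow 0 */
  IP6H_PLEN_SET(ip6hdr, payload_len); /* macro applies lwip_htons() itself */
  IP6H_NEXTH_SET(ip6hdr, nexth);      /* e.g. IP6_NEXTH_UDP */
  IP6H_HOPLIM_SET(ip6hdr, hoplim);
  /* src/dest (ip6_addr_p_t) are copied separately by the caller */
}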
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_MLD6_H +#define LWIP_HDR_PROT_MLD6_H + +#include "lwip/arch.h" +#include "lwip/prot/ip6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** Multicast listener report/query/done message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct mld_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t max_resp_delay); + PACK_STRUCT_FIELD(u16_t reserved); + PACK_STRUCT_FLD_S(ip6_addr_p_t multicast_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_MLD6_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h new file mode 100644 index 000000000..e6999dfb5 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h @@ -0,0 +1,277 @@ +/** + * @file + * ND6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ND6_H +#define LWIP_HDR_PROT_ND6_H + +#include "lwip/arch.h" +#include "lwip/ip6_addr.h" +#include "lwip/prot/ip6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** Neighbor solicitation message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ns_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t reserved); + PACK_STRUCT_FLD_S(ip6_addr_p_t target_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Neighbor advertisement message header. 
*/ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct na_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FLD_8(u8_t flags); + PACK_STRUCT_FLD_8(u8_t reserved[3]); + PACK_STRUCT_FLD_S(ip6_addr_p_t target_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define ND6_FLAG_ROUTER (0x80) +#define ND6_FLAG_SOLICITED (0x40) +#define ND6_FLAG_OVERRIDE (0x20) + +/** Router solicitation message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct rs_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t reserved); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Router advertisement message header. */ +#define ND6_RA_FLAG_MANAGED_ADDR_CONFIG (0x80) +#define ND6_RA_FLAG_OTHER_CONFIG (0x40) +#define ND6_RA_FLAG_HOME_AGENT (0x20) +#define ND6_RA_PREFERENCE_MASK (0x18) +#define ND6_RA_PREFERENCE_HIGH (0x08) +#define ND6_RA_PREFERENCE_MEDIUM (0x00) +#define ND6_RA_PREFERENCE_LOW (0x18) +#define ND6_RA_PREFERENCE_DISABLED (0x10) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ra_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FLD_8(u8_t current_hop_limit); + PACK_STRUCT_FLD_8(u8_t flags); + PACK_STRUCT_FIELD(u16_t router_lifetime); + PACK_STRUCT_FIELD(u32_t reachable_time); + PACK_STRUCT_FIELD(u32_t retrans_timer); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Redirect message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct redirect_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t reserved); + PACK_STRUCT_FLD_S(ip6_addr_p_t target_address); + PACK_STRUCT_FLD_S(ip6_addr_p_t destination_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Link-layer address option. */ +#define ND6_OPTION_TYPE_SOURCE_LLADDR (0x01) +#define ND6_OPTION_TYPE_TARGET_LLADDR (0x02) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct lladdr_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t addr[NETIF_MAX_HWADDR_LEN]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Prefix information option. 
*/ +#define ND6_OPTION_TYPE_PREFIX_INFO (0x03) +#define ND6_PREFIX_FLAG_ON_LINK (0x80) +#define ND6_PREFIX_FLAG_AUTONOMOUS (0x40) +#define ND6_PREFIX_FLAG_ROUTER_ADDRESS (0x20) +#define ND6_PREFIX_FLAG_SITE_PREFIX (0x10) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct prefix_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t prefix_length); + PACK_STRUCT_FLD_8(u8_t flags); + PACK_STRUCT_FIELD(u32_t valid_lifetime); + PACK_STRUCT_FIELD(u32_t preferred_lifetime); + PACK_STRUCT_FLD_8(u8_t reserved2[3]); + PACK_STRUCT_FLD_8(u8_t site_prefix_length); + PACK_STRUCT_FLD_S(ip6_addr_p_t prefix); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Redirected header option. */ +#define ND6_OPTION_TYPE_REDIR_HDR (0x04) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct redirected_header_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t reserved[6]); + /* Portion of redirected packet follows. */ + /* PACK_STRUCT_FLD_8(u8_t redirected[8]); */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** MTU option. */ +#define ND6_OPTION_TYPE_MTU (0x05) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct mtu_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FIELD(u16_t reserved); + PACK_STRUCT_FIELD(u32_t mtu); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Route information option. */ +#define ND6_OPTION_TYPE_ROUTE_INFO (24) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct route_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t prefix_length); + PACK_STRUCT_FLD_8(u8_t preference); + PACK_STRUCT_FIELD(u32_t route_lifetime); + PACK_STRUCT_FLD_S(ip6_addr_p_t prefix); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Recursive DNS Server Option. */ +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS +#define LWIP_RDNSS_OPTION_MAX_SERVERS LWIP_ND6_RDNSS_MAX_DNS_SERVERS +#else +#define LWIP_RDNSS_OPTION_MAX_SERVERS 1 +#endif +#define ND6_OPTION_TYPE_RDNSS (25) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct rdnss_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FIELD(u16_t reserved); + PACK_STRUCT_FIELD(u32_t lifetime); + PACK_STRUCT_FLD_S(ip6_addr_p_t rdnss_address[LWIP_RDNSS_OPTION_MAX_SERVERS]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ND6_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h new file mode 100644 index 000000000..0951d1136 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h @@ -0,0 +1,97 @@ +/** + * @file + * TCP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
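All of these option structs share the same type/length prologue, where the length field counts 8-byte units; a hedged sketch of scanning the options that trail a neighbor advertisement (the helper is hypothetical):

#include "lwip/prot/nd6.h"

/* Return the first target link-layer address option, or NULL. A zero
 * length field is malformed and must abort the scan. */
static const struct lladdr_option *
nd6_find_target_lladdr(const u8_t *opts, u16_t opts_len)
{
  u16_t offset = 0;
  while ((u16_t)(offset + 2) <= opts_len) {
    u8_t  type = opts[offset];
    u16_t len  = (u16_t)(opts[offset + 1] << 3);   /* 8-byte units */
    if (len == 0 || (u16_t)(offset + len) > opts_len) {
      return NULL;
    }
    if (type == ND6_OPTION_TYPE_TARGET_LLADDR) {
      return (const struct lladdr_option *)(opts + offset);
    }
    offset = (u16_t)(offset + len);
  }
  return NULL;
}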
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_TCP_H +#define LWIP_HDR_PROT_TCP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Length of the TCP header, excluding options. */ +#define TCP_HLEN 20 + +/* Fields are (of course) in network byte order. + * Some fields are converted to host byte order in tcp_input(). 
+ */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct tcp_hdr { + PACK_STRUCT_FIELD(u16_t src); + PACK_STRUCT_FIELD(u16_t dest); + PACK_STRUCT_FIELD(u32_t seqno); + PACK_STRUCT_FIELD(u32_t ackno); + PACK_STRUCT_FIELD(u16_t _hdrlen_rsvd_flags); + PACK_STRUCT_FIELD(u16_t wnd); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t urgp); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* TCP header flags bits */ +#define TCP_FIN 0x01U +#define TCP_SYN 0x02U +#define TCP_RST 0x04U +#define TCP_PSH 0x08U +#define TCP_ACK 0x10U +#define TCP_URG 0x20U +#define TCP_ECE 0x40U +#define TCP_CWR 0x80U +/* Valid TCP header flags */ +#define TCP_FLAGS 0x3fU + +#define TCPH_HDRLEN(phdr) ((u16_t)(lwip_ntohs((phdr)->_hdrlen_rsvd_flags) >> 12)) +#define TCPH_FLAGS(phdr) ((u16_t)(lwip_ntohs((phdr)->_hdrlen_rsvd_flags) & TCP_FLAGS)) + +#define TCPH_HDRLEN_SET(phdr, len) (phdr)->_hdrlen_rsvd_flags = lwip_htons(((len) << 12) | TCPH_FLAGS(phdr)) +#define TCPH_FLAGS_SET(phdr, flags) (phdr)->_hdrlen_rsvd_flags = (((phdr)->_hdrlen_rsvd_flags & PP_HTONS(~TCP_FLAGS)) | lwip_htons(flags)) +#define TCPH_HDRLEN_FLAGS_SET(phdr, len, flags) (phdr)->_hdrlen_rsvd_flags = (u16_t)(lwip_htons((u16_t)((len) << 12) | (flags))) + +#define TCPH_SET_FLAG(phdr, flags ) (phdr)->_hdrlen_rsvd_flags = ((phdr)->_hdrlen_rsvd_flags | lwip_htons(flags)) +#define TCPH_UNSET_FLAG(phdr, flags) (phdr)->_hdrlen_rsvd_flags = ((phdr)->_hdrlen_rsvd_flags & ~lwip_htons(flags)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_TCP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h new file mode 100644 index 000000000..4904b7e2e --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h @@ -0,0 +1,68 @@ +/** + * @file + * UDP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
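The _hdrlen_rsvd_flags accessors above pack the 4-bit data offset (counted in 32-bit words) and the flag bits into one network-order u16_t. A small illustrative sketch of how they combine, not part of the lwIP sources:

static void tcp_hdr_flags_demo(struct tcp_hdr *hdr)
{
  /* 20-byte header without options = 5 words; set SYN|ACK in one store */
  TCPH_HDRLEN_FLAGS_SET(hdr, 5, TCP_SYN | TCP_ACK);

  if (TCPH_FLAGS(hdr) & TCP_SYN) {
    /* flag tests work on the host-order value the macro returns */
  }

  /* reads back as 5, i.e. the data offset in 32-bit words */
  u16_t hdrlen_words = TCPH_HDRLEN(hdr);
  (void)hdrlen_words;
}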
+ * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_UDP_H +#define LWIP_HDR_PROT_UDP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define UDP_HLEN 8 + +/* Fields are (of course) in network byte order. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct udp_hdr { + PACK_STRUCT_FIELD(u16_t src); + PACK_STRUCT_FIELD(u16_t dest); /* src/dest UDP ports */ + PACK_STRUCT_FIELD(u16_t len); + PACK_STRUCT_FIELD(u16_t chksum); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_UDP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/raw.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/raw.h new file mode 100644 index 000000000..9f37a0fb2 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/raw.h @@ -0,0 +1,118 @@ +/** + * @file + * raw API (to be used from TCPIP thread)\n + * See also @ref raw_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_RAW_H +#define LWIP_HDR_RAW_H + +#include "lwip/opt.h" + +#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/def.h" +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct raw_pcb; + +/** Function prototype for raw pcb receive callback functions. + * @param arg user supplied argument (raw_pcb.recv_arg) + * @param pcb the raw_pcb which received data + * @param p the packet buffer that was received + * @param addr the remote IP address from which the packet was received + * @return 1 if the packet was 'eaten' (aka. 
deleted), + * 0 if the packet lives on + * If returning 1, the callback is responsible for freeing the pbuf + * if it's not used any more. + */ +typedef u8_t (*raw_recv_fn)(void *arg, struct raw_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr); + +/** the RAW protocol control block */ +struct raw_pcb { + /* Common members of all PCB types */ + IP_PCB; + + struct raw_pcb *next; + + u8_t protocol; + + /** receive callback function */ + raw_recv_fn recv; + /* user-supplied argument for the recv callback */ + void *recv_arg; +#if LWIP_IPV6 + /* fields for handling checksum computations as per RFC3542. */ + u16_t chksum_offset; + u8_t chksum_reqd; +#endif +}; + +/* The following functions is the application layer interface to the + RAW code. */ +struct raw_pcb * raw_new (u8_t proto); +struct raw_pcb * raw_new_ip_type(u8_t type, u8_t proto); +void raw_remove (struct raw_pcb *pcb); +err_t raw_bind (struct raw_pcb *pcb, const ip_addr_t *ipaddr); +err_t raw_connect (struct raw_pcb *pcb, const ip_addr_t *ipaddr); + +err_t raw_sendto (struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *ipaddr); +err_t raw_send (struct raw_pcb *pcb, struct pbuf *p); + +void raw_recv (struct raw_pcb *pcb, raw_recv_fn recv, void *recv_arg); + +/* The following functions are the lower layer interface to RAW. */ +u8_t raw_input (struct pbuf *p, struct netif *inp); +#define raw_init() /* Compatibility define, no init needed. */ + +void raw_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr); + +/* for compatibility with older implementation */ +#define raw_new_ip6(proto) raw_new_ip_type(IPADDR_TYPE_V6, proto) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_RAW */ + +#endif /* LWIP_HDR_RAW_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/sio.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/sio.h new file mode 100644 index 000000000..efa8adea2 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/sio.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
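A typical use of the raw API declared above: claim ICMP traffic with a callback, remembering that a callback returning 1 owns the pbuf and must free it. A hedged sketch, to be run from TCPIP-thread context (not part of the vendored sources):

static u8_t my_icmp_recv(void *arg, struct raw_pcb *pcb, struct pbuf *p,
                         const ip_addr_t *addr)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(pcb);
  LWIP_UNUSED_ARG(addr);
  /* ... inspect p->payload here ... */
  pbuf_free(p); /* we return 1 below, so the pbuf is ours to free */
  return 1;     /* packet eaten: lwIP will not process it further */
}

static void my_icmp_raw_init(void)
{
  struct raw_pcb *pcb = raw_new(IP_PROTO_ICMP); /* protocol number 1 */
  if (pcb != NULL) {
    raw_bind(pcb, IP_ADDR_ANY);
    raw_recv(pcb, my_icmp_recv, NULL);
  }
}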
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + */ + +/* + * This is the interface to the platform specific serial IO module + * It needs to be implemented by those platforms which need SLIP or PPP + */ + +#ifndef SIO_H +#define SIO_H + +#include "lwip/arch.h" +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* If you want to define sio_fd_t elsewhere or differently, + define this in your cc.h file. */ +#ifndef __sio_fd_t_defined +typedef void * sio_fd_t; +#endif + +/* The following functions can be defined to something else in your cc.h file + or be implemented in your custom sio.c file. */ + +#ifndef sio_open +/** + * Opens a serial device for communication. + * + * @param devnum device number + * @return handle to serial device if successful, NULL otherwise + */ +sio_fd_t sio_open(u8_t devnum); +#endif + +#ifndef sio_send +/** + * Sends a single character to the serial device. + * + * @param c character to send + * @param fd serial device handle + * + * @note This function will block until the character can be sent. + */ +void sio_send(u8_t c, sio_fd_t fd); +#endif + +#ifndef sio_recv +/** + * Receives a single character from the serial device. + * + * @param fd serial device handle + * + * @note This function will block until a character is received. + */ +u8_t sio_recv(sio_fd_t fd); +#endif + +#ifndef sio_read +/** + * Reads from the serial device. + * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received - may be 0 if aborted by sio_read_abort + * + * @note This function will block until data can be received. The blocking + * can be cancelled by calling sio_read_abort(). + */ +u32_t sio_read(sio_fd_t fd, u8_t *data, u32_t len); +#endif + +#ifndef sio_tryread +/** + * Tries to read from the serial device. Same as sio_read but returns + * immediately if no data is available and never blocks. + * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received + */ +u32_t sio_tryread(sio_fd_t fd, u8_t *data, u32_t len); +#endif + +#ifndef sio_write +/** + * Writes to the serial device. + * + * @param fd serial device handle + * @param data pointer to data to send + * @param len length (in bytes) of data to send + * @return number of bytes actually sent + * + * @note This function will block until all data can be sent. + */ +u32_t sio_write(sio_fd_t fd, u8_t *data, u32_t len); +#endif + +#ifndef sio_read_abort +/** + * Aborts a blocking sio_read() call. 
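On this firmware's STM32 targets, a sio port usually just wraps the Cube HAL UART driver; only the hooks that SLIP/PPP actually call need real bodies. A minimal sketch under stated assumptions: the HAL header name and the CubeMX-generated handle huart1 below are placeholders, not files or symbols this diff provides.

#include "stm32f7xx_hal.h" /* placeholder: use the HAL header for your MCU */

extern UART_HandleTypeDef huart1; /* assumed to exist in the Cube project */

sio_fd_t sio_open(u8_t devnum)
{
  LWIP_UNUSED_ARG(devnum);
  return (sio_fd_t)&huart1; /* hand the HAL handle back as the "fd" */
}

void sio_send(u8_t c, sio_fd_t fd)
{
  HAL_UART_Transmit((UART_HandleTypeDef *)fd, &c, 1, HAL_MAX_DELAY);
}

u32_t sio_read(sio_fd_t fd, u8_t *data, u32_t len)
{
  /* block up to 1 s; returning 0 matches the "aborted" contract above */
  if (HAL_UART_Receive((UART_HandleTypeDef *)fd, data, (uint16_t)len,
                       1000) == HAL_OK) {
    return len;
  }
  return 0;
}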
+ * + * @param fd serial device handle + */ +void sio_read_abort(sio_fd_t fd); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* SIO_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h new file mode 100644 index 000000000..80e6e57c2 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h @@ -0,0 +1,213 @@ +/** + * @file + * SNMP support API for implementing netifs and statitics for MIB2 + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_SNMP_H +#define LWIP_HDR_SNMP_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct udp_pcb; +struct netif; + +/** + * @defgroup netif_mib2 MIB2 statistics + * @ingroup netif + */ + +/* MIB2 statistics functions */ +#if MIB2_STATS /* don't build if not configured for use in lwipopts.h */ +/** + * @ingroup netif_mib2 + * @see RFC1213, "MIB-II, 6. Definitions" + */ +enum snmp_ifType { + snmp_ifType_other=1, /* none of the following */ + snmp_ifType_regular1822, + snmp_ifType_hdh1822, + snmp_ifType_ddn_x25, + snmp_ifType_rfc877_x25, + snmp_ifType_ethernet_csmacd, + snmp_ifType_iso88023_csmacd, + snmp_ifType_iso88024_tokenBus, + snmp_ifType_iso88025_tokenRing, + snmp_ifType_iso88026_man, + snmp_ifType_starLan, + snmp_ifType_proteon_10Mbit, + snmp_ifType_proteon_80Mbit, + snmp_ifType_hyperchannel, + snmp_ifType_fddi, + snmp_ifType_lapb, + snmp_ifType_sdlc, + snmp_ifType_ds1, /* T-1 */ + snmp_ifType_e1, /* european equiv. 
of T-1 */ + snmp_ifType_basicISDN, + snmp_ifType_primaryISDN, /* proprietary serial */ + snmp_ifType_propPointToPointSerial, + snmp_ifType_ppp, + snmp_ifType_softwareLoopback, + snmp_ifType_eon, /* CLNP over IP [11] */ + snmp_ifType_ethernet_3Mbit, + snmp_ifType_nsip, /* XNS over IP */ + snmp_ifType_slip, /* generic SLIP */ + snmp_ifType_ultra, /* ULTRA technologies */ + snmp_ifType_ds3, /* T-3 */ + snmp_ifType_sip, /* SMDS */ + snmp_ifType_frame_relay +}; + +/** This macro has a precision of ~49 days because sys_now returns u32_t. \#define your own if you want ~490 days. */ +#ifndef MIB2_COPY_SYSUPTIME_TO +#define MIB2_COPY_SYSUPTIME_TO(ptrToVal) (*(ptrToVal) = (sys_now() / 10)) +#endif + +/** + * @ingroup netif_mib2 + * Increment stats member for SNMP MIB2 stats (struct stats_mib2_netif_ctrs) + */ +#define MIB2_STATS_NETIF_INC(n, x) do { ++(n)->mib2_counters.x; } while(0) +/** + * @ingroup netif_mib2 + * Add value to stats member for SNMP MIB2 stats (struct stats_mib2_netif_ctrs) + */ +#define MIB2_STATS_NETIF_ADD(n, x, val) do { (n)->mib2_counters.x += (val); } while(0) + +/** + * @ingroup netif_mib2 + * Init MIB2 statistic counters in netif + * @param netif Netif to init + * @param type one of enum @ref snmp_ifType + * @param speed your link speed here (units: bits per second) + */ +#define MIB2_INIT_NETIF(netif, type, speed) do { \ + (netif)->link_type = (type); \ + (netif)->link_speed = (speed);\ + (netif)->ts = 0; \ + (netif)->mib2_counters.ifinoctets = 0; \ + (netif)->mib2_counters.ifinucastpkts = 0; \ + (netif)->mib2_counters.ifinnucastpkts = 0; \ + (netif)->mib2_counters.ifindiscards = 0; \ + (netif)->mib2_counters.ifinerrors = 0; \ + (netif)->mib2_counters.ifinunknownprotos = 0; \ + (netif)->mib2_counters.ifoutoctets = 0; \ + (netif)->mib2_counters.ifoutucastpkts = 0; \ + (netif)->mib2_counters.ifoutnucastpkts = 0; \ + (netif)->mib2_counters.ifoutdiscards = 0; \ + (netif)->mib2_counters.ifouterrors = 0; } while(0) +#else /* MIB2_STATS */ +#ifndef MIB2_COPY_SYSUPTIME_TO +#define MIB2_COPY_SYSUPTIME_TO(ptrToVal) +#endif +#define MIB2_INIT_NETIF(netif, type, speed) +#define MIB2_STATS_NETIF_INC(n, x) +#define MIB2_STATS_NETIF_ADD(n, x, val) +#endif /* MIB2_STATS */ + +/* LWIP MIB2 callbacks */ +#if LWIP_MIB2_CALLBACKS /* don't build if not configured for use in lwipopts.h */ +/* network interface */ +void mib2_netif_added(struct netif *ni); +void mib2_netif_removed(struct netif *ni); + +#if LWIP_IPV4 && LWIP_ARP +/* ARP (for atTable and ipNetToMediaTable) */ +void mib2_add_arp_entry(struct netif *ni, ip4_addr_t *ip); +void mib2_remove_arp_entry(struct netif *ni, ip4_addr_t *ip); +#else /* LWIP_IPV4 && LWIP_ARP */ +#define mib2_add_arp_entry(ni,ip) +#define mib2_remove_arp_entry(ni,ip) +#endif /* LWIP_IPV4 && LWIP_ARP */ + +/* IP */ +#if LWIP_IPV4 +void mib2_add_ip4(struct netif *ni); +void mib2_remove_ip4(struct netif *ni); +void mib2_add_route_ip4(u8_t dflt, struct netif *ni); +void mib2_remove_route_ip4(u8_t dflt, struct netif *ni); +#endif /* LWIP_IPV4 */ + +/* UDP */ +#if LWIP_UDP +void mib2_udp_bind(struct udp_pcb *pcb); +void mib2_udp_unbind(struct udp_pcb *pcb); +#endif /* LWIP_UDP */ + +#else /* LWIP_MIB2_CALLBACKS */ +/* LWIP_MIB2_CALLBACKS support not available */ +/* define everything to be empty */ + +/* network interface */ +#define mib2_netif_added(ni) +#define mib2_netif_removed(ni) + +/* ARP */ +#define mib2_add_arp_entry(ni,ip) +#define mib2_remove_arp_entry(ni,ip) + +/* IP */ +#define mib2_add_ip4(ni) +#define mib2_remove_ip4(ni) +#define mib2_add_route_ip4(dflt, 
ni) +#define mib2_remove_route_ip4(dflt, ni) + +/* UDP */ +#define mib2_udp_bind(pcb) +#define mib2_udp_unbind(pcb) +#endif /* LWIP_MIB2_CALLBACKS */ + +/* for source-code compatibility reasons only, can be removed (not used internally) */ +#define NETIF_INIT_SNMP MIB2_INIT_NETIF +#define snmp_add_ifinoctets(ni,value) MIB2_STATS_NETIF_ADD(ni, ifinoctets, value) +#define snmp_inc_ifinucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifinucastpkts) +#define snmp_inc_ifinnucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifinnucastpkts) +#define snmp_inc_ifindiscards(ni) MIB2_STATS_NETIF_INC(ni, ifindiscards) +#define snmp_inc_ifinerrors(ni) MIB2_STATS_NETIF_INC(ni, ifinerrors) +#define snmp_inc_ifinunknownprotos(ni) MIB2_STATS_NETIF_INC(ni, ifinunknownprotos) +#define snmp_add_ifoutoctets(ni,value) MIB2_STATS_NETIF_ADD(ni, ifoutoctets, value) +#define snmp_inc_ifoutucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifoutucastpkts) +#define snmp_inc_ifoutnucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifoutnucastpkts) +#define snmp_inc_ifoutdiscards(ni) MIB2_STATS_NETIF_INC(ni, ifoutdiscards) +#define snmp_inc_ifouterrors(ni) MIB2_STATS_NETIF_INC(ni, ifouterrors) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_SNMP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h new file mode 100644 index 000000000..ab332921f --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h @@ -0,0 +1,593 @@ +/** + * @file + * Socket API (to be used from non-TCPIP threads) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
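The MIB2 hooks above are meant to be invoked from a netif driver, which is exactly what ST's ethernetif template does. A hedged sketch of where the calls belong (the function name and the 100 Mbit/s figure are placeholders):

static err_t my_ethernetif_init(struct netif *netif)
{
  /* advertise an Ethernet interface running at 100 Mbit/s to MIB2 */
  MIB2_INIT_NETIF(netif, snmp_ifType_ethernet_csmacd, 100000000);
  /* ... usual netif setup: name, MAC address, output functions ... */
  return ERR_OK;
}

/* and in the RX path, per received frame: */
/*   MIB2_STATS_NETIF_ADD(netif, ifinoctets, p->tot_len); */
/*   MIB2_STATS_NETIF_INC(netif, ifinucastpkts);          */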
+ * + * Author: Adam Dunkels + * + */ + + +#ifndef LWIP_HDR_SOCKETS_H +#define LWIP_HDR_SOCKETS_H + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/inet.h" +#include "lwip/errno.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* If your port already typedef's sa_family_t, define SA_FAMILY_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(sa_family_t) && !defined(SA_FAMILY_T_DEFINED) +typedef u8_t sa_family_t; +#endif +/* If your port already typedef's in_port_t, define IN_PORT_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(in_port_t) && !defined(IN_PORT_T_DEFINED) +typedef u16_t in_port_t; +#endif + +#if LWIP_IPV4 +/* members are in network byte order */ +struct sockaddr_in { + u8_t sin_len; + sa_family_t sin_family; + in_port_t sin_port; + struct in_addr sin_addr; +#define SIN_ZERO_LEN 8 + char sin_zero[SIN_ZERO_LEN]; +}; +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +struct sockaddr_in6 { + u8_t sin6_len; /* length of this structure */ + sa_family_t sin6_family; /* AF_INET6 */ + in_port_t sin6_port; /* Transport layer port # */ + u32_t sin6_flowinfo; /* IPv6 flow information */ + struct in6_addr sin6_addr; /* IPv6 address */ + u32_t sin6_scope_id; /* Set of interfaces for scope */ +}; +#endif /* LWIP_IPV6 */ + +struct sockaddr { + u8_t sa_len; + sa_family_t sa_family; + char sa_data[14]; +}; + +struct sockaddr_storage { + u8_t s2_len; + sa_family_t ss_family; + char s2_data1[2]; + u32_t s2_data2[3]; +#if LWIP_IPV6 + u32_t s2_data3[3]; +#endif /* LWIP_IPV6 */ +}; + +/* If your port already typedef's socklen_t, define SOCKLEN_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(socklen_t) && !defined(SOCKLEN_T_DEFINED) +typedef u32_t socklen_t; +#endif + +struct lwip_sock; + +#if !LWIP_TCPIP_CORE_LOCKING +/** Maximum optlen used by setsockopt/getsockopt */ +#define LWIP_SETGETSOCKOPT_MAXOPTLEN 16 + +/** This struct is used to pass data to the set/getsockopt_internal + * functions running in tcpip_thread context (only a void* is allowed) */ +struct lwip_setgetsockopt_data { + /** socket index for which to change options */ + int s; + /** level of the option to process */ + int level; + /** name of the option to process */ + int optname; + /** set: value to set the option to + * get: value of the option is stored here */ +#if LWIP_MPU_COMPATIBLE + u8_t optval[LWIP_SETGETSOCKOPT_MAXOPTLEN]; +#else + union { + void *p; + const void *pc; + } optval; +#endif + /** size of *optval */ + socklen_t optlen; + /** if an error occurs, it is temporarily stored here */ + err_t err; + /** semaphore to wake up the calling task */ + void* completed_sem; +}; +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + +#if !defined(iovec) +struct iovec { + void *iov_base; + size_t iov_len; +}; +#endif + +struct msghdr { + void *msg_name; + socklen_t msg_namelen; + struct iovec *msg_iov; + int msg_iovlen; + void *msg_control; + socklen_t msg_controllen; + int msg_flags; +}; + +/* Socket protocol types (TCP/UDP/RAW) */ +#define SOCK_STREAM 1 +#define SOCK_DGRAM 2 +#define SOCK_RAW 3 + +/* + * Option flags per-socket. 
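The sockaddr variants above differ from BSD mainly in the leading length byte, so zeroing the structure before filling it is the safe pattern. An illustrative listener setup (port 5025 is just an example value, not something this diff configures):

#include <string.h>

static int make_listener(void)
{
  struct sockaddr_in addr;
  int s = lwip_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
  if (s < 0) {
    return -1;
  }
  memset(&addr, 0, sizeof(addr));
  addr.sin_len         = sizeof(addr);
  addr.sin_family      = AF_INET;
  addr.sin_port        = lwip_htons(5025);
  addr.sin_addr.s_addr = INADDR_ANY;
  if (lwip_bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
    lwip_close(s);
    return -1;
  }
  lwip_listen(s, 1);
  return s;
}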
These must match the SOF_ flags in ip.h (checked in init.c) + */ +#define SO_REUSEADDR 0x0004 /* Allow local address reuse */ +#define SO_KEEPALIVE 0x0008 /* keep connections alive */ +#define SO_BROADCAST 0x0020 /* permit to send and to receive broadcast messages (see IP_SOF_BROADCAST option) */ + + +/* + * Additional options, not kept in so_options. + */ +#define SO_DEBUG 0x0001 /* Unimplemented: turn on debugging info recording */ +#define SO_ACCEPTCONN 0x0002 /* socket has had listen() */ +#define SO_DONTROUTE 0x0010 /* Unimplemented: just use interface addresses */ +#define SO_USELOOPBACK 0x0040 /* Unimplemented: bypass hardware when possible */ +#define SO_LINGER 0x0080 /* linger on close if data present */ +#define SO_DONTLINGER ((int)(~SO_LINGER)) +#define SO_OOBINLINE 0x0100 /* Unimplemented: leave received OOB data in line */ +#define SO_REUSEPORT 0x0200 /* Unimplemented: allow local address & port reuse */ +#define SO_SNDBUF 0x1001 /* Unimplemented: send buffer size */ +#define SO_RCVBUF 0x1002 /* receive buffer size */ +#define SO_SNDLOWAT 0x1003 /* Unimplemented: send low-water mark */ +#define SO_RCVLOWAT 0x1004 /* Unimplemented: receive low-water mark */ +#define SO_SNDTIMEO 0x1005 /* send timeout */ +#define SO_RCVTIMEO 0x1006 /* receive timeout */ +#define SO_ERROR 0x1007 /* get error status and clear */ +#define SO_TYPE 0x1008 /* get socket type */ +#define SO_CONTIMEO 0x1009 /* Unimplemented: connect timeout */ +#define SO_NO_CHECK 0x100a /* don't create UDP checksum */ + + +/* + * Structure used for manipulating linger option. + */ +struct linger { + int l_onoff; /* option on/off */ + int l_linger; /* linger time in seconds */ +}; + +/* + * Level number for (get/set)sockopt() to apply to socket itself. + */ +#define SOL_SOCKET 0xfff /* options for socket level */ + + +#define AF_UNSPEC 0 +#define AF_INET 2 +#if LWIP_IPV6 +#define AF_INET6 10 +#else /* LWIP_IPV6 */ +#define AF_INET6 AF_UNSPEC +#endif /* LWIP_IPV6 */ +#define PF_INET AF_INET +#define PF_INET6 AF_INET6 +#define PF_UNSPEC AF_UNSPEC + +#define IPPROTO_IP 0 +#define IPPROTO_ICMP 1 +#define IPPROTO_TCP 6 +#define IPPROTO_UDP 17 +#if LWIP_IPV6 +#define IPPROTO_IPV6 41 +#define IPPROTO_ICMPV6 58 +#endif /* LWIP_IPV6 */ +#define IPPROTO_UDPLITE 136 +#define IPPROTO_RAW 255 + +/* Flags we can use with send and recv. */ +#define MSG_PEEK 0x01 /* Peeks at an incoming message */ +#define MSG_WAITALL 0x02 /* Unimplemented: Requests that the function block until the full amount of data requested can be returned */ +#define MSG_OOB 0x04 /* Unimplemented: Requests out-of-band data. 
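Options from the first group map onto the pcb's so_options word; SO_REUSEADDR, for instance, only takes effect when SO_REUSE is enabled in lwipopts.h. A short hedged sketch:

static void tune_listener(int s)
{
  int on = 1;
  /* allow quick rebinding after a restart (requires SO_REUSE in lwipopts.h) */
  lwip_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
  /* probe idle peers so dead connections are eventually torn down */
  lwip_setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
}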
The significance and semantics of out-of-band data are protocol-specific */ +#define MSG_DONTWAIT 0x08 /* Nonblocking i/o for this operation only */ +#define MSG_MORE 0x10 /* Sender will send more */ + + +/* + * Options for level IPPROTO_IP + */ +#define IP_TOS 1 +#define IP_TTL 2 + +#if LWIP_TCP +/* + * Options for level IPPROTO_TCP + */ +#define TCP_NODELAY 0x01 /* don't delay send to coalesce packets */ +#define TCP_KEEPALIVE 0x02 /* send KEEPALIVE probes when idle for pcb->keep_idle milliseconds */ +#define TCP_KEEPIDLE 0x03 /* set pcb->keep_idle - Same as TCP_KEEPALIVE, but use seconds for get/setsockopt */ +#define TCP_KEEPINTVL 0x04 /* set pcb->keep_intvl - Use seconds for get/setsockopt */ +#define TCP_KEEPCNT 0x05 /* set pcb->keep_cnt - Use number of probes sent for get/setsockopt */ +#endif /* LWIP_TCP */ + +#if LWIP_IPV6 +/* + * Options for level IPPROTO_IPV6 + */ +#define IPV6_CHECKSUM 7 /* RFC3542: calculate and insert the ICMPv6 checksum for raw sockets. */ +#define IPV6_V6ONLY 27 /* RFC3493: boolean control to restrict AF_INET6 sockets to IPv6 communications only. */ +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE +/* + * Options for level IPPROTO_UDPLITE + */ +#define UDPLITE_SEND_CSCOV 0x01 /* sender checksum coverage */ +#define UDPLITE_RECV_CSCOV 0x02 /* minimal receiver checksum coverage */ +#endif /* LWIP_UDP && LWIP_UDPLITE*/ + + +#if LWIP_MULTICAST_TX_OPTIONS +/* + * Options and types for UDP multicast traffic handling + */ +#define IP_MULTICAST_TTL 5 +#define IP_MULTICAST_IF 6 +#define IP_MULTICAST_LOOP 7 +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if LWIP_IGMP +/* + * Options and types related to multicast membership + */ +#define IP_ADD_MEMBERSHIP 3 +#define IP_DROP_MEMBERSHIP 4 + +typedef struct ip_mreq { + struct in_addr imr_multiaddr; /* IP multicast address of group */ + struct in_addr imr_interface; /* local IP address of interface */ +} ip_mreq; +#endif /* LWIP_IGMP */ + +/* + * The Type of Service provides an indication of the abstract + * parameters of the quality of service desired. These parameters are + * to be used to guide the selection of the actual service parameters + * when transmitting a datagram through a particular network. Several + * networks offer service precedence, which somehow treats high + * precedence traffic as more important than other traffic (generally + * by accepting only traffic above a certain precedence at time of high + * load). The major choice is a three way tradeoff between low-delay, + * high-reliability, and high-throughput. + * The use of the Delay, Throughput, and Reliability indications may + * increase the cost (in some sense) of the service. In many networks + * better performance for one of these parameters is coupled with worse + * performance on another. Except for very unusual cases at most two + * of these three indications should be set. + */ +#define IPTOS_TOS_MASK 0x1E +#define IPTOS_TOS(tos) ((tos) & IPTOS_TOS_MASK) +#define IPTOS_LOWDELAY 0x10 +#define IPTOS_THROUGHPUT 0x08 +#define IPTOS_RELIABILITY 0x04 +#define IPTOS_LOWCOST 0x02 +#define IPTOS_MINCOST IPTOS_LOWCOST + +/* + * The Network Control precedence designation is intended to be used + * within a network only. The actual use and control of that + * designation is up to each network. The Internetwork Control + * designation is intended for use by gateway control originators only. 
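The TOS byte described above is set per socket through level IPPROTO_IP and sticks to the underlying pcb for all later sends. A sketch:

static void mark_low_delay(int s)
{
  int tos = IPTOS_LOWDELAY; /* favour latency over throughput */
  lwip_setsockopt(s, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
}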
+ * If the actual use of these precedence designations is of concern to + * a particular network, it is the responsibility of that network to + * control the access to, and use of, those precedence designations. + */ +#define IPTOS_PREC_MASK 0xe0 +#define IPTOS_PREC(tos) ((tos) & IPTOS_PREC_MASK) +#define IPTOS_PREC_NETCONTROL 0xe0 +#define IPTOS_PREC_INTERNETCONTROL 0xc0 +#define IPTOS_PREC_CRITIC_ECP 0xa0 +#define IPTOS_PREC_FLASHOVERRIDE 0x80 +#define IPTOS_PREC_FLASH 0x60 +#define IPTOS_PREC_IMMEDIATE 0x40 +#define IPTOS_PREC_PRIORITY 0x20 +#define IPTOS_PREC_ROUTINE 0x00 + + +/* + * Commands for ioctlsocket(), taken from the BSD file fcntl.h. + * lwip_ioctl only supports FIONREAD and FIONBIO, for now + * + * Ioctl's have the command encoded in the lower word, + * and the size of any in or out parameters in the upper + * word. The high 2 bits of the upper word are used + * to encode the in/out status of the parameter; for now + * we restrict parameters to at most 128 bytes. + */ +#if !defined(FIONREAD) || !defined(FIONBIO) +#define IOCPARM_MASK 0x7fU /* parameters must be < 128 bytes */ +#define IOC_VOID 0x20000000UL /* no parameters */ +#define IOC_OUT 0x40000000UL /* copy out parameters */ +#define IOC_IN 0x80000000UL /* copy in parameters */ +#define IOC_INOUT (IOC_IN|IOC_OUT) + /* 0x20000000 distinguishes new & + old ioctl's */ +#define _IO(x,y) (IOC_VOID|((x)<<8)|(y)) + +#define _IOR(x,y,t) (IOC_OUT|(((long)sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|(y)) + +#define _IOW(x,y,t) (IOC_IN|(((long)sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|(y)) +#endif /* !defined(FIONREAD) || !defined(FIONBIO) */ + +#ifndef FIONREAD +#define FIONREAD _IOR('f', 127, unsigned long) /* get # bytes to read */ +#endif +#ifndef FIONBIO +#define FIONBIO _IOW('f', 126, unsigned long) /* set/clear non-blocking i/o */ +#endif + +/* Socket I/O Controls: unimplemented */ +#ifndef SIOCSHIWAT +#define SIOCSHIWAT _IOW('s', 0, unsigned long) /* set high watermark */ +#define SIOCGHIWAT _IOR('s', 1, unsigned long) /* get high watermark */ +#define SIOCSLOWAT _IOW('s', 2, unsigned long) /* set low watermark */ +#define SIOCGLOWAT _IOR('s', 3, unsigned long) /* get low watermark */ +#define SIOCATMARK _IOR('s', 7, unsigned long) /* at oob mark? */ +#endif + +/* commands for fnctl */ +#ifndef F_GETFL +#define F_GETFL 3 +#endif +#ifndef F_SETFL +#define F_SETFL 4 +#endif + +/* File status flags and file access modes for fnctl, + these are bits in an int. 
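Since lwip_ioctl honours only FIONREAD and FIONBIO, and lwip_fcntl only the O_NONBLOCK bit (defined just below), making a socket non-blocking looks like this sketch:

static void set_nonblocking(int s)
{
  u32_t on = 1;
  lwip_ioctl(s, FIONBIO, &on); /* BSD ioctl style */

  /* equivalent fcntl style */
  int flags = lwip_fcntl(s, F_GETFL, 0);
  lwip_fcntl(s, F_SETFL, flags | O_NONBLOCK);
}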
*/ +#ifndef O_NONBLOCK +#define O_NONBLOCK 1 /* nonblocking I/O */ +#endif +#ifndef O_NDELAY +#define O_NDELAY 1 /* same as O_NONBLOCK, for compatibility */ +#endif + +#ifndef SHUT_RD + #define SHUT_RD 0 + #define SHUT_WR 1 + #define SHUT_RDWR 2 +#endif + +/* FD_SET used for lwip_select */ +#ifndef FD_SET +#undef FD_SETSIZE +/* Make FD_SETSIZE match NUM_SOCKETS in socket.c */ +#define FD_SETSIZE MEMP_NUM_NETCONN +#define FDSETSAFESET(n, code) do { \ + if (((n) - LWIP_SOCKET_OFFSET < MEMP_NUM_NETCONN) && (((int)(n) - LWIP_SOCKET_OFFSET) >= 0)) { \ + code; }} while(0) +#define FDSETSAFEGET(n, code) (((n) - LWIP_SOCKET_OFFSET < MEMP_NUM_NETCONN) && (((int)(n) - LWIP_SOCKET_OFFSET) >= 0) ?\ + (code) : 0) +#define FD_SET(n, p) FDSETSAFESET(n, (p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] |= (1 << (((n)-LWIP_SOCKET_OFFSET) & 7))) +#define FD_CLR(n, p) FDSETSAFESET(n, (p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] &= ~(1 << (((n)-LWIP_SOCKET_OFFSET) & 7))) +#define FD_ISSET(n,p) FDSETSAFEGET(n, (p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] & (1 << (((n)-LWIP_SOCKET_OFFSET) & 7))) +#define FD_ZERO(p) memset((void*)(p), 0, sizeof(*(p))) + +typedef struct fd_set +{ + unsigned char fd_bits [(FD_SETSIZE+7)/8]; +} fd_set; + +#elif LWIP_SOCKET_OFFSET +#error LWIP_SOCKET_OFFSET does not work with external FD_SET! +#elif FD_SETSIZE < MEMP_NUM_NETCONN +#error "external FD_SETSIZE too small for number of sockets" +#endif /* FD_SET */ + +/** LWIP_TIMEVAL_PRIVATE: if you want to use the struct timeval provided + * by your system, set this to 0 and include in cc.h */ +#ifndef LWIP_TIMEVAL_PRIVATE +#define LWIP_TIMEVAL_PRIVATE 1 +#endif + +#if LWIP_TIMEVAL_PRIVATE +struct timeval { + long tv_sec; /* seconds */ + long tv_usec; /* and microseconds */ +}; +#endif /* LWIP_TIMEVAL_PRIVATE */ + +#define lwip_socket_init() /* Compatibility define, no init needed. 
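With the fd_set machinery and the private struct timeval above, a bounded wait for readability is the classic pattern; an illustrative sketch:

static int wait_readable(int s)
{
  fd_set rset;
  struct timeval tv;

  FD_ZERO(&rset);
  FD_SET(s, &rset);
  tv.tv_sec  = 0;
  tv.tv_usec = 100 * 1000; /* 100 ms */

  /* >0 readable, 0 timeout, <0 error */
  return lwip_select(s + 1, &rset, NULL, NULL, &tv);
}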
*/ +void lwip_socket_thread_init(void); /* LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */ +void lwip_socket_thread_cleanup(void); /* LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */ + +#if LWIP_COMPAT_SOCKETS == 2 +/* This helps code parsers/code completion by not having the COMPAT functions as defines */ +#define lwip_accept accept +#define lwip_bind bind +#define lwip_shutdown shutdown +#define lwip_getpeername getpeername +#define lwip_getsockname getsockname +#define lwip_setsockopt setsockopt +#define lwip_getsockopt getsockopt +#define lwip_close closesocket +#define lwip_connect connect +#define lwip_listen listen +#define lwip_recv recv +#define lwip_recvfrom recvfrom +#define lwip_send send +#define lwip_sendmsg sendmsg +#define lwip_sendto sendto +#define lwip_socket socket +#define lwip_select select +#define lwip_ioctlsocket ioctl + +#if LWIP_POSIX_SOCKETS_IO_NAMES +#define lwip_read read +#define lwip_write write +#define lwip_writev writev +#undef lwip_close +#define lwip_close close +#define closesocket(s) close(s) +#define lwip_fcntl fcntl +#define lwip_ioctl ioctl +#endif /* LWIP_POSIX_SOCKETS_IO_NAMES */ +#endif /* LWIP_COMPAT_SOCKETS == 2 */ + +int lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen); +int lwip_bind(int s, const struct sockaddr *name, socklen_t namelen); +int lwip_shutdown(int s, int how); +int lwip_getpeername (int s, struct sockaddr *name, socklen_t *namelen); +int lwip_getsockname (int s, struct sockaddr *name, socklen_t *namelen); +int lwip_getsockopt (int s, int level, int optname, void *optval, socklen_t *optlen); +int lwip_setsockopt (int s, int level, int optname, const void *optval, socklen_t optlen); +int lwip_close(int s); +int lwip_connect(int s, const struct sockaddr *name, socklen_t namelen); +int lwip_listen(int s, int backlog); +int lwip_recv(int s, void *mem, size_t len, int flags); +int lwip_read(int s, void *mem, size_t len); +int lwip_recvfrom(int s, void *mem, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen); +int lwip_send(int s, const void *dataptr, size_t size, int flags); +int lwip_sendmsg(int s, const struct msghdr *message, int flags); +int lwip_sendto(int s, const void *dataptr, size_t size, int flags, + const struct sockaddr *to, socklen_t tolen); +int lwip_socket(int domain, int type, int protocol); +int lwip_write(int s, const void *dataptr, size_t size); +int lwip_writev(int s, const struct iovec *iov, int iovcnt); +int lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset, + struct timeval *timeout); +int lwip_ioctl(int s, long cmd, void *argp); +int lwip_fcntl(int s, int cmd, int val); + +#if LWIP_COMPAT_SOCKETS +#if LWIP_COMPAT_SOCKETS != 2 +/** @ingroup socket */ +#define accept(s,addr,addrlen) lwip_accept(s,addr,addrlen) +/** @ingroup socket */ +#define bind(s,name,namelen) lwip_bind(s,name,namelen) +/** @ingroup socket */ +#define shutdown(s,how) lwip_shutdown(s,how) +/** @ingroup socket */ +#define getpeername(s,name,namelen) lwip_getpeername(s,name,namelen) +/** @ingroup socket */ +#define getsockname(s,name,namelen) lwip_getsockname(s,name,namelen) +/** @ingroup socket */ +#define setsockopt(s,level,optname,opval,optlen) lwip_setsockopt(s,level,optname,opval,optlen) +/** @ingroup socket */ +#define getsockopt(s,level,optname,opval,optlen) lwip_getsockopt(s,level,optname,opval,optlen) +/** @ingroup socket */ +#define closesocket(s) lwip_close(s) +/** @ingroup socket */ +#define connect(s,name,namelen) 
lwip_connect(s,name,namelen) +/** @ingroup socket */ +#define listen(s,backlog) lwip_listen(s,backlog) +/** @ingroup socket */ +#define recv(s,mem,len,flags) lwip_recv(s,mem,len,flags) +/** @ingroup socket */ +#define recvfrom(s,mem,len,flags,from,fromlen) lwip_recvfrom(s,mem,len,flags,from,fromlen) +/** @ingroup socket */ +#define send(s,dataptr,size,flags) lwip_send(s,dataptr,size,flags) +/** @ingroup socket */ +#define sendmsg(s,message,flags) lwip_sendmsg(s,message,flags) +/** @ingroup socket */ +#define sendto(s,dataptr,size,flags,to,tolen) lwip_sendto(s,dataptr,size,flags,to,tolen) +/** @ingroup socket */ +#define socket(domain,type,protocol) lwip_socket(domain,type,protocol) +/** @ingroup socket */ +#define select(maxfdp1,readset,writeset,exceptset,timeout) lwip_select(maxfdp1,readset,writeset,exceptset,timeout) +/** @ingroup socket */ +#define ioctlsocket(s,cmd,argp) lwip_ioctl(s,cmd,argp) + +#if LWIP_POSIX_SOCKETS_IO_NAMES +/** @ingroup socket */ +#define read(s,mem,len) lwip_read(s,mem,len) +/** @ingroup socket */ +#define write(s,dataptr,len) lwip_write(s,dataptr,len) +/** @ingroup socket */ +#define writev(s,iov,iovcnt) lwip_writev(s,iov,iovcnt) +/** @ingroup socket */ +#define close(s) lwip_close(s) +/** @ingroup socket */ +#define fcntl(s,cmd,val) lwip_fcntl(s,cmd,val) +/** @ingroup socket */ +#define ioctl(s,cmd,argp) lwip_ioctl(s,cmd,argp) +#endif /* LWIP_POSIX_SOCKETS_IO_NAMES */ +#endif /* LWIP_COMPAT_SOCKETS != 2 */ + +#if LWIP_IPV4 && LWIP_IPV6 +/** @ingroup socket */ +#define inet_ntop(af,src,dst,size) \ + (((af) == AF_INET6) ? ip6addr_ntoa_r((const ip6_addr_t*)(src),(dst),(size)) \ + : (((af) == AF_INET) ? ip4addr_ntoa_r((const ip4_addr_t*)(src),(dst),(size)) : NULL)) +/** @ingroup socket */ +#define inet_pton(af,src,dst) \ + (((af) == AF_INET6) ? ip6addr_aton((src),(ip6_addr_t*)(dst)) \ + : (((af) == AF_INET) ? ip4addr_aton((src),(ip4_addr_t*)(dst)) : 0)) +#elif LWIP_IPV4 /* LWIP_IPV4 && LWIP_IPV6 */ +#define inet_ntop(af,src,dst,size) \ + (((af) == AF_INET) ? ip4addr_ntoa_r((const ip4_addr_t*)(src),(dst),(size)) : NULL) +#define inet_pton(af,src,dst) \ + (((af) == AF_INET) ? ip4addr_aton((src),(ip4_addr_t*)(dst)) : 0) +#else /* LWIP_IPV4 && LWIP_IPV6 */ +#define inet_ntop(af,src,dst,size) \ + (((af) == AF_INET6) ? ip6addr_ntoa_r((const ip6_addr_t*)(src),(dst),(size)) : NULL) +#define inet_pton(af,src,dst) \ + (((af) == AF_INET6) ? ip6addr_aton((src),(ip6_addr_t*)(dst)) : 0) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#endif /* LWIP_COMPAT_SOCKETS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SOCKET */ + +#endif /* LWIP_HDR_SOCKETS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/stats.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/stats.h new file mode 100644 index 000000000..a0c818bbf --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/stats.h @@ -0,0 +1,491 @@ +/** + * @file + * Statistics API (to be used from TCPIP thread) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_STATS_H +#define LWIP_HDR_STATS_H + +#include "lwip/opt.h" + +#include "lwip/mem.h" +#include "lwip/memp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_STATS + +#ifndef LWIP_STATS_LARGE +#define LWIP_STATS_LARGE 0 +#endif + +#if LWIP_STATS_LARGE +#define STAT_COUNTER u32_t +#define STAT_COUNTER_F U32_F +#else +#define STAT_COUNTER u16_t +#define STAT_COUNTER_F U16_F +#endif + +/** Protocol related stats */ +struct stats_proto { + STAT_COUNTER xmit; /* Transmitted packets. */ + STAT_COUNTER recv; /* Received packets. */ + STAT_COUNTER fw; /* Forwarded packets. */ + STAT_COUNTER drop; /* Dropped packets. */ + STAT_COUNTER chkerr; /* Checksum error. */ + STAT_COUNTER lenerr; /* Invalid length error. */ + STAT_COUNTER memerr; /* Out of memory error. */ + STAT_COUNTER rterr; /* Routing error. */ + STAT_COUNTER proterr; /* Protocol error. */ + STAT_COUNTER opterr; /* Error in options. */ + STAT_COUNTER err; /* Misc error. */ + STAT_COUNTER cachehit; +}; + +/** IGMP stats */ +struct stats_igmp { + STAT_COUNTER xmit; /* Transmitted packets. */ + STAT_COUNTER recv; /* Received packets. */ + STAT_COUNTER drop; /* Dropped packets. */ + STAT_COUNTER chkerr; /* Checksum error. */ + STAT_COUNTER lenerr; /* Invalid length error. */ + STAT_COUNTER memerr; /* Out of memory error. */ + STAT_COUNTER proterr; /* Protocol error. */ + STAT_COUNTER rx_v1; /* Received v1 frames. */ + STAT_COUNTER rx_group; /* Received group-specific queries. */ + STAT_COUNTER rx_general; /* Received general queries. */ + STAT_COUNTER rx_report; /* Received reports. */ + STAT_COUNTER tx_join; /* Sent joins. */ + STAT_COUNTER tx_leave; /* Sent leaves. */ + STAT_COUNTER tx_report; /* Sent reports. 
*/ +}; + +/** Memory stats */ +struct stats_mem { +#if defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY + const char *name; +#endif /* defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY */ + STAT_COUNTER err; + mem_size_t avail; + mem_size_t used; + mem_size_t max; + STAT_COUNTER illegal; +}; + +/** System element stats */ +struct stats_syselem { + STAT_COUNTER used; + STAT_COUNTER max; + STAT_COUNTER err; +}; + +/** System stats */ +struct stats_sys { + struct stats_syselem sem; + struct stats_syselem mutex; + struct stats_syselem mbox; +}; + +/** SNMP MIB2 stats */ +struct stats_mib2 { + /* IP */ + u32_t ipinhdrerrors; + u32_t ipinaddrerrors; + u32_t ipinunknownprotos; + u32_t ipindiscards; + u32_t ipindelivers; + u32_t ipoutrequests; + u32_t ipoutdiscards; + u32_t ipoutnoroutes; + u32_t ipreasmoks; + u32_t ipreasmfails; + u32_t ipfragoks; + u32_t ipfragfails; + u32_t ipfragcreates; + u32_t ipreasmreqds; + u32_t ipforwdatagrams; + u32_t ipinreceives; + + /* TCP */ + u32_t tcpactiveopens; + u32_t tcppassiveopens; + u32_t tcpattemptfails; + u32_t tcpestabresets; + u32_t tcpoutsegs; + u32_t tcpretranssegs; + u32_t tcpinsegs; + u32_t tcpinerrs; + u32_t tcpoutrsts; + + /* UDP */ + u32_t udpindatagrams; + u32_t udpnoports; + u32_t udpinerrors; + u32_t udpoutdatagrams; + + /* ICMP */ + u32_t icmpinmsgs; + u32_t icmpinerrors; + u32_t icmpindestunreachs; + u32_t icmpintimeexcds; + u32_t icmpinparmprobs; + u32_t icmpinsrcquenchs; + u32_t icmpinredirects; + u32_t icmpinechos; + u32_t icmpinechoreps; + u32_t icmpintimestamps; + u32_t icmpintimestampreps; + u32_t icmpinaddrmasks; + u32_t icmpinaddrmaskreps; + u32_t icmpoutmsgs; + u32_t icmpouterrors; + u32_t icmpoutdestunreachs; + u32_t icmpouttimeexcds; + u32_t icmpoutechos; /* can be incremented by user application ('ping') */ + u32_t icmpoutechoreps; +}; + +/** + * @ingroup netif_mib2 + * SNMP MIB2 interface stats + */ +struct stats_mib2_netif_ctrs { + /** The total number of octets received on the interface, including framing characters */ + u32_t ifinoctets; + /** The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were + * not addressed to a multicast or broadcast address at this sub-layer */ + u32_t ifinucastpkts; + /** The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were + * addressed to a multicast or broadcast address at this sub-layer */ + u32_t ifinnucastpkts; + /** The number of inbound packets which were chosen to be discarded even though no errors had + * been detected to prevent their being deliverable to a higher-layer protocol. One possible + * reason for discarding such a packet could be to free up buffer space */ + u32_t ifindiscards; + /** For packet-oriented interfaces, the number of inbound packets that contained errors + * preventing them from being deliverable to a higher-layer protocol. For character- + * oriented or fixed-length interfaces, the number of inbound transmission units that + * contained errors preventing them from being deliverable to a higher-layer protocol. */ + u32_t ifinerrors; + /** For packet-oriented interfaces, the number of packets received via the interface which + * were discarded because of an unknown or unsupported protocol. For character-oriented + * or fixed-length interfaces that support protocol multiplexing the number of transmission + * units received via the interface which were discarded because of an unknown or unsupported + * protocol. 
For any interface that does not support protocol multiplexing, this counter will + * always be 0 */ + u32_t ifinunknownprotos; + /** The total number of octets transmitted out of the interface, including framing characters. */ + u32_t ifoutoctets; + /** The total number of packets that higher-level protocols requested be transmitted, and + * which were not addressed to a multicast or broadcast address at this sub-layer, including + * those that were discarded or not sent. */ + u32_t ifoutucastpkts; + /** The total number of packets that higher-level protocols requested be transmitted, and which + * were addressed to a multicast or broadcast address at this sub-layer, including + * those that were discarded or not sent. */ + u32_t ifoutnucastpkts; + /** The number of outbound packets which were chosen to be discarded even though no errors had + * been detected to prevent their being transmitted. One possible reason for discarding + * such a packet could be to free up buffer space. */ + u32_t ifoutdiscards; + /** For packet-oriented interfaces, the number of outbound packets that could not be transmitted + * because of errors. For character-oriented or fixed-length interfaces, the number of outbound + * transmission units that could not be transmitted because of errors. */ + u32_t ifouterrors; +}; + +/** lwIP stats container */ +struct stats_ { +#if LINK_STATS + /** Link level */ + struct stats_proto link; +#endif +#if ETHARP_STATS + /** ARP */ + struct stats_proto etharp; +#endif +#if IPFRAG_STATS + /** Fragmentation */ + struct stats_proto ip_frag; +#endif +#if IP_STATS + /** IP */ + struct stats_proto ip; +#endif +#if ICMP_STATS + /** ICMP */ + struct stats_proto icmp; +#endif +#if IGMP_STATS + /** IGMP */ + struct stats_igmp igmp; +#endif +#if UDP_STATS + /** UDP */ + struct stats_proto udp; +#endif +#if TCP_STATS + /** TCP */ + struct stats_proto tcp; +#endif +#if MEM_STATS + /** Heap */ + struct stats_mem mem; +#endif +#if MEMP_STATS + /** Internal memory pools */ + struct stats_mem *memp[MEMP_MAX]; +#endif +#if SYS_STATS + /** System */ + struct stats_sys sys; +#endif +#if IP6_STATS + /** IPv6 */ + struct stats_proto ip6; +#endif +#if ICMP6_STATS + /** ICMP6 */ + struct stats_proto icmp6; +#endif +#if IP6_FRAG_STATS + /** IPv6 fragmentation */ + struct stats_proto ip6_frag; +#endif +#if MLD6_STATS + /** Multicast listener discovery */ + struct stats_igmp mld6; +#endif +#if ND6_STATS + /** Neighbor discovery */ + struct stats_proto nd6; +#endif +#if MIB2_STATS + /** SNMP MIB2 */ + struct stats_mib2 mib2; +#endif +}; + +/** Global variable containing lwIP internal statistics. Add this to your debugger's watchlist. 
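Because lwip_stats is a plain global, firmware can poll individual counters at runtime (guarded by the same *_STATS switches), or dump whole blocks via the display macros declared further down. A sketch:

static void check_tcp_health(void)
{
#if TCP_STATS
  if (lwip_stats.tcp.memerr > 0) {
    /* segment/pbuf allocation failed at least once: consider larger pools */
  }
  TCP_STATS_DISPLAY(); /* no-op unless LWIP_STATS_DISPLAY is enabled */
#endif
}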
*/ +extern struct stats_ lwip_stats; + +/** Init statistics */ +void stats_init(void); + +#define STATS_INC(x) ++lwip_stats.x +#define STATS_DEC(x) --lwip_stats.x +#define STATS_INC_USED(x, y) do { lwip_stats.x.used += y; \ + if (lwip_stats.x.max < lwip_stats.x.used) { \ + lwip_stats.x.max = lwip_stats.x.used; \ + } \ + } while(0) +#define STATS_GET(x) lwip_stats.x +#else /* LWIP_STATS */ +#define stats_init() +#define STATS_INC(x) +#define STATS_DEC(x) +#define STATS_INC_USED(x) +#endif /* LWIP_STATS */ + +#if TCP_STATS +#define TCP_STATS_INC(x) STATS_INC(x) +#define TCP_STATS_DISPLAY() stats_display_proto(&lwip_stats.tcp, "TCP") +#else +#define TCP_STATS_INC(x) +#define TCP_STATS_DISPLAY() +#endif + +#if UDP_STATS +#define UDP_STATS_INC(x) STATS_INC(x) +#define UDP_STATS_DISPLAY() stats_display_proto(&lwip_stats.udp, "UDP") +#else +#define UDP_STATS_INC(x) +#define UDP_STATS_DISPLAY() +#endif + +#if ICMP_STATS +#define ICMP_STATS_INC(x) STATS_INC(x) +#define ICMP_STATS_DISPLAY() stats_display_proto(&lwip_stats.icmp, "ICMP") +#else +#define ICMP_STATS_INC(x) +#define ICMP_STATS_DISPLAY() +#endif + +#if IGMP_STATS +#define IGMP_STATS_INC(x) STATS_INC(x) +#define IGMP_STATS_DISPLAY() stats_display_igmp(&lwip_stats.igmp, "IGMP") +#else +#define IGMP_STATS_INC(x) +#define IGMP_STATS_DISPLAY() +#endif + +#if IP_STATS +#define IP_STATS_INC(x) STATS_INC(x) +#define IP_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip, "IP") +#else +#define IP_STATS_INC(x) +#define IP_STATS_DISPLAY() +#endif + +#if IPFRAG_STATS +#define IPFRAG_STATS_INC(x) STATS_INC(x) +#define IPFRAG_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip_frag, "IP_FRAG") +#else +#define IPFRAG_STATS_INC(x) +#define IPFRAG_STATS_DISPLAY() +#endif + +#if ETHARP_STATS +#define ETHARP_STATS_INC(x) STATS_INC(x) +#define ETHARP_STATS_DISPLAY() stats_display_proto(&lwip_stats.etharp, "ETHARP") +#else +#define ETHARP_STATS_INC(x) +#define ETHARP_STATS_DISPLAY() +#endif + +#if LINK_STATS +#define LINK_STATS_INC(x) STATS_INC(x) +#define LINK_STATS_DISPLAY() stats_display_proto(&lwip_stats.link, "LINK") +#else +#define LINK_STATS_INC(x) +#define LINK_STATS_DISPLAY() +#endif + +#if MEM_STATS +#define MEM_STATS_AVAIL(x, y) lwip_stats.mem.x = y +#define MEM_STATS_INC(x) SYS_ARCH_INC(lwip_stats.mem.x, 1) +#define MEM_STATS_INC_USED(x, y) SYS_ARCH_INC(lwip_stats.mem.x, y) +#define MEM_STATS_DEC_USED(x, y) SYS_ARCH_DEC(lwip_stats.mem.x, y) +#define MEM_STATS_DISPLAY() stats_display_mem(&lwip_stats.mem, "HEAP") +#else +#define MEM_STATS_AVAIL(x, y) +#define MEM_STATS_INC(x) +#define MEM_STATS_INC_USED(x, y) +#define MEM_STATS_DEC_USED(x, y) +#define MEM_STATS_DISPLAY() +#endif + + #if MEMP_STATS +#define MEMP_STATS_DEC(x, i) STATS_DEC(memp[i]->x) +#define MEMP_STATS_DISPLAY(i) stats_display_memp(lwip_stats.memp[i], i) +#define MEMP_STATS_GET(x, i) STATS_GET(memp[i]->x) + #else +#define MEMP_STATS_DEC(x, i) +#define MEMP_STATS_DISPLAY(i) +#define MEMP_STATS_GET(x, i) 0 +#endif + +#if SYS_STATS +#define SYS_STATS_INC(x) STATS_INC(sys.x) +#define SYS_STATS_DEC(x) STATS_DEC(sys.x) +#define SYS_STATS_INC_USED(x) STATS_INC_USED(sys.x, 1) +#define SYS_STATS_DISPLAY() stats_display_sys(&lwip_stats.sys) +#else +#define SYS_STATS_INC(x) +#define SYS_STATS_DEC(x) +#define SYS_STATS_INC_USED(x) +#define SYS_STATS_DISPLAY() +#endif + +#if IP6_STATS +#define IP6_STATS_INC(x) STATS_INC(x) +#define IP6_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip6, "IPv6") +#else +#define IP6_STATS_INC(x) +#define IP6_STATS_DISPLAY() +#endif + +#if ICMP6_STATS +#define 
ICMP6_STATS_INC(x) STATS_INC(x) +#define ICMP6_STATS_DISPLAY() stats_display_proto(&lwip_stats.icmp6, "ICMPv6") +#else +#define ICMP6_STATS_INC(x) +#define ICMP6_STATS_DISPLAY() +#endif + +#if IP6_FRAG_STATS +#define IP6_FRAG_STATS_INC(x) STATS_INC(x) +#define IP6_FRAG_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip6_frag, "IPv6 FRAG") +#else +#define IP6_FRAG_STATS_INC(x) +#define IP6_FRAG_STATS_DISPLAY() +#endif + +#if MLD6_STATS +#define MLD6_STATS_INC(x) STATS_INC(x) +#define MLD6_STATS_DISPLAY() stats_display_igmp(&lwip_stats.mld6, "MLDv1") +#else +#define MLD6_STATS_INC(x) +#define MLD6_STATS_DISPLAY() +#endif + +#if ND6_STATS +#define ND6_STATS_INC(x) STATS_INC(x) +#define ND6_STATS_DISPLAY() stats_display_proto(&lwip_stats.nd6, "ND") +#else +#define ND6_STATS_INC(x) +#define ND6_STATS_DISPLAY() +#endif + +#if MIB2_STATS +#define MIB2_STATS_INC(x) STATS_INC(x) +#else +#define MIB2_STATS_INC(x) +#endif + +/* Display of statistics */ +#if LWIP_STATS_DISPLAY +void stats_display(void); +void stats_display_proto(struct stats_proto *proto, const char *name); +void stats_display_igmp(struct stats_igmp *igmp, const char *name); +void stats_display_mem(struct stats_mem *mem, const char *name); +void stats_display_memp(struct stats_mem *mem, int index); +void stats_display_sys(struct stats_sys *sys); +#else /* LWIP_STATS_DISPLAY */ +#define stats_display() +#define stats_display_proto(proto, name) +#define stats_display_igmp(igmp, name) +#define stats_display_mem(mem, name) +#define stats_display_memp(mem, index) +#define stats_display_sys(sys) +#endif /* LWIP_STATS_DISPLAY */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_STATS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/sys.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/sys.h new file mode 100644 index 000000000..98c38d78e --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/sys.h @@ -0,0 +1,455 @@ +/** + * @file + * OS abstraction layer + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + */ + +#ifndef LWIP_HDR_SYS_H +#define LWIP_HDR_SYS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if NO_SYS + +/* For a totally minimal and standalone system, we provide null + definitions of the sys_ functions. */ +typedef u8_t sys_sem_t; +typedef u8_t sys_mutex_t; +typedef u8_t sys_mbox_t; + +#define sys_sem_new(s, c) ERR_OK +#define sys_sem_signal(s) +#define sys_sem_wait(s) +#define sys_arch_sem_wait(s,t) +#define sys_sem_free(s) +#define sys_sem_valid(s) 0 +#define sys_sem_valid_val(s) 0 +#define sys_sem_set_invalid(s) +#define sys_sem_set_invalid_val(s) +#define sys_mutex_new(mu) ERR_OK +#define sys_mutex_lock(mu) +#define sys_mutex_unlock(mu) +#define sys_mutex_free(mu) +#define sys_mutex_valid(mu) 0 +#define sys_mutex_set_invalid(mu) +#define sys_mbox_new(m, s) ERR_OK +#define sys_mbox_fetch(m,d) +#define sys_mbox_tryfetch(m,d) +#define sys_mbox_post(m,d) +#define sys_mbox_trypost(m,d) +#define sys_mbox_free(m) +#define sys_mbox_valid(m) +#define sys_mbox_valid_val(m) +#define sys_mbox_set_invalid(m) +#define sys_mbox_set_invalid_val(m) + +#define sys_thread_new(n,t,a,s,p) + +#define sys_msleep(t) + +#else /* NO_SYS */ + +/** Return code for timeouts from sys_arch_mbox_fetch and sys_arch_sem_wait */ +#define SYS_ARCH_TIMEOUT 0xffffffffUL + +/** sys_mbox_tryfetch() returns SYS_MBOX_EMPTY if appropriate. + * For now we use the same magic value, but we allow this to change in future. + */ +#define SYS_MBOX_EMPTY SYS_ARCH_TIMEOUT + +#include "lwip/err.h" +#include "arch/sys_arch.h" + +/** Function prototype for thread functions */ +typedef void (*lwip_thread_fn)(void *arg); + +/* Function prototypes for functions to be implemented by platform ports + (in sys_arch.c) */ + +/* Mutex functions: */ + +/** Define LWIP_COMPAT_MUTEX if the port has no mutexes and binary semaphores + should be used instead */ +#ifndef LWIP_COMPAT_MUTEX +#define LWIP_COMPAT_MUTEX 0 +#endif + +#if LWIP_COMPAT_MUTEX +/* for old ports that don't have mutexes: define them to binary semaphores */ +#define sys_mutex_t sys_sem_t +#define sys_mutex_new(mutex) sys_sem_new(mutex, 1) +#define sys_mutex_lock(mutex) sys_sem_wait(mutex) +#define sys_mutex_unlock(mutex) sys_sem_signal(mutex) +#define sys_mutex_free(mutex) sys_sem_free(mutex) +#define sys_mutex_valid(mutex) sys_sem_valid(mutex) +#define sys_mutex_set_invalid(mutex) sys_sem_set_invalid(mutex) + +#else /* LWIP_COMPAT_MUTEX */ + +/** + * @ingroup sys_mutex + * Create a new mutex. + * Note that mutexes are expected to not be taken recursively by the lwIP code, + * so both implementation types (recursive or non-recursive) should work. 
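*/

/* Illustration only (editor's sketch, not from the upstream lwIP sources):
 * the mutex API as used from application or port code; the variable and
 * function names are invented. */
static sys_mutex_t example_lock;

static void example_mutex_usage(void)
{
  if (sys_mutex_new(&example_lock) == ERR_OK) {
    sys_mutex_lock(&example_lock);
    /* ... touch state shared with other threads ... */
    sys_mutex_unlock(&example_lock);
    sys_mutex_free(&example_lock);
  }
}

/** sys_mutex_new():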
+ * @param mutex pointer to the mutex to create + * @return ERR_OK if successful, another err_t otherwise + */ +err_t sys_mutex_new(sys_mutex_t *mutex); +/** + * @ingroup sys_mutex + * Lock a mutex + * @param mutex the mutex to lock + */ +void sys_mutex_lock(sys_mutex_t *mutex); +/** + * @ingroup sys_mutex + * Unlock a mutex + * @param mutex the mutex to unlock + */ +void sys_mutex_unlock(sys_mutex_t *mutex); +/** + * @ingroup sys_mutex + * Delete a mutex + * @param mutex the mutex to delete + */ +void sys_mutex_free(sys_mutex_t *mutex); +#ifndef sys_mutex_valid +/** + * @ingroup sys_mutex + * Check if a mutex is valid/allocated: return 1 for valid, 0 for invalid + */ +int sys_mutex_valid(sys_mutex_t *mutex); +#endif +#ifndef sys_mutex_set_invalid +/** + * @ingroup sys_mutex + * Set a mutex invalid so that sys_mutex_valid returns 0 + */ +void sys_mutex_set_invalid(sys_mutex_t *mutex); +#endif +#endif /* LWIP_COMPAT_MUTEX */ + +/* Semaphore functions: */ + +/** + * @ingroup sys_sem + * Create a new semaphore + * @param sem pointer to the semaphore to create + * @param count initial count of the semaphore + * @return ERR_OK if successful, another err_t otherwise + */ +err_t sys_sem_new(sys_sem_t *sem, u8_t count); +/** + * @ingroup sys_sem + * Signal a semaphore + * @param sem the semaphore to signal + */ +void sys_sem_signal(sys_sem_t *sem); +/** + * @ingroup sys_sem + * Wait for a semaphore for the specified timeout + * @param sem the semaphore to wait for + * @param timeout timeout in milliseconds to wait (0 = wait forever) + * @return time (in milliseconds) waited for the semaphore + * or SYS_ARCH_TIMEOUT on timeout + */ +u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout); +/** + * @ingroup sys_sem + * Delete a semaphore + * @param sem semaphore to delete + */ +void sys_sem_free(sys_sem_t *sem); +/** Wait for a semaphore - forever/no timeout */ +#define sys_sem_wait(sem) sys_arch_sem_wait(sem, 0) +#ifndef sys_sem_valid +/** + * @ingroup sys_sem + * Check if a semaphore is valid/allocated: return 1 for valid, 0 for invalid + */ +int sys_sem_valid(sys_sem_t *sem); +#endif +#ifndef sys_sem_set_invalid +/** + * @ingroup sys_sem + * Set a semaphore invalid so that sys_sem_valid returns 0 + */ +void sys_sem_set_invalid(sys_sem_t *sem); +#endif +#ifndef sys_sem_valid_val +/** + * Same as sys_sem_valid() but taking a value, not a pointer + */ +#define sys_sem_valid_val(sem) sys_sem_valid(&(sem)) +#endif +#ifndef sys_sem_set_invalid_val +/** + * Same as sys_sem_set_invalid() but taking a value, not a pointer + */ +#define sys_sem_set_invalid_val(sem) sys_sem_set_invalid(&(sem)) +#endif + +#ifndef sys_msleep +/** + * @ingroup sys_misc + * Sleep for the specified number of ms + */ +void sys_msleep(u32_t ms); /* only has a (close to) 1 ms resolution. */ +#endif + +/* Mailbox functions.
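*/

/* Illustration only (editor's sketch, not from the upstream lwIP sources):
 * one thread signalling a semaphore created with the functions declared
 * above, another waiting on it with a timeout. The names and the 100 ms
 * timeout are invented. */
static sys_sem_t example_sem;

static void example_sem_init(void)
{
  if (sys_sem_new(&example_sem, 0) != ERR_OK) { /* initial count 0: first wait blocks */
    /* handle allocation failure */
  }
}

static void example_signaller(void)
{
  sys_sem_signal(&example_sem); /* wake one waiter */
}

static void example_waiter(void)
{
  /* block for at most 100 ms; SYS_ARCH_TIMEOUT means nobody signalled */
  if (sys_arch_sem_wait(&example_sem, 100) == SYS_ARCH_TIMEOUT) {
    /* timed out */
  }
}

/* Mailbox functions (declarations follow).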
*/ + +/** + * @ingroup sys_mbox + * Create a new mbox of specified size + * @param mbox pointer to the mbox to create + * @param size (minimum) number of messages in this mbox + * @return ERR_OK if successful, another err_t otherwise + */ +err_t sys_mbox_new(sys_mbox_t *mbox, int size); +/** + * @ingroup sys_mbox + * Post a message to an mbox - must not fail + * -> blocks if full; only call this from tasks, never from an ISR + * @param mbox mbox to post the message to + * @param msg message to post (ATTENTION: can be NULL) + */ +void sys_mbox_post(sys_mbox_t *mbox, void *msg); +/** + * @ingroup sys_mbox + * Try to post a message to an mbox - may fail if the mbox is full or when called from an ISR + * @param mbox mbox to post the message to + * @param msg message to post (ATTENTION: can be NULL) + */ +err_t sys_mbox_trypost(sys_mbox_t *mbox, void *msg); +/** + * @ingroup sys_mbox + * Wait for a new message to arrive in the mbox + * @param mbox mbox to get a message from + * @param msg pointer where the message is stored + * @param timeout maximum time (in milliseconds) to wait for a message (0 = wait forever) + * @return time (in milliseconds) waited for a message, may be 0 if not waited + * or SYS_ARCH_TIMEOUT on timeout + * The returned time has to be accurate to prevent timer jitter! + */ +u32_t sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout); +/* Allow port to override with a macro, e.g. special timeout for sys_arch_mbox_fetch() */ +#ifndef sys_arch_mbox_tryfetch +/** + * @ingroup sys_mbox + * Try to fetch a message from the mbox without blocking + * @param mbox mbox to get a message from + * @param msg pointer where the message is stored + * @return 0 (milliseconds) if a message has been received + * or SYS_MBOX_EMPTY if the mailbox is empty + */ +u32_t sys_arch_mbox_tryfetch(sys_mbox_t *mbox, void **msg); +#endif +/** + * For now, we map straight to the sys_arch implementation. + */ +#define sys_mbox_tryfetch(mbox, msg) sys_arch_mbox_tryfetch(mbox, msg) +/** + * @ingroup sys_mbox + * Delete an mbox + * @param mbox mbox to delete + */ +void sys_mbox_free(sys_mbox_t *mbox); +#define sys_mbox_fetch(mbox, msg) sys_arch_mbox_fetch(mbox, msg, 0) +#ifndef sys_mbox_valid +/** + * @ingroup sys_mbox + * Check if an mbox is valid/allocated: return 1 for valid, 0 for invalid + */ +int sys_mbox_valid(sys_mbox_t *mbox); +#endif +#ifndef sys_mbox_set_invalid +/** + * @ingroup sys_mbox + * Set an mbox invalid so that sys_mbox_valid returns 0 + */ +void sys_mbox_set_invalid(sys_mbox_t *mbox); +#endif +#ifndef sys_mbox_valid_val +/** + * Same as sys_mbox_valid() but taking a value, not a pointer + */ +#define sys_mbox_valid_val(mbox) sys_mbox_valid(&(mbox)) +#endif +#ifndef sys_mbox_set_invalid_val +/** + * Same as sys_mbox_set_invalid() but taking a value, not a pointer + */ +#define sys_mbox_set_invalid_val(mbox) sys_mbox_set_invalid(&(mbox)) +#endif + + +/** + * @ingroup sys_misc + * The only thread function: + * Creates a new thread + * ATTENTION: although this function returns a value, it MUST NOT FAIL (ports have to assert this!) + * @param name human-readable name for the thread (used for debugging purposes) + * @param thread thread-function + * @param arg parameter passed to 'thread' + * @param stacksize stack size in bytes for the new thread (may be ignored by ports) + * @param prio priority of the new thread (may be ignored by ports) */ +sys_thread_t sys_thread_new(const char *name, lwip_thread_fn thread, void *arg, int stacksize, int prio); + +#endif /* NO_SYS */ + +/* sys_init() must be called before anything else.
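*/

/* Illustration only (editor's sketch, not from the upstream lwIP sources):
 * a worker thread fed through a mailbox, built on sys_mbox_* and
 * sys_thread_new() declared above. The names, queue depth, stack size and
 * priority are invented. */
static sys_mbox_t example_mbox;

static void example_worker(void *arg)
{
  void *msg;
  (void)arg;
  for (;;) {
    sys_mbox_fetch(&example_mbox, &msg); /* blocks until a message arrives */
    /* ... process msg ... */
  }
}

static void example_worker_start(void)
{
  if (sys_mbox_new(&example_mbox, 8) == ERR_OK) {
    sys_thread_new("example_worker", example_worker, NULL, 1024, 3);
    sys_mbox_post(&example_mbox, NULL); /* posting NULL is explicitly allowed */
  }
}

/* Initialize the sys_arch layer; call this once at startup: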
*/ +void sys_init(void); + +#ifndef sys_jiffies +/** + * Ticks/jiffies since power up. + */ +u32_t sys_jiffies(void); +#endif + +/** + * @ingroup sys_time + * Returns the current time in milliseconds, + * may be the same as sys_jiffies or at least based on it. + */ +u32_t sys_now(void); + +/* Critical Region Protection */ +/* These functions must be implemented in the sys_arch.c file. + In some implementations they can provide a more light-weight protection + mechanism than using semaphores. Otherwise semaphores can be used for + the implementation */ +#ifndef SYS_ARCH_PROTECT +/** SYS_LIGHTWEIGHT_PROT + * define SYS_LIGHTWEIGHT_PROT in lwipopts.h if you want inter-task protection + * for certain critical regions during buffer allocation, deallocation and memory + * allocation and deallocation. + */ +#if SYS_LIGHTWEIGHT_PROT + +/** + * @ingroup sys_prot + * SYS_ARCH_DECL_PROTECT + * declare a protection variable. This macro will default to defining a variable of + * type sys_prot_t. If a particular port needs a different implementation, then + * this macro may be defined in sys_arch.h. + */ +#define SYS_ARCH_DECL_PROTECT(lev) sys_prot_t lev +/** + * @ingroup sys_prot + * SYS_ARCH_PROTECT + * Perform a "fast" protect. This could be implemented by + * disabling interrupts for an embedded system or by using a semaphore or + * mutex. The implementation should allow calling SYS_ARCH_PROTECT when + * already protected. The old protection level is returned in the variable + * "lev". This macro will default to calling the sys_arch_protect() function + * which should be implemented in sys_arch.c. If a particular port needs a + * different implementation, then this macro may be defined in sys_arch.h + */ +#define SYS_ARCH_PROTECT(lev) lev = sys_arch_protect() +/** + * @ingroup sys_prot + * SYS_ARCH_UNPROTECT + * Perform a "fast" set of the protection level to "lev". This could be + * implemented by setting the interrupt level to "lev" within the MACRO or by + * using a semaphore or mutex. This macro will default to calling the + * sys_arch_unprotect() function which should be implemented in + * sys_arch.c. If a particular port needs a different implementation, then + * this macro may be defined in sys_arch.h + */ +#define SYS_ARCH_UNPROTECT(lev) sys_arch_unprotect(lev) +sys_prot_t sys_arch_protect(void); +void sys_arch_unprotect(sys_prot_t pval); + +#else + +#define SYS_ARCH_DECL_PROTECT(lev) +#define SYS_ARCH_PROTECT(lev) +#define SYS_ARCH_UNPROTECT(lev) + +#endif /* SYS_LIGHTWEIGHT_PROT */ + +#endif /* SYS_ARCH_PROTECT */ + +/* + * Macros to set/get and increase/decrease variables in a thread-safe way. + * Use these for accessing variables that are used from more than one thread.
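*/

/* Illustration only (editor's sketch, not from the upstream lwIP sources):
 * a hand-rolled critical section using the protection macros above; it is
 * exactly the pattern the SYS_ARCH_INC macro below packages up. The names
 * are invented. */
static volatile u32_t example_counter;

static void example_counter_inc(void)
{
  SYS_ARCH_DECL_PROTECT(old_level); /* declares a sys_prot_t when enabled */
  SYS_ARCH_PROTECT(old_level);      /* e.g. mask interrupts, save old level */
  example_counter += 1;             /* the protected access */
  SYS_ARCH_UNPROTECT(old_level);    /* restore the saved level */
}

/* The same pattern, packaged as macros (each one may be overridden by a port):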
+ */ + +#ifndef SYS_ARCH_INC +#define SYS_ARCH_INC(var, val) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + var += val; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_INC */ + +#ifndef SYS_ARCH_DEC +#define SYS_ARCH_DEC(var, val) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + var -= val; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_DEC */ + +#ifndef SYS_ARCH_GET +#define SYS_ARCH_GET(var, ret) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + ret = var; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_GET */ + +#ifndef SYS_ARCH_SET +#define SYS_ARCH_SET(var, val) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + var = val; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_SET */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_SYS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h new file mode 100644 index 000000000..3845b9fd5 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h @@ -0,0 +1,433 @@ +/** + * @file + * TCP API (to be used from TCPIP thread)\n + * See also @ref tcp_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCP_H +#define LWIP_HDR_TCP_H + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/mem.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/icmp.h" +#include "lwip/err.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct tcp_pcb; + +/** Function prototype for tcp accept callback functions. Called when a new + * connection can be accepted on a listening pcb. 
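*/

/* Illustration only (editor's sketch, not from the upstream lwIP sources):
 * a listening pcb set up with the raw API declared further down in this file
 * (tcp_new(), tcp_bind(), tcp_listen(), tcp_accept()). The names and port
 * number are invented; error handling is shortened. */
static err_t example_on_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
{
  (void)arg;
  if (err != ERR_OK || newpcb == NULL) {
    return ERR_VAL;
  }
  /* register per-connection callbacks (tcp_recv/tcp_sent/tcp_err) on newpcb */
  return ERR_OK;
}

static void example_listen(void)
{
  struct tcp_pcb *pcb = tcp_new();
  if (pcb != NULL && tcp_bind(pcb, IP_ADDR_ANY, 7) == ERR_OK) {
    pcb = tcp_listen(pcb); /* frees the full pcb, returns a smaller listening one */
    if (pcb != NULL) {
      tcp_accept(pcb, example_on_accept);
    }
  }
}

/** tcp_accept_fn: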
+ * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param newpcb The new connection pcb + * @param err An error code if there has been an error accepting. + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_accept_fn)(void *arg, struct tcp_pcb *newpcb, err_t err); + +/** Function prototype for tcp receive callback functions. Called when data has + * been received. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb The connection pcb which received data + * @param p The received data (or NULL when the connection has been closed!) + * @param err An error code if there has been an error receiving + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_recv_fn)(void *arg, struct tcp_pcb *tpcb, + struct pbuf *p, err_t err); + +/** Function prototype for tcp sent callback functions. Called when sent data has + * been acknowledged by the remote side. Use it to free corresponding resources. + * This also means that the pcb now has space available to send new data. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb The connection pcb for which data has been acknowledged + * @param len The number of bytes acknowledged + * @return ERR_OK: try to send some data by calling tcp_output + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_sent_fn)(void *arg, struct tcp_pcb *tpcb, + u16_t len); + +/** Function prototype for tcp poll callback functions. Called periodically as + * specified by @see tcp_poll. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb tcp pcb + * @return ERR_OK: try to send some data by calling tcp_output + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_poll_fn)(void *arg, struct tcp_pcb *tpcb); + +/** Function prototype for tcp error callback functions. Called when the pcb + * receives an RST or is unexpectedly closed for any other reason. + * + * @note The corresponding pcb is already freed when this callback is called! + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param err Error code to indicate why the pcb has been closed + * ERR_ABRT: aborted through tcp_abort or by a TCP timer + * ERR_RST: the connection was reset by the remote host + */ +typedef void (*tcp_err_fn)(void *arg, err_t err); + +/** Function prototype for tcp connected callback functions. Called when a pcb + * is connected to the remote side after initiating a connection attempt by + * calling tcp_connect(). + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb The connection pcb which is connected + * @param err An unused error code, always ERR_OK currently ;-) @todo! + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + * + * @note When a connection attempt fails, the error callback is currently called!
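*/

/* Illustration only (editor's sketch, not from the upstream lwIP sources):
 * an active open followed by a small write once the connection is up. The
 * names, destination port and payload are invented. */
static err_t example_on_connected(void *arg, struct tcp_pcb *tpcb, err_t err)
{
  (void)arg;
  (void)err; /* always ERR_OK, per the note above */
  /* queue five bytes, copied into lwIP's own buffers... */
  if (tcp_write(tpcb, "hello", 5, TCP_WRITE_FLAG_COPY) == ERR_OK) {
    return tcp_output(tpcb); /* ...and push them out now */
  }
  return ERR_OK;
}

static void example_connect(const ip_addr_t *server)
{
  struct tcp_pcb *pcb = tcp_new();
  if (pcb != NULL) {
    tcp_connect(pcb, server, 80, example_on_connected);
  }
}

/** The callback type used in the sketch above: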
+ */ +typedef err_t (*tcp_connected_fn)(void *arg, struct tcp_pcb *tpcb, err_t err); + +#if LWIP_WND_SCALE +#define RCV_WND_SCALE(pcb, wnd) (((wnd) >> (pcb)->rcv_scale)) +#define SND_WND_SCALE(pcb, wnd) (((wnd) << (pcb)->snd_scale)) +#define TCPWND16(x) ((u16_t)LWIP_MIN((x), 0xFFFF)) +#define TCP_WND_MAX(pcb) ((tcpwnd_size_t)(((pcb)->flags & TF_WND_SCALE) ? TCP_WND : TCPWND16(TCP_WND))) +typedef u32_t tcpwnd_size_t; +#else +#define RCV_WND_SCALE(pcb, wnd) (wnd) +#define SND_WND_SCALE(pcb, wnd) (wnd) +#define TCPWND16(x) (x) +#define TCP_WND_MAX(pcb) TCP_WND +typedef u16_t tcpwnd_size_t; +#endif + +#if LWIP_WND_SCALE || TCP_LISTEN_BACKLOG || LWIP_TCP_TIMESTAMPS +typedef u16_t tcpflags_t; +#else +typedef u8_t tcpflags_t; +#endif + +enum tcp_state { + CLOSED = 0, + LISTEN = 1, + SYN_SENT = 2, + SYN_RCVD = 3, + ESTABLISHED = 4, + FIN_WAIT_1 = 5, + FIN_WAIT_2 = 6, + CLOSE_WAIT = 7, + CLOSING = 8, + LAST_ACK = 9, + TIME_WAIT = 10 +}; + +/** + * members common to struct tcp_pcb and struct tcp_listen_pcb + */ +#define TCP_PCB_COMMON(type) \ + type *next; /* for the linked list */ \ + void *callback_arg; \ + enum tcp_state state; /* TCP state */ \ + u8_t prio; \ + /* ports are in host byte order */ \ + u16_t local_port + + +/** the TCP protocol control block for listening pcbs */ +struct tcp_pcb_listen { +/** Common members of all PCB types */ + IP_PCB; +/** Protocol specific PCB members */ + TCP_PCB_COMMON(struct tcp_pcb_listen); + +#if LWIP_CALLBACK_API + /* Function to call when a listener has been connected. */ + tcp_accept_fn accept; +#endif /* LWIP_CALLBACK_API */ + +#if TCP_LISTEN_BACKLOG + u8_t backlog; + u8_t accepts_pending; +#endif /* TCP_LISTEN_BACKLOG */ +}; + + +/** the TCP protocol control block */ +struct tcp_pcb { +/** common PCB members */ + IP_PCB; +/** protocol specific PCB members */ + TCP_PCB_COMMON(struct tcp_pcb); + + /* ports are in host byte order */ + u16_t remote_port; + + tcpflags_t flags; +#define TF_ACK_DELAY 0x01U /* Delayed ACK. */ +#define TF_ACK_NOW 0x02U /* Immediate ACK. */ +#define TF_INFR 0x04U /* In fast recovery. */ +#define TF_CLOSEPEND 0x08U /* If this is set, tcp_close failed to enqueue the FIN (retried in tcp_tmr) */ +#define TF_RXCLOSED 0x10U /* rx closed by tcp_shutdown */ +#define TF_FIN 0x20U /* Connection was closed locally (FIN segment enqueued). */ +#define TF_NODELAY 0x40U /* Disable Nagle algorithm */ +#define TF_NAGLEMEMERR 0x80U /* nagle enabled, memerr, try to output to prevent delayed ACK to happen */ +#if LWIP_WND_SCALE +#define TF_WND_SCALE 0x0100U /* Window Scale option enabled */ +#endif +#if TCP_LISTEN_BACKLOG +#define TF_BACKLOGPEND 0x0200U /* If this is set, a connection pcb has increased the backlog on its listener */ +#endif +#if LWIP_TCP_TIMESTAMPS +#define TF_TIMESTAMP 0x0400U /* Timestamp option enabled */ +#endif + + /* the rest of the fields are in host byte order + as we have to do some math with them */ + + /* Timers */ + u8_t polltmr, pollinterval; + u8_t last_timer; + u32_t tmr; + + /* receiver variables */ + u32_t rcv_nxt; /* next seqno expected */ + tcpwnd_size_t rcv_wnd; /* receiver window available */ + tcpwnd_size_t rcv_ann_wnd; /* receiver window to announce */ + u32_t rcv_ann_right_edge; /* announced right edge of window */ + + /* Retransmission timer. 
*/ + s16_t rtime; + + u16_t mss; /* maximum segment size */ + + /* RTT (round trip time) estimation variables */ + u32_t rttest; /* RTT estimate in 500ms ticks */ + u32_t rtseq; /* sequence number being timed */ + s16_t sa, sv; /* @todo document this */ + + s16_t rto; /* retransmission time-out */ + u8_t nrtx; /* number of retransmissions */ + + /* fast retransmit/recovery */ + u8_t dupacks; + u32_t lastack; /* Highest acknowledged seqno. */ + + /* congestion avoidance/control variables */ + tcpwnd_size_t cwnd; + tcpwnd_size_t ssthresh; + + /* sender variables */ + u32_t snd_nxt; /* next new seqno to be sent */ + u32_t snd_wl1, snd_wl2; /* Sequence and acknowledgement numbers of last + window update. */ + u32_t snd_lbb; /* Sequence number of next byte to be buffered. */ + tcpwnd_size_t snd_wnd; /* sender window */ + tcpwnd_size_t snd_wnd_max; /* the maximum sender window announced by the remote host */ + + tcpwnd_size_t snd_buf; /* Available buffer space for sending (in bytes). */ +#define TCP_SNDQUEUELEN_OVERFLOW (0xffffU-3) + u16_t snd_queuelen; /* Number of pbufs currently in the send buffer. */ + +#if TCP_OVERSIZE + /* Extra bytes available at the end of the last pbuf in unsent. */ + u16_t unsent_oversize; +#endif /* TCP_OVERSIZE */ + + /* These are ordered by sequence number: */ + struct tcp_seg *unsent; /* Unsent (queued) segments. */ + struct tcp_seg *unacked; /* Sent but unacknowledged segments. */ +#if TCP_QUEUE_OOSEQ + struct tcp_seg *ooseq; /* Received out of sequence segments. */ +#endif /* TCP_QUEUE_OOSEQ */ + + struct pbuf *refused_data; /* Data previously received but not yet taken by upper layer */ + +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + struct tcp_pcb_listen* listener; +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + +#if LWIP_CALLBACK_API + /* Function to be called when more send buffer space is available. */ + tcp_sent_fn sent; + /* Function to be called when (in-sequence) data has arrived. */ + tcp_recv_fn recv; + /* Function to be called when a connection has been set up. */ + tcp_connected_fn connected; + /* Function which is called periodically. */ + tcp_poll_fn poll; + /* Function to be called whenever a fatal error occurs. 
*/ + tcp_err_fn errf; +#endif /* LWIP_CALLBACK_API */ + +#if LWIP_TCP_TIMESTAMPS + u32_t ts_lastacksent; + u32_t ts_recent; +#endif /* LWIP_TCP_TIMESTAMPS */ + + /* idle time before KEEPALIVE is sent */ + u32_t keep_idle; +#if LWIP_TCP_KEEPALIVE + u32_t keep_intvl; + u32_t keep_cnt; +#endif /* LWIP_TCP_KEEPALIVE */ + + /* Persist timer counter */ + u8_t persist_cnt; + /* Persist timer back-off */ + u8_t persist_backoff; + + /* KEEPALIVE counter */ + u8_t keep_cnt_sent; + +#if LWIP_WND_SCALE + u8_t snd_scale; + u8_t rcv_scale; +#endif +}; + +#if LWIP_EVENT_API + +enum lwip_event { + LWIP_EVENT_ACCEPT, + LWIP_EVENT_SENT, + LWIP_EVENT_RECV, + LWIP_EVENT_CONNECTED, + LWIP_EVENT_POLL, + LWIP_EVENT_ERR +}; + +err_t lwip_tcp_event(void *arg, struct tcp_pcb *pcb, + enum lwip_event, + struct pbuf *p, + u16_t size, + err_t err); + +#endif /* LWIP_EVENT_API */ + +/* Application program's interface: */ +struct tcp_pcb * tcp_new (void); +struct tcp_pcb * tcp_new_ip_type (u8_t type); + +void tcp_arg (struct tcp_pcb *pcb, void *arg); +#if LWIP_CALLBACK_API +void tcp_recv (struct tcp_pcb *pcb, tcp_recv_fn recv); +void tcp_sent (struct tcp_pcb *pcb, tcp_sent_fn sent); +void tcp_err (struct tcp_pcb *pcb, tcp_err_fn err); +void tcp_accept (struct tcp_pcb *pcb, tcp_accept_fn accept); +#endif /* LWIP_CALLBACK_API */ +void tcp_poll (struct tcp_pcb *pcb, tcp_poll_fn poll, u8_t interval); + +#if LWIP_TCP_TIMESTAMPS +#define tcp_mss(pcb) (((pcb)->flags & TF_TIMESTAMP) ? ((pcb)->mss - 12) : (pcb)->mss) +#else /* LWIP_TCP_TIMESTAMPS */ +#define tcp_mss(pcb) ((pcb)->mss) +#endif /* LWIP_TCP_TIMESTAMPS */ +#define tcp_sndbuf(pcb) (TCPWND16((pcb)->snd_buf)) +#define tcp_sndqueuelen(pcb) ((pcb)->snd_queuelen) +/** @ingroup tcp_raw */ +#define tcp_nagle_disable(pcb) ((pcb)->flags |= TF_NODELAY) +/** @ingroup tcp_raw */ +#define tcp_nagle_enable(pcb) ((pcb)->flags = (tcpflags_t)((pcb)->flags & ~TF_NODELAY)) +/** @ingroup tcp_raw */ +#define tcp_nagle_disabled(pcb) (((pcb)->flags & TF_NODELAY) != 0) + +#if TCP_LISTEN_BACKLOG +#define tcp_backlog_set(pcb, new_backlog) do { \ + LWIP_ASSERT("pcb->state == LISTEN (called for wrong pcb?)", (pcb)->state == LISTEN); \ + ((struct tcp_pcb_listen *)(pcb))->backlog = ((new_backlog) ? 
(new_backlog) : 1); } while(0) +void tcp_backlog_delayed(struct tcp_pcb* pcb); +void tcp_backlog_accepted(struct tcp_pcb* pcb); +#else /* TCP_LISTEN_BACKLOG */ +#define tcp_backlog_set(pcb, new_backlog) +#define tcp_backlog_delayed(pcb) +#define tcp_backlog_accepted(pcb) +#endif /* TCP_LISTEN_BACKLOG */ +#define tcp_accepted(pcb) /* compatibility define, not needed any more */ + +void tcp_recved (struct tcp_pcb *pcb, u16_t len); +err_t tcp_bind (struct tcp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port); +err_t tcp_connect (struct tcp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port, tcp_connected_fn connected); + +struct tcp_pcb * tcp_listen_with_backlog_and_err(struct tcp_pcb *pcb, u8_t backlog, err_t *err); +struct tcp_pcb * tcp_listen_with_backlog(struct tcp_pcb *pcb, u8_t backlog); +/** @ingroup tcp_raw */ +#define tcp_listen(pcb) tcp_listen_with_backlog(pcb, TCP_DEFAULT_LISTEN_BACKLOG) + +void tcp_abort (struct tcp_pcb *pcb); +err_t tcp_close (struct tcp_pcb *pcb); +err_t tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx); + +/* Flags for "apiflags" parameter in tcp_write */ +#define TCP_WRITE_FLAG_COPY 0x01 +#define TCP_WRITE_FLAG_MORE 0x02 + +err_t tcp_write (struct tcp_pcb *pcb, const void *dataptr, u16_t len, + u8_t apiflags); + +void tcp_setprio (struct tcp_pcb *pcb, u8_t prio); + +#define TCP_PRIO_MIN 1 +#define TCP_PRIO_NORMAL 64 +#define TCP_PRIO_MAX 127 + +err_t tcp_output (struct tcp_pcb *pcb); + + +const char* tcp_debug_state_str(enum tcp_state s); + +/* for compatibility with older implementation */ +#define tcp_new_ip6() tcp_new_ip_type(IPADDR_TYPE_V6) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP */ + +#endif /* LWIP_HDR_TCP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h new file mode 100644 index 000000000..6bf797812 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h @@ -0,0 +1,106 @@ +/** + * @file + * Functions to sync with TCPIP thread + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCPIP_H +#define LWIP_HDR_TCPIP_H + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/timeouts.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_TCPIP_CORE_LOCKING +/** The global mutex to lock the stack. */ +extern sys_mutex_t lock_tcpip_core; +/** Lock lwIP core mutex (needs @ref LWIP_TCPIP_CORE_LOCKING 1) */ +#define LOCK_TCPIP_CORE() sys_mutex_lock(&lock_tcpip_core) +/** Unlock lwIP core mutex (needs @ref LWIP_TCPIP_CORE_LOCKING 1) */ +#define UNLOCK_TCPIP_CORE() sys_mutex_unlock(&lock_tcpip_core) +#else /* LWIP_TCPIP_CORE_LOCKING */ +#define LOCK_TCPIP_CORE() +#define UNLOCK_TCPIP_CORE() +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +struct pbuf; +struct netif; + +/** Function prototype for the init_done function passed to tcpip_init */ +typedef void (*tcpip_init_done_fn)(void *arg); +/** Function prototype for functions passed to tcpip_callback() */ +typedef void (*tcpip_callback_fn)(void *ctx); + +/* Forward declarations */ +struct tcpip_callback_msg; + +void tcpip_init(tcpip_init_done_fn tcpip_init_done, void *arg); + +err_t tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn input_fn); +err_t tcpip_input(struct pbuf *p, struct netif *inp); + +err_t tcpip_callback_with_block(tcpip_callback_fn function, void *ctx, u8_t block); +/** + * @ingroup lwip_os + * @see tcpip_callback_with_block + */ +#define tcpip_callback(f, ctx) tcpip_callback_with_block(f, ctx, 1) + +struct tcpip_callback_msg* tcpip_callbackmsg_new(tcpip_callback_fn function, void *ctx); +void tcpip_callbackmsg_delete(struct tcpip_callback_msg* msg); +err_t tcpip_trycallback(struct tcpip_callback_msg* msg); + +/* free pbufs or heap memory from another context without blocking */ +err_t pbuf_free_callback(struct pbuf *p); +err_t mem_free_callback(void *m); + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS +err_t tcpip_timeout(u32_t msecs, sys_timeout_handler h, void *arg); +err_t tcpip_untimeout(sys_timeout_handler h, void *arg); +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + +#ifdef __cplusplus +} +#endif + +#endif /* !NO_SYS */ + +#endif /* LWIP_HDR_TCPIP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h new file mode 100644 index 000000000..d119056ad --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h @@ -0,0 +1,121 @@ +/** + * @file + * Timer implementations + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_TIMEOUTS_H +#define LWIP_HDR_TIMEOUTS_H + +#include "lwip/opt.h" +#include "lwip/err.h" +#if !NO_SYS +#include "lwip/sys.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef LWIP_DEBUG_TIMERNAMES +#ifdef LWIP_DEBUG +#define LWIP_DEBUG_TIMERNAMES SYS_DEBUG +#else /* LWIP_DEBUG */ +#define LWIP_DEBUG_TIMERNAMES 0 +#endif /* LWIP_DEBUG*/ +#endif + +/** Function prototype for a stack-internal timer function that has to be + * called at a defined interval */ +typedef void (* lwip_cyclic_timer_handler)(void); + +/** This struct contains information about a stack-internal timer function + that has to be called at a defined interval */ +struct lwip_cyclic_timer { + u32_t interval_ms; + lwip_cyclic_timer_handler handler; +#if LWIP_DEBUG_TIMERNAMES + const char* handler_name; +#endif /* LWIP_DEBUG_TIMERNAMES */ +}; + +/** This array contains all stack-internal cyclic timers. To get the number of + * timers, use LWIP_ARRAYSIZE() */ +extern const struct lwip_cyclic_timer lwip_cyclic_timers[]; + +#if LWIP_TIMERS + +/** Function prototype for a timeout callback function. Register such a function + * using sys_timeout(). 
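*/

/* Illustration only (editor's sketch, not from the upstream lwIP sources):
 * a self-rearming 500 ms timer built on sys_timeout()/sys_untimeout(),
 * declared below. The names and period are invented; when running with an
 * OS, these calls must be made from the tcpip thread (or with the core
 * locked). */
static void example_tick(void *arg)
{
  /* ... do periodic work ... then re-arm for another 500 ms */
  sys_timeout(500, example_tick, arg);
}

static void example_tick_start(void)
{
  sys_timeout(500, example_tick, NULL);
}

static void example_tick_stop(void)
{
  sys_untimeout(example_tick, NULL); /* the handler+arg pair identifies the timer */
}

/** The handler type expected by sys_timeout():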
+ * + * @param arg Additional argument to pass to the function - set up by sys_timeout() + */ +typedef void (* sys_timeout_handler)(void *arg); + +struct sys_timeo { + struct sys_timeo *next; + u32_t time; + sys_timeout_handler h; + void *arg; +#if LWIP_DEBUG_TIMERNAMES + const char* handler_name; +#endif /* LWIP_DEBUG_TIMERNAMES */ +}; + +void sys_timeouts_init(void); + +#if LWIP_DEBUG_TIMERNAMES +void sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char* handler_name); +#define sys_timeout(msecs, handler, arg) sys_timeout_debug(msecs, handler, arg, #handler) +#else /* LWIP_DEBUG_TIMERNAMES */ +void sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg); +#endif /* LWIP_DEBUG_TIMERNAMES */ + +void sys_untimeout(sys_timeout_handler handler, void *arg); +void sys_restart_timeouts(void); +#if NO_SYS +void sys_check_timeouts(void); +u32_t sys_timeouts_sleeptime(void); +#else /* NO_SYS */ +void sys_timeouts_mbox_fetch(sys_mbox_t *mbox, void **msg); +#endif /* NO_SYS */ + + +#endif /* LWIP_TIMERS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_TIMEOUTS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/udp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/udp.h new file mode 100644 index 000000000..4671a64be --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/lwip/udp.h @@ -0,0 +1,182 @@ +/** + * @file + * UDP API (to be used from TCPIP thread)\n + * See also @ref udp_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_UDP_H +#define LWIP_HDR_UDP_H + +#include "lwip/opt.h" + +#if LWIP_UDP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/ip_addr.h" +#include "lwip/ip.h" +#include "lwip/ip6_addr.h" +#include "lwip/prot/udp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define UDP_FLAGS_NOCHKSUM 0x01U +#define UDP_FLAGS_UDPLITE 0x02U +#define UDP_FLAGS_CONNECTED 0x04U +#define UDP_FLAGS_MULTICAST_LOOP 0x08U + +struct udp_pcb; + +/** Function prototype for udp pcb receive callback functions + * addr and port are in the same byte order as in the pcb + * The callback is responsible for freeing the pbuf + * if it's not used any more. + * + * ATTENTION: Be aware that 'addr' might point into the pbuf 'p' so freeing this pbuf + * can make 'addr' invalid, too. + * + * @param arg user supplied argument (udp_pcb.recv_arg) + * @param pcb the udp_pcb which received data + * @param p the packet buffer that was received + * @param addr the remote IP address from which the packet was received + * @param port the remote port from which the packet was received + */ +typedef void (*udp_recv_fn)(void *arg, struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr, u16_t port); + +/** the UDP protocol control block */ +struct udp_pcb { +/** Common members of all PCB types */ + IP_PCB; + +/* Protocol specific PCB members */ + + struct udp_pcb *next; + + u8_t flags; + /** ports are in host byte order */ + u16_t local_port, remote_port; + +#if LWIP_MULTICAST_TX_OPTIONS + /** outgoing network interface for multicast packets */ + ip_addr_t multicast_ip; + /** TTL for outgoing multicast packets */ + u8_t mcast_ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if LWIP_UDPLITE + /** used for UDP_LITE only */ + u16_t chksum_len_rx, chksum_len_tx; +#endif /* LWIP_UDPLITE */ + + /** receive callback function */ + udp_recv_fn recv; + /** user-supplied argument for the recv callback */ + void *recv_arg; +}; +/* udp_pcbs, exported for external reference (e.g. SNMP agent) */ +extern struct udp_pcb *udp_pcbs; + +/* The following functions are the application layer interface to the + UDP code.
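*/

/* Illustration only (editor's sketch, not from the upstream lwIP sources):
 * a minimal receiver built on the functions declared just below. The names
 * and port number are invented. */
static void example_udp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p,
                             const ip_addr_t *addr, u16_t port)
{
  (void)arg; (void)pcb; (void)addr; (void)port;
  if (p != NULL) {
    /* ... inspect p->payload / p->len ... */
    pbuf_free(p); /* the callback owns the pbuf and must free it */
  }
}

static void example_udp_listen(void)
{
  struct udp_pcb *pcb = udp_new();
  if (pcb != NULL && udp_bind(pcb, IP_ADDR_ANY, 5000) == ERR_OK) {
    udp_recv(pcb, example_udp_recv, NULL);
  }
}

/* Declarations: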
*/ +struct udp_pcb * udp_new (void); +struct udp_pcb * udp_new_ip_type(u8_t type); +void udp_remove (struct udp_pcb *pcb); +err_t udp_bind (struct udp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port); +err_t udp_connect (struct udp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port); +void udp_disconnect (struct udp_pcb *pcb); +void udp_recv (struct udp_pcb *pcb, udp_recv_fn recv, + void *recv_arg); +err_t udp_sendto_if (struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + struct netif *netif); +err_t udp_sendto_if_src(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + struct netif *netif, const ip_addr_t *src_ip); +err_t udp_sendto (struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port); +err_t udp_send (struct udp_pcb *pcb, struct pbuf *p); + +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP +err_t udp_sendto_if_chksum(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + struct netif *netif, u8_t have_chksum, + u16_t chksum); +err_t udp_sendto_chksum(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + u8_t have_chksum, u16_t chksum); +err_t udp_send_chksum(struct udp_pcb *pcb, struct pbuf *p, + u8_t have_chksum, u16_t chksum); +err_t udp_sendto_if_src_chksum(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif, + u8_t have_chksum, u16_t chksum, const ip_addr_t *src_ip); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + +#define udp_flags(pcb) ((pcb)->flags) +#define udp_setflags(pcb, f) ((pcb)->flags = (f)) + +/* The following functions are the lower layer interface to UDP. */ +void udp_input (struct pbuf *p, struct netif *inp); + +void udp_init (void); + +/* for compatibility with older implementation */ +#define udp_new_ip6() udp_new_ip_type(IPADDR_TYPE_V6) + +#if LWIP_MULTICAST_TX_OPTIONS +#define udp_set_multicast_netif_addr(pcb, ip4addr) ip_addr_copy_from_ip4((pcb)->multicast_ip, *(ip4addr)) +#define udp_get_multicast_netif_addr(pcb) ip_2_ip4(&(pcb)->multicast_ip) +#define udp_set_multicast_ttl(pcb, value) do { (pcb)->mcast_ttl = value; } while(0) +#define udp_get_multicast_ttl(pcb) ((pcb)->mcast_ttl) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if UDP_DEBUG +void udp_debug_print(struct udp_hdr *udphdr); +#else +#define udp_debug_print(udphdr) +#endif + +void udp_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_UDP */ + +#endif /* LWIP_HDR_UDP_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/netif/etharp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/netif/etharp.h new file mode 100644 index 000000000..c00de0498 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/netif/etharp.h @@ -0,0 +1,3 @@ +/* ARP has been moved to core/ipv4, provide this #include for compatibility only */ +#include "lwip/etharp.h" +#include "netif/ethernet.h" diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h new file mode 100644 index 000000000..6e94e16a2 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h @@ -0,0 +1,77 @@ +/** + * @file + * Ethernet input function - handles INCOMING ethernet level traffic + * To be used in most low-level netif implementations + */ 
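/* Illustration only (editor's sketch, not from the upstream lwIP sources):
 * the usual shape of a driver receive path that feeds frames to the stack.
 * netif->input is set to ethernet_input() (declared below) on bare-metal
 * builds, or to tcpip_input() when running with an OS. The driver-read
 * function is hypothetical. */
struct pbuf *example_low_level_input(struct netif *netif); /* hypothetical HW read */

static void example_driver_rx(struct netif *netif)
{
  struct pbuf *p = example_low_level_input(netif); /* frame from hardware, or NULL */
  if (p != NULL) {
    if (netif->input(p, netif) != ERR_OK) {
      pbuf_free(p); /* the stack did not take ownership: drop the frame */
    }
  }
}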
+ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_NETIF_ETHERNET_H +#define LWIP_HDR_NETIF_ETHERNET_H + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ARP || LWIP_ETHERNET + +/** Define this to 1 and define LWIP_ARP_FILTER_NETIF_FN(pbuf, netif, type) + * to a filter function that returns the correct netif when using multiple + * netifs on one hardware interface where the netif's low-level receive + * routine cannot decide for the correct netif (e.g. when mapping multiple + * IP addresses to one hardware interface). + */ +#ifndef LWIP_ARP_FILTER_NETIF +#define LWIP_ARP_FILTER_NETIF 0 +#endif + +err_t ethernet_input(struct pbuf *p, struct netif *netif); +err_t ethernet_output(struct netif* netif, struct pbuf* p, const struct eth_addr* src, const struct eth_addr* dst, u16_t eth_type); + +extern const struct eth_addr ethbroadcast, ethzero; + +#endif /* LWIP_ARP || LWIP_ETHERNET */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_ETHERNET_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h new file mode 100644 index 000000000..db49e30b4 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h @@ -0,0 +1,86 @@ +/** + * @file + * + * 6LowPAN output for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_LOWPAN6_H +#define LWIP_HDR_LOWPAN6_H + +#include "netif/lowpan6_opts.h" + +#if LWIP_IPV6 && LWIP_6LOWPAN /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** 1 second period */ +#define LOWPAN6_TMR_INTERVAL 1000 + +void lowpan6_tmr(void); + +err_t lowpan6_set_context(u8_t index, const ip6_addr_t * context); +err_t lowpan6_set_short_addr(u8_t addr_high, u8_t addr_low); + +#if LWIP_IPV4 +err_t lowpan4_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr); +#endif /* LWIP_IPV4 */ +err_t lowpan6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr); +err_t lowpan6_input(struct pbuf * p, struct netif *netif); +err_t lowpan6_if_init(struct netif *netif); + +/* pan_id in network byte order. */ +err_t lowpan6_set_pan_id(u16_t pan_id); + +#if !NO_SYS +err_t tcpip_6lowpan_input(struct pbuf *p, struct netif *inp); +#endif /* !NO_SYS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 && LWIP_6LOWPAN */ + +#endif /* LWIP_HDR_LOWPAN6_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h new file mode 100644 index 000000000..96d57804c --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h @@ -0,0 +1,70 @@ +/** + * @file + * 6LowPAN options list + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_LOWPAN6_OPTS_H +#define LWIP_HDR_LOWPAN6_OPTS_H + +#include "lwip/opt.h" + +#ifndef LWIP_6LOWPAN +#define LWIP_6LOWPAN 0 +#endif + +#ifndef LWIP_6LOWPAN_NUM_CONTEXTS +#define LWIP_6LOWPAN_NUM_CONTEXTS 10 +#endif + +#ifndef LWIP_6LOWPAN_INFER_SHORT_ADDRESS +#define LWIP_6LOWPAN_INFER_SHORT_ADDRESS 1 +#endif + +#ifndef LWIP_6LOWPAN_IPHC +#define LWIP_6LOWPAN_IPHC 1 +#endif + +#ifndef LWIP_6LOWPAN_HW_CRC +#define LWIP_6LOWPAN_HW_CRC 1 +#endif + +#ifndef LOWPAN6_DEBUG +#define LOWPAN6_DEBUG LWIP_DBG_OFF +#endif + +#endif /* LWIP_HDR_LOWPAN6_OPTS_H */ diff --git a/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h new file mode 100644 index 000000000..ce43af762 --- /dev/null +++ b/src/third_party/stm32_r1b1/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h @@ -0,0 +1,156 @@ +/* + * ccp.h - Definitions for PPP Compression Control Protocol. + * + * Copyright (c) 1994-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + *