Skip to content

Commit

Permalink
Merge branch 'release_internal' into release_external
Browse files Browse the repository at this point in the history
* release_internal:
  Add new function to unit tests
  Review correction
  Move indirect queue size public API to net_interface
  Review corrections
  Increase Thread SED buffer size for big packets
  Fix indirect queue packet ordering
  API for changing Thread SED parent buffer size
  • Loading branch information
deepakvenugopal committed Feb 9, 2018
2 parents c25e476 + ed76459 commit 43c7ec2
Show file tree
Hide file tree
Showing 6 changed files with 189 additions and 25 deletions.
17 changes: 17 additions & 0 deletions nanostack/net_interface.h
Original file line number Diff line number Diff line change
Expand Up @@ -987,6 +987,23 @@ void arm_print_protocols2(void (*print_fn)(const char *fmt, ...), char sep);
*
*/
extern void net_get_version_information(uint8_t *ptr);

/**
 * \brief Set buffer size for sleepy device parent.
 *
 * This function can be used to set sleepy device parent buffer size and packet threshold.
 *
 * Note! In Thread mode parent buffer size is automatically set during Thread initialization.
 *
 * \param interface_id Network interface ID.
 * \param big_packet_threshold Indicate how large packets are considered big. For Thread, must be 106 bytes.
 * \param small_packets_per_child_count Number of small packets stored for each sleepy child. For Thread, must be at least 1.
 * \param big_packets_total_count Total number of big packets parent can store for all sleepy children. For Thread, must be at least 1.
 * \return 0 on success, <0 on errors.
 */
extern int arm_nwk_sleepy_device_parent_buffer_size_set(int8_t interface_id, uint16_t big_packet_threshold, uint16_t small_packets_per_child_count, uint16_t big_packets_total_count);


#ifdef __cplusplus
}
#endif
Expand Down
14 changes: 14 additions & 0 deletions nanostack/thread_management_if.h
Original file line number Diff line number Diff line change
Expand Up @@ -426,6 +426,20 @@ int thread_management_network_certificate_set(int8_t interface_id, const unsigne
*/
int thread_management_partition_weighting_set(int8_t interface_id, uint8_t partition_weighting);

/**
 * Set Thread Sleepy End Device parent packet buffer size.
 *
 * This function can be used to adjust the number of packets the SED parent is storing.
 *
 * \param interface_id Network interface ID.
 * \param small_packets_per_child_count Number of small packets parent is storing for each SED.
 * \param big_packets_total_count Number of big packets parent can store for all SEDs.
 *
 * \return 0, OK.
 * \return <0 fail.
 */
int thread_management_sed_parent_buffer_size_set(int8_t interface_id, uint16_t small_packets_per_child_count, uint16_t big_packets_total_count);

#ifdef __cplusplus
}
#endif
Expand Down
10 changes: 5 additions & 5 deletions source/6LoWPAN/Thread/thread_config.h
Original file line number Diff line number Diff line change
Expand Up @@ -237,12 +237,12 @@
* least one (1) 106-octet IPv6 datagram per attached SED".
*
* The defines below tell how many small (i.e. up to the big packet
* threshold) packets total and big (i.e. over the big packet threshold)
* packets per sleepy child we buffer in the indirect TX queue. The
* minimum values are 1 for both, but here we use 2 for better
* performance.
* threshold) packets per sleepy child and big (i.e. over the big
* packet threshold) packets total we buffer in the indirect TX
* queue. The minimum values are 1 for both, but here we use larger
* value for better performance.
*/
#define THREAD_INDIRECT_BIG_PACKETS_TOTAL 2
#define THREAD_INDIRECT_BIG_PACKETS_TOTAL 10
#define THREAD_INDIRECT_SMALL_PACKETS_PER_CHILD 2

/**
Expand Down
24 changes: 12 additions & 12 deletions source/6LoWPAN/Thread/thread_management_if.c
Original file line number Diff line number Diff line change
Expand Up @@ -1378,11 +1378,6 @@ int8_t thread_management_get_request_full_nwk_data(int8_t interface_id, bool *fu

int thread_management_device_certificate_set(int8_t interface_id, const unsigned char *device_certificate_ptr, uint16_t device_certificate_len, const unsigned char *priv_key_ptr, uint16_t priv_key_len)
{
(void) interface_id;
(void) device_certificate_ptr;
(void) device_certificate_len;
(void) priv_key_ptr;
(void) priv_key_len;
#ifdef HAVE_THREAD
protocol_interface_info_entry_t *cur;

Expand All @@ -1395,16 +1390,16 @@ int thread_management_device_certificate_set(int8_t interface_id, const unsigned
return thread_extension_bootstrap_device_certificate_set(cur, device_certificate_ptr, device_certificate_len, priv_key_ptr, priv_key_len);

#else
(void) interface_id;
(void) device_certificate_ptr;
(void) device_certificate_len;
(void) priv_key_ptr;
(void) priv_key_len;
return -1;
#endif
}
int thread_management_network_certificate_set(int8_t interface_id, const unsigned char *network_certificate_ptr, uint16_t network_certificate_len, const unsigned char *priv_key_ptr, uint16_t priv_key_len)
{
(void) interface_id;
(void) network_certificate_ptr;
(void) network_certificate_len;
(void) priv_key_ptr;
(void) priv_key_len;
#ifdef HAVE_THREAD
protocol_interface_info_entry_t *cur;

Expand All @@ -1420,14 +1415,17 @@ int thread_management_network_certificate_set(int8_t interface_id, const unsigne

return thread_extension_bootstrap_network_private_key_set(cur, priv_key_ptr, priv_key_len);
#else
(void) interface_id;
(void) network_certificate_ptr;
(void) network_certificate_len;
(void) priv_key_ptr;
(void) priv_key_len;
return -1;
#endif
}

int thread_management_partition_weighting_set(int8_t interface_id, uint8_t partition_weighting)
{
(void) interface_id;
(void) partition_weighting;
#ifdef HAVE_THREAD
protocol_interface_info_entry_t *cur;

Expand All @@ -1449,6 +1447,8 @@ int thread_management_partition_weighting_set(int8_t interface_id, uint8_t parti

return 0;
#else
(void) interface_id;
(void) partition_weighting;
return -1;
#endif
}
135 changes: 127 additions & 8 deletions source/6LoWPAN/adaptation_interface.c
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,13 @@

#define TRACE_GROUP "6lAd"

// #define EXTRA_DEBUG_EXTRA
#ifdef EXTRA_DEBUG_EXTRA
#define tr_debug_extra(...) tr_debug(__VA_ARGS__)
#else
#define tr_debug_extra(...)
#endif

typedef struct {
uint16_t tag; /*!< Fragmentation datagram TAG ID */
uint16_t size; /*!< Datagram Total Size (uncompressed) */
Expand All @@ -57,7 +64,8 @@ typedef struct {
uint8_t unfrag_len; /*!< Length of headers that precede the FRAG header */
bool fragmented_data:1;
bool first_fragment:1;
bool indirectData:1;
bool indirect_data:1;
bool indirect_data_cached:1; /* Data cached for delayed transmission as mac request is already active */
buffer_t *buf;
uint8_t *fragmenter_buf;
ns_list_link_t link; /*!< List link entry */
Expand Down Expand Up @@ -116,6 +124,8 @@ static int8_t lowpan_message_fragmentation_init(buffer_t *buf, fragmenter_tx_ent
static bool lowpan_message_fragmentation_message_write(const fragmenter_tx_entry_t *frag_entry, mcps_data_req_t *dataReq);
static void lowpan_adaptation_indirect_queue_free_message(struct protocol_interface_info_entry *cur, fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr);

static fragmenter_tx_entry_t* lowpan_adaptation_indirect_mac_data_request_active(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr);

//Discover
static fragmenter_interface_t *lowpan_adaptation_interface_discover(int8_t interfaceId)
{
Expand Down Expand Up @@ -362,6 +372,7 @@ static fragmenter_tx_entry_t *lowpan_indirect_entry_allocate(uint16_t fragment_b
indirec_entry->buf = NULL;
indirec_entry->fragmented_data = false;
indirec_entry->first_fragment = true;
indirec_entry->indirect_data_cached = false;

return indirec_entry;
}
Expand Down Expand Up @@ -480,7 +491,7 @@ static fragmenter_tx_entry_t * lowpan_adaptation_tx_process_init(fragmenter_inte

lowpan_active_buffer_state_reset(tx_entry);

tx_entry->indirectData = indirect;
tx_entry->indirect_data = indirect;

return tx_entry;
}
Expand Down Expand Up @@ -589,6 +600,80 @@ static void lowpan_adaptation_data_request_primitiv_set(const buffer_t *buf, mcp
}
}

/* Sanity check over the whole indirect TX queue: find a cached entry whose
 * destination has no MAC data request currently active and push it to the
 * MAC. Returns true if such an entry was triggered, false otherwise. */
static bool lowpan_adaptation_indirect_cache_sanity_check(protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr)
{
    fragmenter_tx_entry_t *active_tx_entry;
    ns_list_foreach(fragmenter_tx_entry_t, fragmenter_tx_entry, &interface_ptr->indirect_tx_queue) {
        if (fragmenter_tx_entry->indirect_data_cached == false) {
            // active entry, jump to next one
            continue;
        }

        // cached entry found, check if it has a pending data request
        active_tx_entry = lowpan_adaptation_indirect_mac_data_request_active(interface_ptr, fragmenter_tx_entry);

        if (active_tx_entry == NULL) {
            // entry is in cache and is not sent to mac => trigger this
            tr_debug_extra("sanity check, push seq %d to addr %s", fragmenter_tx_entry->buf->seq, trace_ipv6(fragmenter_tx_entry->buf->dst_sa.address));
            fragmenter_tx_entry->indirect_data_cached = false;
            lowpan_data_request_to_mac(cur, fragmenter_tx_entry->buf, fragmenter_tx_entry);
            return true;
        }
    }

    return false;
}

/* After an indirect TX completes, hand the next cached entry for the same
 * destination as tx_ptr to the MAC. When no cached entry matches that
 * address, fall back to a queue-wide sanity check. Returns true when an
 * entry was pushed to the MAC. */
static bool lowpan_adaptation_indirect_cache_trigger(protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr)
{
    tr_debug_extra("lowpan_adaptation_indirect_cache_trigger()");

    if (ns_list_count(&interface_ptr->indirect_tx_queue) == 0) {
        return false;
    }

    /* Push the first cached entry destined to the same address */
    ns_list_foreach(fragmenter_tx_entry_t, entry, &interface_ptr->indirect_tx_queue) {
        if (entry->indirect_data_cached
                && addr_ipv6_equal(tx_ptr->buf->dst_sa.address, entry->buf->dst_sa.address)) {
            tr_debug_extra("pushing seq %d to addr %s", entry->buf->seq, trace_ipv6(entry->buf->dst_sa.address));
            entry->indirect_data_cached = false;
            lowpan_data_request_to_mac(cur, entry->buf, entry);
            return true;
        }
    }

    /* Nothing matched this destination - scan the whole cache queue instead */
    return lowpan_adaptation_indirect_cache_sanity_check(cur, interface_ptr);
}

/* Return the non-cached (i.e. already handed to the MAC) indirect queue
 * entry that shares tx_ptr's destination address, or NULL when the MAC has
 * no active data request for that destination. */
static fragmenter_tx_entry_t *lowpan_adaptation_indirect_mac_data_request_active(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr)
{
    ns_list_foreach(fragmenter_tx_entry_t, entry, &interface_ptr->indirect_tx_queue) {
        if (!entry->indirect_data_cached
                && addr_ipv6_equal(tx_ptr->buf->dst_sa.address, entry->buf->dst_sa.address)) {
            tr_debug_extra("active seq: %d", entry->buf->seq);
            return entry;
        }
    }

    return NULL;
}

/* Return the first cached indirect queue entry that shares tx_ptr's
 * destination address, or NULL when nothing is cached for that
 * destination. */
static fragmenter_tx_entry_t *lowpan_adaptation_indirect_first_cached_request_get(fragmenter_interface_t *interface_ptr, fragmenter_tx_entry_t *tx_ptr)
{
    ns_list_foreach(fragmenter_tx_entry_t, entry, &interface_ptr->indirect_tx_queue) {
        if (entry->indirect_data_cached
                && addr_ipv6_equal(tx_ptr->buf->dst_sa.address, entry->buf->dst_sa.address)) {
            tr_debug_extra("first cached seq: %d", entry->buf->seq);
            return entry;
        }
    }

    return NULL;
}

static void lowpan_adaptation_make_room_for_small_packet(protocol_interface_info_entry_t *cur, fragmenter_interface_t *interface_ptr, mle_neigh_table_entry_t *neighbour_to_count)
{
if (interface_ptr->max_indirect_small_packets_per_child == 0) {
Expand Down Expand Up @@ -618,6 +703,7 @@ static void lowpan_adaptation_make_room_for_big_packet(struct protocol_interface
ns_list_foreach_reverse_safe(fragmenter_tx_entry_t, tx_entry, &interface_ptr->indirect_tx_queue) {
if (buffer_data_length(tx_entry->buf) > interface_ptr->indirect_big_packet_threshold) {
if (++count >= interface_ptr->max_indirect_big_packets_total) {
tr_debug_extra("free seq: %d", tx_entry->buf->seq);
lowpan_adaptation_indirect_queue_free_message(cur, interface_ptr, tx_entry);
}
}
Expand Down Expand Up @@ -714,21 +800,45 @@ int8_t lowpan_adaptation_interface_tx(protocol_interface_info_entry_t *cur, buff

if (indirect) {
//Add to indirectQUue
fragmenter_tx_entry_t *tx_ptr_cached;
mle_neigh_table_entry_t *mle_entry = mle_class_get_by_link_address(cur->id, buf->dst_sa.address + 2, buf->dst_sa.addr_type);
if (mle_entry) {
buf->link_specific.ieee802_15_4.indirectTTL = (uint32_t) mle_entry->timeout_rx * MLE_TIMER_TICKS_MS;
} else {
buf->link_specific.ieee802_15_4.indirectTTL = cur->mac_parameters->mac_in_direct_entry_timeout;
}

tr_debug_extra("indirect seq: %d, addr=%s", tx_ptr->buf->seq, trace_ipv6(buf->dst_sa.address));

// Make room for new message if needed */
if (buffer_data_length(buf) <= interface_ptr->indirect_big_packet_threshold) {
lowpan_adaptation_make_room_for_small_packet(cur, interface_ptr, mle_entry);
} else {
lowpan_adaptation_make_room_for_big_packet(cur, interface_ptr);
}

ns_list_add_to_end(&interface_ptr->indirect_tx_queue, tx_ptr);
if (mle_entry) {
buf->link_specific.ieee802_15_4.indirectTTL = (uint32_t) mle_entry->timeout_rx * MLE_TIMER_TICKS_MS;
} else {
buf->link_specific.ieee802_15_4.indirectTTL = cur->mac_parameters->mac_in_direct_entry_timeout;
if (lowpan_adaptation_indirect_mac_data_request_active(interface_ptr, tx_ptr)) {
// mac is handling previous data request, add new one to be cached */
tr_debug_extra("caching seq: %d", tx_ptr->buf->seq);
tx_ptr->indirect_data_cached = true;
}

ns_list_add_to_end(&interface_ptr->indirect_tx_queue, tx_ptr);

// Check if current message can be delivered to MAC or should some cached message be delivered first
tx_ptr_cached = lowpan_adaptation_indirect_first_cached_request_get(interface_ptr, tx_ptr);
if (tx_ptr->indirect_data_cached == false && tx_ptr_cached) {
tr_debug_extra("sending cached seq: %d", tx_ptr_cached->buf->seq);
// set current message to cache
tx_ptr->indirect_data_cached = true;
// swap entries
tx_ptr = tx_ptr_cached;
tx_ptr->indirect_data_cached = false;
buf = tx_ptr_cached->buf;
} else if (tx_ptr->indirect_data_cached == true) {
// There is mac data request ongoing and new req was sent to cache
return 0;
}
}

lowpan_data_request_to_mac(cur, buf, tx_ptr);
Expand All @@ -741,7 +851,6 @@ int8_t lowpan_adaptation_interface_tx(protocol_interface_info_entry_t *cur, buff

}


static bool lowpan_adaptation_tx_process_ready(fragmenter_tx_entry_t *tx_ptr)
{
if (!tx_ptr->fragmented_data) {
Expand Down Expand Up @@ -880,11 +989,21 @@ int8_t lowpan_adaptation_interface_tx_confirm(protocol_interface_info_entry_t *c

//Check is there more packets
if (lowpan_adaptation_tx_process_ready(tx_ptr)) {
bool triggered_from_indirect_cache = false;
if (tx_ptr->fragmented_data && active_direct_confirm) {
//Clean
interface_ptr->fragmenter_active = false;
}

if (tx_ptr->buf->link_specific.ieee802_15_4.indirectTxProcess) {
triggered_from_indirect_cache = lowpan_adaptation_indirect_cache_trigger(cur, interface_ptr, tx_ptr);
}

lowpan_adaptation_data_process_clean(interface_ptr, tx_ptr, map_mlme_status_to_socket_event(confirm->status));

if (triggered_from_indirect_cache) {
return 0;
}
} else {
lowpan_data_request_to_mac(cur, buf, tx_ptr);
}
Expand Down
14 changes: 14 additions & 0 deletions source/libNET/src/ns_net.c
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@
#include "RPL/rpl_data.h"
#endif
#include "ccmLIB.h"
#include "6LoWPAN/lowpan_adaptation_interface.h"
#include "6LoWPAN/Bootstraps/network_lib.h"
#include "6LoWPAN/Bootstraps/protocol_6lowpan.h"
#include "6LoWPAN/Bootstraps/protocol_6lowpan_bootstrap.h"
Expand Down Expand Up @@ -1415,3 +1416,16 @@ void arm_ncache_flush(void)
{
nwk_interface_flush_neigh_cache();
}

/* Public API: configure the sleepy-device parent indirect buffer limits
 * for the given interface. Returns 0 on success, <0 on failure. */
int arm_nwk_sleepy_device_parent_buffer_size_set(int8_t interface_id, uint16_t big_packet_threshold, uint16_t small_packets_per_child_count, uint16_t big_packets_total_count)
{
    protocol_interface_info_entry_t *cur = protocol_stack_interface_info_get_by_id(interface_id);

    if (!cur) {
        // Unknown interface ID
        return -1;
    }

    return lowpan_adaptation_indirect_queue_params_set(cur, big_packet_threshold,
            big_packets_total_count, small_packets_per_child_count);
}

0 comments on commit 43c7ec2

Please sign in to comment.