From 9678516c2acf1a220f4ca21e22b0f28bfc9024a9 Mon Sep 17 00:00:00 2001 From: Manfred Karrer Date: Fri, 9 Nov 2018 20:35:58 -0500 Subject: [PATCH 1/2] Replace containsKey with putIfAbsent call. Remove log. --- .../main/java/bisq/core/payment/AccountAgeWitnessService.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/core/src/main/java/bisq/core/payment/AccountAgeWitnessService.java b/core/src/main/java/bisq/core/payment/AccountAgeWitnessService.java index 38cac92d482..f06a4b35ca5 100644 --- a/core/src/main/java/bisq/core/payment/AccountAgeWitnessService.java +++ b/core/src/main/java/bisq/core/payment/AccountAgeWitnessService.java @@ -136,9 +136,7 @@ private void republishAllFiatAccounts() { } private void addToMap(AccountAgeWitness accountAgeWitness) { - log.debug("addToMap hash=" + Utilities.bytesAsHexString(accountAgeWitness.getHash())); - if (!accountAgeWitnessMap.containsKey(accountAgeWitness.getHashAsByteArray())) - accountAgeWitnessMap.put(accountAgeWitness.getHashAsByteArray(), accountAgeWitness); + accountAgeWitnessMap.putIfAbsent(accountAgeWitness.getHashAsByteArray(), accountAgeWitness); } From 27dea71bfdae0b909da64b91a1a17c1f9856321f Mon Sep 17 00:00:00 2001 From: Manfred Karrer Date: Fri, 9 Nov 2018 20:43:07 -0500 Subject: [PATCH 2/2] Optimize processing of initial trade statistics and witness data We changed the earlier behaviour with delayed execution of chunks of the list as it caused worse results than if it is processed in one go. The main reason is probably that listeners trigger more code and if that is called early at startup we have better chances that the user has not already navigated to a screen where the trade statistics are used for UI rendering. We need to take care that the update period between releases stays short, as with the current situation before the 0.9 release we receive 4000 objects with a newly installed client, which causes the application to stay stuck for quite a while at startup. 
--- .../p2p/peers/getdata/RequestDataHandler.java | 49 ++++++++----------- 1 file changed, 20 insertions(+), 29 deletions(-) diff --git a/p2p/src/main/java/bisq/network/p2p/peers/getdata/RequestDataHandler.java b/p2p/src/main/java/bisq/network/p2p/peers/getdata/RequestDataHandler.java index eaac5021e56..0f913e32520 100644 --- a/p2p/src/main/java/bisq/network/p2p/peers/getdata/RequestDataHandler.java +++ b/p2p/src/main/java/bisq/network/p2p/peers/getdata/RequestDataHandler.java @@ -44,13 +44,13 @@ import com.google.common.util.concurrent.SettableFuture; import java.util.ArrayList; +import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; @@ -273,34 +273,25 @@ public void onMessage(NetworkEnvelope networkEnvelope, Connection connection) { }); } - // We process the LazyProcessedStoragePayload items (TradeStatistics) in batches with a delay in between. - // We want avoid that the UI get stuck when processing many entries. - // The dataStorage.add call is a bit expensive as sig checks is done there. - - // Using a background thread might be an alternative but it would require much more effort and - // it would also decrease user experience if the app gets under heavy load (like at startup with wallet sync). - // Beside that we mitigated the problem already as we will not get the whole TradeStatistics as we - // pass the excludeKeys and we pack the latest data dump - // into the resources, so a new user do not need to request all data. - - // In future we will probably limit by date or load on demand from user intent to not get too much data. - - // We split the list into sub lists with max 50 items and delay each batch with 200 ms. 
- int size = processDelayedItems.size(); - int chunkSize = 50; - int chunks = 1 + size / chunkSize; - int startIndex = 0; - for (int i = 0; i < chunks && startIndex < size; i++, startIndex += chunkSize) { - long delay = (i + 1) * 200; - int endIndex = Math.min(size, startIndex + chunkSize); - List subList = processDelayedItems.subList(startIndex, endIndex); - UserThread.runAfter(() -> subList.stream().forEach(item -> { - if (item instanceof ProtectedStorageEntry) - dataStorage.addProtectedStorageEntry((ProtectedStorageEntry) item, sender, null, false, false); - else if (item instanceof PersistableNetworkPayload) - dataStorage.addPersistableNetworkPayload((PersistableNetworkPayload) item, sender, false, false, false, false); - }), delay, TimeUnit.MILLISECONDS); - } + // We changed the earlier behaviour with delayed execution of chunks of the list as it caused + // worse results than if it is processed in one go. + // The main reason is probably that listeners trigger more code and if that is called early at + // startup we have better chances that the user has not already navigated to a screen where the + // trade statistics are used for UI rendering. + // We need to take care that the update period between releases stays short, as with the current + // situation before the 0.9 release we receive 4000 objects with a newly installed client, which + // causes the application to stay stuck for quite a while at startup. 
+ log.info("Start processing {} delayedItems.", processDelayedItems.size()); + long startTs = new Date().getTime(); + processDelayedItems.forEach(item -> { + if (item instanceof ProtectedStorageEntry) + dataStorage.addProtectedStorageEntry((ProtectedStorageEntry) item, sender, null, + false, false); + else if (item instanceof PersistableNetworkPayload) + dataStorage.addPersistableNetworkPayload((PersistableNetworkPayload) item, sender, + false, false, false, false); + }); + log.info("Processing delayedItems completed after {} sec.", (new Date().getTime() - startTs) / 1000D); cleanup(); listener.onComplete();