Introduce record version history for optimistic store
Rolling back optimistic updates is now smarter: the store keeps a history of record updates.
When a mutation finishes its execution, only that mutation's version of the record is rolled back; versions written by still-running mutations are kept.

Closes #583
sav007 committed Aug 9, 2017
1 parent 2cab769 commit 5e600d6
Showing 3 changed files with 168 additions and 24 deletions.
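Before the diff, here is a minimal, self-contained sketch of the idea behind the per-record version journal introduced in this commit. The names (VersionJournalSketch, Version, commit, revert, snapshot) are illustrative only and are not the actual RecordJournal implementation below; the sketch just shows how keeping one version per mutation lets a rollback drop only that mutation's write while versions from still-running mutations survive.

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.UUID;

final class VersionJournalSketch {
  // One journal entry per optimistic write: the mutation that produced it plus the fields it wrote.
  static final class Version {
    final UUID mutationId;
    final Map<String, Object> fields;

    Version(UUID mutationId, Map<String, Object> fields) {
      this.mutationId = mutationId;
      this.fields = fields;
    }
  }

  // History of optimistic versions for a single cache record, oldest first.
  private final LinkedList<Version> history = new LinkedList<>();

  // Append a new optimistic version written by the given mutation.
  void commit(UUID mutationId, Map<String, Object> fields) {
    history.addLast(new Version(mutationId, fields));
  }

  // Remove only the version written by the given mutation; versions from other,
  // still-running mutations are kept.
  void revert(UUID mutationId) {
    for (int i = 0; i < history.size(); i++) {
      if (history.get(i).mutationId.equals(mutationId)) {
        history.remove(i);
        break;
      }
    }
  }

  // The snapshot is the superposition of the remaining versions, applied oldest to newest.
  Map<String, Object> snapshot() {
    Map<String, Object> merged = new LinkedHashMap<>();
    for (Version version : history) {
      merged.putAll(version.fields);
    }
    return merged;
  }

  public static void main(String[] args) {
    VersionJournalSketch journal = new VersionJournalSketch();
    UUID mutation1 = UUID.randomUUID();
    UUID mutation2 = UUID.randomUUID();

    journal.commit(mutation1, Collections.<String, Object>singletonMap("name", "R222-D222"));
    journal.commit(mutation2, Collections.<String, Object>singletonMap("name", "Spiderman"));

    journal.revert(mutation2);               // drops only mutation2's version
    System.out.println(journal.snapshot());  // prints {name=R222-D222}: mutation1 is still applied
  }
}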
@@ -304,4 +304,97 @@ private MockResponse mockResponse(String fileName) throws IOException {
assertThat(watcherData.get(2).reviews().get(2).stars()).isEqualTo(5);
assertThat(watcherData.get(2).reviews().get(2).commentary()).isEqualTo("Amazing");
}

@Test public void two_optimistic_reverse_rollback_order() throws Exception {
server.enqueue(mockResponse("HeroAndFriendsNameWithIdsResponse.json"));
HeroAndFriendsNamesWithIDsQuery query1 = new HeroAndFriendsNamesWithIDsQuery(Episode.JEDI);
apolloClient.query(query1).execute();

server.enqueue(mockResponse("HeroNameWithIdResponse.json"));
HeroNameWithIdQuery query2 = new HeroNameWithIdQuery();
apolloClient.query(query2).execute();

UUID mutationId1 = UUID.randomUUID();
HeroAndFriendsNamesWithIDsQuery.Data data1 = new HeroAndFriendsNamesWithIDsQuery.Data(
new HeroAndFriendsNamesWithIDsQuery.Hero(
"Droid",
"2001",
"R222-D222",
Arrays.asList(
new HeroAndFriendsNamesWithIDsQuery.Friend(
"Human",
"1000",
"Robocop"
),
new HeroAndFriendsNamesWithIDsQuery.Friend(
"Human",
"1003",
"Batman"
)
)
)
);
apolloClient.apolloStore().writeOptimisticUpdatesAndPublish(query1, data1, mutationId1).execute();

UUID mutationId2 = UUID.randomUUID();
HeroNameWithIdQuery.Data data2 = new HeroNameWithIdQuery.Data(new HeroNameWithIdQuery.Hero(
"Human",
"1000",
"Spiderman"
));
apolloClient.apolloStore().writeOptimisticUpdatesAndPublish(query2, data2, mutationId2).execute();

// check if query1 sees the optimistic updates
data1 = apolloClient.query(query1).responseFetcher(ApolloResponseFetchers.CACHE_ONLY).execute().data();
assertThat(data1.hero().id()).isEqualTo("2001");
assertThat(data1.hero().name()).isEqualTo("R222-D222");
assertThat(data1.hero().friends()).hasSize(2);
assertThat(data1.hero().friends().get(0).id()).isEqualTo("1000");
assertThat(data1.hero().friends().get(0).name()).isEqualTo("Spiderman");
assertThat(data1.hero().friends().get(1).id()).isEqualTo("1003");
assertThat(data1.hero().friends().get(1).name()).isEqualTo("Batman");

// check if query2 sees the latest optimistic updates
data2 = apolloClient.query(query2).responseFetcher(ApolloResponseFetchers.CACHE_ONLY).execute().data();
assertThat(data2.hero().id()).isEqualTo("1000");
assertThat(data2.hero().name()).isEqualTo("Spiderman");

// rollback query2 optimistic updates
apolloClient.apolloStore().rollbackOptimisticUpdates(mutationId2).execute();

// check if query1 sees the latest optimistic updates
data1 = apolloClient.query(query1).responseFetcher(ApolloResponseFetchers.CACHE_ONLY).execute().data();
assertThat(data1.hero().id()).isEqualTo("2001");
assertThat(data1.hero().name()).isEqualTo("R222-D222");
assertThat(data1.hero().friends()).hasSize(2);
assertThat(data1.hero().friends().get(0).id()).isEqualTo("1000");
assertThat(data1.hero().friends().get(0).name()).isEqualTo("Robocop");
assertThat(data1.hero().friends().get(1).id()).isEqualTo("1003");
assertThat(data1.hero().friends().get(1).name()).isEqualTo("Batman");

// check if query2 sees the latest optimistic updates
data2 = apolloClient.query(query2).responseFetcher(ApolloResponseFetchers.CACHE_ONLY).execute().data();
assertThat(data2.hero().id()).isEqualTo("1000");
assertThat(data2.hero().name()).isEqualTo("Robocop");

// rollback query1 optimistic updates
apolloClient.apolloStore().rollbackOptimisticUpdates(mutationId1).execute();

// check if query1 sees the original non-optimistic data
data1 = apolloClient.query(query1).responseFetcher(ApolloResponseFetchers.CACHE_ONLY).execute().data();
assertThat(data1.hero().id()).isEqualTo("2001");
assertThat(data1.hero().name()).isEqualTo("R2-D2");
assertThat(data1.hero().friends()).hasSize(3);
assertThat(data1.hero().friends().get(0).id()).isEqualTo("1000");
assertThat(data1.hero().friends().get(0).name()).isEqualTo("SuperMan");
assertThat(data1.hero().friends().get(1).id()).isEqualTo("1002");
assertThat(data1.hero().friends().get(1).name()).isEqualTo("Han Solo");
assertThat(data1.hero().friends().get(2).id()).isEqualTo("1003");
assertThat(data1.hero().friends().get(2).name()).isEqualTo("Leia Organa");

// check if query2 sees the original non-optimistic data
data2 = apolloClient.query(query2).responseFetcher(ApolloResponseFetchers.CACHE_ONLY).execute().data();
assertThat(data2.hero().id()).isEqualTo("1000");
assertThat(data2.hero().name()).isEqualTo("SuperMan");
}
}
@@ -7,12 +7,11 @@
import com.nytimes.android.external.cache.Cache;
import com.nytimes.android.external.cache.CacheBuilder;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
@@ -23,7 +22,7 @@
import static com.apollographql.apollo.api.internal.Utils.checkNotNull;

public final class OptimisticNormalizedCache extends NormalizedCache {
private final Cache<String, Record> lruCache = CacheBuilder.newBuilder().build();
private final Cache<String, RecordJournal> lruCache = CacheBuilder.newBuilder().build();

@Nullable @Override public Record loadRecord(@Nonnull final String key, @Nonnull final CacheHeaders cacheHeaders) {
checkNotNull(key, "key == null");
@@ -36,15 +35,15 @@ public final class OptimisticNormalizedCache extends NormalizedCache {
return Optional.fromNullable(cache.loadRecord(key, cacheHeaders));
}
});
final Record optimisticRecord = lruCache.getIfPresent(key);
if (optimisticRecord != null) {
return nonOptimisticRecord.transform(new Function<Record, Record>() {
final RecordJournal optimisticCacheEntry = lruCache.getIfPresent(key);
if (optimisticCacheEntry != null) {
return nonOptimisticRecord.map(new Function<Record, Record>() {
@Nonnull @Override public Record apply(@Nonnull Record record) {
Record result = record.toBuilder().build();
result.mergeWith(optimisticRecord);
Record result = record.clone();
result.mergeWith(optimisticCacheEntry.snapshot);
return result;
}
}).or(optimisticRecord);
}).or(optimisticCacheEntry.snapshot.clone());
} else {
return nonOptimisticRecord.orNull();
}
@@ -102,31 +101,79 @@ public final class OptimisticNormalizedCache extends NormalizedCache {
@Nonnull public Set<String> mergeOptimisticUpdate(@Nonnull final Record record) {
checkNotNull(record, "record == null");

final Record oldRecord = lruCache.getIfPresent(record.key());
if (oldRecord == null) {
lruCache.put(record.key(), record);
final RecordJournal cacheEntry = lruCache.getIfPresent(record.key());
if (cacheEntry == null) {
lruCache.put(record.key(), new RecordJournal(record));
return Collections.singleton(record.key());
} else {
Set<String> changedKeys = oldRecord.mergeWith(record);
//re-insert to trigger new weight calculation
lruCache.put(record.key(), oldRecord);
return changedKeys;
return cacheEntry.commit(record);
}
}

@Nonnull public Set<String> removeOptimisticUpdates(@Nonnull final UUID mutationId) {
checkNotNull(mutationId, "mutationId == null");

Map<String, Record> cachedRecords = lruCache.asMap();
List<String> invalidateKeys = new ArrayList<>();
for (Map.Entry<String, Record> cachedRecordEntry : cachedRecords.entrySet()) {
if (mutationId.equals(cachedRecordEntry.getValue().mutationId())
|| cachedRecordEntry.getValue().mutationId() == null) {
invalidateKeys.add(cachedRecordEntry.getKey());
Set<String> changedCacheKeys = new HashSet<>();
Set<String> removedKeys = new HashSet<>();
Map<String, RecordJournal> cachedRecords = lruCache.asMap();
for (Map.Entry<String, RecordJournal> entry : cachedRecords.entrySet()) {
String cacheKey = entry.getKey();
RecordJournal cacheEntry = entry.getValue();
changedCacheKeys.addAll(cacheEntry.revert(mutationId));
if (cacheEntry.history.isEmpty()) {
removedKeys.add(cacheKey);
}
}
lruCache.invalidateAll(invalidateKeys);
lruCache.invalidateAll(removedKeys);
return changedCacheKeys;
}

private final class RecordJournal {
Record snapshot;
final LinkedList<Record> history;

RecordJournal(Record mutationRecord) {
this.snapshot = mutationRecord.clone();
this.history = new LinkedList<>();
history.add(mutationRecord);
}

/**
 * Commits a new version of the record to the history and merges it into the current snapshot.
 */
Set<String> commit(Record record) {
history.addLast(record);
return snapshot.mergeWith(record);
}

return new HashSet<>(invalidateKeys);
/**
 * Looks up the record version by mutation id; if found, removes it from the history and rebuilds the snapshot
 * record. The snapshot record is a superposition of all record versions remaining in the history.
 */
Set<String> revert(UUID mutationId) {
int recordIndex = -1;
for (int i = 0; i < history.size(); i++) {
if (mutationId.equals(history.get(i).mutationId())) {
recordIndex = i;
break;
}
}

if (recordIndex == -1) {
return Collections.emptySet();
}

Set<String> changedKeys = new HashSet<>();
changedKeys.add(history.remove(recordIndex).key());
for (int i = Math.max(0, recordIndex - 1); i < history.size(); i++) {
Record record = history.get(i);
if (i == Math.max(0, recordIndex - 1)) {
snapshot = record.clone();
} else {
changedKeys.addAll(snapshot.mergeWith(record));
}
}
return changedKeys;
}
}
}
@@ -91,6 +91,10 @@ public UUID mutationId() {
return mutationId;
}

public Record clone() {
return toBuilder().build();
}

/**
* @param otherRecord The record to merge into this record.
* @return A set of field keys which have changed, or were added. A field key incorporates any GraphQL arguments in
