diff --git a/benchmarks/src/main/java/zipkin/benchmarks/CodecBenchmarks.java b/benchmarks/src/main/java/zipkin/benchmarks/CodecBenchmarks.java
index 197090bac6e..8bb06324495 100644
--- a/benchmarks/src/main/java/zipkin/benchmarks/CodecBenchmarks.java
+++ b/benchmarks/src/main/java/zipkin/benchmarks/CodecBenchmarks.java
@@ -41,8 +41,7 @@
 import zipkin.Codec;
 import zipkin.Endpoint;
 import zipkin.internal.v2.Span;
-import zipkin.internal.v2.codec.BytesDecoder;
-import zipkin.internal.v2.codec.BytesEncoder;
+import zipkin.internal.v2.codec.SpanBytesCodec;
 
 /**
  * This compares the speed of the bundled java codec with the approach used in the scala
@@ -157,28 +156,28 @@ public byte[] writeClientSpan_thrift_libthrift() throws TException {
   }
 
   static final byte[] span2Json = read("/span2.json");
-  static final Span span2 = BytesDecoder.JSON.decode(span2Json);
+  static final Span span2 = SpanBytesCodec.JSON.decode(span2Json);
   static final List<Span> tenSpan2s = Collections.nCopies(10, span2);
-  static final byte[] tenSpan2sJson = BytesEncoder.JSON.encodeList(tenSpan2s);
+  static final byte[] tenSpan2sJson = SpanBytesCodec.JSON.encodeList(tenSpan2s);
 
   @Benchmark
   public Span readClientSpan_json_span2() {
-    return BytesDecoder.JSON.decode(span2Json);
+    return SpanBytesCodec.JSON.decode(span2Json);
   }
 
   @Benchmark
   public List<Span> readTenClientSpans_json_span2() {
-    return BytesDecoder.JSON.decodeList(tenSpan2sJson);
+    return SpanBytesCodec.JSON.decodeList(tenSpan2sJson);
   }
 
   @Benchmark
   public byte[] writeClientSpan_json_span2() {
-    return BytesEncoder.JSON.encode(span2);
+    return SpanBytesCodec.JSON.encode(span2);
   }
 
   @Benchmark
   public byte[] writeTenClientSpans_json_span2() {
-    return BytesEncoder.JSON.encodeList(tenSpan2s);
+    return SpanBytesCodec.JSON.encodeList(tenSpan2s);
   }
 
   static final byte[] rpcSpanJson = read("/span-rpc.json");
diff --git a/benchmarks/src/main/java/zipkin/benchmarks/Span2ConverterBenchmarks.java b/benchmarks/src/main/java/zipkin/benchmarks/Span2ConverterBenchmarks.java
index 3601e3e1222..c3ffd8eb568 100644
--- a/benchmarks/src/main/java/zipkin/benchmarks/Span2ConverterBenchmarks.java
+++ b/benchmarks/src/main/java/zipkin/benchmarks/Span2ConverterBenchmarks.java
@@ -38,7 +38,7 @@
 import zipkin.internal.V2SpanConverter;
 import zipkin.internal.Util;
 
-import static zipkin.internal.V2SpanConverter.convert;
+import static zipkin.internal.V2SpanConverter.toEndpoint;
 
 @Measurement(iterations = 5, time = 1)
 @Warmup(iterations = 10, time = 1)
@@ -97,8 +97,8 @@ public class Span2ConverterBenchmarks {
       .name("get")
       .kind(Span.Kind.SERVER)
       .shared(true)
-      .localEndpoint(convert(backend))
-      .remoteEndpoint(convert(frontend))
+      .localEndpoint(toEndpoint(backend))
+      .remoteEndpoint(toEndpoint(frontend))
       .timestamp(1472470996250000L)
       .duration(100000L)
       .putTag(TraceKeys.HTTP_PATH, "/backend")
diff --git a/pom.xml b/pom.xml
index bb00e90366f..0dbbb9a13b3 100755
--- a/pom.xml
+++ b/pom.xml
@@ -323,6 +323,12 @@
       <version>${okio.version}</version>
     </dependency>
 
+    <dependency>
+      <groupId>com.squareup.moshi</groupId>
+      <artifactId>moshi</artifactId>
+      <version>1.5.0</version>
+    </dependency>
+
     <dependency>
       <groupId>com.google.auto.value</groupId>
       <artifactId>auto-value</artifactId>
diff --git a/zipkin-collector/kafka/src/test/java/zipkin/collector/kafka/KafkaCollectorTest.java b/zipkin-collector/kafka/src/test/java/zipkin/collector/kafka/KafkaCollectorTest.java
index b252f77c993..d6f24e9ba31 100644
--- a/zipkin-collector/kafka/src/test/java/zipkin/collector/kafka/KafkaCollectorTest.java
+++ b/zipkin-collector/kafka/src/test/java/zipkin/collector/kafka/KafkaCollectorTest.java
@@ -32,7 +32,7 @@
 import zipkin.collector.kafka.KafkaCollector.Builder;
 import zipkin.internal.ApplyTimestampAndDuration;
import zipkin.internal.V2SpanConverter; -import zipkin.internal.v2.codec.BytesEncoder; +import zipkin.internal.v2.codec.SpanBytesCodec; import zipkin.storage.AsyncSpanConsumer; import zipkin.storage.AsyncSpanStore; import zipkin.storage.SpanStore; @@ -146,7 +146,7 @@ public void messageWithMultipleSpans_json2() throws Exception { ApplyTimestampAndDuration.apply(LOTS_OF_SPANS[1]) ); - byte[] message = BytesEncoder.JSON.encodeList(asList( + byte[] message = SpanBytesCodec.JSON.encodeList(asList( V2SpanConverter.fromSpan(spans.get(0)).get(0), V2SpanConverter.fromSpan(spans.get(1)).get(0) )); diff --git a/zipkin-collector/kafka10/src/test/java/zipkin/collector/kafka10/KafkaCollectorTest.java b/zipkin-collector/kafka10/src/test/java/zipkin/collector/kafka10/KafkaCollectorTest.java index 7486bf6d330..1461d7002db 100644 --- a/zipkin-collector/kafka10/src/test/java/zipkin/collector/kafka10/KafkaCollectorTest.java +++ b/zipkin-collector/kafka10/src/test/java/zipkin/collector/kafka10/KafkaCollectorTest.java @@ -39,7 +39,7 @@ import zipkin.collector.kafka10.KafkaCollector.Builder; import zipkin.internal.ApplyTimestampAndDuration; import zipkin.internal.V2SpanConverter; -import zipkin.internal.v2.codec.BytesEncoder; +import zipkin.internal.v2.codec.SpanBytesCodec; import zipkin.storage.AsyncSpanConsumer; import zipkin.storage.AsyncSpanStore; import zipkin.storage.SpanStore; @@ -200,7 +200,7 @@ public void messageWithMultipleSpans_json2() throws Exception { ApplyTimestampAndDuration.apply(LOTS_OF_SPANS[1]) ); - byte[] message = BytesEncoder.JSON.encodeList(asList( + byte[] message = SpanBytesCodec.JSON.encodeList(asList( V2SpanConverter.fromSpan(spans.get(0)).get(0), V2SpanConverter.fromSpan(spans.get(1)).get(0) )); diff --git a/zipkin-junit/pom.xml b/zipkin-junit/pom.xml index 4873578f8d5..40acecd680b 100644 --- a/zipkin-junit/pom.xml +++ b/zipkin-junit/pom.xml @@ -42,6 +42,11 @@ mockwebserver + + com.squareup.moshi + moshi + + junit junit diff --git a/zipkin-junit/src/main/java/zipkin/junit/ZipkinDispatcher.java b/zipkin-junit/src/main/java/zipkin/junit/ZipkinDispatcher.java index e57607f31b3..5761d8de2ce 100644 --- a/zipkin-junit/src/main/java/zipkin/junit/ZipkinDispatcher.java +++ b/zipkin-junit/src/main/java/zipkin/junit/ZipkinDispatcher.java @@ -31,7 +31,8 @@ import zipkin.collector.CollectorMetrics; import zipkin.internal.V2JsonSpanDecoder; import zipkin.internal.V2StorageComponent; -import zipkin.internal.v2.codec.BytesEncoder; +import zipkin.internal.v2.codec.DependencyLinkBytesCodec; +import zipkin.internal.v2.codec.SpanBytesCodec; import zipkin.internal.v2.internal.Platform; import zipkin.storage.Callback; import zipkin.storage.QueryRequest; @@ -124,20 +125,18 @@ MockResponse queryV2(HttpUrl url) throws IOException { } else if (url.encodedPath().equals("/api/v2/dependencies")) { Long endTs = maybeLong(url.queryParameter("endTs")); Long lookback = maybeLong(url.queryParameter("lookback")); - List result = store2.getDependencies( + List result = store2.getDependencies( endTs != null ? endTs : System.currentTimeMillis(), lookback != null ? 
lookback : DEFAULT_LOOKBACK ).execute(); - return jsonResponse(Codec.JSON.writeDependencyLinks(result)); + return jsonResponse(DependencyLinkBytesCodec.JSON.encodeList(result)); } else if (url.encodedPath().equals("/api/v2/traces")) { List> traces = store2.getTraces(toQueryRequest2(url)).execute(); - return jsonResponse(BytesEncoder.JSON.encodeNestedList(traces)); + return jsonResponse(SpanBytesCodec.JSON.encodeNestedList(traces)); } else if (url.encodedPath().startsWith("/api/v2/trace/")) { String traceIdHex = url.encodedPath().replace("/api/v2/trace/", ""); List trace = store2.getTrace(normalizeTraceId(traceIdHex)).execute(); - if (!trace.isEmpty()) { - return jsonResponse(BytesEncoder.JSON.encodeList(trace)); - } + if (!trace.isEmpty()) return jsonResponse(SpanBytesCodec.JSON.encodeList(trace)); } return new MockResponse().setResponseCode(404); } diff --git a/zipkin-junit/src/test/java/zipkin/junit/ZipkinRuleTest.java b/zipkin-junit/src/test/java/zipkin/junit/ZipkinRuleTest.java index a2d7e41af01..6120206848a 100644 --- a/zipkin-junit/src/test/java/zipkin/junit/ZipkinRuleTest.java +++ b/zipkin-junit/src/test/java/zipkin/junit/ZipkinRuleTest.java @@ -32,7 +32,7 @@ import zipkin.Span; import zipkin.internal.ApplyTimestampAndDuration; import zipkin.internal.V2SpanConverter; -import zipkin.internal.v2.codec.BytesEncoder; +import zipkin.internal.v2.codec.SpanBytesCodec; import static java.lang.String.format; import static java.util.Arrays.asList; @@ -68,7 +68,7 @@ public void getTraces_storedViaPostVersion2() throws IOException { ApplyTimestampAndDuration.apply(LOTS_OF_SPANS[1]) ); - byte[] message = BytesEncoder.JSON.encodeList(asList( + byte[] message = SpanBytesCodec.JSON.encodeList(asList( V2SpanConverter.fromSpan(spans.get(0)).get(0), V2SpanConverter.fromSpan(spans.get(1)).get(0) )); diff --git a/zipkin-junit/src/test/java/zipkin/junit/v2/HttpV2SpanConsumer.java b/zipkin-junit/src/test/java/zipkin/junit/v2/HttpV2SpanConsumer.java index e264ba5bb6a..edefd685692 100644 --- a/zipkin-junit/src/test/java/zipkin/junit/v2/HttpV2SpanConsumer.java +++ b/zipkin-junit/src/test/java/zipkin/junit/v2/HttpV2SpanConsumer.java @@ -19,9 +19,8 @@ import okhttp3.OkHttpClient; import okhttp3.Request; import okhttp3.RequestBody; -import okio.Buffer; import zipkin.internal.v2.Span; -import zipkin.internal.v2.codec.BytesEncoder; +import zipkin.internal.v2.codec.SpanBytesCodec; import zipkin.internal.v2.storage.SpanConsumer; /** Implements the span consumer interface by forwarding requests over http. 
*/ @@ -33,16 +32,10 @@ final class HttpV2SpanConsumer implements SpanConsumer { } @Override public zipkin.internal.v2.Call accept(List spans) { - Buffer json = new Buffer(); - json.writeByte('['); - for (int i = 0, length = spans.size(); i < length; ) { - json.write(BytesEncoder.JSON.encode(spans.get(i))); - if (++i < length) json.writeByte(','); - } - json.writeByte(']'); + byte[] json = SpanBytesCodec.JSON.encodeList(spans); return factory.newCall(new Request.Builder() .url(factory.baseUrl.resolve("/api/v2/spans")) - .post(RequestBody.create(MediaType.parse("application/json"), json.readByteArray())).build(), + .post(RequestBody.create(MediaType.parse("application/json"), json)).build(), b -> null /* void */ ); } diff --git a/zipkin-junit/src/test/java/zipkin/junit/v2/HttpV2SpanStore.java b/zipkin-junit/src/test/java/zipkin/junit/v2/HttpV2SpanStore.java index 4876fa6f771..58e974d2af5 100644 --- a/zipkin-junit/src/test/java/zipkin/junit/v2/HttpV2SpanStore.java +++ b/zipkin-junit/src/test/java/zipkin/junit/v2/HttpV2SpanStore.java @@ -13,22 +13,28 @@ */ package zipkin.junit.v2; +import com.squareup.moshi.JsonAdapter; +import com.squareup.moshi.Moshi; +import com.squareup.moshi.Types; import java.util.Collections; import java.util.List; import javax.annotation.Nullable; import okhttp3.HttpUrl; import okhttp3.OkHttpClient; import okhttp3.Request; -import zipkin.Codec; -import zipkin.DependencyLink; import zipkin.internal.v2.Call; +import zipkin.internal.v2.DependencyLink; import zipkin.internal.v2.Span; -import zipkin.internal.v2.codec.BytesDecoder; +import zipkin.internal.v2.codec.DependencyLinkBytesCodec; +import zipkin.internal.v2.codec.SpanBytesCodec; import zipkin.internal.v2.storage.QueryRequest; import zipkin.internal.v2.storage.SpanStore; /** Implements the span store interface by forwarding requests over http. 
*/ final class HttpV2SpanStore implements SpanStore { + static final JsonAdapter> STRING_LIST_ADAPTER = + new Moshi.Builder().build().adapter(Types.newParameterizedType(List.class, String.class)); + final HttpV2Call.Factory factory; HttpV2SpanStore(OkHttpClient client, HttpUrl baseUrl) { @@ -46,13 +52,13 @@ final class HttpV2SpanStore implements SpanStore { maybeAddQueryParam(url, "lookback", request.lookback()); maybeAddQueryParam(url, "limit", request.limit()); return factory.newCall(new Request.Builder().url(url.build()).build(), - content -> BytesDecoder.JSON.decodeNestedList(content.readByteArray())); + content -> SpanBytesCodec.JSON.decodeNestedList(content.readByteArray())); } @Override public Call> getTrace(String traceId) { return factory.newCall(new Request.Builder() .url(factory.baseUrl.resolve("/api/v2/trace/" + Span.normalizeTraceId(traceId))) - .build(), content -> BytesDecoder.JSON.decodeList(content.readByteArray())) + .build(), content -> SpanBytesCodec.JSON.decodeList(content.readByteArray())) .handleError(((error, callback) -> { if (error instanceof HttpException && ((HttpException) error).code == 404) { callback.onSuccess(Collections.emptyList()); @@ -66,20 +72,20 @@ final class HttpV2SpanStore implements SpanStore { public Call> getServiceNames() { return factory.newCall(new Request.Builder() .url(factory.baseUrl.resolve("/api/v2/services")) - .build(), content -> Codec.JSON.readStrings(content.readByteArray())); + .build(), STRING_LIST_ADAPTER::fromJson); } @Override public Call> getSpanNames(String serviceName) { return factory.newCall(new Request.Builder() .url(factory.baseUrl.resolve("/api/v2/spans?serviceName=" + serviceName)) - .build(), content -> Codec.JSON.readStrings(content.readByteArray())); + .build(), STRING_LIST_ADAPTER::fromJson); } @Override public Call> getDependencies(long endTs, long lookback) { return factory.newCall(new Request.Builder() .url(factory.baseUrl.resolve("/api/v2/dependencies?endTs=" + endTs + "&lookback=" + lookback)) - .build(), content -> Codec.JSON.readDependencyLinks(content.readByteArray())); + .build(), content -> DependencyLinkBytesCodec.JSON.decodeList(content.readByteArray())); } void maybeAddQueryParam(HttpUrl.Builder builder, String name, @Nullable Object value) { diff --git a/zipkin-server/src/main/java/zipkin/server/ZipkinQueryApiV2.java b/zipkin-server/src/main/java/zipkin/server/ZipkinQueryApiV2.java index bc1c10b8f88..e03ef3aebde 100644 --- a/zipkin-server/src/main/java/zipkin/server/ZipkinQueryApiV2.java +++ b/zipkin-server/src/main/java/zipkin/server/ZipkinQueryApiV2.java @@ -32,12 +32,12 @@ import org.springframework.web.bind.annotation.ResponseStatus; import org.springframework.web.bind.annotation.RestController; import org.springframework.web.context.request.WebRequest; -import zipkin.Codec; -import zipkin.DependencyLink; import zipkin.internal.V2StorageComponent; import zipkin.internal.v2.Call; +import zipkin.internal.v2.DependencyLink; import zipkin.internal.v2.Span; -import zipkin.internal.v2.codec.BytesEncoder; +import zipkin.internal.v2.codec.DependencyLinkBytesCodec; +import zipkin.internal.v2.codec.SpanBytesCodec; import zipkin.internal.v2.storage.QueryRequest; import zipkin.storage.StorageComponent; @@ -83,7 +83,7 @@ public byte[] getDependencies( Call> call = storage.v2SpanStore() .getDependencies(endTs, lookback != null ? 
lookback : defaultLookback); - return Codec.JSON.writeDependencyLinks(call.execute()); + return DependencyLinkBytesCodec.JSON.encodeList(call.execute()); } @RequestMapping(value = "/services", method = RequestMethod.GET) @@ -128,7 +128,7 @@ public String getTraces( .limit(limit).build(); List> traces = storage.v2SpanStore().getTraces(queryRequest).execute(); - return new String(BytesEncoder.JSON.encodeNestedList(traces), UTF_8); + return new String(SpanBytesCodec.JSON.encodeNestedList(traces), UTF_8); } @RequestMapping(value = "/trace/{traceIdHex}", method = RequestMethod.GET, produces = APPLICATION_JSON_VALUE) @@ -137,7 +137,7 @@ public String getTrace(@PathVariable String traceIdHex, WebRequest request) thro List trace = storage.v2SpanStore().getTrace(traceIdHex).execute(); if (trace.isEmpty()) throw new TraceNotFoundException(traceIdHex); - return new String(BytesEncoder.JSON.encodeList(trace), UTF_8); + return new String(SpanBytesCodec.JSON.encodeList(trace), UTF_8); } @ExceptionHandler(Version2StorageNotConfigured.class) diff --git a/zipkin-server/src/test/java/zipkin/server/ZipkinServerIntegrationTest.java b/zipkin-server/src/test/java/zipkin/server/ZipkinServerIntegrationTest.java index 482bd75c95a..8086f6dc916 100644 --- a/zipkin-server/src/test/java/zipkin/server/ZipkinServerIntegrationTest.java +++ b/zipkin-server/src/test/java/zipkin/server/ZipkinServerIntegrationTest.java @@ -38,7 +38,7 @@ import zipkin.internal.ApplyTimestampAndDuration; import zipkin.internal.V2InMemoryStorage; import zipkin.internal.V2SpanConverter; -import zipkin.internal.v2.codec.BytesEncoder; +import zipkin.internal.v2.codec.SpanBytesCodec; import static java.lang.String.format; import static java.util.Arrays.asList; @@ -93,7 +93,7 @@ public void writeSpans_noContentTypeIsJson() throws Exception { public void writeSpans_version2() throws Exception { Span span = ApplyTimestampAndDuration.apply(LOTS_OF_SPANS[0]); - byte[] message = BytesEncoder.JSON.encodeList(asList( + byte[] message = SpanBytesCodec.JSON.encodeList(asList( V2SpanConverter.fromSpan(span).get(0) )); diff --git a/zipkin-storage/elasticsearch-http/pom.xml b/zipkin-storage/elasticsearch-http/pom.xml index f074e870d82..04649afa2d2 100644 --- a/zipkin-storage/elasticsearch-http/pom.xml +++ b/zipkin-storage/elasticsearch-http/pom.xml @@ -49,7 +49,6 @@ com.squareup.moshi moshi - 1.5.0 diff --git a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/moshi/JsonReaders.java b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/moshi/JsonReaders.java index 863f1e90f3b..5a23bc15e17 100644 --- a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/moshi/JsonReaders.java +++ b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/moshi/JsonReaders.java @@ -16,7 +16,9 @@ import com.squareup.moshi.JsonReader; import java.io.EOFException; import java.io.IOException; +import java.util.ArrayList; import java.util.LinkedHashSet; +import java.util.List; import java.util.Set; import javax.annotation.Nullable; @@ -69,10 +71,10 @@ public static JsonReader enterPath(JsonReader reader, String path) throws IOExce return null; } - public static Set collectValuesNamed(JsonReader reader, String name) throws IOException { + public static List collectValuesNamed(JsonReader reader, String name) throws IOException { Set result = new LinkedHashSet<>(); visitObject(reader, name, result); - return result; + return new ArrayList<>(result); } static void visitObject(JsonReader reader, String name, Set result) throws IOException { diff --git 
a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/BodyConverters.java b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/BodyConverters.java index 9741aaf2562..20030f0ace6 100644 --- a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/BodyConverters.java +++ b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/BodyConverters.java @@ -16,29 +16,24 @@ import com.squareup.moshi.JsonReader; import java.io.IOException; import java.util.List; -import java.util.Set; import okio.BufferedSource; -import zipkin.DependencyLink; -import zipkin.internal.DependencyLinker; -import zipkin.internal.Util; +import zipkin.internal.v2.DependencyLink; import zipkin.internal.v2.Span; +import zipkin.internal.v2.internal.DependencyLinker; import zipkin.storage.elasticsearch.http.internal.client.HttpCall.BodyConverter; import zipkin.storage.elasticsearch.http.internal.client.SearchResultConverter; import static zipkin.moshi.JsonReaders.collectValuesNamed; final class BodyConverters { - static final BodyConverter> SORTED_KEYS = b -> { - Set result = collectValuesNamed(JsonReader.of(b), "key"); - return Util.sortedList(result); - }; + static final BodyConverter> KEYS = b -> collectValuesNamed(JsonReader.of(b), "key"); static final BodyConverter> SPANS = - SearchResultConverter.create(JsonAdapters.SPAN_ADAPTER); + SearchResultConverter.create(JsonAdapters.SPAN_ADAPTER); static final BodyConverter> DEPENDENCY_LINKS = - new SearchResultConverter(JsonAdapters.DEPENDENCY_LINK_ADAPTER) { - @Override public List convert(BufferedSource content) throws IOException { - List result = super.convert(content); - return result.isEmpty() ? result : DependencyLinker.merge(result); - } - }; + new SearchResultConverter(JsonAdapters.DEPENDENCY_LINK_ADAPTER) { + @Override public List convert(BufferedSource content) throws IOException { + List result = super.convert(content); + return result.isEmpty() ? 
result : DependencyLinker.merge(result); + } + }; } diff --git a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpSpanConsumer.java b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpSpanConsumer.java index 6526609111a..4834049da00 100644 --- a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpSpanConsumer.java +++ b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpSpanConsumer.java @@ -26,7 +26,7 @@ import zipkin.internal.v2.Annotation; import zipkin.internal.v2.Call; import zipkin.internal.v2.Span; -import zipkin.internal.v2.codec.BytesEncoder; +import zipkin.internal.v2.codec.SpanBytesEncoder; import zipkin.internal.v2.storage.SpanConsumer; import zipkin.storage.elasticsearch.http.internal.client.HttpCall; @@ -131,9 +131,9 @@ static byte[] prefixWithTimestampMillisAndQuery(Span span, @Nullable Long timest if (LOG.isLoggable(Level.FINE)) { LOG.log(Level.FINE, "Error indexing query for span: " + span, e); } - return BytesEncoder.JSON.encode(span); + return SpanBytesEncoder.JSON.encode(span); } - byte[] document = BytesEncoder.JSON.encode(span); + byte[] document = SpanBytesEncoder.JSON.encode(span); if (query.rangeEquals(0L, ByteString.of(new byte[] {'{', '}'}))) { return document; } diff --git a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpSpanStore.java b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpSpanStore.java index 75850044102..8e87340fcbf 100644 --- a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpSpanStore.java +++ b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpSpanStore.java @@ -22,8 +22,8 @@ import java.util.List; import java.util.Locale; import java.util.Map; -import zipkin.DependencyLink; import zipkin.internal.v2.Call; +import zipkin.internal.v2.DependencyLink; import zipkin.internal.v2.Span; import zipkin.internal.v2.storage.QueryRequest; import zipkin.internal.v2.storage.SpanStore; @@ -99,7 +99,7 @@ final class ElasticsearchHttpSpanStore implements SpanStore { SearchRequest esRequest = SearchRequest.create(indices) .filters(filters).addAggregation(traceIdTimestamp); - HttpCall> traceIdsCall = search.newCall(esRequest, BodyConverters.SORTED_KEYS); + HttpCall> traceIdsCall = search.newCall(esRequest, BodyConverters.KEYS); // When we receive span results, we need to group them by trace ID BodyConverter>> converter = content -> { @@ -150,7 +150,7 @@ final class ElasticsearchHttpSpanStore implements SpanStore { .filters(filters) .addAggregation(Aggregation.terms("localEndpoint.serviceName", Integer.MAX_VALUE)) .addAggregation(Aggregation.terms("remoteEndpoint.serviceName", Integer.MAX_VALUE)); - return search.newCall(request, BodyConverters.SORTED_KEYS); + return search.newCall(request, BodyConverters.KEYS); } @Override public Call> getSpanNames(String serviceName) { @@ -171,7 +171,7 @@ final class ElasticsearchHttpSpanStore implements SpanStore { .filters(filters) .addAggregation(Aggregation.terms("name", Integer.MAX_VALUE)); - return search.newCall(request, BodyConverters.SORTED_KEYS); + return search.newCall(request, BodyConverters.KEYS); } @Override public Call> getDependencies(long endTs, long lookback) { diff --git 
a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpStorage.java b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpStorage.java index d782c6494c7..9e057746301 100644 --- a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpStorage.java +++ b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpStorage.java @@ -29,12 +29,12 @@ import okhttp3.RequestBody; import okio.Buffer; import zipkin.internal.V2StorageComponent; +import zipkin.internal.v2.internal.Platform; import zipkin.internal.v2.storage.SpanConsumer; import zipkin.internal.v2.storage.SpanStore; import zipkin.storage.AsyncSpanStore; import zipkin.storage.elasticsearch.http.internal.client.HttpCall; -import static zipkin.internal.Util.checkNotNull; import static zipkin.moshi.JsonReaders.enterPath; import static zipkin.storage.elasticsearch.http.ElasticsearchHttpSpanStore.DEPENDENCY; import static zipkin.storage.elasticsearch.http.ElasticsearchHttpSpanStore.SPAN; @@ -86,7 +86,7 @@ public static abstract class Builder implements zipkin.storage.StorageComponent. * Defaults to "http://localhost:9200". */ public final Builder hosts(final List hosts) { - checkNotNull(hosts, "hosts"); + if (hosts == null) throw new NullPointerException("hosts == null"); return hostsSupplier(new HostsSupplier() { @Override public List get() { return hosts; @@ -235,7 +235,7 @@ void clear(String index) throws IOException { .url(http().baseUrl.newBuilder().addPathSegment(index).build()) .delete().tag("delete-index").build(); - http().execute(deleteRequest, b -> null); + http().newCall(deleteRequest, b -> null).execute(); flush(http(), index); } @@ -247,7 +247,7 @@ static void flush(HttpCall.Factory factory, String index) throws IOException { .post(RequestBody.create(APPLICATION_JSON, "")) .tag("flush-index").build(); - factory.execute(flushRequest, b -> null); + factory.newCall(flushRequest, b -> null).execute(); } /** This is blocking so that we can determine if the cluster is healthy or not */ @@ -260,7 +260,7 @@ CheckResult ensureClusterReady(String index) { .tag("get-cluster-health").build(); try { - return http().execute(request, b -> { + return http().newCall(request, b -> { b.request(Long.MAX_VALUE); // Buffer the entire body. 
Buffer body = b.buffer(); JsonReader status = enterPath(JsonReader.of(body.clone()), "status"); @@ -271,8 +271,8 @@ CheckResult ensureClusterReady(String index) { throw new IllegalStateException("Health status is RED"); } return CheckResult.OK; - }); - } catch (RuntimeException e) { + }).execute(); + } catch (IOException | RuntimeException e) { return CheckResult.failed(e); } } @@ -280,11 +280,15 @@ CheckResult ensureClusterReady(String index) { @Memoized // since we don't want overlapping calls to apply the index templates IndexTemplates ensureIndexTemplates() { String index = indexNameFormatter().index(); - IndexTemplates templates = new VersionSpecificTemplates(this).get(http()); - EnsureIndexTemplate.apply(http(), index + ":" + SPAN + "_template", templates.span()); - EnsureIndexTemplate.apply(http(), index + ":" + DEPENDENCY + "_template", - templates.dependency()); - return templates; + try { + IndexTemplates templates = new VersionSpecificTemplates(this).get(http()); + EnsureIndexTemplate.apply(http(), index + ":" + SPAN + "_template", templates.span()); + EnsureIndexTemplate.apply(http(), index + ":" + DEPENDENCY + "_template", + templates.dependency()); + return templates; + } catch (IOException e) { + throw Platform.get().uncheckedIOException(e); + } } @Memoized // hosts resolution might imply a network call, and we might make a new okhttp instance diff --git a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/EnsureIndexTemplate.java b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/EnsureIndexTemplate.java index abc460bb8ba..6c14b835778 100644 --- a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/EnsureIndexTemplate.java +++ b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/EnsureIndexTemplate.java @@ -13,6 +13,7 @@ */ package zipkin.storage.elasticsearch.http; +import java.io.IOException; import okhttp3.HttpUrl; import okhttp3.Request; import okhttp3.RequestBody; @@ -27,17 +28,18 @@ final class EnsureIndexTemplate { * This is a blocking call, used inside a lazy. That's because no writes should occur until the * template is available. 
*/ - static void apply(HttpCall.Factory callFactory, String name, String indexTemplate) { + static void apply(HttpCall.Factory callFactory, String name, String indexTemplate) + throws IOException { HttpUrl templateUrl = callFactory.baseUrl.newBuilder("_template").addPathSegment(name).build(); Request getTemplate = new Request.Builder().url(templateUrl).tag("get-template").build(); try { - callFactory.execute(getTemplate, b -> null); + callFactory.newCall(getTemplate, b -> null).execute(); } catch (IllegalStateException e) { // TODO: handle 404 slightly more nicely Request updateTemplate = new Request.Builder() - .url(templateUrl) - .put(RequestBody.create(APPLICATION_JSON, indexTemplate)) - .tag("update-template").build(); - callFactory.execute(updateTemplate, b -> null); + .url(templateUrl) + .put(RequestBody.create(APPLICATION_JSON, indexTemplate)) + .tag("update-template").build(); + callFactory.newCall(updateTemplate, b -> null).execute(); } } } diff --git a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/HttpBulkIndexer.java b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/HttpBulkIndexer.java index 53b5f16a042..8849f7a9b2c 100644 --- a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/HttpBulkIndexer.java +++ b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/HttpBulkIndexer.java @@ -22,9 +22,9 @@ import okhttp3.Request; import okhttp3.RequestBody; import okio.Buffer; -import zipkin.internal.JsonCodec; import zipkin.storage.elasticsearch.http.internal.client.HttpCall; +import static zipkin.internal.v2.internal.JsonEscaper.jsonEscape; import static zipkin.storage.elasticsearch.http.ElasticsearchHttpStorage.APPLICATION_JSON; // See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html @@ -57,7 +57,7 @@ void writeIndexMetadata(String index, String typeName, @Nullable String id) { // the _type parameter is needed for Elasticsearch <6.x body.writeUtf8(",\"_type\":\"").writeUtf8(typeName).writeByte('"'); if (id != null) { - body.writeUtf8(",\"_id\":\"").writeUtf8(JsonCodec.escape(id)).writeByte('"'); + body.writeUtf8(",\"_id\":\"").writeUtf8(jsonEscape(id)).writeByte('"'); } body.writeUtf8("}}\n"); } diff --git a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/IndexNameFormatter.java b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/IndexNameFormatter.java index 7db3af90d98..f8f78d250c6 100644 --- a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/IndexNameFormatter.java +++ b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/IndexNameFormatter.java @@ -24,7 +24,6 @@ import java.util.List; import java.util.TimeZone; import javax.annotation.Nullable; -import zipkin.internal.Util; @AutoValue abstract class IndexNameFormatter { @@ -116,7 +115,13 @@ List formatTypeAndRange(@Nullable String type, long beginMillis, long en static GregorianCalendar midnightUTC(long epochMillis) { GregorianCalendar result = new GregorianCalendar(UTC); - result.setTimeInMillis(Util.midnightUTC(epochMillis)); + Calendar day = Calendar.getInstance(UTC); + day.setTimeInMillis(epochMillis); + day.set(Calendar.MILLISECOND, 0); + day.set(Calendar.SECOND, 0); + day.set(Calendar.MINUTE, 0); + day.set(Calendar.HOUR_OF_DAY, 0); + result.setTimeInMillis(day.getTimeInMillis()); return result; } diff --git 
a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/JsonAdapters.java b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/JsonAdapters.java index 56dfe87b610..1b3e0351be0 100644 --- a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/JsonAdapters.java +++ b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/JsonAdapters.java @@ -19,8 +19,8 @@ import java.io.IOException; import javax.annotation.Nonnull; import javax.annotation.Nullable; -import zipkin.DependencyLink; import zipkin.internal.v2.Annotation; +import zipkin.internal.v2.DependencyLink; import zipkin.internal.v2.Endpoint; import zipkin.internal.v2.Span; @@ -177,7 +177,7 @@ public void toJson(JsonWriter writer, @Nullable Endpoint value) throws IOExcepti static final JsonAdapter DEPENDENCY_LINK_ADAPTER = new JsonAdapter() { @Override @Nonnull public DependencyLink fromJson(JsonReader reader) throws IOException { - DependencyLink.Builder result = DependencyLink.builder(); + DependencyLink.Builder result = DependencyLink.newBuilder(); reader.beginObject(); while (reader.hasNext()) { switch (reader.nextName()) { diff --git a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/LegacyElasticsearchHttpSpanStore.java b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/LegacyElasticsearchHttpSpanStore.java index 6caa7e48e06..21cf86cc41d 100644 --- a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/LegacyElasticsearchHttpSpanStore.java +++ b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/LegacyElasticsearchHttpSpanStore.java @@ -13,6 +13,7 @@ */ package zipkin.storage.elasticsearch.http; +import java.io.IOException; import java.util.Collections; import java.util.Iterator; import java.util.LinkedHashMap; @@ -20,6 +21,7 @@ import java.util.Locale; import java.util.Map; import javax.annotation.Nullable; +import okio.BufferedSource; import zipkin.DependencyLink; import zipkin.Span; import zipkin.internal.CorrectForClockSkew; @@ -47,6 +49,13 @@ final class LegacyElasticsearchHttpSpanStore implements AsyncSpanStore { SearchResultConverter.create(LegacyJsonAdapters.SPAN_ADAPTER); static final BodyConverter> NULLABLE_SPANS = SearchResultConverter.create(LegacyJsonAdapters.SPAN_ADAPTER).defaultToNull(); + static final BodyConverter> DEPENDENCY_LINKS = + new SearchResultConverter(LegacyJsonAdapters.LINK_ADAPTER) { + @Override public List convert(BufferedSource content) throws IOException { + List result = super.convert(content); + return result.isEmpty() ? result : zipkin.internal.DependencyLinker.merge(result); + } + }; final SearchCallFactory search; final String[] allIndices; @@ -70,8 +79,8 @@ final class LegacyElasticsearchHttpSpanStore implements AsyncSpanStore { filters.addRange("timestamp_millis", beginMillis, endMillis); if (request.serviceName != null) { filters.addNestedTerms(asList( - "annotations.endpoint.serviceName", - "binaryAnnotations.endpoint.serviceName" + "annotations.endpoint.serviceName", + "binaryAnnotations.endpoint.serviceName" ), request.serviceName); } @@ -116,8 +125,8 @@ final class LegacyElasticsearchHttpSpanStore implements AsyncSpanStore { // be no significant difference in user experience since span start times are usually very // close to each other in human time. 
Aggregation traceIdTimestamp = Aggregation.terms("traceId", request.limit) - .addSubAggregation(Aggregation.min("timestamp_millis")) - .orderBy("timestamp_millis", "desc"); + .addSubAggregation(Aggregation.min("timestamp_millis")) + .orderBy("timestamp_millis", "desc"); List indices = indexNameFormatter.formatTypeAndRange(null, beginMillis, endMillis); if (indices.isEmpty()) { @@ -126,9 +135,9 @@ final class LegacyElasticsearchHttpSpanStore implements AsyncSpanStore { } SearchRequest esRequest = SearchRequest.create(indices, SPAN) - .filters(filters).addAggregation(traceIdTimestamp); + .filters(filters).addAggregation(traceIdTimestamp); - HttpCall> traceIdsCall = search.newCall(esRequest, BodyConverters.SORTED_KEYS); + HttpCall> traceIdsCall = search.newCall(esRequest, BodyConverters.KEYS); // When we receive span results, we need to group them by trace ID Callback> successCallback = new Callback>() { @@ -193,14 +202,14 @@ public void getRawTrace(long traceIdHigh, long traceIdLow, Callback> String traceIdHex = Util.toLowerHex(strictTraceId ? traceIdHigh : 0L, traceIdLow); SearchRequest request = SearchRequest.create(asList(allIndices), SPAN) - .term("traceId", traceIdHex); + .term("traceId", traceIdHex); search.newCall(request, NULLABLE_SPANS).submit(callback); } @Override public void getServiceNames(Callback> callback) { - long endMillis = System.currentTimeMillis(); - long beginMillis = endMillis - namesLookback; + long endMillis = System.currentTimeMillis(); + long beginMillis = endMillis - namesLookback; List indices = indexNameFormatter.formatTypeAndRange(null, beginMillis, endMillis); if (indices.isEmpty()) { @@ -209,9 +218,9 @@ public void getRawTrace(long traceIdHigh, long traceIdLow, Callback> } SearchRequest request = SearchRequest.create(indices, SERVICE_SPAN) - .addAggregation(Aggregation.terms("serviceName", Integer.MAX_VALUE)); + .addAggregation(Aggregation.terms("serviceName", Integer.MAX_VALUE)); - search.newCall(request, BodyConverters.SORTED_KEYS).submit(new Callback>() { + search.newCall(request, BodyConverters.KEYS).submit(new Callback>() { @Override public void onSuccess(@Nullable List value) { if (!value.isEmpty()) callback.onSuccess(value); @@ -220,9 +229,9 @@ public void getRawTrace(long traceIdHigh, long traceIdLow, Callback> SearchRequest.Filters filters = new SearchRequest.Filters(); filters.addRange("timestamp_millis", beginMillis, endMillis); SearchRequest request = SearchRequest.create(indices, SPAN).filters(filters) - .addAggregation(Aggregation.nestedTerms("annotations.endpoint.serviceName")) - .addAggregation(Aggregation.nestedTerms("binaryAnnotations.endpoint.serviceName")); - search.newCall(request, BodyConverters.SORTED_KEYS).submit(callback); + .addAggregation(Aggregation.nestedTerms("annotations.endpoint.serviceName")) + .addAggregation(Aggregation.nestedTerms("binaryAnnotations.endpoint.serviceName")); + search.newCall(request, BodyConverters.KEYS).submit(callback); } @Override public void onError(Throwable t) { @@ -237,8 +246,8 @@ public void getRawTrace(long traceIdHigh, long traceIdLow, Callback> return; } - long endMillis = System.currentTimeMillis(); - long beginMillis = endMillis - namesLookback; + long endMillis = System.currentTimeMillis(); + long beginMillis = endMillis - namesLookback; List indices = indexNameFormatter.formatTypeAndRange(null, beginMillis, endMillis); if (indices.isEmpty()) { @@ -247,10 +256,10 @@ public void getRawTrace(long traceIdHigh, long traceIdLow, Callback> } SearchRequest request = SearchRequest.create(indices, 
SERVICE_SPAN) - .term("serviceName", serviceName.toLowerCase(Locale.ROOT)) - .addAggregation(Aggregation.terms("spanName", Integer.MAX_VALUE)); + .term("serviceName", serviceName.toLowerCase(Locale.ROOT)) + .addAggregation(Aggregation.terms("spanName", Integer.MAX_VALUE)); - search.newCall(request, BodyConverters.SORTED_KEYS).submit(new Callback>() { + search.newCall(request, BodyConverters.KEYS).submit(new Callback>() { @Override public void onSuccess(@Nullable List value) { if (!value.isEmpty()) callback.onSuccess(value); @@ -259,12 +268,12 @@ public void getRawTrace(long traceIdHigh, long traceIdLow, Callback> SearchRequest.Filters filters = new SearchRequest.Filters(); filters.addRange("timestamp_millis", beginMillis, endMillis); filters.addNestedTerms(asList( - "annotations.endpoint.serviceName", - "binaryAnnotations.endpoint.serviceName" + "annotations.endpoint.serviceName", + "binaryAnnotations.endpoint.serviceName" ), serviceName.toLowerCase(Locale.ROOT)); SearchRequest request = SearchRequest.create(indices, SPAN).filters(filters) - .addAggregation(Aggregation.terms("name", Integer.MAX_VALUE)); - search.newCall(request, BodyConverters.SORTED_KEYS).submit(callback); + .addAggregation(Aggregation.terms("name", Integer.MAX_VALUE)); + search.newCall(request, BodyConverters.KEYS).submit(callback); } @Override public void onError(Throwable t) { @@ -274,7 +283,7 @@ public void getRawTrace(long traceIdHigh, long traceIdLow, Callback> } @Override public void getDependencies(long endTs, @Nullable Long lookback, - Callback> callback) { + Callback> callback) { long beginMillis = lookback != null ? Math.max(endTs - lookback, EARLIEST_MS) : EARLIEST_MS; // We just return all dependencies in the days that fall within endTs and lookback as @@ -291,6 +300,6 @@ public void getRawTrace(long traceIdHigh, long traceIdLow, Callback> void getDependencies(List indices, Callback> callback) { SearchRequest request = SearchRequest.create(indices, DEPENDENCY_LINK); - search.newCall(request, BodyConverters.DEPENDENCY_LINKS).submit(callback); + search.newCall(request, DEPENDENCY_LINKS).submit(callback); } } diff --git a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/LegacyJsonAdapters.java b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/LegacyJsonAdapters.java index fc14b70c32e..f7a31452de2 100644 --- a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/LegacyJsonAdapters.java +++ b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/LegacyJsonAdapters.java @@ -18,20 +18,23 @@ import com.squareup.moshi.JsonReader; import com.squareup.moshi.JsonWriter; import java.io.IOException; +import java.nio.charset.Charset; import javax.annotation.Nonnull; import javax.annotation.Nullable; import okio.Buffer; import okio.ByteString; import zipkin.Annotation; import zipkin.BinaryAnnotation; +import zipkin.DependencyLink; import zipkin.Endpoint; import zipkin.Span; -import zipkin.internal.Util; +import zipkin.internal.V2SpanConverter; -import static zipkin.internal.Util.UTF_8; import static zipkin.internal.Util.lowerHexToUnsignedLong; final class LegacyJsonAdapters { + static final Charset UTF_8 = Charset.forName("UTF-8"); + static final JsonAdapter SPAN_ADAPTER = new JsonAdapter() { @Override @Nullable public Span fromJson(JsonReader reader) throws IOException { @@ -55,10 +58,10 @@ public Span fromJson(JsonReader reader) throws IOException { result.name(reader.nextString()); break; case "id": 
- result.id(Util.lowerHexToUnsignedLong(reader.nextString())); + result.id(lowerHexToUnsignedLong(reader.nextString())); break; case "parentId": - result.parentId(Util.lowerHexToUnsignedLong(reader.nextString())); + result.parentId(lowerHexToUnsignedLong(reader.nextString())); break; case "timestamp": result.timestamp(reader.nextLong()); @@ -98,80 +101,80 @@ public void toJson(JsonWriter writer, @Nullable Span value) throws IOException { }; static final JsonAdapter BINARY_ANNOTATION_ADAPTER = new JsonAdapter() { - @Override @Nullable - public BinaryAnnotation fromJson(JsonReader reader) throws IOException { - BinaryAnnotation.Builder result = BinaryAnnotation.builder(); - String number = null; - String string = null; - BinaryAnnotation.Type type = BinaryAnnotation.Type.STRING; - reader.beginObject(); - while (reader.hasNext()) { - switch (reader.nextName()) { - case "key": - result.key(reader.nextString()); + @Override @Nullable + public BinaryAnnotation fromJson(JsonReader reader) throws IOException { + BinaryAnnotation.Builder result = BinaryAnnotation.builder(); + String number = null; + String string = null; + BinaryAnnotation.Type type = BinaryAnnotation.Type.STRING; + reader.beginObject(); + while (reader.hasNext()) { + switch (reader.nextName()) { + case "key": + result.key(reader.nextString()); + break; + case "value": + switch (reader.peek()) { + case BOOLEAN: + type = BinaryAnnotation.Type.BOOL; + result.value(reader.nextBoolean() ? new byte[] {1} : new byte[] {0}); + break; + case STRING: + string = reader.nextString(); + break; + case NUMBER: + number = reader.nextString(); + break; + default: + throw new JsonDataException( + "Expected value to be a boolean, string or number but was " + reader.peek() + + " at path " + reader.getPath()); + } + break; + case "type": + type = BinaryAnnotation.Type.valueOf(reader.nextString()); + break; + case "endpoint": + result.endpoint(ENDPOINT_ADAPTER.fromJson(reader)); + break; + default: + reader.skipValue(); + } + } + reader.endObject(); + result.type(type); + switch (type) { + case BOOL: + return result.build(); + case STRING: + return result.value(string.getBytes(UTF_8)).build(); + case BYTES: + return result.value(ByteString.decodeBase64(string).toByteArray()).build(); + default: break; - case "value": - switch (reader.peek()) { - case BOOLEAN: - type = BinaryAnnotation.Type.BOOL; - result.value(reader.nextBoolean() ? new byte[] {1} : new byte[] {0}); - break; - case STRING: - string = reader.nextString(); - break; - case NUMBER: - number = reader.nextString(); - break; - default: - throw new JsonDataException( - "Expected value to be a boolean, string or number but was " + reader.peek() - + " at path " + reader.getPath()); - } + } + Buffer buffer = new Buffer(); + switch (type) { + case I16: + buffer.writeShort(Short.parseShort(number)); break; - case "type": - type = BinaryAnnotation.Type.valueOf(reader.nextString()); + case I32: + buffer.writeInt(Integer.parseInt(number)); break; - case "endpoint": - result.endpoint(ENDPOINT_ADAPTER.fromJson(reader)); + case I64: + case DOUBLE: + if (number == null) number = string; + long v = type == BinaryAnnotation.Type.I64 + ? 
Long.parseLong(number) + : Double.doubleToRawLongBits(Double.parseDouble(number)); + buffer.writeLong(v); break; default: - reader.skipValue(); + throw new AssertionError( + "BinaryAnnotationType " + type + " was added, but not handled"); } + return result.value(buffer.readByteArray()).build(); } - reader.endObject(); - result.type(type); - switch (type) { - case BOOL: - return result.build(); - case STRING: - return result.value(string.getBytes(UTF_8)).build(); - case BYTES: - return result.value(ByteString.decodeBase64(string).toByteArray()).build(); - default: - break; - } - Buffer buffer = new Buffer(); - switch (type) { - case I16: - buffer.writeShort(Short.parseShort(number)); - break; - case I32: - buffer.writeInt(Integer.parseInt(number)); - break; - case I64: - case DOUBLE: - if (number == null) number = string; - long v = type == BinaryAnnotation.Type.I64 - ? Long.parseLong(number) - : Double.doubleToRawLongBits(Double.parseDouble(number)); - buffer.writeLong(v); - break; - default: - throw new AssertionError( - "BinaryAnnotationType " + type + " was added, but not handled"); - } - return result.value(buffer.readByteArray()).build(); - } @Override public void toJson(JsonWriter writer, @Nullable BinaryAnnotation value) throws IOException { @@ -244,4 +247,16 @@ public void toJson(JsonWriter writer, @Nullable Endpoint value) throws IOExcepti throw new UnsupportedOperationException(); } }.nullSafe(); + + static final JsonAdapter LINK_ADAPTER = new JsonAdapter() { + @Nonnull @Override public DependencyLink fromJson(JsonReader reader) throws IOException { + zipkin.internal.v2.DependencyLink result = + JsonAdapters.DEPENDENCY_LINK_ADAPTER.fromJson(reader); + return V2SpanConverter.toLink(result); + } + + @Override public void toJson(JsonWriter writer, @Nullable DependencyLink value) { + throw new UnsupportedOperationException(); + } + }; } diff --git a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/PseudoAddressRecordSet.java b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/PseudoAddressRecordSet.java index c248520b024..7bcc0b30f27 100644 --- a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/PseudoAddressRecordSet.java +++ b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/PseudoAddressRecordSet.java @@ -24,8 +24,6 @@ import okhttp3.Dns; import okhttp3.HttpUrl; -import static zipkin.internal.Util.checkArgument; - /** * This returns a Dns provider that combines the IPv4 or IPv6 addresses from a supplied list of * urls, provided they are all http and share the same port. 
@@ -61,9 +59,12 @@ static Dns create(List urls, Dns actualDns) { ports.add(httpUrl.port()); } - checkArgument(ports.size() == 1, "Only one port supported with multiple hosts %s", urls); - checkArgument(schemes.size() == 1 && schemes.iterator().next().equals("http"), - "Only http supported with multiple hosts %s", urls); + if (ports.size() != 1) { + throw new IllegalArgumentException("Only one port supported with multiple hosts " + urls); + } + if (schemes.size() != 1 || !schemes.iterator().next().equals("http")) { + throw new IllegalArgumentException("Only http supported with multiple hosts " + urls); + } if (hosts.isEmpty()) return new StaticDns(ipAddresses); return new ConcatenatingDns(ipAddresses, hosts, actualDns); diff --git a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/VersionSpecificTemplates.java b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/VersionSpecificTemplates.java index 79e3f83b985..103937387ab 100644 --- a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/VersionSpecificTemplates.java +++ b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/VersionSpecificTemplates.java @@ -14,6 +14,7 @@ package zipkin.storage.elasticsearch.http; import com.squareup.moshi.JsonReader; +import java.io.IOException; import java.util.logging.Logger; import okhttp3.Request; import zipkin.storage.elasticsearch.http.internal.client.HttpCall; @@ -123,7 +124,7 @@ final class VersionSpecificTemplates { + " \"mappings\": {\"" + DEPENDENCY + "\": { \"enabled\": false }}\n" + "}"; - IndexTemplates get(HttpCall.Factory callFactory) { + IndexTemplates get(HttpCall.Factory callFactory) throws IOException { float version = getVersion(callFactory); return IndexTemplates.builder() .version(version) @@ -132,9 +133,9 @@ IndexTemplates get(HttpCall.Factory callFactory) { .build(); } - static float getVersion(HttpCall.Factory callFactory) { + static float getVersion(HttpCall.Factory callFactory) throws IOException { Request getNode = new Request.Builder().url(callFactory.baseUrl).tag("get-node").build(); - return callFactory.execute(getNode, b -> { + return callFactory.newCall(getNode, b -> { JsonReader version = enterPath(JsonReader.of(b), "version", "number"); if (version == null) throw new IllegalStateException(".version.number not in response"); String versionString = version.nextString(); @@ -143,7 +144,7 @@ static float getVersion(HttpCall.Factory callFactory) { LOG.warning("Please upgrade to Elasticsearch 2 or later. 
version=" + versionString); } return result; - }); + }).execute(); } private String versionSpecificSpanIndexTemplate(float version) { diff --git a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/internal/client/HttpCall.java b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/internal/client/HttpCall.java index 4142960888e..2a93b43bbae 100644 --- a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/internal/client/HttpCall.java +++ b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/internal/client/HttpCall.java @@ -24,12 +24,9 @@ import okio.BufferedSource; import okio.GzipSource; import okio.Okio; -import zipkin.internal.CallbackCaptor; import zipkin.internal.v2.Call; import zipkin.internal.v2.Callback; -import static zipkin.internal.Util.propagateIfFatal; - public final class HttpCall extends Call { public interface BodyConverter { @@ -49,12 +46,6 @@ public HttpCall newCall(Request request, BodyConverter bodyConverter) return new HttpCall<>(this, request, bodyConverter); } - public V execute(Request request, BodyConverter bodyConverter) { - CallbackCaptor response = new CallbackCaptor<>(); - newCall(request, bodyConverter).submit(response); - return response.get(); - } - @Override public void close() { ok.dispatcher().executorService().shutdownNow(); } diff --git a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/internal/client/SearchRequest.java b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/internal/client/SearchRequest.java index 751910555eb..28748c43ce0 100644 --- a/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/internal/client/SearchRequest.java +++ b/zipkin-storage/elasticsearch-http/src/main/java/zipkin/storage/elasticsearch/http/internal/client/SearchRequest.java @@ -54,18 +54,6 @@ public static SearchRequest create(List indices, String type) { this.type = type; } - public static class Should extends LinkedList { - public Should addTerm(String field, String value) { - add(new Term(field, value)); - return this; - } - - public Should addExists(String field) { - add(new Exists(field)); - return this; - } - } - public static class Filters extends LinkedList { public Filters addRange(String field, long from, Long to) { add(new Range(field, from, to)); @@ -77,12 +65,6 @@ public Filters addTerm(String field, String value) { return this; } - public Should should() { - Should result = new Should(); - add(new SearchRequest.BoolQuery("should", result)); - return result; - } - public Filters addNestedTerms(Collection nestedFields, String value) { add(_nestedTermsEqual(nestedFields, value)); return this; diff --git a/zipkin-storage/elasticsearch-http/src/test/java/zipkin/moshi/JsonReadersTest.java b/zipkin-storage/elasticsearch-http/src/test/java/zipkin/moshi/JsonReadersTest.java index 48a7f188d73..94d6af58626 100644 --- a/zipkin-storage/elasticsearch-http/src/test/java/zipkin/moshi/JsonReadersTest.java +++ b/zipkin-storage/elasticsearch-http/src/test/java/zipkin/moshi/JsonReadersTest.java @@ -15,7 +15,7 @@ import com.squareup.moshi.JsonReader; import java.io.IOException; -import java.util.Set; +import java.util.List; import okio.Buffer; import org.junit.Test; @@ -50,7 +50,7 @@ public void enterPath_nullOnNoInput() throws IOException { @Test public void collectValuesNamed_emptyWhenNotFound() throws IOException { - Set result = 
JsonReaders.collectValuesNamed(JsonReader.of(new Buffer().writeUtf8( + List result = JsonReaders.collectValuesNamed(JsonReader.of(new Buffer().writeUtf8( "{\"took\":1,\"timed_out\":false,\"_shards\":{\"total\":0,\"successful\":0,\"failed\":0},\"hits\":{\"total\":0,\"max_score\":0.0,\"hits\":[]}}" )), "key"); @@ -59,7 +59,7 @@ public void collectValuesNamed_emptyWhenNotFound() throws IOException { @Test public void collectValuesNamed_mergesArrays() throws IOException { - Set result = + List result = JsonReaders.collectValuesNamed(JsonReader.of(new Buffer().writeUtf8(SPAN_NAMES)), "key"); assertThat(result).containsExactly("methodcall", "yak"); @@ -67,7 +67,7 @@ public void collectValuesNamed_mergesArrays() throws IOException { @Test public void collectValuesNamed_mergesChildren() throws IOException { - Set result = + List result = JsonReaders.collectValuesNamed(JsonReader.of(new Buffer().writeUtf8(SERVICE_NAMES)), "key"); assertThat(result).containsExactly("yak", "service"); @@ -75,7 +75,7 @@ public void collectValuesNamed_mergesChildren() throws IOException { @Test public void collectValuesNamed_nested() throws IOException { - Set result = JsonReaders.collectValuesNamed(JsonReader.of(new Buffer().writeUtf8("{\n" + List result = JsonReaders.collectValuesNamed(JsonReader.of(new Buffer().writeUtf8("{\n" + " \"took\": 49,\n" + " \"timed_out\": false,\n" + " \"_shards\": {\n" diff --git a/zipkin-storage/elasticsearch-http/src/test/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpSpanConsumerTest.java b/zipkin-storage/elasticsearch-http/src/test/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpSpanConsumerTest.java index d3e8ddd64fc..a6bded81f8e 100644 --- a/zipkin-storage/elasticsearch-http/src/test/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpSpanConsumerTest.java +++ b/zipkin-storage/elasticsearch-http/src/test/java/zipkin/storage/elasticsearch/http/ElasticsearchHttpSpanConsumerTest.java @@ -25,13 +25,11 @@ import zipkin.internal.v2.Endpoint; import zipkin.internal.v2.Span; import zipkin.internal.v2.Span.Kind; -import zipkin.internal.v2.codec.BytesDecoder; -import zipkin.internal.v2.codec.BytesEncoder; +import zipkin.internal.v2.codec.SpanBytesCodec; import static java.util.Arrays.asList; import static org.assertj.core.api.Assertions.assertThat; import static zipkin.TestObjects.TODAY; -import static zipkin.internal.Util.UTF_8; import static zipkin.storage.elasticsearch.http.ElasticsearchHttpSpanConsumer.prefixWithTimestampMillisAndQuery; public class ElasticsearchHttpSpanConsumerTest { @@ -81,7 +79,7 @@ public void close() throws IOException { byte[] result = prefixWithTimestampMillisAndQuery(span, span.timestamp()); - assertThat(new String(result, UTF_8)) + assertThat(new String(result, "UTF-8")) .startsWith("{\"traceId\":\""); } @@ -93,7 +91,7 @@ public void close() throws IOException { byte[] result = prefixWithTimestampMillisAndQuery(span, span.timestamp()); - assertThat(new String(result, UTF_8)) + assertThat(new String(result, "UTF-8")) .startsWith("{\"timestamp_millis\":1,\"traceId\":"); } @@ -105,7 +103,7 @@ public void close() throws IOException { byte[] result = prefixWithTimestampMillisAndQuery(span, span.timestamp()); - assertThat(new String(result, UTF_8)) + assertThat(new String(result, "UTF-8")) .startsWith("{\"_q\":[\"\\\"foo\"],\"traceId"); } @@ -117,7 +115,7 @@ public void close() throws IOException { byte[] result = prefixWithTimestampMillisAndQuery(span, span.timestamp()); - assertThat(new String(result, UTF_8)) + assertThat(new String(result, 
"UTF-8")) .startsWith("{\"_q\":[\"\\\"foo\",\"\\\"foo=\\\"bar\"],\"traceId"); } @@ -125,7 +123,7 @@ public void close() throws IOException { Span span = Span.newBuilder().traceId("20").id("20").name("get") .timestamp(TODAY * 1000).build(); - assertThat(BytesDecoder.JSON.decode(prefixWithTimestampMillisAndQuery(span, span.timestamp()))) + assertThat(SpanBytesCodec.JSON.decode(prefixWithTimestampMillisAndQuery(span, span.timestamp()))) .isEqualTo(span); // ignores timestamp_millis field } @@ -146,7 +144,7 @@ public void close() throws IOException { accept(Span.newBuilder().traceId("1").id("1").name("foo").build()); assertThat(es.takeRequest().getBody().readByteString().utf8()) - .contains("\n" + new String(BytesEncoder.JSON.encode(span), UTF_8) + "\n"); + .contains("\n" + new String(SpanBytesCodec.JSON.encode(span), "UTF-8") + "\n"); } @Test public void traceIsSearchableByServerServiceName() throws Exception { diff --git a/zipkin-storage/elasticsearch-http/src/test/java/zipkin/storage/elasticsearch/http/JsonAdaptersTest.java b/zipkin-storage/elasticsearch-http/src/test/java/zipkin/storage/elasticsearch/http/JsonAdaptersTest.java index 0d39dd62c51..46df03f12b9 100644 --- a/zipkin-storage/elasticsearch-http/src/test/java/zipkin/storage/elasticsearch/http/JsonAdaptersTest.java +++ b/zipkin-storage/elasticsearch-http/src/test/java/zipkin/storage/elasticsearch/http/JsonAdaptersTest.java @@ -16,14 +16,14 @@ import java.io.IOException; import okio.Buffer; import org.junit.Test; -import zipkin.Codec; -import zipkin.DependencyLink; import zipkin.TestObjects; import zipkin.internal.ApplyTimestampAndDuration; import zipkin.internal.V2SpanConverter; +import zipkin.internal.v2.DependencyLink; import zipkin.internal.v2.Endpoint; import zipkin.internal.v2.Span; -import zipkin.internal.v2.codec.BytesEncoder; +import zipkin.internal.v2.codec.DependencyLinkBytesCodec; +import zipkin.internal.v2.codec.SpanBytesEncoder; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.entry; @@ -133,7 +133,7 @@ public void span_roundTrip() throws IOException { zipkin.Span span = ApplyTimestampAndDuration.apply(TestObjects.LOTS_OF_SPANS[0]); Span span2 = V2SpanConverter.fromSpan(span).get(0); Buffer bytes = new Buffer(); - bytes.write(BytesEncoder.JSON.encode(span2)); + bytes.write(SpanBytesEncoder.JSON.encode(span2)); assertThat(SPAN_ADAPTER.fromJson(bytes)) .isEqualTo(span2); } @@ -158,7 +158,7 @@ public void span_specialCharsInJson() throws IOException { .build(); Buffer bytes = new Buffer(); - bytes.write(BytesEncoder.JSON.encode(worstSpanInTheWorld)); + bytes.write(SpanBytesEncoder.JSON.encode(worstSpanInTheWorld)); assertThat(SPAN_ADAPTER.fromJson(bytes)) .isEqualTo(worstSpanInTheWorld); } @@ -230,24 +230,27 @@ public void span_readsTraceIdHighFromTraceIdField() throws IOException { @Test public void dependencyLinkRoundTrip() throws IOException { - DependencyLink link = DependencyLink.create("foo", "bar", 2); + DependencyLink link = DependencyLink.newBuilder() + .parent("foo") + .child("bar") + .callCount(2).build(); Buffer bytes = new Buffer(); - bytes.write(Codec.JSON.writeDependencyLink(link)); + bytes.write(DependencyLinkBytesCodec.JSON.encode(link)); assertThat(JsonAdapters.DEPENDENCY_LINK_ADAPTER.fromJson(bytes)) .isEqualTo(link); } @Test public void dependencyLinkRoundTrip_withError() throws IOException { - DependencyLink link = DependencyLink.builder() + DependencyLink link = DependencyLink.newBuilder() .parent("foo") .child("bar") .callCount(2) 
.errorCount(1).build(); Buffer bytes = new Buffer(); - bytes.write(Codec.JSON.writeDependencyLink(link)); + bytes.write(DependencyLinkBytesCodec.JSON.encode(link)); assertThat(JsonAdapters.DEPENDENCY_LINK_ADAPTER.fromJson(bytes)) .isEqualTo(link); } diff --git a/zipkin/src/main/java/zipkin/collector/Collector.java b/zipkin/src/main/java/zipkin/collector/Collector.java index 7e4180a55ac..6d75ae260c9 100644 --- a/zipkin/src/main/java/zipkin/collector/Collector.java +++ b/zipkin/src/main/java/zipkin/collector/Collector.java @@ -23,7 +23,7 @@ import zipkin.internal.V2SpanConverter; import zipkin.internal.V2StorageComponent; import zipkin.internal.v2.Span; -import zipkin.internal.v2.codec.BytesDecoder; +import zipkin.internal.v2.codec.SpanBytesCodec; import zipkin.storage.Callback; import zipkin.storage.StorageComponent; @@ -108,7 +108,7 @@ public void acceptSpans(byte[] serializedSpans, SpanDecoder decoder, Callback= 2 && - // Is this the end of a u2028 or u2028 UTF-8 codepoint? - // 0xE2 0x80 0xA8 == u2028; 0xE2 0x80 0xA9 == u2028 - (current == 0xA8 || current == 0xA9) - && (v[i - 1] & 0xFF) == 0x80 - && (v[i - 2] & 0xFF) == 0xE2) { - return true; - } else if (current < 0x80 && REPLACEMENT_CHARS[current] != null) { - return true; - } - } - return false; // must be a string we don't need to escape. - } - - static int jsonEscapedSizeInBytes(byte[] v) { - return needsJsonEscaping(v) ? jsonEscapedSizeInBytes(new String(v, Util.UTF_8)) : v.length; - } - - public static int jsonEscapedSizeInBytes(String v) { - boolean ascii = true; - int escapingOverhead = 0; - for (int i = 0, length = v.length(); i < length; i++) { - char c = v.charAt(i); - if (c == '\u2028' || c == '\u2029') { - escapingOverhead += 5; - } else if (c >= 0x80) { - ascii = false; - } else { - String maybeReplacement = REPLACEMENT_CHARS[c]; - if (maybeReplacement != null) escapingOverhead += maybeReplacement.length() - 1; - } - } - if (ascii) return v.length() + escapingOverhead; - return utf8SizeInBytes(v) + escapingOverhead; - } - - Buffer writeJsonEscaped(byte[] v) { - return needsJsonEscaping(v) ? 
writeJsonEscaped(new String(v, Util.UTF_8)) : write(v); - } - - public Buffer writeJsonEscaped(String v) { - return writeUtf8(jsonEscape(v)); - } - - static String jsonEscape(String v) { - int afterReplacement = 0; - int length = v.length(); - StringBuilder builder = null; - for (int i = 0; i < length; i++) { - char c = v.charAt(i); - String replacement; - if (c < 0x80) { - replacement = REPLACEMENT_CHARS[c]; - if (replacement == null) continue; - } else if (c == '\u2028') { - replacement = U2028; - } else if (c == '\u2029') { - replacement = U2029; - } else { - continue; - } - if (afterReplacement < i) { // write characters between the last replacement and now - if (builder == null) builder = new StringBuilder(); - builder.append(v, afterReplacement, i); - } - if (builder == null) builder = new StringBuilder(); - builder.append(replacement); - afterReplacement = i + 1; - } - String escaped; - if (builder == null) { // then we didn't escape anything - escaped = v; - } else { - if (afterReplacement < length) { - builder.append(v, afterReplacement, length); - } - escaped = builder.toString(); - } - return escaped; - } - - Buffer writeUtf8(String v) { - if (isAscii(v)) return writeAscii(v); - byte[] temp = v.getBytes(Util.UTF_8); - write(temp); - return this; - } - - public Buffer writeLowerHex(long v) { - writeHexByte((byte) ((v >>> 56L) & 0xff)); - writeHexByte((byte) ((v >>> 48L) & 0xff)); - writeHexByte((byte) ((v >>> 40L) & 0xff)); - writeHexByte((byte) ((v >>> 32L) & 0xff)); - writeHexByte((byte) ((v >>> 24L) & 0xff)); - writeHexByte((byte) ((v >>> 16L) & 0xff)); - writeHexByte((byte) ((v >>> 8L) & 0xff)); - writeHexByte((byte) (v & 0xff)); - return this; - } - - // the code to get the size of ipv6 is long and basically the same as encoding it. - public static int ipv6SizeInBytes(byte[] ipv6) { - int result = IPV6_SIZE.get().writeIpV6(ipv6).pos; - IPV6_SIZE.get().pos = 0; - return result; - } - - private static final ThreadLocal IPV6_SIZE = new ThreadLocal() { - @Override protected Buffer initialValue() { - return new Buffer(39); // maximum length of encoded ipv6 - } - }; - - public Buffer writeIpV6(byte[] ipv6) { - // Compress the longest string of zeros - int zeroCompressionIndex = -1; - int zeroCompressionLength = -1; - int zeroIndex = -1; - boolean allZeros = true; - for (int i = 0; i < ipv6.length; i += 2) { - if (ipv6[i] == 0 && ipv6[i + 1] == 0) { - if (zeroIndex < 0) zeroIndex = i; - continue; - } - allZeros = false; - if (zeroIndex >= 0) { - int zeroLength = i - zeroIndex; - if (zeroLength > zeroCompressionLength) { - zeroCompressionIndex = zeroIndex; - zeroCompressionLength = zeroLength; - } - zeroIndex = -1; - } - } - - // handle all zeros: 0:0:0:0:0:0:0:0 -> :: - if (allZeros) { - buf[pos++] = ':'; - buf[pos++] = ':'; - return this; - } - - // handle trailing zeros: 2001:0:0:4:0:0:0:0 -> 2001:0:0:4:: - if (zeroCompressionIndex == -1 && zeroIndex != -1) { - zeroCompressionIndex = zeroIndex; - zeroCompressionLength = 16 - zeroIndex; - } - - int i = 0; - while (i < ipv6.length) { - if (i == zeroCompressionIndex) { - buf[pos++] = ':'; - i += zeroCompressionLength; - if (i == ipv6.length) buf[pos++] = ':'; - continue; - } - if (i != 0) buf[pos++] = ':'; - - byte high = ipv6[i++]; - byte low = ipv6[i++]; - - // handle leading zeros: 2001:0:0:4:0000:0:0:8 -> 2001:0:0:4::8 - boolean leadingZero; - byte val = HEX_DIGITS[(high >> 4) & 0xf]; - if (!(leadingZero = val == '0')) buf[pos++] = val; - val = HEX_DIGITS[high & 0xf]; - if (!(leadingZero = (leadingZero && val == '0'))) 
buf[pos++] = val; - val = HEX_DIGITS[(low >> 4) & 0xf]; - if (!(leadingZero && val == '0')) buf[pos++] = val; - buf[pos++] = HEX_DIGITS[low & 0xf]; - } - return this; - } - - /** - * Binary search for character width which favors matching lower numbers. - * - *
<p>
Adapted from okio.Buffer - */ - public static int asciiSizeInBytes(long v) { - if (v == 0) return 1; - if (v == Long.MIN_VALUE) return 20; - - boolean negative = false; - if (v < 0) { - v = -v; // making this positive allows us to compare using less-than - negative = true; - } - int width = - v < 100000000L - ? v < 10000L - ? v < 100L - ? v < 10L ? 1 : 2 - : v < 1000L ? 3 : 4 - : v < 1000000L - ? v < 100000L ? 5 : 6 - : v < 10000000L ? 7 : 8 - : v < 1000000000000L - ? v < 10000000000L - ? v < 1000000000L ? 9 : 10 - : v < 100000000000L ? 11 : 12 - : v < 1000000000000000L - ? v < 10000000000000L ? 13 - : v < 100000000000000L ? 14 : 15 - : v < 100000000000000000L - ? v < 10000000000000000L ? 16 : 17 - : v < 1000000000000000000L ? 18 : 19; - return negative ? width + 1 : width; // conditionally add room for negative sign - } - - public Buffer writeAscii(long v) { - if (v == 0) return writeByte('0'); - if (v == Long.MIN_VALUE) return writeAscii("-9223372036854775808"); - - int width = asciiSizeInBytes(v); - int pos = this.pos += width; // We write backwards from right to left. - - boolean negative = false; - if (v < 0) { - negative = true; - v = -v; // needs to be positive so we can use this for an array index - } - while (v != 0) { - int digit = (int) (v % 10); - buf[--pos] = HEX_DIGITS[digit]; - v /= 10; - } - if (negative) buf[--pos] = '-'; - return this; - } - - static final byte[] HEX_DIGITS = - {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; - - void writeHexByte(byte b) { - buf[pos++] = HEX_DIGITS[(b >> 4) & 0xf]; - buf[pos++] = HEX_DIGITS[b & 0xf]; - } - byte[] toByteArray() { //assert pos == buf.length; return buf; diff --git a/zipkin/src/main/java/zipkin/internal/CorrectForClockSkew.java b/zipkin/src/main/java/zipkin/internal/CorrectForClockSkew.java index bfb4d8aeaff..9341233c793 100644 --- a/zipkin/src/main/java/zipkin/internal/CorrectForClockSkew.java +++ b/zipkin/src/main/java/zipkin/internal/CorrectForClockSkew.java @@ -26,6 +26,7 @@ import zipkin.Constants; import zipkin.Endpoint; import zipkin.Span; +import zipkin.internal.v2.internal.Node; import static java.lang.String.format; import static java.util.logging.Level.FINE; diff --git a/zipkin/src/main/java/zipkin/internal/DependencyLinker.java b/zipkin/src/main/java/zipkin/internal/DependencyLinker.java index 185efa461e6..d836bb98c9a 100644 --- a/zipkin/src/main/java/zipkin/internal/DependencyLinker.java +++ b/zipkin/src/main/java/zipkin/internal/DependencyLinker.java @@ -13,42 +13,31 @@ */ package zipkin.internal; -import java.util.ArrayList; import java.util.Collection; import java.util.Iterator; -import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; -import java.util.Map; -import java.util.logging.Logger; -import javax.annotation.Nullable; import zipkin.DependencyLink; import zipkin.internal.v2.Span; import zipkin.internal.v2.Span.Kind; -import static java.util.logging.Level.FINE; -import static zipkin.Constants.ERROR; +import static zipkin.internal.V2SpanConverter.fromLinks; +import static zipkin.internal.V2SpanConverter.toLinks; /** * This parses a span tree into dependency links used by Web UI. Ex. http://zipkin/dependency * *
<p>
This implementation traverses the tree, and only creates links between {@link Kind#SERVER * server} spans. One exception is at the bottom of the trace tree. {@link Kind#CLIENT client} spans - * that record their {@link Span#remoteEndpoint()} are included, as this accounts for uninstrumented - * services. Spans with {@link Span#kind()} unset, but {@link Span#remoteEndpoint()} set are treated - * the same as client spans. + * that record their {@link zipkin.internal.v2.Span#remoteEndpoint()} are included, as this accounts + * for uninstrumented services. Spans with {@link zipkin.internal.v2.Span#kind()} unset, but {@link + * zipkin.internal.v2.Span#remoteEndpoint()} set are treated the same as client spans. */ public final class DependencyLinker { - private final Logger logger; - private final Map, Long> callCounts = new LinkedHashMap<>(); - private final Map, Long> errorCounts = new LinkedHashMap<>(); + private final zipkin.internal.v2.internal.DependencyLinker delegate; public DependencyLinker() { - this(Logger.getLogger(DependencyLinker.class.getName())); - } - - DependencyLinker(Logger logger) { - this.logger = logger; + this.delegate = new zipkin.internal.v2.internal.DependencyLinker(); } /** @@ -61,219 +50,24 @@ public DependencyLinker putTrace(Collection spans) { for (zipkin.Span s : MergeById.apply(spans)) { linkSpans.addAll(V2SpanConverter.fromSpan(s)); } - return putTrace(linkSpans.iterator()); - } - - static final Node.MergeFunction MERGE_RPC = new MergeRpc(); - - static final class MergeRpc implements Node.MergeFunction { - @Override public Span merge(@Nullable Span left, @Nullable Span right) { - if (left == null) return right; - if (right == null) return left; - if (left.kind() == null) { - return copyError(left, right); - } - if (right.kind() == null) { - return copyError(right, left); - } - Span server = left.kind() == Kind.SERVER ? left : right; - Span client = left == server ? 
right : left; - if (server.remoteServiceName() != null) { - return copyError(client, server); - } - return copyError(client, server).toBuilder().remoteEndpoint(client.localEndpoint()).build(); - } - - static Span copyError(Span maybeError, Span result) { - if (maybeError.tags().containsKey(ERROR)) { - return result.toBuilder().putTag(ERROR, maybeError.tags().get(ERROR)).build(); - } - return result; - } + delegate.putTrace(linkSpans.iterator()); + return this; } /** * @param spans spans where all spans have the same trace id */ public DependencyLinker putTrace(Iterator spans) { - if (!spans.hasNext()) return this; - - Span first = spans.next(); - Node.TreeBuilder builder = - new Node.TreeBuilder<>(logger, MERGE_RPC, first.traceId()); - builder.addNode(first.parentId(), first.id(), first); - while (spans.hasNext()) { - Span next = spans.next(); - builder.addNode(next.parentId(), next.id(), next); - } - Node tree = builder.build(); - - if (logger.isLoggable(FINE)) logger.fine("traversing trace tree, breadth-first"); - for (Iterator> i = tree.traverse(); i.hasNext(); ) { - Node current = i.next(); - Span currentSpan = current.value(); - if (logger.isLoggable(FINE)) { - logger.fine("processing " + currentSpan); - } - if (current.isSyntheticRootForPartialTree()) { - logger.fine("skipping synthetic node for broken span tree"); - continue; - } - - Kind kind = currentSpan.kind(); - if (Kind.CLIENT.equals(kind) && !current.children().isEmpty()) { - logger.fine("deferring link to rpc child span"); - continue; - } - - String serviceName = currentSpan.localServiceName(); - String remoteServiceName = currentSpan.remoteServiceName(); - if (kind == null) { - // Treat unknown type of span as a client span if we know both sides - if (serviceName != null && remoteServiceName != null) { - kind = Kind.CLIENT; - } else { - logger.fine("non-rpc span; skipping"); - continue; - } - } - - String child; - String parent; - switch (kind) { - case SERVER: - case CONSUMER: - child = serviceName; - parent = remoteServiceName; - if (current == tree) { // we are the root-most span. - if (parent == null) { - logger.fine("root's peer is unknown; skipping"); - continue; - } - } - break; - case CLIENT: - case PRODUCER: - parent = serviceName; - child = remoteServiceName; - break; - default: - logger.fine("unknown kind; skipping"); - continue; - } - - boolean isError = currentSpan.tags().containsKey(ERROR); - if (kind == Kind.PRODUCER || kind == Kind.CONSUMER) { - if (parent == null || child == null) { - logger.fine("cannot link messaging span to its broker; skipping"); - } else { - addLink(parent, child, isError); - } - continue; - } - - if (logger.isLoggable(FINE) && parent == null) { - logger.fine("cannot determine parent, looking for first server ancestor"); - } - - Span rpcAncestor = findRpcAncestor(current); - String rpcAncestorName; - if (rpcAncestor != null && (rpcAncestorName = rpcAncestor.localServiceName()) != null) { - // Some users accidentally put the remote service name on client annotations. - // Check for this and backfill a link from the nearest remote to that service as necessary. 
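Aside on the DependencyLinker refactor in this file: the v1 facade keeps its public surface (putTrace, link and the static merge) while the aggregation itself now lives in zipkin.internal.v2.internal.DependencyLinker. A minimal caller-side sketch, assuming a list of v1 spans that share one trace id; the wrapper class and helper name are illustrative only, not part of this change.

import java.util.List;
import zipkin.DependencyLink;
import zipkin.Span;
import zipkin.internal.DependencyLinker;

class LinkerUsageSketch { // illustrative wrapper, not in this diff
  /** Aggregates one trace (spans sharing a trace id) into dependency links. */
  static List<DependencyLink> linkTrace(List<Span> trace) {
    return new DependencyLinker()
        .putTrace(trace) // accepts v1 spans; conversion to v2 spans happens inside putTrace
        .link();         // returns v1 links; the counting is delegated to the v2 internal linker
  }
}

Keeping the facade leaves existing callers source-compatible while the counting logic is maintained in one place, the v2 package.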
- if (kind == Kind.CLIENT && serviceName != null && !rpcAncestorName.equals(serviceName)) { - logger.fine("detected missing link to client span"); - addLink(rpcAncestorName, serviceName, false); // we don't know if there's an error here - } - - // Local spans may be between the current node and its remote parent - if (parent == null) parent = rpcAncestorName; - - // When an RPC is split between spans, we skip the child (server side). If our parent is a - // client, we need to check it for errors. - if (!isError && Kind.CLIENT.equals(rpcAncestor.kind()) && - currentSpan.parentId() != null && currentSpan.parentId().equals(rpcAncestor.id())) { - isError = rpcAncestor.tags().containsKey(ERROR); - } - } - - if (parent == null || child == null) { - logger.fine("cannot find server ancestor; skipping"); - continue; - } - - addLink(parent, child, isError); - } + delegate.putTrace(spans); return this; } - Span findRpcAncestor(Node current) { - Node ancestor = current.parent(); - while (ancestor != null) { - if (logger.isLoggable(FINE)) { - logger.fine("processing ancestor " + ancestor.value()); - } - if (!ancestor.isSyntheticRootForPartialTree()) { - Span maybeRemote = ancestor.value(); - if (maybeRemote.kind() != null) return maybeRemote; - } - ancestor = ancestor.parent(); - } - return null; - } - - void addLink(String parent, String child, boolean isError) { - if (logger.isLoggable(FINE)) { - logger.fine("incrementing " + (isError ? "error " : "") + "link " + parent + " -> " + child); - } - Pair key = Pair.create(parent, child); - if (callCounts.containsKey(key)) { - callCounts.put(key, callCounts.get(key) + 1); - } else { - callCounts.put(key, 1L); - } - if (!isError) return; - if (errorCounts.containsKey(key)) { - errorCounts.put(key, errorCounts.get(key) + 1); - } else { - errorCounts.put(key, 1L); - } - } - public List link() { - return link(callCounts, errorCounts); + return toLinks(delegate.link()); } /** links are merged by mapping to parent/child and summing corresponding links */ public static List merge(Iterable in) { - Map, Long> callCounts = new LinkedHashMap<>(); - Map, Long> errorCounts = new LinkedHashMap<>(); - - for (DependencyLink link : in) { - Pair parentChild = Pair.create(link.parent, link.child); - long callCount = callCounts.containsKey(parentChild) ? callCounts.get(parentChild) : 0L; - callCount += link.callCount; - callCounts.put(parentChild, callCount); - long errorCount = errorCounts.containsKey(parentChild) ? errorCounts.get(parentChild) : 0L; - errorCount += link.errorCount; - errorCounts.put(parentChild, errorCount); - } - - return link(callCounts, errorCounts); - } - - static List link(Map, Long> callCounts, - Map, Long> errorCounts) { - List result = new ArrayList<>(callCounts.size()); - for (Map.Entry, Long> entry : callCounts.entrySet()) { - Pair parentChild = entry.getKey(); - result.add(DependencyLink.builder() - .parent(parentChild._1) - .child(parentChild._2) - .callCount(entry.getValue()) - .errorCount(errorCounts.containsKey(parentChild) ? 
errorCounts.get(parentChild) : 0L) - .build()); - } - return result; + return toLinks(zipkin.internal.v2.internal.DependencyLinker.merge(fromLinks(in))); } } diff --git a/zipkin/src/main/java/zipkin/internal/JsonCodec.java b/zipkin/src/main/java/zipkin/internal/JsonCodec.java index 8763a582535..9b6a7d5f158 100644 --- a/zipkin/src/main/java/zipkin/internal/JsonCodec.java +++ b/zipkin/src/main/java/zipkin/internal/JsonCodec.java @@ -16,9 +16,7 @@ import com.google.gson.stream.JsonReader; import com.google.gson.stream.JsonToken; import com.google.gson.stream.MalformedJsonException; -import java.io.ByteArrayInputStream; import java.io.IOException; -import java.io.InputStreamReader; import java.nio.ByteBuffer; import java.util.Collections; import java.util.LinkedList; @@ -30,17 +28,22 @@ import zipkin.DependencyLink; import zipkin.Endpoint; import zipkin.Span; +import zipkin.internal.v2.codec.DependencyLinkBytesCodec; +import zipkin.internal.v2.internal.Buffer; +import zipkin.internal.v2.internal.JsonCodec.JsonReaderAdapter; import static java.lang.Double.doubleToRawLongBits; -import static zipkin.internal.Buffer.asciiSizeInBytes; -import static zipkin.internal.Buffer.ipv6SizeInBytes; -import static zipkin.internal.Buffer.jsonEscapedSizeInBytes; -import static zipkin.internal.Buffer.utf8SizeInBytes; import static zipkin.internal.Util.UTF_8; -import static zipkin.internal.Util.assertionError; -import static zipkin.internal.Util.checkArgument; import static zipkin.internal.Util.lowerHexToUnsignedLong; import static zipkin.internal.Util.writeBase64Url; +import static zipkin.internal.v2.internal.Buffer.asciiSizeInBytes; +import static zipkin.internal.v2.internal.JsonCodec.read; +import static zipkin.internal.v2.internal.JsonCodec.readList; +import static zipkin.internal.v2.internal.JsonCodec.write; +import static zipkin.internal.v2.internal.JsonCodec.writeList; +import static zipkin.internal.v2.internal.JsonCodec.writeNestedList; +import static zipkin.internal.v2.internal.JsonEscaper.jsonEscape; +import static zipkin.internal.v2.internal.JsonEscaper.jsonEscapedSizeInBytes; /** * This explicitly constructs instances of model classes via manual parsing for a number of @@ -81,41 +84,47 @@ public final class JsonCodec implements Codec { }; static final Buffer.Writer ENDPOINT_WRITER = new Buffer.Writer() { - @Override public int sizeInBytes(Endpoint value) { - int sizeInBytes = 0; - sizeInBytes += "{\"serviceName\":\"".length(); - sizeInBytes += jsonEscapedSizeInBytes(value.serviceName) + 1; // for end quote - if (value.ipv4 != 0) { - sizeInBytes += ",\"ipv4\":\"".length(); - sizeInBytes += asciiSizeInBytes(value.ipv4 >> 24 & 0xff) + 1; // for dot - sizeInBytes += asciiSizeInBytes(value.ipv4 >> 16 & 0xff) + 1; // for dot - sizeInBytes += asciiSizeInBytes(value.ipv4 >> 8 & 0xff) + 1; // for dot - sizeInBytes += asciiSizeInBytes(value.ipv4 & 0xff) + 1; // for end quote + @Override public int sizeInBytes(Endpoint v) { + zipkin.internal.v2.Endpoint value = V2SpanConverter.toEndpoint(v); + int sizeInBytes = 17; // {"serviceName":"" + if (value.serviceName() != null) { + sizeInBytes += jsonEscapedSizeInBytes(value.serviceName()); } - if (value.port != null && value.port != 0) { - sizeInBytes += ",\"port\":".length() + asciiSizeInBytes(value.port & 0xffff); + if (value.ipv4() != null) { + if (sizeInBytes != 1) sizeInBytes++; // , + sizeInBytes += 9; // "ipv4":"" + sizeInBytes += value.ipv4().length(); } - if (value.ipv6 != null) { - sizeInBytes += ",\"ipv6\":\"".length() + ipv6SizeInBytes(value.ipv6) + 1; + if 
(value.ipv6() != null) { + if (sizeInBytes != 1) sizeInBytes++; // , + sizeInBytes += 9; // "ipv6":"" + sizeInBytes += value.ipv6().length(); } - return ++sizeInBytes;// end curly-brace + if (value.port() != null) { + if (sizeInBytes != 1) sizeInBytes++; // , + sizeInBytes += 7; // "port": + sizeInBytes += asciiSizeInBytes(value.port()); + } + return ++sizeInBytes; // } } - @Override public void write(Endpoint value, Buffer b) { + @Override public void write(Endpoint v, Buffer b) { + zipkin.internal.v2.Endpoint value = V2SpanConverter.toEndpoint(v); b.writeAscii("{\"serviceName\":\""); - b.writeJsonEscaped(value.serviceName).writeByte('"'); - if (value.ipv4 != 0) { + if (value.serviceName() != null) { + b.writeUtf8(jsonEscape(value.serviceName())); + } + b.writeByte('"'); + if (value.ipv4() != null) { b.writeAscii(",\"ipv4\":\""); - b.writeAscii(value.ipv4 >> 24 & 0xff).writeByte('.'); - b.writeAscii(value.ipv4 >> 16 & 0xff).writeByte('.'); - b.writeAscii(value.ipv4 >> 8 & 0xff).writeByte('.'); - b.writeAscii(value.ipv4 & 0xff).writeByte('"'); + b.writeAscii(value.ipv4()).writeByte('"'); } - if (value.port != null && value.port != 0) { - b.writeAscii(",\"port\":").writeAscii(value.port & 0xffff); + if (value.ipv6() != null) { + b.writeAscii(",\"ipv6\":\""); + b.writeAscii(value.ipv6()).writeByte('"'); } - if (value.ipv6 != null) { - b.writeAscii(",\"ipv6\":\"").writeIpV6(value.ipv6).writeByte('"'); + if (value.port() != null) { + b.writeAscii(",\"port\":").writeAscii(value.port()); } b.writeByte('}'); } @@ -153,7 +162,7 @@ public final class JsonCodec implements Codec { @Override public void write(Annotation value, Buffer b) { b.writeAscii("{\"timestamp\":").writeAscii(value.timestamp); - b.writeAscii(",\"value\":\"").writeJsonEscaped(value.value).writeByte('"'); + b.writeAscii(",\"value\":\"").writeUtf8(jsonEscape(value.value)).writeByte('"'); if (value.endpoint != null) { b.writeAscii(ENDPOINT_HEADER); ENDPOINT_WRITER.write(value.endpoint, b); @@ -247,7 +256,10 @@ public final class JsonCodec implements Codec { sizeInBytes += value.value[0] == 1 ? 4 /* true */ : 5 /* false */; break; case STRING: - sizeInBytes += jsonEscapedSizeInBytes(value.value) + 2; //for quotes + int escapedSize = needsJsonEscaping(value.value) + ? jsonEscapedSizeInBytes(new String(value.value, UTF_8)) + : value.value.length; + sizeInBytes += escapedSize + 2; //for quotes break; case BYTES: sizeInBytes += (/* base64 */(value.value.length + 2) / 3 * 4) + 2; //for quotes @@ -270,7 +282,7 @@ public final class JsonCodec implements Codec { default: } if (value.type != BinaryAnnotation.Type.STRING && value.type != BinaryAnnotation.Type.BOOL) { - sizeInBytes += ",\"type\":\"".length() + utf8SizeInBytes(value.type.name()) + 1; + sizeInBytes += ",\"type\":\"".length() + value.type.name().length() + 1; } if (value.endpoint != null) { sizeInBytes += ENDPOINT_HEADER.length() + ENDPOINT_WRITER.sizeInBytes(value.endpoint); @@ -279,14 +291,14 @@ public final class JsonCodec implements Codec { } @Override public void write(BinaryAnnotation value, Buffer b) { - b.writeAscii("{\"key\":\"").writeJsonEscaped(value.key); + b.writeAscii("{\"key\":\"").writeUtf8(jsonEscape(value.key)); b.writeAscii("\",\"value\":"); switch (value.type) { case BOOL: b.writeAscii(value.value[0] == 1 ? 
"true" : "false"); break; case STRING: - b.writeByte('"').writeJsonEscaped(value.value).writeByte('"'); + b.writeByte('"').writeUtf8(jsonEscape(new String(value.value, UTF_8))).writeByte('"'); break; case BYTES: b.writeByte('"').writeAscii(writeBase64Url(value.value)).writeByte('"'); @@ -392,12 +404,20 @@ static final class SpanReader implements JsonReaderAdapter { sizeInBytes += ",\"duration\":".length() + asciiSizeInBytes(value.duration); } if (!value.annotations.isEmpty()) { - sizeInBytes += ",\"annotations\":".length(); - sizeInBytes += JsonCodec.sizeInBytes(ANNOTATION_WRITER, value.annotations); + sizeInBytes += 17; // ,"annotations":[] + int length = value.annotations.size(); + if (length > 1) sizeInBytes += length - 1; // comma to join elements + for (int i = 0; i < length; i++) { + sizeInBytes += ANNOTATION_WRITER.sizeInBytes(value.annotations.get(i)); + } } if (!value.binaryAnnotations.isEmpty()) { - sizeInBytes += ",\"binaryAnnotations\":".length(); - sizeInBytes += JsonCodec.sizeInBytes(BINARY_ANNOTATION_WRITER, value.binaryAnnotations); + sizeInBytes += 23; // ,"binaryAnnotations":[] + int length = value.binaryAnnotations.size(); + if (length > 1) sizeInBytes += length - 1; // comma to join elements + for (int i = 0; i < length; i++) { + sizeInBytes += BINARY_ANNOTATION_WRITER.sizeInBytes(value.binaryAnnotations.get(i)); + } } if (value.debug != null && value.debug) { sizeInBytes += ",\"debug\":true".length(); @@ -408,13 +428,16 @@ static final class SpanReader implements JsonReaderAdapter { @Override public void write(Span value, Buffer b) { b.writeAscii("{\"traceId\":\""); if (value.traceIdHigh != 0) { - b.writeLowerHex(value.traceIdHigh); + writeLowerHex(b, value.traceIdHigh); } - b.writeLowerHex(value.traceId); - b.writeAscii("\",\"id\":\"").writeLowerHex(value.id); - b.writeAscii("\",\"name\":\"").writeJsonEscaped(value.name).writeByte('"'); + writeLowerHex(b, value.traceId); + b.writeAscii("\",\"id\":\""); + writeLowerHex(b, value.id); + b.writeAscii("\",\"name\":\"").writeUtf8(jsonEscape(value.name)).writeByte('"'); if (value.parentId != null) { - b.writeAscii(",\"parentId\":\"").writeLowerHex(value.parentId).writeByte('"'); + b.writeAscii(",\"parentId\":\""); + writeLowerHex(b, value.parentId); + b.writeByte('"'); } if (value.timestamp != null) { b.writeAscii(",\"timestamp\":").writeAscii(value.timestamp); @@ -470,11 +493,6 @@ public static byte[] writeBinaryAnnotation(BinaryAnnotation value) { return write(BINARY_ANNOTATION_WRITER, value); } - /** Exposed for ElasticSearch HttpBulkIndexer */ - public static String escape(String value) { - return Buffer.jsonEscape(value); - } - @Override public List readSpans(byte[] bytes) { return readList(new SpanReader(), bytes); @@ -490,27 +508,6 @@ public byte[] writeTraces(List> traces) { return writeNestedList(SPAN_WRITER, traces); } - public static byte[] writeNestedList(Buffer.Writer writer, List> traces) { - // Get the encoded size of the nested list so that we don't need to grow the buffer - int sizeInBytes = overheadInBytes(traces); - for (int i = 0, length = traces.size(); i < length; i++) { - List spans = traces.get(i); - sizeInBytes += overheadInBytes(spans); - for (int j = 0, jLength = spans.size(); j < jLength; j++) { - sizeInBytes += writer.sizeInBytes(spans.get(j)); - } - } - - Buffer out = new Buffer(sizeInBytes); - out.writeByte('['); // start list of traces - for (int i = 0, length = traces.size(); i < length; i++) { - writeList(writer, traces.get(i), out); - if (i + 1 < length) out.writeByte(','); - } - 
out.writeByte(']'); // stop list of traces - return out.toByteArray(); - } - public List> readTraces(byte[] bytes) { return readList(new SpanListReader(), bytes); } @@ -564,25 +561,14 @@ static final class SpanListReader implements JsonReaderAdapter> { }; static final Buffer.Writer DEPENDENCY_LINK_WRITER = new Buffer.Writer() { - @Override public int sizeInBytes(DependencyLink value) { - int sizeInBytes = 0; - sizeInBytes += "{\"parent\":\"".length() + jsonEscapedSizeInBytes(value.parent); - sizeInBytes += "\",\"child\":\"".length() + jsonEscapedSizeInBytes(value.child); - sizeInBytes += "\",\"callCount\":".length() + asciiSizeInBytes(value.callCount); - if (value.errorCount > 0) { - sizeInBytes += ",\"errorCount\":".length() + asciiSizeInBytes(value.errorCount); - } - return ++sizeInBytes;// end curly-brace + @Override public int sizeInBytes(DependencyLink v) { + zipkin.internal.v2.DependencyLink value = V2SpanConverter.fromLink(v); + return DependencyLinkBytesCodec.JSON.sizeInBytes(value); } - @Override public void write(DependencyLink value, Buffer b) { - b.writeAscii("{\"parent\":\"").writeJsonEscaped(value.parent); - b.writeAscii("\",\"child\":\"").writeJsonEscaped(value.child); - b.writeAscii("\",\"callCount\":").writeAscii(value.callCount); - if (value.errorCount > 0) { - b.writeAscii(",\"errorCount\":").writeAscii(value.errorCount); - } - b.writeByte('}'); + @Override public void write(DependencyLink v, Buffer b) { + zipkin.internal.v2.DependencyLink value = V2SpanConverter.fromLink(v); + b.write(DependencyLinkBytesCodec.JSON.encode(value)); } @Override public String toString() { @@ -626,7 +612,7 @@ public byte[] writeDependencyLinks(List value) { } @Override public void write(String value, Buffer buffer) { - buffer.writeByte('"').writeJsonEscaped(value).writeByte('"'); + buffer.writeByte('"').writeUtf8(jsonEscape(value)).writeByte('"'); } @Override public String toString() { @@ -642,107 +628,56 @@ public byte[] writeStrings(List value) { return writeList(STRING_WRITER, value); } - public static T read(JsonReaderAdapter adapter, byte[] bytes) { - checkArgument(bytes.length > 0, "Empty input reading %s", adapter); - try { - return adapter.fromJson(jsonReader(bytes)); - } catch (Exception e) { - throw exceptionReading(adapter.toString(), bytes, e); - } + static boolean needsJsonEscaping(byte[] v) { + for (int i = 0; i < v.length; i++) { + int current = v[i] & 0xFF; + if (i >= 2 && + // Is this the end of a u2028 or u2028 UTF-8 codepoint? + // 0xE2 0x80 0xA8 == u2028; 0xE2 0x80 0xA9 == u2028 + (current == 0xA8 || current == 0xA9) + && (v[i - 1] & 0xFF) == 0x80 + && (v[i - 2] & 0xFF) == 0xE2) { + return true; + } else if (current < 0x80 && REPLACEMENT_CHARS[current] != null) { + return true; + } + } + return false; // must be a string we don't need to escape. } - public static List readList(JsonReaderAdapter adapter, byte[] bytes) { - checkArgument(bytes.length > 0, "Empty input reading List<%s>", adapter); - JsonReader reader = jsonReader(bytes); - List result; - try { - reader.beginArray(); - result = reader.hasNext() ? 
new LinkedList<>() : Collections.emptyList(); - while (reader.hasNext()) result.add(adapter.fromJson(reader)); - reader.endArray(); - return result; - } catch (Exception e) { - throw exceptionReading("List<" + adapter + ">", bytes, e); - } - } + static final byte[] HEX_DIGITS = + {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; - static JsonReader jsonReader(byte[] bytes) { - return new JsonReader(new InputStreamReader(new ByteArrayInputStream(bytes), UTF_8)); - } - - /** Inability to encode is a programming bug. */ - public static byte[] write(Buffer.Writer writer, T value) { - Buffer b = new Buffer(writer.sizeInBytes(value)); - try { - writer.write(value, b); - } catch (RuntimeException e) { - byte[] bytes = b.toByteArray(); - int lengthWritten = bytes.length; - for (int i = 0; i < bytes.length; i++) { - if (bytes[i] == 0) { - lengthWritten = i; - break; - } - } - - final byte[] bytesWritten; - if (lengthWritten == bytes.length) { - bytesWritten = bytes; - } else { - bytesWritten = new byte[lengthWritten]; - System.arraycopy(bytes, 0, bytesWritten, 0, lengthWritten); - } - - String written = new String(bytesWritten, UTF_8); - // Don't use value directly in the message, as its toString might be implemented using this - // method. If that's the case, we'd stack overflow. Instead, emit what we've written so far. - String message = String.format( - "Bug found using %s to write %s as json. Wrote %s/%s bytes: %s", - writer.getClass().getSimpleName(), value.getClass().getSimpleName(), lengthWritten, - bytes.length, written); - throw assertionError(message, e); - } - return b.toByteArray(); - } - - static int sizeInBytes(Buffer.Writer writer, List value) { - int sizeInBytes = overheadInBytes(value); - for (int i = 0, length = value.size(); i < length; i++) { - sizeInBytes += writer.sizeInBytes(value.get(i)); - } - return sizeInBytes; - } - - static int overheadInBytes(List value) { - int sizeInBytes = 2; // brackets - if (value.size() > 1) sizeInBytes += value.size() - 1; // comma to join elements - return sizeInBytes; - } - - public static byte[] writeList(Buffer.Writer writer, List value) { - if (value.isEmpty()) return new byte[] {'[', ']'}; - Buffer result = new Buffer(JsonCodec.sizeInBytes(writer, value)); - writeList(writer, value, result); - return result.toByteArray(); - } - - public static void writeList(Buffer.Writer writer, List value, Buffer b) { - b.writeByte('['); - for (int i = 0, length = value.size(); i < length; ) { - writer.write(value.get(i++), b); - if (i < length) b.writeByte(','); - } - b.writeByte(']'); + static void writeHexByte(Buffer buffer, byte b) { + buffer.writeByte(HEX_DIGITS[(b >> 4) & 0xf]); + buffer.writeByte(HEX_DIGITS[b & 0xf]); } - static IllegalArgumentException exceptionReading(String type, byte[] bytes, Exception e) { - String cause = e.getMessage() == null ? 
"Error" : e.getMessage(); - if (cause.indexOf("malformed") != -1) cause = "Malformed"; - String message = String.format("%s reading %s from json: %s", cause, type, new String(bytes, UTF_8)); - throw new IllegalArgumentException(message, e); + static void writeLowerHex(Buffer b, long v) { + writeHexByte(b, (byte) ((v >>> 56L) & 0xff)); + writeHexByte(b, (byte) ((v >>> 48L) & 0xff)); + writeHexByte(b, (byte) ((v >>> 40L) & 0xff)); + writeHexByte(b, (byte) ((v >>> 32L) & 0xff)); + writeHexByte(b, (byte) ((v >>> 24L) & 0xff)); + writeHexByte(b, (byte) ((v >>> 16L) & 0xff)); + writeHexByte(b, (byte) ((v >>> 8L) & 0xff)); + writeHexByte(b, (byte) (v & 0xff)); } - public interface JsonReaderAdapter { - T fromJson(JsonReader reader) throws IOException; + // copied from JsonEscaper + private static final String[] REPLACEMENT_CHARS; + + static { + REPLACEMENT_CHARS = new String[128]; + for (int i = 0; i <= 0x1f; i++) { + REPLACEMENT_CHARS[i] = String.format("\\u%04x", (int) i); + } + REPLACEMENT_CHARS['"'] = "\\\""; + REPLACEMENT_CHARS['\\'] = "\\\\"; + REPLACEMENT_CHARS['\t'] = "\\t"; + REPLACEMENT_CHARS['\b'] = "\\b"; + REPLACEMENT_CHARS['\n'] = "\\n"; + REPLACEMENT_CHARS['\r'] = "\\r"; + REPLACEMENT_CHARS['\f'] = "\\f"; } } diff --git a/zipkin/src/main/java/zipkin/internal/V2JsonSpanDecoder.java b/zipkin/src/main/java/zipkin/internal/V2JsonSpanDecoder.java index 5a1fbba042d..d607a2375a1 100644 --- a/zipkin/src/main/java/zipkin/internal/V2JsonSpanDecoder.java +++ b/zipkin/src/main/java/zipkin/internal/V2JsonSpanDecoder.java @@ -18,7 +18,7 @@ import java.util.List; import zipkin.SpanDecoder; import zipkin.internal.v2.Span; -import zipkin.internal.v2.codec.BytesDecoder; +import zipkin.internal.v2.codec.SpanBytesCodec; /** Decodes a span from zipkin v2 encoding */ public final class V2JsonSpanDecoder implements SpanDecoder { @@ -27,7 +27,7 @@ public final class V2JsonSpanDecoder implements SpanDecoder { } @Override public List readSpans(byte[] span) { - List span2s = BytesDecoder.JSON.decodeList(span); + List span2s = SpanBytesCodec.JSON.decodeList(span); if (span2s.isEmpty()) return Collections.emptyList(); int length = span2s.size(); List result = new ArrayList<>(length); diff --git a/zipkin/src/main/java/zipkin/internal/V2SpanConverter.java b/zipkin/src/main/java/zipkin/internal/V2SpanConverter.java index 865b660313c..81cc0a10b57 100644 --- a/zipkin/src/main/java/zipkin/internal/V2SpanConverter.java +++ b/zipkin/src/main/java/zipkin/internal/V2SpanConverter.java @@ -24,6 +24,7 @@ import zipkin.Annotation; import zipkin.BinaryAnnotation; import zipkin.Constants; +import zipkin.internal.v2.DependencyLink; import zipkin.internal.v2.Endpoint; import zipkin.internal.v2.Span; import zipkin.internal.v2.Span.Kind; @@ -109,7 +110,7 @@ void processAnnotations(zipkin.Span source) { if (closeEnough(cs.endpoint, sr.endpoint)) { client.kind(Kind.CLIENT); // fork a new span for the server side - server = newSpanBuilder(source, convert(sr.endpoint)).kind(Kind.SERVER); + server = newSpanBuilder(source, toEndpoint(sr.endpoint)).kind(Kind.SERVER); } else { server = forEndpoint(source, sr.endpoint); } @@ -150,7 +151,7 @@ void processAnnotations(zipkin.Span source) { if (closeEnough(ms.endpoint, mr.endpoint)) { producer.kind(Kind.PRODUCER); // fork a new span for the consumer side - consumer = newSpanBuilder(source, convert(mr.endpoint)).kind(Kind.CONSUMER); + consumer = newSpanBuilder(source, toEndpoint(mr.endpoint)).kind(Kind.CONSUMER); } else { consumer = forEndpoint(source, mr.endpoint); } @@ -233,37 +234,37 @@ 
void processBinaryAnnotations(zipkin.Span source) { } if (cs != null && sa != null && !closeEnough(sa, cs.endpoint)) { - forEndpoint(source, cs.endpoint).remoteEndpoint(convert(sa)); + forEndpoint(source, cs.endpoint).remoteEndpoint(toEndpoint(sa)); } if (sr != null && ca != null && !closeEnough(ca, sr.endpoint)) { - forEndpoint(source, sr.endpoint).remoteEndpoint(convert(ca)); + forEndpoint(source, sr.endpoint).remoteEndpoint(toEndpoint(ca)); } if (ms != null && ma != null && !closeEnough(ma, ms.endpoint)) { - forEndpoint(source, ms.endpoint).remoteEndpoint(convert(ma)); + forEndpoint(source, ms.endpoint).remoteEndpoint(toEndpoint(ma)); } if (mr != null && ma != null && !closeEnough(ma, mr.endpoint)) { - forEndpoint(source, mr.endpoint).remoteEndpoint(convert(ma)); + forEndpoint(source, mr.endpoint).remoteEndpoint(toEndpoint(ma)); } // special-case when we are missing core annotations, but we have both address annotations if ((cs == null && sr == null) && (ca != null && sa != null)) { - forEndpoint(source, ca).remoteEndpoint(convert(sa)); + forEndpoint(source, ca).remoteEndpoint(toEndpoint(sa)); } } Span.Builder forEndpoint(zipkin.Span source, @Nullable zipkin.Endpoint e) { if (e == null) return spans.get(0); // allocate missing endpoint data to first span - Endpoint converted = convert(e); + Endpoint converted = toEndpoint(e); for (int i = 0, length = spans.size(); i < length; i++) { Span.Builder next = spans.get(i); Endpoint nextLocalEndpoint = next.localEndpoint(); if (nextLocalEndpoint == null) { next.localEndpoint(converted); return next; - } else if (closeEnough(convert(nextLocalEndpoint), e)) { + } else if (closeEnough(toEndpoint(nextLocalEndpoint), e)) { return next; } } @@ -321,8 +322,8 @@ public static zipkin.Span toSpan(Span in) { result.duration(in.duration()); } - zipkin.Endpoint local = in.localEndpoint() != null ? convert(in.localEndpoint()) : null; - zipkin.Endpoint remote = in.remoteEndpoint() != null ? convert(in.remoteEndpoint()) : null; + zipkin.Endpoint local = in.localEndpoint() != null ? toEndpoint(in.localEndpoint()) : null; + zipkin.Endpoint remote = in.remoteEndpoint() != null ? toEndpoint(in.remoteEndpoint()) : null; Kind kind = in.kind(); Annotation cs = null, sr = null, ss = null, cr = null, ms = null, mr = null, ws = null, wr = null; @@ -442,8 +443,8 @@ public static zipkin.Span toSpan(Span in) { return result.build(); } - public static zipkin.internal.v2.Endpoint convert(zipkin.Endpoint input) { - zipkin.internal.v2.Endpoint.Builder result = zipkin.internal.v2.Endpoint.newBuilder() + public static Endpoint toEndpoint(zipkin.Endpoint input) { + Endpoint.Builder result = Endpoint.newBuilder() .serviceName(input.serviceName) .port(input.port != null ? input.port & 0xffff : null); if (input.ipv4 != 0) { @@ -463,7 +464,7 @@ public static zipkin.internal.v2.Endpoint convert(zipkin.Endpoint input) { return result.build(); } - public static zipkin.Endpoint convert(Endpoint input) { + public static zipkin.Endpoint toEndpoint(Endpoint input) { zipkin.Endpoint.Builder result = zipkin.Endpoint.builder() .serviceName(input.serviceName() != null ? input.serviceName() : "") .port(input.port() != null ? 
input.port() : 0); @@ -475,4 +476,57 @@ public static zipkin.Endpoint convert(Endpoint input) { } return result.build(); } + + static List toSpans(List spans) { + if (spans.isEmpty()) return Collections.emptyList(); + int length = spans.size(); + List span1s = new ArrayList<>(length); + for (int i = 0; i < length; i++) { + span1s.add(V2SpanConverter.toSpan(spans.get(i))); + } + return span1s; + } + + public static DependencyLink fromLink(zipkin.DependencyLink link) { + return DependencyLink.newBuilder() + .parent(link.parent) + .child(link.child) + .callCount(link.callCount) + .errorCount(link.errorCount).build(); + } + + public static zipkin.DependencyLink toLink(DependencyLink link) { + return zipkin.DependencyLink.builder() + .parent(link.parent()) + .child(link.child()) + .callCount(link.callCount()) + .errorCount(link.errorCount()).build(); + } + + public static List toLinks(List links) { + if (links.isEmpty()) return Collections.emptyList(); + int length = links.size(); + List result = new ArrayList<>(length); + for (int i = 0; i < length; i++) { + DependencyLink link2 = links.get(i); + result.add(zipkin.DependencyLink.builder() + .parent(link2.parent()) + .child(link2.child()) + .callCount(link2.callCount()) + .errorCount(link2.errorCount()).build()); + } + return result; + } + + public static List fromLinks(Iterable links) { + List result = new ArrayList<>(); + for (zipkin.DependencyLink link1 : links) { + result.add(DependencyLink.newBuilder() + .parent(link1.parent) + .child(link1.child) + .callCount(link1.callCount) + .errorCount(link1.errorCount).build()); + } + return result; + } } diff --git a/zipkin/src/main/java/zipkin/internal/V2SpanStoreAdapter.java b/zipkin/src/main/java/zipkin/internal/V2SpanStoreAdapter.java index c1731e3986e..505e47082f8 100644 --- a/zipkin/src/main/java/zipkin/internal/V2SpanStoreAdapter.java +++ b/zipkin/src/main/java/zipkin/internal/V2SpanStoreAdapter.java @@ -31,6 +31,7 @@ import static zipkin.internal.GroupByTraceId.TRACE_DESCENDING; import static zipkin.internal.Util.sortedList; import static zipkin.internal.Util.toLowerHex; +import static zipkin.internal.V2SpanConverter.toSpans; final class V2SpanStoreAdapter implements zipkin.storage.SpanStore, AsyncSpanStore { final SpanStore delegate; @@ -54,7 +55,7 @@ public void getTraces(zipkin.storage.QueryRequest request, } Call>> getTracesCall(zipkin.storage.QueryRequest v1Request) { - return delegate.getTraces(convert(v1Request)).map(getTracesMapper); + return delegate.getTraces(convertRequest(v1Request)).map(getTracesMapper); } @Nullable @Override public List getTrace(long traceIdHigh, long traceIdLow) { @@ -130,7 +131,8 @@ Call> getRawTraceCall(long traceIdHigh, long traceIdLow) { } Call> getDependenciesCall(long endTs, @Nullable Long lookback) { - return delegate.getDependencies(endTs, lookback != null ? lookback : endTs); + return delegate.getDependencies(endTs, lookback != null ? 
lookback : endTs) + .map(V2SpanConverter::toLinks); } @Nullable @Override public List getTrace(long traceId) { @@ -154,23 +156,23 @@ Call> getDependenciesCall(long endTs, @Nullable Long lookba int length = trace2s.size(); List> trace1s = new ArrayList<>(length); for (int i = 0; i < length; i++) { - trace1s.add(CorrectForClockSkew.apply(MergeById.apply(convert(trace2s.get(i))))); + trace1s.add(CorrectForClockSkew.apply(MergeById.apply(toSpans(trace2s.get(i))))); } Collections.sort(trace1s, TRACE_DESCENDING); return trace1s; }; static final Mapper, List> getTraceMapper = (spans) -> { - List span1s = CorrectForClockSkew.apply(MergeById.apply(convert(spans))); + List span1s = CorrectForClockSkew.apply(MergeById.apply(toSpans(spans))); return (span1s.isEmpty()) ? null : span1s; }; static final Mapper, List> getRawTraceMapper = (spans) -> { - List span1s = convert(spans); + List span1s = toSpans(spans); return (span1s.isEmpty()) ? null : span1s; }; - static QueryRequest convert(zipkin.storage.QueryRequest v1Request) { + static QueryRequest convertRequest(zipkin.storage.QueryRequest v1Request) { return QueryRequest.newBuilder() .serviceName(v1Request.serviceName) .spanName(v1Request.spanName) @@ -181,14 +183,4 @@ static QueryRequest convert(zipkin.storage.QueryRequest v1Request) { .lookback(v1Request.lookback) .limit(v1Request.limit).build(); } - - static List convert(List spans) { - if (spans.isEmpty()) return Collections.emptyList(); - int length = spans.size(); - List span1s = new ArrayList<>(length); - for (int i = 0; i < length; i++) { - span1s.add(V2SpanConverter.toSpan(spans.get(i))); - } - return span1s; - } } diff --git a/zipkin/src/main/java/zipkin/internal/v2/Call.java b/zipkin/src/main/java/zipkin/internal/v2/Call.java index c5632e13246..3dc7a0c4345 100644 --- a/zipkin/src/main/java/zipkin/internal/v2/Call.java +++ b/zipkin/src/main/java/zipkin/internal/v2/Call.java @@ -133,6 +133,17 @@ public final Call handleError(ErrorHandler errorHandler) { return new ErrorHandling<>(errorHandler, this); } + // Taken from RxJava throwIfFatal, which was taken from scala + public static void propagateIfFatal(Throwable t) { + if (t instanceof VirtualMachineError) { + throw (VirtualMachineError) t; + } else if (t instanceof ThreadDeath) { + throw (ThreadDeath) t; + } else if (t instanceof LinkageError) { + throw (LinkageError) t; + } + } + /** * Invokes a request, returning a success value or propagating an error to the caller. Invoking * this more than once will result in an error. To repeat a call, make a copy with {@linkplain diff --git a/zipkin/src/main/java/zipkin/internal/v2/DependencyLink.java b/zipkin/src/main/java/zipkin/internal/v2/DependencyLink.java new file mode 100644 index 00000000000..d331a1dc243 --- /dev/null +++ b/zipkin/src/main/java/zipkin/internal/v2/DependencyLink.java @@ -0,0 +1,80 @@ +/** + * Copyright 2015-2017 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ +package zipkin.internal.v2; + +import com.google.auto.value.AutoValue; +import java.io.Serializable; +import java.nio.charset.Charset; +import java.util.Locale; +import javax.annotation.concurrent.Immutable; +import zipkin.internal.v2.codec.DependencyLinkBytesCodec; + +@Immutable +@AutoValue +public abstract class DependencyLink implements Serializable { // for Spark jobs + static final Charset UTF_8 = Charset.forName("UTF-8"); + + private static final long serialVersionUID = 0L; + + public static Builder newBuilder() { + return new AutoValue_DependencyLink.Builder().errorCount(0); + } + + /** parent service name (caller) */ + public abstract String parent(); + + /** child service name (callee) */ + public abstract String child(); + + /** total traced calls made from {@link #parent} to {@link #child} */ + public abstract long callCount(); + + /** How many {@link #callCount calls} are known to be errors */ + public abstract long errorCount(); + + public abstract Builder toBuilder(); + + @AutoValue.Builder + public static abstract class Builder { + + public abstract Builder parent(String parent); + + public abstract Builder child(String child); + + public abstract Builder callCount(long callCount); + + public abstract Builder errorCount(long errorCount); + + abstract String parent(); + + abstract String child(); + + abstract DependencyLink autoBuild(); + + public final DependencyLink build() { + return parent(parent().toLowerCase(Locale.ROOT)) + .child(child().toLowerCase(Locale.ROOT)).autoBuild(); + } + + Builder() { + } + } + + @Override public String toString() { + return new String(DependencyLinkBytesCodec.JSON.encode(this), UTF_8); + } + + DependencyLink() { + } +} diff --git a/zipkin/src/main/java/zipkin/internal/v2/Span.java b/zipkin/src/main/java/zipkin/internal/v2/Span.java index d95a8ee17e7..5fa1881f3bb 100644 --- a/zipkin/src/main/java/zipkin/internal/v2/Span.java +++ b/zipkin/src/main/java/zipkin/internal/v2/Span.java @@ -28,7 +28,7 @@ import javax.annotation.concurrent.Immutable; import zipkin.Constants; import zipkin.TraceKeys; -import zipkin.internal.v2.codec.BytesEncoder; +import zipkin.internal.v2.codec.SpanBytesEncoder; /** * A trace is a series of spans (often RPC calls) which form a latency tree. 
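Aside on the value type added above, before the remaining Span changes: a minimal sketch of building a v2 DependencyLink and round-tripping it through the new JSON codec. The wrapper class name is illustrative; the builder and codec calls are the ones introduced in this change.

import zipkin.internal.v2.DependencyLink;
import zipkin.internal.v2.codec.DependencyLinkBytesCodec;

class DependencyLinkSketch { // illustrative wrapper, not in this diff
  static void roundTrip() {
    DependencyLink link = DependencyLink.newBuilder()
        .parent("Frontend") // build() lowercases service names via Locale.ROOT
        .child("backend")
        .callCount(2)
        .errorCount(1)
        .build();

    byte[] json = DependencyLinkBytesCodec.JSON.encode(link); // {"parent":"frontend",...}
    DependencyLink decoded = DependencyLinkBytesCodec.JSON.decode(json);
    assert decoded.equals(link); // AutoValue equality, so the codec round-trips cleanly
  }
}

Note that errorCount defaults to 0 in newBuilder() and is only written to JSON when greater than zero, which keeps the common no-error case compact.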
@@ -427,7 +427,7 @@ public Span build() { } @Override public String toString() { - return new String(BytesEncoder.JSON.encode(this), UTF_8); + return new String(SpanBytesEncoder.JSON.encode(this), UTF_8); } /** diff --git a/zipkin/src/main/java/zipkin/internal/v2/codec/BytesDecoder.java b/zipkin/src/main/java/zipkin/internal/v2/codec/BytesDecoder.java index dc09b746648..26c8d57fcda 100644 --- a/zipkin/src/main/java/zipkin/internal/v2/codec/BytesDecoder.java +++ b/zipkin/src/main/java/zipkin/internal/v2/codec/BytesDecoder.java @@ -14,39 +14,16 @@ package zipkin.internal.v2.codec; import java.util.List; -import zipkin.internal.JsonCodec; -import zipkin.internal.v2.Span; /** - * @param type of the span, usually {@link zipkin.Span} + * @param type of the object to deserialize */ -public interface BytesDecoder { - BytesDecoder JSON = new BytesDecoder() { - @Override public Encoding encoding() { - return Encoding.JSON; - } - - @Override public Span decode(byte[] span) { // ex decode span in dependencies job - return JsonCodec.read(new Span2JsonAdapters.Span2Reader(), span); - } - - @Override public List decodeList(byte[] spans) { // ex getTrace - return JsonCodec.readList(new Span2JsonAdapters.Span2Reader(), spans); - } - - @Override public List> decodeNestedList(byte[] traces) { // ex getTraces - return JsonCodec.readList(new Span2JsonAdapters.Span2ListReader(), traces); - } - }; - +public interface BytesDecoder { Encoding encoding(); - /** throws {@linkplain IllegalArgumentException} if the span couldn't be decoded */ - S decode(byte[] span); - - /** throws {@linkplain IllegalArgumentException} if the spans couldn't be decoded */ - List decodeList(byte[] spans); + /** throws {@linkplain IllegalArgumentException} if the type couldn't be decoded */ + T decode(byte[] serialized); - /** throws {@linkplain IllegalArgumentException} if the traces couldn't be decoded */ - List> decodeNestedList(byte[] traces); + /** throws {@linkplain IllegalArgumentException} if the type couldn't be decoded */ + List decodeList(byte[] serialized); } diff --git a/zipkin/src/main/java/zipkin/internal/v2/codec/BytesEncoder.java b/zipkin/src/main/java/zipkin/internal/v2/codec/BytesEncoder.java index d7bed047660..3430264311e 100644 --- a/zipkin/src/main/java/zipkin/internal/v2/codec/BytesEncoder.java +++ b/zipkin/src/main/java/zipkin/internal/v2/codec/BytesEncoder.java @@ -14,41 +14,18 @@ package zipkin.internal.v2.codec; import java.util.List; -import zipkin.internal.JsonCodec; -import zipkin.internal.v2.Span; - -import static zipkin.internal.v2.codec.Span2JsonAdapters.SPAN_WRITER; /** - * @param type of the span, usually {@link zipkin.Span} + * @param type of the object to deserialize */ -public interface BytesEncoder { - BytesEncoder JSON = new BytesEncoder() { - @Override public Encoding encoding() { - return Encoding.JSON; - } - - @Override public byte[] encode(Span span) { - return JsonCodec.write(SPAN_WRITER, span); - } - - @Override public byte[] encodeList(List spans) { - return JsonCodec.writeList(SPAN_WRITER, spans); - } - - @Override public byte[] encodeNestedList(List> spans) { - return JsonCodec.writeNestedList(SPAN_WRITER, spans); - } - }; - +public interface BytesEncoder { Encoding encoding(); - /** Serializes a span recorded from instrumentation into its binary form. */ - byte[] encode(S span); + int sizeInBytes(T input); - /** Serializes a list of spans recorded from instrumentation into its binary form. */ - byte[] encodeList(List spans); + /** Serializes an object into its binary form. 
*/ + byte[] encode(T input); - /** Serializes a list of spans recorded from instrumentation into its binary form. */ - byte[] encodeNestedList(List> traces); + /** Serializes a list of objects into their binary form. */ + byte[] encodeList(List input); } diff --git a/zipkin/src/main/java/zipkin/internal/v2/codec/DependencyLinkBytesCodec.java b/zipkin/src/main/java/zipkin/internal/v2/codec/DependencyLinkBytesCodec.java new file mode 100644 index 00000000000..0a4fcb27eb9 --- /dev/null +++ b/zipkin/src/main/java/zipkin/internal/v2/codec/DependencyLinkBytesCodec.java @@ -0,0 +1,110 @@ +/** + * Copyright 2015-2017 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package zipkin.internal.v2.codec; + +import com.google.gson.stream.JsonReader; +import java.io.IOException; +import java.util.List; +import zipkin.internal.v2.DependencyLink; +import zipkin.internal.v2.internal.Buffer; +import zipkin.internal.v2.internal.JsonCodec; +import zipkin.internal.v2.internal.JsonCodec.JsonReaderAdapter; + +import static zipkin.internal.v2.internal.Buffer.asciiSizeInBytes; +import static zipkin.internal.v2.internal.JsonEscaper.jsonEscape; +import static zipkin.internal.v2.internal.JsonEscaper.jsonEscapedSizeInBytes; + +public enum DependencyLinkBytesCodec + implements BytesEncoder, BytesDecoder { + JSON { + @Override public Encoding encoding() { + return Encoding.JSON; + } + + @Override public int sizeInBytes(DependencyLink input) { + return WRITER.sizeInBytes(input); + } + + @Override public byte[] encode(DependencyLink link) { + return JsonCodec.write(WRITER, link); + } + + @Override public DependencyLink decode(byte[] link) { + return JsonCodec.read(READER, link); + } + + @Override public byte[] encodeList(List links) { + return JsonCodec.writeList(WRITER, links); + } + + @Override public List decodeList(byte[] links) { + return JsonCodec.readList(READER, links); + } + }; + + static final JsonReaderAdapter READER = new JsonReaderAdapter() { + @Override public DependencyLink fromJson(JsonReader reader) throws IOException { + DependencyLink.Builder result = DependencyLink.newBuilder(); + reader.beginObject(); + while (reader.hasNext()) { + String nextName = reader.nextName(); + if (nextName.equals("parent")) { + result.parent(reader.nextString()); + } else if (nextName.equals("child")) { + result.child(reader.nextString()); + } else if (nextName.equals("callCount")) { + result.callCount(reader.nextLong()); + } else if (nextName.equals("errorCount")) { + result.errorCount(reader.nextLong()); + } else { + reader.skipValue(); + } + } + reader.endObject(); + return result.build(); + } + + @Override public String toString() { + return "DependencyLink"; + } + }; + + static final Buffer.Writer WRITER = new Buffer.Writer() { + @Override public int sizeInBytes(DependencyLink value) { + int sizeInBytes = 37; // {"parent":"","child":"","callCount":} + sizeInBytes += jsonEscapedSizeInBytes(value.parent()); + sizeInBytes += jsonEscapedSizeInBytes(value.child()); + sizeInBytes += 
asciiSizeInBytes(value.callCount()); + if (value.errorCount() > 0) { + sizeInBytes += 14; // ,"errorCount": + sizeInBytes += asciiSizeInBytes(value.errorCount()); + } + return sizeInBytes; + } + + @Override public void write(DependencyLink value, Buffer b) { + b.writeAscii("{\"parent\":\"").writeUtf8(jsonEscape(value.parent())); + b.writeAscii("\",\"child\":\"").writeUtf8(jsonEscape(value.child())); + b.writeAscii("\",\"callCount\":").writeAscii(value.callCount()); + if (value.errorCount() > 0) { + b.writeAscii(",\"errorCount\":").writeAscii(value.errorCount()); + } + b.writeByte('}'); + } + + @Override public String toString() { + return "DependencyLink"; + } + }; +} diff --git a/zipkin/src/main/java/zipkin/internal/v2/codec/SpanBytesCodec.java b/zipkin/src/main/java/zipkin/internal/v2/codec/SpanBytesCodec.java new file mode 100644 index 00000000000..637b695b7ca --- /dev/null +++ b/zipkin/src/main/java/zipkin/internal/v2/codec/SpanBytesCodec.java @@ -0,0 +1,207 @@ +/** + * Copyright 2015-2017 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package zipkin.internal.v2.codec; + +import com.google.gson.stream.JsonReader; +import com.google.gson.stream.JsonToken; +import com.google.gson.stream.MalformedJsonException; +import java.io.IOException; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import zipkin.internal.v2.Endpoint; +import zipkin.internal.v2.Span; +import zipkin.internal.v2.internal.JsonCodec; + +/** This is separate from {@link SpanBytesEncoder}, as it isn't needed for instrumentation */ +public enum SpanBytesCodec implements BytesEncoder, BytesDecoder { + /** Corresponds to the Zipkin v2 json format */ + JSON { + @Override public Encoding encoding() { + return Encoding.JSON; + } + + @Override public int sizeInBytes(Span input) { + return SpanBytesEncoder.JSON.sizeInBytes(input); + } + + @Override public byte[] encode(Span input) { + return SpanBytesEncoder.JSON.encode(input); + } + + @Override public byte[] encodeList(List input) { + return SpanBytesEncoder.JSON.encodeList(input); + } + + @Override public Span decode(byte[] span) { // ex decode span in dependencies job + return JsonCodec.read(new SpanReader(), span); + } + + @Override public List decodeList(byte[] spans) { // ex getTrace + return JsonCodec.readList(new SpanReader(), spans); + } + + @Override public byte[] encodeNestedList(List> traces) { + return JsonCodec.writeNestedList(SpanBytesEncoder.SPAN_WRITER, traces); + } + + @Override public List> decodeNestedList(byte[] traces) { // ex getTraces + return JsonCodec.readList(new SpanListReader(), traces); + } + }; + + /** Serializes a list of traces retrieved from storage into its binary form. 
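A round-trip sketch for the new DependencyLinkBytesCodec, assuming the DependencyLink builder methods referenced by the reader above (parent, child, callCount, errorCount); the service names and counts are made up.

import java.nio.charset.StandardCharsets;
import java.util.List;
import zipkin.internal.v2.DependencyLink;
import zipkin.internal.v2.codec.DependencyLinkBytesCodec;

import static java.util.Collections.singletonList;

public class DependencyLinkCodecExample {
  public static void main(String[] args) {
    DependencyLink link = DependencyLink.newBuilder()
        .parent("frontend")
        .child("backend")
        .callCount(5L)
        .errorCount(1L)
        .build();

    // Per the writer above, this should produce
    // {"parent":"frontend","child":"backend","callCount":5,"errorCount":1}
    byte[] json = DependencyLinkBytesCodec.JSON.encode(link);
    System.out.println(new String(json, StandardCharsets.UTF_8));

    // Lists encode as a JSON array and decode back through the reader above
    byte[] listJson = DependencyLinkBytesCodec.JSON.encodeList(singletonList(link));
    List<DependencyLink> decoded = DependencyLinkBytesCodec.JSON.decodeList(listJson);
    System.out.println(decoded.get(0).callCount()); // 5
  }
}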
*/ + public abstract byte[] encodeNestedList(List> traces); + + /** throws {@linkplain IllegalArgumentException} if the traces couldn't be decoded */ + public abstract List> decodeNestedList(byte[] traces); + + static final class SpanReader implements JsonCodec.JsonReaderAdapter { + Span.Builder builder; + + @Override public Span fromJson(JsonReader reader) throws IOException { + if (builder == null) { + builder = Span.newBuilder(); + } else { + builder.clear(); + } + reader.beginObject(); + while (reader.hasNext()) { + String nextName = reader.nextName(); + if (nextName.equals("traceId")) { + builder.traceId(reader.nextString()); + continue; + } else if (nextName.equals("id")) { + builder.id(reader.nextString()); + continue; + } else if (reader.peek() == JsonToken.NULL) { + reader.skipValue(); + continue; + } + + // read any optional fields + if (nextName.equals("parentId")) { + builder.parentId(reader.nextString()); + } else if (nextName.equals("kind")) { + builder.kind(Span.Kind.valueOf(reader.nextString())); + } else if (nextName.equals("name")) { + builder.name(reader.nextString()); + } else if (nextName.equals("timestamp")) { + builder.timestamp(reader.nextLong()); + } else if (nextName.equals("duration")) { + builder.duration(reader.nextLong()); + } else if (nextName.equals("localEndpoint")) { + builder.localEndpoint(ENDPOINT_READER.fromJson(reader)); + } else if (nextName.equals("remoteEndpoint")) { + builder.remoteEndpoint(ENDPOINT_READER.fromJson(reader)); + } else if (nextName.equals("annotations")) { + reader.beginArray(); + while (reader.hasNext()) { + reader.beginObject(); + Long timestamp = null; + String value = null; + while (reader.hasNext()) { + nextName = reader.nextName(); + if (nextName.equals("timestamp")) { + timestamp = reader.nextLong(); + } else if (nextName.equals("value")) { + value = reader.nextString(); + } else { + reader.skipValue(); + } + } + if (timestamp == null || value == null) { + throw new MalformedJsonException("Incomplete annotation at " + reader.getPath()); + } + reader.endObject(); + builder.addAnnotation(timestamp, value); + } + reader.endArray(); + } else if (nextName.equals("tags")) { + reader.beginObject(); + while (reader.hasNext()) { + String key = reader.nextName(); + if (reader.peek() == JsonToken.NULL) { + throw new MalformedJsonException("No value at " + reader.getPath()); + } + builder.putTag(key, reader.nextString()); + } + reader.endObject(); + } else if (nextName.equals("debug")) { + if (reader.nextBoolean()) builder.debug(true); + } else if (nextName.equals("shared")) { + if (reader.nextBoolean()) builder.shared(true); + } else { + reader.skipValue(); + } + } + reader.endObject(); + return builder.build(); + } + + @Override public String toString() { + return "Span"; + } + } + + static final JsonCodec.JsonReaderAdapter ENDPOINT_READER = reader -> { + Endpoint.Builder result = Endpoint.newBuilder(); + reader.beginObject(); + boolean readField = false; + while (reader.hasNext()) { + String nextName = reader.nextName(); + if (reader.peek() == JsonToken.NULL) { + reader.skipValue(); + continue; + } + if (nextName.equals("serviceName")) { + result.serviceName(reader.nextString()); + readField = true; + } else if (nextName.equals("ipv4") || nextName.equals("ipv6")) { + result.parseIp(reader.nextString()); + readField = true; + } else if (nextName.equals("port")) { + result.port(reader.nextInt()); + readField = true; + } else { + reader.skipValue(); + } + } + reader.endObject(); + if (!readField) throw new MalformedJsonException("Empty 
endpoint at " + reader.getPath()); + return result.build(); + }; + + + static final class SpanListReader implements JsonCodec.JsonReaderAdapter> { + SpanReader spanReader; + + @Override public List fromJson(JsonReader reader) throws IOException { + reader.beginArray(); + if (!reader.hasNext()) { + reader.endArray(); + return Collections.emptyList(); + } + List result = new LinkedList<>(); // because we don't know how long it will be + if (spanReader == null) spanReader = new SpanReader(); + while (reader.hasNext()) result.add(spanReader.fromJson(reader)); + reader.endArray(); + return result; + } + + @Override public String toString() { + return "List"; + } + } +} diff --git a/zipkin/src/main/java/zipkin/internal/v2/codec/Span2JsonAdapters.java b/zipkin/src/main/java/zipkin/internal/v2/codec/SpanBytesEncoder.java similarity index 54% rename from zipkin/src/main/java/zipkin/internal/v2/codec/Span2JsonAdapters.java rename to zipkin/src/main/java/zipkin/internal/v2/codec/SpanBytesEncoder.java index f11c51c369a..f93a474c0fb 100644 --- a/zipkin/src/main/java/zipkin/internal/v2/codec/Span2JsonAdapters.java +++ b/zipkin/src/main/java/zipkin/internal/v2/codec/SpanBytesEncoder.java @@ -13,145 +13,38 @@ */ package zipkin.internal.v2.codec; -import com.google.gson.stream.JsonReader; -import com.google.gson.stream.JsonToken; -import com.google.gson.stream.MalformedJsonException; -import java.io.IOException; -import java.util.Collections; import java.util.Iterator; -import java.util.LinkedList; import java.util.List; import java.util.Map; -import zipkin.internal.Buffer; -import zipkin.internal.JsonCodec; -import zipkin.internal.JsonCodec.JsonReaderAdapter; import zipkin.internal.v2.Annotation; import zipkin.internal.v2.Endpoint; import zipkin.internal.v2.Span; +import zipkin.internal.v2.internal.Buffer; +import zipkin.internal.v2.internal.JsonCodec; + +import static zipkin.internal.v2.internal.Buffer.asciiSizeInBytes; +import static zipkin.internal.v2.internal.JsonEscaper.jsonEscape; +import static zipkin.internal.v2.internal.JsonEscaper.jsonEscapedSizeInBytes; + +/** Limited interface needed by those writing span reporters */ +public enum SpanBytesEncoder implements BytesEncoder { + /** Corresponds to the Zipkin v2 json format */ + JSON { + @Override public Encoding encoding() { + return Encoding.JSON; + } -import static zipkin.internal.Buffer.asciiSizeInBytes; -import static zipkin.internal.Buffer.jsonEscapedSizeInBytes; - -/** - * Internal type supporting codec operations in {@link Span}. Design rationale is the same as - * {@link JsonCodec}. 
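A hedged round-trip sketch for SpanBytesCodec.JSON, the storage/query-side codec defined above; it assumes the Span and Endpoint builder methods referenced by SpanReader (traceId, id, name, kind, timestamp, duration, localEndpoint, putTag) behave as shown, and the ids and values are invented.

import java.util.List;
import zipkin.internal.v2.Endpoint;
import zipkin.internal.v2.Span;
import zipkin.internal.v2.codec.SpanBytesCodec;

import static java.util.Collections.singletonList;

public class SpanBytesCodecExample {
  public static void main(String[] args) {
    Endpoint frontend = Endpoint.newBuilder().serviceName("frontend").parseIp("127.0.0.1").build();

    Span span = Span.newBuilder()
        .traceId("86154a4ba6e91385")
        .id("4d1e00c0db9010db")
        .name("get")
        .kind(Span.Kind.SERVER)
        .timestamp(1472470996199000L) // epoch micros
        .duration(207000L)
        .localEndpoint(frontend)
        .putTag("http.path", "/api")
        .build();

    // decode()/decodeList() are the read-side operations SpanBytesEncoder doesn't expose
    byte[] oneSpan = SpanBytesCodec.JSON.encode(span);
    Span decoded = SpanBytesCodec.JSON.decode(oneSpan);
    System.out.println(decoded.name()); // get

    // A getTraces-style payload is a list of traces, i.e. a nested list of spans
    byte[] tracesJson = SpanBytesCodec.JSON.encodeNestedList(singletonList(singletonList(span)));
    List<List<Span>> traces = SpanBytesCodec.JSON.decodeNestedList(tracesJson);
    System.out.println(traces.size() + " trace(s), " + traces.get(0).size() + " span(s)");
  }
}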
- */ -final class Span2JsonAdapters { - - static final class Span2Reader implements JsonReaderAdapter { - Span.Builder builder; - - @Override public Span fromJson(JsonReader reader) throws IOException { - if (builder == null) { - builder = Span.newBuilder(); - } else { - builder.clear(); - } - reader.beginObject(); - while (reader.hasNext()) { - String nextName = reader.nextName(); - if (nextName.equals("traceId")) { - builder.traceId(reader.nextString()); - continue; - } else if (nextName.equals("id")) { - builder.id(reader.nextString()); - continue; - } else if (reader.peek() == JsonToken.NULL) { - reader.skipValue(); - continue; - } - - // read any optional fields - if (nextName.equals("parentId")) { - builder.parentId(reader.nextString()); - } else if (nextName.equals("kind")) { - builder.kind(Span.Kind.valueOf(reader.nextString())); - } else if (nextName.equals("name")) { - builder.name(reader.nextString()); - } else if (nextName.equals("timestamp")) { - builder.timestamp(reader.nextLong()); - } else if (nextName.equals("duration")) { - builder.duration(reader.nextLong()); - } else if (nextName.equals("localEndpoint")) { - builder.localEndpoint(ENDPOINT_READER.fromJson(reader)); - } else if (nextName.equals("remoteEndpoint")) { - builder.remoteEndpoint(ENDPOINT_READER.fromJson(reader)); - } else if (nextName.equals("annotations")) { - reader.beginArray(); - while (reader.hasNext()) { - reader.beginObject(); - Long timestamp = null; - String value = null; - while (reader.hasNext()) { - nextName = reader.nextName(); - if (nextName.equals("timestamp")) { - timestamp = reader.nextLong(); - } else if (nextName.equals("value")) { - value = reader.nextString(); - } else { - reader.skipValue(); - } - } - if (timestamp == null || value == null) { - throw new MalformedJsonException("Incomplete annotation at " + reader.getPath()); - } - reader.endObject(); - builder.addAnnotation(timestamp, value); - } - reader.endArray(); - } else if (nextName.equals("tags")) { - reader.beginObject(); - while (reader.hasNext()) { - String key = reader.nextName(); - if (reader.peek() == JsonToken.NULL) { - throw new MalformedJsonException("No value at " + reader.getPath()); - } - builder.putTag(key, reader.nextString()); - } - reader.endObject(); - } else if (nextName.equals("debug")) { - if (reader.nextBoolean()) builder.debug(true); - } else if (nextName.equals("shared")) { - if (reader.nextBoolean()) builder.shared(true); - } else { - reader.skipValue(); - } - } - reader.endObject(); - return builder.build(); + @Override public int sizeInBytes(Span input) { + return SPAN_WRITER.sizeInBytes(input); } - @Override public String toString() { - return "Span"; + @Override public byte[] encode(Span span) { + return JsonCodec.write(SPAN_WRITER, span); } - } - static final JsonReaderAdapter ENDPOINT_READER = reader -> { - Endpoint.Builder result = Endpoint.newBuilder(); - reader.beginObject(); - boolean readField = false; - while (reader.hasNext()) { - String nextName = reader.nextName(); - if (reader.peek() == JsonToken.NULL) { - reader.skipValue(); - continue; - } - if (nextName.equals("serviceName")) { - result.serviceName(reader.nextString()); - readField = true; - } else if (nextName.equals("ipv4") || nextName.equals("ipv6")) { - result.parseIp(reader.nextString()); - readField = true; - } else if (nextName.equals("port")) { - result.port(reader.nextInt()); - readField = true; - } else { - reader.skipValue(); - } + @Override public byte[] encodeList(List spans) { + return JsonCodec.writeList(SPAN_WRITER, spans); 
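Since SpanBytesEncoder is the limited, reporter-facing half, one plausible use of the new sizeInBytes method is checking a batch against a transport limit before serializing it; ReporterSketch, its method name, and the 500 KB limit are invented for illustration, and the list sizing mirrors the arithmetic in the new zipkin.internal.v2.internal.JsonCodec.

import java.util.List;
import zipkin.internal.v2.Span;
import zipkin.internal.v2.codec.SpanBytesEncoder;

// Illustrative only: a sender sizing a JSON list (two bytes for the brackets plus a
// comma between elements) before deciding whether to encode it.
final class ReporterSketch {
  static final int MAX_MESSAGE_BYTES = 500_000; // assumed transport limit

  static byte[] encodeIfItFits(List<Span> batch) {
    int encodedSize = 2; // the enclosing []
    for (int i = 0; i < batch.size(); i++) {
      if (i > 0) encodedSize++; // comma joining elements
      encodedSize += SpanBytesEncoder.JSON.sizeInBytes(batch.get(i));
    }
    if (encodedSize > MAX_MESSAGE_BYTES) {
      throw new IllegalArgumentException(
          "encoded batch would be " + encodedSize + " bytes, over the " + MAX_MESSAGE_BYTES + " byte limit");
    }
    return SpanBytesEncoder.JSON.encodeList(batch);
  }
}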
} - reader.endObject(); - if (!readField) throw new MalformedJsonException("Empty endpoint at " + reader.getPath()); - return result.build(); }; static final Buffer.Writer ENDPOINT_WRITER = new Buffer.Writer() { @@ -184,7 +77,7 @@ static final class Span2Reader implements JsonReaderAdapter { boolean wroteField = false; if (value.serviceName() != null) { b.writeAscii("\"serviceName\":\""); - b.writeJsonEscaped(value.serviceName()).writeByte('"'); + b.writeUtf8(jsonEscape(value.serviceName())).writeByte('"'); wroteField = true; } if (value.ipv4() != null) { @@ -207,6 +100,20 @@ static final class Span2Reader implements JsonReaderAdapter { } }; + static final Buffer.Writer ANNOTATION_WRITER = new Buffer.Writer() { + @Override public int sizeInBytes(Annotation value) { + int sizeInBytes = 25; // {"timestamp":,"value":""} + sizeInBytes += asciiSizeInBytes(value.timestamp()); + sizeInBytes += jsonEscapedSizeInBytes(value.value()); + return sizeInBytes; + } + + @Override public void write(Annotation value, Buffer b) { + b.writeAscii("{\"timestamp\":").writeAscii(value.timestamp()); + b.writeAscii(",\"value\":\"").writeUtf8(jsonEscape(value.value())).writeAscii("\"}"); + } + }; + static final Buffer.Writer SPAN_WRITER = new Buffer.Writer() { @Override public int sizeInBytes(Span value) { int sizeInBytes = 13; // {"traceId":"" @@ -273,10 +180,10 @@ static final class Span2Reader implements JsonReaderAdapter { } b.writeAscii(",\"id\":\"").writeAscii(value.id()).writeByte('"'); if (value.kind() != null) { - b.writeAscii(",\"kind\":\"").writeJsonEscaped(value.kind().toString()).writeByte('"'); + b.writeAscii(",\"kind\":\"").writeAscii(value.kind().toString()).writeByte('"'); } if (value.name() != null) { - b.writeAscii(",\"name\":\"").writeJsonEscaped(value.name()).writeByte('"'); + b.writeAscii(",\"name\":\"").writeUtf8(jsonEscape(value.name())).writeByte('"'); } if (value.timestamp() != null) { b.writeAscii(",\"timestamp\":").writeAscii(value.timestamp()); @@ -294,15 +201,20 @@ static final class Span2Reader implements JsonReaderAdapter { } if (!value.annotations().isEmpty()) { b.writeAscii(",\"annotations\":"); - JsonCodec.writeList(ANNOTATION_WRITER, value.annotations(), b); + b.writeByte('['); + for (int i = 0, length = value.annotations().size(); i < length; ) { + ANNOTATION_WRITER.write(value.annotations().get(i++), b); + if (i < length) b.writeByte(','); + } + b.writeByte(']'); } if (!value.tags().isEmpty()) { b.writeAscii(",\"tags\":{"); Iterator> i = value.tags().entrySet().iterator(); while (i.hasNext()) { Map.Entry entry = i.next(); - b.writeByte('"').writeJsonEscaped(entry.getKey()).writeAscii("\":\""); - b.writeJsonEscaped(entry.getValue()).writeByte('"'); + b.writeByte('"').writeUtf8(jsonEscape(entry.getKey())).writeAscii("\":\""); + b.writeUtf8(jsonEscape(entry.getValue())).writeByte('"'); if (i.hasNext()) b.writeByte(','); } b.writeByte('}'); @@ -320,39 +232,4 @@ static final class Span2Reader implements JsonReaderAdapter { return "Span"; } }; - - static final Buffer.Writer ANNOTATION_WRITER = new Buffer.Writer() { - @Override public int sizeInBytes(Annotation value) { - int sizeInBytes = 25; // {"timestamp":,"value":""} - sizeInBytes += asciiSizeInBytes(value.timestamp()); - sizeInBytes += jsonEscapedSizeInBytes(value.value()); - return sizeInBytes; - } - - @Override public void write(Annotation value, Buffer b) { - b.writeAscii("{\"timestamp\":").writeAscii(value.timestamp()); - b.writeAscii(",\"value\":\"").writeJsonEscaped(value.value()).writeAscii("\"}"); - } - }; - - static 
final class Span2ListReader implements JsonReaderAdapter> { - Span2Reader spanReader; - - @Override public List fromJson(JsonReader reader) throws IOException { - reader.beginArray(); - if (!reader.hasNext()) { - reader.endArray(); - return Collections.emptyList(); - } - List result = new LinkedList<>(); // because we don't know how long it will be - if (spanReader == null) spanReader = new Span2Reader(); - while (reader.hasNext()) result.add(spanReader.fromJson(reader)); - reader.endArray(); - return result; - } - - @Override public String toString() { - return "List"; - } - } } diff --git a/zipkin/src/main/java/zipkin/internal/v2/internal/Buffer.java b/zipkin/src/main/java/zipkin/internal/v2/internal/Buffer.java new file mode 100644 index 00000000000..d44c95e6d29 --- /dev/null +++ b/zipkin/src/main/java/zipkin/internal/v2/internal/Buffer.java @@ -0,0 +1,158 @@ +/** + * Copyright 2015-2017 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package zipkin.internal.v2.internal; + +import java.nio.charset.Charset; + +public final class Buffer { + static final Charset UTF_8 = Charset.forName("UTF-8"); + + public interface Writer { + int sizeInBytes(T value); + + void write(T value, Buffer buffer); + } + + private final byte[] buf; + private int pos; + + Buffer(int size) { + buf = new byte[size]; + } + + public Buffer writeByte(int v) { + buf[pos++] = (byte) v; + return this; + } + + public Buffer write(byte[] v) { + System.arraycopy(v, 0, buf, pos, v.length); + pos += v.length; + return this; + } + + static int utf8SizeInBytes(String string) { + // Adapted from http://stackoverflow.com/questions/8511490/calculating-length-in-utf-8-of-java-string-without-actually-encoding-it + int sizeInBytes = 0; + for (int i = 0, len = string.length(); i < len; i++) { + char ch = string.charAt(i); + if (ch < 0x80) { + sizeInBytes++; // 7-bit character + } else if (ch < 0x800) { + sizeInBytes += 2; // 11-bit character + } else if (ch < 0xd800 || ch > 0xdfff) { + sizeInBytes += 3; // 16-bit character + } else { + // malformed surrogate logic borrowed from okio.Utf8 + int low = i + 1 < len ? string.charAt(i + 1) : 0; + if (ch > 0xdbff || low < 0xdc00 || low > 0xdfff) { + sizeInBytes++; // A malformed surrogate, which yields '?'. + } else { + // A 21-bit character + sizeInBytes += 4; + i++; + } + } + } + return sizeInBytes; + } + + public Buffer writeAscii(String v) { + int length = v.length(); + for (int i = 0; i < length; i++) { + buf[pos++] = (byte) v.charAt(i); + } + return this; + } + + static boolean isAscii(String v) { + for (int i = 0, length = v.length(); i < length; i++) { + if (v.charAt(i) >= 0x80) { + return false; + } + } + return true; + } + + public Buffer writeUtf8(String v) { + if (isAscii(v)) return writeAscii(v); + byte[] temp = v.getBytes(UTF_8); + write(temp); + return this; + } + + /** + * Binary search for character width which favors matching lower numbers. + * + *

Adapted from okio.Buffer + */ + public static int asciiSizeInBytes(long v) { + if (v == 0) return 1; + if (v == Long.MIN_VALUE) return 20; + + boolean negative = false; + if (v < 0) { + v = -v; // making this positive allows us to compare using less-than + negative = true; + } + int width = + v < 100000000L + ? v < 10000L + ? v < 100L + ? v < 10L ? 1 : 2 + : v < 1000L ? 3 : 4 + : v < 1000000L + ? v < 100000L ? 5 : 6 + : v < 10000000L ? 7 : 8 + : v < 1000000000000L + ? v < 10000000000L + ? v < 1000000000L ? 9 : 10 + : v < 100000000000L ? 11 : 12 + : v < 1000000000000000L + ? v < 10000000000000L ? 13 + : v < 100000000000000L ? 14 : 15 + : v < 100000000000000000L + ? v < 10000000000000000L ? 16 : 17 + : v < 1000000000000000000L ? 18 : 19; + return negative ? width + 1 : width; // conditionally add room for negative sign + } + + public Buffer writeAscii(long v) { + if (v == 0) return writeByte('0'); + if (v == Long.MIN_VALUE) return writeAscii("-9223372036854775808"); + + int width = asciiSizeInBytes(v); + int pos = this.pos += width; // We write backwards from right to left. + + boolean negative = false; + if (v < 0) { + negative = true; + v = -v; // needs to be positive so we can use this for an array index + } + while (v != 0) { + int digit = (int) (v % 10); + buf[--pos] = DIGITS[digit]; + v /= 10; + } + if (negative) buf[--pos] = '-'; + return this; + } + + static final byte[] DIGITS = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}; + + byte[] toByteArray() { + //assert pos == buf.length; + return buf; + } +} diff --git a/zipkin/src/main/java/zipkin/internal/v2/internal/DependencyLinker.java b/zipkin/src/main/java/zipkin/internal/v2/internal/DependencyLinker.java new file mode 100644 index 00000000000..e29777984ce --- /dev/null +++ b/zipkin/src/main/java/zipkin/internal/v2/internal/DependencyLinker.java @@ -0,0 +1,273 @@ +/** + * Copyright 2015-2017 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package zipkin.internal.v2.internal; + +import com.google.auto.value.AutoValue; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.logging.Logger; +import javax.annotation.Nullable; +import zipkin.internal.v2.DependencyLink; +import zipkin.internal.v2.Span; +import zipkin.internal.v2.Span.Kind; + +import static java.util.logging.Level.FINE; + +/** + * This parses a span tree into dependency links used by Web UI. Ex. http://zipkin/dependency + * + *
This implementation traverses the tree, and only creates links between {@link Kind#SERVER + * server} spans. One exception is at the bottom of the trace tree. {@link Kind#CLIENT client} spans + * that record their {@link Span#remoteEndpoint()} are included, as this accounts for uninstrumented + * services. Spans with {@link Span#kind()} unset, but {@link Span#remoteEndpoint()} set are treated + * the same as client spans. + */ +public final class DependencyLinker { + private final Logger logger; + private final Map callCounts = new LinkedHashMap<>(); + private final Map errorCounts = new LinkedHashMap<>(); + + public DependencyLinker() { + this(Logger.getLogger(DependencyLinker.class.getName())); + } + + DependencyLinker(Logger logger) { + this.logger = logger; + } + + static final Node.MergeFunction MERGE_RPC = new MergeRpc(); + + static final class MergeRpc implements Node.MergeFunction { + @Override public Span merge(@Nullable Span left, @Nullable Span right) { + if (left == null) return right; + if (right == null) return left; + if (left.kind() == null) { + return copyError(left, right); + } + if (right.kind() == null) { + return copyError(right, left); + } + Span server = left.kind() == Kind.SERVER ? left : right; + Span client = left == server ? right : left; + if (server.remoteServiceName() != null) { + return copyError(client, server); + } + return copyError(client, server).toBuilder().remoteEndpoint(client.localEndpoint()).build(); + } + + static Span copyError(Span maybeError, Span result) { + String error = maybeError.tags().get("error"); + if (error != null) { + return result.toBuilder().putTag("error", error).build(); + } + return result; + } + } + + /** + * @param spans spans where all spans have the same trace id + */ + public DependencyLinker putTrace(Iterator spans) { + if (!spans.hasNext()) return this; + + Span first = spans.next(); + Node.TreeBuilder builder = new Node.TreeBuilder<>(logger, MERGE_RPC, first.traceId()); + builder.addNode(first.parentId(), first.id(), first); + while (spans.hasNext()) { + Span next = spans.next(); + builder.addNode(next.parentId(), next.id(), next); + } + Node tree = builder.build(); + + if (logger.isLoggable(FINE)) logger.fine("traversing trace tree, breadth-first"); + for (Iterator> i = tree.traverse(); i.hasNext(); ) { + Node current = i.next(); + Span currentSpan = current.value(); + if (logger.isLoggable(FINE)) { + logger.fine("processing " + currentSpan); + } + if (current.isSyntheticRootForPartialTree()) { + logger.fine("skipping synthetic node for broken span tree"); + continue; + } + + Kind kind = currentSpan.kind(); + if (Kind.CLIENT.equals(kind) && !current.children().isEmpty()) { + logger.fine("deferring link to rpc child span"); + continue; + } + + String serviceName = currentSpan.localServiceName(); + String remoteServiceName = currentSpan.remoteServiceName(); + if (kind == null) { + // Treat unknown type of span as a client span if we know both sides + if (serviceName != null && remoteServiceName != null) { + kind = Kind.CLIENT; + } else { + logger.fine("non-rpc span; skipping"); + continue; + } + } + + String child; + String parent; + switch (kind) { + case SERVER: + case CONSUMER: + child = serviceName; + parent = remoteServiceName; + if (current == tree) { // we are the root-most span. 
+ if (parent == null) { + logger.fine("root's peer is unknown; skipping"); + continue; + } + } + break; + case CLIENT: + case PRODUCER: + parent = serviceName; + child = remoteServiceName; + break; + default: + logger.fine("unknown kind; skipping"); + continue; + } + + boolean isError = currentSpan.tags().containsKey("error"); + if (kind == Kind.PRODUCER || kind == Kind.CONSUMER) { + if (parent == null || child == null) { + logger.fine("cannot link messaging span to its broker; skipping"); + } else { + addLink(parent, child, isError); + } + continue; + } + + if (logger.isLoggable(FINE) && parent == null) { + logger.fine("cannot determine parent, looking for first server ancestor"); + } + + Span rpcAncestor = findRpcAncestor(current); + String rpcAncestorName; + if (rpcAncestor != null && (rpcAncestorName = rpcAncestor.localServiceName()) != null) { + // Some users accidentally put the remote service name on client annotations. + // Check for this and backfill a link from the nearest remote to that service as necessary. + if (kind == Kind.CLIENT && serviceName != null && !rpcAncestorName.equals(serviceName)) { + logger.fine("detected missing link to client span"); + addLink(rpcAncestorName, serviceName, false); // we don't know if there's an error here + } + + // Local spans may be between the current node and its remote parent + if (parent == null) parent = rpcAncestorName; + + // When an RPC is split between spans, we skip the child (server side). If our parent is a + // client, we need to check it for errors. + if (!isError && Kind.CLIENT.equals(rpcAncestor.kind()) && + currentSpan.parentId() != null && currentSpan.parentId().equals(rpcAncestor.id())) { + isError = rpcAncestor.tags().containsKey("error"); + } + } + + if (parent == null || child == null) { + logger.fine("cannot find server ancestor; skipping"); + continue; + } + + addLink(parent, child, isError); + } + return this; + } + + Span findRpcAncestor(Node current) { + Node ancestor = current.parent(); + while (ancestor != null) { + if (logger.isLoggable(FINE)) { + logger.fine("processing ancestor " + ancestor.value()); + } + if (!ancestor.isSyntheticRootForPartialTree()) { + Span maybeRemote = ancestor.value(); + if (maybeRemote.kind() != null) return maybeRemote; + } + ancestor = ancestor.parent(); + } + return null; + } + + void addLink(String parent, String child, boolean isError) { + if (logger.isLoggable(FINE)) { + logger.fine("incrementing " + (isError ? "error " : "") + "link " + parent + " -> " + child); + } + Pair key = Pair.of(parent, child); + if (callCounts.containsKey(key)) { + callCounts.put(key, callCounts.get(key) + 1); + } else { + callCounts.put(key, 1L); + } + if (!isError) return; + if (errorCounts.containsKey(key)) { + errorCounts.put(key, errorCounts.get(key) + 1); + } else { + errorCounts.put(key, 1L); + } + } + + public List link() { + return link(callCounts, errorCounts); + } + + /** links are merged by mapping to parent/child and summing corresponding links */ + public static List merge(Iterable in) { + Map callCounts = new LinkedHashMap<>(); + Map errorCounts = new LinkedHashMap<>(); + + for (DependencyLink link : in) { + Pair parentChild = Pair.of(link.parent(), link.child()); + long callCount = callCounts.containsKey(parentChild) ? callCounts.get(parentChild) : 0L; + callCount += link.callCount(); + callCounts.put(parentChild, callCount); + long errorCount = errorCounts.containsKey(parentChild) ? 
errorCounts.get(parentChild) : 0L; + errorCount += link.errorCount(); + errorCounts.put(parentChild, errorCount); + } + + return link(callCounts, errorCounts); + } + + static List link(Map callCounts, + Map errorCounts) { + List result = new ArrayList<>(callCounts.size()); + for (Map.Entry entry : callCounts.entrySet()) { + Pair parentChild = entry.getKey(); + result.add(DependencyLink.newBuilder() + .parent(parentChild.left()) + .child(parentChild.right()) + .callCount(entry.getValue()) + .errorCount(errorCounts.containsKey(parentChild) ? errorCounts.get(parentChild) : 0L) + .build()); + } + return result; + } + + @AutoValue + static abstract class Pair { + static Pair of(String left, String right) { + return new AutoValue_DependencyLinker_Pair(left, right); + } + abstract String left(); + abstract String right(); + } +} diff --git a/zipkin/src/main/java/zipkin/internal/v2/internal/JsonCodec.java b/zipkin/src/main/java/zipkin/internal/v2/internal/JsonCodec.java new file mode 100644 index 00000000000..25852e83bbe --- /dev/null +++ b/zipkin/src/main/java/zipkin/internal/v2/internal/JsonCodec.java @@ -0,0 +1,169 @@ +/** + * Copyright 2015-2017 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package zipkin.internal.v2.internal; + +import com.google.gson.stream.JsonReader; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.Charset; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; + +import static java.lang.String.format; + +/** + * This explicitly constructs instances of model classes via manual parsing for a number of + * reasons. + * + *
  • Eliminates the need to keep separate model classes for thrift vs json
  • Avoids magic field initialization, which can miss constructor guards
  • Allows us to safely re-use the json form in toString methods
  • Encourages logic to be based on the thrift shape of objects
  • Ensures the order and naming of the fields in json is stable
+ *
There is the up-front cost of creating this, and maintenance of this to consider. However, + * this should be easy to justify as these objects don't change much at all. + */ +public final class JsonCodec { + static final Charset UTF_8 = Charset.forName("UTF-8"); + + public interface JsonReaderAdapter { + T fromJson(JsonReader reader) throws IOException; + } + + public static T read(JsonReaderAdapter adapter, byte[] bytes) { + if (bytes.length == 0) throw new IllegalArgumentException("Empty input reading " + adapter); + try { + return adapter.fromJson(jsonReader(bytes)); + } catch (Exception e) { + throw exceptionReading(adapter.toString(), bytes, e); + } + } + + public static List readList(JsonReaderAdapter adapter, byte[] bytes) { + if (bytes.length == 0) { + throw new IllegalArgumentException("Empty input reading List<" + adapter + ">"); + } + JsonReader reader = jsonReader(bytes); + List result; + try { + reader.beginArray(); + result = reader.hasNext() ? new LinkedList<>() : Collections.emptyList(); + while (reader.hasNext()) result.add(adapter.fromJson(reader)); + reader.endArray(); + return result; + } catch (Exception e) { + throw exceptionReading("List<" + adapter + ">", bytes, e); + } + } + + static JsonReader jsonReader(byte[] bytes) { + return new JsonReader(new InputStreamReader(new ByteArrayInputStream(bytes), UTF_8)); + } + + static int sizeInBytes(Buffer.Writer writer, List value) { + int length = value.size(); + int sizeInBytes = 2; // [] + if (length > 1) sizeInBytes += length - 1; // comma to join elements + for (int i = 0; i < length; i++) { + sizeInBytes += writer.sizeInBytes(value.get(i)); + } + return sizeInBytes; + } + + /** Inability to encode is a programming bug. */ + public static byte[] write(Buffer.Writer writer, T value) { + Buffer b = new Buffer(writer.sizeInBytes(value)); + try { + writer.write(value, b); + } catch (RuntimeException e) { + byte[] bytes = b.toByteArray(); + int lengthWritten = bytes.length; + for (int i = 0; i < bytes.length; i++) { + if (bytes[i] == 0) { + lengthWritten = i; + break; + } + } + + final byte[] bytesWritten; + if (lengthWritten == bytes.length) { + bytesWritten = bytes; + } else { + bytesWritten = new byte[lengthWritten]; + System.arraycopy(bytes, 0, bytesWritten, 0, lengthWritten); + } + + String written = new String(bytesWritten, UTF_8); + // Don't use value directly in the message, as its toString might be implemented using this + // method. If that's the case, we'd stack overflow. Instead, emit what we've written so far. + String message = format( + "Bug found using %s to write %s as json. 
Wrote %s/%s bytes: %s", + writer.getClass().getSimpleName().replace("AutoValue_", ""), + value.getClass().getSimpleName(), lengthWritten, bytes.length, written); + throw Platform.get().assertionError(message, e); + } + return b.toByteArray(); + } + + public static byte[] writeList(Buffer.Writer writer, List value) { + if (value.isEmpty()) return new byte[] {'[', ']'}; + Buffer result = new Buffer(sizeInBytes(writer, value)); + writeList(writer, value, result); + return result.toByteArray(); + } + + public static void writeList(Buffer.Writer writer, List value, Buffer b) { + b.writeByte('['); + for (int i = 0, length = value.size(); i < length; ) { + writer.write(value.get(i++), b); + if (i < length) b.writeByte(','); + } + b.writeByte(']'); + } + + public static byte[] writeNestedList(Buffer.Writer writer, List> traces) { + // Get the encoded size of the nested list so that we don't need to grow the buffer + int length = traces.size(); + int sizeInBytes = 2; // [] + if (length > 1) sizeInBytes += length - 1; // comma to join elements + + for (int i = 0; i < length; i++) { + List spans = traces.get(i); + int jLength = spans.size(); + sizeInBytes += 2; // [] + if (jLength > 1) sizeInBytes += jLength - 1; // comma to join elements + for (int j = 0; j < jLength; j++) { + sizeInBytes += writer.sizeInBytes(spans.get(j)); + } + } + + Buffer out = new Buffer(sizeInBytes); + out.writeByte('['); // start list of traces + for (int i = 0; i < length; i++) { + writeList(writer, traces.get(i), out); + if (i + 1 < length) out.writeByte(','); + } + out.writeByte(']'); // stop list of traces + return out.toByteArray(); + } + + static IllegalArgumentException exceptionReading(String type, byte[] bytes, Exception e) { + String cause = e.getMessage() == null ? "Error" : e.getMessage(); + if (cause.indexOf("malformed") != -1) cause = "Malformed"; + String message = format("%s reading %s from json: %s", cause, type, new String(bytes, UTF_8)); + throw new IllegalArgumentException(message, e); + } +} diff --git a/zipkin/src/main/java/zipkin/internal/v2/internal/JsonEscaper.java b/zipkin/src/main/java/zipkin/internal/v2/internal/JsonEscaper.java new file mode 100644 index 00000000000..c087c9b60ee --- /dev/null +++ b/zipkin/src/main/java/zipkin/internal/v2/internal/JsonEscaper.java @@ -0,0 +1,103 @@ +/** + * Copyright 2015-2017 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ +package zipkin.internal.v2.internal; + +public final class JsonEscaper { + /** Exposed for ElasticSearch HttpBulkIndexer */ + public static String jsonEscape(String v) { + int afterReplacement = 0; + int length = v.length(); + StringBuilder builder = null; + for (int i = 0; i < length; i++) { + char c = v.charAt(i); + String replacement; + if (c < 0x80) { + replacement = REPLACEMENT_CHARS[c]; + if (replacement == null) continue; + } else if (c == '\u2028') { + replacement = U2028; + } else if (c == '\u2029') { + replacement = U2029; + } else { + continue; + } + if (afterReplacement < i) { // write characters between the last replacement and now + if (builder == null) builder = new StringBuilder(); + builder.append(v, afterReplacement, i); + } + if (builder == null) builder = new StringBuilder(); + builder.append(replacement); + afterReplacement = i + 1; + } + String escaped; + if (builder == null) { // then we didn't escape anything + escaped = v; + } else { + if (afterReplacement < length) { + builder.append(v, afterReplacement, length); + } + escaped = builder.toString(); + } + return escaped; + } + + /* + * Escaping logic adapted from Moshi, which we couldn't use due to language level + * + * From RFC 7159, "All Unicode characters may be placed within the + * quotation marks except for the characters that must be escaped: + * quotation mark, reverse solidus, and the control characters + * (U+0000 through U+001F)." + * + * We also escape '\u2028' and '\u2029', which JavaScript interprets as + * newline characters. This prevents eval() from failing with a syntax + * error. http://code.google.com/p/google-gson/issues/detail?id=341 + */ + private static final String[] REPLACEMENT_CHARS; + + static { + REPLACEMENT_CHARS = new String[128]; + for (int i = 0; i <= 0x1f; i++) { + REPLACEMENT_CHARS[i] = String.format("\\u%04x", (int) i); + } + REPLACEMENT_CHARS['"'] = "\\\""; + REPLACEMENT_CHARS['\\'] = "\\\\"; + REPLACEMENT_CHARS['\t'] = "\\t"; + REPLACEMENT_CHARS['\b'] = "\\b"; + REPLACEMENT_CHARS['\n'] = "\\n"; + REPLACEMENT_CHARS['\r'] = "\\r"; + REPLACEMENT_CHARS['\f'] = "\\f"; + } + + private static final String U2028 = "\\u2028"; + private static final String U2029 = "\\u2029"; + + public static int jsonEscapedSizeInBytes(String v) { + boolean ascii = true; + int escapingOverhead = 0; + for (int i = 0, length = v.length(); i < length; i++) { + char c = v.charAt(i); + if (c == '\u2028' || c == '\u2029') { + escapingOverhead += 5; + } else if (c >= 0x80) { + ascii = false; + } else { + String maybeReplacement = REPLACEMENT_CHARS[c]; + if (maybeReplacement != null) escapingOverhead += maybeReplacement.length() - 1; + } + } + if (ascii) return v.length() + escapingOverhead; + return Buffer.utf8SizeInBytes(v) + escapingOverhead; + } +} diff --git a/zipkin/src/main/java/zipkin/internal/Node.java b/zipkin/src/main/java/zipkin/internal/v2/internal/Node.java similarity index 95% rename from zipkin/src/main/java/zipkin/internal/Node.java rename to zipkin/src/main/java/zipkin/internal/v2/internal/Node.java index 541a1384dac..93331a6354e 100644 --- a/zipkin/src/main/java/zipkin/internal/Node.java +++ b/zipkin/src/main/java/zipkin/internal/v2/internal/Node.java @@ -11,7 +11,7 @@ * or implied. See the License for the specific language governing permissions and limitations under * the License. 
*/ -package zipkin.internal; +package zipkin.internal.v2.internal; import java.util.ArrayDeque; import java.util.Collection; @@ -27,8 +27,6 @@ import static java.lang.String.format; import static java.util.logging.Level.FINE; -import static zipkin.internal.Util.checkArgument; -import static zipkin.internal.Util.checkNotNull; /** * Convenience type representing a tree. This is here because multiple facets in zipkin require @@ -58,12 +56,13 @@ public final class Node { } public Node value(V newValue) { - this.value = checkNotNull(newValue, "newValue"); + if (newValue == null) throw new NullPointerException("newValue == null"); + this.value = newValue; return this; } public Node addChild(Node child) { - checkArgument(child != this, "circular dependency on %s", this); + if (child == this) throw new IllegalArgumentException("circular dependency on " + this); child.parent = this; if (children.equals(Collections.emptyList())) children = new LinkedList<>(); children.add(child); @@ -125,12 +124,12 @@ interface MergeFunction { * * @param same type as {@link Node#value} */ - static final class TreeBuilder { + public static final class TreeBuilder { final Logger logger; final MergeFunction mergeFunction; final String traceId; - TreeBuilder(Logger logger, String traceId) { + public TreeBuilder(Logger logger, String traceId) { this(logger, FIRST_NOT_NULL, traceId); } diff --git a/zipkin/src/main/java/zipkin/internal/v2/internal/Platform.java b/zipkin/src/main/java/zipkin/internal/v2/internal/Platform.java index 0b83b052a41..eadd44a8e15 100644 --- a/zipkin/src/main/java/zipkin/internal/v2/internal/Platform.java +++ b/zipkin/src/main/java/zipkin/internal/v2/internal/Platform.java @@ -31,6 +31,12 @@ public RuntimeException uncheckedIOException(IOException e) { return new RuntimeException(e); } + public AssertionError assertionError(String message, Throwable cause) { + AssertionError error = new AssertionError(message); + error.initCause(cause); + throw error; + } + public static Platform get() { return PLATFORM; } @@ -49,7 +55,7 @@ static Platform findPlatform() { return Jre6.build(); } - static final class Jre8 extends Platform { + static final class Jre8 extends Jre7 { static Jre8 buildIfSupported() { // Find JRE 8 new types try { @@ -66,7 +72,7 @@ static Jre8 buildIfSupported() { } } - static final class Jre7 extends Platform { + static class Jre7 extends Platform { static Jre7 buildIfSupported() { // Find JRE 7 new types try { @@ -77,6 +83,11 @@ static Jre7 buildIfSupported() { } return null; } + + @IgnoreJRERequirement @Override + public AssertionError assertionError(String message, Throwable cause) { + return new AssertionError(message, cause); + } } static final class Jre6 extends Platform { diff --git a/zipkin/src/main/java/zipkin/internal/v2/storage/InMemoryStorage.java b/zipkin/src/main/java/zipkin/internal/v2/storage/InMemoryStorage.java index 29b215ab20a..c4108307164 100644 --- a/zipkin/src/main/java/zipkin/internal/v2/storage/InMemoryStorage.java +++ b/zipkin/src/main/java/zipkin/internal/v2/storage/InMemoryStorage.java @@ -28,10 +28,10 @@ import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; -import zipkin.DependencyLink; -import zipkin.internal.DependencyLinker; import zipkin.internal.v2.Call; +import zipkin.internal.v2.DependencyLink; import zipkin.internal.v2.Span; +import zipkin.internal.v2.internal.DependencyLinker; /** * Test storage component that keeps all spans in memory, accepting them on the calling thread. 
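A sketch of how the relocated DependencyLinker aggregates v2 spans into links, e.g. what a storage component like the in-memory one above would do for a dependencies query; the trace/span ids, service names, and error tag are made up, and the Span/Endpoint builder calls are assumed from elsewhere in this diff.

import java.util.List;
import zipkin.internal.v2.DependencyLink;
import zipkin.internal.v2.Endpoint;
import zipkin.internal.v2.Span;
import zipkin.internal.v2.internal.DependencyLinker;

import static java.util.Arrays.asList;

public class DependencyLinkerExample {
  public static void main(String[] args) {
    Endpoint frontend = Endpoint.newBuilder().serviceName("frontend").build();
    Endpoint backend = Endpoint.newBuilder().serviceName("backend").build();

    // A client span and the shared server span that joined it, as instrumentation reports them
    Span client = Span.newBuilder()
        .traceId("86154a4ba6e91385").id("4d1e00c0db9010db")
        .kind(Span.Kind.CLIENT)
        .localEndpoint(frontend).remoteEndpoint(backend)
        .build();
    Span server = Span.newBuilder()
        .traceId("86154a4ba6e91385").id("4d1e00c0db9010db")
        .kind(Span.Kind.SERVER).shared(true)
        .localEndpoint(backend).remoteEndpoint(frontend)
        .putTag("error", "500")
        .build();

    List<DependencyLink> links = new DependencyLinker()
        .putTrace(asList(client, server).iterator())
        .link();

    // Expected: one frontend -> backend link with callCount=1 and errorCount=1
    for (DependencyLink link : links) {
      System.out.println(link.parent() + " -> " + link.child()
          + " calls=" + link.callCount() + " errors=" + link.errorCount());
    }
  }
}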
diff --git a/zipkin/src/main/java/zipkin/internal/v2/storage/QueryRequest.java b/zipkin/src/main/java/zipkin/internal/v2/storage/QueryRequest.java index 9ec190fc36b..faee3bea4be 100644 --- a/zipkin/src/main/java/zipkin/internal/v2/storage/QueryRequest.java +++ b/zipkin/src/main/java/zipkin/internal/v2/storage/QueryRequest.java @@ -24,7 +24,6 @@ import java.util.Set; import javax.annotation.Nullable; import zipkin.internal.v2.Annotation; -import zipkin.internal.v2.Endpoint; import zipkin.internal.v2.Span; /** @@ -236,7 +235,6 @@ timestamp > endTs() * 1000) { Map annotationQueryRemaining = new LinkedHashMap<>(annotationQuery()); for (Span span : spans) { - Endpoint localEndpoint = span.localEndpoint(); String localServiceName = span.localServiceName(); if (localServiceName != null) serviceNames.add(localServiceName); diff --git a/zipkin/src/main/java/zipkin/internal/v2/storage/SpanStore.java b/zipkin/src/main/java/zipkin/internal/v2/storage/SpanStore.java index 25ef0d73316..9016da836a9 100644 --- a/zipkin/src/main/java/zipkin/internal/v2/storage/SpanStore.java +++ b/zipkin/src/main/java/zipkin/internal/v2/storage/SpanStore.java @@ -14,11 +14,10 @@ package zipkin.internal.v2.storage; import java.util.List; -import zipkin.DependencyLink; -import zipkin.Endpoint; import zipkin.internal.v2.Call; +import zipkin.internal.v2.DependencyLink; +import zipkin.internal.v2.Endpoint; import zipkin.internal.v2.Span; -import zipkin.storage.StorageComponent; /** * Queries data derived from {@link SpanConsumer}. @@ -31,8 +30,8 @@ public interface SpanStore { /** * Retrieves spans grouped by trace ID from the storage system with no ordering expectation. * - *
If {@link StorageComponent.Builder#strictTraceId(boolean)} is enabled, spans with the same - * 64-bit trace ID will be grouped together. + *
When strict trace ID is disabled, spans are grouped by the right-most 16 characters of the + * trace ID. */ Call>> getTraces(QueryRequest request); @@ -40,8 +39,8 @@ public interface SpanStore { * Retrieves spans that share a 128-bit trace id with no ordering expectation or empty if none are * found. * - *
When {@link StorageComponent.Builder#strictTraceId(boolean)} is true, spans with the same - * right-most 16 characters are returned even if the characters to the left are not. + *
When strict trace ID is disabled, spans with the same right-most 16 characters are returned + * even if the characters to the left are not. * *
Implementations should use {@link Span#normalizeTraceId(String)} to ensure consistency. * @@ -71,12 +70,11 @@ public interface SpanStore { * was 25 hours, the implementation would query against 2 buckets. * *
Some implementations parse spans from storage and call {@link - * zipkin.internal.DependencyLinker} to aggregate links. The reason is certain graph logic, such - * as skipping up the tree is difficult to implement as a storage query. + * zipkin.internal.v2.internal.DependencyLinker} to aggregate links. The reason is certain graph + * logic, such as skipping up the tree is difficult to implement as a storage query. * - *
There's no parameter to indicate how to handle mixed ID length: this operates the same as if - * {@link StorageComponent.Builder#strictTraceId(boolean)} was set to false. This ensures call - * counts are not incremented twice due to one hop downgrading from 128 to 64-bit trace IDs. + *
Spans are grouped by the right-most 16 characters of the trace ID. This ensures call counts + * are not incremented twice due to one hop downgrading from 128 to 64-bit trace IDs. * * @param endTs only return links from spans where {@link Span#timestamp} are at or before this * time in epoch milliseconds. diff --git a/zipkin/src/test/java/zipkin/collector/CollectorTest.java b/zipkin/src/test/java/zipkin/collector/CollectorTest.java index 75021fe57bd..abc8265d5b2 100644 --- a/zipkin/src/test/java/zipkin/collector/CollectorTest.java +++ b/zipkin/src/test/java/zipkin/collector/CollectorTest.java @@ -22,7 +22,7 @@ import zipkin.internal.V2SpanConverter; import zipkin.internal.V2StorageComponent; import zipkin.internal.v2.Span; -import zipkin.internal.v2.codec.BytesEncoder; +import zipkin.internal.v2.codec.SpanBytesEncoder; import zipkin.internal.v2.storage.SpanConsumer; import static java.util.Arrays.asList; @@ -84,7 +84,7 @@ public class CollectorTest { } @Test public void convertsSpan2Format() { - byte[] bytes = BytesEncoder.JSON.encodeList(asList(span2_1)); + byte[] bytes = SpanBytesEncoder.JSON.encodeList(asList(span2_1)); collector.acceptSpans(bytes, SpanDecoder.DETECTING_DECODER, NOOP); verify(collector).acceptSpans(bytes, SpanDecoder.DETECTING_DECODER, NOOP); @@ -106,7 +106,7 @@ abstract class WithSpan2 extends V2StorageComponent implements zipkin.storage.St collector = spy(Collector.builder(Collector.class) .storage(storage).build()); - byte[] bytes = BytesEncoder.JSON.encodeList(asList(span2_1)); + byte[] bytes = SpanBytesEncoder.JSON.encodeList(asList(span2_1)); collector.acceptSpans(bytes, SpanDecoder.DETECTING_DECODER, NOOP); verify(collector, never()).isSampled(any(zipkin.Span.class)); // skips v1 processing diff --git a/zipkin/src/test/java/zipkin/internal/BufferTest.java b/zipkin/src/test/java/zipkin/internal/BufferTest.java index 3c2e0f24629..abfc237479d 100644 --- a/zipkin/src/test/java/zipkin/internal/BufferTest.java +++ b/zipkin/src/test/java/zipkin/internal/BufferTest.java @@ -13,16 +13,11 @@ */ package zipkin.internal; -import java.io.IOException; import java.io.UnsupportedEncodingException; -import java.net.Inet6Address; -import java.net.UnknownHostException; import java.util.Arrays; import org.junit.Test; import static org.assertj.core.api.Assertions.assertThat; -import static zipkin.internal.Buffer.asciiSizeInBytes; -import static zipkin.internal.Buffer.jsonEscapedSizeInBytes; import static zipkin.internal.Util.UTF_8; public class BufferTest { @@ -42,148 +37,19 @@ public class BufferTest { } /** Uses test data and codepoint wrapping trick from okhttp3.FormBodyTest */ - @Test public void utf8_malformed() { + @Test public void utf8SizeInBytes_malformed() { for (int codepoint : Arrays.asList(0xD800, 0xDFFF, 0xD83D)) { String test = new String(new int[]{'a', codepoint, 'c'}, 0, 3); assertThat(Buffer.utf8SizeInBytes(test)) .isEqualTo(3); - assertThat(new Buffer(3).writeUtf8(test).toByteArray()) - .containsExactly('a', '?', 'c'); } } - @Test public void emoji() { + @Test public void utf8SizeInBytes_Emoji() { byte[] emojiBytes = {(byte) 0xF0, (byte) 0x9F, (byte) 0x98, (byte) 0x81}; String emoji = new String(emojiBytes, UTF_8); assertThat(Buffer.utf8SizeInBytes(emoji)) - .isEqualTo(emojiBytes.length); - assertThat(new Buffer(emojiBytes.length).writeUtf8(emoji).toByteArray()) - .isEqualTo(emojiBytes); - } - - // Test borrowed from guava InetAddressesTest - @Test public void ipv6() throws UnknownHostException { - assertThat(writeIpV6("1:2:3:4:5:6:7:8")) - 
.isEqualTo("1:2:3:4:5:6:7:8"); - assertThat(writeIpV6("2001:0:0:4:0000:0:0:8")) - .isEqualTo("2001:0:0:4::8"); - assertThat(writeIpV6("2001:0:0:4:5:6:7:8")) - .isEqualTo("2001::4:5:6:7:8"); - assertThat(writeIpV6("2001:0:3:4:5:6:7:8")) - .isEqualTo("2001::3:4:5:6:7:8"); - assertThat(writeIpV6("0:0:3:0:0:0:0:ffff")) - .isEqualTo("0:0:3::ffff"); - assertThat(writeIpV6("0:0:0:4:0:0:0:ffff")) - .isEqualTo("::4:0:0:0:ffff"); - assertThat(writeIpV6("0:0:0:0:5:0:0:ffff")) - .isEqualTo("::5:0:0:ffff"); - assertThat(writeIpV6("1:0:0:4:0:0:7:8")) - .isEqualTo("1::4:0:0:7:8"); - assertThat(writeIpV6("0:0:0:0:0:0:0:0")) - .isEqualTo("::"); - assertThat(writeIpV6("0:0:0:0:0:0:0:1")) - .isEqualTo("::1"); - assertThat(writeIpV6("2001:0658:022a:cafe::")) - .isEqualTo("2001:658:22a:cafe::"); - assertThat(writeIpV6("::1.2.3.4")) - .isEqualTo("::102:304"); - } - - static String writeIpV6(String address) throws UnknownHostException { - byte[] ipv6 = Inet6Address.getByName(address).getAddress(); - byte[] buffered = new Buffer(Buffer.ipv6SizeInBytes(ipv6)).writeIpV6(ipv6).toByteArray(); - return new String(buffered, UTF_8); - } - - @Test - public void asciiSizeInBytes_long() throws IOException { - assertThat(asciiSizeInBytes(0L)).isEqualTo(1); - assertThat(asciiSizeInBytes(-1005656679588439279L)).isEqualTo(20); - assertThat(asciiSizeInBytes(-9223372036854775808L /* Long.MIN_VALUE */)).isEqualTo(20); - assertThat(asciiSizeInBytes(123456789L)).isEqualTo(9); - } - - @Test - public void writeAscii_long() throws IOException { - assertThat(writeAscii(-1005656679588439279L)) - .isEqualTo("-1005656679588439279"); - assertThat(writeAscii(0L)) - .isEqualTo("0"); - assertThat(writeAscii(-9223372036854775808L /* Long.MIN_VALUE */)) - .isEqualTo("-9223372036854775808"); - assertThat(writeAscii(123456789L)) - .isEqualTo("123456789"); - } - - static String writeAscii(long v) { - byte[] buffered = new Buffer(Buffer.asciiSizeInBytes(v)).writeAscii(v).toByteArray(); - return new String(buffered, UTF_8); - } - - @Test - public void jsonEscapedSizeInBytes_string() throws IOException { - assertThat(jsonEscapedSizeInBytes(new String(new char[] {0, 'a', 1}))) - .isEqualTo(13); - assertThat(jsonEscapedSizeInBytes(new String(new char[] {'"', '\\', '\t', '\b'}))) - .isEqualTo(8); - assertThat(jsonEscapedSizeInBytes(new String(new char[] {'\n', '\r', '\f'}))) - .isEqualTo(6); - assertThat(jsonEscapedSizeInBytes("\u2028 and \u2029")) - .isEqualTo(17); - assertThat(jsonEscapedSizeInBytes("\"foo")) - .isEqualTo(5); - } - - @Test - public void jsonEscapedSizeInBytes_bytes() throws IOException { - assertThat(jsonEscapedSizeInBytes(new byte[] {0, 'a', 1})) - .isEqualTo(13); - assertThat(jsonEscapedSizeInBytes(new byte[] {'"', '\\', '\t', '\b'})) - .isEqualTo(8); - assertThat(jsonEscapedSizeInBytes(new byte[] {'\n', '\r', '\f'})) - .isEqualTo(6); - assertThat(jsonEscapedSizeInBytes("\u2028 and \u2029".getBytes(UTF_8))) - .isEqualTo(17); - assertThat(jsonEscapedSizeInBytes("\"foo".getBytes(UTF_8))) - .isEqualTo(5); - } - - @Test - public void writeJsonEscaped_string() throws IOException { - assertThat(writeJsonEscaped(new String(new char[] {0, 'a', 1}))) - .isEqualTo("\\u0000a\\u0001"); - assertThat(writeJsonEscaped(new String(new char[] {'"', '\\', '\t', '\b'}))) - .isEqualTo("\\\"\\\\\\t\\b"); - assertThat(writeJsonEscaped(new String(new char[] {'\n', '\r', '\f'}))) - .isEqualTo("\\n\\r\\f"); - assertThat(writeJsonEscaped("\u2028 and \u2029")) - .isEqualTo("\\u2028 and \\u2029"); - assertThat(writeJsonEscaped("\"foo")) - .isEqualTo("\\\"foo"); 
- } - - @Test - public void writeJsonEscaped_bytes() throws IOException { - assertThat(writeJsonEscaped(new byte[] {0, 'a', 1})) - .isEqualTo("\\u0000a\\u0001"); - assertThat(writeJsonEscaped(new byte[] {'"', '\\', '\t', '\b'})) - .isEqualTo("\\\"\\\\\\t\\b"); - assertThat(writeJsonEscaped(new byte[] {'\n', '\r', '\f'})) - .isEqualTo("\\n\\r\\f"); - assertThat(writeJsonEscaped("\u2028 and \u2029".getBytes(UTF_8))) - .isEqualTo("\\u2028 and \\u2029"); - assertThat(writeJsonEscaped("\"foo".getBytes(UTF_8))) - .isEqualTo("\\\"foo"); - } - - static String writeJsonEscaped(String v) { - byte[] buffered = new Buffer(jsonEscapedSizeInBytes(v)).writeJsonEscaped(v).toByteArray(); - return new String(buffered, UTF_8); - } - - static String writeJsonEscaped(byte[] v) { - byte[] buffered = new Buffer(jsonEscapedSizeInBytes(v)).writeJsonEscaped(v).toByteArray(); - return new String(buffered, UTF_8); + .isEqualTo(4); } // Test creating Buffer for a long string diff --git a/zipkin/src/test/java/zipkin/internal/DependencyLinkerTest.java b/zipkin/src/test/java/zipkin/internal/DependencyLinkerTest.java deleted file mode 100644 index d884577fcba..00000000000 --- a/zipkin/src/test/java/zipkin/internal/DependencyLinkerTest.java +++ /dev/null @@ -1,568 +0,0 @@ -/** - * Copyright 2015-2017 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin.internal; - -import java.util.ArrayList; -import java.util.List; -import java.util.logging.Level; -import java.util.logging.Logger; -import java.util.stream.Collectors; -import javax.annotation.Nullable; -import org.junit.Test; -import zipkin.DependencyLink; -import zipkin.TestObjects; -import zipkin.internal.v2.Endpoint; -import zipkin.internal.v2.Span; -import zipkin.internal.v2.Span.Kind; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin.Constants.ERROR; -import static zipkin.internal.Util.toLowerHex; - -public class DependencyLinkerTest { - List messages = new ArrayList<>(); - - Logger logger = new Logger("", null) { - { - setLevel(Level.ALL); - } - - @Override public void log(Level level, String msg) { - assertThat(level).isEqualTo(Level.FINE); - messages.add(msg); - } - }; - - @Test - public void baseCase() { - assertThat(new DependencyLinker().link()).isEmpty(); - } - - @Test - public void linksSpans() { - assertThat(new DependencyLinker().putTrace(TestObjects.TRACE).link()).containsExactly( - DependencyLink.builder().parent("web").child("app").callCount(1L).build(), - DependencyLink.builder().parent("app").child("db").callCount(1L).errorCount(1L).build() - ); - } - - @Test - public void dropsSelfReferencingSpans() { - List trace = TestObjects.TRACE.stream() - .map(s -> s.toBuilder().parentId(s.parentId != null ? 
s.id : null).build()) - .collect(Collectors.toList()); - - assertThat(new DependencyLinker(logger).putTrace(trace).link()).isEmpty(); - - assertThat(messages).contains( - "skipping circular dependency: traceId=f66529c8cc356aa0, spanId=93288b464457044e", - "skipping circular dependency: traceId=f66529c8cc356aa0, spanId=71e62981f1e136a7" - ); - } - - @Test - public void messagingSpansDontLinkWithoutBroker_consumer() { - List trace = asList( - span2(1L, null, 1L, Kind.PRODUCER, "producer", null, false), - span2(1L, 1L, 2L, Kind.CONSUMER, "consumer", "kafka", false) - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( - DependencyLink.builder().parent("kafka").child("consumer").callCount(1L).build() - ); - } - - @Test - public void messagingSpansDontLinkWithoutBroker_producer() { - List trace = asList( - span2(1L, null, 1L, Kind.PRODUCER, "producer", "kafka", false), - span2(1L, 1L, 2L, Kind.CONSUMER, "consumer", null, false) - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( - DependencyLink.builder().parent("producer").child("kafka").callCount(1L).build() - ); - } - - @Test - public void messagingWithBroker_both_sides_same() { - List trace = asList( - span2(1L, null, 1L, Kind.PRODUCER, "producer", "kafka", false), - span2(1L, 1L, 2L, Kind.CONSUMER, "consumer", "kafka", false) - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( - DependencyLink.builder().parent("producer").child("kafka").callCount(1L).build(), - DependencyLink.builder().parent("kafka").child("consumer").callCount(1L).build() - ); - } - - @Test - public void messagingWithBroker_different() { - List trace = asList( - span2(1L, null, 1L, Kind.PRODUCER, "producer", "kafka1", false), - span2(1L, 1L, 2L, Kind.CONSUMER, "consumer", "kafka2", false) - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( - DependencyLink.builder().parent("producer").child("kafka1").callCount(1L).build(), - DependencyLink.builder().parent("kafka2").child("consumer").callCount(1L).build() - ); - } - - /** Shows we don't assume there's a direct link between producer and consumer. */ - @Test - public void messagingWithoutBroker_noLinks() { - List trace = asList( - span2(1L, null, 1L, Kind.PRODUCER, "producer", null, false), - span2(1L, 1L, 2L, Kind.CONSUMER, "consumer", null, false) - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()) - .isEmpty(); - } - - /** When a server is the child of a producer span, make a link as it is really an RPC */ - @Test - public void producerLinksToServer_childSpan() { - List trace = asList( - span2(1L, null, 1L, Kind.PRODUCER, "producer", null, false), - span2(1L, 1L, 2L, Kind.SERVER, "server", null, false) - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( - DependencyLink.builder().parent("producer").child("server").callCount(1L).build() - ); - } - - /** - * Servers most often join a span vs create a child. Make sure this works when a producer is used - * instead of a client. 
- */ - @Test - public void producerLinksToServer_sameSpan() { - List trace = asList( - span2(1L, null, 1L, Kind.PRODUCER, "producer", null, false), - span2(1L, null, 1L, Kind.SERVER, "server", null, false) - .toBuilder().shared(true).build() - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( - DependencyLink.builder().parent("producer").child("server").callCount(1L).build() - ); - } - - /** - * Client might be used for historical reasons instead of PRODUCER. Don't link as the server-side - * is authoritative. - */ - @Test - public void clientDoesntLinkToConsumer_child() { - List trace = asList( - span2(1L, null, 1L, Kind.CLIENT, "client", null, false), - span2(1L, 1L, 2L, Kind.CONSUMER, "consumer", null, false) - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()) - .isEmpty(); - } - - /** - * A root span can be a client-originated trace or a server receipt which knows its peer. In these - * cases, the peer is known and kind establishes the direction. - */ - @Test - public void linksSpansDirectedByKind() { - List validRootSpans = asList( - span2(1L, null, 1L, Kind.SERVER, "server", "client", false), - span2(1L, null, 1L, Kind.CLIENT, "client", "server", false) - .toBuilder().shared(true).build() - ); - - for (Span span : validRootSpans) { - assertThat(new DependencyLinker() - .putTrace(asList(span).iterator()).link()) - .containsOnly(DependencyLink.create("client", "server", 1L)); - } - } - - @Test - public void callsAgainstTheSameLinkIncreasesCallCount_span() { - List trace = asList( - span2(1L, null, 1L, Kind.SERVER, "client", null, false), - span2(1L, 1L, 2L, Kind.CLIENT, null, "server", false), - span2(1L, 1L, 3L, Kind.CLIENT, null, "server", false) - ); - - assertThat(new DependencyLinker() - .putTrace(trace.iterator()).link()) - .containsOnly(DependencyLink.create("client", "server", 2L)); - } - - @Test - public void callsAgainstTheSameLinkIncreasesCallCount_trace() { - List trace = asList( - span2(1L, null, 1L, Kind.SERVER, "client", null, false), - span2(1L, 1L, 2L, Kind.CLIENT, null, "server", false) - ); - - assertThat(new DependencyLinker() - .putTrace(trace.iterator()) - .putTrace(trace.iterator()).link()) - .containsOnly(DependencyLink.create("client", "server", 2L)); - } - - /** - * Spans don't always include both the client and server service. When you know the kind, you can - * link these without duplicating call count. 
- */ - @Test - public void singleHostSpansResultInASingleCallCount() { - List trace = asList( - span2(3L, null, 3L, Kind.CLIENT, "client", null, false), - span2(3L, 3L, 4L, Kind.SERVER, "server", null, false) - ); - - assertThat(new DependencyLinker() - .putTrace(trace.iterator()).link()) - .containsOnly(DependencyLink.create("client", "server", 1L)); - } - - @Test - public void singleHostSpansResultInASingleErrorCount() { - List trace = asList( - span2(3L, null, 3L, Kind.CLIENT, "client", null, true), - span2(3L, 3L, 4L, Kind.SERVER, "server", null, true) - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( - DependencyLink.builder().parent("client").child("server").callCount(1L).errorCount(1L).build() - ); - } - - @Test - public void singleHostSpansResultInASingleErrorCount_sameId() { - List trace = asList( - span2(3L, null, 3L, Kind.CLIENT, "client", null, true), - span2(3L, null, 3L, Kind.SERVER, "server", null, true) - .toBuilder().shared(true).build() - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( - DependencyLink.builder().parent("client").child("server").callCount(1L).errorCount(1L).build() - ); - } - - @Test - public void singleHostSpansResultInASingleCallCount_defersNameToServer() { - List trace = asList( - span2(1L, null, 1L, Kind.CLIENT, "client", "server", false), - span2(1L, 1L, 2L, Kind.SERVER, "server", null, false) - ); - - assertThat(new DependencyLinker(logger) - .putTrace(trace.iterator()).link()) - .containsOnly(DependencyLink.create("client", "server", 1L)); - - assertThat(messages).contains("deferring link to rpc child span"); - messages.clear(); - } - - @Test - public void singleHostSpans_multipleChildren() { - List trace = asList( - span2(4L, null, 4L, Kind.CLIENT, "client", null, false), - span2(4L, 4L, 5L, Kind.SERVER, "server", "client", true), - span2(4L, 4L, 6L, Kind.SERVER, "server", "client", false) - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( - DependencyLink.builder().parent("client").child("server").callCount(2L).errorCount(1L).build() - ); - } - - @Test - public void singleHostSpans_multipleChildren_defersNameToServer() { - List trace = asList( - span2(1L, null, 1L, Kind.CLIENT, "client", "server", false), - span2(1L, 1L, 2L, Kind.SERVER, "server", null, false), - span2(1L, 1L, 3L, Kind.SERVER, "server", null, false) - ); - - assertThat(new DependencyLinker(logger) - .putTrace(trace.iterator()).link()) - .containsOnly(DependencyLink.create("client", "server", 2L)); - - assertThat(messages).contains("deferring link to rpc child span"); - } - - /** - * Spans are sometimes intermediated by an unknown type of span. Prefer the nearest server when - * accounting for them. 
- */ - @Test - public void intermediatedClientSpansMissingLocalServiceNameLinkToNearestServer() { - List trace = asList( - span2(1L, null, 1L, Kind.SERVER, "client", null, false), - span2(1L, 1L, 2L, null, null, null, false), - // possibly a local fan-out span - span2(1L, 2L, 3L, Kind.CLIENT, "server", null, false), - span2(1L, 2L, 4L, Kind.CLIENT, "server", null, false) - ); - - assertThat(new DependencyLinker() - .putTrace(trace.iterator()).link()) - .containsOnly(DependencyLink.create("client", "server", 2L)); - } - - @Test - public void errorsOnUninstrumentedLinks() { - List trace = asList( - span2(1L, null, 1L, Kind.SERVER, "client", null, false), - span2(1L, 1L, 2L, null, null, null, false), - // there's no remote here, so we shouldn't see any error count - span2(1L, 2L, 3L, Kind.CLIENT, "server", null, true), - span2(1L, 2L, 4L, Kind.CLIENT, "server", null, true) - ); - - assertThat(new DependencyLinker() - .putTrace(trace.iterator()).link()) - .containsOnly(DependencyLink.create("client", "server", 2L)); - } - - @Test - public void errorsOnInstrumentedLinks() { - List trace = asList( - span2(1L, null, 1L, Kind.SERVER, "foo", null, false), - span2(1L, 1L, 2L, null, null, null, false), - span2(1L, 2L, 3L, Kind.CLIENT, "bar", "baz", true), - span2(1L, 2L, 4L, Kind.CLIENT, "bar", "baz", false) - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( - DependencyLink.builder().parent("foo").child("bar").callCount(2L).build(), - DependencyLink.builder().parent("bar").child("baz").callCount(2L).errorCount(1L).build() - ); - } - - @Test - public void linkWithErrorIsLogged() { - List trace = asList( - span2(1L, 2L, 3L, Kind.CLIENT, "foo", "bar", true) - ); - new DependencyLinker(logger).putTrace(trace.iterator()).link(); - - assertThat(messages).contains( - "incrementing error link foo -> bar" - ); - } - - /** Tag indicates a failed span, not an annotation */ - @Test - public void annotationNamedErrorDoesntIncrementErrorCount() { - List trace = asList( - span2(1L, 2L, 3L, Kind.CLIENT, "foo", "bar", false) - .toBuilder().addAnnotation(1L, ERROR).build() - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( - DependencyLink.builder().parent("foo").child("bar").callCount(1L).build() - ); - } - - /** A loopback span is direction-agnostic, so can be linked properly regardless of kind. 
*/ - @Test - public void linksLoopbackSpans() { - List validRootSpans = asList( - span2(1L, null, 1L, Kind.SERVER, "service", "service", false), - span2(2L, null, 2L, Kind.CLIENT, "service", "service", false) - ); - - for (Span span : validRootSpans) { - assertThat(new DependencyLinker() - .putTrace(asList(span).iterator()).link()) - .containsOnly(DependencyLink.create("service", "service", 1L)); - } - } - - @Test - public void noSpanKindTreatedSameAsClient() { - List trace = asList( - span2(1L, null, 1L, null, "some-client", "web", false), - span2(1L, 1L, 2L, null, "web", "app", false), - span2(1L, 2L, 3L, null, "app", "db", false) - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( - DependencyLink.create("some-client", "web", 1L), - DependencyLink.create("web", "app", 1L), - DependencyLink.create("app", "db", 1L) - ); - } - - @Test - public void noSpanKindWithError() { - List trace = asList( - span2(1L, null, 1L, null, "some-client", "web", false), - span2(1L, 1L, 2L, null, "web", "app", true), - span2(1L, 2L, 3L, null, "app", "db", false) - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( - DependencyLink.create("some-client", "web", 1L), - DependencyLink.builder().parent("web").child("app").callCount(1L).errorCount(1L).build(), - DependencyLink.create("app", "db", 1L) - ); - } - - /** - * A dependency link is between two services. Given only one span, we cannot link if we don't know - * both service names. - */ - @Test - public void cannotLinkSingleSpanWithoutBothServiceNames() { - List incompleteRootSpans = asList( - span2(1L, null, 1L, Kind.SERVER, null, null, false), - span2(1L, null, 1L, Kind.SERVER, "server", null, false), - span2(1L, null, 1L, Kind.SERVER, null, "client", false), - span2(1L, null, 1L, Kind.CLIENT, null, null, false), - span2(1L, null, 1L, Kind.CLIENT, "client", null, false), - span2(1L, null, 1L, Kind.CLIENT, null, "server", false) - ); - - for (Span span : incompleteRootSpans) { - assertThat(new DependencyLinker(logger) - .putTrace(asList(span).iterator()).link()) - .isEmpty(); - } - } - - @Test - public void doesntLinkUnrelatedSpansWhenMissingRootSpan() { - long missingParentId = 1; - List trace = asList( - span2(1L, missingParentId, 2L, Kind.SERVER, "service1", null, false), - span2(1L, missingParentId, 3L, Kind.SERVER, "service2", null, false) - ); - - assertThat(new DependencyLinker(logger) - .putTrace(trace.iterator()).link()) - .isEmpty(); - - assertThat(messages).contains( - "skipping synthetic node for broken span tree" - ); - } - - @Test - public void linksRelatedSpansWhenMissingRootSpan() { - long missingParentId = 1L; - List trace = asList( - span2(1L, missingParentId, 2L, Kind.SERVER, "service1", null, false), - span2(1L, 2L, 3L, Kind.SERVER, "service2", null, false) - ); - - assertThat(new DependencyLinker(logger) - .putTrace(trace.iterator()).link()) - .containsOnly(DependencyLink.create("service1", "service2", 1L)); - - assertThat(messages).contains( - "skipping synthetic node for broken span tree" - ); - } - - /** Client+Server spans that don't share IDs are treated as server spans missing their peer */ - @Test - public void linksSingleHostSpans() { - List singleHostSpans = asList( - span2(1L, null, 1L, Kind.CLIENT, "web", null, false), - span2(1L, 1L, 2L, Kind.SERVER, "app", null, false) - ); - - assertThat(new DependencyLinker() - .putTrace(singleHostSpans.iterator()).link()) - .containsOnly(DependencyLink.create("web", "app", 1L)); - } - - @Test - public void 
linksSingleHostSpans_errorOnClient() { - List trace = asList( - span2(1L, null, 1L, Kind.CLIENT, "web", null, true), - span2(1L, 1L, 2L, Kind.SERVER, "app", null, false) - ); - - assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( - DependencyLink.builder().parent("web").child("app").callCount(1L).errorCount(1L).build() - ); - } - - /** Creates a link when there's a span missing, in this case 2L which is an RPC from web to app */ - @Test - public void missingSpan() { - List singleHostSpans = asList( - span2(1L, null, 1L, Kind.SERVER, "web", null, false), - span2(1L, 1L, 2L, Kind.CLIENT, "app", null, false) - ); - - assertThat(new DependencyLinker(logger) - .putTrace(singleHostSpans.iterator()).link()) - .containsOnly(DependencyLink.create("web", "app", 1L)); - - assertThat(messages).contains( - "detected missing link to client span" - ); - } - - @Test - public void merge() { - List links = asList( - DependencyLink.builder().parent("foo").child("bar").callCount(2L).errorCount(1L).build(), - DependencyLink.builder().parent("foo").child("bar").callCount(2L).errorCount(2L).build(), - DependencyLink.builder().parent("foo").child("foo").callCount(1L).build() - ); - - assertThat(DependencyLinker.merge(links)).containsExactly( - DependencyLink.builder().parent("foo").child("bar").callCount(4L).errorCount(3L).build(), - DependencyLink.builder().parent("foo").child("foo").callCount(1L).build() - ); - } - - @Test - public void merge_error() { - List links = asList( - DependencyLink.create("client", "server", 2L), - DependencyLink.create("client", "server", 2L), - DependencyLink.create("client", "client", 1L) - ); - - assertThat(DependencyLinker.merge(links)).containsExactly( - DependencyLink.create("client", "server", 4L), - DependencyLink.create("client", "client", 1L) - ); - } - - static Span span2(long traceId, @Nullable Long parentId, long id, @Nullable Kind kind, - @Nullable String local, @Nullable String remote, boolean isError) { - Span.Builder result = Span.newBuilder() - .traceId(toLowerHex(traceId)) - .parentId(parentId != null ? toLowerHex(parentId) : null) - .id(toLowerHex(id)) - .kind(kind); - if (local != null) result.localEndpoint(Endpoint.newBuilder().serviceName(local).build()); - if (remote != null) result.remoteEndpoint(Endpoint.newBuilder().serviceName(remote).build()); - if (isError) result.putTag(ERROR, ""); - return result.build(); - } -} diff --git a/zipkin/src/test/java/zipkin/internal/DetectingSpanDecoderTest.java b/zipkin/src/test/java/zipkin/internal/DetectingSpanDecoderTest.java index d247f252bfd..a03051cf36a 100644 --- a/zipkin/src/test/java/zipkin/internal/DetectingSpanDecoderTest.java +++ b/zipkin/src/test/java/zipkin/internal/DetectingSpanDecoderTest.java @@ -17,7 +17,7 @@ import zipkin.Codec; import zipkin.SpanDecoder; import zipkin.internal.v2.Span; -import zipkin.internal.v2.codec.BytesEncoder; +import zipkin.internal.v2.codec.SpanBytesEncoder; import static java.util.Arrays.asList; import static org.assertj.core.api.Assertions.assertThat; @@ -69,15 +69,15 @@ public class DetectingSpanDecoderTest { /** Single-element reads were for legacy non-list encoding. 
Don't add new code that does this */ @Test(expected = UnsupportedOperationException.class) public void readSpan_json2() { - decoder.readSpan(BytesEncoder.JSON.encode(span2_1)); + decoder.readSpan(SpanBytesEncoder.JSON.encode(span2_1)); } @Test(expected = IllegalArgumentException.class) public void readSpans_json2_not_list() { - decoder.readSpans(BytesEncoder.JSON.encode(span2_1)); + decoder.readSpans(SpanBytesEncoder.JSON.encode(span2_1)); } @Test public void readSpans_json2() { - byte[] message = BytesEncoder.JSON.encodeList(asList(span2_1, span2_2)); + byte[] message = SpanBytesEncoder.JSON.encodeList(asList(span2_1, span2_2)); assertThat(decoder.readSpans(message)) .containsExactly(span1, span2); diff --git a/zipkin/src/test/java/zipkin/internal/JsonCodecTest.java b/zipkin/src/test/java/zipkin/internal/JsonCodecTest.java index 5a8c43ee2e8..0a1d2f3a8a7 100644 --- a/zipkin/src/test/java/zipkin/internal/JsonCodecTest.java +++ b/zipkin/src/test/java/zipkin/internal/JsonCodecTest.java @@ -192,58 +192,6 @@ public void niceErrorOnUppercaseTraceId() { Codec.JSON.readSpan(json.getBytes(UTF_8)); } - @Test - public void doesntStackOverflowOnToBufferWriterBug_lessThanBytes() { - thrown.expect(AssertionError.class); - thrown.expectMessage("Bug found using FooWriter to write Foo as json. Wrote 1/2 bytes: a"); - - class FooWriter implements Buffer.Writer { - @Override public int sizeInBytes(Object value) { - return 2; - } - - @Override public void write(Object value, Buffer buffer) { - buffer.writeByte('a'); - throw new RuntimeException("buggy"); - } - } - - class Foo { - @Override - public String toString() { - return new String(JsonCodec.write(new FooWriter(), this), UTF_8); - } - } - - new Foo().toString(); - } - - @Test - public void doesntStackOverflowOnToBufferWriterBug_Overflow() { - thrown.expect(AssertionError.class); - thrown.expectMessage("Bug found using FooWriter to write Foo as json. Wrote 2/2 bytes: ab"); - - // pretend there was a bug calculating size, ex it calculated incorrectly as to small - class FooWriter implements Buffer.Writer { - @Override public int sizeInBytes(Object value) { - return 2; - } - - @Override public void write(Object value, Buffer buffer) { - buffer.writeByte('a').writeByte('b').writeByte('c'); // wrote larger than size! 
- } - } - - class Foo { - @Override - public String toString() { - return new String(JsonCodec.write(new FooWriter(), this), UTF_8); - } - } - - new Foo().toString(); - } - @Test public void niceErrorOnNull_id() { thrown.expect(IllegalArgumentException.class); diff --git a/zipkin/src/test/java/zipkin/internal/V2JsonSpanDecoderTest.java b/zipkin/src/test/java/zipkin/internal/V2JsonSpanDecoderTest.java index 5cf9c5aba6f..e79dfdf7851 100644 --- a/zipkin/src/test/java/zipkin/internal/V2JsonSpanDecoderTest.java +++ b/zipkin/src/test/java/zipkin/internal/V2JsonSpanDecoderTest.java @@ -16,7 +16,7 @@ import org.junit.Test; import zipkin.SpanDecoder; import zipkin.internal.v2.Span; -import zipkin.internal.v2.codec.BytesEncoder; +import zipkin.internal.v2.codec.SpanBytesEncoder; import static java.util.Arrays.asList; import static org.assertj.core.api.Assertions.assertThat; @@ -31,11 +31,11 @@ public class V2JsonSpanDecoderTest { SpanDecoder decoder = new V2JsonSpanDecoder(); @Test(expected = UnsupportedOperationException.class) public void readSpan() { - decoder.readSpan(BytesEncoder.JSON.encode(span2_1)); + decoder.readSpan(SpanBytesEncoder.JSON.encode(span2_1)); } @Test public void readSpans() { - byte[] message = BytesEncoder.JSON.encodeList(asList(span2_1, span2_2)); + byte[] message = SpanBytesEncoder.JSON.encodeList(asList(span2_1, span2_2)); assertThat(decoder.readSpans(message)) .containsExactly(span1, span2); diff --git a/zipkin/src/test/java/zipkin/internal/V2SpanConverterTest.java b/zipkin/src/test/java/zipkin/internal/V2SpanConverterTest.java index 7fab665eb78..9ccf5772c64 100644 --- a/zipkin/src/test/java/zipkin/internal/V2SpanConverterTest.java +++ b/zipkin/src/test/java/zipkin/internal/V2SpanConverterTest.java @@ -26,7 +26,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static zipkin.Constants.LOCAL_COMPONENT; -import static zipkin.internal.V2SpanConverter.convert; +import static zipkin.internal.V2SpanConverter.toEndpoint; public class V2SpanConverterTest { Endpoint frontend = Endpoint.create("frontend", 127 << 24 | 1); @@ -44,8 +44,8 @@ public class V2SpanConverterTest { .id("5b4185666d50f68b") .name("get") .kind(Kind.CLIENT) - .localEndpoint(convert(frontend)) - .remoteEndpoint(convert(backend)) + .localEndpoint(toEndpoint(frontend)) + .remoteEndpoint(toEndpoint(backend)) .timestamp(1472470996199000L) .duration(207000L) .addAnnotation(1472470996238000L, Constants.WIRE_SEND) @@ -84,7 +84,7 @@ public class V2SpanConverterTest { .id("5b4185666d50f68b") .name("get") .kind(Kind.CLIENT) - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .timestamp(1472470996199000L) .addAnnotation(1472470996238000L, Constants.WIRE_SEND) .build(); @@ -112,7 +112,7 @@ public class V2SpanConverterTest { .parentId("6b221d5bc9e6496c") .id("5b4185666d50f68b") .name("get") - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .timestamp(1472470996199000L) .duration(1472470996238000L - 1472470996199000L) .addAnnotation(1472470996199000L, Constants.CLIENT_SEND) @@ -140,8 +140,8 @@ public class V2SpanConverterTest { .parentId("6b221d5bc9e6496c") .id("5b4185666d50f68b") .name("get") - .localEndpoint(convert(frontend)) - .remoteEndpoint(convert(backend)) + .localEndpoint(toEndpoint(frontend)) + .remoteEndpoint(toEndpoint(backend)) .timestamp(1472470996199000L) .duration(207000L) .build(); @@ -171,7 +171,7 @@ public class V2SpanConverterTest { .id("5b4185666d50f68b") .kind(Kind.CLIENT) .name("get") - .localEndpoint(convert(frontend)) + 
.localEndpoint(toEndpoint(frontend)) .timestamp(1472470996199000L) .duration(207000L) .build(); @@ -200,8 +200,8 @@ public class V2SpanConverterTest { .id("216a2aea45d08fc9") .name("get") .kind(Kind.SERVER) - .localEndpoint(convert(backend)) - .remoteEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(backend)) + .remoteEndpoint(toEndpoint(frontend)) .timestamp(1472470996199000L) .duration(207000L) .putTag(TraceKeys.HTTP_PATH, "/api") @@ -285,7 +285,7 @@ public class V2SpanConverterTest { .parentId("1") .id("2") .name("local") - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .timestamp(1472470996199000L) .duration(207000L) .build(); @@ -336,8 +336,8 @@ public class V2SpanConverterTest { // the client side owns timestamp and duration Span client = builder.clone() .kind(Kind.CLIENT) - .localEndpoint(convert(frontend)) - .remoteEndpoint(convert(backend)) + .localEndpoint(toEndpoint(frontend)) + .remoteEndpoint(toEndpoint(backend)) .timestamp(1472470996199000L) .duration(207000L) .addAnnotation(1472470996238000L, Constants.WIRE_SEND) @@ -350,8 +350,8 @@ public class V2SpanConverterTest { Span server = builder.clone() .kind(Kind.SERVER) .shared(true) - .localEndpoint(convert(backend)) - .remoteEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(backend)) + .remoteEndpoint(toEndpoint(frontend)) .timestamp(1472470996250000L) .duration(100000L) .putTag(TraceKeys.HTTP_PATH, "/backend") @@ -384,7 +384,7 @@ public class V2SpanConverterTest { .name("get") .kind(Kind.SERVER) .shared(true) - .localEndpoint(convert(backend)) + .localEndpoint(toEndpoint(backend)) .timestamp(1472470996250000L) .duration(100000L) .build(); @@ -416,7 +416,7 @@ public class V2SpanConverterTest { Span client = builder.clone() .kind(Kind.CLIENT) - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .timestamp(1472470996199000L) .duration(207000L) .build(); @@ -424,7 +424,7 @@ public class V2SpanConverterTest { Span server = builder.clone() .kind(Kind.SERVER) .shared(true) - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .timestamp(1472470996250000L) .duration(100000L) .build(); @@ -452,14 +452,14 @@ public class V2SpanConverterTest { Span client = builder.clone() .kind(Kind.CLIENT) - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .timestamp(1472470996199000L) .build(); Span server = builder.clone() .kind(Kind.SERVER) .shared(true) - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .timestamp(1472470996250000L) .build(); @@ -483,7 +483,7 @@ public class V2SpanConverterTest { .id("5b4185666d50f68b") .name("send") .kind(Kind.PRODUCER) - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .timestamp(1472470996199000L) .build(); @@ -509,9 +509,9 @@ public class V2SpanConverterTest { .id("5b4185666d50f68b") .name("send") .kind(Kind.PRODUCER) - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .timestamp(1472470996199000L) - .remoteEndpoint(convert(kafka)) + .remoteEndpoint(toEndpoint(kafka)) .build(); assertThat(V2SpanConverter.toSpan(span2)) @@ -539,7 +539,7 @@ public class V2SpanConverterTest { .id("5b4185666d50f68b") .name("send") .kind(Kind.PRODUCER) - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .timestamp(1472470996199000L) .duration(51000L) .build(); @@ -567,7 +567,7 @@ public class V2SpanConverterTest { .id("5b4185666d50f68b") .name("send") .kind(Kind.CONSUMER) - .localEndpoint(convert(frontend)) + 
.localEndpoint(toEndpoint(frontend)) .timestamp(1472470996199000L) .build(); @@ -595,8 +595,8 @@ public class V2SpanConverterTest { .id("5b4185666d50f68b") .name("send") .kind(Kind.CONSUMER) - .localEndpoint(convert(frontend)) - .remoteEndpoint(convert(kafka)) + .localEndpoint(toEndpoint(frontend)) + .remoteEndpoint(toEndpoint(kafka)) .timestamp(1472470996199000L) .build(); @@ -625,7 +625,7 @@ public class V2SpanConverterTest { .id("5b4185666d50f68b") .name("send") .kind(Kind.CONSUMER) - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .timestamp(1472470996199000L) .duration(51000L) .build(); @@ -659,8 +659,8 @@ public class V2SpanConverterTest { Span producer = builder.clone() .kind(Kind.PRODUCER) - .localEndpoint(convert(frontend)) - .remoteEndpoint(convert(kafka)) + .localEndpoint(toEndpoint(frontend)) + .remoteEndpoint(toEndpoint(kafka)) .timestamp(1472470996199000L) .duration(1472470996238000L - 1472470996199000L) .build(); @@ -668,8 +668,8 @@ public class V2SpanConverterTest { Span consumer = builder.clone() .kind(Kind.CONSUMER) .shared(true) - .localEndpoint(convert(backend)) - .remoteEndpoint(convert(kafka)) + .localEndpoint(toEndpoint(backend)) + .remoteEndpoint(toEndpoint(kafka)) .timestamp(1472470996403000L) .duration(1472470996406000L - 1472470996403000L) .build(); @@ -700,7 +700,7 @@ public class V2SpanConverterTest { Span producer = builder.clone() .kind(Kind.PRODUCER) - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .timestamp(1472470996199000L) .duration(1472470996238000L - 1472470996199000L) .build(); @@ -708,7 +708,7 @@ public class V2SpanConverterTest { Span consumer = builder.clone() .kind(Kind.CONSUMER) .shared(true) - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .timestamp(1472470996403000L) .duration(1472470996406000L - 1472470996403000L) .build(); @@ -738,7 +738,7 @@ public class V2SpanConverterTest { .name("missing"); Span first = builder.clone() - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .addAnnotation(1472470996199000L, "foo") .addAnnotation(1472470996238000L, "bar") .addAnnotation(1472470996403000L, "missing") @@ -747,7 +747,7 @@ public class V2SpanConverterTest { .build(); Span second = builder.clone() - .localEndpoint(convert(backend)) + .localEndpoint(toEndpoint(backend)) .addAnnotation(1472470996250000L, "baz") .addAnnotation(1472470996350000L, "qux") .putTag("baz", "qux") @@ -781,7 +781,7 @@ public class V2SpanConverterTest { .traceId("1") .name("test") .id("2") - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .putTag("bool", "true") .putTag("short", "20") .putTag("int", "32800") diff --git a/zipkin/src/test/java/zipkin/internal/V2SpanStoreAdapterTest.java b/zipkin/src/test/java/zipkin/internal/V2SpanStoreAdapterTest.java index 9d9dc1d8fa3..0ec502089c6 100644 --- a/zipkin/src/test/java/zipkin/internal/V2SpanStoreAdapterTest.java +++ b/zipkin/src/test/java/zipkin/internal/V2SpanStoreAdapterTest.java @@ -41,7 +41,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import static zipkin.TestObjects.TODAY; -import static zipkin.internal.V2SpanConverter.convert; +import static zipkin.internal.V2SpanConverter.toEndpoint; public class V2SpanStoreAdapterTest { @Rule public MockitoRule mocks = MockitoJUnit.rule(); @@ -61,14 +61,14 @@ public class V2SpanStoreAdapterTest { List skewedTrace2 = asList( builder.clone() .kind(Span.Kind.CLIENT) - .localEndpoint(convert(frontend)) + 
.localEndpoint(toEndpoint(frontend)) .timestamp((TODAY + 200) * 1000) .duration(120_000L) .build(), builder.clone() .kind(Span.Kind.SERVER) .shared(true) - .localEndpoint(convert(backend)) + .localEndpoint(toEndpoint(backend)) .timestamp((TODAY + 100) * 1000) // received before sent! .duration(60_000L) .build() @@ -427,7 +427,7 @@ public void getDependencies_sync_wrapsIOE() throws IOException { } @Test public void convert_queryRequest() { - assertThat(V2SpanStoreAdapter.convert(QueryRequest.builder() + assertThat(V2SpanStoreAdapter.convertRequest(QueryRequest.builder() .serviceName("service") .spanName("span") .parseAnnotationQuery("annotation and tag=value") diff --git a/zipkin/src/test/java/zipkin/internal/v2/SpanTest.java b/zipkin/src/test/java/zipkin/internal/v2/SpanTest.java index 01661ce4109..1a2f51ce9d1 100644 --- a/zipkin/src/test/java/zipkin/internal/v2/SpanTest.java +++ b/zipkin/src/test/java/zipkin/internal/v2/SpanTest.java @@ -21,10 +21,10 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.data.MapEntry.entry; import static zipkin.TestObjects.APP_ENDPOINT; -import static zipkin.internal.V2SpanConverter.convert; +import static zipkin.internal.V2SpanConverter.toEndpoint; public class SpanTest { - Span base = Span.newBuilder().traceId("1").id("1").localEndpoint(convert(APP_ENDPOINT)).build(); + Span base = Span.newBuilder().traceId("1").id("1").localEndpoint(toEndpoint(APP_ENDPOINT)).build(); @Test public void traceIdString() { Span with128BitId = base.toBuilder() diff --git a/zipkin/src/test/java/zipkin/internal/v2/codec/SpanJsonAdaptersTest.java b/zipkin/src/test/java/zipkin/internal/v2/codec/SpanJsonSpanAdaptersTest.java similarity index 80% rename from zipkin/src/test/java/zipkin/internal/v2/codec/SpanJsonAdaptersTest.java rename to zipkin/src/test/java/zipkin/internal/v2/codec/SpanJsonSpanAdaptersTest.java index 31aef0a3c07..98e967f86da 100644 --- a/zipkin/src/test/java/zipkin/internal/v2/codec/SpanJsonAdaptersTest.java +++ b/zipkin/src/test/java/zipkin/internal/v2/codec/SpanJsonSpanAdaptersTest.java @@ -22,14 +22,14 @@ import zipkin.Constants; import zipkin.Endpoint; import zipkin.TraceKeys; -import zipkin.internal.Util; +import zipkin.internal.V2SpanConverter; import zipkin.internal.v2.Span; import static org.assertj.core.api.Assertions.assertThat; import static zipkin.internal.Util.UTF_8; -import static zipkin.internal.V2SpanConverter.convert; +import static zipkin.internal.V2SpanConverter.toEndpoint; -public class SpanJsonAdaptersTest { +public class SpanJsonSpanAdaptersTest { Endpoint frontend = Endpoint.create("frontend", 127 << 24 | 1); Endpoint backend = Endpoint.builder() .serviceName("backend") @@ -43,8 +43,8 @@ public class SpanJsonAdaptersTest { .id("5b4185666d50f68b") .name("get") .kind(Span.Kind.CLIENT) - .localEndpoint(convert(frontend)) - .remoteEndpoint(convert(backend)) + .localEndpoint(toEndpoint(frontend)) + .remoteEndpoint(toEndpoint(backend)) .timestamp(1472470996199000L) .duration(207000L) .addAnnotation(1472470996238000L, Constants.WIRE_SEND) @@ -56,34 +56,34 @@ public class SpanJsonAdaptersTest { @Rule public ExpectedException thrown = ExpectedException.none(); @Test public void spanRoundTrip() throws IOException { - assertThat(BytesDecoder.JSON.decode(BytesEncoder.JSON.encode(span))) + assertThat(SpanBytesCodec.JSON.decode(SpanBytesEncoder.JSON.encode(span))) .isEqualTo(span); } @Test public void sizeInBytes() throws IOException { - assertThat(Span2JsonAdapters.SPAN_WRITER.sizeInBytes(span)) - 
.isEqualTo(BytesEncoder.JSON.encode(span).length); + assertThat(SpanBytesEncoder.SPAN_WRITER.sizeInBytes(span)) + .isEqualTo(SpanBytesEncoder.JSON.encode(span).length); } @Test public void spanRoundTrip_64bitTraceId() throws IOException { span = span.toBuilder().traceId(span.traceId().substring(16)).build(); - assertThat(BytesDecoder.JSON.decode(BytesEncoder.JSON.encode(span))) + assertThat(SpanBytesCodec.JSON.decode(SpanBytesEncoder.JSON.encode(span))) .isEqualTo(span); } @Test public void spanRoundTrip_shared() throws IOException { span = span.toBuilder().shared(true).build(); - assertThat(BytesDecoder.JSON.decode(BytesEncoder.JSON.encode(span))) + assertThat(SpanBytesCodec.JSON.decode(SpanBytesEncoder.JSON.encode(span))) .isEqualTo(span); } @Test public void sizeInBytes_64bitTraceId() throws IOException { span = span.toBuilder().traceId(span.traceId().substring(16)).build(); - assertThat(Span2JsonAdapters.SPAN_WRITER.sizeInBytes(span)) - .isEqualTo(BytesEncoder.JSON.encode(span).length); + assertThat(SpanBytesEncoder.SPAN_WRITER.sizeInBytes(span)) + .isEqualTo(SpanBytesEncoder.JSON.encode(span).length); } /** @@ -101,7 +101,7 @@ public class SpanJsonAdaptersTest { .putTag("\"foo", "Database error: ORA-00942:\u2028 and \u2029 table or view does not exist\n") .build(); - assertThat(BytesDecoder.JSON.decode(BytesEncoder.JSON.encode(worstSpanInTheWorld))) + assertThat(SpanBytesCodec.JSON.decode(SpanBytesEncoder.JSON.encode(worstSpanInTheWorld))) .isEqualTo(worstSpanInTheWorld); } @@ -115,14 +115,14 @@ public class SpanJsonAdaptersTest { + " \"id\": \"6b221d5bc9e6496c\"\n" + "}"; - BytesDecoder.JSON.decode(json.getBytes(UTF_8)); + SpanBytesCodec.JSON.decode(json.getBytes(UTF_8)); } @Test public void niceErrorOnEmpty_inputSpans() throws IOException { thrown.expect(IllegalArgumentException.class); thrown.expectMessage("Empty input reading List"); - BytesDecoder.JSON.decodeList(new byte[0]); + SpanBytesCodec.JSON.decodeList(new byte[0]); } /** @@ -132,25 +132,25 @@ public class SpanJsonAdaptersTest { thrown.expect(IllegalArgumentException.class); thrown.expectMessage("Malformed reading List from "); - BytesDecoder.JSON.decodeList(new byte[] {'h', 'e', 'l', 'l', 'o'}); + SpanBytesCodec.JSON.decodeList(new byte[] {'h', 'e', 'l', 'l', 'o'}); } @Test public void spansRoundTrip() throws IOException { List tenClientSpans = Collections.nCopies(10, span); - byte[] message = BytesEncoder.JSON.encodeList(tenClientSpans); + byte[] message = SpanBytesEncoder.JSON.encodeList(tenClientSpans); - assertThat(BytesDecoder.JSON.decodeList(message)) + assertThat(SpanBytesCodec.JSON.decodeList(message)) .isEqualTo(tenClientSpans); } @Test public void writesTraceIdHighIntoTraceIdField() { Span with128BitTraceId = Span.newBuilder() .traceId("48485a3953bb61246b221d5bc9e6496c") - .localEndpoint(convert(frontend)) + .localEndpoint(toEndpoint(frontend)) .id("1").name("").build(); - assertThat(new String(BytesEncoder.JSON.encode(with128BitTraceId), Util.UTF_8)) + assertThat(new String(SpanBytesEncoder.JSON.encode(with128BitTraceId), UTF_8)) .startsWith("{\"traceId\":\"48485a3953bb61246b221d5bc9e6496c\""); } @@ -166,8 +166,8 @@ public class SpanJsonAdaptersTest { + " \"id\": \"6b221d5bc9e6496c\"\n" + "}").getBytes(UTF_8); - assertThat(BytesDecoder.JSON.decode(with128BitTraceId)) - .isEqualTo(BytesDecoder.JSON.decode(withLower64bitsTraceId).toBuilder() + assertThat(SpanBytesCodec.JSON.decode(with128BitTraceId)) + .isEqualTo(SpanBytesCodec.JSON.decode(withLower64bitsTraceId).toBuilder() 
.traceId("48485a3953bb61246b221d5bc9e6496c").build()); } @@ -187,7 +187,7 @@ public class SpanJsonAdaptersTest { + " \"shared\": null\n" + "}"; - BytesDecoder.JSON.decode(json.getBytes(UTF_8)); + SpanBytesCodec.JSON.decode(json.getBytes(UTF_8)); } @Test public void ignoresNull_endpoint_topLevelFields() { @@ -203,7 +203,7 @@ public class SpanJsonAdaptersTest { + " }\n" + "}"; - assertThat(convert(BytesDecoder.JSON.decode(json.getBytes(UTF_8)).localEndpoint())) + assertThat(V2SpanConverter.toEndpoint(SpanBytesCodec.JSON.decode(json.getBytes(UTF_8)).localEndpoint())) .isEqualTo(Endpoint.create("", 127 << 24 | 1)); } @@ -222,7 +222,7 @@ public class SpanJsonAdaptersTest { + " \"port\": null\n" + " }\n" + "}"; - BytesDecoder.JSON.decode(json.getBytes(UTF_8)); + SpanBytesCodec.JSON.decode(json.getBytes(UTF_8)); } @Test public void niceErrorOnIncomplete_annotation() { @@ -238,7 +238,7 @@ public class SpanJsonAdaptersTest { + " ]\n" + "}"; - BytesDecoder.JSON.decode(json.getBytes(UTF_8)); + SpanBytesCodec.JSON.decode(json.getBytes(UTF_8)); } @Test public void niceErrorOnNull_traceId() { @@ -251,7 +251,7 @@ public class SpanJsonAdaptersTest { + " \"id\": \"6b221d5bc9e6496c\"\n" + "}"; - BytesDecoder.JSON.decode(json.getBytes(UTF_8)); + SpanBytesCodec.JSON.decode(json.getBytes(UTF_8)); } @Test public void niceErrorOnNull_id() { @@ -264,7 +264,7 @@ public class SpanJsonAdaptersTest { + " \"id\": null\n" + "}"; - BytesDecoder.JSON.decode(json.getBytes(UTF_8)); + SpanBytesCodec.JSON.decode(json.getBytes(UTF_8)); } @Test public void niceErrorOnNull_tagValue() { @@ -280,7 +280,7 @@ public class SpanJsonAdaptersTest { + " }\n" + "}"; - BytesDecoder.JSON.decode(json.getBytes(UTF_8)); + SpanBytesCodec.JSON.decode(json.getBytes(UTF_8)); } @Test public void niceErrorOnNull_annotationValue() { @@ -296,7 +296,7 @@ public class SpanJsonAdaptersTest { + " ]\n" + "}"; - BytesDecoder.JSON.decode(json.getBytes(UTF_8)); + SpanBytesCodec.JSON.decode(json.getBytes(UTF_8)); } @Test public void niceErrorOnNull_annotationTimestamp() { @@ -312,7 +312,7 @@ public class SpanJsonAdaptersTest { + " ]\n" + "}"; - BytesDecoder.JSON.decode(json.getBytes(UTF_8)); + SpanBytesCodec.JSON.decode(json.getBytes(UTF_8)); } @Test public void readSpan_localEndpoint_noServiceName() { @@ -325,7 +325,7 @@ public class SpanJsonAdaptersTest { + " }\n" + "}"; - assertThat(BytesDecoder.JSON.decode(json.getBytes(UTF_8)).localServiceName()) + assertThat(SpanBytesCodec.JSON.decode(json.getBytes(UTF_8)).localServiceName()) .isNull(); } @@ -339,24 +339,24 @@ public class SpanJsonAdaptersTest { + " }\n" + "}"; - assertThat(BytesDecoder.JSON.decode(json.getBytes(UTF_8)).remoteServiceName()) + assertThat(SpanBytesCodec.JSON.decode(json.getBytes(UTF_8)).remoteServiceName()) .isNull(); } @Test public void spanRoundTrip_noRemoteServiceName() throws IOException { span = span.toBuilder() - .remoteEndpoint(convert(backend.toBuilder().serviceName("").build())).build(); + .remoteEndpoint(toEndpoint(backend.toBuilder().serviceName("").build())).build(); - assertThat(BytesDecoder.JSON.decode(BytesEncoder.JSON.encode(span))) + assertThat(SpanBytesCodec.JSON.decode(SpanBytesEncoder.JSON.encode(span))) .isEqualTo(span); } @Test public void doesntWriteEmptyServiceName() throws IOException { span = span.toBuilder() - .localEndpoint(convert(frontend.toBuilder().serviceName("").build())) + .localEndpoint(toEndpoint(frontend.toBuilder().serviceName("").build())) .remoteEndpoint(null).build(); - assertThat(new String(BytesEncoder.JSON.encode(span), UTF_8)) + assertThat(new 
String(SpanBytesEncoder.JSON.encode(span), UTF_8)) .contains("{\"ipv4\":\"127.0.0.1\"}"); } } diff --git a/zipkin/src/test/java/zipkin/internal/v2/internal/BufferTest.java b/zipkin/src/test/java/zipkin/internal/v2/internal/BufferTest.java new file mode 100644 index 00000000000..fd9d47bd762 --- /dev/null +++ b/zipkin/src/test/java/zipkin/internal/v2/internal/BufferTest.java @@ -0,0 +1,88 @@ +/** + * Copyright 2015-2017 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package zipkin.internal.v2.internal; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.nio.charset.Charset; +import java.util.Arrays; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class BufferTest { + static final Charset UTF_8 = Charset.forName("UTF-8"); + + // Adapted from http://stackoverflow.com/questions/8511490/calculating-length-in-utf-8-of-java-string-without-actually-encoding-it + @Test public void utf8SizeInBytes() { + for (int codepoint = 0; codepoint <= 0x10FFFF; codepoint++) { + if (codepoint == 0xD800) codepoint = 0xDFFF + 1; // skip surrogates + if (Character.isDefined(codepoint)) { + String test = new String(Character.toChars(codepoint)); + int expected = test.getBytes(UTF_8).length; + int actual = Buffer.utf8SizeInBytes(test); + if (actual != expected) { + throw new AssertionError(actual + " length != " + expected + " for " + codepoint); + } + } + } + } + + /** Uses test data and codepoint wrapping trick from okhttp3.FormBodyTest */ + @Test public void utf8_malformed() { + for (int codepoint : Arrays.asList(0xD800, 0xDFFF, 0xD83D)) { + String test = new String(new int[] {'a', codepoint, 'c'}, 0, 3); + assertThat(Buffer.utf8SizeInBytes(test)) + .isEqualTo(3); + assertThat(new Buffer(3).writeUtf8(test).toByteArray()) + .containsExactly('a', '?', 'c'); + } + } + + @Test public void emoji() { + byte[] emojiBytes = {(byte) 0xF0, (byte) 0x9F, (byte) 0x98, (byte) 0x81}; + String emoji = new String(emojiBytes, UTF_8); + assertThat(Buffer.utf8SizeInBytes(emoji)) + .isEqualTo(emojiBytes.length); + assertThat(new Buffer(emojiBytes.length).writeUtf8(emoji).toByteArray()) + .isEqualTo(emojiBytes); + } + + @Test public void writeAscii_long() throws IOException { + assertThat(writeAscii(-1005656679588439279L)) + .isEqualTo("-1005656679588439279"); + assertThat(writeAscii(0L)) + .isEqualTo("0"); + assertThat(writeAscii(-9223372036854775808L /* Long.MIN_VALUE */)) + .isEqualTo("-9223372036854775808"); + assertThat(writeAscii(123456789L)) + .isEqualTo("123456789"); + } + + static String writeAscii(long v) { + byte[] buffered = new Buffer(Buffer.asciiSizeInBytes(v)).writeAscii(v).toByteArray(); + return new String(buffered, UTF_8); + } + + // Test creating Buffer for a long string + @Test public void writeString() throws UnsupportedEncodingException { + StringBuffer stringBuffer = new StringBuffer(); + for (int i = 0; i < 100000; i++) { + stringBuffer.append("a"); + } + String string = stringBuffer.toString(); + 
byte[] buffered = new Buffer(string.length()).writeAscii(string).toByteArray(); + assertThat(new String(buffered, "US-ASCII")).isEqualTo(string); + } +} diff --git a/zipkin/src/test/java/zipkin/internal/v2/internal/DependencyLinkerTest.java b/zipkin/src/test/java/zipkin/internal/v2/internal/DependencyLinkerTest.java new file mode 100644 index 00000000000..3bf5d168fb1 --- /dev/null +++ b/zipkin/src/test/java/zipkin/internal/v2/internal/DependencyLinkerTest.java @@ -0,0 +1,584 @@ +/** + * Copyright 2015-2017 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package zipkin.internal.v2.internal; + +import java.util.ArrayList; +import java.util.List; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import org.junit.Test; +import zipkin.TestObjects; +import zipkin.internal.V2SpanConverter; +import zipkin.internal.v2.DependencyLink; +import zipkin.internal.v2.Endpoint; +import zipkin.internal.v2.Span; +import zipkin.internal.v2.Span.Kind; + +import static java.util.Arrays.asList; +import static org.assertj.core.api.Assertions.assertThat; +import static zipkin.Constants.ERROR; + +public class DependencyLinkerTest { + static final List TRACE = TestObjects.TRACE.stream() + .flatMap(s -> V2SpanConverter.fromSpan(s).stream()) + .collect(Collectors.toList()); + + List messages = new ArrayList<>(); + + Logger logger = new Logger("", null) { + { + setLevel(Level.ALL); + } + + @Override public void log(Level level, String msg) { + assertThat(level).isEqualTo(Level.FINE); + messages.add(msg); + } + }; + + @Test + public void baseCase() { + assertThat(new DependencyLinker().link()).isEmpty(); + } + + @Test + public void linksSpans() { + assertThat(new DependencyLinker().putTrace(TRACE.iterator()).link()).containsExactly( + DependencyLink.newBuilder().parent("web").child("app").callCount(1L).build(), + DependencyLink.newBuilder().parent("app").child("db").callCount(1L).errorCount(1L).build() + ); + } + + @Test + public void dropsSelfReferencingSpans() { + List trace = TRACE.stream() + .map(s -> s.toBuilder().parentId(s.parentId() != null ? 
s.id() : null).build()) + .collect(Collectors.toList()); + + assertThat(new DependencyLinker(logger).putTrace(trace.iterator()).link()).isEmpty(); + + assertThat(messages).contains( + "skipping circular dependency: traceId=f66529c8cc356aa0, spanId=93288b464457044e", + "skipping circular dependency: traceId=f66529c8cc356aa0, spanId=71e62981f1e136a7" + ); + } + + @Test + public void messagingSpansDontLinkWithoutBroker_consumer() { + List trace = asList( + span2("a", null, "a", Kind.PRODUCER, "producer", null, false), + span2("a", "a", "b", Kind.CONSUMER, "consumer", "kafka", false) + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("kafka").child("consumer").callCount(1L).build() + ); + } + + @Test + public void messagingSpansDontLinkWithoutBroker_producer() { + List trace = asList( + span2("a", null, "a", Kind.PRODUCER, "producer", "kafka", false), + span2("a", "a", "b", Kind.CONSUMER, "consumer", null, false) + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("producer").child("kafka").callCount(1L).build() + ); + } + + @Test + public void messagingWithBroker_both_sides_same() { + List trace = asList( + span2("a", null, "a", Kind.PRODUCER, "producer", "kafka", false), + span2("a", "a", "b", Kind.CONSUMER, "consumer", "kafka", false) + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("producer").child("kafka").callCount(1L).build(), + DependencyLink.newBuilder().parent("kafka").child("consumer").callCount(1L).build() + ); + } + + @Test + public void messagingWithBroker_different() { + List trace = asList( + span2("a", null, "a", Kind.PRODUCER, "producer", "kafka1", false), + span2("a", "a", "b", Kind.CONSUMER, "consumer", "kafka2", false) + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("producer").child("kafka1").callCount(1L).build(), + DependencyLink.newBuilder().parent("kafka2").child("consumer").callCount(1L).build() + ); + } + + /** Shows we don't assume there's a direct link between producer and consumer. */ + @Test + public void messagingWithoutBroker_noLinks() { + List trace = asList( + span2("a", null, "a", Kind.PRODUCER, "producer", null, false), + span2("a", "a", "b", Kind.CONSUMER, "consumer", null, false) + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()) + .isEmpty(); + } + + /** When a server is the child of a producer span, make a link as it is really an RPC */ + @Test + public void producerLinksToServer_childSpan() { + List trace = asList( + span2("a", null, "a", Kind.PRODUCER, "producer", null, false), + span2("a", "a", "b", Kind.SERVER, "server", null, false) + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("producer").child("server").callCount(1L).build() + ); + } + + /** + * Servers most often join a span vs create a child. Make sure this works when a producer is used + * instead of a client. 
+ */ + @Test + public void producerLinksToServer_sameSpan() { + List trace = asList( + span2("a", null, "a", Kind.PRODUCER, "producer", null, false), + span2("a", null, "a", Kind.SERVER, "server", null, false) + .toBuilder().shared(true).build() + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("producer").child("server").callCount(1L).build() + ); + } + + /** + * Client might be used for historical reasons instead of PRODUCER. Don't link as the server-side + * is authoritative. + */ + @Test + public void clientDoesntLinkToConsumer_child() { + List trace = asList( + span2("a", null, "a", Kind.CLIENT, "client", null, false), + span2("a", "a", "b", Kind.CONSUMER, "consumer", null, false) + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()) + .isEmpty(); + } + + /** + * A root span can be a client-originated trace or a server receipt which knows its peer. In these + * cases, the peer is known and kind establishes the direction. + */ + @Test + public void linksSpansDirectedByKind() { + List validRootSpans = asList( + span2("a", null, "a", Kind.SERVER, "server", "client", false), + span2("a", null, "a", Kind.CLIENT, "client", "server", false) + .toBuilder().shared(true).build() + ); + + for (Span span : validRootSpans) { + assertThat(new DependencyLinker().putTrace(asList(span).iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("client").child("server").callCount(1L).build() + ); + } + } + + @Test + public void callsAgainstTheSameLinkIncreasesCallCount_span() { + List trace = asList( + span2("a", null, "a", Kind.SERVER, "client", null, false), + span2("a", "a", "b", Kind.CLIENT, null, "server", false), + span2("a", "a", "c", Kind.CLIENT, null, "server", false) + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("client").child("server").callCount(2L).build() + ); + } + + @Test + public void callsAgainstTheSameLinkIncreasesCallCount_trace() { + List trace = asList( + span2("a", null, "a", Kind.SERVER, "client", null, false), + span2("a", "a", "b", Kind.CLIENT, null, "server", false) + ); + + assertThat(new DependencyLinker() + .putTrace(trace.iterator()) + .putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("client").child("server").callCount(2L).build() + ); + } + + /** + * Spans don't always include both the client and server service. When you know the kind, you can + * link these without duplicating call count. 
+ */ + @Test + public void singleHostSpansResultInASingleCallCount() { + List trace = asList( + span2("a", null, "a", Kind.CLIENT, "client", null, false), + span2("a", "a", "b", Kind.SERVER, "server", null, false) + ); + + assertThat(new DependencyLinker() + .putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("client").child("server").callCount(1L).build() + ); + } + + @Test + public void singleHostSpansResultInASingleErrorCount() { + List trace = asList( + span2("a", null, "a", Kind.CLIENT, "client", null, true), + span2("a", "a", "b", Kind.SERVER, "server", null, true) + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder() + .parent("client") + .child("server") + .callCount(1L) + .errorCount(1L) + .build() + ); + } + + @Test + public void singleHostSpansResultInASingleErrorCount_sameId() { + List trace = asList( + span2("a", null, "a", Kind.CLIENT, "client", null, true), + span2("a", null, "a", Kind.SERVER, "server", null, true) + .toBuilder().shared(true).build() + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder() + .parent("client") + .child("server") + .callCount(1L) + .errorCount(1L) + .build() + ); + } + + @Test + public void singleHostSpansResultInASingleCallCount_defersNameToServer() { + List trace = asList( + span2("a", null, "a", Kind.CLIENT, "client", "server", false), + span2("a", "a", "b", Kind.SERVER, "server", null, false) + ); + + assertThat(new DependencyLinker(logger).putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("client").child("server").callCount(1L).build() + ); + + assertThat(messages).contains("deferring link to rpc child span"); + messages.clear(); + } + + @Test + public void singleHostSpans_multipleChildren() { + List trace = asList( + span2("a", null, "a", Kind.CLIENT, "client", null, false), + span2("a", "a", "b", Kind.SERVER, "server", "client", true), + span2("a", "a", "c", Kind.SERVER, "server", "client", false) + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder() + .parent("client") + .child("server") + .callCount(2L) + .errorCount(1L) + .build() + ); + } + + @Test + public void singleHostSpans_multipleChildren_defersNameToServer() { + List trace = asList( + span2("a", null, "a", Kind.CLIENT, "client", "server", false), + span2("a", "a", "b", Kind.SERVER, "server", null, false), + span2("a", "a", "c", Kind.SERVER, "server", null, false) + ); + + assertThat(new DependencyLinker(logger).putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("client").child("server").callCount(2L).build() + ); + + assertThat(messages).contains("deferring link to rpc child span"); + } + + /** + * Spans are sometimes intermediated by an unknown type of span. Prefer the nearest server when + * accounting for them. 
+ */ + @Test + public void intermediatedClientSpansMissingLocalServiceNameLinkToNearestServer() { + List trace = asList( + span2("a", null, "a", Kind.SERVER, "client", null, false), + span2("a", "a", "b", null, null, null, false), + // possibly a local fan-out span + span2("a", "b", "c", Kind.CLIENT, "server", null, false), + span2("a", "b", "d", Kind.CLIENT, "server", null, false) + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("client").child("server").callCount(2L).build() + ); + } + + @Test + public void errorsOnUninstrumentedLinks() { + List trace = asList( + span2("a", null, "a", Kind.SERVER, "client", null, false), + span2("a", "a", "b", null, null, null, false), + // there's no remote here, so we shouldn't see any error count + span2("a", "b", "c", Kind.CLIENT, "server", null, true), + span2("a", "b", "d", Kind.CLIENT, "server", null, true) + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("client").child("server").callCount(2L).build() + ); + } + + @Test + public void errorsOnInstrumentedLinks() { + List trace = asList( + span2("a", null, "a", Kind.SERVER, "foo", null, false), + span2("a", "a", "b", null, null, null, false), + span2("a", "b", "c", Kind.CLIENT, "bar", "baz", true), + span2("a", "b", "d", Kind.CLIENT, "bar", "baz", false) + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("foo").child("bar").callCount(2L).build(), + DependencyLink.newBuilder().parent("bar").child("baz").callCount(2L).errorCount(1L).build() + ); + } + + @Test + public void linkWithErrorIsLogged() { + List trace = asList( + span2("a", "b", "c", Kind.CLIENT, "foo", "bar", true) + ); + new DependencyLinker(logger).putTrace(trace.iterator()).link(); + + assertThat(messages).contains( + "incrementing error link foo -> bar" + ); + } + + /** Tag indicates a failed span, not an annotation */ + @Test + public void annotationNamedErrorDoesntIncrementErrorCount() { + List trace = asList( + span2("a", "b", "c", Kind.CLIENT, "foo", "bar", false) + .toBuilder().addAnnotation(1L, ERROR).build() + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("foo").child("bar").callCount(1L).build() + ); + } + + /** A loopback span is direction-agnostic, so can be linked properly regardless of kind. 
 */
+  @Test
+  public void linksLoopbackSpans() {
+    List validRootSpans = asList(
+      span2("a", null, "a", Kind.SERVER, "service", "service", false),
+      span2("b", null, "b", Kind.CLIENT, "service", "service", false)
+    );
+
+    for (Span span : validRootSpans) {
+      assertThat(new DependencyLinker().putTrace(asList(span).iterator()).link()).containsOnly(
+        DependencyLink.newBuilder().parent("service").child("service").callCount(1L).build()
+      );
+    }
+  }
+
+  @Test
+  public void noSpanKindTreatedSameAsClient() {
+    List trace = asList(
+      span2("a", null, "a", null, "some-client", "web", false),
+      span2("a", "a", "b", null, "web", "app", false),
+      span2("a", "b", "c", null, "app", "db", false)
+    );
+
+    assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly(
+      DependencyLink.newBuilder().parent("some-client").child("web").callCount(1L).build(),
+      DependencyLink.newBuilder().parent("web").child("app").callCount(1L).build(),
+      DependencyLink.newBuilder().parent("app").child("db").callCount(1L).build()
+    );
+  }
+
+  @Test
+  public void noSpanKindWithError() {
+    List trace = asList(
+      span2("a", null, "a", null, "some-client", "web", false),
+      span2("a", "a", "b", null, "web", "app", true),
+      span2("a", "b", "c", null, "app", "db", false)
+    );
+
+    assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly(
+      DependencyLink.newBuilder().parent("some-client").child("web").callCount(1L).build(),
+      DependencyLink.newBuilder().parent("web").child("app").callCount(1L).errorCount(1L).build(),
+      DependencyLink.newBuilder().parent("app").child("db").callCount(1L).build()
+    );
+  }
+
+  /**
+   * A dependency link is between two services. Given only one span, we cannot link if we don't know
+   * both service names.
+   */
+  @Test
+  public void cannotLinkSingleSpanWithoutBothServiceNames() {
+    List incompleteRootSpans = asList(
+      span2("a", null, "a", Kind.SERVER, null, null, false),
+      span2("a", null, "a", Kind.SERVER, "server", null, false),
+      span2("a", null, "a", Kind.SERVER, null, "client", false),
+      span2("a", null, "a", Kind.CLIENT, null, null, false),
+      span2("a", null, "a", Kind.CLIENT, "client", null, false),
+      span2("a", null, "a", Kind.CLIENT, null, "server", false)
+    );
+
+    for (Span span : incompleteRootSpans) {
+      assertThat(new DependencyLinker(logger)
+        .putTrace(asList(span).iterator()).link())
+        .isEmpty();
+    }
+  }
+
+  @Test
+  public void doesntLinkUnrelatedSpansWhenMissingRootSpan() {
+    String missingParentId = "a";
+    List trace = asList(
+      span2("a", missingParentId, "b", Kind.SERVER, "service1", null, false),
+      span2("a", missingParentId, "c", Kind.SERVER, "service2", null, false)
+    );
+
+    assertThat(new DependencyLinker(logger)
+      .putTrace(trace.iterator()).link())
+      .isEmpty();
+
+    assertThat(messages).contains(
+      "skipping synthetic node for broken span tree"
+    );
+  }
+
+  @Test
+  public void linksRelatedSpansWhenMissingRootSpan() {
+    String missingParentId = "a";
+    List trace = asList(
+      span2("a", missingParentId, "b", Kind.SERVER, "service1", null, false),
+      span2("a", "b", "c", Kind.SERVER, "service2", null, false)
+    );
+
+    assertThat(new DependencyLinker(logger).putTrace(trace.iterator()).link()).containsOnly(
+      DependencyLink.newBuilder().parent("service1").child("service2").callCount(1L).build()
+    );
+
+    assertThat(messages).contains(
+      "skipping synthetic node for broken span tree"
+    );
+  }
+
+  /** Client+Server spans that don't share IDs are treated as server spans missing their peer */
+  @Test
+  public void linksSingleHostSpans() {
+
List singleHostSpans = asList( + span2("a", null, "a", Kind.CLIENT, "web", null, false), + span2("a", "a", "b", Kind.SERVER, "app", null, false) + ); + + assertThat(new DependencyLinker().putTrace(singleHostSpans.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("web").child("app").callCount(1L).build() + ); + } + + @Test + public void linksSingleHostSpans_errorOnClient() { + List trace = asList( + span2("a", null, "a", Kind.CLIENT, "web", null, true), + span2("a", "a", "b", Kind.SERVER, "app", null, false) + ); + + assertThat(new DependencyLinker().putTrace(trace.iterator()).link()).containsOnly( + DependencyLink.newBuilder().parent("web").child("app").callCount(1L).errorCount(1L).build() + ); + } + + /** Creates a link when there's a span missing, in this case 2L which is an RPC from web to app */ + @Test + public void missingSpan() { + List singleHostSpans = asList( + span2("a", null, "a", Kind.SERVER, "web", null, false), + span2("a", "a", "b", Kind.CLIENT, "app", null, false) + ); + + assertThat(new DependencyLinker(logger).putTrace(singleHostSpans.iterator()).link()) + .containsOnly(DependencyLink.newBuilder().parent("web").child("app").callCount(1L).build()); + + assertThat(messages).contains( + "detected missing link to client span" + ); + } + + @Test + public void merge() { + List links = asList( + DependencyLink.newBuilder().parent("foo").child("bar").callCount(2L).errorCount(1L).build(), + DependencyLink.newBuilder().parent("foo").child("bar").callCount(2L).errorCount(2L).build(), + DependencyLink.newBuilder().parent("foo").child("foo").callCount(1L).build() + ); + + assertThat(DependencyLinker.merge(links)).containsExactly( + DependencyLink.newBuilder().parent("foo").child("bar").callCount(4L).errorCount(3L).build(), + DependencyLink.newBuilder().parent("foo").child("foo").callCount(1L).build() + ); + } + + @Test + public void merge_error() { + List links = asList( + DependencyLink.newBuilder().parent("client").child("server").callCount(2L).build(), + DependencyLink.newBuilder().parent("client").child("server").callCount(2L).build(), + DependencyLink.newBuilder().parent("client").child("client").callCount(1L).build() + ); + + assertThat(DependencyLinker.merge(links)).containsExactly( + DependencyLink.newBuilder().parent("client").child("server").callCount(4L).build(), + DependencyLink.newBuilder().parent("client").child("client").callCount(1L).build() + ); + } + + static Span span2(String traceId, @Nullable String parentId, String id, @Nullable Kind kind, + @Nullable String local, @Nullable String remote, boolean isError) { + Span.Builder result = Span.newBuilder().traceId(traceId).parentId(parentId).id(id).kind(kind); + if (local != null) result.localEndpoint(Endpoint.newBuilder().serviceName(local).build()); + if (remote != null) result.remoteEndpoint(Endpoint.newBuilder().serviceName(remote).build()); + if (isError) result.putTag(ERROR, ""); + return result.build(); + } +} diff --git a/zipkin/src/test/java/zipkin/internal/v2/internal/JsonCodecTest.java b/zipkin/src/test/java/zipkin/internal/v2/internal/JsonCodecTest.java new file mode 100644 index 00000000000..a96e8f23a62 --- /dev/null +++ b/zipkin/src/test/java/zipkin/internal/v2/internal/JsonCodecTest.java @@ -0,0 +1,73 @@ +/** + * Copyright 2015-2017 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package zipkin.internal.v2.internal; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import static zipkin.internal.Util.UTF_8; + +public class JsonCodecTest { + @Rule public ExpectedException thrown = ExpectedException.none(); + + @Test public void doesntStackOverflowOnToBufferWriterBug_lessThanBytes() { + thrown.expect(AssertionError.class); + thrown.expectMessage("Bug found using FooWriter to write Foo as json. Wrote 1/2 bytes: a"); + + class FooWriter implements Buffer.Writer { + @Override public int sizeInBytes(Object value) { + return 2; + } + + @Override public void write(Object value, Buffer buffer) { + buffer.writeByte('a'); + throw new RuntimeException("buggy"); + } + } + + class Foo { + @Override public String toString() { + return new String(JsonCodec.write(new FooWriter(), this), + UTF_8); + } + } + + new Foo().toString(); + } + + @Test public void doesntStackOverflowOnToBufferWriterBug_Overflow() { + thrown.expect(AssertionError.class); + thrown.expectMessage("Bug found using FooWriter to write Foo as json. Wrote 2/2 bytes: ab"); + + // pretend there was a bug calculating size, ex it calculated incorrectly as to small + class FooWriter implements Buffer.Writer { + @Override public int sizeInBytes(Object value) { + return 2; + } + + @Override public void write(Object value, Buffer buffer) { + buffer.writeByte('a').writeByte('b').writeByte('c'); // wrote larger than size! + } + } + + class Foo { + @Override public String toString() { + return new String(JsonCodec.write(new FooWriter(), this), UTF_8); + } + } + + new Foo().toString(); + } +} diff --git a/zipkin/src/test/java/zipkin/internal/v2/internal/JsonEscaperTest.java b/zipkin/src/test/java/zipkin/internal/v2/internal/JsonEscaperTest.java new file mode 100644 index 00000000000..2f6a78f39d2 --- /dev/null +++ b/zipkin/src/test/java/zipkin/internal/v2/internal/JsonEscaperTest.java @@ -0,0 +1,50 @@ +/** + * Copyright 2015-2017 The OpenZipkin Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ +package zipkin.internal.v2.internal; + +import java.io.IOException; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; +import static zipkin.internal.v2.internal.JsonEscaper.jsonEscape; +import static zipkin.internal.v2.internal.JsonEscaper.jsonEscapedSizeInBytes; + +public class JsonEscaperTest { + + @Test public void testJjsonEscapedSizeInBytes() throws IOException { + assertThat(jsonEscapedSizeInBytes(new String(new char[] {0, 'a', 1}))) + .isEqualTo(13); + assertThat(jsonEscapedSizeInBytes(new String(new char[] {'"', '\\', '\t', '\b'}))) + .isEqualTo(8); + assertThat(jsonEscapedSizeInBytes(new String(new char[] {'\n', '\r', '\f'}))) + .isEqualTo(6); + assertThat(jsonEscapedSizeInBytes("\u2028 and \u2029")) + .isEqualTo(17); + assertThat(jsonEscapedSizeInBytes("\"foo")) + .isEqualTo(5); + } + + @Test public void testJsonEscape() throws IOException { + assertThat(jsonEscape(new String(new char[] {0, 'a', 1}))) + .isEqualTo("\\u0000a\\u0001"); + assertThat(jsonEscape(new String(new char[] {'"', '\\', '\t', '\b'}))) + .isEqualTo("\\\"\\\\\\t\\b"); + assertThat(jsonEscape(new String(new char[] {'\n', '\r', '\f'}))) + .isEqualTo("\\n\\r\\f"); + assertThat(jsonEscape("\u2028 and \u2029")) + .isEqualTo("\\u2028 and \\u2029"); + assertThat(jsonEscape("\"foo")) + .isEqualTo("\\\"foo"); + } +} diff --git a/zipkin/src/test/java/zipkin/internal/NodeTest.java b/zipkin/src/test/java/zipkin/internal/v2/internal/NodeTest.java similarity index 63% rename from zipkin/src/test/java/zipkin/internal/NodeTest.java rename to zipkin/src/test/java/zipkin/internal/v2/internal/NodeTest.java index fba3b733c78..b546b2cf1b4 100644 --- a/zipkin/src/test/java/zipkin/internal/NodeTest.java +++ b/zipkin/src/test/java/zipkin/internal/v2/internal/NodeTest.java @@ -11,7 +11,7 @@ * or implied. See the License for the specific language governing permissions and limitations under * the License. */ -package zipkin.internal; +package zipkin.internal.v2.internal; import java.util.ArrayList; import java.util.Collections; @@ -20,12 +20,11 @@ import java.util.logging.Level; import java.util.logging.Logger; import org.junit.Test; -import zipkin.Span; -import zipkin.TestObjects; +import zipkin.internal.v2.Span; import static java.util.Arrays.asList; import static org.assertj.core.api.Assertions.assertThat; -import static zipkin.internal.Util.toLowerHex; +import static zipkin.internal.v2.internal.DependencyLinkerTest.span2; public class NodeTest { List messages = new ArrayList<>(); @@ -62,8 +61,7 @@ public void addChild_selfNotAllowed() { * e f g h * } */ - @Test - public void traversesBreadthFirst() { + @Test public void traversesBreadthFirst() { Node a = new Node().value('a'); Node b = new Node().value('b'); Node c = new Node().value('c'); @@ -87,48 +85,47 @@ public void traversesBreadthFirst() { /** * Makes sure that the trace tree is constructed based on parent-child, not by parameter order. */ - @Test - public void constructsTraceTree() { + @Test public void constructsTraceTree() { + List trace = asList( + span2("a", null, "a", Span.Kind.CLIENT, "client", null, false), + span2("a", "a", "b", Span.Kind.SERVER, "server", null, false), + span2("a", "b", "c", Span.Kind.CLIENT, "server", null, false) + ); // TRACE is sorted with root span first, lets shuffle them to make // sure the trace is stitched together by id. 
- List copy = new ArrayList<>(TestObjects.TRACE); + List copy = new ArrayList<>(trace); Collections.shuffle(copy); Node.TreeBuilder treeBuilder = - new Node.TreeBuilder<>(logger, copy.get(0).traceIdString()); + new Node.TreeBuilder<>(logger, copy.get(0).traceId()); for (Span span : copy) { - treeBuilder.addNode( - span.parentId != null ? toLowerHex(span.parentId) : null, toLowerHex(span.id), span - ); + treeBuilder.addNode(span.parentId(), span.id(), span); } Node root = treeBuilder.build(); assertThat(root.value()) - .isEqualTo(TestObjects.TRACE.get(0)); + .isEqualTo(trace.get(0)); assertThat(root.children()).extracting(Node::value) - .containsExactly(TestObjects.TRACE.get(1)); + .containsExactly(trace.get(1)); Node child = root.children().iterator().next(); assertThat(child.children()).extracting(Node::value) - .containsExactly(TestObjects.TRACE.get(2)); + .containsExactly(trace.get(2)); } - @Test - public void constructTree_noChildLeftBehind() { + @Test public void constructTree_noChildLeftBehind() { List spans = asList( - Span.builder().traceId(137L).id(1L).name("root-0").build(), - Span.builder().traceId(137L).parentId(1L).id(2L).name("child-0").build(), - Span.builder().traceId(137L).parentId(1L).id(3L).name("child-1").build(), - Span.builder().traceId(137L).id(4L).name("lost-0").build(), - Span.builder().traceId(137L).id(5L).name("lost-1").build()); + Span.newBuilder().traceId("a").id("b").name("root-0").build(), + Span.newBuilder().traceId("a").parentId("b").id("c").name("child-0").build(), + Span.newBuilder().traceId("a").parentId("b").id("d").name("child-1").build(), + Span.newBuilder().traceId("a").id("e").name("lost-0").build(), + Span.newBuilder().traceId("a").id("f").name("lost-1").build()); int treeSize = 0; - Node.TreeBuilder treeBuilder = - new Node.TreeBuilder<>(logger, spans.get(0).traceIdString()); + Node.TreeBuilder treeBuilder = new Node.TreeBuilder<>(logger, spans.get(0).traceId()); for (Span span : spans) { - assertThat(treeBuilder.addNode( - span.parentId != null ? 
toLowerHex(span.parentId) : null, toLowerHex(span.id), span - )).isTrue(); + assertThat(treeBuilder.addNode(span.parentId(), span.id(), span)) + .isTrue(); } Node tree = treeBuilder.build(); Iterator> iter = tree.traverse(); @@ -138,21 +135,19 @@ public void constructTree_noChildLeftBehind() { } assertThat(treeSize).isEqualTo(spans.size()); assertThat(messages).containsExactly( - "attributing span missing parent to root: traceId=0000000000000089, rootSpanId=0000000000000001, spanId=0000000000000004", - "attributing span missing parent to root: traceId=0000000000000089, rootSpanId=0000000000000001, spanId=0000000000000005" + "attributing span missing parent to root: traceId=000000000000000a, rootSpanId=000000000000000b, spanId=000000000000000e", + "attributing span missing parent to root: traceId=000000000000000a, rootSpanId=000000000000000b, spanId=000000000000000f" ); } @Test public void constructTree_headless() { - Span s2 = Span.builder().traceId(137L).parentId(1L).id(2L).name("s2").build(); - Span s3 = Span.builder().traceId(137L).parentId(1L).id(3L).name("s3").build(); - Span s4 = Span.builder().traceId(137L).parentId(1L).id(4L).name("s4").build(); + Span s2 = Span.newBuilder().traceId("a").parentId("a").id("b").name("s2").build(); + Span s3 = Span.newBuilder().traceId("a").parentId("a").id("c").name("s3").build(); + Span s4 = Span.newBuilder().traceId("a").parentId("a").id("d").name("s4").build(); - Node.TreeBuilder treeBuilder = new Node.TreeBuilder<>(logger, s2.traceIdString()); + Node.TreeBuilder treeBuilder = new Node.TreeBuilder<>(logger, s2.traceId()); for (Span span : asList(s2, s3, s4)) { - treeBuilder.addNode( - span.parentId != null ? toLowerHex(span.parentId) : null, toLowerHex(span.id), span - ); + treeBuilder.addNode(span.parentId(), span.id(), span); } Node root = treeBuilder.build(); assertThat(root.isSyntheticRootForPartialTree()) @@ -160,26 +155,21 @@ public void constructTree_noChildLeftBehind() { assertThat(root.children()).extracting(Node::value) .containsExactly(s2, s3, s4); assertThat(messages).containsExactly( - "substituting dummy node for missing root span: traceId=0000000000000089" + "substituting dummy node for missing root span: traceId=000000000000000a" ); } - @Test - public void addNode_skipsOnCycle() { - Span s1 = Span.builder().traceId(137L).parentId(null).id(1L).name("s1").build(); - Span s2 = Span.builder().traceId(137L).parentId(2L).id(2L).name("s2").build(); + @Test public void addNode_skipsOnCycle() { + Span s1 = Span.newBuilder().traceId("a").parentId(null).id("a").name("s1").build(); + Span s2 = Span.newBuilder().traceId("a").parentId("b").id("b").name("s2").build(); - Node.TreeBuilder treeBuilder = new Node.TreeBuilder<>(logger, s2.traceIdString()); - treeBuilder.addNode( - s1.parentId != null ? toLowerHex(s1.parentId) : null, toLowerHex(s1.id), s1 - ); - assertThat(treeBuilder.addNode( - s2.parentId != null ? 
toLowerHex(s2.parentId) : null, toLowerHex(s2.id), s2 - )).isFalse(); + Node.TreeBuilder treeBuilder = new Node.TreeBuilder<>(logger, s2.traceId()); + treeBuilder.addNode(s1.parentId(), s1.id(), s1); + assertThat(treeBuilder.addNode(s2.parentId(), s2.id(), s2)).isFalse(); treeBuilder.build(); assertThat(messages).containsExactly( - "skipping circular dependency: traceId=0000000000000089, spanId=0000000000000002" + "skipping circular dependency: traceId=000000000000000a, spanId=000000000000000b" ); } } diff --git a/zipkin/src/test/java/zipkin/internal/v2/storage/InMemoryStorageTest.java b/zipkin/src/test/java/zipkin/internal/v2/storage/InMemoryStorageTest.java index deb62161d0f..60a6a5db5b3 100644 --- a/zipkin/src/test/java/zipkin/internal/v2/storage/InMemoryStorageTest.java +++ b/zipkin/src/test/java/zipkin/internal/v2/storage/InMemoryStorageTest.java @@ -19,7 +19,7 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; import org.junit.Test; -import zipkin.DependencyLink; +import zipkin.internal.v2.DependencyLink; import zipkin.internal.v2.Endpoint; import zipkin.internal.v2.Span; @@ -91,7 +91,7 @@ public class InMemoryStorageTest { storage.accept(asList(span)); assertThat(storage.getDependencies(TODAY + 1000L, TODAY).execute()).containsOnly( - DependencyLink.builder().parent("kafka").child("app").callCount(1L).build() + DependencyLink.newBuilder().parent("kafka").child("app").callCount(1L).build() ); }
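
For orientation, the DependencyLinkerTest cases above all follow the same shape: build v2 spans, feed one trace's iterator to the linker, then read the aggregated links. Below is a minimal, hypothetical sketch of that flow outside a test. It assumes DependencyLinker lives in zipkin.internal.v2.internal next to the test, that link() and merge(...) return Lists, and it uses the literal "error" tag key in place of the ERROR constant the tests import; none of those details are confirmed by this diff.

// Hypothetical sketch only: package placement and the "error" tag key are assumptions,
// not something this diff confirms.
package zipkin.internal.v2.internal;

import java.util.Arrays;
import java.util.List;
import zipkin.internal.v2.DependencyLink;
import zipkin.internal.v2.Endpoint;
import zipkin.internal.v2.Span;

class DependencyLinkerSketch {
  public static void main(String[] args) {
    Endpoint web = Endpoint.newBuilder().serviceName("web").build();
    Endpoint app = Endpoint.newBuilder().serviceName("app").build();

    // One RPC: a client span and its child server span, where the server side failed.
    List<Span> trace = Arrays.asList(
      Span.newBuilder().traceId("a").id("a").kind(Span.Kind.CLIENT).localEndpoint(web).build(),
      Span.newBuilder().traceId("a").parentId("a").id("b").kind(Span.Kind.SERVER)
        .localEndpoint(app).putTag("error", "").build()
    );

    // Feed the trace as an iterator, then read the aggregated links:
    // web -> app with callCount 1 and errorCount 1.
    List<DependencyLink> links = new DependencyLinker().putTrace(trace.iterator()).link();

    // Links computed separately (for example per query page) can be combined afterwards.
    List<DependencyLink> combined = DependencyLinker.merge(Arrays.asList(
      links.get(0),
      DependencyLink.newBuilder().parent("web").child("app").callCount(2L).build()
    ));
    System.out.println(combined); // one web -> app link with callCount 3
  }
}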
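The two JsonCodecTest cases only exercise misbehaving writers, where the byte count reported by sizeInBytes disagrees with what write actually emits and JsonCodec.write fails with an AssertionError. For contrast, here is a hypothetical sketch of a conforming writer; it assumes Buffer, Buffer.Writer and JsonCodec are reachable from zipkin.internal.v2.internal, which is where the test itself lives.

// Hypothetical sketch: a writer whose size calculation matches its output byte for byte.
package zipkin.internal.v2.internal;

import static zipkin.internal.Util.UTF_8;

class EmptyArrayWriter implements Buffer.Writer {
  @Override public int sizeInBytes(Object value) {
    return 2; // must equal exactly what write() emits below
  }

  @Override public void write(Object value, Buffer buffer) {
    buffer.writeByte('[').writeByte(']'); // exactly 2 bytes, so no AssertionError
  }

  public static void main(String[] args) {
    // Per the tests above, JsonCodec.write appears to size the buffer from sizeInBytes
    // and assert the writer filled it exactly.
    System.out.println(new String(JsonCodec.write(new EmptyArrayWriter(), "ignored"), UTF_8)); // []
  }
}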
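NodeTest now drives Node.TreeBuilder with the v2 Span's string IDs instead of converting longs with toLowerHex. A minimal, hypothetical sketch of that flow follows; it assumes Node is visible from zipkin.internal.v2.internal (where the test moved) and simply prints each span in breadth-first order.

// Hypothetical sketch of the TreeBuilder flow the test exercises.
package zipkin.internal.v2.internal;

import java.util.Iterator;
import java.util.logging.Logger;
import zipkin.internal.v2.Span;

class TreeBuilderSketch {
  public static void main(String[] args) {
    Logger logger = Logger.getLogger(TreeBuilderSketch.class.getName());

    Span root = Span.newBuilder().traceId("a").id("a").name("root").build();
    Span child = Span.newBuilder().traceId("a").parentId("a").id("b").name("child").build();

    // Spans are stitched together by (parentId, id), so insertion order doesn't matter.
    Node.TreeBuilder<Span> builder = new Node.TreeBuilder<>(logger, root.traceId());
    builder.addNode(child.parentId(), child.id(), child); // returns false if it would form a cycle
    builder.addNode(root.parentId(), root.id(), root);

    // Traversal is breadth-first from the root.
    Iterator<Node<Span>> iter = builder.build().traverse();
    while (iter.hasNext()) System.out.println(iter.next().value());
  }
}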