From ff61badcb884003acca2e53413d1c0ee04f50066 Mon Sep 17 00:00:00 2001 From: Gordon Brown Date: Tue, 9 Apr 2019 08:49:00 -0600 Subject: [PATCH 1/8] Improve Watcher test framework resiliency (#40658) It is possible for the watches tracked by ScheduleTriggerEngineMock to get out of sync with the Watches in the ScheduleTriggerEngine production code, which can lead to watches failing to run. This commit: 1. Changes TimeWarp to try to run the watch on all schedulers, rather than stopping after one which claims to have the watch registered. This reduces the impact of desynchronization between the mocking code and the backing production code. 2. Makes ScheduleTriggerEngineMock respect pauses of execution again. This is necessary to prevent duplicate watch invocations due to the above change. 3. Tweaks how watches are registered in ScheduleTriggerEngineMock to prevent race conditions due to concurrent modification. 4. Tweaks WatcherConcreteIndexTests to use TimeWarp instead of waiting for watches to be triggered, as TimeWarp is more reliable and accomplishes the same goal. 
--- .../watcher/WatcherConcreteIndexTests.java | 9 +++-- .../webhook/WebhookHttpsIntegrationTests.java | 3 +- .../webhook/WebhookIntegrationTests.java | 4 +-- .../AbstractWatcherIntegrationTestCase.java | 14 ++++++-- .../test/integration/BasicWatcherTests.java | 2 -- .../HttpSecretsIntegrationTests.java | 1 - .../test/integration/WatchAckTests.java | 1 - .../test/integration/WatchMetadataTests.java | 1 - .../trigger/ScheduleTriggerEngineMock.java | 35 ++++++++++++------- 9 files changed, 39 insertions(+), 31 deletions(-) diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherConcreteIndexTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherConcreteIndexTests.java index e6b253d17397c..237c0a2bdf153 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherConcreteIndexTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherConcreteIndexTests.java @@ -23,11 +23,6 @@ public class WatcherConcreteIndexTests extends AbstractWatcherIntegrationTestCase { - @Override - protected boolean timeWarped() { - return false; - } - public void testCanUseAnyConcreteIndexName() throws Exception { String newWatcherIndexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); String watchResultsIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); @@ -35,6 +30,7 @@ public void testCanUseAnyConcreteIndexName() throws Exception { stopWatcher(); replaceWatcherIndexWithRandomlyNamedIndex(Watch.INDEX, newWatcherIndexName); + ensureGreen(newWatcherIndexName); startWatcher(); PutWatchResponse putWatchResponse = watcherClient().preparePutWatch("mywatch").setSource(watchBuilder() @@ -45,6 +41,9 @@ public void testCanUseAnyConcreteIndexName() throws Exception { .get(); assertTrue(putWatchResponse.isCreated()); + refresh(); + + timeWarp().trigger("mywatch"); assertBusy(() -> { SearchResponse searchResult = 
client().prepareSearch(watchResultsIndex).setTrackTotalHits(true).get(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java index adbf43140328b..bdaa2377fd1d7 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java @@ -15,10 +15,10 @@ import org.elasticsearch.xpack.core.watcher.history.WatchRecord; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; import org.elasticsearch.xpack.watcher.actions.ActionBuilders; +import org.elasticsearch.xpack.watcher.common.http.BasicAuth; import org.elasticsearch.xpack.watcher.common.http.HttpMethod; import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; import org.elasticsearch.xpack.watcher.common.http.Scheme; -import org.elasticsearch.xpack.watcher.common.http.BasicAuth; import org.elasticsearch.xpack.watcher.common.text.TextTemplate; import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; @@ -67,7 +67,6 @@ public void stopWebservice() throws Exception { webServer.close(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35503") public void testHttps() throws Exception { webServer.enqueue(new MockResponse().setResponseCode(200).setBody("body")); HttpRequestTemplate.Builder builder = HttpRequestTemplate.builder("localhost", webServer.getPort()) diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java index 521cc2d49fc3f..2c961db6187fe 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.watcher.actions.webhook; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.transport.TransportAddress; @@ -18,9 +17,9 @@ import org.elasticsearch.xpack.core.watcher.history.WatchRecord; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; import org.elasticsearch.xpack.watcher.actions.ActionBuilders; +import org.elasticsearch.xpack.watcher.common.http.BasicAuth; import org.elasticsearch.xpack.watcher.common.http.HttpMethod; import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; -import org.elasticsearch.xpack.watcher.common.http.BasicAuth; import org.elasticsearch.xpack.watcher.common.text.TextTemplate; import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; @@ -44,7 +43,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35503") public class WebhookIntegrationTests extends AbstractWatcherIntegrationTestCase { private MockWebServer webServer = new MockWebServer(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index 21e5751029f52..8c44ba831b359 
100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.watcher.test; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -580,6 +582,7 @@ public EmailSent send(Email email, Authentication auth, Profile profile, String } protected static class TimeWarp { + private static final Logger logger = LogManager.getLogger(TimeWarp.class); private final List schedulers; private final ClockMock clock; @@ -598,9 +601,14 @@ public ClockMock clock() { } public void trigger(String watchId, int times, TimeValue timeValue) { - boolean isTriggered = schedulers.stream().anyMatch(scheduler -> scheduler.trigger(watchId, times, timeValue)); - String msg = String.format(Locale.ROOT, "could not find watch [%s] to trigger", watchId); - assertThat(msg, isTriggered, is(true)); + long triggeredCount = schedulers.stream() + .filter(scheduler -> scheduler.trigger(watchId, times, timeValue)) + .count(); + String msg = String.format(Locale.ROOT, "watch was triggered on [%d] schedulers, expected [1]", triggeredCount); + if (triggeredCount > 1) { + logger.warn(msg); + } + assertThat(msg, triggeredCount, greaterThanOrEqualTo(1L)); } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java index 05d8b4ef29ded..2f2299d7d65e0 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java +++ 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.watcher.test.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; @@ -63,7 +62,6 @@ @TestLogging("org.elasticsearch.xpack.watcher:DEBUG," + "org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE") -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35503") public class BasicWatcherTests extends AbstractWatcherIntegrationTestCase { public void testIndexWatch() throws Exception { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java index 3eefa03137146..f8ddc3065f79d 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java @@ -87,7 +87,6 @@ protected Settings nodeSettings(int nodeOrdinal) { return super.nodeSettings(nodeOrdinal); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40587") public void testHttpInput() throws Exception { WatcherClient watcherClient = watcherClient(); watcherClient.preparePutWatch("_id") diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java index 0e95a15b2a35c..a0ef5e97d8534 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java +++ 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java @@ -122,7 +122,6 @@ public void testAckSingleAction() throws Exception { assertThat(throttledCount, greaterThan(0L)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35506") public void testAckAllActions() throws Exception { PutWatchResponse putWatchResponse = watcherClient().preparePutWatch() .setId("_id") diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java index aff3a62c12cf1..1e2c1ddbc64f1 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java @@ -38,7 +38,6 @@ public class WatchMetadataTests extends AbstractWatcherIntegrationTestCase { - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40631") public void testWatchMetadata() throws Exception { Map metadata = new HashMap<>(); metadata.put("foo", "bar"); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java index f58954658fc1e..3e46f7102c192 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java @@ -21,8 +21,10 @@ import java.time.ZonedDateTime; import java.util.Collection; import java.util.Collections; +import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; +import 
java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; /** * A mock scheduler to help with unit testing. Provide {@link ScheduleTriggerEngineMock#trigger} method to manually trigger @@ -31,7 +33,8 @@ public class ScheduleTriggerEngineMock extends ScheduleTriggerEngine { private static final Logger logger = LogManager.getLogger(ScheduleTriggerEngineMock.class); - private final ConcurrentMap watches = new ConcurrentHashMap<>(); + private final AtomicReference> watches = new AtomicReference<>(new ConcurrentHashMap<>()); + private final AtomicBoolean paused = new AtomicBoolean(false); public ScheduleTriggerEngineMock(ScheduleRegistry scheduleRegistry, Clock clock) { super(scheduleRegistry, clock); @@ -49,30 +52,32 @@ public ScheduleTriggerEvent parseTriggerEvent(TriggerService service, String wat } @Override - public void start(Collection jobs) { - jobs.forEach(this::add); + public synchronized void start(Collection jobs) { + Map newWatches = new ConcurrentHashMap<>(); + jobs.forEach((watch) -> newWatches.put(watch.id(), watch)); + watches.set(newWatches); + paused.set(false); } @Override public void stop() { - watches.clear(); + watches.set(new ConcurrentHashMap<>()); } @Override - public void add(Watch watch) { + public synchronized void add(Watch watch) { logger.debug("adding watch [{}]", watch.id()); - watches.put(watch.id(), watch); + watches.get().put(watch.id(), watch); } @Override public void pauseExecution() { - // No action is needed because this engine does not trigger watches on a schedule (instead - // they must be triggered manually). 
+ paused.set(true); } @Override - public boolean remove(String jobId) { - return watches.remove(jobId) != null; + public synchronized boolean remove(String jobId) { + return watches.get().remove(jobId) != null; } public boolean trigger(String jobName) { @@ -80,7 +85,11 @@ public boolean trigger(String jobName, int times, TimeValue interval) { - if (watches.containsKey(jobName) == false) { + if (watches.get().containsKey(jobName) == false) { + return false; + } + if (paused.get()) { + logger.info("not executing watch [{}] on this scheduler because it is paused", jobName); return false; } @@ -89,7 +98,7 @@ public boolean trigger(String jobName, int times, TimeValue interval) { logger.debug("firing watch [{}] at [{}]", jobName, now); ScheduleTriggerEvent event = new ScheduleTriggerEvent(jobName, now, now); consumers.forEach(consumer -> consumer.accept(Collections.singletonList(event))); - if (interval != null) { + if (interval != null) { if (clock instanceof ClockMock) { ((ClockMock) clock).fastForward(interval); } else { From 8e61b77d3f849661b7175544f471119042fe9551 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Tue, 9 Apr 2019 18:38:54 +0300 Subject: [PATCH 2/8] SQL: Fix catalog filtering in SYS COLUMNS (#40583) Properly treat '%' as a wildcard for catalog filtering instead of doing a straight string match. Table filtering now considers aliases as well. Add escaping char for LIKE queries with user defined params Fix monotonicity of ORDINAL_POSITION Add integration test for SYS COLUMNS - currently running only inside single_node since the cluster name is test dependent. 
Add pattern unescaping for index names Fix #40582 --- .../xpack/sql/jdbc/JdbcDatabaseMetaData.java | 23 +-- .../xpack/sql/qa/security/JdbcSecurityIT.java | 10 +- .../sql/qa/single_node/JdbcCsvSpecIT.java | 16 ++ .../xpack/sql/qa/jdbc/CsvTestUtils.java | 2 + .../xpack/sql/qa/jdbc/JdbcTestUtils.java | 3 +- .../qa/jdbc/SpecBaseIntegrationTestCase.java | 6 +- .../setup_mock_metadata_get_columns.sql | 2 +- .../single-node-only/command-sys.csv-spec | 120 +++++++++++++ .../sql/analysis/index/IndexResolver.java | 17 +- .../plan/logical/command/sys/SysColumns.java | 56 ++++-- .../xpack/sql/util/StringUtils.java | 28 +++ .../logical/command/sys/SysColumnsTests.java | 169 ++++++++++++++---- .../logical/command/sys/SysParserTests.java | 163 ----------------- .../xpack/sql/util/LikeConversionTests.java | 28 ++- 14 files changed, 409 insertions(+), 234 deletions(-) create mode 100644 x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys.csv-spec delete mode 100644 x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java index eaececff16d7c..4d5b6eae2e1e6 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java @@ -33,6 +33,8 @@ */ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { + private static final String WILDCARD = "%"; + private final JdbcConnection con; JdbcDatabaseMetaData(JdbcConnection con) { @@ -713,19 +715,19 @@ private boolean isDefaultCatalog(String catalog) throws SQLException { // null means catalog info is irrelevant // % means return all catalogs // EMPTY means return those without a catalog - return catalog == null || 
catalog.equals(EMPTY) || catalog.equals("%") || catalog.equals(defaultCatalog()); + return catalog == null || catalog.equals(EMPTY) || catalog.equals(WILDCARD) || catalog.equals(defaultCatalog()); } private boolean isDefaultSchema(String schema) { // null means schema info is irrelevant // % means return all schemas` // EMPTY means return those without a schema - return schema == null || schema.equals(EMPTY) || schema.equals("%"); + return schema == null || schema.equals(EMPTY) || schema.equals(WILDCARD); } @Override public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException { - String statement = "SYS TABLES CATALOG LIKE ? LIKE ?"; + String statement = "SYS TABLES CATALOG LIKE ? ESCAPE '\\' LIKE ? ESCAPE '\\' "; if (types != null && types.length > 0) { statement += " TYPE ?"; @@ -738,8 +740,8 @@ public ResultSet getTables(String catalog, String schemaPattern, String tableNam } PreparedStatement ps = con.prepareStatement(statement); - ps.setString(1, catalog != null ? catalog.trim() : "%"); - ps.setString(2, tableNamePattern != null ? tableNamePattern.trim() : "%"); + ps.setString(1, catalog != null ? catalog.trim() : WILDCARD); + ps.setString(2, tableNamePattern != null ? tableNamePattern.trim() : WILDCARD); if (types != null && types.length > 0) { for (int i = 0; i < types.length; i++) { @@ -784,14 +786,15 @@ public ResultSet getTableTypes() throws SQLException { return memorySet(con.cfg, columnInfo("TABLE_TYPES", "TABLE_TYPE"), data); } + @Override public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { - PreparedStatement ps = con.prepareStatement("SYS COLUMNS CATALOG ? TABLE LIKE ? LIKE ?"); - // TODO: until passing null works, pass an empty string - ps.setString(1, catalog != null ? catalog.trim() : EMPTY); - ps.setString(2, tableNamePattern != null ? 
tableNamePattern.trim() : "%"); - ps.setString(3, columnNamePattern != null ? columnNamePattern.trim() : "%"); + PreparedStatement ps = con.prepareStatement("SYS COLUMNS CATALOG ? TABLE LIKE ? ESCAPE '\\' LIKE ? ESCAPE '\\'"); + // NB: catalog is not a pattern hence why null is send instead + ps.setString(1, catalog != null ? catalog.trim() : null); + ps.setString(2, tableNamePattern != null ? tableNamePattern.trim() : WILDCARD); + ps.setString(3, columnNamePattern != null ? columnNamePattern.trim() : WILDCARD); return ps.executeQuery(); } diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java index c56f3b23946e7..a911e7d4854ae 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java @@ -232,13 +232,13 @@ public void expectUnknownColumn(String user, String sql, String column) throws E public void checkNoMonitorMain(String user) throws Exception { // Without monitor/main the JDBC driver - ES server version comparison doesn't take place, which fails everything else expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user))); - expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user)).getMetaData().getDatabaseMajorVersion()); + expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user)).getMetaData().getDatabaseMajorVersion()); expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user)).getMetaData().getDatabaseMinorVersion()); - expectUnauthorized("cluster:monitor/main", user, + expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user)).createStatement().executeQuery("SELECT * FROM test")); - 
expectUnauthorized("cluster:monitor/main", user, + expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user)).createStatement().executeQuery("SHOW TABLES LIKE 'test'")); - expectUnauthorized("cluster:monitor/main", user, + expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user)).createStatement().executeQuery("DESCRIBE test")); } @@ -292,7 +292,7 @@ public void testMetaDataGetColumnsWorksAsFullAccess() throws Exception { expectActionMatchesAdmin( con -> con.getMetaData().getColumns(null, "%", "%t", "%"), "full_access", - con -> con.getMetaData().getColumns(null, "%", "%", "%")); + con -> con.getMetaData().getColumns(null, "%", "%t", "%")); } public void testMetaDataGetColumnsWithNoAccess() throws Exception { diff --git a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java index 66ac2e2c7df24..f742b1304a79e 100644 --- a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java @@ -5,10 +5,26 @@ */ package org.elasticsearch.xpack.sql.qa.single_node; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.elasticsearch.xpack.sql.qa.jdbc.CsvSpecTestCase; import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.specParser; + public class JdbcCsvSpecIT extends CsvSpecTestCase { + + + @ParametersFactory(argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + List list = new ArrayList<>(); + list.addAll(CsvSpecTestCase.readScriptSpec()); + return readScriptSpec("/single-node-only/command-sys.csv-spec", 
specParser()); + } + public JdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { super(fileName, groupName, testName, lineNumber, testCase); } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java index 8cc8cf6e04044..6376bd13308d6 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java @@ -155,6 +155,8 @@ private static String resolveColumnType(String type) { return "timestamp"; case "bt": return "byte"; + case "sh": + return "short"; default: return type; } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java index 19c30b55e92b1..123f22073ae57 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java @@ -212,7 +212,8 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO @SuppressForbidden(reason = "need to open jar") private static JarInputStream getJarStream(URL resource) throws IOException { URLConnection con = resource.openConnection(); - con.setDefaultUseCaches(false); + // do not to cache files (to avoid keeping file handles around) + con.setUseCaches(false); return new JarInputStream(con.getInputStream()); } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java index 4282b97d87fb1..05ba49bbd0d32 100644 --- 
a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java @@ -19,6 +19,7 @@ import java.io.InputStream; import java.io.InputStreamReader; import java.net.URL; +import java.net.URLConnection; import java.nio.charset.StandardCharsets; import java.sql.Connection; import java.sql.ResultSet; @@ -216,6 +217,9 @@ public interface Parser { @SuppressForbidden(reason = "test reads from jar") public static InputStream readFromJarUrl(URL source) throws IOException { - return source.openStream(); + URLConnection con = source.openConnection(); + // do not to cache files (to avoid keeping file handles around) + con.setUseCaches(false); + return con.getInputStream(); } } diff --git a/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql index d6df2fbb9e14b..5e02df28b068d 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql +++ b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql @@ -30,7 +30,7 @@ FROM DUAL UNION ALL SELECT null, 'test1', 'name.keyword', 12, 'KEYWORD', 32766, 2147483647, null, null, 1, -- columnNullable - null, null, 12, 0, 2147483647, 1, 'YES', null, null, null, null, 'NO', 'NO' + null, null, 12, 0, 2147483647, 2, 'YES', null, null, null, null, 'NO', 'NO' FROM DUAL UNION ALL SELECT null, 'test2', 'date', 93, 'DATETIME', 29, 8, null, null, diff --git a/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys.csv-spec new file mode 100644 index 0000000000000..f6b02ba4bea43 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys.csv-spec @@ -0,0 +1,120 @@ +// +// Sys Commands +// + +sysColumnsWithTableLikeWithEscape +SYS COLUMNS 
TABLE LIKE 'test\_emp' ESCAPE '\'; + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |4 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |5 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster 
|null |test_emp |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |11 |YES |null |null |null |null |NO |NO +; + +sysColumnsWithTableLikeNoEscape +SYS COLUMNS TABLE LIKE 'test_emp'; + +// since there's no escaping test_emp means test*emp which matches also test_alias_emp +// however as there's no way to filter the matching indices, we can't exclude the field + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |emp_no |4 
|INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |extra.info.gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null 
|null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +; + +sysColumnsWithCatalogAndLike +SYS COLUMNS CATALOG 'x-pack_plugin_sql_qa_single-node_integTestCluster' TABLE LIKE 'test\_emp\_copy' ESCAPE '\'; + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +---------------+---------------+---------------+-------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|extra.info.gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|extra_no |4 |INTEGER 
|11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +; + +sysColumnsOnAliasWithTableLike +SYS COLUMNS TABLE LIKE 'test\_alias' ESCAPE '\'; + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s 
|SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |extra.info.gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null 
|test_alias |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +; + +sysColumnsAllTables +SYS COLUMNS TABLE LIKE '%'; + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |@timestamp |93 
|DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |bytes_in |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |2 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |bytes_out |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |client_ip |12 |IP |0 |39 |null |null |1 |null |null |12 |0 |null |4 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |client_port |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |5 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |dest_ip |12 |IP |0 |39 |null |null |1 |null |null |12 |0 |null |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |id |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |status |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |4 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 
|2147483647 |5 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |11 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |first_name |12 
|TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +; \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java index ec2dfa46f47f2..367c9ea3a149f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java @@ -415,6 +415,8 @@ public void 
resolveAsSeparateMappings(String indexWildcard, String javaRegex, Ac GetIndexRequest getIndexRequest = createGetIndexRequest(indexWildcard); client.admin().indices().getIndex(getIndexRequest, ActionListener.wrap(getIndexResponse -> { ImmutableOpenMap> mappings = getIndexResponse.getMappings(); + ImmutableOpenMap> aliases = getIndexResponse.getAliases(); + List results = new ArrayList<>(mappings.size()); Pattern pattern = javaRegex != null ? Pattern.compile(javaRegex) : null; for (ObjectObjectCursor> indexMappings : mappings) { @@ -425,7 +427,20 @@ public void resolveAsSeparateMappings(String indexWildcard, String javaRegex, Ac * and not the concrete index: there is a well known information leak of the concrete index name in the response. */ String concreteIndex = indexMappings.key; - if (pattern == null || pattern.matcher(concreteIndex).matches()) { + + // take into account aliases + List aliasMetadata = aliases.get(concreteIndex); + boolean matchesAlias = false; + if (pattern != null && aliasMetadata != null) { + for (AliasMetaData aliasMeta : aliasMetadata) { + if (pattern.matcher(aliasMeta.alias()).matches()) { + matchesAlias = true; + break; + } + } + } + + if (pattern == null || matchesAlias || pattern.matcher(concreteIndex).matches()) { IndexResolution getIndexResult = buildGetIndexResult(concreteIndex, concreteIndex, indexMappings.value); if (getIndexResult.isValid()) { results.add(getIndexResult.get()); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java index e5c80197296c2..68cfefe7fb572 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.sql.plan.logical.command.sys; +import 
org.apache.lucene.util.Counter; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; import org.elasticsearch.xpack.sql.analysis.index.EsIndex; @@ -20,6 +21,7 @@ import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.type.DataTypes; import org.elasticsearch.xpack.sql.type.EsField; +import org.elasticsearch.xpack.sql.util.StringUtils; import java.sql.DatabaseMetaData; import java.util.ArrayList; @@ -101,40 +103,63 @@ public void execute(SqlSession session, ActionListener listener) { String cluster = session.indexResolver().clusterName(); // bail-out early if the catalog is present but differs - if (Strings.hasText(catalog) && !cluster.equals(catalog)) { + if (Strings.hasText(catalog) && cluster.equals(catalog) == false) { listener.onResponse(Rows.empty(output)); return; } + // save original index name (as the pattern can contain special chars) + String indexName = index != null ? index : (pattern != null ? StringUtils.likeToUnescaped(pattern.pattern(), + pattern.escape()) : ""); String idx = index != null ? index : (pattern != null ? pattern.asIndexNameWildcard() : "*"); String regex = pattern != null ? pattern.asJavaRegex() : null; Pattern columnMatcher = columnPattern != null ? 
Pattern.compile(columnPattern.asJavaRegex()) : null; - session.indexResolver().resolveAsSeparateMappings(idx, regex, ActionListener.wrap(esIndices -> { - List> rows = new ArrayList<>(); - for (EsIndex esIndex : esIndices) { - fillInRows(cluster, esIndex.name(), esIndex.mapping(), null, rows, columnMatcher, mode); - } + // special case fo '%' (translated to *) + if ("*".equals(idx)) { + session.indexResolver().resolveAsSeparateMappings(idx, regex, ActionListener.wrap(esIndices -> { + List> rows = new ArrayList<>(); + for (EsIndex esIndex : esIndices) { + fillInRows(cluster, esIndex.name(), esIndex.mapping(), null, rows, columnMatcher, mode); + } + + listener.onResponse(Rows.of(output, rows)); + }, listener::onFailure)); + } + // otherwise use a merged mapping + else { + session.indexResolver().resolveAsMergedMapping(idx, regex, ActionListener.wrap(r -> { + List> rows = new ArrayList<>(); + // populate the data only when a target is found + if (r.isValid() == true) { + EsIndex esIndex = r.get(); + fillInRows(cluster, indexName, esIndex.mapping(), null, rows, columnMatcher, mode); + } - listener.onResponse(Rows.of(output, rows)); - }, listener::onFailure)); + listener.onResponse(Rows.of(output, rows)); + }, listener::onFailure)); + } } static void fillInRows(String clusterName, String indexName, Map mapping, String prefix, List> rows, Pattern columnMatcher, Mode mode) { - int pos = 0; + fillInRows(clusterName, indexName, mapping, prefix, rows, columnMatcher, Counter.newCounter(), mode); + } + + private static void fillInRows(String clusterName, String indexName, Map mapping, String prefix, List> rows, + Pattern columnMatcher, Counter position, Mode mode) { boolean isOdbcClient = mode == Mode.ODBC; for (Map.Entry entry : mapping.entrySet()) { - pos++; // JDBC is 1-based so we start with 1 here + position.addAndGet(1); // JDBC is 1-based so we start with 1 here String name = entry.getKey(); name = prefix != null ? prefix + "." 
+ name : name; EsField field = entry.getValue(); DataType type = field.getDataType(); - // skip the nested, object and unsupported types for JDBC and ODBC - if (type.isPrimitive() || false == Mode.isDriver(mode)) { + // skip the nested, object and unsupported types + if (type.isPrimitive()) { if (columnMatcher == null || columnMatcher.matcher(name).matches()) { rows.add(asList(clusterName, // schema is not supported @@ -162,7 +187,7 @@ static void fillInRows(String clusterName, String indexName, Map mapping = TypesTests.loadMapping("mapping-multi-field-with-nested.json", true); + private final IndexInfo index = new IndexInfo("test_emp", IndexType.INDEX); + private final IndexInfo alias = new IndexInfo("alias", IndexType.ALIAS); + + public void testSysColumns() { List> rows = new ArrayList<>(); SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, randomValueOtherThanMany(Mode::isDriver, () -> randomFrom(Mode.values()))); - assertEquals(17, rows.size()); + // nested fields are ignored + assertEquals(13, rows.size()); assertEquals(24, rows.get(0).size()); List row = rows.get(0); @@ -54,81 +91,57 @@ public void testSysColumns() { assertEquals(8, bufferLength(row)); row = rows.get(5); - assertEquals("unsupported", name(row)); - assertEquals(Types.OTHER, sqlType(row)); - assertEquals(null, radix(row)); - assertEquals(0, bufferLength(row)); - - row = rows.get(6); - assertEquals("some", name(row)); - assertEquals(Types.STRUCT, sqlType(row)); - assertEquals(null, radix(row)); - assertEquals(-1, bufferLength(row)); - - row = rows.get(7); - assertEquals("some.dotted", name(row)); - assertEquals(Types.STRUCT, sqlType(row)); - assertEquals(null, radix(row)); - assertEquals(-1, bufferLength(row)); - - row = rows.get(8); assertEquals("some.dotted.field", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - - row = 
rows.get(9); + + row = rows.get(6); assertEquals("some.string", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - row = rows.get(10); + row = rows.get(7); assertEquals("some.string.normalized", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - row = rows.get(11); + row = rows.get(8); assertEquals("some.string.typical", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - row = rows.get(12); + row = rows.get(9); assertEquals("some.ambiguous", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - row = rows.get(13); + row = rows.get(10); assertEquals("some.ambiguous.one", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - row = rows.get(14); + row = rows.get(11); assertEquals("some.ambiguous.two", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - row = rows.get(15); + row = rows.get(12); assertEquals("some.ambiguous.normalized", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - - row = rows.get(16); - assertEquals("foo_type", name(row)); - assertEquals(Types.OTHER, sqlType(row)); - assertEquals(null, radix(row)); - assertEquals(0, bufferLength(row)); } public void testSysColumnsInOdbcMode() { List> rows = new ArrayList<>(); - SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, + SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", 
true), null, rows, null, Mode.ODBC); assertEquals(13, rows.size()); assertEquals(24, rows.get(0).size()); @@ -263,7 +276,7 @@ public void testSysColumnsInOdbcMode() { public void testSysColumnsInJdbcMode() { List> rows = new ArrayList<>(); - SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, + SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, Mode.JDBC); assertEquals(13, rows.size()); assertEquals(24, rows.get(0).size()); @@ -431,4 +444,88 @@ private static Object sqlDataType(List list) { private static Object sqlDataTypeSub(List list) { return list.get(14); } -} + + public void testSysColumnsNoArg() throws Exception { + executeCommand("SYS COLUMNS", emptyList(), r -> { + assertEquals(13, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + // no index specified + assertEquals("", r.column(2)); + assertEquals("bool", r.column(3)); + r.advanceRow(); + assertEquals(CLUSTER_NAME, r.column(0)); + // no index specified + assertEquals("", r.column(2)); + assertEquals("int", r.column(3)); + }, mapping); + } + + public void testSysColumnsWithCatalogWildcard() throws Exception { + executeCommand("SYS COLUMNS CATALOG 'cluster' TABLE LIKE 'test' LIKE '%'", emptyList(), r -> { + assertEquals(13, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("bool", r.column(3)); + r.advanceRow(); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("int", r.column(3)); + }, mapping); + } + + public void testSysColumnsWithMissingCatalog() throws Exception { + executeCommand("SYS COLUMNS TABLE LIKE 'test' LIKE '%'", emptyList(), r -> { + assertEquals(13, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("bool", r.column(3)); + r.advanceRow(); + assertEquals(CLUSTER_NAME, r.column(0)); + 
assertEquals("test", r.column(2)); + assertEquals("int", r.column(3)); + }, mapping); + } + + public void testSysColumnsWithNullCatalog() throws Exception { + executeCommand("SYS COLUMNS CATALOG ? TABLE LIKE 'test' LIKE '%'", singletonList(new SqlTypedParamValue("keyword", null)), r -> { + assertEquals(13, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("bool", r.column(3)); + r.advanceRow(); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("int", r.column(3)); + }, mapping); + } + + @SuppressWarnings({ "unchecked" }) + private void executeCommand(String sql, List params, Consumer consumer, Map mapping) + throws Exception { + Tuple tuple = sql(sql, params, mapping); + + IndexResolver resolver = tuple.v2().indexResolver(); + + EsIndex test = new EsIndex("test", mapping); + + doAnswer(invocation -> { + ((ActionListener) invocation.getArguments()[2]).onResponse(IndexResolution.valid(test)); + return Void.TYPE; + }).when(resolver).resolveAsMergedMapping(any(), any(), any()); + + tuple.v1().execute(tuple.v2(), wrap(consumer::accept, ex -> fail(ex.getMessage()))); + } + + private Tuple sql(String sql, List params, Map mapping) { + EsIndex test = new EsIndex("test", mapping); + Analyzer analyzer = new Analyzer(TestUtils.TEST_CFG, new FunctionRegistry(), IndexResolution.valid(test), + new Verifier(new Metrics())); + Command cmd = (Command) analyzer.analyze(parser.createStatement(sql, params), true); + + IndexResolver resolver = mock(IndexResolver.class); + when(resolver.clusterName()).thenReturn(CLUSTER_NAME); + + SqlSession session = new SqlSession(TestUtils.TEST_CFG, null, null, resolver, null, null, null, null, null); + return new Tuple<>(cmd, session); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java deleted file mode 100644 index 110c320d679e4..0000000000000 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.sql.plan.logical.command.sys; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.sql.TestUtils; -import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; -import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier; -import org.elasticsearch.xpack.sql.analysis.index.EsIndex; -import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; -import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; -import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; -import org.elasticsearch.xpack.sql.parser.SqlParser; -import org.elasticsearch.xpack.sql.plan.logical.command.Command; -import org.elasticsearch.xpack.sql.session.SqlSession; -import org.elasticsearch.xpack.sql.stats.Metrics; -import org.elasticsearch.xpack.sql.type.DataType; -import org.elasticsearch.xpack.sql.type.EsField; -import org.elasticsearch.xpack.sql.type.TypesTests; - -import java.util.List; -import java.util.Map; - -import static java.util.Arrays.asList; -import static java.util.Collections.singletonList; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class SysParserTests extends ESTestCase { - - private final SqlParser parser = new 
SqlParser(); - private final Map mapping = TypesTests.loadMapping("mapping-multi-field-with-nested.json", true); - - @SuppressWarnings({ "rawtypes", "unchecked" }) - private Tuple sql(String sql) { - EsIndex test = new EsIndex("test", mapping); - Analyzer analyzer = new Analyzer(TestUtils.TEST_CFG, new FunctionRegistry(), IndexResolution.valid(test), - new Verifier(new Metrics())); - Command cmd = (Command) analyzer.analyze(parser.createStatement(sql), true); - - IndexResolver resolver = mock(IndexResolver.class); - when(resolver.clusterName()).thenReturn("cluster"); - - doAnswer(invocation -> { - ((ActionListener) invocation.getArguments()[2]).onResponse(singletonList(test)); - return Void.TYPE; - }).when(resolver).resolveAsSeparateMappings(any(), any(), any()); - - SqlSession session = new SqlSession(TestUtils.TEST_CFG, null, null, resolver, null, null, null, null, null); - return new Tuple<>(cmd, session); - } - - public void testSysTypes() { - Command cmd = sql("SYS TYPES").v1(); - - List names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "FLOAT", "DOUBLE", "SCALED_FLOAT", - "KEYWORD", "TEXT", "IP", "BOOLEAN", "DATE", "TIME", "DATETIME", - "INTERVAL_YEAR", "INTERVAL_MONTH", "INTERVAL_DAY", "INTERVAL_HOUR", "INTERVAL_MINUTE", "INTERVAL_SECOND", - "INTERVAL_YEAR_TO_MONTH", "INTERVAL_DAY_TO_HOUR", "INTERVAL_DAY_TO_MINUTE", "INTERVAL_DAY_TO_SECOND", - "INTERVAL_HOUR_TO_MINUTE", "INTERVAL_HOUR_TO_SECOND", "INTERVAL_MINUTE_TO_SECOND", - "UNSUPPORTED", "OBJECT", "NESTED"); - - cmd.execute(null, ActionListener.wrap(r -> { - assertEquals(19, r.columnCount()); - assertEquals(DataType.values().length, r.size()); - assertFalse(r.schema().types().contains(DataType.NULL)); - // test numeric as signed - assertFalse(r.column(9, Boolean.class)); - // make sure precision is returned as boolean (not int) - assertFalse(r.column(10, Boolean.class)); - // no auto-increment - assertFalse(r.column(11, Boolean.class)); - - for (int i = 0; i < r.size(); 
i++) { - assertEquals(names.get(i), r.column(0)); - r.advanceRow(); - } - - }, ex -> fail(ex.getMessage()))); - } - - public void testSysColsNoArgs() { - runSysColumns("SYS COLUMNS"); - } - - public void testSysColumnEmptyCatalog() { - Tuple sql = sql("SYS COLUMNS CATALOG '' TABLE LIKE '%' LIKE '%'"); - - sql.v1().execute(sql.v2(), ActionListener.wrap(r -> { - assertEquals(24, r.columnCount()); - assertEquals(22, r.size()); - }, ex -> fail(ex.getMessage()))); - } - - public void testSysColsTableOnlyCatalog() { - Tuple sql = sql("SYS COLUMNS CATALOG 'catalog'"); - - sql.v1().execute(sql.v2(), ActionListener.wrap(r -> { - assertEquals(24, r.columnCount()); - assertEquals(0, r.size()); - }, ex -> fail(ex.getMessage()))); - } - - public void testSysColsTableOnlyPattern() { - runSysColumns("SYS COLUMNS TABLE LIKE 'test'"); - } - - public void testSysColsColOnlyPattern() { - runSysColumns("SYS COLUMNS LIKE '%'"); - } - - public void testSysColsTableAndColsPattern() { - runSysColumns("SYS COLUMNS TABLE LIKE 'test' LIKE '%'"); - } - - - private void runSysColumns(String commandVariation) { - Tuple sql = sql(commandVariation); - List names = asList("bool", - "int", - "text", - "keyword", - "unsupported", - "date", - "some", - "some.dotted", - "some.dotted.field", - "some.string", - "some.string.normalized", - "some.string.typical", - "some.ambiguous", - "some.ambiguous.one", - "some.ambiguous.two", - "some.ambiguous.normalized", - "dep", - "dep.dep_name", - "dep.dep_id", - "dep.dep_id.keyword", - "dep.end_date", - "dep.start_date"); - - sql.v1().execute(sql.v2(), ActionListener.wrap(r -> { - assertEquals(24, r.columnCount()); - assertEquals(22, r.size()); - - for (int i = 0; i < r.size(); i++) { - assertEquals("cluster", r.column(0)); - assertNull(r.column(1)); - assertEquals("test", r.column(2)); - assertEquals(names.get(i), r.column(3)); - r.advanceRow(); - } - - }, ex -> fail(ex.getMessage()))); - } -} diff --git 
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java index 19a544c14e50b..29cbb9b985ffd 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java @@ -9,6 +9,7 @@ import static org.elasticsearch.xpack.sql.util.StringUtils.likeToJavaPattern; import static org.elasticsearch.xpack.sql.util.StringUtils.likeToLuceneWildcard; +import static org.elasticsearch.xpack.sql.util.StringUtils.likeToUnescaped; public class LikeConversionTests extends ESTestCase { @@ -20,6 +21,10 @@ private static String wildcard(String pattern) { return likeToLuceneWildcard(pattern, '|'); } + private static String unescape(String pattern) { + return likeToUnescaped(pattern, '|'); + } + public void testNoRegex() { assertEquals("^fooBar$", regex("fooBar")); } @@ -103,4 +108,25 @@ public void testWildcardTripleEscaping() { public void testWildcardIgnoreDoubleEscapedButSkipEscapingOfSql() { assertEquals("foo\\\\\\*bar\\\\?\\?", wildcard("foo\\*bar\\_?")); } -} + + public void testUnescapeLiteral() { + assertEquals("foo", unescape("foo")); + } + + public void testUnescapeEscaped() { + assertEquals("foo_bar", unescape("foo|_bar")); + } + + public void testUnescapeEscapedEscape() { + assertEquals("foo|_bar", unescape("foo||_bar")); + } + + public void testUnescapeLastCharEscape() { + assertEquals("foo_bar|", unescape("foo|_bar|")); + } + + public void testUnescapeMultipleEscapes() { + assertEquals("foo|_bar|", unescape("foo|||_bar||")); + } + +} \ No newline at end of file From 6a63377542dd233bd2ad70dc1bb19c88a025139d Mon Sep 17 00:00:00 2001 From: Ed Savage <32410745+edsavage@users.noreply.github.com> Date: Tue, 9 Apr 2019 16:47:21 +0100 Subject: [PATCH 3/8] [ML][TEST] Fix randomly failing HLRC test (#40973) Made changes to ensure that 
unique IDs are generated for model snapshots used by the deleteExpiredDataTest test in the MachineLearningIT suite. Previously a sleep of 1s was performed between jobs under the assumption that this would be sufficient to guarantee that the timestamps used in the composition of the snapshot IDs would be different. The new approach is to wait on the condition that the old and new timestamps are in fact different (to 1s resolution). --- .../elasticsearch/client/MachineLearningIT.java | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 07d7187fd1d70..f7b7b148f660b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -878,6 +878,18 @@ private String createExpiredData(String jobId) throws Exception { waitForJobToClose(jobId); + long prevJobTimeStamp = System.currentTimeMillis() / 1000; + + // Check that the current timestamp component, in seconds, differs from previously. + // Note that we used to use an 'awaitBusy(() -> false, 1, TimeUnit.SECONDS);' + // for the same purpose but the new approach... 
+ // a) explicitly checks that the timestamps, in seconds, are actually different and + // b) is slightly more efficient since we may not need to wait an entire second for the timestamp to increment + assertBusy(() -> { + long timeNow = System.currentTimeMillis() / 1000; + assertFalse(prevJobTimeStamp >= timeNow); + }); + // Update snapshot timestamp to force it out of snapshot retention window long oneDayAgo = nowMillis - TimeValue.timeValueHours(24).getMillis() - 1; updateModelSnapshotTimestamp(jobId, String.valueOf(oneDayAgo)); @@ -1418,6 +1430,7 @@ private void startDatafeed(String datafeedId, String start, String end) throws E } private void updateModelSnapshotTimestamp(String jobId, String timestamp) throws Exception { + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); GetModelSnapshotsRequest getModelSnapshotsRequest = new GetModelSnapshotsRequest(jobId); @@ -1435,9 +1448,6 @@ private void updateModelSnapshotTimestamp(String jobId, String timestamp) throws UpdateRequest updateSnapshotRequest = new UpdateRequest(".ml-anomalies-" + jobId, "_doc", documentId); updateSnapshotRequest.doc(snapshotUpdate.getBytes(StandardCharsets.UTF_8), XContentType.JSON); highLevelClient().update(updateSnapshotRequest, RequestOptions.DEFAULT); - - // Wait a second to ensure subsequent model snapshots will have a different ID (it depends on epoch seconds) - awaitBusy(() -> false, 1, TimeUnit.SECONDS); } From 1830a4911679dc6e94fb8fd0c92e01ddb23463c2 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 9 Apr 2019 19:46:13 +0200 Subject: [PATCH 4/8] Fix Exception Handling for TransportShardBulkAction (#41006) * Prior to #39793 exceptions for the primary write and delete actions were bubbled up to the caller so that closed shards would be handled accordingly upstream. #39793 accidentally changed the behaviour here and simply marked those exceptions as bulk item failures on the request and kept processing bulk request items on closed shards. 
* This fix returns to that behaviour and adjusts the listeners passed in `TransportReplicationAction` such that they behave like the previous synchronous `catch`. * Dried up the exception handling slightly for that and inlined all the listeners to make the logic a little easier to follow * Reenable SplitIndexIT now that closed shards are properly handled again * Closes #40944 --- .../action/bulk/TransportShardBulkAction.java | 104 ++++++++---------- .../TransportReplicationAction.java | 78 +++++++------ .../admin/indices/create/SplitIndexIT.java | 3 - 3 files changed, 84 insertions(+), 101 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 1117971e53517..da30dedfe5e60 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -154,7 +154,7 @@ public static void performOnPrimary( private final BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(request, primary); @Override - protected void doRun() { + protected void doRun() throws Exception { while (context.hasMoreOperationsToExecute()) { if (executeBulkItemRequest(context, updateHelper, nowInMillisSupplier, mappingUpdater, waitForMappingUpdate, ActionListener.wrap(v -> executor.execute(this), this::onRejection)) == false) { @@ -168,12 +168,6 @@ protected void doRun() { finishRequest(); } - @Override - public void onFailure(Exception e) { - assert false : "All exceptions should be handled by #executeBulkItemRequest"; - onRejection(e); - } - @Override public void onRejection(Exception e) { // Fail all operations after a bulk rejection hit an action that waited for a mapping update and finish the request @@ -204,7 +198,7 @@ private void finishRequest() { */ static boolean executeBulkItemRequest(BulkPrimaryExecutionContext context,
UpdateHelper updateHelper, LongSupplier nowInMillisSupplier, MappingUpdatePerformer mappingUpdater, Consumer> waitForMappingUpdate, - ActionListener itemDoneListener) { + ActionListener itemDoneListener) throws Exception { final DocWriteRequest.OpType opType = context.getCurrent().opType(); final UpdateHelper.Result updateResult; @@ -252,55 +246,51 @@ static boolean executeBulkItemRequest(BulkPrimaryExecutionContext context, Updat final IndexShard primary = context.getPrimary(); final long version = context.getRequestToExecute().version(); final boolean isDelete = context.getRequestToExecute().opType() == DocWriteRequest.OpType.DELETE; - try { - final Engine.Result result; - if (isDelete) { - final DeleteRequest request = context.getRequestToExecute(); - result = primary.applyDeleteOperationOnPrimary(version, request.type(), request.id(), request.versionType(), - request.ifSeqNo(), request.ifPrimaryTerm()); - } else { - final IndexRequest request = context.getRequestToExecute(); - result = primary.applyIndexOperationOnPrimary(version, request.versionType(), new SourceToParse( - request.index(), request.type(), request.id(), request.source(), request.getContentType(), request.routing()), - request.ifSeqNo(), request.ifPrimaryTerm(), request.getAutoGeneratedTimestamp(), request.isRetry()); - } - if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { - mappingUpdater.updateMappings(result.getRequiredMappingUpdate(), primary.shardId(), - context.getRequestToExecute().type(), - new ActionListener() { - @Override - public void onResponse(Void v) { - context.markAsRequiringMappingUpdate(); - waitForMappingUpdate.accept( - ActionListener.runAfter(new ActionListener() { - @Override - public void onResponse(Void v) { - assert context.requiresWaitingForMappingUpdate(); - context.resetForExecutionForRetry(); - } - - @Override - public void onFailure(Exception e) { - context.failOnMappingUpdate(e); - } - }, () -> itemDoneListener.onResponse(null)) - ); - } 
- - @Override - public void onFailure(Exception e) { - onComplete(exceptionToResult(e, primary, isDelete, version), context, updateResult); - // Requesting mapping update failed, so we don't have to wait for a cluster state update - assert context.isInitial(); - itemDoneListener.onResponse(null); - } - }); - return false; - } else { - onComplete(result, context, updateResult); - } - } catch (Exception e) { - onComplete(exceptionToResult(e, primary, isDelete, version), context, updateResult); + final Engine.Result result; + if (isDelete) { + final DeleteRequest request = context.getRequestToExecute(); + result = primary.applyDeleteOperationOnPrimary(version, request.type(), request.id(), request.versionType(), + request.ifSeqNo(), request.ifPrimaryTerm()); + } else { + final IndexRequest request = context.getRequestToExecute(); + result = primary.applyIndexOperationOnPrimary(version, request.versionType(), new SourceToParse( + request.index(), request.type(), request.id(), request.source(), request.getContentType(), request.routing()), + request.ifSeqNo(), request.ifPrimaryTerm(), request.getAutoGeneratedTimestamp(), request.isRetry()); + } + if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { + mappingUpdater.updateMappings(result.getRequiredMappingUpdate(), primary.shardId(), + context.getRequestToExecute().type(), + new ActionListener<>() { + @Override + public void onResponse(Void v) { + context.markAsRequiringMappingUpdate(); + waitForMappingUpdate.accept( + ActionListener.runAfter(new ActionListener<>() { + @Override + public void onResponse(Void v) { + assert context.requiresWaitingForMappingUpdate(); + context.resetForExecutionForRetry(); + } + + @Override + public void onFailure(Exception e) { + context.failOnMappingUpdate(e); + } + }, () -> itemDoneListener.onResponse(null)) + ); + } + + @Override + public void onFailure(Exception e) { + onComplete(exceptionToResult(e, primary, isDelete, version), context, updateResult); + // 
Requesting mapping update failed, so we don't have to wait for a cluster state update + assert context.isInitial(); + itemDoneListener.onResponse(null); + } + }); + return false; + } else { + onComplete(result, context, updateResult); } return true; } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index ac23b95b3bacf..96c6cc3afa04c 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -342,7 +342,7 @@ void runWithPrimaryShardReference(final PrimaryShardReference primaryShardRefere new ConcreteShardRequest<>(primaryRequest.getRequest(), primary.allocationId().getRelocationId(), primaryRequest.getPrimaryTerm()), transportOptions, - new ActionListenerResponseHandler(onCompletionListener, reader) { + new ActionListenerResponseHandler<>(onCompletionListener, reader) { @Override public void handleResponse(Response response) { setPhase(replicationTask, "finished"); @@ -357,58 +357,54 @@ public void handleException(TransportException exp) { }); } else { setPhase(replicationTask, "primary"); - final ActionListener listener = createResponseListener(primaryShardReference); createReplicatedOperation(primaryRequest.getRequest(), - ActionListener.wrap(result -> result.respond(listener), listener::onFailure), - primaryShardReference) - .execute(); + ActionListener.wrap(result -> result.respond( + new ActionListener<>() { + @Override + public void onResponse(Response response) { + if (syncGlobalCheckpointAfterOperation) { + final IndexShard shard = primaryShardReference.indexShard; + try { + shard.maybeSyncGlobalCheckpoint("post-operation"); + } catch (final Exception e) { + // only log non-closed exceptions + if (ExceptionsHelper.unwrap( + e, 
AlreadyClosedException.class, IndexShardClosedException.class) == null) { + // intentionally swallow, a missed global checkpoint sync should not fail this operation + logger.info( + new ParameterizedMessage( + "{} failed to execute post-operation global checkpoint sync", shard.shardId()), e); + } + } + } + primaryShardReference.close(); // release shard operation lock before responding to caller + setPhase(replicationTask, "finished"); + onCompletionListener.onResponse(response); + } + + @Override + public void onFailure(Exception e) { + handleException(primaryShardReference, e); + } + }), e -> handleException(primaryShardReference, e) + ), primaryShardReference).execute(); } } catch (Exception e) { - Releasables.closeWhileHandlingException(primaryShardReference); // release shard operation lock before responding to caller - onFailure(e); + handleException(primaryShardReference, e); } } + private void handleException(PrimaryShardReference primaryShardReference, Exception e) { + Releasables.closeWhileHandlingException(primaryShardReference); // release shard operation lock before responding to caller + onFailure(e); + } + @Override public void onFailure(Exception e) { setPhase(replicationTask, "finished"); onCompletionListener.onFailure(e); } - private ActionListener createResponseListener(final PrimaryShardReference primaryShardReference) { - return new ActionListener() { - @Override - public void onResponse(Response response) { - if (syncGlobalCheckpointAfterOperation) { - final IndexShard shard = primaryShardReference.indexShard; - try { - shard.maybeSyncGlobalCheckpoint("post-operation"); - } catch (final Exception e) { - // only log non-closed exceptions - if (ExceptionsHelper.unwrap(e, AlreadyClosedException.class, IndexShardClosedException.class) == null) { - logger.info( - new ParameterizedMessage( - "{} failed to execute post-operation global checkpoint sync", - shard.shardId()), - e); - // intentionally swallow, a missed global checkpoint sync should not 
fail this operation - } - } - } - primaryShardReference.close(); // release shard operation lock before responding to caller - setPhase(replicationTask, "finished"); - onCompletionListener.onResponse(response); - } - - @Override - public void onFailure(Exception e) { - primaryShardReference.close(); // release shard operation lock before responding to caller - setPhase(replicationTask, "finished"); - onCompletionListener.onFailure(e); - } - }; - } - protected ReplicationOperation> createReplicatedOperation( Request request, ActionListener> listener, PrimaryShardReference primaryShardReference) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 8ee0fbee32678..7038505ff6fb3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -25,7 +25,6 @@ import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.util.Constants; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -78,8 +77,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; - -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/40944") public class SplitIndexIT extends ESIntegTestCase { @Override From 75fe3d0c84d4441945970e0d021e505583b8f7ff Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 9 Apr 2019 10:49:01 -0700 Subject: [PATCH 5/8] Some clarifications in the 'enabled' documentation. 
(#40989) This PR makes a few clarifications to the docs for the `enabled` setting: - Replace references to 'mapping type' with 'mapping' or 'mapping definition'. - In code examples, clarify that the disabled fields have type `object`. - Add a section on how disabled fields can hold non-object data. --- .../reference/mapping/params/enabled.asciidoc | 39 +++++++++++++++---- 1 file changed, 32 insertions(+), 7 deletions(-) diff --git a/docs/reference/mapping/params/enabled.asciidoc b/docs/reference/mapping/params/enabled.asciidoc index 06b76ddeae006..7193c6aa9f6e3 100644 --- a/docs/reference/mapping/params/enabled.asciidoc +++ b/docs/reference/mapping/params/enabled.asciidoc @@ -7,11 +7,11 @@ you are using Elasticsearch as a web session store. You may want to index the session ID and last update time, but you don't need to query or run aggregations on the session data itself. -The `enabled` setting, which can be applied only to the mapping type and to -<> fields, causes Elasticsearch to skip parsing of the -contents of the field entirely. The JSON can still be retrieved from the -<> field, but it is not searchable or stored -in any other way: +The `enabled` setting, which can be applied only to the top-level mapping +definition and to <> fields, causes Elasticsearch to skip +parsing of the contents of the field entirely. The JSON can still be retrieved +from the <> field, but it is not searchable or +stored in any other way: [source,js] -------------------------------------------------- @@ -26,6 +26,7 @@ PUT my_index "type": "date" }, "session_data": { <1> + "type": "object", "enabled": false } } @@ -55,7 +56,7 @@ PUT my_index/_doc/session_2 <2> Any arbitrary data can be passed to the `session_data` field as it will be entirely ignored. <3> The `session_data` will also ignore values that are not JSON objects. 
-The entire mapping type may be disabled as well, in which case the document is +The entire mapping may be disabled as well, in which case the document is stored in the <> field, which means it can be retrieved, but none of its contents are indexed in any way: @@ -84,10 +85,34 @@ GET my_index/_doc/session_1 <2> GET my_index/_mapping <3> -------------------------------------------------- // CONSOLE -<1> The entire mapping type is disabled. +<1> The entire mapping is disabled. <2> The document can be retrieved. <3> Checking the mapping reveals that no fields have been added. TIP: The `enabled` setting can be updated on existing fields using the <>. +Note that because Elasticsearch completely skips parsing the field +contents, it is possible to add non-object data to a disabled field: +[source,js] +-------------------------------------------------- +PUT my_index +{ + "mappings": { + "properties": { + "session_data": { + "type": "object", + "enabled": false + } + } + } +} + +PUT my_index/_doc/session_1 +{ + "session_data": "foo bar" <1> +} +-------------------------------------------------- +// CONSOLE + +<1> The document is added successfully, even though `session_data` contains non-object data. \ No newline at end of file From cf541d66dcd10611d55230c3b60a1b0c6555a16f Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 9 Apr 2019 13:50:47 -0400 Subject: [PATCH 6/8] Fix unsafe publication of invalid license enforcer (#40985) The invalid license enforcer is exposed to the cluster state update thread (via the license state listener) before the constructor has finished. This violates the JLS for safe publication of an object, and means there is a concurrency bug lurking here. This commit addresses this by avoiding publication of the invalid license enforcer before the constructor has returned.
--- .../xpack/ml/InvalidLicenseEnforcer.java | 24 ++++++++++++++++--- .../xpack/ml/MachineLearning.java | 6 +++-- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java index 35ec721a94710..bff85d691b4b8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java @@ -3,17 +3,19 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.ml; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.license.LicenseStateListener; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; -public class InvalidLicenseEnforcer { +public class InvalidLicenseEnforcer implements LicenseStateListener { private static final Logger logger = LogManager.getLogger(InvalidLicenseEnforcer.class); @@ -22,17 +24,32 @@ public class InvalidLicenseEnforcer { private final DatafeedManager datafeedManager; private final AutodetectProcessManager autodetectProcessManager; + private volatile boolean licenseStateListenerRegistered; + InvalidLicenseEnforcer(XPackLicenseState licenseState, ThreadPool threadPool, DatafeedManager datafeedManager, AutodetectProcessManager autodetectProcessManager) { this.threadPool = threadPool; this.licenseState = licenseState; this.datafeedManager = datafeedManager; this.autodetectProcessManager = autodetectProcessManager; - 
licenseState.addListener(this::closeJobsAndDatafeedsIfLicenseExpired); } - private void closeJobsAndDatafeedsIfLicenseExpired() { + void listenForLicenseStateChanges() { + /* + * Registering this as a listener can not be done in the constructor because otherwise it would be unsafe publication of this. That + * is, it would expose this to another thread before the constructor had finished. Therefore, we have a dedicated method to register + * the listener that is invoked after the constructor has returned. + */ + assert licenseStateListenerRegistered == false; + licenseState.addListener(this); + licenseStateListenerRegistered = true; + } + + @Override + public void licenseStateChanged() { + assert licenseStateListenerRegistered; if (licenseState.isMachineLearningAllowed() == false) { + // if the license has expired, close jobs and datafeeds threadPool.generic().execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { @@ -47,4 +64,5 @@ protected void doRun() throws Exception { }); } } + } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 3d59a5fb45acd..b69e7b786a77e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -461,8 +461,10 @@ public Collection createComponents(Client client, ClusterService cluster MlLifeCycleService mlLifeCycleService = new MlLifeCycleService(environment, clusterService, datafeedManager, autodetectProcessManager, memoryTracker); - // This object's constructor attaches to the license state, so there's no need to retain another reference to it - new InvalidLicenseEnforcer(getLicenseState(), threadPool, datafeedManager, autodetectProcessManager); + // this object registers as a license state listener, and is never removed, so there's no need to retain another reference 
to it + final InvalidLicenseEnforcer enforcer = + new InvalidLicenseEnforcer(getLicenseState(), threadPool, datafeedManager, autodetectProcessManager); + enforcer.listenForLicenseStateChanges(); // run node startup tasks autodetectProcessManager.onNodeStartup(); From 27419ceef5103a5976bb9ad7ae80d67f5af39454 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 9 Apr 2019 14:24:54 -0400 Subject: [PATCH 7/8] Wait for all listeners in checkpoint listeners test It could be that we try to shutdown the executor pool before all the listeners have been invoked. It can happen that one was not invoked if it timed out and was in the process of being notified that it timed out on the executor. If we do this shutdown then, a listener will be met with rejected execution exception. To address this, we first wait until all listeners have been notified (or timed out) before proceeding with shutting down the executor. Relates #40970 --- .../shard/GlobalCheckpointListenersTests.java | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java index 59c3553d25fd2..d71bade29a369 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java @@ -431,7 +431,7 @@ public void testListenersReadyToBeNotifiedUsesExecutor() { assertThat(count.get(), equalTo(numberOfListeners)); } - public void testConcurrency() throws BrokenBarrierException, InterruptedException { + public void testConcurrency() throws Exception { final ExecutorService executor = Executors.newFixedThreadPool(randomIntBetween(1, 8)); final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, executor, scheduler, logger); final AtomicLong globalCheckpoint = new 
AtomicLong(NO_OPS_PERFORMED); @@ -470,11 +470,12 @@ public void testConcurrency() throws BrokenBarrierException, InterruptedExceptio // sometimes this will notify the listener immediately globalCheckpointListeners.add( globalCheckpoint.get(), - maybeMultipleInvocationProtectingListener((g, e) -> { - if (invocation.compareAndSet(false, true) == false) { - throw new IllegalStateException("listener invoked twice"); - } - }), + maybeMultipleInvocationProtectingListener( + (g, e) -> { + if (invocation.compareAndSet(false, true) == false) { + throw new IllegalStateException("listener invoked twice"); + } + }), randomBoolean() ? null : TimeValue.timeValueNanos(randomLongBetween(1, TimeUnit.MICROSECONDS.toNanos(1)))); } // synchronize ending with the updating thread and the main test thread @@ -491,11 +492,13 @@ public void testConcurrency() throws BrokenBarrierException, InterruptedExceptio globalCheckpointListeners.globalCheckpointUpdated(globalCheckpoint.incrementAndGet()); } assertThat(globalCheckpointListeners.pendingListeners(), equalTo(0)); - executor.shutdown(); - executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); + // wait for all the listeners to be notified for (final AtomicBoolean invocation : invocations) { - assertTrue(invocation.get()); + assertBusy(() -> assertTrue(invocation.get())); } + // now shutdown + executor.shutdown(); + assertTrue(executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS)); updatingThread.join(); listenersThread.join(); } From 1de2a254382fb3337093776fe9d266dca6913817 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 9 Apr 2019 11:33:18 -0700 Subject: [PATCH 8/8] Mute DedicatedClusterSnapshotRestoreIT#testSnapshotWithStuckNode as we await a fix. 
--- .../snapshots/DedicatedClusterSnapshotRestoreIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index da2014e44df5f..3345fbd3f248e 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -420,6 +420,7 @@ public void testSnapshotDuringNodeShutdown() throws Exception { logger.info("--> done"); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39852") public void testSnapshotWithStuckNode() throws Exception { logger.info("--> start 2 nodes"); ArrayList nodes = new ArrayList<>();