From 9a72b9efe88de98cf6f3abc47b7fee235db2b10a Mon Sep 17 00:00:00 2001 From: hit-lacus Date: Mon, 24 Jun 2019 11:49:23 +0800 Subject: [PATCH] KYLIN-4054 Logger of HCreateTableJob record error message --- .../persistence/JDBCConnectionManager.java | 4 +-- .../gtrecord/GTCubeStorageQueryBase.java | 32 ++++++++++--------- .../storage/hbase/steps/CreateHTableJob.java | 27 ++++++++-------- .../apache/kylin/tool/CubeMetaIngester.java | 8 ++--- 4 files changed, 36 insertions(+), 35 deletions(-) diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCConnectionManager.java b/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCConnectionManager.java index dcb9a1bb49f..3ca40264d8a 100644 --- a/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCConnectionManager.java +++ b/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCConnectionManager.java @@ -62,7 +62,7 @@ private JDBCConnectionManager(KylinConfig config) { dataSource = BasicDataSourceFactory.createDataSource(getDbcpProperties()); Connection conn = getConn(); DatabaseMetaData mdm = conn.getMetaData(); - logger.info("Connected to {0} {1}", mdm.getDatabaseProductName(), mdm.getDatabaseProductVersion()); + logger.info("Connected to {} {}", mdm.getDatabaseProductName(), mdm.getDatabaseProductVersion()); closeQuietly(conn); } catch (Exception e) { throw new IllegalArgumentException(e); @@ -89,7 +89,7 @@ private Map initDbcpProps(KylinConfig config) { ret.remove("passwordEncrypted"); } - logger.info("Connecting to Jdbc with url:{0} by user {1}", ret.get("url"), ret.get("username")); + logger.info("Connecting to Jdbc with url:{} by user {}", ret.get("url"), ret.get("username")); putIfMissing(ret, "driverClassName", "com.mysql.jdbc.Driver"); putIfMissing(ret, "maxActive", "5"); diff --git a/core-storage/src/main/java/org/apache/kylin/storage/gtrecord/GTCubeStorageQueryBase.java b/core-storage/src/main/java/org/apache/kylin/storage/gtrecord/GTCubeStorageQueryBase.java index 3c3c7ff9e45..2273dbea2f1 100644 --- a/core-storage/src/main/java/org/apache/kylin/storage/gtrecord/GTCubeStorageQueryBase.java +++ b/core-storage/src/main/java/org/apache/kylin/storage/gtrecord/GTCubeStorageQueryBase.java @@ -305,7 +305,7 @@ protected Set findSingleValuesCompFilters(TupleFilter filter private long getQueryFilterMask(Set filterColumnD) { long filterMask = 0; - logger.info("Filter column set for query: %s", filterColumnD); + logger.info("Filter column set for query: {}", filterColumnD); if (filterColumnD.isEmpty() == false) { RowKeyColDesc[] allColumns = cubeDesc.getRowkey().getRowKeyColumns(); for (int i = 0; i < allColumns.length; i++) { @@ -314,7 +314,7 @@ private long getQueryFilterMask(Set filterColumnD) { } } } - logger.info("Filter mask is: {0}", filterMask); + logger.info("Filter mask is: {}", filterMask); return filterMask; } @@ -430,18 +430,19 @@ private void enableStorageLimitIfPossible(Cuboid cuboid, Collection g if (!groupsD.containsAll(cuboid.getColumns().subList(0, size))) { storageLimitLevel = StorageLimitLevel.LIMIT_ON_RETURN_SIZE; logger.debug( - "storageLimitLevel set to LIMIT_ON_RETURN_SIZE because groupD is not clustered at head, groupsD: {0} with cuboid columns: {1}", groupsD.toString(), cuboid.getColumns().toString()); + "storageLimitLevel set to LIMIT_ON_RETURN_SIZE because groupD is not clustered at head, groupsD: {} with cuboid columns: {}", + groupsD, cuboid.getColumns()); } if (!dynGroups.isEmpty()) { storageLimitLevel = StorageLimitLevel.NO_LIMIT; - logger.debug("Storage 
limit push down is impossible because the query has dynamic groupby {0}", dynGroups); + logger.debug("Storage limit push down is impossible because the query has dynamic groupby {}", dynGroups); } // derived aggregation is bad, unless expanded columns are already in group by if (!groups.containsAll(derivedPostAggregation)) { storageLimitLevel = StorageLimitLevel.NO_LIMIT; - logger.debug("storageLimitLevel set to NO_LIMIT because derived column require post aggregation: {0}", + logger.debug("storageLimitLevel set to NO_LIMIT because derived column require post aggregation: {}", derivedPostAggregation); } @@ -452,7 +453,7 @@ private void enableStorageLimitIfPossible(Cuboid cuboid, Collection g if (!loosenedColumnD.isEmpty()) { // KYLIN-2173 storageLimitLevel = StorageLimitLevel.NO_LIMIT; - logger.debug("storageLimitLevel set to NO_LIMIT because filter is loosened: {0}", loosenedColumnD); + logger.debug("storageLimitLevel set to NO_LIMIT because filter is loosened: {}", loosenedColumnD); } if (context.hasSort()) { @@ -464,7 +465,7 @@ private void enableStorageLimitIfPossible(Cuboid cuboid, Collection g for (FunctionDesc functionDesc : functionDescs) { if (functionDesc.isDimensionAsMetric()) { storageLimitLevel = StorageLimitLevel.NO_LIMIT; - logger.debug("storageLimitLevel set to NO_LIMIT because {0} isDimensionAsMetric ", functionDesc); + logger.debug("storageLimitLevel set to NO_LIMIT because {} isDimensionAsMetric ", functionDesc); } } @@ -483,8 +484,8 @@ private void enableStreamAggregateIfBeneficial(Cuboid cuboid, Set gro } if (!shardByInGroups.isEmpty()) { enabled = false; - logger.debug("Aggregate partition results is not beneficial because shard by columns in groupD: {0}", - shardByInGroups); + logger.debug("Aggregate partition results is not beneficial because shard by columns in groupD: {}", + shardByInGroups); } if (!context.isNeedStorageAggregation()) { @@ -531,7 +532,7 @@ private TupleFilter checkHavingCanPushDown(TupleFilter havingFilter, Set aggrOutCols = new HashSet<>(); @@ -563,20 +564,21 @@ private boolean isExactAggregation(StorageContext context, Cuboid cuboid, Collec } if (cuboid.requirePostAggregation()) { - logger.info("exactAggregation is false because cuboid {0}=>{1}", cuboid.getInputID(), cuboid.getId()); + logger.info("exactAggregation is false because cuboid {}=>{}", cuboid.getInputID(), cuboid.getId()); return false; } // derived aggregation is bad, unless expanded columns are already in group by if (!groups.containsAll(derivedPostAggregation)) { - logger.info("exactAggregation is false because derived column require post aggregation: {0}", + logger.info("exactAggregation is false because derived column require post aggregation: {}", derivedPostAggregation); return false; } // other columns (from filter) is bad, unless they are ensured to have single value if (!singleValuesD.containsAll(othersD)) { - logger.info("exactAggregation is false because some column not on group by: {0} (single value column: {1})", othersD, singleValuesD); + logger.info("exactAggregation is false because some column not on group by: {} (single value column: {})", + othersD, singleValuesD); return false; } @@ -599,7 +601,7 @@ private boolean isExactAggregation(StorageContext context, Cuboid cuboid, Collec if (partDesc.isPartitioned()) { TblColRef col = partDesc.getPartitionDateColumnRef(); if (!groups.contains(col) && !singleValuesD.contains(col)) { - logger.info("exactAggregation is false because cube is partitioned and %s is not on group by", col); + logger.info("exactAggregation is 
false because cube is partitioned and {} is not on group by", col); return false; } } @@ -610,7 +612,7 @@ private boolean isExactAggregation(StorageContext context, Cuboid cuboid, Collec return false; } - logger.info("exactAggregation is true, cuboid id is {0}", String.valueOf(cuboid.getId())); + logger.info("exactAggregation is true, cuboid id is {}", cuboid.getId()); return true; } diff --git a/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/CreateHTableJob.java b/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/CreateHTableJob.java index 354dcae36f7..23a865dfbb9 100644 --- a/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/CreateHTableJob.java +++ b/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/CreateHTableJob.java @@ -107,7 +107,7 @@ public int run(String[] args) throws Exception { for (Long cuboid : buildingCuboids) { Double cuboidSize = cuboidSizeMap.get(cuboid); if (cuboidSize == null) { - logger.warn("{0} cuboid's size is null will replace by 0", cuboid); + logger.warn("{} cuboid's size is null will replace by 0", cuboid); cuboidSize = 0.0; } optimizedCuboidSizeMap.put(cuboid, cuboidSize); @@ -135,7 +135,7 @@ private void exportHBaseConfiguration(String hbaseTableName) throws IOException HTable table = new HTable(hbaseConf, hbaseTableName); HFileOutputFormat2.configureIncrementalLoadMap(job, table); - logger.info("Saving HBase configuration to {0}", hbaseConfPath); + logger.info("Saving HBase configuration to {}", hbaseConfPath); FileSystem fs = HadoopUtil.getWorkingFileSystem(); FSDataOutputStream out = null; try { @@ -164,7 +164,7 @@ public static byte[][] getRegionSplitsFromCuboidStatistics(final Map Short.MAX_VALUE) { - logger.info("Too many regions! reduce to {0}" + String.valueOf(Short.MAX_VALUE)); + logger.info("Too many regions! 
reduce to {}", Short.MAX_VALUE); nRegion = Short.MAX_VALUE; } if (nRegion != original) { - logger.info( - "Region count is adjusted from {0} to {1} to help random sharding", String.valueOf(original), String.valueOf(nRegion)); + logger.info("Region count is adjusted from {} to {} to help random sharding", original, nRegion); } } int mbPerRegion = (int) (totalSizeInM / nRegion); mbPerRegion = Math.max(1, mbPerRegion); - logger.info("Total size {0} M (estimated)", String.valueOf(totalSizeInM)); - logger.info("Expecting {0} regions.", String.valueOf(nRegion)); - logger.info("Expecting {0} MB per region.", String.valueOf(mbPerRegion)); + logger.info("Total size {} M (estimated)", totalSizeInM); + logger.info("Expecting {} regions.", nRegion); + logger.info("Expecting {} MB per region.", mbPerRegion); if (cubeSegment.isEnableSharding()) { //each cuboid will be split into different number of shards @@ -244,8 +243,8 @@ public static byte[][] getRegionSplitsFromCuboidStatistics(final Map> innerReg } int compactionThreshold = Integer.parseInt(hbaseConf.get("hbase.hstore.compactionThreshold", "3")); - logger.info("hbase.hstore.compactionThreshold is {0}", String.valueOf(compactionThreshold)); + logger.info("hbase.hstore.compactionThreshold is {}", compactionThreshold); if (hfileSizeMB > 0.0f && hfileSizeMB * compactionThreshold < mbPerRegion) { hfileSizeMB = ((float) mbPerRegion) / compactionThreshold; } @@ -291,7 +290,7 @@ protected static void saveHFileSplits(final List> innerReg if (hfileSizeMB <= 0f) { hfileSizeMB = mbPerRegion; } - logger.info("hfileSizeMB {0}", String.valueOf(hfileSizeMB)); + logger.info("hfileSizeMB {}", hfileSizeMB); final Path hfilePartitionFile = new Path(outputFolder, "part-r-00000_hfile"); short regionCount = (short) innerRegionSplits.size(); @@ -314,7 +313,7 @@ protected static void saveHFileSplits(final List> innerReg for (Long cuboid : allCuboids) { if (accumulatedSize >= hfileSizeMB) { - logger.debug("Region {0}'s hfile {1} size is {2} mb", String.valueOf(i), String.valueOf(j), String.valueOf(accumulatedSize)); + logger.debug("Region {}'s hfile {} size is {} mb", i, j, accumulatedSize); byte[] split = new byte[RowConstants.ROWKEY_SHARD_AND_CUBOID_LEN]; BytesUtil.writeUnsigned(i, split, 0, RowConstants.ROWKEY_SHARDID_LEN); System.arraycopy(Bytes.toBytes(cuboid), 0, split, RowConstants.ROWKEY_SHARDID_LEN, diff --git a/tool/src/main/java/org/apache/kylin/tool/CubeMetaIngester.java b/tool/src/main/java/org/apache/kylin/tool/CubeMetaIngester.java index 8443d0324c0..40971c11995 100644 --- a/tool/src/main/java/org/apache/kylin/tool/CubeMetaIngester.java +++ b/tool/src/main/java/org/apache/kylin/tool/CubeMetaIngester.java @@ -170,13 +170,13 @@ private void checkAndMark(TableMetadataManager srcMetadataManager, DataModelMana TableDesc existing = metadataManager.getTableDesc(tableDesc.getIdentity(), targetProjectName); if (existing != null && !existing.equals(tableDesc)) { logger.info("Table {} already has a different version in target metadata store", tableDesc.getIdentity()); - logger.info("Existing version: " + existing); - logger.info("New version: " + tableDesc); + logger.info("Existing version: {}", existing); + logger.info("New version: {}", tableDesc); if (!forceIngest && !overwriteTables) { throw new IllegalStateException("table already exists with a different version: " + tableDesc.getIdentity() + ". 
Consider adding -overwriteTables option to force overwriting (with caution)"); } else { - logger.warn("Overwriting the old table desc: " + tableDesc.getIdentity()); + logger.warn("Overwriting the old table desc: {}", tableDesc.getIdentity()); } } requiredResources.add(tableDesc.getResourcePath()); @@ -207,7 +207,7 @@ private void checkExesting(RootPersistentEntity existing, String type, String na if (!forceIngest) { throw new IllegalStateException("Already exist a " + type + " called " + name); } else { - logger.warn("Overwriting the old {0} desc: {1}", type, name); + logger.warn("Overwriting the old {} desc: {}", type, name); } } }
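
Note for reviewers: every hunk above is the same one-line fix, replacing MessageFormat-style "{0}"/"{1}" tokens with SLF4J's bare "{}" anchor and passing the raw arguments instead of pre-converting them with String.valueOf(). SLF4J only substitutes the exact two-character "{}" sequence, so the old indexed tokens were written to the log verbatim and the corresponding arguments were silently dropped. The standalone sketch below illustrates the before/after behaviour; the demo class and the sample values are illustrative only and are not taken from the Kylin sources.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical demo class, not part of Kylin; shows the placeholder behaviour this patch relies on.
public class PlaceholderDemo {

    private static final Logger logger = LoggerFactory.getLogger(PlaceholderDemo.class);

    public static void main(String[] args) {
        int nRegion = 10;

        // Pre-patch style: SLF4J does not recognise indexed tokens, so this logs the
        // literal text "Expecting {0} regions." and ignores the argument entirely.
        logger.info("Expecting {0} regions.", String.valueOf(nRegion));

        // Pre-patch bug from CreateHTableJob: the concatenation compiles, but nothing is
        // ever substituted -- it logs "Too many regions! reduce to {0}32767".
        logger.info("Too many regions! reduce to {0}" + String.valueOf(Short.MAX_VALUE));

        // Post-patch style: bare {} anchors with raw arguments. Formatting (and the
        // argument's toString() conversion) only happens when the INFO level is enabled.
        logger.info("Expecting {} regions.", nRegion);
        logger.info("Too many regions! reduce to {}", Short.MAX_VALUE);
    }
}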