KYLIN-4054 Logger of HCreateTableJob record error message
hit-lacus authored and nichunen committed Jun 24, 2019
1 parent 760aefd commit 9a72b9e
Showing 4 changed files with 36 additions and 35 deletions.
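For context on what the patch changes: SLF4J loggers only substitute arguments for the {} placeholder, while MessageFormat-style tokens such as {0} are written to the log verbatim and the supplied arguments are silently dropped. A minimal sketch of the before/after behaviour (class name and sample values are illustrative, not part of the commit):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PlaceholderDemo {
    private static final Logger logger = LoggerFactory.getLogger(PlaceholderDemo.class);

    public static void main(String[] args) {
        String url = "jdbc:mysql://localhost:3306/kylin"; // illustrative value
        String user = "ADMIN";                            // illustrative value

        // Before the fix: {0}/{1} are MessageFormat tokens that SLF4J does not expand,
        // so the log line contains the literal text "{0}" and "{1}" and the arguments are lost.
        logger.info("Connecting to Jdbc with url:{0} by user {1}", url, user);

        // After the fix: each {} is replaced by the corresponding argument.
        logger.info("Connecting to Jdbc with url:{} by user {}", url, user);
    }
}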

@@ -62,7 +62,7 @@ private JDBCConnectionManager(KylinConfig config) {
dataSource = BasicDataSourceFactory.createDataSource(getDbcpProperties());
Connection conn = getConn();
DatabaseMetaData mdm = conn.getMetaData();
logger.info("Connected to {0} {1}", mdm.getDatabaseProductName(), mdm.getDatabaseProductVersion());
logger.info("Connected to {} {}", mdm.getDatabaseProductName(), mdm.getDatabaseProductVersion());
closeQuietly(conn);
} catch (Exception e) {
throw new IllegalArgumentException(e);
@@ -89,7 +89,7 @@ private Map<String, String> initDbcpProps(KylinConfig config) {
ret.remove("passwordEncrypted");
}

logger.info("Connecting to Jdbc with url:{0} by user {1}", ret.get("url"), ret.get("username"));
logger.info("Connecting to Jdbc with url:{} by user {}", ret.get("url"), ret.get("username"));

putIfMissing(ret, "driverClassName", "com.mysql.jdbc.Driver");
putIfMissing(ret, "maxActive", "5");

@@ -305,7 +305,7 @@ protected Set<CompareTupleFilter> findSingleValuesCompFilters(TupleFilter filter
private long getQueryFilterMask(Set<TblColRef> filterColumnD) {
long filterMask = 0;

logger.info("Filter column set for query: %s", filterColumnD);
logger.info("Filter column set for query: {}", filterColumnD);
if (filterColumnD.isEmpty() == false) {
RowKeyColDesc[] allColumns = cubeDesc.getRowkey().getRowKeyColumns();
for (int i = 0; i < allColumns.length; i++) {
@@ -314,7 +314,7 @@ private long getQueryFilterMask(Set<TblColRef> filterColumnD) {
}
}
}
logger.info("Filter mask is: {0}", filterMask);
logger.info("Filter mask is: {}", filterMask);
return filterMask;
}

@@ -430,18 +430,19 @@ private void enableStorageLimitIfPossible(Cuboid cuboid, Collection<TblColRef> g
if (!groupsD.containsAll(cuboid.getColumns().subList(0, size))) {
storageLimitLevel = StorageLimitLevel.LIMIT_ON_RETURN_SIZE;
logger.debug(
"storageLimitLevel set to LIMIT_ON_RETURN_SIZE because groupD is not clustered at head, groupsD: {0} with cuboid columns: {1}", groupsD.toString(), cuboid.getColumns().toString());
"storageLimitLevel set to LIMIT_ON_RETURN_SIZE because groupD is not clustered at head, groupsD: {} with cuboid columns: {}",
groupsD, cuboid.getColumns());
}

if (!dynGroups.isEmpty()) {
storageLimitLevel = StorageLimitLevel.NO_LIMIT;
logger.debug("Storage limit push down is impossible because the query has dynamic groupby {0}", dynGroups);
logger.debug("Storage limit push down is impossible because the query has dynamic groupby {}", dynGroups);
}

// derived aggregation is bad, unless expanded columns are already in group by
if (!groups.containsAll(derivedPostAggregation)) {
storageLimitLevel = StorageLimitLevel.NO_LIMIT;
logger.debug("storageLimitLevel set to NO_LIMIT because derived column require post aggregation: {0}",
logger.debug("storageLimitLevel set to NO_LIMIT because derived column require post aggregation: {}",
derivedPostAggregation);
}

@@ -452,7 +453,7 @@ private void enableStorageLimitIfPossible(Cuboid cuboid, Collection<TblColRef> g

if (!loosenedColumnD.isEmpty()) { // KYLIN-2173
storageLimitLevel = StorageLimitLevel.NO_LIMIT;
logger.debug("storageLimitLevel set to NO_LIMIT because filter is loosened: {0}", loosenedColumnD);
logger.debug("storageLimitLevel set to NO_LIMIT because filter is loosened: {}", loosenedColumnD);
}

if (context.hasSort()) {
@@ -464,7 +465,7 @@ private void enableStorageLimitIfPossible(Cuboid cuboid, Collection<TblColRef> g
for (FunctionDesc functionDesc : functionDescs) {
if (functionDesc.isDimensionAsMetric()) {
storageLimitLevel = StorageLimitLevel.NO_LIMIT;
logger.debug("storageLimitLevel set to NO_LIMIT because {0} isDimensionAsMetric ", functionDesc);
logger.debug("storageLimitLevel set to NO_LIMIT because {} isDimensionAsMetric ", functionDesc);
}
}

@@ -483,8 +484,8 @@ private void enableStreamAggregateIfBeneficial(Cuboid cuboid, Set<TblColRef> gro
}
if (!shardByInGroups.isEmpty()) {
enabled = false;
logger.debug("Aggregate partition results is not beneficial because shard by columns in groupD: {0}",
shardByInGroups);
logger.debug("Aggregate partition results is not beneficial because shard by columns in groupD: {}",
shardByInGroups);
}

if (!context.isNeedStorageAggregation()) {
@@ -531,7 +532,7 @@ private TupleFilter checkHavingCanPushDown(TupleFilter havingFilter, Set<TblColR
return null;

// OK, push down
logger.info("Push down having filter {0}", havingFilter);
logger.info("Push down having filter {}", havingFilter);

// convert columns in the filter
Set<TblColRef> aggrOutCols = new HashSet<>();
@@ -563,20 +564,21 @@ private boolean isExactAggregation(StorageContext context, Cuboid cuboid, Collec
}

if (cuboid.requirePostAggregation()) {
logger.info("exactAggregation is false because cuboid {0}=>{1}", cuboid.getInputID(), cuboid.getId());
logger.info("exactAggregation is false because cuboid {}=>{}", cuboid.getInputID(), cuboid.getId());
return false;
}

// derived aggregation is bad, unless expanded columns are already in group by
if (!groups.containsAll(derivedPostAggregation)) {
logger.info("exactAggregation is false because derived column require post aggregation: {0}",
logger.info("exactAggregation is false because derived column require post aggregation: {}",
derivedPostAggregation);
return false;
}

// other columns (from filter) is bad, unless they are ensured to have single value
if (!singleValuesD.containsAll(othersD)) {
logger.info("exactAggregation is false because some column not on group by: {0} (single value column: {1})", othersD, singleValuesD);
logger.info("exactAggregation is false because some column not on group by: {} (single value column: {})",
othersD, singleValuesD);
return false;
}

@@ -599,7 +601,7 @@ private boolean isExactAggregation(StorageContext context, Cuboid cuboid, Collec
if (partDesc.isPartitioned()) {
TblColRef col = partDesc.getPartitionDateColumnRef();
if (!groups.contains(col) && !singleValuesD.contains(col)) {
logger.info("exactAggregation is false because cube is partitioned and %s is not on group by", col);
logger.info("exactAggregation is false because cube is partitioned and {} is not on group by", col);
return false;
}
}
@@ -610,7 +612,7 @@ private boolean isExactAggregation(StorageContext context, Cuboid cuboid, Collec
return false;
}

logger.info("exactAggregation is true, cuboid id is {0}", String.valueOf(cuboid.getId()));
logger.info("exactAggregation is true, cuboid id is {}", cuboid.getId());
return true;
}


@@ -107,7 +107,7 @@ public int run(String[] args) throws Exception {
for (Long cuboid : buildingCuboids) {
Double cuboidSize = cuboidSizeMap.get(cuboid);
if (cuboidSize == null) {
logger.warn("{0} cuboid's size is null will replace by 0", cuboid);
logger.warn("{} cuboid's size is null will replace by 0", cuboid);
cuboidSize = 0.0;
}
optimizedCuboidSizeMap.put(cuboid, cuboidSize);
@@ -135,7 +135,7 @@ private void exportHBaseConfiguration(String hbaseTableName) throws IOException
HTable table = new HTable(hbaseConf, hbaseTableName);
HFileOutputFormat2.configureIncrementalLoadMap(job, table);

logger.info("Saving HBase configuration to {0}", hbaseConfPath);
logger.info("Saving HBase configuration to {}", hbaseConfPath);
FileSystem fs = HadoopUtil.getWorkingFileSystem();
FSDataOutputStream out = null;
try {
@@ -164,7 +164,7 @@ public static byte[][] getRegionSplitsFromCuboidStatistics(final Map<Long, Doubl
final CubeDesc cubeDesc = cubeSegment.getCubeDesc();
float cut = cubeDesc.getConfig().getKylinHBaseRegionCut();

logger.info("Cut for HBase region is {0} GB", String.valueOf(cut));
logger.info("Cut for HBase region is {} GB", cut);

double totalSizeInM = 0;
for (Double cuboidSize : cubeSizeMap.values()) {
@@ -187,22 +187,21 @@ public static byte[][] getRegionSplitsFromCuboidStatistics(final Map<Long, Doubl
}

if (nRegion > Short.MAX_VALUE) {
logger.info("Too many regions! reduce to {0}" + String.valueOf(Short.MAX_VALUE));
logger.info("Too many regions! reduce to {}", Short.MAX_VALUE);
nRegion = Short.MAX_VALUE;
}

if (nRegion != original) {
- logger.info(
- "Region count is adjusted from {0} to {1} to help random sharding", String.valueOf(original), String.valueOf(nRegion));
+ logger.info("Region count is adjusted from {} to {} to help random sharding", original, nRegion);
}
}

int mbPerRegion = (int) (totalSizeInM / nRegion);
mbPerRegion = Math.max(1, mbPerRegion);

logger.info("Total size {0} M (estimated)", String.valueOf(totalSizeInM));
logger.info("Expecting {0} regions.", String.valueOf(nRegion));
logger.info("Expecting {0} MB per region.", String.valueOf(mbPerRegion));
logger.info("Total size {} M (estimated)", totalSizeInM);
logger.info("Expecting {} regions.", nRegion);
logger.info("Expecting {} MB per region.", mbPerRegion);

if (cubeSegment.isEnableSharding()) {
//each cuboid will be split into different number of shards
@@ -244,8 +243,8 @@ public static byte[][] getRegionSplitsFromCuboidStatistics(final Map<Long, Doubl
}

for (int i = 0; i < nRegion; ++i) {
logger.debug("Region {0}'s estimated size is {1} MB, accounting for {2} percent",
String.valueOf(i), String.valueOf(regionSizes[i]), String.valueOf(100.0 * regionSizes[i] / totalSizeInM));
logger.debug("Region {}'s estimated size is {} MB, accounting for {} percent", i, regionSizes[i],
100.0 * regionSizes[i] / totalSizeInM);
}

CuboidShardUtil.saveCuboidShards(cubeSegment, cuboidShards, nRegion);
@@ -283,15 +282,15 @@ protected static void saveHFileSplits(final List<HashMap<Long, Double>> innerReg
}

int compactionThreshold = Integer.parseInt(hbaseConf.get("hbase.hstore.compactionThreshold", "3"));
logger.info("hbase.hstore.compactionThreshold is {0}", String.valueOf(compactionThreshold));
logger.info("hbase.hstore.compactionThreshold is {}", compactionThreshold);
if (hfileSizeMB > 0.0f && hfileSizeMB * compactionThreshold < mbPerRegion) {
hfileSizeMB = ((float) mbPerRegion) / compactionThreshold;
}

if (hfileSizeMB <= 0f) {
hfileSizeMB = mbPerRegion;
}
logger.info("hfileSizeMB {0}", String.valueOf(hfileSizeMB));
logger.info("hfileSizeMB {}", hfileSizeMB);
final Path hfilePartitionFile = new Path(outputFolder, "part-r-00000_hfile");
short regionCount = (short) innerRegionSplits.size();

@@ -314,7 +313,7 @@ protected static void saveHFileSplits(final List<HashMap<Long, Double>> innerReg
for (Long cuboid : allCuboids) {

if (accumulatedSize >= hfileSizeMB) {
logger.debug("Region {0}'s hfile {1} size is {2} mb", String.valueOf(i), String.valueOf(j), String.valueOf(accumulatedSize));
logger.debug("Region {}'s hfile {} size is {} mb", i, j, accumulatedSize);
byte[] split = new byte[RowConstants.ROWKEY_SHARD_AND_CUBOID_LEN];
BytesUtil.writeUnsigned(i, split, 0, RowConstants.ROWKEY_SHARDID_LEN);
System.arraycopy(Bytes.toBytes(cuboid), 0, split, RowConstants.ROWKEY_SHARDID_LEN,

@@ -170,13 +170,13 @@ private void checkAndMark(TableMetadataManager srcMetadataManager, DataModelMana
TableDesc existing = metadataManager.getTableDesc(tableDesc.getIdentity(), targetProjectName);
if (existing != null && !existing.equals(tableDesc)) {
logger.info("Table {} already has a different version in target metadata store", tableDesc.getIdentity());
logger.info("Existing version: " + existing);
logger.info("New version: " + tableDesc);
logger.info("Existing version: {}", existing);
logger.info("New version: {}", tableDesc);

if (!forceIngest && !overwriteTables) {
throw new IllegalStateException("table already exists with a different version: " + tableDesc.getIdentity() + ". Consider adding -overwriteTables option to force overwriting (with caution)");
} else {
logger.warn("Overwriting the old table desc: " + tableDesc.getIdentity());
logger.warn("Overwriting the old table desc: {}", tableDesc.getIdentity());
}
}
requiredResources.add(tableDesc.getResourcePath());
@@ -207,7 +207,7 @@ private void checkExesting(RootPersistentEntity existing, String type, String na
if (!forceIngest) {
throw new IllegalStateException("Already exist a " + type + " called " + name);
} else {
logger.warn("Overwriting the old {0} desc: {1}", type, name);
logger.warn("Overwriting the old {} desc: {}", type, name);
}
}
}
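Beyond the placeholder fixes, the last file also replaces string concatenation inside log calls (for example "Existing version: " + existing) with the parameterized form. A rough sketch of why that matters, using a hypothetical object in place of the real table descriptor: concatenation builds the message (and runs toString()) eagerly even when the log level is disabled, whereas the parameterized call defers formatting until SLF4J knows the message will actually be written.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LazyFormattingDemo {
    private static final Logger logger = LoggerFactory.getLogger(LazyFormattingDemo.class);

    public static void main(String[] args) {
        Object existing = new Object() {
            @Override
            public String toString() {
                // Stand-in for a potentially expensive toString(), e.g. dumping a table descriptor.
                return "tableDesc[hypothetical]";
            }
        };

        // Concatenation: existing.toString() runs and the full message string is built
        // before the logger checks whether INFO is enabled.
        logger.info("Existing version: " + existing);

        // Parameterized: existing.toString() is invoked only if the message is actually logged.
        logger.info("Existing version: {}", existing);
    }
}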
