This repository has been archived by the owner on Sep 18, 2023. It is now read-only.

[NSE-636] Remove log4j1 related unit tests (#646)
weiting-chen authored Dec 22, 2021
1 parent 58e6d33 · commit ad4c4e4
Showing 15 changed files with 43 additions and 17 deletions.
@@ -19,7 +19,7 @@ package com.intel.oap.misc

import com.intel.oap.tpc.ds.TPCDSTableGen
import com.intel.oap.tpc.util.TPCRunner
-import org.apache.log4j.{Level, LogManager}
+//import org.apache.log4j.{Level, LogManager}
import org.apache.spark.SparkConf
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.functions.{col, expr}
@@ -68,7 +68,7 @@ class PartitioningSuite extends QueryTest with SharedSparkSession {

override def beforeAll(): Unit = {
super.beforeAll()
-LogManager.getRootLogger.setLevel(Level.WARN)
+//LogManager.getRootLogger.setLevel(Level.WARN)

lPath = Files.createTempFile("", ".parquet").toFile.getAbsolutePath
spark.range(scale)
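
The two-line pattern above (the log4j1 import and the root-logger call, each commented out) repeats in every TPC suite below. For reference, a minimal sketch of the equivalent root-logger call on a log4j2 classpath; Configurator.setRootLevel is a log4j2 API, and this replacement is not something the commit does:

// A hedged alternative to the commented-out log4j1 call, assuming
// log4j2-core is on the test classpath (not part of this commit).
import org.apache.logging.log4j.Level
import org.apache.logging.log4j.core.config.Configurator

// Equivalent of log4j1's LogManager.getRootLogger.setLevel(Level.WARN):
Configurator.setRootLevel(Level.WARN)
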
@@ -18,7 +18,7 @@
package com.intel.oap.tpc.ds

import com.intel.oap.tpc.util.TPCRunner
-import org.apache.log4j.{Level, LogManager}
+//import org.apache.log4j.{Level, LogManager}
import org.apache.spark.SparkConf
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.test.SharedSparkSession
@@ -60,7 +60,7 @@ class Orc_TPCDSSuite extends QueryTest with SharedSparkSession {

override def beforeAll(): Unit = {
super.beforeAll()
-LogManager.getRootLogger.setLevel(Level.WARN)
+//LogManager.getRootLogger.setLevel(Level.WARN)
val tGen = new Orc_TPCDSTableGen(spark, 0.1D, TPCDS_WRITE_PATH)
tGen.gen()
tGen.createTables()
@@ -18,7 +18,7 @@
package com.intel.oap.tpc.ds

import com.intel.oap.tpc.util.TPCRunner
-import org.apache.log4j.{Level, LogManager}
+//import org.apache.log4j.{Level, LogManager}
import org.apache.spark.SparkConf
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.functions.{col, exp, expr}
@@ -64,7 +64,7 @@ class TPCDSSuite extends QueryTest with SharedSparkSession {

override def beforeAll(): Unit = {
super.beforeAll()
-LogManager.getRootLogger.setLevel(Level.WARN)
+//LogManager.getRootLogger.setLevel(Level.WARN)
val tGen = new TPCDSTableGen(spark, 0.1D, TPCDS_WRITE_PATH)
tGen.gen()
tGen.createTables()
@@ -24,7 +24,7 @@ import com.intel.oap.tpc.MallocUtils
import com.intel.oap.tpc.h.TPCHSuite.RAMMonitor
import com.intel.oap.tpc.util.TPCRunner
import org.apache.commons.lang.StringUtils
-import org.apache.log4j.{Level, LogManager}
+//import org.apache.log4j.{Level, LogManager}
import org.apache.spark.SparkConf
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.test.SharedSparkSession
@@ -63,7 +63,7 @@ class Orc_TPCHSuite extends QueryTest with SharedSparkSession {

override def beforeAll(): Unit = {
super.beforeAll()
-LogManager.getRootLogger.setLevel(Level.WARN)
+//LogManager.getRootLogger.setLevel(Level.WARN)
val tGen = new Orc_TPCHTableGen(spark, 0.1D, TPCH_WRITE_PATH)
tGen.gen()
tGen.createTables()
@@ -27,7 +27,7 @@ import com.intel.oap.tpc.MallocUtils
import com.intel.oap.tpc.h.TPCHSuite.RAMMonitor
import com.intel.oap.tpc.util.TPCRunner
import org.apache.commons.lang.StringUtils
-import org.apache.log4j.{Level, LogManager}
+//import org.apache.log4j.{Level, LogManager}
import org.apache.spark.SparkConf
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.test.SharedSparkSession
@@ -72,7 +72,7 @@ class TPCHSuite extends QueryTest with SharedSparkSession {

override def beforeAll(): Unit = {
super.beforeAll()
-LogManager.getRootLogger.setLevel(Level.WARN)
+//LogManager.getRootLogger.setLevel(Level.WARN)
val tGen = new TPCHTableGen(spark, 0.1D, TPCH_WRITE_PATH)
tGen.gen()
tGen.createTables()
@@ -17,7 +17,7 @@

package org.apache.spark.sql

-import org.apache.log4j.Level
+//import org.apache.log4j.Level

import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.test.SharedSparkSession
@@ -55,6 +55,7 @@ class CTEHintSuite extends QueryTest with SharedSparkSession {
assert(joinHints == expectedHints)
}

+/* Remark log4j1 unit test
def verifyJoinHintWithWarnings(
df: => DataFrame,
expectedHints: Seq[JoinHint],
@@ -72,6 +73,7 @@ class CTEHintSuite extends QueryTest with SharedSparkSession {
assert(warningMessages.contains(w))
}
}
+*/

def msgNoJoinForJoinHint(strategy: String): String =
s"A join hint (strategy=$strategy) is specified but it is not part of a join relation."
@@ -133,6 +135,7 @@ class CTEHintSuite extends QueryTest with SharedSparkSession {
Some(HintInfo(strategy = Some(SHUFFLE_HASH))),
None) :: Nil
)
+/* Remark log4j1 unit test
verifyJoinHintWithWarnings(
sql(
"""
@@ -151,6 +154,7 @@ class CTEHintSuite extends QueryTest with SharedSparkSession {
msgNoJoinForJoinHint("shuffle_hash") ::
msgJoinHintOverridden("broadcast") :: Nil
)
+*/
verifyJoinHint(
sql(
"""
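
The commented-out verifyJoinHintWithWarnings helper collected warning messages through a log4j1 appender. Below is a sketch of the same capture against the log4j2 API; LogCapture, CapturingAppender, and withCapturedWarnings are illustrative names, not part of this commit, and log4j2-core is assumed to be on the test classpath.

import scala.collection.mutable.ArrayBuffer

import org.apache.logging.log4j.LogManager
import org.apache.logging.log4j.core.{LogEvent, Logger}
import org.apache.logging.log4j.core.appender.AbstractAppender
import org.apache.logging.log4j.core.config.Property

object LogCapture {
  // Collects the formatted message of every event reaching the root logger.
  class CapturingAppender
    extends AbstractAppender("capture", null, null, false, Property.EMPTY_ARRAY) {
    val messages = new ArrayBuffer[String]()
    override def append(event: LogEvent): Unit =
      messages += event.getMessage.getFormattedMessage
  }

  // Runs `body`, returning its result plus everything logged while it ran;
  // the appender is detached afterwards so tests stay isolated.
  def withCapturedWarnings[T](body: => T): (T, Seq[String]) = {
    val appender = new CapturingAppender
    appender.start()
    val root = LogManager.getRootLogger.asInstanceOf[Logger]
    root.addAppender(appender)
    try {
      val result = body
      (result, appender.messages.toSeq)
    } finally {
      root.removeAppender(appender)
      appender.stop()
    }
  }
}

With a helper along these lines, the warning assertions above could check warnings.contains(expected) instead of going through log4j1's getRenderedMessage.
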
@@ -639,6 +639,7 @@ trait CharVarcharTestSuite extends QueryTest with SQLTestUtils {
class BasicCharVarcharTestSuite extends QueryTest with SharedSparkSession {
import testImplicits._

+/* Remark log4j1 unit test
test("user-specified schema in cast") {
def assertNoCharType(df: DataFrame): Unit = {
checkAnswer(df, Row("0"))
@@ -655,6 +656,7 @@ class BasicCharVarcharTestSuite extends QueryTest with SharedSparkSession {
assertNoCharType(sql("SELECT CAST(id AS CHAR(5)) FROM range(1)"))
}
}
+*/

def failWithInvalidCharUsage[T](fn: => T): Unit = {
val e = intercept[AnalysisException](fn)
@@ -17,7 +17,7 @@

package org.apache.spark.sql

-import org.apache.log4j.Level
+//import org.apache.log4j.Level

import org.apache.spark.sql.catalyst.optimizer.{BuildLeft, BuildRight, BuildSide, EliminateResolvedHint}
import org.apache.spark.sql.catalyst.plans.PlanTest
@@ -45,6 +45,7 @@ class JoinHintSuite extends PlanTest with SharedSparkSession with AdaptiveSparkP
def msgJoinHintOverridden(strategy: String): String =
s"Hint (strategy=$strategy) is overridden by another hint and will not take effect."

+/* Remark log4j1 unit test
def verifyJoinHintWithWarnings(
df: => DataFrame,
expectedHints: Seq[JoinHint],
@@ -62,6 +63,7 @@ class JoinHintSuite extends PlanTest with SharedSparkSession with AdaptiveSparkP
assert(warningMessages.contains(w))
}
}
+*/

def verifyJoinHint(df: DataFrame, expectedHints: Seq[JoinHint]): Unit = {
val optimized = df.queryExecution.optimizedPlan
@@ -210,6 +212,7 @@ class JoinHintSuite extends PlanTest with SharedSparkSession with AdaptiveSparkP
)
}

+/* Remark log4j1 unit test
test("hint merge") {
verifyJoinHintWithWarnings(
df.hint("broadcast").filter($"id" > 2).hint("broadcast").join(df, "id"),
@@ -248,7 +251,9 @@ class JoinHintSuite extends PlanTest with SharedSparkSession with AdaptiveSparkP
msgJoinHintOverridden("shuffle_hash") :: Nil
)
}
+*/

+/* Remark log4j1 unit test
test("hint merge - SQL") {
withTempView("a", "b", "c") {
df1.createOrReplaceTempView("a")
@@ -299,6 +304,7 @@ class JoinHintSuite extends PlanTest with SharedSparkSession with AdaptiveSparkP
)
}
}
+*/

test("nested hint") {
verifyJoinHint(
@@ -387,6 +387,7 @@ class SparkSessionBuilderSuite extends SparkFunSuite with BeforeAndAfterEach {

}

+/* Remark log4j1 unit test
test("SPARK-33944: warning setting hive.metastore.warehouse.dir using session options") {
val msg = "Not allowing to set hive.metastore.warehouse.dir in SparkSession's options"
val logAppender = new LogAppender(msg)
@@ -399,7 +400,9 @@ class SparkSessionBuilderSuite extends SparkFunSuite with BeforeAndAfterEach {
}
assert(logAppender.loggingEvents.exists(_.getRenderedMessage.contains(msg)))
}
+*/

+/* Remark log4j1 unit test
test("SPARK-33944: no warning setting spark.sql.warehouse.dir using session options") {
val msg = "Not allowing to set hive.metastore.warehouse.dir in SparkSession's options"
val logAppender = new LogAppender(msg)
@@ -412,4 +415,5 @@ class SparkSessionBuilderSuite extends SparkFunSuite with BeforeAndAfterEach {
}
assert(!logAppender.loggingEvents.exists(_.getRenderedMessage.contains(msg)))
}
+*/
}
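
If the two SPARK-33944 tests were ever restored, a hedged usage sketch of the withCapturedWarnings helper from earlier (hypothetical, not part of this commit) could stand in for the log4j1 LogAppender:

// Hypothetical usage of LogCapture.withCapturedWarnings from the sketch
// above; mirrors the first commented-out SPARK-33944 assertion.
import org.apache.spark.sql.SparkSession

val msg = "Not allowing to set hive.metastore.warehouse.dir in SparkSession's options"
val (_, warnings) = LogCapture.withCapturedWarnings {
  SparkSession.builder()
    .master("local[1]")
    .config("hive.metastore.warehouse.dir", "/tmp/any") // assumed to trigger the warning
    .getOrCreate()
    .stop()
}
assert(warnings.exists(_.contains(msg)))
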
@@ -21,7 +21,7 @@ import java.io.File
import java.net.URI

import com.intel.oap.execution.{ColumnarBroadcastHashJoinExec, ColumnarSortMergeJoinExec}
-import org.apache.log4j.Level
+//import org.apache.log4j.Level
import org.apache.spark.scheduler.{SparkListener, SparkListenerEvent, SparkListenerJobStart}
import org.apache.spark.sql.{Dataset, QueryTest, Row, SparkSession, Strategy}
import org.apache.spark.sql.catalyst.optimizer.{BuildLeft, BuildRight}
@@ -797,6 +797,7 @@ class AdaptiveQueryExecSuite
}
}

+/* Remark log4j1 unit test
test("SPARK-30719: do not log warning if intentionally skip AQE") {
val testAppender = new LogAppender("aqe logging warning test when skip")
withLogAppender(testAppender) {
@@ -811,7 +812,9 @@
s"${SQLConf.ADAPTIVE_EXECUTION_ENABLED.key} is" +
s" enabled but is not supported for")))
}
+*/

+/* Remark log4j1 unit test
test("test log level") {
def verifyLog(expectedLevel: Level): Unit = {
val logAppender = new LogAppender("adaptive execution")
@@ -856,6 +859,7 @@
}
}
}
+*/

test("tree string output") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
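
The removed "test log level" case asserted that adaptive-execution messages are emitted at the configured level. If the CapturingAppender sketch above also recorded event.getLevel, the check could be rewritten against log4j2 types. A hedged sketch; the "Plan changed" substring is an assumption about Spark's AQE log text, not something this commit verifies:

import org.apache.logging.log4j.Level

// Assumes events were captured as (Level, message) pairs.
def verifyLog(events: Seq[(Level, String)], expectedLevel: Level): Unit = {
  val adaptiveEvents = events.filter(_._2.contains("Plan changed"))
  assert(adaptiveEvents.forall(_._1 == expectedLevel),
    s"expected adaptive execution messages at $expectedLevel")
}
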
@@ -20,7 +20,7 @@ package org.apache.spark.sql.execution.adaptive
import java.io.File
import java.net.URI

-import org.apache.log4j.Level
+//import org.apache.log4j.Level
import org.apache.spark.SparkConf
import org.apache.spark.scheduler.{SparkListener, SparkListenerEvent, SparkListenerJobStart}
import org.apache.spark.sql.{Dataset, QueryTest, Row, SparkSession, Strategy}
@@ -1780,6 +1780,7 @@ abstract class CSVSuite
assert(exception.getMessage.contains("CSV header does not conform to the schema"))
}

+/* Remark log4j1 unit test
test("SPARK-23786: warning should be printed if CSV header doesn't conform to schema") {
val testAppender1 = new LogAppender("CSV header matches to schema")
withLogAppender(testAppender1) {
@@ -1809,6 +1810,7 @@
assert(testAppender2.loggingEvents
.exists(msg => msg.getRenderedMessage.contains("CSV header does not conform to the schema")))
}
+*/

test("SPARK-25134: check header on parsing of dataset with projection and column pruning") {
withSQLConf(SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> "true") {
@@ -19,7 +19,7 @@ package org.apache.spark.sql.execution.datasources.v2.jdbc
import java.sql.{Connection, DriverManager}
import java.util.Properties

-import org.apache.log4j.Level
+//import org.apache.log4j.Level

import org.apache.spark.SparkConf
import org.apache.spark.sql.{AnalysisException, QueryTest, Row}
@@ -391,6 +391,7 @@ class JDBCTableCatalogSuite extends QueryTest with SharedSparkSession {
}
}

+/* Remark log4j1 unit test
test("CREATE TABLE with table comment") {
withTable("h2.test.new_table") {
val logAppender = new LogAppender("table comment")
@@ -404,6 +405,7 @@ class JDBCTableCatalogSuite extends QueryTest with SharedSparkSession {
assert(createCommentWarning === false)
}
}
+*/

test("CREATE TABLE with table property") {
withTable("h2.test.new_table") {
@@ -20,7 +20,7 @@ package org.apache.spark.sql.internal
import java.util.TimeZone

import org.apache.hadoop.fs.Path
-import org.apache.log4j.Level
+//import org.apache.log4j.Level

import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.parser.ParseException
@@ -387,6 +387,7 @@ class SQLConfSuite extends QueryTest with SharedSparkSession {
assert(e.getMessage.contains(config))
}

+/* Remark log4j1 unit test
test("log deprecation warnings") {
val logAppender = new LogAppender("deprecated SQL configs")
def check(config: String): Unit = {
@@ -407,6 +408,7 @@
}
check(config2)
}
+*/

test("spark.sql.session.timeZone should only accept valid zone id") {
spark.conf.set(SQLConf.SESSION_LOCAL_TIMEZONE.key, MIT.getId)
@@ -20,7 +20,7 @@ package org.apache.spark.sql.nativesql
import java.io.File
import java.net.URI

-import org.apache.log4j.Level
+//import org.apache.log4j.Level
import org.apache.spark.SparkConf
import org.apache.spark.scheduler.{SparkListener, SparkListenerEvent, SparkListenerJobStart}
import org.apache.spark.sql.{Dataset, QueryTest, Row, SparkSession, Strategy}
