[MINOR][DOC] Correct some document description errors
## What changes were proposed in this pull request?

Correct some erroneous descriptions in configuration validation error messages.

## How was this patch tested?
N/A

Closes apache#23162 from 10110346/docerror.

Authored-by: liuxian <[email protected]>
Signed-off-by: Sean Owen <[email protected]>
(cherry picked from commit 60e4239)
Signed-off-by: Sean Owen <[email protected]>
10110346 authored and kai-chi committed Aug 1, 2019
1 parent ea1ae55 commit 31447e2
Showing 2 changed files with 8 additions and 8 deletions.
@@ -240,7 +240,7 @@ package object config {
private[spark] val LISTENER_BUS_EVENT_QUEUE_CAPACITY =
ConfigBuilder("spark.scheduler.listenerbus.eventqueue.capacity")
.intConf
.checkValue(_ > 0, "The capacity of listener bus event queue must not be negative")
.checkValue(_ > 0, "The capacity of listener bus event queue must be positive")
.createWithDefault(10000)

private[spark] val LISTENER_BUS_METRICS_MAX_LISTENER_CLASSES_TIMED =
@@ -389,8 +389,8 @@ package object config {
.doc("The chunk size in bytes during writing out the bytes of ChunkedByteBuffer.")
.bytesConf(ByteUnit.BYTE)
.checkValue(_ <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH,
"The chunk size during writing out the bytes of" +
" ChunkedByteBuffer should not larger than Int.MaxValue - 15.")
"The chunk size during writing out the bytes of ChunkedByteBuffer should" +
s" be less than or equal to ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.")
.createWithDefault(64 * 1024 * 1024)

private[spark] val CHECKPOINT_COMPRESS =
@@ -462,7 +462,7 @@
"made in creating intermediate shuffle files.")
.bytesConf(ByteUnit.KiB)
.checkValue(v => v > 0 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024,
s"The file buffer size must be greater than 0 and less than" +
s"The file buffer size must be positive and less than or equal to" +
s" ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024}.")
.createWithDefaultString("32k")

@@ -472,7 +472,7 @@
"is written in unsafe shuffle writer. In KiB unless otherwise specified.")
.bytesConf(ByteUnit.KiB)
.checkValue(v => v > 0 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024,
s"The buffer size must be greater than 0 and less than" +
s"The buffer size must be positive and less than or equal to" +
s" ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024}.")
.createWithDefaultString("32k")

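A side note on the ChunkedByteBuffer hunk above (not part of the commit): the removed message hard-coded the limit as `Int.MaxValue - 15`, which is the value of `ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH`, so interpolating the constant prints the same bound while staying in sync with the code. A minimal Scala sketch of that equivalence, with the constant redefined locally purely for illustration:

```scala
// Illustration only: maxRoundedArrayLength stands in for
// ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH, which the removed message
// text described as Int.MaxValue - 15.
object MaxRoundedArrayLengthSketch {
  val maxRoundedArrayLength: Int = Int.MaxValue - 15 // 2147483632

  def main(args: Array[String]): Unit = {
    // The interpolated message now prints the concrete limit.
    println(s"The chunk size should be less than or equal to $maxRoundedArrayLength.")
  }
}
```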
@@ -306,7 +306,7 @@ object SQLConf {
"factor as the estimated data size, in case the data is compressed in the file and lead to" +
" a heavily underestimated result.")
.doubleConf
.checkValue(_ > 0, "the value of fileDataSizeFactor must be larger than 0")
.checkValue(_ > 0, "the value of fileDataSizeFactor must be greater than 0")
.createWithDefault(1.0)

val PARQUET_SCHEMA_MERGING_ENABLED = buildConf("spark.sql.parquet.mergeSchema")
@@ -648,7 +648,7 @@ object SQLConf {
val BUCKETING_MAX_BUCKETS = buildConf("spark.sql.sources.bucketing.maxBuckets")
.doc("The maximum number of buckets allowed. Defaults to 100000")
.intConf
.checkValue(_ > 0, "the value of spark.sql.sources.bucketing.maxBuckets must be larger than 0")
.checkValue(_ > 0, "the value of spark.sql.sources.bucketing.maxBuckets must be greater than 0")
.createWithDefault(100000)

val CROSS_JOINS_ENABLED = buildConf("spark.sql.crossJoin.enabled")
@@ -1116,7 +1116,7 @@ object SQLConf {
.internal()
.doc("The number of bins when generating histograms.")
.intConf
- .checkValue(num => num > 1, "The number of bins must be larger than 1.")
+ .checkValue(num => num > 1, "The number of bins must be greater than 1.")
.createWithDefault(254)

val PERCENTILE_ACCURACY =
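For readers outside the Spark codebase, here is a minimal standalone Scala sketch (hypothetical helpers, not Spark's `ConfigBuilder` API) of why the reworded messages above match the predicates passed to `checkValue`:

```scala
// Hypothetical helpers for illustration only; Spark's real validation lives in
// ConfigBuilder.checkValue, which these merely mimic.
object CheckValueWordingSketch {
  // Mirrors checkValue(_ > 0, ...): zero fails the predicate, so the value
  // must be strictly positive, not merely "not negative".
  def requirePositive(v: Int, what: String): Unit =
    require(v > 0, s"The $what must be positive")

  // Mirrors checkValue(v => v > 0 && v <= max, ...): the upper bound is
  // inclusive, hence "less than or equal to".
  def requireInInclusiveRange(v: Long, max: Long, what: String): Unit =
    require(v > 0 && v <= max, s"The $what must be positive and less than or equal to $max")

  def main(args: Array[String]): Unit = {
    requirePositive(10000, "capacity of listener bus event queue")              // passes
    requireInInclusiveRange(32, (Int.MaxValue - 15) / 1024, "file buffer size") // passes
    // requirePositive(0, "capacity of listener bus event queue") would throw an
    // IllegalArgumentException even though 0 is not negative, which is exactly
    // why the old wording was misleading.
  }
}
```

In short, `_ > 0` also rejects 0, so "must not be negative" understated the requirement, and `v <= max` makes the upper bound inclusive, so "less than or equal to" is the accurate phrasing.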
