Merge #37068
37068: sql: show string values for enum cluster settings r=jordanlewis a=jordanlewis

Previously, showing the value of an enum cluster setting returned the
setting's internal numeric enum ID, which is not helpful. Now it returns the
corresponding string value instead.

Closes #35811.

Release note: None

Co-authored-by: Jordan Lewis <[email protected]>
craig[bot] and jordanlewis committed Apr 24, 2019
2 parents 165f452 + a799105 commit 3ffc259
Showing 6 changed files with 25 additions and 16 deletions.
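
To make the change concrete before the diffs, here is a rough Go sketch (not part of the commit) of a client reading an enum cluster setting over the wire. The driver choice and connection URL are assumptions; the setting name and the "auto" value come from the generated settings table below. Before this commit, the scanned value would have been the enum's numeric ID ("1").

// Sketch: read an enum cluster setting and print its value.
// With this commit the result is the mode's name ("auto") rather than its ID ("1").
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // CockroachDB speaks the Postgres wire protocol.
)

func main() {
	// Placeholder connection string; point it at a running cluster.
	db, err := sql.Open("postgres", "postgresql://root@localhost:26257/?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var mode string
	if err := db.QueryRow("SHOW CLUSTER SETTING sql.defaults.distsql").Scan(&mode); err != nil {
		log.Fatal(err)
	}
	fmt.Println("sql.defaults.distsql =", mode) // e.g. "auto"
}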
docs/generated/settings/settings.html: 5 additions & 5 deletions
@@ -25,7 +25,7 @@
<tr><td><code>jobs.retention_time</code></td><td>duration</td><td><code>336h0m0s</code></td><td>the amount of time to retain records for completed jobs before</td></tr>
<tr><td><code>kv.allocator.lease_rebalancing_aggressiveness</code></td><td>float</td><td><code>1</code></td><td>set greater than 1.0 to rebalance leases toward load more aggressively, or between 0 and 1.0 to be more conservative about rebalancing leases</td></tr>
<tr><td><code>kv.allocator.load_based_lease_rebalancing.enabled</code></td><td>boolean</td><td><code>true</code></td><td>set to enable rebalancing of range leases based on load and latency</td></tr>
-<tr><td><code>kv.allocator.load_based_rebalancing</code></td><td>enumeration</td><td><code>2</code></td><td>whether to rebalance based on the distribution of QPS across stores [off = 0, leases = 1, leases and replicas = 2]</td></tr>
+<tr><td><code>kv.allocator.load_based_rebalancing</code></td><td>enumeration</td><td><code>leases and replicas</code></td><td>whether to rebalance based on the distribution of QPS across stores [off = 0, leases = 1, leases and replicas = 2]</td></tr>
<tr><td><code>kv.allocator.qps_rebalance_threshold</code></td><td>float</td><td><code>0.25</code></td><td>minimum fraction away from the mean a store's QPS (such as queries per second) can be before it is considered overfull or underfull</td></tr>
<tr><td><code>kv.allocator.range_rebalance_threshold</code></td><td>float</td><td><code>0.05</code></td><td>minimum fraction away from the mean a store's range count can be before it is considered overfull or underfull</td></tr>
<tr><td><code>kv.bulk_io_write.addsstable_max_rate</code></td><td>float</td><td><code>1.7976931348623157E+308</code></td><td>maximum number of AddSSTable requests per second for a single store</td></tr>
@@ -85,12 +85,12 @@
<tr><td><code>server.time_until_store_dead</code></td><td>duration</td><td><code>5m0s</code></td><td>the time after which if there is no new gossiped information about a store, it is considered dead</td></tr>
<tr><td><code>server.web_session_timeout</code></td><td>duration</td><td><code>168h0m0s</code></td><td>the duration that a newly created web session will be valid</td></tr>
<tr><td><code>sql.defaults.default_int_size</code></td><td>integer</td><td><code>8</code></td><td>the size, in bytes, of an INT type</td></tr>
-<tr><td><code>sql.defaults.distsql</code></td><td>enumeration</td><td><code>1</code></td><td>default distributed SQL execution mode [off = 0, auto = 1, on = 2]</td></tr>
-<tr><td><code>sql.defaults.experimental_vectorize</code></td><td>enumeration</td><td><code>0</code></td><td>default experimental_vectorize mode [off = 0, on = 1, always = 2]</td></tr>
-<tr><td><code>sql.defaults.optimizer</code></td><td>enumeration</td><td><code>1</code></td><td>default cost-based optimizer mode [off = 0, on = 1, local = 2]</td></tr>
+<tr><td><code>sql.defaults.distsql</code></td><td>enumeration</td><td><code>auto</code></td><td>default distributed SQL execution mode [off = 0, auto = 1, on = 2]</td></tr>
+<tr><td><code>sql.defaults.experimental_vectorize</code></td><td>enumeration</td><td><code>off</code></td><td>default experimental_vectorize mode [off = 0, on = 1, always = 2]</td></tr>
+<tr><td><code>sql.defaults.optimizer</code></td><td>enumeration</td><td><code>on</code></td><td>default cost-based optimizer mode [off = 0, on = 1, local = 2]</td></tr>
<tr><td><code>sql.defaults.reorder_joins_limit</code></td><td>integer</td><td><code>4</code></td><td>default number of joins to reorder</td></tr>
<tr><td><code>sql.defaults.results_buffer.size</code></td><td>byte size</td><td><code>16 KiB</code></td><td>default size of the buffer that accumulates results for a statement or a batch of statements before they are sent to the client. This can be overridden on an individual connection with the 'results_buffer_size' parameter. Note that auto-retries generally only happen while no results have been delivered to the client, so reducing this size can increase the number of retriable errors a client receives. On the other hand, increasing the buffer size can increase the delay until the client receives the first result row. Updating the setting only affects new connections. Setting to 0 disables any buffering.</td></tr>
-<tr><td><code>sql.defaults.serial_normalization</code></td><td>enumeration</td><td><code>0</code></td><td>default handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2]</td></tr>
+<tr><td><code>sql.defaults.serial_normalization</code></td><td>enumeration</td><td><code>rowid</code></td><td>default handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2]</td></tr>
<tr><td><code>sql.distsql.distribute_index_joins</code></td><td>boolean</td><td><code>true</code></td><td>if set, for index joins we instantiate a join reader on every node that has a stream; if not set, we use a single join reader</td></tr>
<tr><td><code>sql.distsql.flow_stream_timeout</code></td><td>duration</td><td><code>10s</code></td><td>amount of time incoming streams wait for a flow to be set up before erroring out</td></tr>
<tr><td><code>sql.distsql.interleaved_joins.enabled</code></td><td>boolean</td><td><code>true</code></td><td>if set we plan interleaved table joins instead of merge joins when possible</td></tr>
pkg/server/settingsworker_test.go: 2 additions & 2 deletions
@@ -241,15 +241,15 @@ func TestSettingsSetAndShow(t *testing.T) {
if expected, actual := int64(2), enumA.Get(&st.SV); expected != actual {
t.Fatalf("expected %v, got %v", expected, actual)
}
-if expected, actual := "2", db.QueryStr(t, fmt.Sprintf(showQ, enumKey))[0][0]; expected != actual {
+if expected, actual := "bar", db.QueryStr(t, fmt.Sprintf(showQ, enumKey))[0][0]; expected != actual {
t.Fatalf("expected %v, got %v", expected, actual)
}

db.Exec(t, fmt.Sprintf(setQ, enumKey, "'foo'"))
if expected, actual := int64(1), enumA.Get(&st.SV); expected != actual {
t.Fatalf("expected %v, got %v", expected, actual)
}
-if expected, actual := "1", db.QueryStr(t, fmt.Sprintf(showQ, enumKey))[0][0]; expected != actual {
+if expected, actual := "foo", db.QueryStr(t, fmt.Sprintf(showQ, enumKey))[0][0]; expected != actual {
t.Fatalf("expected %v, got %v", expected, actual)
}

pkg/settings/enum.go: 9 additions & 0 deletions
@@ -37,6 +37,15 @@ func (e *EnumSetting) Typ() string {
return "e"
}

+// String returns the enum's string value.
+func (e *EnumSetting) String(sv *Values) string {
+	enumID := e.Get(sv)
+	if str, ok := e.enumValues[enumID]; ok {
+		return str
+	}
+	return fmt.Sprintf("unknown(%d)", enumID)
+}

// ParseEnum returns the enum value, and a boolean that indicates if it was parseable.
func (e *EnumSetting) ParseEnum(raw string) (int64, bool) {
rawLower := strings.ToLower(raw)
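The String method added above resolves the setting's stored integer back to the name it was registered under, falling back to an "unknown(N)" placeholder. A standalone sketch of that lookup pattern follows; the map contents are hypothetical stand-ins for the EnumSetting's internal enumValues mapping.

// Sketch of the ID-to-name lookup with fallback used by EnumSetting.String.
package main

import "fmt"

// Hypothetical registrations: enum ID -> name.
var enumValues = map[int64]string{0: "off", 1: "auto", 2: "on"}

func enumString(id int64) string {
	if str, ok := enumValues[id]; ok {
		return str
	}
	return fmt.Sprintf("unknown(%d)", id)
}

func main() {
	fmt.Println(enumString(2)) // "on"
	fmt.Println(enumString(9)) // "unknown(9)"
}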
pkg/sql/logictest/logic.go: 4 additions & 4 deletions
@@ -1014,23 +1014,23 @@ func (t *logicTest) setup(cfg testClusterConfig) {
); err != nil {
t.Fatal(err)
}
-wantedMode, ok := sessiondata.DistSQLExecModeFromString(cfg.overrideDistSQLMode)
+_, ok := sessiondata.DistSQLExecModeFromString(cfg.overrideDistSQLMode)
if !ok {
t.Fatalf("invalid distsql mode override: %s", cfg.overrideDistSQLMode)
}
// Wait until all servers are aware of the setting.
testutils.SucceedsSoon(t.t, func() error {
for i := 0; i < t.cluster.NumServers(); i++ {
-var m sessiondata.DistSQLExecMode
+var m string
err := t.cluster.ServerConn(i % t.cluster.NumServers()).QueryRow(
"SHOW CLUSTER SETTING sql.defaults.distsql",
).Scan(&m)
if err != nil {
t.Fatal(errors.Wrapf(err, "%d", i))
}
-if m != wantedMode {
+if m != cfg.overrideDistSQLMode {
return errors.Errorf("node %d is still waiting for update of DistSQLMode to %s (have %s)",
-i, wantedMode, m,
+i, cfg.overrideDistSQLMode, m,
)
}
}
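Since SHOW CLUSTER SETTING now reports the mode name, the harness above compares the configured override string directly against the scanned value and keeps the parse only to validate the override. A minimal sketch of that polling pattern; the helper name, retry bounds, and connection handling are assumptions, not code from the harness.

// Sketch: poll until an enum cluster setting reports the wanted name on a node.
package settingwait

import (
	"database/sql"
	"fmt"
	"time"
)

// waitForDistSQLMode is a hypothetical helper, not the real logic test code.
func waitForDistSQLMode(db *sql.DB, want string) error {
	for attempt := 0; attempt < 50; attempt++ {
		var have string
		if err := db.QueryRow("SHOW CLUSTER SETTING sql.defaults.distsql").Scan(&have); err != nil {
			return err
		}
		if have == want {
			return nil // the gossiped update has reached this node
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("sql.defaults.distsql never reached %q", want)
}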
pkg/sql/logictest/testdata/logic_test/show_source: 2 additions & 2 deletions
@@ -67,11 +67,11 @@ transaction_priority normal
transaction_read_only off
transaction_status NoTxn

-query I colnames
+query T colnames
SELECT * FROM [SHOW CLUSTER SETTING sql.defaults.distsql]
----
sql.defaults.distsql
-0
+off

query TTTT colnames
SELECT * FROM [SHOW ALL CLUSTER SETTINGS] WHERE variable LIKE '%organization'
pkg/sql/show_cluster_setting.go: 3 additions & 3 deletions
@@ -120,9 +120,9 @@ func (p *planner) ShowClusterSetting(
}
var dType *types.T
switch val.(type) {
-case *settings.IntSetting, *settings.EnumSetting:
+case *settings.IntSetting:
dType = types.Int
-case *settings.StringSetting, *settings.ByteSizeSetting, *settings.StateMachineSetting:
+case *settings.StringSetting, *settings.ByteSizeSetting, *settings.StateMachineSetting, *settings.EnumSetting:
dType = types.String
case *settings.BoolSetting:
dType = types.Bool
@@ -158,7 +158,7 @@ func (p *planner) ShowClusterSetting(
case *settings.DurationSetting:
d = &tree.DInterval{Duration: duration.MakeDuration(s.Get(&st.SV).Nanoseconds(), 0, 0)}
case *settings.EnumSetting:
-d = tree.NewDInt(tree.DInt(s.Get(&st.SV)))
+d = tree.NewDString(s.String(&st.SV))
case *settings.ByteSizeSetting:
d = tree.NewDString(s.String(&st.SV))
default:
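The two hunks above work together: the first makes the result column of SHOW CLUSTER SETTING a string type for enum settings, and the second renders the value through the new EnumSetting.String. A loose sketch of that dispatch idea, using toy types in place of the real settings and tree datums.

// Sketch: pick a display form per setting kind, as the planner's switch does.
package settingdisplay

import "fmt"

// enumLike stands in for settings that, like EnumSetting after this commit,
// know how to print themselves as a human-readable string.
type enumLike interface {
	DisplayString() string
}

type intSetting struct{ v int64 }

type enumSetting struct{ name string }

func (e enumSetting) DisplayString() string { return e.name }

// render mirrors the planner's dispatch: enum-like settings go through their
// string form, integer settings stay numeric.
func render(s interface{}) string {
	switch t := s.(type) {
	case enumLike:
		return t.DisplayString()
	case intSetting:
		return fmt.Sprintf("%d", t.v)
	default:
		return fmt.Sprintf("%v", s)
	}
}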
