Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

demo: Add option to automatically apply the Geo-Partitioned Replicas topology to Movr #40355

Merged
merged 2 commits into from
Sep 12, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pkg/ccl/workloadccl/allccl/all_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,7 @@ func TestDeterministicInitialData(t *testing.T) {
`intro`: 0x81c6a8cfd9c3452a,
`json`: 0xcbf29ce484222325,
`ledger`: 0xebe27d872d980271,
`movr`: 0x4f19a54c7e779f9c,
`movr`: 0x6a094e9d15a07970,
`queue`: 0xcbf29ce484222325,
`rand`: 0xcbf29ce484222325,
`roachmart`: 0xda5e73423dbdb2d9,
Expand Down
12 changes: 12 additions & 0 deletions pkg/cli/cliflags/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -918,6 +918,18 @@ to us-east1 and availability zone to 3.
`,
}

// DemoGeoPartitionedReplicas is the `--geo-partitioned-replicas` flag for
// `cockroach demo`. When set with the Movr dataset, the demo starts a
// 9-node cluster spanning three simulated regions and applies the
// geo-partitioned replicas topology automatically; it errors out if an
// enterprise license cannot be acquired or a non-Movr dataset is used.
DemoGeoPartitionedReplicas = FlagInfo{
Name: "geo-partitioned-replicas",
Description: `
When used with the Movr dataset, create a 9 node cluster and automatically apply
the geo-partitioned replicas topology across 3 virtual regions named us-east1, us-west1, and
europe-west1. This command will fail with an error if an enterprise license could not
be acquired, or if the Movr dataset is not used. More information about the geo-partitioned
replicas topology can be found at this URL:
https://www.cockroachlabs.com/docs/v19.1/topology-geo-partitioned-replicas.html
`,
}

UseEmptyDatabase = FlagInfo{
Name: "empty",
Description: `
Expand Down
10 changes: 6 additions & 4 deletions pkg/cli/context.go
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,7 @@ func initCLIDefaults() {
demoCtx.useEmptyDatabase = false
demoCtx.runWorkload = false
demoCtx.localities = nil
demoCtx.geoPartitionedReplicas = false

initPreFlagsDefaults()

Expand Down Expand Up @@ -336,8 +337,9 @@ var sqlfmtCtx struct {
// demoCtx captures the command-line parameters of the `demo` command.
// Defaults set by InitCLIDefaults() above.
var demoCtx struct {
nodes int
useEmptyDatabase bool
runWorkload bool
localities demoLocalityList
nodes int
useEmptyDatabase bool
runWorkload bool
localities demoLocalityList
geoPartitionedReplicas bool
}
90 changes: 86 additions & 4 deletions pkg/cli/demo.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ to avoid pre-loading a dataset.
cockroach demo attempts to connect to a Cockroach Labs server to obtain a
temporary enterprise license for demoing enterprise features and enable
telemetry back to Cockroach Labs. In order to disable this behavior, set the
environment variable "COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING".
environment variable "COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING" to true.
`,
Example: ` cockroach demo`,
Args: cobra.NoArgs,
Expand Down Expand Up @@ -207,6 +207,10 @@ func setupTransientServers(
}
urlStr := url.String()

// Communicate information about license acquisition to services
// that depend on it.
licenseSuccess := make(chan bool, 1)

// Start up the update check loop.
// We don't do this in (*server.Server).Start() because we don't want it
// in tests.
Expand All @@ -230,8 +234,13 @@ func setupTransientServers(
const msg = "Unable to acquire demo license. Enterprise features are not enabled in this session.\n"
fmt.Fprint(stderr, msg)
}
licenseSuccess <- success
}()
}
} else {
// If we aren't supposed to check for a license, then automatically
// notify failure.
licenseSuccess <- false
}

// If there is a load generator, create its database and load its
Expand All @@ -253,10 +262,40 @@ func setupTransientServers(
return ``, ``, cleanup, err
}

partitioningComplete := make(chan struct{}, 1)
// If we are requested to prepartition our data, spawn a goroutine to do the partitioning.
if demoCtx.geoPartitionedReplicas {
go func() {
success := <-licenseSuccess
// Only try partitioning if license acquisition was successful.
if success {
db, err := gosql.Open("postgres", urlStr)
if err != nil {
exitWithError("demo", err)
}
defer db.Close()
// Based on validation done in setup, we know that this workload has a partitioning step.
if err := gen.(workload.Hookser).Hooks().Partition(db); err != nil {
exitWithError("demo", err)
}
partitioningComplete <- struct{}{}
} else {
const msg = "license acquisition was unsuccessful. Enterprise features are needed to partition data"
exitWithError("demo", errors.New(msg))
}
}()
}

if demoCtx.runWorkload {
if err := runWorkload(ctx, gen, urlStr, stopper); err != nil {
return ``, ``, cleanup, err
}
go func() {
// If partitioning was requested, wait for that to complete before running the workload.
if demoCtx.geoPartitionedReplicas {
<-partitioningComplete
}
if err := runWorkload(ctx, gen, urlStr, stopper); err != nil {
exitWithError("demo", err)
}
}()
}
}

Expand Down Expand Up @@ -319,6 +358,49 @@ func runDemo(cmd *cobra.Command, gen workload.Generator) error {
return errors.New("cannot run a workload against an empty database")
}

// Make sure that the user didn't request to have a topology and an empty database.
if demoCtx.geoPartitionedReplicas && demoCtx.useEmptyDatabase {
return errors.New("cannot setup geo-partitioned replicas topology on an empty database")
}

// Make sure that the Movr database is selected when automatically partitioning.
if demoCtx.geoPartitionedReplicas && (gen == nil || gen.Meta().Name != "movr") {
return errors.New("--geo-partitioned-replicas must be used with the Movr dataset")
}

// If the geo-partitioned replicas flag was given and the demo localities have changed, throw an error.
if demoCtx.geoPartitionedReplicas && demoCtx.localities != nil {
return errors.New("--demo-locality cannot be used with --geo-partitioned-replicas")
}

// If the geo-partitioned replicas flag was given and the nodes have changed, throw an error.
if demoCtx.geoPartitionedReplicas && cmd.Flags().Lookup(cliflags.DemoNodes.Name).Changed {
return errors.New("--nodes cannot be used with --geo-partitioned-replicas")
}

// If geo-partitioned-replicas is requested, make sure the workload has a Partitioning step.
if demoCtx.geoPartitionedReplicas {
configErr := errors.New(fmt.Sprintf("workload %s is not configured to have a partitioning step", gen.Meta().Name))
hookser, ok := gen.(workload.Hookser)
if !ok {
return configErr
}
if hookser.Hooks().Partition == nil {
return configErr
}
}

// The geo-partitioned replicas demo only works on a 9 node cluster, so set the node count as such.
// Ignore input user localities so that the nodes have the same attributes/localities as expected.
if demoCtx.geoPartitionedReplicas {
const msg = `#
# --geo-partitioned replicas operates on a 9 node cluster.
# The cluster size has been changed from the default to 9 nodes.`
fmt.Println(msg)
demoCtx.nodes = 9
demoCtx.localities = nil
}

connURL, adminURL, cleanup, err := setupTransientServers(cmd, gen)
defer cleanup()
if err != nil {
Expand Down
1 change: 1 addition & 0 deletions pkg/cli/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -586,6 +586,7 @@ func init() {
IntFlag(demoFlags, &demoCtx.nodes, cliflags.DemoNodes, 1)
BoolFlag(demoFlags, &demoCtx.runWorkload, cliflags.RunDemoWorkload, false)
VarFlag(demoFlags, &demoCtx.localities, cliflags.DemoNodeLocality)
BoolFlag(demoFlags, &demoCtx.geoPartitionedReplicas, cliflags.DemoGeoPartitionedReplicas, false)
// The --empty flag is only valid for the top level demo command,
// so we use the regular flag set.
BoolFlag(demoCmd.Flags(), &demoCtx.useEmptyDatabase, cliflags.UseEmptyDatabase, false)
Expand Down
87 changes: 87 additions & 0 deletions pkg/cli/interactive_tests/test_demo_partitioning.tcl
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
#! /usr/bin/env expect -f

source [file join [file dirname $argv0] common.tcl]

start_test "Expect partitioning succeeds"
# Test that partitioning works when a demo license can be acquired.
spawn $argv demo --geo-partitioned-replicas

# Wait for the interactive SQL shell to start up.
eexpect "movr>"

# Send multiple "SHOW PARTITIONS" requests to the DB, since partitioning
# happens asynchronously and may not be complete on the first attempt.
for {set i 0} {$i < 10} {incr i} {
send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\];\r"
sleep 1
}

# The number of partitions across the MovR database we expect is 24.
eexpect "24"
eexpect "(1 row)"
eexpect "movr>"

# NOTE(review): the output of this statement is never checked with eexpect —
# confirm whether this send is needed or its output should be asserted.
send "SHOW PARTITIONS FROM TABLE vehicles;\r"

# Verify the partitions are as we expect: 8 objects per partition name,
# with the expected partition values and zone-config constraints.
send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='us_west';\r"
eexpect "8"
eexpect "(1 row)"
eexpect "movr>"

send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='us_west' AND partition_value='(''seattle''), (''san francisco''), (''los angeles'')';\r"
eexpect "8"
eexpect "(1 row)"
eexpect "movr>"

send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='us_west' AND zone_config='constraints = ''\[+region=us-west1\]''';\r"
eexpect "8"
eexpect "(1 row)"
eexpect "movr>"

send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='us_east';\r"
eexpect "8"
eexpect "(1 row)"
eexpect "movr>"

send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='us_east' AND partition_value='(''new york''), (''boston''), (''washington dc'')';\r"
eexpect "8"
eexpect "(1 row)"
eexpect "movr>"

send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='us_east' AND zone_config='constraints = ''\[+region=us-east1\]''';\r"
eexpect "8"
eexpect "(1 row)"
eexpect "movr>"

send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='europe_west';\r"
eexpect "8"
eexpect "(1 row)"
eexpect "movr>"

send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='europe_west' AND partition_value='(''amsterdam''), (''paris''), (''rome'')';\r"
eexpect "8"
eexpect "(1 row)"
eexpect "movr>"

send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='europe_west' AND zone_config='constraints = ''\[+region=europe-west1\]''';\r"
eexpect "8"
eexpect "(1 row)"
eexpect "movr>"

interrupt
eexpect eof
end_test


start_test "Expect an error if geo-partitioning is requested and a license cannot be acquired"

# Set the env var that makes the demo skip license acquisition, so that
# partitioning cannot succeed and the command must fail.
set env(COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING) "true"
spawn $argv demo --geo-partitioned-replicas
# Expect a failure.
eexpect "Error: license acquisition was unsuccessful. Enterprise features are needed to partition data"
# Clean up after the test.
interrupt
eexpect eof
end_test

105 changes: 101 additions & 4 deletions pkg/workload/movr/movr.go
Original file line number Diff line number Diff line change
Expand Up @@ -112,9 +112,6 @@ var cities = []struct {
{city: "seattle", locality: "us_west"},
{city: "san francisco", locality: "us_west"},
{city: "los angeles", locality: "us_west"},
{city: "chicago", locality: "us_central"},
{city: "detroit", locality: "us_central"},
{city: "minneapolis", locality: "us_central"},
{city: "amsterdam", locality: "eu_west"},
{city: "paris", locality: "eu_west"},
{city: "rome", locality: "eu_west"},
Expand Down Expand Up @@ -210,8 +207,108 @@ func (g *movr) Hooks() workload.Hooks {
}
}
}
return nil
},
// This partitioning step is intended for a 3 region cluster, which has the localities region=us-east1,
// region=us-west1, region=europe-west1.
Partition: func(db *gosql.DB) error {
// Create us-west, us-east and europe-west partitions.
q := `
ALTER TABLE users PARTITION BY LIST (city) (
PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'),
PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'),
PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome')
);
ALTER TABLE vehicles PARTITION BY LIST (city) (
PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'),
PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'),
PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome')
);
ALTER INDEX vehicles_auto_index_fk_city_ref_users PARTITION BY LIST (city) (
PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'),
PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'),
PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome')
);
ALTER TABLE rides PARTITION BY LIST (city) (
PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'),
PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'),
PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome')
);
ALTER INDEX rides_auto_index_fk_city_ref_users PARTITION BY LIST (city) (
PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'),
PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'),
PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome')
);
ALTER INDEX rides_auto_index_fk_vehicle_city_ref_vehicles PARTITION BY LIST (vehicle_city) (
PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'),
PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'),
PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome')
);
ALTER TABLE user_promo_codes PARTITION BY LIST (city) (
PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'),
PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'),
PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome')
);
ALTER TABLE vehicle_location_histories PARTITION BY LIST (city) (
PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'),
PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'),
PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome')
);
`
if _, err := db.Exec(q); err != nil {
return err
}

// Alter the partitions to place replicas in the appropriate zones.
q = `
ALTER PARTITION us_west OF INDEX users@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-west1"]';
ALTER PARTITION us_east OF INDEX users@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-east1"]';
ALTER PARTITION europe_west OF INDEX users@* CONFIGURE ZONE USING CONSTRAINTS='["+region=europe-west1"]';

ALTER PARTITION us_west OF INDEX vehicles@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-west1"]';
ALTER PARTITION us_east OF INDEX vehicles@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-east1"]';
ALTER PARTITION europe_west OF INDEX vehicles@* CONFIGURE ZONE USING CONSTRAINTS='["+region=europe-west1"]';

ALTER PARTITION us_west OF INDEX rides@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-west1"]';
ALTER PARTITION us_east OF INDEX rides@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-east1"]';
ALTER PARTITION europe_west OF INDEX rides@* CONFIGURE ZONE USING CONSTRAINTS='["+region=europe-west1"]';

ALTER PARTITION us_west OF INDEX user_promo_codes@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-west1"]';
ALTER PARTITION us_east OF INDEX user_promo_codes@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-east1"]';
ALTER PARTITION europe_west OF INDEX user_promo_codes@* CONFIGURE ZONE USING CONSTRAINTS='["+region=europe-west1"]';

ALTER PARTITION us_west OF INDEX vehicle_location_histories@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-west1"]';
ALTER PARTITION us_east OF INDEX vehicle_location_histories@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-east1"]';
ALTER PARTITION europe_west OF INDEX vehicle_location_histories@* CONFIGURE ZONE USING CONSTRAINTS='["+region=europe-west1"]';
`
if _, err := db.Exec(q); err != nil {
return err
}

// Create some duplicate indexes for the promo_codes table.
q = `
CREATE INDEX promo_codes_idx_us_west ON promo_codes (code) STORING (description, creation_time, expiration_time, rules);
CREATE INDEX promo_codes_idx_europe_west ON promo_codes (code) STORING (description, creation_time, expiration_time, rules);
`
if _, err := db.Exec(q); err != nil {
return err
}

// TODO(dan): Partitions.
// Apply configurations to the index for fast reads.
q = `
ALTER TABLE promo_codes CONFIGURE ZONE USING num_replicas = 3,
constraints = '{"+region=us-east1": 1}',
lease_preferences = '[[+region=us-east1]]';
ALTER INDEX promo_codes@promo_codes_idx_us_west CONFIGURE ZONE USING
constraints = '{"+region=us-west1": 1}',
lease_preferences = '[[+region=us-west1]]';
ALTER INDEX promo_codes@promo_codes_idx_europe_west CONFIGURE ZONE USING
constraints = '{"+region=europe-west1": 1}',
lease_preferences = '[[+region=europe-west1]]';
`
if _, err := db.Exec(q); err != nil {
return err
}
return nil
},
}
Expand Down
Loading