Add httpReq to placement source (debug) #1981

Merged (6 commits) on Oct 14, 2019. The diff below shows changes from 5 commits.
21 changes: 13 additions & 8 deletions glide.lock

Some generated files are not rendered by default.

8 changes: 5 additions & 3 deletions scripts/docker-integration-tests/common.sh
@@ -47,7 +47,9 @@ function setup_single_m3db_node {
   local dbnode_host=${DBNODE_HOST:-dbnode01}
   local dbnode_port=${DBNODE_PORT:-9000}
   local dbnode_health_port=${DBNODE_HEALTH_PORT:-9002}
+  local dbnode_id=${DBNODE_ID:-m3db_local}
   local coordinator_port=${COORDINATOR_PORT:-7201}
+  local zone=${ZONE:-embedded}

   echo "Wait for API to be available"
   ATTEMPTS=100 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
@@ -62,9 +64,9 @@ function setup_single_m3db_node {
             "replicationFactor": 1,
             "hosts": [
                 {
-                    "id": "m3db_local",
+                    "id": "'${dbnode_id}'",
                     "isolation_group": "rack-a",
-                    "zone": "embedded",
+                    "zone": "'${zone}'",
                     "weight": 1024,
                     "address": "'"${dbnode_host}"'",
                     "port": '"${dbnode_port}"'
@@ -74,7 +76,7 @@

   echo "Wait until placement is init'd"
   ATTEMPTS=10 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
-    '[ "$(curl -sSf 0.0.0.0:'"${coordinator_port}"'/api/v1/placement | jq .placement.instances.m3db_local.id)" == \"m3db_local\" ]'
+    '[ "$(curl -sSf 0.0.0.0:'"${coordinator_port}"'/api/v1/placement | jq .placement.instances.'${dbnode_id}'.id)" == \"'${dbnode_id}'\" ]'

   wait_for_namespaces
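With the node ID and zone now parameterized, the helper can be pointed at a non-default placement instance ID and zone purely through environment variables. A minimal sketch of invoking it, assuming common.sh has been sourced and a coordinator API is reachable on the default port; the override form is exactly what the new integration test further down uses:

source "$GOPATH/src/github.com/m3db/m3/scripts/docker-integration-tests/common.sh"

# Defaults apply when the variables are unset (id m3db_local, zone embedded).
setup_single_m3db_node

# Or override the placement instance ID and zone, as the new test script below does.
DBNODE_ID="dbnode01" ZONE="bar-zone" setup_single_m3db_node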
New file: scripts/docker-integration-tests/dedicated_etcd_embedded_coordinator/docker-compose.yml
@@ -0,0 +1,46 @@
version: "3.5"
services:
  dbnode01:
    expose:
      - "9000-9004"
      - "7201"
    ports:
      - "0.0.0.0:9000-9004:9000-9004"
      - "0.0.0.0:7201:7201"
    networks:
      - backend
    image: "m3dbnode_integration:${REVISION}"
    environment:
      - M3DB_HOST_ID=dbnode01
    volumes:
      - "./m3dbnode.yml:/etc/m3dbnode/m3dbnode.yml"
  etcd01:
    expose:
      - "2379-2380"
    ports:
      - "0.0.0.0:2379-2380:2379-2380"
    networks:
      - backend
    image: quay.io/coreos/etcd:v3.3.10
    command:
      - "etcd"
      - "--name"
      - "etcd01"
      - "--listen-peer-urls"
      - "http://0.0.0.0:2380"
      - "--listen-client-urls"
      - "http://0.0.0.0:2379"
      - "--advertise-client-urls"
      - "http://etcd01:2379"
      - "--initial-cluster-token"
      - "etcd-cluster-1"
      - "--initial-advertise-peer-urls"
      - "http://etcd01:2380"
      - "--initial-cluster"
      - "etcd01=http://etcd01:2380"
      - "--initial-cluster-state"
      - "new"
      - "--data-dir"
      - "/var/lib/etcd"
networks:
  backend:
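For reference, a rough sketch of bringing this two-container stack up by hand, mirroring what the new test script below does; it assumes an m3dbnode_integration image tagged with the current git revision has already been built locally:

# Sketch only; assumes the m3dbnode_integration image for ${REVISION} exists.
export REVISION=$(git rev-parse HEAD)
COMPOSE_FILE=scripts/docker-integration-tests/dedicated_etcd_embedded_coordinator/docker-compose.yml

# Start etcd first, then the dbnode, which also serves the embedded
# coordinator on 7201 and the debug endpoint on 9004.
docker-compose -f "${COMPOSE_FILE}" up -d --renew-anon-volumes etcd01
docker-compose -f "${COMPOSE_FILE}" up -d --renew-anon-volumes dbnode01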
New file: scripts/docker-integration-tests/dedicated_etcd_embedded_coordinator/m3dbnode.yml
@@ -0,0 +1,85 @@
coordinator:
  listenAddress:
    type: "config"
    value: "0.0.0.0:7201"

  logging:
    level: info

  metrics:
    scope:
      prefix: "coordinator"
    prometheus:
      handlerPath: /metrics
      listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
    sanitization: prometheus
    samplingRate: 1.0
    extended: none

  tagOptions:
    idScheme: quoted

db:
  logging:
    level: info

  metrics:
    prometheus:
      handlerPath: /metrics
    sanitization: prometheus
    samplingRate: 1.0
    extended: detailed

  listenAddress: 0.0.0.0:9000
  clusterListenAddress: 0.0.0.0:9001
  httpNodeListenAddress: 0.0.0.0:9002
  httpClusterListenAddress: 0.0.0.0:9003
  debugListenAddress: 0.0.0.0:9004

  hostID:
    resolver: environment
    envVarName: M3DB_HOST_ID

  client:
    writeConsistencyLevel: majority
    readConsistencyLevel: unstrict_majority

  gcPercentage: 100

  writeNewSeriesAsync: true
  writeNewSeriesLimitPerSecond: 1048576
  writeNewSeriesBackoffDuration: 2ms

  bootstrap:
    # Intentionally disable peers bootstrapper to ensure it doesn't interfere with test.
    bootstrappers:
      - filesystem
      - commitlog
      - uninitialized_topology
    commitlog:
      returnUnfulfilledForCorruptCommitLogFiles: false

  cache:
    series:
      policy: lru

  commitlog:
    flushMaxBytes: 524288
    flushEvery: 1s
    queue:
      calculationType: fixed
      size: 2097152

  fs:
    filePathPrefix: /var/lib/m3db

  config:
    service:
      env: foo-namespace/foo-cluster
      zone: bar-zone
      service: m3db
      cacheDir: /var/lib/m3kv
      etcdClusters:
        - zone: bar-zone
          endpoints:
            - etcd01:2379
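Because this config points both the dbnode and its embedded coordinator at the custom environment foo-namespace/foo-cluster and zone bar-zone, the placement can be inspected through the embedded coordinator with no special headers. A hedged sketch of the same check the retry loop in common.sh performs, assuming the containers above are running and jq is installed:

# Query the embedded coordinator for the placement and confirm the
# dbnode01 instance is registered.
curl -sSf http://0.0.0.0:7201/api/v1/placement | \
  jq -e '.placement.instances.dbnode01.id == "dbnode01"'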
New file: docker integration test for the dedicated etcd / embedded coordinator setup
@@ -0,0 +1,43 @@
#!/usr/bin/env bash

set -xe

source $GOPATH/src/github.com/m3db/m3/scripts/docker-integration-tests/common.sh
REVISION=$(git rev-parse HEAD)
SCRIPT_PATH=$GOPATH/src/github.com/m3db/m3/scripts/docker-integration-tests/dedicated_etcd_embedded_coordinator
COMPOSE_FILE=$SCRIPT_PATH/docker-compose.yml
export REVISION

echo "Run etcd and m3dbnode containers"
docker-compose -f ${COMPOSE_FILE} up -d --renew-anon-volumes etcd01
docker-compose -f ${COMPOSE_FILE} up -d --renew-anon-volumes dbnode01

DUMP_DIR="${SCRIPT_PATH}/dump"
DUMP_ZIP="${DUMP_DIR}/dump.zip"

function defer {
  if [ -d $DUMP_DIR ]; then
    rm -rf $DUMP_DIR
  fi
  docker-compose -f ${COMPOSE_FILE} down || echo "unable to shutdown containers" # CI fails to stop all containers sometimes
}
trap defer EXIT

# Should be able to setup single db node with custom environment and zone
# using the embedded coordinator without special headers
DBNODE_ID="dbnode01" ZONE="bar-zone" setup_single_m3db_node

echo "Test the debug dump endpoint works with custom env and zone"
mkdir -p $DUMP_DIR
curl -s http://localhost:9004/debug/dump > $DUMP_ZIP

unzip -d $DUMP_DIR $DUMP_ZIP

EXPECTED_FILES="cpu.prof heap.prof goroutine.prof host.json namespace.json placement-m3db.json"
for file in $(echo "${EXPECTED_FILES}" | tr " " "\n"); do
  if ! [ -f "${DUMP_DIR}/${file}" ]; then
    echo "Expected ${file} but not in dump:"
    echo $(ls $DUMP_DIR)
    exit 1
  fi
done
@@ -63,7 +63,7 @@ function prometheus_remote_write {
  network=$(docker network ls --format '{{.ID}}' | tail -n 1)
  out=$((docker run -it --rm --network $network \
    $PROMREMOTECLI_IMAGE \
-    -u http://coordinator01:7201/api/v1/prom/remote/write \
+    -u http://dbnode01:7201/api/v1/prom/remote/write \
    -t __name__:${metric_name} \
    -h "M3-Metrics-Type: ${metrics_type}" \
    -h "M3-Storage-Policy: ${metrics_storage_policy}" \
2 changes: 1 addition & 1 deletion src/cluster/client/etcd/client.go
@@ -260,7 +260,7 @@ func (c *csclient) etcdClientGen(zone string) (*clientv3.Client, error) {

	cluster, ok := c.opts.ClusterForZone(zone)
	if !ok {
-		return nil, fmt.Errorf("no etcd cluster found for zone %s", zone)
+		return nil, fmt.Errorf("no etcd cluster found for zone: %s", zone)
	}

	err := c.retrier.Attempt(func() error {
43 changes: 35 additions & 8 deletions src/dbnode/server/server.go
@@ -39,6 +39,7 @@ import (
	"github.com/m3db/m3/src/cluster/kv"
	"github.com/m3db/m3/src/cluster/kv/util"
	"github.com/m3db/m3/src/cmd/services/m3dbnode/config"
+	queryconfig "github.com/m3db/m3/src/cmd/services/m3query/config"
	"github.com/m3db/m3/src/dbnode/client"
	"github.com/m3db/m3/src/dbnode/encoding"
	"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
@@ -68,6 +69,8 @@ import (
	"github.com/m3db/m3/src/dbnode/x/xio"
	"github.com/m3db/m3/src/m3ninx/postings"
	"github.com/m3db/m3/src/m3ninx/postings/roaring"
+	"github.com/m3db/m3/src/query/api/v1/handler"
+	"github.com/m3db/m3/src/query/api/v1/handler/placement"
	xconfig "github.com/m3db/m3/src/x/config"
	"github.com/m3db/m3/src/x/context"
	xdebug "github.com/m3db/m3/src/x/debug"
@@ -316,14 +319,6 @@ func Run(runOpts RunOptions) {

	opentracing.SetGlobalTracer(tracer)

-	debugWriter, err := xdebug.NewZipWriterWithDefaultSources(
-		cpuProfileDuration,
-		iopts,
-	)
-	if err != nil {
-		logger.Error("unable to create debug writer", zap.Error(err))
-	}
-
	if cfg.Index.MaxQueryIDsConcurrency != 0 {
		queryIDsWorkerPool := xsync.NewWorkerPool(cfg.Index.MaxQueryIDsConcurrency)
		queryIDsWorkerPool.Init()
@@ -603,6 +598,38 @@ func Run(runOpts RunOptions) {
	logger.Info("node httpjson: listening", zap.String("address", cfg.HTTPNodeListenAddress))

	if cfg.DebugListenAddress != "" {
+		var debugWriter xdebug.ZipWriter
+		handlerOpts, err := placement.NewHandlerOptions(syncCfg.ClusterClient,
+			queryconfig.Configuration{}, nil, iopts)
+		if err != nil {
+			logger.Warn("could not create handler options for debug writer", zap.Error(err))
+		} else {
+			envCfg, err := cfg.EnvironmentConfig.Services.SyncCluster()
+			if err != nil || envCfg.Service == nil {
+				logger.Warn("could not get cluster config for debug writer",
+					zap.Error(err),
+					zap.Bool("envCfgServiceIsNil", envCfg.Service == nil))
+			} else {
+				debugWriter, err = xdebug.NewPlacementAndNamespaceZipWriterWithDefaultSources(
+					cpuProfileDuration,
+					syncCfg.ClusterClient,
+					handlerOpts,
+					[]handler.ServiceNameAndDefaults{
+						{
+							ServiceName: handler.M3DBServiceName,
+							Defaults: []handler.ServiceOptionsDefault{
+								handler.WithDefaultServiceEnvironment(envCfg.Service.Env),
+								handler.WithDefaultServiceZone(envCfg.Service.Zone),
+							},
+						},
+					},
+					iopts)
+				if err != nil {
+					logger.Error("unable to create debug writer", zap.Error(err))
+				}
+			}
+		}
+
		go func() {
			mux := http.DefaultServeMux
			if debugWriter != nil {
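With the placement and namespace sources now wired into the dbnode's debug writer, the debug endpoint (debugListenAddress 0.0.0.0:9004 in the config above) should serve an archive containing placement-m3db.json and namespace.json alongside the usual profiles. A small sketch of verifying that by hand, assuming the containers from the compose file are running and unzip is available; the expected entries mirror EXPECTED_FILES in the new integration test:

curl -s http://localhost:9004/debug/dump > /tmp/dump.zip
unzip -l /tmp/dump.zip
# Expect cpu.prof, heap.prof, goroutine.prof, host.json,
# namespace.json and placement-m3db.json to be listed.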