diff --git a/netlify.toml b/netlify.toml index 9269963a95..5945b48124 100644 --- a/netlify.toml +++ b/netlify.toml @@ -32,6 +32,14 @@ from = "/docs/how_to/single_node" to = "docs/quickstart" +[[redirects]] + from = "/docs/how_to/kubernetes" + to = "/docs/cluster/kubernetes_cluster" + +[[redirects]] + from = "/docs/quickstart/kubernetes" + to = "/docs/cluster/kubernetes_cluster" + # TODO: Fix this with new content type [[redirects]] from = "/talks" diff --git a/site/config/_default/config.toml b/site/config/_default/config.toml index 4ebb6db2e3..c1b342376d 100644 --- a/site/config/_default/config.toml +++ b/site/config/_default/config.toml @@ -38,8 +38,18 @@ metaDataFormat = "yaml" # baseURL = "/" languageCode = "en-US" defaultContentLanguage = "en" +defaultContentLanguageInSubdir = true # staticDir = ["static"] -defaultContentLanguageInSubdir= true +# defaultContentLanguageInSubdir= true + +[languages] +[languages.en] +title = "M3" +description = "The fully open source metrics platform built on M3DB" +languageName = "English" +# Weight used for sorting. +weight = 1 +languagedirection = "ltr" # Highlighting config. 
pygmentsCodeFences = true @@ -49,7 +59,7 @@ pygmentsUseClassic = false #pygmentsOptions = "linenos=table" # See https://help.farbox.com/pygments.html # pygmentsStyle = "vs" -enableGitInfo = true +# enableGitInfo = true [permalinks] "/" = "/docs/:section/:filename/" diff --git a/site/config/development/config.toml b/site/config/development/config.toml index 5f22fa6660..95d51cbfcc 100644 --- a/site/config/development/config.toml +++ b/site/config/development/config.toml @@ -29,162 +29,4 @@ [[module.mounts]] source = "archetypes" - target = "archetypes" - -ignoreFiles = [ "\\.ttf$", "\\.woff$", "\\.woff2$", "\\.eot$" ] - -[permalinks] - "/" = "/docs/:section/:title/" - "faqs" = "/docs/:section/:title/" - "glossaries" = "/docs/:section/:title/" - "how_tos" = "/docs/:section/:title/" - "integrations" = "/docs/:section/:title/" - "m3coordinators" = "/docs/:section/:title/" - "m3dbs" = "/docs/:section/:title/" - "m3querys" = "/docs/:section/:title/" - "operational_guides" = "/docs/:section/:title/" - "overviews" = "/docs/:section/:title/" - "quickstarts" = "/docs/:section/:title/" - "troubleshootings" = "/docs/:section/:title/" - -# theme = "docs-theme" -# baseURL = "/" -languageCode = "en-US" -defaultContentLanguage = "en" -# staticDir = ["static"] - -metaDataFormat = "yaml" -defaultContentLanguageInSubdir= true - -# Highlighting config. -pygmentsCodeFences = true -pygmentsUseClasses = false -# Use the new Chroma Go highlighter in Hugo. -pygmentsUseClassic = false -#pygmentsOptions = "linenos=table" -# See https://help.farbox.com/pygments.html -# pygmentsStyle = "vs" - -enableGitInfo = true - -[frontmatter] -# date = ["date", ":filename", "publishDate", "lastmod"] - -# Image processing configuration. -[imaging] -resampleFilter = "CatmullRom" -quality = 75 -anchor = "smart" - -[services] -[services.googleAnalytics] -# Comment out the next line to disable GA tracking. Also disables the feature described in [params.ui.feedback]. 
-# id = "UA-00000000-0" - -#fullversion = "v1.18.0" -#version = "v1.18" -githubbranch = "master" -docsbranch = "master" -deprecated = false -#currentUrl = "https://kubernetes.io/home/" -#nextUrl = "https://kubernetes-io-vnext-staging.netlify.com/" -githubWebsiteRepo = "https://github.com/m3db/m3" -githubWebsiteRaw = "raw.githubusercontent.com/m3db/m3" - -# Enable Algolia DocSearch -# algolia_docsearch = false - -# Enable Lunr.js offline search -offlineSearch = false - -[params] - # Prefix URL to edit current page. Will display an "Edit this page" button on top right hand corner of every page. - # Useful to give opportunity to people to create merge request for your doc. - # See the config.toml file from this documentation site to have an example. - # TODO: pattern to branch? - # TODO: bring back - # editURL = "https://github.com/m3db/m3/tree/master/site/content/" - # Shows a checkmark for visited pages on the menu - showVisitedLinks = false - # Disable search function. It will hide search bar - disableSearch = false - # Javascript and CSS cache are automatically busted when new version of site is generated. - # Set this to true to disable this behavior (some proxies don't handle well this optimization) - disableAssetsBusting = false - # Set this to true to disable copy-to-clipboard button for inline code. - disableInlineCopyToClipBoard = true - # A title for shortcuts in menu is set by default. Set this to true to disable it. - disableShortcutsTitle = false - # When using mulitlingual website, disable the switch language button. - disableLanguageSwitchingButton = false - # Hide breadcrumbs in the header and only show the current page title - disableBreadcrumb = true - # Hide Next and Previous page buttons normally displayed full height beside content - disableNextPrev = true - # Order sections in menu by "weight" or "title". Default to "weight" - ordersectionsby = "weight" - # Change default color scheme with a variant one. Can be "red", "blue", "green". 
- themeVariant = "blue" - twitter = "m3db_io" - disableHomeIcon = true - - [params.api] - localCordinator = "http://localhost:7201/" - apiEndpoint = "api/v1/" - - # TODO: Might need to refactor some of the K8s shortcodes later -# Add your release versions here -# TODO: Enable when ready -# [[params.versions]] -# version = "1.0" -# url = "https://master.kubeflow.org" -# [[params.versions]] -# version = "0.9" -# url = "https://master.kubeflow.org" - -# version_menu = "Versions" - -# TODO: Do not like doing this really -[markup] - [markup.goldmark] - [markup.goldmark.renderer] - unsafe = true - [markup.goldmark.parser] - attribute = true - autoHeadingID = true - autoHeadingIDType = "github" - [markup.tableOfContents] - endLevel = 3 - ordered = false - startLevel = 2 - [markup.goldmark.extensions] - definitionList = true - footnote = true - linkify = true - strikethrough = true - table = true - taskList = true - typographer = true - -[[menu.shortcuts]] -pre = "

More

" -name = " " -identifier = "ds" -url = "https://github.com/m3db/m3" -weight = 10 - -[[menu.shortcuts]] -name = " " -url = "https://bit.ly/m3slack" -weight = 11 - -[[menu.shortcuts]] -name = " " -url = "https://groups.google.com/forum/#!forum/m3db" -weight = 12 - -[outputs] -home = [ "HTML", "RSS", "JSON"] -page = [ "HTML"] -section = [ "HTML"] -chapter = [ "HTML"] + target = "archetypes" \ No newline at end of file diff --git a/site/config/production/config.toml b/site/config/production/config.toml index 4916bedee3..0c854d1dae 100644 --- a/site/config/production/config.toml +++ b/site/config/production/config.toml @@ -65,7 +65,7 @@ pygmentsUseClassic = false # See https://help.farbox.com/pygments.html # pygmentsStyle = "vs" -enableGitInfo = true +# enableGitInfo = true [frontmatter] # date = ["date", ":filename", "publishDate", "lastmod"] diff --git a/site/content/_index.md b/site/content/_index.md index 7272ba3512..cfebb18ed8 100644 --- a/site/content/_index.md +++ b/site/content/_index.md @@ -28,7 +28,7 @@ M3 has several features, provided as discrete components, which make it an ideal Getting started with M3 is as easy as following one of the How-To guides. 
- [Single M3DB node deployment](/docs/quickstart) -- [Clustered M3DB deployment](/docs/how_to/cluster_hard_way) +- [Clustered M3DB deployment](/docs/cluster) - [M3DB on Kubernetes](/docs/operator) - [Isolated M3Query on deployment](/docs/how_to/query) diff --git a/site/content/cluster/_index.md b/site/content/cluster/_index.md new file mode 100644 index 0000000000..af52c427a7 --- /dev/null +++ b/site/content/cluster/_index.md @@ -0,0 +1,6 @@ ++++ +title = "Create a Cluster" +date = 2020-04-01T19:26:56-04:00 +weight = 2 +chapter = true ++++ \ No newline at end of file diff --git a/site/content/cluster/binaries_cluster.md b/site/content/cluster/binaries_cluster.md new file mode 100644 index 0000000000..b95a87306b --- /dev/null +++ b/site/content/cluster/binaries_cluster.md @@ -0,0 +1,276 @@ +--- +linktitle: "Binaries" +weight: 2 +--- + +# Creating an M3 Cluster with Binaries + +This guide shows you the steps involved in creating an M3 cluster using M3 binaries, typically you would automate this with infrastructure as code tools such as Terraform or [Kubernetes](/docs/operator). + +{{% notice note %}} +This guide assumes you have read the [quickstart](/docs/quickstart/binaries), and builds upon the concepts in that guide. +{{% /notice %}} + +## M3 Architecture + +Here's a typical M3 deployment: + + + +![Typical Deployment](/cluster_architecture.png) + +An M3 deployment typically has two main node types: + +- **Coordinator node**: `m3coordinator` nodes coordinate reads and writes across all nodes in the cluster. It's a lightweight process, and does not store any data. This role typically runs alongside a Prometheus instance, or is part of a collector agent such as statsD. +- **Storage node**: The `m3dbnode` processes are the workhorses of M3, they store data and serve reads and writes. 
+ +A `m3coordinator` node exposes two ports: + +- `7201` to manage the cluster topology, you make most API calls to this endpoint +- `7203` for Prometheus to scrape the metrics produced by M3DB and M3Coordinator + +## Prerequisites + +M3 uses [etcd](https://etcd.io/) as a distributed key-value storage for the following functions: + +- Update cluster configuration in realtime +- Manage placements for distributed and sharded clusters + +## Download and Install a Binary + +You can download the latest release as [pre-compiled binaries from the M3 GitHub page](https://github.com/m3db/m3/releases/latest). Inside the expanded archive are binaries for `m3dbnode`, which combines a coordinator and storage node, and a binary for `m3coordinator`, which is a standalone coordinator node. + +## Build from Source + +### Prerequisites + +- [Go](https://golang.org/dl/) +- [Make](https://www.gnu.org/software/make/) + +### Build + +[Clone the codebase](https://github.com/m3db/m3) and run `make m3dbnode` to generate a binary for a combination coordinator and storage node, or `make m3coordinator` to generate a binary for a standalone coordinator node. + +## Provision a Host + +Enough background, let's create a real cluster! + +M3 in production can run on local or cloud-based VMs, or bare-metal servers. M3 supports all popular Linux distributions (Ubuntu, RHEL, CentOS), and [let us know](https://github.com/m3db/m3/issues/new/choose) if you have any issues with your preferred distribution. + +### Network + +{{% notice tip %}} +If you use AWS or GCP, we recommend you use static IPs so that if you need to replace a host, you don't have to update configuration files on all the hosts, but decommission the old seed node and provision a new seed node with the same host ID and static IP that the old seed node had. 
If you're using AWS you can use an [Elastic Network Interface](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html) on a Virtual Private Cloud (VPC) and for GCP you can use an [internal static IP address](https://cloud.google.com/compute/docs/ip-addresses/reserve-static-internal-ip-address). +{{% /notice %}} + +This example creates three static IP addresses for three storage nodes, using the embedded coordinator. + +This guide assumes you have host names configured, i.e., running `hostname` on a host in the cluster returns the host ID you use when creating the cluster placement. + +{{% notice tip %}} +When using GCP the name of your instance is the host name. When you create an instance, click _Management, disks, networking, SSH keys_, under _Networking_, click the default interface, click the _Primary internal IP_ drop down, select _Reserve a static internal IP address_, give it an appropriate name and description, and use _Assign automatically_. +{{% /notice %}} + +{{% notice tip %}} +When using AWS, you can use the host name supplied for the provisioned VM as your host ID, or use the `environment` host ID resolver and pass the host ID when launching the database process with an environment variable. +{{% /notice %}} + +For example, if you used `M3DB_HOST_ID` for the environment variable name, use the following in your configuration: + +```yaml +hostID: + resolver: config + value: ${M3DB_HOST_ID:""} +``` + +Then start the `m3dbnode` process with: + +{{< tabs name="start_container" >}} +{{% tab name="Pre-built binary" %}} + +```shell +M3DB_HOST_ID=m3db001 m3dbnode -f +``` + +{{% notice info %}} +Depending on your operating system setup, you might need to prefix the command with `sudo`. 
+{{% /notice %}} + +{{% /tab %}} +{{% tab name="Output" %}} + + + + + +![Docker pull and run](/docker-install.gif) + +{{% /tab %}} +{{< /tabs >}} + +### Kernel Configuration + +Depending on the default limits of your bare-metal machine or VM, M3 may need some Kernel tweaks to run as efficiently as possible, and [we recommend you review those](/docs/operational_guide/kernel_configuration) before running M3 in production. + +## Configuration files + +You configure each M3 component by passing the location of a YAML file with the `-f` argument. + +The file groups configuration into `coordinator` or `db` sections that represent the `M3Coordinator` and `M3DB` instances of single-node cluster. + +{{% notice tip %}} +You can find more information on configuring M3DB in the [operational guides section](/docs/operational_guide/). +{{% /notice %}} + +{{% notice note %}} +The steps in this guide have the following 3 seed nodes, you need to change your configuration to suit the details of yours, including the details of an etcd cluster in the `m3dbCluster` > `endpoints` section of the M3 configuration file. +{{% /notice %}} + +- m3db001 (Region=us-east1, Zone=us-east1-a, Static IP=10.142.0.1) +- m3db002 (Region=us-east1, Zone=us-east1-b, Static IP=10.142.0.2) +- m3db003 (Region=us-east1, Zone=us-east1-c, Static IP=10.142.0.3) + +### M3DB node + +[Start with the M3DB configuration template](https://github.com/m3db/m3/blob/master/src/dbnode/config/m3dbnode-cluster-template.yml) and change it to suit your cluster. 
+ +The example below connects to an etcd instance in a zone called `eu-1` +This example updates the `service` and `seedNodes` sections to match the node details above: + + + +```yaml +config: + discovery: + type: m3db_cluster + m3dbCluster: + env: default_env + endpoints: + - 10.142.0.1:2379 + - 10.142.0.2:2379 + - 10.142.0.3:2379 +``` + +## Start the storage nodes + +Start each seed node in the cluster using the same configuration file, and adjusting the `M3DB_HOST_ID` value to match the host name. + +```shell +M3DB_HOST_ID=m3db001 m3dbnode -f +``` + +{{% notice tip %}} +You can daemon-ize the node startup process using your favorite utility such as systemd, init.d, or supervisor. +{{% /notice %}} + +## Create Namespace and Initialize Placement + + + + + +This guide uses the _{{% apiendpoint %}}database/create_ endpoint that creates a namespace, and the placement if it doesn't already exist based on the `type` argument. + +You can create [placements](/docs/operational_guide/placement_configuration/) and [namespaces](/docs/operational_guide/namespace_configuration/#advanced-hard-way) separately if you need more control over their settings. + +In the example below, the configuration for each host matches the details outlined above for the three nodes used. `isolationGroup` specifies how the cluster places shards to avoid more than one replica of a shard appearing in the same replica group. You should use at least as many isolation groups as your replication factor. This example uses the availability zones `us-east1-a`, `us-east1-b`, `us-east1-c` as the isolation groups which matches our replication factor of 3. [Read more details in this guide](/docs/operational_guide/replication_and_deployment_in_zones). 
+ +{{< tabs name="database_create" >}} +{{% tab name="Command" %}} + + + +```shell +curl -X POST {{% apiendpoint %}}database/create -d '{ + "type": "cluster", + "namespaceName": "default", + "retentionTime": "48h", + "numShards": "1024", + "replicationFactor": "3", + "hosts": [ + { + "id": "m3db001", + "isolationGroup": "us-east1-a", + "zone": "embedded", + "weight": 100, + "address": "10.142.0.1", + "port": 9000 + }, + { + "id": "m3db002", + "isolationGroup": "us-east1-b", + "zone": "embedded", + "weight": 100, + "address": "10.142.0.2", + "port": 9000 + }, + { + "id": "m3db003", + "isolationGroup": "us-east1-c", + "zone": "embedded", + "weight": 100, + "address": "10.142.0.3", + "port": 9000 + } + ] +}' +``` + +{{% /tab %}} +{{% tab name="Output" %}} + +```shell +20:10:12.911218[I] updating database namespaces [{adds [default]} {updates []} {removals []}] +20:10:13.462798[I] node tchannelthrift: listening on 0.0.0.0:9000 +20:10:13.463107[I] cluster tchannelthrift: listening on 0.0.0.0:9001 +20:10:13.747173[I] node httpjson: listening on 0.0.0.0:9002 +20:10:13.747506[I] cluster httpjson: listening on 0.0.0.0:9003 +20:10:13.747763[I] bootstrapping shards for range starting ... +... +20:10:13.757834[I] bootstrap finished [{namespace metrics} {duration 10.1261ms}] +20:10:13.758001[I] bootstrapped +20:10:14.764771[I] successfully updated topology to 3 hosts +``` + +{{% /tab %}} +{{< /tabs >}} + +If you need to setup multiple namespaces, you can run the command above multiple times with different namespace configurations. + +### Ready a Namespace + +Once a namespace has finished bootstrapping, you must mark it as ready before receiving traffic by using the _{{% apiendpoint %}}namespace/ready_. 
+ +{{< tabs name="ready_namespaces" >}} +{{% tab name="Command" %}} + +{{< codeinclude file="docs/includes/quickstart/ready-namespace.sh" language="shell" >}} + +{{% /tab %}} +{{% tab name="Output" %}} + +```json +{ + "ready": true +} +``` + +{{% /tab %}} +{{< /tabs >}} + +### Replication factor + +We recommend a replication factor of **3**, with each replica spread across failure domains such as a physical server rack, data center or availability zone. Read our [replication factor recommendations](/docs/operational_guide/replication_and_deployment_in_zones) for more details. + +### Shards + +Read the [placement configuration guide](/docs/operational_guide/placement_configuration) to determine the appropriate number of shards to specify. + +{{< fileinclude file="cluster-common-steps.md" >}} + + diff --git a/site/content/cluster/create-database.sh b/site/content/cluster/create-database.sh new file mode 100644 index 0000000000..104dce0586 --- /dev/null +++ b/site/content/cluster/create-database.sh @@ -0,0 +1,6 @@ +#!/bin/bash +curl -X POST http://localhost:7201/api/v1/database/create -d '{ + "type": "local", + "namespaceName": "default", + "retentionTime": "12h" +}' | jq . diff --git a/site/content/cluster/docker_cluster.md b/site/content/cluster/docker_cluster.md new file mode 100644 index 0000000000..a99aa8618c --- /dev/null +++ b/site/content/cluster/docker_cluster.md @@ -0,0 +1,46 @@ +--- +linktitle: "Docker" +weight: 3 +draft: true +--- + +# Creating an M3 Cluster with Docker + +This guide shows you the steps involved in creating an M3 cluster using Docker containers, typically you would automate this with infrastructure as code tools such as Terraform or Kubernetes. + +{{% notice note %}} +This guide assumes you have read the [quickstart](/docs/quickstart), and builds upon the concepts in that guide. 
+{{% /notice %}} + +## M3 Architecture + +Here's a typical M3 deployment: + + + +![Typical Deployment](/cluster_architecture.png) + +An M3 deployment typically has two main node types: + +- **Coordinator node**: `m3coordinator` nodes coordinate reads and writes across all nodes in the cluster. It's a lightweight process, and does not store any data. This role typically runs alongside a Prometheus instance, or is part of a collector agent. +- **Storage node**: The `m3dbnode` processes are the workhorses of M3, they store data and serve reads and writes. + +And exposes two ports: + +- `7201` to manage the cluster topology, you make most API calls to this endpoint +- `7203` for Prometheus to scrape the metrics produced by M3DB and M3Coordinator + +## Prerequisites + +M3 uses [etcd](https://etcd.io/) as a distributed key-value storage for the following functions: + +- Update cluster configuration in realtime +- Manage placements for distributed and sharded clusters + +{{% notice note %}} +M3 storage nodes have an embedded etcd server you can use for small test clusters which we call a **Seed Node** when run this way. See the `etcdClusters` section of [this example configuration file](https://github.com/m3db/m3/blob/master/src/dbnode/config/m3dbnode-local-etcd.yml). +{{% /notice %}} + +## Download and Install a Binary + +You can download the latest release as [pre-compiled binaries from the M3 GitHub page](https://github.com/m3db/m3/releases/latest). Inside the expanded archive are binaries for `m3dbnode`, which combines a coordinator and storage node, and a binary for `m3coordinator`, which is a standalone coordinator node. 
diff --git a/site/content/cluster/kubernetes_cluster.md b/site/content/cluster/kubernetes_cluster.md new file mode 100644 index 0000000000..91ae49e0a3 --- /dev/null +++ b/site/content/cluster/kubernetes_cluster.md @@ -0,0 +1,87 @@ +--- +linktitle: "Kubernetes" +weight: 1 +--- + +# Creating an M3 Cluster with Kubernetes + +This guide shows you how to create an M3 cluster of 3 nodes, designed to run locally on the same machine. It is designed to show you how M3 and Kubernetes can work together, but not as a production example. + +{{% notice note %}} +This guide assumes you have read the [quickstart](/docs/quickstart/docker), and builds upon the concepts in that guide. +{{% /notice %}} + +{{% notice tip %}} +We recommend you use [our Kubernetes operator](/docs/operator/operator) to deploy M3 to a cluster. It is a more streamlined setup that uses [custom resource definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) to automatically handle operations such as managing cluster placements. +{{% /notice %}} + +## Prerequisites + +- A running Kubernetes cluster. + - For local testing, you can use [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/), [Docker desktop](https://www.docker.com/products/docker-desktop), or [we have a script](https://raw.githubusercontent.com/m3db/m3db-operator/master/scripts/kind-create-cluster.sh) you can use to start a 3 node cluster with [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +## Create An etcd Cluster + +M3 stores its cluster placements and runtime metadata in [etcd](https://etcd.io) and needs a running cluster to communicate with. + +We have example services and stateful sets you can use, but feel free to use your own configuration and change any later instructions accordingly. 
+ +```shell +kubectl apply -f https://raw.githubusercontent.com/m3db/m3db-operator/master/example/etcd/etcd-minikube.yaml +``` + +If the etcd cluster is running on your local machine, update your _/etc/hosts_ file to match the domains specified in the `etcd` `--initial-cluster` argument. For example to match the `StatefulSet` declaration in the _etcd-minikube.yaml_ above, that is: + +```text +$(minikube ip) etcd-0.etcd +$(minikube ip) etcd-1.etcd +$(minikube ip) etcd-2.etcd +``` + +Verify that the cluster is running with something like the Kubernetes dashboard, or the command below: + +```shell +kubectl exec etcd-0 -- env ETCDCTL_API=3 etcdctl endpoint health +``` + +## Install the Operator + +Install the bundled operator manifests in the current namespace: + +```shell +kubectl apply -f https://raw.githubusercontent.com/m3db/m3db-operator/master/bundle.yaml +``` + +## Create an M3 Cluster + +The following creates an M3 cluster with 3 replicas of data across 256 shards that connects to the 3 available etcd endpoints. + +It creates three isolated groups for nodes, each with one node instance. In a production environment you can use a variety of different options to define how nodes are spread across groups based on factors such as resource capacity, or location. + +It creates namespaces in the cluster with the `namespaces` parameter. You can use M3-provided presets, or define your own. This example creates a namespace with the `10s:2d` preset. + +The cluster derives pod identity from the `podIdentityConfig` parameter, which in this case is the UID of the Pod. + +[Read more details on all the parameters in the Operator API docs](https://operator.m3db.io/api/). 
+ +```shell +kubectl apply -f https://raw.githubusercontent.com/m3db/m3db-operator/master/example/m3db-local.yaml +``` + +Verify that the cluster is running with something like the Kubernetes dashboard, or the command below: + +```shell +kubectl exec simple-cluster-rep2-0 -- curl -sSf localhost:9002/health +``` + +## Deleting a Cluster + +Delete the M3 cluster using kubectl: + +```shell +kubectl delete m3dbcluster simple-cluster +``` + +By default, the operator uses [finalizers](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#finalizers) to delete the placement and namespaces associated with a cluster before the custom resources. If you do not want this behavior, set `keepEtcdDataOnDelete` to `true` in the cluster configuration. + +{{< fileinclude file="cluster-common-steps.md" >}} \ No newline at end of file diff --git a/site/content/cluster/ready-namespace.sh b/site/content/cluster/ready-namespace.sh new file mode 100644 index 0000000000..bd70876ad4 --- /dev/null +++ b/site/content/cluster/ready-namespace.sh @@ -0,0 +1,4 @@ +#!/bin/bash +curl -X POST http://localhost:7201/api/v1/services/m3db/namespace/ready -d '{ + "name": "default" +}' | jq . 
diff --git a/site/content/cluster/write-metrics-1.sh b/site/content/cluster/write-metrics-1.sh new file mode 100755 index 0000000000..453100c993 --- /dev/null +++ b/site/content/cluster/write-metrics-1.sh @@ -0,0 +1,11 @@ +#!/bin/bash +curl -X POST http://localhost:7201/api/v1/json/write -d '{ + "tags": + { + "__name__": "third_avenue", + "city": "new_york", + "checkout": "1" + }, + "timestamp": '\"$(date "+%s")\"', + "value": 3347.26 +}' diff --git a/site/content/cluster/write-metrics-2.sh b/site/content/cluster/write-metrics-2.sh new file mode 100755 index 0000000000..eea2d30348 --- /dev/null +++ b/site/content/cluster/write-metrics-2.sh @@ -0,0 +1,11 @@ +#!/bin/bash +curl -X POST http://localhost:7201/api/v1/json/write -d '{ + "tags": + { + "__name__": "third_avenue", + "city": "new_york", + "checkout": "1" + }, + "timestamp": '\"$(date "+%s")\"', + "value": 5347.26 +}' diff --git a/site/content/cluster/write-metrics-3.sh b/site/content/cluster/write-metrics-3.sh new file mode 100755 index 0000000000..e6b3a5f046 --- /dev/null +++ b/site/content/cluster/write-metrics-3.sh @@ -0,0 +1,11 @@ +#!/bin/bash +curl -X POST http://localhost:7201/api/v1/json/write -d '{ + "tags": + { + "__name__": "third_avenue", + "city": "new_york", + "checkout": "1" + }, + "timestamp": '\"$(date "+%s")\"', + "value": 7347.26 +}' diff --git a/site/content/docs.md b/site/content/docs.md deleted file mode 100644 index 4446d83bf7..0000000000 --- a/site/content/docs.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: M3 Introduction -weight: 1 -permalink: /docs/ ---- - - -## About - -After using open-source metrics solutions and finding issues with them at scale – such as reliability, cost, and -operational complexity – [M3](https://github.com/m3db/m3) was created from the ground up to provide Uber with a -native, distributed time series database, a highly-dynamic and performant aggregation service, a query engine, and -other supporting infrastructure. 
- -## Key Features - -M3 has several features, provided as discrete components, which make it an ideal platform for time series data at scale: - -- A distributed time series database, [M3DB](/docs/m3db/), that provides scalable storage for time series data and a reverse index. -- A sidecar process, [M3Coordinator](/docs/integrations/prometheus), that allows M3DB to act as the long-term storage for Prometheus. -- A distributed query engine, [M3Query](/docs/m3query), with native support for PromQL and Graphite (M3QL coming soon). - -- An aggregation tier, M3Aggregator, that runs as a dedicated metrics aggregator/downsampler allowing metrics to be stored at various retentions at different resolutions. - -## Getting Started - -**Note:** Make sure to read our [Operational Guides](/docs/operational_guide) before running in production! - -Getting started with M3 is as easy as following one of the How-To guides. - -- [Single M3DB node deployment](/docs/quickstart) -- [Clustered M3DB deployment](/docs/how_to/cluster_hard_way) -- [M3DB on Kubernetes](/docs/operator) -- [Isolated M3Query on deployment](/docs/how_to/query) - -## Support - -For support with any issues, questions about M3 or its operation, or to leave any comments, the team can be -reached in a variety of ways: - -- [Slack (main chat channel)](http://bit.ly/m3slack) -- [Email](https://groups.google.com/forum/#!forum/m3db) -- [Github issues](https://github.com/m3db/m3/issues) diff --git a/site/content/how_to/cluster_hard_way.md b/site/content/how_to/cluster_hard_way.md deleted file mode 100644 index 9e714d1c78..0000000000 --- a/site/content/how_to/cluster_hard_way.md +++ /dev/null @@ -1,238 +0,0 @@ ---- -title: M3DB Cluster Deployment, Manually (The Hard Way) -menuTitle: Manual Cluster Deployment -weight: 2 ---- - -This document lists the manual steps involved in deploying a M3DB cluster. 
In practice, you'd be automating this using Terraform or using Kubernetes rather than doing this by hand; guides for doing so are available under the How-To section. - -## Primer Architecture - -A quick primer on M3DB architecture. Here’s what a typical deployment looks like: - -![Typical Deployment](/cluster_architecture.png) - -A few different things to highlight about the diagram: - -### Role Type - -There are three ‘role types’ for a m3db deployment - - -- Coordinator: `m3coordinator` serves to coordinate reads and writes across all hosts in the cluster. It’s a lightweight process, and does not store any data. This role would typically be run alongside a Prometheus instance, or be baked into a collector agent. -- Storage Node: `m3dbnode` processes running on these hosts are the workhorses of the database, they store data; and serve reads and writes. -- Seed Node: First and foremost, these hosts are storage nodes themselves. In addition to that responsibility, they run an embedded ETCD server. This is to allow the various M3DB processes running across the cluster to reason about the topology/configuration of the cluster in a consistent manner. - -**Note**: In very large deployments, you’d use a dedicated ETCD cluster, and only use M3DB Storage and Coordinator Nodes - -## Provisioning - -Enough background, lets get you going with a real cluster! Provision your host (be it VMs from AWS/GCP/etc) or bare-metal servers in your DC with the latest and greatest flavour of Linux you favor. M3DB works on all popular distributions - Ubuntu/RHEL/CentOS, let us know if you run into issues on another platform and we’ll be happy to assist. - -### Network - -If you’re using AWS or GCP it is highly advised to use static IPs so that if you need to replace a host, you don’t have to update your configuration files on all the hosts, you simply decomission the old seed node and provision a new seed node with the same host ID and static IP that the old seed node had. 
For AWS you can use a [Elastic Network Interface](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html) on a VPC and for GCP you can simply use an [internal static IP address](https://cloud.google.com/compute/docs/ip-addresses/reserve-static-internal-ip-address). - -In this example you will be creating three static IP addresses for the three seed nodes. - -Further, we assume you have hostnames configured correctly too. i.e. running `hostname` on a host in the cluster returns the host ID you'll be using when specifying instance host IDs when creating the M3DB cluster placement. E.g. running `hostname` on a node `m3db001` should return it's host ID `m3db001`. - -In GCP the name of your instance when you create it will automatically be it's hostname. When you create an instance click "Management, disks, networking, SSH keys" and under "Networking" click the default interface and click the "Primary internal IP" drop down and select "Reserve a static internal IP address" and give it a name, i.e. `m3db001`, a description that describes it's a seed node IP address and use "Assign automatically". - -In AWS it might be simpler to just use whatever the hostname you get for the provisioned VM as your host ID when specifying M3DB placement. Either that or use the `environment` host ID resolver and pass your host ID when launching the database process with an environment variable. You can set to the host ID and specify the environment variable name in config as `envVarName: M3DB_HOST_ID` if you are using an environment variable named `M3DB_HOST_ID`. - -Relevant config snippet: - -```yaml -hostID: - resolver: environment - envVarName: M3DB_HOST_ID -``` - -Then start your process with: - -```shell -M3DB_HOST_ID=m3db001 m3dbnode -f config.yml -``` - -### Kernel - -Ensure you review our [recommended kernel configuration](/docs/operational_guide/kernel_configuration) before running M3DB in production as M3DB may exceed the default limits for some default kernel values. 
- -## Config files - -We wouldn’t feel right to call this guide, “The Hard Way” and not require you to change some configs by hand. - -**Note**: the steps that follow assume you have the following 3 seed nodes - make necessary adjustment if you have more or are using a dedicated ETCD cluster. Example seed nodes: - -- m3db001 (Region=us-east1, Zone=us-east1-a, Static IP=10.142.0.1) -- m3db002 (Region=us-east1, Zone=us-east1-b, Static IP=10.142.0.2) -- m3db003 (Region=us-east1, Zone=us-east1-c, Static IP=10.142.0.3) - -We’re going to start with the M3DB config template and modify it to work for your cluster. Start by downloading the [config](https://github.com/m3db/m3/blob/master/src/dbnode/config/m3dbnode-cluster-template.yml). Update the config ‘service’ and 'seedNodes' sections to read as follows: - -```yaml -config: - service: - env: default_env - zone: embedded - service: m3db - cacheDir: /var/lib/m3kv - etcdClusters: - - zone: embedded - endpoints: - - 10.142.0.1:2379 - - 10.142.0.2:2379 - - 10.142.0.3:2379 - seedNodes: - initialCluster: - - hostID: m3db001 - endpoint: http://10.142.0.1:2380 - - hostID: m3db002 - endpoint: http://10.142.0.2:2380 - - hostID: m3db003 - endpoint: http://10.142.0.3:2380 -``` - -## Start the seed nodes - -Transfer the config you just crafted to each host in the cluster. And then starting with the seed nodes, start up the m3dbnode process: - -```shell -m3dbnode -f -``` - -**Note**, remember to daemon-ize this using your favourite utility: systemd/init.d/supervisor/etc - -## Create Namespace and Initialize Topology - -The recommended way to create a namespace and initialize a topology is to use the `/api/v1/database/create` api. Below is an example. - -**Note:** In order to create a more custom setup, please refer to the [namespace configuration](/docs/operational_guide/namespace_configuration) and -[placement configuration](/docs/operational_guide/placement_configuration) guides, though this is discouraged. 
- -```shell -curl -X POST http://localhost:7201/api/v1/database/create -d '{ - "type": "cluster", - "namespaceName": "1week_namespace", - "retentionTime": "168h", - "numShards": "1024", - "replicationFactor": "3", - "hosts": [ - { - "id": "m3db001", - "isolationGroup": "us-east1-a", - "zone": "embedded", - "weight": 100, - "address": "10.142.0.1", - "port": 9000 - }, - { - "id": "m3db002", - "isolationGroup": "us-east1-b", - "zone": "embedded", - "weight": 100, - "address": "10.142.0.2", - "port": 9000 - }, - { - "id": "m3db003", - "isolationGroup": "us-east1-c", - "zone": "embedded", - "weight": 100, - "address": "10.142.0.3", - "port": 9000 - } - ] -}' -``` - -**Note:** Isolation group specifies how the cluster places shards to avoid more than one replica of a shard appearing in the same replica group. As such you must be using at least as many isolation groups as your replication factor. In this example we use the availibity zones `us-east1-a`, `us-east1-b`, `us-east1-c` as our isolation groups which matches our replication factor of 3. - -Shortly after, you should see your node complete bootstrapping: - -```shell -20:10:12.911218[I] updating database namespaces [{adds [default]} {updates []} {removals []}] -20:10:13.462798[I] node tchannelthrift: listening on 0.0.0.0:9000 -20:10:13.463107[I] cluster tchannelthrift: listening on 0.0.0.0:9001 -20:10:13.747173[I] node httpjson: listening on 0.0.0.0:9002 -20:10:13.747506[I] cluster httpjson: listening on 0.0.0.0:9003 -20:10:13.747763[I] bootstrapping shards for range starting ... -... -20:10:13.757834[I] bootstrap finished [{namespace metrics} {duration 10.1261ms}] -20:10:13.758001[I] bootstrapped -20:10:14.764771[I] successfully updated topology to 3 hosts -``` - -Once a namespace has finished bootstrapping, you must mark it as ready before receiving traffic by using the _{{% apiendpoint %}}namespace/ready_. 
- -{{< tabs name="ready_namespaces" >}} -{{% tab name="Command" %}} - -{{% codeinclude file="how_to/ready-namespace.sh" language="shell" %}} - -{{% /tab %}} -{{% tab name="Output" %}} - -```json -{ - "ready": true -} -``` - -{{% /tab %}} -{{< /tabs >}} - -If you need to setup multiple namespaces, you can run the above `/api/v1/database/create` command multiple times with different namespace configurations. - -### Replication factor (RF) - -Recommended is RF3, where each replica is spread across failure domains such as a rack, data center or availability zone. See [Replication Factor Recommendations](/docs/operational_guide/replication_and_deployment_in_zones) for more specifics. - -### Shards - -See [placement configuration](/docs/operational_guide/placement_configuration) to determine the appropriate number of shards to specify. - -## Test it out - -Now you can experiment with writing tagged metrics: - -```shell -curl -sS -X POST localhost:9003/writetagged -d '{ - "namespace": "metrics", - "id": "foo", - "tags": [ - { - "name": "city", - "value": "new_york" - }, - { - "name": "endpoint", - "value": "/request" - } - ], - "datapoint": { - "timestamp": '"$(date "+%s")"', - "value": 42.123456789 - } -}' -``` - -And reading the metrics you've written: - -```shell -curl -sS -X POST http://localhost:9003/query -d '{ - "namespace": "metrics", - "query": { - "regexp": { - "field": "city", - "regexp": ".*" - } - }, - "rangeStart": 0, - "rangeEnd": '"$(date "+%s")"' -}' | jq . -``` - -## Integrations - -[Prometheus as a long term storage remote read/write endpoint](/docs/integrations/prometheus). 
diff --git a/site/content/how_to/use_as_tsdb.md b/site/content/how_to/use_as_tsdb.md index 3317775d54..c8e32579b3 100644 --- a/site/content/how_to/use_as_tsdb.md +++ b/site/content/how_to/use_as_tsdb.md @@ -114,7 +114,7 @@ For more details on the compression scheme and its limitations, review [the docu #### M3DB setup -For more advanced setups, it's best to follow the guides on how to configure an M3DB cluster [manually](/docs/how_to/cluster_hard_way) or [using Kubernetes](/docs/operator). However, this tutorial will walk you through configuring a single node setup locally for development. +For more advanced setups, it's best to follow the guides on how to configure an M3DB cluster [manually](/docs/cluster/binaries_cluster) or [using Kubernetes](/docs/cluster/kubernetes_cluster). However, this tutorial will walk you through configuring a single node setup locally for development. First, run the following command to pull the latest M3DB image: @@ -148,7 +148,7 @@ Once a namespace has finished bootstrapping, you must mark it as ready before re {{< tabs name="ready_namespaces" >}} {{% tab name="Command" %}} -{{% codeinclude file="quickstart/ready-namespace.sh" language="shell" %}} +{{< codeinclude file="docs/includes/quickstart/ready-namespace.sh" language="shell" >}} {{% /tab %}} {{% tab name="Output" %}} diff --git a/site/content/includes/cluster-common-steps.md b/site/content/includes/cluster-common-steps.md new file mode 100644 index 0000000000..08e8c731d9 --- /dev/null +++ b/site/content/includes/cluster-common-steps.md @@ -0,0 +1,179 @@ +## Writing and Querying Metrics + +### Writing Metrics + +M3 supports ingesting [statsd](https://github.com/statsd/statsd#usage) and [Prometheus](https://prometheus.io/docs/concepts/data_model/) formatted metrics. + +This quickstart focuses on Prometheus metrics which consist of a value, a timestamp, and tags to bring context and meaning to the metric. 
+
+You can write metrics using one of two endpoints:
+
+- _[{{% apiendpoint %}}prom/remote/write](/docs/m3coordinator/api/remote/)_ - Write a Prometheus remote write query to M3DB with a binary snappy compressed Prometheus WriteRequest protobuf message.
+- _{{% apiendpoint %}}json/write_ - Write a JSON payload of metrics data. This endpoint is quick for testing purposes but is not as performant for production usage.
+
+For this quickstart, use the _{{% apiendpoint %}}json/write_ endpoint to write a tagged metric to M3 with the following data in the request body, all fields are required:
+
+- `tags`: An object of at least one `name`/`value` pair
+- `timestamp`: The UNIX timestamp for the data
+- `value`: The value for the data, can be of any type
+
+{{% notice tip %}}
+The examples below use `__name__` as the name for one of the tags, which is a Prometheus reserved tag that allows you to query metrics using the value of the tag to filter results.
+{{% /notice %}}
+
+{{% notice tip %}}
+Label names may contain ASCII letters, numbers, underscores, and Unicode characters. They must match the regex `[a-zA-Z_][a-zA-Z0-9_]*`. Label names beginning with `__` are reserved for internal use. [Read more in the Prometheus documentation](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
+{{% /notice %}}
+
+{{< tabs name="write_metrics" >}}
+{{< tab name="Command 1" >}}
+
+{{< codeinclude file="docs/includes/write-metrics-1.sh" language="shell" >}}
+
+{{< /tab >}}
+{{< tab name="Command 2" >}}
+
+{{< codeinclude file="docs/includes/write-metrics-2.sh" language="shell" >}}
+
+{{< /tab >}}
+{{< tab name="Command 3" >}}
+
+{{< codeinclude file="docs/includes/write-metrics-3.sh" language="shell" >}}
+
+{{< /tab >}}
+{{< /tabs >}}
+
+### Querying metrics
+
+M3 supports three query engines: Prometheus (default), Graphite, and the M3 Query Engine.
+ +This quickstart uses Prometheus as the query engine, and you have access to [all the features of PromQL queries](https://prometheus.io/docs/prometheus/latest/querying/basics/). + +To query metrics, use the _{{% apiendpoint %}}query_range_ endpoint with the following data in the request body, all fields are required: + +- `query`: A PromQL query +- `start`: Timestamp in `RFC3339Nano` of start range for results +- `end`: Timestamp in `RFC3339Nano` of end range for results +- `step`: A duration or float of the query resolution, the interval between results in the timespan between `start` and `end`. + +Below are some examples using the metrics written above. + +#### Return results in past 45 seconds + +{{< tabs name="example_promql_regex" >}} +{{% tab name="Linux" %}} + + + +```shell +curl -X "POST" -G "{{% apiendpoint %}}query_range" \ + -d "query=third_avenue" \ + -d "start=$(date "+%s" -d "45 seconds ago")" \ + -d "end=$( date +%s )" \ + -d "step=5s" | jq . +``` + +{{% /tab %}} +{{% tab name="macOS/BSD" %}} + +```shell +curl -X "POST" -G "{{% apiendpoint %}}query_range" \ + -d "query=third_avenue" \ + -d "start=$( date -v -45S +%s )" \ + -d "end=$( date +%s )" \ + -d "step=5s" | jq . +``` + +{{% /tab %}} +{{% tab name="Output" %}} + +```json +{ + "status": "success", + "data": { + "resultType": "matrix", + "result": [ + { + "metric": { + "__name__": "third_avenue", + "checkout": "1", + "city": "new_york" + }, + "values": [ + [ + {{% now %}}, + "3347.26" + ], + [ + {{% now %}}, + "5347.26" + ], + [ + {{% now %}}, + "7347.26" + ] + ] + } + ] + } +} +``` + +{{% /tab %}} +{{< /tabs >}} + +#### Values above a certain number + +{{< tabs name="example_promql_range" >}} +{{% tab name="Linux" %}} + + + +```shell +curl -X "POST" -G "{{% apiendpoint %}}query_range" \ + -d "query=third_avenue > 6000" \ + -d "start=$(date "+%s" -d "45 seconds ago")" \ + -d "end=$( date +%s )" \ + -d "step=5s" | jq . 
+``` + +{{% /tab %}} +{{% tab name="macOS/BSD" %}} + +```shell +curl -X "POST" -G "{{% apiendpoint %}}query_range" \ + -d "query=third_avenue > 6000" \ + -d "start=$(date -v -45S "+%s")" \ + -d "end=$( date +%s )" \ + -d "step=5s" | jq . +``` + +{{% /tab %}} +{{% tab name="Output" %}} + +```json +{ + "status": "success", + "data": { + "resultType": "matrix", + "result": [ + { + "metric": { + "__name__": "third_avenue", + "checkout": "1", + "city": "new_york" + }, + "values": [ + [ + {{% now %}}, + "7347.26" + ] + ] + } + ] + } +} +``` + +{{% /tab %}} +{{< /tabs >}} diff --git a/site/content/includes/create-database.sh b/site/content/includes/create-database.sh new file mode 100644 index 0000000000..104dce0586 --- /dev/null +++ b/site/content/includes/create-database.sh @@ -0,0 +1,6 @@ +#!/bin/bash +curl -X POST http://localhost:7201/api/v1/database/create -d '{ + "type": "local", + "namespaceName": "default", + "retentionTime": "12h" +}' | jq . diff --git a/site/content/includes/how_to/ready-namespace.sh b/site/content/includes/how_to/ready-namespace.sh index 18158c3b8a..e357b57f52 100644 --- a/site/content/includes/how_to/ready-namespace.sh +++ b/site/content/includes/how_to/ready-namespace.sh @@ -1,4 +1,4 @@ #!/bin/bash curl -X POST http://localhost:7201/api/v1/services/m3db/namespace/ready -d '{ "name": "1week_namespace" -}' | jq . \ No newline at end of file +}' | jq . diff --git a/site/content/includes/operational_guide/ready-namespace.sh b/site/content/includes/operational_guide/ready-namespace.sh index 720f2e88c3..27de70ffba 100644 --- a/site/content/includes/operational_guide/ready-namespace.sh +++ b/site/content/includes/operational_guide/ready-namespace.sh @@ -1,4 +1,4 @@ #!/bin/bash curl -X POST http://localhost:7201/api/v1/services/m3db/namespace/ready -d '{ "name": "default_unaggregated" -}' | jq . \ No newline at end of file +}' | jq . 
diff --git a/site/content/includes/quickstart-common-steps.md b/site/content/includes/quickstart-common-steps.md new file mode 100644 index 0000000000..4db7a62ceb --- /dev/null +++ b/site/content/includes/quickstart-common-steps.md @@ -0,0 +1,432 @@ +## Organizing Data with Placements and Namespaces + +A time series database (TSDBs) typically consist of one node (or instance) to store metrics data. This setup is simple to use but has issues with scalability over time as the quantity of metrics data written and read increases. + +As a distributed TSDB, M3 helps solve this problem by spreading metrics data, and demand for that data, across multiple nodes in a cluster. M3 does this by splitting data into segments that match certain criteria (such as above a certain value) across nodes into shards. + + + +If you've worked with a distributed database before, then these concepts are probably familiar to you, but M3 uses different terminology to represent some concepts. + +- Every cluster has **one** placement that maps shards to nodes in the cluster. +- A cluster can have **0 or more** namespaces that are similar conceptually to tables in other databases, and each node serves every namespace for the shards it owns. + + + +For example, if the cluster placement states that node A owns shards 1, 2, and 3, then node A owns shards 1, 2, 3 for all configured namespaces in the cluster. Each namespace has its own configuration options, including a name and retention time for the data. + +## Create a Placement and Namespace + +This quickstart uses the _{{% apiendpoint %}}database/create_ endpoint that creates a namespace, and the placement if it doesn't already exist based on the `type` argument. + +You can create [placements](/docs/operational_guide/placement_configuration/) and [namespaces](/docs/operational_guide/namespace_configuration/#advanced-hard-way) separately if you need more control over their settings. 
+ +The `namespaceName` argument must match the namespace in the `local` section of the `M3Coordinator` YAML configuration. If you [add any namespaces](/docs/operational_guide/namespace_configuration) you also need to add them to the `local` section of `M3Coordinator`'s YAML configuration. + +In another terminal, use the following command. + +{{< tabs name="create_placement_namespace" >}} +{{< tab name="Command" >}} + +{{< codeinclude file="docs/includes/create-database.sh" language="shell" >}} + +{{< /tab >}} +{{% tab name="Output" %}} + +```json +{ + "namespace": { + "registry": { + "namespaces": { + "default": { + "bootstrapEnabled": true, + "flushEnabled": true, + "writesToCommitLog": true, + "cleanupEnabled": true, + "repairEnabled": false, + "retentionOptions": { + "retentionPeriodNanos": "43200000000000", + "blockSizeNanos": "1800000000000", + "bufferFutureNanos": "120000000000", + "bufferPastNanos": "600000000000", + "blockDataExpiry": true, + "blockDataExpiryAfterNotAccessPeriodNanos": "300000000000", + "futureRetentionPeriodNanos": "0" + }, + "snapshotEnabled": true, + "indexOptions": { + "enabled": true, + "blockSizeNanos": "1800000000000" + }, + "schemaOptions": null, + "coldWritesEnabled": false, + "runtimeOptions": null + } + } + } + }, + "placement": { + "placement": { + "instances": { + "m3db_local": { + "id": "m3db_local", + "isolationGroup": "local", + "zone": "embedded", + "weight": 1, + "endpoint": "127.0.0.1:9000", + "shards": [ + { + "id": 0, + "state": "INITIALIZING", + "sourceId": "", + "cutoverNanos": "0", + "cutoffNanos": "0" + }, + … + { + "id": 63, + "state": "INITIALIZING", + "sourceId": "", + "cutoverNanos": "0", + "cutoffNanos": "0" + } + ], + "shardSetId": 0, + "hostname": "localhost", + "port": 9000, + "metadata": { + "debugPort": 0 + } + } + }, + "replicaFactor": 1, + "numShards": 64, + "isSharded": true, + "cutoverTime": "0", + "isMirrored": false, + "maxShardSetId": 0 + }, + "version": 0 + } +} +``` + +{{% /tab %}} +{{< /tabs >}} 
+ +Placement initialization can take a minute or two. Once all the shards have the `AVAILABLE` state, the node has finished bootstrapping, and you should see the following messages in the node console output. + + + +```shell +{"level":"info","ts":1598367624.0117292,"msg":"bootstrap marking all shards as bootstrapped","namespace":"default","namespace":"default","numShards":64} +{"level":"info","ts":1598367624.0301404,"msg":"bootstrap index with bootstrapped index segments","namespace":"default","numIndexBlocks":0} +{"level":"info","ts":1598367624.0301914,"msg":"bootstrap success","numShards":64,"bootstrapDuration":0.049208827} +{"level":"info","ts":1598367624.03023,"msg":"bootstrapped"} +``` + +You can check on the status by calling the _{{% apiendpoint %}}placement_ endpoint: + +{{< tabs name="check_placement" >}} +{{% tab name="Command" %}} + +```shell +curl {{% apiendpoint %}}placement | jq . +``` + +{{% /tab %}} +{{% tab name="Output" %}} + +```json +{ + "placement": { + "instances": { + "m3db_local": { + "id": "m3db_local", + "isolationGroup": "local", + "zone": "embedded", + "weight": 1, + "endpoint": "127.0.0.1:9000", + "shards": [ + { + "id": 0, + "state": "AVAILABLE", + "sourceId": "", + "cutoverNanos": "0", + "cutoffNanos": "0" + }, + … + { + "id": 63, + "state": "AVAILABLE", + "sourceId": "", + "cutoverNanos": "0", + "cutoffNanos": "0" + } + ], + "shardSetId": 0, + "hostname": "localhost", + "port": 9000, + "metadata": { + "debugPort": 0 + } + } + }, + "replicaFactor": 1, + "numShards": 64, + "isSharded": true, + "cutoverTime": "0", + "isMirrored": false, + "maxShardSetId": 0 + }, + "version": 2 +} +``` + +{{% /tab %}} +{{< /tabs >}} + +{{% notice tip %}} +[Read more about the bootstrapping process](/docs/operational_guide/bootstrapping_crash_recovery/). 
+{{% /notice %}} + +### View Details of a Namespace + +You can also view the attributes of all namespaces by calling the _{{% apiendpoint %}}namespace_ endpoint + +{{< tabs name="check_namespaces" >}} +{{% tab name="Command" %}} + +```shell +curl {{% apiendpoint %}}namespace | jq . +``` + +{{% notice tip %}} +Add `?debug=1` to the request to convert nano units in the output into standard units. +{{% /notice %}} + +{{% /tab %}} +{{% tab name="Output" %}} + +```json +{ + "registry": { + "namespaces": { + "default": { + "bootstrapEnabled": true, + "flushEnabled": true, + "writesToCommitLog": true, + "cleanupEnabled": true, + "repairEnabled": false, + "retentionOptions": { + "retentionPeriodNanos": "43200000000000", + "blockSizeNanos": "1800000000000", + "bufferFutureNanos": "120000000000", + "bufferPastNanos": "600000000000", + "blockDataExpiry": true, + "blockDataExpiryAfterNotAccessPeriodNanos": "300000000000", + "futureRetentionPeriodNanos": "0" + }, + "snapshotEnabled": true, + "indexOptions": { + "enabled": true, + "blockSizeNanos": "1800000000000" + }, + "schemaOptions": null, + "coldWritesEnabled": false, + "runtimeOptions": null + } + } + } +} +``` + +{{% /tab %}} +{{< /tabs >}} + +## Writing and Querying Metrics + +### Writing Metrics + +M3 supports ingesting [statsd](https://github.com/statsd/statsd#usage) and [Prometheus](https://prometheus.io/docs/concepts/data_model/) formatted metrics. + +This quickstart focuses on Prometheus metrics which consist of a value, a timestamp, and tags to bring context and meaning to the metric. + +You can write metrics using one of two endpoints: + +- _[{{% apiendpoint %}}prom/remote/write](/docs/m3coordinator/api/remote/)_ - Write a Prometheus remote write query to M3DB with a binary snappy compressed Prometheus WriteRequest protobuf message. +- _{{% apiendpoint %}}json/write_ - Write a JSON payload of metrics data. This endpoint is quick for testing purposes but is not as performant for production usage. 
+
+For this quickstart, use the _{{% apiendpoint %}}json/write_ endpoint to write a tagged metric to M3 with the following data in the request body, all fields are required:
+
+- `tags`: An object of at least one `name`/`value` pair
+- `timestamp`: The UNIX timestamp for the data
+- `value`: The value for the data, can be of any type
+
+{{% notice tip %}}
+The examples below use `__name__` as the name for one of the tags, which is a Prometheus reserved tag that allows you to query metrics using the value of the tag to filter results.
+{{% /notice %}}
+
+{{% notice tip %}}
+Label names may contain ASCII letters, numbers, underscores, and Unicode characters. They must match the regex `[a-zA-Z_][a-zA-Z0-9_]*`. Label names beginning with `__` are reserved for internal use. [Read more in the Prometheus documentation](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
+{{% /notice %}}
+
+{{< tabs name="write_metrics" >}}
+{{< tab name="Command 1" >}}
+
+{{< codeinclude file="docs/includes/write-metrics-1.sh" language="shell" >}}
+
+{{< /tab >}}
+{{< tab name="Command 2" >}}
+
+{{< codeinclude file="docs/includes/write-metrics-2.sh" language="shell" >}}
+
+{{< /tab >}}
+{{< tab name="Command 3" >}}
+
+{{< codeinclude file="docs/includes/write-metrics-3.sh" language="shell" >}}
+
+{{< /tab >}}
+{{< /tabs >}}
+
+### Querying metrics
+
+M3 supports three query engines: Prometheus (default), Graphite, and the M3 Query Engine.
+
+This quickstart uses Prometheus as the query engine, and you have access to [all the features of PromQL queries](https://prometheus.io/docs/prometheus/latest/querying/basics/).
+ +To query metrics, use the _{{% apiendpoint %}}query_range_ endpoint with the following data in the request body, all fields are required: + +- `query`: A PromQL query +- `start`: Timestamp in `RFC3339Nano` of start range for results +- `end`: Timestamp in `RFC3339Nano` of end range for results +- `step`: A duration or float of the query resolution, the interval between results in the timespan between `start` and `end`. + +Below are some examples using the metrics written above. + +#### Return results in past 45 seconds + +{{< tabs name="example_promql_regex" >}} +{{% tab name="Linux" %}} + + + +```shell +curl -X "POST" -G "{{% apiendpoint %}}query_range" \ + -d "query=third_avenue" \ + -d "start=$(date "+%s" -d "45 seconds ago")" \ + -d "end=$( date +%s )" \ + -d "step=5s" | jq . +``` + +{{% /tab %}} +{{% tab name="macOS/BSD" %}} + +```shell +curl -X "POST" -G "{{% apiendpoint %}}query_range" \ + -d "query=third_avenue" \ + -d "start=$( date -v -45S +%s )" \ + -d "end=$( date +%s )" \ + -d "step=5s" | jq . +``` + +{{% /tab %}} +{{% tab name="Output" %}} + +```json +{ + "status": "success", + "data": { + "resultType": "matrix", + "result": [ + { + "metric": { + "__name__": "third_avenue", + "checkout": "1", + "city": "new_york" + }, + "values": [ + [ + {{% now %}}, + "3347.26" + ], + [ + {{% now %}}, + "5347.26" + ], + [ + {{% now %}}, + "7347.26" + ] + ] + } + ] + } +} +``` + +{{% /tab %}} +{{< /tabs >}} + +#### Values above a certain number + +{{< tabs name="example_promql_range" >}} +{{% tab name="Linux" %}} + + + +```shell +curl -X "POST" -G "{{% apiendpoint %}}query_range" \ + -d "query=third_avenue > 6000" \ + -d "start=$(date "+%s" -d "45 seconds ago")" \ + -d "end=$( date +%s )" \ + -d "step=5s" | jq . +``` + +{{% /tab %}} +{{% tab name="macOS/BSD" %}} + +```shell +curl -X "POST" -G "{{% apiendpoint %}}query_range" \ + -d "query=third_avenue > 6000" \ + -d "start=$(date -v -45S "+%s")" \ + -d "end=$( date +%s )" \ + -d "step=5s" | jq . 
+``` + +{{% /tab %}} +{{% tab name="Output" %}} + +```json +{ + "status": "success", + "data": { + "resultType": "matrix", + "result": [ + { + "metric": { + "__name__": "third_avenue", + "checkout": "1", + "city": "new_york" + }, + "values": [ + [ + {{% now %}}, + "7347.26" + ] + ] + } + ] + } +} +``` + +{{% /tab %}} +{{< /tabs >}} + + diff --git a/site/content/includes/write-metrics-1.sh b/site/content/includes/write-metrics-1.sh new file mode 100755 index 0000000000..453100c993 --- /dev/null +++ b/site/content/includes/write-metrics-1.sh @@ -0,0 +1,11 @@ +#!/bin/bash +curl -X POST http://localhost:7201/api/v1/json/write -d '{ + "tags": + { + "__name__": "third_avenue", + "city": "new_york", + "checkout": "1" + }, + "timestamp": '\"$(date "+%s")\"', + "value": 3347.26 +}' diff --git a/site/content/includes/write-metrics-2.sh b/site/content/includes/write-metrics-2.sh new file mode 100755 index 0000000000..eea2d30348 --- /dev/null +++ b/site/content/includes/write-metrics-2.sh @@ -0,0 +1,11 @@ +#!/bin/bash +curl -X POST http://localhost:7201/api/v1/json/write -d '{ + "tags": + { + "__name__": "third_avenue", + "city": "new_york", + "checkout": "1" + }, + "timestamp": '\"$(date "+%s")\"', + "value": 5347.26 +}' diff --git a/site/content/includes/write-metrics-3.sh b/site/content/includes/write-metrics-3.sh new file mode 100755 index 0000000000..e6b3a5f046 --- /dev/null +++ b/site/content/includes/write-metrics-3.sh @@ -0,0 +1,11 @@ +#!/bin/bash +curl -X POST http://localhost:7201/api/v1/json/write -d '{ + "tags": + { + "__name__": "third_avenue", + "city": "new_york", + "checkout": "1" + }, + "timestamp": '\"$(date "+%s")\"', + "value": 7347.26 +}' diff --git a/site/content/operational_guide/namespace_configuration.md b/site/content/operational_guide/namespace_configuration.md index ff8ae8a006..ba8a0c26e3 100644 --- a/site/content/operational_guide/namespace_configuration.md +++ b/site/content/operational_guide/namespace_configuration.md @@ -37,7 +37,7 @@ Once a 
namespace has finished bootstrapping, you must mark it as ready so that M {{< tabs name="ready_namespaces" >}} {{% tab name="Command" %}} -{{% codeinclude file="operational_guide/ready-namespace.sh" language="shell" %}} +{{< codeinclude file="docs/includes/operational_guide/ready-namespace.sh" language="shell" >}} {{% /tab %}} {{% tab name="Output" %}} @@ -94,7 +94,7 @@ Once a namespace has finished bootstrapping, you must mark it as ready so that M {{< tabs name="ready_namespaces_adv" >}} {{% tab name="Command" %}} -{{% codeinclude file="operational_guide/ready-namespace.sh" language="shell" %}} +{{< codeinclude file="docs/includes/operational_guide/ready-namespace.sh" language="shell" >}} {{% /tab %}} {{% tab name="Output" %}} diff --git a/site/content/quickstart/binaries.md b/site/content/quickstart/binaries.md new file mode 100644 index 0000000000..ba0ba10730 --- /dev/null +++ b/site/content/quickstart/binaries.md @@ -0,0 +1,97 @@ +--- +linktitle: "Binaries" +weight: 3 +--- + + + +# Creating a Single Node M3 Cluster with Binaries + +This guide shows how to install and configure M3, create a single-node cluster, and read and write metrics to it. + +{{% notice warning %}} +Deploying a single-node M3 cluster is a great way to experiment with M3 and get an idea of what it has to offer, but is not designed for production use. To run M3 in clustered mode, with a separate M3Coordinator [read the clustered mode guide](/docs/cluster). +{{% /notice %}} + +## Prebuilt Binaries + +M3 has pre-built binaries available for Linux and macOS. [Download the latest release from GitHub](https://github.com/m3db/m3/releases/latest). + +## Build From Source + +### Prerequisites + +- [Go 1.10 or higher](https://golang.org/dl/) +- [Make](https://www.gnu.org/software/make/) + +### Build Source + +```shell +make m3dbnode +``` + +## Start Binary + +By default the binary configures a single M3 instance containing: + +- An M3DB storage instance for time series storage. 
It includes an embedded tag-based metrics index and an etcd server for storing the cluster topology and runtime configuration. +- An M3Coordinator instance for writing and querying tagged metrics, as well as managing cluster topology and runtime configuration. + +It exposes two ports: + +- `7201` to manage the cluster topology, you make most API calls to this endpoint +- `7203` for Prometheus to scrape the metrics produced by M3DB and M3Coordinator + +The command below starts the node using the specified configuration file. + +{{< tabs name="start_container" >}} +{{% tab name="Pre-built binary" %}} + +[Download the example configuration file](https://github.com/m3db/m3/raw/master/src/dbnode/config/m3dbnode-local-etcd.yml). + +```shell +./m3dbnode -f /{FILE_LOCATION}/m3dbnode-local-etcd.yml +``` + +{{% notice info %}} +Depending on your operating system setup, you might need to prefix the command with `sudo`. +{{% /notice %}} + +{{% /tab %}} +{{% tab name="Self-built binary" %}} + +```shell +./bin/m3dbnode -f ./src/dbnode/config/m3dbnode-local-etcd.yml +``` + +{{% notice info %}} +Depending on your operating system setup, you might need to prefix the command with `sudo`. +{{% /notice %}} + +{{% /tab %}} +{{% tab name="Output" %}} + + + + + +![Docker pull and run](/docker-install.gif) + +{{% /tab %}} +{{< /tabs >}} + +{{% notice info %}} +When running the command above on macOS you may see errors about "too many open files." To fix this in your current terminal, use `ulimit` to increase the upper limit, for example `ulimit -n 10240`. +{{% /notice %}} + +## Configuration + +This example uses this [sample configuration file](https://github.com/m3db/m3/raw/master/src/dbnode/config/m3dbnode-local-etcd.yml) by default. + +The file groups configuration into `coordinator` or `db` sections that represent the `M3Coordinator` and `M3DB` instances of a single-node cluster. 
+ +{{% notice tip %}} +You can find more information on configuring M3DB in the [operational guides section](/docs/operational_guide). +{{% /notice %}} + +{{< fileinclude file="quickstart-common-steps.md" >}} \ No newline at end of file diff --git a/site/content/quickstart/create-database.sh b/site/content/quickstart/create-database.sh new file mode 100644 index 0000000000..104dce0586 --- /dev/null +++ b/site/content/quickstart/create-database.sh @@ -0,0 +1,6 @@ +#!/bin/bash +curl -X POST http://localhost:7201/api/v1/database/create -d '{ + "type": "local", + "namespaceName": "default", + "retentionTime": "12h" +}' | jq . diff --git a/site/content/quickstart/docker.md b/site/content/quickstart/docker.md index 029cb733f5..ce97cafb91 100644 --- a/site/content/quickstart/docker.md +++ b/site/content/quickstart/docker.md @@ -7,9 +7,9 @@ weight: 1 This guide shows how to install and configure M3DB, create a single-node cluster, and read and write metrics to it. - + {{% notice warning %}} -Deploying a single-node M3DB cluster is a great way to experiment with M3DB and get an idea of what it has to offer, but is not designed for production use. To run M3DB in clustered mode with a separate M3Coordinator, [read the clustered mode guide](/docs/how_to/cluster_hard_way). +Deploying a single-node M3DB cluster is a great way to experiment with M3DB and get an idea of what it has to offer, but is not designed for production use. To run M3DB in clustered mode with a separate M3Coordinator, [read the clustered mode guide](/docs/cluster/). {{% /notice %}} ## Prerequisites @@ -70,458 +70,4 @@ The file groups configuration into `coordinator` or `db` sections that represent You can find more information on configuring M3DB in the [operational guides section](/docs/operational_guide/). {{% /notice %}} -## Organizing Data with Placements and Namespaces - -A time series database (TSDBs) typically consist of one node (or instance) to store metrics data. 
This setup is simple to use but has issues with scalability over time as the quantity of metrics data written and read increases. - -As a distributed TSDB, M3DB helps solve this problem by spreading metrics data, and demand for that data, across multiple nodes in a cluster. M3DB does this by splitting data into segments that match certain criteria (such as above a certain value) across nodes into shards. - - - -If you've worked with a distributed database before, then these concepts are probably familiar to you, but M3DB uses different terminology to represent some concepts. - -- Every cluster has **one** placement that maps shards to nodes in the cluster. -- A cluster can have **0 or more** namespaces that are similar conceptually to tables in other databases, and each node serves every namespace for the shards it owns. - - - -For example, if the cluster placement states that node A owns shards 1, 2, and 3, then node A owns shards 1, 2, 3 for all configured namespaces in the cluster. Each namespace has its own configuration options, including a name and retention time for the data. - -## Create a Placement and Namespace - -This quickstart uses the _{{% apiendpoint %}}database/create_ endpoint that creates a namespace, and the placement if it doesn't already exist based on the `type` argument. - -You can create [placements](/docs/operational_guide/placement_configuration/) and [namespaces](/docs/operational_guide/namespace_configuration/#advanced-hard-way) separately if you need more control over their settings. - -In another terminal, use the following command. - -{{< tabs name="create_placement_namespace" >}} -{{% tab name="Command" %}} - -{{% codeinclude file="quickstart/create-database.sh" language="shell" %}} - -{{% notice tip %}} -The Docker command used above creates a Docker [persistent volume](https://docs.docker.com/storage/volumes/) to keep the data M3 creates on your host file system between container restarts. 
If you have already followed this tutorial, the namespace already exists. You can clear the data by deleting the contents of the _m3db_data_ folder, or deleting the namespace with [the DELETE endpoint](/docs/operational_guide/namespace_configuration/#deleting-a-namespace). -{{% /notice %}} - -{{% /tab %}} -{{% tab name="Output" %}} - -```json -{ - "namespace": { - "registry": { - "namespaces": { - "default": { - "bootstrapEnabled": true, - "flushEnabled": true, - "writesToCommitLog": true, - "cleanupEnabled": true, - "repairEnabled": false, - "retentionOptions": { - "retentionPeriodNanos": "43200000000000", - "blockSizeNanos": "1800000000000", - "bufferFutureNanos": "120000000000", - "bufferPastNanos": "600000000000", - "blockDataExpiry": true, - "blockDataExpiryAfterNotAccessPeriodNanos": "300000000000", - "futureRetentionPeriodNanos": "0" - }, - "snapshotEnabled": true, - "indexOptions": { - "enabled": true, - "blockSizeNanos": "1800000000000" - }, - "schemaOptions": null, - "coldWritesEnabled": false, - "runtimeOptions": null - } - } - } - }, - "placement": { - "placement": { - "instances": { - "m3db_local": { - "id": "m3db_local", - "isolationGroup": "local", - "zone": "embedded", - "weight": 1, - "endpoint": "127.0.0.1:9000", - "shards": [ - { - "id": 0, - "state": "INITIALIZING", - "sourceId": "", - "cutoverNanos": "0", - "cutoffNanos": "0" - }, - … - { - "id": 63, - "state": "INITIALIZING", - "sourceId": "", - "cutoverNanos": "0", - "cutoffNanos": "0" - } - ], - "shardSetId": 0, - "hostname": "localhost", - "port": 9000, - "metadata": { - "debugPort": 0 - } - } - }, - "replicaFactor": 1, - "numShards": 64, - "isSharded": true, - "cutoverTime": "0", - "isMirrored": false, - "maxShardSetId": 0 - }, - "version": 0 - } -} -``` - -{{< /tab >}} -{{< /tabs >}} - -Placement initialization can take a minute or two. Once all the shards have the `AVAILABLE` state, the node has finished bootstrapping, and you should see the following messages in the node console output. 
- - - -```shell -{"level":"info","ts":1598367624.0117292,"msg":"bootstrap marking all shards as bootstrapped","namespace":"default","namespace":"default","numShards":64} -{"level":"info","ts":1598367624.0301404,"msg":"bootstrap index with bootstrapped index segments","namespace":"default","numIndexBlocks":0} -{"level":"info","ts":1598367624.0301914,"msg":"bootstrap success","numShards":64,"bootstrapDuration":0.049208827} -{"level":"info","ts":1598367624.03023,"msg":"bootstrapped"} -``` - -You can check on the status by calling the _{{% apiendpoint %}}placement_ endpoint: - -{{< tabs name="check_placement" >}} -{{% tab name="Command" %}} - -```shell -curl {{% apiendpoint %}}placement | jq . -``` - -{{% /tab %}} -{{% tab name="Output" %}} - -```json -{ - "placement": { - "instances": { - "m3db_local": { - "id": "m3db_local", - "isolationGroup": "local", - "zone": "embedded", - "weight": 1, - "endpoint": "127.0.0.1:9000", - "shards": [ - { - "id": 0, - "state": "AVAILABLE", - "sourceId": "", - "cutoverNanos": "0", - "cutoffNanos": "0" - }, - … - { - "id": 63, - "state": "AVAILABLE", - "sourceId": "", - "cutoverNanos": "0", - "cutoffNanos": "0" - } - ], - "shardSetId": 0, - "hostname": "localhost", - "port": 9000, - "metadata": { - "debugPort": 0 - } - } - }, - "replicaFactor": 1, - "numShards": 64, - "isSharded": true, - "cutoverTime": "0", - "isMirrored": false, - "maxShardSetId": 0 - }, - "version": 2 -} -``` - -{{% /tab %}} -{{< /tabs >}} - -{{% notice tip %}} -[Read more about the bootstrapping process](/docs/operational_guide/bootstrapping_crash_recovery/). -{{% /notice %}} - -### Ready a Namespace - -Once a namespace has finished bootstrapping, you must mark it as ready before receiving traffic by using the _{{% apiendpoint %}}namespace/ready_. 
- -{{< tabs name="ready_namespaces" >}} -{{% tab name="Command" %}} - -{{% codeinclude file="quickstart/ready-namespace.sh" language="shell" %}} - -{{% /tab %}} -{{% tab name="Output" %}} - -```json -{ - "ready": true -} -``` - -{{% /tab %}} -{{< /tabs >}} - -### View Details of a Namespace - -You can also view the attributes of all namespaces by calling the _{{% apiendpoint %}}namespace_ endpoint - -{{< tabs name="check_namespaces" >}} -{{% tab name="Command" %}} - -```shell -curl {{% apiendpoint %}}namespace | jq . -``` - -{{% notice tip %}} -Add `?debug=1` to the request to convert nano units in the output into standard units. -{{% /notice %}} - -{{% /tab %}} -{{% tab name="Output" %}} - -```json -{ - "registry": { - "namespaces": { - "default": { - "bootstrapEnabled": true, - "flushEnabled": true, - "writesToCommitLog": true, - "cleanupEnabled": true, - "repairEnabled": false, - "retentionOptions": { - "retentionPeriodNanos": "43200000000000", - "blockSizeNanos": "1800000000000", - "bufferFutureNanos": "120000000000", - "bufferPastNanos": "600000000000", - "blockDataExpiry": true, - "blockDataExpiryAfterNotAccessPeriodNanos": "300000000000", - "futureRetentionPeriodNanos": "0" - }, - "snapshotEnabled": true, - "indexOptions": { - "enabled": true, - "blockSizeNanos": "1800000000000" - }, - "schemaOptions": null, - "coldWritesEnabled": false, - "runtimeOptions": null - } - } - } -} -``` - -{{% /tab %}} -{{< /tabs >}} - -## Writing and Querying Metrics - -### Writing Metrics - -M3 supports ingesting [statsd](https://github.com/statsd/statsd#usage) and [Prometheus](https://prometheus.io/docs/concepts/data_model/) formatted metrics. - -This quickstart focuses on Prometheus metrics which consist of a value, a timestamp, and tags to bring context and meaning to the metric. 
- -You can write metrics using one of two endpoints: - -- _[{{% apiendpoint %}}prom/remote/write](/docs/m3coordinator/api/remote/)_ - Write a Prometheus remote write query to M3DB with a binary snappy compressed Prometheus WriteRequest protobuf message. -- _{{% apiendpoint %}}json/write_ - Write a JSON payload of metrics data. This endpoint is quick for testing purposes but is not as performant for production usage. - -For this quickstart, use the _{{% apiendpoint %}}json/write_ endpoint to write a tagged metric to M3DB with the following data in the request body, all fields are required: - -- `tags`: An object of at least one `name`/`value` pairs -- `timestamp`: The UNIX timestamp for the data -- `value`: The value for the data, can be of any type - -{{% notice tip %}} -The examples below use `__name__` as the name for one of the tags, which is a Prometheus reserved tag that allows you to query metrics using the value of the tag to filter results. -{{% /notice %}} - -{{% notice tip %}} -Label names may contain ASCII letters, numbers, underscores, and Unicode characters. They must match the regex `[a-zA-Z_][a-zA-Z0-9_]*`. Label names beginning with `__` are reserved for internal use. [Read more in the Prometheus documentation](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). -{{% /notice %}} - -{{< tabs name="write_metrics" >}} -{{< tab name="Command 1" >}} - -{{% codeinclude file="quickstart/write-metrics-1.sh" language="shell" %}} - -{{< /tab >}} -{{< tab name="Command 2" >}} - -{{% codeinclude file="quickstart/write-metrics-2.sh" language="shell" %}} - -{{< /tab >}} -{{< tab name="Command 3" >}} - -{{% codeinclude file="quickstart/write-metrics-3.sh" language="shell" %}} - -{{< /tab >}} -{{< /tabs >}} - -### Querying metrics - -M3DB supports three query engines: Prometheus (default), Graphite, and the M3 Query Engine. 
- -This quickstart uses Prometheus as the query engine, and you have access to [all the features of PromQL queries](https://prometheus.io/docs/prometheus/latest/querying/basics/). - -To query metrics, use the _{{% apiendpoint %}}query_range_ endpoint with the following data in the request body, all fields are required: - -- `query`: A PromQL query -- `start`: Timestamp in `RFC3339Nano` of start range for results -- `end`: Timestamp in `RFC3339Nano` of end range for results -- `step`: A duration or float of the query resolution, the interval between results in the timespan between `start` and `end`. - -Below are some examples using the metrics written above. - -#### Return results in past 45 seconds - -{{< tabs name="example_promql_regex" >}} -{{% tab name="Linux" %}} - - - -```shell -curl -X "POST" -G "{{% apiendpoint %}}query_range" \ - -d "query=third_avenue" \ - -d "start=$(date "+%s" -d "45 seconds ago")" \ - -d "end=$( date +%s )" \ - -d "step=5s" | jq . -``` - -{{% /tab %}} -{{% tab name="macOS/BSD" %}} - -```shell -curl -X "POST" -G "{{% apiendpoint %}}query_range" \ - -d "query=third_avenue" \ - -d "start=$( date -v -45S +%s )" \ - -d "end=$( date +%s )" \ - -d "step=5s" | jq . -``` - -{{% /tab %}} -{{% tab name="Output" %}} - -```json -{ - "status": "success", - "data": { - "resultType": "matrix", - "result": [ - { - "metric": { - "__name__": "third_avenue", - "checkout": "1", - "city": "new_york" - }, - "values": [ - [ - {{% now %}}, - "3347.26" - ], - [ - {{% now %}}, - "5347.26" - ], - [ - {{% now %}}, - "7347.26" - ] - ] - } - ] - } -} -``` - -{{% /tab %}} -{{< /tabs >}} - -#### Values above a certain number - -{{< tabs name="example_promql_range" >}} -{{% tab name="Linux" %}} - - - -```shell -curl -X "POST" -G "{{% apiendpoint %}}query_range" \ - -d "query=third_avenue > 6000" \ - -d "start=$(date "+%s" -d "45 seconds ago")" \ - -d "end=$( date +%s )" \ - -d "step=5s" | jq . 
-``` - -{{% /tab %}} -{{% tab name="macOS/BSD" %}} - -```shell -curl -X "POST" -G "{{% apiendpoint %}}query_range" \ - -d "query=third_avenue > 6000" \ - -d "start=$(date -v -45S "+%s")" \ - -d "end=$( date +%s )" \ - -d "step=5s" | jq . -``` - -{{% /tab %}} -{{% tab name="Output" %}} - -```json -{ - "status": "success", - "data": { - "resultType": "matrix", - "result": [ - { - "metric": { - "__name__": "third_avenue", - "checkout": "1", - "city": "new_york" - }, - "values": [ - [ - {{% now %}}, - "7347.26" - ] - ] - } - ] - } -} -``` - -{{% /tab %}} -{{< /tabs >}} - - +{{% fileinclude file="quickstart-common-steps.md" %}} diff --git a/site/content/quickstart/kubernetes.md b/site/content/quickstart/kubernetes.md deleted file mode 100644 index cbf0f74b95..0000000000 --- a/site/content/quickstart/kubernetes.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -linkTitle: "Kubernetes" -title: Create a M3DB Cluster on Kubernetes -weight: 2 ---- - -1. Meet the M3DB Kubernetes operator [requirements guide](/docs/operator/getting_started/requirements). -2. Follow the M3DB Kubernetes operator [installation guide](/docs/operator/getting_started/installation). -3. Read the M3DB Kubernetes operator [configuration guide](/docs/operator/configuration/configuring_m3db) and configure [namespaces](/docs/operator/configuration/namespaces). 
diff --git a/site/content/quickstart/write-metrics-1.sh b/site/content/quickstart/write-metrics-1.sh new file mode 100755 index 0000000000..453100c993 --- /dev/null +++ b/site/content/quickstart/write-metrics-1.sh @@ -0,0 +1,11 @@ +#!/bin/bash +curl -X POST http://localhost:7201/api/v1/json/write -d '{ + "tags": + { + "__name__": "third_avenue", + "city": "new_york", + "checkout": "1" + }, + "timestamp": '\"$(date "+%s")\"', + "value": 3347.26 +}' diff --git a/site/content/quickstart/write-metrics-2.sh b/site/content/quickstart/write-metrics-2.sh new file mode 100755 index 0000000000..eea2d30348 --- /dev/null +++ b/site/content/quickstart/write-metrics-2.sh @@ -0,0 +1,11 @@ +#!/bin/bash +curl -X POST http://localhost:7201/api/v1/json/write -d '{ + "tags": + { + "__name__": "third_avenue", + "city": "new_york", + "checkout": "1" + }, + "timestamp": '\"$(date "+%s")\"', + "value": 5347.26 +}' diff --git a/site/content/quickstart/write-metrics-3.sh b/site/content/quickstart/write-metrics-3.sh new file mode 100755 index 0000000000..e6b3a5f046 --- /dev/null +++ b/site/content/quickstart/write-metrics-3.sh @@ -0,0 +1,11 @@ +#!/bin/bash +curl -X POST http://localhost:7201/api/v1/json/write -d '{ + "tags": + { + "__name__": "third_avenue", + "city": "new_york", + "checkout": "1" + }, + "timestamp": '\"$(date "+%s")\"', + "value": 7347.26 +}' diff --git a/site/content/troubleshooting/_index.md b/site/content/troubleshooting/_index.md index 6de538ee64..60f378d6ab 100644 --- a/site/content/troubleshooting/_index.md +++ b/site/content/troubleshooting/_index.md @@ -20,7 +20,7 @@ If an m3db node hasn't been able to snapshot for awhile, or is stuck in the comm ## Nodes a crashing with memory allocation errors, but there's plenty of available memory -Ensure you've set `vm.max_map_count` to something like 262,144 using sysctl. Find out more in the [Clustering the Hard Way](/docs/how_to/cluster_hard_way#kernel) document. 
+Ensure you've set `vm.max_map_count` to something like 262,144 using sysctl. Find out more in the [Clustering the Hard Way](/docs/operational_guide/kernel_configuration) document. ## What to do if my M3DB node is OOM’ing? diff --git a/site/go.mod b/site/go.mod index 0aa9a1753c..4ba2e34207 100644 --- a/site/go.mod +++ b/site/go.mod @@ -1,5 +1,3 @@ module m3-site go 1.15 - -require github.com/chronosphereio/victor v0.0.0-20201122114854-310af010cab1 // indirect diff --git a/site/go.sum b/site/go.sum index e363b4303b..e69de29bb2 100644 --- a/site/go.sum +++ b/site/go.sum @@ -1,15 +0,0 @@ -github.com/chronosphereio/docs-theme v0.0.0-20201009145234-6eb51c4ba87e/go.mod h1:vmH57xlaJmtH7jjovyuwXKe+2316CnpaFShoEAG72gQ= -github.com/chronosphereio/docs-theme v0.0.0-20201009164131-d9219ac30467 h1:YtTpgpzrlYMN0nTiPTv0dNeBG3kx+AIisv/wbNrqEZU= -github.com/chronosphereio/docs-theme v0.0.0-20201009164131-d9219ac30467/go.mod h1:vmH57xlaJmtH7jjovyuwXKe+2316CnpaFShoEAG72gQ= -github.com/chronosphereio/docs-theme v0.0.0-20201022162748-0ed11ce73f36 h1:Wz/dFFd3bVR+XZ7shqLyuZwyIh5yDbhIhdbdkFEFnH4= -github.com/chronosphereio/docs-theme v0.0.0-20201022162748-0ed11ce73f36/go.mod h1:vmH57xlaJmtH7jjovyuwXKe+2316CnpaFShoEAG72gQ= -github.com/chronosphereio/victor v0.0.0-20201116094105-f1b13fb86890 h1:yO288wpyv4dr3nXdXjIsEM60DmeLzC4XquvnKCvoLR0= -github.com/chronosphereio/victor v0.0.0-20201116094105-f1b13fb86890/go.mod h1:wz1ngMsk+1D1ug2ObnI3zXs+/ZdBPrWLb6R1WQW3XNM= -github.com/chronosphereio/victor v0.0.0-20201116123616-0454e7256e57 h1:EXZaeDfAkZsOYoP3zCyZlhb+PXZO/PQSmilpTX8bX+0= -github.com/chronosphereio/victor v0.0.0-20201116123616-0454e7256e57/go.mod h1:wz1ngMsk+1D1ug2ObnI3zXs+/ZdBPrWLb6R1WQW3XNM= -github.com/chronosphereio/victor v0.0.0-20201116125303-247fa0ea9ed5 h1:/eksfMA9uddkIKZ5A6zcpVHjASfV6sVuNDXHSAgMtx0= -github.com/chronosphereio/victor v0.0.0-20201116125303-247fa0ea9ed5/go.mod h1:wz1ngMsk+1D1ug2ObnI3zXs+/ZdBPrWLb6R1WQW3XNM= -github.com/chronosphereio/victor 
v0.0.0-20201116163333-353bdc2746cd h1:6iKb0tNHpJX+3WKyv0s/dZ1IN8U7CMylYRPsv7Rjdpo= -github.com/chronosphereio/victor v0.0.0-20201116163333-353bdc2746cd/go.mod h1:wz1ngMsk+1D1ug2ObnI3zXs+/ZdBPrWLb6R1WQW3XNM= -github.com/chronosphereio/victor v0.0.0-20201122114854-310af010cab1 h1:N7NEcoufF4Suq0cS90d+O5OIAsdrZPFOMHuIoX5+LVo= -github.com/chronosphereio/victor v0.0.0-20201122114854-310af010cab1/go.mod h1:wz1ngMsk+1D1ug2ObnI3zXs+/ZdBPrWLb6R1WQW3XNM= diff --git a/src/dbnode/config/m3dbnode-cluster-template.yml b/src/dbnode/config/m3dbnode-cluster-template.yml index 4d6689a84c..ed3467d497 100644 --- a/src/dbnode/config/m3dbnode-cluster-template.yml +++ b/src/dbnode/config/m3dbnode-cluster-template.yml @@ -10,18 +10,15 @@ db: resolver: hostname # Fill-out the following and un-comment before using. -# config: -# service: -# env: default_env -# zone: embedded -# service: m3db -# cacheDir: /var/lib/m3kv -# etcdClusters: -# - zone: embedded -# endpoints: -# - HOST1_STATIC_IP_ADDRESS:2379 -# - HOST2_STATIC_IP_ADDRESS:2379 -# - HOST3_STATIC_IP_ADDRESS:2379 + # config: + # discovery: + # type: m3db_cluster + # m3dbCluster: + # env: default_env + # endpoints: + # - HOST1_STATIC_IP_ADDRESS:2379 + # - HOST2_STATIC_IP_ADDRESS:2379 + # - HOST3_STATIC_IP_ADDRESS:2379 # seedNodes: # initialCluster: # - hostID: host1