diff --git a/x-pack/dockerlogbeat/docs/configuration.asciidoc b/x-pack/dockerlogbeat/docs/configuration.asciidoc
new file mode 100644
index 00000000000..7ffaa48154b
--- /dev/null
+++ b/x-pack/dockerlogbeat/docs/configuration.asciidoc
@@ -0,0 +1,302 @@
+[[log-driver-configuration]]
+[role="xpack"]
+== {log-driver} configuration options
+
+++++
+Configuration options
+++++
+
+experimental[]
+
+Use the following options to configure the {log-driver-long}. You can
+pass these options with the `--log-opt` flag when you start a container, or
+you can set them in the `daemon.json` file for all containers.
+
+* <<cloud-options>>
+* <<es-output-options>>
+* <<ls-output-options>>
+* <<kafka-output-options>>
+* <<redis-output-options>>
+
+[float]
+=== Usage examples
+
+To set configuration options when you start a container:
+
+include::install.asciidoc[tag=log-driver-run]
+
+To set configuration options for all containers in the `daemon.json` file:
+
+include::install.asciidoc[tag=log-driver-daemon]
+
+For more examples, see <<log-driver-usage-examples>>.
+
+[float]
+[[cloud-options]]
+=== {ecloud} options
+
+[options="header"]
+|=====
+|Option | Description
+
+|`cloud.id`
+|The Cloud ID found in the Elastic Cloud web console. This ID is
+used to resolve the {stack} URLs when connecting to {ess} on {ecloud}.
+
+|`cloud.auth`
+|The username and password combination for connecting to {ess} on {ecloud}. The
+format is `"username:password"`.
+|=====
+
+[float]
+[[es-output-options]]
+=== {es} output options
+
+// TODO: Add the following settings. Syntax is a little different so we might
+// need to add daemon examples that show how to specify these settings:
+// `output.elasticsearch.indices`
+// `output.elasticsearch.pipelines`
+
+[options="header"]
+|=====
+|Option |Default |Description
+
+|`output.elasticsearch.hosts`
+|`"localhost:9200"`
+|The list of {es} nodes to connect to. Specify each node as a `URL` or
+`IP:PORT`. For example: `http://192.0.2.0`, `https://myhost:9230` or
+`192.0.2.0:9300`. If no port is specified, the default is `9200`.
+
+|`output.elasticsearch.protocol`
+|`http`
+|The protocol (`http` or `https`) that {es} is reachable on. If you specify a
+URL for `hosts`, the value of `protocol` is overridden by whatever scheme you
+specify in the URL.
+
+|`output.elasticsearch.username`
+|
+|The basic authentication username for connecting to {es}.
+
+|`output.elasticsearch.password`
+|
+|The basic authentication password for connecting to {es}.
+
+|`output.elasticsearch.index`
+|
+|A {beats-ref}/config-file-format-type.html#_format_string_sprintf[format string]
+value that specifies the index to write events to when you're using daily
+indices. For example: +"dockerlogs-%{+yyyy.MM.dd}"+.
+
+3+|*Advanced:*
+
+|`output.elasticsearch.backoff.init`
+|`1s`
+|The number of seconds to wait before trying to reconnect to {es} after
+a network error. After waiting `backoff.init` seconds, the {log-driver}
+tries to reconnect. If the attempt fails, the backoff timer is increased
+exponentially up to `backoff.max`. After a successful connection, the backoff
+timer is reset.
+
+|`output.elasticsearch.backoff.max`
+|`60s`
+|The maximum number of seconds to wait before attempting to connect to
+{es} after a network error.
+
+|`output.elasticsearch.bulk_max_size`
+|`50`
+|The maximum number of events to bulk in a single {es} bulk API index request.
+Specify 0 to allow the queue to determine the batch size.
+
+|`output.elasticsearch.compression_level`
+|`0`
+|The gzip compression level. Valid compression levels range from 1 (best speed)
+to 9 (best compression). Specify 0 to disable compression. Higher compression
+levels reduce network usage, but increase CPU usage.
+
+|`output.elasticsearch.escape_html`
+|`false`
+|Whether to escape HTML in strings.
+
+|`output.elasticsearch.headers`
+|
+|Custom HTTP headers to add to each request created by the {es} output. Specify
+multiple header values for the same header name by separating them with a comma.
+
+|`output.elasticsearch.loadbalance`
+|`false`
+|Whether to load balance when sending events to multiple hosts. The load
+balancer also supports multiple workers per host (see
+`output.elasticsearch.worker`).
+
+|`output.elasticsearch.max_retries`
+|`3`
+|The number of times to retry publishing an event after a publishing failure.
+After the specified number of retries, the events are typically dropped. Specify
+0 to retry indefinitely.
+
+|`output.elasticsearch.parameters`
+|
+| A dictionary of HTTP parameters to pass within the URL with index operations.
+
+|`output.elasticsearch.path`
+|
+|An HTTP path prefix that is prepended to the HTTP API calls. This is useful for
+cases where {es} listens behind an HTTP reverse proxy that exports the API under
+a custom prefix.
+
+|`output.elasticsearch.pipeline`
+|
+|A {beats-ref}/config-file-format-type.html#_format_string_sprintf[format string]
+value that specifies the {ref}/ingest.html[ingest node pipeline] to write events
+to.
+
+|`output.elasticsearch.proxy_url`
+|
+|The URL of the proxy to use when connecting to the {es} servers. Specify a
+`URL` or `IP:PORT`.
+
+|`output.elasticsearch.timeout`
+|`90`
+|The HTTP request timeout in seconds for the {es} request.
+
+|`output.elasticsearch.worker`
+|`1`
+|The number of workers per configured host publishing events to {es}. Use with
+load balancing mode (`output.elasticsearch.loadbalance`) set to `true`. Example:
+If you have 2 hosts and 3 workers, in total 6 workers are started (3 for each
+host).
+
+|=====
+
+
+[float]
+[[ls-output-options]]
+=== {ls} output options
+
+[options="header"]
+|=====
+|Option | Default | Description
+
+|`output.logstash.hosts`
+|`"localhost:5044"`
+|The list of known {ls} servers to connect to. If load balancing is
+disabled, but multiple hosts are configured, one host is selected randomly
+(there is no precedence). If one host becomes unreachable, another one is
+selected randomly. If no port is specified, the default is `5044`.
+
+|`output.logstash.index`
+|
+|The index root name to write events to. For example +"dockerlogs"+ generates
++"dockerlogs-{version}"+ indices.
+
+3+|*Advanced:*
+
+|`output.logstash.backoff.init`
+|`1s`
+|The number of seconds to wait before trying to reconnect to {ls} after
+a network error. After waiting `backoff.init` seconds, the {log-driver}
+tries to reconnect. If the attempt fails, the backoff timer is increased
+exponentially up to `backoff.max`. After a successful connection, the backoff
+timer is reset.
+
+|`output.logstash.backoff.max`
+|`60s`
+|The maximum number of seconds to wait before attempting to connect to
+{ls} after a network error.
+
+|`output.logstash.bulk_max_size`
+|`2048`
+|The maximum number of events to bulk in a single {ls} request. Specify 0 to
+allow the queue to determine the batch size.
+
+|`output.logstash.compression_level`
+|`0`
+|The gzip compression level. Valid compression levels range from 1 (best speed)
+to 9 (best compression). Specify 0 to disable compression. Higher compression
+levels reduce network usage, but increase CPU usage.
+
+|`output.logstash.escape_html`
+|`false`
+|Whether to escape HTML in strings.
+
+|`output.logstash.loadbalance`
+|`false`
+|Whether to load balance when sending events to multiple {ls} hosts. If set to
+`false`, the driver sends all events to only one host (determined at random) and
+switches to another host if the selected one becomes unresponsive.
+
+|`output.logstash.pipelining`
+|`2`
+|The number of batches to send asynchronously to {ls} while waiting for an ACK
+from {ls}. Specify 0 to disable pipelining.
+
+|`output.logstash.proxy_url`
+|
+|The URL of the SOCKS5 proxy to use when connecting to the {ls} servers. The
+value must be a URL with a scheme of `socks5://`. You can embed a
+username and password in the URL (for example,
+`socks5://user:password@socks5-proxy:2233`).
+
+|`output.logstash.proxy_use_local_resolver`
+|`false`
+|Whether to resolve {ls} hostnames locally when using a proxy. If `false`,
+name resolution occurs on the proxy server.
+
+|`output.logstash.slow_start`
+|`false`
+|When enabled, only a subset of events in a batch are transferred per
+transaction. If there are no errors, the number of events per transaction
+is increased up to the bulk max size (see `output.logstash.bulk_max_size`).
+On error, the number of events per transaction is reduced again.
+
+|`output.logstash.timeout`
+|`30`
+|The number of seconds to wait for responses from the {ls} server before
+timing out.
+
+|`output.logstash.ttl`
+|`0`
+|Time to live for a connection to {ls} after which the connection will be
+re-established. Useful when {ls} hosts represent load balancers. Because
+connections to {ls} hosts are sticky, operating behind load balancers can lead
+to uneven load distribution across instances. Specify a TTL on the connection
+to distribute connections across instances. Specify 0 to disable this feature.
+This option is not supported if `output.logstash.pipelining` is set.
+
+|`output.logstash.worker`
+|`1`
+|The number of workers per configured host publishing events to {ls}. Use with
+load balancing mode (`output.logstash.loadbalance`) set to `true`. Example:
+If you have 2 hosts and 3 workers, in total 6 workers are started (3 for each
+host).
+
+|=====
+
+[float]
+[[kafka-output-options]]
+=== Kafka output options
+
+// TODO: Add kafka output options here.
+
+// NOTE: The following annotation renders as: "Coming in a future update. This
+// documentation is a work in progress."
+
+coming[a future update. This documentation is a work in progress]
+
+Need the docs now? See the
+{filebeat-ref}/kafka-output.html[Kafka output docs] for {filebeat}.
+The {log-driver} supports most of the same options; just make sure you use
+the fully qualified setting names.
+
+[float]
+[[redis-output-options]]
+=== Redis output options
+
+// TODO: Add Redis output options here.
+
+coming[a future update. This documentation is a work in progress]
+
+Need the docs now? See the
+{filebeat-ref}/redis-output.html[Redis output docs] for {filebeat}.
+The {log-driver} supports most of the same options; just make sure you use
+the fully qualified setting names.
diff --git a/x-pack/dockerlogbeat/docs/index.asciidoc b/x-pack/dockerlogbeat/docs/index.asciidoc
new file mode 100644
index 00000000000..ca3f2cd4007
--- /dev/null
+++ b/x-pack/dockerlogbeat/docs/index.asciidoc
@@ -0,0 +1,25 @@
+:libbeat-dir: {docdir}/../../../libbeat/docs
+:log-driver: Elastic Logging Plugin
+:log-driver-long: Elastic Logging Plugin for Docker
+:log-driver-alias: elastic-logging-plugin
+:docker-version: Engine API 1.25
+
+= {log-driver} for Docker
+
+include::{libbeat-dir}/version.asciidoc[]
+
+include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[]
+
+include::{asciidoc-dir}/../../shared/attributes.asciidoc[]
+
+include::overview.asciidoc[]
+
+include::install.asciidoc[]
+
+include::configuration.asciidoc[]
+
+include::usage.asciidoc[]
+
+include::troubleshooting.asciidoc[]
+
+include::limitations.asciidoc[]
diff --git a/x-pack/dockerlogbeat/docs/install.asciidoc b/x-pack/dockerlogbeat/docs/install.asciidoc
new file mode 100644
index 00000000000..552dcd3f71e
--- /dev/null
+++ b/x-pack/dockerlogbeat/docs/install.asciidoc
@@ -0,0 +1,116 @@
+[[log-driver-installation]]
+[role="xpack"]
+== Install and configure the {log-driver}
+
+++++
+Install and configure
+++++
+
+experimental[]
+
+[float]
+=== Before you begin
+
+Make sure your system meets the following prerequisites:
+
+* Docker: {docker-version} or later
+* {stack}: Version 7.6.0 or later
+
+[float]
+=== Step 1: Install the {log-driver} plugin
+
+// TODO: Test the following commands when the driver is available on docker hub.
+
+1. Install the plugin. You can install it from the Docker store (recommended),
+or build and install the plugin from source in the
+https://github.com/elastic/beats[beats] GitHub repo.
++
+*To install from the Docker store:*
++
+["source","sh",subs="attributes"]
+----
+docker plugin install store/elastic/{log-driver-alias}:{version} --alias {log-driver-alias}
+----
++
+*To build and install from source:*
++
+{beats-devguide}/beats-contributing.html#setting-up-dev-environment[Set up your
+development environment] as described in the _Beats Developer Guide_ then run:
++
+[source,shell]
+----
+cd x-pack/dockerlogbeat
+mage BuildAndInstall
+----
+
+2. If necessary, enable the plugin:
++
+["source","sh",subs="attributes"]
+----
+docker plugin enable elastic/{log-driver-alias}:{version}
+----
+
+3. Verify that the plugin is installed and enabled:
++
+[source,shell]
+----
+docker plugin ls
+----
++
+The output should say something like:
++
+["source","sh",subs="attributes"]
+----
+ID NAME DESCRIPTION ENABLED
+c2ff9d2cf090 elastic/{log-driver-alias}:{version} A beat for docker logs true
+----
+
+[float]
+=== Step 2: Configure the {log-driver}
+
+You can set configuration options for a single container, or for all containers
+running on the host. See <<log-driver-configuration>> for a list of
+supported configuration options.
+
+*To configure a single container:*
+
+Pass configuration options at run time when you start the container. For
+example:
+
+// tag::log-driver-run[]
+["source","sh",subs="attributes"]
+----
+docker run --log-driver=elastic/{log-driver-alias}:{version} \
+ --log-opt output.elasticsearch.hosts="https://myhost:9200" \
+ --log-opt output.elasticsearch.username="myusername" \
+ --log-opt output.elasticsearch.password="mypassword" \
+ --log-opt output.elasticsearch.index="elastic-log-driver-%{+yyyy.MM.dd}" \
+ -it debian:jessie /bin/bash
+----
+// end::log-driver-run[]
+
+*To configure all containers running on the host:*
+
+Set configuration options in the Docker `daemon.json` configuration file. For
+example:
+
+// tag::log-driver-daemon[]
+[source,json,subs="attributes"]
+----
+{
+ "log-driver" : "elastic/{log-driver-alias}:{version}",
+ "log-opts" : {
+ "output.elasticsearch.hosts" : "https://myhost:9200",
+ "output.elasticsearch.username" : "myusername",
+ "output.elasticsearch.password" : "mypassword",
+ "output.elasticsearch.index" : "elastic-log-driver-%{+yyyy.MM.dd}"
+ }
+}
+----
+// end::log-driver-daemon[]
+
+NOTE: The default location of the `daemon.json` file varies by platform. On
+Linux, the default location is `/etc/docker/daemon.json`. For more information,
+see the
+https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file[Docker
+docs].
diff --git a/x-pack/dockerlogbeat/docs/limitations.asciidoc b/x-pack/dockerlogbeat/docs/limitations.asciidoc
new file mode 100644
index 00000000000..7ccb9cfeacb
--- /dev/null
+++ b/x-pack/dockerlogbeat/docs/limitations.asciidoc
@@ -0,0 +1,13 @@
+[[log-driver-limitations]]
+[role="xpack"]
+== Known problems and limitations
+
+experimental[]
+
+This release of the {log-driver} has the following known problems and
+limitations:
+
+* Spool to disk (beta) is not supported.
+* Complex config options can't be easily represented via `--log-opt`.
+* Mapping templates and other assets that are normally installed by the
+{beats} setup are not available.
diff --git a/x-pack/dockerlogbeat/docs/overview.asciidoc b/x-pack/dockerlogbeat/docs/overview.asciidoc
new file mode 100644
index 00000000000..78bc7692865
--- /dev/null
+++ b/x-pack/dockerlogbeat/docs/overview.asciidoc
@@ -0,0 +1,19 @@
+[[log-driver-overview]]
+[role="xpack"]
+== {log-driver} overview
+
+++++
+Overview
+++++
+
+experimental[]
+
+The {log-driver} is a Docker plugin that sends container logs to the
+https://www.elastic.co/elastic-stack[{stack}], where you can search, analyze,
+and visualize the data in real time.
+
+The {log-driver} is built on top of the https://www.elastic.co/beats[{beats}]
+platform and supports many of the features and outputs supported by the
+{beats} shippers.
+
+Beat users: see <<log-driver-limitations>>.
diff --git a/x-pack/dockerlogbeat/docs/troubleshooting.asciidoc b/x-pack/dockerlogbeat/docs/troubleshooting.asciidoc
new file mode 100644
index 00000000000..478269ff6ec
--- /dev/null
+++ b/x-pack/dockerlogbeat/docs/troubleshooting.asciidoc
@@ -0,0 +1,42 @@
+[[log-driver-troubleshooting]]
+[role="xpack"]
+== Troubleshooting
+
+experimental[]
+
+You can set the debug level to capture debugging output about the {log-driver}.
+To set the debug level:
+
+1. Disable the plugin:
++
+["source","sh",subs="attributes"]
+----
+docker plugin disable elastic/{log-driver-alias}:{version}
+----
+
+2. Set the debug level:
++
+["source","sh",subs="attributes"]
+----
+docker plugin set elastic/{log-driver-alias}:{version} LOG_DRIVER_LEVEL=debug
+----
++
+Where valid settings for `LOG_DRIVER_LEVEL` are `debug`, `info`, `warning`, or
+`error`.
+
+3. Enable the plugin:
++
+["source","sh",subs="attributes"]
+----
+docker plugin enable elastic/{log-driver-alias}:{version}
+----
+
+To view the logs:
+
+On Linux, the {log-driver} logs are written to the same location as other
+docker logs, typically the system journal.
+
+On macOS, locating the logs is more complicated. For more information, see
+the
+https://github.com/elastic/beats/tree/{branch}/x-pack/dockerlogbeat#debugging-on-macos[Debugging
+on MacOS] section in the readme file.
\ No newline at end of file
diff --git a/x-pack/dockerlogbeat/docs/usage.asciidoc b/x-pack/dockerlogbeat/docs/usage.asciidoc
new file mode 100644
index 00000000000..b2100435baf
--- /dev/null
+++ b/x-pack/dockerlogbeat/docs/usage.asciidoc
@@ -0,0 +1,94 @@
+[[log-driver-usage-examples]]
+== {log-driver} usage examples
+
+++++
+Usage examples
+++++
+
+experimental[]
+
+The following examples show common configurations for the {log-driver}.
+
+[float]
+=== Send Docker logs to {es}
+
+*Docker run command:*
+
+["source","sh",subs="attributes"]
+----
+docker run --log-driver=elastic/{log-driver-alias}:{version} \
+ --log-opt output.elasticsearch.hosts="myhost:9200" \
+ --log-opt output.elasticsearch.protocol="https" \
+ --log-opt output.elasticsearch.username="myusername" \
+ --log-opt output.elasticsearch.password="mypassword" \
+ --log-opt output.elasticsearch.index="elastic-log-driver-%{+yyyy.MM.dd}" \
+ -it debian:jessie /bin/bash
+----
+
+*Daemon configuration:*
+
+["source","json",subs="attributes"]
+----
+{
+ "log-driver" : "elastic/{log-driver-alias}:{version}",
+ "log-opts" : {
+ "output.elasticsearch.hosts" : "myhost:9200",
+ "output.elasticsearch.protocol" : "https",
+ "output.elasticsearch.username" : "myusername",
+ "output.elasticsearch.password" : "mypassword",
+ "output.elasticsearch.index" : "elastic-log-driver-%{+yyyy.MM.dd}"
+ }
+}
+----
+
+[float]
+=== Send Docker logs to {ess} on {ecloud}
+
+*Docker run command:*
+
+["source","sh",subs="attributes"]
+----
+docker run --log-driver=elastic/{log-driver-alias}:{version} \
+ --log-opt cloud.id="MyElasticStack:daMbY2VudHJhbDekZ2NwLmN4b3VkLmVzLmliJDVkYmQwtGJiYjs0NTRiN4Q5ODJmNGUwm1IxZmFkNjM5JDFiNjdkMDE4MTgxMTQzNTM5ZGFiYWJjZmY0OWIyYWE5" \
+ --log-opt cloud.auth="myusername:mypassword" \
+ --log-opt output.elasticsearch.index="elastic-log-driver-%{+yyyy.MM.dd}" \
+ -it debian:jessie /bin/bash
+----
+
+*Daemon configuration:*
+
+["source","json",subs="attributes"]
+----
+{
+ "log-driver" : "elastic/{log-driver-alias}:{version}",
+ "log-opts" : {
+ "cloud.id" : "MyElasticStack:daMbY2VudHJhbDekZ2NwLmN4b3VkLmVzLmliJDVkYmQwtGJiYjs0NTRiN4Q5ODJmNGUwm1IxZmFkNjM5JDFiNjdkMDE4MTgxMTQzNTM5ZGFiYWJjZmY0OWIyYWE5",
+ "cloud.auth" : "myusername:mypassword",
+ "output.elasticsearch.index" : "elastic-log-driver-%{+yyyy.MM.dd}"
+ }
+}
+----
+
+[float]
+=== Send Docker logs to {ls}
+
+*Docker run command:*
+
+["source","sh",subs="attributes"]
+----
+docker run --log-driver=elastic/{log-driver-alias}:{version} \
+ --log-opt output.logstash.hosts="myhost:5044" \
+ -it debian:jessie /bin/bash
+----
+
+*Daemon configuration:*
+
+["source","json",subs="attributes"]
+----
+{
+ "log-driver" : "elastic/{log-driver-alias}:{version}",
+ "log-opts" : {
+ "output.logstash.hosts" : "myhost:5044"
+ }
+}
+----