diff --git a/_modules/vault.py b/_modules/vault.py index ad05b28..0bba4ae 100644 --- a/_modules/vault.py +++ b/_modules/vault.py @@ -16,67 +16,246 @@ :configuration: The salt-master must be configured to allow peer-runner configuration, as well as configuration for the module. - Add this segment to the master configuration file, or - /etc/salt/master.d/vault.conf: + .. versionchanged:: pending_pr + + The configuration structure has changed significantly to account for many + new features. If found, the old configuration structure will be translated + to the new one automatically. + + To allow minions to pull configuration and credentials from the Salt master, + add this segment to the master configuration file: + + .. code-block:: yaml + + peer_run: + .*: + - vault.get_config + - vault.generate_new_token # relevant when ``issue:type`` == ``token`` + - vault.generate_secret_id # relevant when ``issue:type`` == ``approle`` + + Minimally required configuration: .. code-block:: yaml vault: - url: https://vault.service.domain:8200 - verify: /etc/ssl/certs/ca-certificates.crt - role_name: minion_role - namespace: vault_enterprice_namespace - auth: - method: approle - role_id: 11111111-2222-3333-4444-1111111111111 - secret_id: 11111111-1111-1111-1111-1111111111111 - policies: - - saltstack/minions - - saltstack/minion/{minion} - .. more policies - keys: - - n63/TbrQuL3xaIW7ZZpuXj/tIfnK1/MbVxO4vT3wYD2A - - S9OwCvMRhErEA4NVVELYBs6w/Me6+urgUr24xGK44Uy3 - - F1j4b7JKq850NS6Kboiy5laJ0xY8dWJvB3fcwA+SraYl - - 1cYtvjKJNDVam9c7HNqJUfINk4PYyAXIpjkpN/sIuzPv - - 3pPK5X6vGtwLhNOFv1U2elahECz3HpRUfNXJFYLw6lid - - url - Url to your Vault installation. Required. - - verify - For details please see - https://requests.readthedocs.io/en/master/user/advanced/#ssl-cert-verification - - .. versionadded:: 2018.3.0 - - namespaces - Optional Vault Namespace. Used with Vault enterprice - - For detail please see: - https://www.vaultproject.io/docs/enterprise/namespaces - - .. 
versionadded:: 3004 - - role_name - Role name for minion tokens created. If omitted, minion tokens will be - created without any role, thus being able to inherit any master token - policy (including token creation capabilities). Optional. + auth: + token: abcdefg-hijklmnop-qrstuvw + server: + urL: https://vault.example.com:8200 - For details please see: - https://www.vaultproject.io/api/auth/token/index.html#create-token + A sensible example configuration, e.g. in /etc/salt/master.d/vault.conf: - Example configuration: - https://www.nomadproject.io/docs/vault-integration/index.html#vault-token-role-configuration + .. code-block:: yaml + + vault: + auth: + method: approle + role_id: e5a7b66e-5d08-da9c-7075-71984634b882 + secret_id: 841771dc-11c9-bbc7-bcac-6a3945a69cd9 + cache: + backend: file + issue: + token: + role_name: salt_minion + params: + ttl: 30 + uses: 10 + policies: + assign: + - salt_minion + - salt_role_{pillar[roles]} + server: + url: https://vault.example.com:8200 + verify: /etc/ssl/cert.pem + + The above configuration requires the following policies for the master: + + .. code-block:: terraform + + # Issue tokens + path "auth/token/create" { + capabilities = ["create", "read", "update"] + } + + # Issue tokens with token roles + path "auth/token/create/*" { + capabilities = ["create", "read", "update"] + } + + A sensible example configuration that issues AppRoles to minions + from a separate authentication endpoint (notice differing mounts): + + .. 
code-block:: yaml + + vault: + auth: + method: approle + mount: approle # <-- mount the salt master authenticates at + role_id: e5a7b66e-5d08-da9c-7075-71984634b882 + secret_id: 841771dc-11c9-bbc7-bcac-6a3945a69cd9 + cache: + backend: file + issue: + type: approle + approle: + mount: salt-minions # <-- mount the salt master manages + metadata: + entity: + minion-id: '{minion}' + role: '{pillar[role]}' + server: + url: https://vault.example.com:8200 + verify: /etc/ssl/cert.pem + ext_pillar: + - vault: path=salt/minions/{minion} + - vault: path=salt/roles/{pillar[role]} + + The above configuration requires the following policies for the master: + + .. code-block:: terraform + + # List existing AppRoles + path "auth/salt-minions/role" { + capabilities = ["list"] + } + + # Manage AppRoles + path "auth/salt-minions/role/*" { + capabilities = ["read", "create", "update", "delete"] + } + + # Lookup mount accessor + path "sys/auth/salt-minions" { + capabilities = ["read", "sudo"] + } + + # Lookup entities by alias name (role-id) and alias mount accessor + path "identity/lookup/entity" { + capabilities = ["create", "update"] + allowed_parameters = { + "alias_name" = [] + "alias_mount_accessor" = ["auth_approle_0a1b2c3d"] + } + } + + # Manage entities with name prefix salt_minion_ + path "identity/entity/name/salt_minion_*" { + capabilities = ["read", "create", "update", "delete"] + } + + # Create entity aliases – you can restrict the mount_accessor + # This might allow privilege escalation in case the salt master + # is compromised and the attacker knows the entity ID of an + # entity with relevant policies attached - although you might + # have other problems at that point. + path "identity/entity-alias" { + capabilities = ["create", "update"] + allowed_parameters = { + "id" = [] + "canonical_id" = [] + "mount_accessor" = ["auth_approle_0a1b2c3d"] + "name" = [] + } + } + + This enables you to write templated ACL policies like: + + .. 
code-block:: terraform + + path "salt/data/minions/{{identity.entity.metadata.minion-id}}" { + capabilities = ["read"] + } + + path "salt/data/roles/{{identity.entity.metadata.role}}" { + capabilities = ["read"] + } + + + All possible master configuration options with defaults: + + .. code-block:: yaml + + vault: + auth: + approle_mount: approle + approle_name: salt-master + method: token + role_id: + secret_id: null + token: + cache: + backend: session + config: 3600 + secret: ttl + issue: + allow_minion_override_params: false + type: token + approle: + mount: salt-minions + params: + bind_secret_id: true + secret_id_num_uses: 1 + secret_id_ttl: 60 + token_explicit_max_ttl: 60 + token_num_uses: 10 + token: + role_name: null + params: + ttl: null + uses: 1 + wrap: 30s + keys: [] + metadata: + entity: + minion-id: '{minion}' + token: + saltstack-jid: '{jid}' + saltstack-minion: '{minion}' + saltstack-user: '{user}' + policies: + assign: + - saltstack/minions + - saltstack/{minion} + cache_time: 60 + refresh_pillar: null + server: + url: + namespace: null + verify: null auth - Currently only token and approle auth types are supported. Required. + ~~~~ + Contains authentication information for the local machine. + + approle_mount + .. versionadded:: pending_pr + + The name of the AppRole authentication mount point. Defaults to ``approle``. + + approle_name + .. versionadded:: pending_pr + + The name of the AppRole. Defaults to ``salt-master``. + + method + Currently only ``token`` and ``approle`` auth types are supported. + Defaults to ``token``. Approle is the preferred way to authenticate with Vault as it provide some advanced options to control authentication process. Please visit Vault documentation for more info: https://www.vaultproject.io/docs/auth/approle.html + role_id + The role ID of the AppRole. Required if auth:method == ``approle``. + + secret_id + The secret ID of the AppRole. + Only required if the configured role ID requires it. 
+ + token + Token to authenticate to Vault with. Required if auth:method == ``token``. + The token must be able to create tokens with the policies that should be assigned to minions. You can still use the token auth via a OS environment variable via this @@ -84,13 +263,15 @@ .. code-block:: yaml - vault: - url: https://vault.service.domain:8200 - auth: - method: token - token: sdb://osenv/VAULT_TOKEN - osenv: - driver: env + vault: + auth: + method: token + token: sdb://osenv/VAULT_TOKEN + server: + url: https://vault.service.domain:8200 + + osenv: + driver: env And then export the VAULT_TOKEN variable in your OS: @@ -98,57 +279,176 @@ export VAULT_TOKEN=11111111-1111-1111-1111-1111111111111 - Configuration keys ``uses`` or ``ttl`` may also be specified under ``auth`` - to configure the tokens generated on behalf of minions to be reused for the - defined number of uses or length of time in seconds. These settings may also be configured - on the minion when ``allow_minion_override`` is set to ``True`` in the master - config. + cache + ~~~~~ + Configures configuration cache on minions and secret cache on all hosts as well + as metadata cache for KV secrets. - Defining ``uses`` will cause the salt master to generate a token with that number of uses rather - than a single use token. This multi-use token will be cached on the minion. The type of minion - cache can be specified with ``token_backend: session`` or ``token_backend: disk``. The value of - ``session`` is the default, and will store the vault information in memory only for that session. - The value of ``disk`` will write to an on disk file, and persist between state runs (most - helpful for multi-use tokens). + backend + .. versionchanged:: pending_pr - .. code-block:: bash + This used to be found in ``auth:token_backend``. + + The cache backend in use. Defaults to ``session``, which will store the + vault information in memory only for that session. 
Setting this to anything + else will use the configured cache for minion data (:conf_master:`cache `), + by default the local filesystem. + + config + .. versionadded:: pending_pr + + The time in seconds to cache queried configuration from the master. + Defaults to 3600 (1h). + + secret + .. versionadded:: pending_pr + + The time in seconds to cache tokens/secret-ids for. Defaults to ``ttl``, + which caches the secret for as long as it is valid. + + issue + ~~~~~ + Configures authentication data issued by the master to minions. + + type + .. versionadded:: pending_pr + + The type of authentication to issue to minions. Can be ``token`` or ``approle``. + Defaults to ``token``. + + To be able to issue AppRoles to minions, the master needs to be able to + create new AppRoles on the configured auth mount (see policy example above). + It is strongly encouraged to create a separate mount dedicated to minions. + + approle + .. versionadded:: pending_pr + + Configuration regarding issued AppRoles. + + ``mount`` specifies the name of the auth mount the master manages. + Defaults to ``salt-minions``. This mount should be exclusively dedicated + to the Salt master. + + ``params`` configures the AppRole the master creates for minions. See the + `Vault API docs `_ + for details. The configuration is only relevant for the first time the + AppRole is created. If you update these params, you will need to update + the minion AppRoles manually using the vault runner: + ``salt-run vault.sync_approles`` . + + token + .. versionadded:: pending_pr + + Configuration regarding issued tokens. + + ``role_name`` specifies the role name for minion tokens created. Optional. + + .. versionchanged:: pending_pr + + This used to be found in ``role_name``. + + If omitted, minion tokens will be created without any role, thus being able + to inherit any master token policy (including token creation capabilities). 
+ + For details please see: + https://www.vaultproject.io/api/auth/token/index.html#create-token + + Example configuration: + https://www.nomadproject.io/docs/vault-integration/index.html#vault-token-role-configuration + + ``params`` configures the tokens the master issues to minions. + + .. versionchanged:: pending_pr + + This used to be found in ``auth:ttl`` and ``auth:uses``. + + This setting currently concerns token reuse only. If unset, the master + issues single-use tokens to minions, which can be quite expensive. You + can set ``ttl`` (configuring the explicit_max_ttl for tokens) and ``uses`` + (configuring num_uses, the number of requests a single token is allowed to issue). + To make full use of multi-use tokens, you should configure a cache that + survives a single session. + + + allow_minion_override_params + .. versionchanged:: pending_pr - vault: - auth: - method: token - token: xxxxxx - uses: 10 - ttl: 43200 - allow_minion_override: True - token_backend: disk + This used to be found in ``auth:allow_minion_override``. - .. versionchanged:: 3001 + Whether to allow minions to request to override parameters for issuing credentials, + especially ``ttl`` and ``num_uses``. Defaults to false. + + .. note:: + + Minion override parameters should be specified in the minion configuration + under ``vault:issue_params``. ``ttl`` and ``uses`` always refer to + issued token lifecycle settings. For AppRoles specifically, there + are more parameters, such as ``secret_id_num_uses`` and ``secret_id_ttl``. + ``bind_secret_id`` can not be overridden. + + wrap + .. versionadded:: pending_pr + + The time a minion has to unwrap a wrapped secret issued by the master. + Set this to false to disable wrapping, otherwise a time string like ``30s`` + can be used. Defaults to 30s. + + keys + ~~~~ + List of keys to use to unseal vault server with the ``vault.unseal`` runner. + + metadata + ~~~~~~~~ + .. versionadded:: pending_pr + + Configures metadata for the issued secrets. 
Values have to be strings and can + be templated with the following variables: + + - ``{jid}`` Salt job ID that issued the secret. + - ``{minion}`` The minion ID the secret was issued for. + - ``{user}`` The user the Salt daemon issuing the secret was running as. + - ``{pillar[]}`` A minion pillar value that does not depend on Vault. + - ``{grains[]}`` A minion grain value. + + .. note:: + + Values have to be strings, hence templated variables that resolve to lists + will be concatenated to an alphabetically sorted comma-separated list. + + entity + Configures the metadata associated with the minion entity inside Vault. + Entities are only created when issuing AppRoles to minions. + + token + Configures the metadata associated with issued tokens. policies - Policies that are assigned to minions when requesting a token. These - can either be static, eg ``saltstack/minions``, or templated with grain - values, eg ``my-policies/{grains[os]}``. ``{minion}`` is shorthand for - ``grains[id]``, eg ``saltstack/minion/{minion}``. + ~~~~~~~~ + .. versionchanged:: pending_pr - .. important:: + This used to specify the list of policies associated with a minion token only. + The equivalent is found in ``assign``. - See :ref:`Is Targeting using Grain Data Secure? - ` for important security information. In short, - everything except ``grains[id]`` is minion-controlled. + assign + List of policies that are assigned to issued minion authentication data, + either token or AppRole. - If a template contains a grain which evaluates to a list, it will be - expanded into multiple policies. For example, given the template - ``saltstack/by-role/{grains[roles]}``, and a minion having these grains: + They can be static strings or string templates with - .. code-block:: yaml + - ``{minion}`` The minion ID. + - ``{pillar[]}`` A minion pillar value. + - ``{grains[]}`` A minion grain value. 
+ + For pillar and grain values, lists are expanded, so ``salt_role_{pillar[roles]}`` + with ``[a, b]`` results in ``salt_role_a`` and ``salt_role_b`` to be issued. - grains: - roles: - - web - - database + Defaults to ``[saltstack/minions, saltstack/{minion}]``. - The minion will have the policies ``saltstack/by-role/web`` and - ``saltstack/by-role/database``. + .. important:: + + See :ref:`Is Targeting using Grain Data Secure? + ` for important security information. In short, + everything except ``grains[id]`` is minion-controlled. .. note:: @@ -157,116 +457,158 @@ throw an exception. Strings and numbers are examples of types which work well. - Optional. If policies is not configured, ``saltstack/minions`` and - ``saltstack/{minion}`` are used as defaults. + cache_time + .. versionadded:: pending_pr - keys - List of keys to use to unseal vault server with the vault.unseal runner. + Number of seconds compiled templated policies are cached on the master. + This is important when using pillar values in templates, since compiling + the pillar is an expensive operation. + refresh_pillar + .. versionadded:: pending_pr - Add this segment to the master configuration file, or - /etc/salt/master.d/peer_run.conf: + Whether to refresh the minion pillar when compiling templated policies + that contain pillar variables. - .. code-block:: yaml + - ``null`` (default) only compiles the pillar when no cached pillar is found. + - ``false`` never compiles the pillar. This means templated policies that + contain pillar values are skipped if no cached pillar is found. + - ``true`` always compiles the pillar. This can cause additional strain + on the master since the compilation is costly. - peer_run: - .*: - - vault.generate_token + server + ~~~~~~ + ..versionchanged:: pending_pr + + The values found in here were found in the ``vault`` root namespace previously. + + Configures Vault server details. + + url + Url to your Vault installation. Required. 
+ + verify + For details please see + https://requests.readthedocs.io/en/master/user/advanced/#ssl-cert-verification + + .. versionadded:: 2018.3.0 + + namespaces + Optional Vault Namespace. Used with Vault enterprice + + For detail please see: + https://www.vaultproject.io/docs/enterprise/namespaces + + .. versionadded:: 3004 .. _vault-setup: """ import logging -import os -from salt.exceptions import CommandExecutionError +from salt.exceptions import CommandExecutionError, SaltException + +# import salt.utils.vault +import vaultutil as vault log = logging.getLogger(__name__) def read_secret(path, key=None, metadata=False, default=None): """ + Return the value of key at path in vault, or entire secret. + .. versionchanged:: 3001 The ``default`` argument has been added. When the path or path/key combination is not found, an exception will be raised, unless a default is provided. - Return the value of key at path in vault, or entire secret + CLI Example: + + .. code-block:: bash + + salt '*' vault.read_secret salt/kv/secret - :param metadata: Optional - If using KV v2 backend, display full results, including metadata + Required policy: - .. versionadded:: 3001 + .. code-block:: terraform - Jinja Example: + path "" { + capabilities = ["read"] + } - .. code-block:: jinja + # or KV v2 + path "/data/" { + capabilities = ["read"] + } - my-secret: {{ salt['vault'].read_secret('secret/my/secret', 'some-key') }} + path + The path to the secret, including mount. - {{ salt['vault'].read_secret('/secret/my/secret', 'some-key', metadata=True)['data'] }} + key + The data field at to read. If unspecified, returns the + whole dataset. - .. code-block:: jinja + metadata + .. versionadded:: 3001 + + If using KV v2 backend, display full results, including metadata. + Defaults to False. + + default + .. 
versionadded:: 3001 - {% set supersecret = salt['vault'].read_secret('secret/my/secret') %} - secrets: - first: {{ supersecret.first }} - second: {{ supersecret.second }} + When the path or path/key combination is not found, an exception will + be raised, unless a default is provided here. """ if default is None: default = CommandExecutionError - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["data"] + if key is not None: + metadata = False log.debug("Reading Vault secret for %s at %s", __grains__["id"], path) try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("GET", url) - if response.status_code != 200: - response.raise_for_status() - data = response.json()["data"] - - # Return data of subkey if requested + data = vault.read_kv(path, __opts__, __context__, include_metadata=metadata) if key is not None: - if version2["v2"]: - return data["data"][key] - else: - return data[key] - # Just return data from KV V2 if metadata isn't needed - if version2["v2"]: - if not metadata: - return data["data"] - + return data[key] return data except Exception as err: # pylint: disable=broad-except if default is CommandExecutionError: raise CommandExecutionError( "Failed to read secret! {}: {}".format(type(err).__name__, err) - ) + ) from err return default def write_secret(path, **kwargs): """ - Set secret at the path in vault. The vault policy used must allow this. + Set secret dataset at . The vault policy used must allow this. + Fields are specified as arbitrary keyword arguments. CLI Example: .. code-block:: bash salt '*' vault.write_secret "secret/my/secret" user="foo" password="bar" + + Required policy: + + .. code-block:: terraform + + path "" { + capabilities = ["create", "update"] + } + + # or KV v2 + path "/data/" { + capabilities = ["create", "update"] + } + + path + The path to the secret, including mount. 
""" log.debug("Writing vault secrets for %s at %s", __grains__["id"], path) data = {x: y for x, y in kwargs.items() if not x.startswith("__")} - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["data"] - data = {"data": data} try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("POST", url, json=data) - if response.status_code == 200: - return response.json()["data"] - elif response.status_code != 204: - response.raise_for_status() + vault.write_kv(path, data, __opts__, __context__) return True except Exception as err: # pylint: disable=broad-except log.error("Failed to write secret! %s: %s", type(err).__name__, err) @@ -282,44 +624,103 @@ def write_raw(path, raw): .. code-block:: bash salt '*' vault.write_raw "secret/my/secret" '{"user":"foo","password": "bar"}' + + Required policy: see write_secret + + path + The path to the secret, including mount. + + raw + Secret data to write to . Has to be a mapping. """ log.debug("Writing vault secrets for %s at %s", __grains__["id"], path) - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["data"] - raw = {"data": raw} try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("POST", url, json=raw) - if response.status_code == 200: - return response.json()["data"] - elif response.status_code != 204: - response.raise_for_status() + vault.write_kv(path, raw, __opts__, __context__) return True except Exception as err: # pylint: disable=broad-except log.error("Failed to write secret! %s: %s", type(err).__name__, err) return False -def delete_secret(path): +def patch_secret(path, **kwargs): + """ + Patch secret dataset at . Fields are specified as arbitrary keyword arguments. + Requires KV v2 and "patch" capability. + + .. note:: + + This uses JSON Merge Patch format internally. + Keys set to ``null`` (JSON/YAML)/``None`` (Python) will be deleted. + + CLI Example: + + .. 
code-block:: bash + + salt '*' vault.patch_secret "secret/my/secret" password="baz" + + Required policy: + + .. code-block:: terraform + + path "/data/" { + capabilities = ["patch"] + } + + path + The path to the secret, including mount. + """ + # TODO: patch can be emulated as read, local update and write + # -> catch VaultPermissionDeniedError and try that way + log.debug("Patching vault secrets for %s at %s", __grains__["id"], path) + data = {x: y for x, y in kwargs.items() if not x.startswith("__")} + try: + vault.patch_kv(path, data, __opts__, __context__) + return True + except Exception as err: # pylint: disable=broad-except + log.error("Failed to patch secret! %s: %s", type(err).__name__, err) + return False + + +def delete_secret(path, *args): """ Delete secret at the path in vault. The vault policy used must allow this. + If is on KV v2, the secret will be soft-deleted. CLI Example: .. code-block:: bash salt '*' vault.delete_secret "secret/my/secret" + salt '*' vault.delete_secret "secret/my/secret" 0 1 2 3 + + Required policy: + + .. code-block:: terraform + + path "" { + capabilities = ["delete"] + } + + # or KV v2 + path "/data/" { + capabilities = ["delete"] + } + + # KV v2 versions + path "/delete/" { + capabilities = ["update"] + } + + path + The path to the secret, including mount. + + .. versionadded:: pending_pr + + For KV v2, you can specify versions to soft-delete as supplemental arguments. """ log.debug("Deleting vault secrets for %s in %s", __grains__["id"], path) - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["data"] try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("DELETE", url) - if response.status_code != 204: - response.raise_for_status() + vault.delete_kv(path, __opts__, __context__, versions=list(args) or None) return True except Exception as err: # pylint: disable=broad-except log.error("Failed to delete secret! 
%s: %s", type(err).__name__, err) @@ -331,87 +732,281 @@ def destroy_secret(path, *args): .. versionadded:: 3001 Destroy specified secret version at the path in vault. The vault policy - used must allow this. Only supported on Vault KV version 2 + used must allow this. Only supported on Vault KV version 2. CLI Example: .. code-block:: bash salt '*' vault.destroy_secret "secret/my/secret" 1 2 + + Required policy: + + .. code-block:: terraform + + path "/destroy/" { + capabilities = ["update"] + } + + path + The path to the secret, including mount. + + You can specify versions to destroy as supplemental arguments. """ log.debug("Destroying vault secrets for %s in %s", __grains__["id"], path) - data = {"versions": list(args)} - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["destroy"] - else: - log.error("Destroy operation is only supported on KV version 2") - return False try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("POST", url, json=data) - if response.status_code != 204: - response.raise_for_status() + vault.destroy_kv(path, list(args), __opts__, __context__) return True except Exception as err: # pylint: disable=broad-except log.error("Failed to delete secret! %s: %s", type(err).__name__, err) return False -def list_secrets(path, default=None): +def list_secrets(path, default=None, keys_only=False): """ + List secret keys at the path in vault. The vault policy used must allow this. + The path should end with a trailing slash. + .. versionchanged:: 3001 The ``default`` argument has been added. When the path or path/key combination is not found, an exception will be raised, unless a default is provided. - List secret keys at the path in vault. The vault policy used must allow this. - The path should end with a trailing slash. - CLI Example: .. code-block:: bash - salt '*' vault.list_secrets "secret/my/" + salt '*' vault.list_secrets "secret/my/" + + Required policy: + + .. 
code-block:: terraform + + path "/" { + capabilities = ["list"] + } + + # or KV v2 + path "/metadata/" { + capabilities = ["list"] + } + + path + The path to the secret, including mount. + + default + .. versionadded:: 3001 + + When the path is not found, an exception will be raised, unless a default + is provided here. + + keys_only + .. versionadded:: pending_pr + + This function used to return a dictionary like ``{"keys": ["some/", "some/key"]}``. + Setting this to True will only return the list of keys. + For backwards-compatibility reasons, this defaults to False. """ if default is None: default = CommandExecutionError log.debug("Listing vault secret keys for %s in %s", __grains__["id"], path) - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["metadata"] try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("LIST", url) - if response.status_code != 200: - response.raise_for_status() - return response.json()["data"] + keys = vault.list_kv(path, __opts__, __context__) + if keys_only: + return keys + # this is the way Salt behaved previously + return {"keys": keys} except Exception as err: # pylint: disable=broad-except if default is CommandExecutionError: raise CommandExecutionError( "Failed to list secrets! {}: {}".format(type(err).__name__, err) - ) + ) from err return default -def clear_token_cache(): +def clear_token_cache(connection_only=True): """ .. versionchanged:: 3001 - Delete minion Vault token cache file + Delete minion Vault token cache. CLI Example: .. code-block:: bash - salt '*' vault.clear_token_cache + salt '*' vault.clear_token_cache + + connection_only + .. versionadded:: pending_pr + + Only delete cache data scoped to a connection configuration. This is currently + true for all Vault cache data, but might change in the future. + Defaults to True. 
""" - log.debug("Deleting cache file") - cache_file = os.path.join(__opts__["cachedir"], "salt_vault_token") + log.debug("Deleting vault connection cache.") + vault.clear_cache(__opts__, connection_only=connection_only) - if os.path.exists(cache_file): - os.remove(cache_file) - return True - else: - log.info("Attempted to delete vault cache file, but it does not exist.") + +def policy_fetch(policy): + """ + .. versionadded:: pending_pr + + Fetch the rules associated with an ACL policy. Returns None if the policy + does not exist. + + CLI Example: + + .. code-block:: bash + + salt '*' vault.policy_fetch salt_minion + + Required policy: + + .. code-block:: terraform + + path "sys/policy/" { + capabilities = ["read"] + } + + policy + The name of the policy + """ + + endpoint = f"sys/policy/{policy}" + + try: + data = vault.query("GET", endpoint, __opts__, __context__) + return data["rules"] + + except vault.VaultNotFoundError: + return None + except SaltException as err: + raise CommandExecutionError("{}: {}".format(type(err).__name__, err)) from err + + +def policy_write(policy, rules): + r""" + .. versionadded:: pending_pr + + Create or update an ACL policy. + + CLI Example: + + .. code-block:: bash + + salt '*' vault.policy_write salt_minion "path \"secret/foo\" {..." + + Required policy: + + .. code-block:: terraform + + path "sys/policy/" { + capabilities = ["create", "update"] + } + + policy + The name of the policy + + rules + Rules formatted as in-line HCL + """ + endpoint = f"sys/policy/{policy}" + payload = {"rules": rules} + try: + return vault.query("POST", endpoint, __opts__, __context__, payload=payload) + except SaltException as err: + raise CommandExecutionError("{}: {}".format(type(err).__name__, err)) from err + + +def policy_delete(policy): + """ + .. versionadded:: pending_pr + + Delete an ACL policy. Returns False if the policy did not exist. + + CLI Example: + + .. 
code-block:: bash + + salt '*' vault.policy_delete salt_minion + + Required policy: + + .. code-block:: terraform + + path "sys/policy/" { + capabilities = ["delete"] + } + + policy + The name of the policy + """ + endpoint = f"sys/policy/{policy}" + + try: + return vault.query("DELETE", endpoint, __opts__, __context__) + except vault.VaultNotFoundError: return False + except SaltException as err: + raise CommandExecutionError("{}: {}".format(type(err).__name__, err)) from err + + +def policies_list(): + """ + .. versionadded:: pending_pr + + List all ACL policies. + + CLI Example: + + .. code-block:: bash + + salt '*' vault.policies_list + + Required policy: + + .. code-block:: terraform + + path "sys/policy" { + capabilities = ["read"] + } + """ + try: + return vault.query("GET", "sys/policy", __opts__, __context__)["policies"] + except SaltException as err: + raise CommandExecutionError("{}: {}".format(type(err).__name__, err)) from err + + +def query(method, endpoint, payload=None): + """ + .. versionadded:: pending_pr + + Issue arbitrary queries against the Vault API. + + CLI Example: + + .. code-block:: bash + + salt '*' vault.query GET auth/token/lookup-self + + Required policy: Depends on the query. + + You can ask the vault CLI to output the necessary policy: + + .. code-block:: bash + + vault read -output-policy auth/token/lookup-self + + method + HTTP method to use + + endpoint + Vault API endpoint to issue the request against. Do not include ``/v1/``. + + payload + Optional dictionary to use as JSON payload. + """ + try: + return vault.query(method, endpoint, __opts__, __context__, payload=payload) + except SaltException as err: + raise CommandExecutionError("{}: {}".format(type(err).__name__, err)) from err diff --git a/_pillar/vault.py b/_pillar/vault.py index 020a43f..285eb1d 100644 --- a/_pillar/vault.py +++ b/_pillar/vault.py @@ -119,14 +119,19 @@ minion-passwd: minionbadpasswd1 +.. 
versionadded:: pending_pr + + Pillar values from previously rendered pillars can be used to template + vault ext_pillar paths. + Using pillar values to template vault pillar paths requires them to be defined before the vault ext_pillar is called. Especially consider the significancy -of ``ext_pillar_first`` master config setting. +of :conf_master:`ext_pillar_first ` master config setting. You cannot use pillar values sourced from Vault in pillar-templated policies. If a pillar pattern matches multiple paths, the results are merged according to -the master configuration values ``pillar_source_merging_strategy`` and -``pillar_merge_lists`` by default. If the optional nesting_key was defined, +the master configuration values :conf_master:`pillar_source_merging_strategy ` +and :conf_master:`pillar_merge_lists ` by default. If the optional nesting_key was defined, the merged result will be nested below. There is currently no way to nest multiple results under different keys. @@ -145,7 +150,10 @@ import logging import salt.utils.dictupdate -from requests.exceptions import HTTPError +from salt.exceptions import SaltException + +# import salt.utils.vault +import vaultutil as vault log = logging.getLogger(__name__) @@ -177,7 +185,7 @@ def ext_pillar( paths = [comp for comp in comps if comp.startswith("path=")] if not paths: - log.error('"%s" is not a valid Vault ext_pillar config', conf) + log.error(f"`{conf}` is not a valid Vault ext_pillar config.") return {} merge_strategy = merge_strategy or __opts__.get( @@ -189,28 +197,16 @@ def ext_pillar( path_pattern = paths[0].replace("path=", "") for path in _get_paths(path_pattern, minion_id, pillar): - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["data"] - - url = "v1/{}".format(path) try: - response = __utils__["vault.make_request"]("GET", url) - if response.status_code != 200: - response.raise_for_status() - data = response.json()["data"] - if version2["v2"]: - vault_pillar_single = 
data["data"] - else: - vault_pillar_single = data + vault_pillar_single = vault.read_kv(path, __opts__, __context__) vault_pillar = salt.utils.dictupdate.merge( vault_pillar, vault_pillar_single, strategy=merge_strategy, merge_lists=merge_lists, ) - except HTTPError as e: - log.warning("Failed to read secret! {}: {}".format(type(err).__name__, err)) + except SaltException as err: + log.warning("Failed to read secret! %s: %s", type(err).__name__, err) if nesting_key: vault_pillar = {nesting_key: vault_pillar} @@ -225,12 +221,10 @@ def _get_paths(path_pattern, minion_id, pillar): paths = [] try: - for expanded_pattern in __utils__["vault.expand_pattern_lists"]( - path_pattern, **mappings - ): + for expanded_pattern in vault.expand_pattern_lists(path_pattern, **mappings): paths.append(expanded_pattern.format(**mappings)) except KeyError: log.warning("Could not resolve path pattern %s", path_pattern) - log.debug("%s paths: %s", minion_id, paths) + log.debug(f"{minion_id} paths: {paths}") return paths diff --git a/_runners/vault.py b/_runners/vault.py index ad1bf6c..8210e97 100644 --- a/_runners/vault.py +++ b/_runners/vault.py @@ -1,6 +1,6 @@ """ Runner functions supporting the Vault modules. Configuration instructions are -documented in the execution module docs. +documented in the :ref:`execution module docs `. :maintainer: SaltStack :maturity: new @@ -9,17 +9,21 @@ import base64 import copy -import json import logging -import time +import os from collections.abc import Mapping -import requests import salt.cache import salt.crypt import salt.exceptions import salt.pillar -from salt.exceptions import SaltRenderError, SaltRunnerError + +# import salt.utils.vault +import salt.utils.data +import salt.utils.versions +from salt.exceptions import SaltInvocationError, SaltRunnerError + +import vaultutil as vault log = logging.getLogger(__name__) @@ -28,6 +32,8 @@ def generate_token( minion_id, signature, impersonated_by_master=False, ttl=None, uses=None ): """ + .. 
deprecated:: pending_pr + Generate a Vault token for minion minion_id minion_id @@ -54,89 +60,300 @@ def generate_token( ) _validate_signature(minion_id, signature, impersonated_by_master) try: - config = __opts__.get("vault", {}) - verify = config.get("verify", None) - # Vault Enterprise requires a namespace - namespace = config.get("namespace") - # Allow disabling of minion provided values via the master - allow_minion_override = config["auth"].get("allow_minion_override", False) - # This preserves the previous behavior of default TTL and 1 use - if not allow_minion_override or uses is None: - uses = config["auth"].get("uses", 1) - if not allow_minion_override or ttl is None: - ttl = config["auth"].get("ttl", None) - storage_type = config["auth"].get("token_backend", "session") - policies_refresh_pillar = config.get("policies_refresh_pillar", None) - policies_cache_time = config.get("policies_cache_time", 60) - - if config["auth"]["method"] == "approle": - if __utils__["vault.selftoken_expired"](): - log.debug("Vault token expired. 
Recreating one") - # Requesting a short ttl token - url = "{}/v1/auth/approle/login".format(config["url"]) - payload = {"role_id": config["auth"]["role_id"]} - if "secret_id" in config["auth"]: - payload["secret_id"] = config["auth"]["secret_id"] - # Vault Enterprise call requires headers - headers = None - if namespace is not None: - headers = {"X-Vault-Namespace": namespace} - response = requests.post( - url, headers=headers, json=payload, verify=verify - ) - if response.status_code != 200: - return {"error": response.reason} - config["auth"]["token"] = response.json()["auth"]["client_token"] - - url = _get_token_create_url(config) - headers = {"X-Vault-Token": config["auth"]["token"]} - if namespace is not None: - headers["X-Vault-Namespace"] = namespace - audit_data = { - "saltstack-jid": globals().get("__jid__", ""), - "saltstack-minion": minion_id, - "saltstack-user": globals().get("__user__", ""), + salt.utils.versions.warn_until( + "Argon", + "vault.generate_token endpoint is deprecated. Please update your minions.", + ) + + if _config("issue:type") != "token": + log.warning( + "Master is not configured to issue tokens. Since the minion uses " + "this deprecated endpoint, issuing token anyways." 
+ ) + + issue_params = {} + if ttl is not None: + issue_params["ttl"] = ttl + if uses is not None: + issue_params["uses"] = uses + + token = _generate_token( + minion_id, issue_params=issue_params or None, wrap=False + ) + ret = { + "token": token["client_token"], + "lease_duration": token["lease_duration"], + "renewable": token["renewable"], + "issued": token["creation_time"], + "url": _config("server:url"), + "verify": _config("server:verify"), + "token_backend": _config("cache:backend"), + "namespace": _config("server:namespace"), } - payload = { - "policies": _get_policies_cached( - minion_id, - config, - refresh_pillar=policies_refresh_pillar, - expire=policies_cache_time, - ), - "num_uses": uses, - "meta": audit_data, + if token["num_uses"] >= 0: + ret["uses"] = token["num_uses"] + + return ret + except Exception as err: # pylint: disable=broad-except + return {"error": "{}: {}".format(type(err).__name__, str(err))} + + +def generate_new_token( + minion_id, signature, impersonated_by_master=False, issue_params=None +): + """ + .. versionadded:: pending_pr + + Generate a Vault token for minion minion_id. + + minion_id + The id of the minion that requests a token + + signature + Cryptographic signature which validates that the request is indeed sent + by the minion (or the master, see impersonated_by_master). + + impersonated_by_master + If the master needs to create a token on behalf of the minion, this is + True. This happens when the master generates minion pillars. + + issue_params + Dictionary of parameters for the generated tokens. + See master configuration vault:issue:token:params for possible values. + Requires "allow_minion_override_params" master configuration setting to be effective. 
+ """ + log.debug( + f"Token generation request for {minion_id} (impersonated by master: {impersonated_by_master})", + ) + _validate_signature(minion_id, signature, impersonated_by_master) + try: + if _config("issue:type") != "token": + raise SaltInvocationError("Master does not issue tokens.") + + ret = { + "server": _config("server"), + "auth": {}, } - if ttl is not None: - payload["explicit_max_ttl"] = str(ttl) + token = _generate_token(minion_id, issue_params=issue_params) + ret.update(token) + + return ret + except Exception as err: # pylint: disable=broad-except + return {"error": "{}: {}".format(type(err).__name__, str(err))} + + +def _generate_token(minion_id, issue_params=None, wrap=None): + if wrap is None: + wrap = _config("issue:wrap") + endpoint = "auth/token/create" + if _config("issue:token:role_name") is not None: + endpoint += "/" + _config("issue:token:role_name") + + payload = _parse_issue_params(issue_params, issue_type="token") + payload["policies"] = _get_policies_cached( + minion_id, + refresh_pillar=_config("policies:refresh_pillar"), + expire=_config("policies:cache_time"), + ) + + if not payload["policies"]: + raise SaltRunnerError("No policies matched minion.") + + payload["meta"] = _get_metadata(minion_id, _config("metadata:token")) + client = _get_master_client() + log.trace("Sending token creation request to Vault.") + res = client.post(endpoint, payload, wrap=wrap) + + if wrap: + return _filter_wrapped(res) + token = vault.VaultToken(**res["auth"]) + return token.serialize_for_minion() + + +def get_config(minion_id, signature, impersonated_by_master=False, issue_params=None): + """ + .. versionadded:: pending_pr + + Return Vault configuration for minion . + + minion_id + The id of the minion that requests the configuration. + + signature + Cryptographic signature which validates that the request is indeed sent + by the minion (or the master, see impersonated_by_master). 
+ + impersonated_by_master + If the master needs to contact the Vault server on behalf of the minion, this is + True. This happens when the master generates minion pillars. + + issue_params + Parameters for credential issuance. Needs allow_minion_override_params in master + config set in order to apply. + """ + log.debug( + f"Config request for {minion_id} (impersonated by master: {impersonated_by_master})", + ) + _validate_signature(minion_id, signature, impersonated_by_master) + try: + minion_config = { + "auth": { + "method": _config("issue:type"), + }, + "cache": _config("cache"), + "server": _config("server"), + "wrap_info_nested": [], + } + + if _config("issue:type") == "token": + minion_config["auth"]["token"] = _generate_token( + minion_id, issue_params=issue_params + ) + if _config("issue:wrap"): + minion_config["wrap_info_nested"].append("auth:token") + if _config("issue:type") == "approle": + minion_config["auth"]["approle_mount"] = _config("issue:approle:mount") + minion_config["auth"]["approle_name"] = minion_id + minion_config["auth"]["secret_id"] = _config( + "issue:approle:params:bind_secret_id" + ) + minion_config["auth"]["role_id"] = _get_role_id( + minion_id, issue_params=issue_params + ) + if _config("issue:wrap"): + minion_config["wrap_info_nested"].append("auth:role_id") - if payload["policies"] == []: - return {"error": "No policies matched minion"} + return minion_config + except Exception as err: # pylint: disable=broad-except + return {"error": "{}: {}".format(type(err).__name__, str(err))} - log.trace("Sending token creation request to Vault") - response = requests.post(url, headers=headers, json=payload, verify=verify) - if response.status_code != 200: - return {"error": response.reason} +def get_role_id(minion_id, signature, impersonated_by_master=False, issue_params=None): + """ + .. versionadded:: pending_pr + + Return the Vault role-id for minion . 
Requires the master to be configured + to generate AppRoles for minions (configuration: ``vault:issue:type``). + + minion_id + The id of the minion that requests a token + + signature + Cryptographic signature which validates that the request is indeed sent + by the minion (or the master, see impersonated_by_master). + + impersonated_by_master + If the master needs to create a token on behalf of the minion, this is + True. This happens when the master generates minion pillars. + + issue_params + Dictionary of configuration values for the generated AppRole. + See master configuration vault:issue:approle:params for possible values. + Requires "allow_minion_override_params" master configuration setting to be effective. + """ + log.debug( + f"role-id request for {minion_id} (impersonated by master: {impersonated_by_master})", + ) + _validate_signature(minion_id, signature, impersonated_by_master) + + try: + if _config("issue:type") != "approle": + raise SaltInvocationError("Master does not issue AppRoles.") + + ret = { + "server": _config("server"), + "data": {}, + } + + role_id = _get_role_id(minion_id, issue_params=issue_params) + if _config("issue:wrap"): + ret.update(role_id) + else: + ret["data"]["role_id"] = role_id + return ret + except Exception as err: # pylint: disable=broad-except + return {"error": "{}: {}".format(type(err).__name__, str(err))} + + +def _get_role_id(minion_id, issue_params=None): + wrap = _config("issue:wrap") + role_id = _lookup_role_id(minion_id, wrap=wrap) + + if role_id is False: + # This means the role has to be created first + log.debug(f"Creating new AppRole for {minion_id}.") + # create AppRole with role name + # token_policies are set on the AppRole + _manage_approle(minion_id, issue_params) + # create/update entity with name salt_minion_ + # metadata is set on the entity (to allow policy path templating) + _manage_entity(minion_id) + # ensure the new AppRole is mapped to the entity + _manage_entity_alias(minion_id) + + role_id = 
_lookup_role_id(minion_id, wrap=wrap) + if role_id is False: + raise SaltRunnerError(f"Failed to create AppRole for minion {minion_id}.") + + if wrap: + return _filter_wrapped(role_id) + + return role_id + + +def generate_secret_id(minion_id, signature, impersonated_by_master=False): + """ + .. versionadded:: pending_pr + + Generate a Vault secret-id for minion . Requires the master to be configured + to generate AppRoles for minions (configuration: ``vault:issue:type``). + + minion_id + The id of the minion that requests a token + + signature + Cryptographic signature which validates that the request is indeed sent + by the minion (or the master, see impersonated_by_master). + + impersonated_by_master + If the master needs to create a token on behalf of the minion, this is + True. This happens when the master generates minion pillars. + """ + log.debug( + f"secret-id generation request for {minion_id} (impersonated by master: {impersonated_by_master})", + ) + _validate_signature(minion_id, signature, impersonated_by_master) + try: + if _config("issue:type") != "approle": + raise SaltInvocationError("Master does not issue AppRoles nor secret-ids.") - auth_data = response.json()["auth"] ret = { - "token": auth_data["client_token"], - "lease_duration": auth_data["lease_duration"], - "renewable": auth_data["renewable"], - "issued": int(round(time.time())), - "url": config["url"], - "verify": verify, - "token_backend": storage_type, - "namespace": namespace, + "server": _config("server"), + "data": {}, } - if uses >= 0: - ret["uses"] = uses + wrap = _config("issue:wrap") + secret_id, meta_info = _get_secret_id(minion_id, wrap=wrap, meta_info=True) + + if wrap: + ret.update(secret_id) + else: + ret["data"] = secret_id.serialize_for_minion() + + ret["misc_data"] = { + "secret_id_num_uses": meta_info["secret_id_num_uses"], + } return ret - except Exception as e: # pylint: disable=broad-except - return {"error": str(e)} + except vault.VaultNotFoundError as err: + # when 
the role does not exist, make sure the minion requests + # new configuration details to generate one + return { + "expire_cache": True, + "error": "{}: {}".format(type(err).__name__, str(err)), + } + except Exception as err: # pylint: disable=broad-except + return {"error": "{}: {}".format(type(err).__name__, str(err))} def unseal(): @@ -163,15 +380,15 @@ def unseal(): salt-run vault.unseal """ for key in __opts__["vault"]["keys"]: - ret = __utils__["vault.make_request"]( - "PUT", "v1/sys/unseal", data=json.dumps({"key": key}) - ).json() + ret = vault.query( + "POST", "sys/unseal", __opts__, __context__, payload={"key": key} + ) if ret["sealed"] is False: return True return False -def show_policies(minion_id, refresh_pillar=None, expire=None): +def show_policies(minion_id, refresh_pillar="__unset__", expire=None): """ Show the Vault policies that are applied to tokens for the given minion. @@ -187,7 +404,7 @@ def show_policies(minion_id, refresh_pillar=None, expire=None): pillar data, make sure their compilation does not rely on the vault execution module. It will be broken since otherwise, an infinite loop would result. None will only refresh when the cached data is unavailable, boolean values - force one behavior always. Defaults to config value ``policies_refresh_pillar`` or None. + force one behavior always. Defaults to config value ``policies:refresh_pillar`` or None. expire Policy computation can be heavy in case the pillar data has not been cached. 
@@ -200,14 +417,190 @@ def show_policies(minion_id, refresh_pillar=None, expire=None): salt-run vault.show_policies myminion """ - config = __opts__["vault"] - if refresh_pillar is None: - refresh_pillar = config.get("policies_refresh_pillar", None) - if expire is None: - expire = config.get("policies_cache_time", 60) - return _get_policies_cached( - minion_id, config, refresh_pillar=refresh_pillar, expire=expire + refresh_pillar = ( + refresh_pillar + if refresh_pillar != "__unset__" + else _config("policies:refresh_pillar") ) + expire = expire if expire is not None else _config("policies:cache_time") + return _get_policies_cached(minion_id, refresh_pillar=refresh_pillar, expire=expire) + + +def sync_approles(minions=None, up=False, down=False, issue_params=None): + """ + Sync minion AppRole parameters with current settings, including associated + token policies. + + .. note:: + Only updates existing AppRoles. They are issued during the first request + for one by the minion. + + If no parameter is specified, will try to sync AppRoles for all known minions. + + CLI Example: + + .. code-block:: bash + + salt-run vault.sync_approles + salt-run vault.sync_approles ecorp issue_params="{ttl: 0, num_uses: 1337}" + + minions + (List of) ID(s) of the minion(s) to update the AppRole for. + Defaults to None. + + up + Find all minions that are up and update their AppRoles. + Defaults to False. + + down + Find all minions that are down and update their AppRoles. + Defaults to False. + + issue_params + Overrides for AppRole parameters. See ``issue:approle:params`` and + ``issue:allow_minion_override_params`` (the latter for the description + only since the runner works on the master-side). 
+ """ + if "approle" != _config("issue:type"): + raise SaltRunnerError("Master does not issue AppRoles to minions.") + if minions is not None: + if not isinstance(minions, list): + minions = [minions] + elif up or down: + minions = [] + if up: + minions.extend(__salt__["manage.list_state"]()) + if down: + minions.extend(__salt__["manage.list_not_state"]()) + else: + minions = _list_all_known_minions() + + for minion in set(minions) & set(list_approles()): + _manage_approle(minion, issue_params, params_from_master=True) + return True + + +def list_approles(): + """ + List all AppRoles that have been created by the Salt master. + They are named after the minions. + + CLI Example: + + .. code-block:: bash + + salt-run vault.list_approles + """ + if "approle" != _config("issue:type"): + raise SaltRunnerError("Master does not issue AppRoles to minions.") + endpoint = "auth/{}/role".format(_config("issue:approle:mount")) + client = _get_master_client() + return client.list(endpoint)["data"]["keys"] + + +def sync_entities(minions=None, up=False, down=False): + """ + Sync minion entities with current settings. Only updates entities for minions + with existing AppRoles. + + .. note:: + This updates associated metadata only. Entities are created only + when issuing AppRoles to minions (``issue:type`` == ``approle``). + + If no parameter is specified, will try to sync entities for all known minions. + + CLI Example: + + .. code-block:: bash + + salt-run vault.sync_entities + + minions + (List of) ID(s) of the minion(s) to update the entity for. + Defaults to None. + + up + Find all minions that are up and update their associated entities. + Defaults to False. + + down + Find all minions that are down and update their associated entities. + Defaults to False. + """ + if "approle" != _config("issue:type"): + raise SaltRunnerError( + "Master is not configured to issue AppRoles to minions, which is a " + "requirement to use managed entities with Salt." 
+ ) + if minions is not None: + if not isinstance(minions, list): + minions = [minions] + elif up or down: + minions = [] + if up: + minions.extend(__salt__["manage.list_state"]()) + if down: + minions.extend(__salt__["manage.list_not_state"]()) + else: + minions = _list_all_known_minions() + + for minion in set(minions) & set(list_approles()): + _manage_entity(minion) + entity = _lookup_entity_by_alias(minion) + if not entity or not entity["name"] == f"salt_minion_{minion}": + log.info( + f"Fixing association of minion AppRole to minion entity for {minion}." + ) + _manage_entity_alias(minion) + return True + + +def cleanup_auth(): + """ + Removes AppRoles and entities associated with unknown minion IDs. + Can only clean up entities if the AppRole still exists. + + .. warning:: + Make absolutely sure that the configured minion approle issue mount is + exclusively dedicated to the Salt master, otherwise you might lose data + by using this function! (config: ``issue:approle:mount``) + + This detects unknown existing AppRoles by listing all roles on the + configured minion approle mount and deducting known minions from the + returned list. + + CLI Example: + + .. 
code-block:: bash + + salt-run vault.cleanup_auth + """ + ret = {"approles": [], "entities": []} + + for minion in set(list_approles()) - set(_list_all_known_minions()): + if _fetch_entity_by_name(minion): + _delete_entity(minion) + ret["entities"].append(minion) + _delete_approle(minion) + ret["approles"].append(minion) + return {"deleted": ret} + + +def _config(key=None): + ckey = "vault_master_config" + if ckey not in __context__: + __context__[ckey] = vault.parse_config(__opts__.get("vault", {})) + + if key is None: + return __context__[ckey] + val = salt.utils.data.traverse_dict(__context__[ckey], key, vault.VaultException) + if val == vault.VaultException: + raise vault.VaultException("Requested configuration value does not exist.") + return val + + +def _list_all_known_minions(): + return os.listdir(__opts__["pki_dir"] + "/minions") def _validate_signature(minion_id, signature, impersonated_by_master): @@ -217,79 +610,51 @@ def _validate_signature(minion_id, signature, impersonated_by_master): """ pki_dir = __opts__["pki_dir"] if impersonated_by_master: - public_key = "{}/master.pub".format(pki_dir) + public_key = f"{pki_dir}/master.pub" else: - public_key = "{}/minions/{}".format(pki_dir, minion_id) + public_key = f"{pki_dir}/minions/{minion_id}" log.trace("Validating signature for %s", minion_id) signature = base64.b64decode(signature) if not salt.crypt.verify_signature(public_key, minion_id, signature): raise salt.exceptions.AuthenticationError( - "Could not validate token request from {}".format(minion_id) + f"Could not validate token request from {minion_id}" ) log.trace("Signature ok") -# **kwargs is necessary because salt.cache.Cache does not pop expire from kwargs -def _get_policies(minion_id, config, refresh_pillar=None, **kwargs): +# **kwargs because salt.cache.Cache does not pop "expire" from kwargs +def _get_policies( + minion_id, refresh_pillar=None, **kwargs +): # pylint: disable=unused-argument """ Get the policies that should be applied to a 
token for minion_id """ - _, grains, pillar = salt.utils.minions.get_minion_data(minion_id, __opts__) - policy_patterns = config.get( - "policies", ["saltstack/minion/{minion}", "saltstack/minions"] - ) - - # salt.utils.minions.get_minion_data only returns data from cache or None. - # To make sure the correct policies are available, the pillar needs to be - # refreshed. This can cause an infinite loop if the pillar data itself - # depends on the vault execution module, which relies on this function. - # By default, only refresh when necessary. Boolean values force one way. - if refresh_pillar is True or (refresh_pillar is None and pillar is None): - if __opts__.get("_vault_runner_is_compiling_pillar_templates"): - raise SaltRunnerError( - "Cyclic dependency detected while refreshing pillar for vault policy templating. " - "This is caused by some pillar value relying on the vault execution module. " - "Either remove the dependency from your pillar, disable refreshing pillar data for policy templating " - "or do not use pillar values in policy templates." - ) - local_opts = copy.deepcopy(__opts__) - # Relying on opts for ext_pillars does not work properly (only the first one runs - # correctly because the opts dunder is synced to the initial modules as well) - extra_minion_data = {"_vault_runner_is_compiling_pillar_templates": True} - local_opts.update(extra_minion_data) - pillar = LazyPillar( - local_opts, grains, minion_id, extra_minion_data=extra_minion_data - ) - elif pillar is None: - # Make sure pillar is a dict. 
Necessary because a check on LazyPillar would - # refresh it unconditionally (even when no pillar values are used) - pillar = {} - + grains, pillar = _get_minion_data(minion_id, refresh_pillar) mappings = {"minion": minion_id, "grains": grains or {}, "pillar": pillar} policies = [] - for pattern in policy_patterns: + for pattern in _config("policies:assign"): try: - for expanded_pattern in __utils__["vault.expand_pattern_lists"]( - pattern, **mappings - ): + for expanded_pattern in vault.expand_pattern_lists(pattern, **mappings): policies.append( expanded_pattern.format(**mappings).lower() # Vault requirement ) except KeyError: - log.warning("Could not resolve policy pattern %s", pattern) + log.warning( + "Could not resolve policy pattern %s for minion %s", pattern, minion_id + ) log.debug("%s policies: %s", minion_id, policies) return policies -def _get_policies_cached(minion_id, config, refresh_pillar=None, expire=60): +def _get_policies_cached(minion_id, refresh_pillar=None, expire=60): # expiration of 0 disables cache if not expire: - return _get_policies(minion_id, config, refresh_pillar=refresh_pillar) - cbank = "minions/{}".format(minion_id) - ckey = "vault_policies" + return _get_policies(minion_id, refresh_pillar=refresh_pillar) + cbank = f"minions/{minion_id}/vault" + ckey = "policies" cache = salt.cache.factory(__opts__) policies = cache.cache( cbank, @@ -297,7 +662,6 @@ def _get_policies_cached(minion_id, config, refresh_pillar=None, expire=60): _get_policies, expire=expire, minion_id=minion_id, - config=config, refresh_pillar=refresh_pillar, ) if not isinstance(policies, list): @@ -309,31 +673,302 @@ def _get_policies_cached(minion_id, config, refresh_pillar=None, expire=60): _get_policies, expire=expire, minion_id=minion_id, - config=config, refresh_pillar=refresh_pillar, ) return policies -def _get_token_create_url(config): +def _get_minion_data(minion_id, refresh_pillar=None): + _, grains, pillar = salt.utils.minions.get_minion_data(minion_id, 
__opts__) + + # salt.utils.minions.get_minion_data only returns data from cache or None. + # To make sure the correct policies are available, the pillar needs to be + # refreshed. This can cause an infinite loop if the pillar data itself + # depends on the vault execution module, which relies on this function. + # By default, only refresh when necessary. Boolean values force one way. + if refresh_pillar is True or (refresh_pillar is None and pillar is None): + if __opts__.get("_vault_runner_is_compiling_pillar_templates"): + raise SaltRunnerError( + "Cyclic dependency detected while refreshing pillar for vault policy templating. " + "This is caused by some pillar value relying on the vault execution module. " + "Either remove the dependency from your pillar, disable refreshing pillar data for policy templating " + "or do not use pillar values in policy templates." + ) + local_opts = copy.deepcopy(__opts__) + # Relying on opts for ext_pillars does not work properly (only the first one runs + # correctly because the opts dunder is synced to the initial modules as well) + extra_minion_data = {"_vault_runner_is_compiling_pillar_templates": True} + local_opts.update(extra_minion_data) + pillar = LazyPillar( + local_opts, grains, minion_id, extra_minion_data=extra_minion_data + ) + elif pillar is None: + # Make sure pillar is a dict. 
Necessary because a check on LazyPillar would
+        # refresh it unconditionally (even when no pillar values are used)
+        pillar = {}
+
+    return grains, pillar
+
+
+def _get_metadata(minion_id, metadata_patterns, refresh_pillar=None):
+    _, pillar = _get_minion_data(minion_id, refresh_pillar)
+    mappings = {
+        "minion": minion_id,
+        "pillar": pillar,
+        "jid": globals().get("__jid__", ""),
+        "user": globals().get("__user__", ""),
+    }
+    metadata = {}
+    for key, pattern in metadata_patterns.items():
+        metadata[key] = []
+        try:
+            for expanded_pattern in vault.expand_pattern_lists(pattern, **mappings):
+                metadata[key].append(expanded_pattern.format(**mappings))
+        except KeyError:
+            log.warning(
+                "Could not resolve metadata pattern %s for minion %s",
+                pattern,
+                minion_id,
+            )
+
+    log.debug(f"{minion_id} metadata: {metadata}")
+    return {k: ",".join(v) for k, v in metadata.items()}
+
+
+def _parse_issue_params(params, issue_type=None, params_from_master=False):
+    if not _config("issue:allow_minion_override_params") and not params_from_master:
+        params = {}
+
+    no_override_params = [
+        "bind_secret_id",
+        "secret_id_bound_cidrs",
+        "token_policies",
+        "token_bound_cidrs",
+    ]
+
+    # issue_type is used to override the configured type for minions using the old endpoint
+    # TODO: remove this once the endpoint has been removed
+    issue_type = issue_type or _config("issue:type")
+    if "token" == issue_type:
+        valid_params = {
+            "ttl": "explicit_max_ttl",
+            "uses": "num_uses",
+        }
+    elif "approle" == issue_type:
+        valid_params = {
+            "bind_secret_id": "bind_secret_id",
+            "secret_id_num_uses": "secret_id_num_uses",
+            "secret_id_ttl": "secret_id_ttl",
+            "ttl": "token_explicit_max_ttl",
+            "uses": "token_num_uses",
+        }
+    else:
+        raise SaltRunnerError(
+            "Invalid configuration for minion Vault authentication issuance."
+ ) + + configured_params = _config(f"issue:{issue_type}:params") + ret = {} + + for valid_param, vault_param in valid_params.items(): + if valid_param in configured_params: + ret[vault_param] = configured_params[valid_param] + if valid_param in params and vault_param not in no_override_params: + ret[vault_param] = params[valid_param] + + return ret + + +def _filter_wrapped(wrapped): + return { + "wrap_info": { + "token": wrapped.id, + "ttl": wrapped.ttl, + "creation_time": wrapped.creation_time, + "creation_path": wrapped.creation_path, + }, + } + + +def _manage_approle(minion_id, issue_params, params_from_master=False): + endpoint = "auth/{}/role/{}".format(_config("issue:approle:mount"), minion_id) + payload = _parse_issue_params(issue_params, params_from_master=params_from_master) + payload["token_policies"] = _get_policies_cached(minion_id, refresh_pillar=True) + client = _get_master_client() + log.debug(f"Creating/updating AppRole for minion {minion_id}.") + client.post(endpoint, payload) + + +def _delete_approle(minion_id): + endpoint = "auth/{}/role/{}".format(_config("issue:approle:mount"), minion_id) + client = _get_master_client() + log.debug(f"Deleting approle for minion {minion_id}.") + client.delete(endpoint) + + +def _lookup_role_id(minion_id, wrap=None): + if wrap is None: + wrap = _config("issue:wrap") + client = _get_master_client() + endpoint = "auth/{}/role/{}/role-id".format( + _config("issue:approle:mount"), minion_id + ) + try: + role_id = client.get(endpoint, wrap=wrap) + except vault.VaultNotFoundError: + return False + if wrap: + return role_id + return role_id["data"]["role_id"] + + +def _get_secret_id(minion_id, wrap=None, meta_info=False): + if wrap is None: + wrap = _config("issue:wrap") + client = _get_master_client() + endpoint = "auth/{}/role/{}/secret-id".format( + _config("issue:approle:mount"), minion_id + ) + response = client.post(endpoint, wrap=wrap) + if wrap: + # wrapped responses are always VaultWrappedResponse objects + 
secret_id = _filter_wrapped(response) + accessor = response.wrapped_accessor + else: + secret_id = vault.VaultAppRoleSecretId(**response["data"]) + accessor = response["data"]["secret_id_accessor"] + if not meta_info: + return secret_id + # sadly, secret_id_num_uses is not part of the information returned + meta_info = client.post( + endpoint + "-accessor/lookup", payload={"secret_id_accessor": accessor} + )["data"] + + return secret_id, meta_info + + +def _lookup_mount_accessor(mount): + log.debug(f"Looking up mount accessor ID for mount {mount}.") + endpoint = f"sys/auth/{mount}" + client = _get_master_client() + return client.get(endpoint)["accessor"] + + +def _lookup_entity_by_alias(minion_id): """ - Create Vault url for token creation + This issues a lookup for the entity using the role-id and mount accessor, + thus verifies that an entity and associated entity alias exists. """ - role_name = config.get("role_name", None) - auth_path = "/v1/auth/token/create" - base_url = config["url"] - return "/".join(x.strip("/") for x in (base_url, auth_path, role_name) if x) + minion_mount_accessor = _lookup_mount_accessor(_config("issue:approle:mount")) + role_id = _lookup_role_id(minion_id, wrap=False) + client = _get_master_client() + endpoint = "identity/lookup/entity" + payload = { + "alias_name": role_id, + "alias_mount_accessor": minion_mount_accessor, + } + entity = client.post(endpoint, payload) + if isinstance(entity, dict): + return entity["data"] + return False + + +def _fetch_entity_by_name(minion_id): + client = _get_master_client() + endpoint = f"identity/entity/name/salt_minion_{minion_id}" + try: + return client.get(endpoint)["data"] + except vault.VaultNotFoundError: + return False + + +def _manage_entity(minion_id): + endpoint = f"identity/entity/name/salt_minion_{minion_id}" + payload = { + "metadata": _get_metadata(minion_id, _config("metadata:entity"), True), + } + client = _get_master_client() + client.post(endpoint, payload=payload) + + +def 
 _delete_entity(minion_id): + endpoint = f"identity/entity/name/salt_minion_{minion_id}" + client = _get_master_client() + client.delete(endpoint) + + +def _manage_entity_alias(minion_id): + log.debug(f"Creating entity alias for minion {minion_id}.") + minion_mount_accessor = _lookup_mount_accessor(_config("issue:approle:mount")) + role_id = _lookup_role_id(minion_id, wrap=False) + entity = _fetch_entity_by_name(minion_id) + if not entity: + raise SaltRunnerError( + f"There is no entity to create an alias for minion {minion_id}." + ) + payload = { + "canonical_id": entity["id"], + "mount_accessor": minion_mount_accessor, + "name": str(role_id), + } + for alias in entity["aliases"]: + if alias["mount_accessor"] == minion_mount_accessor: + payload["id"] = alias["id"] + client = _get_master_client() + client.post("identity/entity-alias", payload=payload) + + +def _get_master_client(): + # force_local is necessary when issuing credentials while impersonating + # minions since the opts dict cannot be used to distinguish master from + # minion in that case + client = vault.get_authd_client(__opts__, __context__, force_local=True) + return client + + +def _revoke_token(token=None, accessor=None): + if not token and not accessor: + raise SaltInvocationError("Need either token or accessor to revoke token.") + endpoint = "auth/token/revoke" + if token: + payload = {"token": token} + else: + endpoint += "-accessor" + payload = {"accessor": accessor} + client = _get_master_client() + return client.post(endpoint, payload=payload) + + +def _destroy_secret_id(minion_id, mount, secret_id=None, accessor=None): + if not secret_id and not accessor: + raise SaltInvocationError( + "Need either secret_id or accessor to destroy secret-id." 
+ ) + if secret_id: + endpoint = f"auth/{mount}/role/{minion_id}/secret-id/destroy" + payload = {"secret_id": str(secret_id)} + else: + endpoint = f"auth/{mount}/role/{minion_id}/secret-id-accessor/destroy" + payload = {"secret_id_accessor": accessor} + client = _get_master_client() + return client.post(endpoint, payload=payload) class LazyPillar(Mapping): + """ + Simulates a pillar dictionary. Only compiles the pillar + once an item is requested. + """ + def __init__(self, opts, grains, minion_id, extra_minion_data=None): self.opts = opts self.grains = grains self.minion_id = minion_id self.extra_minion_data = extra_minion_data or {} + self._pillar = None def _load(self): - log.info("Refreshing pillar for vault policies.") + log.info("Refreshing pillar for vault templating.") self._pillar = salt.pillar.get_pillar( self.opts, self.grains, @@ -342,16 +977,16 @@ def _load(self): ).compile_pillar() def __getitem__(self, key): - if not hasattr(self, "_pillar"): + if self._pillar is None: self._load() return self._pillar[key] def __iter__(self): - if not hasattr(self, "_pillar"): + if self._pillar is None: self._load() yield from self._pillar def __len__(self): - if not hasattr(self, "_pillar"): + if self._pillar is None: self._load() return len(self._pillar) diff --git a/_sdb/vault.py b/_sdb/vault.py index 08360e2..8436d7c 100644 --- a/_sdb/vault.py +++ b/_sdb/vault.py @@ -9,7 +9,7 @@ This module allows access to Hashicorp Vault using an ``sdb://`` URI. -Base configuration instructions are documented in the execution module docs. +Base configuration instructions are documented in the :ref:`execution module docs `. Below are noted extra configuration required for the sdb module, but the base configuration must also be completed. 
@@ -44,6 +44,9 @@ import salt.exceptions +# import salt.utils.vault +import vaultutil as vault + log = logging.getLogger(__name__) __func_alias__ = {"set_": "set"} @@ -59,61 +62,34 @@ def set_(key, value, profile=None): path, key = key.rsplit("/", 1) data = {key: value} - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["data"] - data = {"data": data} - try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("POST", url, json=data) - - if response.status_code != 204: - response.raise_for_status() + vault.write_kv(path, data, __opts__, __context__) return True - except Exception as e: # pylint: disable=broad-except - log.error("Failed to write secret! %s: %s", type(e).__name__, e) - raise salt.exceptions.CommandExecutionError(e) + except Exception as err: # pylint: disable=broad-except + log.error("Failed to write secret! %s: %s", type(err).__name__, err) + raise salt.exceptions.CommandExecutionError(err) from err def get(key, profile=None): """ Get a value from the vault service """ + full_path = key if "?" 
in key: path, key = key.split("?") else: path, key = key.rsplit("/", 1) - version2 = __utils__["vault.is_v2"](path) - if version2["v2"]: - path = version2["data"] - try: - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("GET", url) - if response.status_code == 404: - if version2["v2"]: - path = version2["data"] + "/" + key - url = "v1/{}".format(path) - response = __utils__["vault.make_request"]("GET", url) - if response.status_code == 404: - return None - else: - return None - if response.status_code != 200: - response.raise_for_status() - data = response.json()["data"] - - if version2["v2"]: - if key in data["data"]: - return data["data"][key] - else: - return data["data"] - else: - if key in data: - return data[key] + try: + res = vault.read_kv(path, __opts__, __context__) + if key in res: + return res[key] + return None + except vault.VaultNotFoundError: + return vault.read_kv(full_path, __opts__, __context__) + except vault.VaultNotFoundError: return None - except Exception as e: # pylint: disable=broad-except - log.error("Failed to read secret! %s: %s", type(e).__name__, e) - raise salt.exceptions.CommandExecutionError(e) + except Exception as err: # pylint: disable=broad-except + log.error("Failed to read secret! %s: %s", type(err).__name__, err) + raise salt.exceptions.CommandExecutionError(err) from err diff --git a/_states/vault.py b/_states/vault.py index 54de5b8..9b3e143 100644 --- a/_states/vault.py +++ b/_states/vault.py @@ -1,6 +1,7 @@ """ States for managing Hashicorp Vault. -Currently handles policies. Configuration instructions are documented in the execution module docs. +Currently handles policies. +Configuration instructions are documented in the :ref:`execution module docs `. 
:maintainer: SaltStack :maturity: new @@ -13,6 +14,8 @@ import difflib import logging +from salt.exceptions import CommandExecutionError + log = logging.getLogger(__name__) @@ -41,85 +44,88 @@ def policy_present(name, rules): } """ - url = "v1/sys/policy/{}".format(name) - response = __utils__["vault.make_request"]("GET", url) + ret = {"name": name, "changes": {}, "result": True, "comment": ""} + try: - if response.status_code == 200: - return _handle_existing_policy(name, rules, response.json()["rules"]) - elif response.status_code == 404: - return _create_new_policy(name, rules) - else: - response.raise_for_status() - except Exception as e: # pylint: disable=broad-except - return { - "name": name, - "changes": {}, - "result": False, - "comment": "Failed to get policy: {}".format(e), - } + existing_rules = __salt__["vault.policy_fetch"](name) + except CommandExecutionError as err: + ret["result"] = False + ret["comment"] = f"Failed to read policy: {err}" + return ret + if existing_rules == rules: + ret["comment"] = "Policy exists, and has the correct content" + return ret + + diff = "".join( + difflib.unified_diff( + (existing_rules or "").splitlines(True), rules.splitlines(True) + ) + ) + + ret["changes"] = {name: diff} -def _create_new_policy(name, rules): if __opts__["test"]: - return { - "name": name, - "changes": {name: {"old": "", "new": rules}}, - "result": None, - "comment": "Policy would be created", - } + ret["result"] = None + ret["comment"] = ( + "Policy would be " + "created" if existing_rules is None else "updated" + ) + return ret - payload = {"rules": rules} - url = "v1/sys/policy/{}".format(name) - response = __utils__["vault.make_request"]("PUT", url, json=payload) - if response.status_code not in [200, 204]: + try: + __salt__["vault.policy_write"](name, rules) + ret["comment"] = ( + "Policy has been " + "created" if existing_rules is None else "updated" + ) + return ret + except CommandExecutionError as err: return { "name": name, "changes": {}, 
"result": False, - "comment": "Failed to create policy: {}".format(response.reason), + "comment": f"Failed to write policy: {err}", } - return { - "name": name, - "result": True, - "changes": {name: {"old": None, "new": rules}}, - "comment": "Policy was created", - } +def policy_absent(name): + """ + Ensure a Vault policy with the given name and rules is absent. -def _handle_existing_policy(name, new_rules, existing_rules): - ret = {"name": name} - if new_rules == existing_rules: - ret["result"] = True - ret["changes"] = {} - ret["comment"] = "Policy exists, and has the correct content" + name + The name of the policy + """ + ret = {"name": name, "changes": {}, "result": True, "comment": ""} + + try: + existing_rules = __salt__["vault.policy_fetch"](name) + except CommandExecutionError as err: + ret["result"] = False + ret["comment"] = f"Failed to read policy: {err}" return ret - change = "".join( - difflib.unified_diff( - existing_rules.splitlines(True), new_rules.splitlines(True) - ) - ) + if existing_rules is None: + ret["comment"] = "Policy is already absent" + return ret + + ret["changes"] = {"deleted": name} + if __opts__["test"]: ret["result"] = None - ret["changes"] = {name: {"change": change}} - ret["comment"] = "Policy would be changed" + ret["comment"] = "Policy would be deleted" return ret - payload = {"rules": new_rules} - - url = "v1/sys/policy/{}".format(name) - response = __utils__["vault.make_request"]("PUT", url, json=payload) - if response.status_code not in [200, 204]: + try: + if not __salt__["vault.policy_delete"](name): + raise CommandExecutionError( + "Policy was initially reported as existent, but seemed to be " + "absent while deleting." 
+ ) + ret["comment"] = "Policy has been deleted" + return ret + except CommandExecutionError as err: return { "name": name, "changes": {}, "result": False, - "comment": "Failed to change policy: {}".format(response.reason), + "comment": f"Failed to delete policy: {err}", } - - ret["result"] = True - ret["changes"] = {name: {"change": change}} - ret["comment"] = "Policy was updated" - - return ret diff --git a/_utils/vault.py b/_utils/vault.py deleted file mode 100644 index fb84cdb..0000000 --- a/_utils/vault.py +++ /dev/null @@ -1,597 +0,0 @@ -""" -:maintainer: SaltStack -:maturity: new -:platform: all - -Utilities supporting modules for Hashicorp Vault. Configuration instructions are -documented in the execution module docs. -""" - -import base64 -import logging -import os -import string -import tempfile -import time - -import requests -import salt.crypt -import salt.exceptions -import salt.utils.json -import salt.utils.versions - -log = logging.getLogger(__name__) - - -# Load the __salt__ dunder if not already loaded (when called from utils-module) -__salt__ = None - - -def __virtual__(): - try: - global __salt__ # pylint: disable=global-statement - if not __salt__: - __salt__ = salt.loader.minion_mods(__opts__) - logging.getLogger("requests").setLevel(logging.WARNING) - return True - except Exception as e: # pylint: disable=broad-except - log.error("Could not load __salt__: %s", e) - return False - - -def _get_token_and_url_from_master(): - """ - Get a token with correct policies for the minion, and the url to the Vault - service - """ - minion_id = __grains__["id"] - pki_dir = __opts__["pki_dir"] - # Allow minion override salt-master settings/defaults - try: - uses = __opts__.get("vault", {}).get("auth", {}).get("uses", None) - ttl = __opts__.get("vault", {}).get("auth", {}).get("ttl", None) - except (TypeError, AttributeError): - # If uses or ttl are not defined, just use defaults - uses = None - ttl = None - - # When rendering pillars, the module executes on 
the master, but the token - # should be issued for the minion, so that the correct policies are applied - if __opts__.get("__role", "minion") == "minion": - private_key = "{}/minion.pem".format(pki_dir) - log.debug("Running on minion, signing token request with key %s", private_key) - signature = base64.b64encode(salt.crypt.sign_message(private_key, minion_id)) - result = __salt__["publish.runner"]( - "vault.generate_token", arg=[minion_id, signature, False, ttl, uses] - ) - else: - private_key = "{}/master.pem".format(pki_dir) - log.debug( - "Running on master, signing token request for %s with key %s", - minion_id, - private_key, - ) - signature = base64.b64encode(salt.crypt.sign_message(private_key, minion_id)) - result = __salt__["saltutil.runner"]( - "vault.generate_token", - minion_id=minion_id, - signature=signature, - impersonated_by_master=True, - ttl=ttl, - uses=uses, - ) - if not result: - log.error( - "Failed to get token from master! No result returned - " - "is the peer publish configuration correct?" - ) - raise salt.exceptions.CommandExecutionError(result) - if not isinstance(result, dict): - log.error("Failed to get token from master! Response is not a dict: %s", result) - raise salt.exceptions.CommandExecutionError(result) - if "error" in result: - log.error( - "Failed to get token from master! 
An error was returned: %s", - result["error"], - ) - raise salt.exceptions.CommandExecutionError(result) - if "session" in result.get("token_backend", "session"): - # This is the only way that this key can be placed onto __context__ - # Thus is tells the minion that the master is configured for token_backend: session - log.debug("Using session storage for vault credentials") - __context__["vault_secret_path_metadata"] = {} - return { - "url": result["url"], - "token": result["token"], - "verify": result.get("verify", None), - "namespace": result.get("namespace"), - "uses": result.get("uses", 1), - "lease_duration": result["lease_duration"], - "issued": result["issued"], - } - - -def get_vault_connection(): - """ - Get the connection details for calling Vault, from local configuration if - it exists, or from the master otherwise - """ - - def _use_local_config(): - log.debug("Using Vault connection details from local config") - # Vault Enterprise requires a namespace - namespace = __opts__["vault"].get("namespace") - try: - if __opts__["vault"]["auth"]["method"] == "approle": - verify = __opts__["vault"].get("verify", None) - if selftoken_expired(): - log.debug("Vault token expired. 
Recreating one") - # Requesting a short ttl token - url = "{}/v1/auth/approle/login".format(__opts__["vault"]["url"]) - payload = {"role_id": __opts__["vault"]["auth"]["role_id"]} - if "secret_id" in __opts__["vault"]["auth"]: - payload["secret_id"] = __opts__["vault"]["auth"]["secret_id"] - if namespace is not None: - headers = {"X-Vault-Namespace": namespace} - response = requests.post( - url, headers=headers, json=payload, verify=verify - ) - else: - response = requests.post(url, json=payload, verify=verify) - if response.status_code != 200: - errmsg = "An error occurred while getting a token from approle" - raise salt.exceptions.CommandExecutionError(errmsg) - __opts__["vault"]["auth"]["token"] = response.json()["auth"][ - "client_token" - ] - if __opts__["vault"]["auth"]["method"] == "wrapped_token": - verify = __opts__["vault"].get("verify", None) - if _wrapped_token_valid(): - url = "{}/v1/sys/wrapping/unwrap".format(__opts__["vault"]["url"]) - headers = {"X-Vault-Token": __opts__["vault"]["auth"]["token"]} - if namespace is not None: - headers["X-Vault-Namespace"] = namespace - response = requests.post(url, headers=headers, verify=verify) - if response.status_code != 200: - errmsg = "An error occured while unwrapping vault token" - raise salt.exceptions.CommandExecutionError(errmsg) - __opts__["vault"]["auth"]["token"] = response.json()["auth"][ - "client_token" - ] - return { - "url": __opts__["vault"]["url"], - "namespace": namespace, - "token": __opts__["vault"]["auth"]["token"], - "verify": __opts__["vault"].get("verify", None), - "issued": int(round(time.time())), - "ttl": 3600, - } - except KeyError as err: - errmsg = 'Minion has "vault" config section, but could not find key "{}" within'.format( - err - ) - raise salt.exceptions.CommandExecutionError(errmsg) - - if "vault" in __opts__ and __opts__.get("__role", "minion") == "master": - if "id" in __grains__: - log.debug("Contacting master for Vault connection details") - return 
_get_token_and_url_from_master() - else: - return _use_local_config() - elif any( - ( - __opts__.get("local", None), - __opts__.get("file_client", None) == "local", - __opts__.get("master_type", None) == "disable", - ) - ): - return _use_local_config() - else: - log.debug("Contacting master for Vault connection details") - return _get_token_and_url_from_master() - - -def del_cache(): - """ - Delete cache file - """ - log.debug("Deleting cache file") - cache_file = os.path.join(__opts__["cachedir"], "salt_vault_token") - - if os.path.exists(cache_file): - os.remove(cache_file) - else: - log.debug("Attempted to delete vault cache file, but it does not exist.") - - -def write_cache(connection): - """ - Write the vault token to cache - """ - # If uses is 1 and unlimited_use_token is not true, then this is a single use token and should not be cached - # In that case, we still want to cache the vault metadata lookup information for paths, so continue on - if ( - connection.get("uses", None) == 1 - and "unlimited_use_token" not in connection - and "vault_secret_path_metadata" not in connection - ): - log.debug("Not caching vault single use token") - __context__["vault_token"] = connection - return True - elif ( - "vault_secret_path_metadata" in __context__ - and "vault_secret_path_metadata" not in connection - ): - # If session storage is being used, and info passed is not the already saved metadata - log.debug("Storing token only for this session") - __context__["vault_token"] = connection - return True - elif "vault_secret_path_metadata" in __context__: - # Must have been passed metadata. 
This is already handled by _get_secret_path_metadata - # and does not need to be resaved - return True - temp_fp, temp_file = tempfile.mkstemp(dir=__opts__["cachedir"]) - cache_file = os.path.join(__opts__["cachedir"], "salt_vault_token") - try: - log.debug("Writing vault cache file") - # Detect if token was issued without use limit - if connection.get("uses") == 0: - connection["unlimited_use_token"] = True - else: - connection["unlimited_use_token"] = False - with salt.utils.files.fpopen(temp_file, "w", mode=0o600) as fp_: - fp_.write(salt.utils.json.dumps(connection)) - os.close(temp_fp) - # Atomic operation to pervent race condition with concurrent calls. - os.rename(temp_file, cache_file) - return True - except OSError: - log.error( - "Failed to cache vault information", exc_info_on_loglevel=logging.DEBUG - ) - return False - - -def _read_cache_file(): - """ - Return contents of cache file - """ - try: - cache_file = os.path.join(__opts__["cachedir"], "salt_vault_token") - with salt.utils.files.fopen(cache_file, "r") as contents: - return salt.utils.json.load(contents) - except FileNotFoundError: - return {} - - -def get_cache(): - """ - Return connection information from vault cache file - """ - - def _gen_new_connection(): - log.debug("Refreshing token") - connection = get_vault_connection() - write_status = write_cache(connection) - return connection - - connection = _read_cache_file() - # If no cache, or only metadata info is saved in cache, generate a new token - if not connection or "url" not in connection: - return _gen_new_connection() - - # Drop 10 seconds from ttl to be safe - if "lease_duration" in connection: - ttl = connection["lease_duration"] - else: - ttl = connection["ttl"] - ttl10 = connection["issued"] + ttl - 10 - cur_time = int(round(time.time())) - - # Determine if ttl still valid - if ttl10 < cur_time: - log.debug("Cached token has expired %s < %s: DELETING", ttl10, cur_time) - del_cache() - return _gen_new_connection() - else: - 
log.debug("Token has not expired %s > %s", ttl10, cur_time) - return connection - - -def make_request( - method, - resource, - token=None, - vault_url=None, - namespace=None, - get_token_url=False, - retry=False, - **args -): - """ - Make a request to Vault - """ - if "vault_token" in __context__: - connection = __context__["vault_token"] - else: - connection = get_cache() - token = connection["token"] if not token else token - vault_url = connection["url"] if not vault_url else vault_url - namespace = namespace or connection.get("namespace") - if "verify" in args: - args["verify"] = args["verify"] - else: - try: - args["verify"] = __opts__.get("vault").get("verify", None) - except (TypeError, AttributeError): - # Don't worry about setting verify if it doesn't exist - pass - url = "{}/{}".format(vault_url, resource) - headers = {"X-Vault-Token": str(token), "Content-Type": "application/json"} - if namespace is not None: - headers["X-Vault-Namespace"] = namespace - response = requests.request(method, url, headers=headers, **args) - if not response.ok and response.json().get("errors", None) == ["permission denied"]: - log.info("Permission denied from vault") - del_cache() - if not retry: - log.debug("Retrying with new credentials") - response = make_request( - method, - resource, - token=None, - vault_url=vault_url, - get_token_url=get_token_url, - retry=True, - **args - ) - else: - log.error("Unable to connect to vault server: %s", response.text) - return response - elif not response.ok: - log.error("Error from vault: %s", response.text) - return response - - # Decrement vault uses, only on secret URL lookups and multi use tokens - if ( - "uses" in connection - and not connection.get("unlimited_use_token") - and not resource.startswith("v1/sys") - ): - log.debug("Decrementing Vault uses on limited token for url: %s", resource) - connection["uses"] -= 1 - if connection["uses"] <= 0: - log.debug("Cached token has no more uses left.") - if "vault_token" not in 
__context__: - del_cache() - else: - log.debug("Deleting token from memory") - del __context__["vault_token"] - else: - log.debug("Token has %s uses left", connection["uses"]) - write_cache(connection) - - if get_token_url: - return response, token, vault_url - else: - return response - - -def selftoken_expired(): - """ - Validate the current token exists and is still valid - """ - try: - verify = __opts__["vault"].get("verify", None) - # Vault Enterprise requires a namespace - namespace = __opts__["vault"].get("namespace") - url = "{}/v1/auth/token/lookup-self".format(__opts__["vault"]["url"]) - if "token" not in __opts__["vault"]["auth"]: - return True - headers = {"X-Vault-Token": __opts__["vault"]["auth"]["token"]} - if namespace is not None: - headers["X-Vault-Namespace"] = namespace - response = requests.get(url, headers=headers, verify=verify) - if response.status_code != 200: - return True - return False - except Exception as e: # pylint: disable=broad-except - raise salt.exceptions.CommandExecutionError( - "Error while looking up self token : {}".format(e) - ) - - -def _wrapped_token_valid(): - """ - Validate the wrapped token exists and is still valid - """ - try: - verify = __opts__["vault"].get("verify", None) - # Vault Enterprise requires a namespace - namespace = __opts__["vault"].get("namespace") - url = "{}/v1/sys/wrapping/lookup".format(__opts__["vault"]["url"]) - if "token" not in __opts__["vault"]["auth"]: - return False - headers = {"X-Vault-Token": __opts__["vault"]["auth"]["token"]} - if namespace is not None: - headers["X-Vault-Namespace"] = namespace - response = requests.post(url, headers=headers, verify=verify) - if response.status_code != 200: - return False - return True - except Exception as e: # pylint: disable=broad-except - raise salt.exceptions.CommandExecutionError( - "Error while looking up wrapped token : {}".format(e) - ) - - -def is_v2(path): - """ - Determines if a given secret path is kv version 1 or 2 - - CLI Example: - - .. 
code-block:: bash - - salt '*' vault.is_v2 "secret/my/secret" - """ - ret = {"v2": False, "data": path, "metadata": path, "delete": path, "type": None} - path_metadata = _get_secret_path_metadata(path) - if not path_metadata: - # metadata lookup failed. Simply return not v2 - return ret - ret["type"] = path_metadata.get("type", "kv") - if ( - ret["type"] == "kv" - and path_metadata["options"] is not None - and path_metadata.get("options", {}).get("version", "1") in ["2"] - ): - ret["v2"] = True - ret["data"] = _v2_the_path(path, path_metadata.get("path", path)) - ret["metadata"] = _v2_the_path( - path, path_metadata.get("path", path), "metadata" - ) - ret["destroy"] = _v2_the_path(path, path_metadata.get("path", path), "destroy") - return ret - - -def _v2_the_path(path, pfilter, ptype="data"): - """ - Given a path, a filter, and a path type, properly inject 'data' or 'metadata' into the path - - CLI Example: - - .. code-block:: python - - _v2_the_path('dev/secrets/fu/bar', 'dev/secrets', 'data') => 'dev/secrets/data/fu/bar' - """ - possible_types = ["data", "metadata", "destroy"] - assert ptype in possible_types - msg = ( - "Path {} already contains {} in the right place - saltstack duct tape?".format( - path, ptype - ) - ) - - path = path.rstrip("/").lstrip("/") - pfilter = pfilter.rstrip("/").lstrip("/") - - together = pfilter + "/" + ptype - - otype = possible_types[0] if possible_types[0] != ptype else possible_types[1] - other = pfilter + "/" + otype - if path.startswith(other): - path = path.replace(other, together, 1) - msg = 'Path is a "{}" type but "{}" type requested - Flipping: {}'.format( - otype, ptype, path - ) - elif not path.startswith(together): - msg = "Converting path to v2 {} => {}".format( - path, path.replace(pfilter, together, 1) - ) - path = path.replace(pfilter, together, 1) - - log.debug(msg) - return path - - -def _get_secret_path_metadata(path): - """ - Given a path, query vault to determine mount point, type, and version - - CLI 
Example: - - .. code-block:: python - - _get_secret_path_metadata('dev/secrets/fu/bar') - """ - ckey = "vault_secret_path_metadata" - - # Attempt to lookup from cache - if ckey in __context__: - cache_content = __context__[ckey] - else: - cache_content = _read_cache_file() - if ckey not in cache_content: - cache_content[ckey] = {} - - ret = None - if path.startswith(tuple(cache_content[ckey].keys())): - log.debug("Found cached metadata for %s", path) - ret = next(v for k, v in cache_content[ckey].items() if path.startswith(k)) - else: - log.debug("Fetching metadata for %s", path) - try: - url = "v1/sys/internal/ui/mounts/{}".format(path) - response = make_request("GET", url) - if response.ok: - response.raise_for_status() - if response.json().get("data", False): - log.debug("Got metadata for %s", path) - ret = response.json()["data"] - # Write metadata to cache file - # Check for new cache content from make_request - if "url" not in cache_content: - if ckey in __context__: - cache_content = __context__[ckey] - else: - cache_content = _read_cache_file() - if ckey not in cache_content: - cache_content[ckey] = {} - cache_content[ckey][path] = ret - write_cache(cache_content) - else: - raise response.json() - except Exception as err: # pylint: disable=broad-except - log.error("Failed to get secret metadata %s: %s", type(err).__name__, err) - return ret - - -def expand_pattern_lists(pattern, **mappings): - """ - Expands the pattern for any list-valued mappings, such that for any list of - length N in the mappings present in the pattern, N copies of the pattern are - returned, each with an element of the list substituted. - - pattern: - A pattern to expand, for example ``by-role/{grains[roles]}`` - - mappings: - A dictionary of variables that can be expanded into the pattern. - - Example: Given the pattern `` by-role/{grains[roles]}`` and the below grains - - .. 
code-block:: yaml - - grains: - roles: - - web - - database - - This function will expand into two patterns, - ``[by-role/web, by-role/database]``. - - Note that this method does not expand any non-list patterns. - """ - expanded_patterns = [] - f = string.Formatter() - - # This function uses a string.Formatter to get all the formatting tokens from - # the pattern, then recursively replaces tokens whose expanded value is a - # list. For a list with N items, it will create N new pattern strings and - # then continue with the next token. In practice this is expected to not be - # very expensive, since patterns will typically involve a handful of lists at - # most. - - for (_, field_name, _, _) in f.parse(pattern): - if field_name is None: - continue - (value, _) = f.get_field(field_name, None, mappings) - if isinstance(value, list): - token = "{{{0}}}".format(field_name) - expanded = [pattern.replace(token, str(elem)) for elem in value] - for expanded_item in expanded: - result = expand_pattern_lists(expanded_item, **mappings) - expanded_patterns += result - return expanded_patterns - return [pattern] diff --git a/_utils/vaultutil.py b/_utils/vaultutil.py new file mode 100644 index 0000000..5cf722d --- /dev/null +++ b/_utils/vaultutil.py @@ -0,0 +1,2124 @@ +""" +:maintainer: SaltStack +:maturity: new +:platform: all + +Utilities supporting modules for Hashicorp Vault. Configuration instructions are +documented in the :ref:`execution module docs `. 
+""" + +import base64 +import copy +import datetime +import logging +import re +import string +import time + +import requests +import salt.cache +import salt.crypt +import salt.exceptions +import salt.utils.data +import salt.utils.dictupdate +import salt.utils.json +import salt.utils.versions + +log = logging.getLogger(__name__) +logging.getLogger("requests").setLevel(logging.WARNING) + + +# Make __salt__ available globally to avoid loading minion_mods multiple times +__salt__ = None + + +def query( + method, + endpoint, + opts, + context, + payload=None, + wrap=False, + raise_error=True, +): + """ + Make a request to Vault + """ + vault = get_authd_client(opts, context) + return vault.request( + method, endpoint, payload=payload, wrap=wrap, raise_error=raise_error + ) + + +def query_raw( + method, + endpoint, + opts, + context, + payload=None, + wrap=False, +): + """ + Make a request to Vault + """ + vault = get_authd_client(opts, context) + return vault.request_raw(method, endpoint, payload=payload, wrap=wrap) + + +def is_v2(path, opts=None, context=None): + """ + Determines if a given secret path is kv version 1 or 2 + + CLI Example: + + .. code-block:: bash + + salt '*' vault.is_v2 "secret/my/secret" + """ + # TODO: consider if at least context is really necessary to require + if opts is None or context is None: + opts = globals().get("__opts__", {}) if opts is None else opts + context = globals().get("__context__", {}) if context is None else context + salt.utils.versions.warn_until( + "Argon", + "The __utils__ loader functionality will be removed. This will " + "cause context/opts dunders to be unavailable in utility modules. " + "Please pass opts and context from importing Salt modules explicitly.", + ) + kv = _get_kv(opts, context) + return kv.is_v2(path) + + +def read_kv(path, opts, context, include_metadata=False): + """ + Read secret at . 
+ """ + kv = _get_kv(opts, context) + try: + return kv.read(path, include_metadata=include_metadata) + except (VaultAuthExpired, VaultPermissionDeniedError): + # in case metadata lookups spend a use TODO: check if necessary + clear_cache(opts) + kv = _get_kv(opts, context) + return kv.read(path, include_metadata=include_metadata) + + +def write_kv(path, data, opts, context): + """ + Write secret to . + """ + kv = _get_kv(opts, context) + try: + return kv.write(path, data) + except (VaultAuthExpired, VaultPermissionDeniedError): + clear_cache(opts) + kv = _get_kv(opts, context) + return kv.write(path, data) + + +def patch_kv(path, data, opts, context): + """ + Patch secret at . + """ + kv = _get_kv(opts, context) + try: + return kv.patch(path, data) + except (VaultAuthExpired, VaultPermissionDeniedError): + clear_cache(opts) + kv = _get_kv(opts, context) + return kv.patch(path, data) + + +def delete_kv(path, opts, context, versions=None): + """ + Delete secret at . For KV v2, versions can be specified, + which will be soft-deleted. + """ + kv = _get_kv(opts, context) + try: + return kv.delete(path, versions=versions) + except (VaultAuthExpired, VaultPermissionDeniedError): + clear_cache(opts) + kv = _get_kv(opts, context) + return kv.delete(path, versions=versions) + + +def destroy_kv(path, versions, opts, context): + """ + Destroy secret at . Requires KV v2. + """ + kv = _get_kv(opts, context) + try: + return kv.destroy(path, versions) + except (VaultAuthExpired, VaultPermissionDeniedError): + clear_cache(opts) + kv = _get_kv(opts, context) + return kv.destroy(path, versions) + + +def list_kv(path, opts, context): + """ + List secrets at . Returns ``{"keys": []}`` by default + for backwards-compatibility reasons, unless is True. 
+ """ + kv = _get_kv(opts, context) + try: + return kv.list(path) + except (VaultAuthExpired, VaultPermissionDeniedError): + clear_cache(opts) + kv = _get_kv(opts, context) + return kv.list(path) + + +def _get_kv(opts, context): + client, config = get_authd_client(opts, context, get_config=True) + cbank = _get_config_cache_bank() + "/connection" + ckey = "secret_path_metadata" + metadata_cache = VaultCache(config, opts, context, cbank, ckey) + return VaultKV(client, metadata_cache) + + +def clear_cache(opts, ckey=None, connection_only=True): + """ + Clears non-session cache. + """ + cache = salt.cache.factory(opts) + if SALT_RUNTYPE_MASTER_IMPERSONATING == _get_salt_run_type(opts): + cbank = _get_config_cache_bank(None, opts["grains"]["id"]) + else: + cbank = _get_config_cache_bank() + if connection_only: + cbank += "/connection" + cache.flush(cbank, ckey) + + +def expand_pattern_lists(pattern, **mappings): + """ + Expands the pattern for any list-valued mappings, such that for any list of + length N in the mappings present in the pattern, N copies of the pattern are + returned, each with an element of the list substituted. + + pattern: + A pattern to expand, for example ``by-role/{grains[roles]}`` + + mappings: + A dictionary of variables that can be expanded into the pattern. + + Example: Given the pattern `` by-role/{grains[roles]}`` and the below grains + + .. code-block:: yaml + + grains: + roles: + - web + - database + + This function will expand into two patterns, + ``[by-role/web, by-role/database]``. + + Note that this method does not expand any non-list patterns. + """ + expanded_patterns = [] + f = string.Formatter() + + # This function uses a string.Formatter to get all the formatting tokens from + # the pattern, then recursively replaces tokens whose expanded value is a + # list. For a list with N items, it will create N new pattern strings and + # then continue with the next token. 
In practice this is expected to not be + # very expensive, since patterns will typically involve a handful of lists at + # most. + + for (_, field_name, _, _) in f.parse(pattern): + if field_name is None: + continue + (value, _) = f.get_field(field_name, None, mappings) + if isinstance(value, list): + token = f"{{{field_name}}}" + expanded = [pattern.replace(token, str(elem)) for elem in value] + for expanded_item in expanded: + result = expand_pattern_lists(expanded_item, **mappings) + expanded_patterns += result + return expanded_patterns + return [pattern] + + +SALT_RUNTYPE_MASTER = 0 +SALT_RUNTYPE_MASTER_IMPERSONATING = 1 +SALT_RUNTYPE_MINION_LOCAL = 2 +SALT_RUNTYPE_MINION_REMOTE = 3 + + +def _get_salt_run_type(opts): + if "vault" in opts and opts.get("__role", "minion") == "master": + if "grains" in opts and "id" in opts["grains"]: + return SALT_RUNTYPE_MASTER_IMPERSONATING + return SALT_RUNTYPE_MASTER + if any( + ( + opts.get("local", None), + opts.get("file_client", None) == "local", + opts.get("master_type", None) == "disable", + ) + ): + return SALT_RUNTYPE_MINION_LOCAL + return SALT_RUNTYPE_MINION_REMOTE + + +def _get_config_cache_bank(config=None, minion_id=None, opts=None, force_local=False): + if force_local: + # pillar compilation would otherwise leak tokens between master + # and minions + minion_id = None + elif minion_id is None and opts is not None: + if ( + _get_salt_run_type(opts) == SALT_RUNTYPE_MASTER_IMPERSONATING + and not force_local + ): + minion_id = opts["grains"]["id"] + prefix = "vault" if minion_id is None else f"minions/{minion_id}/vault" + if config is None: + return prefix + url_key = config["url"].replace("://", "_") + ns_key = config["namespace"] or "__default_ns__" + ns_key.replace("/", "_") + return f"{prefix}/{url_key}/{ns_key}" + + +def get_authd_client(opts, context, force_local=False, get_config=False): + """ + Returns an AuthenticatedVaultClient that is valid for at least one query. 
+ """ + try: + client, config = _build_authd_client(opts, context, force_local=force_local) + except (VaultAuthExpired, VaultConfigExpired, VaultPermissionDeniedError): + clear_cache(opts) + client, config = _build_authd_client(opts, context, force_local=force_local) + + # do not check the vault server for token validity because that consumes a use + if client.is_valid(remote=False): + if get_config: + return client, config + return client + + client = _build_authd_client(opts, context, force_local=force_local) + if get_config: + return client, config + return client + + +def _build_authd_client(opts, context, force_local=False): + cbank = _get_config_cache_bank(opts=opts, force_local=force_local) + "/connection" + config, embedded_token = _get_connection_config( + cbank, opts, context, force_local=force_local + ) + token_cache = VaultAuthCache(config, opts, context, cbank, "token", VaultToken) + + client = None + + if config["auth"]["method"] == "approle": + secret_id = config["auth"]["secret_id"] or None + cached_token = token_cache.get(10) + if secret_id: + secret_id_cache = VaultAuthCache( + config, opts, context, cbank, "secret_id", VaultAppRoleSecretId + ) + secret_id = secret_id_cache.get() + # only fetch secret-id if there is no cached valid token + if cached_token is None and secret_id is None: + secret_id = _fetch_secret_id( + config, opts, secret_id_cache, force_local=force_local + ) + if secret_id is None: + secret_id = InvalidVaultAppRoleSecretId() + role_id = config["auth"]["role_id"] + # this happens with wrapped response merging + if isinstance(role_id, dict): + role_id = role_id["role_id"] + approle = VaultAppRole(role_id, secret_id) + token_auth = VaultTokenAuth(cache=token_cache) + unauthd_client = VaultClient(**config["server"]) + auth = VaultAppRoleAuth( + approle, + unauthd_client, + mount=config["auth"]["approle_mount"], + token_store=token_auth, + ) + client = AuthenticatedVaultClient(auth, **config["server"]) + if config["auth"]["method"] 
in ["token", "wrapped_token"]: + token = _fetch_token( + config, + opts, + token_cache, + force_local=force_local, + embedded_token=embedded_token, + ) + auth = VaultTokenAuth(token=token, cache=token_cache) + client = AuthenticatedVaultClient(auth, **config["server"]) + + if client is not None: + return client, config + raise salt.exceptions.SaltException("Connection configuration is invalid.") + + +def _get_connection_config(cbank, opts, context, force_local=False): + def cache_or_fetch(cbank, opts, context): + if cbank in context and "config" in context[cbank]: + return context[cbank]["config"] + cache = salt.cache.factory(opts) + if cache.contains(cbank, "config"): + config = cache.fetch(cbank, "config") + config_ttl = config.get("cache", {}).get("config", 60) + config_updated = cache.updated(cbank, "config") + if int(time.time()) - config_updated < config_ttl: + log.debug("Using cached Vault server connection configuration.") + return config, None + log.debug("Cached config outdated, flushing connection config cache.") + cache.flush(cbank) + + log.debug("Using new Vault server connection configuration.") + config = _query_master( + "get_config", opts, issue_params=opts.get("issue_params") + ) + config = parse_config(config) + # do not couple token cache with configuration cache + embedded_token = config["auth"].pop("token", None) + config = { + "auth": config["auth"], + "cache": config["cache"], + "server": config["server"], + } + if "session" == config["cache"]["backend"]: + # reset all connection-scoped data + context[cbank] = {"config": config} + else: + cache.store(cbank, "config", config) + return config, embedded_token + + runtype = _get_salt_run_type(opts) + + if runtype in [SALT_RUNTYPE_MASTER, SALT_RUNTYPE_MINION_LOCAL] or force_local: + # only cache config fetched from remote + return _use_local_config(opts) + + log.debug("Using Vault server connection configuration from remote.") + return cache_or_fetch(cbank, opts, context) + + +def 
def _fetch_token(config, opts, token_cache, force_local=False, embedded_token=None):
    """
    Return a valid VaultToken: from cache, from locally configured (possibly
    wrapped) data, or issued by the master, depending on the run type.

    config
        Parsed vault configuration.

    opts
        Salt options dict.

    token_cache
        VaultAuthCache instance for token storage.

    force_local
        Treat this process as locally configured even on a minion.

    embedded_token
        Token data extracted from the local configuration: a raw token
        string, a wrapped-response dict, or a token-data dict.
    """

    def cache_or_fetch(config, opts, token_cache, embedded_token):
        # Require the cached token to be valid for at least 10 more uses/seconds.
        token = token_cache.get(10)
        if token is not None:
            log.debug("Using cached token.")
            return token

        if isinstance(embedded_token, dict):
            token = VaultToken(**embedded_token)

        if not isinstance(token, VaultToken) or not token.is_valid(10):
            log.debug("Fetching new Vault token.")
            creation_path = r"auth/token/create(/[\w-]+)?"
            token = _query_master(
                "generate_new_token",
                opts,
                expected_server=config["server"],
                unwrap_expected_creation_path=creation_path,
                issue_params=opts.get("issue_params"),
            )
            token = VaultToken(**token["auth"])

        # do not cache single-use tokens
        if token.num_uses != 1:
            token_cache.store(token)
        return token

    runtype = _get_salt_run_type(opts)

    if runtype in [SALT_RUNTYPE_MASTER, SALT_RUNTYPE_MINION_LOCAL] or force_local:
        # Initialize so an unmatched branch yields the explicit error below
        # instead of a NameError.
        token = None
        if isinstance(embedded_token, dict):
            if embedded_token.get("wrap_info"):
                unauthd_client = VaultClient(**config["server"])
                unwrapped = unauthd_client.unwrap(
                    embedded_token["wrap_info"]["token"],
                    expected_creation_path=_get_expected_creation_path(
                        "token", config
                    ),
                )["auth"]
                # Bugfix: the original constructed the token from the still-
                # wrapped dict (VaultToken(**embedded_token)); the unwrapped
                # auth payload is what carries the actual token data.
                token = VaultToken(**unwrapped)
            else:
                # assumes the dict already contains token data -- TODO confirm
                token = VaultToken(**embedded_token)
        elif config["auth"]["method"] == "wrapped_token":
            unauthd_client = VaultClient(**config["server"])
            unwrapped = unauthd_client.unwrap(
                embedded_token,
                expected_creation_path=_get_expected_creation_path("token", config),
            )["auth"]
            token = VaultToken(**unwrapped)
        elif embedded_token is not None:
            token = token_cache.get()
            # Bugfix: the original compared ``token != str(token)`` (always
            # a re-lookup); the intent is to detect a changed configured token.
            if token is None or str(token) != embedded_token:
                # lookup and verify raw token
                client = VaultClient(**config["server"])
                token_info = client.token_lookup(embedded_token, raw=True)
                if token_info.status_code != 200:
                    raise VaultException(
                        "Configured token cannot be verified. It is most likely expired or invalid."
                    )
                token = VaultToken(**token_info.json()["data"])
                token_cache.store(token)
        if token is not None:
            return token
        raise VaultException("Invalid configuration, missing token.")

    log.debug("Using token generated by master.")
    return cache_or_fetch(config, opts, token_cache, embedded_token)
secret_id_num_uses + misc_data = result.get("misc_data", {}) + + if "wrap_info" in result or result.get("wrap_info_nested"): + if ( + unwrap_client is not None + and unwrap_client.get_config() != result["server"] + ): + unwrap_client = None + # make sure to fetch wrapped data anyways for security reasons + config_expired = True + + if unwrap_client is None: + unwrap_client = VaultClient(**result["server"]) + + for key in [""] + result.get("wrap_info_nested", []): + if key: + wrapped = salt.utils.data.traverse_dict(result, key) + else: + wrapped = result + if not wrapped or "wrap_info" not in wrapped: + continue + wrapped_response = VaultWrappedResponse(**wrapped["wrap_info"]) + unwrapped_response = unwrap_client.unwrap( + wrapped_response, + expected_creation_path=unwrap_expected_creation_path, + ) + if key: + salt.utils.dictupdate.set_dict_key_value( + result, + key, + unwrapped_response.get("auth") + or unwrapped_response.get("data"), + ) + else: + if unwrapped_response.get("auth"): + result.update({"auth": unwrapped_response["auth"]}) + if unwrapped_response.get("data"): + result.update({"data": unwrapped_response["data"]}) + + if config_expired: + raise VaultConfigExpired() + + for key, val in misc_data.items(): + if key not in result["data"]: + result["data"][key] = val + + result.pop("wrap_info", None) + result.pop("misc_data", None) + return result + + global __salt__ # pylint: disable=global-statement + if __salt__ is None: + __salt__ = salt.loader.minion_mods(opts) + + minion_id = opts["grains"]["id"] + pki_dir = opts["pki_dir"] + + # When rendering pillars, the module executes on the master, but the token + # should be issued for the minion, so that the correct policies are applied + if opts.get("__role", "minion") == "minion": + private_key = f"{pki_dir}/minion.pem" + log.debug( + f"Running on minion, signing request `vault.{func}` with key {private_key}", + ) + signature = base64.b64encode(salt.crypt.sign_message(private_key, minion_id)) + arg = [ + 
("minion_id", minion_id), + ("signature", signature), + ("impersonated_by_master", False), + ] + list(kwargs.items()) + + result = __salt__["publish.runner"]( + f"vault.{func}", arg=[{"__kwarg__": True, k: v} for k, v in arg] + ) + else: + private_key = f"{pki_dir}/master.pem" + log.debug( + f"Running on master, signing request `vault.{func}` for {minion_id} " + f"with key {private_key}", + ) + signature = base64.b64encode(salt.crypt.sign_message(private_key, minion_id)) + result = __salt__["saltutil.runner"]( + f"vault.{func}", + minion_id=minion_id, + signature=signature, + impersonated_by_master=True, + **kwargs, + ) + return check_result( + result, + expected_server=expected_server, + unwrap_client=unwrap_client, + unwrap_expected_creation_path=unwrap_expected_creation_path, + ) + + +def _use_local_config(opts): + log.debug("Using Vault connection details from local config.") + config = parse_config(opts.get("vault", {})) + embedded_token = config["auth"].pop("token", None) + return { + "auth": config["auth"], + "cache": config["cache"], + "server": config["server"], + }, embedded_token + + +def parse_config(config): + """ + Returns a vault configuration dictionary that has all + keys with defaults. Checks if required data is available. 
+ """ + default_config = { + "auth": { + "approle_mount": "approle", + "approle_name": "salt-master", + "method": "token", + "secret_id": None, + }, + "cache": { + "backend": "session", + "config": 3600, + "secret": "ttl", + }, + "issue": { + "allow_minion_override_params": False, + "type": "token", + "approle": { + "mount": "salt-minions", + "params": { + "bind_secret_id": True, + "secret_id_num_uses": 1, + "secret_id_ttl": 60, + "ttl": 60, + "uses": 10, + }, + }, + "token": { + "role_name": None, + "params": { + "ttl": None, + "uses": 1, + }, + }, + "wrap": "30s", + }, + "issue_params": {}, + "metadata": { + "entity": { + "minion-id": "{minion}", + }, + "token": { + "saltstack-jid": "{jid}", + "saltstack-minion": "{minion}", + "saltstack-user": "{user}", + }, + }, + "policies": { + "assign": [ + "saltstack/minions", + "saltstack/{minion}", + ], + "cache_time": 60, + "refresh_pillar": None, + }, + "server": { + "namespace": None, + "verify": None, + }, + } + try: + # Policy generation has params, the new config groups them together. + if isinstance(config.get("policies", {}), list): + policies_list = config["policies"] + config["policies"] = {"assign": policies_list} + merged = salt.utils.dictupdate.merge( + default_config, + config, + strategy="smart", + merge_lists=False, + ) + # ttl, uses were used as configuration for issuance and minion overrides as well + # as token meta information. The new configuration splits those semantics. + for old_token_conf in ["ttl", "uses"]: + if old_token_conf in merged["auth"]: + merged["issue"]["token"]["params"][old_token_conf] = merged[ + "issue_params" + ][old_token_conf] = merged["auth"][old_token_conf] + # Those were found in the root namespace, but grouping them together + # makes semantic and practical sense. 
+ for old_server_conf in ["namespace", "url", "verify"]: + if old_server_conf in merged: + merged["server"][old_server_conf] = merged[old_server_conf] + if "role_name" in merged: + merged["issue"]["token"]["role_name"] = merged["role_name"] + if "token_backend" in merged["auth"]: + merged["cache"]["backend"] = merged["auth"]["token_backend"] + if "allow_minion_override" in merged["auth"]: + merged["issue"]["allow_minion_override_params"] = merged["auth"][ + "allow_minion_override" + ] + if merged["auth"]["method"] == "approle": + if "role_id" not in merged["auth"]: + raise AssertionError("auth:role_id is required for approle auth") + elif merged["auth"]["method"] == "token": + if "token" not in merged["auth"]: + raise AssertionError("auth:token is required for token auth") + if "url" not in merged["server"]: + raise AssertionError("server:url is required") + except AssertionError as err: + raise salt.exceptions.CommandExecutionError( + f"Invalid vault configuration: {err}" + ) from err + return merged + + +def _get_expected_creation_path(secret_type, config=None): + if "token" == secret_type: + return r"auth/token/create(/[^/]+)?" + + if "secret_id" == secret_type: + if config is not None: + return "auth/{}/role/{}/secret-id".format( + config["auth"]["approle_mount"], config["auth"]["approle_name"] + ) + return r"auth/[^/]+/role/[^/]+/secret-id" + + if "role_id" == secret_type: + if config is not None: + return "auth/{}/role/{}/role-id".format( + config["auth"]["approle_mount"], config["auth"]["approle_name"] + ) + return r"auth/[^/]+/role/[^/]+/role-id" + + raise salt.exceptions.SaltInvocationError( + f"secret_type must be one of token, secret_id, role_id, got `{secret_type}`." + ) + + +class VaultException(salt.exceptions.SaltException): + """ + Base class for exceptions raised by this module + """ + + +class VaultAuthExpired(VaultException): + """ + Raised when authentication data is reported to be outdated locally. 
+ """ + + +class VaultConfigExpired(VaultException): + """ + Raised when secret authentication data queried from the master reports + a different server configuration than locally cached. + """ + + +class VaultUnwrapException(VaultException): + """ + Raised when an expected creation path for a wrapping token differs + from the reported one. + This has to be taken seriously as it indicates tampering. + """ + + +# https://www.vaultproject.io/api-docs#http-status-codes +class VaultInvocationError(VaultException): + """ + HTTP 400 and InvalidArgumentException for this module + """ + + +class VaultPermissionDeniedError(VaultException): + """ + HTTP 403 + """ + + +class VaultNotFoundError(VaultException): + """ + HTTP 404 + In some cases, this is also raised when the client does not have + the correct permissions for the requested endpoint. + """ + + +class VaultUnsupportedOperationError(VaultException): + """ + HTTP 405 + """ + + +class VaultPreconditionFailedError(VaultException): + """ + HTTP 412 + """ + + +class VaultServerError(VaultException): + """ + HTTP 500 + HTTP 502 + """ + + +class VaultUnavailableError(VaultException): + """ + HTTP 503 + Indicates maintenance or sealed status. + """ + + +class VaultClient: + """ + Unauthenticated client for the Vault API. + Base class for authenticated client. + """ + + def __init__(self, url, namespace=None, verify=None): + self.url = url + self.namespace = namespace + self.verify = verify + + def delete(self, endpoint, wrap=False, raise_error=True, add_headers=None): + """ + Wrapper for client.request("POST", ...) + """ + return self.request( + "DELETE", + endpoint, + wrap=wrap, + raise_error=raise_error, + add_headers=add_headers, + ) + + def get(self, endpoint, wrap=False, raise_error=True, add_headers=None): + """ + Wrapper for client.request("GET", ...) 
+ """ + return self.request( + "GET", endpoint, wrap=wrap, raise_error=raise_error, add_headers=add_headers + ) + + def list(self, endpoint, wrap=False, raise_error=True, add_headers=None): + """ + Wrapper for client.request("LIST", ...) + TODO: configuration to enable GET requests with query parameters for LIST? + """ + return self.request( + "LIST", + endpoint, + wrap=wrap, + raise_error=raise_error, + add_headers=add_headers, + ) + + def post( + self, endpoint, payload=None, wrap=False, raise_error=True, add_headers=None + ): + """ + Wrapper for client.request("POST", ...) + Vault considers POST and PUT to be synonymous. + """ + return self.request( + "POST", + endpoint, + payload=payload, + wrap=wrap, + raise_error=raise_error, + add_headers=add_headers, + ) + + def patch(self, endpoint, payload, wrap=False, raise_error=True, add_headers=None): + """ + Wrapper for client.request("PATCH", ...) + """ + return self.request( + "PATCH", + endpoint, + payload=payload, + wrap=wrap, + raise_error=raise_error, + add_headers=add_headers, + ) + + def request( + self, + method, + endpoint, + payload=None, + wrap=False, + raise_error=True, + add_headers=None, + ): + """ + Issue a request against the Vault API. Returns boolean when no data was returned, + otherwise the decoded json data. + """ + res = self.request_raw( + method, endpoint, payload=payload, wrap=wrap, add_headers=add_headers + ) + if res.status_code == 204: + return True + data = res.json() + if not res.ok: + if raise_error: + self._raise_status(res) + return data or False + if wrap: + return VaultWrappedResponse(**data["wrap_info"]) + return data + + def request_raw(self, method, endpoint, payload=None, wrap=False, add_headers=None): + """ + Issue a request against the Vault API. Returns the raw response object. 
+ """ + url = self._get_url(endpoint) + headers = self._get_headers(wrap) + if isinstance(add_headers, dict): + headers.update(add_headers) + res = requests.request(method, url, headers=headers, json=payload) + return res + + def unwrap(self, wrapped, expected_creation_path=None): + """ + Unwraps the data associated with a wrapping token. + + wrapped + Wrapping token to unwrap + + expected_creation_path + Regex expression or list of expressions that should fully match the + wrapping token creation path. At least one match is required. + Defaults to None, which skips the check. + + .. note:: + This check prevents tampering with wrapping tokens, which are + valid for one request only. Usually, if an attacker sniffs a wrapping + token, there will be two unwrapping requests, causing an audit warning. + If the attacker can issue a new wrapping token and insert it into the + response instead, this warning would be silenced. Assuming they do not + possess the permissions to issue a wrapping token from the correct + endpoint, checking the creation path makes this kind of attack obvious. + """ + if expected_creation_path: + wrap_info = self.wrap_info(wrapped) + if not isinstance(expected_creation_path, list): + expected_creation_path = [expected_creation_path] + if not any( + re.fullmatch(p, wrap_info["creation_path"]) + for p in expected_creation_path + ): + raise VaultUnwrapException( + "Wrapped response was not created from expected Vault path: " + f"`{wrapped.creation_path}` is not matched by any of `{expected_creation_path}`.\n" + "This indicates tampering with the wrapping token by a third party " + "and should be taken very seriously! If you changed some authentication-" + "specific configuration on the master recently, especially minion " + "approle mount, you should consider if this error was caused by outdated " + "cached data on this minion instead." 
+ ) + url = self._get_url("sys/wrapping/unwrap") + headers = self._get_headers() + payload = {} + if "X-Vault-Token" not in headers: + headers["X-Vault-Token"] = str(wrapped) + else: + payload["token"] = str(wrapped) + res = requests.request("POST", url, headers=headers, json=payload) + if not res.ok: + self._raise_status(res) + return res.json() + + def wrap_info(self, wrapped): + """ + Lookup wrapping token meta information. + """ + endpoint = "sys/wrapping/lookup" + add_headers = {"X-Vault-Token": str(wrapped)} + return self.post(endpoint, wrap=False, add_headers=add_headers)["data"] + + def token_lookup(self, token=None, accessor=None, raw=False): + """ + Lookup token meta information. + + token + The token to look up or to use to look up the accessor. + Required. + + accessor + The accessor to use to query the token meta information. + + raw + Return the raw response object instead of response data. + Also disables status code checking. + """ + endpoint = "auth/token/lookup-self" + method = "GET" + payload = {} + if token is None: + raise VaultInvocationError( + "Unauthenticated VaultClient needs a token to lookup." + ) + add_headers = {"X-Vault-Token": token} + + if accessor is not None: + endpoint = "auth/token/lookup-accessor" + payload["accessor"] = accessor + + res = self.request_raw( + method, endpoint, payload=payload, wrap=False, add_headers=add_headers + ) + if raw: + return res + self._raise_status(res) + return res.json()["data"] + + def token_valid(self, remote=True): # pylint: disable=unused-argument + return False + + def get_config(self): + """ + Returns Vault server configuration used by this client. 
+ """ + return { + "url": self.url, + "namespace": self.namespace, + "verify": self.verify, + } + + def _get_url(self, endpoint): + endpoint = endpoint.strip("/") + return f"{self.url}/v1/{endpoint}" + + def _get_headers(self, wrap=False): + headers = {"Content-Type": "application/json", "X-Vault-Request": "true"} + if self.namespace is not None: + headers["X-Vault-Namespace"] = self.namespace + if wrap: + headers["X-Vault-Wrap-TTL"] = wrap + return headers + + def _raise_status(self, res): + errors = ", ".join(res.json().get("errors", [])) + if res.status_code == 400: + raise VaultInvocationError(errors) + if res.status_code == 403: + raise VaultPermissionDeniedError(errors) + if res.status_code == 404: + raise VaultNotFoundError(errors) + if res.status_code == 405: + raise VaultUnsupportedOperationError(errors) + if res.status_code == 412: + raise VaultPreconditionFailedError(errors) + if res.status_code in [500, 502]: + raise VaultServerError(errors) + if res.status_code == 503: + raise VaultUnavailableError(errors) + res.raise_for_status() + + +class AuthenticatedVaultClient(VaultClient): + """ + Authenticated client for the Vault API. + This should be used for most operations. + """ + + def __init__(self, auth, url, **kwargs): + self.auth = auth + super().__init__(url, **kwargs) + + def is_valid(self, remote=True): + """ + Check whether this client's authentication information is + still valid. + + remote + Check with the remote Vault server as well. This consumes + a token use. Defaults to true. + """ + if not self.auth.is_valid(): + return False + if not remote: + return True + try: + res = self.token_lookup(raw=True) + if res.status_code != 200: + return False + return True + except Exception as err: # pylint: disable=broad-except + raise salt.exceptions.CommandExecutionError( + "Error while looking up self token." + ) from err + + def token_lookup(self, token=None, accessor=None, raw=False): + """ + Lookup token meta information. 
+ + token + The token to look up. If neither token nor accessor + are specified, looks up the current token in use by + this client. + + accessor + The accessor of the token to query the meta information for. + + raw + Return the raw response object instead of response data. + Also disables status code checking. + """ + endpoint = "auth/token/lookup" + method = "POST" + payload = {} + if token is None and accessor is None: + endpoint += "-self" + method = "GET" + if token is not None: + payload["token"] = token + elif accessor is not None: + endpoint += "-accessor" + payload["accessor"] = accessor + if raw: + return self.request_raw(method, endpoint, payload=payload, wrap=False) + return self.request(method, endpoint, payload=payload, wrap=False)["data"] + + def token_renew(self, increment=None, token=None, accessor=None): + """ + Renew a token. + + increment + The time the token should be requested to be renewed for, starting + from the current point in time. The server might not honor this increment. + Can be an integer (seconds) or a time string like ``1h``. Optional. + + token + The token that should be renewed. Optional. + If token and accessor are unset, renews the token currently in use + by this client. + + accessor + The accessor of the token that should be renewed. Optional. 
+ """ + endpoint = "auth/token/renew" + payload = {} + + if token is None and accessor is None: + if not self.auth.is_renewable(): + return False + endpoint += "-self" + + if increment is not None: + payload["increment"] = increment + if token is not None: + payload["token"] = token + elif accessor is not None: + endpoint += "accessor" + payload["accessor"] = accessor + + res = self.post(endpoint, payload=payload) + + if token is None and accessor is None: + self.auth.update_token(res["auth"]) + return res["auth"] + + def request_raw(self, method, endpoint, payload=None, wrap=False, add_headers=None): + ret = super().request_raw( + method, endpoint, payload=payload, wrap=wrap, add_headers=add_headers + ) + if not endpoint.startswith("sys") and ret.ok or ret.status_code == 404: + # this is wonky tbh, there are many endpoints that consume a token use + self.auth.used() + return ret + + def _get_headers(self, wrap=False): + headers = super()._get_headers(wrap) + headers["X-Vault-Token"] = str(self.auth.get_token()) + return headers + + +class VaultCache: + """ + Encapsulates session and other cache backends for a single domain + like secret path metadata. 
+ """ + + def __init__(self, config, opts, context, cbank, ckey): + self.config = config + self.opts = opts + self.context = context + self.cbank = cbank + self.ckey = ckey + cache = None + if config["cache"]["backend"] != "session": + cache = salt.cache.factory(opts) + self.cache = cache + + def exists(self): + """ + Check whether data for this domain exists + """ + if self.cache is not None: + return self.cache.contains(self.cbank, self.ckey) + return self.cbank in self.context and self.ckey in self.context[self.cbank] + + def get(self): + """ + Return the cached data for this domain or None + """ + if not self.exists(): + return None + if self.cache is not None: + return self.cache.fetch(self.cbank, self.ckey) + return self.context[self.cbank][self.ckey] + + def flush(self): + """ + Flush the cache for this domain + """ + if self.cache is not None: + self.cache.flush(self.cbank, self.ckey) + else: + self.context[self.cbank].pop(self.ckey, None) + + def store(self, value): + """ + Store data for this domain + """ + if self.cache is not None: + self.cache.store(self.cbank, self.ckey, value) + return + if self.cbank not in self.context: + self.context[self.cbank] = {} + self.context[self.cbank][self.ckey] = value + + +class VaultAuthCache(VaultCache): + """ + Implements authentication secret-specific caches. Checks whether + the cached secrets are still valid before returning. 
+ """ + + def __init__(self, config, opts, context, cbank, ckey, auth_cls): + super().__init__(config, opts, context, cbank, ckey) + self.auth_cls = auth_cls + self.max_cache_time = config["cache"]["secret"] + + def get(self, seconds_future=0): + """ + Returns valid cached authentication data or None + """ + if not self.exists(): + return None + if self.cache is not None: + if "ttl" != self.max_cache_time: + last_updated = self.cache.updated(self.cbank, self.ckey) + if int(time.time()) - last_updated > self.max_cache_time: + log.debug("Cached secret outdated because of absolute config.") + self.flush() + return None + auth = self.auth_cls(**self.cache.fetch(self.cbank, self.ckey)) + else: + auth = self.auth_cls(**self.context[self.cbank][self.ckey]) + if auth.is_valid(seconds_future): + log.debug("Using cached secret.") + return auth + log.debug("Cached secret not valid anymore.") + self.flush() + return None + + def store(self, value): + """ + Store auth data + """ + if isinstance(value, VaultLease): + value = value.to_dict() + return super().store(value) + + +class VaultTokenAuth: + """ + Container for authentication tokens + """ + + def __init__(self, cache=None, token=None): + self.cache = cache + if token is None and cache is not None: + token = cache.get() + if token is None: + token = InvalidVaultToken() + self.token = token + + def is_renewable(self): + """ + Check whether the contained token is renewable, + which requires it to be valid and renewable + """ + return self.is_valid() and self.token.renewable + + def is_valid(self, seconds_future=0): + """ + Check whether the contained token is valid + """ + return self.token.is_valid(seconds_future) + + def get_token(self): + """ + Get the contained token if it is valid, otherwise + raises VaultAuthExpired + """ + if self.token.is_valid(): + return self.token + raise VaultAuthExpired() + + def used(self): + """ + Increment the use counter for the contained token + """ + self.token.used() + if 
self.token.num_uses != 0: + self._write_cache() + + def update_token(self, auth): + """ + Partially update the contained token (e.g. after renewal) + """ + self.token = self.token.with_(**auth) + self._write_cache() + + def replace_token(self, token): + """ + Completely replace the contained token with a new one + """ + self.token = token + self._write_cache() + + def _write_cache(self): + if self.cache is not None: + self.cache.store(self.token) + + +class VaultAppRoleAuth: + """ + Issues tokens from AppRole credentials. + """ + + def __init__(self, approle, client, mount="approle", token_store=None): + self.approle = approle + self.client = client + self.mount = mount + if token_store is None: + token_store = VaultTokenAuth() + self.token = token_store + + def is_renewable(self): + """ + Check whether the currently used token is renewable. + Secret IDs are not renewable. + """ + return self.token.is_renewable() + + def is_valid(self, seconds_future=0): + """ + Check whether the contained authentication data can be used + to issue a valid token + """ + return self.token.is_valid(seconds_future) or self.approle.is_valid( + seconds_future + ) + + def get_token(self): + """ + Return the token issued by the last login, if it is still valid, otherwise + login with the contained AppRole, if it is valid. Otherwise, + raises VaultAuthExpired + """ + if self.token.is_valid(): + return self.token.get_token() + if self.approle.is_valid(): + return self._login() + raise VaultAuthExpired() + + def used(self): + """ + Increment the use counter for the currently used token + """ + self.token.used() + + def update_token(self, auth): + """ + Partially update the contained token (e.g. after renewal) + """ + self.token.update_token(auth) + + def _login(self): + log.debug("Vault token expired. 
Recreating one by authenticating with AppRole.") + endpoint = f"auth/{self.mount}/login" + payload = self.approle.payload() + res = self.client.post(endpoint, payload=payload) + self.approle.secret_id.used() + self._replace_token(res["auth"]) + return self.token.get_token() + + def _replace_token(self, auth): + self.token.replace_token(VaultToken(**auth)) + + +class VaultLease: + """ + Base class for Vault leases that expire with time. + """ + + def __init__( + self, + lease_id, + renewable, + lease_duration, + creation_time=None, + **kwargs, # pylint: disable=unused-argument + ): + self.id = lease_id + self.renewable = renewable + self.lease_duration = lease_duration + if creation_time is not None: + try: + creation_time = int(creation_time) + except ValueError: + # Most endpoints respond with RFC3339-formatted strings + # This is a hacky way to use inbuilt tools only (Python >=v3.7) + first, second = creation_time.split(".") + second, third = second.split("+") + if len(second) > 6: + second = second[:6] + creation_time = int( + datetime.datetime.fromisoformat( + f"{first}.{second}+{third}" + ).timestamp() + ) + self.creation_time = creation_time or int(round(time.time())) + + def is_valid(self, seconds_future=0): + """ + Checks whether the lease is currently valid + + seconds_future + Allows to check whether the lease will still be valid + x seconds from now on. Defaults to 0. 
+ """ + if not self.lease_duration: + return True + return self.creation_time + self.lease_duration > time.time() + seconds_future + + def with_(self, **kwargs): + """ + Partially update the contained data + """ + attrs = copy.copy(self.__dict__) + attrs.update(kwargs) + return type(self)(**attrs) + + def __str__(self): + return self.id + + def __repr__(self): + return repr(self.to_dict()) + + def to_dict(self): + """ + Return a dict of all contained attributes + """ + return self.__dict__ + + +class VaultWrappedResponse(VaultLease): + """ + Data object representing a wrapped response + """ + + def __init__( + self, + token, + ttl, + creation_time, + creation_path, + accessor=None, + wrapped_accessor=None, + **kwargs, + ): + self.accessor = accessor + self.wrapped_accessor = wrapped_accessor + self.creation_path = creation_path + super().__init__( + token, + renewable=False, + lease_duration=ttl, + creation_time=creation_time, + **kwargs, + ) + self.token = self.id + self.ttl = self.lease_duration + + +class VaultAppRoleSecretId(VaultLease): + """ + Data object representing an AppRole secret-id. + """ + + def __init__( + self, + secret_id, + secret_id_ttl, + secret_id_num_uses=None, + creation_time=None, + use_count=0, + **kwargs, + ): + self.num_uses = self.secret_id_num_uses = secret_id_num_uses + self.use_count = use_count + super().__init__( + secret_id, + renewable=False, + lease_duration=secret_id_ttl, + creation_time=creation_time, + ) + self.secret_id_ttl = self.lease_duration + self.secret_id = self.id + + def payload(self): + """ + Return the payload to use for POST requests using this secret-id + """ + return {"secret_id": str(self)} + + def is_valid(self, seconds_future=0): + """ + Check whether this secret-id is still valid. Takes into account + the maximum number of uses, if they are known, and lease duration. + + seconds_future + Allows to check whether the lease will still be valid + x seconds from now on. Defaults to 0. 
+ """ + return super().is_valid(seconds_future) and ( + self.num_uses is None + or self.num_uses == 0 + or self.num_uses - self.use_count > 0 + ) + + def used(self): + """ + Increment the use counter of this secret-id by one. + """ + self.use_count += 1 + + def serialize_for_minion(self): + """ + Serialize all necessary data to recreate this object + into a dict that can be sent to a minion. + """ + data = { + "secret_id": self.secret_id, + "secret_id_ttl": self.secret_id_ttl, + "creation_time": self.creation_time, + } + if self.secret_id_num_uses is not None: + data["secret_id_num_uses"] = self.secret_id_num_uses + return data + + +class VaultToken(VaultLease): + """ + Data object representing an authentication token + """ + + def __init__( + self, + client_token, + renewable, + lease_duration, + num_uses, + accessor=None, + entity_id=None, + creation_time=None, + use_count=0, + **kwargs, + ): + self.accessor = accessor + self.num_uses = num_uses + self.entity_id = entity_id + self.use_count = use_count + super().__init__( + client_token, + renewable=renewable, + lease_duration=lease_duration, + creation_time=creation_time, + ) + # instantiation is currently suboptimal + # this is needed to make new copies with updated params + self.client_token = self.id + + def is_valid(self, seconds_future=0): + """ + Check whether this token is still valid. Takes into account + the maximum number of uses, and lease duration. + + seconds_future + Allows to check whether the lease will still be valid + x seconds from now on. Defaults to 0. + """ + return super().is_valid(seconds_future) and ( + self.num_uses == 0 or self.num_uses - self.use_count > 0 + ) + + def used(self): + """ + Increment the use counter of this token by one. 
+ """ + self.use_count += 1 + + def payload(self): + """ + Return the payload to use for POST requests using this token + """ + return {"token": str(self)} + + def serialize_for_minion(self): + """ + Serialize all necessary data to recreate this object + into a dict that can be sent to a minion. + """ + return { + "client_token": self.client_token, + "renewable": self.renewable, + "lease_duration": self.lease_duration, + "num_uses": self.num_uses, + "creation_time": self.creation_time, + } + + +class VaultAppRole: + """ + Container that represents an AppRole + """ + + def __init__(self, role_id, secret_id=None): + self.role_id = role_id + self.secret_id = secret_id + + def replace_secret_id(self, secret_id): + """ + Replace the contained secret-id with a new one + """ + self.secret_id = secret_id + + def is_valid(self, seconds_future=0): + """ + Checks whether the contained data can be used to authenticate + to Vault. secret-ids might not be required by the server when + bind_secret_id is set to false. + """ + if self.secret_id is None: + return True + return self.secret_id.is_valid(seconds_future) + + def payload(self): + """ + Return the payload to use for POST requests using this AppRole + """ + payload = {} + if self.secret_id is not None: + payload = self.secret_id.payload() + payload["role_id"] = self.role_id + return payload + + +class InvalidVaultToken(VaultToken): + def __init__(self, *args, **kwargs): + pass + + def is_valid(self, seconds_future=0): + return False + + +class InvalidVaultAppRoleSecretId(VaultAppRoleSecretId): + def __init__(self, *args, **kwargs): + pass + + def is_valid(self, seconds_future=0): + return False + + +class VaultKV: + """ + Interface to Vault secret paths + """ + + def __init__(self, client, metadata_cache): + self.client = client + self.metadata_cache = metadata_cache + + def read(self, path, include_metadata=False): + """ + Read secret data at path. 

        include_metadata
            For kv-v2, include metadata in the return value:
            ``{"data": {} ,"metadata": {}}``.
        """
        v2_info = self.is_v2(path)
        if v2_info["v2"]:
            path = v2_info["data"]
        res = self.client.get(path)
        ret = res["data"]
        # kv-v2 nests the secret under another "data" key, next to "metadata".
        if v2_info["v2"] and not include_metadata:
            return ret["data"]
        return ret

    def write(self, path, data):
        """
        Write secret data to path.
        """
        v2_info = self.is_v2(path)
        if v2_info["v2"]:
            path = v2_info["data"]
            # kv-v2 expects the payload wrapped in a "data" key.
            data = {"data": data}
        return self.client.post(path, payload=data)

    def patch(self, path, data):
        """
        Patch existing data. Requires kv-v2.
        This uses JSON Merge Patch format, see
        https://datatracker.ietf.org/doc/html/draft-ietf-appsawg-json-merge-patch-07
        """
        v2_info = self.is_v2(path)
        if not v2_info["v2"]:
            raise VaultInvocationError("Patch operation requires kv-v2.")
        path = v2_info["data"]
        data = {"data": data}
        add_headers = {"Content-Type": "application/merge-patch+json"}
        return self.client.patch(path, payload=data, add_headers=add_headers)

    def delete(self, path, versions=None):
        """
        Delete secret path data. For kv-v1, this is permanent.
        For kv-v2, this only soft-deletes the data.

        versions
            For kv-v2, specifies versions to soft-delete. Needs to be castable
            to a list of integers.
        """
        method = "DELETE"
        payload = None
        versions = self._parse_versions(versions)
        v2_info = self.is_v2(path)

        if v2_info["v2"]:
            if versions is not None:
                method = "POST"
                path = v2_info["delete_versions"]
                payload = {"versions": versions}
            else:
                # data and delete operations only differ by HTTP verb
                path = v2_info["data"]
        elif versions is not None:
            # semantically, for kv-v1 this resembles destroy
            if 0 not in versions:
                raise salt.exceptions.SaltInvocationError(
                    "Versions are not supported on kv-v1 paths."
                )
            # if the latest version was requested to be deleted anyways, continue
            log.warning(
                "Versions to destroy were requested, but the secret path does "
                "not use kv-v2. Deleting the secret only."
            )

        return self.client.request(method, path, payload=payload)

    def destroy(self, path, versions):
        """
        Permanently remove version data. Requires kv-v2.

        versions
            Specifies versions to destroy. Needs to be castable
            to a list of integers.
        """
        versions = self._parse_versions(versions)
        v2_info = self.is_v2(path)
        if not v2_info["v2"]:
            raise salt.exceptions.SaltInvocationError(
                "Destroy operation requires kv-v2."
            )
        path = v2_info["destroy"]
        payload = {"versions": versions}
        return self.client.post(path, payload=payload)

    def _parse_versions(self, versions):
        # Normalize ``versions`` to a list of ints; None passes through.
        if versions is None:
            return versions
        if not isinstance(versions, list):
            versions = [versions]
        try:
            versions = [int(x) for x in versions]
        except ValueError as err:
            raise VaultInvocationError(
                "Versions have to be specified as integers."
            ) from err
        return versions

    def nuke(self, path):
        """
        Delete path metadata and version data, including all version history.
        Requires kv-v2.
        """
        v2_info = self.is_v2(path)
        if not v2_info["v2"]:
            raise salt.exceptions.SaltInvocationError("Nuke operation requires kv-v2.")
        path = v2_info["metadata"]
        return self.client.delete(path)

    def list(self, path):
        """
        List keys at path.
        """
        v2_info = self.is_v2(path)
        if v2_info["v2"]:
            path = v2_info["metadata"]

        return self.client.list(path)["data"]["keys"]

    def is_v2(self, path):
        """
        Determines if a given secret path is kv version 1 or 2.
        """
        ret = {
            "v2": False,
            "data": path,
            "metadata": path,
            "delete": path,
            "type": None,
        }
        path_metadata = self._get_secret_path_metadata(path)
        if not path_metadata:
            # metadata lookup failed. Simply return not v2
            return ret
        ret["type"] = path_metadata.get("type", "kv")
        # NOTE(review): the subscript assumes the "options" key is always
        # present when metadata was found; the .get() right after suggests
        # it might not be — confirm against the mounts API response shape.
        if (
            ret["type"] == "kv"
            and path_metadata["options"] is not None
            and path_metadata.get("options", {}).get("version", "1") in ["2"]
        ):
            ret["v2"] = True
            ret["data"] = self._v2_the_path(path, path_metadata.get("path", path))
            ret["metadata"] = self._v2_the_path(
                path, path_metadata.get("path", path), "metadata"
            )
            ret["delete"] = ret["data"]
            ret["delete_versions"] = self._v2_the_path(
                path, path_metadata.get("path", path), "delete"
            )
            ret["destroy"] = self._v2_the_path(
                path, path_metadata.get("path", path), "destroy"
            )
        return ret

    def _v2_the_path(self, path, pfilter, ptype="data"):
        """
        Given a path, a filter, and a path type, properly inject
        'data' or 'metadata' into the path.
        """
        possible_types = ["data", "metadata", "delete", "destroy"]
        if ptype not in possible_types:
            # Programmer error: only the four types above are valid.
            raise AssertionError()
        msg = f"Path {path} already contains {ptype} in the right place - saltstack duct tape?"

        path = path.rstrip("/").lstrip("/")
        pfilter = pfilter.rstrip("/").lstrip("/")

        together = pfilter + "/" + ptype

        # Check whether the path already carries a different type segment
        # and flip it to the requested one if so.
        otype = possible_types[0] if possible_types[0] != ptype else possible_types[1]
        other = pfilter + "/" + otype
        if path.startswith(other):
            path = path.replace(other, together, 1)
            msg = f'Path is a "{otype}" type but "{ptype}" type requested - Flipping: {path}'
        elif not path.startswith(together):
            old_path = path
            path = path.replace(pfilter, together, 1)
            msg = f"Converting path to v2 {old_path} => {path}"
        log.debug(msg)
        return path

    def _get_secret_path_metadata(self, path):
        """
        Given a path, query vault to determine mount point, type, and version.
+ """ + cache_content = self.metadata_cache.get() or {} + + ret = None + if path.startswith(tuple(cache_content.keys())): + log.debug("Found cached metadata for %s", path) + ret = next(v for k, v in cache_content.items() if path.startswith(k)) + else: + log.debug("Fetching metadata for %s", path) + try: + endpoint = f"sys/internal/ui/mounts/{path}" + res = self.client.get(endpoint) + if "data" in res: + log.debug("Got metadata for %s", path) + cache_content[path] = ret = res["data"] + self.metadata_cache.store(cache_content) + else: + raise res + except Exception as err: # pylint: disable=broad-except + log.error( + "Failed to get secret metadata %s: %s", type(err).__name__, err + ) + return ret + + +#################################################################################### +# The following functions were available in previous versions and are deprecated +# TODO: remove deprecated functions after v3008 (Argon) +#################################################################################### + + +def get_vault_connection(): + """ + Get the connection details for calling Vault, from local configuration if + it exists, or from the master otherwise + """ + salt.utils.versions.warn_until( + "Argon", + "salt.utils.vault.get_vault_connection is deprecated, " + "please use salt.utils.vault.get_authd_client.", + ) + + opts = globals().get("__opts__", {}) + context = globals().get("__context__", {}) + + vault = get_authd_client(opts, context) + try: + token = vault.auth.get_token() + except (VaultAuthExpired, VaultPermissionDeniedError): + clear_cache(opts) + vault = get_authd_client(opts, context) + token = vault.auth.get_token() + + server_config = vault.get_config() + + return { + "url": server_config["url"], + "namespace": server_config["namespace"], + "token": str(token), + "verify": server_config["verify"], + "issued": token.creation_time, + "ttl": token.explicit_max_ttl, + } + + +def del_cache(): + """ + Delete cache file + """ + 
salt.utils.versions.warn_until( + "Argon", + "salt.utils.vault.del_cache is deprecated, please use salt.utils.vault.clear_cache.", + ) + clear_cache(globals().get("__opts__", {}), connection_only=False) + + +def write_cache(connection): # pylint: disable=unused-argument + """ + Write the vault token to cache + """ + salt.utils.versions.warn_until( + "Argon", + "salt.utils.vault.write_cache is deprecated without replacement.", + ) + # always return false since cache is managed internally + return False + + +def get_cache(): + """ + Return connection information from vault cache file + """ + salt.utils.versions.warn_until( + "Argon", + "salt.utils.vault.get_cache is deprecated, please use salt.utils.vault.get_authd_client.", + ) + return get_vault_connection() + + +def make_request( + method, + resource, + token=None, # pylint: disable=unused-argument + vault_url=None, # pylint: disable=unused-argument + namespace=None, # pylint: disable=unused-argument + get_token_url=False, # pylint: disable=unused-argument + retry=False, # pylint: disable=unused-argument + **args, +): + """ + Make a request to Vault + """ + salt.utils.versions.warn_until( + "Argon", + "salt.utils.vault.make_request is deprecated, please use " + "salt.utils.vault.query or salt.utils.vault.query_raw.", + ) + + opts = globals().get("__opts__", {}) + context = globals().get("__context__", {}) + endpoint = resource.lstrip("/").lstrip("v1/") + payload = args.get("json") + + if "data" in args: + payload = salt.utils.json.loads(args["data"]) + + try: + return query_raw(method, endpoint, opts, context, payload=payload, wrap=False) + except VaultAuthExpired: + # mimic the previous behavior somewhat + # VaultAuthExpired should not be thrown at all though + response = requests.models.Response() + response.status_code = 403 + response.reason = "Permission denied" + return response + + +def selftoken_expired(): + """ + Validate the current token exists and is still valid + """ + salt.utils.versions.warn_until( 
+ "Argon", + "salt.utils.vault.selftoken_expired is deprecated, please rely on the " + "utility module for token management.", + ) + opts = globals().get("__opts__", {}) + context = globals().get("__context__", {}) + + try: + if _get_salt_run_type(opts) in [ + SALT_RUNTYPE_MASTER_IMPERSONATING, + SALT_RUNTYPE_MINION_REMOTE, + ]: + return True + vault = get_authd_client(opts, context) + return vault.is_valid(remote=True) + + except Exception as err: # pylint: disable=broad-except + raise salt.exceptions.CommandExecutionError( + "Error while looking up self token." + ) from err