diff --git a/Dockerfiles/dashboards-helper.Dockerfile b/Dockerfiles/dashboards-helper.Dockerfile index 0b792de8f..6084559ba 100644 --- a/Dockerfiles/dashboards-helper.Dockerfile +++ b/Dockerfiles/dashboards-helper.Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.16 +FROM alpine:3.17 # Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved. LABEL maintainer="malcolm@inl.gov" diff --git a/Dockerfiles/filebeat.Dockerfile b/Dockerfiles/filebeat.Dockerfile index 782360731..a8ee005b0 100644 --- a/Dockerfiles/filebeat.Dockerfile +++ b/Dockerfiles/filebeat.Dockerfile @@ -1,4 +1,4 @@ -FROM docker.elastic.co/beats/filebeat-oss:8.5.1 +FROM docker.elastic.co/beats/filebeat-oss:8.5.2 # Copyright (c) 2022 Battelle Energy Alliance, LLC. All rights reserved. LABEL maintainer="malcolm@inl.gov" diff --git a/Dockerfiles/name-map-ui.Dockerfile b/Dockerfiles/name-map-ui.Dockerfile index b44db9d61..d4ae562cd 100644 --- a/Dockerfiles/name-map-ui.Dockerfile +++ b/Dockerfiles/name-map-ui.Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.16 +FROM alpine:3.17 # Copyright (c) 2022 Battelle Energy Alliance, LLC. All rights reserved. 
LABEL maintainer="malcolm@inl.gov" @@ -26,13 +26,13 @@ ENV LISTJS_VERSION v1.5.0 RUN apk update --no-cache && \ apk upgrade --no-cache && \ - apk --no-cache add bash php8 php8-fpm php8-mysqli php8-json php8-openssl php8-curl php8-fileinfo \ - php8-zlib php8-xml php8-phar php8-intl php8-dom php8-xmlreader php8-ctype php8-session \ - php8-mbstring php8-gd nginx supervisor curl inotify-tools file psmisc shadow openssl tini + apk --no-cache add bash php81 php81-fpm php81-mysqli php81-json php81-openssl php81-curl php81-fileinfo \ + php81-zlib php81-xml php81-phar php81-intl php81-dom php81-xmlreader php81-ctype php81-session \ + php81-mbstring php81-gd nginx supervisor curl inotify-tools file psmisc shadow openssl tini COPY name-map-ui/config/nginx.conf /etc/nginx/nginx.conf -COPY name-map-ui/config/fpm-pool.conf /etc/php8/php-fpm.d/www.conf -COPY name-map-ui/config/php.ini /etc/php8/conf.d/custom.ini +COPY name-map-ui/config/fpm-pool.conf /etc/php81/php-fpm.d/www.conf +COPY name-map-ui/config/php.ini /etc/php81/conf.d/custom.ini COPY name-map-ui/config/supervisord.conf /etc/supervisord.conf COPY name-map-ui/config/supervisor_logstash_ctl.conf /etc/supervisor/logstash/supervisord.conf COPY name-map-ui/config/supervisor_netbox_ctl.conf /etc/supervisor/netbox/supervisord.conf diff --git a/Dockerfiles/netbox.Dockerfile b/Dockerfiles/netbox.Dockerfile index d8f6bf448..360e65a6a 100644 --- a/Dockerfiles/netbox.Dockerfile +++ b/Dockerfiles/netbox.Dockerfile @@ -28,6 +28,8 @@ ENV SUPERCRONIC "supercronic-linux-amd64" ENV SUPERCRONIC_SHA1SUM "d7f4c0886eb85249ad05ed592902fa6865bb9d70" ENV SUPERCRONIC_CRONTAB "/etc/crontab" +ENV NETBOX_DEVICETYPE_LIBRARY_URL "https://codeload.github.com/netbox-community/devicetype-library/tar.gz/master" + ARG NETBOX_DEFAULT_SITE=Malcolm ARG NETBOX_CRON=false @@ -54,10 +56,12 @@ RUN apt-get -q update && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \ groupadd --gid ${DEFAULT_GID} ${PUSER} && \ - useradd -m --uid 
${DEFAULT_UID} --gid ${DEFAULT_GID} ${PUSER} && \ - usermod -a -G tty ${PUSER} && \ - mkdir -p /opt/unit && \ + useradd -m --uid ${DEFAULT_UID} --gid ${DEFAULT_GID} ${PUSER} && \ + usermod -a -G tty ${PUSER} && \ + mkdir -p /opt/unit /opt/netbox-devicetype-library && \ chown -R $PUSER:$PGROUP /etc/netbox /opt/unit /opt/netbox && \ + cd /opt && \ + curl -sSL "$NETBOX_DEVICETYPE_LIBRARY_URL" | tar xzvf - -C ./netbox-devicetype-library --strip-components 1 && \ mkdir -p /opt/netbox/netbox/$BASE_PATH && \ mv /opt/netbox/netbox/static /opt/netbox/netbox/$BASE_PATH/static && \ jq '. += { "settings": { "http": { "discard_unsafe_fields": false } } }' /etc/unit/nginx-unit.json | jq ".routes[0].match.uri = \"/${BASE_PATH}/static/*\"" > /etc/unit/nginx-unit-new.json && \ diff --git a/Dockerfiles/nginx.Dockerfile b/Dockerfiles/nginx.Dockerfile index 6ef36ac61..f6870880c 100644 --- a/Dockerfiles/nginx.Dockerfile +++ b/Dockerfiles/nginx.Dockerfile @@ -25,13 +25,15 @@ WORKDIR /site # build documentation, remove unnecessary files, then massage a bit to work nicely with NGINX (which will be serving it) RUN find /site -type f -name "*.md" -exec sed -i "s/{{[[:space:]]*site.github.build_revision[[:space:]]*}}/$VCS_REVISION/g" "{}" \; && \ ( [ -n "${GITHUB_TOKEN}" ] && export JEKYLL_GITHUB_TOKEN="${GITHUB_TOKEN}" || true ) && \ + sed -i "s/^\(show_downloads:\).*/\1 false/" /site/_config.yml && \ + sed -i -e "/^mastodon:/,+2d" /site/_config.yml && \ docker-entrypoint.sh bundle exec jekyll build && \ find /site/_site -type f -name "*.md" -delete && \ find /site/_site -type f -name "*.html" -exec sed -i "s@/\(docs\|assets\)@/readme/\1@g" "{}" \; && \ find /site/_site -type f -name "*.html" -exec sed -i 's@\(href=\)"/"@\1"/readme/"@g' "{}" \; # build NGINX image -FROM alpine:3.16 +FROM alpine:3.17 LABEL maintainer="malcolm@inl.gov" LABEL org.opencontainers.image.authors='malcolm@inl.gov' @@ -141,7 +143,7 @@ RUN set -x ; \ " ; \ apk update --no-cache; \ apk upgrade --no-cache; \ - apk add 
--no-cache curl shadow; \ + apk add --no-cache curl shadow libressl; \ addgroup -g ${DEFAULT_GID} -S ${PGROUP} ; \ adduser -S -D -H -u ${DEFAULT_UID} -h /var/cache/nginx -s /sbin/nologin -G ${PGROUP} -g ${PUSER} ${PUSER} ; \ addgroup ${PUSER} shadow ; \ diff --git a/Dockerfiles/zeek.Dockerfile b/Dockerfiles/zeek.Dockerfile index 5e5305432..61f6653da 100644 --- a/Dockerfiles/zeek.Dockerfile +++ b/Dockerfiles/zeek.Dockerfile @@ -31,7 +31,7 @@ ENV PUSER_PRIV_DROP false # for download and install ARG ZEEK_LTS=true -ARG ZEEK_VERSION=5.0.3-0 +ARG ZEEK_VERSION=5.0.4-0 ENV ZEEK_LTS $ZEEK_LTS ENV ZEEK_VERSION $ZEEK_VERSION diff --git a/api/requirements.txt b/api/requirements.txt index 0653fab98..8183c85d8 100644 --- a/api/requirements.txt +++ b/api/requirements.txt @@ -1,7 +1,7 @@ pytz==2021.3 Flask==2.0.2 gunicorn==20.1.0 -opensearch-py==2.0.0 +opensearch-py==2.0.1 opensearch-dsl==2.0.1 requests==2.26.0 regex==2022.3.2 diff --git a/dashboards/templates/composable/component/arkime.json b/dashboards/templates/composable/component/arkime.json index 9dd04874b..b1b975fd5 100644 --- a/dashboards/templates/composable/component/arkime.json +++ b/dashboards/templates/composable/component/arkime.json @@ -16,6 +16,7 @@ "protocol": { "type": "keyword" }, "quic.host": { "type": "keyword" }, "quic.version": { "type": "keyword" }, + "rootId": { "type": "keyword" }, "source.geo.country_code2": { "type": "keyword" }, "source.geo.country_code3": { "type": "keyword" }, "source.geo.dma_code": { "type": "short" }, diff --git a/docker-compose-standalone.yml b/docker-compose-standalone.yml index 5505dcab3..12a27b3ad 100644 --- a/docker-compose-standalone.yml +++ b/docker-compose-standalone.yml @@ -352,7 +352,7 @@ x-pcap-capture-variables: &pcap-capture-variables services: opensearch: - image: malcolmnetsec/opensearch:6.4.2 + image: malcolmnetsec/opensearch:6.4.3 restart: "no" stdin_open: false tty: true @@ -383,9 +383,9 @@ services: - ./nginx/ca-trust:/var/local/ca-trust:ro - 
./.opensearch.primary.curlrc:/var/local/opensearch.primary.curlrc:ro - ./.opensearch.secondary.curlrc:/var/local/opensearch.secondary.curlrc:ro - - ./opensearch/opensearch.keystore:/usr/share/opensearch/config/opensearch.keystore:rw - ./opensearch:/usr/share/opensearch/data:delegated - ./opensearch-backup:/opt/opensearch/backup:delegated + - ./opensearch/opensearch.keystore:/usr/share/opensearch/config/opensearch.keystore:rw healthcheck: test: ["CMD", "curl", "--silent", "--fail", "http://localhost:9200"] interval: 30s @@ -393,7 +393,7 @@ services: retries: 3 start_period: 180s dashboards-helper: - image: malcolmnetsec/dashboards-helper:6.4.2 + image: malcolmnetsec/dashboards-helper:6.4.3 restart: "no" stdin_open: false tty: true @@ -424,7 +424,7 @@ services: retries: 3 start_period: 30s dashboards: - image: malcolmnetsec/dashboards:6.4.2 + image: malcolmnetsec/dashboards:6.4.3 restart: "no" stdin_open: false tty: true @@ -449,7 +449,7 @@ services: retries: 3 start_period: 210s logstash: - image: malcolmnetsec/logstash-oss:6.4.2 + image: malcolmnetsec/logstash-oss:6.4.3 restart: "no" stdin_open: false tty: true @@ -492,7 +492,7 @@ services: retries: 3 start_period: 600s filebeat: - image: malcolmnetsec/filebeat-oss:6.4.2 + image: malcolmnetsec/filebeat-oss:6.4.3 restart: "no" stdin_open: false tty: true @@ -531,7 +531,7 @@ services: retries: 3 start_period: 60s arkime: - image: malcolmnetsec/arkime:6.4.2 + image: malcolmnetsec/arkime:6.4.3 restart: "no" stdin_open: false tty: true @@ -569,7 +569,7 @@ services: retries: 3 start_period: 210s zeek: - image: malcolmnetsec/zeek:6.4.2 + image: malcolmnetsec/zeek:6.4.3 restart: "no" stdin_open: false tty: true @@ -608,7 +608,7 @@ services: retries: 3 start_period: 60s zeek-live: - image: malcolmnetsec/zeek:6.4.2 + image: malcolmnetsec/zeek:6.4.3 restart: "no" stdin_open: false tty: true @@ -640,7 +640,7 @@ services: - ./zeek-logs/extract_files:/zeek/extract_files - ./zeek/intel:/opt/zeek/share/zeek/site/intel suricata: - 
image: malcolmnetsec/suricata:6.4.2 + image: malcolmnetsec/suricata:6.4.3 restart: "no" stdin_open: false tty: true @@ -677,7 +677,7 @@ services: retries: 3 start_period: 120s suricata-live: - image: malcolmnetsec/suricata:6.4.2 + image: malcolmnetsec/suricata:6.4.3 restart: "no" stdin_open: false tty: true @@ -704,7 +704,7 @@ services: - ./suricata-logs:/var/log/suricata - ./suricata/rules:/opt/suricata/rules:ro file-monitor: - image: malcolmnetsec/file-monitor:6.4.2 + image: malcolmnetsec/file-monitor:6.4.3 restart: "no" stdin_open: false tty: true @@ -728,7 +728,7 @@ services: retries: 3 start_period: 60s pcap-capture: - image: malcolmnetsec/pcap-capture:6.4.2 + image: malcolmnetsec/pcap-capture:6.4.3 restart: "no" stdin_open: false tty: true @@ -750,7 +750,7 @@ services: - ./nginx/ca-trust:/var/local/ca-trust:ro - ./pcap/upload:/pcap pcap-monitor: - image: malcolmnetsec/pcap-monitor:6.4.2 + image: malcolmnetsec/pcap-monitor:6.4.3 restart: "no" stdin_open: false tty: true @@ -776,7 +776,7 @@ services: retries: 3 start_period: 90s upload: - image: malcolmnetsec/file-upload:6.4.2 + image: malcolmnetsec/file-upload:6.4.3 restart: "no" stdin_open: false tty: true @@ -804,7 +804,7 @@ services: retries: 3 start_period: 60s htadmin: - image: malcolmnetsec/htadmin:6.4.2 + image: malcolmnetsec/htadmin:6.4.3 restart: "no" stdin_open: false tty: true @@ -828,7 +828,7 @@ services: retries: 3 start_period: 60s freq: - image: malcolmnetsec/freq:6.4.2 + image: malcolmnetsec/freq:6.4.3 restart: "no" stdin_open: false tty: true @@ -849,7 +849,7 @@ services: retries: 3 start_period: 60s name-map-ui: - image: malcolmnetsec/name-map-ui:6.4.2 + image: malcolmnetsec/name-map-ui:6.4.3 restart: "no" stdin_open: false tty: true @@ -872,7 +872,7 @@ services: retries: 3 start_period: 60s netbox: - image: malcolmnetsec/netbox:6.4.2 + image: malcolmnetsec/netbox:6.4.3 restart: "no" stdin_open: false tty: true @@ -903,7 +903,7 @@ services: retries: 3 start_period: 120s netbox-postgres: - 
image: malcolmnetsec/postgresql:6.4.2 + image: malcolmnetsec/postgresql:6.4.3 restart: "no" stdin_open: false tty: true @@ -920,13 +920,13 @@ services: - ./nginx/ca-trust:/var/local/ca-trust:ro - ./netbox/postgres:/var/lib/postgresql/data:rw healthcheck: - test: [ "CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}" ] + test: [ "CMD-SHELL", "[[ $${NETBOX_POSTGRES_DISABLED} == 'true' ]] || pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}" ] interval: 60s timeout: 15s retries: 3 start_period: 45s netbox-redis: - image: malcolmnetsec/redis:6.4.2 + image: malcolmnetsec/redis:6.4.3 restart: "no" stdin_open: false tty: true @@ -947,13 +947,13 @@ services: - ./nginx/ca-trust:/var/local/ca-trust:ro - ./netbox/redis:/data healthcheck: - test: ["CMD-SHELL", "pidof redis-server || exit 1" ] + test: ["CMD-SHELL", "[[ $${NETBOX_REDIS_DISABLED} == 'true' ]] || ( pidof redis-server || exit 1 )" ] interval: 60s timeout: 15s retries: 3 start_period: 45s netbox-redis-cache: - image: malcolmnetsec/redis:6.4.2 + image: malcolmnetsec/redis:6.4.3 restart: "no" stdin_open: false tty: true @@ -973,13 +973,13 @@ services: volumes: - ./nginx/ca-trust:/var/local/ca-trust:ro healthcheck: - test: ["CMD-SHELL", "pidof redis-server || exit 1" ] + test: ["CMD-SHELL", "[[ $${NETBOX_REDIS_DISABLED} == 'true' ]] || ( pidof redis-server || exit 1 )" ] interval: 60s timeout: 15s retries: 3 start_period: 45s api: - image: malcolmnetsec/api:6.4.2 + image: malcolmnetsec/api:6.4.3 command: gunicorn --bind 0:5000 manage:app restart: "no" stdin_open: false @@ -1002,7 +1002,7 @@ services: retries: 3 start_period: 60s nginx-proxy: - image: malcolmnetsec/nginx-proxy:6.4.2 + image: malcolmnetsec/nginx-proxy:6.4.3 restart: "no" stdin_open: false tty: true diff --git a/docker-compose.yml b/docker-compose.yml index 794b5de93..fa3862f18 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -355,7 +355,7 @@ services: build: context: . 
dockerfile: Dockerfiles/opensearch.Dockerfile - image: malcolmnetsec/opensearch:6.4.2 + image: malcolmnetsec/opensearch:6.4.3 restart: "no" stdin_open: false tty: true @@ -386,9 +386,9 @@ services: - ./nginx/ca-trust:/var/local/ca-trust:ro - ./.opensearch.primary.curlrc:/var/local/opensearch.primary.curlrc:ro - ./.opensearch.secondary.curlrc:/var/local/opensearch.secondary.curlrc:ro - - ./opensearch/opensearch.keystore:/usr/share/opensearch/config/opensearch.keystore:rw - ./opensearch:/usr/share/opensearch/data:delegated - ./opensearch-backup:/opt/opensearch/backup:delegated + - ./opensearch/opensearch.keystore:/usr/share/opensearch/config/opensearch.keystore:rw healthcheck: test: ["CMD", "curl", "--silent", "--fail", "http://localhost:9200"] interval: 30s @@ -399,7 +399,7 @@ services: build: context: . dockerfile: Dockerfiles/dashboards-helper.Dockerfile - image: malcolmnetsec/dashboards-helper:6.4.2 + image: malcolmnetsec/dashboards-helper:6.4.3 restart: "no" stdin_open: false tty: true @@ -433,7 +433,7 @@ services: build: context: . dockerfile: Dockerfiles/dashboards.Dockerfile - image: malcolmnetsec/dashboards:6.4.2 + image: malcolmnetsec/dashboards:6.4.3 restart: "no" stdin_open: false tty: true @@ -461,7 +461,7 @@ services: build: context: . dockerfile: Dockerfiles/logstash.Dockerfile - image: malcolmnetsec/logstash-oss:6.4.2 + image: malcolmnetsec/logstash-oss:6.4.3 restart: "no" stdin_open: false tty: true @@ -511,7 +511,7 @@ services: build: context: . dockerfile: Dockerfiles/filebeat.Dockerfile - image: malcolmnetsec/filebeat-oss:6.4.2 + image: malcolmnetsec/filebeat-oss:6.4.3 restart: "no" stdin_open: false tty: true @@ -553,7 +553,7 @@ services: build: context: . dockerfile: Dockerfiles/arkime.Dockerfile - image: malcolmnetsec/arkime:6.4.2 + image: malcolmnetsec/arkime:6.4.3 restart: "no" stdin_open: false tty: true @@ -597,7 +597,7 @@ services: build: context: . 
dockerfile: Dockerfiles/zeek.Dockerfile - image: malcolmnetsec/zeek:6.4.2 + image: malcolmnetsec/zeek:6.4.3 restart: "no" stdin_open: false tty: true @@ -640,7 +640,7 @@ services: build: context: . dockerfile: Dockerfiles/zeek.Dockerfile - image: malcolmnetsec/zeek:6.4.2 + image: malcolmnetsec/zeek:6.4.3 restart: "no" stdin_open: false tty: true @@ -676,7 +676,7 @@ services: build: context: . dockerfile: Dockerfiles/suricata.Dockerfile - image: malcolmnetsec/suricata:6.4.2 + image: malcolmnetsec/suricata:6.4.3 restart: "no" stdin_open: false tty: true @@ -716,7 +716,7 @@ services: build: context: . dockerfile: Dockerfiles/suricata.Dockerfile - image: malcolmnetsec/suricata:6.4.2 + image: malcolmnetsec/suricata:6.4.3 restart: "no" stdin_open: false tty: true @@ -746,7 +746,7 @@ services: build: context: . dockerfile: Dockerfiles/file-monitor.Dockerfile - image: malcolmnetsec/file-monitor:6.4.2 + image: malcolmnetsec/file-monitor:6.4.3 restart: "no" stdin_open: false tty: true @@ -773,7 +773,7 @@ services: build: context: . dockerfile: Dockerfiles/pcap-capture.Dockerfile - image: malcolmnetsec/pcap-capture:6.4.2 + image: malcolmnetsec/pcap-capture:6.4.3 restart: "no" stdin_open: false tty: true @@ -798,7 +798,7 @@ services: build: context: . dockerfile: Dockerfiles/pcap-monitor.Dockerfile - image: malcolmnetsec/pcap-monitor:6.4.2 + image: malcolmnetsec/pcap-monitor:6.4.3 restart: "no" stdin_open: false tty: true @@ -827,7 +827,7 @@ services: build: context: . dockerfile: Dockerfiles/file-upload.Dockerfile - image: malcolmnetsec/file-upload:6.4.2 + image: malcolmnetsec/file-upload:6.4.3 restart: "no" stdin_open: false tty: true @@ -855,7 +855,7 @@ services: retries: 3 start_period: 60s htadmin: - image: malcolmnetsec/htadmin:6.4.2 + image: malcolmnetsec/htadmin:6.4.3 build: context: . 
dockerfile: Dockerfiles/htadmin.Dockerfile @@ -882,7 +882,7 @@ services: retries: 3 start_period: 60s freq: - image: malcolmnetsec/freq:6.4.2 + image: malcolmnetsec/freq:6.4.3 build: context: . dockerfile: Dockerfiles/freq.Dockerfile @@ -906,7 +906,7 @@ services: retries: 3 start_period: 60s name-map-ui: - image: malcolmnetsec/name-map-ui:6.4.2 + image: malcolmnetsec/name-map-ui:6.4.3 build: context: . dockerfile: Dockerfiles/name-map-ui.Dockerfile @@ -932,7 +932,7 @@ services: retries: 3 start_period: 60s netbox: - image: malcolmnetsec/netbox:6.4.2 + image: malcolmnetsec/netbox:6.4.3 build: context: . dockerfile: Dockerfiles/netbox.Dockerfile @@ -966,7 +966,7 @@ services: retries: 3 start_period: 120s netbox-postgres: - image: malcolmnetsec/postgresql:6.4.2 + image: malcolmnetsec/postgresql:6.4.3 build: context: . dockerfile: Dockerfiles/postgresql.Dockerfile @@ -986,13 +986,13 @@ services: - ./nginx/ca-trust:/var/local/ca-trust:ro - ./netbox/postgres:/var/lib/postgresql/data:rw healthcheck: - test: [ "CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}" ] + test: [ "CMD-SHELL", "[[ $${NETBOX_POSTGRES_DISABLED} == 'true' ]] || pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}" ] interval: 60s timeout: 15s retries: 3 start_period: 45s netbox-redis: - image: malcolmnetsec/redis:6.4.2 + image: malcolmnetsec/redis:6.4.3 build: context: . dockerfile: Dockerfiles/redis.Dockerfile @@ -1016,13 +1016,13 @@ services: - ./nginx/ca-trust:/var/local/ca-trust:ro - ./netbox/redis:/data healthcheck: - test: ["CMD-SHELL", "pidof redis-server || exit 1" ] + test: ["CMD-SHELL", "[[ $${NETBOX_REDIS_DISABLED} == 'true' ]] || ( pidof redis-server || exit 1 )" ] interval: 60s timeout: 15s retries: 3 start_period: 45s netbox-redis-cache: - image: malcolmnetsec/redis:6.4.2 + image: malcolmnetsec/redis:6.4.3 build: context: . 
dockerfile: Dockerfiles/redis.Dockerfile @@ -1045,13 +1045,13 @@ services: volumes: - ./nginx/ca-trust:/var/local/ca-trust:ro healthcheck: - test: ["CMD-SHELL", "pidof redis-server || exit 1" ] + test: ["CMD-SHELL", "[[ $${NETBOX_REDIS_DISABLED} == 'true' ]] || ( pidof redis-server || exit 1 )" ] interval: 60s timeout: 15s retries: 3 start_period: 45s api: - image: malcolmnetsec/api:6.4.2 + image: malcolmnetsec/api:6.4.3 build: context: . dockerfile: Dockerfiles/api.Dockerfile @@ -1080,7 +1080,7 @@ services: build: context: . dockerfile: Dockerfiles/nginx.Dockerfile - image: malcolmnetsec/nginx-proxy:6.4.2 + image: malcolmnetsec/nginx-proxy:6.4.3 restart: "no" stdin_open: false tty: true diff --git a/docs/development.md b/docs/development.md index bf0a46e8c..c149bfda6 100644 --- a/docs/development.md +++ b/docs/development.md @@ -58,7 +58,7 @@ Then, go take a walk or something since it will be a while. When you're done, yo * `malcolmnetsec/api` (based on `python:3-slim`) * `malcolmnetsec/arkime` (based on `debian:11-slim`) -* `malcolmnetsec/dashboards-helper` (based on `alpine:3.16`) +* `malcolmnetsec/dashboards-helper` (based on `alpine:3.17`) * `malcolmnetsec/dashboards` (based on `opensearchproject/opensearch-dashboards`) * `malcolmnetsec/file-monitor` (based on `debian:11-slim`) * `malcolmnetsec/file-upload` (based on `debian:11-slim`) @@ -66,9 +66,9 @@ Then, go take a walk or something since it will be a while. 
When you're done, yo * `malcolmnetsec/freq` (based on `debian:11-slim`) * `malcolmnetsec/htadmin` (based on `debian:11-slim`) * `malcolmnetsec/logstash-oss` (based on `opensearchproject/logstash-oss-with-opensearch-output-plugin`) -* `malcolmnetsec/name-map-ui` (based on `alpine:3.16`) +* `malcolmnetsec/name-map-ui` (based on `alpine:3.17`) * `malcolmnetsec/netbox` (based on `netboxcommunity/netbox:latest`) -* `malcolmnetsec/nginx-proxy` (based on `alpine:3.16`) +* `malcolmnetsec/nginx-proxy` (based on `alpine:3.17`) * `malcolmnetsec/opensearch` (based on `opensearchproject/opensearch`) * `malcolmnetsec/pcap-capture` (based on `debian:11-slim`) * `malcolmnetsec/pcap-monitor` (based on `debian:11-slim`) diff --git a/docs/download.md b/docs/download.md index 8bb1864b2..9f4e9a16a 100644 --- a/docs/download.md +++ b/docs/download.md @@ -16,7 +16,7 @@ While official downloads of the Malcolm installer ISO are not provided, an **uno | ISO | SHA256 | |---|---| -| [malcolm-6.4.2.iso](/iso/malcolm-6.4.2.iso) (5.0GiB) | [`xxxxxxxx`](/iso/malcolm-6.4.2.iso.sha256.txt) | +| [malcolm-6.4.3.iso](/iso/malcolm-6.4.3.iso) (5.1GiB) | [`dde48c542029524f08ef03ccfd925fd67deee5e9a7f405d89037bd9e250b173d`](/iso/malcolm-6.4.3.iso.sha256.txt) | ## Hedgehog Linux @@ -26,7 +26,7 @@ While official downloads of the Malcolm installer ISO are not provided, an **uno | ISO | SHA256 | |---|---| -| [hedgehog-6.4.2.iso](/iso/hedgehog-6.4.2.iso) (2.3GiB) | [`xxxxxxxx`](/iso/hedgehog-6.4.2.iso.sha256.txt) | +| [hedgehog-6.4.3.iso](/iso/hedgehog-6.4.3.iso) (2.3GiB) | [`5fcd87e93d9dc5693d9d66cddafd4f909b9e69db56f7e999dcc37c9143ac8b34`](/iso/hedgehog-6.4.3.iso.sha256.txt) | ## Warning diff --git a/docs/hedgehog-iso-build.md b/docs/hedgehog-iso-build.md index 3aa841fb2..00334a840 100644 --- a/docs/hedgehog-iso-build.md +++ b/docs/hedgehog-iso-build.md @@ -29,7 +29,7 @@ Building the ISO may take 90 minutes or more depending on your system. 
As the bu ``` … -Finished, created "/sensor-build/hedgehog-6.4.2.iso" +Finished, created "/sensor-build/hedgehog-6.4.3.iso" … ``` diff --git a/docs/malcolm-config.md b/docs/malcolm-config.md index 08101b0f4..464ef6276 100644 --- a/docs/malcolm-config.md +++ b/docs/malcolm-config.md @@ -53,7 +53,7 @@ Various other environment variables inside of `docker-compose.yml` can be tweake * `PCAP_ROTATE_MEGABYTES` – used to specify how large a locally-captured PCAP file can become (in megabytes) before it is closed for processing and a new PCAP file created * `PCAP_ROTATE_MINUTES` – used to specify a time interval (in minutes) after which a locally-captured PCAP file will be closed for processing and a new PCAP file created * `pipeline.workers`, `pipeline.batch.size` and `pipeline.batch.delay` - these settings are used to tune the performance and resource utilization of the the `logstash` container; see [Tuning and Profiling Logstash Performance](https://www.elastic.co/guide/en/logstash/current/tuning-logstash.html), [`logstash.yml`](https://www.elastic.co/guide/en/logstash/current/logstash-settings-file.html) and [Multiple Pipelines](https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html) -* `PUID` and `PGID` - Docker runs all of its containers as the privileged `root` user by default. For better security, Malcolm immediately drops to non-privileged user accounts for executing internal processes wherever possible. The `PUID` (**p**rocess **u**ser **ID**) and `PGID` (**p**rocess **g**roup **ID**) environment variables allow Malcolm to map internal non-privileged user accounts to a corresponding [user account](https://en.wikipedia.org/wiki/User_identifier) on the host. +* `PUID` and `PGID` - Docker runs all of its containers as the privileged `root` user by default. For better security, Malcolm immediately drops to non-privileged user accounts for executing internal processes wherever possible. 
The `PUID` (**p**rocess **u**ser **ID**) and `PGID` (**p**rocess **g**roup **ID**) environment variables allow Malcolm to map internal non-privileged user accounts to a corresponding [user account](https://en.wikipedia.org/wiki/User_identifier) on the host. Note that a few containers (including the `logstash` and `netbox` containers) may take a few extra minutes during startup if `PUID` and `PGID` are set to values other than the default `1000`. This is expected and should not affect operation after the initial startup. * `SENSITIVE_COUNTRY_CODES` - when [severity scoring](severity.md#Severity) is enabled, this variable defines a comma-separated list of sensitive countries (using [ISO 3166-1 alpha-2 codes](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#Current_codes)) (default `'AM,AZ,BY,CN,CU,DZ,GE,HK,IL,IN,IQ,IR,KG,KP,KZ,LY,MD,MO,PK,RU,SD,SS,SY,TJ,TM,TW,UA,UZ'`, taken from the U.S. Department of Energy Sensitive Country List) * `SURICATA_AUTO_ANALYZE_PCAP_FILES` – if set to `true`, all PCAP files imported into Malcolm will automatically be analyzed by Suricata, and the resulting logs will also be imported (default `false`) * `SURICATA_AUTO_ANALYZE_PCAP_THREADS` – the number of threads available to Malcolm for analyzing Suricata logs (default `1`) diff --git a/docs/malcolm-iso.md b/docs/malcolm-iso.md index 571c4642b..c2a0af495 100644 --- a/docs/malcolm-iso.md +++ b/docs/malcolm-iso.md @@ -41,7 +41,7 @@ Building the ISO may take 30 minutes or more depending on your system. 
As the bu ``` … -Finished, created "/malcolm-build/malcolm-iso/malcolm-6.4.2.iso" +Finished, created "/malcolm-build/malcolm-iso/malcolm-6.4.3.iso" … ``` diff --git a/docs/quickstart.md b/docs/quickstart.md index a56bbfdd3..f72c7053c 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -53,26 +53,26 @@ You can then observe that the images have been retrieved by running `docker imag ``` $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE -malcolmnetsec/api 6.4.2 xxxxxxxxxxxx 3 days ago 158MB -malcolmnetsec/arkime 6.4.2 xxxxxxxxxxxx 3 days ago 816MB -malcolmnetsec/dashboards 6.4.2 xxxxxxxxxxxx 3 days ago 1.02GB -malcolmnetsec/dashboards-helper 6.4.2 xxxxxxxxxxxx 3 days ago 184MB -malcolmnetsec/file-monitor 6.4.2 xxxxxxxxxxxx 3 days ago 588MB -malcolmnetsec/file-upload 6.4.2 xxxxxxxxxxxx 3 days ago 259MB -malcolmnetsec/filebeat-oss 6.4.2 xxxxxxxxxxxx 3 days ago 624MB -malcolmnetsec/freq 6.4.2 xxxxxxxxxxxx 3 days ago 132MB -malcolmnetsec/htadmin 6.4.2 xxxxxxxxxxxx 3 days ago 242MB -malcolmnetsec/logstash-oss 6.4.2 xxxxxxxxxxxx 3 days ago 1.35GB -malcolmnetsec/name-map-ui 6.4.2 xxxxxxxxxxxx 3 days ago 143MB -malcolmnetsec/netbox 6.4.2 xxxxxxxxxxxx 3 days ago 1.01GB -malcolmnetsec/nginx-proxy 6.4.2 xxxxxxxxxxxx 3 days ago 121MB -malcolmnetsec/opensearch 6.4.2 xxxxxxxxxxxx 3 days ago 1.17GB -malcolmnetsec/pcap-capture 6.4.2 xxxxxxxxxxxx 3 days ago 121MB -malcolmnetsec/pcap-monitor 6.4.2 xxxxxxxxxxxx 3 days ago 213MB -malcolmnetsec/postgresql 6.4.2 xxxxxxxxxxxx 3 days ago 268MB -malcolmnetsec/redis 6.4.2 xxxxxxxxxxxx 3 days ago 34.2MB -malcolmnetsec/suricata 6.4.2 xxxxxxxxxxxx 3 days ago 278MB -malcolmnetsec/zeek 6.4.2 xxxxxxxxxxxx 3 days ago 1GB +malcolmnetsec/api 6.4.3 xxxxxxxxxxxx 3 days ago 158MB +malcolmnetsec/arkime 6.4.3 xxxxxxxxxxxx 3 days ago 816MB +malcolmnetsec/dashboards 6.4.3 xxxxxxxxxxxx 3 days ago 1.02GB +malcolmnetsec/dashboards-helper 6.4.3 xxxxxxxxxxxx 3 days ago 184MB +malcolmnetsec/file-monitor 6.4.3 xxxxxxxxxxxx 3 days ago 588MB 
+malcolmnetsec/file-upload 6.4.3 xxxxxxxxxxxx 3 days ago 259MB +malcolmnetsec/filebeat-oss 6.4.3 xxxxxxxxxxxx 3 days ago 624MB +malcolmnetsec/freq 6.4.3 xxxxxxxxxxxx 3 days ago 132MB +malcolmnetsec/htadmin 6.4.3 xxxxxxxxxxxx 3 days ago 242MB +malcolmnetsec/logstash-oss 6.4.3 xxxxxxxxxxxx 3 days ago 1.35GB +malcolmnetsec/name-map-ui 6.4.3 xxxxxxxxxxxx 3 days ago 143MB +malcolmnetsec/netbox 6.4.3 xxxxxxxxxxxx 3 days ago 1.01GB +malcolmnetsec/nginx-proxy 6.4.3 xxxxxxxxxxxx 3 days ago 121MB +malcolmnetsec/opensearch 6.4.3 xxxxxxxxxxxx 3 days ago 1.17GB +malcolmnetsec/pcap-capture 6.4.3 xxxxxxxxxxxx 3 days ago 121MB +malcolmnetsec/pcap-monitor 6.4.3 xxxxxxxxxxxx 3 days ago 213MB +malcolmnetsec/postgresql 6.4.3 xxxxxxxxxxxx 3 days ago 268MB +malcolmnetsec/redis 6.4.3 xxxxxxxxxxxx 3 days ago 34.2MB +malcolmnetsec/suricata 6.4.3 xxxxxxxxxxxx 3 days ago 278MB +malcolmnetsec/zeek 6.4.3 xxxxxxxxxxxx 3 days ago 1GB ``` ### Import from pre-packaged tarballs diff --git a/docs/ubuntu-install-example.md b/docs/ubuntu-install-example.md index 6b7030ab1..734483425 100644 --- a/docs/ubuntu-install-example.md +++ b/docs/ubuntu-install-example.md @@ -121,7 +121,7 @@ Specify external Docker network name (or leave blank for default networking) (): Authenticate against Lightweight Directory Access Protocol (LDAP) server? (y/N): n -Store OpenSearch index snapshots locally in /home/user/Malcolm/opensearch-backup? (Y/n): y +Store PCAP, log and index files locally under /home/user/Malcolm? (Y/n): y Compress OpenSearch index snapshots? (y/N): n @@ -192,6 +192,8 @@ Capture packets using netsniff-ng? (Y/n): y Capture packets using tcpdump? (y/N): n +Should Arkime delete PCAP files based on available storage (see https://arkime.com/faq#pcap-deletion)? (y/N): y + Should Malcolm analyze live network traffic with Suricata? (y/N): y Should Malcolm analyze live network traffic with Zeek? (y/N): y @@ -257,26 +259,26 @@ Pulling zeek ... 
done user@host:~/Malcolm$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE -malcolmnetsec/api 6.4.2 xxxxxxxxxxxx 3 days ago 158MB -malcolmnetsec/arkime 6.4.2 xxxxxxxxxxxx 3 days ago 816MB -malcolmnetsec/dashboards 6.4.2 xxxxxxxxxxxx 3 days ago 1.02GB -malcolmnetsec/dashboards-helper 6.4.2 xxxxxxxxxxxx 3 days ago 184MB -malcolmnetsec/file-monitor 6.4.2 xxxxxxxxxxxx 3 days ago 588MB -malcolmnetsec/file-upload 6.4.2 xxxxxxxxxxxx 3 days ago 259MB -malcolmnetsec/filebeat-oss 6.4.2 xxxxxxxxxxxx 3 days ago 624MB -malcolmnetsec/freq 6.4.2 xxxxxxxxxxxx 3 days ago 132MB -malcolmnetsec/htadmin 6.4.2 xxxxxxxxxxxx 3 days ago 242MB -malcolmnetsec/logstash-oss 6.4.2 xxxxxxxxxxxx 3 days ago 1.35GB -malcolmnetsec/name-map-ui 6.4.2 xxxxxxxxxxxx 3 days ago 143MB -malcolmnetsec/netbox 6.4.2 xxxxxxxxxxxx 3 days ago 1.01GB -malcolmnetsec/nginx-proxy 6.4.2 xxxxxxxxxxxx 3 days ago 121MB -malcolmnetsec/opensearch 6.4.2 xxxxxxxxxxxx 3 days ago 1.17GB -malcolmnetsec/pcap-capture 6.4.2 xxxxxxxxxxxx 3 days ago 121MB -malcolmnetsec/pcap-monitor 6.4.2 xxxxxxxxxxxx 3 days ago 213MB -malcolmnetsec/postgresql 6.4.2 xxxxxxxxxxxx 3 days ago 268MB -malcolmnetsec/redis 6.4.2 xxxxxxxxxxxx 3 days ago 34.2MB -malcolmnetsec/suricata 6.4.2 xxxxxxxxxxxx 3 days ago 278MB -malcolmnetsec/zeek 6.4.2 xxxxxxxxxxxx 3 days ago 1GB +malcolmnetsec/api 6.4.3 xxxxxxxxxxxx 3 days ago 158MB +malcolmnetsec/arkime 6.4.3 xxxxxxxxxxxx 3 days ago 816MB +malcolmnetsec/dashboards 6.4.3 xxxxxxxxxxxx 3 days ago 1.02GB +malcolmnetsec/dashboards-helper 6.4.3 xxxxxxxxxxxx 3 days ago 184MB +malcolmnetsec/file-monitor 6.4.3 xxxxxxxxxxxx 3 days ago 588MB +malcolmnetsec/file-upload 6.4.3 xxxxxxxxxxxx 3 days ago 259MB +malcolmnetsec/filebeat-oss 6.4.3 xxxxxxxxxxxx 3 days ago 624MB +malcolmnetsec/freq 6.4.3 xxxxxxxxxxxx 3 days ago 132MB +malcolmnetsec/htadmin 6.4.3 xxxxxxxxxxxx 3 days ago 242MB +malcolmnetsec/logstash-oss 6.4.3 xxxxxxxxxxxx 3 days ago 1.35GB +malcolmnetsec/name-map-ui 6.4.3 xxxxxxxxxxxx 3 days ago 143MB 
+malcolmnetsec/netbox 6.4.3 xxxxxxxxxxxx 3 days ago 1.01GB +malcolmnetsec/nginx-proxy 6.4.3 xxxxxxxxxxxx 3 days ago 121MB +malcolmnetsec/opensearch 6.4.3 xxxxxxxxxxxx 3 days ago 1.17GB +malcolmnetsec/pcap-capture 6.4.3 xxxxxxxxxxxx 3 days ago 121MB +malcolmnetsec/pcap-monitor 6.4.3 xxxxxxxxxxxx 3 days ago 213MB +malcolmnetsec/postgresql 6.4.3 xxxxxxxxxxxx 3 days ago 268MB +malcolmnetsec/redis 6.4.3 xxxxxxxxxxxx 3 days ago 34.2MB +malcolmnetsec/suricata 6.4.3 xxxxxxxxxxxx 3 days ago 278MB +malcolmnetsec/zeek 6.4.3 xxxxxxxxxxxx 3 days ago 1GB ``` Finally, we can start Malcolm. When Malcolm starts it will stream informational and debug messages to the console. If you wish, you can safely close the console or use `Ctrl+C` to stop these messages; Malcolm will continue running in the background. diff --git a/logstash/pipelines/suricata/11_suricata_logs.conf b/logstash/pipelines/suricata/11_suricata_logs.conf index 6ba71c6fe..6fbd8b175 100644 --- a/logstash/pipelines/suricata/11_suricata_logs.conf +++ b/logstash/pipelines/suricata/11_suricata_logs.conf @@ -110,7 +110,7 @@ filter { rename => { "[suricata][dest_port]" => "[destination][port]" } rename => { "[suricata][src_ip]" => "[source][ip]" } rename => { "[suricata][src_port]" => "[source][port]" } - rename => { "[suricata][proto]" => "[ipProtocol]" } + rename => { "[suricata][proto]" => "[network][transport]" } rename => { "[suricata][event_type]" => "[event][dataset]" } rename => { "[suricata][vlan]" => "[network][vlan][id]" } } @@ -119,21 +119,23 @@ filter { # network protocol stuff # transport protocol (e.g., udp, tcp, etc.) 
- if ([ipProtocol]) { + if ([network][transport]) { + mutate { id => "mutate_lowercase_suricata_network_transport" + lowercase => [ "[network][transport]" ] } translate { id => "translate_suricata_proto" - source => "[ipProtocol]" - target => "[network][transport]" - dictionary_path => "/etc/ip_protocol_number_to_name.yaml" + source => "[network][transport]" + target => "[ipProtocol]" + dictionary_path => "/etc/ip_protocol_name_to_number.yaml" } + mutate { id => "mutate_merge_suricata_proto" + merge => { "[protocol]" => "[network][transport]" } } + } + if ([ipProtocol]) { # ECS - ipProtocol -> network.iana_number mutate { id => "mutate_add_field_suricata_ecs_network_iana_number" add_field => { "[network][iana_number]" => "%{[ipProtocol]}" } } } - if ([network][transport]) { - mutate { id => "mutate_merge_suricata_proto" - merge => { "[protocol]" => "[network][transport]" } } - } # network (application) protocol if ([suricata][app_proto_orig]) { diff --git a/malcolm-iso/build.sh b/malcolm-iso/build.sh index b6f262442..1045b7d4c 100755 --- a/malcolm-iso/build.sh +++ b/malcolm-iso/build.sh @@ -117,7 +117,8 @@ if [ -d "$WORKDIR" ]; then mkdir -p "$MALCOLM_DEST_DIR/suricata/rules/" mkdir -p "$MALCOLM_DEST_DIR/yara/rules/" mkdir -p "$MALCOLM_DEST_DIR/zeek-logs/current/" - mkdir -p "$MALCOLM_DEST_DIR/zeek-logs/extract_files/" + mkdir -p "$MALCOLM_DEST_DIR/zeek-logs/extract_files/preserved" + mkdir -p "$MALCOLM_DEST_DIR/zeek-logs/extract_files/quarantine" mkdir -p "$MALCOLM_DEST_DIR/zeek-logs/live/" mkdir -p "$MALCOLM_DEST_DIR/zeek-logs/processed/" mkdir -p "$MALCOLM_DEST_DIR/zeek-logs/upload/" diff --git a/malcolm-iso/config/package-lists/python.list.chroot b/malcolm-iso/config/package-lists/python.list.chroot index a7be31ca4..2cf6d89cf 100644 --- a/malcolm-iso/config/package-lists/python.list.chroot +++ b/malcolm-iso/config/package-lists/python.list.chroot @@ -7,4 +7,5 @@ python3-psutil python3-pycryptodome python3-dialog python3-requests -python3-ruamel.yaml \ No 
newline at end of file +python3-ruamel.yaml +python3-yaml \ No newline at end of file diff --git a/name-map-ui/config/supervisord.conf b/name-map-ui/config/supervisord.conf index e2a1735fe..c6ea33fce 100644 --- a/name-map-ui/config/supervisord.conf +++ b/name-map-ui/config/supervisord.conf @@ -17,7 +17,7 @@ supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface serverurl=unix:///tmp/supervisor-main.sock [program:php-fpm] -command=/usr/sbin/php-fpm8 -F +command=/usr/sbin/php-fpm81 -F stopasgroup=true killasgroup=true stdout_logfile=/dev/fd/1 diff --git a/netbox/scripts/netbox_init.py b/netbox/scripts/netbox_init.py index 28e530a39..e3209cb86 100755 --- a/netbox/scripts/netbox_init.py +++ b/netbox/scripts/netbox_init.py @@ -1,6 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- +# Copyright (c) 2022 Battelle Energy Alliance, LLC. All rights reserved. + import argparse import ipaddress import json @@ -13,6 +15,7 @@ from collections.abc import Iterable from slugify import slugify +from netbox_library_import import import_library ################################################################################################### args = None @@ -204,6 +207,15 @@ def main(): required=False, help="Device types(s) to create", ) + parser.add_argument( + '-l', + '--library', + dest='libraryDir', + type=str, + default=None, + required=False, + help="Directory containing NetBox device type library", + ) try: parser.error = parser.exit args = parser.parse_args() @@ -424,6 +436,14 @@ def main(): except Exception as e: logging.error(f"{type(e).__name__} processing sites: {e}") + # ###### Library ############################################################################################### + try: + counter = import_library(nb, args.libraryDir) + logging.debug(f"import library results: { counter }") + + except Exception as e: + logging.error(f"{type(e).__name__} processing library: {e}") + # ###### Net Map 
############################################################################################### try: # load net-map.json from file diff --git a/netbox/scripts/netbox_library_import.py b/netbox/scripts/netbox_library_import.py new file mode 100644 index 000000000..2ba40f5cd --- /dev/null +++ b/netbox/scripts/netbox_library_import.py @@ -0,0 +1,666 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright (c) 2022 Battelle Energy Alliance, LLC. All rights reserved. + +# adapted from minitriga/Netbox-Device-Type-Library-Import (MIT License) +# Copyright (c) 2021 Alexander Gittings +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +from collections import Counter +import logging +import yaml +import pynetbox +import glob +import os +import re + + +def slugFormat(name): + return re.sub('\W+', '-', name.lower()) + + +YAML_EXTENSIONS = ['yml', 'yaml'] + + +def getFiles(library_dir, vendors=None): + + files = [] + discoveredVendors = [] + base_path = os.path.join(library_dir, 'device-types', '') + if vendors: + for r, d, f in os.walk(base_path): + for folder in d: + for vendor in vendors: + if vendor.lower() == folder.lower(): + discoveredVendors.append({'name': folder, 'slug': slugFormat(folder)}) + for extension in YAML_EXTENSIONS: + files.extend(glob.glob(base_path + folder + f'/*.{extension}')) + else: + for r, d, f in os.walk(base_path): + for folder in d: + if folder.lower() != "Testing": + discoveredVendors.append({'name': folder, 'slug': slugFormat(folder)}) + for extension in YAML_EXTENSIONS: + files.extend(glob.glob(base_path + f'[!Testing]*/*.{extension}')) + return files, discoveredVendors + + +def get_files_modules(library_dir, vendors=None): + '''Get files list for modules. + + Args: + vendors: List of vendors to sync or None to sync all vendors. 
+ + Returns: + A 2-tuple of: + - list of filenames found + - list of vendors found + + ''' + + files = [] + discoveredVendors = [] + base_path = os.path.join(library_dir, 'module-types', '') + if vendors: + for r, d, f in os.walk(base_path): + for folder in d: + for vendor in vendors: + if vendor.lower() == folder.lower(): + discoveredVendors.append({'name': folder, 'slug': slugFormat(folder)}) + for extension in YAML_EXTENSIONS: + files.extend(glob.glob(base_path + folder + f'/*.{extension}')) + else: + for r, d, f in os.walk(base_path): + for folder in d: + if folder.lower() != "Testing": + discoveredVendors.append({'name': folder, 'slug': slugFormat(folder)}) + for extension in YAML_EXTENSIONS: + files.extend(glob.glob(base_path + f'[!Testing]*/*.{extension}')) + + return files, discoveredVendors + + +def readYAMl(files, **kwargs): + slugs = kwargs.get('slugs', None) + deviceTypes = [] + manufacturers = [] + for file in files: + with open(file, 'r') as stream: + try: + data = yaml.safe_load(stream) + except yaml.YAMLError as exc: + continue + manufacturer = data['manufacturer'] + data['manufacturer'] = {} + data['manufacturer']['name'] = manufacturer + data['manufacturer']['slug'] = slugFormat(manufacturer) + + if slugs and data['slug'] not in slugs: + continue + + deviceTypes.append(data) + manufacturers.append(manufacturer) + return deviceTypes + + +def read_yaml_modules(files, **kwargs): + + slugs = kwargs.get('slugs', None) + module_types = [] + manufacturers = [] + for file in files: + with open(file, 'r') as stream: + try: + data = yaml.safe_load(stream) + except yaml.YAMLError as exc: + continue + manufacturer = data['manufacturer'] + data['manufacturer'] = {} + data['manufacturer']['name'] = manufacturer + data['manufacturer']['slug'] = slugFormat(manufacturer) + + if slugs and data['slug'] not in slugs: + continue + + module_types.append(data) + manufacturers.append(manufacturer) + return module_types + + +def createManufacturers(vendors, nb, 
counter=None): + all_manufacturers = {str(item): item for item in nb.dcim.manufacturers.all()} + need_manufacturers = [] + for vendor in vendors: + try: + manGet = all_manufacturers[vendor["name"]] + except KeyError: + need_manufacturers.append(vendor) + + if not need_manufacturers: + return + + try: + manSuccess = nb.dcim.manufacturers.create(need_manufacturers) + if counter is not None: + for man in manSuccess: + counter.update({'manufacturer': 1}) + except pynetbox.RequestError as e: + pass + + +def createInterfaces(interfaces, deviceType, nb, counter=None): + all_interfaces = {str(item): item for item in nb.dcim.interface_templates.filter(devicetype_id=deviceType)} + need_interfaces = [] + for interface in interfaces: + try: + ifGet = all_interfaces[interface["name"]] + except KeyError: + interface['device_type'] = deviceType + need_interfaces.append(interface) + + if not need_interfaces: + return + + try: + ifSuccess = nb.dcim.interface_templates.create(need_interfaces) + if counter is not None: + for intf in ifSuccess: + counter.update({'updated': 1}) + except pynetbox.RequestError as e: + pass + + +def create_module_interfaces(interfaces, module_type, nb, counter=None): + all_interfaces = {str(item): item for item in nb.dcim.interface_templates.filter(moduletype_id=module_type)} + need_interfaces = [] + for interface in interfaces: + try: + if_res = all_interfaces[interface["name"]] + except KeyError: + interface['module_type'] = module_type + need_interfaces.append(interface) + + if not need_interfaces: + return + + try: + ifSuccess = nb.dcim.interface_templates.create(need_interfaces) + if counter is not None: + for intf in ifSuccess: + counter.update({'module_port_added': 1}) + except pynetbox.RequestError as e: + pass + + +def createConsolePorts(consoleports, deviceType, nb, counter=None): + all_consoleports = {str(item): item for item in nb.dcim.console_port_templates.filter(devicetype_id=deviceType)} + need_consoleports = [] + for consoleport in 
consoleports: + try: + cpGet = all_consoleports[consoleport["name"]] + except KeyError: + consoleport['device_type'] = deviceType + need_consoleports.append(consoleport) + + if not need_consoleports: + return + + try: + cpSuccess = nb.dcim.console_port_templates.create(need_consoleports) + if counter is not None: + for port in cpSuccess: + counter.update({'updated': 1}) + except pynetbox.RequestError as e: + pass + + +def create_module_console_ports(consoleports, module_type, nb, counter=None): + + all_consoleports = {str(item): item for item in nb.dcim.console_port_templates.filter(moduletype_id=module_type)} + need_consoleports = [] + for consoleport in consoleports: + try: + cpGet = all_consoleports[consoleport["name"]] + except KeyError: + consoleport['module_type'] = module_type + need_consoleports.append(consoleport) + + if not need_consoleports: + return + + try: + cpSuccess = nb.dcim.console_port_templates.create(need_consoleports) + if counter is not None: + for port in cpSuccess: + counter.update({'module_port_added': 1}) + except pynetbox.RequestError as e: + pass + + +def createPowerPorts(powerports, deviceType, nb, counter=None): + all_power_ports = {str(item): item for item in nb.dcim.power_port_templates.filter(devicetype_id=deviceType)} + need_power_ports = [] + for powerport in powerports: + try: + ppGet = all_power_ports[powerport["name"]] + except KeyError: + powerport['device_type'] = deviceType + need_power_ports.append(powerport) + + if not need_power_ports: + return + + try: + ppSuccess = nb.dcim.power_port_templates.create(need_power_ports) + if counter is not None: + for pp in ppSuccess: + counter.update({'updated': 1}) + except pynetbox.RequestError as e: + pass + + +def create_module_power_ports(powerports, module_type, nb, counter=None): + all_power_ports = {str(item): item for item in nb.dcim.power_port_templates.filter(moduletype_id=module_type)} + need_power_ports = [] + for powerport in powerports: + try: + ppGet = 
all_power_ports[powerport["name"]] + except KeyError: + powerport['module_type'] = module_type + need_power_ports.append(powerport) + + if not need_power_ports: + return + + try: + ppSuccess = nb.dcim.power_port_templates.create(need_power_ports) + if counter is not None: + for pp in ppSuccess: + counter.update({'module_port_added': 1}) + except pynetbox.RequestError as e: + pass + + +def createConsoleServerPorts(consoleserverports, deviceType, nb, counter=None): + all_consoleserverports = { + str(item): item for item in nb.dcim.console_server_port_templates.filter(devicetype_id=deviceType) + } + need_consoleserverports = [] + for csport in consoleserverports: + try: + cspGet = all_consoleserverports[csport["name"]] + except KeyError: + csport['device_type'] = deviceType + need_consoleserverports.append(csport) + + if not need_consoleserverports: + return + + try: + cspSuccess = nb.dcim.console_server_port_templates.create(need_consoleserverports) + if counter is not None: + for csp in cspSuccess: + counter.update({'updated': 1}) + except pynetbox.RequestError as e: + pass + + +def create_module_console_server_ports(consoleserverports, module_type, nb, counter=None): + all_consoleserverports = { + str(item): item for item in nb.dcim.console_server_port_templates.filter(moduletype_id=module_type) + } + need_consoleserverports = [] + for csport in consoleserverports: + try: + cspGet = all_consoleserverports[csport["name"]] + except KeyError: + csport['module_type'] = module_type + need_consoleserverports.append(csport) + + if not need_consoleserverports: + return + + try: + cspSuccess = nb.dcim.console_server_port_templates.create(need_consoleserverports) + if counter is not None: + for csp in cspSuccess: + counter.update({'module_port_added': 1}) + except pynetbox.RequestError as e: + pass + + +def createFrontPorts(frontports, deviceType, nb, counter=None): + all_frontports = {str(item): item for item in nb.dcim.front_port_templates.filter(devicetype_id=deviceType)} 
+ need_frontports = [] + for frontport in frontports: + try: + fpGet = all_frontports[frontport["name"]] + except KeyError: + frontport['device_type'] = deviceType + need_frontports.append(frontport) + + if not need_frontports: + return + + all_rearports = {str(item): item for item in nb.dcim.rear_port_templates.filter(devicetype_id=deviceType)} + for port in need_frontports: + try: + rpGet = all_rearports[port["rear_port"]] + port['rear_port'] = rpGet.id + except KeyError: + pass + + try: + fpSuccess = nb.dcim.front_port_templates.create(need_frontports) + if counter is not None: + for fp in fpSuccess: + counter.update({'updated': 1}) + except pynetbox.RequestError as e: + pass + + +def create_module_front_ports(frontports, module_type, nb, counter=None): + all_frontports = {str(item): item for item in nb.dcim.front_port_templates.filter(moduletype_id=module_type)} + need_frontports = [] + for frontport in frontports: + try: + fpGet = all_frontports[frontport["name"]] + except KeyError: + frontport['module_type'] = module_type + need_frontports.append(frontport) + + if not need_frontports: + return + + all_rearports = {str(item): item for item in nb.dcim.rear_port_templates.filter(moduletype_id=module_type)} + for port in need_frontports: + try: + rpGet = all_rearports[port["rear_port"]] + port['rear_port'] = rpGet.id + except KeyError: + pass + + try: + fpSuccess = nb.dcim.front_port_templates.create(need_frontports) + if counter is not None: + for fp in fpSuccess: + counter.update({'module_port_added': 1}) + except pynetbox.RequestError as e: + pass + + +def createRearPorts(rearports, deviceType, nb, counter=None): + all_rearports = {str(item): item for item in nb.dcim.rear_port_templates.filter(devicetype_id=deviceType)} + need_rearports = [] + for rearport in rearports: + try: + rpGet = all_rearports[rearport["name"]] + except KeyError: + rearport['device_type'] = deviceType + need_rearports.append(rearport) + + if not need_rearports: + return + + try: + 
rpSuccess = nb.dcim.rear_port_templates.create(need_rearports) + if counter is not None: + for rp in rpSuccess: + counter.update({'updated': 1}) + except pynetbox.RequestError as e: + pass + + +def create_module_rear_ports(rearports, module_type, nb, counter=None): + all_rearports = {str(item): item for item in nb.dcim.rear_port_templates.filter(moduletype_id=module_type)} + need_rearports = [] + for rearport in rearports: + try: + rpGet = all_rearports[rearport["name"]] + except KeyError: + rearport['module_type'] = module_type + need_rearports.append(rearport) + + if not need_rearports: + return + + try: + rpSuccess = nb.dcim.rear_port_templates.create(need_rearports) + if counter is not None: + for rp in rpSuccess: + counter.update({'module_port_added': 1}) + except pynetbox.RequestError as e: + pass + + +def createDeviceBays(devicebays, deviceType, nb, counter=None): + all_devicebays = {str(item): item for item in nb.dcim.device_bay_templates.filter(devicetype_id=deviceType)} + need_devicebays = [] + for devicebay in devicebays: + try: + dbGet = all_devicebays[devicebay["name"]] + except KeyError: + devicebay['device_type'] = deviceType + need_devicebays.append(devicebay) + + if not need_devicebays: + return + + try: + dbSuccess = nb.dcim.device_bay_templates.create(need_devicebays) + if counter is not None: + for db in dbSuccess: + counter.update({'updated': 1}) + except pynetbox.RequestError as e: + pass + + +def create_module_bays(module_bays, device_type, nb, counter=None): + '''Create module bays. + + Args: + module_bays: parsed YAML module_bays section. + device_type: the device type instance from netbox. 
+ nb: Netbox API instance + ''' + all_module_bays = {str(item): item for item in nb.dcim.module_bay_templates.filter(devicetype_id=device_type)} + need_module_bays = [] + for module_bay in module_bays: + try: + dbGet = all_module_bays[module_bay["name"]] + except KeyError: + module_bay['device_type'] = device_type + need_module_bays.append(module_bay) + + if not need_module_bays: + return + + try: + module_bay_res = nb.dcim.module_bay_templates.create(need_module_bays) + if counter is not None: + for module_bay in module_bay_res: + counter.update({'updated': 1}) + except pynetbox.RequestError as e: + pass + + +def createPowerOutlets(poweroutlets, deviceType, nb, counter=None): + all_poweroutlets = {str(item): item for item in nb.dcim.power_outlet_templates.filter(devicetype_id=deviceType)} + need_poweroutlets = [] + for poweroutlet in poweroutlets: + try: + poGet = all_poweroutlets[poweroutlet["name"]] + except KeyError: + poweroutlet["device_type"] = deviceType + need_poweroutlets.append(poweroutlet) + + if not need_poweroutlets: + return + + all_power_ports = {str(item): item for item in nb.dcim.power_port_templates.filter(devicetype_id=deviceType)} + for outlet in need_poweroutlets: + try: + ppGet = all_power_ports[outlet["power_port"]] + outlet['power_port'] = ppGet.id + except KeyError: + pass + + try: + poSuccess = nb.dcim.power_outlet_templates.create(need_poweroutlets) + if counter is not None: + for po in poSuccess: + counter.update({'updated': 1}) + except pynetbox.RequestError as e: + pass + + +def create_module_power_outlets(poweroutlets, module_type, nb, counter=None): + '''Create missing module power outlets. + + Args: + poweroutlets: YAML power outlet data. + module_type: Netbox module_type instance. + nb: pynetbox API instance. 
+ + Returns: + None + + Raises: + None + ''' + all_poweroutlets = {str(item): item for item in nb.dcim.power_outlet_templates.filter(moduletype_id=module_type)} + need_poweroutlets = [] + for poweroutlet in poweroutlets: + try: + poGet = all_poweroutlets[poweroutlet["name"]] + except KeyError: + poweroutlet["module_type"] = module_type + need_poweroutlets.append(poweroutlet) + + if not need_poweroutlets: + return + + all_power_ports = {str(item): item for item in nb.dcim.power_port_templates.filter(moduletype_id=module_type)} + for outlet in need_poweroutlets: + try: + ppGet = all_power_ports[outlet["power_port"]] + outlet['power_port'] = ppGet.id + except KeyError: + pass + + try: + poSuccess = nb.dcim.power_outlet_templates.create(need_poweroutlets) + if counter is not None: + for po in poSuccess: + counter.update({'module_port_added': 1}) + except pynetbox.RequestError as e: + pass + + +def createDeviceTypes(deviceTypes, nb, counter=None): + all_device_types = {str(item): item for item in nb.dcim.device_types.all()} + for deviceType in deviceTypes: + try: + dt = all_device_types[deviceType["model"]] + except KeyError: + try: + dt = nb.dcim.device_types.create(deviceType) + if counter is not None: + counter.update({'added': 1}) + except pynetbox.RequestError as e: + pass + + if "interfaces" in deviceType: + createInterfaces(deviceType["interfaces"], dt.id, nb, counter=counter) + if "power-ports" in deviceType: + createPowerPorts(deviceType["power-ports"], dt.id, nb, counter=counter) + if "power-port" in deviceType: + createPowerPorts(deviceType["power-port"], dt.id, nb, counter=counter) + if "console-ports" in deviceType: + createConsolePorts(deviceType["console-ports"], dt.id, nb, counter=counter) + if "power-outlets" in deviceType: + createPowerOutlets(deviceType["power-outlets"], dt.id, nb, counter=counter) + if "console-server-ports" in deviceType: + createConsoleServerPorts(deviceType["console-server-ports"], dt.id, nb, counter=counter) + if "rear-ports" in 
deviceType: + createRearPorts(deviceType["rear-ports"], dt.id, nb, counter=counter) + if "front-ports" in deviceType: + createFrontPorts(deviceType["front-ports"], dt.id, nb, counter=counter) + if "device-bays" in deviceType: + createDeviceBays(deviceType["device-bays"], dt.id, nb, counter=counter) + if "module-bays" in deviceType: + create_module_bays(deviceType['module-bays'], dt.id, nb, counter=counter) + + +def create_module_types(module_types, nb, counter=None): + '''Create missing module types. + + Args: + module_types: yaml data from repo. + nb: pynetbox API instance + + Returns: + None + ''' + + all_module_types = {} + for curr_nb_mt in nb.dcim.module_types.all(): + if curr_nb_mt.manufacturer.slug not in all_module_types: + all_module_types[curr_nb_mt.manufacturer.slug] = {} + + all_module_types[curr_nb_mt.manufacturer.slug][curr_nb_mt.model] = curr_nb_mt + + for curr_mt in module_types: + try: + module_type_res = all_module_types[curr_mt['manufacturer']['slug']][curr_mt["model"]] + except KeyError: + try: + module_type_res = nb.dcim.module_types.create(curr_mt) + if counter is not None: + counter.update({'module_added': 1}) + except pynetbox.RequestError as exce: + pass + + # module_type_res = all_module_types[curr_mt['manufacturer']['slug']][curr_mt["model"]] + + if "interfaces" in curr_mt: + create_module_interfaces(curr_mt["interfaces"], module_type_res.id, nb, counter=counter) + if "power-ports" in curr_mt: + create_module_power_ports(curr_mt["power-ports"], module_type_res.id, nb, counter=counter) + if "console-ports" in curr_mt: + create_module_console_ports(curr_mt["console-ports"], module_type_res.id, nb, counter=counter) + if "power-outlets" in curr_mt: # No current entries to test + create_module_power_outlets(curr_mt["power-outlets"], module_type_res.id, nb, counter=counter) + if "console-server-ports" in curr_mt: # No current entries to test + create_module_console_server_ports(curr_mt["console-server-ports"], module_type_res.id, nb, 
counter=counter) + if "rear-ports" in curr_mt: + create_module_rear_ports(curr_mt["rear-ports"], module_type_res.id, nb, counter=counter) + if "front-ports" in curr_mt: + create_module_front_ports(curr_mt["front-ports"], module_type_res.id, nb, counter=counter) + + +def import_library(nb, library_dir): + cntr = Counter( + added=0, + updated=0, + manufacturer=0, + module_added=0, + module_port_added=0, + ) + + if library_dir is not None and os.path.isdir(library_dir): + + files, vendors = getFiles(library_dir) + deviceTypes = readYAMl(files) + createManufacturers(vendors, nb, counter=cntr) + createDeviceTypes(deviceTypes, nb, counter=cntr) + + files, vendors = get_files_modules(library_dir) + module_types = read_yaml_modules(files) + createManufacturers(vendors, nb, counter=cntr) + create_module_types(module_types, nb, counter=cntr) + + return cntr diff --git a/netbox/supervisord.conf b/netbox/supervisord.conf index 2571a5bb9..7b2fedced 100644 --- a/netbox/supervisord.conf +++ b/netbox/supervisord.conf @@ -38,6 +38,7 @@ command=/opt/netbox/venv/bin/python /usr/local/bin/netbox_init.py --url "http://localhost:8080/netbox" --token "%(ENV_SUPERUSER_API_TOKEN)s" --net-map /usr/local/share/net-map.json + --library /opt/netbox-devicetype-library autostart=true autorestart=false startsecs=0 diff --git a/nginx/nginx_readonly.conf b/nginx/nginx_readonly.conf index 95f9a3ae4..f08b31e16 100644 --- a/nginx/nginx_readonly.conf +++ b/nginx/nginx_readonly.conf @@ -37,10 +37,6 @@ http { server api:5000; } - upstream malcolm-readme { - server arkime:8000; - } - upstream dashboards { server dashboards:5601; } @@ -49,15 +45,11 @@ http { server dashboards-helper:28991; } - upstream netbox { - server netbox:8080; - } - upstream extracted-file-http-server { server file-monitor:8440; } - # Arkime interface + # Main web interface server { listen 443; include /etc/nginx/nginx_ssl_config.conf; @@ -67,9 +59,8 @@ http { # Malcolm readme location /readme { - proxy_pass 
http://malcolm-readme/README.html; - proxy_redirect off; - proxy_set_header Host arkime.malcolm.local; + root /usr/share/nginx/html; + try_files $uri $uri/index.html; } # Arkime -> Dashboards shortcut @@ -104,7 +95,8 @@ http { # Dashboards -> Arkime shortcut location ~* /iddash2ark/(.*) { rewrite ^.*/iddash2ark/(.*) /sessions?expression=($1) redirect; - proxy_pass http://arkime; + proxy_pass https://arkime; + proxy_ssl_verify off; proxy_redirect off; proxy_set_header Host arkime.malcolm.local; proxy_set_header http_auth_http_user $authenticated_user; @@ -139,16 +131,6 @@ http { proxy_set_header Host file-monitor.malcolm.local; } - # netbox - location /netbox { - proxy_pass http://netbox; - proxy_redirect off; - proxy_set_header Host netbox.malcolm.local; - proxy_set_header X-Forwarded-Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-Proto $scheme; - } - # favicon, logos, banners, etc. include /etc/nginx/nginx_image_aliases.conf; @@ -160,13 +142,15 @@ http { add_header Content-Type "application/javascript"; default_type application/javascript; add_header X-Content-Type-Options 'nosniff'; - proxy_pass http://arkime/cyberchef/$1; + proxy_pass https://arkime/cyberchef/$1; + proxy_ssl_verify off; proxy_redirect off; proxy_set_header Host arkime.malcolm.local; proxy_set_header http_auth_http_user $authenticated_user; proxy_set_header Authorization ""; } + # Malcolm API location /mapi { proxy_pass http://api/; proxy_redirect off; @@ -179,7 +163,8 @@ http { location / { limit_except GET POST { deny all; } - proxy_pass http://arkime; + proxy_pass https://arkime; + proxy_ssl_verify off; proxy_redirect off; proxy_set_header Host arkime.malcolm.local; proxy_set_header http_auth_http_user $authenticated_user; diff --git a/scripts/control.py b/scripts/control.py index eed575719..53e631f0a 100755 --- a/scripts/control.py +++ b/scripts/control.py @@ -48,6 +48,8 @@ def __exit__(self, *args): dockerBin = None dockerComposeBin = None 
opensslBin = None +yamlImported = None +dockerComposeYaml = None ################################################################################################### try: @@ -495,6 +497,7 @@ def stop(wipe=False): global args global dockerBin global dockerComposeBin + global dockerComposeYaml # docker-compose use local temporary path osEnv = os.environ.copy() @@ -515,47 +518,67 @@ def stop(wipe=False): exit(err) if wipe: - # delete OpenSearch database - shutil.rmtree(os.path.join(MalcolmPath, os.path.join('opensearch', 'nodes')), ignore_errors=True) - - # delete Zeek live-related spool files - shutil.rmtree( - os.path.join(MalcolmPath, os.path.join('zeek-logs', os.path.join('live', 'spool'))), ignore_errors=True + # there is some overlap here among some of these containers, but it doesn't matter + boundPathsToWipe = ( + BoundPath("arkime", "/opt/arkime/logs", True, None, None), + BoundPath("arkime", "/opt/arkime/raw", True, None, None), + BoundPath("filebeat", "/zeek", True, None, None), + BoundPath("file-monitor", "/zeek/logs", True, None, None), + BoundPath("netbox", "/opt/netbox/netbox/media", True, None, ["."]), + BoundPath("netbox-postgres", "/var/lib/postgresql/data", True, None, ["."]), + BoundPath("netbox-redis", "/data", True, None, ["."]), + BoundPath("opensearch", "/usr/share/opensearch/data", True, ["nodes"], None), + BoundPath("pcap-capture", "/pcap", True, None, None), + BoundPath("pcap-monitor", "/pcap", True, ["processed", "upload"], None), + BoundPath("suricata", "/var/log/suricata", True, None, ["."]), + BoundPath("upload", "/var/www/upload/server/php/chroot/files", True, None, None), + BoundPath("zeek", "/zeek/extract_files", True, None, None), + BoundPath("zeek", "/zeek/upload", True, None, None), + BoundPath("zeek-live", "/zeek/live", True, ["spool"], None), + BoundPath( + "filebeat", + "/zeek", + False, + ["processed", "current", "live"], + ["processed", "current", "live"], + ), ) - - # delete data files (backups, zeek logs, arkime logs, PCAP 
files, captured PCAP files) - for dataDir in [ - 'opensearch-backup', - 'zeek-logs', - 'suricata-logs', - 'arkime-logs', - 'pcap', - 'arkime-raw', - os.path.join('netbox', 'media'), - os.path.join('netbox', 'postgres'), - os.path.join('netbox', 'redis'), - ]: - for root, dirnames, filenames in os.walk(os.path.join(MalcolmPath, dataDir), topdown=True, onerror=None): - for file in filenames: - fileSpec = os.path.join(root, file) - if (os.path.isfile(fileSpec) or os.path.islink(fileSpec)) and (not file.startswith('.git')): - try: - os.remove(fileSpec) - except: - pass - - # clean up empty directories - for dataDir in [ - os.path.join('opensearch-backup', 'logs'), - os.path.join('zeek-logs', 'processed'), - os.path.join('zeek-logs', 'current'), - os.path.join('zeek-logs', 'live'), - os.path.join('suricata-logs'), - os.path.join('netbox', 'media'), - os.path.join('netbox', 'postgres'), - os.path.join('netbox', 'redis'), - ]: - RemoveEmptyFolders(dataDir, removeRoot=False) + for boundPath in boundPathsToWipe: + localPath = LocalPathForContainerBindMount( + boundPath.service, + dockerComposeYaml, + boundPath.container_dir, + MalcolmPath, + ) + if localPath and os.path.isdir(localPath): + # delete files + if boundPath.files: + if args.debug: + eprint(f'Walking "{localPath}" for file deletion') + for root, dirnames, filenames in os.walk(localPath, topdown=True, onerror=None): + for file in filenames: + fileSpec = os.path.join(root, file) + if (os.path.isfile(fileSpec) or os.path.islink(fileSpec)) and (not file.startswith('.git')): + try: + os.remove(fileSpec) + except: + pass + # delete whole directories + if boundPath.relative_dirs: + for relDir in GetIterable(boundPath.relative_dirs): + tmpPath = os.path.join(localPath, relDir) + if os.path.isdir(tmpPath): + if args.debug: + eprint(f'Performing rmtree on "{tmpPath}"') + shutil.rmtree(tmpPath, ignore_errors=True) + # cleanup empty directories + if boundPath.clean_empty_dirs: + for cleanDir in 
GetIterable(boundPath.clean_empty_dirs): + tmpPath = os.path.join(localPath, cleanDir) + if os.path.isdir(tmpPath): + if args.debug: + eprint(f'Performing RemoveEmptyFolders on "{tmpPath}"') + RemoveEmptyFolders(tmpPath, removeRoot=False) eprint("Malcolm has been stopped and its data cleared\n") @@ -607,31 +630,54 @@ def start(): os.chmod(authFile, stat.S_IRUSR | stat.S_IWUSR) # make sure some directories exist before we start - for path in [ - os.path.join(MalcolmPath, 'opensearch'), - os.path.join(MalcolmPath, 'opensearch-backup'), - os.path.join(MalcolmPath, os.path.join('nginx', 'ca-trust')), - os.path.join(MalcolmPath, os.path.join('netbox', 'media')), - os.path.join(MalcolmPath, os.path.join('netbox', 'postgres')), - os.path.join(MalcolmPath, os.path.join('netbox', 'redis')), - os.path.join(MalcolmPath, os.path.join('pcap', 'processed')), - os.path.join(MalcolmPath, os.path.join('pcap', 'upload')), - os.path.join(MalcolmPath, os.path.join('suricata-logs', 'live')), - os.path.join(MalcolmPath, os.path.join('zeek', os.path.join('intel', 'MISP'))), - os.path.join(MalcolmPath, os.path.join('zeek', os.path.join('intel', 'STIX'))), - os.path.join(MalcolmPath, os.path.join('zeek-logs', 'current')), - os.path.join(MalcolmPath, os.path.join('zeek-logs', 'live')), - os.path.join(MalcolmPath, os.path.join('zeek-logs', 'extract_files')), - os.path.join(MalcolmPath, os.path.join('zeek-logs', 'processed')), - os.path.join(MalcolmPath, os.path.join('zeek-logs', 'upload')), - ]: - try: - os.makedirs(path) - except OSError as exc: - if (exc.errno == errno.EEXIST) and os.path.isdir(path): - pass - else: - raise + boundPathsToCreate = ( + BoundPath("arkime", "/opt/arkime/logs", False, None, None), + BoundPath("arkime", "/opt/arkime/raw", False, None, None), + BoundPath("file-monitor", "/zeek/logs", False, None, None), + BoundPath("nginx-proxy", "/var/local/ca-trust", False, None, None), + BoundPath("netbox", "/opt/netbox/netbox/media", False, None, None), + 
BoundPath("netbox-postgres", "/var/lib/postgresql/data", False, None, None), + BoundPath("netbox-redis", "/data", False, None, None), + BoundPath("opensearch", "/usr/share/opensearch/data", False, ["nodes"], None), + BoundPath("opensearch", "/opt/opensearch/backup", False, None, None), + BoundPath("pcap-capture", "/pcap", False, ["processed", "upload"], None), + BoundPath("suricata", "/var/log/suricata", False, ["live"], None), + BoundPath("upload", "/var/www/upload/server/php/chroot/files", False, None, None), + BoundPath("zeek", "/zeek/extract_files", False, None, None), + BoundPath("zeek", "/zeek/upload", False, None, None), + BoundPath("zeek", "/opt/zeek/share/zeek/site/intel", False, ["MISP", "STIX"], None), + BoundPath("zeek-live", "/zeek/live", False, ["spool"], None), + BoundPath("filebeat", "/zeek", False, ["processed", "current", "live", "extract_files", "upload"], None), + ) + for boundPath in boundPathsToCreate: + localPath = LocalPathForContainerBindMount( + boundPath.service, + dockerComposeYaml, + boundPath.container_dir, + MalcolmPath, + ) + if localPath: + try: + if args.debug: + eprint(f'Ensuring "{localPath}" exists') + os.makedirs(localPath) + except OSError as exc: + if (exc.errno == errno.EEXIST) and os.path.isdir(localPath): + pass + else: + raise + if boundPath.relative_dirs: + for relDir in GetIterable(boundPath.relative_dirs): + tmpPath = os.path.join(localPath, relDir) + try: + if args.debug: + eprint(f'Ensuring "{tmpPath}" exists') + os.makedirs(tmpPath) + except OSError as exc: + if (exc.errno == errno.EEXIST) and os.path.isdir(tmpPath): + pass + else: + raise # touch the zeek intel file open(os.path.join(MalcolmPath, os.path.join('zeek', os.path.join('intel', '__load__.zeek'))), 'a').close() @@ -1208,6 +1254,8 @@ def main(): global dockerBin global dockerComposeBin global opensslBin + global yamlImported + global dockerComposeYaml # extract arguments from the command line # print (sys.argv[1:]); @@ -1298,6 +1346,12 @@ def main(): else: 
sys.tracebacklimit = 0 + yamlImported = YAMLDynamic(debug=args.debug) + if args.debug: + eprint(f"Imported yaml: {yamlImported}") + if not yamlImported: + exit(2) + with pushd(MalcolmPath): # don't run this as root @@ -1333,6 +1387,10 @@ def main(): if err != 0: raise Exception(f'{ScriptName} requires docker-compose, please run install.py') + # load compose file YAML (used to find some volume bind mount locations) + with open(args.composeFile, 'r') as cf: + dockerComposeYaml = yamlImported.safe_load(cf) + # identify openssl binary opensslBin = 'openssl.exe' if ((pyPlatform == PLATFORM_WINDOWS) and Which('openssl.exe')) else 'openssl' diff --git a/scripts/demo/reset_and_auto_populate.sh b/scripts/demo/reset_and_auto_populate.sh index 3dadf829c..2ede711e3 100755 --- a/scripts/demo/reset_and_auto_populate.sh +++ b/scripts/demo/reset_and_auto_populate.sh @@ -24,6 +24,12 @@ # -x # remaining parameters: PCAP file(s) +# Those PCAP files with FILENAMES (not path) prepended with '=' will not be time-adjusted. eg., +# /home/ec2-user/artifacts/current/Cyberville.pcap - will be time-adjusted +# /home/ec2-user/artifacts/ctf/=ctfd.pcap - will NOT be time-adjusted +# The file itself shouldn't be named with a '=', it's just an indicator. +# That character will be removed before processing. 
+ ############################################################################### # force bash if [ -z "$BASH_VERSION" ]; then @@ -194,17 +200,32 @@ if [[ -f "$MALCOLM_DOCKER_COMPOSE" ]] && \ pushd "$WORKDIR" >/dev/null 2>&1 PCAP_FILES_ADJUSTED=() + PCAP_FILES_NOT_ADJUSTED=() if (( ${#PCAP_FILES[@]} > 0 )); then for ((i = 0; i < ${#PCAP_FILES[@]}; i++)); do - PCAP_FILE_ABSOLUTE="$($REALPATH -e "${PCAP_FILES[$i]}")" - PCAP_FILE_ADJUSTED="$WORKDIR"/"$(basename "${PCAP_FILES[$i]}")" + PCAP_FILE_DIRNAME="$(dirname "${PCAP_FILES[$i]}")" + PCAP_FILE_BASENAME="$(basename "${PCAP_FILES[$i]}")" + if [[ "$PCAP_FILE_BASENAME" =~ ^= ]]; then + # don't time-adjust files prepended with =, remove the = from the filename and insert as-is + PCAP_FILE_BASENAME="${PCAP_FILE_BASENAME:1}" + PCAP_ADJUST="false" + else + PCAP_ADJUST="true" + fi + PCAP_FILE_ABSOLUTE="$($REALPATH -e "$PCAP_FILE_DIRNAME"/"$PCAP_FILE_BASENAME")" + PCAP_FILE_ADJUSTED="$WORKDIR"/"$PCAP_FILE_BASENAME" cp $VERBOSE_FLAG "$PCAP_FILE_ABSOLUTE" "$PCAP_FILE_ADJUSTED" - [[ -f "$PCAP_FILE_ADJUSTED" ]] && \ - PCAP_FILES_ADJUSTED+=("$PCAP_FILE_ADJUSTED") + if [[ -f "$PCAP_FILE_ADJUSTED" ]]; then + if [[ "$PCAP_ADJUST" == "true" ]]; then + PCAP_FILES_ADJUSTED+=("$PCAP_FILE_ADJUSTED") + else + PCAP_FILES_NOT_ADJUSTED+=("$PCAP_FILE_ADJUSTED") + fi + fi done - [[ -n "$PCAP_ADJUST_SCRIPT" ]] && \ + [[ -n "$PCAP_ADJUST_SCRIPT" ]] && (( ${#PCAP_FILES_ADJUSTED[@]} > 0 )) && \ "$PCAP_ADJUST_SCRIPT" $VERBOSE_FLAG \ --time "$PCAP_DATE" \ --relative "$PCAP_RELATIVE_ADJUST" \ @@ -244,9 +265,10 @@ if [[ -f "$MALCOLM_DOCKER_COMPOSE" ]] && \ done sleep 30 - if (( ${#PCAP_FILES_ADJUSTED[@]} > 0 )); then - # copy the adjusted PCAP file(s) to the Malcolm upload directory to be processed - cp $VERBOSE_FLAG "${PCAP_FILES_ADJUSTED[@]}" ./pcap/upload/ + if (( ${#PCAP_FILES_ADJUSTED[@]} > 0 )) || (( ${#PCAP_FILES_NOT_ADJUSTED[@]} > 0 )); then + # copy the PCAP file(s) to the Malcolm upload directory to be processed + (( 
${#PCAP_FILES_ADJUSTED[@]} > 0 )) && cp $VERBOSE_FLAG "${PCAP_FILES_ADJUSTED[@]}" ./pcap/upload/ + (( ${#PCAP_FILES_NOT_ADJUSTED[@]} > 0 )) && cp $VERBOSE_FLAG "${PCAP_FILES_NOT_ADJUSTED[@]}" ./pcap/upload/ if (( $PCAP_PROCESS_IDLE_SECONDS > 0 )); then # wait for processing to finish out (count becomes "idle", no longer increasing) @@ -304,11 +326,11 @@ if [[ -f "$MALCOLM_DOCKER_COMPOSE" ]] && \ for USER in \ $(cat nginx/htpasswd | cut -d: -f1) \ $(grep -q -P "NGINX_BASIC_AUTH\s*:\s*'no_authentication'" "$MALCOLM_FILE" && echo guest); do - docker-compose -f "$MALCOLM_FILE" exec -T arkime curl -sSL -XGET \ + docker-compose -f "$MALCOLM_FILE" exec -T arkime curl -ksSL -XGET \ --header 'Content-type:application/json' \ --header "http_auth_http_user:$USER" \ --header "Authorization:" \ - "http://localhost:8005" + "https://localhost:8005" || true done sleep 5 [[ -n $VERBOSE_FLAG ]] && echo "Setting cluster to read-only" >&2 @@ -316,7 +338,7 @@ if [[ -f "$MALCOLM_DOCKER_COMPOSE" ]] && \ sleep 5 docker-compose -f "$MALCOLM_FILE" exec -T dashboards-helper /data/opensearch_read_only.py -i _cluster sleep 5 - for CONTAINER in filebeat logstash upload pcap-monitor zeek name-map-ui pcap-capture freq; do + for CONTAINER in filebeat logstash upload pcap-monitor zeek name-map-ui netbox netbox-postgres netbox-redis netbox-redis-cache pcap-capture freq; do docker-compose -f "$MALCOLM_FILE" pause "$CONTAINER" || true done sleep 5 diff --git a/scripts/install.py b/scripts/install.py index 50ff7c608..2079aa8be 100755 --- a/scripts/install.py +++ b/scripts/install.py @@ -10,6 +10,7 @@ import glob import json import os +import pathlib import platform import pprint import math @@ -44,6 +45,7 @@ ################################################################################################### args = None requests_imported = None +yaml_imported = None ################################################################################################### # get interactive user response to Y/N 
question @@ -376,11 +378,11 @@ def tweak_malcolm_runtime( osMemory = '30g' lsMemory = '6g' elif self.totalMemoryGigs >= 31.0: - osMemory = '21g' - lsMemory = '3500m' + osMemory = '16g' + lsMemory = '3g' elif self.totalMemoryGigs >= 15.0: osMemory = '10g' - lsMemory = '3g' + lsMemory = '2500m' elif self.totalMemoryGigs >= 11.0: osMemory = '6g' lsMemory = '2500m' @@ -544,22 +546,143 @@ def tweak_malcolm_runtime( except: pass - # snapshot repository directory and compression - indexSnapshotDir = './opensearch-backup' + # directories for data volume mounts (PCAP storage, Zeek log storage, OpenSearch indexes, etc.) + indexDir = './opensearch' + indexDirDefault = os.path.join(malcolm_install_path, indexDir) + indexDirFull = os.path.realpath(indexDirDefault) + indexSnapshotCompressed = False - if not opensearchPrimaryRemote: + indexSnapshotDir = './opensearch-backup' + indexSnapshotDirDefault = os.path.join(malcolm_install_path, indexSnapshotDir) + indexSnapshotDirFull = os.path.realpath(indexSnapshotDirDefault) + + pcapDir = './pcap' + pcapDirDefault = os.path.join(malcolm_install_path, pcapDir) + pcapDirFull = os.path.realpath(pcapDirDefault) + + suricataLogDir = './suricata-logs' + suricataLogDirDefault = os.path.join(malcolm_install_path, suricataLogDir) + suricataLogDirFull = os.path.realpath(suricataLogDirDefault) + + zeekLogDir = './zeek-logs' + zeekLogDirDefault = os.path.join(malcolm_install_path, zeekLogDir) + zeekLogDirFull = os.path.realpath(zeekLogDirDefault) + + if not InstallerYesOrNo( + 'Store PCAP, log and index files locally under {}?'.format(malcolm_install_path), + default=True, + ): + # PCAP directory if not InstallerYesOrNo( - 'Store OpenSearch index snapshots locally in {}?'.format( - os.path.join(malcolm_install_path, 'opensearch-backup') - ), + 'Store PCAP files locally in {}?'.format(pcapDirDefault), default=True, ): while True: - indexSnapshotDir = InstallerAskForString('Enter OpenSearch index snapshot directory') - if (len(indexSnapshotDir) > 
1) and os.path.isdir(indexSnapshotDir): - indexSnapshotDir = os.path.realpath(indexSnapshotDir) + pcapDir = InstallerAskForString('Enter PCAP directory') + if (len(pcapDir) > 1) and os.path.isdir(pcapDir): + pcapDirFull = os.path.realpath(pcapDir) + pcapDir = ( + f"./{os.path.relpath(pcapDirDefault, malcolm_install_path)}" + if same_file_or_dir(pcapDirDefault, pcapDirFull) + else pcapDirFull + ) break - indexSnapshotCompressed = InstallerYesOrNo('Compress OpenSearch index snapshots?', default=False) + + # Zeek log directory + if not InstallerYesOrNo( + 'Store Zeek logs locally in {}?'.format(zeekLogDirDefault), + default=True, + ): + while True: + zeekLogDir = InstallerAskForString('Enter Zeek log directory') + if (len(zeekLogDir) > 1) and os.path.isdir(zeekLogDir): + zeekLogDirFull = os.path.realpath(zeekLogDir) + zeekLogDir = ( + f"./{os.path.relpath(zeekLogDirDefault, malcolm_install_path)}" + if same_file_or_dir(zeekLogDirDefault, zeekLogDirFull) + else zeekLogDirFull + ) + break + + # Suricata log directory + if not InstallerYesOrNo( + 'Store Suricata logs locally in {}?'.format(suricataLogDirDefault), + default=True, + ): + while True: + suricataLogDir = InstallerAskForString('Enter Suricata log directory') + if (len(suricataLogDir) > 1) and os.path.isdir(suricataLogDir): + suricataLogDirFull = os.path.realpath(suricataLogDir) + suricataLogDir = ( + f"./{os.path.relpath(suricataLogDirDefault, malcolm_install_path)}" + if same_file_or_dir(suricataLogDirDefault, suricataLogDirFull) + else suricataLogDirFull + ) + break + + if not opensearchPrimaryRemote: + # opensearch index directory + if not InstallerYesOrNo( + 'Store OpenSearch indices locally in {}?'.format(indexDirDefault), + default=True, + ): + while True: + indexDir = InstallerAskForString('Enter OpenSearch index directory') + if (len(indexDir) > 1) and os.path.isdir(indexDir): + indexDirFull = os.path.realpath(indexDir) + indexDir = ( + f"./{os.path.relpath(indexDirDefault, malcolm_install_path)}" + if 
same_file_or_dir(indexDirDefault, indexDirFull) + else indexDirFull + ) + break + + # opensearch snapshot repository directory and compression + if not InstallerYesOrNo( + 'Store OpenSearch index snapshots locally in {}?'.format(indexSnapshotDirDefault), + default=True, + ): + while True: + indexSnapshotDir = InstallerAskForString('Enter OpenSearch index snapshot directory') + if (len(indexSnapshotDir) > 1) and os.path.isdir(indexSnapshotDir): + indexSnapshotDirFull = os.path.realpath(indexSnapshotDir) + indexSnapshotDir = ( + f"./{os.path.relpath(indexSnapshotDirDefault, malcolm_install_path)}" + if same_file_or_dir(indexSnapshotDirDefault, indexSnapshotDirFull) + else indexSnapshotDirFull + ) + break + + # make sure paths specified (and their necessary children) exist + for pathToCreate in ( + indexDirFull, + indexSnapshotDirFull, + os.path.join(pcapDirFull, 'processed'), + os.path.join(pcapDirFull, 'upload'), + os.path.join(suricataLogDirFull, 'live'), + os.path.join(zeekLogDirFull, 'current'), + os.path.join(zeekLogDirFull, 'live'), + os.path.join(zeekLogDirFull, 'upload'), + os.path.join(zeekLogDirFull, os.path.join('extract_files', 'preserved')), + os.path.join(zeekLogDirFull, os.path.join('extract_files', 'quarantine')), + ): + try: + if args.debug: + eprint(f"Creating {pathToCreate}") + pathlib.Path(pathToCreate).mkdir(parents=True, exist_ok=True) + if ( + ((self.platform == PLATFORM_LINUX) or (self.platform == PLATFORM_MAC)) + and (self.scriptUser == "root") + and (getpwuid(os.stat(pathToCreate).st_uid).pw_name == self.scriptUser) + ): + if args.debug: + eprint(f"Setting permissions of {pathToCreate} to {puid}:{pgid}") + # change ownership of newly-created directory to match puid/pgid + os.chown(pathToCreate, int(puid), int(pgid)) + except Exception as e: + eprint(f"Creating {pathToCreate} failed: {e}") + + indexSnapshotCompressed = InstallerYesOrNo('Compress OpenSearch index snapshots?', default=False) # delete oldest indexes based on index pattern size 
indexPruneSizeLimit = '0' @@ -696,12 +819,17 @@ def tweak_malcolm_runtime( pcapIface = 'lo' tweakIface = False pcapFilter = '' + arkimeManagePCAP = False if InstallerYesOrNo( 'Should Malcolm capture live network traffic to PCAP files for analysis with Arkime?', default=False ): pcapNetSniff = InstallerYesOrNo('Capture packets using netsniff-ng?', default=True) pcapTcpDump = InstallerYesOrNo('Capture packets using tcpdump?', default=(not pcapNetSniff)) + arkimeManagePCAP = InstallerYesOrNo( + 'Should Arkime delete PCAP files based on available storage (see https://arkime.com/faq#pcap-deletion)?', + default=False, + ) liveSuricata = InstallerYesOrNo('Should Malcolm analyze live network traffic with Suricata?', default=False) liveZeek = InstallerYesOrNo('Should Malcolm analyze live network traffic with Zeek?', default=False) @@ -880,6 +1008,12 @@ def tweak_malcolm_runtime( r'(PCAP_ENABLE_TCPDUMP\s*:\s*)(\S+)', fr"\g<1>{TrueOrFalseQuote(pcapTcpDump)}", line ) + elif 'MANAGE_PCAP_FILES' in line: + # Whether or not Arkime is allowed to delete uploaded/captured PCAP + line = re.sub( + r'(MANAGE_PCAP_FILES\s*:\s*)(\S+)', fr"\g<1>{TrueOrFalseQuote(arkimeManagePCAP)}", line + ) + elif 'ZEEK_LIVE_CAPTURE' in line: # live traffic analysis with Zeek line = re.sub( @@ -1079,21 +1213,59 @@ def tweak_malcolm_runtime( # whether or not to restart services automatically (on boot, etc.) 
line = f"{sectionIndents[currentSection] * 2}restart: {restartMode}" - elif currentService == 'opensearch': - # stuff specifically in the opensearch section - if 'OPENSEARCH_JAVA_OPTS' in line: - # OpenSearch memory allowance - line = re.sub(r'(-Xm[sx])(\w+)', fr'\g<1>{osMemory}', line) + elif currentService == 'arkime': + # stuff specifically in the arkime section + if re.match(r'^\s*-.+:/data/pcap(:.+)?\s*$', line): + # Arkime's reference to the PCAP directory + line = ReplaceBindMountLocation( + line, + pcapDir, + sectionIndents[currentSection] * 3, + ) - elif ( - re.match(r'^\s*-.+:/opt/opensearch/backup(:.+)?\s*$', line) - and (indexSnapshotDir is not None) - and os.path.isdir(indexSnapshotDir) - ): - # OpenSearch backup directory - volumeParts = line.strip().lstrip('-').lstrip().split(':') - volumeParts[0] = indexSnapshotDir - line = "{}- {}".format(sectionIndents[currentSection] * 3, ':'.join(volumeParts)) + elif currentService == 'filebeat': + # stuff specifically in the filebeat section + if re.match(r'^[\s#]*-\s*"([\d\.]+:)?\d+:\d+"\s*$', line): + # set bind IP based on whether it should be externally exposed or not + line = re.sub( + r'^([\s#]*-\s*")([\d\.]+:)?(\d+:\d+"\s*)$', + fr"\g<1>{'0.0.0.0' if filebeatTcpOpen else '127.0.0.1'}:\g<3>", + line, + ) + + elif re.match(r'^\s*-.+:/suricata(:.+)?\s*$', line): + # filebeat's reference to the suricata-logs directory + line = ReplaceBindMountLocation( + line, + suricataLogDir, + sectionIndents[currentSection] * 3, + ) + + elif re.match(r'^\s*-.+:/zeek(:.+)?\s*$', line): + # filebeat's reference to the zeek-logs directory + line = ReplaceBindMountLocation( + line, + zeekLogDir, + sectionIndents[currentSection] * 3, + ) + + elif currentService == 'file-monitor': + # stuff specifically in the file-monitor section + if re.match(r'^\s*-.+:/zeek/extract_files(:.+)?\s*$', line): + # file-monitor's reference to the zeek-logs/extract_files directory + line = ReplaceBindMountLocation( + line, + 
os.path.join(zeekLogDir, 'extract_files'), + sectionIndents[currentSection] * 3, + ) + + elif re.match(r'^\s*-.+:/zeek/logs(:.+)?\s*$', line): + # zeek's reference to the zeek-logs/current directory + line = ReplaceBindMountLocation( + line, + os.path.join(zeekLogDir, 'current'), + sectionIndents[currentSection] * 3, + ) elif currentService == 'logstash': # stuff specifically in the logstash section @@ -1109,14 +1281,82 @@ def tweak_malcolm_runtime( line, ) - elif currentService == 'filebeat': - # stuff specifically in the filebeat section - if re.match(r'^[\s#]*-\s*"([\d\.]+:)?\d+:\d+"\s*$', line): - # set bind IP based on whether it should be externally exposed or not - line = re.sub( - r'^([\s#]*-\s*")([\d\.]+:)?(\d+:\d+"\s*)$', - fr"\g<1>{'0.0.0.0' if filebeatTcpOpen else '127.0.0.1'}:\g<3>", + elif currentService == 'opensearch': + # stuff specifically in the opensearch section + if 'OPENSEARCH_JAVA_OPTS' in line: + # OpenSearch memory allowance + line = re.sub(r'(-Xm[sx])(\w+)', fr'\g<1>{osMemory}', line) + + elif re.match(r'^\s*-.+:/usr/share/opensearch/data(:.+)?\s*$', line): + # OpenSearch indexes directory + line = ReplaceBindMountLocation( line, + indexDir, + sectionIndents[currentSection] * 3, + ) + + elif re.match(r'^\s*-.+:/opt/opensearch/backup(:.+)?\s*$', line): + # OpenSearch backup directory + line = ReplaceBindMountLocation( + line, + indexSnapshotDir, + sectionIndents[currentSection] * 3, + ) + + elif currentService == 'pcap-capture': + # stuff specifically in the pcap-capture section + if re.match(r'^\s*-.+:/pcap(:.+)?\s*$', line): + # pcap-capture's reference to the PCAP directory + line = ReplaceBindMountLocation( + line, + os.path.join(pcapDir, 'upload'), + sectionIndents[currentSection] * 3, + ) + + elif currentService == 'pcap-monitor': + # stuff specifically in the pcap-monitor section + if re.match(r'^\s*-.+:/pcap(:.+)?\s*$', line): + # pcap-monitor's reference to the PCAP directory + line = ReplaceBindMountLocation( + line, + pcapDir, + 
sectionIndents[currentSection] * 3, + ) + + elif re.match(r'^\s*-.+:/zeek(:.+)?\s*$', line): + # pcap-monitor's reference to the zeek-logs directory + line = ReplaceBindMountLocation( + line, + zeekLogDir, + sectionIndents[currentSection] * 3, + ) + + elif currentService == 'suricata': + # stuff specifically in the suricata section + if re.match(r'^\s*-.+:/data/pcap(:.+)?\s*$', line): + # Suricata's reference to the PCAP directory + line = ReplaceBindMountLocation( + line, + pcapDir, + sectionIndents[currentSection] * 3, + ) + + elif re.match(r'^\s*-.+:/var/log/suricata(:.+)?\s*$', line): + # suricata's reference to the suricata-logs directory + line = ReplaceBindMountLocation( + line, + suricataLogDir, + sectionIndents[currentSection] * 3, + ) + + elif currentService == 'suricata-live': + # stuff specifically in the suricata-live section + if re.match(r'^\s*-.+:/var/log/suricata(:.+)?\s*$', line): + # suricata-live's reference to the suricata-logs directory + line = ReplaceBindMountLocation( + line, + suricataLogDir, + sectionIndents[currentSection] * 3, ) elif currentService == 'upload': @@ -1129,6 +1369,58 @@ def tweak_malcolm_runtime( line, ) + elif re.match(r'^\s*-.+:/var/www/upload/server/php/chroot/files(:.+)?\s*$', line): + # upload's reference to the PCAP directory + line = ReplaceBindMountLocation( + line, + os.path.join(pcapDir, 'upload'), + sectionIndents[currentSection] * 3, + ) + + elif currentService == 'zeek': + # stuff specifically in the zeek section + if re.match(r'^\s*-.+:/pcap(:.+)?\s*$', line): + # Zeek's reference to the PCAP directory + line = ReplaceBindMountLocation( + line, + pcapDir, + sectionIndents[currentSection] * 3, + ) + + elif re.match(r'^\s*-.+:/zeek/upload(:.+)?\s*$', line): + # zeek's reference to the zeek-logs/upload directory + line = ReplaceBindMountLocation( + line, + os.path.join(zeekLogDir, 'upload'), + sectionIndents[currentSection] * 3, + ) + + elif re.match(r'^\s*-.+:/zeek/extract_files(:.+)?\s*$', line): + # zeek's 
reference to the zeek-logs/extract_files directory + line = ReplaceBindMountLocation( + line, + os.path.join(zeekLogDir, 'extract_files'), + sectionIndents[currentSection] * 3, + ) + + elif currentService == 'zeek-live': + # stuff specifically in the zeek-live section + if re.match(r'^\s*-.+:/zeek/live(:.+)?\s*$', line): + # zeek-live's reference to the zeek-logs/live directory + line = ReplaceBindMountLocation( + line, + os.path.join(zeekLogDir, 'live'), + sectionIndents[currentSection] * 3, + ) + + elif re.match(r'^\s*-.+:/zeek/extract_files(:.+)?\s*$', line): + # zeek-live's reference to the zeek-logs/extract_files directory + line = ReplaceBindMountLocation( + line, + os.path.join(zeekLogDir, 'extract_files'), + sectionIndents[currentSection] * 3, + ) + elif currentService == 'nginx-proxy': # stuff specifically in the nginx-proxy section @@ -2062,6 +2354,7 @@ def install_docker(self): def main(): global args global requests_imported + global yaml_imported # extract arguments from the command line # print (sys.argv[1:]); @@ -2187,9 +2480,11 @@ def main(): sys.tracebacklimit = 0 requests_imported = RequestsDynamic(debug=args.debug, forceInteraction=(not args.acceptDefaultsNonInteractive)) + yaml_imported = YAMLDynamic(debug=args.debug, forceInteraction=(not args.acceptDefaultsNonInteractive)) if args.debug: eprint(f"Imported requests: {requests_imported}") - if not requests_imported: + eprint(f"Imported yaml: {yaml_imported}") + if (not requests_imported) or (not yaml_imported): exit(2) # If Malcolm and images tarballs are provided, we will use them.
diff --git a/scripts/malcolm_appliance_packager.sh b/scripts/malcolm_appliance_packager.sh index ca822a86a..fdc488a24 100755 --- a/scripts/malcolm_appliance_packager.sh +++ b/scripts/malcolm_appliance_packager.sh @@ -82,7 +82,8 @@ if mkdir "$DESTDIR"; then mkdir $VERBOSE -p "$DESTDIR/suricata/rules/" mkdir $VERBOSE -p "$DESTDIR/yara/rules/" mkdir $VERBOSE -p "$DESTDIR/zeek-logs/current/" - mkdir $VERBOSE -p "$DESTDIR/zeek-logs/extract_files/" + mkdir $VERBOSE -p "$DESTDIR/zeek-logs/extract_files/preserved" + mkdir $VERBOSE -p "$DESTDIR/zeek-logs/extract_files/quarantine" mkdir $VERBOSE -p "$DESTDIR/zeek-logs/live/" mkdir $VERBOSE -p "$DESTDIR/zeek-logs/processed/" mkdir $VERBOSE -p "$DESTDIR/zeek-logs/upload/" diff --git a/scripts/malcolm_common.py b/scripts/malcolm_common.py index 1e9a81f5a..43b1eb713 100644 --- a/scripts/malcolm_common.py +++ b/scripts/malcolm_common.py @@ -14,7 +14,6 @@ import sys import time -from collections import defaultdict from enum import IntFlag, auto try: @@ -23,6 +22,14 @@ getpwuid = None from subprocess import PIPE, STDOUT, Popen, CalledProcessError + +from collections import defaultdict, namedtuple + +try: + from collections.abc import Iterable +except ImportError: + from collections import Iterable + try: + from dialog import Dialog @@ -57,6 +64,12 @@ class UserInterfaceMode(IntFlag): InteractionInput = auto() +BoundPath = namedtuple( + "BoundPath", + ["service", "container_dir", "files", "relative_dirs", "clean_empty_dirs"], + rename=False, +) + # URLS for figuring things out if something goes wrong DOCKER_INSTALL_URLS = defaultdict(lambda: 'https://docs.docker.com/install/') DOCKER_INSTALL_URLS[PLATFORM_WINDOWS] = [ @@ -136,6 +149,52 @@ def UnescapeForCurl(s): ) +################################################################################################### +# if the object is an iterable, return it, otherwise return a tuple with it as a single element. +# useful if you want to use either a scalar or an array in a loop, etc.
+def GetIterable(x): + if isinstance(x, Iterable) and not isinstance(x, str): + return x + else: + return (x,) + + +################################################################################################## +def ReplaceBindMountLocation(line, location, linePrefix): + if os.path.isdir(location): + volumeParts = line.strip().lstrip('-').lstrip().split(':') + volumeParts[0] = location + return "{}- {}".format(linePrefix, ':'.join(volumeParts)) + else: + return line + + +################################################################################################## +def LocalPathForContainerBindMount(service, dockerComposeContents, containerPath, localBasePath=None): + localPath = None + if service and dockerComposeContents and containerPath: + vols = DeepGet(dockerComposeContents, ['services', service, 'volumes']) + if (vols is not None) and (len(vols) > 0): + for vol in vols: + volSplit = vol.split(':') + if (len(volSplit) >= 2) and (volSplit[1] == containerPath): + if localBasePath and not os.path.isabs(volSplit[0]): + localPath = os.path.realpath(os.path.join(localBasePath, volSplit[0])) + else: + localPath = volSplit[0] + break + + return localPath + + +################################################################################################### +def same_file_or_dir(path1, path2): + try: + return os.path.samefile(path1, path2) + except Exception: + return False + + ################################################################################################### # parse a curl-formatted config file, with special handling for user:password and URL # see https://everything.curl.dev/cmdline/configfile @@ -540,6 +599,23 @@ def LoadStrIfJson(jsonStr): return None +################################################################################################### +# safe deep get for a dictionary +# +# Example: +# d = {'meta': {'status': 'OK', 'status_code': 200}} +# DeepGet(d, ['meta', 'status_code']) # => 200 +# DeepGet(d, ['garbage', 
'status_code']) # => None +# DeepGet(d, ['meta', 'garbage'], default='-') # => '-' +def DeepGet(d, keys, default=None): + assert type(keys) is list + if d is None: + return default + if not keys: + return d + return DeepGet(d.get(keys[0]), keys[1:], default) + + ################################################################################################### # run command with arguments and return its exit code, stdout, and stderr def check_output_input(*popenargs, **kwargs): @@ -678,6 +754,10 @@ def RequestsDynamic(debug=False, forceInteraction=False): return DoDynamicImport("requests", "requests", interactive=forceInteraction, debug=debug) +def YAMLDynamic(debug=False, forceInteraction=False): + return DoDynamicImport("yaml", "pyyaml", interactive=forceInteraction, debug=debug) + + ################################################################################################### # do the required auth files for Malcolm exist? def MalcolmAuthFilesExist(): diff --git a/scripts/third-party-logs/fluent-bit-setup.ps1 b/scripts/third-party-logs/fluent-bit-setup.ps1 index 9001c0467..7b1bf2537 100644 --- a/scripts/third-party-logs/fluent-bit-setup.ps1 +++ b/scripts/third-party-logs/fluent-bit-setup.ps1 @@ -9,7 +9,7 @@ ############################################################################### $fluent_bit_version = '2.0' -$fluent_bit_full_version = '2.0.4' +$fluent_bit_full_version = '2.0.6' ############################################################################### # select an item from a menu provided in an array diff --git a/sensor-iso/beats/Dockerfile b/sensor-iso/beats/Dockerfile index 07c47de42..fb6e0770c 100644 --- a/sensor-iso/beats/Dockerfile +++ b/sensor-iso/beats/Dockerfile @@ -41,7 +41,7 @@ RUN set -x && \ go run bootstrap.go ENV BEATS=filebeat -ENV BEATS_VERSION=8.5.1 +ENV BEATS_VERSION=8.5.2 ADD ./build.sh /build.sh RUN [ "chmod", "+x", "/build.sh" ] diff --git a/sensor-iso/beats/beat-build.sh b/sensor-iso/beats/beat-build.sh index 
0eb671c01..f8851517a 100755 --- a/sensor-iso/beats/beat-build.sh +++ b/sensor-iso/beats/beat-build.sh @@ -2,7 +2,7 @@ # Copyright (c) 2022 Battelle Energy Alliance, LLC. All rights reserved. -VERSION="8.5.1" +VERSION="8.5.2" THIRD_PARTY_BRANCH="master" while getopts b:v:t: opts; do case ${opts} in diff --git a/sensor-iso/build.sh b/sensor-iso/build.sh index 145dfb06e..eb9f51db9 100755 --- a/sensor-iso/build.sh +++ b/sensor-iso/build.sh @@ -135,6 +135,8 @@ if [ -d "$WORKDIR" ]; then # format and copy documentation [[ -f "$SCRIPT_PATH/shared/environment.chroot" ]] && \ . "$SCRIPT_PATH/shared/environment.chroot" + sed -i "s/^\(show_downloads:\).*/\1 false/" "$SCRIPT_PATH"/_config.yml + sed -i -e "/^mastodon:/,+2d" "$SCRIPT_PATH"/_config.yml bash "$SCRIPT_PATH/docs/documentation_build.sh" -v -r "${VCS_REVSION:-main}" -t "${GITHUB_TOKEN:-}" mkdir -p ./config/includes.chroot/usr/share/doc cp -r "$SCRIPT_PATH/_site" ./config/includes.chroot/usr/share/doc/hedgehog diff --git a/sensor-iso/config/hooks/normal/0910-sensor-build.hook.chroot b/sensor-iso/config/hooks/normal/0910-sensor-build.hook.chroot index 462d2d1f8..df5235222 100755 --- a/sensor-iso/config/hooks/normal/0910-sensor-build.hook.chroot +++ b/sensor-iso/config/hooks/normal/0910-sensor-build.hook.chroot @@ -13,14 +13,14 @@ GITHUB_API_CURL_ARGS+=( -H ) GITHUB_API_CURL_ARGS+=( "Accept: application/vnd.github.v3+json" ) [[ -n "$GITHUB_TOKEN" ]] && GITHUB_API_CURL_ARGS+=( -H ) && GITHUB_API_CURL_ARGS+=( "Authorization: token $GITHUB_TOKEN" ) -ZEEK_VER=5.0.3-0 +ZEEK_VER=5.0.4-0 ZEEK_LTS=true ZEEK_DIR="/opt/zeek" export PATH="${ZEEK_DIR}"/bin:$PATH SURICATA_RULES_DIR="/etc/suricata/rules" -BEATS_VER="8.5.1" +BEATS_VER="8.5.2" BEATS_OSS="-oss" BEATS_DEB_URL_TEMPLATE_REPLACER="XXXXX" BEATS_DEB_URL_TEMPLATE="https://artifacts.elastic.co/downloads/beats/$BEATS_DEB_URL_TEMPLATE_REPLACER/$BEATS_DEB_URL_TEMPLATE_REPLACER$BEATS_OSS-$BEATS_VER-amd64.deb" diff --git a/sensor-iso/config/package-lists/python.list.chroot 
b/sensor-iso/config/package-lists/python.list.chroot index 4660ff230..385f990d6 100644 --- a/sensor-iso/config/package-lists/python.list.chroot +++ b/sensor-iso/config/package-lists/python.list.chroot @@ -16,5 +16,6 @@ python3-semantic-version python3-setuptools python3-tz python3-wheel +python3-yaml python3-yara python3-zmq \ No newline at end of file