diff --git a/.dockerignore b/.dockerignore index 2b544d5934..a420bea9af 100644 --- a/.dockerignore +++ b/.dockerignore @@ -45,3 +45,6 @@ # Exclude JetBrains IDE files /.idea/ + +# Exclude Compose environment file +/docker/dev/.env diff --git a/.gitignore b/.gitignore index 1b16979d92..7a6aec9412 100644 --- a/.gitignore +++ b/.gitignore @@ -41,3 +41,6 @@ vagrant/*.log # Exclude JetBrains IDE files .idea/ + +# Exclude Compose environment file +docker/dev/.env diff --git a/docker/dev/.env.template b/docker/dev/.env.template new file mode 100644 index 0000000000..9e5162165a --- /dev/null +++ b/docker/dev/.env.template @@ -0,0 +1,15 @@ +GIFT_PPA_TRACK="stable" +GIFT_PPA_URL="https://ppa.launchpadcontent.net/gift/${GIFT_PPA_TRACK}/ubuntu" +NODE_VERSION="18.x" +NODE_PPA_URL="https://deb.nodesource.com/node_${NODE_VERSION}" +NODE_NPMRC="" +PYTHON_PIP_CONF="" + +TIMESKETCH_BASE_IMAGE="ubuntu:22.04" +TIMESKETCH_CONF_DIR="/etc/timesketch" +TIMESKETCH_SECRET_KEY="L4np0jV3yAdAFdbVzWRMaBqiFMV8FKYd+Je1WKE40o8=" +TIMESKETCH_USER="dev" +TIMESKETCH_PASSWORD="dev" + +POSTGRES_USER="timesketch" +POSTGRES_PASSWORD="password" diff --git a/docker/dev/README.md b/docker/dev/README.md index bd2c5e7896..a31f678031 100644 --- a/docker/dev/README.md +++ b/docker/dev/README.md @@ -5,6 +5,19 @@ Make sure to follow the docker [post-install](https://docs.docker.com/engine/ins NOTE: It is not recommended to try to run on a system with less than 8 GB of RAM. +### Prepare a .env file + +Compose requires a `.env` file to be present. +Copy the `.env.template` file to `.env`; the copy is not under version control. + +```bash +cp .env.template .env +``` + +You can optionally edit the `.env` file. +This is useful if you need to build the images under company network restrictions (for example, +mirrored Ubuntu, Python or Node repositories). + ### Start a developer version of docker containers in this directory ```bash @@ -18,36 +31,32 @@ If you see the following message you can continue ```text Timesketch development server is ready!
``` -### Find out container ID for the timesketch container - -```bash -CONTAINER_ID="$(docker container list -f name=timesketch-dev -q)" -``` - -In the output look for CONTAINER ID for the timesketch container - -To write the ID to a variable, use: - -```bash -export CONTAINER_ID="$(docker container list -f name=timesketch-dev -q)" -``` - -and test with - -```bash -echo $CONTAINER_ID -``` ### Start a celery container shell +Run the command in the foreground (add `-d` to run it in the background): + ```bash -docker exec -it $CONTAINER_ID celery -A timesketch.lib.tasks worker --loglevel info +docker compose exec timesketch \ + celery \ + -A timesketch.lib.tasks \ + worker \ + --loglevel info ``` ### Start development webserver (and metrics server) +Run the command in the foreground (add `-d` to run it in the background): + ```bash -docker exec -it $CONTAINER_ID gunicorn --reload -b 0.0.0.0:5000 --log-file - --timeout 600 -c /usr/local/src/timesketch/data/gunicorn_config.py timesketch.wsgi:application +docker compose exec timesketch \ + gunicorn \ + --reload \ + -b 0.0.0.0:5000 \ + --log-file - \ + --timeout 600 \ + -c /usr/local/src/timesketch/data/gunicorn_config.py \ + timesketch.wsgi:application ``` You now can access your development version at http://127.0.0.1:5000/ @@ -58,18 +67,21 @@ You can also access a metrics dashboard at http://127.0.0.1:3000/ ### Non-interactive -Running the following as a script after `docker compose up -d` will bring up the development environment in the background for you. +The `restart.sh` script runs the two previous commands in the background for you.
```bash -export CONTAINER_ID="$(docker container list -f name=timesketch-dev -q)" -docker exec $CONTAINER_ID celery -A timesketch.lib.tasks worker --loglevel info -docker exec $CONTAINER_ID gunicorn --reload -b 0.0.0.0:5000 --log-file - --timeout 120 timesketch.wsgi:application +docker compose up -d +./restart.sh ``` ### Run tests ```bash -docker exec -w /usr/local/src/timesketch -it $CONTAINER_ID python3 run_tests.py --coverage +docker compose exec \ + -w /usr/local/src/timesketch \ + -it \ + timesketch \ + python3 run_tests.py --coverage ``` That will run all tests in your docker container. It is recommended to run all tests at least before creating a pull request. diff --git a/docker/dev/build/Dockerfile b/docker/dev/build/Dockerfile deleted file mode 100644 index 1e2e269897..0000000000 --- a/docker/dev/build/Dockerfile +++ /dev/null @@ -1,55 +0,0 @@ -# Use the official Docker Hub Ubuntu base image -FROM ubuntu:22.04 - -ARG PPA_TRACK=stable - -# Prevent needing to configure debian packages, stopping the setup of -# the docker container. 
-RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections - -RUN apt-get update && apt-get install -y --no-install-recommends \ - software-properties-common \ - apt-transport-https \ - apt-utils \ - ca-certificates \ - curl \ - git \ - gpg-agent \ - python3-dev \ - python3-pip \ - python3-wheel \ - python3-setuptools \ - python3-psycopg2 \ - tzdata \ - && rm -rf /var/lib/apt/lists/* - -# Install Plaso -RUN add-apt-repository -y ppa:gift/$PPA_TRACK -RUN apt-get update && apt-get install -y --no-install-recommends \ - plaso-tools \ - && rm -rf /var/lib/apt/lists/* - -# Install NodeJS for frontend development -RUN curl -sL https://deb.nodesource.com/setup_18.x -o nodesource_setup.sh -RUN bash nodesource_setup.sh -RUN apt-get update && apt-get install -y --no-install-recommends \ - nodejs \ - && rm -rf /var/lib/apt/lists/* - -# Install Yarn for frontend development -RUN npm install --global yarn - -# Install dependencies for Timesketch -COPY ./requirements.txt /timesketch-requirements.txt -RUN pip3 install -r /timesketch-requirements.txt - -# Install test dependencies for Timesketch -COPY ./test_requirements.txt /timesketch-test-requirements.txt -RUN pip3 install -r /timesketch-test-requirements.txt - -# Copy the entrypoint script into the container -COPY ./docker/dev/build/docker-entrypoint.sh / -RUN chmod a+x /docker-entrypoint.sh - -# Load the entrypoint script to be run later -ENTRYPOINT ["/docker-entrypoint.sh"] diff --git a/docker/dev/build/docker-entrypoint.sh b/docker/dev/build/docker-entrypoint.sh deleted file mode 100755 index 1dd8683cbc..0000000000 --- a/docker/dev/build/docker-entrypoint.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash - -# Run the container the default way -if [ "$1" = 'timesketch' ]; then - - # Install Timesketch in editable mode from volume - pip3 install -e /usr/local/src/timesketch/ - - # Copy config files - mkdir /etc/timesketch - cp /usr/local/src/timesketch/data/timesketch.conf /etc/timesketch/ - cp 
/usr/local/src/timesketch/data/regex_features.yaml /etc/timesketch/ - cp /usr/local/src/timesketch/data/winevt_features.yaml /etc/timesketch/ - cp /usr/local/src/timesketch/data/tags.yaml /etc/timesketch/ - cp /usr/local/src/timesketch/data/intelligence_tag_metadata.yaml /etc/timesketch/ - cp /usr/local/src/timesketch/data/plaso.mappings /etc/timesketch/ - cp /usr/local/src/timesketch/data/generic.mappings /etc/timesketch/ - cp /usr/local/src/timesketch/data/ontology.yaml /etc/timesketch/ - cp /usr/local/src/timesketch/data/data_finder.yaml /etc/timesketch/ - cp /usr/local/src/timesketch/data/bigquery_matcher.yaml /etc/timesketch/ - ln -s /usr/local/src/timesketch/data/sigma_config.yaml /etc/timesketch/sigma_config.yaml - ln -s /usr/local/src/timesketch/data/sigma /etc/timesketch/ - ln -s /usr/local/src/timesketch/data/dfiq /etc/timesketch/ - ln -s /usr/local/src/timesketch/data/context_links.yaml /etc/timesketch/context_links.yaml - ln -s /usr/local/src/timesketch/data/plaso_formatters.yaml /etc/timesketch/plaso_formatters.yaml - - # Set SECRET_KEY in /etc/timesketch/timesketch.conf if it isn't already set - if grep -q "SECRET_KEY = ''" /etc/timesketch/timesketch.conf; then - OPENSSL_RAND=$( openssl rand -base64 32 ) - # Using the pound sign as a delimiter to avoid problems with / being output from openssl - sed -i 's#SECRET_KEY = \x27\x3CKEY_GOES_HERE\x3E\x27#SECRET_KEY = \x27'$OPENSSL_RAND'\x27#' /etc/timesketch/timesketch.conf - fi - - # Set up the Postgres connection - if [ $POSTGRES_USER ] && [ $POSTGRES_PASSWORD ] && [ $POSTGRES_ADDRESS ] && [ $POSTGRES_PORT ]; then - sed -i 's#postgresql://:@localhost#postgresql://'$POSTGRES_USER':'$POSTGRES_PASSWORD'@'$POSTGRES_ADDRESS':'$POSTGRES_PORT'#' /etc/timesketch/timesketch.conf - else - # Log an error since we need the above-listed environment variables - echo "Please pass values for the POSTGRES_USER, POSTGRES_PASSWORD, POSTGRES_ADDRESS, and POSTGRES_PORT environment variables" - exit 1 - fi - - # Set up the 
OpenSearch connection - if [ $OPENSEARCH_HOST ] && [ $OPENSEARCH_PORT ]; then - sed -i 's#OPENSEARCH_HOST = \x27127.0.0.1\x27#OPENSEARCH_HOST = \x27'$OPENSEARCH_HOST'\x27#' /etc/timesketch/timesketch.conf - sed -i 's#OPENSEARCH_PORT = 9200#OPENSEARCH_PORT = '$OPENSEARCH_PORT'#' /etc/timesketch/timesketch.conf - else - # Log an error since we need the above-listed environment variables - echo "Please pass values for the ELASTIC_ADDRESS and ELASTIC_PORT environment variables" - fi - - # Set up the Redis connection - if [ $REDIS_ADDRESS ] && [ $REDIS_PORT ]; then - sed -i 's#UPLOAD_ENABLED = False#UPLOAD_ENABLED = True#' /etc/timesketch/timesketch.conf - sed -i 's#^CELERY_BROKER_URL =.*#CELERY_BROKER_URL = \x27redis://'$REDIS_ADDRESS':'$REDIS_PORT'\x27#' /etc/timesketch/timesketch.conf - sed -i 's#^CELERY_RESULT_BACKEND =.*#CELERY_RESULT_BACKEND = \x27redis://'$REDIS_ADDRESS':'$REDIS_PORT'\x27#' /etc/timesketch/timesketch.conf - else - # Log an error since we need the above-listed environment variables - echo "Please pass values for the REDIS_ADDRESS and REDIS_PORT environment variables" - fi - - # Enable debug for the development server - sed -i s/"DEBUG = False"/"DEBUG = True"/ /etc/timesketch/timesketch.conf - - # Enable index and sketch analyzers - sed -i s/"ENABLE_INDEX_ANALYZERS = False"/"ENABLE_INDEX_ANALYZERS = True"/ /etc/timesketch/timesketch.conf - sed -i s/"ENABLE_SKETCH_ANALYZERS = False"/"ENABLE_SKETCH_ANALYZERS = True"/ /etc/timesketch/timesketch.conf - sed -i s/"ENABLE_EXPERIMENTAL_UI = False"/"ENABLE_EXPERIMENTAL_UI = True"/ /etc/timesketch/timesketch.conf - - # Disable CSRF checks for the development server - echo "WTF_CSRF_ENABLED = False" >> /etc/timesketch/timesketch.conf - - # Add web user - tsctl create-user --password "${TIMESKETCH_USER}" "${TIMESKETCH_USER}" - - # Add Sigma rules - git clone https://github.com/SigmaHQ/sigma /usr/local/src/sigma - # for each line in sigma_rules.txt execute the command - for line in $(cat sigma_rules.txt); do - 
tsctl import-sigma-rules $line - done - - # Wrap up things - echo "Timesketch development server is ready!" - - # Sleep forever to keep the container running - sleep infinity -fi - -# Run a custom command on container start -exec "$@" diff --git a/docker/dev/compose.yaml b/docker/dev/compose.yaml index bdb9a61dd1..fb8737fa92 100644 --- a/docker/dev/compose.yaml +++ b/docker/dev/compose.yaml @@ -6,26 +6,42 @@ networks: services: timesketch: image: us-docker.pkg.dev/osdfir-registry/timesketch/dev:latest + build: + context: ../.. + dockerfile: docker/dev/timesketch/Dockerfile + args: + BASE_IMAGE: "${TIMESKETCH_BASE_IMAGE}" + GIFT_PPA_TRACK: "${GIFT_PPA_TRACK}" + GIFT_PPA_URL: "${GIFT_PPA_URL}" + NODE_VERSION: "${NODE_VERSION}" + NODE_PPA_URL: "${NODE_PPA_URL}" + NODE_NPMRC: "${NODE_NPMRC}" + PYTHON_PIP_CONF: "${PYTHON_PIP_CONF}" command: timesketch ports: - "5000:5000" - "5001:5001" - "8080:8080" - environment: - POSTGRES_USER: "timesketch" - POSTGRES_PASSWORD: "password" - POSTGRES_ADDRESS: "postgres" - POSTGRES_PORT: "5432" - OPENSEARCH_HOST: "opensearch" - OPENSEARCH_PORT: "9200" - REDIS_ADDRESS: "redis" - REDIS_PORT: "6379" - TIMESKETCH_USER: "dev" - TIMESKETCH_PASSWORD: "dev" - CHOKIDAR_USEPOLLING: "true" - prometheus_multiproc_dir: "/tmp/" + env_file: + - timesketch/timesketch.env volumes: - "../../:/usr/local/src/timesketch/" + - "./timesketch/timesketch.conf:${TIMESKETCH_CONF_DIR}/timesketch.conf:ro" + - "./timesketch/sigma_rules.txt:${TIMESKETCH_CONF_DIR}/sigma_rules.txt:ro" + - "../../data/regex_features.yaml:${TIMESKETCH_CONF_DIR}/regex_features.yaml:ro" + - "../../data/winevt_features.yaml:${TIMESKETCH_CONF_DIR}/winevt_features.yaml:ro" + - "../../data/tags.yaml:${TIMESKETCH_CONF_DIR}/tags.yaml:ro" + - "../../data/intelligence_tag_metadata.yaml:${TIMESKETCH_CONF_DIR}/intelligence_tag_metadata.yaml:ro" + - "../../data/plaso.mappings:${TIMESKETCH_CONF_DIR}/plaso.mappings:ro" + - "../../data/generic.mappings:${TIMESKETCH_CONF_DIR}/generic.mappings:ro" + - 
"../../data/ontology.yaml:${TIMESKETCH_CONF_DIR}/ontology.yaml:ro" + - "../../data/data_finder.yaml:${TIMESKETCH_CONF_DIR}/data_finder.yaml:ro" + - "../../data/bigquery_matcher.yaml:${TIMESKETCH_CONF_DIR}/bigquery_matcher.yaml:ro" + - "../../data/sigma_config.yaml:${TIMESKETCH_CONF_DIR}/sigma_config.yaml:ro" + - "../../data/sigma:${TIMESKETCH_CONF_DIR}/sigma:ro" + - "../../data/dfiq:${TIMESKETCH_CONF_DIR}/dfiq:ro" + - "../../data/context_links.yaml:${TIMESKETCH_CONF_DIR}/context_links.yaml:ro" + - "../../data/plaso_formatters.yaml:${TIMESKETCH_CONF_DIR}/plaso_formatters.yaml:ro" depends_on: - opensearch - postgres @@ -35,13 +51,8 @@ services: opensearch: image: opensearchproject/opensearch:2.15.0 - environment: - discovery.type: "single-node" - bootstrap.memory_lock: "true" - network.host: "0.0.0.0" - OPENSEARCH_JAVA_OPTS: "-Xms2g -Xmx2g" - DISABLE_INSTALL_DEMO_CONFIG: "true" - DISABLE_SECURITY_PLUGIN: "true" # TODO: Enable when we have migrated the python client to Opensearch as well. + env_file: + - opensearch/opensearch.env ports: - "9200:9200" networks: @@ -56,9 +67,8 @@ services: postgres: image: postgres:13.1-alpine - environment: - POSTGRES_USER: "timesketch" - POSTGRES_PASSWORD: "password" + env_file: + - postgresql/postgresql.env ports: - "5432:5432" networks: @@ -73,6 +83,11 @@ services: notebook: image: us-docker.pkg.dev/osdfir-registry/timesketch/notebook:latest + build: + context: ../.. 
+ dockerfile: docker/dev/notebook/Dockerfile + args: + PYTHON_PIP_CONF: "${PYTHON_PIP_CONF}" ports: - "8844:8844" volumes: diff --git a/docker/dev/notebook/Dockerfile b/docker/dev/notebook/Dockerfile index 7c3d94f747..c97d22053b 100644 --- a/docker/dev/notebook/Dockerfile +++ b/docker/dev/notebook/Dockerfile @@ -18,8 +18,14 @@ COPY --chown=1000:1000 docker/dev/notebook/logo.png /home/picatrix/.jupyter/cust COPY --chown=1000:1000 docker/dev/notebook/custom.css /home/picatrix/.jupyter/custom/custom.css COPY --chown=1000:1000 docker/dev/notebook/timesketch /home/picatrix/picenv/share/jupyter/nbextensions/timesketch +ARG PYTHON_PIP_CONF="" +RUN if [ -n "${PYTHON_PIP_CONF}" ]; then \ + mkdir -p ~/.config/pip; \ + env echo -e "${PYTHON_PIP_CONF}" > ~/.config/pip/pip.conf; \ + fi + RUN sed -i -e "s/c.NotebookApp.token = 'picatrix'/c.NotebookApp.token = 'timesketch'/g" /home/picatrix/.jupyter/jupyter_notebook_config.py && \ - sed -i -e "s/c.NotebookApp.port = 8899/c.NotebookApp.port = 8844/g" /home/picatrix/.jupyter/jupyter_notebook_config.py && \ + sed -i -e "s/c.NotebookApp.port = 8899/c.NotebookApp.port = ${JUPYTER_PORT}/g" /home/picatrix/.jupyter/jupyter_notebook_config.py && \ pip install -e /home/picatrix/code/api_client/python && \ pip install -e /home/picatrix/code/importer_client/python/ && \ jupyter nbextension enable snippets/main && \ diff --git a/docker/dev/opensearch.env b/docker/dev/opensearch.env new file mode 100644 index 0000000000..3176c6728d --- /dev/null +++ b/docker/dev/opensearch.env @@ -0,0 +1,6 @@ +discovery.type="single-node" +bootstrap.memory_lock="true" +network.host="0.0.0.0" +OPENSEARCH_JAVA_OPTS="-Xms2g -Xmx2g" +DISABLE_INSTALL_DEMO_CONFIG="true" +DISABLE_SECURITY_PLUGIN="true" # TODO: Enable when we have migrated the python client to Opensearch as well. 
diff --git a/docker/dev/opensearch/opensearch.env b/docker/dev/opensearch/opensearch.env new file mode 100644 index 0000000000..3176c6728d --- /dev/null +++ b/docker/dev/opensearch/opensearch.env @@ -0,0 +1,6 @@ +discovery.type="single-node" +bootstrap.memory_lock="true" +network.host="0.0.0.0" +OPENSEARCH_JAVA_OPTS="-Xms2g -Xmx2g" +DISABLE_INSTALL_DEMO_CONFIG="true" +DISABLE_SECURITY_PLUGIN="true" # TODO: Enable when we have migrated the python client to Opensearch as well. diff --git a/docker/dev/postgresql/postgresql.env b/docker/dev/postgresql/postgresql.env new file mode 100644 index 0000000000..9cbb09d159 --- /dev/null +++ b/docker/dev/postgresql/postgresql.env @@ -0,0 +1,2 @@ +POSTGRES_USER="${POSTGRES_USER}" +POSTGRES_PASSWORD="${POSTGRES_PASSWORD}" diff --git a/docker/dev/restart.sh b/docker/dev/restart.sh index 6e92f54948..514e964a59 100755 --- a/docker/dev/restart.sh +++ b/docker/dev/restart.sh @@ -1,4 +1,16 @@ -#!/bin/bash -export CONTAINER_ID="$(docker container list -f name=timesketch-dev -q)" -docker exec -d $CONTAINER_ID celery -A timesketch.lib.tasks worker --loglevel info -docker exec -d $CONTAINER_ID gunicorn --reload -b 0.0.0.0:5000 --log-file - --timeout 600 -c /usr/local/src/timesketch/data/gunicorn_config.py timesketch.wsgi:application +#!/usr/bin/env bash + +docker compose exec -d timesketch \ + celery \ + -A timesketch.lib.tasks \ + worker \ + --loglevel info + +docker compose exec -d timesketch \ + gunicorn \ + --reload \ + -b 0.0.0.0:5000 \ + --log-file - \ + --timeout 600 \ + -c /usr/local/src/timesketch/data/gunicorn_config.py \ + timesketch.wsgi:application diff --git a/docker/dev/start_frontend.sh b/docker/dev/start_frontend.sh index aac1d92c89..7739cf1a4f 100755 --- a/docker/dev/start_frontend.sh +++ b/docker/dev/start_frontend.sh @@ -1,14 +1,13 @@ -#!/bin/bash +#!/usr/bin/env bash echo "[i] Script (NOT TESTED) to run the frontend in dev mode" echo "[i] Run this script in timesketch/docker/dev" echo "[i] Remember to run 'docker 
compose up -d' to start the containers" -CONTAINER_ID="$(docker container list -f name=timesketch-dev -q)" -docker exec -d $CONTAINER_ID celery -A timesketch.lib.tasks worker --loglevel info +CONTAINER_ID="$(docker container list -f name=timesketch-dev-timesketch -q)" +docker exec -d "${CONTAINER_ID}" celery -A timesketch.lib.tasks worker --loglevel info docker compose exec timesketch yarn install --cwd=/usr/local/src/timesketch/timesketch/frontend docker compose exec -d timesketch yarn run --cwd=/usr/local/src/timesketch/timesketch/frontend build --mode development --watch -docker exec -it $CONTAINER_ID sh -c "cd /usr/local/src/timesketch/timesketch/frontend; npm install >> /dev/null; yarn install >> /dev/null" -docker exec -d $CONTAINER_ID gunicorn --reload -b 0.0.0.0:5000 --log-file - --timeout 600 -c /usr/local/src/timesketch/data/gunicorn_config.py timesketch.wsgi:application +docker exec -it "${CONTAINER_ID}" sh -c "cd /usr/local/src/timesketch/timesketch/frontend; npm install >> /dev/null; yarn install >> /dev/null" +docker exec -d "${CONTAINER_ID}" gunicorn --reload -b 0.0.0.0:5000 --log-file - --timeout 600 -c /usr/local/src/timesketch/data/gunicorn_config.py timesketch.wsgi:application docker compose exec timesketch yarn run --cwd=/usr/local/src/timesketch/timesketch/frontend serve - diff --git a/docker/dev/timesketch/Dockerfile b/docker/dev/timesketch/Dockerfile new file mode 100644 index 0000000000..a58223d75a --- /dev/null +++ b/docker/dev/timesketch/Dockerfile @@ -0,0 +1,80 @@ +# Use the official Docker Hub Ubuntu base image +ARG BASE_IMAGE="ubuntu:22.04" +FROM $BASE_IMAGE + +# Prevent needing to configure debian packages, stopping the setup of +# the docker container. 
+RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections + +RUN apt-get update && apt-get install -y --no-install-recommends \ + software-properties-common \ + apt-transport-https \ + apt-utils \ + ca-certificates \ + curl \ + git \ + gpg-agent \ + python3-dev \ + python3-pip \ + python3-wheel \ + python3-setuptools \ + python3-psycopg2 \ + tzdata \ + && rm -rf /var/lib/apt/lists/* + +# Install Plaso +ARG GIFT_PPA_TRACK="stable" +ARG GIFT_PPA_URL="http://ppa.launchpad.net/gift/${GIFT_PPA_TRACK}/ubuntu" +RUN set -eux \ + && DIST="$(lsb_release -cs)" \ + && KEY_ID="$(curl -sS "${GIFT_PPA_URL}/dists/${DIST}/Release.gpg" | gpg --list-packets | grep -oE 'keyid [0-9A-F]+' | cut -d ' ' -f 2)" \ + && curl -sSL "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x${KEY_ID}" | \ + gpg --dearmor -o /usr/share/keyrings/gift.gpg \ + && echo "deb [signed-by=/usr/share/keyrings/gift.gpg] ${GIFT_PPA_URL} ${DIST} main" > /etc/apt/sources.list.d/gift.list \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ + plaso-tools \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* ~/.gnupg + +# Install NodeJS for frontend development +ARG NODE_VERSION="18.x" +ARG NODE_PPA_URL="https://deb.nodesource.com/node_${NODE_VERSION}" +RUN set -eux \ + && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | \ + gpg --dearmor -o /usr/share/keyrings/nodesource.gpg \ + && echo "deb [signed-by=/usr/share/keyrings/nodesource.gpg] ${NODE_PPA_URL} nodistro main" > /etc/apt/sources.list.d/nodesource.list \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ + nodejs \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* ~/.gnupg + +ARG NODE_NPMRC="" +RUN if [ -n "${NODE_NPMRC}" ]; then \ + env echo -e "${NODE_NPMRC}" > ~/.npmrc; \ + fi + +ARG PYTHON_PIP_CONF="" +RUN if [ -n "${PYTHON_PIP_CONF}" ]; then \ + mkdir -p ~/.config/pip; \ + env echo -e "${PYTHON_PIP_CONF}" > ~/.config/pip/pip.conf; \ + fi + +# Install Yarn 
for frontend development +RUN npm install --global yarn + +# Install dependencies for Timesketch +COPY ./requirements.txt /timesketch-requirements.txt +RUN pip3 install -r /timesketch-requirements.txt + +# Install test dependencies for Timesketch +COPY ./test_requirements.txt /timesketch-test-requirements.txt +RUN pip3 install -r /timesketch-requirements.txt -r /timesketch-test-requirements.txt + +# Copy the entrypoint script into the container +COPY ["./docker/dev/timesketch/docker-entrypoint.sh", "/"] +RUN chmod a+x /docker-entrypoint.sh + +# Load the entrypoint script to be run later +ENTRYPOINT ["/docker-entrypoint.sh"] diff --git a/docker/dev/timesketch/docker-entrypoint.sh b/docker/dev/timesketch/docker-entrypoint.sh new file mode 100644 index 0000000000..a34e834358 --- /dev/null +++ b/docker/dev/timesketch/docker-entrypoint.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Run the container the default way +if [ "$1" = 'timesketch' ]; then + CONF_DIR="/etc/timesketch" + + # Install Timesketch in editable mode from volume + pip3 install -e /usr/local/src/timesketch/ + + # Add web user + tsctl create-user --password "${TIMESKETCH_PASSWORD}" "${TIMESKETCH_USER}" + + # Add Sigma rules + git clone https://github.com/SigmaHQ/sigma /usr/local/src/sigma + # for each line in sigma_rules.txt execute the command + while IFS= read -r line; do + tsctl import-sigma-rules "${line}" + done < "${CONF_DIR}/sigma_rules.txt" + + # Wrap up things + echo "Timesketch development server is ready!" 
+ + # Sleep forever to keep the container running + exec sleep infinity +fi + +# Run a custom command on container start +exec "$@" diff --git a/docker/dev/build/sigma_rules.txt b/docker/dev/timesketch/sigma_rules.txt similarity index 100% rename from docker/dev/build/sigma_rules.txt rename to docker/dev/timesketch/sigma_rules.txt diff --git a/docker/dev/timesketch/timesketch.conf b/docker/dev/timesketch/timesketch.conf new file mode 100644 index 0000000000..6ca305a03f --- /dev/null +++ b/docker/dev/timesketch/timesketch.conf @@ -0,0 +1,380 @@ +# Timesketch configuration +import os + +# Show debug information. +# Note: It is a security risk to have this enabled in production. +DEBUG = False + +# Key for signing cookies and for CSRF protection. +# +# This should be a unique random string. Don't share this with anyone. +# To generate a key, you can for example use openssl: +# $ openssl rand -base64 32 +SECRET_KEY = os.environ["SECRET_KEY"] + +# Setup the database. +# +# For more options, see the official documentation: +# https://pythonhosted.org/Flask-SQLAlchemy/config.html +# By default sqlite is used. +# +# NOTE: SQLite should only be used in development. Use PostgreSQL or MySQL in +# production. +SQLALCHEMY_DATABASE_URI = "postgresql://{USER}:{PASSWORD}@postgres/{DATABASE}".format( + USER=os.environ['POSTGRES_USER'], + PASSWORD=os.environ['POSTGRES_PASSWORD'], + DATABASE=os.environ['POSTGRES_USER'] +) +# Configure where your Elasticsearch server is located. +# +# Make sure that the OpenSearch server is properly secured and not accessible +# from the internet. 
See the following link for more information: +# http://www.elasticsearch.org/blog/scripting-security/ +OPENSEARCH_HOST = "opensearch" +OPENSEARCH_PORT = 9200 +OPENSEARCH_USER = None +OPENSEARCH_PASSWORD = None +OPENSEARCH_SSL = False +OPENSEARCH_VERIFY_CERTS = True +OPENSEARCH_TIMEOUT = 10 +OPENSEARCH_FLUSH_INTERVAL = 5000 + +# Define what labels should be defined that make it so that a sketch and +# timelines will not be deleted. This can be used to add a list of different +# labels that ensure that a sketch and it's associated timelines cannot be +# deleted. +LABELS_TO_PREVENT_DELETION = ['protected', 'preserved'] + +# Number of seconds before a timeout occurs in bulk operations in the +# OpenSearch client. +TIMEOUT_FOR_EVENT_IMPORT = 180 + +# Location for the configuration file of the data finder. +DATA_FINDER_PATH = f'{os.environ["TIMESKETCH_CONF_DIR"]}/data_finder.yaml' + +#------------------------------------------------------------------------------- +# Single Sign On (SSO) configuration. + +# Your web server can handle authentication for you by setting a environment +# variable when the user is successfully authenticated. The standard environment +# variable is REMOTE_USER and this is the default, but if your SSO system uses +# another name you can configure that here. + +SSO_ENABLED = False +SSO_USER_ENV_VARIABLE = 'REMOTE_USER' + +# Some SSO systems provides group information as environment variable. +# Timesketch can automatically create groups and add users as members. +# To enable this feature just provide the environment variable used in the SSO +# system of use. +SSO_GROUP_ENV_VARIABLE = None + +# Different systems use different separators in the string returned in the +# environment variable. +SSO_GROUP_SEPARATOR = ';' + +# Some SSO systems uses a special prefix for the group name to indicate that +# the user is not a member of that group. Set this if that is the case, i.e. +# '-'. 
+SSO_GROUP_NOT_MEMBER_SIGN = None + +#------------------------------------------------------------------------------- +# Google Cloud Identity-Aware Proxy (Cloud IAP) authentication configuration. + +# Cloud IAP controls access to your Timesketch server running on Google Cloud +# Platform. Cloud IAP works by verifying a user’s identity and determining if +# that user should be allowed to access the server. +# +# For this feature you will need to configure your Cloud IAP and HTTPS load- +# balancer. Follow the official documentation to get everything ready: +# https://cloud.google.com/iap/docs/enabling-compute-howto + +# Enable Cloud IAP authentication support. +GOOGLE_IAP_ENABLED = False + +# This information is available via the Google Cloud console: +# https://cloud.google.com/iap/docs/signed-headers-howto +GOOGLE_IAP_PROJECT_NUMBER = '' +GOOGLE_IAP_BACKEND_ID = '' + +# DON'T EDIT: Google IAP expected audience is based on Cloud project number and +# backend ID. +GOOGLE_IAP_AUDIENCE = '/projects/{}/global/backendServices/{}'.format( + GOOGLE_IAP_PROJECT_NUMBER, + GOOGLE_IAP_BACKEND_ID +) + +GOOGLE_IAP_ALGORITHM = 'ES256' +GOOGLE_IAP_ISSUER = 'https://cloud.google.com/iap' +GOOGLE_IAP_PUBLIC_KEY_URL = 'https://www.gstatic.com/iap/verify/public_key' + +#------------------------------------------------------------------------------- +# Google Cloud OpenID Connect (OIDC) authentication configuration. + +# Cloud OIDC controls access to your Timesketch server running on Google Cloud +# Platform. Cloud OIDC works by verifying a user’s identity and determining if +# that user should be allowed to access the server. + +# Enable Cloud OIDC authentication support. +# For Google's federated identity, leave AUTH_URI and DICOVERY_URL to None. +# For others, refer to your OIDC provider configuration. Configuration can be +# obtain from the discovery url. eg. https://accounts.google.com/.well-known/openid-configuration + +# Some OIDC providers expects a specific Algorithm. 
If so, specify in ALGORITHM. +# Eg. HS256, HS384, HS512, RS256, RS384, RS512. +# For Google, leave it to None + +GOOGLE_OIDC_ENABLED = False + +GOOGLE_OIDC_AUTH_URL = None +GOOGLE_OIDC_DISCOVERY_URL = None +GOOGLE_OIDC_ALGORITHM = None + +GOOGLE_OIDC_CLIENT_ID = None +GOOGLE_OIDC_CLIENT_SECRET = None + +# If you need to authenticate an API client using OIDC you need to create +# an OAUTH client for "other", or for native applications. +# https://developers.google.com/identity/protocols/OAuth2ForDevices +GOOGLE_OIDC_API_CLIENT_ID = None + +# List of additional allowed GOOGLE OIDC clients that can authenticate to the APIs +GOOGLE_OIDC_API_CLIENT_IDS = [] + +# Limit access to a specific Google GSuite domain. +GOOGLE_OIDC_HOSTED_DOMAIN = None + +# Additional Google GSuite domains allowed API access. +GOOGLE_OIDC_API_ALLOWED_DOMAINS = [] + +# If populated only these users (email addresses) will be able to login to +# this server. This can be used when access should be limited to a specific +# set of users. +GOOGLE_OIDC_ALLOWED_USERS = [] + +#------------------------------------------------------------------------------- +# Upload and processing of Plaso storage files. + +# To enable this feature you need to configure an upload directory and +# how to reach the Redis database used by the distributed task queue. +UPLOAD_ENABLED = True + +# Folder for temporarily storage of Plaso dump files before being processed and +# inserted into the datastore. +UPLOAD_FOLDER = '/tmp' + +# Celery broker configuration. You need to change ip/port to where your Redis +# server is running. +CELERY_BROKER_URL = 'redis://redis:6379' +CELERY_RESULT_BACKEND = 'redis://redis:6379' + +# File location to store the mappings used when OpenSearch indices are created +# for plaso files. +PLASO_MAPPING_FILE = f'{os.environ["TIMESKETCH_CONF_DIR"]}/plaso.mappings' +GENERIC_MAPPING_FILE = f'{os.environ["TIMESKETCH_CONF_DIR"]}/generic.mappings' + +# Override/extend Plaso default message string formatters. 
+PLASO_FORMATTERS = f'{os.environ["TIMESKETCH_CONF_DIR"]}/plaso_formatters.yaml' + +# Upper limits for the process memory that psort.py is allocated when ingesting +# plaso files. The size is in bytes, with the default value of +# 4294967296 or 4 GiB. +PLASO_UPPER_MEMORY_LIMIT = None + +#------------------------------------------------------------------------------- +# Analyzers. + +# Which analyzers to run automatically. +AUTO_SKETCH_ANALYZERS = [] + +# Optional specify any default arguments to pass to analyzers. +# The format is: +# {'analyzer1_name': { +# 'param1': 'value' +# }, +# {'analyzer2_name': { +# 'param1': 'value' +# } +# } +# } +AUTO_SKETCH_ANALYZERS_KWARGS = {} +ANALYZERS_DEFAULT_KWARGS = {} + +# Add all domains that are relevant to your enterprise here. +# All domains in this list are added to the list of watched +# domains and compared to other domains in the timeline to +# attempt to spot "phishy" domains. +DOMAIN_ANALYZER_WATCHED_DOMAINS = [] + +# Defines how deep into the most frequently visited top +# level domains the analyzer should include in its watch list. +DOMAIN_ANALYZER_WATCHED_DOMAINS_THRESHOLD = 10 + +# The minimum Jaccard distance for a domain to be considered +# similar to the domains in the watch list. The lower this number +# is the more domains will be included in the "phishy" domain +# category. +DOMAIN_ANALYZER_WATCHED_DOMAINS_SCORE_THRESHOLD = 0.75 + +# A list of domains that are frequent source of false positives +# in the "phishy" domain comparison, mostly CDNs and similar. +DOMAIN_ANALYZER_EXCLUDE_DOMAINS = ['ytimg.com', 'gstatic.com', 'yimg.com', 'akamaized.net', 'akamaihd.net', 's-microsoft.com', 'images-amazon.com', 'ssl-images-amazon.com', 'wikimedia.org', 'redditmedia.com', 'googleusercontent.com', 'googleapis.com', 'wikipedia.org', 'github.io', 'github.com'] + +# The threshold in minutes which the difference in timestamps has to cross in order to be +# detected as 'timestomping'. 
+NTFS_TIMESTOMP_ANALYZER_THRESHOLD = 10 + +# Safe Browsing API key for the URL analyzer. +SAFEBROWSING_API_KEY = '' + +# For the other possible values of the two settings below, please refer to +# the Safe Browsing API reference at: +# https://developers.google.com/safe-browsing/v4/reference/rest + +# Platforms to be looked at in Safe Browsing (PlatformType). +SAFEBROWSING_PLATFORMS = ['ANY_PLATFORM'] + +# Types to be looked at in Safe Browsing (ThreatType). +SAFEBROWSING_THREATTYPES = ['MALWARE'] + +#-- hashR integration --# +# https://github.com/google/hashr +# Uncomment and fill this section if you want to use the hashR lookup analyzer. +# Provide hashR postgres database connection information below: +# HASHR_DB_USER = 'hashRuser' +# HASHR_DB_PW = 'hashRpass' +# HASHR_DB_ADDR = '127.0.0.1' +# HASHR_DB_PORT = '5432' +# HASHR_DB_NAME = 'hashRdb' + +# The total number of unique hashes that are checked against the database is +# split into multiple batches. This number defines how many unique hashes are +# checked per query. 50000 is the default value. +# HASHR_QUERY_BATCH_SIZE = '50000' + +# Set as True if you want to add the source of the hash ([repo:imagename]) as +# an attribute to the event. WARNING: This will increase the processing time +# of the analyzer! +# HASHR_ADD_SOURCE_ATTRIBUTE = True + +# Threatintel Yeti analyzer-specific configuration +# URI root to Yeti's API, e.g. 'https://localhost:8000/api/v2' +YETI_API_ROOT = '' + +# API key to authenticate requests +YETI_API_KEY = '' + +# Path to a TLS certificate that can be used to authenticate servers +# using self-signed certificates. Provide the full path to the .crt file. 
+YETI_TLS_CERTIFICATE = None + +# Labels to narrow down indicator selection +YETI_INDICATOR_LABELS = ['domain'] + + +# Url to MISP instance +MISP_URL = '' + +# API key to authenticate requests +MISP_API_KEY = '' + +# Url to Hashlookup instance +HASHLOOKUP_URL = '' + +# GeoIP Analyzer Settings +# +# Disclaimer: Please note that the geolocation results obtained from this analyzer +# are indicative and based upon the accuracy of the configured datasource. +# This analyzer uses GeoLite2 data created by MaxMind, available from +# https://maxmind.com. + +# The path to a MaxMind GeoIP database +MAXMIND_DB_PATH = '' + +# The Account ID to access a MaxMind GeoIP web service +MAXMIND_WEB_ACCOUNT_ID = '' + +# The license key to access a MaxMind GeoIP web service +MAXMIND_WEB_LICENSE_KEY = '' + +# The host URL of a MaxMind GeoIP web service +MAXMIND_WEB_HOST = '' + +#------------------------------------------------------------------------------- +# Enable experimental UI features. + +ENABLE_EXPERIMENTAL_UI = False + +#------------------------------------------------------------------------------- +# Email notifications. + +ENABLE_EMAIL_NOTIFICATIONS = False +EMAIL_DOMAIN = 'localhost' +EMAIL_FROM_USER = 'nobody' +EMAIL_SMTP_SERVER = 'localhost' + +# Only send emails to these users. +EMAIL_RECIPIENTS = [] + +# Configuration to construct URLs for resources. +EXTERNAL_HOST_URL = 'https://localhost' + +# SSL/TLS support for emails +EMAIL_TLS = False +EMAIL_SSL = False + +# Email support for authentication +EMAIL_AUTH_USERNAME = "" +EMAIL_AUTH_PASSWORD = "" + +#------------------------------------------------------------------------------- +# Sigma Settings + +SIGMA_CONFIG = f'{os.environ["TIMESKETCH_CONF_DIR"]}/sigma_config.yaml' +SIGMA_TAG_DELAY = 5 + +#------------------------------------------------------------------------------- +# Flask Settings +# Everything mentioned in https://flask-wtf.readthedocs.io/en/latest/config/ can be used. +# Max age in seconds for CSRF tokens. 
Default is 3600. If set to None, the CSRF token is valid for the life of the session. +# WTF_CSRF_TIME_LIMIT = 7200 +WTF_CSRF_ENABLED = False # Set this to False for UI-development purposes + +#------------------------------------------------------------------------------- +# DFIQ - Digital Forensics Investigation Questions +DFIQ_ENABLED = False +DFIQ_PATH = f'{os.environ["TIMESKETCH_CONF_DIR"]}/dfiq/' + +# Intelligence tag metadata configuration +INTELLIGENCE_TAG_METADATA = f'{os.environ["TIMESKETCH_CONF_DIR"]}/intelligence_tag_metadata.yaml' + +# Context links configuration +CONTEXT_LINKS_CONFIG_PATH = f'{os.environ["TIMESKETCH_CONF_DIR"]}/context_links.yaml' + +# LLM provider configs +LLM_PROVIDER_CONFIGS = { + # To use the Ollama provider you need to download and run an Ollama server. + # See instructions at: https://ollama.ai/ + 'ollama': { + 'server_url': 'http://localhost:11434', + 'model': 'gemma:7b', + }, + # To use the Vertex AI provider you need to: + # 1. Create and export a Service Account Key from the Google Cloud Console. + # 2. Set the GOOGLE_APPLICATION_CREDENTIALS environment variable to the full path + # to your service account private key file. + # 3. Install the python libraries: $ pip3 install google-cloud-aiplatform + # + # IMPORTANT: Private keys must be kept secret. If you expose your private key it is + # recommended to revoke it immediately from the Google Cloud Console. 
+ 'vertexai': { + 'model': 'gemini-1.5-flash-001', + 'project_id': '', + } +} + +# LLM nl2q configuration +DATA_TYPES_PATH = f'{os.environ["TIMESKETCH_CONF_DIR"]}/nl2q/data_types.csv' +PROMPT_NL2Q = f'{os.environ["TIMESKETCH_CONF_DIR"]}/nl2q/prompt_nl2q' +EXAMPLES_NL2Q = f'{os.environ["TIMESKETCH_CONF_DIR"]}/nl2q/examples_nl2q' +LLM_PROVIDER = '' diff --git a/docker/dev/timesketch/timesketch.env b/docker/dev/timesketch/timesketch.env new file mode 100644 index 0000000000..2d128dc4cb --- /dev/null +++ b/docker/dev/timesketch/timesketch.env @@ -0,0 +1,10 @@ +TIMESKETCH_USER="${TIMESKETCH_USER}" +TIMESKETCH_PASSWORD="${TIMESKETCH_PASSWORD}" +TIMESKETCH_CONF_DIR="${TIMESKETCH_CONF_DIR}" + +SECRET_KEY="${TIMESKETCH_SECRET_KEY}" +POSTGRES_USER="${POSTGRES_USER}" +POSTGRES_PASSWORD="${POSTGRES_PASSWORD}" + +CHOKIDAR_USEPOLLING="true" +prometheus_multiproc_dir="/tmp/"