Mirror of https://github.com/telekom-security/tpotce.git
synced 2025-07-02 01:27:27 -04:00
Compare commits
49 Commits
175e1944c2
6933ee0065
8fd23d7796
8a59696a5c
1d2592bb30
fd8061eacf
404ce6d4b3
700100c5c8
0eaad86292
d6b3e842fb
9455877fa3
3372a23eb2
7eb271c6ac
c180732af3
cc9c1d95ce
5ece0ee435
e9d80607c3
e46942674b
8dafef2857
5468724771
5ee4aa3d03
c489189879
84447e686d
9d17d7b505
d3d26eac05
ae254943a6
988b7a8651
b659d5a036
873f515727
bdea4e2932
26a25719ff
98c90be47c
134f2934e9
95626fb2cc
6cda0fefe5
bd055f0037
8162ce2571
4917c42fbc
e53c864138
896d719cfb
919031bd2a
67d115fc21
11328412ab
3334c5d0c7
c45cda4f70
9fcc4cc1ad
b221a9d96e
653bb2ed1e
f6398f9cbb
.env (10 changes)
@@ -40,11 +40,19 @@ TPOT_BLACKHOLE=DISABLED
# if you just do not need any of the logfiles.
TPOT_PERSISTENCE=on

# T-Pot Persistence Cycles
# <1-999>: Set the number of T-Pot restart cycles for logrotate.
# Be mindful of this setting as the logs will use up a lot of available disk space.
# In case the setting is invalid, T-Pot will default to 30 cycles.
# Remember to adjust the Elastic Search Lifecycle Policy (https://github.com/telekom-security/tpotce/?tab=readme-ov-file#log-persistence)
# as this setting only accounts for the honeypot logs in the ~/tpotce/data folder.
TPOT_PERSISTENCE_CYCLES=30

# T-Pot Type
# HIVE: This is the default and offers everything to connect T-Pot sensors.
# SENSOR: This needs to be used when running a sensor. Be aware to adjust all other
# settings as well.
# 1. You will need to copy compose/sensor.yml to ./docker-comopose.yml
# 1. You will need to copy compose/sensor.yml to ./docker-compose.yml
# 2. From HIVE host you will need to copy ~/tpotce/data/nginx/cert/nginx.crt to
# your SENSOR host to ~/tpotce/data/hive.crt
# 3. On HIVE: Create a web user per SENSOR on HIVE and provide credentials below
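The new TPOT_PERSISTENCE_CYCLES value is consumed by logrotate through the tpotinit changes further down in this compare (docker/tpotinit/dist/bin/clean.sh and logrotate.template). A minimal sketch of that flow, with paths shortened and the parameter handling simplified for illustration:

# clean.sh receives the cycle count as its second argument and falls back to 30
myPERSISTENCE_CYCLES="${2:-30}"
export myPERSISTENCE_CYCLES
# envsubst expands "rotate $myPERSISTENCE_CYCLES" from the template into the active config
envsubst < logrotate.template > logrotate.conf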
.github/workflows/main.yml (new file, vendored, 15 lines)
@@ -0,0 +1,15 @@
name: Link Checker

on:
  schedule:
    - cron: '0 2 * * *' # daily at 2 AM UTC
  workflow_dispatch:

jobs:
  linkChecker:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: lycheeverse/lychee-action@v1.9.1
        with:
          args: --verbose README.md
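The workflow checks the links in README.md once a day via lychee. For a comparable local check one could run the lychee container directly; this is a hedged sketch, the image name and flags come from the lychee project, not from this repository:

docker run --rm -v "$PWD":/input lycheeverse/lychee --verbose /input/README.md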
README.md (20 changes)
@@ -325,12 +325,12 @@ Once you are familiar with how things work you should choose a network you suspe

| Distribution Name | x64 | arm64 |
| :---------------- | :-- | :---- |
| [Alma Linux OS 9.5 Boot ISO](https://almalinux.org) | [download](https://repo.almalinux.org/almalinux/9.5/isos/x86_64/AlmaLinux-9.5-x86_64-boot.iso) | [download](https://repo.almalinux.org/almalinux/9.5/isos/aarch64/AlmaLinux-9.5-aarch64-boot.iso) |
| [Debian 12 Network Install](https://www.debian.org/CD/netinst/index.en.html) | [download](https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-12.8.0-amd64-netinst.iso) | [download](https://cdimage.debian.org/debian-cd/current/arm64/iso-cd/debian-12.8.0-arm64-netinst.iso) |
| [Fedora Server 41 Network Install](https://fedoraproject.org/server/download) | [download](https://download.fedoraproject.org/pub/fedora/linux/releases/41/Server/x86_64/iso/Fedora-Server-netinst-x86_64-41-1.4.iso) | [download](https://download.fedoraproject.org/pub/fedora/linux/releases/41/Server/aarch64/iso/Fedora-Server-netinst-aarch64-41-1.4.iso) |
| [Alma Linux OS 9.6 Boot ISO](https://almalinux.org) | [download](https://repo.almalinux.org/almalinux/9.6/isos/x86_64/AlmaLinux-9.6-x86_64-boot.iso) | [download](https://repo.almalinux.org/almalinux/9.6/isos/aarch64/AlmaLinux-9.6-aarch64-boot.iso) |
| [Debian 12 Network Install](https://www.debian.org/CD/netinst/index.en.html) | [download](https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-12.11.0-amd64-netinst.iso) | [download](https://cdimage.debian.org/debian-cd/current/arm64/iso-cd/debian-12.11.0-arm64-netinst.iso) |
| [Fedora Server 42 Network Install](https://fedoraproject.org/server/download) | [download](https://download.fedoraproject.org/pub/fedora/linux/releases/42/Server/x86_64/iso/Fedora-Server-netinst-x86_64-42-1.1.iso) | [download](https://download.fedoraproject.org/pub/fedora/linux/releases/42/Server/aarch64/iso/Fedora-Server-netinst-aarch64-42-1.1.iso) |
| [OpenSuse Tumbleweed Network Image](https://get.opensuse.org/tumbleweed/#download) | [download](https://download.opensuse.org/tumbleweed/iso/openSUSE-Tumbleweed-NET-x86_64-Current.iso) | [download](https://download.opensuse.org/ports/aarch64/tumbleweed/iso/openSUSE-Tumbleweed-NET-aarch64-Current.iso) |
| [Rocky Linux OS 9.5 Boot ISO](https://rockylinux.org/download) | [download](https://download.rockylinux.org/pub/rocky/9/isos/x86_64/Rocky-9.5-x86_64-minimal.iso) | [download](https://download.rockylinux.org/pub/rocky/9/isos/aarch64/Rocky-9.5-aarch64-minimal.iso) |
| [Ubuntu 24.04.1 Live Server](https://ubuntu.com/download/server) | [download](https://releases.ubuntu.com/24.04/ubuntu-24.04.1-live-server-amd64.iso) | [download](https://cdimage.ubuntu.com/releases/24.04/release/ubuntu-24.04.1-live-server-arm64.iso) |
| [Rocky Linux OS 9.6 Boot ISO](https://rockylinux.org/download) | [download](https://download.rockylinux.org/pub/rocky/9/isos/x86_64/Rocky-9.6-x86_64-minimal.iso) | [download](https://download.rockylinux.org/pub/rocky/9/isos/aarch64/Rocky-9.6-aarch64-minimal.iso) |
| [Ubuntu 24.04.2 Live Server](https://ubuntu.com/download/server) | [download](https://releases.ubuntu.com/24.04/ubuntu-24.04.2-live-server-amd64.iso) | [download](https://cdimage.ubuntu.com/releases/24.04/release/ubuntu-24.04.2-live-server-arm64.iso) |

<br>

@@ -677,7 +677,7 @@ All persistent log files from the honeypots, tools and T-Pot related services ar
<br><br>

## Log Persistence
All log data stored in the [T-Pot Data Folder](#t-pot-data-folder) will be persisted for 30 days by default.
All log data is stored in the [T-Pot Data Folder](#t-pot-data-folder) and will be persisted for the number of cycles set for `TPOT_PERSISTENCE_CYCLES=<1-999>` in the T-Pot configuration file `~/tpotce/.env`. It defaults to 30.
<br>
Elasticsearch indices are handled by the `tpot` Index Lifecycle Policy which can be adjusted directly in Kibana (make sure to "Include managed system policies").

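Since honeypot log rotation and Elasticsearch index retention are configured separately, the `tpot` ILM policy may need the matching adjustment mentioned in the README change above. A hedged sketch for inspecting that policy outside Kibana, assuming the local Elasticsearch port mapping (127.0.0.1:64298) used in this repository's compose files:

# read the current "tpot" Index Lifecycle Policy via the standard Elasticsearch ILM API
curl -s -XGET 'http://127.0.0.1:64298/_ilm/policy/tpot'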
@@ -797,7 +797,7 @@ The software that T-Pot is built on uses the following licenses.
<br>GPLv3:
[adbhoney](https://github.com/huuck/ADBHoney),
[elasticpot](https://gitlab.com/bontchev/elasticpot/-/blob/master/LICENSE),
[ewsposter](https://github.com/telekom-security/ews/),
[ewsposter](https://github.com/telekom-security/ewsposter),
[log4pot](https://github.com/thomaspatzke/Log4Pot/blob/master/LICENSE),
[fatt](https://github.com/0x4D31/fatt/blob/master/LICENSE),
[heralding](https://github.com/johnnykv/heralding/blob/master/LICENSE.txt),
@@ -813,8 +813,8 @@ The software that T-Pot is built on uses the following licenses.
[elasticsearch](https://github.com/elasticsearch/elasticsearch/blob/master/LICENSE.txt),
[go-pot](https://github.com/ryanolee/go-pot?tab=License-1-ov-file#readme),
[h0neytr4p](https://github.com/pbssubhash/h0neytr4p?tab=Apache-2.0-1-ov-file#readme),
[logstash](https://github.com/elasticsearch/logstash/blob/master/LICENSE),
[kibana](https://github.com/elasticsearch/kibana/blob/master/LICENSE.md),
[logstash](https://github.com/elasticsearch/logstash/blob/master/LICENSE.txt),
[kibana](https://github.com/elasticsearch/kibana/blob/master/LICENSE.txt),
[docker](https://github.com/docker/docker/blob/master/LICENSE)
<br>MIT license:
[autoheal](https://github.com/willfarrell/docker-autoheal?tab=MIT-1-ov-file#readme),
@@ -907,4 +907,4 @@ And from @robcowart (creator of [ElastiFlow](https://github.com/robcowart/elasti
***"#TPot is one of the most well put together turnkey honeypot solutions. It is a must-have for anyone wanting to analyze and understand the behavior of malicious actors and the threat they pose to your organization."***
<br><br>

# Thank you 💖
# Thank you 💖
@@ -93,23 +93,6 @@ services:
volumes:
- ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa

# CitrixHoneypot service
citrixhoneypot:
container_name: citrixhoneypot
restart: always
depends_on:
tpotinit:
condition: service_healthy
networks:
- citrixhoneypot_local
ports:
- "443:443"
image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
pull_policy: ${TPOT_PULL_POLICY}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/citrixhoneypot/log:/opt/citrixhoneypot/logs

# Cowrie service
cowrie:
container_name: cowrie
@@ -226,8 +226,8 @@ services:
- "22:22"
- "23:23"
- "25:25"
- "53:53"
- "67:67/udp"
- "53:53/udp"
- "80:80"
- "110:110"
- "123:123"
@@ -572,7 +572,7 @@ services:
- "22:22"
- "23:23"
- "25:25"
- "53:53/udp"
- "53:53"
- "67:67/udp"
- "80:80"
- "110:110"
@@ -18,7 +18,7 @@ PUSH_IMAGES=false
NO_CACHE=false
PARALLELBUILDS=2
UPLOAD_BANDWIDTH=40mbit # Set this to max 90% of available upload bandwidth
INTERFACE=$(/sbin/ip address show | /usr/bin/awk '/inet.*brd/{ print $NF; exit }')
INTERFACE=$(ip route | grep "^default" | awk '{ print $5 }')

# Help message
usage() {
@@ -132,7 +132,7 @@ fi

# Ensure QEMU is set up for cross-platform builds
echo -n "Ensuring QEMU is configured for cross-platform builds..."
if docker run --rm --privileged multiarch/qemu-user-static --reset -p yes > /dev/null 2>&1; then
if docker run --rm --privileged tonistiigi/binfmt --install all > /dev/null 2>&1; then
echo -e " [${GREEN}OK${NC}]"
else
echo -e " [${RED}FAIL${NC}]"
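The INTERFACE change above recurs in the fatt, glutton, p0f and suricata images later in this compare. Side by side, the old and new detection commands taken from these diffs are:

# old: first interface that shows a broadcast inet address
/sbin/ip address show | /usr/bin/awk '/inet.*brd/{ print $NF; exit }'
# new: interface that carries the default route (assumes a default route is present)
ip route | grep "^default" | awk '{ print $5 }'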
@@ -19,8 +19,7 @@ fi
if [ "$1" != "-y" ]; then
echo "### Setting up Docker for Multi-Arch Builds."
echo "### Requires Docker packages from https://get.docker.com/"
echo "### Use on x64 only!"
echo "### Run with -y if you fit the requirements!"
echo "### Run with -y if you meet the requirements!"
exit 0
fi

@@ -42,7 +41,7 @@ fi

# Ensure QEMU is set up for cross-platform builds
echo -n "Ensuring QEMU is configured for cross-platform builds..."
if docker run --rm --privileged multiarch/qemu-user-static --reset -p yes >/dev/null 2>&1; then
if docker run --rm --privileged tonistiigi/binfmt --install all >/dev/null 2>&1; then
echo -e " [${GREEN}OK${NC}]"
else
echo -e " [${RED}FAIL${NC}]"
@@ -95,5 +94,5 @@ echo " docker login -u <username>"
echo " docker login ghcr.io -u <username>"
echo
echo -e "${BLUE}Fix segmentation faults when building arm64 images:${NC}"
echo " docker run --rm --privileged multiarch/qemu-user-static --reset -p yes"
echo " docker buildx rm mybuilder && docker run --rm --privileged tonistiigi/binfmt --install all"
echo
@@ -12,7 +12,7 @@ WORKDIR /root
# Build beelzebub
RUN git clone https://github.com/t3chn0m4g3/beelzebub && \
cd beelzebub && \
git checkout 17a0854b8d9d1e41cf5435b5dc85354bbfb9093c
git checkout 0b9aba53ec1671f669d22782758142a1d411b858
WORKDIR /root/beelzebub
RUN go mod download
RUN go build -o main .
@@ -22,7 +22,7 @@ services:
LLM_MODEL: "ollama"
LLM_HOST: "http://ollama.local:11434/api/chat"
OLLAMA_MODEL: "openchat"
image: "dtagdevsec/beelzebub:24.04"
image: "ghcr.io/telekom-security/beelzebub:24.04.1"
read_only: true
volumes:
- $HOME/tpotce/data/beelzebub/key:/opt/beelzebub/configurations/key
@@ -43,9 +43,9 @@ RUN apk --no-cache -U upgrade && \
cd /home/cowrie && \
git clone https://github.com/cowrie/cowrie && \
cd cowrie && \
git checkout 49c7c4aac87603e0d2449501c300841a1f59fd0f && \
git checkout 7b18207485dbfc218082e82c615d948924429973 && \
mkdir -p log && \
cp /root/dist/requirements.txt . && \
# cp /root/dist/requirements.txt . && \
pip3 install --break-system-packages --upgrade --no-cache-dir pip && \
pip3 install --break-system-packages --no-cache-dir -r requirements.txt && \
#
docker/cowrie/dist/cowrie.cfg (vendored, 2 changes)
@@ -16,7 +16,7 @@ backend = shell
timezone = UTC
auth_class = AuthRandom
auth_class_parameters = 2, 5, 10
data_path = /tmp/cowrie/data
data_path = src/cowrie/data

[shell]
filesystem = src/cowrie/data/fs.pickle
@@ -18,7 +18,7 @@ services:
ports:
- "22:22"
- "23:23"
image: "dtagdevsec/cowrie:24.04"
image: "ghcr.io/telekom-security/cowrie:24.04.1"
read_only: true
volumes:
- $HOME/tpotce/data/cowrie/downloads:/home/cowrie/cowrie/dl
@@ -22,7 +22,7 @@ services:
mem_limit: 4g
ports:
- "127.0.0.1:64298:9200"
image: "dtagdevsec/elasticsearch:24.04"
image: "ghcr.io/telekom-security/elasticsearch:24.04.1"
volumes:
- $HOME/tpotce/data:/data

@@ -38,7 +38,7 @@ services:
mem_limit: 1g
ports:
- "127.0.0.1:64296:5601"
image: "dtagdevsec/kibana:24.04"
image: "ghcr.io/telekom-security/kibana:24.04.1"

## Logstash service
logstash:
@@ -50,7 +50,7 @@ services:
depends_on:
elasticsearch:
condition: service_healthy
image: "dtagdevsec/logstash:24.04"
image: "ghcr.io/telekom-security/logstash:24.04.1"
volumes:
- $HOME/tpotce/data:/data
# - /root/tpotce/docker/elk/logstash/dist/logstash.conf:/etc/logstash/conf.d/logstash.conf
@@ -63,7 +63,7 @@ services:
tty: true
ports:
- "127.0.0.1:6379:6379"
image: "dtagdevsec/redis:24.04"
image: "ghcr.io/telekom-security/redis:24.04.1"
read_only: true

# Map Web Service
@@ -77,7 +77,7 @@ services:
tty: true
ports:
- "127.0.0.1:64299:64299"
image: "dtagdevsec/map:24.04"
image: "ghcr.io/telekom-security/map:24.04.1"
depends_on:
- map_redis

@@ -89,6 +89,6 @@ services:
- MAP_COMMAND=DataServer_v2.py
stop_signal: SIGKILL
tty: true
image: "dtagdevsec/map:24.04"
image: "ghcr.io/telekom-security/map:24.04.1"
depends_on:
- map_redis
@@ -1,6 +1,6 @@
FROM ubuntu:24.04
ENV DEBIAN_FRONTEND noninteractive
ENV ES_VER=8.16.1
ENV ES_VER=8.18.3
#
# Include dist
COPY dist/ /root/dist/
@@ -15,8 +15,8 @@ RUN apt-get update -y && \
#
# Determine arch, get and install packages
ARCH=$(arch) && \
if [ "$ARCH" = "x86_64" ]; then ES_ARCH="amd64"; fi && \
if [ "$ARCH" = "aarch64" ]; then ES_ARCH="arm64"; fi && \
if [ "$ARCH" = "x86_64" ]; then ES_ARCH="amd64"; export _JAVA_OPTIONS=""; fi && \
if [ "$ARCH" = "aarch64" ]; then ES_ARCH="arm64"; export _JAVA_OPTIONS="-XX:UseSVE=0"; fi && \
echo "$ARCH" && \
cd /root/dist/ && \
mkdir -p /usr/share/elasticsearch/config \
@@ -47,4 +47,7 @@ HEALTHCHECK --retries=10 CMD curl -s -XGET 'http://127.0.0.1:9200/_cat/health'
#
# Start ELK
USER elasticsearch:elasticsearch
CMD ["/usr/share/elasticsearch/bin/elasticsearch"]
#CMD ["/usr/share/elasticsearch/bin/elasticsearch"]
CMD ARCH=$(arch) && \
if [ "$ARCH" = "aarch64" ]; then export _JAVA_OPTIONS="-XX:UseSVE=0"; fi && \
exec /usr/share/elasticsearch/bin/elasticsearch
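The same arm64 handling appears again in the logstash image and its entrypoint below: only on aarch64 the JVM option -XX:UseSVE=0 is exported before the service starts, presumably to avoid JVM problems on arm64 hosts whose CPUs advertise SVE. The runtime pattern used by these images, quoted from this compare:

ARCH=$(arch)
if [ "$ARCH" = "aarch64" ]; then export _JAVA_OPTIONS="-XX:UseSVE=0"; fi
exec /usr/share/elasticsearch/bin/elasticsearch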
@@ -11,5 +11,5 @@ http.host: 0.0.0.0
http.cors.enabled: true
http.cors.allow-origin: "*"
indices.query.bool.max_clause_count: 2000
cluster.routing.allocation.disk.watermark.enable_for_single_data_node: true
#cluster.routing.allocation.disk.watermark.enable_for_single_data_node: true
discovery.type: single-node

@@ -22,6 +22,6 @@ services:
mem_limit: 2g
ports:
- "127.0.0.1:64298:9200"
image: "dtagdevsec/elasticsearch:24.04"
image: "ghcr.io/telekom-security/elasticsearch:24.04.1"
volumes:
- $HOME/tpotce/data:/data
@@ -1,5 +1,5 @@
FROM node:20.15.1-alpine3.20
ENV KB_VER=8.16.1
FROM node:20.19.2-alpine3.20
ENV KB_VER=8.18.3
#
# Include dist
COPY dist/ /root/dist/

@@ -10,4 +10,4 @@ services:
# condition: service_healthy
ports:
- "127.0.0.1:64296:5601"
image: "dtagdevsec/kibana:24.04"
image: "ghcr.io/telekom-security/kibana:24.04.1"
@@ -1,6 +1,6 @@
FROM ubuntu:24.04
ENV DEBIAN_FRONTEND=noninteractive
ENV LS_VER=8.16.1
ENV LS_VER=8.18.3
#
# Include dist
COPY dist/ /root/dist/
@@ -17,8 +17,8 @@ RUN apt-get update -y && \
#
# Determine arch, get and install packages
ARCH=$(arch) && \
if [ "$ARCH" = "x86_64" ]; then LS_ARCH="amd64"; fi && \
if [ "$ARCH" = "aarch64" ]; then LS_ARCH="arm64"; fi && \
if [ "$ARCH" = "x86_64" ]; then LS_ARCH="amd64"; export _JAVA_OPTIONS=""; fi && \
if [ "$ARCH" = "aarch64" ]; then LS_ARCH="arm64"; export _JAVA_OPTIONS="-XX:UseSVE=0"; fi && \
echo "$ARCH" && \
mkdir -p /etc/listbot && \
cd /etc/listbot && \
@@ -42,6 +42,7 @@ RUN apt-get update -y && \
cp tpot-template.json /etc/logstash/ && \
cd /usr/share/logstash && \
bin/logstash-plugin update logstash-filter-translate && \
bin/logstash-plugin install logstash-output-syslog && \
rm /etc/logstash/pipelines.yml && \
rm /etc/logstash/logstash.yml && \
#
docker/elk/logstash/dist/entrypoint.sh (vendored, 4 changes)
@@ -101,4 +101,8 @@ if [ "$TPOT_TYPE" != "SENSOR" ];
fi
echo

ARCH=$(arch)
if [ "$ARCH" = "aarch64" ]; then
export _JAVA_OPTIONS="-XX:UseSVE=0";
fi
exec /usr/share/logstash/bin/logstash --config.reload.automatic
docker/elk/logstash/dist/http_input.conf (vendored, 9 changes)
@@ -17,5 +17,12 @@ output {
template => "/etc/logstash/tpot-template.json"
template_overwrite => "true"
}

# Syslog Output Example
# syslog {
# host => "192.168.1.1"
# port => 514
# protocol => tcp
# appname => "logstash-logs"
# severity => "6"
# }
}
docker/elk/logstash/dist/logstash.conf (vendored, 9 changes)
@@ -816,7 +816,14 @@ output {
template => "/etc/logstash/tpot-template.json"
template_overwrite => "true"
}

# Syslog Output Example
# syslog {
# host => "192.168.1.1"
# port => 514
# protocol => tcp
# appname => "logstash-logs"
# severity => "6"
# }
#if [type] == "Suricata" {
# file {
# file_mode => 0770
@@ -12,7 +12,7 @@ services:
# condition: service_healthy
ports:
- "127.0.0.1:64305:64305"
image: "dtagdevsec/logstash:24.04"
image: "ghcr.io/telekom-security/logstash:24.04.1"
volumes:
- $HOME/tpotce/data:/data
# - /$HOME/tpotce/docker/elk/logstash/dist/logstash.conf:/etc/logstash/conf.d/logstash.conf
@@ -14,7 +14,7 @@ RUN apk --no-cache -U upgrade && \
# Install from GitHub and setup
mkdir -p /opt && \
cd /opt/ && \
git clone https://github.com/t3chn0m4g3/t-pot-attack-map -b 2.2.6 && \
git clone https://github.com/t3chn0m4g3/t-pot-attack-map -b 2.2.7 && \
cd t-pot-attack-map && \
pip3 install --break-system-packages --upgrade pip && \
pip3 install --break-system-packages -r requirements.txt && \
@@ -1,4 +1,4 @@
FROM alpine:3.19
FROM alpine:3.21
#
# Include dist
COPY dist/ /root/dist/
@@ -22,7 +22,8 @@ RUN apk --no-cache -U upgrade && \
py3-requests \
py3-pip \
py3-setuptools \
py3-wheel && \
py3-wheel \
py3-xmltodict && \
pip3 install --break-system-packages --upgrade pip && \
pip3 install --break-system-packages --no-cache-dir \
configparser \
@@ -32,7 +33,8 @@ RUN apk --no-cache -U upgrade && \
xmljson && \
#
# Setup ewsposter
git clone https://github.com/telekom-security/ewsposter -b v1.25.0 /opt/ewsposter && \
git clone https://github.com/telekom-security/ewsposter /opt/ewsposter && \
# git clone https://github.com/telekom-security/ewsposter -b v1.32 /opt/ewsposter && \
mkdir -p /opt/ewsposter/spool /opt/ewsposter/log && \
#
# Setup user and groups
docker/ewsposter/dist/ews.cfg (vendored, 204 changes)
@@ -44,23 +44,104 @@ token = <your token for influx 2.0>
bucket = <your bucket/database for 2.0/1.8>
org = <your org for influx 2.0>

[GLASTOPFV3]
glastopfv3 = false
nodeid = glastopfv3-community-01
sqlitedb = /data/glastopf/db/glastopf.db
malwaredir = /data/glastopf/data/files/
[ADBHONEY]
adbhoney = true
nodeid = adbhoney-community-01
logfile = /data/adbhoney/log/adbhoney.json
malwaredir = /data/adbhoney/downloads

[BEELZEBUB]
beelzebub = true
nodeid = beelzebub-community-01
logfile = /data/beelzebub/log/beelzebub.json

[CISCOASA]
ciscoasa = true
nodeid = ciscoasa-community-01
logfile = /data/ciscoasa/log/ciscoasa.log

[CITRIX]
citrix = true
nodeid = citrix-community-01
logfile = /data/citrixhoneypot/logs/server.log

[CONPOT]
conpot = true
nodeid = conpot-community-01
logdir = /data/conpot/log

[COWRIE]
cowrie = true
nodeid = cowrie-community-01
logfile = /data/cowrie/log/cowrie.json

[DDOSPOT]
ddospot = true
nodeid = ddospot-community-01
logdir = /data/ddospot/log

[DICOMPOT]
dicompot = true
nodeid = dicompot-community-01
logfile = /data/dicompot/log/dicompot.log

[DIONAEA]
dionaea = true
nodeid = dionaea-community-01
malwaredir = /data/dionaea/binaries/
sqlitedb = /data/dionaea/log/dionaea.sqlite

[ELASTICPOT]
elasticpot = true
nodeid = elasticpot-community-01
logfile = /data/elasticpot/log/elasticpot.json

[ENDLESSH]
endlessh = true
nodeid = endlessh-community-01
logfile = /data/endlessh/log/endlessh.log

[GALAH]
galah = true
nodeid = galah-community-01
logfile = /data/galah/log/galah.json

[GLUTTON]
glutton = true
nodeid = glutton-community-01
logfile = /data/glutton/log/glutton.log

[GOPOT]
gopot = true
nodeid = gopot-community-01
logfile = /data/go-pot/log/go-pot.json

[H0NEYTR4P]
h0neytr4p = true
nodeid = h0neytr4p-community-01
logfile = /data/h0neytr4p/log/log.json
payloaddir = /data/h04neytr4p/payload

[HELLPOT]
hellpot = true
nodeid = hellpot-community-01
logfile = /data/hellpot/log/hellpot.log

[HERALDING]
heralding = true
nodeid = heralding-community-01
logfile = /data/heralding/log/auth.csv

[HONEYAML]
honeyaml = true
nodeid = honeyaml-community-01
logfile = /data/honeyaml/log/honeyaml.log

[HONEYPOTS]
honeypots = true
nodeid = honeypots-community-01
logdir = /data/honeypots/log

[HONEYTRAP]
honeytrap = true
nodeid = honeytrap-community-01
@@ -68,118 +149,47 @@ newversion = true
payloaddir = /data/honeytrap/attacks/
attackerfile = /data/honeytrap/log/attacker.log

[EMOBILITY]
eMobility = false
nodeid = emobility-community-01
logfile = /data/emobility/log/centralsystemEWS.log
[IPPHONEY]
ipphoney = true
nodeid = ipphoney-community-01
logfile = /data/ipphoney/log/ipphoney.json

[CONPOT]
conpot = true
nodeid = conpot-community-01
logfile = /data/conpot/log/conpot*.json

[ELASTICPOT]
elasticpot = true
nodeid = elasticpot-community-01
logfile = /data/elasticpot/log/elasticpot.json

[SURICATA]
suricata = false
nodeid = suricata-community-01
logfile = /data/suricata/log/eve.json
[LOG4POT]
log4pot = true
nodeid = log4pot-community-01
logfile = /data/log4pot/log/log4pot.log

[MAILONEY]
mailoney = true
nodeid = mailoney-community-01
logfile = /data/mailoney/log/commands.log

[RDPY]
rdpy = false
nodeid = rdpy-community-01
logfile = /data/rdpy/log/rdpy.log

[VNCLOWPOT]
vnclowpot = false
nodeid = vnclowpot-community-01
logfile = /data/vnclowpot/log/vnclowpot.log

[HERALDING]
heralding = true
nodeid = heralding-community-01
logfile = /data/heralding/log/auth.csv

[CISCOASA]
ciscoasa = true
nodeid = ciscoasa-community-01
logfile = /data/ciscoasa/log/ciscoasa.log

[TANNER]
tanner = true
nodeid = tanner-community-01
logfile = /data/tanner/log/tanner_report.json

[GLUTTON]
glutton = true
nodeid = glutton-community-01
logfile = /data/glutton/log/glutton.log

[HONEYSAP]
honeysap = false
nodeid = honeysap-community-01
logfile = /data/honeysap/log/honeysap-external.log

[ADBHONEY]
adbhoney = true
nodeid = adbhoney-community-01
logfile = /data/adbhoney/log/adbhoney.json
malwaredir = /data/adbhoney/downloads

[FATT]
fatt = false
nodeid = fatt-community-01
logfile = /data/fatt/log/fatt.log

[IPPHONEY]
ipphoney = true
nodeid = ipphoney-community-01
logfile = /data/ipphoney/log/ipphoney.json

[DICOMPOT]
dicompot = true
nodeid = dicompot-community-01
logfile = /data/dicompot/log/dicompot.log

[MEDPOT]
medpot = true
nodeid = medpot-community-01
logfile = /data/medpot/log/medpot.log

[HONEYPY]
honeypy = false
nodeid = honeypy-community-01
logfile = /data/honeypy/log/json.log

[CITRIX]
citrix = true
nodeid = citrix-community-01
logfile = /data/citrixhoneypot/logs/server.log
[MINIPRINT]
miniprint = true
nodeid = miniprint-community-01
logfile = /data/miniprint/log/miniprint.json

[REDISHONEYPOT]
redishoneypot = true
nodeid = redishoneypot-community-01
logfile = /data/redishoneypot/log/redishoneypot.log

[ENDLESSH]
endlessh = true
nodeid = endlessh-community-01
logfile = /data/endlessh/log/endlessh.log

[SENTRYPEER]
sentrypeer = true
nodeid = sentrypeer-community-01
logfile = /data/sentrypeer/log/sentrypeer.json

[LOG4POT]
log4pot = true
nodeid = log4pot-community-01
logfile = /data/log4pot/log/log4pot.log
[TANNER]
tanner = true
nodeid = tanner-community-01
logfile = /data/tanner/log/tanner_report.json

[WORDPOT]
wordpot = true
nodeid = wordpot-community-01
logfile = /data/wordpot/log/wordpot.log
docker/ewsposter/dist/ews.cfg.backup (new file, vendored, 205 lines)
@@ -0,0 +1,205 @@
[MAIN]
homedir = /opt/ewsposter/
spooldir = /opt/ewsposter/spool/
logdir = /opt/ewsposter/log/
del_malware_after_send = false
send_malware = false
sendlimit = 5000
contact = your_email_address
proxy = None
ip_int = None
ip_ext = None

[EWS]
ews = true
username = community-01-user
token = foth{a5maiCee8fineu7
rhost_first = https://community.sicherheitstacho.eu/ews-0.1/alert/postSimpleMessage
rhost_second = https://community.sicherheitstacho.eu/ews-0.1/alert/postSimpleMessage
ignorecert = false

[HPFEED]
hpfeed = %(EWS_HPFEEDS_ENABLE)s
host = %(EWS_HPFEEDS_HOST)s
port = %(EWS_HPFEEDS_PORT)s
channels = %(EWS_HPFEEDS_CHANNELS)s
ident = %(EWS_HPFEEDS_IDENT)s
secret= %(EWS_HPFEEDS_SECRET)s
# path/to/certificate for tls broker - or "false" for non-tls broker
tlscert = %(EWS_HPFEEDS_TLSCERT)s
# hpfeeds submission format: "ews" (xml) or "json"
hpfformat = %(EWS_HPFEEDS_FORMAT)s

[EWSJSON]
json = false
jsondir = /data/ews/json/

[INFLUXDB]
influxdb = false
host = http://localhost
port = 8086
username = <your username for influx 1.8>
password = <your password for influx 1.8>
token = <your token for influx 2.0>
bucket = <your bucket/database for 2.0/1.8>
org = <your org for influx 2.0>

[ADBHONEY]
adbhoney = true
nodeid = adbhoney-community-01
logfile = /data/adbhoney/log/adbhoney.json
malwaredir = /data/adbhoney/downloads

[BEELZEBUB]
beelzebub = true
nodeid = beelzebub-community-01
logfile = /data/beelzebub/log/beelzebub.json

[CISCOASA]
ciscoasa = true
nodeid = ciscoasa-community-01
logfile = /data/ciscoasa/log/ciscoasa.log

[CITRIX]
citrix = true
nodeid = citrix-community-01
logfile = /data/citrixhoneypot/logs/server.log

[CONPOT]
conpot = true
nodeid = conpot-community-01
logdir = /data/conpot/log

[COWRIE]
cowrie = true
nodeid = cowrie-community-01
logfile = /data/cowrie/log/cowrie.json

[DDOSPOT]
ddospot = true
nodeid = ddospot-community-01
logdir = /data/ddospot/log

[DICOMPOT]
dicompot = true
nodeid = dicompot-community-01
logfile = /data/dicompot/log/dicompot.log

[DIONAEA]
dionaea = true
nodeid = dionaea-community-01
malwaredir = /data/dionaea/binaries/
sqlitedb = /data/dionaea/log/dionaea.sqlite

[ELASTICPOT]
elasticpot = true
nodeid = elasticpot-community-01
logfile = /data/elasticpot/log/elasticpot.json

[ENDLESSH]
endlessh = true
nodeid = endlessh-community-01
logfile = /data/endlessh/log/endlessh.log

[FATT]
fatt = false
nodeid = fatt-community-01
logfile = /data/fatt/log/fatt.log

[GALAH]
galah = true
nodeid = galah-community-01
logfile = /data/galah/log/galah.json

[GLUTTON]
glutton = true
nodeid = glutton-community-01
logfile = /data/glutton/log/glutton.log

[GOPOT]
gopot = true
nodeid = gopot-community-01
logfile = /data/go-pot/log/go-pot.json

[H0NEYTR4P]
h0neytr4p = true
nodeid = h0neytr4p-community-01
logfile = /data/h0neytr4p/log/log.json
payloaddir = /data/h04neytr4p/payload

[HELLPOT]
hellpot = true
nodeid = hellpot-community-01
logfile = /data/hellpot/log/hellpot.log

[HERALDING]
heralding = true
nodeid = heralding-community-01
logfile = /data/heralding/log/auth.csv

[HONEYAML]
honeyaml = true
nodeid = honeyaml-community-01
logfile = /data/honeyaml/log/honeyaml.log

[HONEYPOTS]
honeypots = true
nodeid = honeypots-community-01
logdir = /data/honeypots/log

[HONEYTRAP]
honeytrap = true
nodeid = honeytrap-community-01
newversion = true
payloaddir = /data/honeytrap/attacks/
attackerfile = /data/honeytrap/log/attacker.log

[IPPHONEY]
ipphoney = true
nodeid = ipphoney-community-01
logfile = /data/ipphoney/log/ipphoney.json

[LOG4POT]
log4pot = true
nodeid = log4pot-community-01
logfile = /data/log4pot/log/log4pot.log

[MAILONEY]
mailoney = true
nodeid = mailoney-community-01
logfile = /data/mailoney/log/commands.log

[MEDPOT]
medpot = true
nodeid = medpot-community-01
logfile = /data/medpot/log/medpot.log

[MINIPRINT]
miniprint = true
nodeid = miniprint-community-01
logfile = /data/miniprint/log/miniprint.json

[REDISHONEYPOT]
redishoneypot = true
nodeid = redishoneypot-community-01
logfile = /data/redishoneypot/log/redishoneypot.log

[SENTRYPEER]
sentrypeer = true
nodeid = sentrypeer-community-01
logfile = /data/sentrypeer/log/sentrypeer.json

[SURICATA]
suricata = false
nodeid = suricata-community-01
logfile = /data/suricata/log/eve.json

[TANNER]
tanner = true
nodeid = tanner-community-01
logfile = /data/tanner/log/tanner_report.json

[WORDPOT]
wordpot = true
nodeid = wordpot-community-01
logfile = /data/wordpot/log/wordpot.log
@@ -21,7 +21,7 @@ services:
- EWS_HPFEEDS_SECRET=secret
- EWS_HPFEEDS_TLSCERT=false
- EWS_HPFEEDS_FORMAT=json
image: "dtagdevsec/ewsposter:24.04"
image: "ghcr.io/telekom-security/ewsposter:24.04.1"
volumes:
- $HOME/tpotce/data:/data
- $HOME/tpotce/data/ews/conf/ews.ip:/opt/ewsposter/ews.ip
@@ -42,4 +42,4 @@ STOPSIGNAL SIGINT
ENV PYTHONPATH /opt/fatt
WORKDIR /opt/fatt
USER fatt:fatt
CMD python3 fatt.py -i $(/sbin/ip address show | /usr/bin/awk '/inet.*brd/{ print $NF; exit }') --print_output --json_logging -o log/fatt.log
CMD python3 fatt.py -i $(ip route | grep "^default" | awk '{ print $5 }') --print_output --json_logging -o log/fatt.log
@@ -14,6 +14,6 @@ services:
- NET_ADMIN
- SYS_NICE
- NET_RAW
image: "dtagdevsec/fatt:24.04"
image: "ghcr.io/telekom-security/fatt:24.04.1"
volumes:
- $HOME/tpotce/data/fatt/log:/opt/fatt/log
@@ -16,7 +16,8 @@ go build -o galah ./cmd/galah
EOF
#
FROM alpine:3.20
RUN apk --no-cache -U upgrade
RUN apk --no-cache -U upgrade && \
apk --no-cache -U add openssl
COPY --from=builder /opt/galah/ /opt/galah/
#
# Start galah
@@ -17,7 +17,7 @@ services:
- "443:443"
- "8443:8443"
- "8080:8080"
image: dtagdevsec/galah:24.04
image: ghcr.io/telekom-security/galah:24.04.1
environment:
LLM_PROVIDER: "ollama"
LLM_SERVER_URL: "http://ollama.local:11434"
@@ -40,4 +40,4 @@ RUN apk -U --no-cache upgrade && \
# Start glutton
WORKDIR /opt/glutton
USER 2000:2000
CMD exec bin/server -d true -i $(/sbin/ip address show | /usr/bin/awk '/inet.*brd/{ print $NF; exit }') -l /var/log/glutton/glutton.log > /dev/null 2>&1
CMD exec bin/server -d true -i $(ip route | grep "^default" | awk '{ print $5 }') -l /var/log/glutton/glutton.log > /dev/null 2>&1
@@ -48,7 +48,8 @@ RUN apk --no-cache -U upgrade && \
cd /opt/ && \
git clone https://github.com/qeeqbox/honeypots && \
cd honeypots && \
git checkout 238c6732d28b36ab88f2008361bafc8aef054755 && \
# git checkout 238c6732d28b36ab88f2008361bafc8aef054755 && \
git checkout 3f52129fbe9a712e86c54207ceb088c3b2c2af17 && \
pip3 install --break-system-packages --no-cache-dir . && \
setcap cap_net_bind_service=+ep $(readlink -f $(type -P python3)) && \
#
@@ -1,4 +1,4 @@
FROM ubuntu:24.10
FROM ubuntu:22.04
ENV DEBIAN_FRONTEND noninteractive
#
# Include dist
@@ -28,7 +28,7 @@ RUN apt-get update && \
# Install honeytrap from source
git clone https://github.com/t3chn0m4g3/honeytrap /root/honeytrap && \
cd /root/honeytrap/ && \
git checkout 9b63a5452ed3d5004ca714a7965218c25226a504 && \
# git checkout 9aa4f734f2ea2f0da790b02d79afe18204a23982 && \
autoreconf -vfi && \
./configure \
--with-stream-mon=nfq \
@@ -12,7 +12,7 @@ services:
network_mode: "host"
cap_add:
- NET_ADMIN
image: "dtagdevsec/honeytrap:24.04"
image: "ghcr.io/telekom-security/honeytrap:24.04.1"
read_only: true
volumes:
- $HOME/tpotce/data/honeytrap/attacks:/opt/honeytrap/var/attacks
@@ -1,9 +1,9 @@
FROM ubuntu:24.04
ENV DEBIAN_FRONTEND noninteractive
ENV DEBIAN_FRONTEND=noninteractive
#
# Install packages
RUN apt-get update -y && \
apt-get upgrade -y && \
apt-get upgrade -y && \
apt-get install -y \
build-essential \
cargo \
@@ -21,7 +21,7 @@ RUN apt-get update -y && \
python3-dev \
rust-all && \
pip3 install --no-cache-dir --break-system-packages \
poetry \
poetry==1.8.3 \
pycurl && \
#
# Install log4pot from GitHub and setup
@@ -31,11 +31,12 @@ RUN apt-get update -y && \
cd Log4Pot && \
git checkout 5002b1fe0f82359ef32dbc3a899e8a701dc3256e && \
sed -i 's#"type": logtype,#"reason": logtype,#g' log4pot-server.py && \
poetry --no-cache install && \
setcap cap_net_bind_service=+ep $(readlink -f $(which python3)) && \
rm poetry.lock && \
poetry --no-cache --without=dev install && \
setcap cap_net_bind_service=+ep $(readlink -f $(which python3)) && \
#
# Setup user, groups and configs
addgroup --gid 2000 log4pot && \
addgroup --gid 2000 log4pot && \
adduser --system --no-create-home --shell /bin/bash -uid 2000 --disabled-password --disabled-login -gid 2000 log4pot && \
chown log4pot:log4pot -R /opt/Log4Pot && \
#
@@ -20,7 +20,7 @@ services:
- "8080:8080"
- "9200:8080"
- "25565:8080"
image: "dtagdevsec/log4pot:24.04"
image: "ghcr.io/telekom-security/log4pot:24.04.1"
read_only: true
volumes:
- $HOME/tpotce/data/log4pot/log:/var/log/log4pot/log
@@ -1,17 +1,19 @@
### elasticvue build is currently broken, issue has been opened https://github.com/cars10/elasticvue/issues/215
### in the meantime we are using the older dist, if not resolved we need to find a different solution
FROM node:20-alpine AS builder
FROM node:22.5.1-alpine AS builder
#
# Prep and build Elasticvue
RUN apk -U --no-cache add git && \
git clone https://github.com/cars10/elasticvue -b v1.1.0 /opt/src && \
git clone https://github.com/cars10/elasticvue -b v1.7.0 /opt/src && \
# We need to adjust consts.ts so the user has connection suggestion for reverse proxied ES
sed -i "s#export const DEFAULT_CLUSTER_URI = 'http://localhost:9200'#export const DEFAULT_CLUSTER_URI = window.location.origin + '/es'#g" /opt/src/src/consts.ts && \
sed -i 's#href="/images/logo/favicon.ico"#href="images/logo/favicon.ico"#g' /opt/src/index.html && \
mkdir /opt/app && \
cd /opt/app && \
corepack enable && \
cp /opt/src/package.json . && \
cp /opt/src/yarn.lock . && \
cp /opt/src/.yarnrc.yml . && \
yarn install && \
cp -R /opt/src/* . && \
export VITE_APP_BUILD_MODE=docker && \
docker/nginx/dist/html/esvue/esvue.tgz (binary, vendored; file not shown)
@@ -20,7 +20,7 @@ services:
# ports:
# - "64297:64297"
# - "127.0.0.1:64304:64304"
image: "dtagdevsec/nginx:24.04"
image: "ghcr.io/telekom-security/nginx:24.04.1"
read_only: true
volumes:
- $HOME/tpotce/data/nginx/cert/:/etc/nginx/cert/:ro
@@ -33,4 +33,4 @@ RUN apk --no-cache -U upgrade && \
# Start p0f
WORKDIR /opt/p0f
USER p0f:p0f
CMD exec /opt/p0f/p0f -u p0f -j -o /var/log/p0f/p0f.json -i $(/sbin/ip address show | /usr/bin/awk '/inet.*brd/{ print $NF; exit }') > /dev/null
CMD exec /opt/p0f/p0f -u p0f -j -o /var/log/p0f/p0f.json -i $(ip route | grep "^default" | awk '{ print $5 }') > /dev/null
@@ -8,7 +8,7 @@ services:
# cpu_count: 1
# cpus: 0.75
network_mode: "host"
image: "dtagdevsec/p0f:24.04"
image: "ghcr.io/telekom-security/p0f:24.04.1"
read_only: true
volumes:
- $HOME/tpotce/data/p0f/log:/var/log/p0f
@@ -1,5 +1,8 @@
FROM alpine:edge
#
# Include dist
COPY dist/ /root/dist/
#
# Install packages
RUN apk -U upgrade --no-cache && \
apk -U add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/testing \
@@ -8,7 +11,9 @@ RUN apk -U upgrade --no-cache && \
# Setup user, groups and configs
mkdir -p /var/log/sentrypeer && \
addgroup -g 2000 sentrypeer && \
adduser -S -H -s /bin/ash -u 2000 -D -g 2000 sentrypeer && \
adduser -S -s /bin/ash -u 2000 -D -g 2000 sentrypeer && \
mkdir -p /home/sentrypeer/.config/sentrypeer && \
cp /root/dist/default-config.toml /home/sentrypeer/.config/sentrypeer && \
chown -R sentrypeer:sentrypeer /usr/bin/sentrypeer && \
#
# Clean up
@@ -18,4 +23,5 @@ RUN apk -U upgrade --no-cache && \
# Set workdir and start sentrypeer
STOPSIGNAL SIGKILL
USER sentrypeer:sentrypeer
WORKDIR /var/log/sentrypeer/
CMD /usr/bin/sentrypeer -jar -f /var/log/sentrypeer/sentrypeer.db -l /var/log/sentrypeer/sentrypeer.json
docker/sentrypeer/dist/default-config.toml (new file, vendored, 3 lines)
@@ -0,0 +1,3 @@
cert = "cert.pem"
key = "key.pem"
tls_listen_address = "0.0.0.0:5061"
@@ -22,7 +22,7 @@ services:
- "5060:5060/udp"
- "5060:5060/tcp"
# - "127.0.0.1:8082:8082"
image: "dtagdevsec/sentrypeer:24.04"
image: "ghcr.io/telekom-security/sentrypeer:testing"
read_only: true
volumes:
- $HOME/tpotce/data/sentrypeer/log:/var/log/sentrypeer
@@ -37,4 +37,4 @@ RUN apk --no-cache -U upgrade && \
#
# Start suricata
STOPSIGNAL SIGINT
CMD SURICATA_CAPTURE_FILTER=$(update.sh $OINKCODE) && exec suricata -v -F $SURICATA_CAPTURE_FILTER -i $(/sbin/ip address show | /usr/bin/awk '/inet.*brd/{ print $NF; exit }')
CMD SURICATA_CAPTURE_FILTER=$(update.sh $OINKCODE) && exec suricata -v -F $SURICATA_CAPTURE_FILTER -i $(ip route | grep "^default" | awk '{ print $5 }')
@@ -135,4 +135,4 @@ RUN apk -U add \
#
# Start suricata
STOPSIGNAL SIGINT
CMD SURICATA_CAPTURE_FILTER=$(update.sh $OINKCODE) && exec suricata -v -F $SURICATA_CAPTURE_FILTER -i $(/sbin/ip address show | /usr/bin/awk '/inet.*brd/{ print $NF; exit }')
CMD SURICATA_CAPTURE_FILTER=$(update.sh $OINKCODE) && exec suricata -v -F $SURICATA_CAPTURE_FILTER -i $(ip route | grep "^default" | awk '{ print $5 }')
docker/suricata/dist/suricata.yaml (vendored, 2 changes)
@@ -1005,7 +1005,7 @@ app-layer:
detection-ports:
dp: 53
udp:
enabled: yes
enabled: no
detection-ports:
dp: 53
http:
@@ -15,6 +15,6 @@ services:
- NET_ADMIN
- SYS_NICE
- NET_RAW
image: "dtagdevsec/suricata:24.04"
image: "ghcr.io/telekom-security/suricata:24.04.1"
volumes:
- $HOME/tpotce/data/suricata/log:/var/log/suricata
@@ -1,4 +1,4 @@
FROM alpine:edge
FROM alpine:3.20
#
# Include dist
COPY dist/ /opt/tpot/
@@ -13,6 +13,7 @@ RUN apk --no-cache -U upgrade && \
conntrack-tools \
cracklib \
curl \
envsubst \
ethtool \
figlet \
git \
@@ -26,13 +27,13 @@ RUN apk --no-cache -U upgrade && \
openssl \
pigz \
tar \
uuidgen && \
apk --no-cache -U add --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community \
yq && \
uuidgen \
yq-go && \
#
# Setup user
# Setup user, logrotate permissions
addgroup -g 2000 tpot && \
adduser -S -s /bin/ash -u 2000 -D -g 2000 tpot && \
chmod 0600 /opt/tpot/etc/logrotate/logrotate.* && \
#
# Clean up
apk del --purge git && \
docker/tpotinit/dist/bin/clean.sh (vendored, 17 changes)
@@ -10,6 +10,9 @@ myPIGZ=$(which pigz)

# Set persistence
myPERSISTENCE=$1
myPERSISTENCE_CYCLES=$2
myPERSISTENCE_CYCLES="${myPERSISTENCE_CYCLES:=30}"
export myPERSISTENCE_CYCLES

# Let's create a function to check if folder is empty
fuEMPTY () {
@@ -18,6 +21,15 @@ fuEMPTY () {
echo $(ls $myFOLDER | wc -l)
}

# Let's create a function to setup logrotate config
fuLOGROTATECONF () {
local myLOGROTATECONF="/opt/tpot/etc/logrotate/logrotate.conf"
local myLOGROTATETEMP="/opt/tpot/etc/logrotate/logrotate.template"
envsubst < $myLOGROTATETEMP > $myLOGROTATECONF
chown root:root $myLOGROTATECONF
chmod 0600 $myLOGROTATECONF
}

# Let's create a function to rotate and compress logs
fuLOGROTATE () {
local mySTATUS="/data/tpot/etc/logrotate/status"
@@ -43,6 +55,9 @@ fuLOGROTATE () {
local myTANNERF="/data/tanner/files/"
local myTANNERFTGZ="/data/tanner/files.tgz"

# Setup logrotate config
fuLOGROTATECONF

# Ensure correct permissions and ownerships for logrotate to run without issues
chmod 770 /data/ -R
chown tpot:tpot /data -R
@@ -408,7 +423,7 @@ fi
# Check persistence, if enabled compress and rotate logs
if [ "$myPERSISTENCE" = "on" ];
then
echo "Persistence enabled, now rotating and compressing logs."
echo "Persistence enabled for $myPERSISTENCE_CYCLES cycles, now rotating and compressing logs."
fuLOGROTATE
fi
docker/tpotinit/dist/bin/genuser.sh (vendored, 3 changes)
@@ -13,6 +13,9 @@ __ __ _ _ _ [ T-Pot ]
EOF
)

# Add trap to ensure SIGINT, SIGTERM works
trap 'echo; echo; echo "# User interrupt. Exiting."; exit 1' SIGINT

# Generate T-Pot WebUser
echo "$myPW"
echo
docker/tpotinit/dist/entrypoint.sh (vendored, 19 changes)
@@ -114,6 +114,20 @@ validate_ip_or_domain() {
fi
}

# Function to validate if TPOT_PERSISTENCE_CYCLES is set and valid
validate_tpot_persistence_cycles() {
# Check if the variable is unset, empty, not a number, or out of the valid range (1–999)
if [[ -z "$TPOT_PERSISTENCE_CYCLES" ]] ||
[[ ! "$TPOT_PERSISTENCE_CYCLES" =~ ^[0-9]+$ ]] ||
(( TPOT_PERSISTENCE_CYCLES < 1 )) ||
(( TPOT_PERSISTENCE_CYCLES > 999 )); then

# Set to default value
echo "WARNING! TPOT_PERSISTENCE_CYCLES is not set, invalid or out of bounds. Using default of 30 cycles."
TPOT_PERSISTENCE_CYCLES=30
fi
}

create_web_users() {
echo
echo "# Creating passwd files based on T-Pot .env config ..."
@@ -203,6 +217,9 @@ for var in TPOT_BLACKHOLE TPOT_PERSISTENCE TPOT_ATTACKMAP_TEXT TPOT_ATTACKMAP_TE
validate_format "$var"
done

# Validate TPOT_PERSISTENCE_CYCLES
validate_tpot_persistence_cycles

if [ "${TPOT_TYPE}" == "HIVE" ];
then
# No $ for check_var
@@ -242,7 +259,7 @@ if [ -f "/data/uuid" ];
echo
echo "# Data folder is present, just cleaning up, please be patient ..."
echo
/opt/tpot/bin/clean.sh "${TPOT_PERSISTENCE}"
/opt/tpot/bin/clean.sh "${TPOT_PERSISTENCE}" "${TPOT_PERSISTENCE_CYCLES}"
echo
else
figlet "Setting up ..."
docker/tpotinit/dist/etc/logrotate/logrotate.template (new file, vendored, 78 lines)
@@ -0,0 +1,78 @@
/data/adbhoney/log/*.json
/data/adbhoney/log/*.log
/data/beelzebub/log/*.json
/data/ciscoasa/log/ciscoasa.log
/data/citrixhoneypot/logs/server.log
/data/conpot/log/conpot*.json
/data/conpot/log/conpot*.log
/data/cowrie/log/cowrie.json
/data/cowrie/log/cowrie-textlog.log
/data/cowrie/log/lastlog.txt
/data/ddospot/log/*.log
/data/dicompot/log/dicompot.log
/data/dionaea/log/dionaea.json
/data/dionaea/log/dionaea.sqlite
/data/dionaea/dionaea-errors.log
/data/elasticpot/log/elasticpot.log
/data/elasticpot/log/elasticpot.json
/data/elk/log/*.log
/data/endlessh/log/*.log
/data/fatt/log/fatt.log
/data/galah/log/*.json
/data/glutton/log/*.log
/data/glutton/log/*.err
/data/go-pot/log/*.json
/data/h0neytr4p/log/*.json
/data/hellpot/log/*.log
/data/heralding/log/*.log
/data/heralding/log/*.csv
/data/heralding/log/*.json
/data/honeyaml/log/*.log
/data/honeypots/log/*.log
/data/honeysap/log/*.log
/data/honeytrap/log/*.log
/data/honeytrap/log/*.json
/data/ipphoney/log/*.json
/data/log4pot/log/*.log
/data/mailoney/log/*.log
/data/medpot/log/*.log
/data/miniprint/log/*.json
/data/nginx/log/*.log
/data/p0f/log/p0f.json
/data/redishoneypot/log/*.log
/data/sentrypeer/log/*.json
/data/suricata/log/*.log
/data/suricata/log/*.json
/data/tanner/log/*.json
/data/wordpot/log/*.log
{
su tpot tpot
copytruncate
create 770 tpot tpot
daily
missingok
notifempty
rotate $myPERSISTENCE_CYCLES
compress
compresscmd /usr/bin/pigz
}

/data/adbhoney/downloads.tgz
/data/cowrie/log/ttylogs.tgz
/data/cowrie/downloads.tgz
/data/dionaea/bistreams.tgz
/data/dionaea/binaries.tgz
/data/h0neytr4p/payloads.tgz
/data/honeytrap/attacks.tgz
/data/honeytrap/downloads.tgz
/data/miniprint/uploads.tgz
/data/tanner/files.tgz
{
su tpot tpot
copytruncate
create 770 tpot tpot
daily
missingok
notifempty
rotate $myPERSISTENCE_CYCLES
}
@@ -8,7 +8,7 @@ services:
- $HOME/tpotce/.env
restart: "no"
stop_grace_period: 60s
image: "dtagdevsec/tpotinit:24.04"
image: "ghcr.io/telekom-security/tpotinit:24.04.1"
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- $HOME/tpotce/data:/data
@@ -14,7 +14,7 @@ services:
- wordpot_local
ports:
- "80:80"
image: "dtagdevsec/wordpot:24.04"
image: "ghcr.io/telekom-security/wordpot:24.04.1"
read_only: true
volumes:
- $HOME/tpotce/data/wordpot/log:/opt/wordpot/logs/
env.example (10 changes)
@@ -40,11 +40,19 @@ TPOT_BLACKHOLE=DISABLED
# if you just do not need any of the logfiles.
TPOT_PERSISTENCE=on

# T-Pot Persistence Cycles
# <1-999>: Set the number of T-Pot restart cycles for logrotate.
# Be mindful of this setting as the logs will use up a lot of available disk space.
# In case the setting is invalid, T-Pot will default to 30 cycles.
# Remember to adjust the Elastic Search Lifecycle Policy (https://github.com/telekom-security/tpotce/?tab=readme-ov-file#log-persistence)
# as this setting only accounts for the honeypot logs in the ~/tpotce/data folder.
TPOT_PERSISTENCE_CYCLES=30

# T-Pot Type
# HIVE: This is the default and offers everything to connect T-Pot sensors.
# SENSOR: This needs to be used when running a sensor. Be aware to adjust all other
# settings as well.
# 1. You will need to copy compose/sensor.yml to ./docker-comopose.yml
# 1. You will need to copy compose/sensor.yml to ./docker-compose.yml
# 2. From HIVE host you will need to copy ~/tpotce/data/nginx/cert/nginx.crt to
# your SENSOR host to ~/tpotce/data/hive.crt
# 3. On HIVE: Create a web user per SENSOR on HIVE and provide credentials below
genuser.sh (13 changes)
@@ -1,2 +1,13 @@
#!/usr/bin/env bash
docker run -v $HOME/tpotce:/data --entrypoint bash -it -u $(id -u):$(id -g) dtagdevsec/tpotinit:24.04.1 "/opt/tpot/bin/genuser.sh"
TPOT_REPO=$(grep -E "^TPOT_REPO" .env | cut -d "=" -f2-)
TPOT_VERSION=$(grep -E "^TPOT_VERSION" .env | cut -d "=" -f2-)
USER=$(id -u)
USERNAME=$(id -un)
GROUP=$(id -g)
echo "### Repository: ${TPOT_REPO}"
echo "### Version Tag: ${TPOT_VERSION}"
echo "### Your User Name: ${USERNAME}"
echo "### Your User ID: ${USER}"
echo "### Your Group ID: ${GROUP}"
echo
docker run -v $HOME/tpotce:/data --entrypoint "bash" -it -u "${USER}":"${GROUP}" "${TPOT_REPO}"/tpotinit:"${TPOT_VERSION}" "/opt/tpot/bin/genuser.sh"
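The rewritten genuser.sh now resolves the image repository and tag from .env instead of hardcoding them, so it is presumably meant to be run from the T-Pot checkout where that file lives, for example:

cd ~/tpotce && ./genuser.sh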
@@ -145,32 +145,6 @@
- "Raspbian"
- "Ubuntu"

- name: Install exa (Debian, Raspbian, Ubuntu)
package:
name:
- exa
state: latest
update_cache: yes
register: exa_install_result
ignore_errors: yes
when: ansible_distribution in ["Debian", "Raspbian", "Ubuntu"]
tags:
- "Debian"
- "Raspbian"
- "Ubuntu"

- name: Install eza (if exa failed)
package:
name:
- eza
state: latest
update_cache: yes
when: exa_install_result is failed
tags:
- "Debian"
- "Raspbian"
- "Ubuntu"

- name: Install grc from remote repo (AlmaLinux, Rocky)
ansible.builtin.dnf:
name: 'https://github.com/kriipke/grc/releases/download/1.13.8/grc-1.13.8-1.el7.noarch.rpm'
@@ -189,7 +163,6 @@
- cracklib
- curl
- dnf-plugins-core
- exa
- git
- grc
- htop
@@ -200,7 +173,6 @@
- wget
state: latest
update_cache: yes
register: exa_install_result
when: ansible_distribution in ["AlmaLinux", "Rocky"]
tags:
- "AlmaLinux"
@@ -225,7 +197,6 @@
- cronie
- curl
- dnf-plugins-core
- exa
- git
- grc
- htop
@@ -236,7 +207,6 @@
- wget
state: latest
update_cache: yes
register: exa_install_result
when: ansible_distribution in ["Fedora"]
tags:
- "Fedora"
@@ -264,7 +234,6 @@
- ca-certificates
- cracklib
- curl
- exa
- git
- grc
- htop
@@ -272,7 +241,6 @@
- wget
state: latest
update_cache: yes
register: exa_install_result
when: ansible_distribution in ["openSUSE Tumbleweed"]
tags:
- "openSUSE Tumbleweed"
@@ -356,7 +324,7 @@
shell: |
if [ "$(dnf repolist docker-ce-stable)" == "" ];
then
dnf -y config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
dnf -y config-manager addrepo --from-repofile=https://download.docker.com/linux/fedora/docker-ce.repo
fi
when: ansible_distribution in ["Fedora"]
tags:
@@ -729,7 +697,7 @@
when: ansible_distribution in ["AlmaLinux", "Debian", "Fedora", "openSUSE Tumbleweed", "Raspbian", "Rocky", "Ubuntu"]
failed_when: ansible_user_id == "root"

- name: Add aliases with exa (All)
- name: Add aliases (All)
blockinfile:
path: ~/.bashrc
block: |
@@ -737,13 +705,10 @@
alias dpsw='watch -c bash -ic dps'
alias mi='micro'
alias sudo='sudo '
alias ls='exa'
alias ll='exa -hlg'
alias la='exa -hlag'
marker: "# {mark} ANSIBLE MANAGED BLOCK"
insertafter: EOF
state: present
when: exa_install_result is succeeded and ansible_distribution in ["AlmaLinux", "Debian", "Fedora", "openSUSE Tumbleweed", "Raspbian", "Rocky", "Ubuntu"]
when: ansible_distribution in ["AlmaLinux", "Debian", "Fedora", "openSUSE Tumbleweed", "Raspbian", "Rocky", "Ubuntu"]
tags:
- "AlmaLinux"
- "Debian"
@@ -753,26 +718,6 @@
- "Rocky"
- "Ubuntu"

- name: Add aliases with eza (Debian, Raspbian, Ubuntu)
blockinfile:
path: ~/.bashrc
block: |
alias dps='grc --colour=on docker ps -f status=running -f status=exited --format "table {{'{{'}}.Names{{'}}'}}\\t{{'{{'}}.Status{{'}}'}}\\t{{'{{'}}.Ports{{'}}'}}" | sort'
alias dpsw='watch -c bash -ic dps'
alias mi='micro'
alias sudo='sudo '
alias ls='eza'
alias ll='eza -hlg'
alias la='eza -hlag'
marker: "# {mark} ANSIBLE MANAGED BLOCK"
insertafter: EOF
state: present
when: exa_install_result is failed and ansible_distribution in ["Debian", "Raspbian", "Ubuntu"]
tags:
- "Debian"
- "Raspbian"
- "Ubuntu"

- name: Clone / Update T-Pot repository (All)
git:
repo: 'https://github.com/telekom-security/tpotce'