Rework .env / env.example

Add more functions to customizer.py (improve port and service checks, improve user output)
Adjust docker-compose files
Marco Ochse
2024-02-13 19:02:40 +01:00
parent e7aecf560d
commit ef2f5b3f93
11 changed files with 774 additions and 72 deletions

compose/customizer.py (new file)

@@ -0,0 +1,172 @@
from datetime import datetime
import yaml

version = \
"""# T-Pot Service Builder v0.2

This script is intended as a kickstarter for users who want to build a customized docker-compose.yml for use with T-Pot.

T-Pot Service Builder will ask you for all the docker services you wish to include in your docker-compose configuration file.
The configuration file will be checked for conflicting ports as some of the honeypots are meant to work on certain ports.
You have to manually resolve the port conflicts or re-run the script and exclude the conflicting services / honeypots.

Review the resulting configuration and adjust the port settings to your needs by (un)commenting the corresponding lines in the config.
"""

header = \
"""# T-Pot: CUSTOM EDITION
# Generated on: {current_date}
"""

config_filename = "tpot_services.yml"
service_filename = "docker-compose-custom.yml"


def load_config(filename):
    try:
        with open(filename, 'r') as file:
            config = yaml.safe_load(file)
    except:
        print_color(f"Error: {filename} not found. Exiting.", "red")
        exit()
    return config


def prompt_service_include(service_name):
    while True:
        response = input(f"Include {service_name}? (y/n): ").strip().lower()
        if response in ['y', 'n']:
            return response == 'y'
        else:
            print("Please enter 'y' for yes or 'n' for no.")


def check_port_conflicts(selected_services):
    all_ports = {}
    conflict_ports = []

    for service_name, config in selected_services.items():
        ports = config.get('ports', [])
        for port in ports:
            # Split the port mapping and take only the host port part
            parts = port.split(':')
            host_port = parts[1] if len(parts) == 3 else (parts[0] if parts[1].isdigit() else parts[1])

            # Check for port conflict and associate it with the service name
            if host_port in all_ports:
                conflict_ports.append((service_name, host_port))
                if all_ports[host_port] not in [service for service, _ in conflict_ports]:
                    conflict_ports.append((all_ports[host_port], host_port))
            else:
                all_ports[host_port] = service_name

    if conflict_ports:
        print_color("Port conflict(s) detected:", "red")
        for service, port in conflict_ports:
            print_color(f"{service}: {port}", "red")
        return True

    return False


def print_color(text, color):
    colors = {
        "red": "\033[91m",
        "green": "\033[92m",
        "end": "\033[0m",
    }
    print(f"{colors[color]}{text}{colors['end']}")


def enforce_dependencies(selected_services, services):
    # If snare or any tanner services are selected, ensure all are enabled
    tanner_services = {'snare', 'tanner', 'tanner_redis', 'tanner_phpox', 'tanner_api'}
    if tanner_services.intersection(selected_services):
        print_color("For Snare / Tanner to work all required services have been added to your configuration.", "green")
        for service in tanner_services:
            selected_services[service] = services[service]

    # If kibana is enabled, also enable elasticsearch
    if 'kibana' in selected_services:
        selected_services['elasticsearch'] = services['elasticsearch']
        print_color("Kibana requires Elasticsearch which has been added to your configuration.", "green")

    # If spiderfoot is enabled, also enable nginx
    if 'spiderfoot' in selected_services:
        selected_services['nginx'] = services['nginx']
        print_color("Spiderfoot requires Nginx which has been added to your configuration.", "green")

    # If any map services are detected, enable logstash, elasticsearch, nginx, and all map services
    map_services = {'map_web', 'map_redis', 'map_data'}
    if map_services.intersection(selected_services):
        print_color("For Map to work all required services have been added to your configuration.", "green")
        for service in map_services.union({'elasticsearch', 'nginx'}):
            selected_services[service] = services[service]

    # honeytrap and glutton cannot be active at the same time, always vote in favor of honeytrap
    if 'honeytrap' in selected_services and 'glutton' in selected_services:
        # Remove glutton and notify
        del selected_services['glutton']
        print_color("Honeytrap and Glutton cannot be active at the same time. Glutton has been removed from your configuration.", "red")


def remove_unused_networks(selected_services, services, networks):
    used_networks = set()

    # Identify networks used by selected services
    for service_name in selected_services:
        service_config = services[service_name]
        if 'networks' in service_config:
            for network in service_config['networks']:
                used_networks.add(network)

    # Remove unused networks
    for network in list(networks):
        if network not in used_networks:
            del networks[network]


def main():
    config = load_config(config_filename)

    # Separate services and networks
    services = config['services']
    networks = config.get('networks', {})

    selected_services = {'tpotinit': services['tpotinit'],
                         'logstash': services['logstash']}  # Always include tpotinit and logstash

    for service_name, service_config in services.items():
        if service_name not in selected_services:  # Skip already included services
            if prompt_service_include(service_name):
                selected_services[service_name] = service_config

    # Enforce dependencies
    enforce_dependencies(selected_services, services)

    # Remove unused networks based on selected services
    remove_unused_networks(selected_services, services, networks)

    output_config = {
        'version': '3.9',
        'networks': networks,
        'services': selected_services,
    }

    current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    with open(service_filename, 'w') as file:
        file.write(header.format(current_date=current_date))
        yaml.dump(output_config, file, default_flow_style=False, sort_keys=False, indent=2)

    if check_port_conflicts(selected_services):
        print_color(f"Adjust the conflicting ports in the {service_filename} or re-run the script and select services that do not occupy the same port(s).", "red")
    else:
        print_color(f"Custom {service_filename} has been generated without port conflicts.", "green")
        print(f"Copy {service_filename} to tpotce/ and test with: docker compose -f {service_filename} up")
        print(f"If everything works, exit with CTRL-C and replace docker-compose.yml with the new config.")


if __name__ == "__main__":
    print(version)
    main()
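The trickiest part of check_port_conflicts() is extracting the host port from docker-compose port notation. A simplified standalone sketch of that parsing (sample mappings chosen for illustration; the script's own expression additionally special-cases non-numeric host parts):

    # Sketch: pull the host port out of "host:container" and "ip:host:container" mappings.
    def host_port_of(mapping: str) -> str:
        parts = mapping.split(':')
        # "ip:host:container" -> the middle part is the host port
        # "host:container"    -> the first part is the host port
        return parts[1] if len(parts) == 3 else parts[0]

    for mapping in ["8443:8443", "127.0.0.1:64298:9200", "5000:5000/udp"]:
        print(mapping, "->", host_port_of(mapping))
    # 8443:8443 -> 8443, 127.0.0.1:64298:9200 -> 64298, 5000:5000/udp -> 5000
    # (the /udp protocol suffix stays on the container side of the mapping)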

@@ -454,11 +454,8 @@ services:
     depends_on:
       tpotinit:
         condition: service_healthy
-    # SentryPeer offers to exchange bad actor data via DHT / P2P mode by setting the ENV to true (1)
-    # In some cases (i.e. internally deployed T-Pots) this might be confusing as SentryPeer will show
-    # the bad actors in its logs. Therefore this option is opt-in based.
-    # environment:
-    # - SENTRYPEER_PEER_TO_PEER=0
+    environment:
+      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
     networks:
       - sentrypeer_local
     ports:
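The ${VAR:-default} interpolation used above follows shell parameter expansion rules in Docker Compose: the default applies when the variable is unset or empty, whereas ${VAR-default} would apply only when it is unset. A hypothetical fragment to illustrate the effect:

    environment:
      # SENTRYPEER_PEER_TO_PEER missing from .env, or set to an empty string,
      # yields 0 in the container; any explicit value in the T-Pot .env wins.
      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0}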
@@ -600,9 +597,8 @@ services:
       tpotinit:
         condition: service_healthy
     environment:
-      # For ET Pro ruleset replace "OPEN" with your OINKCODE
-      - OINKCODE=OPEN
-      # Loading externel Rules from URL
+      - OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
+      # Loading external Rules from URL
       # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
     network_mode: "host"
     cap_add:
@@ -676,6 +672,11 @@ services:
       - nginx_local
     environment:
       - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
+      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
+      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
+      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
+    ports:
+      - "127.0.0.1:64305:64305"
     mem_limit: 2g
     image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
     pull_policy: ${TPOT_PULL_POLICY}
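These interpolations expect matching entries in the reworked .env / env.example. A hypothetical fragment showing the variables referenced in this diff (defaults as commented above; the actual contents are part of this commit's .env rework):

    SENTRYPEER_PEER_TO_PEER=0
    OINKCODE=OPEN
    TPOT_TYPE=HIVE
    TPOT_HIVE_USER=
    TPOT_HIVE_IP=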

compose/mobile.yml (new file)

@@ -0,0 +1,608 @@
# T-Pot: MOBILE
# Note: This docker compose file has been adjusted to limit the number of tools, services and honeypots to run
# T-Pot on a Raspberry Pi 4 (8GB of RAM).
# The standard docker compose file should work mostly fine (depending on traffic) if you do not enable a
# desktop environment such as LXDE and meet the minimum requirements of 8GB RAM.
version: '3.9'

networks:
  ciscoasa_local:
  citrixhoneypot_local:
  conpot_local_IEC104:
  conpot_local_ipmi:
  conpot_local_kamstrup_382:
  cowrie_local:
  dicompot_local:
  dionaea_local:
  elasticpot_local:
  heralding_local:
  ipphoney_local:
  log4pot_local:
  mailoney_local:
  medpot_local:
  redishoneypot_local:
  sentrypeer_local:
  tanner_local:
  ewsposter_local:

services:

#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################

# T-Pot Init Service
  tpotinit:
    container_name: tpotinit
    env_file:
      - .env
    restart: always
    tmpfs:
      - /tmp/etc:uid=2000,gid=2000
      - /tmp/:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
      - ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
      - ${TPOT_DATA_PATH}:/data

##################
#### Honeypots
##################

# Ciscoasa service
  ciscoasa:
    container_name: ciscoasa
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp/ciscoasa:uid=2000,gid=2000
    networks:
      - ciscoasa_local
    ports:
      - "5000:5000/udp"
      - "8443:8443"
    image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa

# CitrixHoneypot service
  citrixhoneypot:
    container_name: citrixhoneypot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - citrixhoneypot_local
    ports:
      - "443:443"
    image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/citrixhoneypot/logs:/opt/citrixhoneypot/logs

# Conpot IEC104 service
  conpot_IEC104:
    container_name: conpot_iec104
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
      - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
      - CONPOT_TEMPLATE=IEC104
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_IEC104
    ports:
      - "161:161/udp"
      - "2404:2404"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot ipmi
  conpot_ipmi:
    container_name: conpot_ipmi
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
      - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
      - CONPOT_TEMPLATE=ipmi
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_ipmi
    ports:
      - "623:623/udp"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot kamstrup_382
  conpot_kamstrup_382:
    container_name: conpot_kamstrup_382
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
      - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
      - CONPOT_TEMPLATE=kamstrup_382
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_kamstrup_382
    ports:
      - "1025:1025"
      - "50100:50100"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp/cowrie:uid=2000,gid=2000
      - /tmp/cowrie/data:uid=2000,gid=2000
    networks:
      - cowrie_local
    ports:
      - "22:22"
      - "23:23"
    image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
      - ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
      - ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
      - ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty

# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
  dicompot:
    container_name: dicompot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - dicompot_local
    ports:
      - "11112:11112"
    image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
      # - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images

# Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - dionaea_local
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "81:81"
      - "135:135"
      # - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "3306:3306"
      # - "5060:5060"
      # - "5060:5060/udp"
      # - "5061:5061"
      - "27017:27017"
    image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
      - ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
      - ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

# ElasticPot service
  elasticpot:
    container_name: elasticpot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log

# Heralding service
  heralding:
    container_name: heralding
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      # - "21:21"
      # - "22:22"
      # - "23:23"
      # - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      # - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      # - "3306:3306"
      # - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding

# Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp/honeytrap:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
      - ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
      - ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log

# Ipphoney service
  ipphoney:
    container_name: ipphoney
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - ipphoney_local
    ports:
      - "631:631"
    image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log

# Mailoney service
  mailoney:
    container_name: mailoney
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    environment:
      - HPFEEDS_SERVER=
      - HPFEEDS_IDENT=user
      - HPFEEDS_SECRET=pass
      - HPFEEDS_PORT=20000
      - HPFEEDS_CHANNELPREFIX=prefix
    networks:
      - mailoney_local
    ports:
      - "25:25"
    image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs

# Log4pot service
  log4pot:
    container_name: log4pot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp:uid=2000,gid=2000
    networks:
      - log4pot_local
    ports:
      # - "80:8080"
      # - "443:8080"
      - "8080:8080"
      # - "9200:8080"
      - "25565:8080"
    image: ${TPOT_REPO}/log4pot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/log4pot/log:/var/log/log4pot/log
      - ${TPOT_DATA_PATH}/log4pot/payloads:/var/log/log4pot/payloads

# Medpot service
  medpot:
    container_name: medpot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot

# Redishoneypot service
  redishoneypot:
    container_name: redishoneypot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - redishoneypot_local
    ports:
      - "6379:6379"
    image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot

# SentryPeer service
  sentrypeer:
    container_name: sentrypeer
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    environment:
      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
    networks:
      - sentrypeer_local
    ports:
      # - "4222:4222/udp"
      - "5060:5060/udp"
      # - "127.0.0.1:8082:8082"
    image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer

#### Snare / Tanner
## Tanner Redis Service
  tanner_redis:
    container_name: tanner_redis
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## PHP Sandbox service
  tanner_phpox:
    container_name: tanner_phpox
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## Tanner API Service
  tanner_api:
    container_name: tanner_api
    restart: always
    depends_on:
      - tanner_redis
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
    command: tannerapi

## Tanner Service
  tanner:
    container_name: tanner
    restart: always
    depends_on:
      - tanner_api
      - tanner_phpox
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    command: tanner
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
      - ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files

## Snare Service
  snare:
    container_name: snare
    restart: always
    depends_on:
      - tanner
    tty: true
    networks:
      - tanner_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/snare:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

##################
#### Tools
##################

#### ELK
## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - bootstrap.memory_lock=true
      - ES_JAVA_OPTS=-Xms2048m -Xmx2048m
      - ES_TMPDIR=/tmp
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 4g
    ports:
      - "127.0.0.1:64298:9200"
    image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    environment:
      - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
    ports:
      - "127.0.0.1:64305:64305"
    mem_limit: 2g
    image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
#### /ELK

# Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip

@@ -1,8 +1,8 @@
-# T-Pot: RASPBERRY_SHOWCASE
+# T-Pot: MOBILE
 # Note: This docker compose file has been adjusted to limit the number of tools, services and honeypots to run
-# T-Pot on a Raspberry Pi 4 (8GB of RAM, SSD).
+# T-Pot on a Raspberry Pi 4 (8GB of RAM).
 # The standard docker compose file should work mostly fine (depending on traffic) if you do not enable a
-# desktop environment such as LXDE and meet the minimum requirements of 8GB and a SSD.
+# desktop environment such as LXDE and meet the minimum requirements of 8GB RAM.
 
 version: '3.9'
 networks:
@@ -433,11 +433,8 @@ services:
     depends_on:
       logstash:
         condition: service_healthy
-    # SentryPeer offers to exchange bad actor data via DHT / P2P mode by setting the ENV to true (1)
-    # In some cases (i.e. internally deployed T-Pots) this might be confusing as SentryPeer will show
-    # the bad actors in its logs. Therefore this option is opt-in based.
-    # environment:
-    # - SENTRYPEER_PEER_TO_PEER=0
+    environment:
+      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
     networks:
       - sentrypeer_local
     ports:
@@ -574,6 +571,11 @@ services:
         condition: service_healthy
     environment:
       - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
+      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
+      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
+      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
+    ports:
+      - "127.0.0.1:64305:64305"
     mem_limit: 2g
     image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
     pull_policy: ${TPOT_PULL_POLICY}

@@ -473,11 +473,8 @@ services:
     depends_on:
       tpotinit:
         condition: service_healthy
-    # SentryPeer offers to exchange bad actor data via DHT / P2P mode by setting the ENV to true (1)
-    # In some cases (i.e. internally deployed T-Pots) this might be confusing as SentryPeer will show
-    # the bad actors in its logs. Therefore this option is opt-in based.
-    # environment:
-    # - SENTRYPEER_PEER_TO_PEER=0
+    environment:
+      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
     networks:
       - sentrypeer_local
     ports:
@@ -615,9 +612,8 @@ services:
       tpotinit:
         condition: service_healthy
     environment:
-      # For ET Pro ruleset replace "OPEN" with your OINKCODE
-      - OINKCODE=OPEN
-      # Loading externel Rules from URL
+      - OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
+      # Loading external Rules from URL
       # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
     network_mode: "host"
     cap_add:
@@ -645,8 +641,11 @@ services:
         condition: service_healthy
     environment:
       - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
-    env_file:
-      - .env
+      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
+      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
+      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
+    ports:
+      - "127.0.0.1:64305:64305"
     mem_limit: 2g
     image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
     pull_policy: ${TPOT_PULL_POLICY}

@@ -474,11 +474,8 @@ services:
     depends_on:
       tpotinit:
         condition: service_healthy
-    # SentryPeer offers to exchange bad actor data via DHT / P2P mode by setting the ENV to true (1)
-    # In some cases (i.e. internally deployed T-Pots) this might be confusing as SentryPeer will show
-    # the bad actors in its logs. Therefore this option is opt-in based.
-    # environment:
-    # - SENTRYPEER_PEER_TO_PEER=0
+    environment:
+      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
     networks:
       - sentrypeer_local
     ports:
@@ -616,9 +613,8 @@ services:
       tpotinit:
         condition: service_healthy
     environment:
-      # For ET Pro ruleset replace "OPEN" with your OINKCODE
-      - OINKCODE=OPEN
-      # Loading externel Rules from URL
+      - OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
+      # Loading external Rules from URL
       # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
     network_mode: "host"
     cap_add:
@@ -686,6 +682,9 @@ services:
         condition: service_healthy
     environment:
       - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
+      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
+      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
+      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
     ports:
       - "127.0.0.1:64305:64305"
     mem_limit: 2g

compose/tpot_services.yml (new file)

@@ -0,0 +1,956 @@
# T-Pot: Docker Services Base Configuration
# This is only to be used with the T-Pot Customizer
# Editing the contents may result in broken custom configurations!

networks:
  adbhoney_local:
  ciscoasa_local:
  citrixhoneypot_local:
  conpot_local_IEC104:
  conpot_local_guardian_ast:
  conpot_local_ipmi:
  conpot_local_kamstrup_382:
  cowrie_local:
  ddospot_local:
  dicompot_local:
  dionaea_local:
  elasticpot_local:
  endlessh_local:
  glutton_local:
  hellpot_local:
  heralding_local:
  honeypots_local:
  ipphoney_local:
  log4pot_local:
  mailoney_local:
  medpot_local:
  redishoneypot_local:
  sentrypeer_local:
  tanner_local:
  wordpot_local:
  spiderfoot_local:
  ewsposter_local:

services:

#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################

# T-Pot Init Service
  tpotinit:
    container_name: tpotinit
    env_file:
      - .env
    restart: always
    tmpfs:
      - /tmp/etc:uid=2000,gid=2000
      - /tmp/:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
      - ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
      - ${TPOT_DATA_PATH}:/data

##################
#### Honeypots
##################

# Adbhoney service
  adbhoney:
    container_name: adbhoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - adbhoney_local
    ports:
      - "5555:5555"
    image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
      - ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl

# Ciscoasa service
  ciscoasa:
    container_name: ciscoasa
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/ciscoasa:uid=2000,gid=2000
    networks:
      - ciscoasa_local
    ports:
      - "5000:5000/udp"
      - "8443:8443"
    image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa

# CitrixHoneypot service
  citrixhoneypot:
    container_name: citrixhoneypot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - citrixhoneypot_local
    ports:
      - "443:443"
    image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/citrixhoneypot/logs:/opt/citrixhoneypot/logs

# Conpot IEC104 service
  conpot_IEC104:
    container_name: conpot_iec104
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
      - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
      - CONPOT_TEMPLATE=IEC104
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_IEC104
    ports:
      - "161:161/udp"
      - "2404:2404"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot guardian_ast service
  conpot_guardian_ast:
    container_name: conpot_guardian_ast
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
      - CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
      - CONPOT_TEMPLATE=guardian_ast
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_guardian_ast
    ports:
      - "10001:10001"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot ipmi
  conpot_ipmi:
    container_name: conpot_ipmi
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
      - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
      - CONPOT_TEMPLATE=ipmi
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_ipmi
    ports:
      - "623:623/udp"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot kamstrup_382
  conpot_kamstrup_382:
    container_name: conpot_kamstrup_382
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
      - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
      - CONPOT_TEMPLATE=kamstrup_382
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_kamstrup_382
    ports:
      - "1025:1025"
      - "50100:50100"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/cowrie:uid=2000,gid=2000
      - /tmp/cowrie/data:uid=2000,gid=2000
    networks:
      - cowrie_local
    ports:
      - "22:22"
      - "23:23"
    image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
      - ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
      - ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
      - ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty

# Ddospot service
  ddospot:
    container_name: ddospot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ddospot_local
    ports:
      - "19:19/udp"
      - "53:53/udp"
      - "123:123/udp"
      # - "161:161/udp"
      - "1900:1900/udp"
    image: ${TPOT_REPO}/ddospot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ddospot/log:/opt/ddospot/ddospot/logs
      - ${TPOT_DATA_PATH}/ddospot/bl:/opt/ddospot/ddospot/bl
      - ${TPOT_DATA_PATH}/ddospot/db:/opt/ddospot/ddospot/db

# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
  dicompot:
    container_name: dicompot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dicompot_local
    ports:
      - "11112:11112"
    image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
      # - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images

# Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dionaea_local
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "81:81"
      - "135:135"
      # - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "3306:3306"
      # - "5060:5060"
      # - "5060:5060/udp"
      # - "5061:5061"
      - "27017:27017"
    image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
      - ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
      - ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

# ElasticPot service
  elasticpot:
    container_name: elasticpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log

# Endlessh service
  endlessh:
    container_name: endlessh
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - endlessh_local
    ports:
      - "22:2222"
    image: ${TPOT_REPO}/endlessh:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/endlessh/log:/var/log/endlessh

# Glutton service
  glutton:
    container_name: glutton
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /var/lib/glutton:uid=2000,gid=2000
      - /run:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/glutton:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/glutton/log:/var/log/glutton

# Hellpot service
  hellpot:
    container_name: hellpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - hellpot_local
    ports:
      - "80:8080"
    image: ${TPOT_REPO}/hellpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/hellpot/log:/var/log/hellpot

# Heralding service
  heralding:
    container_name: heralding
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      # - "21:21"
      # - "22:22"
      # - "23:23"
      # - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      # - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      # - "3306:3306"
      # - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding

# Honeypots service
  honeypots:
    container_name: honeypots
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp:uid=2000,gid=2000
    networks:
      - honeypots_local
    ports:
      - "21:21"
      - "22:22"
      - "23:23"
      - "25:25"
      - "53:53/udp"
      - "80:80"
      - "110:110"
      - "123:123"
      - "143:143"
      - "161:161"
      - "389:389"
      - "443:443"
      - "445:445"
      - "1080:1080"
      - "1433:1433"
      - "1521:1521"
      - "3306:3306"
      - "5060:5060"
      - "5432:5432"
      - "5900:5900"
      - "6379:6379"
      - "6667:6667"
      - "8080:8080"
      - "9200:9200"
      - "11211:11211"
    image: ${TPOT_REPO}/honeypots:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeypots/log:/var/log/honeypots

# Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/honeytrap:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
      - ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
      - ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log

# Ipphoney service
  ipphoney:
    container_name: ipphoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ipphoney_local
    ports:
      - "631:631"
    image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log

# Log4pot service
  log4pot:
    container_name: log4pot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp:uid=2000,gid=2000
    networks:
      - log4pot_local
    ports:
      - "80:8080"
      - "443:8080"
      - "8080:8080"
      - "9200:8080"
      - "25565:8080"
    image: ${TPOT_REPO}/log4pot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/log4pot/log:/var/log/log4pot/log
      - ${TPOT_DATA_PATH}/log4pot/payloads:/var/log/log4pot/payloads

# Mailoney service
  mailoney:
    container_name: mailoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - HPFEEDS_SERVER=
      - HPFEEDS_IDENT=user
      - HPFEEDS_SECRET=pass
      - HPFEEDS_PORT=20000
      - HPFEEDS_CHANNELPREFIX=prefix
    networks:
      - mailoney_local
    ports:
      - "25:25"
    image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs

# Medpot service
  medpot:
    container_name: medpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot

# Redishoneypot service
  redishoneypot:
    container_name: redishoneypot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - redishoneypot_local
    ports:
      - "6379:6379"
    image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot

# SentryPeer service
  sentrypeer:
    container_name: sentrypeer
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
    networks:
      - sentrypeer_local
    ports:
      # - "4222:4222/udp"
      - "5060:5060/udp"
      # - "127.0.0.1:8082:8082"
    image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer

#### Snare / Tanner
## Tanner Redis Service
  tanner_redis:
    container_name: tanner_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## PHP Sandbox service
  tanner_phpox:
    container_name: tanner_phpox
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## Tanner API service
  tanner_api:
    container_name: tanner_api
    restart: always
    depends_on:
      - tanner_redis
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
    command: tannerapi

## Tanner service
  tanner:
    container_name: tanner
    restart: always
    depends_on:
      - tanner_api
      - tanner_phpox
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    command: tanner
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
      - ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files

## Snare service
  snare:
    container_name: snare
    restart: always
    depends_on:
      - tanner
    tty: true
    networks:
      - tanner_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/snare:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

# Wordpot service
  wordpot:
    container_name: wordpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - wordpot_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/log

##################
#### NSM
##################

# Fatt service
  fatt:
    container_name: fatt
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log

# P0f service
  p0f:
    container_name: p0f
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f

# Suricata service
  suricata:
    container_name: suricata
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
      # Loading external Rules from URL
      # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata

##################
#### Tools
##################

#### ELK
## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - bootstrap.memory_lock=true
      - ES_JAVA_OPTS=-Xms2048m -Xmx2048m
      - ES_TMPDIR=/tmp
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 4g
    ports:
      - "127.0.0.1:64298:9200"
    image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

## Kibana service
  kibana:
    container_name: kibana
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    mem_limit: 1g
    ports:
      - "127.0.0.1:64296:5601"
    image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    environment:
      - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
    ports:
      - "127.0.0.1:64305:64305"
    mem_limit: 2g
    image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

## Map Redis Service
  map_redis:
    container_name: map_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## Map Web Service
  map_web:
    container_name: map_web
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - MAP_COMMAND=AttackMapServer.py
    stop_signal: SIGKILL
    tty: true
    ports:
      - "127.0.0.1:64299:64299"
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

## Map Data Service
  map_data:
    container_name: map_data
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    environment:
      - MAP_COMMAND=DataServer_v2.py
      - TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
      - TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
#### /ELK

# Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip

# Nginx service
  nginx:
    container_name: nginx
    restart: always
    environment:
      - COCKPIT=${COCKPIT}
      - TPOT_OSTYPE=${TPOT_OSTYPE}
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /var/tmp/nginx/client_body
      - /var/tmp/nginx/proxy
      - /var/tmp/nginx/fastcgi
      - /var/tmp/nginx/uwsgi
      - /var/tmp/nginx/scgi
      - /run
      - /var/lib/nginx/tmp:uid=100,gid=82
    network_mode: "host"
    ports:
      - "64297:64297"
    image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
      - ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/

# Spiderfoot service
  spiderfoot:
    container_name: spiderfoot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - spiderfoot_local
    ports:
      - "127.0.0.1:64303:8080"
    image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot
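Since tpot_services.yml is consumed only by the customizer, a quick sanity check is to load it exactly as customizer.py does. A minimal sketch, assuming the file sits in the working directory:

    # Parse tpot_services.yml and list the services the customizer will offer.
    import yaml

    with open("tpot_services.yml") as f:
        cfg = yaml.safe_load(f)
    print(len(cfg["services"]), "services:", ", ".join(sorted(cfg["services"])))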