Compare commits

6 commits:

- 932ad6b27c
- 02098f9b76
- 649163e06f
- 9d66bcb7d3
- dc4384d6ab
- 1af7cdcaa1
.env (file deleted, 121 lines)
@@ -1,121 +0,0 @@
# T-Pot config file. Do not remove.

###############################################
# T-Pot Base Settings - Adjust to your needs. #
###############################################

# Set Web username and password here, it will be used to create the Nginx password file nginxpasswd.
# Use 'htpasswd -n <username>' to create the WEB_USER if you want to manually deploy T-Pot
# Example: 'htpasswd -n tsec' will print tsec:$apr1$TdJGdsss$6yLsxPmOcXb2kaEZ7lKva0
# Copy the string and replace WEB_USER='tsec:$apr1$TdJGdsss$6yLsxPmOcXb2kaEZ7lKva0'
WEB_USER='change:me'

# T-Pot Blackhole
# ENABLED: T-Pot will download a db of known mass scanners and nullroute them
# Be aware, this will put T-Pot off the map for stealth reasons and
# you will get less traffic. Routes will stay active until reboot and will
# be re-added with every T-Pot start until disabled.
# DISABLED: This is the default and no stealth efforts are in place.
TPOT_BLACKHOLE=DISABLED

# T-Pot Persistence
# on: This is the default. T-Pot will keep the honeypot logfiles and rotate
# with logrotate for 30 days.
# off: This is recommended for Raspberry Pi or setups with weaker CPUs or
# if you just do not need any of the logfiles.
TPOT_PERSISTENCE=on

# T-Pot Type
# HIVE: This is the default and offers everything to connect T-Pot sensors.
# SENSOR: This needs to be used when running a sensor. Be aware to adjust all other
# settings as well.
# 1. You will need to copy compose/sensor.yml to ./docker-compose.yml
# 2. From HIVE host you will need to copy ~/tpotce/data/nginx/cert/nginx.crt to
# your SENSOR host to ~/tpotce/data/hive.crt
# 3. On HIVE: Create a web user per SENSOR on HIVE and provide credentials below
# Create credentials with 'htpasswd ~/tpotce/data/nginx/conf/lswebpasswd <username>'
# 4. On SENSOR: Provide username / password from (3) for TPOT_HIVE_USER as base64 encoded string:
# "echo -n 'username:password' | base64"
TPOT_TYPE=HIVE

# T-Pot Hive User (only relevant for SENSOR deployment)
# <empty>: This is empty by default.
# <base64 encoded string>: Provide a base64 encoded string "echo -n 'username:password' | base64"
# i.e. TPOT_HIVE_USER='dXNlcm5hbWU6cGFzc3dvcmQ='
TPOT_HIVE_USER=

# T-Pot Hive IP (only relevant for SENSOR deployment)
# <empty>: This is empty by default.
# <IP, FQDN>: This can be either an IP (i.e. 192.168.1.1) or a FQDN (i.e. foo.bar.local)
TPOT_HIVE_IP=

# T-Pot AttackMap Text Output
# ENABLED: This is the default and the docker container map_data will print events to the console.
# DISABLED: Printing events to the console is disabled.
TPOT_ATTACKMAP_TEXT=ENABLED

# T-Pot AttackMap Text Output Timezone
# UTC: (T-Pot default) This is usually the best option.
# Continent/City: In Linux you can check your timezone with 'readlink /etc/localtime' or
# see the full list here: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# Examples: America/New_York, Asia/Taipei, Australia/Melbourne, Europe/Athens, Europe/Berlin
TPOT_ATTACKMAP_TEXT_TIMEZONE=UTC

###################################################################################
# Honeypots / Tools settings
###################################################################################
# Some services / tools offer adjustments using ENVs which can be adjusted here.
###################################################################################

# SentryPeer P2P mode
# Exchange bad actor data via DHT / P2P mode by setting the ENV to true (1)
# In some cases (i.e. internally deployed T-Pots) this might be confusing as SentryPeer will show
# the bad actors in its logs. Therefore this option is opt-in based.
# 0: This is the default, P2P mode is disabled.
# 1: Enable P2P mode.
SENTRYPEER_PEER_TO_PEER=0

# Suricata ET Pro ruleset
# OPEN: This is the default and will use the ET Open ruleset
# OINKCODE: Replace OPEN with your Oinkcode to use the ET Pro ruleset
OINKCODE=OPEN


###################################################################################
# NEVER MAKE CHANGES TO THIS SECTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!! #
###################################################################################

# T-Pot Landing page provides Cockpit Link
COCKPIT=false

# docker.sock Path
TPOT_DOCKER_SOCK=/var/run/docker.sock

# docker compose .env
TPOT_DOCKER_ENV=./.env

# Docker-Compose file
TPOT_DOCKER_COMPOSE=./docker-compose.yml

# T-Pot Repo
# Depending on where you are located you may choose between DockerHub and GHCR
# dtagdevsec: This will use the DockerHub image registry
# ghcr.io/telekom-security: This will use the GitHub container registry
TPOT_REPO=ghcr.io/telekom-security

# T-Pot Version Tag
TPOT_VERSION=dev

# T-Pot Pull Policy
# always: (T-Pot default) Compose implementations SHOULD always pull the image from the registry.
# never: Compose implementations SHOULD NOT pull the image from a registry and SHOULD rely on the platform cached image.
# missing: Compose implementations SHOULD pull the image only if it's not available in the platform cache.
# build: Compose implementations SHOULD build the image. Compose implementations SHOULD rebuild the image if already present.
TPOT_PULL_POLICY=always

# T-Pot Data Path
TPOT_DATA_PATH=./data

# OSType (linux, mac, win)
# Most docker features are available on linux
TPOT_OSTYPE=linux
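For quick reference, the two credential formats described in the deleted file's comments can be produced like this (a minimal sketch, not part of the diff; assumes htpasswd from apache2-utils is installed, and the username/password are placeholders):

    # WEB_USER: htpasswd prints '<user>:<apr1-hash>' - paste the whole string into WEB_USER='...'
    htpasswd -n tsec

    # TPOT_HIVE_USER (SENSOR only): base64-encode the HIVE web credentials from step (3)
    echo -n 'username:password' | base64
    # -> dXNlcm5hbWU6cGFzc3dvcmQ=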
.gitignore (vendored, file deleted, 4 lines)
@@ -1,4 +0,0 @@
# Ignore data folder
data/
**/.DS_Store
.idea
CITATION.cff
@@ -2,7 +2,7 @@
 # Visit https://bit.ly/cffinit to generate yours today!
 
 cff-version: 1.2.0
-title: T-Pot DEV
+title: T-Pot
 message: >-
   If you use this software, please cite it using the
   metadata from this file.
@@ -38,6 +38,6 @@ keywords:
   - docker
   - elk
 license: GPL-3.0
-commit: unreleased, under heavy development
-version: 2x.yy.z
-date-released: '202x-yy-zz'
+commit: af09aa96b184f873ec83da4e7380762a0a5ce416
+version: 22.04.0
+date-released: '2022-04-12'
README.md
@@ -901,5 +901,3 @@ And from @robcowart (creator of [ElastiFlow](https://github.com/robcowart/elasti
 ***"#TPot is one of the most well put together turnkey honeypot solutions. It is a must-have for anyone wanting to analyze and understand the behavior of malicious actors and the threat they pose to your organization."***
 <br><br>
 **Thank you!**
-
-
SECURITY.md
@@ -3,8 +3,8 @@
 ## Supported Versions
 
 | Version | Supported |
-|---------| ------------------ |
-| 23.12.x | :white_check_mark: |
+| ------- | ------------------ |
+| 22.04.x | :white_check_mark: |
 
 
 ## Reporting a Vulnerability
(deleted file, 3 lines — installer wrapper; file name not captured)
@@ -1,3 +0,0 @@
#!/bin/bash
cd iso/installer
./install.sh "$@"
(modified file — Docker multi-arch build setup script; file name not captured)
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 
 # Got root?
 myWHOAMI=$(whoami)
@@ -11,13 +11,20 @@ fi
 # Only run with command switch
 if [ "$1" != "-y" ]; then
   echo "### Setting up docker for Multi Arch Builds."
-  echo "### Requires Docker packages from https://get.docker.com/"
   echo "### Use on x64 only!"
-  echo "### Run with -y if you fit the requirements!"
+  echo "### Run with -y to install!"
   echo
   exit
 fi
 
+# Main
+mkdir -p /root/.docker/cli-plugins/
+cd /root/.docker/cli-plugins/
+wget https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-amd64 -O docker-buildx
+chmod +x docker-buildx
+
+docker buildx ls
+
 # We need to create a new builder as the default one cannot handle multi-arch builds
 # https://docs.docker.com/desktop/multi-arch/
 docker buildx create --name mybuilder
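For context (not part of the diff): once the `mybuilder` instance exists, a cross-platform build looks roughly like the sketch below — the image name and platform list are placeholders, not values from this repository:

    docker buildx use mybuilder
    docker buildx build --platform linux/amd64,linux/arm64 -t example/image:tag --push .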
(5 binary image files changed, sizes unchanged: 204 KiB, 883 KiB, 148 KiB, 193 KiB, 23 KiB)
cloud/ansible/openstack/clouds.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
clouds:
  open-telekom-cloud:
    profile: otc
    region_name: eu-de
    auth:
      project_name: eu-de_your_project
      username: your_api_user
      password: your_password
      user_domain_name: OTC-EU-DE-000000000010000XXXXX
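A quick way to verify the credentials in this clouds.yaml (a sketch, not part of the diff; assumes python-openstackclient is installed and the file sits in the current directory or in ~/.config/openstack/):

    export OS_CLOUD=open-telekom-cloud
    openstack token issue   # succeeds only if the auth section is correct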
cloud/azure/README.md (new file, 71 lines)
@@ -0,0 +1,71 @@
# Azure T-Pot

The following deployment template will deploy a Standard T-Pot server on an Azure VM on a Network/Subnet of your choosing. [Click here to learn more on T-Pot](https://github.com/telekom-security/tpotce)

[](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2Ftelekom-security%2Ftpotce%2Fmaster%2Fcloud%2Fazure%2Fazuredeploy.json)
[](https://portal.azure.us/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2Ftelekom-security%2Ftpotce%2Fmaster%2Fcloud%2Fazure%2Fazuredeploy.json)
[](http://armviz.io/#/?load=https%3A%2F%2Fraw.githubusercontent.com%2Ftelekom-security%2Ftpotce%2Fmaster%2Fcloud%2Fazure%2Fazuredeploy.json)

## Install Instructions

1. Update the VM Name to reflect your naming convention and taxonomy.
2. Place your Azure Virtual Network Resource Id *(placement depends on your goal: you may want to place it in a Hub Virtual Network to detect activity from on-premise or other virtual network spokes, or in a DMZ, or isolated in a unique virtual network exposed to the direct internet.)*
3. My Connection IP: the public IP address you are connecting from, used to reach the dashboards and manage the VM.
4. Cloud Init B64 Encoded: write your cloud-init yaml contents and base64-encode them into this string parameter.

   Cloud-Init yaml example before B64 encoding:

       packages:
         - git

       runcmd:
         - curl -sS --retry 5 https://github.com
         - git clone https://github.com/telekom-security/tpotce /root/tpot
         - /root/tpot/iso/installer/install.sh --type=auto --conf=/root/tpot.conf
         - rm /root/tpot.conf
         - /sbin/shutdown -r now

       password: w3b$ecrets2!
       chpasswd:
         expire: false

       write_files:
         - content: |
             # tpot configuration file
             myCONF_TPOT_FLAVOR='STANDARD'
             myCONF_WEB_USER='webuser'
             myCONF_WEB_PW='w3b$ecrets2!'
           owner: root:root
           path: /root/tpot.conf
           permissions: '0600'

   Be sure to copy and update values like:

   - password:
   - myCONF_TPOT_FLAVOR= (different flavors as follows: [STANDARD, HIVE, HIVE_SENSOR, INDUSTRIAL, LOG4J, MEDICAL, MINI, SENSOR]; **recommend deploying STANDARD** if you are exploring for the first time)
   - myCONF_WEB_USER=
   - myCONF_WEB_PW=

   Once you have updated the cloud-init yaml file locally, base64-encode it and paste the resulting string into the securestring parameter.

   B64 example:

       I2Nsb3VkLWNvbmZpZwp0aW1lem9uZTogVVMvRWFzdGVybgoKcGFja2FnZXM6CiAgLSBnaXQKCnJ1bmNtZDoKICAtIGN1cmwgLXNTIC0tcmV0cnkgNSBodHRwczovL2dpdGh1Yi5jb20KICAtIGdpdCBjbG9uZSBodHRwczovL2dpdGh1Yi5jb20vdGVsZWtvbS1zZWN1cml0eS90cG90Y2UgL3Jvb3QvdHBvdAogIC0gL3Jvb3QvdHBvdC9pc28vaW5zdGFsbGVyL2luc3RhbGwuc2ggLS10eXBlPWF1dG8gLS1jb25mPS9yb290L3Rwb3QuY29uZgogIC0gcm0gL3Jvb3QvdHBvdC5jb25mCiAgLSAvc2Jpbi9zaHV0ZG93biAtciBub3cKCnBhc3N3b3JkOiB3M2IkZWNyZXRzMiEKY2hwYXNzd2Q6CiAgZXhwaXJlOiBmYWxzZQoKd3JpdGVfZmlsZXM6CiAgLSBjb250ZW50OiB8CiAgICAgICMgdHBvdCBjb25maWd1cmF0aW9uIGZpbGUKICAgICAgbXlDT05GX1RQT1RfRkxBVk9SPSdTVEFOREFSRCcKICAgICAgbXlDT05GX1dFQl9VU0VSPSd3ZWJ1c2VyJwogICAgICBteUNPTkZfV0VCX1BXPSd3M2IkZWNyZXRzMiEnCiAgICBvd25lcjogcm9vdDpyb290CiAgICBwYXRoOiAvcm9vdC90cG90LmNvbmYKICAgIHBlcm1pc3Npb25zOiAnMDYwMCc=

Click review and create; deployment of the VM should take less than 5 minutes, however Cloud-Init will take some time, **typically 15 minutes**, before T-Pot services are up and running.

## Post Install Instructions
Install **may take around 15 minutes** for services to come up. Check that from your public IP you can connect to https://azurepublicip:64297; you will be prompted for the username and password supplied in the B64 cloud-init string for *myCONF_WEB_USER=* and *myCONF_WEB_PW=*.

Review the [available honeypots architecture section](https://raw.githubusercontent.com/telekom-security/tpotce/master/doc/architecture.png) and [available ports](https://github.com/telekom-security/tpotce#required-ports) and poke a hole in the Network Security Group to expose T-Pot to your on-premise network CIDR or other Azure virtual network CIDRs; finally, you can also expose a port to the public Internet for threat intelligence gathering.

## Network Security Group
Please study the rules carefully. You may need to make some additional rules or modifications based on your needs and considerations. As an example, if this is for internal private IP range detection you may want to remove rules and place a higher-priority DENY rule preventing all the T-Pot ports and services from being exposed internally, and then place a few ALLOW rules for your on-premise private IP address CIDR, other hub private IPs, and some spoke private IPs.


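The base64 round-trip itself can be done on the command line (a sketch, not from the README; the file name cloud-init.yaml is a placeholder):

    base64 -w0 cloud-init.yaml        # Linux (GNU coreutils): encode without line wrapping
    base64 -i cloud-init.yaml         # macOS equivalent
    echo '<b64 string>' | base64 -d   # decode the shipped default to inspect it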
cloud/azure/azuredeploy.json (new file, 308 lines)
@@ -0,0 +1,308 @@
{
  "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {
    "VMName": {
      "type": "string",
      "metadata": {
        "description": "VM Name and convention your company uses, be sure to entice naming EX. vm-fileshares-prod-eastus-003"
      },
      "defaultValue": "vm-fileshares-prod-eastus-003"
    },
    "virtualNetworkId": {
      "type": "string",
      "metadata": {
        "description": "Virtual Network Resource ID to Deploy Azure VM into"
      },
      "defaultValue": "/subscriptions/{SUBID}/resourceGroups/{RG NAME}/providers/Microsoft.Network/virtualNetworks/{VNET NAME}"
    },
    "subnetName": {
      "type": "string",
      "metadata": {
        "description": "Virtual Network Subnet Name to Deploy Azure VM into"
      }
    },
    "MyConnectionIP": {
      "type": "string",
      "minLength": 7,
      "maxLength": 15,
      "metadata": {
        "description": "The Public IP I will be connecting from to administer and configure"
      },
      "defaultValue": "XXX.XXX.XXX.XXX"
    },
    "adminUsername": {
      "type": "string",
      "minLength": 1,
      "defaultValue": "webuser",
      "metadata": {
        "description": "Admin user name for Linux VM"
      }
    },
    "authenticationType": {
      "type": "string",
      "defaultValue": "password",
      "allowedValues": [
        "sshPublicKey",
        "password"
      ],
      "metadata": {
        "description": "Type of authentication to use on the Virtual Machine. SSH key is recommended."
      }
    },
    "adminPasswordOrKey": {
      "type": "securestring",
      "metadata": {
        "description": "SSH Key or password for the Virtual Machine. SSH key is recommended."
      }
    },
    "CloudInitB64Encoded": {
      "type": "securestring",
      "metadata": {
        "description": "Cloud Init Configuration as a Base 64 encoded string, decode to examine a few variables to change and encode and submit"
      },
      "defaultValue": "I2Nsb3VkLWNvbmZpZwp0aW1lem9uZTogVVMvRWFzdGVybgoKcGFja2FnZXM6CiAgLSBnaXQKCnJ1bmNtZDoKICAtIGN1cmwgLXNTIC0tcmV0cnkgNSBodHRwczovL2dpdGh1Yi5jb20KICAtIGdpdCBjbG9uZSBodHRwczovL2dpdGh1Yi5jb20vdGVsZWtvbS1zZWN1cml0eS90cG90Y2UgL3Jvb3QvdHBvdAogIC0gL3Jvb3QvdHBvdC9pc28vaW5zdGFsbGVyL2luc3RhbGwuc2ggLS10eXBlPWF1dG8gLS1jb25mPS9yb290L3Rwb3QuY29uZgogIC0gcm0gL3Jvb3QvdHBvdC5jb25mCiAgLSAvc2Jpbi9zaHV0ZG93biAtciBub3cKCnBhc3N3b3JkOiB3M2IkZWNyZXRzMiEKY2hwYXNzd2Q6CiAgZXhwaXJlOiBmYWxzZQoKd3JpdGVfZmlsZXM6CiAgLSBjb250ZW50OiB8CiAgICAgICMgdHBvdCBjb25maWd1cmF0aW9uIGZpbGUKICAgICAgbXlDT05GX1RQT1RfRkxBVk9SPSdTVEFOREFSRCcKICAgICAgbXlDT05GX1dFQl9VU0VSPSd3ZWJ1c2VyJwogICAgICBteUNPTkZfV0VCX1BXPSd3M2IkZWNyZXRzMiEnCiAgICBvd25lcjogcm9vdDpyb290CiAgICBwYXRoOiAvcm9vdC90cG90LmNvbmYKICAgIHBlcm1pc3Npb25zOiAnMDYwMCc="
    }
  },
  "variables": {
    "vnetId": "[parameters('virtualNetworkId')]",
    "subnetRef": "[concat(variables('vnetId'), '/subnets/', parameters('subnetName'))]",
    "linuxConfiguration": {
      "disablePasswordAuthentication": true,
      "ssh": {
        "publicKeys": [
          {
            "path": "[format('/home/{0}/.ssh/authorized_keys', parameters('adminUsername'))]",
            "keyData": "[parameters('adminPasswordOrKey')]"
          }
        ]
      }
    }
  },
  "resources": [
    {
      "name": "[concat(uniqueString(resourceGroup().id, deployment().name),'-nic')]",
      "type": "Microsoft.Network/networkInterfaces",
      "apiVersion": "2021-08-01",
      "location": "[resourceGroup().location]",
      "dependsOn": [
        "[resourceId('Microsoft.Network/networkSecurityGroups/', concat(uniqueString(resourceGroup().id, deployment().name),'-nsg'))]",
        "[resourceId('Microsoft.Network/publicIpAddresses', concat(uniqueString(resourceGroup().id, deployment().name),'-pip'))]"
      ],
      "properties": {
        "ipConfigurations": [
          {
            "name": "ipconfig1",
            "properties": {
              "subnet": {
                "id": "[variables('subnetRef')]"
              },
              "privateIPAllocationMethod": "Dynamic",
              "publicIpAddress": {
                "id": "[resourceId(resourceGroup().name, 'Microsoft.Network/publicIpAddresses', concat(uniqueString(resourceGroup().id, deployment().name),'-pip'))]",
                "properties": {
                  "deleteOption": "Detach"
                }
              }
            }
          }
        ],
        "enableAcceleratedNetworking": true,
        "networkSecurityGroup": {
          "id": "[resourceId('Microsoft.Network/networkSecurityGroups/', concat(uniqueString(resourceGroup().id, deployment().name),'-nsg'))]"
        }
      }
    },
    {
      "name": "[concat(uniqueString(resourceGroup().id, deployment().name),'-nsg')]",
      "type": "Microsoft.Network/networkSecurityGroups",
      "apiVersion": "2019-02-01",
      "location": "[resourceGroup().location]",
      "properties": {
        "securityRules": [
          {
            "name": "AllowAzureCloud22Inbound",
            "properties": {
              "protocol": "*",
              "sourcePortRange": "*",
              "destinationPortRange": "22",
              "sourceAddressPrefix": "AzureCloud",
              "destinationAddressPrefix": "*",
              "access": "Allow",
              "priority": 1011,
              "direction": "Inbound",
              "sourcePortRanges": [],
              "destinationPortRanges": [],
              "sourceAddressPrefixes": [],
              "destinationAddressPrefixes": []
            }
          },
          {
            "name": "AllowCustom64294Inbound",
            "properties": {
              "protocol": "*",
              "sourcePortRange": "*",
              "destinationPortRange": "64294",
              "sourceAddressPrefix": "[parameters('MyConnectionIP')]",
              "destinationAddressPrefix": "*",
              "access": "Allow",
              "priority": 1021,
              "direction": "Inbound",
              "sourcePortRanges": [],
              "destinationPortRanges": [],
              "sourceAddressPrefixes": [],
              "destinationAddressPrefixes": []
            }
          },
          {
            "name": "AllowSSHCustom64295Inbound",
            "properties": {
              "protocol": "*",
              "sourcePortRange": "*",
              "destinationPortRange": "64295",
              "sourceAddressPrefix": "[parameters('MyConnectionIP')]",
              "destinationAddressPrefix": "*",
              "access": "Allow",
              "priority": 1031,
              "direction": "Inbound",
              "sourcePortRanges": [],
              "destinationPortRanges": [],
              "sourceAddressPrefixes": [],
              "destinationAddressPrefixes": []
            }
          },
          {
            "name": "AllowAzureCloud64295Inbound",
            "properties": {
              "protocol": "*",
              "sourcePortRange": "*",
              "destinationPortRange": "64295",
              "sourceAddressPrefix": "AzureCloud",
              "destinationAddressPrefix": "*",
              "access": "Allow",
              "priority": 1041,
              "direction": "Inbound",
              "sourcePortRanges": [],
              "destinationPortRanges": [],
              "sourceAddressPrefixes": [],
              "destinationAddressPrefixes": []
            }
          },
          {
            "name": "AllowCustom64297Inbound",
            "properties": {
              "protocol": "*",
              "sourcePortRange": "*",
              "destinationPortRange": "64297",
              "sourceAddressPrefix": "[parameters('MyConnectionIP')]",
              "destinationAddressPrefix": "*",
              "access": "Allow",
              "priority": 1051,
              "direction": "Inbound",
              "sourcePortRanges": [],
              "destinationPortRanges": [],
              "sourceAddressPrefixes": [],
              "destinationAddressPrefixes": []
            }
          },
          {
            "name": "AllowAllHomeOfficeCustomAnyInbound",
            "properties": {
              "protocol": "*",
              "sourcePortRange": "*",
              "destinationPortRange": "*",
              "sourceAddressPrefix": "[parameters('MyConnectionIP')]",
              "destinationAddressPrefix": "*",
              "access": "Allow",
              "priority": 1061,
              "direction": "Inbound",
              "sourcePortRanges": [],
              "destinationPortRanges": [],
              "sourceAddressPrefixes": [],
              "destinationAddressPrefixes": []
            }
          }
        ]
      }
    },
    {
      "name": "[concat(uniqueString(resourceGroup().id, deployment().name),'-pip')]",
      "type": "Microsoft.Network/publicIpAddresses",
      "apiVersion": "2020-08-01",
      "location": "[resourceGroup().location]",
      "properties": {
        "publicIpAllocationMethod": "Static"
      },
      "sku": {
        "name": "Standard"
      },
      "zones": [
        "1"
      ]
    },
    {
      "name": "[parameters('VMName')]",
      "type": "Microsoft.Compute/virtualMachines",
      "apiVersion": "2022-03-01",
      "location": "[resourceGroup().location]",
      "dependsOn": [
        "[resourceId('Microsoft.Network/networkInterfaces', concat(uniqueString(resourceGroup().id, deployment().name),'-nic'))]"
      ],
      "properties": {
        "hardwareProfile": {
          "vmSize": "Standard_D4s_v3"
        },
        "storageProfile": {
          "osDisk": {
            "createOption": "fromImage",
            "managedDisk": {
              "storageAccountType": "StandardSSD_LRS"
            },
            "deleteOption": "Delete"
          },
          "imageReference": {
            "publisher": "debian",
            "offer": "debian-11",
            "sku": "11-gen2",
            "version": "latest"
          },
          "dataDisks": [
            {
              "name": "[concat(parameters('VMName'),'-datadisk')]",
              "diskSizeGB": 256,
              "lun": 0,
              "createOption": "Empty",
              "caching": "ReadWrite"
            }
          ]
        },
        "networkProfile": {
          "networkInterfaces": [
            {
              "id": "[resourceId('Microsoft.Network/networkInterfaces', concat(uniqueString(resourceGroup().id, deployment().name),'-nic'))]",
              "properties": {
                "deleteOption": "Delete"
              }
            }
          ]
        },
        "osProfile": {
          "computerName": "[parameters('VMName')]",
          "adminUsername": "[parameters('adminUsername')]",
          "adminPassword": "[parameters('adminPasswordOrKey')]",
          "linuxConfiguration": "[if(equals(parameters('authenticationType'), 'password'), null(), variables('linuxConfiguration'))]",
          "customData": "[parameters('CloudInitB64Encoded')]"
        },
        "diagnosticsProfile": {
          "bootDiagnostics": {
            "enabled": true
          }
        }
      },
      "zones": [
        "1"
      ]
    }
  ],
  "outputs": {}
}
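Besides the portal buttons in the README, a template like this can be deployed with the Azure CLI. A hedged sketch (resource group name and all parameter values are placeholders; CloudInitB64Encoded and adminUsername fall back to their defaults):

    az deployment group create \
      --resource-group rg-tpot \
      --template-file cloud/azure/azuredeploy.json \
      --parameters VMName=vm-tpot-prod-eastus-001 \
                   virtualNetworkId=/subscriptions/<SUBID>/resourceGroups/<RG>/providers/Microsoft.Network/virtualNetworks/<VNET> \
                   subnetName=<SUBNET> \
                   MyConnectionIP=<YOUR_PUBLIC_IP> \
                   adminPasswordOrKey='<password-or-ssh-key>'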
cloud/azure/images/nsg.png (new binary file, 49 KiB)
cloud/terraform/otc/clouds.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
clouds:
  open-telekom-cloud:
    region_name: eu-de
    auth:
      project_name: eu-de_your_project
      username: your_api_user
      password: your_password
      user_domain_name: OTC-EU-DE-000000000010000XXXXX
      auth_url: https://iam.eu-de.otc.t-systems.com/v3
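The OpenStack Terraform provider can consume this clouds.yaml as well; a minimal sketch (assumptions: the file is in a location the OpenStack SDK searches, e.g. ~/.config/openstack/, and the provider honors the OS_CLOUD environment variable or an equivalent cloud argument in the provider block):

    export OS_CLOUD=open-telekom-cloud
    terraform init
    terraform plan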
(deleted file, 172 lines — T-Pot Service Builder script; file name not captured)
@@ -1,172 +0,0 @@
from datetime import datetime
import yaml


version = \
"""# T-Pot Service Builder v0.2

This script is intended as a kickstarter for users who want to build a customized docker-compose.yml for use with T-Pot.

T-Pot Service Builder will ask you for all the docker services you wish to include in your docker-compose configuration file.
The configuration file will be checked for conflicting ports as some of the honeypots are meant to work on certain ports.
You have to manually resolve the port conflicts or re-run the script and exclude the conflicting services / honeypots.

Review the resulting configuration and adjust the port settings to your needs by (un)commenting the corresponding lines in the config.
"""

header = \
"""# T-Pot: CUSTOM EDITION
# Generated on: {current_date}
"""

config_filename = "tpot_services.yml"
service_filename = "docker-compose-custom.yml"


def load_config(filename):
    try:
        with open(filename, 'r') as file:
            config = yaml.safe_load(file)
    except:
        print_color(f"Error: {filename} not found. Exiting.", "red")
        exit()
    return config


def prompt_service_include(service_name):
    while True:
        response = input(f"Include {service_name}? (y/n): ").strip().lower()
        if response in ['y', 'n']:
            return response == 'y'
        else:
            print("Please enter 'y' for yes or 'n' for no.")


def check_port_conflicts(selected_services):
    all_ports = {}
    conflict_ports = []

    for service_name, config in selected_services.items():
        ports = config.get('ports', [])
        for port in ports:
            # Split the port mapping and take only the host port part
            parts = port.split(':')
            host_port = parts[1] if len(parts) == 3 else (parts[0] if parts[1].isdigit() else parts[1])

            # Check for port conflict and associate it with the service name
            if host_port in all_ports:
                conflict_ports.append((service_name, host_port))
                if all_ports[host_port] not in [service for service, _ in conflict_ports]:
                    conflict_ports.append((all_ports[host_port], host_port))
            else:
                all_ports[host_port] = service_name

    if conflict_ports:
        print_color("Port conflict(s) detected:", "red")
        for service, port in conflict_ports:
            print_color(f"{service}: {port}", "red")
        return True
    return False


def print_color(text, color):
    colors = {
        "red": "\033[91m",
        "green": "\033[92m",
        "end": "\033[0m",
    }
    print(f"{colors[color]}{text}{colors['end']}")


def enforce_dependencies(selected_services, services):
    # If snare or any tanner services are selected, ensure all are enabled
    tanner_services = {'snare', 'tanner', 'tanner_redis', 'tanner_phpox', 'tanner_api'}
    if tanner_services.intersection(selected_services):
        print_color("For Snare / Tanner to work all required services have been added to your configuration.", "green")
        for service in tanner_services:
            selected_services[service] = services[service]

    # If kibana is enabled, also enable elasticsearch
    if 'kibana' in selected_services:
        selected_services['elasticsearch'] = services['elasticsearch']
        print_color("Kibana requires Elasticsearch which has been added to your configuration.", "green")

    # If spiderfoot is enabled, also enable nginx
    if 'spiderfoot' in selected_services:
        selected_services['nginx'] = services['nginx']
        print_color("Spiderfoot requires Nginx which has been added to your configuration.", "green")

    # If any map services are detected, enable logstash, elasticsearch, nginx, and all map services
    map_services = {'map_web', 'map_redis', 'map_data'}
    if map_services.intersection(selected_services):
        print_color("For Map to work all required services have been added to your configuration.", "green")
        for service in map_services.union({'elasticsearch', 'nginx'}):
            selected_services[service] = services[service]

    # honeytrap and glutton cannot be active at the same time, always vote in favor of honeytrap
    if 'honeytrap' in selected_services and 'glutton' in selected_services:
        # Remove glutton and notify
        del selected_services['glutton']
        print_color("Honeytrap and Glutton cannot be active at the same time. Glutton has been removed from your configuration.", "red")


def remove_unused_networks(selected_services, services, networks):
    used_networks = set()
    # Identify networks used by selected services
    for service_name in selected_services:
        service_config = services[service_name]
        if 'networks' in service_config:
            for network in service_config['networks']:
                used_networks.add(network)

    # Remove unused networks
    for network in list(networks):
        if network not in used_networks:
            del networks[network]


def main():
    config = load_config(config_filename)

    # Separate services and networks
    services = config['services']
    networks = config.get('networks', {})
    selected_services = {'tpotinit': services['tpotinit'],
                         'logstash': services['logstash']}  # Always include tpotinit and logstash

    for service_name, service_config in services.items():
        if service_name not in selected_services:  # Skip already included services
            if prompt_service_include(service_name):
                selected_services[service_name] = service_config

    # Enforce dependencies
    enforce_dependencies(selected_services, services)

    # Remove unused networks based on selected services
    remove_unused_networks(selected_services, services, networks)

    output_config = {
        'version': '3.9',
        'networks': networks,
        'services': selected_services,
    }

    current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    with open(service_filename, 'w') as file:
        file.write(header.format(current_date=current_date))
        yaml.dump(output_config, file, default_flow_style=False, sort_keys=False, indent=2)

    if check_port_conflicts(selected_services):
        print_color(f"Adjust the conflicting ports in the {service_filename} or re-run the script and select services that do not occupy the same port(s).", "red")
    else:
        print_color(f"Custom {service_filename} has been generated without port conflicts.", "green")
        print(f"Copy {service_filename} to tpotce/ and test with: docker compose -f {service_filename} up")
        print(f"If everything works, exit with CTRL-C and replace docker-compose.yml with the new config.")


if __name__ == "__main__":
    print(version)
    main()
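Per the script's own closing prints, the intended workflow is roughly the following sketch (the script's file name is not shown in the diff; customizer.py is assumed, and tpot_services.yml must sit next to it):

    python3 customizer.py                    # answer y/n for each service
    cp docker-compose-custom.yml ~/tpotce/
    cd ~/tpotce
    docker compose -f docker-compose-custom.yml up
    # if everything works, CTRL-C and replace docker-compose.yml with the new config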
(deleted file, 608 lines — "T-Pot: MOBILE" docker compose configuration; file name not captured)
@@ -1,608 +0,0 @@
# T-Pot: MOBILE
# Note: This docker compose file has been adjusted to limit the number of tools, services and honeypots to run
# T-Pot on a Raspberry Pi 4 (8GB of RAM).
# The standard docker compose file should work mostly fine (depending on traffic) if you do not enable a
# desktop environment such as LXDE and meet the minimum requirements of 8GB RAM.
version: '3.9'

networks:
  ciscoasa_local:
  citrixhoneypot_local:
  conpot_local_IEC104:
  conpot_local_ipmi:
  conpot_local_kamstrup_382:
  cowrie_local:
  dicompot_local:
  dionaea_local:
  elasticpot_local:
  heralding_local:
  ipphoney_local:
  log4pot_local:
  mailoney_local:
  medpot_local:
  redishoneypot_local:
  sentrypeer_local:
  tanner_local:
  ewsposter_local:

services:

#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################

  # T-Pot Init Service
  tpotinit:
    container_name: tpotinit
    env_file:
      - .env
    restart: always
    tmpfs:
      - /tmp/etc:uid=2000,gid=2000
      - /tmp/:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
      - ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
      - ${TPOT_DATA_PATH}:/data


##################
#### Honeypots
##################

  # Ciscoasa service
  ciscoasa:
    container_name: ciscoasa
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp/ciscoasa:uid=2000,gid=2000
    networks:
      - ciscoasa_local
    ports:
      - "5000:5000/udp"
      - "8443:8443"
    image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa

  # CitrixHoneypot service
  citrixhoneypot:
    container_name: citrixhoneypot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - citrixhoneypot_local
    ports:
      - "443:443"
    image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/citrixhoneypot/logs:/opt/citrixhoneypot/logs

  # Conpot IEC104 service
  conpot_IEC104:
    container_name: conpot_iec104
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
      - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
      - CONPOT_TEMPLATE=IEC104
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_IEC104
    ports:
      - "161:161/udp"
      - "2404:2404"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

  # Conpot ipmi
  conpot_ipmi:
    container_name: conpot_ipmi
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
      - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
      - CONPOT_TEMPLATE=ipmi
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_ipmi
    ports:
      - "623:623/udp"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

  # Conpot kamstrup_382
  conpot_kamstrup_382:
    container_name: conpot_kamstrup_382
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
      - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
      - CONPOT_TEMPLATE=kamstrup_382
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_kamstrup_382
    ports:
      - "1025:1025"
      - "50100:50100"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

  # Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp/cowrie:uid=2000,gid=2000
      - /tmp/cowrie/data:uid=2000,gid=2000
    networks:
      - cowrie_local
    ports:
      - "22:22"
      - "23:23"
    image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
      - ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
      - ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
      - ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty

  # Dicompot service
  # Get the Horos Client for testing: https://horosproject.org/
  # Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
  # Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
  dicompot:
    container_name: dicompot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - dicompot_local
    ports:
      - "11112:11112"
    image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
      # - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images

  # Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - dionaea_local
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "81:81"
      - "135:135"
      # - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "3306:3306"
      # - "5060:5060"
      # - "5060:5060/udp"
      # - "5061:5061"
      - "27017:27017"
    image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
      - ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
      - ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

  # ElasticPot service
  elasticpot:
    container_name: elasticpot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log

  # Heralding service
  heralding:
    container_name: heralding
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      # - "21:21"
      # - "22:22"
      # - "23:23"
      # - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      # - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      # - "3306:3306"
      # - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding

  # Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp/honeytrap:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
      - ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
      - ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log

  # Ipphoney service
  ipphoney:
    container_name: ipphoney
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - ipphoney_local
    ports:
      - "631:631"
    image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log

  # Mailoney service
  mailoney:
    container_name: mailoney
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    environment:
      - HPFEEDS_SERVER=
      - HPFEEDS_IDENT=user
      - HPFEEDS_SECRET=pass
      - HPFEEDS_PORT=20000
      - HPFEEDS_CHANNELPREFIX=prefix
    networks:
      - mailoney_local
    ports:
      - "25:25"
    image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs

  # Log4pot service
  log4pot:
    container_name: log4pot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp:uid=2000,gid=2000
    networks:
      - log4pot_local
    ports:
      # - "80:8080"
      # - "443:8080"
      - "8080:8080"
      # - "9200:8080"
      - "25565:8080"
    image: ${TPOT_REPO}/log4pot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/log4pot/log:/var/log/log4pot/log
      - ${TPOT_DATA_PATH}/log4pot/payloads:/var/log/log4pot/payloads

  # Medpot service
  medpot:
    container_name: medpot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot

  # Redishoneypot service
  redishoneypot:
    container_name: redishoneypot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - redishoneypot_local
    ports:
      - "6379:6379"
    image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot

  # SentryPeer service
  sentrypeer:
    container_name: sentrypeer
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    environment:
      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
    networks:
      - sentrypeer_local
    ports:
      # - "4222:4222/udp"
      - "5060:5060/udp"
      # - "127.0.0.1:8082:8082"
    image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer

  #### Snare / Tanner
  ## Tanner Redis Service
  tanner_redis:
    container_name: tanner_redis
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## PHP Sandbox service
  tanner_phpox:
    container_name: tanner_phpox
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## Tanner API Service
  tanner_api:
    container_name: tanner_api
    restart: always
    depends_on:
      - tanner_redis
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
    command: tannerapi

  ## Tanner Service
  tanner:
    container_name: tanner
    restart: always
    depends_on:
      - tanner_api
      - tanner_phpox
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    command: tanner
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
      - ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files

  ## Snare Service
  snare:
    container_name: snare
    restart: always
    depends_on:
      - tanner
    tty: true
    networks:
      - tanner_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/snare:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}


##################
#### Tools
##################

#### ELK
  ## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - bootstrap.memory_lock=true
      - ES_JAVA_OPTS=-Xms2048m -Xmx2048m
      - ES_TMPDIR=/tmp
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 4g
    ports:
      - "127.0.0.1:64298:9200"
    image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

  ## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    environment:
      - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
    ports:
      - "127.0.0.1:64305:64305"
    mem_limit: 2g
    image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
#### /ELK

  # Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip
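Running a compose variant like this is a matter of pointing docker compose at the file from the tpotce checkout (a sketch; the path compose/mobile.yml is assumed, since the diff does not capture the file name):

    cd ~/tpotce
    docker compose -f compose/mobile.yml up -d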
@ -1,608 +0,0 @@
# T-Pot: MOBILE
# Note: This docker compose file has been adjusted to limit the number of tools, services and honeypots
# in order to run T-Pot on a Raspberry Pi 4 (8 GB of RAM).
# The standard docker compose file should work mostly fine (depending on traffic) if you do not enable a
# desktop environment such as LXDE and meet the minimum requirement of 8 GB of RAM.
version: '3.9'

networks:
  ciscoasa_local:
  citrixhoneypot_local:
  conpot_local_IEC104:
  conpot_local_ipmi:
  conpot_local_kamstrup_382:
  cowrie_local:
  dicompot_local:
  dionaea_local:
  elasticpot_local:
  heralding_local:
  ipphoney_local:
  log4pot_local:
  mailoney_local:
  medpot_local:
  redishoneypot_local:
  sentrypeer_local:
  tanner_local:
  ewsposter_local:

services:

#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################

# T-Pot Init Service
  tpotinit:
    container_name: tpotinit
    env_file:
      - .env
    restart: always
    tmpfs:
      - /tmp/etc:uid=2000,gid=2000
      - /tmp/:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
      - ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
      - ${TPOT_DATA_PATH}:/data
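tpotinit runs with host networking and mounts its scratch paths as tmpfs with `uid=2000,gid=2000`; presumably 2000 is T-Pot's unprivileged service user, so the container can still write there when the rest of the filesystem is read-only. The same pattern recurs across the honeypot services below:

```yaml
services:
  example:                       # illustrative service, not part of T-Pot
    read_only: true              # immutable root filesystem
    tmpfs:
      - /tmp:uid=2000,gid=2000   # writable scratch owned by the service uid/gid
```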


##################
#### Honeypots
##################

# Ciscoasa service
  ciscoasa:
    container_name: ciscoasa
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp/ciscoasa:uid=2000,gid=2000
    networks:
      - ciscoasa_local
    ports:
      - "5000:5000/udp"
      - "8443:8443"
    image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa

# CitrixHoneypot service
  citrixhoneypot:
    container_name: citrixhoneypot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - citrixhoneypot_local
    ports:
      - "443:443"
    image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/citrixhoneypot/logs:/opt/citrixhoneypot/logs

# Conpot IEC104 service
  conpot_IEC104:
    container_name: conpot_iec104
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
      - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
      - CONPOT_TEMPLATE=IEC104
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_IEC104
    ports:
      - "161:161/udp"
      - "2404:2404"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot ipmi
  conpot_ipmi:
    container_name: conpot_ipmi
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
      - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
      - CONPOT_TEMPLATE=ipmi
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_ipmi
    ports:
      - "623:623/udp"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot kamstrup_382
  conpot_kamstrup_382:
    container_name: conpot_kamstrup_382
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
      - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
      - CONPOT_TEMPLATE=kamstrup_382
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_kamstrup_382
    ports:
      - "1025:1025"
      - "50100:50100"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp/cowrie:uid=2000,gid=2000
      - /tmp/cowrie/data:uid=2000,gid=2000
    networks:
      - cowrie_local
    ports:
      - "22:22"
      - "23:23"
    image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
      - ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
      - ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
      - ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty

# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
  dicompot:
    container_name: dicompot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - dicompot_local
    ports:
      - "11112:11112"
    image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
      # - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images
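Following the comments above, serving actual studies would mean placing DCM-format images under /data/dicompot/images on the host and enabling the commented volume, roughly:

```yaml
    volumes:
      - ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
      - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images   # now uncommented
```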

# Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - dionaea_local
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "81:81"
      - "135:135"
      # - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "3306:3306"
      # - "5060:5060"
      # - "5060:5060/udp"
      # - "5061:5061"
      - "27017:27017"
    image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
      - ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
      - ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

# ElasticPot service
  elasticpot:
    container_name: elasticpot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log

# Heralding service
  heralding:
    container_name: heralding
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      # - "21:21"
      # - "22:22"
      # - "23:23"
      # - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      # - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      # - "3306:3306"
      # - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding

# Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp/honeytrap:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
      - ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
      - ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log

# Ipphoney service
  ipphoney:
    container_name: ipphoney
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - ipphoney_local
    ports:
      - "631:631"
    image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log

# Mailoney service
  mailoney:
    container_name: mailoney
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    environment:
      - HPFEEDS_SERVER=
      - HPFEEDS_IDENT=user
      - HPFEEDS_SECRET=pass
      - HPFEEDS_PORT=20000
      - HPFEEDS_CHANNELPREFIX=prefix
    networks:
      - mailoney_local
    ports:
      - "25:25"
    image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs

# Log4pot service
  log4pot:
    container_name: log4pot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tmpfs:
      - /tmp:uid=2000,gid=2000
    networks:
      - log4pot_local
    ports:
      # - "80:8080"
      # - "443:8080"
      - "8080:8080"
      # - "9200:8080"
      - "25565:8080"
    image: ${TPOT_REPO}/log4pot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/log4pot/log:/var/log/log4pot/log
      - ${TPOT_DATA_PATH}/log4pot/payloads:/var/log/log4pot/payloads
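Log4pot runs a single listener on container port 8080 and simply publishes several host ports onto it; the commented candidates (80, 443, 9200) would collide with CitrixHoneypot, ElasticPot and others in this file, which is presumably why they stay disabled here. The pattern in isolation:

```yaml
    ports:
      - "8080:8080"    # one container listener ...
      - "25565:8080"   # ... reachable via multiple host ports
```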

# Medpot service
  medpot:
    container_name: medpot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot

# Redishoneypot service
  redishoneypot:
    container_name: redishoneypot
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - redishoneypot_local
    ports:
      - "6379:6379"
    image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot

# SentryPeer service
  sentrypeer:
    container_name: sentrypeer
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    environment:
      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
    networks:
      - sentrypeer_local
    ports:
      # - "4222:4222/udp"
      - "5060:5060/udp"
      # - "127.0.0.1:8082:8082"
    image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer

#### Snare / Tanner
## Tanner Redis Service
  tanner_redis:
    container_name: tanner_redis
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## PHP Sandbox service
  tanner_phpox:
    container_name: tanner_phpox
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## Tanner API Service
  tanner_api:
    container_name: tanner_api
    restart: always
    depends_on:
      - tanner_redis
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
    command: tannerapi

## Tanner Service
  tanner:
    container_name: tanner
    restart: always
    depends_on:
      - tanner_api
      - tanner_phpox
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    command: tanner
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
      - ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files

## Snare Service
  snare:
    container_name: snare
    restart: always
    depends_on:
      - tanner
    tty: true
    networks:
      - tanner_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/snare:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}


##################
#### Tools
##################

#### ELK
## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - bootstrap.memory_lock=true
      - ES_JAVA_OPTS=-Xms2048m -Xmx2048m
      - ES_TMPDIR=/tmp
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 4g
    ports:
      - "127.0.0.1:64298:9200"
    image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    environment:
      - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
    ports:
      - "127.0.0.1:64305:64305"
    mem_limit: 2g
    image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
#### /ELK

# Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      logstash:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip
@ -1,679 +0,0 @@
# T-Pot: SENSOR
version: '3.9'

networks:
  adbhoney_local:
  ciscoasa_local:
  citrixhoneypot_local:
  conpot_local_IEC104:
  conpot_local_guardian_ast:
  conpot_local_ipmi:
  conpot_local_kamstrup_382:
  cowrie_local:
  ddospot_local:
  dicompot_local:
  dionaea_local:
  elasticpot_local:
  heralding_local:
  ipphoney_local:
  mailoney_local:
  medpot_local:
  redishoneypot_local:
  sentrypeer_local:
  tanner_local:
  ewsposter_local:

services:

#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################

# T-Pot Init Service
  tpotinit:
    container_name: tpotinit
    env_file:
      - .env
    restart: always
    tmpfs:
      - /tmp/etc:uid=2000,gid=2000
      - /tmp/:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
      - ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
      - ${TPOT_DATA_PATH}:/data


##################
#### Honeypots
##################

# Adbhoney service
  adbhoney:
    container_name: adbhoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - adbhoney_local
    ports:
      - "5555:5555"
    image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
      - ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl

# Ciscoasa service
  ciscoasa:
    container_name: ciscoasa
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/ciscoasa:uid=2000,gid=2000
    networks:
      - ciscoasa_local
    ports:
      - "5000:5000/udp"
      - "8443:8443"
    image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa

# CitrixHoneypot service
  citrixhoneypot:
    container_name: citrixhoneypot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - citrixhoneypot_local
    ports:
      - "443:443"
    image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/citrixhoneypot/logs:/opt/citrixhoneypot/logs

# Conpot IEC104 service
  conpot_IEC104:
    container_name: conpot_iec104
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
      - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
      - CONPOT_TEMPLATE=IEC104
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_IEC104
    ports:
      - "161:161/udp"
      - "2404:2404"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot guardian_ast service
  conpot_guardian_ast:
    container_name: conpot_guardian_ast
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
      - CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
      - CONPOT_TEMPLATE=guardian_ast
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_guardian_ast
    ports:
      - "10001:10001"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot ipmi
  conpot_ipmi:
    container_name: conpot_ipmi
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
      - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
      - CONPOT_TEMPLATE=ipmi
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_ipmi
    ports:
      - "623:623/udp"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot kamstrup_382
  conpot_kamstrup_382:
    container_name: conpot_kamstrup_382
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
      - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
      - CONPOT_TEMPLATE=kamstrup_382
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_kamstrup_382
    ports:
      - "1025:1025"
      - "50100:50100"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/cowrie:uid=2000,gid=2000
      - /tmp/cowrie/data:uid=2000,gid=2000
    networks:
      - cowrie_local
    ports:
      - "22:22"
      - "23:23"
    image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
      - ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
      - ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
      - ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty

# Ddospot service
  ddospot:
    container_name: ddospot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ddospot_local
    ports:
      - "19:19/udp"
      - "53:53/udp"
      - "123:123/udp"
      # - "161:161/udp"
      - "1900:1900/udp"
    image: ${TPOT_REPO}/ddospot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ddospot/log:/opt/ddospot/ddospot/logs
      - ${TPOT_DATA_PATH}/ddospot/bl:/opt/ddospot/ddospot/bl
      - ${TPOT_DATA_PATH}/ddospot/db:/opt/ddospot/ddospot/db

# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
  dicompot:
    container_name: dicompot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dicompot_local
    ports:
      - "11112:11112"
    image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
      # - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images

# Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dionaea_local
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "81:81"
      - "135:135"
      # - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "3306:3306"
      # - "5060:5060"
      # - "5060:5060/udp"
      # - "5061:5061"
      - "27017:27017"
    image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
      - ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
      - ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

# ElasticPot service
  elasticpot:
    container_name: elasticpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log

# Heralding service
  heralding:
    container_name: heralding
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      # - "21:21"
      # - "22:22"
      # - "23:23"
      # - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      # - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      # - "3306:3306"
      # - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding

# Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/honeytrap:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
      - ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
      - ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log

# Ipphoney service
  ipphoney:
    container_name: ipphoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ipphoney_local
    ports:
      - "631:631"
    image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log

# Mailoney service
  mailoney:
    container_name: mailoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - HPFEEDS_SERVER=
      - HPFEEDS_IDENT=user
      - HPFEEDS_SECRET=pass
      - HPFEEDS_PORT=20000
      - HPFEEDS_CHANNELPREFIX=prefix
    networks:
      - mailoney_local
    ports:
      - "25:25"
    image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs

# Medpot service
  medpot:
    container_name: medpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot

# Redishoneypot service
  redishoneypot:
    container_name: redishoneypot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - redishoneypot_local
    ports:
      - "6379:6379"
    image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot

# SentryPeer service
  sentrypeer:
    container_name: sentrypeer
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
    networks:
      - sentrypeer_local
    ports:
      # - "4222:4222/udp"
      - "5060:5060/udp"
      # - "127.0.0.1:8082:8082"
    image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer

#### Snare / Tanner
## Tanner Redis Service
  tanner_redis:
    container_name: tanner_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## PHP Sandbox service
  tanner_phpox:
    container_name: tanner_phpox
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

## Tanner API Service
  tanner_api:
    container_name: tanner_api
    restart: always
    depends_on:
      - tanner_redis
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
    command: tannerapi

## Tanner Service
  tanner:
    container_name: tanner
    restart: always
    depends_on:
      - tanner_api
      - tanner_phpox
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    command: tanner
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
      - ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files

## Snare Service
  snare:
    container_name: snare
    restart: always
    depends_on:
      - tanner
    tty: true
    networks:
      - tanner_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/snare:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}


##################
#### NSM
##################

# Fatt service
  fatt:
    container_name: fatt
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log

# P0f service
  p0f:
    container_name: p0f
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f

# Suricata service
  suricata:
    container_name: suricata
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
      # Loading external Rules from URL
      # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata
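`OINKCODE` falls back to `OPEN` via the same `${VAR:-default}` substitution used elsewhere, presumably selecting the freely available ET Open ruleset; the commented `FROMURL` line shows the alternative of pulling rules from one or more pipe-separated URLs. A sketch, assuming a hypothetical subscription code:

```yaml
    environment:
      - OINKCODE=0123456789abcdef   # hypothetical subscription code
      # or, instead, fetch rules from external URLs:
      # - FROMURL="https://user:pass@rules.example.org"
```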


##################
#### Tools
##################

#### ELK

## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
    ports:
      - "127.0.0.1:64305:64305"
    mem_limit: 2g
    image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

#### /ELK

# Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip
@ -1,956 +0,0 @@
# T-Pot: Docker Services Base Configuration
# This is only to be used with the T-Pot Customizer
# Editing the contents may result in broken custom configurations!

networks:
  adbhoney_local:
  ciscoasa_local:
  citrixhoneypot_local:
  conpot_local_IEC104:
  conpot_local_guardian_ast:
  conpot_local_ipmi:
  conpot_local_kamstrup_382:
  cowrie_local:
  ddospot_local:
  dicompot_local:
  dionaea_local:
  elasticpot_local:
  endlessh_local:
  glutton_local:
  hellpot_local:
  heralding_local:
  honeypots_local:
  ipphoney_local:
  log4pot_local:
  mailoney_local:
  medpot_local:
  redishoneypot_local:
  sentrypeer_local:
  tanner_local:
  wordpot_local:
  spiderfoot_local:
  ewsposter_local:

services:

#########################################
#### DEV
#########################################
#### T-Pot Init - Never delete this!
#########################################

# T-Pot Init Service
  tpotinit:
    container_name: tpotinit
    env_file:
      - .env
    restart: always
    tmpfs:
      - /tmp/etc:uid=2000,gid=2000
      - /tmp/:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/tpotinit:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DOCKER_COMPOSE}:/tmp/tpot/docker-compose.yml:ro
      - ${TPOT_DATA_PATH}/blackhole:/etc/blackhole
      - ${TPOT_DATA_PATH}:/data


##################
#### Honeypots
##################

# Adbhoney service
  adbhoney:
    container_name: adbhoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - adbhoney_local
    ports:
      - "5555:5555"
    image: ${TPOT_REPO}/adbhoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/adbhoney/log:/opt/adbhoney/log
      - ${TPOT_DATA_PATH}/adbhoney/downloads:/opt/adbhoney/dl

# Ciscoasa service
  ciscoasa:
    container_name: ciscoasa
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/ciscoasa:uid=2000,gid=2000
    networks:
      - ciscoasa_local
    ports:
      - "5000:5000/udp"
      - "8443:8443"
    image: ${TPOT_REPO}/ciscoasa:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ciscoasa/log:/var/log/ciscoasa

# CitrixHoneypot service
  citrixhoneypot:
    container_name: citrixhoneypot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - citrixhoneypot_local
    ports:
      - "443:443"
    image: ${TPOT_REPO}/citrixhoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/citrixhoneypot/logs:/opt/citrixhoneypot/logs

# Conpot IEC104 service
  conpot_IEC104:
    container_name: conpot_iec104
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json
      - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log
      - CONPOT_TEMPLATE=IEC104
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_IEC104
    ports:
      - "161:161/udp"
      - "2404:2404"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot guardian_ast service
  conpot_guardian_ast:
    container_name: conpot_guardian_ast
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json
      - CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log
      - CONPOT_TEMPLATE=guardian_ast
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_guardian_ast
    ports:
      - "10001:10001"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot ipmi
  conpot_ipmi:
    container_name: conpot_ipmi
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json
      - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log
      - CONPOT_TEMPLATE=ipmi
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_ipmi
    ports:
      - "623:623/udp"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Conpot kamstrup_382
  conpot_kamstrup_382:
    container_name: conpot_kamstrup_382
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - CONPOT_CONFIG=/etc/conpot/conpot.cfg
      - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json
      - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log
      - CONPOT_TEMPLATE=kamstrup_382
      - CONPOT_TMP=/tmp/conpot
    tmpfs:
      - /tmp/conpot:uid=2000,gid=2000
    networks:
      - conpot_local_kamstrup_382
    ports:
      - "1025:1025"
      - "50100:50100"
    image: ${TPOT_REPO}/conpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/conpot/log:/var/log/conpot

# Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/cowrie:uid=2000,gid=2000
      - /tmp/cowrie/data:uid=2000,gid=2000
    networks:
      - cowrie_local
    ports:
      - "22:22"
      - "23:23"
    image: ${TPOT_REPO}/cowrie:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/cowrie/downloads:/home/cowrie/cowrie/dl
      - ${TPOT_DATA_PATH}/cowrie/keys:/home/cowrie/cowrie/etc
      - ${TPOT_DATA_PATH}/cowrie/log:/home/cowrie/cowrie/log
      - ${TPOT_DATA_PATH}/cowrie/log/tty:/home/cowrie/cowrie/log/tty

# Ddospot service
  ddospot:
    container_name: ddospot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ddospot_local
    ports:
      - "19:19/udp"
      - "53:53/udp"
      - "123:123/udp"
      # - "161:161/udp"
      - "1900:1900/udp"
    image: ${TPOT_REPO}/ddospot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ddospot/log:/opt/ddospot/ddospot/logs
      - ${TPOT_DATA_PATH}/ddospot/bl:/opt/ddospot/ddospot/bl
      - ${TPOT_DATA_PATH}/ddospot/db:/opt/ddospot/ddospot/db

# Dicompot service
# Get the Horos Client for testing: https://horosproject.org/
# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/
# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images
  dicompot:
    container_name: dicompot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dicompot_local
    ports:
      - "11112:11112"
    image: ${TPOT_REPO}/dicompot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dicompot/log:/var/log/dicompot
      # - ${TPOT_DATA_PATH}/dicompot/images:/opt/dicompot/images

# Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - dionaea_local
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "81:81"
      - "135:135"
      # - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "3306:3306"
      # - "5060:5060"
      # - "5060:5060/udp"
      # - "5061:5061"
      - "27017:27017"
    image: ${TPOT_REPO}/dionaea:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - ${TPOT_DATA_PATH}/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - ${TPOT_DATA_PATH}/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - ${TPOT_DATA_PATH}/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - ${TPOT_DATA_PATH}/dionaea:/opt/dionaea/var/dionaea
      - ${TPOT_DATA_PATH}/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - ${TPOT_DATA_PATH}/dionaea/log:/opt/dionaea/var/log
      - ${TPOT_DATA_PATH}/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

# ElasticPot service
  elasticpot:
    container_name: elasticpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: ${TPOT_REPO}/elasticpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/elasticpot/log:/opt/elasticpot/log

# Endlessh service
  endlessh:
    container_name: endlessh
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - endlessh_local
    ports:
      - "22:2222"
    image: ${TPOT_REPO}/endlessh:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/endlessh/log:/var/log/endlessh
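Endlessh's "22:2222" mapping puts the SSH tarpit on the host's real SSH port while the daemon inside listens on 2222. If the host still needs port 22 for administration, the host side of the mapping would be the part to change, e.g.:

```yaml
    ports:
      - "2222:2222"   # hypothetical: keep the tarpit off the real SSH port
```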

# Glutton service
  glutton:
    container_name: glutton
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /var/lib/glutton:uid=2000,gid=2000
      - /run:uid=2000,gid=2000
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: ${TPOT_REPO}/glutton:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/glutton/log:/var/log/glutton

# Hellpot service
  hellpot:
    container_name: hellpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - hellpot_local
    ports:
      - "80:8080"
    image: ${TPOT_REPO}/hellpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/hellpot/log:/var/log/hellpot

# Heralding service
  heralding:
    container_name: heralding
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp/heralding:uid=2000,gid=2000
    networks:
      - heralding_local
    ports:
      # - "21:21"
      # - "22:22"
      # - "23:23"
      # - "25:25"
      # - "80:80"
      - "110:110"
      - "143:143"
      # - "443:443"
      - "465:465"
      - "993:993"
      - "995:995"
      # - "3306:3306"
      # - "3389:3389"
      - "1080:1080"
      - "5432:5432"
      - "5900:5900"
    image: ${TPOT_REPO}/heralding:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/heralding/log:/var/log/heralding

# Honeypots service
  honeypots:
    container_name: honeypots
    stdin_open: true
    tty: true
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp:uid=2000,gid=2000
    networks:
      - honeypots_local
    ports:
      - "21:21"
      - "22:22"
      - "23:23"
      - "25:25"
      - "53:53/udp"
      - "80:80"
      - "110:110"
      - "123:123"
      - "143:143"
      - "161:161"
      - "389:389"
      - "443:443"
      - "445:445"
      - "1080:1080"
      - "1433:1433"
      - "1521:1521"
      - "3306:3306"
      - "5060:5060"
      - "5432:5432"
      - "5900:5900"
      - "6379:6379"
      - "6667:6667"
      - "8080:8080"
      - "9200:9200"
      - "11211:11211"
    image: ${TPOT_REPO}/honeypots:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/honeypots/log:/var/log/honeypots
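The `honeypots` container multiplexes a couple dozen protocol emulators behind a single image, hence the broad port list. Since this base file only feeds the T-Pot Customizer, a generated custom configuration would presumably keep just the ports whose emulations are wanted, e.g.:

```yaml
    ports:
      - "21:21"       # keep only the FTP emulation
      - "6379:6379"   # ... and the Redis emulation
```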
|
|
||||||
# Honeytrap service
|
|
||||||
honeytrap:
|
|
||||||
container_name: honeytrap
|
|
||||||
restart: always
|
|
||||||
depends_on:
|
|
||||||
tpotinit:
|
|
||||||
condition: service_healthy
|
|
||||||
tmpfs:
|
|
||||||
- /tmp/honeytrap:uid=2000,gid=2000
|
|
||||||
network_mode: "host"
|
|
||||||
cap_add:
|
|
||||||
- NET_ADMIN
|
|
||||||
image: ${TPOT_REPO}/honeytrap:${TPOT_VERSION}
|
|
||||||
pull_policy: ${TPOT_PULL_POLICY}
|
|
||||||
read_only: true
|
|
||||||
volumes:
|
|
||||||
- ${TPOT_DATA_PATH}/honeytrap/attacks:/opt/honeytrap/var/attacks
|
|
||||||
- ${TPOT_DATA_PATH}/honeytrap/downloads:/opt/honeytrap/var/downloads
|
|
||||||
- ${TPOT_DATA_PATH}/honeytrap/log:/opt/honeytrap/var/log
|
|
||||||
|
|
||||||
  # Ipphoney service
  ipphoney:
    container_name: ipphoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ipphoney_local
    ports:
      - "631:631"
    image: ${TPOT_REPO}/ipphoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/ipphoney/log:/opt/ipphoney/log

  # Log4pot service
  log4pot:
    container_name: log4pot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /tmp:uid=2000,gid=2000
    networks:
      - log4pot_local
    ports:
      - "80:8080"
      - "443:8080"
      - "8080:8080"
      - "9200:8080"
      - "25565:8080"
    image: ${TPOT_REPO}/log4pot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/log4pot/log:/var/log/log4pot/log
      - ${TPOT_DATA_PATH}/log4pot/payloads:/var/log/log4pot/payloads

  # Mailoney service
  mailoney:
    container_name: mailoney
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - HPFEEDS_SERVER=
      - HPFEEDS_IDENT=user
      - HPFEEDS_SECRET=pass
      - HPFEEDS_PORT=20000
      - HPFEEDS_CHANNELPREFIX=prefix
    networks:
      - mailoney_local
    ports:
      - "25:25"
    image: ${TPOT_REPO}/mailoney:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/mailoney/log:/opt/mailoney/logs

  # Medpot service
  medpot:
    container_name: medpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - medpot_local
    ports:
      - "2575:2575"
    image: ${TPOT_REPO}/medpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/medpot/log/:/var/log/medpot

  # Redishoneypot service
  redishoneypot:
    container_name: redishoneypot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - redishoneypot_local
    ports:
      - "6379:6379"
    image: ${TPOT_REPO}/redishoneypot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/redishoneypot/log:/var/log/redishoneypot

  # SentryPeer service
  sentrypeer:
    container_name: sentrypeer
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - SENTRYPEER_PEER_TO_PEER=${SENTRYPEER_PEER_TO_PEER:-0} # Default to 0 if unset or NULL (value provided by T-Pot .env)
    networks:
      - sentrypeer_local
    ports:
      # - "4222:4222/udp"
      - "5060:5060/udp"
      # - "127.0.0.1:8082:8082"
    image: ${TPOT_REPO}/sentrypeer:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/sentrypeer/log:/var/log/sentrypeer

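  # The ${SENTRYPEER_PEER_TO_PEER:-0} interpolation above falls back to 0 when
  # the variable is unset or empty. A minimal shell sketch of the same ":-"
  # semantics (Compose uses the identical substitution syntax; the commands
  # below are an illustration, not part of this file):
  #   unset SENTRYPEER_PEER_TO_PEER
  #   echo "${SENTRYPEER_PEER_TO_PEER:-0}"   # -> 0 (unset, default applies)
  #   SENTRYPEER_PEER_TO_PEER=1
  #   echo "${SENTRYPEER_PEER_TO_PEER:-0}"   # -> 1 (set and non-empty, value wins)
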
  #### Snare / Tanner
  ## Tanner Redis Service
  tanner_redis:
    container_name: tanner_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## PHP Sandbox service
  tanner_phpox:
    container_name: tanner_phpox
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/phpox:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## Tanner API service
  tanner_api:
    container_name: tanner_api
    restart: always
    depends_on:
      - tanner_redis
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
    command: tannerapi

  ## Tanner service
  tanner:
    container_name: tanner
    restart: always
    depends_on:
      - tanner_api
      - tanner_phpox
    tmpfs:
      - /tmp/tanner:uid=2000,gid=2000
    tty: true
    networks:
      - tanner_local
    image: ${TPOT_REPO}/tanner:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    command: tanner
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/tanner/log:/var/log/tanner
      - ${TPOT_DATA_PATH}/tanner/files:/opt/tanner/files

  ## Snare service
  snare:
    container_name: snare
    restart: always
    depends_on:
      - tanner
    tty: true
    networks:
      - tanner_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/snare:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

  # Wordpot service
  wordpot:
    container_name: wordpot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - wordpot_local
    ports:
      - "80:80"
    image: ${TPOT_REPO}/wordpot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/wordpot/log:/opt/wordpot/log

##################
#### NSM
##################

  # Fatt service
  fatt:
    container_name: fatt
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/fatt:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/fatt/log:/opt/fatt/log

  # P0f service
  p0f:
    container_name: p0f
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    network_mode: "host"
    image: ${TPOT_REPO}/p0f:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/p0f/log:/var/log/p0f

  # Suricata service
  suricata:
    container_name: suricata
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - OINKCODE=${OINKCODE:-OPEN} # Default to OPEN if unset or NULL (value provided by T-Pot .env)
      # Loading external Rules from URL
      # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com"
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: ${TPOT_REPO}/suricata:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/suricata/log:/var/log/suricata

##################
#### Tools
##################

#### ELK
  ## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - bootstrap.memory_lock=true
      - ES_JAVA_OPTS=-Xms2048m -Xmx2048m
      - ES_TMPDIR=/tmp
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 4g
    ports:
      - "127.0.0.1:64298:9200"
    image: ${TPOT_REPO}/elasticsearch:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

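  # bootstrap.memory_lock=true only takes effect together with the memlock
  # ulimits and the IPC_LOCK capability set above. A hedged one-liner (assuming
  # the container is healthy and the port mapping above) to confirm the heap
  # is actually locked; this check is an illustration, not part of this file:
  #   curl -s 'http://127.0.0.1:64298/_nodes?filter_path=**.mlockall'
  #   # expect: {"nodes":{"<node-id>":{"process":{"mlockall":true}}}}
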
  ## Kibana service
  kibana:
    container_name: kibana
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    mem_limit: 1g
    ports:
      - "127.0.0.1:64296:5601"
    image: ${TPOT_REPO}/kibana:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

  ## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    environment:
      - LS_JAVA_OPTS=-Xms1024m -Xmx1024m
      - TPOT_TYPE=${TPOT_TYPE:-HIVE}
      - TPOT_HIVE_USER=${TPOT_HIVE_USER}
      - TPOT_HIVE_IP=${TPOT_HIVE_IP}
    ports:
      - "127.0.0.1:64305:64305"
    mem_limit: 2g
    image: ${TPOT_REPO}/logstash:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data

  ## Map Redis Service
  map_redis:
    container_name: map_redis
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/redis:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true

  ## Map Web Service
  map_web:
    container_name: map_web
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    environment:
      - MAP_COMMAND=AttackMapServer.py
    stop_signal: SIGKILL
    tty: true
    ports:
      - "127.0.0.1:64299:64299"
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}

  ## Map Data Service
  map_data:
    container_name: map_data
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    environment:
      - MAP_COMMAND=DataServer_v2.py
      - TPOT_ATTACKMAP_TEXT=${TPOT_ATTACKMAP_TEXT}
      - TZ=${TPOT_ATTACKMAP_TEXT_TIMEZONE}
    stop_signal: SIGKILL
    tty: true
    image: ${TPOT_REPO}/map:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
#### /ELK

  # Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - ewsposter_local
    environment:
      - EWS_HPFEEDS_ENABLE=false
      - EWS_HPFEEDS_HOST=host
      - EWS_HPFEEDS_PORT=port
      - EWS_HPFEEDS_CHANNELS=channels
      - EWS_HPFEEDS_IDENT=user
      - EWS_HPFEEDS_SECRET=secret
      - EWS_HPFEEDS_TLSCERT=false
      - EWS_HPFEEDS_FORMAT=json
    image: ${TPOT_REPO}/ewsposter:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}:/data
      - ${TPOT_DATA_PATH}/ews/conf/ews.ip:/opt/ewsposter/ews.ip

  # Nginx service
  nginx:
    container_name: nginx
    restart: always
    environment:
      - COCKPIT=${COCKPIT}
      - TPOT_OSTYPE=${TPOT_OSTYPE}
    depends_on:
      tpotinit:
        condition: service_healthy
    tmpfs:
      - /var/tmp/nginx/client_body
      - /var/tmp/nginx/proxy
      - /var/tmp/nginx/fastcgi
      - /var/tmp/nginx/uwsgi
      - /var/tmp/nginx/scgi
      - /run
      - /var/lib/nginx/tmp:uid=100,gid=82
    network_mode: "host"
    ports:
      - "64297:64297"
    image: ${TPOT_REPO}/nginx:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    read_only: true
    volumes:
      - ${TPOT_DATA_PATH}/nginx/cert/:/etc/nginx/cert/:ro
      - ${TPOT_DATA_PATH}/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/conf/lswebpasswd:/etc/nginx/lswebpasswd:ro
      - ${TPOT_DATA_PATH}/nginx/log/:/var/log/nginx/

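  # read_only: true mounts the container filesystem read-only, which is why
  # every path Nginx needs to write is listed under tmpfs above. A quick
  # sketch of the effect (a hypothetical check, assuming the container runs;
  # not part of this file):
  #   docker exec nginx touch /etc/nginx/test
  #   # fails with: touch: /etc/nginx/test: Read-only file system
  #   docker exec nginx touch /run/test && echo "tmpfs is writable"
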
  # Spiderfoot service
  spiderfoot:
    container_name: spiderfoot
    restart: always
    depends_on:
      tpotinit:
        condition: service_healthy
    networks:
      - spiderfoot_local
    ports:
      - "127.0.0.1:64303:8080"
    image: ${TPOT_REPO}/spiderfoot:${TPOT_VERSION}
    pull_policy: ${TPOT_PULL_POLICY}
    volumes:
      - ${TPOT_DATA_PATH}/spiderfoot:/home/spiderfoot/.spiderfoot

@@ -1,13 +1,10 @@
 #!/bin/bash
 
-# Buildx Example: docker buildx build --platform linux/amd64,linux/arm64 -t username/demo:latest --push .
-
 # Setup Vars
 myPLATFORMS="linux/amd64,linux/arm64"
-myHUBORG_DOCKER="dtagdevsec"
-myHUBORG_GITHUB="ghcr.io/telekom-security"
-myTAG="dev"
-myIMAGESBASE="tpotinit adbhoney ciscoasa citrixhoneypot conpot cowrie ddospot dicompot dionaea elasticpot endlessh ewsposter fatt glutton hellpot heralding honeypots honeytrap ipphoney log4pot mailoney medpot nginx p0f redishoneypot sentrypeer spiderfoot suricata wordpot"
+myHUBORG="dtagdevsec"
+myTAG="2204"
+myIMAGESBASE="adbhoney ciscoasa citrixhoneypot conpot cowrie ddospot dicompot dionaea elasticpot endlessh ewsposter fatt glutton hellpot heralding honeypots honeytrap ipphoney log4pot mailoney medpot nginx p0f redishoneypot sentrypeer spiderfoot suricata wordpot"
 myIMAGESELK="elasticsearch kibana logstash map"
 myIMAGESTANNER="phpox redis snare tanner"
 myBUILDERLOG="builder.log"
@@ -26,42 +23,9 @@ fi
 docker buildx > /dev/null 2>&1
 if [ "$?" == "1" ];
   then
-    echo "### Build environment not setup. Install docker engine from docker:"
-    echo "### https://docs.docker.com/engine/install/debian/"
+    echo "### Build environment not setup. Run bin/setup_builder.sh"
 fi
 
-# Let's ensure arm64 and amd64 are supported
-echo "### Let's ensure ARM64 and AMD64 are supported ..."
-myARCHITECTURES="amd64 arm64"
-mySUPPORTED=$(docker buildx inspect --bootstrap)
-
-for i in $myARCHITECTURES;
-  do
-    if ! echo $mySUPPORTED | grep -q linux/$i;
-      then
-        echo "## Installing $i support ..."
-        docker run --privileged --rm tonistiigi/binfmt --install $i
-        docker buildx inspect --bootstrap
-      else
-        echo "## $i support detected!"
-    fi
-done
-echo
-
-# Let's ensure we have builder created with cache support
-echo "### Checking for mybuilder ..."
-if ! docker buildx ls | grep -q mybuilder;
-  then
-    echo "## Setting up mybuilder ..."
-    docker buildx create --name mybuilder
-    # Set as default, otherwise local cache is not supported
-    docker buildx use mybuilder
-    docker buildx inspect --bootstrap
-  else
-    echo "## Found mybuilder!"
-fi
-echo
-
 # Only run with command switch
 if [ "$1" == "" ]; then
   echo "### T-Pot Multi Arch Image Builder."
@@ -80,12 +44,7 @@ local myPUSHOPTION="$3"
 for myREPONAME in $myIMAGELIST;
   do
     echo -n "Now building: $myREPONAME in $myPATH$myREPONAME/."
-    docker buildx build --cache-from "type=local,src=$myBUILDCACHE" \
-                        --cache-to "type=local,dest=$myBUILDCACHE" \
-                        --platform $myPLATFORMS \
-                        -t $myHUBORG_DOCKER/$myREPONAME:$myTAG \
-                        -t $myHUBORG_GITHUB/$myREPONAME:$myTAG \
-                        $myPUSHOPTION $myPATH$myREPONAME/. >> $myBUILDERLOG 2>&1
+    docker buildx build --cache-from "type=local,src=$myBUILDCACHE" --cache-to "type=local,dest=$myBUILDCACHE" --platform $myPLATFORMS -t $myHUBORG/$myREPONAME:$myTAG $myPUSHOPTION $myPATH$myREPONAME/. >> $myBUILDERLOG 2>&1
     if [ "$?" != "0" ];
       then
        echo " [ ERROR ] - Check logs!"
@@ -117,3 +76,4 @@ if [ "$1" == "push" ];
 fuBUILDIMAGES "elk/" "$myIMAGESELK" "--push"
 fuBUILDIMAGES "tanner/" "$myIMAGESTANNER" "--push"
 fi
+
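For reference, a hedged, annotated restatement of the single-line buildx call on the new side above (flag meanings per the docker buildx documentation; all variables come from the Setup Vars section):

    # read the layer cache from a local directory, write the refreshed cache
    # back, and build both platforms in one pass, tagged for the hub org:
    docker buildx build \
      --cache-from "type=local,src=$myBUILDCACHE" \
      --cache-to "type=local,dest=$myBUILDCACHE" \
      --platform $myPLATFORMS \
      -t $myHUBORG/$myREPONAME:$myTAG \
      $myPUSHOPTION $myPATH$myREPONAME/. >> $myBUILDERLOG 2>&1
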
@@ -9,9 +9,12 @@ COPY dist/ /root/dist/
 RUN apt-get update -y && \
     apt-get install -y \
             aria2 \
+            autossh \
             bash \
             bzip2 \
-            curl && \
+            curl \
+#            openjdk-11-jre \
+            openssh-client && \
 #
 # Determine arch, get and install packages
     ARCH=$(arch) && \

22 docker/elk/logstash/dist/entrypoint.sh vendored
@@ -42,27 +42,25 @@
     echo "Cannot reach Listbot, starting Logstash without latest translation maps."
 fi
 
-# Distributed T-Pot installation needs a different pipeline config
-if [ "$TPOT_TYPE" == "SENSOR" ];
+# Distributed T-Pot installation needs a different pipeline config and autossh tunnel.
+if [ "$MY_TPOT_TYPE" == "SENSOR" ];
   then
     echo
-    echo "Distributed T-Pot setup, sending T-Pot logs to $TPOT_HIVE_IP."
+    echo "Distributed T-Pot setup, sending T-Pot logs to $MY_HIVE_IP."
     echo
-    echo "T-Pot type: $TPOT_TYPE"
-    echo "Hive IP: $TPOT_HIVE_IP"
+    echo "T-Pot type: $MY_TPOT_TYPE"
+    echo "Keyfile used: $MY_SENSOR_PRIVATEKEYFILE"
+    echo "Hive username: $MY_HIVE_USERNAME"
+    echo "Hive IP: $MY_HIVE_IP"
     echo
     # Ensure correct file permissions for private keyfile or SSH will ask for password
+    chmod 600 $MY_SENSOR_PRIVATEKEYFILE
     cp /usr/share/logstash/config/pipelines_sensor.yml /usr/share/logstash/config/pipelines.yml
+    autossh -f -M 0 -4 -l $MY_HIVE_USERNAME -i $MY_SENSOR_PRIVATEKEYFILE -p 64295 -N -L64305:127.0.0.1:64305 $MY_HIVE_IP -o "ServerAliveInterval 30" -o "ServerAliveCountMax 3" -o "StrictHostKeyChecking=no" -o "UserKnownHostsFile=/dev/null"
 fi
 
-if [ "$TPOT_TYPE" != "SENSOR" ];
+if [ "$MY_TPOT_TYPE" != "SENSOR" ];
   then
-    echo
-    echo "This is a T-Pot STANDARD / HIVE installation."
-    echo
-    echo "T-Pot type: $TPOT_TYPE"
-    echo
 
     # Index Management is happening through ILM, but we need to put T-Pot ILM setting on ES.
     myTPOTILM=$(curl -s -XGET "http://elasticsearch:9200/_ilm/policy/tpot" | grep "Lifecycle policy not found: tpot" -c)
     if [ "$myTPOTILM" == "1" ];
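A hedged breakdown of the autossh tunnel added above (flag meanings per the OpenSSH and autossh man pages; the variable names come from the script itself):

    # -f    fork into the background once the tunnel is established
    # -M 0  disable autossh's own monitor port; the ServerAlive options handle liveness
    # -4    force IPv4
    # -N    do not run a remote command, forward ports only
    # -L64305:127.0.0.1:64305  expose the HIVE's Logstash HTTP input on the SENSOR's loopback
    # StrictHostKeyChecking=no and UserKnownHostsFile=/dev/null skip host key verification
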
7 docker/elk/logstash/dist/http_output.conf vendored
@@ -708,10 +708,7 @@ output {
     id => "${MY_HOSTNAME}"
     codec => "json"
     format => "json_batch"
-    url => "https://${TPOT_HIVE_IP}:64294"
-    cacert => "/data/hive.crt"
-    headers => {
-      "Authorization" => "Basic ${TPOT_HIVE_USER}"
-    }
+    url => "http://127.0.0.1:64305"
   }
 
 }
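With this change the SENSOR's Logstash posts to the autossh tunnel endpoint on loopback instead of the HIVE directly, so transport security and authentication move from the HTTP output (cacert, basic auth) to the SSH tunnel. A minimal sketch (a hypothetical check on a SENSOR, assuming the tunnel is up) that the forwarded port answers:

    curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:64305
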
@@ -10,13 +10,12 @@ RUN apk -U --no-cache add \
             libcap \
             py3-pip \
             python3 \
-            python3-dev \
-            tzdata && \
+            python3-dev && \
 #
 # Install from GitHub and setup
     mkdir -p /opt && \
     cd /opt/ && \
-    git clone https://github.com/t3chn0m4g3/t-pot-attack-map -b 2.1.0 && \
+    git clone https://github.com/t3chn0m4g3/t-pot-attack-map -b 2.0.1 && \
     cd t-pot-attack-map && \
     # git checkout eaf8d123d72a62e4c12093e4e8487e10e6ef60f3 && \
     # git branch -a && \
@@ -37,7 +36,6 @@ RUN apk -U --no-cache add \
     rm -rf /root/* /var/cache/apk/* /opt/t-pot-attack-map/.git
 #
 # Start T-Pot-Attack-Map
-ENV TZ=UTC
 STOPSIGNAL SIGINT
 USER map:map
 WORKDIR /opt/t-pot-attack-map
@@ -29,7 +29,6 @@ RUN apk -U --no-cache add \
     cp /root/dist/conf/nginx.conf /etc/nginx/ && \
     cp -R /root/dist/conf/ssl /etc/nginx/ && \
     cp /root/dist/conf/tpotweb.conf /etc/nginx/conf.d/ && \
-    cp /root/dist/conf/lsweb.conf /etc/nginx/conf.d/ && \
 #
 # Clean up
     rm -rf /root/* && \

110 docker/nginx/dist/conf/lsweb.conf vendored
@@ -1,110 +0,0 @@
############################################
### NGINX T-Pot configuration file by mo ###
############################################

server {

    #########################
    ### Basic server settings
    #########################
    listen 64294 ssl http2;
    index index.html;
    ssl_protocols TLSv1.3;
    server_name example.com;
    error_page 300 301 302 400 401 402 403 404 500 501 502 503 504 /error.html;
    root /var/lib/nginx/html;
    add_header Cache-Control "public, max-age=604800";

    ##############################################
    ### Remove version number, add different header
    ##############################################
    server_tokens off;


    ##############################################
    ### SSL settings and Cipher Suites
    ##############################################
    ssl_certificate /etc/nginx/cert/nginx.crt;
    ssl_certificate_key /etc/nginx/cert/nginx.key;

    ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH:!DHE:!SHA:!SHA256';
    ssl_ecdh_curve secp384r1;
    ssl_dhparam /etc/nginx/ssl/dhparam4096.pem;

    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;


    ####################################
    ### OWASP recommendations / settings
    ####################################

    ### Size Limits & Buffer Overflows
    ### the size may be configured based on the needs.
    client_body_buffer_size 128k;
    client_header_buffer_size 1k;
    client_max_body_size 2M;

    ### Changed from OWASP recommendations: "2 1k" to "2 1280" (So 1.2k)
    ### When you pass through potentially another reverse proxy/load balancer
    ### in front of tpotce you can introduce more headers than normal and
    ### therefore you can exceed the allowed header buffer of 1k.
    ### An extra 280 bytes seems to be working for most use-cases,
    ### while still keeping it close to OWASP's recommendation.
    large_client_header_buffers 2 1280;

    ### Mitigate Slow HTTP DoS Attack
    ### Timeouts definition ##
    client_body_timeout 10;
    client_header_timeout 10;
    keepalive_timeout 5 5;
    send_timeout 10;

    ### X-Frame-Options is to prevent from clickJacking attack
    add_header X-Frame-Options SAMEORIGIN;

    ### disable content-type sniffing on some browsers.
    add_header X-Content-Type-Options nosniff;

    ### This header enables the Cross-site scripting (XSS) filter
    add_header X-XSS-Protection "1; mode=block";

    ### This will enforce HTTP browsing into HTTPS and avoid ssl stripping attack
    add_header Strict-Transport-Security "max-age=31536000; includeSubdomains;";
    # add_header 'Content-Security-Policy' 'upgrade-insecure-requests';

    ##################################
    ### Restrict access and basic auth
    ##################################

    # satisfy all;
    satisfy any;

    # allow 10.0.0.0/8;
    # allow 172.16.0.0/12;
    # allow 192.168.0.0/16;
    allow 127.0.0.1;
    allow ::1;
    deny all;

    auth_basic "closed site";
    auth_basic_user_file /etc/nginx/lswebpasswd;

    ################################################
    ### T-Pot Hive Logstash HTTP Input Reverse Proxy
    ################################################

    location / {
      set_by_lua_block $logstash {
        local tpot_ostype = os.getenv("TPOT_OSTYPE")
        if tpot_ostype == "mac" or tpot_ostype == "win" then
            return "http://logstash:64305";
        else
            return "http://127.0.0.1:64305";
        end
      }
      access_log off;
      error_log /var/log/nginx/lsweb_error.log;
      proxy_pass $logstash;
    }
}
1 docker/nginx/dist/conf/nginx.conf vendored
@@ -6,6 +6,7 @@ load_module /usr/lib/nginx/modules/ngx_http_brotli_filter_module.so;
 load_module /usr/lib/nginx/modules/ngx_http_brotli_static_module.so;
 
 # OS ENV variables need to be defined here, so Lua can use them
+env COCKPIT;
 env TPOT_OSTYPE;
 
 # Both modules are needed for Lua, in this exact order
8 docker/nginx/dist/conf/tpotweb.conf vendored
@@ -96,7 +96,12 @@ server {
 
    location / {
      set_by_lua_block $index_file {
-        return "index.html";
+        local cockpit = os.getenv("COCKPIT")
+        if cockpit == "false" then
+            return "index_light.html"
+        else
+            return "index.html"
+        end
      }
      auth_basic "closed site";
      auth_basic_user_file /etc/nginx/nginxpasswd;
@@ -144,7 +149,6 @@ server {
            return "http://127.0.0.1:64298";
        end
      }
-
      proxy_pass $elasticsearch;
      rewrite /es/(.*)$ /$1 break;
    }
|
13
docker/nginx/dist/html/cockpit.html
vendored
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
<!DOCTYPE HTML>
|
||||||
|
<html lang="en-US">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<title>Redirect to Cockpit</title>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<script type="text/javascript">
|
||||||
|
window.location.href = window.location.protocol + '//' + window.location.hostname + ':64294';
|
||||||
|
</script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
|
|
4 docker/nginx/dist/html/config.js vendored
@@ -36,6 +36,10 @@ const CONFIG = {
             name: 'Attack Map',
             link: '/map/',
         },
+        {
+            name: 'Cockpit',
+            link: '/cockpit.html',
+        },
         {
             name: 'Cyberchef',
             link: '/cyberchef/',
|
71
docker/nginx/dist/html/config_light.js
vendored
Normal file
@ -0,0 +1,71 @@
|
|||||||
|
// ╔╗ ╔═╗╔╗╔╔╦╗╔═╗
|
||||||
|
// ╠╩╗║╣ ║║║ ║ ║ ║
|
||||||
|
// ╚═╝╚═╝╝╚╝ ╩ ╚═╝
|
||||||
|
// ┌─┐┌─┐┌┐┌┌─┐┬┌─┐┬ ┬┬─┐┌─┐┌┬┐┬┌─┐┌┐┌
|
||||||
|
// │ │ ││││├┤ ││ ┬│ │├┬┘├─┤ │ ││ ││││
|
||||||
|
// └─┘└─┘┘└┘└ ┴└─┘└─┘┴└─┴ ┴ ┴ ┴└─┘┘└┘
|
||||||
|
|
||||||
|
const CONFIG = {
|
||||||
|
// ┌┐ ┌─┐┌─┐┬┌─┐┌─┐
|
||||||
|
// ├┴┐├─┤└─┐││ └─┐
|
||||||
|
// └─┘┴ ┴└─┘┴└─┘└─┘
|
||||||
|
|
||||||
|
// General
|
||||||
|
imageBackground: true,
|
||||||
|
openInNewTab: true,
|
||||||
|
twelveHourFormat: false,
|
||||||
|
|
||||||
|
// Greetings
|
||||||
|
greetingMorning: 'Good morning ☕',
|
||||||
|
greetingAfternoon: 'Good afternoon 🍯',
|
||||||
|
greetingEvening: 'Good evening 😁',
|
||||||
|
greetingNight: 'Go to Sleep 🥱',
|
||||||
|
|
||||||
|
// ┬ ┬┌─┐┌┬┐┌─┐
|
||||||
|
// │ │└─┐ │ └─┐
|
||||||
|
// ┴─┘┴└─┘ ┴ └─┘
|
||||||
|
|
||||||
|
//Icons
|
||||||
|
firstListIcon: 'home',
|
||||||
|
secondListIcon: 'external-link',
|
||||||
|
|
||||||
|
// Links
|
||||||
|
lists: {
|
||||||
|
firstList: [
|
||||||
|
{
|
||||||
|
name: 'Attack Map',
|
||||||
|
link: '/map/',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: 'Cyberchef',
|
||||||
|
link: '/cyberchef/',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: 'Elasticvue',
|
||||||
|
link: '/elasticvue/',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: 'Kibana',
|
||||||
|
link: '/kibana/',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: 'Spiderfoot',
|
||||||
|
link: '/spiderfoot/',
|
||||||
|
},
|
||||||
|
],
|
||||||
|
secondList: [
|
||||||
|
{
|
||||||
|
name: 'SecurityMeter',
|
||||||
|
link: 'https://sicherheitstacho.eu',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: 'T-Pot @ GitHub',
|
||||||
|
link: 'https://github.com/telekom-security/tpotce/',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: 'T-Pot ReadMe',
|
||||||
|
link: 'https://github.com/telekom-security/tpotce/blob/master/README.md',
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
};
|
1 docker/nginx/dist/html/index.html vendored
@@ -53,7 +53,6 @@
     <script>
       lucide.createIcons();
     </script>
-
   </body>
 
   <!-- Developed and designed by Miguel R. Ávila: -->
|
60
docker/nginx/dist/html/index_light.html
vendored
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8" />
|
||||||
|
<title>T-Pot</title>
|
||||||
|
<link
|
||||||
|
rel=" shortcut icon"
|
||||||
|
type="image/png"
|
||||||
|
href="assets/icons/favicon.png"
|
||||||
|
/>
|
||||||
|
<link rel="stylesheet" href="app.css" />
|
||||||
|
<script src="assets/js/lucide.min.js"></script>
|
||||||
|
</head>
|
||||||
|
|
||||||
|
<!--
|
||||||
|
╔╗ ╔═╗╔╗╔╔╦╗╔═╗
|
||||||
|
╠╩╗║╣ ║║║ ║ ║ ║
|
||||||
|
╚═╝╚═╝╝╚╝ ╩ ╚═╝
|
||||||
|
-->
|
||||||
|
|
||||||
|
<body class="">
|
||||||
|
<div class="container">
|
||||||
|
<!-- Clock and Greetings -->
|
||||||
|
|
||||||
|
<div class="timeBlock">
|
||||||
|
<div class="clock">
|
||||||
|
<div id="hour" class=""></div>
|
||||||
|
<div id="separator" class=""></div>
|
||||||
|
<div id="minutes" class=""></div>
|
||||||
|
</div>
|
||||||
|
<div id="greetings"></div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!--
|
||||||
|
┬ ┬┌─┐┌┬┐┌─┐
|
||||||
|
│ │└─┐ │ └─┐
|
||||||
|
┴─┘┴└─┘ ┴ └─┘
|
||||||
|
-->
|
||||||
|
|
||||||
|
<div class="card list list__1" id="list_1"></div>
|
||||||
|
|
||||||
|
<div class="card list list__2" id="list_2"></div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Config -->
|
||||||
|
<script src="config_light.js"></script>
|
||||||
|
|
||||||
|
<!-- Scripts -->
|
||||||
|
<script src="assets/js/time.js"></script>
|
||||||
|
<script src="assets/js/theme.js"></script>
|
||||||
|
<script src="assets/js/greeting.js"></script>
|
||||||
|
<script src="assets/js/lists.js"></script>
|
||||||
|
<script>
|
||||||
|
lucide.createIcons();
|
||||||
|
</script>
|
||||||
|
</body>
|
||||||
|
|
||||||
|
<!-- Developed and designed by Miguel R. Ávila: -->
|
||||||
|
<!-- https://github.com/migueravila -->
|
||||||
|
</html>
|
@@ -1,46 +0,0 @@
FROM alpine:edge
#
# Include dist
COPY dist/ /opt/tpot/
#
# Get and install dependencies & packages
RUN apk --no-cache -U add \
      aria2 \
      apache2-utils \
      bash \
      bind-tools \
      conntrack-tools \
      curl \
      ethtool \
      figlet \
      git \
      grep \
      iproute2 \
      iptables \
      iptables-legacy \
      jq \
      logrotate \
      lsblk \
      net-tools \
      openssl \
      pigz \
      tar \
      uuidgen && \
    apk --no-cache -U add --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community \
      yq && \
#
# Setup user
    addgroup -g 2000 tpot && \
    adduser -S -s /bin/ash -u 2000 -D -g 2000 tpot && \
#
# Clean up
    apk del --purge git && \
    rm -rf /root/* /tmp/* && \
    rm -rf /root/.cache /opt/tpot/.git && \
    rm -rf /var/cache/apk/*
#
# Run tpotinit
WORKDIR /opt/tpot
HEALTHCHECK --retries=1000 --interval=5s CMD test -f /tmp/success || exit 1
STOPSIGNAL SIGKILL
CMD ["/opt/tpot/entrypoint.sh"]
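The HEALTHCHECK above is what the compose files' "condition: service_healthy" gates on: the container only turns healthy once /tmp/success exists. A hedged check with the docker CLI (an illustration, assuming the container is named tpotinit):

    docker inspect --format '{{.State.Health.Status}}' tpotinit
    # prints "starting" until /tmp/success is created, then "healthy"
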
61 docker/tpotinit/dist/bin/backup_es_folders.sh vendored
@@ -1,61 +0,0 @@
#!/bin/bash
# Run as root only.
myWHOAMI=$(whoami)
if [ "$myWHOAMI" != "root" ];
  then
    echo "Need to run as root ..."
    exit
fi

if [ "$1" == "" ] || [ "$1" != "all" ] && [ "$1" != "base" ];
  then
    echo "Usage: backup_es_folders [all, base]"
    echo "       all  = backup all ES folder"
    echo "       base = backup only Kibana index"
    echo
    exit
fi

# Backup all ES relevant folders
# Make sure ES is available
myES="http://127.0.0.1:64298/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
  then
    echo "### Elasticsearch is not available, try starting via 'systemctl start tpot'."
    exit
  else
    echo "### Elasticsearch is available, now continuing."
    echo
fi

# Set vars
myCOUNT=1
myDATE=$(date +%Y%m%d%H%M)
myELKPATH="/data/elk/data"
myKIBANAINDEXNAME=$(curl -s -XGET ''$myES'_cat/indices/.kibana' | awk '{ print $4 }')
myKIBANAINDEXPATH=$myELKPATH/indices/$myKIBANAINDEXNAME

# Let's ensure normal operation on exit or if interrupted ...
function fuCLEANUP {
  ### Start ELK
  systemctl start tpot
  echo "### Now starting T-Pot ..."
}
trap fuCLEANUP EXIT

# Stop T-Pot to lift db lock
echo "### Now stopping T-Pot"
systemctl stop tpot
sleep 2

# Backup DB in 2 flavors
echo "### Now backing up Elasticsearch folders ..."
if [ "$1" == "all" ];
  then
    tar cvfz "elkall_"$myDATE".tgz" $myELKPATH
elif [ "$1" == "base" ];
  then
    tar cvfz "elkbase_"$myDATE".tgz" $myKIBANAINDEXPATH
fi
109 docker/tpotinit/dist/bin/blackhole.sh vendored
@@ -1,109 +0,0 @@
#!/bin/bash

# Run as root only.
myWHOAMI=$(whoami)
if [ "$myWHOAMI" != "root" ]
  then
    echo "### Need to run as root ..."
    echo
    exit
fi

# Disclaimer
if [ "$1" == "" ];
  then
    echo "### Warning!"
    echo "### This script will download and add blackhole routes for known mass scanners in an attempt to decrease the chance of detection."
    echo "### IPs are neither curated nor verified, use at your own risk!"
    echo "###"
    echo "### As long as <blackhole.sh del> is not executed the routes will be re-added on T-Pot start through </opt/tpot/bin/updateip.sh>."
    echo "### Check with <ip r> or <dps.sh> if blackhole is enabled."
    echo
    echo "Usage: blackhole.sh add (add blackhole routes)"
    echo "       blackhole.sh del (delete blackhole routes)"
    echo
    exit
fi

# QnD paths, files
mkdir -p /etc/blackhole
cd /etc/blackhole
myFILE="mass_scanner.txt"
myURL="https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/mass_scanner.txt"
myBASELINE="500"
# Alternatively, using less routes, but blocking complete /24 networks
#myFILE="mass_scanner_cidr.txt"
#myURL="https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/mass_scanner_cidr.txt"

# Calculate age of downloaded list, read IPs
if [ -f "$myFILE" ];
  then
    myNOW=$(date +%s)
    myOLD=$(date +%s -r "$myFILE")
    myDAYS=$(( ($myNOW-$myOLD) / (60*60*24) ))
    echo "### Downloaded $myFILE list is $myDAYS days old."
    myBLACKHOLE_IPS=$(grep -o -P "\b(?:\d{1,3}\.){3}\d{1,3}\b" "$myFILE" | sort -u)
fi

# Let's load ip list
if [[ ! -f "$myFILE" && "$1" == "add" || "$myDAYS" -gt 30 ]];
  then
    echo "### Downloading $myFILE list."
    aria2c --allow-overwrite -s16 -x 16 "$myURL" && \
    myBLACKHOLE_IPS=$(grep -o -P "\b(?:\d{1,3}\.){3}\d{1,3}\b" "$myFILE" | sort -u)
fi

myCOUNT=$(echo $myBLACKHOLE_IPS | wc -w)
# Let's extract mass scanner IPs
if [ "$myCOUNT" -lt "$myBASELINE" ] && [ "$1" == "add" ];
  then
    echo "### Something went wrong. Please check contents of /etc/blackhole/$myFILE."
    echo "### Aborting."
    echo
    exit
elif [ "$(ip r | grep 'blackhole' -c)" -gt "$myBASELINE" ] && [ "$1" == "add" ];
  then
    echo "### Blackhole already enabled."
    echo "### Aborting."
    echo
    exit
fi

# Let's add blackhole routes for all mass scanner IPs
if [ "$1" == "add" ];
  then
    echo
    echo -n "Now adding $myCOUNT IPs to blackhole."
    for i in $myBLACKHOLE_IPS;
      do
        ip route add blackhole "$i"
        echo -n "."
    done
    echo
    echo "Added $(ip r | grep "blackhole" -c) IPs to blackhole."
    echo
    echo "### Remember!"
    echo "### As long as <blackhole.sh del> is not executed the routes will be re-added on T-Pot start through </opt/tpot/bin/updateip.sh>."
    echo "### Check with <ip r> or <dps.sh> if blackhole is enabled."
    echo
    exit
fi

# Let's delete blackhole routes for all mass scanner IPs
if [ "$1" == "del" ] && [ "$myCOUNT" -gt "$myBASELINE" ];
  then
    echo
    echo -n "Now deleting $myCOUNT IPs from blackhole."
    for i in $myBLACKHOLE_IPS;
      do
        ip route del blackhole "$i"
        echo -n "."
    done
    echo
    echo "$(ip r | grep 'blackhole' -c) IPs remaining in blackhole."
    echo
    rm "$myFILE"
  else
    echo "### Blackhole already disabled."
    echo
fi
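Besides <ip r>, iproute2 can filter on the route type directly; a hedged one-liner (an illustration, not part of the script) to count the currently installed blackhole routes:

    ip -4 route show type blackhole | wc -l
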
393
docker/tpotinit/dist/bin/clean.sh
vendored
@ -1,393 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
# T-Pot Container Data Cleaner & Log Rotator
|
|
||||||
# Set colors
|
|
||||||
myRED="[0;31m"
|
|
||||||
myGREEN="[0;32m"
|
|
||||||
myWHITE="[0;0m"
|
|
||||||
|
|
||||||
# Set pigz
|
|
||||||
myPIGZ=$(which pigz)
|
|
||||||
|
|
||||||
# Set persistence
|
|
||||||
myPERSISTENCE=$1
|
|
||||||
|
|
||||||
# Let's create a function to check if folder is empty
|
|
||||||
fuEMPTY () {
|
|
||||||
local myFOLDER=$1
|
|
||||||
|
|
||||||
echo $(ls $myFOLDER | wc -l)
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to rotate and compress logs
|
|
||||||
fuLOGROTATE () {
|
|
||||||
local mySTATUS="/opt/tpot/etc/logrotate/status"
|
|
||||||
local myCONF="/opt/tpot/etc/logrotate/logrotate.conf"
|
|
||||||
local myADBHONEYTGZ="/data/adbhoney/downloads.tgz"
|
|
||||||
local myADBHONEYDL="/data/adbhoney/downloads/"
|
|
||||||
local myCOWRIETTYLOGS="/data/cowrie/log/tty/"
|
|
||||||
local myCOWRIETTYTGZ="/data/cowrie/log/ttylogs.tgz"
|
|
||||||
local myCOWRIEDL="/data/cowrie/downloads/"
|
|
||||||
local myCOWRIEDLTGZ="/data/cowrie/downloads.tgz"
|
|
||||||
local myDIONAEABI="/data/dionaea/bistreams/"
|
|
||||||
local myDIONAEABITGZ="/data/dionaea/bistreams.tgz"
|
|
||||||
local myDIONAEABIN="/data/dionaea/binaries/"
|
|
||||||
local myDIONAEABINTGZ="/data/dionaea/binaries.tgz"
|
|
||||||
local myHONEYTRAPATTACKS="/data/honeytrap/attacks/"
|
|
||||||
local myHONEYTRAPATTACKSTGZ="/data/honeytrap/attacks.tgz"
|
|
||||||
local myHONEYTRAPDL="/data/honeytrap/downloads/"
|
|
||||||
local myHONEYTRAPDLTGZ="/data/honeytrap/downloads.tgz"
|
|
||||||
local myTANNERF="/data/tanner/files/"
|
|
||||||
local myTANNERFTGZ="/data/tanner/files.tgz"
|
|
||||||
|
|
||||||
# Ensure correct permissions and ownerships for logrotate to run without issues
|
|
||||||
chmod 770 /data/ -R
|
|
||||||
chown tpot:tpot /data -R
|
|
||||||
chmod 774 /data/nginx/conf -R
|
|
||||||
chmod 774 /data/nginx/cert -R
|
|
||||||
|
|
||||||
# Run logrotate with force (-f) first, so the status file can be written and race conditions (with tar) be avoided
|
|
||||||
logrotate -f -s $mySTATUS $myCONF
|
|
||||||
|
|
||||||
# Compressing some folders first and rotate them later
|
|
||||||
if [ "$(fuEMPTY $myADBHONEYDL)" != "0" ]; then tar -I $myPIGZ -cvf $myADBHONEYTGZ $myADBHONEYDL; fi
|
|
||||||
if [ "$(fuEMPTY $myCOWRIETTYLOGS)" != "0" ]; then tar -I $myPIGZ -cvf $myCOWRIETTYTGZ $myCOWRIETTYLOGS; fi
|
|
||||||
if [ "$(fuEMPTY $myCOWRIEDL)" != "0" ]; then tar -I $myPIGZ -cvf $myCOWRIEDLTGZ $myCOWRIEDL; fi
|
|
||||||
if [ "$(fuEMPTY $myDIONAEABI)" != "0" ]; then tar -I $myPIGZ -cvf $myDIONAEABITGZ $myDIONAEABI; fi
|
|
||||||
if [ "$(fuEMPTY $myDIONAEABIN)" != "0" ]; then tar -I $myPIGZ -cvf $myDIONAEABINTGZ $myDIONAEABIN; fi
|
|
||||||
if [ "$(fuEMPTY $myHONEYTRAPATTACKS)" != "0" ]; then tar -I $myPIGZ -cvf $myHONEYTRAPATTACKSTGZ $myHONEYTRAPATTACKS; fi
|
|
||||||
if [ "$(fuEMPTY $myHONEYTRAPDL)" != "0" ]; then tar -I $myPIGZ -cvf $myHONEYTRAPDLTGZ $myHONEYTRAPDL; fi
|
|
||||||
if [ "$(fuEMPTY $myTANNERF)" != "0" ]; then tar -I $myPIGZ -cvf $myTANNERFTGZ $myTANNERF; fi
|
|
||||||
|
|
||||||
# Ensure correct permissions and ownership for previously created archives
|
|
||||||
chmod 770 $myADBHONEYTGZ $myCOWRIETTYTGZ $myCOWRIEDLTGZ $myDIONAEABITGZ $myDIONAEABINTGZ $myHONEYTRAPATTACKSTGZ $myHONEYTRAPDLTGZ $myTANNERFTGZ
|
|
||||||
chown tpot:tpot $myADBHONEYTGZ $myCOWRIETTYTGZ $myCOWRIEDLTGZ $myDIONAEABITGZ $myDIONAEABINTGZ $myHONEYTRAPATTACKSTGZ $myHONEYTRAPDLTGZ $myTANNERFTGZ
|
|
||||||
|
|
||||||
# Need to remove subfolders since too many files cause rm to exit with errors
|
|
||||||
rm -rf $myADBHONEYDL $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL $myTANNERF
|
|
||||||
|
|
||||||
# Recreate subfolders with correct permissions and ownership
|
|
||||||
mkdir -p $myADBHONEYDL $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL $myTANNERF
|
|
||||||
chmod 770 $myADBHONEYDL $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL $myTANNERF
|
|
||||||
chown tpot:tpot $myADBHONEYDL $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL $myTANNERF
|
|
||||||
|
|
||||||
# Run logrotate again to account for previously created archives - DO NOT FORCE HERE!
|
|
||||||
logrotate -s $mySTATUS $myCONF
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare tpotinit data
|
|
||||||
fuTPOTINIT () {
|
|
||||||
mkdir -vp /data/ews/conf \
|
|
||||||
/data/tpot/etc/{compose,logrotate} \
|
|
||||||
/tmp/etc/
|
|
||||||
chmod 770 /data/ews/ -R
|
|
||||||
chmod 770 /data/tpot/ -R
|
|
||||||
chmod 770 /tmp/etc/ -R
|
|
||||||
chown tpot:tpot /data/ews/ -R
|
|
||||||
chown tpot:tpot /data/tpot/ -R
|
|
||||||
chown tpot:tpot /tmp/etc/ -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare honeytrap data
|
|
||||||
fuADBHONEY () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/adbhoney/*; fi
|
|
||||||
mkdir -vp /data/adbhoney/{downloads,log}
|
|
||||||
chmod 770 /data/adbhoney/ -R
|
|
||||||
chown tpot:tpot /data/adbhoney/ -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare ciscoasa data
|
|
||||||
fuCISCOASA () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/ciscoasa/*; fi
|
|
||||||
mkdir -vp /data/ciscoasa/log
|
|
||||||
chmod 770 /data/ciscoasa -R
|
|
||||||
chown tpot:tpot /data/ciscoasa -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare citrixhoneypot data
|
|
||||||
fuCITRIXHONEYPOT () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/citrixhoneypot/*; fi
|
|
||||||
mkdir -vp /data/citrixhoneypot/logs/
|
|
||||||
chmod 770 /data/citrixhoneypot/ -R
|
|
||||||
chown tpot:tpot /data/citrixhoneypot/ -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare conpot data
|
|
||||||
fuCONPOT () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/conpot/*; fi
|
|
||||||
mkdir -vp /data/conpot/log
|
|
||||||
chmod 770 /data/conpot -R
|
|
||||||
chown tpot:tpot /data/conpot -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare cowrie data
|
|
||||||
fuCOWRIE () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/cowrie/*; fi
|
|
||||||
mkdir -vp /data/cowrie/{downloads,keys,misc,log,log/tty}
|
|
||||||
chmod 770 /data/cowrie -R
|
|
||||||
chown tpot:tpot /data/cowrie -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare ddospot data
|
|
||||||
fuDDOSPOT () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/ddospot/log; fi
|
|
||||||
mkdir -vp /data/ddospot/{bl,db,log}
|
|
||||||
chmod 770 /data/ddospot -R
|
|
||||||
chown tpot:tpot /data/ddospot -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare dicompot data
|
|
||||||
fuDICOMPOT () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/dicompot/log; fi
|
|
||||||
mkdir -vp /data/dicompot/{images,log}
|
|
||||||
chmod 770 /data/dicompot -R
|
|
||||||
chown tpot:tpot /data/dicompot -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare dionaea data
|
|
||||||
fuDIONAEA () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/dionaea/*; fi
|
|
||||||
mkdir -vp /data/dionaea/{log,bistreams,binaries,rtp,roots,roots/ftp,roots/tftp,roots/www,roots/upnp}
|
|
||||||
touch /data/dionaea/dionaea-errors.log
|
|
||||||
touch /data/dionaea/sipaccounts.sqlite
|
|
||||||
touch /data/dionaea/sipaccounts.sqlite-journal
|
|
||||||
touch /data/dionaea/log/dionaea.json
|
|
||||||
touch /data/dionaea/log/dionaea.sqlite
|
|
||||||
chmod 770 /data/dionaea -R
|
|
||||||
chown tpot:tpot /data/dionaea -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare elasticpot data
|
|
||||||
fuELASTICPOT () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/elasticpot/*; fi
|
|
||||||
mkdir -vp /data/elasticpot/log
|
|
||||||
chmod 770 /data/elasticpot -R
|
|
||||||
chown tpot:tpot /data/elasticpot -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare elk data
|
|
||||||
fuELK () {
|
|
||||||
# ELK data will be kept for <= 90 days, check /etc/crontab for curator modification
|
|
||||||
# ELK daemon log files will be removed
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/elk/log/*; fi
|
|
||||||
mkdir -vp /data/elk/{data,log}
|
|
||||||
chmod 770 /data/elk -R
|
|
||||||
chown tpot:tpot /data/elk -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare endlessh data
|
|
||||||
fuENDLESSH () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/endlessh/log; fi
|
|
||||||
mkdir -vp /data/endlessh/log
|
|
||||||
chmod 770 /data/endlessh -R
|
|
||||||
chown tpot:tpot /data/endlessh -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare fatt data
|
|
||||||
fuFATT () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/fatt/*; fi
|
|
||||||
mkdir -vp /data/fatt/log
|
|
||||||
chmod 770 -R /data/fatt
|
|
||||||
chown tpot:tpot -R /data/fatt
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare glastopf data
|
|
||||||
fuGLUTTON () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/glutton/*; fi
|
|
||||||
mkdir -vp /data/glutton/log
|
|
||||||
chmod 770 /data/glutton -R
|
|
||||||
chown tpot:tpot /data/glutton -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare hellpot data
|
|
||||||
fuHELLPOT () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/hellpot/log; fi
|
|
||||||
mkdir -vp /data/hellpot/log
|
|
||||||
chmod 770 /data/hellpot -R
|
|
||||||
chown tpot:tpot /data/hellpot -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare heralding data
|
|
||||||
fuHERALDING () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/heralding/*; fi
|
|
||||||
mkdir -vp /data/heralding/log
|
|
||||||
chmod 770 /data/heralding -R
|
|
||||||
chown tpot:tpot /data/heralding -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare honeypots data
|
|
||||||
fuHONEYPOTS () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/honeypots/*; fi
|
|
||||||
mkdir -vp /data/honeypots/log
|
|
||||||
chmod 770 /data/honeypots -R
|
|
||||||
chown tpot:tpot /data/honeypots -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare honeysap data
|
|
||||||
fuHONEYSAP () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/honeysap/*; fi
|
|
||||||
mkdir -vp /data/honeysap/log
|
|
||||||
chmod 770 /data/honeysap -R
|
|
||||||
chown tpot:tpot /data/honeysap -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare honeytrap data
|
|
||||||
fuHONEYTRAP () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/honeytrap/*; fi
|
|
||||||
mkdir -vp /data/honeytrap/{log,attacks,downloads}
|
|
||||||
chmod 770 /data/honeytrap/ -R
|
|
||||||
chown tpot:tpot /data/honeytrap/ -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare ipphoney data
|
|
||||||
fuIPPHONEY () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/ipphoney/*; fi
|
|
||||||
mkdir -vp /data/ipphoney/log
|
|
||||||
chmod 770 /data/ipphoney -R
|
|
||||||
chown tpot:tpot /data/ipphoney -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare log4pot data
|
|
||||||
fuLOG4POT () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/log4pot/*; fi
|
|
||||||
mkdir -vp /data/log4pot/{log,payloads}
|
|
||||||
chmod 770 /data/log4pot -R
|
|
||||||
chown tpot:tpot /data/log4pot -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare mailoney data
|
|
||||||
fuMAILONEY () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/mailoney/*; fi
|
|
||||||
mkdir -vp /data/mailoney/log/
|
|
||||||
chmod 770 /data/mailoney/ -R
|
|
||||||
chown tpot:tpot /data/mailoney/ -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare mailoney data
|
|
||||||
fuMEDPOT () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/medpot/*; fi
|
|
||||||
mkdir -vp /data/medpot/log/
|
|
||||||
chmod 770 /data/medpot/ -R
|
|
||||||
chown tpot:tpot /data/medpot/ -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up nginx logs
|
|
||||||
fuNGINX () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/nginx/log/*; fi
|
|
||||||
mkdir -vp /data/nginx/{cert,conf,log}
|
|
||||||
touch /data/nginx/log/error.log
|
|
||||||
chmod 774 /data/nginx/conf -R
|
|
||||||
chmod 774 /data/nginx/cert -R
|
|
||||||
chown tpot:tpot /data/nginx -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare redishoneypot data
|
|
||||||
fuREDISHONEYPOT () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/redishoneypot/log; fi
|
|
||||||
mkdir -vp /data/redishoneypot/log
|
|
||||||
chmod 770 /data/redishoneypot -R
|
|
||||||
chown tpot:tpot /data/redishoneypot -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to clean up and prepare sentrypeer data
|
|
||||||
fuSENTRYPEER () {
|
|
||||||
if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/sentrypeer/log; fi
|
|
||||||
mkdir -vp /data/sentrypeer/log
|
|
||||||
chmod 770 /data/sentrypeer -R
|
|
||||||
chown tpot:tpot /data/sentrypeer -R
|
|
||||||
}
|
|
||||||
|
|
||||||
# Let's create a function to prepare spiderfoot db
|
|
||||||
fuSPIDERFOOT () {
|
|
||||||
mkdir -vp /data/spiderfoot
|
|
||||||
touch /data/spiderfoot/spiderfoot.db
|
|
||||||
chmod 770 -R /data/spiderfoot
|
|
||||||
chown tpot:tpot -R /data/spiderfoot
|
|
||||||
}

# Let's create a function to clean up and prepare suricata data
fuSURICATA () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/suricata/*; fi
  mkdir -vp /data/suricata/log
  chmod 770 -R /data/suricata
  chown tpot:tpot -R /data/suricata
}

# Let's create a function to clean up and prepare p0f data
fuP0F () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/p0f/*; fi
  mkdir -vp /data/p0f/log
  chmod 770 -R /data/p0f
  chown tpot:tpot -R /data/p0f
}

# Let's create a function to clean up and prepare tanner data
fuTANNER () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/tanner/*; fi
  mkdir -vp /data/tanner/{log,files}
  chmod 770 -R /data/tanner
  chown tpot:tpot -R /data/tanner
}

# Let's create a function to clean up and prepare wordpot data
fuWORDPOT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/wordpot/log; fi
  mkdir -vp /data/wordpot/log
  chmod 770 /data/wordpot -R
  chown tpot:tpot /data/wordpot -R
}

# Avoid unwanted cleaning
if [ "$myPERSISTENCE" = "" ];
  then
    echo $myRED"!!! WARNING !!! - This will delete ALL honeypot logs. "$myWHITE
    while [ "$myQST" != "y" ] && [ "$myQST" != "n" ];
      do
        read -p "Continue? (y/n) " myQST
      done
    if [ "$myQST" = "n" ];
      then
        echo $myGREEN"Puuh! That was close! Aborting!"$myWHITE
        exit
    fi
fi

# Check persistence, if enabled compress and rotate logs
if [ "$myPERSISTENCE" = "on" ];
  then
    echo "Persistence enabled, now rotating and compressing logs."
    fuLOGROTATE
fi

echo
echo "Checking and preparing data folders."
fuTPOTINIT
fuADBHONEY
fuCISCOASA
fuCITRIXHONEYPOT
fuCONPOT
fuCOWRIE
fuDDOSPOT
fuDICOMPOT
fuDIONAEA
fuELASTICPOT
fuELK
fuENDLESSH
fuFATT
fuGLUTTON
fuHERALDING
fuHELLPOT
fuHONEYSAP
fuHONEYPOTS
fuHONEYTRAP
fuIPPHONEY
fuLOG4POT
fuMAILONEY
fuMEDPOT
fuNGINX
fuREDISHONEYPOT
fuSENTRYPEER
fuSPIDERFOOT
fuSURICATA
fuP0F
fuTANNER
fuWORDPOT
|
@ -1,89 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
echo """
|
|
||||||
|
|
||||||
##############################
|
|
||||||
# T-POT DTAG Data Submission #
|
|
||||||
# Contact: #
|
|
||||||
# cert@telekom.de #
|
|
||||||
##############################
|
|
||||||
"""

# Got root?
myWHOAMI=$(whoami)
if [ "$myWHOAMI" != "root" ]
  then
    echo "Need to run as root ..."
    # Quote $0 and forward any arguments so the re-exec works from any path
    sudo "$0" "$@"
    exit
fi

printf "[*] Enter your API UserID: "
read apiUser
printf "[*] Enter your API Token: "
read apiToken
printf "[*] If you have multiple T-Pots running, give them each a unique NUMBER, e.g. '2' for your second T-Pot installation. Enter a unique number for THIS T-Pot: "
read indexNumber
# Accept only positive integers as the index number
if ! [[ "$indexNumber" =~ ^[0-9]+$ ]]
  then
    echo "Sorry, integers only. You have to start over..."
    exit 1
fi
apiURL="https://community.sicherheitstacho.eu/ews-0.1/alert/postSimpleMessage"
printf "[*] Currently, your honeypot is configured to transmit data to the default backend at 'https://community.sicherheitstacho.eu/ews-0.1/alert/postSimpleMessage'. Do you want to change this API endpoint? Only do this if you run your own PEBA backend instance. (N/y): "
read replyAPI
if [[ $replyAPI =~ ^[Yy]$ ]]
  then
    printf "[*] Enter your API endpoint URL and make sure it contains the full path, e.g. 'https://myDomain.local:9922/ews-0.1/alert/postSimpleMessage': "
    read apiURL
fi

echo ""
echo "[*] Recap! You defined: "
echo "############################"
echo "API User: " $apiUser
echo "API Token: " $apiToken
echo "API URL: " $apiURL
echo "Unique numeric ID for your T-Pot Installation: " $indexNumber
echo "Specific honeypot-IDs will look like : <honeypotType>-"$apiUser"-"$indexNumber
echo "############################"
echo ""
printf "[*] Is the above correct (y/N)? "
read reply
if [[ ! $reply =~ ^[Yy]$ ]]
  then
    echo "OK, then run this again..."
    exit 1
fi
echo ""
echo "[+] Creating config file with API UserID '$apiUser' and API Token '$apiToken'."
echo "[+] Fetching config file from GitHub. Outgoing https requests must be enabled!"
wget -q https://raw.githubusercontent.com/telekom-security/tpotce/master/docker/ews/dist/ews.cfg -O ews.cfg.dist
if [[ -f "ews.cfg.dist" ]]; then
    echo "[+] Successfully downloaded ews.cfg from GitHub."
  else
    echo "[!] Could not download ews.cfg from GitHub."
    exit 1
fi
echo "[+] Patching ews.cfg API Credentials."
sed 's/community-01-user/'$apiUser'/' ews.cfg.dist > ews.cfg
sed -i 's/foth{a5maiCee8fineu7/'$apiToken'/' ews.cfg
echo "[+] Patching ews.cfg API Url."
# Escape every "/" in the URL as "\/" so it is safe inside the sed expression below
apiURL=${apiURL////\\/};
sed -i 's/https:\/\/community.sicherheitstacho.eu\/ews-0.1\/alert\/postSimpleMessage/'$apiURL'/' ews.cfg
echo "[+] Patching ews.cfg honeypot IDs."
sed -i 's/community-01/'$apiUser'-'$indexNumber'/' ews.cfg

rm ews.cfg.dist

echo "[+] Changing tpot.yml to include new ews.cfg."
cp ews.cfg /data/ews/conf/ews.cfg
# Keep a backup, then append the ews.cfg bind mount right after the ews.ip mount
cp /opt/tpot/etc/tpot.yml /opt/tpot/etc/tpot.yml.bak
sed -i '/- \/data\/ews\/conf\/ews.ip:\/opt\/ewsposter\/ews.ip/a\ \ \ - \/data\/ews\/conf\/ews.cfg:\/opt\/ewsposter\/ews.cfg' /opt/tpot/etc/tpot.yml
# After this sed, the ewsposter volumes in tpot.yml contain both:
#   - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip
#   - /data/ews/conf/ews.cfg:/opt/ewsposter/ews.cfg

echo "[+] Restarting T-Pot."
systemctl restart tpot
echo "[+] Done."