diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..2c3dc40d --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +# Ignore data folder +data/ diff --git a/preview/README.md b/PREVIEW.md similarity index 100% rename from preview/README.md rename to PREVIEW.md diff --git a/SECURITY.md b/SECURITY.md index 56c91e75..3350e6cb 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,9 +2,9 @@ ## Supported Versions -| Version | Supported | -| ------- | ------------------ | -| 22.04.x | :white_check_mark: | +| Version | Supported | +|---------| ------------------ | +| 23.12.x | :white_check_mark: | ## Reporting a Vulnerability diff --git a/bin/2fa.sh b/_deprecated/bin/2fa.sh similarity index 100% rename from bin/2fa.sh rename to _deprecated/bin/2fa.sh diff --git a/bin/backup_es_folders.sh b/_deprecated/bin/backup_es_folders.sh similarity index 100% rename from bin/backup_es_folders.sh rename to _deprecated/bin/backup_es_folders.sh diff --git a/bin/blackhole.sh b/_deprecated/bin/blackhole.sh similarity index 100% rename from bin/blackhole.sh rename to _deprecated/bin/blackhole.sh diff --git a/bin/change_ews_config.sh b/_deprecated/bin/change_ews_config.sh similarity index 100% rename from bin/change_ews_config.sh rename to _deprecated/bin/change_ews_config.sh diff --git a/bin/clean.sh b/_deprecated/bin/clean.sh similarity index 100% rename from bin/clean.sh rename to _deprecated/bin/clean.sh diff --git a/bin/deploy.sh b/_deprecated/bin/deploy.sh similarity index 100% rename from bin/deploy.sh rename to _deprecated/bin/deploy.sh diff --git a/bin/deprecated/export_kibana-objects.sh b/_deprecated/bin/deprecated/export_kibana-objects.sh similarity index 100% rename from bin/deprecated/export_kibana-objects.sh rename to _deprecated/bin/deprecated/export_kibana-objects.sh diff --git a/bin/deprecated/hptest.sh b/_deprecated/bin/deprecated/hptest.sh similarity index 100% rename from bin/deprecated/hptest.sh rename to _deprecated/bin/deprecated/hptest.sh diff --git a/bin/deprecated/import_kibana-objects.sh b/_deprecated/bin/deprecated/import_kibana-objects.sh similarity index 100% rename from bin/deprecated/import_kibana-objects.sh rename to _deprecated/bin/deprecated/import_kibana-objects.sh diff --git a/bin/dps.sh b/_deprecated/bin/dps.sh similarity index 100% rename from bin/dps.sh rename to _deprecated/bin/dps.sh diff --git a/bin/dump_es.sh b/_deprecated/bin/dump_es.sh similarity index 100% rename from bin/dump_es.sh rename to _deprecated/bin/dump_es.sh diff --git a/bin/hpfeeds_optin.sh b/_deprecated/bin/hpfeeds_optin.sh similarity index 100% rename from bin/hpfeeds_optin.sh rename to _deprecated/bin/hpfeeds_optin.sh diff --git a/bin/hptest.sh b/_deprecated/bin/hptest.sh similarity index 100% rename from bin/hptest.sh rename to _deprecated/bin/hptest.sh diff --git a/bin/myip.sh b/_deprecated/bin/myip.sh similarity index 100% rename from bin/myip.sh rename to _deprecated/bin/myip.sh diff --git a/bin/mytopips.sh b/_deprecated/bin/mytopips.sh similarity index 100% rename from bin/mytopips.sh rename to _deprecated/bin/mytopips.sh diff --git a/bin/restore_es.sh b/_deprecated/bin/restore_es.sh similarity index 100% rename from bin/restore_es.sh rename to _deprecated/bin/restore_es.sh diff --git a/bin/rules.sh b/_deprecated/bin/rules.sh similarity index 100% rename from bin/rules.sh rename to _deprecated/bin/rules.sh diff --git a/bin/setup_builder.sh b/_deprecated/bin/setup_builder.sh similarity index 100% rename from bin/setup_builder.sh rename to _deprecated/bin/setup_builder.sh diff --git 
a/bin/tpdclean.sh b/_deprecated/bin/tpdclean.sh similarity index 100% rename from bin/tpdclean.sh rename to _deprecated/bin/tpdclean.sh diff --git a/bin/tped.sh b/_deprecated/bin/tped.sh similarity index 100% rename from bin/tped.sh rename to _deprecated/bin/tped.sh diff --git a/bin/unlock_es.sh b/_deprecated/bin/unlock_es.sh similarity index 100% rename from bin/unlock_es.sh rename to _deprecated/bin/unlock_es.sh diff --git a/bin/updateip.sh b/_deprecated/bin/updateip.sh similarity index 100% rename from bin/updateip.sh rename to _deprecated/bin/updateip.sh diff --git a/cloud/.gitignore b/_deprecated/cloud/.gitignore similarity index 100% rename from cloud/.gitignore rename to _deprecated/cloud/.gitignore diff --git a/cloud/ansible/README.md b/_deprecated/cloud/ansible/README.md similarity index 100% rename from cloud/ansible/README.md rename to _deprecated/cloud/ansible/README.md diff --git a/cloud/ansible/doc/otc_1_project.gif b/_deprecated/cloud/ansible/doc/otc_1_project.gif similarity index 100% rename from cloud/ansible/doc/otc_1_project.gif rename to _deprecated/cloud/ansible/doc/otc_1_project.gif diff --git a/cloud/ansible/doc/otc_2_user.gif b/_deprecated/cloud/ansible/doc/otc_2_user.gif similarity index 100% rename from cloud/ansible/doc/otc_2_user.gif rename to _deprecated/cloud/ansible/doc/otc_2_user.gif diff --git a/cloud/ansible/doc/otc_3_login.gif b/_deprecated/cloud/ansible/doc/otc_3_login.gif similarity index 100% rename from cloud/ansible/doc/otc_3_login.gif rename to _deprecated/cloud/ansible/doc/otc_3_login.gif diff --git a/cloud/ansible/doc/otc_4_import_key.gif b/_deprecated/cloud/ansible/doc/otc_4_import_key.gif similarity index 100% rename from cloud/ansible/doc/otc_4_import_key.gif rename to _deprecated/cloud/ansible/doc/otc_4_import_key.gif diff --git a/cloud/ansible/doc/putty_agent_forwarding.png b/_deprecated/cloud/ansible/doc/putty_agent_forwarding.png similarity index 100% rename from cloud/ansible/doc/putty_agent_forwarding.png rename to _deprecated/cloud/ansible/doc/putty_agent_forwarding.png diff --git a/cloud/ansible/openstack/ansible.cfg b/_deprecated/cloud/ansible/openstack/ansible.cfg similarity index 100% rename from cloud/ansible/openstack/ansible.cfg rename to _deprecated/cloud/ansible/openstack/ansible.cfg diff --git a/cloud/ansible/openstack/deploy_tpot.yaml b/_deprecated/cloud/ansible/openstack/deploy_tpot.yaml similarity index 100% rename from cloud/ansible/openstack/deploy_tpot.yaml rename to _deprecated/cloud/ansible/openstack/deploy_tpot.yaml diff --git a/cloud/ansible/openstack/my_os_cloud.yaml b/_deprecated/cloud/ansible/openstack/my_os_cloud.yaml similarity index 100% rename from cloud/ansible/openstack/my_os_cloud.yaml rename to _deprecated/cloud/ansible/openstack/my_os_cloud.yaml diff --git a/cloud/ansible/openstack/requirements.yaml b/_deprecated/cloud/ansible/openstack/requirements.yaml similarity index 100% rename from cloud/ansible/openstack/requirements.yaml rename to _deprecated/cloud/ansible/openstack/requirements.yaml diff --git a/cloud/ansible/openstack/roles/check/tasks/main.yaml b/_deprecated/cloud/ansible/openstack/roles/check/tasks/main.yaml similarity index 100% rename from cloud/ansible/openstack/roles/check/tasks/main.yaml rename to _deprecated/cloud/ansible/openstack/roles/check/tasks/main.yaml diff --git a/cloud/ansible/openstack/roles/create_net/tasks/main.yaml b/_deprecated/cloud/ansible/openstack/roles/create_net/tasks/main.yaml similarity index 100% rename from cloud/ansible/openstack/roles/create_net/tasks/main.yaml 
rename to _deprecated/cloud/ansible/openstack/roles/create_net/tasks/main.yaml diff --git a/cloud/ansible/openstack/roles/create_vm/tasks/main.yaml b/_deprecated/cloud/ansible/openstack/roles/create_vm/tasks/main.yaml similarity index 100% rename from cloud/ansible/openstack/roles/create_vm/tasks/main.yaml rename to _deprecated/cloud/ansible/openstack/roles/create_vm/tasks/main.yaml diff --git a/cloud/ansible/openstack/roles/create_vm/vars/main.yaml b/_deprecated/cloud/ansible/openstack/roles/create_vm/vars/main.yaml similarity index 100% rename from cloud/ansible/openstack/roles/create_vm/vars/main.yaml rename to _deprecated/cloud/ansible/openstack/roles/create_vm/vars/main.yaml diff --git a/cloud/ansible/openstack/roles/custom_ews/tasks/main.yaml b/_deprecated/cloud/ansible/openstack/roles/custom_ews/tasks/main.yaml similarity index 100% rename from cloud/ansible/openstack/roles/custom_ews/tasks/main.yaml rename to _deprecated/cloud/ansible/openstack/roles/custom_ews/tasks/main.yaml diff --git a/cloud/ansible/openstack/roles/custom_ews/templates/ews.cfg b/_deprecated/cloud/ansible/openstack/roles/custom_ews/templates/ews.cfg similarity index 100% rename from cloud/ansible/openstack/roles/custom_ews/templates/ews.cfg rename to _deprecated/cloud/ansible/openstack/roles/custom_ews/templates/ews.cfg diff --git a/cloud/ansible/openstack/roles/custom_hpfeeds/files/hpfeeds.cfg b/_deprecated/cloud/ansible/openstack/roles/custom_hpfeeds/files/hpfeeds.cfg similarity index 100% rename from cloud/ansible/openstack/roles/custom_hpfeeds/files/hpfeeds.cfg rename to _deprecated/cloud/ansible/openstack/roles/custom_hpfeeds/files/hpfeeds.cfg diff --git a/cloud/ansible/openstack/roles/custom_hpfeeds/tasks/main.yaml b/_deprecated/cloud/ansible/openstack/roles/custom_hpfeeds/tasks/main.yaml similarity index 100% rename from cloud/ansible/openstack/roles/custom_hpfeeds/tasks/main.yaml rename to _deprecated/cloud/ansible/openstack/roles/custom_hpfeeds/tasks/main.yaml diff --git a/cloud/ansible/openstack/roles/install/tasks/main.yaml b/_deprecated/cloud/ansible/openstack/roles/install/tasks/main.yaml similarity index 100% rename from cloud/ansible/openstack/roles/install/tasks/main.yaml rename to _deprecated/cloud/ansible/openstack/roles/install/tasks/main.yaml diff --git a/cloud/ansible/openstack/roles/install/vars/main.yaml b/_deprecated/cloud/ansible/openstack/roles/install/vars/main.yaml similarity index 100% rename from cloud/ansible/openstack/roles/install/vars/main.yaml rename to _deprecated/cloud/ansible/openstack/roles/install/vars/main.yaml diff --git a/cloud/ansible/openstack/roles/reboot/tasks/main.yaml b/_deprecated/cloud/ansible/openstack/roles/reboot/tasks/main.yaml similarity index 100% rename from cloud/ansible/openstack/roles/reboot/tasks/main.yaml rename to _deprecated/cloud/ansible/openstack/roles/reboot/tasks/main.yaml diff --git a/cloud/terraform/README.md b/_deprecated/cloud/terraform/README.md similarity index 100% rename from cloud/terraform/README.md rename to _deprecated/cloud/terraform/README.md diff --git a/cloud/terraform/aws/.terraform.lock.hcl b/_deprecated/cloud/terraform/aws/.terraform.lock.hcl similarity index 100% rename from cloud/terraform/aws/.terraform.lock.hcl rename to _deprecated/cloud/terraform/aws/.terraform.lock.hcl diff --git a/cloud/terraform/aws/main.tf b/_deprecated/cloud/terraform/aws/main.tf similarity index 100% rename from cloud/terraform/aws/main.tf rename to _deprecated/cloud/terraform/aws/main.tf diff --git a/cloud/terraform/aws/outputs.tf 
b/_deprecated/cloud/terraform/aws/outputs.tf similarity index 100% rename from cloud/terraform/aws/outputs.tf rename to _deprecated/cloud/terraform/aws/outputs.tf diff --git a/cloud/terraform/aws/variables.tf b/_deprecated/cloud/terraform/aws/variables.tf similarity index 100% rename from cloud/terraform/aws/variables.tf rename to _deprecated/cloud/terraform/aws/variables.tf diff --git a/cloud/terraform/aws/versions.tf b/_deprecated/cloud/terraform/aws/versions.tf similarity index 100% rename from cloud/terraform/aws/versions.tf rename to _deprecated/cloud/terraform/aws/versions.tf diff --git a/cloud/terraform/aws_multi_region/_provider.tf b/_deprecated/cloud/terraform/aws_multi_region/_provider.tf similarity index 100% rename from cloud/terraform/aws_multi_region/_provider.tf rename to _deprecated/cloud/terraform/aws_multi_region/_provider.tf diff --git a/cloud/terraform/aws_multi_region/main.tf b/_deprecated/cloud/terraform/aws_multi_region/main.tf similarity index 100% rename from cloud/terraform/aws_multi_region/main.tf rename to _deprecated/cloud/terraform/aws_multi_region/main.tf diff --git a/cloud/terraform/aws_multi_region/modules/multi-region/main.tf b/_deprecated/cloud/terraform/aws_multi_region/modules/multi-region/main.tf similarity index 100% rename from cloud/terraform/aws_multi_region/modules/multi-region/main.tf rename to _deprecated/cloud/terraform/aws_multi_region/modules/multi-region/main.tf diff --git a/cloud/terraform/aws_multi_region/modules/multi-region/outputs.tf b/_deprecated/cloud/terraform/aws_multi_region/modules/multi-region/outputs.tf similarity index 100% rename from cloud/terraform/aws_multi_region/modules/multi-region/outputs.tf rename to _deprecated/cloud/terraform/aws_multi_region/modules/multi-region/outputs.tf diff --git a/cloud/terraform/aws_multi_region/modules/multi-region/variables.tf b/_deprecated/cloud/terraform/aws_multi_region/modules/multi-region/variables.tf similarity index 100% rename from cloud/terraform/aws_multi_region/modules/multi-region/variables.tf rename to _deprecated/cloud/terraform/aws_multi_region/modules/multi-region/variables.tf diff --git a/cloud/terraform/aws_multi_region/modules/multi-region/versions.tf b/_deprecated/cloud/terraform/aws_multi_region/modules/multi-region/versions.tf similarity index 100% rename from cloud/terraform/aws_multi_region/modules/multi-region/versions.tf rename to _deprecated/cloud/terraform/aws_multi_region/modules/multi-region/versions.tf diff --git a/cloud/terraform/aws_multi_region/outputs.tf b/_deprecated/cloud/terraform/aws_multi_region/outputs.tf similarity index 100% rename from cloud/terraform/aws_multi_region/outputs.tf rename to _deprecated/cloud/terraform/aws_multi_region/outputs.tf diff --git a/cloud/terraform/aws_multi_region/variables.tf b/_deprecated/cloud/terraform/aws_multi_region/variables.tf similarity index 100% rename from cloud/terraform/aws_multi_region/variables.tf rename to _deprecated/cloud/terraform/aws_multi_region/variables.tf diff --git a/cloud/terraform/cloud-init.yaml b/_deprecated/cloud/terraform/cloud-init.yaml similarity index 100% rename from cloud/terraform/cloud-init.yaml rename to _deprecated/cloud/terraform/cloud-init.yaml diff --git a/cloud/terraform/otc/.terraform.lock.hcl b/_deprecated/cloud/terraform/otc/.terraform.lock.hcl similarity index 100% rename from cloud/terraform/otc/.terraform.lock.hcl rename to _deprecated/cloud/terraform/otc/.terraform.lock.hcl diff --git a/cloud/terraform/otc/main.tf b/_deprecated/cloud/terraform/otc/main.tf similarity index 
100% rename from cloud/terraform/otc/main.tf rename to _deprecated/cloud/terraform/otc/main.tf diff --git a/cloud/terraform/otc/outputs.tf b/_deprecated/cloud/terraform/otc/outputs.tf similarity index 100% rename from cloud/terraform/otc/outputs.tf rename to _deprecated/cloud/terraform/otc/outputs.tf diff --git a/cloud/terraform/otc/provider.tf b/_deprecated/cloud/terraform/otc/provider.tf similarity index 100% rename from cloud/terraform/otc/provider.tf rename to _deprecated/cloud/terraform/otc/provider.tf diff --git a/cloud/terraform/otc/variables.tf b/_deprecated/cloud/terraform/otc/variables.tf similarity index 100% rename from cloud/terraform/otc/variables.tf rename to _deprecated/cloud/terraform/otc/variables.tf diff --git a/cloud/terraform/otc/versions.tf b/_deprecated/cloud/terraform/otc/versions.tf similarity index 100% rename from cloud/terraform/otc/versions.tf rename to _deprecated/cloud/terraform/otc/versions.tf diff --git a/etc/compose/collector.yml b/_deprecated/etc/compose/collector.yml similarity index 100% rename from etc/compose/collector.yml rename to _deprecated/etc/compose/collector.yml diff --git a/etc/compose/hive.yml b/_deprecated/etc/compose/hive.yml similarity index 100% rename from etc/compose/hive.yml rename to _deprecated/etc/compose/hive.yml diff --git a/etc/compose/hive_sensor.yml b/_deprecated/etc/compose/hive_sensor.yml similarity index 100% rename from etc/compose/hive_sensor.yml rename to _deprecated/etc/compose/hive_sensor.yml diff --git a/etc/compose/industrial.yml b/_deprecated/etc/compose/industrial.yml similarity index 100% rename from etc/compose/industrial.yml rename to _deprecated/etc/compose/industrial.yml diff --git a/etc/compose/log4j.yml b/_deprecated/etc/compose/log4j.yml similarity index 100% rename from etc/compose/log4j.yml rename to _deprecated/etc/compose/log4j.yml diff --git a/etc/compose/medical.yml b/_deprecated/etc/compose/medical.yml similarity index 100% rename from etc/compose/medical.yml rename to _deprecated/etc/compose/medical.yml diff --git a/etc/compose/mini.yml b/_deprecated/etc/compose/mini.yml similarity index 100% rename from etc/compose/mini.yml rename to _deprecated/etc/compose/mini.yml diff --git a/etc/compose/nextgen.yml b/_deprecated/etc/compose/nextgen.yml similarity index 100% rename from etc/compose/nextgen.yml rename to _deprecated/etc/compose/nextgen.yml diff --git a/etc/compose/sensor.yml b/_deprecated/etc/compose/sensor.yml similarity index 100% rename from etc/compose/sensor.yml rename to _deprecated/etc/compose/sensor.yml diff --git a/etc/compose/standard.yml b/_deprecated/etc/compose/standard.yml similarity index 100% rename from etc/compose/standard.yml rename to _deprecated/etc/compose/standard.yml diff --git a/etc/compose/tarpit.yml b/_deprecated/etc/compose/tarpit.yml similarity index 100% rename from etc/compose/tarpit.yml rename to _deprecated/etc/compose/tarpit.yml diff --git a/etc/logrotate/logrotate.conf b/_deprecated/etc/logrotate/logrotate.conf similarity index 100% rename from etc/logrotate/logrotate.conf rename to _deprecated/etc/logrotate/logrotate.conf diff --git a/etc/objects/elkbase.tgz b/_deprecated/etc/objects/elkbase.tgz similarity index 100% rename from etc/objects/elkbase.tgz rename to _deprecated/etc/objects/elkbase.tgz diff --git a/etc/objects/kibana_export.ndjson.zip b/_deprecated/etc/objects/kibana_export.ndjson.zip similarity index 100% rename from etc/objects/kibana_export.ndjson.zip rename to _deprecated/etc/objects/kibana_export.ndjson.zip diff --git a/host/etc/rc.local 
b/_deprecated/host/etc/rc.local similarity index 100% rename from host/etc/rc.local rename to _deprecated/host/etc/rc.local diff --git a/host/etc/systemd/tpot.service b/_deprecated/host/etc/systemd/tpot.service similarity index 100% rename from host/etc/systemd/tpot.service rename to _deprecated/host/etc/systemd/tpot.service diff --git a/host/usr/share/dict/a.txt b/_deprecated/host/usr/share/dict/a.txt similarity index 100% rename from host/usr/share/dict/a.txt rename to _deprecated/host/usr/share/dict/a.txt diff --git a/host/usr/share/dict/n.txt b/_deprecated/host/usr/share/dict/n.txt similarity index 100% rename from host/usr/share/dict/n.txt rename to _deprecated/host/usr/share/dict/n.txt diff --git a/host/usr/share/dict/names b/_deprecated/host/usr/share/dict/names similarity index 100% rename from host/usr/share/dict/names rename to _deprecated/host/usr/share/dict/names diff --git a/install.sh b/_deprecated/install.sh similarity index 100% rename from install.sh rename to _deprecated/install.sh diff --git a/iso/installer/install.sh b/_deprecated/iso/installer/install.sh similarity index 100% rename from iso/installer/install.sh rename to _deprecated/iso/installer/install.sh diff --git a/iso/installer/iso.conf.dist b/_deprecated/iso/installer/iso.conf.dist similarity index 100% rename from iso/installer/iso.conf.dist rename to _deprecated/iso/installer/iso.conf.dist diff --git a/iso/installer/rc.local.install b/_deprecated/iso/installer/rc.local.install similarity index 100% rename from iso/installer/rc.local.install rename to _deprecated/iso/installer/rc.local.install diff --git a/iso/installer/tpot.conf.dist b/_deprecated/iso/installer/tpot.conf.dist similarity index 100% rename from iso/installer/tpot.conf.dist rename to _deprecated/iso/installer/tpot.conf.dist diff --git a/iso/installer/wrapper.sh b/_deprecated/iso/installer/wrapper.sh similarity index 100% rename from iso/installer/wrapper.sh rename to _deprecated/iso/installer/wrapper.sh diff --git a/iso/isolinux/txt.cfg b/_deprecated/iso/isolinux/txt.cfg similarity index 100% rename from iso/isolinux/txt.cfg rename to _deprecated/iso/isolinux/txt.cfg diff --git a/iso/preseed/tpot_amd64.seed b/_deprecated/iso/preseed/tpot_amd64.seed similarity index 100% rename from iso/preseed/tpot_amd64.seed rename to _deprecated/iso/preseed/tpot_amd64.seed diff --git a/iso/preseed/tpot_arm64.seed b/_deprecated/iso/preseed/tpot_arm64.seed similarity index 100% rename from iso/preseed/tpot_arm64.seed rename to _deprecated/iso/preseed/tpot_arm64.seed diff --git a/makeiso.sh b/_deprecated/makeiso.sh similarity index 100% rename from makeiso.sh rename to _deprecated/makeiso.sh diff --git a/packages.txt b/_deprecated/packages.txt similarity index 100% rename from packages.txt rename to _deprecated/packages.txt diff --git a/update.sh b/_deprecated/update.sh similarity index 100% rename from update.sh rename to _deprecated/update.sh diff --git a/cloud/ansible/openstack/clouds.yaml b/cloud/ansible/openstack/clouds.yaml deleted file mode 100644 index c16bfcf3..00000000 --- a/cloud/ansible/openstack/clouds.yaml +++ /dev/null @@ -1,9 +0,0 @@ -clouds: - open-telekom-cloud: - profile: otc - region_name: eu-de - auth: - project_name: eu-de_your_project - username: your_api_user - password: your_password - user_domain_name: OTC-EU-DE-000000000010000XXXXX diff --git a/cloud/terraform/otc/clouds.yaml b/cloud/terraform/otc/clouds.yaml deleted file mode 100644 index 5eefd562..00000000 --- a/cloud/terraform/otc/clouds.yaml +++ /dev/null @@ -1,9 +0,0 @@ -clouds: - 
open-telekom-cloud: - region_name: eu-de - auth: - project_name: eu-de_your_project - username: your_api_user - password: your_password - user_domain_name: OTC-EU-DE-000000000010000XXXXX - auth_url: https://iam.eu-de.otc.t-systems.com/v3 diff --git a/preview/compose/README b/compose/README similarity index 100% rename from preview/compose/README rename to compose/README diff --git a/preview/compose/mac_win.yml b/compose/mac_win.yml similarity index 100% rename from preview/compose/mac_win.yml rename to compose/mac_win.yml diff --git a/preview/compose/standard.yml b/compose/standard.yml similarity index 100% rename from preview/compose/standard.yml rename to compose/standard.yml diff --git a/preview/docker-compose.yml b/docker-compose.yml similarity index 100% rename from preview/docker-compose.yml rename to docker-compose.yml diff --git a/docker/builder.sh b/docker/builder.sh index 10582f03..9e36af74 100755 --- a/docker/builder.sh +++ b/docker/builder.sh @@ -3,8 +3,8 @@ # Setup Vars myPLATFORMS="linux/amd64,linux/arm64" myHUBORG="dtagdevsec" -myTAG="2204" -myIMAGESBASE="adbhoney ciscoasa citrixhoneypot conpot cowrie ddospot dicompot dionaea elasticpot endlessh ewsposter fatt glutton hellpot heralding honeypots honeytrap ipphoney log4pot mailoney medpot nginx p0f redishoneypot sentrypeer spiderfoot suricata wordpot" +myTAG="dev" +myIMAGESBASE="tpotinit adbhoney ciscoasa citrixhoneypot conpot cowrie ddospot dicompot dionaea elasticpot endlessh ewsposter fatt glutton hellpot heralding honeypots honeytrap ipphoney log4pot mailoney medpot nginx p0f redishoneypot sentrypeer spiderfoot suricata wordpot" myIMAGESELK="elasticsearch kibana logstash map" myIMAGESTANNER="phpox redis snare tanner" myBUILDERLOG="builder.log" diff --git a/docker/tpotinit/Dockerfile b/docker/tpotinit/Dockerfile new file mode 100644 index 00000000..815cde0d --- /dev/null +++ b/docker/tpotinit/Dockerfile @@ -0,0 +1,59 @@ +FROM alpine:edge +# +# Include dist +COPY dist/ /opt/tpot/ +# +# Get and install dependencies & packages +RUN apk --no-cache -U add \ + aria2 \ + apache2-utils \ + bash \ + bind-tools \ + conntrack-tools \ + curl \ + ethtool \ + figlet \ + git \ + grep \ + iproute2 \ + iptables \ + jq \ + logrotate \ + lsblk \ + net-tools \ + openssl \ + pigz \ + tar \ + uuidgen && \ + apk --no-cache -U add --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community \ + yq && \ +# +# Setup user + addgroup -g 2000 tpot && \ + adduser -S -s /bin/ash -u 2000 -D -g 2000 tpot && \ +# +# Install tpot + #sed -i "s#/opt/tpot/etc/logrotate/status#/data/tpot/etc/logrotate/status#g" bin/clean.sh && \ + #sed -i "s#/opt/tpot/etc/compose/elk_environment#/data/tpot/etc/compose/elk_environment#g" bin/clean.sh && \ + #sed -i "s#/usr/sbin/iptables-legacy#/sbin/iptables-legacy#g" bin/rules.sh && \ + #sed -i "s/tr -d '\", '/tr -d '\", ,#,-'/g" bin/rules.sh && \ + #sed -i "s#/opt/tpot/etc/compose/elk_environment#/data/tpot/etc/compose/elk_environment#g" bin/updateip.sh && \ + #sed -i "s#.*myLOCALIP=.*#myLOCALIP=\$(/sbin/ip address show | awk '/inet .*brd/{split(\$2,a,\"/\"); print a[1]; exit}')#" bin/updateip.sh && \ + #sed -i "s#.*myUUID=.*#myUUID=\$(cat /data/uuid)#" bin/updateip.sh && \ + #sed -i "s#/etc/issue#/tmp/etc/issue#g" bin/updateip.sh && \ + #sed -i "/toilet/d" bin/updateip.sh && \ + #sed -i "/source \/etc\/environment/d" bin/updateip.sh && \ + #touch /opt/tpot/etc/tpot.yml && \ + cp /root/dist/entrypoint.sh . 
&& \ +# +# Clean up + apk del --purge git && \ + rm -rf /root/* /tmp/* && \ + rm -rf /root/.cache /opt/tpot/.git && \ + rm -rf /var/cache/apk/* +# +# Run tpotinit +WORKDIR /opt/tpot +HEALTHCHECK --retries=1000 --interval=5s CMD test -f /tmp/success || exit 1 +STOPSIGNAL SIGKILL +CMD ["/opt/tpot/entrypoint.sh"] diff --git a/docker/tpotinit/dist/bin/2fa.sh b/docker/tpotinit/dist/bin/2fa.sh new file mode 100755 index 00000000..bbd82c8f --- /dev/null +++ b/docker/tpotinit/dist/bin/2fa.sh @@ -0,0 +1,77 @@ +#!/bin/bash + +# Make sure script is started as non-root. +myWHOAMI=$(whoami) +if [ "$myWHOAMI" = "root" ] + then + echo "Need to run as non-root ..." + echo "" + exit +fi + +# set vars, check deps +myPAM_COCKPIT_FILE="/etc/pam.d/cockpit" +if ! [ -s "$myPAM_COCKPIT_FILE" ]; + then + echo "### Cockpit PAM module config does not exist. Something went wrong." + echo "" + exit 1 +fi +myPAM_COCKPIT_GA=" + +# google authenticator for two-factor +auth required pam_google_authenticator.so +" +myAUTHENTICATOR=$(which google-authenticator) +if [ "$myAUTHENTICATOR" == "" ]; + then + echo "### Could not locate google-authenticator, trying to install (if asked provide root password)." + echo "" + sudo apt-get update + sudo apt-get install -y libpam-google-authenticator + exec "$1" "$2" + exit 1 +fi + + +# write PAM changes +function fuWRITE_PAM_CHANGES { + myCHECK=$(cat $myPAM_COCKPIT_FILE | grep -c "google") + if ! [ "$myCHECK" == "0" ]; + then + echo "### PAM config already enabled. Skipped." + echo "" + else + echo "### Updating PAM config for Cockpit (if asked provide root password)." + echo "$myPAM_COCKPIT_GA" | sudo tee -a $myPAM_COCKPIT_FILE + sudo systemctl restart cockpit + fi +} + +# create 2fa +function fuGEN_TOKEN { + echo "### Now generating token for Google Authenticator." + echo "" + google-authenticator -t -d -r 3 -R 30 -w 17 +} + + +# main +echo "### This script will enable Two-Factor Authentication for Cockpit." +echo "" +echo "### Please download one of the many authenticator apps from the app store of your choice." +echo "" +while true; + do + read -p "### Ready to start (y/n)? " myANSWER + case $myANSWER in + [Yy]* ) echo "### OK. Starting ..."; break;; + [Nn]* ) echo "### Exiting."; exit;; + esac +done + +fuWRITE_PAM_CHANGES +fuGEN_TOKEN + +echo "Done. This script must be re-run by every user who needs Cockpit access." +echo "" diff --git a/docker/tpotinit/dist/bin/backup_es_folders.sh b/docker/tpotinit/dist/bin/backup_es_folders.sh new file mode 100755 index 00000000..3d15261b --- /dev/null +++ b/docker/tpotinit/dist/bin/backup_es_folders.sh @@ -0,0 +1,61 @@ +#!/bin/bash +# Run as root only. +myWHOAMI=$(whoami) +if [ "$myWHOAMI" != "root" ]; + then + echo "Need to run as root ..." + exit +fi + +if [ "$1" == "" ] || [ "$1" != "all" ] && [ "$1" != "base" ]; + then + echo "Usage: backup_es_folders [all, base]" + echo " all = backup all ES folders" + echo " base = backup only the Kibana index." + echo + exit +fi + +# Backup all ES relevant folders +# Make sure ES is available +myES="http://127.0.0.1:64298/" +myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green) +if ! [ "$myESSTATUS" = "1" ] + then + echo "### Elasticsearch is not available, try starting via 'systemctl start tpot'." + exit + else + echo "### Elasticsearch is available, now continuing."
+ echo +fi + +# Set vars +myCOUNT=1 +myDATE=$(date +%Y%m%d%H%M) +myELKPATH="/data/elk/data" +myKIBANAINDEXNAME=$(curl -s -XGET ''$myES'_cat/indices/.kibana' | awk '{ print $4 }') +myKIBANAINDEXPATH=$myELKPATH/indices/$myKIBANAINDEXNAME + +# Let's ensure normal operation on exit or if interrupted ... +function fuCLEANUP { + ### Start T-Pot + systemctl start tpot + echo "### Now starting T-Pot ..." +} +trap fuCLEANUP EXIT + +# Stop T-Pot to lift db lock +echo "### Now stopping T-Pot" +systemctl stop tpot +sleep 2 + +# Backup DB in 2 flavors +echo "### Now backing up Elasticsearch folders ..." +if [ "$1" == "all" ]; + then + tar cvfz "elkall_"$myDATE".tgz" $myELKPATH +elif [ "$1" == "base" ]; + then + tar cvfz "elkbase_"$myDATE".tgz" $myKIBANAINDEXPATH +fi + diff --git a/docker/tpotinit/dist/bin/blackhole.sh b/docker/tpotinit/dist/bin/blackhole.sh new file mode 100755 index 00000000..e2a51af0 --- /dev/null +++ b/docker/tpotinit/dist/bin/blackhole.sh @@ -0,0 +1,109 @@ +#!/bin/bash + +# Run as root only. +myWHOAMI=$(whoami) +if [ "$myWHOAMI" != "root" ] + then + echo "### Need to run as root ..." + echo + exit +fi + +# Disclaimer +if [ "$1" == "" ]; + then + echo "### Warning!" + echo "### This script will download and add blackhole routes for known mass scanners in an attempt to decrease the chance of detection." + echo "### IPs are neither curated nor verified, use at your own risk!" + echo "###" + echo "### As long as 'blackhole.sh del' is not executed the routes will be re-added on T-Pot start." + echo "### Check with 'ip r' if blackhole is enabled." + echo + echo "Usage: blackhole.sh add (add blackhole routes)" + echo " blackhole.sh del (delete blackhole routes)" + echo + exit +fi + +# QnD paths, files +mkdir -p /etc/blackhole +cd /etc/blackhole +myFILE="mass_scanner.txt" +myURL="https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/mass_scanner.txt" +myBASELINE="500" +# Alternatively, using fewer routes, but blocking complete /24 networks +#myFILE="mass_scanner_cidr.txt" +#myURL="https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/mass_scanner_cidr.txt" + +# Calculate age of downloaded list, read IPs +if [ -f "$myFILE" ]; + then + myNOW=$(date +%s) + myOLD=$(date +%s -r "$myFILE") + myDAYS=$(( ($myNOW-$myOLD) / (60*60*24) )) + echo "### Downloaded $myFILE list is $myDAYS days old." + myBLACKHOLE_IPS=$(grep -o -P "\b(?:\d{1,3}\.){3}\d{1,3}\b" "$myFILE" | sort -u) +fi + +# Let's load ip list +if [[ ! -f "$myFILE" && "$1" == "add" || "$myDAYS" -gt 30 ]]; + then + echo "### Downloading $myFILE list." + aria2c --allow-overwrite -s16 -x 16 "$myURL" && \ + myBLACKHOLE_IPS=$(grep -o -P "\b(?:\d{1,3}\.){3}\d{1,3}\b" "$myFILE" | sort -u) +fi + +myCOUNT=$(echo $myBLACKHOLE_IPS | wc -w) +# Let's extract mass scanner IPs +if [ "$myCOUNT" -lt "$myBASELINE" ] && [ "$1" == "add" ]; + then + echo "### Something went wrong. Please check contents of /etc/blackhole/$myFILE." + echo "### Aborting." + echo + exit +elif [ "$(ip r | grep 'blackhole' -c)" -gt "$myBASELINE" ] && [ "$1" == "add" ]; + then + echo "### Blackhole already enabled." + echo "### Aborting." + echo + exit +fi + +# Let's add blackhole routes for all mass scanner IPs +if [ "$1" == "add" ]; + then + echo + echo -n "Now adding $myCOUNT IPs to blackhole." + for i in $myBLACKHOLE_IPS; + do + ip route add blackhole "$i" + echo -n "." + done + echo + echo "Added $(ip r | grep "blackhole" -c) IPs to blackhole." + echo + echo "### Remember!"
+ echo "### As long as 'blackhole.sh del' is not executed the routes will be re-added on T-Pot start." + echo "### Check with 'ip r' if blackhole is enabled." + echo + exit +fi + +# Let's delete blackhole routes for all mass scanner IPs +if [ "$1" == "del" ] && [ "$myCOUNT" -gt "$myBASELINE" ]; + then + echo + echo -n "Now deleting $myCOUNT IPs from blackhole." + for i in $myBLACKHOLE_IPS; + do + ip route del blackhole "$i" + echo -n "." + done + echo + echo "$(ip r | grep 'blackhole' -c) IPs remaining in blackhole." + echo + rm "$myFILE" + else + echo "### Blackhole already disabled." + echo +fi diff --git a/docker/tpotinit/dist/bin/change_ews_config.sh b/docker/tpotinit/dist/bin/change_ews_config.sh new file mode 100755 index 00000000..5b660656 --- /dev/null +++ b/docker/tpotinit/dist/bin/change_ews_config.sh @@ -0,0 +1,89 @@ +#!/bin/bash + +echo """ + +############################## +# T-POT DTAG Data Submission # +# Contact: # +# cert@telekom.de # +############################## +""" + +# Got root? +myWHOAMI=$(whoami) +if [ "$myWHOAMI" != "root" ] + then + echo "Need to run as root ..." + sudo ./$0 + exit +fi + +printf "[*] Enter your API UserID: " +read apiUser +printf "[*] Enter your API Token: " +read apiToken +printf "[*] If you have multiple T-Pots running, give them each a unique NUMBER, e.g. '2' for your second T-Pot installation. Enter unique number for THIS T-Pot: " +read indexNumber +if ! [[ "$indexNumber" =~ ^[0-9]+$ ]] + then + echo "Sorry, integers only. You have to start over..." + exit 1 +fi +apiURL="https://community.sicherheitstacho.eu/ews-0.1/alert/postSimpleMessage" +printf "[*] Currently, your honeypot is configured to transmit data to the default backend at 'https://community.sicherheitstacho.eu/ews-0.1/alert/postSimpleMessage'. Do you want to change this API endpoint? Only do this if you run your own PEBA backend instance. (N/y): " +read replyAPI +if [[ $replyAPI =~ ^[Yy]$ ]] +then + printf "[*] Enter your API endpoint URL and make sure it contains the full path, e.g. 'https://myDomain.local:9922/ews-0.1/alert/postSimpleMessage': " + read apiURL +fi + + + +echo "" +echo "[*] Recap! You defined: " +echo "############################" +echo "API User: " $apiUser +echo "API Token: " $apiToken +echo "API URL: " $apiURL +echo "Unique numeric ID for your T-Pot Installation: " $indexNumber +echo "Specific honeypot-IDs will look like : -"$apiUser"-"$indexNumber +echo "############################" +echo "" +printf "[*] Is the above correct (y/N)? " +read reply +if [[ ! $reply =~ ^[Yy]$ ]] +then + echo "OK, then run this again..." + exit 1 +fi +echo "" +echo "[+] Creating config file with API UserID '$apiUser' and API Token '$apiToken'." +echo "[+] Fetching config file from github. Outgoing https requests must be enabled!" +wget -q https://raw.githubusercontent.com/telekom-security/tpotce/master/docker/ews/dist/ews.cfg -O ews.cfg.dist +if [[ -f "ews.cfg.dist" ]]; then + echo "[+] Successfully downloaded ews.cfg from github." +else + echo "[+] Could not download ews.cfg from github." + exit 1 +fi +echo "[+] Patching ews.cfg API Credentials." +sed 's/community-01-user/'$apiUser'/' ews.cfg.dist > ews.cfg +sed -i 's/foth{a5maiCee8fineu7/'$apiToken'/' ews.cfg +echo "[+] Patching ews.cfg API Url." +apiURL=${apiURL////\\/}; +sed -i 's/https:\/\/community.sicherheitstacho.eu\/ews-0.1\/alert\/postSimpleMessage/'$apiURL'/' ews.cfg +echo "[+] Patching ews.cfg honeypot IDs."
+sed -i 's/community-01/'$apiUser'-'$indexNumber'/' ews.cfg + +rm ews.cfg.dist + +echo "[+] Changing tpot.yml to include new ews.cfg." + +cp ews.cfg /data/ews/conf/ews.cfg +cp /opt/tpot/etc/tpot.yml /opt/tpot/etc/tpot.yml.bak +sed -i '/- \/data\/ews\/conf\/ews.ip:\/opt\/ewsposter\/ews.ip/a\ \ \ - \/data\/ews\/conf\/ews.cfg:\/opt\/ewsposter\/ews.cfg' /opt/tpot/etc/tpot.yml + +echo "[+] Restarting T-Pot." +systemctl restart tpot +echo "[+] Done." diff --git a/docker/tpotinit/dist/bin/clean.sh b/docker/tpotinit/dist/bin/clean.sh new file mode 100755 index 00000000..c9e6cb44 --- /dev/null +++ b/docker/tpotinit/dist/bin/clean.sh @@ -0,0 +1,372 @@ +#!/bin/bash +# T-Pot Container Data Cleaner & Log Rotator +# Set colors +myRED="" +myGREEN="" +myWHITE="" + +# Set pigz +myPIGZ=$(which pigz) + +# Set persistence +myPERSISTENCE=$1 + +# Let's create a function to check if folder is empty +fuEMPTY () { + local myFOLDER=$1 + +echo $(ls $myFOLDER | wc -l) +} + +# Let's create a function to rotate and compress logs +fuLOGROTATE () { + local mySTATUS="/opt/tpot/etc/logrotate/status" + local myCONF="/opt/tpot/etc/logrotate/logrotate.conf" + local myADBHONEYTGZ="/data/adbhoney/downloads.tgz" + local myADBHONEYDL="/data/adbhoney/downloads/" + local myCOWRIETTYLOGS="/data/cowrie/log/tty/" + local myCOWRIETTYTGZ="/data/cowrie/log/ttylogs.tgz" + local myCOWRIEDL="/data/cowrie/downloads/" + local myCOWRIEDLTGZ="/data/cowrie/downloads.tgz" + local myDIONAEABI="/data/dionaea/bistreams/" + local myDIONAEABITGZ="/data/dionaea/bistreams.tgz" + local myDIONAEABIN="/data/dionaea/binaries/" + local myDIONAEABINTGZ="/data/dionaea/binaries.tgz" + local myHONEYTRAPATTACKS="/data/honeytrap/attacks/" + local myHONEYTRAPATTACKSTGZ="/data/honeytrap/attacks.tgz" + local myHONEYTRAPDL="/data/honeytrap/downloads/" + local myHONEYTRAPDLTGZ="/data/honeytrap/downloads.tgz" + local myTANNERF="/data/tanner/files/" + local myTANNERFTGZ="/data/tanner/files.tgz" + +# Ensure correct permissions and ownerships for logrotate to run without issues +chmod 770 /data/ -R +chown tpot:tpot /data -R +chmod 644 /data/nginx/conf -R +chmod 644 /data/nginx/cert -R + +# Run logrotate with force (-f) first, so the status file can be written and race conditions (with tar) be avoided +logrotate -f -s $mySTATUS $myCONF + +# Compressing some folders first and rotate them later +if [ "$(fuEMPTY $myADBHONEYDL)" != "0" ]; then tar -I $myPIGZ -cvf $myADBHONEYTGZ $myADBHONEYDL; fi +if [ "$(fuEMPTY $myCOWRIETTYLOGS)" != "0" ]; then tar -I $myPIGZ -cvf $myCOWRIETTYTGZ $myCOWRIETTYLOGS; fi +if [ "$(fuEMPTY $myCOWRIEDL)" != "0" ]; then tar -I $myPIGZ -cvf $myCOWRIEDLTGZ $myCOWRIEDL; fi +if [ "$(fuEMPTY $myDIONAEABI)" != "0" ]; then tar -I $myPIGZ -cvf $myDIONAEABITGZ $myDIONAEABI; fi +if [ "$(fuEMPTY $myDIONAEABIN)" != "0" ]; then tar -I $myPIGZ -cvf $myDIONAEABINTGZ $myDIONAEABIN; fi +if [ "$(fuEMPTY $myHONEYTRAPATTACKS)" != "0" ]; then tar -I $myPIGZ -cvf $myHONEYTRAPATTACKSTGZ $myHONEYTRAPATTACKS; fi +if [ "$(fuEMPTY $myHONEYTRAPDL)" != "0" ]; then tar -I $myPIGZ -cvf $myHONEYTRAPDLTGZ $myHONEYTRAPDL; fi +if [ "$(fuEMPTY $myTANNERF)" != "0" ]; then tar -I $myPIGZ -cvf $myTANNERFTGZ $myTANNERF; fi + +# Ensure correct permissions and ownership for previously created archives +chmod 770 $myADBHONEYTGZ $myCOWRIETTYTGZ $myCOWRIEDLTGZ $myDIONAEABITGZ $myDIONAEABINTGZ $myHONEYTRAPATTACKSTGZ $myHONEYTRAPDLTGZ $myTANNERFTGZ +chown tpot:tpot $myADBHONEYTGZ $myCOWRIETTYTGZ $myCOWRIEDLTGZ $myDIONAEABITGZ $myDIONAEABINTGZ $myHONEYTRAPATTACKSTGZ $myHONEYTRAPDLTGZ $myTANNERFTGZ 
+ +# Need to remove subfolders since too many files cause rm to exit with errors +rm -rf $myADBHONEYDL $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL $myTANNERF + +# Recreate subfolders with correct permissions and ownership +mkdir -p $myADBHONEYDL $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL $myTANNERF +chmod 770 $myADBHONEYDL $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL $myTANNERF +chown tpot:tpot $myADBHONEYDL $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL $myTANNERF + +# Run logrotate again to account for previously created archives - DO NOT FORCE HERE! +logrotate -s $mySTATUS $myCONF +} + +# Let's create a function to clean up and prepare adbhoney data +fuADBHONEY () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/adbhoney/*; fi + mkdir -p /data/adbhoney/log/ /data/adbhoney/downloads/ + chmod 770 /data/adbhoney/ -R + chown tpot:tpot /data/adbhoney/ -R +} + +# Let's create a function to clean up and prepare ciscoasa data +fuCISCOASA () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/ciscoasa/*; fi + mkdir -p /data/ciscoasa/log + chmod 770 /data/ciscoasa -R + chown tpot:tpot /data/ciscoasa -R +} + +# Let's create a function to clean up and prepare citrixhoneypot data +fuCITRIXHONEYPOT () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/citrixhoneypot/*; fi + mkdir -p /data/citrixhoneypot/logs/ + chmod 770 /data/citrixhoneypot/ -R + chown tpot:tpot /data/citrixhoneypot/ -R +} + +# Let's create a function to clean up and prepare conpot data +fuCONPOT () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/conpot/*; fi + mkdir -p /data/conpot/log + chmod 770 /data/conpot -R + chown tpot:tpot /data/conpot -R +} + +# Let's create a function to clean up and prepare cowrie data +fuCOWRIE () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/cowrie/*; fi + mkdir -p /data/cowrie/log/tty/ /data/cowrie/downloads/ /data/cowrie/keys/ /data/cowrie/misc/ + chmod 770 /data/cowrie -R + chown tpot:tpot /data/cowrie -R +} + +# Let's create a function to clean up and prepare ddospot data +fuDDOSPOT () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/ddospot/log; fi + mkdir -p /data/ddospot/bl /data/ddospot/db /data/ddospot/log + chmod 770 /data/ddospot -R + chown tpot:tpot /data/ddospot -R +} + +# Let's create a function to clean up and prepare dicompot data +fuDICOMPOT () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/dicompot/log; fi + mkdir -p /data/dicompot/log + mkdir -p /data/dicompot/images + chmod 770 /data/dicompot -R + chown tpot:tpot /data/dicompot -R +} + +# Let's create a function to clean up and prepare dionaea data +fuDIONAEA () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/dionaea/*; fi + mkdir -p /data/dionaea/log /data/dionaea/bistreams /data/dionaea/binaries /data/dionaea/rtp /data/dionaea/roots/ftp /data/dionaea/roots/tftp /data/dionaea/roots/www /data/dionaea/roots/upnp + chmod 770 /data/dionaea -R + chown tpot:tpot /data/dionaea -R +} + +# Let's create a function to clean up and prepare elasticpot data +fuELASTICPOT () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/elasticpot/*; fi + mkdir -p /data/elasticpot/log + chmod 770 /data/elasticpot -R + chown tpot:tpot /data/elasticpot -R +} + +# Let's create a function to clean up and prepare elk data +fuELK () { + # ELK data will be kept for <= 90 days, check /etc/crontab for curator modification + # ELK
daemon log files will be removed + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/elk/log/*; fi + mkdir -p /data/elk + chmod 770 /data/elk -R + chown tpot:tpot /data/elk -R +} + +# Let's create a function to clean up and prepare endlessh data +fuENDLESSH () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/endlessh/log; fi + mkdir -p /data/endlessh/log + chmod 770 /data/endlessh -R + chown tpot:tpot /data/endlessh -R +} + +# Let's create a function to clean up and prepare fatt data +fuFATT () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/fatt/*; fi + mkdir -p /data/fatt/log + chmod 770 -R /data/fatt + chown tpot:tpot -R /data/fatt +} + +# Let's create a function to clean up and prepare glutton data +fuGLUTTON () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/glutton/*; fi + mkdir -p /data/glutton/log + chmod 770 /data/glutton -R + chown tpot:tpot /data/glutton -R +} + +# Let's create a function to clean up and prepare hellpot data +fuHELLPOT () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/hellpot/log; fi + mkdir -p /data/hellpot/log + chmod 770 /data/hellpot -R + chown tpot:tpot /data/hellpot -R +} + +# Let's create a function to clean up and prepare heralding data +fuHERALDING () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/heralding/*; fi + mkdir -p /data/heralding/log + chmod 770 /data/heralding -R + chown tpot:tpot /data/heralding -R +} + +# Let's create a function to clean up and prepare honeypots data +fuHONEYPOTS () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/honeypots/*; fi + mkdir -p /data/honeypots/log + chmod 770 /data/honeypots -R + chown tpot:tpot /data/honeypots -R +} + +# Let's create a function to clean up and prepare honeysap data +fuHONEYSAP () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/honeysap/*; fi + mkdir -p /data/honeysap/log + chmod 770 /data/honeysap -R + chown tpot:tpot /data/honeysap -R +} + +# Let's create a function to clean up and prepare honeytrap data +fuHONEYTRAP () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/honeytrap/*; fi + mkdir -p /data/honeytrap/log/ /data/honeytrap/attacks/ /data/honeytrap/downloads/ + chmod 770 /data/honeytrap/ -R + chown tpot:tpot /data/honeytrap/ -R +} + +# Let's create a function to clean up and prepare ipphoney data +fuIPPHONEY () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/ipphoney/*; fi + mkdir -p /data/ipphoney/log + chmod 770 /data/ipphoney -R + chown tpot:tpot /data/ipphoney -R +} + +# Let's create a function to clean up and prepare log4pot data +fuLOG4POT () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/log4pot/*; fi + mkdir -p /data/log4pot/log + chmod 770 /data/log4pot -R + chown tpot:tpot /data/log4pot -R +} + +# Let's create a function to clean up and prepare mailoney data +fuMAILONEY () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/mailoney/*; fi + mkdir -p /data/mailoney/log/ + chmod 770 /data/mailoney/ -R + chown tpot:tpot /data/mailoney/ -R +} + +# Let's create a function to clean up and prepare medpot data +fuMEDPOT () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/medpot/*; fi + mkdir -p /data/medpot/log/ + chmod 770 /data/medpot/ -R + chown tpot:tpot /data/medpot/ -R +} + +# Let's create a function to clean up nginx logs +fuNGINX () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/nginx/log/*; fi + touch /data/nginx/log/error.log + chmod 644 /data/nginx/conf -R + chmod 644 /data/nginx/cert -R +} + +# Let's create a function to clean up and prepare rdpy data +fuRDPY () { + if [ 
"$myPERSISTENCE" != "on" ]; then rm -rf /data/rdpy/*; fi + mkdir -p /data/rdpy/log/ + chmod 770 /data/rdpy/ -R + chown tpot:tpot /data/rdpy/ -R +} + +# Let's create a function to clean up and prepare redishoneypot data +fuREDISHONEYPOT () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/redishoneypot/log; fi + mkdir -p /data/redishoneypot/log + chmod 770 /data/redishoneypot -R + chown tpot:tpot /data/redishoneypot -R +} + +# Let's create a function to clean up and prepare sentrypeer data +fuSENTRYPEER () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/sentrypeer/log; fi + mkdir -p /data/sentrypeer/log + chmod 770 /data/sentrypeer -R + chown tpot:tpot /data/sentrypeer -R +} + +# Let's create a function to prepare spiderfoot db +fuSPIDERFOOT () { + mkdir -p /data/spiderfoot + touch /data/spiderfoot/spiderfoot.db + chmod 770 -R /data/spiderfoot + chown tpot:tpot -R /data/spiderfoot +} + +# Let's create a function to clean up and prepare suricata data +fuSURICATA () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/suricata/*; fi + mkdir -p /data/suricata/log + chmod 770 -R /data/suricata + chown tpot:tpot -R /data/suricata +} + +# Let's create a function to clean up and prepare p0f data +fuP0F () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/p0f/*; fi + mkdir -p /data/p0f/log + chmod 770 -R /data/p0f + chown tpot:tpot -R /data/p0f +} + +# Let's create a function to clean up and prepare tanner data +fuTANNER () { + if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/tanner/*; fi + mkdir -p /data/tanner/log /data/tanner/files + chmod 770 -R /data/tanner + chown tpot:tpot -R /data/tanner +} + +# Avoid unwanted cleaning +if [ "$myPERSISTENCE" = "" ]; + then + echo $myRED"!!! WARNING !!! - This will delete ALL honeypot logs. "$myWHITE + while [ "$myQST" != "y" ] && [ "$myQST" != "n" ]; + do + read -p "Continue? (y/n) " myQST + done + if [ "$myQST" = "n" ]; + then + echo $myGREEN"Puuh! That was close! Aborting!"$myWHITE + exit + fi +fi + +# Check persistence, if enabled compress and rotate logs +if [ "$myPERSISTENCE" = "on" ]; + then + echo "Persistence enabled, now rotating and compressing logs." + fuLOGROTATE + else + echo "Cleaning up and preparing data folders." + fuADBHONEY + fuCISCOASA + fuCITRIXHONEYPOT + fuCONPOT + fuCOWRIE + fuDDOSPOT + fuDICOMPOT + fuDIONAEA + fuELASTICPOT + fuELK + fuENDLESSH + fuFATT + fuGLUTTON + fuHERALDING + fuHELLPOT + fuHONEYSAP + fuHONEYPOTS + fuHONEYTRAP + fuIPPHONEY + fuLOG4POT + fuMAILONEY + fuMEDPOT + fuNGINX + fuREDISHONEYPOT + fuRDPY + fuSENTRYPEER + fuSPIDERFOOT + fuSURICATA + fuP0F + fuTANNER + fi diff --git a/docker/tpotinit/dist/bin/deploy.sh b/docker/tpotinit/dist/bin/deploy.sh new file mode 100755 index 00000000..e1d5af4b --- /dev/null +++ b/docker/tpotinit/dist/bin/deploy.sh @@ -0,0 +1,182 @@ +#!/bin/bash + +# Do we have root? +function fuGOT_ROOT { +echo +echo -n "### Checking for root: " +if [ "$(whoami)" != "root" ]; + then + echo "[ NOT OK ]" + echo "### Please run as root." + echo "### Example: sudo $0" + exit + else + echo "[ OK ]" +fi +} + +function fuDEPLOY_SENSOR () { +echo +echo "###############################" +echo "# Deploying to T-Pot Hive ... 
#" +echo "###############################" +echo +sshpass -e ssh -4 -t -T -l "$MY_TPOT_USERNAME" -p 64295 "$MY_HIVE_IP" << EOF +echo "$SSHPASS" | sudo -S bash -c 'useradd -m -s /sbin/nologin -G tpotlogs "$MY_HIVE_USERNAME"; +mkdir -p /home/"$MY_HIVE_USERNAME"/.ssh; +echo "$MY_SENSOR_PUBLICKEY" >> /home/"$MY_HIVE_USERNAME"/.ssh/authorized_keys; +chmod 600 /home/"$MY_HIVE_USERNAME"/.ssh/authorized_keys; +chmod 755 /home/"$MY_HIVE_USERNAME"/.ssh; +chown "$MY_HIVE_USERNAME":"$MY_HIVE_USERNAME" -R /home/"$MY_HIVE_USERNAME"/.ssh' +EOF + +echo +echo "###########################" +echo "# Done. Please reboot ... #" +echo "###########################" +echo + +exit 0 +} + +# Check Hive availability
function fuCHECK_HIVE () { +echo +echo "############################################" +echo "# Checking for T-Pot Hive availability ... #" +echo "############################################" +echo +sshpass -e ssh -4 -t -l "$MY_TPOT_USERNAME" -p 64295 -f -N -L64305:127.0.0.1:64305 "$MY_HIVE_IP" -o "StrictHostKeyChecking=no" +if [ $? -eq 0 ]; + then + echo + echo "#########################" + echo "# T-Pot Hive available! #" + echo "#########################" + echo + myHIVE_OK=$(curl -s http://127.0.0.1:64305) + if [ "$myHIVE_OK" == "ok" ]; + then + echo + echo "##############################" + echo "# T-Pot Hive tunnel test OK! #" + echo "##############################" + echo + kill -9 $(pidof ssh) + else + echo + echo "######################################################" + echo "# T-Pot Hive tunnel test FAILED! #" + echo "# Tunneled port tcp/64305 unreachable on T-Pot Hive. #" + echo "# Aborting. #" + echo "######################################################" + echo + kill -9 $(pidof ssh) + rm $MY_SENSOR_PUBLICKEYFILE + rm $MY_SENSOR_PRIVATEKEYFILE + rm $MY_LS_ENVCONFIGFILE + exit 1 + fi; + else + echo + echo "#################################################################" + echo "# Something went wrong, most likely T-Pot Hive was unreachable! #" + echo "# Aborting. #" + echo "#################################################################" + echo + rm $MY_SENSOR_PUBLICKEYFILE + rm $MY_SENSOR_PRIVATEKEYFILE + rm $MY_LS_ENVCONFIGFILE + exit 1 +fi; +} + +function fuGET_DEPLOY_DATA () { +echo +echo "### Please provide data from your T-Pot Hive installation." +echo "### This is usually the one running the 'T-Pot Hive' type." +echo "### You will be needing the OS user (typically 'tsec'), the user's password and the IP / FQDN." +echo "### Do not worry, the password will not be persisted!" +echo + +read -p "Username: " MY_TPOT_USERNAME +read -s -p "Password: " SSHPASS +echo +export SSHPASS +read -p "IP / FQDN: " MY_HIVE_IP +MY_HIVE_USERNAME="$(hostname)" +MY_TPOT_TYPE="SENSOR" +MY_LS_ENVCONFIGFILE="/data/elk/logstash/ls_environment" +
MY_SENSOR_PUBLICKEYFILE="/data/elk/logstash/$MY_HIVE_USERNAME.pub" +MY_SENSOR_PRIVATEKEYFILE="/data/elk/logstash/$MY_HIVE_USERNAME" +if ! [ -s "$MY_SENSOR_PRIVATEKEYFILE" ] && ! [ -s "$MY_SENSOR_PUBLICKEYFILE" ]; + then + echo + echo "##############################" + echo "# Generating ssh keyfile ... #" + echo "##############################" + echo + mkdir -p /data/elk/logstash + ssh-keygen -f "$MY_SENSOR_PRIVATEKEYFILE" -N "" -C "$MY_HIVE_USERNAME" + MY_SENSOR_PUBLICKEY="$(cat "$MY_SENSOR_PUBLICKEYFILE")" + else + echo + echo "#############################################" + echo "# There is already an ssh keyfile. Aborting. 
#" + echo "#############################################" + echo + exit 1 +fi +echo +echo "###########################################################" +echo "# Writing config to /data/elk/logstash/ls_environment. #" +echo "# If you make changes to this file, you need to reboot or #" +echo "# run /opt/tpot/bin/updateip.sh. #" +echo "###########################################################" +echo +tee $MY_LS_ENVCONFIGFILE << EOF +MY_TPOT_TYPE=$MY_TPOT_TYPE +MY_SENSOR_PRIVATEKEYFILE=$MY_SENSOR_PRIVATEKEYFILE +MY_HIVE_USERNAME=$MY_HIVE_USERNAME +MY_HIVE_IP=$MY_HIVE_IP +EOF +} + +# Deploy Pot to Hive +fuGOT_ROOT +echo +echo "#################################" +echo "# Ship T-Pot Logs to T-Pot Hive #" +echo "#################################" +echo +echo "If you already have a T-Pot Hive installation running and" +echo "this T-Pot installation is running the type \"Pot\", the" +echo "script will automagically set up this T-Pot to ship and" +echo "prepare the Hive to receive logs from this T-Pot." +echo +echo +echo "###################################" +echo "# Deploy T-Pot Logs to T-Pot Hive #" +echo "###################################" +echo +echo "[c] - Continue deployment" +echo "[q] - Abort and exit" +echo +while [ 1 != 2 ] + do + read -s -n 1 -p "Your choice: " mySELECT + echo $mySELECT + case "$mySELECT" in + [c,C]) + fuGET_DEPLOY_DATA + fuCHECK_HIVE + fuDEPLOY_SENSOR + break + ;; + [q,Q]) + echo "Aborted." + exit 0 + ;; + esac +done diff --git a/docker/tpotinit/dist/bin/deprecated/export_kibana-objects.sh b/docker/tpotinit/dist/bin/deprecated/export_kibana-objects.sh new file mode 100755 index 00000000..e5280dd4 --- /dev/null +++ b/docker/tpotinit/dist/bin/deprecated/export_kibana-objects.sh @@ -0,0 +1,94 @@ +#!/bin/bash +# Export all Kibana objects through Kibana Saved Objects API +# Make sure ES is available +myES="http://127.0.0.1:64298/" +myKIBANA="http://127.0.0.1:64296/" +myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green) +if ! [ "$myESSTATUS" = "1" ] + then + echo "### Elasticsearch is not available, try starting via 'systemctl start tpot'." + exit + else + echo "### Elasticsearch is available, now continuing." + echo +fi + +# Set vars +myDATE=$(date +%Y%m%d%H%M) +myINDEXCOUNT=$(curl -s -XGET ''$myKIBANA'api/saved_objects/_find?type=index-pattern' | jq '.saved_objects[].attributes' | tr '\\' '\n' | grep -E "scripted|url" | wc -w) +myINDEXID=$(curl -s -XGET ''$myKIBANA'api/saved_objects/_find?type=index-pattern' | jq '.saved_objects[].id' | tr -d '"') +myDASHBOARDS=$(curl -s -XGET ''$myKIBANA'api/saved_objects/_find?type=dashboard&per_page=500' | jq '.saved_objects[].id' | tr -d '"') +myVISUALIZATIONS=$(curl -s -XGET ''$myKIBANA'api/saved_objects/_find?type=visualization&per_page=500' | jq '.saved_objects[].id' | tr -d '"') +mySEARCHES=$(curl -s -XGET ''$myKIBANA'api/saved_objects/_find?type=search&per_page=500' | jq '.saved_objects[].id' | tr -d '"') +myCONFIGS=$(curl -s -XGET ''$myKIBANA'api/saved_objects/_find?type=config&per_page=500' | jq '.saved_objects[].id' | tr -d '"') +myCOL1="" +myCOL0="" + +# Let's ensure normal operation on exit or if interrupted ... +function fuCLEANUP { + rm -rf patterns/ dashboards/ visualizations/ searches/ configs/ +} +trap fuCLEANUP EXIT + +# Export index patterns +mkdir -p patterns +echo $myCOL1"### Now exporting"$myCOL0 $myINDEXCOUNT $myCOL1"index pattern fields." $myCOL0 +curl -s -XGET ''$myKIBANA'api/saved_objects/index-pattern/'$myINDEXID'' | jq '. 
| {attributes, references}' > patterns/$myINDEXID.json & +echo + +# Export dashboards +mkdir -p dashboards +echo $myCOL1"### Now exporting"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"dashboards." $myCOL0 +for i in $myDASHBOARDS; + do + echo $myCOL1"###### "$i $myCOL0 + curl -s -XGET ''$myKIBANA'api/saved_objects/dashboard/'$i'' | jq '. | {attributes, references}' > dashboards/$i.json & + done; +echo + +# Export visualizations +mkdir -p visualizations +echo $myCOL1"### Now exporting"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1"visualizations." $myCOL0 +for i in $myVISUALIZATIONS; + do + echo $myCOL1"###### "$i $myCOL0 + curl -s -XGET ''$myKIBANA'api/saved_objects/visualization/'$i'' | jq '. | {attributes, references}' > visualizations/$i.json & + done; +echo + +# Export searches +mkdir -p searches +echo $myCOL1"### Now exporting"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searches." $myCOL0 +for i in $mySEARCHES; + do + echo $myCOL1"###### "$i $myCOL0 + curl -s -XGET ''$myKIBANA'api/saved_objects/search/'$i'' | jq '. | {attributes, references}' > searches/$i.json & + done; +echo + +# Export configs +mkdir -p configs +echo $myCOL1"### Now exporting"$myCOL0 $(echo $myCONFIGS | wc -w) $myCOL1"configs." $myCOL0 +for i in $myCONFIGS; + do + echo $myCOL1"###### "$i $myCOL0 + curl -s -XGET ''$myKIBANA'api/saved_objects/config/'$i'' | jq '. | {attributes, references}' > configs/$i.json & + done; +echo + +# Wait for background exports to finish +wait + +# Building tar archive +echo $myCOL1"### Now building archive"$myCOL0 "kibana-objects_"$myDATE".tgz" +tar cvfz kibana-objects_$myDATE.tgz patterns dashboards visualizations searches configs > /dev/null + +# Stats +echo +echo $myCOL1"### Statistics" +echo $myCOL1"###### Exported"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0 +echo $myCOL1"###### Exported"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"dashboards." $myCOL0 +echo $myCOL1"###### Exported"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1"visualizations." $myCOL0 +echo $myCOL1"###### Exported"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searches." $myCOL0 +echo $myCOL1"###### Exported"$myCOL0 $(echo $myCONFIGS | wc -w) $myCOL1"configs." $myCOL0 +echo diff --git a/docker/tpotinit/dist/bin/deprecated/hptest.sh b/docker/tpotinit/dist/bin/deprecated/hptest.sh new file mode 100755 index 00000000..94806a71 --- /dev/null +++ b/docker/tpotinit/dist/bin/deprecated/hptest.sh @@ -0,0 +1,122 @@ +#!/bin/bash + +myHOST="$1" +myPACKAGES="dcmtk netcat nmap" +myMEDPOTPACKET=" +MSH|^~\&|ADT1|MCM|LABADT|MCM|198808181126|SECURITY|ADT^A01|MSG00001-|P|2.6 +EVN|A01|198808181123 +PID|||PATID1234^5^M11^^AN||JONES^WILLIAM^A^III||19610615|M||2106-3|677 DELAWARE AVENUE^^EVERETT^MA^02149|GL|(919)379-1212|(919)271-3434~(919)277-3114||S||PATID12345001^2^M10^^ACSN|123456789|9-87654^NC +NK1|1|JONES^BARBARA^K|SPO|||||20011105 +NK1|1|JONES^MICHAEL^A|FTH +PV1|1|I|2000^2012^01||||004777^LEBAUER^SIDNEY^J.|||SUR||-||ADM|A0 +AL1|1||^PENICILLIN||CODE16~CODE17~CODE18 +AL1|2||^CAT DANDER||CODE257 +DG1|001|I9|1550|MAL NEO LIVER, PRIMARY|19880501103005|F +PR1|2234|M11|111^CODE151|COMMON PROCEDURES|198809081123 +ROL|45^RECORDER^ROLE MASTER LIST|AD|RO|KATE^SMITH^ELLEN|199505011201 +GT1|1122|1519|BILL^GATES^A +IN1|001|A357|1234|BCMD|||||132987 +IN2|ID1551001|SSN12345678 +ROL|45^RECORDER^ROLE MASTER LIST|AD|RO|KATE^ELLEN|199505011201" + +function fuGOTROOT { +myWHOAMI=$(whoami) +if [ "$myWHOAMI" != "root" ] + then + echo "Need to run as root ..." 
+ exit +fi +} + +function fuCHECKDEPS { +myINST="" +for myDEPS in $myPACKAGES; +do + myOK=$(dpkg -s $myDEPS | grep ok | awk '{ print $3 }'); + if [ "$myOK" != "ok" ] + then + myINST=$(echo $myINST $myDEPS) + fi +done +if [ "$myINST" != "" ] + then + apt-get update -y + for myDEPS in $myINST; + do + apt-get install $myDEPS -y + done +fi +} + +function fuCHECKFORARGS { +if [ "$myHOST" != "" ]; + then + echo "All arguments met. Continuing." + else + echo "Usage: hp_test.sh <[host or ip]>" + exit +fi +} + +function fuGETPORTS { +myDOCKERCOMPOSEPORTS=$(cat $myDOCKERCOMPOSEYML | yq -r '.services[].ports' | grep ':' | sed -e s/127.0.0.1// | tr -d '", ' | sed -e s/^:// | cut -f1 -d ':' | grep -v "6429\|6430" | sort -gu) +myPORTS=$(for i in $myDOCKERCOMPOSEPORTS; do echo "$i"; done) +echo "Found these ports enabled:" +echo "$myPORTS" +exit +} + +function fuSCAN { +local myTIMEOUT="$1" +local mySCANPORT="$2" +local mySCANIP="$3" +local mySCANOPTS="$4" + +timeout --foreground ${myTIMEOUT} nmap ${mySCANOPTS} -T4 -v -p ${mySCANPORT} ${mySCANIP} & +} + +# Main +fuGOTROOT +fuCHECKDEPS +fuCHECKFORARGS + +echo "Starting scans ..." +echo "$myMEDPOTPACKET" | nc "$myHOST" 2575 & +curl -XGET "http://$myHOST:9200/logstash-*/_search" & +curl -XPOST -H "Content-Type: application/json" -d '{"name":"test","email":"test@test.com"}' "http://$myHOST:9200/test" & +echo "I20100" | timeout --foreground 3 nc "$myHOST" 10001 & +findscu -P -k PatientName="*" $myHOST 11112 & +getscu -P -k PatientName="*" $myHOST 11112 & +telnet $myHOST 3299 & +fuSCAN "180" "7,8,102,135,161,1025,1080,5000,9200" "$myHOST" "-sC -sS -sU -sV" +fuSCAN "180" "2048,4096,5432" "$myHOST" "-sC -sS -sU -sV --version-light" +fuSCAN "120" "20,21" "$myHOST" "--script=ftp* -sC -sS -sV" +fuSCAN "120" "22" "$myHOST" "--script=ssh2-enum-algos,ssh-auth-methods,ssh-hostkey,ssh-publickey-acceptance,sshv1 -sC -sS -sV" +fuSCAN "30" "22" "$myHOST" "--script=ssh-brute" +fuSCAN "120" "23,2323,2324" "$myHOST" "--script=telnet-encryption,telnet-ntlm-info -sC -sS -sV --version-light" +fuSCAN "120" "25" "$myHOST" "--script=smtp* -sC -sS -sV" +fuSCAN "180" "42" "$myHOST" "-sC -sS -sV" +fuSCAN "120" "69" "$myHOST" "--script=tftp-enum -sU" +fuSCAN "120" "80,81,8080,8443" "$myHOST" "-sC -sS -sV" +fuSCAN "120" "110,995" "$myHOST" "--script=pop3-capabilities,pop3-ntlm-info -sC -sS -sV --version-light" +fuSCAN "30" "110,995" "$myHOST" "--script=pop3-brute -sS" +fuSCAN "120" "143,993" "$myHOST" "--script=imap-capabilities,imap-ntlm-info -sC -sS -sV --version-light" +fuSCAN "30" "143,993" "$myHOST" "--script=imap-brute -sS" +fuSCAN "240" "445" "$myHOST" "--script=smb-vuln* -sS -sU" +fuSCAN "120" "502" "$myHOST" "--script=modbus-discover -sS -sU" +fuSCAN "120" "623" "$myHOST" "--script=ipmi-cipher-zero,ipmi-version,supermicro-ipmi -sS -sU" +fuSCAN "30" "623" "$myHOST" "--script=ipmi-brute -sS -sU" +fuSCAN "120" "1433" "$myHOST" "--script=ms-sql* -sS" +fuSCAN "120" "1723" "$myHOST" "--script=pptp-version -sS" +fuSCAN "120" "1883" "$myHOST" "--script=mqtt-subscribe -sS" +fuSCAN "120" "2404" "$myHOST" "--script=iec-identify -sS" +fuSCAN "120" "3306" "$myHOST" "--script=mysql-vuln* -sC -sS -sV" +fuSCAN "120" "3389" "$myHOST" "--script=rdp* -sC -sS -sV" +fuSCAN "120" "5000" "$myHOST" "--script=*upnp* -sS -sU" +fuSCAN "120" "5060,5061" "$myHOST" "--script=sip-call-spoof,sip-enum-users,sip-methods -sS -sU" +fuSCAN "120" "5900" "$myHOST" "--script=vnc-info,vnc-title,realvnc-auth-bypass -sS" +fuSCAN "120" "27017" "$myHOST" "--script=mongo* -sS" +fuSCAN "120" "47808" "$myHOST" 
"--script=bacnet* -sS" +wait +reset +echo "Done." diff --git a/docker/tpotinit/dist/bin/deprecated/import_kibana-objects.sh b/docker/tpotinit/dist/bin/deprecated/import_kibana-objects.sh new file mode 100755 index 00000000..cf5a6aa0 --- /dev/null +++ b/docker/tpotinit/dist/bin/deprecated/import_kibana-objects.sh @@ -0,0 +1,126 @@ +#!/bin/bash +# Import Kibana objects +# Make sure ES is available +myES="http://127.0.0.1:64298/" +myKIBANA="http://127.0.0.1:64296/" +myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green) +if ! [ "$myESSTATUS" = "1" ] + then + echo "### Elasticsearch is not available, try starting via 'systemctl start tpot'." + exit + else + echo "### Elasticsearch is available, now continuing." + echo +fi + +# Set vars +myDUMP=$1 +myCOL1="" +myCOL0="" + +# Let's ensure normal operation on exit or if interrupted ... +function fuCLEANUP { + rm -rf patterns/ dashboards/ visualizations/ searches/ configs/ +} +trap fuCLEANUP EXIT + +# Check if parameter is given and file exists +if [ "$myDUMP" = "" ]; + then + echo $myCOL1"### Please provide a backup file name."$myCOL0 + echo $myCOL1"### import_kibana-objects.sh "$myCOL0 + echo + exit +fi +if ! [ -a $myDUMP ]; + then + echo $myCOL1"### File not found."$myCOL0 + exit +fi + +# Unpack tar +tar xvfz $myDUMP > /dev/null + +# Restore index patterns +myINDEXID=$(ls patterns/*.json | cut -c 10- | rev | cut -c 6- | rev) +myINDEXCOUNT=$(cat patterns/$myINDEXID.json | tr '\\' '\n' | grep -E "scripted|url" | wc -w) +echo $myCOL1"### Now importing"$myCOL0 $myINDEXCOUNT $myCOL1"index pattern fields." $myCOL0 +curl -s -XDELETE ''$myKIBANA'api/saved_objects/index-pattern/logstash-*' -H "Content-Type: application/json" -H "kbn-xsrf: true" > /dev/null +curl -s -XDELETE ''$myKIBANA'api/saved_objects/index-pattern/'$myINDEXID'' -H "Content-Type: application/json" -H "kbn-xsrf: true" > /dev/null +curl -s -XPOST ''$myKIBANA'api/saved_objects/index-pattern/'$myINDEXID'' -H "Content-Type: application/json" -H "kbn-xsrf: true" -d @patterns/$myINDEXID.json > /dev/null & +echo + +# Restore dashboards +myDASHBOARDS=$(ls dashboards/*.json | cut -c 12- | rev | cut -c 6- | rev) +echo $myCOL1"### Now importing "$myCOL0$(echo $myDASHBOARDS | wc -w)$myCOL1 "dashboards." $myCOL0 +for i in $myDASHBOARDS; + do + curl -s -XDELETE ''$myKIBANA'api/saved_objects/dashboard/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" > /dev/null & + done; +wait +for i in $myDASHBOARDS; + do + echo $myCOL1"###### "$i $myCOL0 + curl -s -XPOST ''$myKIBANA'api/saved_objects/dashboard/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" -d @dashboards/$i.json > /dev/null & + done; +wait +echo + +# Restore visualizations +myVISUALIZATIONS=$(ls visualizations/*.json | cut -c 16- | rev | cut -c 6- | rev) +echo $myCOL1"### Now importing "$myCOL0$(echo $myVISUALIZATIONS | wc -w)$myCOL1 "visualizations." $myCOL0 +for i in $myVISUALIZATIONS; + do + curl -s -XDELETE ''$myKIBANA'api/saved_objects/visualization/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" > /dev/null & + done; +wait +for i in $myVISUALIZATIONS; + do + echo $myCOL1"###### "$i $myCOL0 + curl -s -XPOST ''$myKIBANA'api/saved_objects/visualization/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" -d @visualizations/$i.json > /dev/null & + done; +wait +echo + +# Restore searches +mySEARCHES=$(ls searches/*.json | cut -c 10- | rev | cut -c 6- | rev) +echo $myCOL1"### Now importing "$myCOL0$(echo $mySEARCHES | wc -w)$myCOL1 "searches." 
$myCOL0 +for i in $mySEARCHES; + do + curl -s -XDELETE ''$myKIBANA'api/saved_objects/search/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" > /dev/null & + done; +wait +for i in $mySEARCHES; + do + echo $myCOL1"###### "$i $myCOL0 + curl -s -XPOST ''$myKIBANA'api/saved_objects/search/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" -d @searches/$i.json > /dev/null & + done; +echo +wait + +# Restore configs +myCONFIGS=$(ls configs/*.json | cut -c 9- | rev | cut -c 6- | rev) +echo $myCOL1"### Now importing "$myCOL0$(echo $myCONFIGS | wc -w)$myCOL1 "configs." $myCOL0 +for i in $myCONFIGS; + do + curl -s -XDELETE ''$myKIBANA'api/saved_objects/configs/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" > /dev/null & + done; +wait +for i in $myCONFIGS; + do + echo $myCOL1"###### "$i $myCOL0 + curl -s -XPOST ''$myKIBANA'api/saved_objects/configs/'$i'' -H "Content-Type: application/json" -H "kbn-xsrf: true" -d @configs/$i.json > /dev/null & + done; +echo +wait + +# Stats +echo +echo $myCOL1"### Statistics" +echo $myCOL1"###### Imported"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0 +echo $myCOL1"###### Imported"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"dashboards." $myCOL0 +echo $myCOL1"###### Imported"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1"visualizations." $myCOL0 +echo $myCOL1"###### Imported"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searches." $myCOL0 +echo $myCOL1"###### Imported"$myCOL0 $(echo $myCONFIGS | wc -w) $myCOL1"configs." $myCOL0 +echo + diff --git a/docker/tpotinit/dist/bin/dps.sh b/docker/tpotinit/dist/bin/dps.sh new file mode 100755 index 00000000..b5969435 --- /dev/null +++ b/docker/tpotinit/dist/bin/dps.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +# Run as root only. +myWHOAMI=$(whoami) +if [ "$myWHOAMI" != "root" ] + then + echo "Need to run as root ..." 
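+ # dps.sh needs root for the docker / systemctl queries below. Given a numeric
+ # argument between 1 and 999 it re-executes itself under watch(1) with that
+ # refresh interval, e.g. "dps.sh 5" redraws the container status every 5 seconds.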
+ exit
+fi
+
+myPARAM="$1"
+if [[ $myPARAM =~ ^([1-9]|[1-9][0-9]|[1-9][0-9][0-9])$ ]];
+ then
+ watch --color -n $myPARAM "$0"
+ exit
+fi
+
+# Show current status of T-Pot containers
+myCONTAINERS="$(cat /opt/tpot/etc/tpot.yml | grep -v '#' | grep container_name | cut -d: -f2 | sort | tr -d " ")"
+myRED=""
+myGREEN=""
+myBLUE=""
+myWHITE=""
+myMAGENTA=""
+
+# Blackhole Status
+myBLACKHOLE_STATUS=$(ip r | grep "blackhole" -c)
+if [ "$myBLACKHOLE_STATUS" -gt "500" ];
+ then
+ myBLACKHOLE_STATUS="${myGREEN}ENABLED"
+ else
+ myBLACKHOLE_STATUS="${myRED}DISABLED"
+fi
+
+function fuGETTPOT_STATUS {
+# T-Pot Status
+myTPOT_STATUS=$(systemctl status tpot | grep "Active" | awk '{ print $2 }')
+if [ "$myTPOT_STATUS" == "active" ];
+ then
+ echo "${myGREEN}ACTIVE"
+ else
+ echo "${myRED}INACTIVE"
+fi
+}
+
+function fuGETSTATUS {
+grc --colour=on docker ps -f status=running -f status=exited --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep -v "NAME" | sort
+}
+
+function fuGETSYS {
+printf "[ ========| System |======== ]\n"
+printf "${myBLUE}%+11s ${myWHITE}%-20s\n" "DATE: " "$(date)"
+printf "${myBLUE}%+11s ${myWHITE}%-20s\n" "UPTIME: " "$(grc --colour=on uptime)"
+printf "${myMAGENTA}%+11s %-20s\n" "T-POT: " "$(fuGETTPOT_STATUS)"
+printf "${myMAGENTA}%+11s %-20s\n" "BLACKHOLE: " "$myBLACKHOLE_STATUS${myWHITE}"
+echo
+}
+
+ myDPS=$(fuGETSTATUS)
+ myDPSNAMES=$(echo "$myDPS" | awk '{ print $1 }' | sort)
+ fuGETSYS
+ printf "%-21s %-28s %s\n" "NAME" "STATUS" "PORTS"
+ if [ "$myDPS" != "" ];
+ then
+ echo "$myDPS"
+ fi
+ for i in $myCONTAINERS; do
+ myAVAIL=$(echo "$myDPSNAMES" | grep -o "$i" | uniq | wc -l)
+ if [ "$myAVAIL" = "0" ];
+ then
+ printf "%-28s %-28s\n" "$myRED$i" "DOWN$myWHITE"
+ fi
+ done
diff --git a/docker/tpotinit/dist/bin/dump_es.sh b/docker/tpotinit/dist/bin/dump_es.sh
new file mode 100755
index 00000000..a6e17895
--- /dev/null
+++ b/docker/tpotinit/dist/bin/dump_es.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# Dump all ES data
+# Make sure ES is available
+myES="http://127.0.0.1:64298/"
+myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c "green\|yellow")
+if ! [ "$myESSTATUS" = "1" ]
+ then
+ echo "### Elasticsearch is not available, try starting via 'systemctl start tpot'."
+ exit
+ else
+ echo "### Elasticsearch is available, now continuing."
+ echo
+fi
+
+# Let's ensure normal operation on exit or if interrupted ...
+function fuCLEANUP {
+ rm -rf tmp
+}
+trap fuCLEANUP EXIT
+
+# Set vars
+myDATE=$(date +%Y%m%d%H%M)
+myINDICES=$(curl -s -XGET ''$myES'_cat/indices/logstash-*' | awk '{ print $3 }' | sort | grep -v 1970)
+myINDICES+=" .kibana"
+myCOL1=""
+myCOL0=""
+
+# Dumping Kibana and Logstash data
+echo $myCOL1"### The following indices will be dumped: "$myCOL0
+echo $myINDICES
+echo
+
+mkdir tmp
+for i in $myINDICES;
+ do
+ echo $myCOL1"### Now dumping: "$i $myCOL0
+ elasticdump --input=$myES$i --output="tmp/"$i --limit 7500
+ echo $myCOL1"### Now compressing: tmp/$i" $myCOL0
+ gzip -f "tmp/"$i
+ done;
+
+# Build tar archive
+echo $myCOL1"### Now building tar archive: es_dump_"$myDATE".tar" $myCOL0
+tar cvf es_dump_$myDATE.tar tmp/.
+echo $myCOL1"### Done."$myCOL0
diff --git a/docker/tpotinit/dist/bin/hpfeeds_optin.sh b/docker/tpotinit/dist/bin/hpfeeds_optin.sh
new file mode 100755
index 00000000..b3821522
--- /dev/null
+++ b/docker/tpotinit/dist/bin/hpfeeds_optin.sh
@@ -0,0 +1,134 @@
+#!/bin/bash
+
+# Run as root only.
+myWHOAMI=$(whoami)
+if [ "$myWHOAMI" != "root" ]
+ then
+ echo "Need to run as root ..."
+ exit +fi + +myTPOTYMLFILE="/opt/tpot/etc/tpot.yml" + +function fuGENERIC () { +echo +echo "You chose generic, please provide all the details of the broker" +echo +myENABLE="true" +read -p "Host URL: " myHOST +read -p "Port: " myPORT +read -p "Channel: " myCHANNEL +echo "For generic providers set this to 'false'" +echo "If you received a CA certficate mount it into the ewsposter container by modifying $myTPOTYMLFILE" +read -p "TLS - 'false' or path to CA in container: " myCERT +read -p "Ident: " myIDENT +read -p "Secret: " mySECRET +read -p "Format ews (xml) or json: " myFORMAT +} + +function fuOPTOUT () { +echo +while [ 1 != 2 ] + do + read -s -n 1 -p "You chose to opt out (y/n)? " mySELECT + echo $mySELECT + case "$mySELECT" in + [y,Y]) + echo "Opt out." + break + ;; + [n,N]) + echo "Aborted." + exit + ;; + esac +done +myENABLE="false" +myHOST="host" +myPORT="port" +myCHANNEL="channels" +myCERT="false" +myIDENT="user" +mySECRET="secret" +myFORMAT="json" +} + +function fuWRITETOFILE () { +if [ -f '/data/ews/conf/hpfeeds.cfg' ]; then + echo "Creating backup of current config in /data/ews/conf/hpfeeds.cfg.old" + mv /data/ews/conf/hpfeeds.cfg /data/ews/conf/hpfeeds.cfg.old +fi +echo "Storing new config in /data/ews/conf/hpfeeds.cfg" +cat >> /data/ews/conf/hpfeeds.cfg <" + echo + exit +fi +} + +function fuGETPORTS { +myDOCKERCOMPOSEUDPPORTS=$(cat $myDOCKERCOMPOSEYML | grep "udp" | tr -d '"\|#\-' | cut -d ":" -f2 | cut -d "/" -f1 | sort -gu) +myDOCKERCOMPOSEPORTS=$(cat $myDOCKERCOMPOSEYML | yq -r '.services[].ports' | grep ':' | sed -e s/127.0.0.1// | tr -d '", ' | sed -e s/^:// | cut -f1 -d ':' | grep -v "6429\|6430" | sort -gu) +myUDPPORTS=$(for i in $myDOCKERCOMPOSEUDPPORTS; do echo -n "U:$i,"; done) +myPORTS=$(for i in $myDOCKERCOMPOSEPORTS; do echo -n "T:$i,"; done) +} + +# Main +fuGETPORTS +fuGOTROOT +fuCHECKDEPS +fuCHECKFORARGS +echo +echo "Starting scan on all UDP / TCP ports defined in /opt/tpot/etc/tpot.yml ..." +nmap -sV -sC -v -p $myPORTS $1 & +nmap -sU -sV -sC -v -p $myUDPPORTS $1 & +echo +wait +echo "Done." +echo + diff --git a/docker/tpotinit/dist/bin/myip.sh b/docker/tpotinit/dist/bin/myip.sh new file mode 100755 index 00000000..e464b421 --- /dev/null +++ b/docker/tpotinit/dist/bin/myip.sh @@ -0,0 +1,103 @@ +#!/bin/bash + +## Get my external IP + +timeout=2 # seconds to wait for a reply before trying next server +verbose=1 # prints which server was used to STDERR + +dnslist=( + "dig +short myip.opendns.com @resolver1.opendns.com" + "dig +short myip.opendns.com @resolver2.opendns.com" + "dig +short myip.opendns.com @resolver3.opendns.com" + "dig +short myip.opendns.com @resolver4.opendns.com" + "dig +short -4 -t a whoami.akamai.net @ns1-1.akamaitech.net" + "dig +short whoami.akamai.net @ns1-1.akamaitech.net" +) + +httplist=( + alma.ch/myip.cgi + api.infoip.io/ip + api.ipify.org + bot.whatismyipaddress.com + canhazip.com + checkip.amazonaws.com + eth0.me + icanhazip.com + ident.me + ipecho.net/plain + ipinfo.io/ip + ipof.in/txt + ip.tyk.nu + l2.io/ip + smart-ip.net/myip + wgetip.com + whatismyip.akamai.com +) + +# function to check for valid ip +function valid_ip() +{ + local ip=$1 + local stat=1 + + if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then + OIFS=$IFS + IFS='.' + ip=($ip) + IFS=$OIFS + [[ ${ip[0]} -le 255 && ${ip[1]} -le 255 \ + && ${ip[2]} -le 255 && ${ip[3]} -le 255 ]] + stat=$? 
+ fi + return $stat +} + +# function to shuffle the global array "array" +shuffle() { + local i tmp size max rand + size=${#array[*]} + max=$(( 32768 / size * size )) + for ((i=size-1; i>0; i--)); do + while (( (rand=$RANDOM) >= max )); do :; done + rand=$(( rand % (i+1) )) + tmp=${array[i]} array[i]=${array[rand]} array[rand]=$tmp + done +} +# if we have dig and a list of dns methods, try that first +if hash dig 2>/dev/null && [ ${#dnslist[*]} -gt 0 ]; then + eval array=( \"\${dnslist[@]}\" ) + shuffle + for cmd in "${array[@]}"; do + [ "$verbose" == 1 ] && echo Trying: $cmd 1>&2 + ip=$(timeout $timeout $cmd) + if [ -n "$ip" ]; then + if valid_ip $ip; then + echo $ip + exit + fi + fi + done +fi +# if we haven't succeeded with DNS, try HTTP +if [ ${#httplist[*]} == 0 ]; then + echo "No hosts in httplist array!" >&2 + exit 1 +fi +# use curl or wget, depending on which one we find +curl_or_wget=$(if hash curl 2>/dev/null; then echo "curl -s"; elif hash wget 2>/dev/null; then echo "wget -qO-"; fi); +if [ -z "$curl_or_wget" ]; then + echo "Neither curl nor wget found. Cannot use http method." >&2 + exit 1 +fi +eval array=( \"\${httplist[@]}\" ) +shuffle +for url in "${array[@]}"; do + [ "$verbose" == 1 ] && echo Trying: $curl_or_wget "$url" 1>&2 + ip=$(timeout $timeout $curl_or_wget "$url") + if [ -n "$ip" ]; then + if valid_ip $ip; then + echo $ip + exit + fi + fi +done diff --git a/docker/tpotinit/dist/bin/mytopips.sh b/docker/tpotinit/dist/bin/mytopips.sh new file mode 100755 index 00000000..e343ff02 --- /dev/null +++ b/docker/tpotinit/dist/bin/mytopips.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Make sure ES is available +myES="http://127.0.0.1:64298/" +myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green) +if ! [ "$myESSTATUS" = "1" ] + then + echo "### Elasticsearch is not available, try starting via 'systemctl start elk'." + exit 1 + else + echo "### Elasticsearch is available, now continuing." + echo +fi + +function fuMYTOPIPS { +curl -s -XGET $myES"_search" -H 'Content-Type: application/json' -d' +{ + "aggs": { + "ips": { + "terms": { "field": "src_ip.keyword", "size": 100 } + } + }, + "size" : 0 +}' +} + +echo "### Aggregating top 100 source IPs in ES" +fuMYTOPIPS | jq '.aggregations.ips.buckets[].key' | tr -d '"' diff --git a/docker/tpotinit/dist/bin/restore_es.sh b/docker/tpotinit/dist/bin/restore_es.sh new file mode 100755 index 00000000..ffc5f031 --- /dev/null +++ b/docker/tpotinit/dist/bin/restore_es.sh @@ -0,0 +1,95 @@ +#/bin/bash +# Restore folder based ES backup +# Make sure ES is available +myES="http://127.0.0.1:64298/" +myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c "green\|yellow") +if ! [ "$myESSTATUS" = "1" ] + then + echo "### Elasticsearch is not available, try starting via 'systemctl start tpot'." + exit + else + echo "### Elasticsearch is available, now continuing." +fi + +# Let's ensure normal operation on exit or if interrupted ... +function fuCLEANUP { + rm -rf tmp +} +trap fuCLEANUP EXIT + +# Set vars +myDUMP=$1 +myCOL1="" +myCOL0="" + +# Check if parameter is given and file exists +if [ "$myDUMP" = "" ]; + then + echo $myCOL1"### Please provide a backup file name."$myCOL0 + echo $myCOL1"### restore-elk.sh "$myCOL0 + echo + exit +fi +if ! 
[ -a $myDUMP ];
+ then
+ echo $myCOL1"### File not found."$myCOL0
+ exit
+fi
+
+# Unpack tar archive
+echo $myCOL1"### Now unpacking tar archive: "$myDUMP $myCOL0
+tar xvf $myDUMP
+
+# Build indices list
+myINDICES="$(ls tmp/logstash*.gz | cut -c 5- | rev | cut -c 4- | rev)"
+myINDICES+=" .kibana"
+echo $myCOL1"### The following indices will be restored: "$myCOL0
+echo $myINDICES
+echo
+
+# Force single seat template for everything
+echo -n $myCOL1"### Forcing single seat template: "$myCOL0
+curl -s -XPUT ''$myES'_template/.*' -H 'Content-Type: application/json' -d'
+{ "index_patterns": ".*",
+ "order": 1,
+ "settings":
+ {
+ "number_of_shards": 1,
+ "number_of_replicas": 0
+ }
+}'
+echo
+
+# Set logstash template
+echo -n $myCOL1"### Setting up logstash template: "$myCOL0
+curl -s -XPUT ''$myES'_template/logstash' -H 'Content-Type: application/json' -d'
+{
+ "index_patterns": "logstash-*",
+ "settings" : {
+ "index" : {
+ "number_of_shards": 1,
+ "number_of_replicas": 0,
+ "mapping" : {
+ "total_fields" : {
+ "limit" : "2000"
+ }
+ }
+ }
+ }
+}'
+echo
+
+# Restore indices
+curl -s -X DELETE ''$myES'.kibana*' > /dev/null
+for i in $myINDICES;
+ do
+ # Delete index if it already exists
+ curl -s -X DELETE $myES$i > /dev/null
+ echo $myCOL1"### Now uncompressing: tmp/$i.gz" $myCOL0
+ gunzip -f tmp/$i.gz
+ # Restore index to ES
+ echo $myCOL1"### Now restoring: "$i $myCOL0
+ elasticdump --input=tmp/$i --output=$myES$i --limit 7500
+ rm tmp/$i
+ done;
+echo $myCOL1"### Done."$myCOL0
diff --git a/docker/tpotinit/dist/bin/rules.sh b/docker/tpotinit/dist/bin/rules.sh
new file mode 100755
index 00000000..52eb7e48
--- /dev/null
+++ b/docker/tpotinit/dist/bin/rules.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+### Vars, Ports for Standard services
+myHOSTPORTS="7634 64294 64295"
+myDOCKERCOMPOSEYML="$1"
+myRULESFUNCTION="$2"
+
+function fuCHECKFORARGS {
+### Check if args are present, if not throw error
+
+if [ "$myDOCKERCOMPOSEYML" != "" ] && ([ "$myRULESFUNCTION" == "set" ] || [ "$myRULESFUNCTION" == "unset" ]);
+ then
+ echo "All arguments met. Continuing."
+ else
+ echo "Usage: rules.sh <[set, unset]>"
+ exit
+fi
+}
+
+function fuNFQCHECK {
+### Check if honeytrap or glutton is actively enabled in docker-compose.yml
+
+myNFQCHECK=$(grep -e '^\s*honeytrap:\|^\s*glutton:' $myDOCKERCOMPOSEYML | tr -d ': ' | uniq)
+if [ "$myNFQCHECK" == "" ];
+ then
+ echo "No NFQ related honeypot detected, no iptables-legacy rules needed. Exiting."
+ exit
+ else
+ echo "Detected $myNFQCHECK as NFQ based honeypot, iptables-legacy rules needed. Continuing."
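+ # e.g. a compose file with a top-level "honeytrap:" service yields
+ # myNFQCHECK="honeytrap" and selects the INPUT chain rules below, while
+ # "glutton:" selects the raw table PREROUTING variant instead.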
+fi +} + +function fuGETPORTS { +### Get ports from docker-compose.yml + +myDOCKERCOMPOSEPORTS=$(cat $myDOCKERCOMPOSEYML | yq -r '.services[].ports' | grep ':' | sed -e s/127.0.0.1// | tr -d '", ' | sed -e s/^:// | cut -f1 -d ':' ) +myDOCKERCOMPOSEPORTS+=" $myHOSTPORTS" +myRULESPORTS=$(for i in $myDOCKERCOMPOSEPORTS; do echo $i; done | sort -gu) +echo "Setting up / removing these ports:" +echo "$myRULESPORTS" +} + +function fuSETRULES { +### Setting up iptables-legacy rules for honeytrap +if [ "$myNFQCHECK" == "honeytrap" ]; + then + /usr/sbin/iptables-legacy -w -A INPUT -s 127.0.0.1 -j ACCEPT + /usr/sbin/iptables-legacy -w -A INPUT -d 127.0.0.1 -j ACCEPT + + for myPORT in $myRULESPORTS; do + /usr/sbin/iptables-legacy -w -A INPUT -p tcp --dport $myPORT -j ACCEPT + done + + /usr/sbin/iptables-legacy -w -A INPUT -p tcp --syn -m state --state NEW -j NFQUEUE +fi + +### Setting up iptables-legacy rules for glutton +if [ "$myNFQCHECK" == "glutton" ]; + then + /usr/sbin/iptables-legacy -w -t raw -A PREROUTING -s 127.0.0.1 -j ACCEPT + /usr/sbin/iptables-legacy -w -t raw -A PREROUTING -d 127.0.0.1 -j ACCEPT + + for myPORT in $myRULESPORTS; do + /usr/sbin/iptables-legacy -w -t raw -A PREROUTING -p tcp --dport $myPORT -j ACCEPT + done + # No need for NFQ forwarding, such rules are set up by glutton +fi +} + +function fuUNSETRULES { +### Removing iptables-legacy rules for honeytrap +if [ "$myNFQCHECK" == "honeytrap" ]; + then + /usr/sbin/iptables-legacy -w -D INPUT -s 127.0.0.1 -j ACCEPT + /usr/sbin/iptables-legacy -w -D INPUT -d 127.0.0.1 -j ACCEPT + + for myPORT in $myRULESPORTS; do + /usr/sbin/iptables-legacy -w -D INPUT -p tcp --dport $myPORT -j ACCEPT + done + + /usr/sbin/iptables-legacy -w -D INPUT -p tcp --syn -m state --state NEW -j NFQUEUE +fi + +### Removing iptables-legacy rules for glutton +if [ "$myNFQCHECK" == "glutton" ]; + then + /usr/sbin/iptables-legacy -w -t raw -D PREROUTING -s 127.0.0.1 -j ACCEPT + /usr/sbin/iptables-legacy -w -t raw -D PREROUTING -d 127.0.0.1 -j ACCEPT + + for myPORT in $myRULESPORTS; do + /usr/sbin/iptables-legacy -w -t raw -D PREROUTING -p tcp --dport $myPORT -j ACCEPT + done + # No need for removing NFQ forwarding, such rules are removed by glutton +fi +} + +# Main +fuCHECKFORARGS +fuNFQCHECK +fuGETPORTS + +if [ "$myRULESFUNCTION" == "set" ]; + then + fuSETRULES + else + fuUNSETRULES +fi diff --git a/docker/tpotinit/dist/bin/setup_builder.sh b/docker/tpotinit/dist/bin/setup_builder.sh new file mode 100755 index 00000000..93aac477 --- /dev/null +++ b/docker/tpotinit/dist/bin/setup_builder.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +# Got root? +myWHOAMI=$(whoami) +if [ "$myWHOAMI" != "root" ] + then + echo "Need to run as root ..." + exit +fi + +# Only run with command switch +if [ "$1" != "-y" ]; then + echo "### Setting up docker for Multi Arch Builds." + echo "### Use on x64 only!" + echo "### Run with -y to install!" 
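+ # e.g.: sudo ./setup_builder.sh -y
+ # This downloads the buildx CLI plugin, creates and selects a dedicated
+ # builder and installs the arm64 binfmt emulators, as set up step by step
+ # below.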
+ echo + exit +fi + +# Main +mkdir -p /root/.docker/cli-plugins/ +cd /root/.docker/cli-plugins/ +wget https://github.com/docker/buildx/releases/download/v0.10.0/buildx-v0.10.0.linux-amd64 -O docker-buildx +chmod +x docker-buildx + +docker buildx ls + +# We need to create a new builder as the default one cannot handle multi-arch builds +# https://docs.docker.com/desktop/multi-arch/ +docker buildx create --name mybuilder + +# Set as default +docker buildx use mybuilder + +# We need to install emulators, arm64 should be fine for now +# https://github.com/tonistiigi/binfmt/ +docker run --privileged --rm tonistiigi/binfmt --install arm64 + +# Check if everything is setup correctly +docker buildx inspect --bootstrap +echo +echo "### Done." +echo +echo "Example: docker buildx build --platform linux/amd64,linux/arm64 -t username/demo:latest --push ." +echo "Docs: https://docs.docker.com/desktop/multi-arch/" diff --git a/docker/tpotinit/dist/bin/tpdclean.sh b/docker/tpotinit/dist/bin/tpdclean.sh new file mode 100755 index 00000000..7ae50398 --- /dev/null +++ b/docker/tpotinit/dist/bin/tpdclean.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# T-Pot Compose and Container Cleaner +# Set colors +myRED="" +myGREEN="" +myWHITE="" + +# Only run with command switch +if [ "$1" != "-y" ]; then + echo $myRED"### WARNING"$myWHITE + echo "" + echo $myRED"###### This script is only intended for the tpot.service."$myWHITE + echo $myRED"###### Run first and then ."$myWHITE + echo $myRED"###### Be aware, all T-Pot container volumes and images will be removed."$myWHITE + echo "" + echo $myRED"### WARNING "$myWHITE + echo + exit +fi + +# Remove old containers, images and volumes +docker-compose -f /opt/tpot/etc/tpot.yml down -v >> /dev/null 2>&1 +docker-compose -f /opt/tpot/etc/tpot.yml rm -v >> /dev/null 2>&1 +docker network rm $(docker network ls -q) >> /dev/null 2>&1 +docker volume rm $(docker volume ls -q) >> /dev/null 2>&1 +docker rm -v $(docker ps -aq) >> /dev/null 2>&1 +docker rmi $(docker images | grep "" | awk '{print $3}') >> /dev/null 2>&1 +docker rmi $(docker images | grep "2203" | awk '{print $3}') >> /dev/null 2>&1 +exit 0 diff --git a/docker/tpotinit/dist/bin/tped.sh b/docker/tpotinit/dist/bin/tped.sh new file mode 100755 index 00000000..1eadbdff --- /dev/null +++ b/docker/tpotinit/dist/bin/tped.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +# Run as root only. +myWHOAMI=$(whoami) +if [ "$myWHOAMI" != "root" ] + then + echo "Need to run as root ..." + exit +fi + +# set backtitle, get filename +myBACKTITLE="T-Pot Edition Selection Tool" +myYMLS=$(cd /opt/tpot/etc/compose/ && ls -1 *.yml) +myLINK="/opt/tpot/etc/tpot.yml" + +# Let's load docker images in parallel +function fuPULLIMAGES { +local myTPOTCOMPOSE="/opt/tpot/etc/tpot.yml" +for name in $(cat $myTPOTCOMPOSE | grep -v '#' | grep image | cut -d'"' -f2 | uniq) + do + docker pull $name & + done +wait +echo +} + +# setup menu +for i in $myYMLS; + do + myITEMS+="$i $(echo $i | cut -d "." -f1 | tr [:lower:] [:upper:]) " +done +myEDITION=$(dialog --backtitle "$myBACKTITLE" --menu "Select T-Pot Edition" 18 50 1 $myITEMS 3>&1 1>&2 2>&3 3>&-) +if [ "$myEDITION" == "" ]; + then + echo "Have a nice day!" + exit +fi +dialog --backtitle "$myBACKTITLE" --title "[ Activate now? ]" --yesno "\n$myEDITION" 7 50 +myOK=$? +if [ "$myOK" == "0" ]; + then + echo "OK - Activating and downloading latest images." 
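+ # Switching editions means: stop tpot, remove any running containers,
+ # repoint the /opt/tpot/etc/tpot.yml symlink at the chosen compose file,
+ # pre-pull its images in parallel and start tpot again: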
+ systemctl stop tpot + if [ "$(docker ps -aq)" != "" ]; + then + docker stop $(docker ps -aq) + docker rm $(docker ps -aq) + fi + rm -f $myLINK + ln -s /opt/tpot/etc/compose/$myEDITION $myLINK + fuPULLIMAGES + systemctl start tpot + echo "Done. Use \"dps.sh\" for monitoring" + else + echo "Have a nice day!" +fi diff --git a/docker/tpotinit/dist/bin/unlock_es.sh b/docker/tpotinit/dist/bin/unlock_es.sh new file mode 100755 index 00000000..606d85eb --- /dev/null +++ b/docker/tpotinit/dist/bin/unlock_es.sh @@ -0,0 +1,19 @@ +#/bin/bash +# Unlock all ES indices for read / write mode +# Useful in cases where ES locked all indices after disk quota has been reached +# Make sure ES is available +myES="http://127.0.0.1:64298/" +myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c "green\|yellow") +if ! [ "$myESSTATUS" = "1" ] + then + echo "### Elasticsearch is not available, try starting via 'systemctl start tpot'." + exit + else + echo "### Elasticsearch is available, now continuing." + echo +fi + +echo "### Trying to unlock all ES indices for read / write operation: " +curl -XPUT -H "Content-Type: application/json" ''$myES'_all/_settings' -d '{"index.blocks.read_only_allow_delete": null}' +echo + diff --git a/docker/tpotinit/dist/bin/updateip.sh b/docker/tpotinit/dist/bin/updateip.sh new file mode 100755 index 00000000..c63a3e64 --- /dev/null +++ b/docker/tpotinit/dist/bin/updateip.sh @@ -0,0 +1,89 @@ +#!/bin/bash +# Let's add the first local ip to the /etc/issue and external ip to ews.ip file +# If the external IP cannot be detected, the internal IP will be inherited. +source /etc/environment +myCHECKIFSENSOR=$(head -n 1 /opt/tpot/etc/tpot.yml | grep "Sensor" | wc -l) +myUUID=$(lsblk -o MOUNTPOINT,UUID | grep -e "^/ " | awk '{ print $2 }') +myLOCALIP=$(hostname -I | awk '{ print $1 }') +myEXTIP=$(/opt/tpot/bin/myip.sh) +if [ "$myEXTIP" = "" ]; + then + myEXTIP=$myLOCALIP + myEXTIP_LAT="49.865835022498125" + myEXTIP_LONG="8.62606472775735" + else + myEXTIP_LOC=$(curl -s ipinfo.io/$myEXTIP/loc) + myEXTIP_LAT=$(echo "$myEXTIP_LOC" | cut -f1 -d",") + myEXTIP_LONG=$(echo "$myEXTIP_LOC" | cut -f2 -d",") +fi + +# Load Blackhole routes if enabled +myBLACKHOLE_FILE1="/etc/blackhole/mass_scanner.txt" +myBLACKHOLE_FILE2="/etc/blackhole/mass_scanner_cidr.txt" +if [ -f "$myBLACKHOLE_FILE1" ] || [ -f "$myBLACKHOLE_FILE2" ]; + then + /opt/tpot/bin/blackhole.sh add +fi + +myBLACKHOLE_STATUS=$(ip r | grep "blackhole" -c) +if [ "$myBLACKHOLE_STATUS" -gt "500" ]; + then + myBLACKHOLE_STATUS="| BLACKHOLE: [ ENABLED ]" + else + myBLACKHOLE_STATUS="| BLACKHOLE: [ DISABLED ]" +fi + +mySSHUSER=$(cat /etc/passwd | grep 1000 | cut -d ':' -f1) + +# Export +export myUUID +export myLOCALIP +export myEXTIP +export myEXTIP_LAT +export myEXTIP_LONG +export myBLACKHOLE_STATUS +export mySSHUSER + +# Build issue +echo "" > /etc/issue +toilet -f ivrit -F metal --filter border:metal "T-Pot 22.04" | sed 's/\\/\\\\/g' >> /etc/issue +echo >> /etc/issue +echo ",---- [ \n ] [ \d ] [ \t ]" >> /etc/issue +echo "|" >> /etc/issue +echo "| IP: $myLOCALIP ($myEXTIP)" >> /etc/issue +echo "| SSH: ssh -l tsec -p 64295 $myLOCALIP" >> /etc/issue +if [ "$myCHECKIFSENSOR" == "0" ]; + then + echo "| WEB: https://$myLOCALIP:64297" >> /etc/issue +fi +echo "| ADMIN: https://$myLOCALIP:64294" >> /etc/issue +echo "$myBLACKHOLE_STATUS" >> /etc/issue +echo "|" >> /etc/issue +echo "\`----" >> /etc/issue +echo >> /etc/issue +tee /data/ews/conf/ews.ip << EOF +[MAIN] +ip = $myEXTIP +EOF +tee /opt/tpot/etc/compose/elk_environment << EOF 
+HONEY_UUID=$myUUID +MY_EXTIP=$myEXTIP +MY_EXTIP_LAT=$myEXTIP_LAT +MY_EXTIP_LONG=$myEXTIP_LONG +MY_INTIP=$myLOCALIP +MY_HOSTNAME=$HOSTNAME +EOF + +if [ -s "/data/elk/logstash/ls_environment" ]; + then + source /data/elk/logstash/ls_environment + tee -a /opt/tpot/etc/compose/elk_environment << EOF +MY_TPOT_TYPE=$MY_TPOT_TYPE +MY_SENSOR_PRIVATEKEYFILE=$MY_SENSOR_PRIVATEKEYFILE +MY_HIVE_USERNAME=$MY_HIVE_USERNAME +MY_HIVE_IP=$MY_HIVE_IP +EOF +fi + +chown tpot:tpot /data/ews/conf/ews.ip +chmod 770 /data/ews/conf/ews.ip diff --git a/preview/docker/dist/entrypoint.sh b/docker/tpotinit/dist/entrypoint.sh similarity index 100% rename from preview/docker/dist/entrypoint.sh rename to docker/tpotinit/dist/entrypoint.sh diff --git a/docker/tpotinit/dist/etc/compose/collector.yml b/docker/tpotinit/dist/etc/compose/collector.yml new file mode 100644 index 00000000..2e8134e8 --- /dev/null +++ b/docker/tpotinit/dist/etc/compose/collector.yml @@ -0,0 +1,260 @@ +# T-Pot (Collector) +# Do not erase ports sections, these are used by /opt/tpot/bin/rules.sh to setup iptables ACCEPT rules for NFQ (honeytrap / glutton) +version: '2.3' + +networks: + heralding_local: + ewsposter_local: + spiderfoot_local: + +services: + +################## +#### Honeypots +################## + +# Heralding service + heralding: + container_name: heralding + restart: always + tmpfs: + - /tmp/heralding:uid=2000,gid=2000 + networks: + - heralding_local + ports: + - "21:21" + - "22:22" + - "23:23" + - "25:25" + - "80:80" + - "110:110" + - "143:143" + - "443:443" + - "465:465" + - "993:993" + - "995:995" + - "1080:1080" + - "3306:3306" + - "3389:3389" + - "5432:5432" + - "5900:5900" + image: "dtagdevsec/heralding:2204" + read_only: true + volumes: + - /data/heralding/log:/var/log/heralding + +# Honeytrap service + honeytrap: + container_name: honeytrap + restart: always + tmpfs: + - /tmp/honeytrap:uid=2000,gid=2000 + network_mode: "host" + cap_add: + - NET_ADMIN + image: "dtagdevsec/honeytrap:2204" + read_only: true + volumes: + - /data/honeytrap/attacks:/opt/honeytrap/var/attacks + - /data/honeytrap/downloads:/opt/honeytrap/var/downloads + - /data/honeytrap/log:/opt/honeytrap/var/log + + +################## +#### NSM +################## + +# Fatt service + fatt: + container_name: fatt + restart: always + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/fatt:2204" + volumes: + - /data/fatt/log:/opt/fatt/log + +# P0f service + p0f: + container_name: p0f + restart: always + network_mode: "host" + image: "dtagdevsec/p0f:2204" + read_only: true + volumes: + - /data/p0f/log:/var/log/p0f + +# Suricata service + suricata: + container_name: suricata + restart: always + environment: + # For ET Pro ruleset replace "OPEN" with your OINKCODE + - OINKCODE=OPEN + # Loading externel Rules from URL + # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com" + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/suricata:2204" + volumes: + - /data/suricata/log:/var/log/suricata + + +################## +#### Tools +################## + +#### ELK +## Elasticsearch service + elasticsearch: + container_name: elasticsearch + restart: always + environment: + - bootstrap.memory_lock=true + - ES_JAVA_OPTS=-Xms2048m -Xmx2048m + - ES_TMPDIR=/tmp + cap_add: + - IPC_LOCK + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + mem_limit: 4g + ports: + - "127.0.0.1:64298:9200" + image: "dtagdevsec/elasticsearch:2204" + 
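+ # Elasticsearch is published on loopback only (127.0.0.1:64298); external
+ # access is expected to go through the nginx reverse proxy (port 64297)
+ # defined further below, and the /data bind mount keeps the index data on
+ # the host across container updates.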
volumes: + - /data:/data + +## Kibana service + kibana: + container_name: kibana + restart: always + depends_on: + elasticsearch: + condition: service_healthy + mem_limit: 1g + ports: + - "127.0.0.1:64296:5601" + image: "dtagdevsec/kibana:2204" + +## Logstash service + logstash: + container_name: logstash + restart: always + environment: + - LS_JAVA_OPTS=-Xms1024m -Xmx1024m + depends_on: + elasticsearch: + condition: service_healthy + env_file: + - /opt/tpot/etc/compose/elk_environment + mem_limit: 2g + image: "dtagdevsec/logstash:2204" + volumes: + - /data:/data + +## Map Redis Service + map_redis: + container_name: map_redis + restart: always + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/redis:2204" + read_only: true + +## Map Web Service + map_web: + container_name: map_web + restart: always + environment: + - MAP_COMMAND=AttackMapServer.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + ports: + - "127.0.0.1:64299:64299" + image: "dtagdevsec/map:2204" + +## Map Data Service + map_data: + container_name: map_data + restart: always + depends_on: + elasticsearch: + condition: service_healthy + environment: + - MAP_COMMAND=DataServer_v2.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/map:2204" +#### /ELK + +# Ewsposter service + ewsposter: + container_name: ewsposter + restart: always + networks: + - ewsposter_local + environment: + - EWS_HPFEEDS_ENABLE=false + - EWS_HPFEEDS_HOST=host + - EWS_HPFEEDS_PORT=port + - EWS_HPFEEDS_CHANNELS=channels + - EWS_HPFEEDS_IDENT=user + - EWS_HPFEEDS_SECRET=secret + - EWS_HPFEEDS_TLSCERT=false + - EWS_HPFEEDS_FORMAT=json + env_file: + - /opt/tpot/etc/compose/elk_environment + image: "dtagdevsec/ewsposter:2204" + volumes: + - /data:/data + - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip + +# Nginx service + nginx: + container_name: nginx + restart: always + tmpfs: + - /var/tmp/nginx/client_body + - /var/tmp/nginx/proxy + - /var/tmp/nginx/fastcgi + - /var/tmp/nginx/uwsgi + - /var/tmp/nginx/scgi + - /run + - /var/lib/nginx/tmp:uid=100,gid=82 + network_mode: "host" + ports: + - "64297:64297" + - "127.0.0.1:64304:64304" + image: "dtagdevsec/nginx:2204" + read_only: true + volumes: + - /data/nginx/cert/:/etc/nginx/cert/:ro + - /data/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro + - /data/nginx/log/:/var/log/nginx/ + +# Spiderfoot service + spiderfoot: + container_name: spiderfoot + restart: always + networks: + - spiderfoot_local + ports: + - "127.0.0.1:64303:8080" + image: "dtagdevsec/spiderfoot:2204" + volumes: + - /data/spiderfoot:/home/spiderfoot/.spiderfoot diff --git a/docker/tpotinit/dist/etc/compose/hive.yml b/docker/tpotinit/dist/etc/compose/hive.yml new file mode 100644 index 00000000..29825486 --- /dev/null +++ b/docker/tpotinit/dist/etc/compose/hive.yml @@ -0,0 +1,141 @@ +# T-Pot (Hive) +# Do not erase ports sections, these are used by /opt/tpot/bin/rules.sh to setup iptables ACCEPT rules for NFQ (honeytrap / glutton) +version: '2.3' + +networks: + spiderfoot_local: + +services: + +################## +#### Tools +################## + +#### ELK +## Elasticsearch service + elasticsearch: + container_name: elasticsearch + restart: always + environment: + - bootstrap.memory_lock=true + - ES_JAVA_OPTS=-Xms2048m -Xmx2048m + - ES_TMPDIR=/tmp + cap_add: + - IPC_LOCK + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 +# mem_limit: 4g + ports: + - "127.0.0.1:64298:9200" + image: "dtagdevsec/elasticsearch:2204" 
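+ # In the Hive edition the mem_limit lines are commented out, presumably so
+ # the ELK stack can use the RAM of a dedicated Hive host, and Logstash
+ # additionally listens on 127.0.0.1:64305 to receive the events shipped
+ # from attached sensors (cf. the deploy script earlier in this changeset).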
+ volumes: + - /data:/data + +## Kibana service + kibana: + container_name: kibana + restart: always + depends_on: + elasticsearch: + condition: service_healthy +# mem_limit: 1g + ports: + - "127.0.0.1:64296:5601" + image: "dtagdevsec/kibana:2204" + +## Logstash service + logstash: + container_name: logstash + restart: always + environment: + - LS_JAVA_OPTS=-Xms2048m -Xmx2048m + depends_on: + elasticsearch: + condition: service_healthy + env_file: + - /opt/tpot/etc/compose/elk_environment + ports: + - "127.0.0.1:64305:64305" +# mem_limit: 2g + image: "dtagdevsec/logstash:2204" + volumes: + - /data:/data + +## Map Redis Service + map_redis: + container_name: map_redis + restart: always + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/redis:2204" + read_only: true + +## Map Web Service + map_web: + container_name: map_web + restart: always + environment: + - MAP_COMMAND=AttackMapServer.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + ports: + - "127.0.0.1:64299:64299" + image: "dtagdevsec/map:2204" + +## Map Data Service + map_data: + container_name: map_data + restart: always + depends_on: + elasticsearch: + condition: service_healthy + environment: + - MAP_COMMAND=DataServer_v2.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/map:2204" +#### /ELK + +# Nginx service + nginx: + container_name: nginx + restart: always + tmpfs: + - /var/tmp/nginx/client_body + - /var/tmp/nginx/proxy + - /var/tmp/nginx/fastcgi + - /var/tmp/nginx/uwsgi + - /var/tmp/nginx/scgi + - /run + - /var/lib/nginx/tmp:uid=100,gid=82 + network_mode: "host" + ports: + - "64297:64297" + - "127.0.0.1:64304:64304" + image: "dtagdevsec/nginx:2204" + read_only: true + volumes: + - /data/nginx/cert/:/etc/nginx/cert/:ro + - /data/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro + - /data/nginx/log/:/var/log/nginx/ + +# Spiderfoot service + spiderfoot: + container_name: spiderfoot + restart: always + networks: + - spiderfoot_local + ports: + - "127.0.0.1:64303:8080" + image: "dtagdevsec/spiderfoot:2204" + volumes: + - /data/spiderfoot:/home/spiderfoot/.spiderfoot diff --git a/docker/tpotinit/dist/etc/compose/hive_sensor.yml b/docker/tpotinit/dist/etc/compose/hive_sensor.yml new file mode 100644 index 00000000..db16863d --- /dev/null +++ b/docker/tpotinit/dist/etc/compose/hive_sensor.yml @@ -0,0 +1,548 @@ +# T-Pot (Hive_Sensor) +# Do not erase ports sections, these are used by /opt/tpot/bin/rules.sh to setup iptables ACCEPT rules for NFQ (honeytrap / glutton) +version: '2.3' + +networks: + adbhoney_local: + ciscoasa_local: + citrixhoneypot_local: + conpot_local_IEC104: + conpot_local_guardian_ast: + conpot_local_ipmi: + conpot_local_kamstrup_382: + cowrie_local: + ddospot_local: + dicompot_local: + dionaea_local: + elasticpot_local: + heralding_local: + ipphoney_local: + mailoney_local: + medpot_local: + redishoneypot_local: + tanner_local: + ewsposter_local: + sentrypeer_local: + spiderfoot_local: + +services: + +################## +#### Honeypots +################## + +# Adbhoney service + adbhoney: + container_name: adbhoney + restart: always + networks: + - adbhoney_local + ports: + - "5555:5555" + image: "dtagdevsec/adbhoney:2204" + read_only: true + volumes: + - /data/adbhoney/log:/opt/adbhoney/log + - /data/adbhoney/downloads:/opt/adbhoney/dl + +# Ciscoasa service + ciscoasa: + container_name: ciscoasa + restart: always + tmpfs: + - /tmp/ciscoasa:uid=2000,gid=2000 + networks: + - ciscoasa_local + ports: + - 
"5000:5000/udp" + - "8443:8443" + image: "dtagdevsec/ciscoasa:2204" + read_only: true + volumes: + - /data/ciscoasa/log:/var/log/ciscoasa + +# CitrixHoneypot service + citrixhoneypot: + container_name: citrixhoneypot + restart: always + networks: + - citrixhoneypot_local + ports: + - "443:443" + image: "dtagdevsec/citrixhoneypot:2204" + read_only: true + volumes: + - /data/citrixhoneypot/logs:/opt/citrixhoneypot/logs + +# Conpot IEC104 service + conpot_IEC104: + container_name: conpot_iec104 + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json + - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log + - CONPOT_TEMPLATE=IEC104 + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_IEC104 + ports: + - "161:161/udp" + - "2404:2404" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot guardian_ast service + conpot_guardian_ast: + container_name: conpot_guardian_ast + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json + - CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log + - CONPOT_TEMPLATE=guardian_ast + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_guardian_ast + ports: + - "10001:10001" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot ipmi + conpot_ipmi: + container_name: conpot_ipmi + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json + - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log + - CONPOT_TEMPLATE=ipmi + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_ipmi + ports: + - "623:623/udp" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot kamstrup_382 + conpot_kamstrup_382: + container_name: conpot_kamstrup_382 + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json + - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log + - CONPOT_TEMPLATE=kamstrup_382 + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_kamstrup_382 + ports: + - "1025:1025" + - "50100:50100" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Cowrie service + cowrie: + container_name: cowrie + restart: always + tmpfs: + - /tmp/cowrie:uid=2000,gid=2000 + - /tmp/cowrie/data:uid=2000,gid=2000 + networks: + - cowrie_local + ports: + - "22:22" + - "23:23" + image: "dtagdevsec/cowrie:2204" + read_only: true + volumes: + - /data/cowrie/downloads:/home/cowrie/cowrie/dl + - /data/cowrie/keys:/home/cowrie/cowrie/etc + - /data/cowrie/log:/home/cowrie/cowrie/log + - /data/cowrie/log/tty:/home/cowrie/cowrie/log/tty + +# Ddospot service + ddospot: + container_name: ddospot + restart: always + networks: + - ddospot_local + ports: + - "19:19/udp" + - "53:53/udp" + - "123:123/udp" +# - "161:161/udp" + - "1900:1900/udp" + image: "dtagdevsec/ddospot:2204" + read_only: true + volumes: + - /data/ddospot/log:/opt/ddospot/ddospot/logs + - /data/ddospot/bl:/opt/ddospot/ddospot/bl + - /data/ddospot/db:/opt/ddospot/ddospot/db + +# Dicompot service +# Get the Horos Client for testing: https://horosproject.org/ +# Get Dicom images (CC BY 
3.0): https://www.cancerimagingarchive.net/collections/ +# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images + dicompot: + container_name: dicompot + restart: always + networks: + - dicompot_local + ports: + - "11112:11112" + image: "dtagdevsec/dicompot:2204" + read_only: true + volumes: + - /data/dicompot/log:/var/log/dicompot +# - /data/dicompot/images:/opt/dicompot/images + +# Dionaea service + dionaea: + container_name: dionaea + stdin_open: true + tty: true + restart: always + networks: + - dionaea_local + ports: + - "20:20" + - "21:21" + - "42:42" + - "69:69/udp" + - "81:81" + - "135:135" + # - "443:443" + - "445:445" + - "1433:1433" + - "1723:1723" + - "1883:1883" + - "3306:3306" + # - "5060:5060" + # - "5060:5060/udp" + # - "5061:5061" + - "27017:27017" + image: "dtagdevsec/dionaea:2204" + read_only: true + volumes: + - /data/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp + - /data/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp + - /data/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www + - /data/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp + - /data/dionaea:/opt/dionaea/var/dionaea + - /data/dionaea/binaries:/opt/dionaea/var/dionaea/binaries + - /data/dionaea/log:/opt/dionaea/var/log + - /data/dionaea/rtp:/opt/dionaea/var/dionaea/rtp + +# ElasticPot service + elasticpot: + container_name: elasticpot + restart: always + networks: + - elasticpot_local + ports: + - "9200:9200" + image: "dtagdevsec/elasticpot:2204" + read_only: true + volumes: + - /data/elasticpot/log:/opt/elasticpot/log + +# Heralding service + heralding: + container_name: heralding + restart: always + tmpfs: + - /tmp/heralding:uid=2000,gid=2000 + networks: + - heralding_local + ports: + # - "21:21" + # - "22:22" + # - "23:23" + # - "25:25" + # - "80:80" + - "110:110" + - "143:143" + # - "443:443" + - "465:465" + - "993:993" + - "995:995" + # - "3306:3306" + # - "3389:3389" + - "1080:1080" + - "5432:5432" + - "5900:5900" + image: "dtagdevsec/heralding:2204" + read_only: true + volumes: + - /data/heralding/log:/var/log/heralding + +# Honeytrap service + honeytrap: + container_name: honeytrap + restart: always + tmpfs: + - /tmp/honeytrap:uid=2000,gid=2000 + network_mode: "host" + cap_add: + - NET_ADMIN + image: "dtagdevsec/honeytrap:2204" + read_only: true + volumes: + - /data/honeytrap/attacks:/opt/honeytrap/var/attacks + - /data/honeytrap/downloads:/opt/honeytrap/var/downloads + - /data/honeytrap/log:/opt/honeytrap/var/log + +# Ipphoney service + ipphoney: + container_name: ipphoney + restart: always + networks: + - ipphoney_local + ports: + - "631:631" + image: "dtagdevsec/ipphoney:2204" + read_only: true + volumes: + - /data/ipphoney/log:/opt/ipphoney/log + +# Mailoney service + mailoney: + container_name: mailoney + restart: always + environment: + - HPFEEDS_SERVER= + - HPFEEDS_IDENT=user + - HPFEEDS_SECRET=pass + - HPFEEDS_PORT=20000 + - HPFEEDS_CHANNELPREFIX=prefix + networks: + - mailoney_local + ports: + - "25:25" + image: "dtagdevsec/mailoney:2204" + read_only: true + volumes: + - /data/mailoney/log:/opt/mailoney/logs + +# Medpot service + medpot: + container_name: medpot + restart: always + networks: + - medpot_local + ports: + - "2575:2575" + image: "dtagdevsec/medpot:2204" + read_only: true + volumes: + - /data/medpot/log/:/var/log/medpot + +# Redishoneypot service + redishoneypot: + container_name: redishoneypot + restart: always + networks: + - redishoneypot_local + ports: + - "6379:6379" + image: "dtagdevsec/redishoneypot:2204" + 
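+ # Like most honeypot services in this file the container runs read-only,
+ # with writable paths provided via tmpfs or bind mounts, limiting what a
+ # compromised honeypot process can persist inside the container.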
read_only: true + volumes: + - /data/redishoneypot/log:/var/log/redishoneypot + +# SentryPeer service + sentrypeer: + container_name: sentrypeer + restart: always +# SentryPeer offers to exchange bad actor data via DHT / P2P mode by setting the ENV to true (1) +# In some cases (i.e. internally deployed T-Pots) this might be confusing as SentryPeer will show +# the bad actors in its logs. Therefore this option is opt-in based. +# environment: +# - SENTRYPEER_PEER_TO_PEER=0 + networks: + - sentrypeer_local + ports: +# - "4222:4222/udp" + - "5060:5060/udp" +# - "127.0.0.1:8082:8082" + image: "dtagdevsec/sentrypeer:2204" + read_only: true + volumes: + - /data/sentrypeer/log:/var/log/sentrypeer + +#### Snare / Tanner +## Tanner Redis Service + tanner_redis: + container_name: tanner_redis + restart: always + tty: true + networks: + - tanner_local + image: "dtagdevsec/redis:2204" + read_only: true + +## PHP Sandbox service + tanner_phpox: + container_name: tanner_phpox + restart: always + tty: true + networks: + - tanner_local + image: "dtagdevsec/phpox:2204" + read_only: true + +## Tanner API Service + tanner_api: + container_name: tanner_api + restart: always + tmpfs: + - /tmp/tanner:uid=2000,gid=2000 + tty: true + networks: + - tanner_local + image: "dtagdevsec/tanner:2204" + read_only: true + volumes: + - /data/tanner/log:/var/log/tanner + command: tannerapi + depends_on: + - tanner_redis + +## Tanner Service + tanner: + container_name: tanner + restart: always + tmpfs: + - /tmp/tanner:uid=2000,gid=2000 + tty: true + networks: + - tanner_local + image: "dtagdevsec/tanner:2204" + command: tanner + read_only: true + volumes: + - /data/tanner/log:/var/log/tanner + - /data/tanner/files:/opt/tanner/files + depends_on: + - tanner_api +# - tanner_web + - tanner_phpox + +## Snare Service + snare: + container_name: snare + restart: always + tty: true + networks: + - tanner_local + ports: + - "80:80" + image: "dtagdevsec/snare:2204" + depends_on: + - tanner + + +################## +#### NSM +################## + +# Fatt service + fatt: + container_name: fatt + restart: always + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/fatt:2204" + volumes: + - /data/fatt/log:/opt/fatt/log + +# P0f service + p0f: + container_name: p0f + restart: always + network_mode: "host" + image: "dtagdevsec/p0f:2204" + read_only: true + volumes: + - /data/p0f/log:/var/log/p0f + +# Suricata service + suricata: + container_name: suricata + restart: always + environment: + # For ET Pro ruleset replace "OPEN" with your OINKCODE + - OINKCODE=OPEN + # Loading externel Rules from URL + # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com" + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/suricata:2204" + volumes: + - /data/suricata/log:/var/log/suricata + + +################## +#### Tools +################## + +## Logstash service + logstash: + container_name: logstash + restart: always + environment: + - LS_JAVA_OPTS=-Xms1024m -Xmx1024m + env_file: + - /opt/tpot/etc/compose/elk_environment + mem_limit: 2g + image: "dtagdevsec/logstash:2204" + volumes: + - /data:/data + +# Ewsposter service + ewsposter: + container_name: ewsposter + restart: always + networks: + - ewsposter_local + environment: + - EWS_HPFEEDS_ENABLE=false + - EWS_HPFEEDS_HOST=host + - EWS_HPFEEDS_PORT=port + - EWS_HPFEEDS_CHANNELS=channels + - EWS_HPFEEDS_IDENT=user + - EWS_HPFEEDS_SECRET=secret + - EWS_HPFEEDS_TLSCERT=false + - 
EWS_HPFEEDS_FORMAT=json + env_file: + - /opt/tpot/etc/compose/elk_environment + image: "dtagdevsec/ewsposter:2204" + volumes: + - /data:/data + - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip diff --git a/docker/tpotinit/dist/etc/compose/industrial.yml b/docker/tpotinit/dist/etc/compose/industrial.yml new file mode 100644 index 00000000..15478286 --- /dev/null +++ b/docker/tpotinit/dist/etc/compose/industrial.yml @@ -0,0 +1,431 @@ +# T-Pot (Industrial) +# Do not erase ports sections, these are used by /opt/tpot/bin/rules.sh to setup iptables ACCEPT rules for NFQ (honeytrap / glutton) +version: '2.3' + +networks: + conpot_local_default: + conpot_local_IEC104: + conpot_local_guardian_ast: + conpot_local_ipmi: + conpot_local_kamstrup_382: + cowrie_local: + dicompot_local: + heralding_local: + medpot_local: + ewsposter_local: + spiderfoot_local: + +services: + +################## +#### Honeypots +################## + +# Conpot default service + conpot_default: + container_name: conpot_default + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_default.json + - CONPOT_LOG=/var/log/conpot/conpot_default.log + - CONPOT_TEMPLATE=default + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_default + ports: + - "69:69/udp" + - "80:80" + - "102:102" + - "161:161/udp" + - "502:502" +# - "623:623/udp" + - "21:21" + - "44818:44818" + - "47808:47808/udp" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot IEC104 service + conpot_IEC104: + container_name: conpot_iec104 + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json + - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log + - CONPOT_TEMPLATE=IEC104 + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_IEC104 + ports: +# - "161:161/udp" + - "2404:2404" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot guardian_ast service + conpot_guardian_ast: + container_name: conpot_guardian_ast + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json + - CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log + - CONPOT_TEMPLATE=guardian_ast + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_guardian_ast + ports: + - "10001:10001" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot ipmi + conpot_ipmi: + container_name: conpot_ipmi + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json + - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log + - CONPOT_TEMPLATE=ipmi + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_ipmi + ports: + - "623:623/udp" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot kamstrup_382 + conpot_kamstrup_382: + container_name: conpot_kamstrup_382 + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json + - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log + - CONPOT_TEMPLATE=kamstrup_382 + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - 
conpot_local_kamstrup_382 + ports: + - "1025:1025" + - "50100:50100" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Cowrie service + cowrie: + container_name: cowrie + restart: always + tmpfs: + - /tmp/cowrie:uid=2000,gid=2000 + - /tmp/cowrie/data:uid=2000,gid=2000 + networks: + - cowrie_local + ports: + - "22:22" + - "23:23" + image: "dtagdevsec/cowrie:2204" + read_only: true + volumes: + - /data/cowrie/downloads:/home/cowrie/cowrie/dl + - /data/cowrie/keys:/home/cowrie/cowrie/etc + - /data/cowrie/log:/home/cowrie/cowrie/log + - /data/cowrie/log/tty:/home/cowrie/cowrie/log/tty + +# Dicompot service +# Get the Horos Client for testing: https://horosproject.org/ +# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/ +# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images + dicompot: + container_name: dicompot + restart: always + networks: + - dicompot_local + ports: + - "11112:11112" + image: "dtagdevsec/dicompot:2204" + read_only: true + volumes: + - /data/dicompot/log:/var/log/dicompot +# - /data/dicompot/images:/opt/dicompot/images + +# Heralding service + heralding: + container_name: heralding + restart: always + tmpfs: + - /tmp/heralding:uid=2000,gid=2000 + networks: + - heralding_local + ports: + # - "21:21" + # - "22:22" + # - "23:23" + # - "25:25" + # - "80:80" + # - "110:110" + # - "143:143" + # - "443:443" + # - "465:465" + # - "993:993" + # - "995:995" + # - "3306:3306" + # - "3389:3389" + # - "5432:5432" + - "5900:5900" + image: "dtagdevsec/heralding:2204" + read_only: true + volumes: + - /data/heralding/log:/var/log/heralding + +# Honeytrap service + honeytrap: + container_name: honeytrap + restart: always + tmpfs: + - /tmp/honeytrap:uid=2000,gid=2000 + network_mode: "host" + cap_add: + - NET_ADMIN + image: "dtagdevsec/honeytrap:2204" + read_only: true + volumes: + - /data/honeytrap/attacks:/opt/honeytrap/var/attacks + - /data/honeytrap/downloads:/opt/honeytrap/var/downloads + - /data/honeytrap/log:/opt/honeytrap/var/log + +# Medpot service + medpot: + container_name: medpot + restart: always + networks: + - medpot_local + ports: + - "2575:2575" + image: "dtagdevsec/medpot:2204" + read_only: true + volumes: + - /data/medpot/log/:/var/log/medpot + +################## +#### NSM +################## + +# Fatt service + fatt: + container_name: fatt + restart: always + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/fatt:2204" + volumes: + - /data/fatt/log:/opt/fatt/log + +# P0f service + p0f: + container_name: p0f + restart: always + network_mode: "host" + image: "dtagdevsec/p0f:2204" + read_only: true + volumes: + - /data/p0f/log:/var/log/p0f + +# Suricata service + suricata: + container_name: suricata + restart: always + environment: + # For ET Pro ruleset replace "OPEN" with your OINKCODE + - OINKCODE=OPEN + # Loading externel Rules from URL + # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com" + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/suricata:2204" + volumes: + - /data/suricata/log:/var/log/suricata + + +################## +#### Tools +################## + +#### ELK +## Elasticsearch service + elasticsearch: + container_name: elasticsearch + restart: always + environment: + - bootstrap.memory_lock=true + - ES_JAVA_OPTS=-Xms2048m -Xmx2048m + - ES_TMPDIR=/tmp + cap_add: + - IPC_LOCK + ulimits: + memlock: + soft: -1 + 
hard: -1 + nofile: + soft: 65536 + hard: 65536 + mem_limit: 4g + ports: + - "127.0.0.1:64298:9200" + image: "dtagdevsec/elasticsearch:2204" + volumes: + - /data:/data + +## Kibana service + kibana: + container_name: kibana + restart: always + depends_on: + elasticsearch: + condition: service_healthy + mem_limit: 1g + ports: + - "127.0.0.1:64296:5601" + image: "dtagdevsec/kibana:2204" + +## Logstash service + logstash: + container_name: logstash + restart: always + environment: + - LS_JAVA_OPTS=-Xms1024m -Xmx1024m + depends_on: + elasticsearch: + condition: service_healthy + env_file: + - /opt/tpot/etc/compose/elk_environment + mem_limit: 2g + image: "dtagdevsec/logstash:2204" + volumes: + - /data:/data + +## Map Redis Service + map_redis: + container_name: map_redis + restart: always + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/redis:2204" + read_only: true + +## Map Web Service + map_web: + container_name: map_web + restart: always + environment: + - MAP_COMMAND=AttackMapServer.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + ports: + - "127.0.0.1:64299:64299" + image: "dtagdevsec/map:2204" + +## Map Data Service + map_data: + container_name: map_data + restart: always + depends_on: + elasticsearch: + condition: service_healthy + environment: + - MAP_COMMAND=DataServer_v2.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/map:2204" +#### /ELK + +# Ewsposter service + ewsposter: + container_name: ewsposter + restart: always + networks: + - ewsposter_local + environment: + - EWS_HPFEEDS_ENABLE=false + - EWS_HPFEEDS_HOST=host + - EWS_HPFEEDS_PORT=port + - EWS_HPFEEDS_CHANNELS=channels + - EWS_HPFEEDS_IDENT=user + - EWS_HPFEEDS_SECRET=secret + - EWS_HPFEEDS_TLSCERT=false + - EWS_HPFEEDS_FORMAT=json + env_file: + - /opt/tpot/etc/compose/elk_environment + image: "dtagdevsec/ewsposter:2204" + volumes: + - /data:/data + - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip + +# Nginx service + nginx: + container_name: nginx + restart: always + tmpfs: + - /var/tmp/nginx/client_body + - /var/tmp/nginx/proxy + - /var/tmp/nginx/fastcgi + - /var/tmp/nginx/uwsgi + - /var/tmp/nginx/scgi + - /run + - /var/lib/nginx/tmp:uid=100,gid=82 + network_mode: "host" + ports: + - "64297:64297" + - "127.0.0.1:64304:64304" + image: "dtagdevsec/nginx:2204" + read_only: true + volumes: + - /data/nginx/cert/:/etc/nginx/cert/:ro + - /data/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro + - /data/nginx/log/:/var/log/nginx/ + +# Spiderfoot service + spiderfoot: + container_name: spiderfoot + restart: always + networks: + - spiderfoot_local + ports: + - "127.0.0.1:64303:8080" + image: "dtagdevsec/spiderfoot:2204" + volumes: + - /data/spiderfoot:/home/spiderfoot/.spiderfoot diff --git a/docker/tpotinit/dist/etc/compose/log4j.yml b/docker/tpotinit/dist/etc/compose/log4j.yml new file mode 100644 index 00000000..9d6b9179 --- /dev/null +++ b/docker/tpotinit/dist/etc/compose/log4j.yml @@ -0,0 +1,250 @@ +# T-Pot (Log4j) +# Do not erase ports sections, these are used by /opt/tpot/bin/rules.sh to setup iptables ACCEPT rules for NFQ (honeytrap / glutton) +version: '2.3' + +networks: + log4pot_local: + ewsposter_local: + spiderfoot_local: + +services: + +################## +#### Honeypots +################## + +# Log4pot service + log4pot: + container_name: log4pot + restart: always + tmpfs: + - /tmp:uid=2000,gid=2000 + networks: + - log4pot_local + ports: + - "80:8080" + - "443:8080" + - "8080:8080" + - "9200:8080" + - 
"25565:8080" + image: "dtagdevsec/log4pot:2204" + read_only: true + volumes: + - /data/log4pot/log:/var/log/log4pot/log + - /data/log4pot/payloads:/var/log/log4pot/payloads + +# Honeytrap service + honeytrap: + container_name: honeytrap + restart: always + tmpfs: + - /tmp/honeytrap:uid=2000,gid=2000 + network_mode: "host" + cap_add: + - NET_ADMIN + image: "dtagdevsec/honeytrap:2204" + read_only: true + volumes: + - /data/honeytrap/attacks:/opt/honeytrap/var/attacks + - /data/honeytrap/downloads:/opt/honeytrap/var/downloads + - /data/honeytrap/log:/opt/honeytrap/var/log + + +################## +#### NSM +################## + +# Fatt service + fatt: + container_name: fatt + restart: always + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/fatt:2204" + volumes: + - /data/fatt/log:/opt/fatt/log + +# P0f service + p0f: + container_name: p0f + restart: always + network_mode: "host" + image: "dtagdevsec/p0f:2204" + read_only: true + volumes: + - /data/p0f/log:/var/log/p0f + +# Suricata service + suricata: + container_name: suricata + restart: always + environment: + # For ET Pro ruleset replace "OPEN" with your OINKCODE + - OINKCODE=OPEN + # Loading externel Rules from URL + # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com" + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/suricata:2204" + volumes: + - /data/suricata/log:/var/log/suricata + + +################## +#### Tools +################## + +#### ELK +## Elasticsearch service + elasticsearch: + container_name: elasticsearch + restart: always + environment: + - bootstrap.memory_lock=true + - ES_JAVA_OPTS=-Xms2048m -Xmx2048m + - ES_TMPDIR=/tmp + cap_add: + - IPC_LOCK + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + mem_limit: 4g + ports: + - "127.0.0.1:64298:9200" + image: "dtagdevsec/elasticsearch:2204" + volumes: + - /data:/data + +## Kibana service + kibana: + container_name: kibana + restart: always + depends_on: + elasticsearch: + condition: service_healthy + mem_limit: 1g + ports: + - "127.0.0.1:64296:5601" + image: "dtagdevsec/kibana:2204" + +## Logstash service + logstash: + container_name: logstash + restart: always + environment: + - LS_JAVA_OPTS=-Xms1024m -Xmx1024m + depends_on: + elasticsearch: + condition: service_healthy + env_file: + - /opt/tpot/etc/compose/elk_environment + mem_limit: 2g + image: "dtagdevsec/logstash:2204" + volumes: + - /data:/data + +## Map Redis Service + map_redis: + container_name: map_redis + restart: always + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/redis:2204" + read_only: true + +## Map Web Service + map_web: + container_name: map_web + restart: always + environment: + - MAP_COMMAND=AttackMapServer.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + ports: + - "127.0.0.1:64299:64299" + image: "dtagdevsec/map:2204" + +## Map Data Service + map_data: + container_name: map_data + restart: always + depends_on: + elasticsearch: + condition: service_healthy + environment: + - MAP_COMMAND=DataServer_v2.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/map:2204" +#### /ELK + +# Ewsposter service + ewsposter: + container_name: ewsposter + restart: always + networks: + - ewsposter_local + environment: + - EWS_HPFEEDS_ENABLE=false + - EWS_HPFEEDS_HOST=host + - EWS_HPFEEDS_PORT=port + - EWS_HPFEEDS_CHANNELS=channels + - 
EWS_HPFEEDS_IDENT=user + - EWS_HPFEEDS_SECRET=secret + - EWS_HPFEEDS_TLSCERT=false + - EWS_HPFEEDS_FORMAT=json + env_file: + - /opt/tpot/etc/compose/elk_environment + image: "dtagdevsec/ewsposter:2204" + volumes: + - /data:/data + - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip + +# Nginx service + nginx: + container_name: nginx + restart: always + tmpfs: + - /var/tmp/nginx/client_body + - /var/tmp/nginx/proxy + - /var/tmp/nginx/fastcgi + - /var/tmp/nginx/uwsgi + - /var/tmp/nginx/scgi + - /run + - /var/lib/nginx/tmp:uid=100,gid=82 + network_mode: "host" + ports: + - "64297:64297" + - "127.0.0.1:64304:64304" + image: "dtagdevsec/nginx:2204" + read_only: true + volumes: + - /data/nginx/cert/:/etc/nginx/cert/:ro + - /data/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro + - /data/nginx/log/:/var/log/nginx/ + +# Spiderfoot service + spiderfoot: + container_name: spiderfoot + restart: always + networks: + - spiderfoot_local + ports: + - "127.0.0.1:64303:8080" + image: "dtagdevsec/spiderfoot:2204" + volumes: + - /data/spiderfoot:/home/spiderfoot/.spiderfoot diff --git a/docker/tpotinit/dist/etc/compose/medical.yml b/docker/tpotinit/dist/etc/compose/medical.yml new file mode 100644 index 00000000..f2c966f4 --- /dev/null +++ b/docker/tpotinit/dist/etc/compose/medical.yml @@ -0,0 +1,244 @@ +# T-Pot (Medical) +# Do not erase ports sections, these are used by /opt/tpot/bin/rules.sh to setup iptables ACCEPT rules for NFQ (honeytrap / glutton) +version: '2.3' + +networks: + dicompot_local: + medpot_local: + ewsposter_local: + spiderfoot_local: + +services: + +################## +#### Honeypots +################## + +# Dicompot service +# Get the Horos Client for testing: https://horosproject.org/ +# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/ +# Put images (which must be in Dicom DCM format or it will not work!) 
into /data/dicompot/images + dicompot: + container_name: dicompot + restart: always + networks: + - dicompot_local + ports: + - "11112:11112" + image: "dtagdevsec/dicompot:2204" + read_only: true + volumes: + - /data/dicompot/log:/var/log/dicompot +# - /data/dicompot/images:/opt/dicompot/images + +# Medpot service + medpot: + container_name: medpot + restart: always + networks: + - medpot_local + ports: + - "2575:2575" + image: "dtagdevsec/medpot:2204" + read_only: true + volumes: + - /data/medpot/log/:/var/log/medpot + +################## +#### NSM +################## + +# Fatt service + fatt: + container_name: fatt + restart: always + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/fatt:2204" + volumes: + - /data/fatt/log:/opt/fatt/log + +# P0f service + p0f: + container_name: p0f + restart: always + network_mode: "host" + image: "dtagdevsec/p0f:2204" + read_only: true + volumes: + - /data/p0f/log:/var/log/p0f + +# Suricata service + suricata: + container_name: suricata + restart: always + environment: + # For ET Pro ruleset replace "OPEN" with your OINKCODE + - OINKCODE=OPEN + # Loading external rules from URL + # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com" + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/suricata:2204" + volumes: + - /data/suricata/log:/var/log/suricata + + +################## +#### Tools +################## + +#### ELK +## Elasticsearch service + elasticsearch: + container_name: elasticsearch + restart: always + environment: + - bootstrap.memory_lock=true + - ES_JAVA_OPTS=-Xms2048m -Xmx2048m + - ES_TMPDIR=/tmp + cap_add: + - IPC_LOCK + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + mem_limit: 4g + ports: + - "127.0.0.1:64298:9200" + image: "dtagdevsec/elasticsearch:2204" + volumes: + - /data:/data + +## Kibana service + kibana: + container_name: kibana + restart: always + depends_on: + elasticsearch: + condition: service_healthy + mem_limit: 1g + ports: + - "127.0.0.1:64296:5601" + image: "dtagdevsec/kibana:2204" + +## Logstash service + logstash: + container_name: logstash + restart: always + environment: + - LS_JAVA_OPTS=-Xms1024m -Xmx1024m + depends_on: + elasticsearch: + condition: service_healthy + env_file: + - /opt/tpot/etc/compose/elk_environment + mem_limit: 2g + image: "dtagdevsec/logstash:2204" + volumes: + - /data:/data + +## Map Redis Service + map_redis: + container_name: map_redis + restart: always + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/redis:2204" + read_only: true + +## Map Web Service + map_web: + container_name: map_web + restart: always + environment: + - MAP_COMMAND=AttackMapServer.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + ports: + - "127.0.0.1:64299:64299" + image: "dtagdevsec/map:2204" + +## Map Data Service + map_data: + container_name: map_data + restart: always + depends_on: + elasticsearch: + condition: service_healthy + environment: + - MAP_COMMAND=DataServer_v2.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/map:2204" +#### /ELK + +# Ewsposter service + ewsposter: + container_name: ewsposter + restart: always + networks: + - ewsposter_local + environment: + - EWS_HPFEEDS_ENABLE=false + - EWS_HPFEEDS_HOST=host + - EWS_HPFEEDS_PORT=port + - EWS_HPFEEDS_CHANNELS=channels + - EWS_HPFEEDS_IDENT=user + - EWS_HPFEEDS_SECRET=secret + -
EWS_HPFEEDS_TLSCERT=false + - EWS_HPFEEDS_FORMAT=json + env_file: + - /opt/tpot/etc/compose/elk_environment + image: "dtagdevsec/ewsposter:2204" + volumes: + - /data:/data + - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip + +# Nginx service + nginx: + container_name: nginx + restart: always + tmpfs: + - /var/tmp/nginx/client_body + - /var/tmp/nginx/proxy + - /var/tmp/nginx/fastcgi + - /var/tmp/nginx/uwsgi + - /var/tmp/nginx/scgi + - /run + - /var/lib/nginx/tmp:uid=100,gid=82 + network_mode: "host" + ports: + - "64297:64297" + - "127.0.0.1:64304:64304" + image: "dtagdevsec/nginx:2204" + read_only: true + volumes: + - /data/nginx/cert/:/etc/nginx/cert/:ro + - /data/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro + - /data/nginx/log/:/var/log/nginx/ + +# Spiderfoot service + spiderfoot: + container_name: spiderfoot + restart: always + networks: + - spiderfoot_local + ports: + - "127.0.0.1:64303:8080" + image: "dtagdevsec/spiderfoot:2204" + volumes: + - /data/spiderfoot:/home/spiderfoot/.spiderfoot diff --git a/docker/tpotinit/dist/etc/compose/mini.yml b/docker/tpotinit/dist/etc/compose/mini.yml new file mode 100644 index 00000000..052891d9 --- /dev/null +++ b/docker/tpotinit/dist/etc/compose/mini.yml @@ -0,0 +1,271 @@ +# T-Pot (Mini) +# Do not erase ports sections, these are used by /opt/tpot/bin/rules.sh to setup iptables ACCEPT rules for NFQ (honeytrap / glutton) +version: '2.3' + +networks: + honeypots_local: + ewsposter_local: + spiderfoot_local: + +services: + +################## +#### Honeypots +################## + +# qHoneypots service + honeypots: + container_name: honeypots + stdin_open: true + tty: true + restart: always + tmpfs: + - /tmp:uid=2000,gid=2000 + networks: + - honeypots_local + ports: + - "21:21" + - "22:22" + - "23:23" + - "25:25" + - "53:53/udp" + - "80:80" + - "110:110" + - "123:123" + - "143:143" + - "161:161" + - "389:389" + - "443:443" + - "445:445" + - "1080:1080" + - "1433:1433" + - "1521:1521" + - "3306:3306" + - "5060:5060" + - "5432:5432" + - "5900:5900" + - "6379:6379" + - "6667:6667" + - "8080:8080" + - "9200:9200" + - "11211:11211" + image: "dtagdevsec/honeypots:2204" + read_only: true + volumes: + - /data/honeypots/log:/var/log/honeypots + +# Honeytrap service + honeytrap: + container_name: honeytrap + restart: always + tmpfs: + - /tmp/honeytrap:uid=2000,gid=2000 + network_mode: "host" + cap_add: + - NET_ADMIN + image: "dtagdevsec/honeytrap:2204" + read_only: true + volumes: + - /data/honeytrap/attacks:/opt/honeytrap/var/attacks + - /data/honeytrap/downloads:/opt/honeytrap/var/downloads + - /data/honeytrap/log:/opt/honeytrap/var/log + + +################## +#### NSM +################## + +# Fatt service + fatt: + container_name: fatt + restart: always + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/fatt:2204" + volumes: + - /data/fatt/log:/opt/fatt/log + +# P0f service + p0f: + container_name: p0f + restart: always + network_mode: "host" + image: "dtagdevsec/p0f:2204" + read_only: true + volumes: + - /data/p0f/log:/var/log/p0f + +# Suricata service + suricata: + container_name: suricata + restart: always + environment: + # For ET Pro ruleset replace "OPEN" with your OINKCODE + - OINKCODE=OPEN + # Loading external rules from URL + # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com" + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/suricata:2204" + volumes: + - /data/suricata/log:/var/log/suricata + + +################## +####
Tools +################## + +#### ELK +## Elasticsearch service + elasticsearch: + container_name: elasticsearch + restart: always + environment: + - bootstrap.memory_lock=true + - ES_JAVA_OPTS=-Xms2048m -Xmx2048m + - ES_TMPDIR=/tmp + cap_add: + - IPC_LOCK + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + mem_limit: 4g + ports: + - "127.0.0.1:64298:9200" + image: "dtagdevsec/elasticsearch:2204" + volumes: + - /data:/data + +## Kibana service + kibana: + container_name: kibana + restart: always + depends_on: + elasticsearch: + condition: service_healthy + mem_limit: 1g + ports: + - "127.0.0.1:64296:5601" + image: "dtagdevsec/kibana:2204" + +## Logstash service + logstash: + container_name: logstash + restart: always + environment: + - LS_JAVA_OPTS=-Xms1024m -Xmx1024m + depends_on: + elasticsearch: + condition: service_healthy + env_file: + - /opt/tpot/etc/compose/elk_environment + mem_limit: 2g + image: "dtagdevsec/logstash:2204" + volumes: + - /data:/data + +## Map Redis Service + map_redis: + container_name: map_redis + restart: always + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/redis:2204" + read_only: true + +## Map Web Service + map_web: + container_name: map_web + restart: always + environment: + - MAP_COMMAND=AttackMapServer.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + ports: + - "127.0.0.1:64299:64299" + image: "dtagdevsec/map:2204" + +## Map Data Service + map_data: + container_name: map_data + restart: always + depends_on: + elasticsearch: + condition: service_healthy + environment: + - MAP_COMMAND=DataServer_v2.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/map:2204" +#### /ELK + +# Ewsposter service + ewsposter: + container_name: ewsposter + restart: always + networks: + - ewsposter_local + environment: + - EWS_HPFEEDS_ENABLE=false + - EWS_HPFEEDS_HOST=host + - EWS_HPFEEDS_PORT=port + - EWS_HPFEEDS_CHANNELS=channels + - EWS_HPFEEDS_IDENT=user + - EWS_HPFEEDS_SECRET=secret + - EWS_HPFEEDS_TLSCERT=false + - EWS_HPFEEDS_FORMAT=json + env_file: + - /opt/tpot/etc/compose/elk_environment + image: "dtagdevsec/ewsposter:2204" + volumes: + - /data:/data + - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip + +# Nginx service + nginx: + container_name: nginx + restart: always + tmpfs: + - /var/tmp/nginx/client_body + - /var/tmp/nginx/proxy + - /var/tmp/nginx/fastcgi + - /var/tmp/nginx/uwsgi + - /var/tmp/nginx/scgi + - /run + - /var/lib/nginx/tmp:uid=100,gid=82 + network_mode: "host" + ports: + - "64297:64297" + - "127.0.0.1:64304:64304" + image: "dtagdevsec/nginx:2204" + read_only: true + volumes: + - /data/nginx/cert/:/etc/nginx/cert/:ro + - /data/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro + - /data/nginx/log/:/var/log/nginx/ + +# Spiderfoot service + spiderfoot: + container_name: spiderfoot + restart: always + networks: + - spiderfoot_local + ports: + - "127.0.0.1:64303:8080" + image: "dtagdevsec/spiderfoot:2204" + volumes: + - /data/spiderfoot:/home/spiderfoot/.spiderfoot diff --git a/docker/tpotinit/dist/etc/compose/nextgen.yml b/docker/tpotinit/dist/etc/compose/nextgen.yml new file mode 100644 index 00000000..75ddc90e --- /dev/null +++ b/docker/tpotinit/dist/etc/compose/nextgen.yml @@ -0,0 +1,575 @@ +# T-Pot (NextGen) +# Do not erase ports sections, these are used by /opt/tpot/bin/rules.sh to setup iptables ACCEPT rules for NFQ (honeytrap / glutton) +version: '2.3' + +networks: + adbhoney_local: + ciscoasa_local: + 
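+# Every honeypot below gets its own local network, which keeps the containers
+# isolated from one another. The ports sections further down also feed
+# /opt/tpot/bin/rules.sh, which derives roughly one iptables ACCEPT rule per
+# mapping before NFQ hands leftover traffic to honeytrap / glutton, e.g.
+# (illustrative sketch only): iptables -I INPUT -p tcp --dport 5555 -j ACCEPT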
citrixhoneypot_local: + conpot_local_IEC104: + conpot_local_guardian_ast: + conpot_local_ipmi: + conpot_local_kamstrup_382: + ddospot_local: + dicompot_local: + dionaea_local: + elasticpot_local: + endlessh_local: + hellpot_local: + heralding_local: + ipphoney_local: + mailoney_local: + medpot_local: + redishoneypot_local: + ewsposter_local: + spiderfoot_local: + +services: + +################## +#### Honeypots +################## + +# Adbhoney service + adbhoney: + container_name: adbhoney + restart: always + networks: + - adbhoney_local + ports: + - "5555:5555" + image: "dtagdevsec/adbhoney:2204" + read_only: true + volumes: + - /data/adbhoney/log:/opt/adbhoney/log + - /data/adbhoney/downloads:/opt/adbhoney/dl + +# Ciscoasa service + ciscoasa: + container_name: ciscoasa + restart: always + tmpfs: + - /tmp/ciscoasa:uid=2000,gid=2000 + networks: + - ciscoasa_local + ports: + - "5000:5000/udp" + - "8443:8443" + image: "dtagdevsec/ciscoasa:2204" + read_only: true + volumes: + - /data/ciscoasa/log:/var/log/ciscoasa + +# CitrixHoneypot service + citrixhoneypot: + container_name: citrixhoneypot + restart: always + networks: + - citrixhoneypot_local + ports: + - "443:443" + image: "dtagdevsec/citrixhoneypot:2204" + read_only: true + volumes: + - /data/citrixhoneypot/logs:/opt/citrixhoneypot/logs + +# Conpot IEC104 service + conpot_IEC104: + container_name: conpot_iec104 + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json + - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log + - CONPOT_TEMPLATE=IEC104 + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_IEC104 + ports: + - "161:161/udp" + - "2404:2404" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot guardian_ast service + conpot_guardian_ast: + container_name: conpot_guardian_ast + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json + - CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log + - CONPOT_TEMPLATE=guardian_ast + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_guardian_ast + ports: + - "10001:10001" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot ipmi + conpot_ipmi: + container_name: conpot_ipmi + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json + - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log + - CONPOT_TEMPLATE=ipmi + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_ipmi + ports: + - "623:623/udp" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot kamstrup_382 + conpot_kamstrup_382: + container_name: conpot_kamstrup_382 + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json + - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log + - CONPOT_TEMPLATE=kamstrup_382 + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_kamstrup_382 + ports: + - "1025:1025" + - "50100:50100" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Ddospot service + ddospot: + container_name: ddospot + restart: always + networks: + - 
ddospot_local + ports: + - "19:19/udp" + - "53:53/udp" + - "123:123/udp" +# - "161:161/udp" + - "1900:1900/udp" + image: "dtagdevsec/ddospot:2204" + read_only: true + volumes: + - /data/ddospot/log:/opt/ddospot/ddospot/logs + - /data/ddospot/bl:/opt/ddospot/ddospot/bl + - /data/ddospot/db:/opt/ddospot/ddospot/db + +# Dicompot service +# Get the Horos Client for testing: https://horosproject.org/ +# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/ +# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images + dicompot: + container_name: dicompot + restart: always + networks: + - dicompot_local + ports: + - "11112:11112" + image: "dtagdevsec/dicompot:2204" + read_only: true + volumes: + - /data/dicompot/log:/var/log/dicompot +# - /data/dicompot/images:/opt/dicompot/images + +# Dionaea service + dionaea: + container_name: dionaea + stdin_open: true + tty: true + restart: always + networks: + - dionaea_local + ports: + - "20:20" + - "21:21" + - "42:42" + - "69:69/udp" + - "81:81" + - "135:135" + # - "443:443" + - "445:445" + - "1433:1433" + - "1723:1723" + - "1883:1883" + - "3306:3306" + # - "5060:5060" + # - "5060:5060/udp" + # - "5061:5061" + - "27017:27017" + image: "dtagdevsec/dionaea:2204" + read_only: true + volumes: + - /data/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp + - /data/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp + - /data/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www + - /data/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp + - /data/dionaea:/opt/dionaea/var/dionaea + - /data/dionaea/binaries:/opt/dionaea/var/dionaea/binaries + - /data/dionaea/log:/opt/dionaea/var/log + - /data/dionaea/rtp:/opt/dionaea/var/dionaea/rtp + +# ElasticPot service + elasticpot: + container_name: elasticpot + restart: always + networks: + - elasticpot_local + ports: + - "9200:9200" + image: "dtagdevsec/elasticpot:2204" + read_only: true + volumes: + - /data/elasticpot/log:/opt/elasticpot/log + +# Endlessh service + endlessh: + container_name: endlessh + restart: always + networks: + - endlessh_local + ports: + - "22:2222" + image: "dtagdevsec/endlessh:2204" + read_only: true + volumes: + - /data/endlessh/log:/var/log/endlessh + +# Glutton service + glutton: + container_name: glutton + restart: always + tmpfs: + - /var/lib/glutton:uid=2000,gid=2000 + - /run:uid=2000,gid=2000 + network_mode: "host" + cap_add: + - NET_ADMIN + image: "dtagdevsec/glutton:2204" + read_only: true + volumes: + - /data/glutton/log:/var/log/glutton +# - /root/tpotce/docker/glutton/dist/rules.yaml:/opt/glutton/rules/rules.yaml + +# Heralding service + heralding: + container_name: heralding + restart: always + tmpfs: + - /tmp/heralding:uid=2000,gid=2000 + networks: + - heralding_local + ports: + # - "21:21" + # - "22:22" + # - "23:23" + # - "25:25" + # - "80:80" + - "110:110" + - "143:143" + # - "443:443" + - "465:465" + - "993:993" + - "995:995" + # - "3306:3306" + # - "3389:3389" + - "1080:1080" + - "5432:5432" + - "5900:5900" + image: "dtagdevsec/heralding:2204" + read_only: true + volumes: + - /data/heralding/log:/var/log/heralding + +# Ipphoney service + ipphoney: + container_name: ipphoney + restart: always + networks: + - ipphoney_local + ports: + - "631:631" + image: "dtagdevsec/ipphoney:2204" + read_only: true + volumes: + - /data/ipphoney/log:/opt/ipphoney/log + +# Mailoney service + mailoney: + container_name: mailoney + restart: always + environment: + - HPFEEDS_SERVER= + - HPFEEDS_IDENT=user + - HPFEEDS_SECRET=pass 
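+# HPFEEDS_SERVER is deliberately left empty and the surrounding hpfeeds values
+# are placeholders; mailoney should only submit events once a real broker is
+# configured, e.g. (hypothetical value): HPFEEDS_SERVER=hpfeeds.example.org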
+ - HPFEEDS_PORT=20000 + - HPFEEDS_CHANNELPREFIX=prefix + networks: + - mailoney_local + ports: + - "25:25" + image: "dtagdevsec/mailoney:2204" + read_only: true + volumes: + - /data/mailoney/log:/opt/mailoney/logs + +# Medpot service + medpot: + container_name: medpot + restart: always + networks: + - medpot_local + ports: + - "2575:2575" + image: "dtagdevsec/medpot:2204" + read_only: true + volumes: + - /data/medpot/log/:/var/log/medpot + +# Redishoneypot service + redishoneypot: + container_name: redishoneypot + restart: always + networks: + - redishoneypot_local + ports: + - "6379:6379" + image: "dtagdevsec/redishoneypot:2204" + read_only: true + volumes: + - /data/redishoneypot/log:/var/log/redishoneypot + +# Hellpot service + hellpot: + container_name: hellpot + restart: always + networks: + - hellpot_local + ports: + - "80:8080" + image: "dtagdevsec/hellpot:2204" + read_only: true + volumes: + - /data/hellpot/log:/var/log/hellpot + +################## +#### NSM +################## + +# Fatt service + fatt: + container_name: fatt + restart: always + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/fatt:2204" + volumes: + - /data/fatt/log:/opt/fatt/log + +# P0f service + p0f: + container_name: p0f + restart: always + network_mode: "host" + image: "dtagdevsec/p0f:2204" + read_only: true + volumes: + - /data/p0f/log:/var/log/p0f + +# Suricata service + suricata: + container_name: suricata + restart: always + environment: + # For ET Pro ruleset replace "OPEN" with your OINKCODE + - OINKCODE=OPEN + # Loading external rules from URL + # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com" + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/suricata:2204" + volumes: + - /data/suricata/log:/var/log/suricata + + +################## +#### Tools +################## + +#### ELK +## Elasticsearch service + elasticsearch: + container_name: elasticsearch + restart: always + environment: + - bootstrap.memory_lock=true + - ES_JAVA_OPTS=-Xms2048m -Xmx2048m + - ES_TMPDIR=/tmp + cap_add: + - IPC_LOCK + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + mem_limit: 4g + ports: + - "127.0.0.1:64298:9200" + image: "dtagdevsec/elasticsearch:2204" + volumes: + - /data:/data + +## Kibana service + kibana: + container_name: kibana + restart: always + depends_on: + elasticsearch: + condition: service_healthy + mem_limit: 1g + ports: + - "127.0.0.1:64296:5601" + image: "dtagdevsec/kibana:2204" + +## Logstash service + logstash: + container_name: logstash + restart: always + environment: + - LS_JAVA_OPTS=-Xms1024m -Xmx1024m + depends_on: + elasticsearch: + condition: service_healthy + env_file: + - /opt/tpot/etc/compose/elk_environment + mem_limit: 2g + image: "dtagdevsec/logstash:2204" + volumes: + - /data:/data + +## Map Redis Service + map_redis: + container_name: map_redis + restart: always + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/redis:2204" + read_only: true + +## Map Web Service + map_web: + container_name: map_web + restart: always + environment: + - MAP_COMMAND=AttackMapServer.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + ports: + - "127.0.0.1:64299:64299" + image: "dtagdevsec/map:2204" + +## Map Data Service + map_data: + container_name: map_data + restart: always + depends_on: + elasticsearch: + condition: service_healthy + environment: + - MAP_COMMAND=DataServer_v2.py + env_file: + -
/opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/map:2204" +#### /ELK + +# Ewsposter service + ewsposter: + container_name: ewsposter + restart: always + networks: + - ewsposter_local + environment: + - EWS_HPFEEDS_ENABLE=false + - EWS_HPFEEDS_HOST=host + - EWS_HPFEEDS_PORT=port + - EWS_HPFEEDS_CHANNELS=channels + - EWS_HPFEEDS_IDENT=user + - EWS_HPFEEDS_SECRET=secret + - EWS_HPFEEDS_TLSCERT=false + - EWS_HPFEEDS_FORMAT=json + env_file: + - /opt/tpot/etc/compose/elk_environment + image: "dtagdevsec/ewsposter:2204" + volumes: + - /data:/data + - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip + +# Nginx service + nginx: + container_name: nginx + restart: always + tmpfs: + - /var/tmp/nginx/client_body + - /var/tmp/nginx/proxy + - /var/tmp/nginx/fastcgi + - /var/tmp/nginx/uwsgi + - /var/tmp/nginx/scgi + - /run + - /var/lib/nginx/tmp:uid=100,gid=82 + network_mode: "host" + ports: + - "64297:64297" + - "127.0.0.1:64304:64304" + image: "dtagdevsec/nginx:2204" + read_only: true + volumes: + - /data/nginx/cert/:/etc/nginx/cert/:ro + - /data/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro + - /data/nginx/log/:/var/log/nginx/ + +# Spiderfoot service + spiderfoot: + container_name: spiderfoot + restart: always + networks: + - spiderfoot_local + ports: + - "127.0.0.1:64303:8080" + image: "dtagdevsec/spiderfoot:2204" + volumes: + - /data/spiderfoot:/home/spiderfoot/.spiderfoot diff --git a/docker/tpotinit/dist/etc/compose/sensor.yml b/docker/tpotinit/dist/etc/compose/sensor.yml new file mode 100644 index 00000000..15cd5613 --- /dev/null +++ b/docker/tpotinit/dist/etc/compose/sensor.yml @@ -0,0 +1,535 @@ +# T-Pot (Sensor) +# Do not erase ports sections, these are used by /opt/tpot/bin/rules.sh to setup iptables ACCEPT rules for NFQ (honeytrap / glutton) +version: '2.3' + +networks: + adbhoney_local: + ciscoasa_local: + citrixhoneypot_local: + conpot_local_IEC104: + conpot_local_guardian_ast: + conpot_local_ipmi: + conpot_local_kamstrup_382: + cowrie_local: + ddospot_local: + dicompot_local: + dionaea_local: + elasticpot_local: + heralding_local: + ipphoney_local: + mailoney_local: + medpot_local: + redishoneypot_local: + tanner_local: + ewsposter_local: + sentrypeer_local: + spiderfoot_local: + +services: + +################## +#### Honeypots +################## + +# Adbhoney service + adbhoney: + container_name: adbhoney + restart: always + networks: + - adbhoney_local + ports: + - "5555:5555" + image: "dtagdevsec/adbhoney:2204" + read_only: true + volumes: + - /data/adbhoney/log:/opt/adbhoney/log + - /data/adbhoney/downloads:/opt/adbhoney/dl + +# Ciscoasa service + ciscoasa: + container_name: ciscoasa + restart: always + tmpfs: + - /tmp/ciscoasa:uid=2000,gid=2000 + networks: + - ciscoasa_local + ports: + - "5000:5000/udp" + - "8443:8443" + image: "dtagdevsec/ciscoasa:2204" + read_only: true + volumes: + - /data/ciscoasa/log:/var/log/ciscoasa + +# CitrixHoneypot service + citrixhoneypot: + container_name: citrixhoneypot + restart: always + networks: + - citrixhoneypot_local + ports: + - "443:443" + image: "dtagdevsec/citrixhoneypot:2204" + read_only: true + volumes: + - /data/citrixhoneypot/logs:/opt/citrixhoneypot/logs + +# Conpot IEC104 service + conpot_IEC104: + container_name: conpot_iec104 + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json + - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log + - CONPOT_TEMPLATE=IEC104 + - CONPOT_TMP=/tmp/conpot + tmpfs: + - 
/tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_IEC104 + ports: + - "161:161/udp" + - "2404:2404" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot guardian_ast service + conpot_guardian_ast: + container_name: conpot_guardian_ast + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json + - CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log + - CONPOT_TEMPLATE=guardian_ast + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_guardian_ast + ports: + - "10001:10001" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot ipmi + conpot_ipmi: + container_name: conpot_ipmi + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json + - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log + - CONPOT_TEMPLATE=ipmi + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_ipmi + ports: + - "623:623/udp" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot kamstrup_382 + conpot_kamstrup_382: + container_name: conpot_kamstrup_382 + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json + - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log + - CONPOT_TEMPLATE=kamstrup_382 + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_kamstrup_382 + ports: + - "1025:1025" + - "50100:50100" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Cowrie service + cowrie: + container_name: cowrie + restart: always + tmpfs: + - /tmp/cowrie:uid=2000,gid=2000 + - /tmp/cowrie/data:uid=2000,gid=2000 + networks: + - cowrie_local + ports: + - "22:22" + - "23:23" + image: "dtagdevsec/cowrie:2204" + read_only: true + volumes: + - /data/cowrie/downloads:/home/cowrie/cowrie/dl + - /data/cowrie/keys:/home/cowrie/cowrie/etc + - /data/cowrie/log:/home/cowrie/cowrie/log + - /data/cowrie/log/tty:/home/cowrie/cowrie/log/tty + +# Ddospot service + ddospot: + container_name: ddospot + restart: always + networks: + - ddospot_local + ports: + - "19:19/udp" + - "53:53/udp" + - "123:123/udp" +# - "161:161/udp" + - "1900:1900/udp" + image: "dtagdevsec/ddospot:2204" + read_only: true + volumes: + - /data/ddospot/log:/opt/ddospot/ddospot/logs + - /data/ddospot/bl:/opt/ddospot/ddospot/bl + - /data/ddospot/db:/opt/ddospot/ddospot/db + +# Dicompot service +# Get the Horos Client for testing: https://horosproject.org/ +# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/ +# Put images (which must be in Dicom DCM format or it will not work!) 
into /data/dicompot/images + dicompot: + container_name: dicompot + restart: always + networks: + - dicompot_local + ports: + - "11112:11112" + image: "dtagdevsec/dicompot:2204" + read_only: true + volumes: + - /data/dicompot/log:/var/log/dicompot +# - /data/dicompot/images:/opt/dicompot/images + +# Dionaea service + dionaea: + container_name: dionaea + stdin_open: true + tty: true + restart: always + networks: + - dionaea_local + ports: + - "20:20" + - "21:21" + - "42:42" + - "69:69/udp" + - "81:81" + - "135:135" + # - "443:443" + - "445:445" + - "1433:1433" + - "1723:1723" + - "1883:1883" + - "3306:3306" + # - "5060:5060" + # - "5060:5060/udp" + # - "5061:5061" + - "27017:27017" + image: "dtagdevsec/dionaea:2204" + read_only: true + volumes: + - /data/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp + - /data/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp + - /data/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www + - /data/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp + - /data/dionaea:/opt/dionaea/var/dionaea + - /data/dionaea/binaries:/opt/dionaea/var/dionaea/binaries + - /data/dionaea/log:/opt/dionaea/var/log + - /data/dionaea/rtp:/opt/dionaea/var/dionaea/rtp + +# ElasticPot service + elasticpot: + container_name: elasticpot + restart: always + networks: + - elasticpot_local + ports: + - "9200:9200" + image: "dtagdevsec/elasticpot:2204" + read_only: true + volumes: + - /data/elasticpot/log:/opt/elasticpot/log + +# Heralding service + heralding: + container_name: heralding + restart: always + tmpfs: + - /tmp/heralding:uid=2000,gid=2000 + networks: + - heralding_local + ports: + # - "21:21" + # - "22:22" + # - "23:23" + # - "25:25" + # - "80:80" + - "110:110" + - "143:143" + # - "443:443" + - "465:465" + - "993:993" + - "995:995" + # - "3306:3306" + # - "3389:3389" + - "1080:1080" + - "5432:5432" + - "5900:5900" + image: "dtagdevsec/heralding:2204" + read_only: true + volumes: + - /data/heralding/log:/var/log/heralding + +# Honeytrap service + honeytrap: + container_name: honeytrap + restart: always + tmpfs: + - /tmp/honeytrap:uid=2000,gid=2000 + network_mode: "host" + cap_add: + - NET_ADMIN + image: "dtagdevsec/honeytrap:2204" + read_only: true + volumes: + - /data/honeytrap/attacks:/opt/honeytrap/var/attacks + - /data/honeytrap/downloads:/opt/honeytrap/var/downloads + - /data/honeytrap/log:/opt/honeytrap/var/log + +# Ipphoney service + ipphoney: + container_name: ipphoney + restart: always + networks: + - ipphoney_local + ports: + - "631:631" + image: "dtagdevsec/ipphoney:2204" + read_only: true + volumes: + - /data/ipphoney/log:/opt/ipphoney/log + +# Mailoney service + mailoney: + container_name: mailoney + restart: always + environment: + - HPFEEDS_SERVER= + - HPFEEDS_IDENT=user + - HPFEEDS_SECRET=pass + - HPFEEDS_PORT=20000 + - HPFEEDS_CHANNELPREFIX=prefix + networks: + - mailoney_local + ports: + - "25:25" + image: "dtagdevsec/mailoney:2204" + read_only: true + volumes: + - /data/mailoney/log:/opt/mailoney/logs + +# Medpot service + medpot: + container_name: medpot + restart: always + networks: + - medpot_local + ports: + - "2575:2575" + image: "dtagdevsec/medpot:2204" + read_only: true + volumes: + - /data/medpot/log/:/var/log/medpot + +# Redishoneypot service + redishoneypot: + container_name: redishoneypot + restart: always + networks: + - redishoneypot_local + ports: + - "6379:6379" + image: "dtagdevsec/redishoneypot:2204" + read_only: true + volumes: + - /data/redishoneypot/log:/var/log/redishoneypot + +# SentryPeer service + sentrypeer: + 
container_name: sentrypeer + restart: always +# SentryPeer can exchange bad actor data via DHT / P2P mode when the ENV below is set to true (1). +# In some cases (e.g. internally deployed T-Pots) this might be confusing, as SentryPeer will show +# the bad actors in its logs. Therefore this option is opt-in. +# environment: +# - SENTRYPEER_PEER_TO_PEER=0 + networks: + - sentrypeer_local + ports: +# - "4222:4222/udp" + - "5060:5060/udp" +# - "127.0.0.1:8082:8082" + image: "dtagdevsec/sentrypeer:2204" + read_only: true + volumes: + - /data/sentrypeer/log:/var/log/sentrypeer + +#### Snare / Tanner +## Tanner Redis Service + tanner_redis: + container_name: tanner_redis + restart: always + tty: true + networks: + - tanner_local + image: "dtagdevsec/redis:2204" + read_only: true + +## PHP Sandbox service + tanner_phpox: + container_name: tanner_phpox + restart: always + tty: true + networks: + - tanner_local + image: "dtagdevsec/phpox:2204" + read_only: true + +## Tanner API Service + tanner_api: + container_name: tanner_api + restart: always + tmpfs: + - /tmp/tanner:uid=2000,gid=2000 + tty: true + networks: + - tanner_local + image: "dtagdevsec/tanner:2204" + read_only: true + volumes: + - /data/tanner/log:/var/log/tanner + command: tannerapi + depends_on: + - tanner_redis + +## Tanner Service + tanner: + container_name: tanner + restart: always + tmpfs: + - /tmp/tanner:uid=2000,gid=2000 + tty: true + networks: + - tanner_local + image: "dtagdevsec/tanner:2204" + command: tanner + read_only: true + volumes: + - /data/tanner/log:/var/log/tanner + - /data/tanner/files:/opt/tanner/files + depends_on: + - tanner_api +# - tanner_web + - tanner_phpox + +## Snare Service + snare: + container_name: snare + restart: always + tty: true + networks: + - tanner_local + ports: + - "80:80" + image: "dtagdevsec/snare:2204" + depends_on: + - tanner + + +################## +#### NSM +################## + +# Fatt service + fatt: + container_name: fatt + restart: always + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/fatt:2204" + volumes: + - /data/fatt/log:/opt/fatt/log + +# P0f service + p0f: + container_name: p0f + restart: always + network_mode: "host" + image: "dtagdevsec/p0f:2204" + read_only: true + volumes: + - /data/p0f/log:/var/log/p0f + +# Suricata service + suricata: + container_name: suricata + restart: always + environment: + # For ET Pro ruleset replace "OPEN" with your OINKCODE + - OINKCODE=OPEN + # Loading external rules from URL + # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com" + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/suricata:2204" + volumes: + - /data/suricata/log:/var/log/suricata + + +################## +#### Tools +################## + +# Ewsposter service + ewsposter: + container_name: ewsposter + restart: always + networks: + - ewsposter_local + environment: + - EWS_HPFEEDS_ENABLE=false + - EWS_HPFEEDS_HOST=host + - EWS_HPFEEDS_PORT=port + - EWS_HPFEEDS_CHANNELS=channels + - EWS_HPFEEDS_IDENT=user + - EWS_HPFEEDS_SECRET=secret + - EWS_HPFEEDS_TLSCERT=false + - EWS_HPFEEDS_FORMAT=json + env_file: + - /opt/tpot/etc/compose/elk_environment + image: "dtagdevsec/ewsposter:2204" + volumes: + - /data:/data + - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip diff --git a/docker/tpotinit/dist/etc/compose/standard.yml b/docker/tpotinit/dist/etc/compose/standard.yml new file mode 100644 index 00000000..e16cfd39 --- /dev/null +++
b/docker/tpotinit/dist/etc/compose/standard.yml @@ -0,0 +1,662 @@ +# T-Pot (Standard) +# Do not erase ports sections, these are used by /opt/tpot/bin/rules.sh to setup iptables ACCEPT rules for NFQ (honeytrap / glutton) +version: '2.3' + +networks: + adbhoney_local: + ciscoasa_local: + citrixhoneypot_local: + conpot_local_IEC104: + conpot_local_guardian_ast: + conpot_local_ipmi: + conpot_local_kamstrup_382: + cowrie_local: + ddospot_local: + dicompot_local: + dionaea_local: + elasticpot_local: + heralding_local: + ipphoney_local: + mailoney_local: + medpot_local: + redishoneypot_local: + tanner_local: + ewsposter_local: + sentrypeer_local: + spiderfoot_local: + +services: + +################## +#### Honeypots +################## + +# Adbhoney service + adbhoney: + container_name: adbhoney + restart: always + networks: + - adbhoney_local + ports: + - "5555:5555" + image: "dtagdevsec/adbhoney:2204" + read_only: true + volumes: + - /data/adbhoney/log:/opt/adbhoney/log + - /data/adbhoney/downloads:/opt/adbhoney/dl + +# Ciscoasa service + ciscoasa: + container_name: ciscoasa + restart: always + tmpfs: + - /tmp/ciscoasa:uid=2000,gid=2000 + networks: + - ciscoasa_local + ports: + - "5000:5000/udp" + - "8443:8443" + image: "dtagdevsec/ciscoasa:2204" + read_only: true + volumes: + - /data/ciscoasa/log:/var/log/ciscoasa + +# CitrixHoneypot service + citrixhoneypot: + container_name: citrixhoneypot + restart: always + networks: + - citrixhoneypot_local + ports: + - "443:443" + image: "dtagdevsec/citrixhoneypot:2204" + read_only: true + volumes: + - /data/citrixhoneypot/logs:/opt/citrixhoneypot/logs + +# Conpot IEC104 service + conpot_IEC104: + container_name: conpot_iec104 + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_IEC104.json + - CONPOT_LOG=/var/log/conpot/conpot_IEC104.log + - CONPOT_TEMPLATE=IEC104 + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_IEC104 + ports: + - "161:161/udp" + - "2404:2404" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot guardian_ast service + conpot_guardian_ast: + container_name: conpot_guardian_ast + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_guardian_ast.json + - CONPOT_LOG=/var/log/conpot/conpot_guardian_ast.log + - CONPOT_TEMPLATE=guardian_ast + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_guardian_ast + ports: + - "10001:10001" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot ipmi + conpot_ipmi: + container_name: conpot_ipmi + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_ipmi.json + - CONPOT_LOG=/var/log/conpot/conpot_ipmi.log + - CONPOT_TEMPLATE=ipmi + - CONPOT_TMP=/tmp/conpot + tmpfs: + - /tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_ipmi + ports: + - "623:623/udp" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Conpot kamstrup_382 + conpot_kamstrup_382: + container_name: conpot_kamstrup_382 + restart: always + environment: + - CONPOT_CONFIG=/etc/conpot/conpot.cfg + - CONPOT_JSON_LOG=/var/log/conpot/conpot_kamstrup_382.json + - CONPOT_LOG=/var/log/conpot/conpot_kamstrup_382.log + - CONPOT_TEMPLATE=kamstrup_382 + - CONPOT_TMP=/tmp/conpot + tmpfs: + - 
/tmp/conpot:uid=2000,gid=2000 + networks: + - conpot_local_kamstrup_382 + ports: + - "1025:1025" + - "50100:50100" + image: "dtagdevsec/conpot:2204" + read_only: true + volumes: + - /data/conpot/log:/var/log/conpot + +# Cowrie service + cowrie: + container_name: cowrie + restart: always + tmpfs: + - /tmp/cowrie:uid=2000,gid=2000 + - /tmp/cowrie/data:uid=2000,gid=2000 + networks: + - cowrie_local + ports: + - "22:22" + - "23:23" + image: "dtagdevsec/cowrie:2204" + read_only: true + volumes: + - /data/cowrie/downloads:/home/cowrie/cowrie/dl + - /data/cowrie/keys:/home/cowrie/cowrie/etc + - /data/cowrie/log:/home/cowrie/cowrie/log + - /data/cowrie/log/tty:/home/cowrie/cowrie/log/tty + +# Ddospot service + ddospot: + container_name: ddospot + restart: always + networks: + - ddospot_local + ports: + - "19:19/udp" + - "53:53/udp" + - "123:123/udp" +# - "161:161/udp" + - "1900:1900/udp" + image: "dtagdevsec/ddospot:2204" + read_only: true + volumes: + - /data/ddospot/log:/opt/ddospot/ddospot/logs + - /data/ddospot/bl:/opt/ddospot/ddospot/bl + - /data/ddospot/db:/opt/ddospot/ddospot/db + +# Dicompot service +# Get the Horos Client for testing: https://horosproject.org/ +# Get Dicom images (CC BY 3.0): https://www.cancerimagingarchive.net/collections/ +# Put images (which must be in Dicom DCM format or it will not work!) into /data/dicompot/images + dicompot: + container_name: dicompot + restart: always + networks: + - dicompot_local + ports: + - "11112:11112" + image: "dtagdevsec/dicompot:2204" + read_only: true + volumes: + - /data/dicompot/log:/var/log/dicompot +# - /data/dicompot/images:/opt/dicompot/images + +# Dionaea service + dionaea: + container_name: dionaea + stdin_open: true + tty: true + restart: always + networks: + - dionaea_local + ports: + - "20:20" + - "21:21" + - "42:42" + - "69:69/udp" + - "81:81" + - "135:135" + # - "443:443" + - "445:445" + - "1433:1433" + - "1723:1723" + - "1883:1883" + - "3306:3306" + # - "5060:5060" + # - "5060:5060/udp" + # - "5061:5061" + - "27017:27017" + image: "dtagdevsec/dionaea:2204" + read_only: true + volumes: + - /data/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp + - /data/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp + - /data/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www + - /data/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp + - /data/dionaea:/opt/dionaea/var/dionaea + - /data/dionaea/binaries:/opt/dionaea/var/dionaea/binaries + - /data/dionaea/log:/opt/dionaea/var/log + - /data/dionaea/rtp:/opt/dionaea/var/dionaea/rtp + +# ElasticPot service + elasticpot: + container_name: elasticpot + restart: always + networks: + - elasticpot_local + ports: + - "9200:9200" + image: "dtagdevsec/elasticpot:2204" + read_only: true + volumes: + - /data/elasticpot/log:/opt/elasticpot/log + +# Heralding service + heralding: + container_name: heralding + restart: always + tmpfs: + - /tmp/heralding:uid=2000,gid=2000 + networks: + - heralding_local + ports: + # - "21:21" + # - "22:22" + # - "23:23" + # - "25:25" + # - "80:80" + - "110:110" + - "143:143" + # - "443:443" + - "465:465" + - "993:993" + - "995:995" + # - "3306:3306" + # - "3389:3389" + - "1080:1080" + - "5432:5432" + - "5900:5900" + image: "dtagdevsec/heralding:2204" + read_only: true + volumes: + - /data/heralding/log:/var/log/heralding + +# Honeytrap service + honeytrap: + container_name: honeytrap + restart: always + tmpfs: + - /tmp/honeytrap:uid=2000,gid=2000 + network_mode: "host" + cap_add: + - NET_ADMIN + image: "dtagdevsec/honeytrap:2204" + read_only: true + 
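+# Honeytrap answers the otherwise unmapped traffic that rules.sh redirects via
+# NFQ (see the note at the top of this file), which is why it runs read-only
+# on the host network with only NET_ADMIN added and has no ports section.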
volumes: + - /data/honeytrap/attacks:/opt/honeytrap/var/attacks + - /data/honeytrap/downloads:/opt/honeytrap/var/downloads + - /data/honeytrap/log:/opt/honeytrap/var/log + +# Ipphoney service + ipphoney: + container_name: ipphoney + restart: always + networks: + - ipphoney_local + ports: + - "631:631" + image: "dtagdevsec/ipphoney:2204" + read_only: true + volumes: + - /data/ipphoney/log:/opt/ipphoney/log + +# Mailoney service + mailoney: + container_name: mailoney + restart: always + environment: + - HPFEEDS_SERVER= + - HPFEEDS_IDENT=user + - HPFEEDS_SECRET=pass + - HPFEEDS_PORT=20000 + - HPFEEDS_CHANNELPREFIX=prefix + networks: + - mailoney_local + ports: + - "25:25" + image: "dtagdevsec/mailoney:2204" + read_only: true + volumes: + - /data/mailoney/log:/opt/mailoney/logs + +# Medpot service + medpot: + container_name: medpot + restart: always + networks: + - medpot_local + ports: + - "2575:2575" + image: "dtagdevsec/medpot:2204" + read_only: true + volumes: + - /data/medpot/log/:/var/log/medpot + +# Redishoneypot service + redishoneypot: + container_name: redishoneypot + restart: always + networks: + - redishoneypot_local + ports: + - "6379:6379" + image: "dtagdevsec/redishoneypot:2204" + read_only: true + volumes: + - /data/redishoneypot/log:/var/log/redishoneypot + +# SentryPeer service + sentrypeer: + container_name: sentrypeer + restart: always +# SentryPeer can exchange bad actor data via DHT / P2P mode when the ENV below is set to true (1). +# In some cases (e.g. internally deployed T-Pots) this might be confusing, as SentryPeer will show +# the bad actors in its logs. Therefore this option is opt-in. +# environment: +# - SENTRYPEER_PEER_TO_PEER=0 + networks: + - sentrypeer_local + ports: +# - "4222:4222/udp" + - "5060:5060/udp" +# - "127.0.0.1:8082:8082" + image: "dtagdevsec/sentrypeer:2204" + read_only: true + volumes: + - /data/sentrypeer/log:/var/log/sentrypeer + +#### Snare / Tanner +## Tanner Redis Service + tanner_redis: + container_name: tanner_redis + restart: always + tty: true + networks: + - tanner_local + image: "dtagdevsec/redis:2204" + read_only: true + +## PHP Sandbox service + tanner_phpox: + container_name: tanner_phpox + restart: always + tty: true + networks: + - tanner_local + image: "dtagdevsec/phpox:2204" + read_only: true + +## Tanner API Service + tanner_api: + container_name: tanner_api + restart: always + tmpfs: + - /tmp/tanner:uid=2000,gid=2000 + tty: true + networks: + - tanner_local + image: "dtagdevsec/tanner:2204" + read_only: true + volumes: + - /data/tanner/log:/var/log/tanner + command: tannerapi + depends_on: + - tanner_redis + +## Tanner Service + tanner: + container_name: tanner + restart: always + tmpfs: + - /tmp/tanner:uid=2000,gid=2000 + tty: true + networks: + - tanner_local + image: "dtagdevsec/tanner:2204" + command: tanner + read_only: true + volumes: + - /data/tanner/log:/var/log/tanner + - /data/tanner/files:/opt/tanner/files + depends_on: + - tanner_api +# - tanner_web + - tanner_phpox + +## Snare Service + snare: + container_name: snare + restart: always + tty: true + networks: + - tanner_local + ports: + - "80:80" + image: "dtagdevsec/snare:2204" + depends_on: + - tanner + + +################## +#### NSM +################## + +# Fatt service + fatt: + container_name: fatt + restart: always + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/fatt:2204" + volumes: + - /data/fatt/log:/opt/fatt/log + +# P0f service + p0f: + container_name: p0f + restart: always + network_mode: "host" +
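+# p0f fingerprints connections passively on the host interface, so it
+# publishes no ports of its own.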
image: "dtagdevsec/p0f:2204" + read_only: true + volumes: + - /data/p0f/log:/var/log/p0f + +# Suricata service + suricata: + container_name: suricata + restart: always + environment: + # For ET Pro ruleset replace "OPEN" with your OINKCODE + - OINKCODE=OPEN + # Loading externel Rules from URL + # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com" + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/suricata:2204" + volumes: + - /data/suricata/log:/var/log/suricata + + +################## +#### Tools +################## + +#### ELK +## Elasticsearch service + elasticsearch: + container_name: elasticsearch + restart: always + environment: + - bootstrap.memory_lock=true + - ES_JAVA_OPTS=-Xms2048m -Xmx2048m + - ES_TMPDIR=/tmp + cap_add: + - IPC_LOCK + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + mem_limit: 4g + ports: + - "127.0.0.1:64298:9200" + image: "dtagdevsec/elasticsearch:2204" + volumes: + - /data:/data + +## Kibana service + kibana: + container_name: kibana + restart: always + depends_on: + elasticsearch: + condition: service_healthy + mem_limit: 1g + ports: + - "127.0.0.1:64296:5601" + image: "dtagdevsec/kibana:2204" + +## Logstash service + logstash: + container_name: logstash + restart: always + environment: + - LS_JAVA_OPTS=-Xms1024m -Xmx1024m + depends_on: + elasticsearch: + condition: service_healthy + env_file: + - /opt/tpot/etc/compose/elk_environment + mem_limit: 2g + image: "dtagdevsec/logstash:2204" + volumes: + - /data:/data + +## Map Redis Service + map_redis: + container_name: map_redis + restart: always + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/redis:2204" + read_only: true + +## Map Web Service + map_web: + container_name: map_web + restart: always + environment: + - MAP_COMMAND=AttackMapServer.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + ports: + - "127.0.0.1:64299:64299" + image: "dtagdevsec/map:2204" + +## Map Data Service + map_data: + container_name: map_data + restart: always + depends_on: + elasticsearch: + condition: service_healthy + environment: + - MAP_COMMAND=DataServer_v2.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/map:2204" +#### /ELK + +# Ewsposter service + ewsposter: + container_name: ewsposter + restart: always + networks: + - ewsposter_local + environment: + - EWS_HPFEEDS_ENABLE=false + - EWS_HPFEEDS_HOST=host + - EWS_HPFEEDS_PORT=port + - EWS_HPFEEDS_CHANNELS=channels + - EWS_HPFEEDS_IDENT=user + - EWS_HPFEEDS_SECRET=secret + - EWS_HPFEEDS_TLSCERT=false + - EWS_HPFEEDS_FORMAT=json + env_file: + - /opt/tpot/etc/compose/elk_environment + image: "dtagdevsec/ewsposter:2204" + volumes: + - /data:/data + - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip + +# Nginx service + nginx: + container_name: nginx + restart: always + tmpfs: + - /var/tmp/nginx/client_body + - /var/tmp/nginx/proxy + - /var/tmp/nginx/fastcgi + - /var/tmp/nginx/uwsgi + - /var/tmp/nginx/scgi + - /run + - /var/lib/nginx/tmp:uid=100,gid=82 + network_mode: "host" + ports: + - "64297:64297" + - "127.0.0.1:64304:64304" + image: "dtagdevsec/nginx:2204" + read_only: true + volumes: + - /data/nginx/cert/:/etc/nginx/cert/:ro + - /data/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro + - /data/nginx/log/:/var/log/nginx/ + +# Spiderfoot service + spiderfoot: + container_name: spiderfoot + restart: always + networks: + - spiderfoot_local + ports: + - 
"127.0.0.1:64303:8080" + image: "dtagdevsec/spiderfoot:2204" + volumes: + - /data/spiderfoot:/home/spiderfoot/.spiderfoot diff --git a/docker/tpotinit/dist/etc/compose/tarpit.yml b/docker/tpotinit/dist/etc/compose/tarpit.yml new file mode 100644 index 00000000..3ca278b8 --- /dev/null +++ b/docker/tpotinit/dist/etc/compose/tarpit.yml @@ -0,0 +1,287 @@ +# T-Pot (Tarpit) +# Do not erase ports sections, these are used by /opt/tpot/bin/rules.sh to setup iptables ACCEPT rules for NFQ (honeytrap / glutton) +version: '2.3' + +networks: + endlessh_local: + hellpot_local: + heralding_local: + ewsposter_local: + spiderfoot_local: + +services: + +################## +#### Honeypots +################## + +# Endlessh service + endlessh: + container_name: endlessh + restart: always + networks: + - endlessh_local + ports: + - "22:2222" + image: "dtagdevsec/endlessh:2204" + read_only: true + volumes: + - /data/endlessh/log:/var/log/endlessh + +# Heralding service + heralding: + container_name: heralding + restart: always + tmpfs: + - /tmp/heralding:uid=2000,gid=2000 + networks: + - heralding_local + ports: + # - "21:21" + # - "22:22" + # - "23:23" + # - "25:25" + # - "80:80" + - "110:110" + - "143:143" + # - "443:443" + - "465:465" + - "993:993" + - "995:995" + # - "3306:3306" + # - "3389:3389" + - "1080:1080" + - "5432:5432" + - "5900:5900" + image: "dtagdevsec/heralding:2204" + read_only: true + volumes: + - /data/heralding/log:/var/log/heralding + +# Honeytrap service + honeytrap: + container_name: honeytrap + restart: always + tmpfs: + - /tmp/honeytrap:uid=2000,gid=2000 + network_mode: "host" + cap_add: + - NET_ADMIN + image: "dtagdevsec/honeytrap:2204" + read_only: true + volumes: + - /data/honeytrap/attacks:/opt/honeytrap/var/attacks + - /data/honeytrap/downloads:/opt/honeytrap/var/downloads + - /data/honeytrap/log:/opt/honeytrap/var/log + +# Hellpot service + hellpot: + container_name: hellpot + restart: always + networks: + - hellpot_local + ports: + - "80:8080" + image: "dtagdevsec/hellpot:2204" + read_only: true + volumes: + - /data/hellpot/log:/var/log/hellpot + +################## +#### NSM +################## + +# Fatt service + fatt: + container_name: fatt + restart: always + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/fatt:2204" + volumes: + - /data/fatt/log:/opt/fatt/log + +# P0f service + p0f: + container_name: p0f + restart: always + network_mode: "host" + image: "dtagdevsec/p0f:2204" + read_only: true + volumes: + - /data/p0f/log:/var/log/p0f + +# Suricata service + suricata: + container_name: suricata + restart: always + environment: + # For ET Pro ruleset replace "OPEN" with your OINKCODE + - OINKCODE=OPEN + # Loading externel Rules from URL + # - FROMURL="https://username:password@yoururl.com|https://username:password@otherurl.com" + network_mode: "host" + cap_add: + - NET_ADMIN + - SYS_NICE + - NET_RAW + image: "dtagdevsec/suricata:2204" + volumes: + - /data/suricata/log:/var/log/suricata + + +################## +#### Tools +################## + +#### ELK +## Elasticsearch service + elasticsearch: + container_name: elasticsearch + restart: always + environment: + - bootstrap.memory_lock=true + - ES_JAVA_OPTS=-Xms2048m -Xmx2048m + - ES_TMPDIR=/tmp + cap_add: + - IPC_LOCK + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + mem_limit: 4g + ports: + - "127.0.0.1:64298:9200" + image: "dtagdevsec/elasticsearch:2204" + volumes: + - /data:/data + +## Kibana service + kibana: + container_name: kibana + restart: 
always + depends_on: + elasticsearch: + condition: service_healthy + mem_limit: 1g + ports: + - "127.0.0.1:64296:5601" + image: "dtagdevsec/kibana:2204" + +## Logstash service + logstash: + container_name: logstash + restart: always + environment: + - LS_JAVA_OPTS=-Xms1024m -Xmx1024m + depends_on: + elasticsearch: + condition: service_healthy + env_file: + - /opt/tpot/etc/compose/elk_environment + mem_limit: 2g + image: "dtagdevsec/logstash:2204" + volumes: + - /data:/data + +## Map Redis Service + map_redis: + container_name: map_redis + restart: always + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/redis:2204" + read_only: true + +## Map Web Service + map_web: + container_name: map_web + restart: always + environment: + - MAP_COMMAND=AttackMapServer.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + ports: + - "127.0.0.1:64299:64299" + image: "dtagdevsec/map:2204" + +## Map Data Service + map_data: + container_name: map_data + restart: always + depends_on: + elasticsearch: + condition: service_healthy + environment: + - MAP_COMMAND=DataServer_v2.py + env_file: + - /opt/tpot/etc/compose/elk_environment + stop_signal: SIGKILL + tty: true + image: "dtagdevsec/map:2204" +#### /ELK + +# Ewsposter service + ewsposter: + container_name: ewsposter + restart: always + networks: + - ewsposter_local + environment: + - EWS_HPFEEDS_ENABLE=false + - EWS_HPFEEDS_HOST=host + - EWS_HPFEEDS_PORT=port + - EWS_HPFEEDS_CHANNELS=channels + - EWS_HPFEEDS_IDENT=user + - EWS_HPFEEDS_SECRET=secret + - EWS_HPFEEDS_TLSCERT=false + - EWS_HPFEEDS_FORMAT=json + env_file: + - /opt/tpot/etc/compose/elk_environment + image: "dtagdevsec/ewsposter:2204" + volumes: + - /data:/data + - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip + +# Nginx service + nginx: + container_name: nginx + restart: always + tmpfs: + - /var/tmp/nginx/client_body + - /var/tmp/nginx/proxy + - /var/tmp/nginx/fastcgi + - /var/tmp/nginx/uwsgi + - /var/tmp/nginx/scgi + - /run + - /var/lib/nginx/tmp:uid=100,gid=82 + network_mode: "host" + ports: + - "64297:64297" + - "127.0.0.1:64304:64304" + image: "dtagdevsec/nginx:2204" + read_only: true + volumes: + - /data/nginx/cert/:/etc/nginx/cert/:ro + - /data/nginx/conf/nginxpasswd:/etc/nginx/nginxpasswd:ro + - /data/nginx/log/:/var/log/nginx/ + +# Spiderfoot service + spiderfoot: + container_name: spiderfoot + restart: always + networks: + - spiderfoot_local + ports: + - "127.0.0.1:64303:8080" + image: "dtagdevsec/spiderfoot:2204" + volumes: + - /data/spiderfoot:/home/spiderfoot/.spiderfoot diff --git a/docker/tpotinit/dist/etc/logrotate/logrotate.conf b/docker/tpotinit/dist/etc/logrotate/logrotate.conf new file mode 100644 index 00000000..07223601 --- /dev/null +++ b/docker/tpotinit/dist/etc/logrotate/logrotate.conf @@ -0,0 +1,69 @@ +/data/adbhoney/log/*.json +/data/adbhoney/log/*.log +/data/ciscoasa/log/ciscoasa.log +/data/citrixhoneypot/logs/server.log +/data/conpot/log/conpot*.json +/data/conpot/log/conpot*.log +/data/cowrie/log/cowrie.json +/data/cowrie/log/cowrie-textlog.log +/data/cowrie/log/lastlog.txt +/data/ddospot/log/*.log +/data/dicompot/log/dicompot.log +/data/dionaea/log/dionaea.json +/data/dionaea/log/dionaea.sqlite +/data/dionaea/dionaea-errors.log +/data/elasticpot/log/elasticpot.log +/data/elasticpot/log/elasticpot.json +/data/elk/log/*.log +/data/endlessh/log/*.log +/data/fatt/log/fatt.log +/data/glutton/log/*.log +/data/glutton/log/*.err +/data/hellpot/log/*.log +/data/heralding/log/*.log +/data/heralding/log/*.csv 
+/data/heralding/log/*.json +/data/honeypots/log/*.log +/data/honeysap/log/*.log +/data/honeytrap/log/*.log +/data/honeytrap/log/*.json +/data/ipphoney/log/*.json +/data/log4pot/log/*.log +/data/mailoney/log/*.log +/data/medpot/log/*.log +/data/nginx/log/*.log +/data/p0f/log/p0f.json +/data/rdpy/log/rdpy.log +/data/redishoneypot/log/*.log +/data/sentrypeer/log/*.json +/data/suricata/log/*.log +/data/suricata/log/*.json +/data/tanner/log/*.json +{ + su tpot tpot + copytruncate + create 770 tpot tpot + daily + missingok + notifempty + rotate 30 + compress + compresscmd /usr/bin/pigz +} + +/data/adbhoney/downloads.tgz +/data/cowrie/log/ttylogs.tgz +/data/cowrie/downloads.tgz +/data/dionaea/bistreams.tgz +/data/dionaea/binaries.tgz +/data/honeytrap/attacks.tgz +/data/honeytrap/downloads.tgz +{ + su tpot tpot + copytruncate + create 770 tpot tpot + daily + missingok + notifempty + rotate 30 +}
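+ +# To validate this policy without rotating anything, do a dry run +# (a sketch; the config path below follows the 22.04 layout and is assumed here): +# logrotate -d /opt/tpot/etc/logrotate/logrotate.conf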
diff --git a/docker/tpotinit/dist/etc/objects/elkbase.tgz b/docker/tpotinit/dist/etc/objects/elkbase.tgz new file mode 100644 index 00000000..8370fec4 Binary files /dev/null and b/docker/tpotinit/dist/etc/objects/elkbase.tgz differ diff --git a/docker/tpotinit/dist/etc/objects/kibana_export.ndjson.zip b/docker/tpotinit/dist/etc/objects/kibana_export.ndjson.zip new file mode 100644 index 00000000..121d12d2 Binary files /dev/null and b/docker/tpotinit/dist/etc/objects/kibana_export.ndjson.zip differ diff --git a/preview/docker/docker-compose.yml b/docker/tpotinit/docker-compose.yml similarity index 86% rename from preview/docker/docker-compose.yml rename to docker/tpotinit/docker-compose.yml index 2def27a6..dccc996d 100644 --- a/preview/docker/docker-compose.yml +++ b/docker/tpotinit/docker-compose.yml @@ -7,7 +7,7 @@ services: build: . container_name: tpotinit restart: "no" - image: "dtagdevsec/tpotinit:2204" + image: "dtagdevsec/tpotinit:dev" # volumes: # - /var/run/docker.sock:/var/run/docker.sock:ro network_mode: "host" diff --git a/preview/docker/macvlan/docker-compose.yml b/docker/tpotinit/macvlan/docker-compose.yml similarity index 100% rename from preview/docker/macvlan/docker-compose.yml rename to docker/tpotinit/macvlan/docker-compose.yml diff --git a/preview/env.example b/env.example similarity index 99% rename from preview/env.example rename to env.example index f11f60cc..5db49a98 100644 --- a/preview/env.example +++ b/env.example @@ -35,7 +35,7 @@ TPOT_DOCKER_COMPOSE=./docker-compose.yml TPOT_REPO=dtagdevsec # T-Pot Version Tag -TPOT_VERSION=2204 +TPOT_VERSION=dev # T-Pot Pull Policy # always: (T-Pot default) Compose implementations SHOULD always pull the image from the registry. diff --git a/preview/installer/debian/install.sh b/installer/debian/install.sh similarity index 84% rename from preview/installer/debian/install.sh rename to installer/debian/install.sh index 821e4b4d..a15f68ee 100755 --- a/preview/installer/debian/install.sh +++ b/installer/debian/install.sh @@ -52,9 +52,15 @@ sudo systemctl enable docker sudo systemctl stop docker sudo systemctl start docker -# Add user to Docker group -echo "Adding user to Docker group..." +# Add T-Pot user and group to avoid permission-denied errors on the data folder while keeping permissions 770 +echo "Creating T-Pot group and user ..." +sudo addgroup --gid 2000 tpot +sudo adduser --system --no-create-home --uid 2000 --disabled-password --disabled-login --gid 2000 tpot +# Add user to Docker and T-Pot groups +echo "Adding $(whoami) to Docker group..." sudo usermod -aG docker $(whoami) +echo "Adding $(whoami) to T-Pot group..." +sudo usermod -aG tpot $(whoami) # Add aliases echo "Adding aliases..." diff --git a/preview/installer/debian/sudo-install.sh b/installer/debian/sudo-install.sh similarity index 100% rename from preview/installer/debian/sudo-install.sh rename to installer/debian/sudo-install.sh diff --git a/preview/installer/debian/uninstall.sh b/installer/debian/uninstall.sh similarity index 81% rename from preview/installer/debian/uninstall.sh rename to installer/debian/uninstall.sh index 323a8d40..66435b07 100755 --- a/preview/installer/debian/uninstall.sh +++ b/installer/debian/uninstall.sh @@ -36,9 +36,16 @@ sudo apt-get -y autoremove sudo rm -rf /etc/apt/sources.list.d/docker.list sudo rm -rf /etc/apt/keyrings/docker.gpg -# Remove user from Docker group -echo "Removing user from Docker group..." +# Remove user from Docker and T-Pot groups +echo "Removing $(whoami) from T-Pot group..." +sudo deluser $(whoami) tpot +echo "Removing $(whoami) from Docker group..." sudo deluser $(whoami) docker +# Remove T-Pot user and group +echo "Removing T-Pot user..." +sudo deluser tpot +echo "Removing T-Pot group..." +sudo delgroup tpot # Remove aliases echo "Removing aliases..." diff --git a/preview/installer/fedora/install.sh b/installer/fedora/install.sh similarity index 86% rename from preview/installer/fedora/install.sh rename to installer/fedora/install.sh index 6226077a..5a22f56a 100755 --- a/preview/installer/fedora/install.sh +++ b/installer/fedora/install.sh @@ -60,9 +60,15 @@ sudo systemctl start docker echo "Installing recommended packages..." sudo dnf -y install bash-completion git grc net-tools -# Add user to Docker group -echo "Adding user to Docker group..." +# Add T-Pot user and group to avoid permission-denied errors on the data folder while keeping permissions 770 +echo "Creating T-Pot group and user..." +sudo groupadd -g 2000 tpot +sudo useradd -r -u 2000 -g 2000 -M -s /sbin/nologin tpot +# Add user to Docker and T-Pot groups +echo "Adding $(whoami) to Docker group..." sudo usermod -aG docker $(whoami) +echo "Adding $(whoami) to T-Pot group..." +sudo usermod -aG tpot $(whoami) # Add aliases echo "Adding aliases..." diff --git a/preview/installer/fedora/uninstall.sh b/installer/fedora/uninstall.sh similarity index 86% rename from preview/installer/fedora/uninstall.sh rename to installer/fedora/uninstall.sh index 2b0f2f38..8726647e 100755 --- a/preview/installer/fedora/uninstall.sh +++ b/installer/fedora/uninstall.sh @@ -55,9 +55,16 @@ sudo dnf -y remove docker-ce docker-ce-cli containerd.io docker-buildx-plugin do sudo dnf config-manager --disable docker-ce-stable sudo rm /etc/yum.repos.d/docker-ce.repo -# Remove user from Docker group -echo "Removing user from Docker group..." +# Remove user from Docker and T-Pot groups +echo "Removing $(whoami) from T-Pot group..." +sudo gpasswd -d $(whoami) tpot +echo "Removing $(whoami) from Docker group..." sudo gpasswd -d $(whoami) docker +# Remove T-Pot user and group +echo "Removing T-Pot user..." +sudo userdel tpot +echo "Removing T-Pot group..." +sudo groupdel tpot # Remove aliases echo "Removing aliases..." diff --git a/preview/installer/suse/install.sh b/installer/suse/install.sh similarity index 80% rename from preview/installer/suse/install.sh rename to installer/suse/install.sh index 5096d32f..121c71fd 100755 --- a/preview/installer/suse/install.sh +++ b/installer/suse/install.sh @@ -44,9 +44,16 @@ echo "Enabling and starting docker..." systemctl enable docker systemctl start docker -# Add user to Docker group -echo "Adding user to Docker group..." 
-sudo usermod -aG docker $(whoami) +# Add T-Pot user and group to avoid permission-denied errors on the data folder while keeping permissions 770 +echo "Creating T-Pot group and user ..." +sudo groupadd -g 2000 tpot +sudo useradd -r -u 2000 -g 2000 -s /sbin/nologin tpot + +# Add user to Docker and T-Pot groups +echo "Adding $(whoami) to Docker group..." +sudo usermod -a -G docker $(whoami) +echo "Adding $(whoami) to T-Pot group..." +sudo usermod -a -G tpot $(whoami) # Add aliases echo "Adding aliases..." diff --git a/preview/installer/suse/uninstall.sh b/installer/suse/uninstall.sh similarity index 82% rename from preview/installer/suse/uninstall.sh rename to installer/suse/uninstall.sh index 632e44d2..b794a43c 100755 --- a/preview/installer/suse/uninstall.sh +++ b/installer/suse/uninstall.sh @@ -40,9 +40,16 @@ sudo systemctl disable docker sudo zypper -n remove docker docker-compose sudo zypper -n install cups postfix -# Remove user from Docker group -echo "Removing user from Docker group..." +# Remove user from Docker and T-Pot groups +echo "Removing $(whoami) from T-Pot group..." +sudo gpasswd -d $(whoami) tpot +echo "Removing $(whoami) from Docker group..." sudo gpasswd -d $(whoami) docker +# Remove T-Pot user and group +echo "Removing T-Pot user..." +sudo userdel tpot +echo "Removing T-Pot group..." +sudo groupdel tpot # Remove aliases echo "Removing aliases..." diff --git a/preview/installer/ubuntu/install.sh b/installer/ubuntu/install.sh similarity index 86% rename from preview/installer/ubuntu/install.sh rename to installer/ubuntu/install.sh index 876526f8..4061c751 100755 --- a/preview/installer/ubuntu/install.sh +++ b/installer/ubuntu/install.sh @@ -60,9 +60,15 @@ sudo systemctl enable docker sudo systemctl stop docker sudo systemctl start docker -# Add user to Docker group -echo "Adding user to Docker group..." +# Add T-Pot user and group to avoid permission-denied errors on the data folder while keeping permissions 770 +echo "Creating T-Pot group and user ..." +sudo addgroup --gid 2000 tpot +sudo adduser --system --no-create-home --uid 2000 --disabled-password --disabled-login --gid 2000 tpot +# Add user to Docker and T-Pot groups +echo "Adding $(whoami) to Docker group..." sudo usermod -aG docker $(whoami) +echo "Adding $(whoami) to T-Pot group..." +sudo usermod -aG tpot $(whoami) # Add aliases echo "Adding aliases..." diff --git a/preview/installer/ubuntu/uninstall.sh b/installer/ubuntu/uninstall.sh similarity index 84% rename from preview/installer/ubuntu/uninstall.sh rename to installer/ubuntu/uninstall.sh index fd5995cf..82b9939b 100755 --- a/preview/installer/ubuntu/uninstall.sh +++ b/installer/ubuntu/uninstall.sh @@ -43,9 +43,16 @@ sudo apt-get -y autoremove sudo rm -rf /etc/apt/sources.list.d/docker.list sudo rm -rf /etc/apt/keyrings/docker.gpg -# Remove user from Docker group -echo "Removing user from Docker group..." +# Remove user from Docker and T-Pot groups +echo "Removing $(whoami) from T-Pot group..." +sudo deluser $(whoami) tpot +echo "Removing $(whoami) from Docker group..." sudo deluser $(whoami) docker +# Remove T-Pot user and group +echo "Removing T-Pot user..." +sudo deluser tpot +echo "Removing T-Pot group..." +sudo delgroup tpot # Remove aliases echo "Removing aliases..." diff --git a/preview/.env b/preview/.env deleted file mode 100644 index f55ff641..00000000 --- a/preview/.env +++ /dev/null @@ -1,52 +0,0 @@ -# T-Pot config file. Do not remove. 
- -# Set Web username and password here, only required for first run -# Removing the password after first run is recommended -# You can always add or remove users as you see fit using htpasswd: -# htpasswd -b -c //nginx/conf/nginxpasswd -WEB_USER=changeme -WEB_PW=changeme - -# T-Pot Blackhole -# ENABLED: T-Pot will download a db of known mass scanners and nullroute them -# Be aware, this will put T-Pot off the map for stealth reasons and -# you will get less traffic. Routes will active until reboot and will -# be re-added with every T-Pot start until disabled. -# DISABLED: This is the default and no stealth efforts are in place. -TPOT_BLACKHOLE=DISABLED - -################################################################################### -# NEVER MAKE CHANGES TO THIS SECTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!! # -################################################################################### - -# T-Pot Landing page provides Cockpit Link -COCKPIT=false - -# docker.sock Path -TPOT_DOCKER_SOCK=/var/run/docker.sock - -# docker compose .env -TPOT_DOCKER_ENV=./.env - -# Docker-Compose file -TPOT_DOCKER_COMPOSE=./docker-compose.yml - -# T-Pot Repo -TPOT_REPO=dtagdevsec - -# T-Pot Version Tag -TPOT_VERSION=2204 - -# T-Pot Pull Policy -# always: (T-Pot default) Compose implementations SHOULD always pull the image from the registry. -# never: Compose implementations SHOULD NOT pull the image from a registry and SHOULD rely on the platform cached image. -# missing: Compose implementations SHOULD pull the image only if it's not available in the platform cache. -# build: Compose implementations SHOULD build the image. Compose implementations SHOULD rebuild the image if already present. -TPOT_PULL_POLICY=always - -# T-Pot Data Path -TPOT_DATA_PATH=./data - -# OSType (linux, mac, win) -# Most docker features are available on linux -TPOT_OSTYPE=linux diff --git a/preview/docker/Dockerfile b/preview/docker/Dockerfile deleted file mode 100644 index 93af7b17..00000000 --- a/preview/docker/Dockerfile +++ /dev/null @@ -1,61 +0,0 @@ -FROM alpine:edge -# -# Include dist -COPY dist/ /root/dist/ -# -# Get and install dependencies & packages -RUN apk --no-cache -U add \ - aria2 \ - apache2-utils \ - bash \ - bind-tools \ - conntrack-tools \ - curl \ - ethtool \ - figlet \ - git \ - grep \ - iproute2 \ - iptables \ - jq \ - logrotate \ - lsblk \ - net-tools \ - openssl \ - pigz \ - tar \ - uuidgen && \ - apk --no-cache -U add --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community \ - yq && \ -# -# Setup user - addgroup -g 2000 tpot && \ - adduser -S -s /bin/ash -u 2000 -D -g 2000 tpot && \ -# -# Install tpot - git clone --depth=1 https://github.com/telekom-security/tpotce /opt/tpot && \ - cd /opt/tpot && \ - sed -i "s#/opt/tpot/etc/logrotate/status#/data/tpot/etc/logrotate/status#g" bin/clean.sh && \ - sed -i "s#/opt/tpot/etc/compose/elk_environment#/data/tpot/etc/compose/elk_environment#g" bin/clean.sh && \ - sed -i "s#/usr/sbin/iptables-legacy#/sbin/iptables-legacy#g" bin/rules.sh && \ - sed -i "s/tr -d '\", '/tr -d '\", ,#,-'/g" bin/rules.sh && \ - sed -i "s#/opt/tpot/etc/compose/elk_environment#/data/tpot/etc/compose/elk_environment#g" bin/updateip.sh && \ - sed -i "s#.*myLOCALIP=.*#myLOCALIP=\$(/sbin/ip address show | awk '/inet .*brd/{split(\$2,a,\"/\"); print a[1]; exit}')#" bin/updateip.sh && \ - sed -i "s#.*myUUID=.*#myUUID=\$(cat /data/uuid)#" bin/updateip.sh && \ - sed -i "s#/etc/issue#/tmp/etc/issue#g" bin/updateip.sh && \ - sed -i "/toilet/d" bin/updateip.sh && \ - sed -i 
"/source \/etc\/environment/d" bin/updateip.sh && \ - touch /opt/tpot/etc/tpot.yml && \ - cp /root/dist/entrypoint.sh . && \ -# -# Clean up - apk del --purge git && \ - rm -rf /root/* /tmp/* && \ - rm -rf /root/.cache /opt/tpot/.git && \ - rm -rf /var/cache/apk/* -# -# Run tpotinit -WORKDIR /opt/tpot -HEALTHCHECK --retries=1000 --interval=5s CMD test -f /tmp/success || exit 1 -STOPSIGNAL SIGKILL -CMD ["/opt/tpot/entrypoint.sh"] diff --git a/version b/version index c207f759..34291177 100644 --- a/version +++ b/version @@ -1 +1 @@ -22.04.0 +23.12.0