276 Commits

Author SHA1 Message Date
e2752458d4 bump elk to 7.17.0 to support 8.0.1 in 22.x 2022-03-18 16:23:27 +00:00
cac7cdcec6 fix data fields with regard to the request field, log4pot, nginx 2022-01-17 17:10:48 +01:00
ed79b72869 Update objects for qeeqbox honeypots 2022-01-13 15:22:49 +01:00
e7e521edba tweaking 2022-01-12 01:28:06 +00:00
7d012726b7 tweaking 2022-01-11 15:43:45 +00:00
d6ea4cdde2 prep for elk 8.x, pave way for next t-pot release 2022-01-07 18:03:00 +00:00
f441ec0bfc Merge branch 'master' of https://github.com/telekom-security/tpotce 2022-01-07 15:42:46 +00:00
fb49a77180 tweaking, json_batch transfer to hive 2022-01-07 15:41:57 +00:00
5dc6350106 New objects for next release 2022-01-06 17:47:39 +01:00
202246a3cd tweaking 2022-01-06 16:45:51 +00:00
467dfae320 cleanup, move to correct folders 2022-01-04 18:35:44 +00:00
788a4c4f98 prepare for new attack map feature
tweaking, cleanup
2022-01-04 16:16:27 +00:00
0178b4c4d3 Work in progress!
This is the foundation for the distributed T-Pot feature,
highly work in progress, only works with local docker image builds,
will be available for prod for upcoming T-Pot 22xx.
2022-01-03 18:25:31 +00:00
68b080a3a8 Work in progress!
This is the foundation for the distributed T-Pot feature,
highly work in progress, only works with local docker image builds,
will be available for prod for upcoming T-Pot 22xx.
2022-01-03 18:24:17 +00:00
ef1a1fa057 Merge branch 'master' of https://github.com/telekom-security/tpotce 2021-12-21 11:37:18 +00:00
daf41b4b71 tweaking 2021-12-21 11:36:38 +00:00
0bca794fe7 bump log4pot to latest master
rebuild on ubuntu for payload download support
2021-12-20 18:40:38 +00:00
aaccb43471 bump elk stack to 7.16.2
ELK 7.16.2 includes log4j 2.17.0 to address latest issues
2021-12-20 11:17:18 +00:00
beb9abca16 fixes #973 2021-12-17 02:25:10 +01:00
fb93d85119 Log4Pot Credits, Install Flavor 2021-12-16 23:10:25 +01:00
ceee197e68 Add Kibana Objects for Log4Pot 2021-12-16 21:53:04 +00:00
b0339610a2 Prep for Log4Pot integration 2021-12-16 20:25:40 +00:00
a98b447556 ELK 7.16.1 fixes log4j vulns. 2021-12-13 15:59:48 +00:00
b4c1805551 disable log4j lookups 2021-12-13 10:54:07 +00:00
0ef2e89cac remove log4j JndiLookup Class 2021-12-13 10:35:22 +00:00
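These two commits correspond to the two widely documented Log4Shell mitigations; a minimal shell sketch (the jar path is illustrative, the exact images and paths patched in T-Pot are not shown in this log):
```
# Mitigation 1: disable message lookups (Log4j >= 2.10)
export LOG4J_FORMAT_MSG_NO_LOOKUPS=true   # equivalent to -Dlog4j2.formatMsgNoLookups=true

# Mitigation 2: strip the JndiLookup class from the jar (path is illustrative)
zip -q -d /usr/share/elasticsearch/lib/log4j-core-*.jar \
  org/apache/logging/log4j/core/lookup/JndiLookup.class
```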
b76f0f109f tweaking 2021-12-09 22:17:30 +00:00
5f29516197 tweaking 2021-12-08 23:55:13 +00:00
ff1c12e848 Disable FATT submissions for now 2021-11-30 16:04:58 +01:00
2ee2d08e5a rename 2021-11-20 13:11:12 +00:00
3103c94355 add mini edition 2021-11-20 13:08:35 +00:00
a3be0011fb Merge branch 'master' of https://github.com/telekom-security/tpotce 2021-11-19 23:22:11 +00:00
ce39e1bd4f logstash logging for honeypots 2021-11-19 23:20:13 +00:00
6fb2fa783a update for new honeypots 2021-11-18 21:32:48 +01:00
e76a643296 Update Readme for new honeypots 2021-11-18 20:58:17 +01:00
6c155ad87f add qeeqbox honeypots 2021-11-18 19:55:44 +00:00
81b8242c68 bump ewsposter to latest master 2021-11-18 13:48:02 +00:00
d2cbf6ebbc build fix for tanner 2021-11-18 13:39:05 +00:00
591be0791b Fixes #939
https://stackoverflow.com/questions/28785383/how-to-disable-persistence-with-redis
2021-11-18 13:05:01 +01:00
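The linked thread boils down to switching off RDB snapshots and AOF; a minimal sketch of a fully non-persistent Redis (the actual tanner/redis configuration is not shown in this log):
```
# Run Redis without any on-disk persistence: no RDB snapshots, no append-only file
redis-server --save "" --appendonly no
```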
adee51bee5 bump heralding to latest master 2021-11-16 18:23:25 +00:00
b214db6e9d bump cowrie to 2.3.0, ewsposter to 1.21 2021-11-05 17:43:47 +00:00
2694c05953 Updated Kibana objects for new honeypots 2021-11-02 20:19:02 +01:00
c9b909e51d finetune new honeypots logging 2021-11-02 19:13:28 +00:00
db74c610ad bump hellpot to 0.3 and train config for CVE-2021-39341 2021-11-01 13:36:44 +00:00
ea624351b5 finetuning logstash.conf for new honeypots 2021-10-29 16:28:16 +00:00
c1eb9f7216 logstash parsing for ddospot, hellpot 2021-10-28 18:57:55 +00:00
1a844d13ba start integrating new honeypots into ELK 2021-10-27 16:14:52 +00:00
348a5d572b bump elastic stack to 7.15.1 2021-10-26 13:56:38 +00:00
77dcd771df move debian to ubuntu 20.04 2021-10-05 15:26:02 +00:00
b566b39688 move honeytrap to ubuntu 20.04
thanks to @adepasquale's work
2021-10-04 20:19:40 +00:00
8285657e5d remove snare, tanner from nextgen 2021-10-01 16:26:18 +00:00
dd7fb325b6 add new honeypots to nextgen to prep for ELK setup
honeytrap testing
2021-10-01 16:18:10 +00:00
ab092faa2c prep conpot rebuild 2021-10-01 15:10:37 +00:00
28681ef398 prep heralding rebuild 2021-10-01 14:32:24 +00:00
eefd38a335 bump elastic stack to 7.15.0
no image upgrade before 7.15.1
2021-09-30 20:40:42 +00:00
261b380db7 cleanup fatt, bump suricata to 6.0.3 2021-09-30 19:39:59 +00:00
77e2dd2da6 cleanup spiderfoot, prep fatt rebuild 2021-09-30 19:14:11 +00:00
183136c1f1 bump spiderfoot to v3.4 2021-09-30 17:03:28 +00:00
1fe0247095 prep p0f, medpot for image rebuild 2021-09-30 15:58:10 +00:00
adab02a067 prep for updated nginx image 2021-09-28 19:51:08 +00:00
58aa3162cb prep for ewsposter fix 2021-09-28 15:58:15 +00:00
405ee521a6 prep ubuntu rebuild for honeytrap 2021-09-24 17:09:55 +00:00
9a3465aef1 bump cowrie to latest master, prep for rebuild 2021-09-24 17:03:55 +00:00
e23c57e58d some tests with dionaea 2021-09-24 16:10:14 +00:00
44749fe9e7 bump honeysap to alpine3.11 2021-09-24 15:47:05 +00:00
f5d11bb008 bump snare, tanner, prep for rebuild 2021-09-24 15:18:59 +00:00
efa9d991ba revert honeypy to alpine 2021-09-23 22:28:33 +00:00
a7faafeba9 test mailoney 2021-09-23 21:50:37 +00:00
f05abc07c9 cleanup 2021-09-23 21:20:25 +00:00
eeae863820 revert to alpine 2021-09-23 21:11:24 +00:00
9f9d1a65bd debian test 2021-09-23 20:53:38 +00:00
a48840d1b2 prep rdpy for debian rebuild 2021-09-23 20:15:33 +00:00
48de3d846c fix typo in crontab 2021-09-23 10:00:20 +00:00
122135dd80 prepare rebuilding dicompot 2021-09-20 21:57:39 +00:00
8576e576a6 prep mailoney for rebuild 2021-09-20 20:20:04 +00:00
32e1e8a8ea prep for rebuilding ciscoasa, elasticpot, honeypy 2021-09-20 16:08:16 +00:00
ed224215a4 tweak cyberchef image for better security, prep citrixhoneypot for rebuild 2021-09-20 14:29:42 +00:00
e9c03e512c prep rebuild for adbhoney, cyberchef 2021-09-20 09:15:28 +00:00
ed0c5aa89f add logstash-output-gelf, fixes #861 2021-09-15 17:39:04 +00:00
d5290e68ff Update Kibana objects 2021-09-15 18:00:56 +02:00
9de1bdd0b5 tweaking, bump elastic stack to 7.14.1, rebuild dashboards 2021-09-15 15:58:44 +00:00
00457b8b70 Merge pull request #887 from shaderecker/ansible
Minor Ansible improvements
2021-09-02 09:50:56 +02:00
e26600ad75 Minor Ansible improvements 2021-09-01 21:55:22 +02:00
310f560c65 Update credits and licenses 2021-08-26 15:14:04 +02:00
06ef8850fe prep for ELK 7.13.4, start full integration of new honeypots 2021-08-25 15:04:27 +00:00
05a7d33c9f add paths, logrotate settings, cleaner settings for new honeypots 2021-08-24 11:51:01 +00:00
baaba5311a Merge pull request #881 from brianlechthaler/patch-5
🔄 🇯🇵 Update AMIs & add region ap-northeast-3
2021-08-24 12:40:48 +02:00
35014a15ca 🔄 🇯🇵 Update AMIs & add region ap-northeast-3
This commit updates all AMIs to debian-10-arm64-20210721-710 and adds the AWS region 🇯🇵 ap-northeast-3 (Osaka, Japan) to the list.
2021-08-21 14:14:09 -07:00
2aa4c3c2c6 disable ntp server on host, start working on ddospot 2021-07-09 23:16:19 +00:00
0867d8f011 prep for redishoneypot 2021-07-05 19:59:44 +00:00
a2071eb4d2 hellpot cleanup and prep for endlessh 2021-07-03 15:51:32 +00:00
e6402b793c start including hellpot 2021-07-02 22:12:47 +00:00
4cb84166c5 bump ewsposter to 1.2.0, elk stack to 7.13.2 2021-06-28 16:30:40 +00:00
b6be931641 prep for new ewsposter, rollout to follow next week 2021-06-24 16:26:53 +00:00
f51ab7ec0f prepare to bump elastic stack to 7.13.1 2021-06-10 17:03:22 +00:00
f22ec3a360 Merge branch 'master' of https://github.com/telekom-security/tpotce 2021-05-26 11:01:47 +00:00
de38e5e86f Rebuild Logstash, Elasticsearch
Setting static limits for Elasticsearch / Logstash on Xms, Xmx and container RAM results in unwanted side effects for some installations. With Elastic supporting dynamic heap management for Java 14+, we now use the OpenJDK 16 JRE and remove these limitations. This should improve stability for T-Pot, provided the minimum requirements are met.
2021-05-26 11:00:49 +00:00
bd9cb43960 Merge pull request #837 from shaderecker/terraform
Terraform improvements
2021-05-19 16:05:01 +02:00
7763ceff4c Test connection before git clone
Test the connection to github before cloning the repository.
Previously it could happen that the git clone failed due to the external network connection not being established immediately after boot.
2021-05-19 15:57:30 +02:00
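The change itself lives in the Ansible playbook and is not shown here; expressed as a shell sketch, the idea is simply to retry until GitHub answers before cloning:
```
# Wait for outbound connectivity to GitHub, then clone (sketch of the idea)
until git ls-remote https://github.com/telekom-security/tpotce >/dev/null 2>&1; do
  echo "GitHub not reachable yet, retrying in 5s ..."
  sleep 5
done
git clone https://github.com/telekom-security/tpotce
```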
0e1a86f93b Use b64_url for eip bandwidth name
Missed this one in #819
2021-05-19 14:28:40 +02:00
0f0c728c90 Merge pull request #836 from shaderecker/tf-disk
TF: Use SAS disk on OTC
2021-05-18 17:03:42 +02:00
16d5a6e0c1 Use SAS disk 2021-05-18 16:49:56 +02:00
0c5ab33b8a bump elastic stack to 7.12.1 2021-05-17 16:32:03 +00:00
cd91183b8b Prep objects for 7.12.1 2021-05-12 15:38:04 +02:00
12c4308b89 Merge pull request #818 from trixam/suricata-updatescript
Update update.sh
2021-05-03 14:43:01 +02:00
bbf5d70d98 Update sensor.yml 2021-05-03 14:42:39 +02:00
60e57bce52 Update update.sh
Adding quotation marks for $URL
2021-05-03 14:40:08 +02:00
460214f848 Update sensor.yml 2021-05-03 14:37:52 +02:00
334b98c01b Merge pull request #819 from shaderecker/tf-ecs-name
Terraform: Use b64_url for ecs name
2021-04-26 11:34:07 +02:00
0493e5eb3d Use b64_url for ecs name
Previously it could happen that special characters were generated in the name.
Now it allows only letters, digits, underscore & hyphen to conform with ecs naming requirements.
2021-04-26 11:31:47 +02:00
dceaa984c9 Update update.sh
Download rules via URL
2021-04-21 12:44:36 +02:00
8abd1be5bb Merge pull request #815 from shaderecker/cloud-updates
Cloud updates (Ansible & Terraform)
2021-04-15 17:35:57 +02:00
d0cc43e89e Ansible: Create VM: Use default timeout and explicitly declare auto_ip 2021-04-15 17:00:13 +02:00
8c19ea68c8 Ansible: Use OTC nameservers for subnet 2021-04-15 16:58:56 +02:00
0649d56521 Improve Ansible resource naming 2021-04-15 16:58:19 +02:00
628ea0224c Update Terraform readme 2021-04-15 16:34:52 +02:00
c9ec5347d5 TF: Formatting 2021-04-15 16:23:49 +02:00
de3d7c7f4f TF: Check input variables also for AWS 2021-04-15 16:22:55 +02:00
b0ea90c65b TF: Rework ECS and EIP setup 2021-04-15 16:18:17 +02:00
0c7d0d0eaa TF: Check if input variables are defined 2021-04-15 15:16:33 +02:00
aec0761580 TF: More formatting 2021-04-15 14:59:03 +02:00
77e0b8c313 Update provider versions 2021-04-15 14:51:12 +02:00
c659572df1 TF: Formatting 2021-04-15 14:44:55 +02:00
37120a7324 Update gitignore 2021-04-15 12:37:30 +02:00
532907c27c rebuild honeytrap 2021-02-25 11:57:16 +00:00
fb860fb861 fix protocols for conpot testing 2021-02-25 11:55:51 +00:00
1c7e5274aa fix protocols for conpot
fixes #781
2021-02-25 11:32:59 +00:00
7587efaed8 cleanup 2021-02-22 11:21:18 +00:00
f7d696007c Release 20.06.2 2021-02-22 10:51:51 +00:00
46e297386b Update CHANGELOG.md 2021-02-19 15:55:22 +01:00
7d423f29da rebuild snare, tanner, redis, phpox 2021-02-19 13:02:08 +00:00
41c0255ea6 Add Elastic License info 2021-02-19 10:21:53 +00:00
d5f0ceb15b push elastic stack to 7.11.1 2021-02-19 10:17:30 +00:00
5f38e730d4 rebuild conpot for latest alpine edge, bump to latest master 2021-02-18 17:39:52 +00:00
c48ad0863d bump ewsposter to latest master 2021-02-18 16:52:43 +00:00
4bc2b1bf03 rebuild cowrie for alpine 3.13 2021-02-18 16:38:35 +00:00
3d123f35a4 rebuild glutton for alpine 3.13, update to latest master 2021-02-18 11:12:21 +00:00
d4519892f6 rebuild dionaea 2021-02-18 10:37:17 +00:00
0aa1a05c92 enable smtps for heralding 2021-02-16 17:14:56 +00:00
69c535619d bump heralding to 1.0.7 and rebuild for alpine 3.13 2021-02-16 16:59:17 +00:00
5fe59c3bd8 rebuild ipphoney for alpine 3.13 2021-02-16 16:14:37 +00:00
d8d0a6f190 rebuild fatt for alpine 3.13 2021-02-16 13:27:56 +00:00
4d407b420d rebuild ewsposter for alpine 3.13 2021-02-16 13:15:26 +00:00
181e3585b7 bump spiderfoot to 3.3 and rebuild for alpine 3.13 2021-02-16 11:01:43 +00:00
2597af73ee rebuild dicompot for alpine 3.13 2021-02-15 12:34:11 +00:00
0ab220ebf0 rebuild p0f for alpine 3.13 2021-02-15 12:12:24 +00:00
2777fc1f41 rebuild medpot for alpine 3.13 2021-02-15 12:09:19 +00:00
91483a231d rebuild honeysap 2021-02-15 11:46:55 +00:00
95ea079f4d rebuild heimdall, nginx for php7.4, alpine 3.13 2021-02-15 11:00:00 +00:00
8112f48270 rebuild elasticpot for alpine 3.13 2021-02-15 10:14:52 +00:00
898f8be4db rebuild citrixhoneypot for alpine 3.13 2021-02-15 10:05:29 +00:00
a28ee97f13 rebuild ciscoasa for alpine 3.13 2021-02-15 10:01:03 +00:00
b01bf50aaf Merge pull request #769 from shaderecker/ansible
Ansible updates
2021-02-15 10:12:14 +01:00
86cc54ee88 Update README.md 2021-02-13 20:39:32 +01:00
2fb1967ef1 Update README.md 2021-02-13 20:16:34 +01:00
48e02ceb1c Allow for creation of multiple T-Pots 2021-02-13 20:12:58 +01:00
c014e9635d Update README.md 2021-02-13 19:03:56 +01:00
ca4946c87c Update gitignore 2021-02-13 18:58:42 +01:00
9ff9c3c4df Merge branch 'ansible' of github.com:shaderecker/tpotce into ansible 2021-02-13 18:29:45 +01:00
423914f63f Unify cloud parameter 2021-02-13 18:29:27 +01:00
f6db541293 Update README.md 2021-02-13 18:20:01 +01:00
efb51f8233 Add collection requirements 2021-02-13 18:04:23 +01:00
acc64c2771 Fix name 2021-02-13 17:52:18 +01:00
780acd0384 Fix name 2021-02-13 17:47:48 +01:00
b014f73045 Use FQCNs 2021-02-13 17:46:28 +01:00
bb8d2f27c6 Split network and vm creation into own roles 2021-02-13 17:22:49 +01:00
487c091ba7 Use ansible internal tools to generate random name 2021-02-13 15:36:39 +01:00
c3ebf8487b Lowercase group names 2021-02-13 15:27:36 +01:00
51b15b6510 Update docu links 2021-02-13 15:04:50 +01:00
f2c48d7efc bump cyberchef to latest release 2021-02-12 17:09:44 +00:00
039f3c115a update adbhoney image 2021-02-12 14:21:31 +00:00
80d9efa729 bump elk stack images to alpine 3.13 2021-02-12 13:54:42 +00:00
e5f29f3c90 bump elk stack to 7.11.0 2021-02-12 13:21:35 +00:00
01af362ff6 Merge pull request #764 from shaderecker/terraform-otc
OTC: Retrieve Debian Image ID from Terraform Data Source
2021-02-05 16:59:50 +01:00
98c7dd17d7 OTC: Retrieve Debian Image ID from Terraform Data Source 2021-02-05 16:07:53 +01:00
70c152377d Merge pull request #763 from shaderecker/terraform-otc
Terraform updates
2021-02-05 11:54:31 +01:00
b214bed014 Merge branch 'master' into terraform-otc 2021-02-04 22:57:41 +01:00
bde60734ea Update variables.tf
- Latest Debian 10.7 AMIs (https://wiki.debian.org/Cloud/AmazonEC2Image/Buster?action=recall&rev=21)
- Add MEDICAL
2021-02-04 22:51:01 +01:00
362dd75473 Add provider constraints and dependency lock file 2021-02-04 22:29:02 +01:00
a7be2ca0a8 Cosmetics 2021-02-04 22:23:09 +01:00
da81f12877 Update variables.tf
- Update flavor to newest s3 generation
- Update to latest OTC Debian 10 image
- Add MEDICAL
2021-02-04 22:08:22 +01:00
4e8a1e8ea9 TF 0.14: Add dependency lock file 2021-02-04 22:00:40 +01:00
1b386ed32f Update providers and add version constraints 2021-02-04 21:59:49 +01:00
5a65ceb5b5 b64 is deprecated, switch to b64_std for newer version 2021-02-04 21:57:50 +01:00
c60d53ca3f Merge pull request #754 from shaderecker/cloud-region
Explicitly add region name to clouds.yaml
2021-01-26 16:38:41 +01:00
e7a41feef4 Explicitly add region name 2021-01-26 16:24:09 +01:00
ee3d667615 bump dionaea to 0.11.0 2021-01-19 10:59:32 +00:00
df27ba4e5f Merge pull request #750 from shaderecker/patch-2
Update Ansible Docu
2021-01-14 09:43:29 +01:00
459db01e23 Update Ansible Docu
Add disclaimer about Ansible 2.10 & how to install with pip
2021-01-13 23:53:39 +01:00
f767179cc9 Merge pull request #749 from shaderecker/pip3
Ansible: Set pip executable to pip3
2021-01-12 17:14:46 +01:00
749e7ee246 Set to pip3 to avoid Python Autodiscovery 2021-01-12 17:04:03 +01:00
3a7eda96fa Merge pull request #747 from shaderecker/patch-1
Add MEDICAL to tpot.conf.dist
2021-01-08 12:02:23 +01:00
43ae92cf44 Remove redundant tpot.conf.dist file content 2021-01-08 11:34:03 +01:00
2fb51f3b3a Add MEDICAL to tpot.conf.dist 2021-01-08 11:31:58 +01:00
d2dc43e1ef Update internet IF retrieval
To be consistent with @adepasquale's PR #746, the fatt, glutton and p0f Dockerfiles were updated accordingly.
2021-01-06 17:05:09 +00:00
db73a0656e Merge pull request #746 from adepasquale/master
Change method to get default Suricata interface

@adepasquale Thanks again!
2021-01-06 17:45:32 +01:00
b3b983afe6 Change method to get default Suricata interface
On some systems, interface number 2 is not always the correct one.
With AWK we now collect the first active interface having both an
address and a broadcast.
2021-01-06 11:14:24 +01:00
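The exact command is not included in this log, but a sketch of the described approach, taking the first interface that shows both an address and a broadcast in `ip address show`, might look like:
```
# First active interface that has both an inet address and a broadcast (brd) entry
myIF=$(ip address show | awk '/inet .*brd/{ print $NF; exit }')
echo "Default capture interface: $myIF"
```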
273cab4759 Update general-issue-for-t-pot.md 2021-01-05 16:03:42 +01:00
e1745bdea1 fix broken sqlite db 2020-12-28 21:49:28 +00:00
c34570f665 remove docker parallel pulls 2020-12-28 20:54:09 +00:00
020cbb5355 avoid ghcr.io because of slow transfers 2020-12-28 20:37:47 +00:00
aea14c9ead docker pull background 2020-12-28 17:46:05 +00:00
b57f6ddd1e remove netselect-apt
causes too many unpredictable errors
#733 as the latest example
2020-12-28 10:40:19 +00:00
af6ce8854d bump elastic stack to 7.10.1 2020-12-10 15:20:18 +00:00
6069b214a5 bump ewsposter to 1.12 2020-12-10 11:40:53 +00:00
252051dfe7 Merge pull request #731 from shaderecker/patch-1
More Python 3 stuff
2020-12-04 15:41:27 +01:00
f9fa1bcc74 Fix setup on Debian
Debian does not ship the same preinstalled packages as Ubuntu.
Fix the compilation of netifaces, which requires gcc and python3-dev.
2020-12-04 14:42:32 +01:00
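On a minimal Debian installation the missing build prerequisites can be pulled in as follows (a sketch; package names are the standard Debian ones):
```
# netifaces builds a C extension, so it needs a compiler and the Python headers
sudo apt-get update
sudo apt-get install -y gcc python3-dev python3-pip
pip3 install netifaces
```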
f3f9f6ae72 cleanup 2020-12-03 00:01:38 +00:00
bdf095367d prep for ewsposter 1.11 2020-12-02 23:21:23 +00:00
4abb0e5ce6 Missed this one
Python 3 is our friend :D
2020-12-02 23:56:54 +01:00
ba87ebfdaa update objects for Elastic Stack 7.10.0 2020-12-02 22:54:54 +00:00
8a7e81815e prep for Elastic Stack 7.10.0 2020-12-02 22:36:17 +00:00
17eff81e9c Merge pull request #728 from shaderecker/patch-1
Update pip dependency to Python3
2020-11-30 20:06:05 +01:00
f8f1bc1757 Merge pull request #727 from adepasquale/suricata-update
Suricata: use suricata-update for rule management
2020-11-30 20:05:24 +01:00
87a27e4f2b Suricata: use suricata-update for rule management
As a bonus we can now run "suricata-update" using docker-exec,
triggering both a rule update and a Suricata rule reload.
2020-11-30 17:56:14 +01:00
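A minimal sketch of that workflow, assuming the Suricata container is simply named `suricata` in `tpot.yml`:
```
# Fetch fresh rules and have Suricata reload them, all inside the running container
docker exec -it suricata suricata-update
```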
7f8f3a01c3 Update pip dependency to Python3 2020-11-30 17:27:28 +01:00
2ecef8c607 enable MQTT
as eagle eyed by @adepasquale
2020-11-27 19:07:12 +01:00
d992a25a0a Merge pull request #726 from adepasquale/suricata-yaml-6.0.x
Suricata: update suricata.yaml config to 6.0.x
2020-11-27 18:55:57 +01:00
73a5847753 Suricata: update suricata.yaml config to 6.0.x
Merge in the latest updates from suricata-6.0.x while at the same time
keeping the custom T-Pot configuration.

https://github.com/OISF/suricata/blob/suricata-6.0.0/suricata.yaml.in
2020-11-26 19:16:01 +01:00
c976aea73e Merge pull request #725 from adepasquale/suricata-yaml-5.x
Suricata: update suricata.yaml config to 5.x
2020-11-26 16:23:50 +01:00
4ada38988c bump cowrie to 2.2.0 2020-11-26 08:17:09 +00:00
0010f99662 Suricata: disable eve.stats since it's unused
Prevent the error below by disabling stats globally and in eve-log:

<Error> - [ERRCODE: SC_ERR_STATS_LOG_GENERIC(278)] - eve.stats: stats are disabled globally: set stats.enabled to true.
2020-11-25 17:07:49 +01:00
e2f76c44cb Suricata: update suricata.yaml config to 5.x
Merge in the latest updates from suricata-5.x while at the same time
keeping the custom T-Pot configuration.

https://github.com/OISF/suricata/blob/master-5.0.x/suricata.yaml.in
2020-11-25 15:51:41 +01:00
e26853c7fa bump suricata to 5.0.4 2020-10-28 17:53:23 +00:00
d64cbe6741 bump ipphoney to latest master 2020-10-28 17:34:28 +00:00
c3809b5a98 bump heralding to latest master 2020-10-28 17:27:09 +00:00
a3d40cc57c bump spiderfoot to 3.2.1 2020-10-28 17:08:55 +00:00
e3fda4d464 bump dionaea to 0.9.2 2020-10-28 16:45:53 +00:00
4bf245d13b bump conpot to latest master 2020-10-28 13:56:52 +00:00
92925cecbd bump dicompot to latest master 2020-10-27 21:30:33 +00:00
f204cdf9b8 bump elk to 7.9.3 2020-10-27 19:43:32 +00:00
ff4a394e3b reverting elk to 7.9.1 2020-10-15 12:24:46 +00:00
ce7b79b71a Merge pull request #707 from brianlechthaler/patch-3
Bump Elastic dependencies to 7.9.2
2020-10-15 13:37:11 +02:00
b28cc2edd0 prepare for new ewsposter 2020-10-15 09:14:30 +00:00
84a741ec64 IMPORTANT: Fix Node Version
Bump node version to `10.22.1-alpine`

**KIBANA WILL NOT WORK WITHOUT THIS**
2020-10-07 13:53:21 -07:00
6b37578d8d Merge pull request #706 from brianlechthaler/patch-2
Debian 10.6 AMI + Add AWS Regions
2020-10-07 14:28:19 +02:00
d351a89096 Bump Kibana version to 7.9.2 2020-10-04 18:05:16 -07:00
488da48df7 Bump Logstash version to 7.9.2 2020-10-04 18:04:15 -07:00
85da099cd0 Bump Elasticsearch to 7.9.2 2020-10-04 18:03:00 -07:00
bd8a9ca92d Debian 10.6 AMI + Add AWS Regions
# Changes:
1) 🇿🇦 Add AWS Capetown, South Africa Region (`af-south-1`)
2) 🇮🇹 Add AWS Milan, Italy Region (`eu-south-1`)
3) Bump all AMIs to Debian Buster 10.6

# References:
1) Debian 10 (Buster) Wiki Article on Official EC2 Images: https://wiki.debian.org/Cloud/AmazonEC2Image/Buster?action=recall&rev=16
2) For information on Debian 10.6, see: https://www.debian.org/News/2020/20200926
3) Official AWS Documentation on Regions and Zones can be found here: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html
2020-10-03 22:22:57 -07:00
1afbb89ef4 Merge pull request #691 from brianlechthaler/patch-1
Update Suricata Capture Filter for New Docker Repo
2020-09-08 20:08:28 +02:00
b1d8e293de add DockerHub back in cap filter
see https://github.com/telekom-security/tpotce/pull/691#issuecomment-688648225
2020-09-08 10:45:58 -07:00
7fdf9edb60 Update Suricata Capture Filter for New Docker Repo 2020-09-07 19:57:15 -07:00
0e7abb8d2c restore mibfix for conpot 2020-09-07 15:46:52 +00:00
2bac239763 fix version string for update check 2020-09-04 18:59:15 +02:00
a90f135f06 Merge pull request #690 from telekom-security/ghcr
Move to GitHub Container Registry, Cleanup, Bump ELK stack to 7.9.1
2020-09-04 18:55:54 +02:00
adee659baa Add files via upload 2020-09-04 18:54:40 +02:00
1e8f6305c9 adjust changelog 2020-09-04 16:40:51 +00:00
38b792a06e prepare ghcr for merge 2020-09-04 16:27:05 +00:00
1ee9c29805 set new container registry, point installer to branch 2020-09-04 13:29:14 +00:00
2e5639a50b fix links 2020-09-04 13:01:21 +00:00
47dca8b835 continue pin / prep images ghcr 2020-09-04 12:37:28 +00:00
1ac79d6be7 begin prep for move to GitHub Container Registry
Start pinning Dockerfiles to specific releases / commits
2020-09-02 15:18:32 +00:00
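A sketch of what such pinning looks like in practice; the repository and tag below are purely illustrative, not the actual pins used:
```
# Instead of building from a moving "master", check out a fixed tag or commit
git clone https://github.com/cowrie/cowrie /root/cowrie
cd /root/cowrie
git checkout v2.3.0   # illustrative pin; the real Dockerfiles pin their own refs
```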
9a7f55bb52 Merge pull request #687 from shaderecker/terraform-otc
Update Terraform config for 0.13
2020-08-26 12:14:17 +02:00
42852a85ea Update README.md 2020-08-26 11:46:16 +02:00
c33229b53a Fix variable typo 2020-08-26 11:45:17 +02:00
840662da48 Update OTC Debian 10 base image id 2020-08-26 11:21:55 +02:00
d8f14d9c9f AWS: Update required_providers for Terraform 0.13 2020-08-26 11:04:34 +02:00
72e4134c86 OTC: Update required_providers for Terraform 0.13 2020-08-26 10:59:39 +02:00
5b1e07b9c8 finalize objects for ipphoney 2020-08-25 16:12:29 +00:00
2be185a371 add kibana objects for ipphoney 2020-08-25 15:08:28 +00:00
54a6a944aa prep for ipphoney 2020-08-25 12:25:59 +00:00
b86d2c715b prep for ipphoney 2020-08-24 21:36:08 +00:00
8f06b5b499 start prepping for ipphoney 2020-08-24 15:55:50 +00:00
6ec5a04802 fix deps issue with conpot 2020-08-24 15:55:10 +00:00
5080151b7c prep for elk 7.9 2020-08-24 10:35:46 +00:00
c1f7146800 prep elk stack for 7.9.0 2020-08-20 15:03:16 +00:00
743616fa09 update conpot to latest working master 2020-08-13 16:30:37 +00:00
6e18b6f660 bump elasticpot to latest master 2020-08-13 10:37:03 +00:00
50d67fc286 bump spiderfoot to 3.1 final
Fix Spiderfoot issue not showing current scan
2020-08-13 09:06:49 +00:00
c28642932a bump elk stack to 7.8.1 2020-08-13 08:34:44 +00:00
969e269bd1 improve cowrie dashboard, fixes #664 2020-07-09 15:11:32 +00:00
8af45c9440 prevent cowrie from unwanted log rotation 2020-07-07 00:00:57 +00:00
6d29f504df provide fix for #669 2020-07-06 23:30:11 +00:00
9b7f100f74 Add testimonial from @robcowart 2020-07-01 11:53:38 +02:00
e1485bfd04 Merge pull request #663 from dtag-dev-sec/dev
fix crontab
2020-06-30 18:34:34 +02:00
31c6bc6f96 fix crontab 2020-06-30 16:31:22 +00:00
150 changed files with 5216 additions and 1267 deletions


@@ -7,6 +7,8 @@ assignees: ''
---
🗨️ Please post your questions in [Discussions](https://github.com/telekom-security/tpotce/discussions) and keep the issues for **issues**. Thank you 😁.<br>
Before you post your issue make sure it has not been answered yet and provide `basic support information` if you come to the conclusion it is a new issue.
- 🔍 Use the [search function](https://github.com/dtag-dev-sec/tpotce/issues?utf8=%E2%9C%93&q=) first


@@ -1,5 +1,92 @@
# Changelog
## 20210222
- **New Release 20.06.2**
- **Countless Cloud Contributions**
- Thanks to @shaderecker
## 20210219
- **Rebuild Snare, Tanner, Redis, Phpox**
- Rebuild images to their latest masters and upgrade Alpine OS to 3.13 where possible.
- **Bump Elastic Stack to 7.11.1**
- Upgrade Elastic Stack Images to 7.11.1 and update License Info to reflect new Elastic License.
- Prepare for new release.
## 20210218
- **Rebuild Conpot, EWSPoster, Cowrie, Glutton, Dionaea**
- Rebuild images to their latest masters and upgrade Alpine OS to 3.13 where possible.
## 20210216
- **Bump Heralding to 1.0.7**
- Rebuild and upgrade image to 1.0.7 and upgrade Alpine OS to 3.13.
- Enable SMTPS for Heralding.
- **Rebuild IPPHoney, Fatt, EWSPoster, Spiderfoot**
- Rebuild images to their latest masters and upgrade Alpine OS to 3.13 where possible.
- Upgrade Spiderfoot to 3.3
## 20210215
- **Rebuild Dicompot, p0f, Medpot, Honeysap, Heimdall, Elasticpot, Citrixhoneypot, Ciscoasa**
- Rebuild images to their latest masters and upgrade Alpine OS to 3.13 where possible.
## 20210212
- **Rebuild Cyberchef, Adbhoney, Elastic Stack**
- Rebuild images to their latest masters and upgrade Alpine OS to 3.13 where possible.
- Bump Elastic Stack to 7.11.0
- Bump Cyberchef to 9.27.0
## 20210119
- **Bump Dionaea to 0.11.0**
- Upgrade Dionaea to 0.11.0, rebuild image and upgrade Alpine OS to 3.13.
## 20210106
- **Update Internet IF retrieval**
- To be consistent with @adepasquale's PR #746, the fatt, glutton and p0f Dockerfiles were updated accordingly.
- Merge PR #746 from @adepasquale, thank you!
## 20201228
- **Fix broken SQLite DB**
- Fix a broken `app.sqlite` in Heimdall
- **Avoid ghcr.io because of slow transfers**
- **Remove netselect-apt**
- Causes too many unpredictable errors, #733 being the latest example
## 20201210
- **Bump Elastic Stack 7.10.1, EWSPoster to 1.12**
## 20201202
- **Update Elastic Stack to 7.10.0**
## 20201130
- **Suricata, use suricata-update for rule management**
- As a bonus we can now run "suricata-update" using docker-exec, triggering both a rule update and a Suricata rule reload.
- Thanks to @adepasquale!
## 20201126
- **Suricata, update suricata.yaml for 6.x**
- Merge in the latest updates from suricata-6.0.x while at the same time keeping the custom T-Pot configuration.
- Thanks to @adepasquale!
- **Bump Cowrie to 2.2.0**
## 20201028
- **Bump Suricata to 5.0.4, Spiderfoot to 3.2.1, Dionaea to 0.9.2, IPPHoney, Heralding, Conpot to latest masters**
## 20201027
- **Bump Dicompot to latest master, Elastic Stack to 7.9.3**
## 20201005
- **Bump Elastic Stack to 7.9.2**
- @brianlechthaler, thanks for PR #706, which had issues regarding Elastic Stack and resulted in reverting to 7.9.1
## 20200904
- **Release T-Pot 20.06.1**
- GitHub offers a free Docker Container Registry for public packages. For our Open Source projects we want to make sure to have everything in one place and are thus moving from Docker Hub to the GitHub Container Registry.
- **Bump Elastic Stack**
- Update the Elastic Stack to 7.9.1.
- **Rebuild Images**
- All docker images were rebuilt based on the latest (and stable running) versions of the tools and honeypots and have been pinned to specific Alpine / Debian versions and git commits so rebuilds are less likely to fail.
- **Cleaning up**
- Clean up old references and links.
## 20200630
- **Release T-Pot 20.06**
- After 4 months of public testing with the NextGen edition T-Pot 20.06 can finally be released.
@@ -51,7 +138,7 @@
- **Update ISO image to fix upstream bug of missing kernel modules**
- **Include dashboards for CitrixHoneypot**
- Please run `/opt/tpot/update.sh` for the necessary modifications, omit the reboot and run `/opt/tpot/bin/tped.sh` to (re-)select the NextGen installation type.
- This update requires the latest Kibana objects as well. Download the latest from https://raw.githubusercontent.com/telekom-security/tpotce/master/etc/objects/kibana_export.json.zip, unzip and import the objects within Kibana WebUI > Management > Saved Objects > Export / Import. All objects will be overwritten upon import, make sure to run an export first.
## 20200115
- **Prepare integration of CitrixHoneypot**


@@ -11,17 +11,24 @@ and includes dockerized versions of the following honeypots
* [citrixhoneypot](https://github.com/MalwareTech/CitrixHoneypot),
* [conpot](http://conpot.org/),
* [cowrie](https://github.com/cowrie/cowrie),
* [ddospot](https://github.com/aelth/ddospot),
* [dicompot](https://github.com/nsmfoo/dicompot),
* [dionaea](https://github.com/DinoTools/dionaea),
* [elasticpot](https://gitlab.com/bontchev/elasticpot),
* [endlessh](https://github.com/skeeto/endlessh),
* [glutton](https://github.com/mushorg/glutton),
* [heralding](https://github.com/johnnykv/heralding),
* [hellpot](https://github.com/yunginnanet/HellPot),
* [honeypots](https://github.com/qeeqbox/honeypots),
* [honeypy](https://github.com/foospidy/HoneyPy),
* [honeysap](https://github.com/SecureAuthCorp/HoneySAP),
* [honeytrap](https://github.com/armedpot/honeytrap/),
* [ipphoney](https://gitlab.com/bontchev/ipphoney),
* [log4pot](https://github.com/thomaspatzke/Log4Pot),
* [mailoney](https://github.com/awhitehatter/mailoney),
* [medpot](https://github.com/schmalle/medpot),
* [rdpy](https://github.com/citronneur/rdpy),
* [redishoneypot](https://github.com/cypwnpwnsocute/RedisHoneyPot),
* [snare](http://mushmush.org/),
* [tanner](http://mushmush.org/)
@@ -39,7 +46,7 @@ Furthermore T-Pot includes the following tools
# TL;DR
1. Meet the [system requirements](#requirements). The T-Pot installation needs at least 8 GB RAM and 128 GB free disk space as well as a working (outgoing non-filtered) internet connection.
2. Download the T-Pot ISO from [GitHub](https://github.com/telekom-security/tpotce/releases) or [create it yourself](#createiso).
3. Install the system in a [VM](#vm) or on [physical hardware](#hw) with [internet access](#placement).
4. Enjoy your favorite beverage - [watch](https://sicherheitstacho.eu) and [analyze](#kibana).
@@ -91,16 +98,23 @@ In T-Pot we combine the dockerized honeypots ...
* [citrixhoneypot](https://github.com/MalwareTech/CitrixHoneypot),
* [conpot](http://conpot.org/),
* [cowrie](http://www.micheloosterhof.com/cowrie/),
* [ddospot](https://github.com/aelth/ddospot),
* [dicompot](https://github.com/nsmfoo/dicompot),
* [dionaea](https://github.com/DinoTools/dionaea),
* [elasticpot](https://gitlab.com/bontchev/elasticpot),
* [endlessh](https://github.com/skeeto/endlessh),
* [glutton](https://github.com/mushorg/glutton),
* [heralding](https://github.com/johnnykv/heralding),
* [hellpot](https://github.com/yunginnanet/HellPot),
* [honeypots](https://github.com/qeeqbox/honeypots),
* [honeypy](https://github.com/foospidy/HoneyPy),
* [honeysap](https://github.com/SecureAuthCorp/HoneySAP),
* [honeytrap](https://github.com/armedpot/honeytrap/),
* [ipphoney](https://gitlab.com/bontchev/ipphoney),
* [log4pot](https://github.com/thomaspatzke/Log4Pot),
* [mailoney](https://github.com/awhitehatter/mailoney),
* [medpot](https://github.com/schmalle/medpot),
* [redishoneypot](https://github.com/cypwnpwnsocute/RedisHoneyPot),
* [rdpy](https://github.com/citronneur/rdpy),
* [snare](http://mushmush.org/),
* [tanner](http://mushmush.org/)
@@ -130,7 +144,7 @@ The T-Pot project provides all the tools and documentation necessary to build yo
The source code and configuration files are fully stored in the T-Pot GitHub repository. The docker images are preconfigured for the T-Pot environment. If you want to run the docker images separately, make sure you study the docker-compose configuration (`/opt/tpot/etc/tpot.yml`) and the T-Pot systemd script (`/etc/systemd/system/tpot.service`), as they provide a good starting point for implementing changes.
The individual docker configurations are located in the [docker folder](https://github.com/telekom-security/tpotce/tree/master/docker).
<a name="requirements"></a>
# System Requirements
@@ -168,7 +182,7 @@ There are prebuilt installation types available each focussing on different aspe
##### NextGen
- Honeypots: adbhoney, ciscoasa, citrixhoneypot, conpot, cowrie, dicompot, dionaea, glutton, heralding, honeypy, honeysap, ipphoney, mailoney, medpot, rdpy, snare & tanner
- Tools: cockpit, cyberchef, ELK, fatt, elasticsearch head, ewsposter, nginx / heimdall, spiderfoot, p0f & suricata
@@ -181,18 +195,18 @@ There are prebuilt installation types available each focussing on different aspe
# Installation
The installation of T-Pot is straightforward and heavily depends on a working, transparent, non-proxied, up and running internet connection. Otherwise the installation **will fail!**
Firstly, decide if you want to download the prebuilt installation ISO image from [GitHub](https://github.com/telekom-security/tpotce/releases), [create it yourself](#createiso) ***or*** [post-install on an existing Debian 10 (Buster)](#postinstall).
Secondly, decide where you want the system to run: [real hardware](#hardware) or in a [virtual machine](#vm)?
<a name="prebuilt"></a>
## Prebuilt ISO Image
An installation ISO image is available for download (~50MB), which is created by the [ISO Creator](https://github.com/telekom-security/tpotce) you can use yourself in order to create your own image. It will basically just save you some time downloading components and creating the ISO image.
You can download the prebuilt installation ISO from [GitHub](https://github.com/telekom-security/tpotce/releases) and jump to the [installation](#vm) section.
<a name="createiso"></a>
## Create your own ISO Image
For transparency reasons and to give you the ability to customize your install, you can use the [ISO Creator](https://github.com/telekom-security/tpotce) to create your own ISO installation image.
**Requirements to create the ISO image:**
- Debian 10 as host system (others *may* work, but *remain* untested)
@@ -204,7 +218,7 @@ For transparency reasons and to give you the ability to customize your install y
1. Clone the repository and enter it.
```
git clone https://github.com/telekom-security/tpotce
cd tpotce
```
2. Run the `makeiso.sh` script to build the ISO image.
@@ -235,7 +249,7 @@ You can now jump [here](#firstrun).
If you decide to run T-Pot on dedicated hardware, just follow these steps:
1. Burn a CD from the ISO image or make a bootable USB stick using the image. <br>
Whereas most CD burning tools allow you to burn from ISO images, the procedure to create a bootable USB stick from an ISO image depends on your system. There are various Windows GUI tools available, e.g. [this tip](http://www.ubuntu.com/download/desktop/create-a-usb-stick-on-windows) might help you.<br> On [Linux](http://askubuntu.com/questions/59551/how-to-burn-a-iso-to-a-usb-device) or [MacOS](http://www.ubuntu.com/download/desktop/create-a-usb-stick-on-mac-osx) you can use the tool *dd* or create the USB stick with T-Pot's [ISO Creator](https://github.com/telekom-security).
2. Boot from the USB stick and install.
*Please note*: Limited tests are performed for the Intel NUC platform; other hardware platforms **remain untested**. There is no hardware support provided of any kind.
@@ -253,7 +267,7 @@ The T-Pot Universal Installer will upgrade the system and install all required T
Just follow these steps:
```
git clone https://github.com/telekom-security/tpotce
cd tpotce/iso/installer/
./install.sh --type=user
```
@@ -267,7 +281,7 @@ You can also let the installer run automatically if you provide your own `tpot.c
Just follow these steps while adjusting `tpot.conf` to your needs:
```
git clone https://github.com/telekom-security/tpotce
cd tpotce/iso/installer/
cp tpot.conf.dist tpot.conf
./install.sh --type=auto --conf=tpot.conf
@@ -288,9 +302,9 @@ If you would like to contribute, you can add other cloud deployments like Chef o
You can find an [Ansible](https://www.ansible.com/) based T-Pot deployment in the [`cloud/ansible`](cloud/ansible) folder.
The Playbook in the [`cloud/ansible/openstack`](cloud/ansible/openstack) folder is reusable for all **OpenStack** clouds out of the box.
It first creates all resources (security group, network, subnet, router), deploys one (or more) new servers and then installs and configures T-Pot on them.
You can have a look at the Playbook and easily adapt the deploy role for other [cloud providers](https://docs.ansible.com/ansible/latest/scenario_guides/cloud_guides.html). Check out [Ansible Galaxy](https://galaxy.ansible.com/search?keywords=&order_by=-relevance&page=1&deprecated=false&type=collection&tags=cloud) for more cloud collections.
*Please note*: Cloud providers usually offer adjusted Debian OS images, which might not be compatible with T-Pot. There is no cloud provider support provided of any kind.
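A hedged sketch of running such a playbook from the repository root; the playbook filename and your `clouds.yaml` profile are assumptions, not taken from this diff:
```
# Hypothetical invocation; adjust the filename to what ships in cloud/ansible/openstack
cd tpotce/cloud/ansible/openstack
ansible-playbook deploy_tpot.yaml
```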
@@ -302,7 +316,7 @@ You can find [Terraform](https://www.terraform.io/) configuration in the [`cloud
This can be used to launch a virtual machine, bootstrap any dependencies and install T-Pot in a single step.
Configuration for **Amazon Web Services** (AWS) and **Open Telekom Cloud** (OTC) is currently included.
This can easily be extended to support other [Terraform providers](https://registry.terraform.io/browse/providers?category=public-cloud%2Ccloud-automation%2Cinfrastructure).
*Please note*: Cloud providers usually offer adjusted Debian OS images, which might not be compatible with T-Pot. There is no cloud provider support provided of any kind.
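The usual Terraform workflow applies; a sketch, assuming the configuration lives under `cloud/terraform` (the exact folder name is truncated in the excerpt above):
```
# Standard Terraform workflow (directory name assumed)
cd tpotce/cloud/terraform
terraform init    # fetch the pinned providers
terraform plan    # review the resources to be created
terraform apply   # launch the VM and bootstrap T-Pot
```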
@@ -434,7 +448,7 @@ You may opt out of the submission by removing the `# Ewsposter service` from `/o
restart: always
networks:
  - ewsposter_local
image: "ghcr.io/telekom-security/ewsposter:2006"
volumes:
  - /data:/data
  - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip
@@ -464,7 +478,7 @@ As with every development there is always room for improvements ...
Some features may be provided with updated docker images, others may require some hands on from your side.
You are always invited to participate in development on our [GitHub](https://github.com/telekom-security/tpotce) page.
<a name="disclaimer"></a>
# Disclaimer
@@ -476,21 +490,24 @@ You are always invited to participate in development on our [GitHub](https://git
<a name="faq"></a>
# FAQ
Please report any issues or questions on our [GitHub issue list](https://github.com/telekom-security/tpotce/issues), so the community can participate.
<a name="contact"></a>
# Contact
The software is provided **as is** in a Community Edition format. T-Pot is designed to run out of the box and with zero maintenance involved. <br>
We hope you understand that we cannot provide support on an individual basis. We will try to address questions, bugs and problems on our [GitHub issue list](https://github.com/telekom-security/tpotce/issues).
<a name="licenses"></a>
# Licenses
The software that T-Pot is built on uses the following licenses.
<br>GPLv2: [conpot](https://github.com/mushorg/conpot/blob/master/LICENSE.txt), [dionaea](https://github.com/DinoTools/dionaea/blob/master/LICENSE), [honeysap](https://github.com/SecureAuthCorp/HoneySAP/blob/master/COPYING), [honeypy](https://github.com/foospidy/HoneyPy/blob/master/LICENSE), [honeytrap](https://github.com/armedpot/honeytrap/blob/master/LICENSE), [suricata](http://suricata-ids.org/about/open-source/)
<br>GPLv3: [adbhoney](https://github.com/huuck/ADBHoney), [elasticpot](https://gitlab.com/bontchev/elasticpot/-/blob/master/LICENSE), [ewsposter](https://github.com/telekom-security/ews/), [log4pot](https://github.com/thomaspatzke/Log4Pot/blob/master/LICENSE), [fatt](https://github.com/0x4D31/fatt/blob/master/LICENSE), [rdpy](https://github.com/citronneur/rdpy/blob/master/LICENSE), [heralding](https://github.com/johnnykv/heralding/blob/master/LICENSE.txt), [ipphoney](https://gitlab.com/bontchev/ipphoney/-/blob/master/LICENSE), [redishoneypot](https://github.com/cypwnpwnsocute/RedisHoneyPot/blob/main/LICENSE), [snare](https://github.com/mushorg/snare/blob/master/LICENSE), [tanner](https://github.com/mushorg/snare/blob/master/LICENSE)
<br>Apache 2 License: [cyberchef](https://github.com/gchq/CyberChef/blob/master/LICENSE), [dicompot](https://github.com/nsmfoo/dicompot/blob/master/LICENSE), [elasticsearch](https://github.com/elasticsearch/elasticsearch/blob/master/LICENSE.txt), [logstash](https://github.com/elasticsearch/logstash/blob/master/LICENSE), [kibana](https://github.com/elasticsearch/kibana/blob/master/LICENSE.md), [docker](https://github.com/docker/docker/blob/master/LICENSE), [elasticsearch-head](https://github.com/mobz/elasticsearch-head/blob/master/LICENCE)
<br>MIT license: [ciscoasa](https://github.com/Cymmetria/ciscoasa_honeypot/blob/master/LICENSE), [ddospot](https://github.com/aelth/ddospot/blob/master/LICENSE), [glutton](https://github.com/mushorg/glutton/blob/master/LICENSE), [hellpot](https://github.com/yunginnanet/HellPot/blob/master/LICENSE)
<br> Unlicense: [endlessh](https://github.com/skeeto/endlessh/blob/master/UNLICENSE)
<br> Other: [citrixhoneypot](https://github.com/MalwareTech/CitrixHoneypot#licencing-agreement-malwaretech-public-licence), [cowrie](https://github.com/micheloosterhof/cowrie/blob/master/LICENSE.md), [mailoney](https://github.com/awhitehatter/mailoney), [Debian licensing](https://www.debian.org/legal/licenses/), [Elastic License](https://www.elastic.co/licensing/elastic-license)
<br> AGPL-3.0: [honeypots](https://github.com/qeeqbox/honeypots/blob/main/LICENSE)
<a name="credits"></a>
# Credits
@@ -505,6 +522,7 @@ Without open source and the fruitful development community (we are proud to be a
* [cockpit](https://github.com/cockpit-project/cockpit/graphs/contributors)
* [conpot](https://github.com/mushorg/conpot/graphs/contributors)
* [cowrie](https://github.com/micheloosterhof/cowrie/graphs/contributors)
* [ddospot](https://github.com/aelth/ddospot/graphs/contributors)
* [debian](http://www.debian.org/)
* [dicompot](https://github.com/nsmfoo/dicompot/graphs/contributors)
* [dionaea](https://github.com/DinoTools/dionaea/graphs/contributors)
@@ -512,19 +530,25 @@ Without open source and the fruitful development community (we are proud to be a
* [elasticpot](https://gitlab.com/bontchev/elasticpot/-/project_members)
* [elasticsearch](https://github.com/elastic/elasticsearch/graphs/contributors)
* [elasticsearch-head](https://github.com/mobz/elasticsearch-head/graphs/contributors)
* [endlessh](https://github.com/skeeto/endlessh/graphs/contributors)
* [ewsposter](https://github.com/armedpot/ewsposter/graphs/contributors)
* [fatt](https://github.com/0x4D31/fatt/graphs/contributors)
* [glutton](https://github.com/mushorg/glutton/graphs/contributors)
* [hellpot](https://github.com/yunginnanet/HellPot/graphs/contributors)
* [heralding](https://github.com/johnnykv/heralding/graphs/contributors)
* [honeypots](https://github.com/qeeqbox/honeypots/graphs/contributors)
* [honeypy](https://github.com/foospidy/HoneyPy/graphs/contributors)
* [honeysap](https://github.com/SecureAuthCorp/HoneySAP/graphs/contributors)
* [honeytrap](https://github.com/armedpot/honeytrap/graphs/contributors)
* [ipphoney](https://gitlab.com/bontchev/ipphoney/-/project_members)
* [kibana](https://github.com/elastic/kibana/graphs/contributors)
* [logstash](https://github.com/elastic/logstash/graphs/contributors)
* [log4pot](https://github.com/thomaspatzke/Log4Pot/graphs/contributors)
* [mailoney](https://github.com/awhitehatter/mailoney)
* [medpot](https://github.com/schmalle/medpot/graphs/contributors)
* [p0f](http://lcamtuf.coredump.cx/p0f3/)
* [rdpy](https://github.com/citronneur/rdpy)
* [redishoneypot](https://github.com/cypwnpwnsocute/RedisHoneyPot/graphs/contributors)
* [spiderfoot](https://github.com/smicallef/spiderfoot)
* [snare](https://github.com/mushorg/snare/graphs/contributors)
* [tanner](https://github.com/mushorg/tanner/graphs/contributors)
@ -544,6 +568,8 @@ Without open source and the fruitful development community (we are proud to be a
A new version of T-Pot is released about every 6-12 months, development has shifted more and more towards rolling releases and the usage of `/opt/tpot/update.sh`. A new version of T-Pot is released about every 6-12 months, development has shifted more and more towards rolling releases and the usage of `/opt/tpot/update.sh`.
<a name="testimonial"></a> <a name="testimonial"></a>
# Testimonial # Testimonials
One of the greatest feedback we have gotten so far is by one of the Conpot developers:<br> One of the greatest feedback we have gotten so far is by one of the Conpot developers:<br>
***"[...] I highly recommend T-Pot which is ... it's not exactly a swiss army knife .. it's more like a swiss army soldier, equipped with a swiss army knife. Inside a tank. A swiss tank. [...]"*** ***"[...] I highly recommend T-Pot which is ... it's not exactly a swiss army knife .. it's more like a swiss army soldier, equipped with a swiss army knife. Inside a tank. A swiss tank. [...]"***<br>
And from @robcowart (creator of [ElastiFlow](https://github.com/robcowart/elastiflow)):<br>
***"#TPot is one of the most well put together turnkey honeypot solutions. It is a must-have for anyone wanting to analyze and understand the behavior of malicious actors and the threat they pose to your organization."***

View File

@ -60,7 +60,7 @@ fi
echo ""
echo "[+] Creating config file with API UserID '$apiUser' and API Token '$apiToken'."
echo "[+] Fetching config file from github. Outgoing https requests must be enabled!"
wget -q https://raw.githubusercontent.com/telekom-security/tpotce/master/docker/ews/dist/ews.cfg -O ews.cfg.dist
if [[ -f "ews.cfg.dist" ]]; then
  echo "[+] Successfully downloaded ews.cfg from github."
else

View File

@ -114,6 +114,14 @@ fuCOWRIE () {
  chown tpot:tpot /data/cowrie -R
}
# Let's create a function to clean up and prepare ddospot data
fuDDOSPOT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/ddospot/log; fi
  mkdir -p /data/ddospot/log
  chmod 770 /data/ddospot -R
  chown tpot:tpot /data/ddospot -R
}
# Let's create a function to clean up and prepare dicompot data
fuDICOMPOT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/dicompot/log; fi
@ -149,6 +157,14 @@ fuELK () {
  chown tpot:tpot /data/elk -R
}
# Let's create a function to clean up and prepare endlessh data
fuENDLESSH () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/endlessh/log; fi
  mkdir -p /data/endlessh/log
  chmod 770 /data/endlessh -R
  chown tpot:tpot /data/endlessh -R
}
# Let's create a function to clean up and prepare fatt data
fuFATT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/fatt/*; fi
@ -165,6 +181,14 @@ fuGLUTTON () {
  chown tpot:tpot /data/glutton -R
}
# Let's create a function to clean up and prepare hellpot data
fuHELLPOT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/hellpot/log; fi
  mkdir -p /data/hellpot/log
  chmod 770 /data/hellpot -R
  chown tpot:tpot /data/hellpot -R
}
# Let's create a function to clean up and prepare heralding data
fuHERALDING () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/heralding/*; fi
@ -173,6 +197,14 @@ fuHERALDING () {
  chown tpot:tpot /data/heralding -R
}
# Let's create a function to clean up and prepare honeypots data
fuHONEYPOTS () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/honeypots/*; fi
  mkdir -p /data/honeypots/log
  chmod 770 /data/honeypots -R
  chown tpot:tpot /data/honeypots -R
}
# Let's create a function to clean up and prepare honeypy data
fuHONEYPY () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/honeypy/*; fi
@ -197,6 +229,22 @@ fuHONEYTRAP () {
  chown tpot:tpot /data/honeytrap/ -R
}
# Let's create a function to clean up and prepare ipphoney data
fuIPPHONEY () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/ipphoney/*; fi
  mkdir -p /data/ipphoney/log
  chmod 770 /data/ipphoney -R
  chown tpot:tpot /data/ipphoney -R
}
# Let's create a function to clean up and prepare log4pot data
fuLOG4POT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/log4pot/*; fi
  mkdir -p /data/log4pot/log
  chmod 770 /data/log4pot -R
  chown tpot:tpot /data/log4pot -R
}
# Let's create a function to clean up and prepare mailoney data
fuMAILONEY () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/mailoney/*; fi
@ -229,6 +277,14 @@ fuRDPY () {
  chown tpot:tpot /data/rdpy/ -R
}
# Let's create a function to clean up and prepare redishoneypot data
fuREDISHONEYPOT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/redishoneypot/log; fi
  mkdir -p /data/redishoneypot/log
  chmod 770 /data/redishoneypot -R
  chown tpot:tpot /data/redishoneypot -R
}
# Let's create a function to prepare spiderfoot db
fuSPIDERFOOT () {
  mkdir -p /data/spiderfoot
@ -288,19 +344,26 @@ if [ "$myPERSISTENCE" = "on" ];
  fuCITRIXHONEYPOT
  fuCONPOT
  fuCOWRIE
  fuDDOSPOT
  fuDICOMPOT
  fuDIONAEA
  fuELASTICPOT
  fuELK
  fuENDLESSH
  fuFATT
  fuGLUTTON
  fuHERALDING
  fuHELLPOT
  fuHONEYSAP
  fuHONEYPOTS
  fuHONEYPY
  fuHONEYTRAP
  fuIPPHONEY
  fuLOG4POT
  fuMAILONEY
  fuMEDPOT
  fuNGINX
  fuREDISHONEYPOT
  fuRDPY
  fuSPIDERFOOT
  fuSURICATA

bin/deploy.sh (new executable file, 182 lines)

@ -0,0 +1,182 @@
#!/bin/bash
# Do we have root?
function fuGOT_ROOT {
echo
echo -n "### Checking for root: "
if [ "$(whoami)" != "root" ];
then
echo "[ NOT OK ]"
echo "### Please run as root."
echo "### Example: sudo $0"
exit
else
echo "[ OK ]"
fi
}
function fuDEPLOY_POT () {
echo
echo "###############################"
echo "# Deploying to T-Pot Hive ... #"
echo "###############################"
echo
sshpass -e ssh -4 -t -T -l "$MY_TPOT_USERNAME" -p 64295 "$MY_HIVE_IP" << EOF
echo "$SSHPASS" | sudo -S bash -c 'useradd -m -s /sbin/nologin -G tpotlogs "$MY_HIVE_USERNAME";
mkdir -p /home/"$MY_HIVE_USERNAME"/.ssh;
echo "$MY_POT_PUBLICKEY" >> /home/"$MY_HIVE_USERNAME"/.ssh/authorized_keys;
chmod 600 /home/"$MY_HIVE_USERNAME"/.ssh/authorized_keys;
chmod 755 /home/"$MY_HIVE_USERNAME"/.ssh;
chown "$MY_HIVE_USERNAME":"$MY_HIVE_USERNAME" -R /home/"$MY_HIVE_USERNAME"/.ssh'
EOF
echo
echo "###########################"
echo "# Done. Please reboot ... #"
echo "###########################"
echo
exit 0
}
# Check Hive availability
function fuCHECK_HIVE () {
echo
echo "############################################"
echo "# Checking for T-Pot Hive availability ... #"
echo "############################################"
echo
sshpass -e ssh -4 -t -l "$MY_TPOT_USERNAME" -p 64295 -f -N -L64305:127.0.0.1:64305 "$MY_HIVE_IP" -o "StrictHostKeyChecking=no"
if [ $? -eq 0 ];
then
echo
echo "#########################"
echo "# T-Pot Hive available! #"
echo "#########################"
echo
myHIVE_OK=$(curl -s http://127.0.0.1:64305)
if [ "$myHIVE_OK" == "ok" ];
then
echo
echo "##############################"
echo "# T-Pot Hive tunnel test OK! #"
echo "##############################"
echo
kill -9 $(pidof ssh)
else
echo
echo "######################################################"
echo "# T-Pot Hive tunnel test FAILED! #"
echo "# Tunneled port tcp/64305 unreachable on T-Pot Hive. #"
echo "# Aborting. #"
echo "######################################################"
echo
kill -9 $(pidof ssh)
rm $MY_POT_PUBLICKEYFILE
rm $MY_POT_PRIVATEKEYFILE
rm $MY_LS_ENVCONFIGFILE
exit 1
fi;
else
echo
echo "#################################################################"
echo "# Something went wrong, most likely T-Pot Hive was unreachable! #"
echo "# Aborting. #"
echo "#################################################################"
echo
rm $MY_POT_PUBLICKEYFILE
rm $MY_POT_PRIVATEKEYFILE
rm $MY_LS_ENVCONFIGFILE
exit 1
fi;
}
function fuGET_DEPLOY_DATA () {
echo
echo "### Please provide data from your T-Pot Hive installation."
echo "### This usually is the one running the 'T-Pot Hive' type."
echo "### You will be needing the OS user (typically 'tsec'), the users' password and the IP / FQDN."
echo "### Do not worry, the password will not be persisted!"
echo
read -p "Username: " MY_TPOT_USERNAME
read -s -p "Password: " SSHPASS
echo
export SSHPASS
read -p "IP / FQDN: " MY_HIVE_IP
MY_HIVE_USERNAME="$(hostname)"
MY_TPOT_TYPE="POT"
MY_LS_ENVCONFIGFILE="/data/elk/logstash/ls_environment"
MY_POT_PUBLICKEYFILE="/data/elk/logstash/$MY_HIVE_USERNAME.pub"
MY_POT_PRIVATEKEYFILE="/data/elk/logstash/$MY_HIVE_USERNAME"
if ! [ -s "$MY_POT_PRIVATEKEYFILE" ] && ! [ -s "$MY_POT_PUBLICKEYFILE" ];
then
echo
echo "##############################"
echo "# Generating ssh keyfile ... #"
echo "##############################"
echo
mkdir -p /data/elk/logstash
ssh-keygen -f "$MY_POT_PRIVATEKEYFILE" -N "" -C "$MY_HIVE_USERNAME"
MY_POT_PUBLICKEY="$(cat "$MY_POT_PUBLICKEYFILE")"
else
echo
echo "#############################################"
echo "# There is already a ssh keyfile. Aborting. #"
echo "#############################################"
echo
exit 1
fi
echo
echo "###########################################################"
echo "# Writing config to /data/elk/logstash/ls_environment. #"
echo "# If you make changes to this file, you need to reboot or #"
echo "# run /opt/tpot/bin/updateip.sh. #"
echo "###########################################################"
echo
tee $MY_LS_ENVCONFIGFILE << EOF
MY_TPOT_TYPE=$MY_TPOT_TYPE
MY_POT_PRIVATEKEYFILE=$MY_POT_PRIVATEKEYFILE
MY_HIVE_USERNAME=$MY_HIVE_USERNAME
MY_HIVE_IP=$MY_HIVE_IP
EOF
}
# Deploy Pot to Hive
fuGOT_ROOT
echo
echo "#################################"
echo "# Ship T-Pot Logs to T-Pot Hive #"
echo "#################################"
echo
echo "If you already have a T-Pot Hive installation running and"
echo "this T-Pot installation is running the type \"Pot\" the"
echo "script will automagically setup this T-Pot to ship and"
echo "prepare the Hive to receive logs from this T-Pot."
echo
echo
echo "###################################"
echo "# Deploy T-Pot Logs to T-Pot Hive #"
echo "###################################"
echo
echo "[c] - Continue deplyoment"
echo "[q] - Abort and exit"
echo
while [ 1 != 2 ]
do
read -s -n 1 -p "Your choice: " mySELECT
echo $mySELECT
case "$mySELECT" in
[c,C])
fuGET_DEPLOY_DATA
fuCHECK_HIVE
fuDEPLOY_POT
break
;;
[q,Q])
echo "Aborted."
exit 0
;;
esac
done

View File

@ -6,7 +6,7 @@ myKIBANA="http://127.0.0.1:64296/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
  then
    echo "### Elasticsearch is not available, try starting via 'systemctl start tpot'."
    exit
  else
    echo "### Elasticsearch is available, now continuing."
@ -15,7 +15,7 @@ fi
# Set vars
myDATE=$(date +%Y%m%d%H%M)
myINDEXCOUNT=$(curl -s -XGET ''$myKIBANA'api/saved_objects/_find?type=index-pattern' | jq '.saved_objects[].attributes' | tr '\\' '\n' | grep -E "scripted|url" | wc -w)
myINDEXID=$(curl -s -XGET ''$myKIBANA'api/saved_objects/_find?type=index-pattern' | jq '.saved_objects[].id' | tr -d '"')
myDASHBOARDS=$(curl -s -XGET ''$myKIBANA'api/saved_objects/_find?type=dashboard&per_page=500' | jq '.saved_objects[].id' | tr -d '"')
myVISUALIZATIONS=$(curl -s -XGET ''$myKIBANA'api/saved_objects/_find?type=visualization&per_page=500' | jq '.saved_objects[].id' | tr -d '"')

View File

@ -6,7 +6,7 @@ myKIBANA="http://127.0.0.1:64296/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
  then
    echo "### Elasticsearch is not available, try starting via 'systemctl start tpot'."
    exit
  else
    echo "### Elasticsearch is available, now continuing."
@ -43,7 +43,7 @@ tar xvfz $myDUMP > /dev/null
# Restore index patterns
myINDEXID=$(ls patterns/*.json | cut -c 10- | rev | cut -c 6- | rev)
myINDEXCOUNT=$(cat patterns/$myINDEXID.json | tr '\\' '\n' | grep -E "scripted|url" | wc -w)
echo $myCOL1"### Now importing"$myCOL0 $myINDEXCOUNT $myCOL1"index pattern fields." $myCOL0
curl -s -XDELETE ''$myKIBANA'api/saved_objects/index-pattern/logstash-*' -H "Content-Type: application/json" -H "kbn-xsrf: true" > /dev/null
curl -s -XDELETE ''$myKIBANA'api/saved_objects/index-pattern/'$myINDEXID'' -H "Content-Type: application/json" -H "kbn-xsrf: true" > /dev/null

View File

@ -29,7 +29,7 @@ for i in $myYMLS;
  do
    myITEMS+="$i $(echo $i | cut -d "." -f1 | tr [:lower:] [:upper:]) "
  done
myEDITION=$(dialog --backtitle "$myBACKTITLE" --menu "Select T-Pot Edition" 17 50 10 $myITEMS 3>&1 1>&2 2>&3 3>&-)
if [ "$myEDITION" == "" ];
  then
    echo "Have a nice day!"

View File

@ -2,6 +2,7 @@
# Let's add the first local ip to the /etc/issue and external ip to ews.ip file
# If the external IP cannot be detected, the internal IP will be inherited.
source /etc/environment
myUUID=$(lsblk -o MOUNTPOINT,UUID | grep "/" | awk '{ print $2 }')
myLOCALIP=$(hostname -I | awk '{ print $1 }')
myEXTIP=$(/opt/tpot/bin/myip.sh)
if [ "$myEXTIP" = "" ];
@ -26,9 +27,22 @@ tee /data/ews/conf/ews.ip << EOF
ip = $myEXTIP
EOF
tee /opt/tpot/etc/compose/elk_environment << EOF
HONEY_UUID=$myUUID
MY_EXTIP=$myEXTIP
MY_INTIP=$myLOCALIP
MY_HOSTNAME=$HOSTNAME
EOF
if [ -s "/data/elk/logstash/ls_environment" ];
  then
    source /data/elk/logstash/ls_environment
    tee -a /opt/tpot/etc/compose/elk_environment << EOF
MY_TPOT_TYPE=$MY_TPOT_TYPE
MY_POT_PRIVATEKEYFILE=$MY_POT_PRIVATEKEYFILE
MY_HIVE_USERNAME=$MY_HIVE_USERNAME
MY_HIVE_IP=$MY_HIVE_IP
EOF
fi
chown tpot:tpot /data/ews/conf/ews.ip
chmod 770 /data/ews/conf/ews.ip

cloud/.gitignore (new file, 10 lines)

@ -0,0 +1,10 @@
# Ansible
*.retry
# Terraform
**/.terraform
**/terraform.*
# OpenStack clouds
**/clouds.yaml
**/secure.yaml

View File

@ -1,2 +0,0 @@
# Ansible
*.retry

View File

@ -2,15 +2,16 @@
Here you can find a ready-to-use solution for your automated T-Pot deployment using [Ansible](https://www.ansible.com/).
It consists of an Ansible Playbook with multiple roles, which is reusable for all [OpenStack](https://www.openstack.org/) based clouds (e.g. Open Telekom Cloud, Orange Cloud, Telefonica Open Cloud, OVH) out of the box.
Apart from that you can easily adapt the deploy role to use other [cloud providers](https://docs.ansible.com/ansible/latest/scenario_guides/cloud_guides.html). Check out [Ansible Galaxy](https://galaxy.ansible.com/search?keywords=&order_by=-relevance&page=1&deprecated=false&type=collection&tags=cloud) for more cloud collections.
The Playbook first creates all resources (security group, network, subnet, router), deploys one (or more) new servers and then installs and configures T-Pot on them.
This example showcases the deployment on our own OpenStack based Public Cloud Offering [Open Telekom Cloud](https://open-telekom-cloud.com/en).
# Table of contents
- [Preparation of Ansible Master](#ansible-master)
- [Ansible Installation](#ansible)
- [OpenStack Collection Installation](#collection)
- [Agent Forwarding](#agent-forwarding)
- [Preparations in Open Telekom Cloud Console](#preparation)
- [Create new project](#project)
@ -18,8 +19,9 @@ This example showcases the deployment on our own OpenStack based Public Cloud Of
- [Import Key Pair](#key-pair)
- [Clone Git Repository](#clone-git)
- [Settings and recommended values](#settings)
- [clouds.yaml](#clouds-yaml)
- [Ansible remote user](#remote-user)
- [Number of instances to deploy](#number)
- [Instance settings](#instance-settings)
- [User password](#user-password)
- [Configure `tpot.conf.dist`](#tpot-conf)
@ -36,6 +38,8 @@ Ansible works over the SSH Port, so you don't have to add any special rules to y
<a name="ansible"></a>
## Ansible Installation
:warning: Ansible 2.10 or newer is required!
Example for Ubuntu 18.04:
At first we update the system:
@ -48,6 +52,17 @@ Then we need to add the repository and install Ansible:
For other OSes and Distros have a look at the official [Ansible Documentation](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html).
If your OS does not offer a recent version of Ansible (>= 2.10) you should consider [installing Ansible with pip](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#installing-ansible-with-pip).
In short (if you already have Python3/pip3 installed):
```
pip3 install ansible
```
<a name="collection"></a>
## OpenStack Collection Installation
For interacting with OpenStack resources in Ansible, you need to install the collection from Ansible Galaxy:
`ansible-galaxy collection install openstack.cloud`
<a name="agent-forwarding"></a>
## Agent Forwarding
If you run the Ansible Playbook remotely on your Ansible Master Server, Agent Forwarding must be enabled in order to let Ansible connect to newly created machines.
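A minimal sketch of what this can look like from the machine you connect from (assuming an OpenSSH client and a private key at `~/.ssh/id_rsa`; adjust user, host and key name to your setup):
```
# load the key into an agent, then connect to the Ansible Master with forwarding enabled (-A)
eval "$(ssh-agent -s)"
ssh-add ~/.ssh/id_rsa
ssh -A user@your-ansible-master
```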
@ -96,7 +111,7 @@ Import your SSH public key.
<a name="clone-git"></a>
# Clone Git Repository
Clone the `tpotce` repository to your Ansible Master:
`git clone https://github.com/telekom-security/tpotce.git`
All Ansible related files are located in the [`cloud/ansible/openstack`](openstack) folder.
<a name="settings"></a>
@ -104,7 +119,7 @@ All Ansible related files are located in the [`cloud/ansible/openstack`](opensta
You can configure all aspects of your Elastic Cloud Server and T-Pot before using the Playbook:
<a name="clouds-yaml"></a>
## clouds.yaml
Located at [`openstack/clouds.yaml`](openstack/clouds.yaml).
Enter your Open Telekom Cloud API user credentials here (username, password, project name, user domain name):
```
@ -118,22 +133,36 @@ clouds:
      user_domain_name: OTC-EU-DE-000000000010000XXXXX
```
You can also perform different authentication methods like sourcing OpenStack OS_* environment variables or providing an inline dictionary.
For more information have a look in the [openstack.cloud.server](https://docs.ansible.com/ansible/latest/collections/openstack/cloud/server_module.html) Ansible module documentation.
If you already have your own `clouds.yaml` file or have multiple clouds in there, you can specify which one to use in the `openstack/my_os_cloud.yaml` file:
```
# Enter the name of your cloud to use from clouds.yaml
cloud: open-telekom-cloud
```
<a name="remote-user"></a>
## Ansible remote user
You may have to adjust the `remote_user` in the Ansible Playbook under [`openstack/deploy_tpot.yaml`](openstack/deploy_tpot.yaml) depending on your Debian base image (e.g. on Open Telekom Cloud the default Debian user is `linux`).
<a name="number"></a>
## Number of instances to deploy
You can adjust the number of VMs/T-Pots that you want to create in [`openstack/deploy_tpot.yaml`](openstack/deploy_tpot.yaml):
```
loop: "{{ range(0, 1) }}"
```
One instance is set as the default, increase to your liking.
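For example, to deploy three T-Pots in one run, the loop could be adjusted like this (a sketch; `range(0, 3)` yields the indices 0, 1 and 2, i.e. three instances):
```
    - name: Create one or more instances
      ansible.builtin.include_role:
        name: create_vm
      loop: "{{ range(0, 3) }}"
      loop_control:
        extended: yes
```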
<a name="instance-settings"></a> <a name="instance-settings"></a>
## Instance settings ## Instance settings
Located at [`openstack/roles/deploy/vars/main.yaml`](openstack/roles/deploy/vars/main.yaml). Located at [`openstack/roles/create_vm/vars/main.yaml`](openstack/roles/create_vm/vars/main.yaml).
Here you can customize your virtual machine specifications: Here you can customize your virtual machine specifications:
- Choose an availability zone. For Open Telekom Cloud reference see [here](https://docs.otc.t-systems.com/en-us/endpoint/index.html). - Choose an availability zone. For Open Telekom Cloud reference see [here](https://docs.otc.t-systems.com/en-us/endpoint/index.html).
- Change the OS image (For T-Pot we need Debian) - Change the OS image (For T-Pot we need Debian)
- (Optional) Change the volume size - (Optional) Change the volume size
- Specify your key pair (:warning: Mandatory) - Specify your key pair (:warning: Mandatory)
- (Optional) Change the instance type (flavor) - (Optional) Change the instance type (flavor)
`s2.medium.8` corresponds to 1 vCPU and 8GB of RAM and is the minimum required flavor. `s3.medium.8` corresponds to 1 vCPU and 8GB of RAM and is the minimum required flavor.
A full list of Open Telekom Cloud flavors can be found [here](https://docs.otc.t-systems.com/en-us/usermanual/ecs/en-us_topic_0177512565.html). A full list of Open Telekom Cloud flavors can be found [here](https://docs.otc.t-systems.com/en-us/usermanual/ecs/en-us_topic_0177512565.html).
``` ```
@ -141,7 +170,7 @@ availability_zone: eu-de-03
image: Standard_Debian_10_latest image: Standard_Debian_10_latest
volume_size: 128 volume_size: 128
key_name: your-KeyPair key_name: your-KeyPair
flavor: s2.medium.8 flavor: s3.medium.8
``` ```
<a name="user-password"></a> <a name="user-password"></a>
@ -160,14 +189,6 @@ Here you can choose:
- a username for the web interface - a username for the web interface
- a password for the web interface (**you should definitely change that**) - a password for the web interface (**you should definitely change that**)
```
# tpot configuration file
# myCONF_TPOT_FLAVOR=[STANDARD, SENSOR, INDUSTRIAL, COLLECTOR, NEXTGEN]
myCONF_TPOT_FLAVOR='STANDARD'
myCONF_WEB_USER='webuser'
myCONF_WEB_PW='w3b$ecret'
```
<a name="ews-cfg"></a> <a name="ews-cfg"></a>
## Optional: Custom `ews.cfg` ## Optional: Custom `ews.cfg`
Enable this by uncommenting the role in the [deploy_tpot.yaml](openstack/deploy_tpot.yaml) playbook. Enable this by uncommenting the role in the [deploy_tpot.yaml](openstack/deploy_tpot.yaml) playbook.
@ -200,7 +221,7 @@ Enable this by uncommenting the role in the [deploy_tpot.yaml](openstack/deploy_
# - custom_hpfeeds # - custom_hpfeeds
``` ```
You can specify custom HPFEEDS in [`openstack/roles/custom_hpfeeds/templates/hpfeeds.cfg`](openstack/roles/custom_hpfeeds/templates/hpfeeds.cfg). You can specify custom HPFEEDS in [`openstack/roles/custom_hpfeeds/files/hpfeeds.cfg`](openstack/roles/custom_hpfeeds/files/hpfeeds.cfg).
That file contains the defaults (turned off) and you can adapt it for your needs, e.g. for SISSDEN: That file contains the defaults (turned off) and you can adapt it for your needs, e.g. for SISSDEN:
``` ```
myENABLE=true myENABLE=true
@ -216,6 +237,7 @@ myFORMAT=json
<a name="deploy"></a>
# Deploying a T-Pot :honey_pot::honeybee:
Now, after configuring everything, we can finally start deploying T-Pots!
Go to the [`openstack`](openstack) folder and run the Ansible Playbook with:
`ansible-playbook deploy_tpot.yaml`
(Yes, it is as easy as that :smile:)
@ -223,15 +245,13 @@ Go to the [`openstack`](openstack) folder and run the Ansible Playbook with:
If you are running on a machine which asks for a sudo password, you can use:
`ansible-playbook --ask-become-pass deploy_tpot.yaml`
The Playbook will first install required packages on the Ansible Master and then deploy one (or more) new server instances.
After that, T-Pot gets installed and configured on them, optionally custom configs are applied and finally it reboots.
Once this is done, you can proceed with connecting/logging in to the T-Pot according to the [documentation](https://github.com/telekom-security/tpotce#ssh-and-web-access).
<a name="documentation"></a> <a name="documentation"></a>
# Further documentation # Further documentation
- [Ansible Documentation](https://docs.ansible.com/ansible/latest/) - [Ansible Documentation](https://docs.ansible.com/ansible/latest/)
- [Cloud modules — Ansible Documentation](https://docs.ansible.com/ansible/latest/modules/list_of_cloud_modules.html) - [openstack.cloud.server Create/Delete Compute Instances from OpenStack](https://docs.ansible.com/ansible/latest/collections/openstack/cloud/server_module.html)
- [os_server Create/Delete Compute Instances from OpenStack — Ansible Documentation](https://docs.ansible.com/ansible/latest/modules/os_server_module.html)
- [Open Telekom Cloud Help Center](https://docs.otc.t-systems.com/) - [Open Telekom Cloud Help Center](https://docs.otc.t-systems.com/)
- [Open Telekom Cloud API Overview](https://docs.otc.t-systems.com/en-us/api/wp/en-us_topic_0052070394.html)

View File

@ -1,6 +1,7 @@
clouds:
  open-telekom-cloud:
    profile: otc
    region_name: eu-de
    auth:
      project_name: eu-de_your_project
      username: your_api_user

View File

@ -4,13 +4,22 @@
  roles:
    - check
- name: Deploy instances
  hosts: localhost
  vars_files: my_os_cloud.yaml
  tasks:
    - name: Create security group and network
      ansible.builtin.include_role:
        name: create_net
    - name: Create one or more instances
      ansible.builtin.include_role:
        name: create_vm
      loop: "{{ range(0, 1) }}"
      loop_control:
        extended: yes
- name: Install T-Pot
  hosts: tpot
  remote_user: linux
  become: yes
  gather_facts: no

View File

@ -0,0 +1,2 @@
# Enter the name of your cloud to use from clouds.yaml
cloud: open-telekom-cloud

View File

@ -0,0 +1,2 @@
collections:
- name: openstack.cloud

View File

@ -1,17 +1,19 @@
- name: Install dependencies
  ansible.builtin.package:
    name:
      - gcc
      - python3-dev
      - python3-setuptools
      - python3-pip
    state: present
- name: Install openstacksdk
  ansible.builtin.pip:
    name: openstacksdk
    executable: pip3
- name: Check if agent forwarding is enabled
  ansible.builtin.fail:
    msg: Please enable agent forwarding to allow Ansible to connect to the remote host!
  ignore_errors: yes
  failed_when: lookup('env','SSH_AUTH_SOCK') == ""

View File

@ -0,0 +1,33 @@
- name: Create security group
openstack.cloud.security_group:
cloud: "{{ cloud }}"
name: sg-tpot-ansible
description: Security Group for T-Pot
- name: Add rules to security group
openstack.cloud.security_group_rule:
cloud: "{{ cloud }}"
security_group: sg-tpot-ansible
remote_ip_prefix: 0.0.0.0/0
- name: Create network
openstack.cloud.network:
cloud: "{{ cloud }}"
name: network-tpot-ansible
- name: Create subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: network-tpot-ansible
name: subnet-tpot-ansible
cidr: 192.168.0.0/24
dns_nameservers:
- 100.125.4.25
- 100.125.129.199
- name: Create router
openstack.cloud.router:
cloud: "{{ cloud }}"
name: router-tpot-ansible
interfaces:
- subnet-tpot-ansible

View File

@ -0,0 +1,24 @@
- name: Generate T-Pot name
ansible.builtin.set_fact:
tpot_name: "t-pot-ansible-{{ lookup('password', '/dev/null chars=ascii_lowercase,digits length=6') }}"
- name: Create instance {{ ansible_loop.index }} of {{ ansible_loop.length }}
openstack.cloud.server:
cloud: "{{ cloud }}"
name: "{{ tpot_name }}"
availability_zone: "{{ availability_zone }}"
image: "{{ image }}"
boot_from_volume: yes
volume_size: "{{ volume_size }}"
key_name: "{{ key_name }}"
auto_ip: yes
flavor: "{{ flavor }}"
security_groups: sg-tpot-ansible
network: network-tpot-ansible
register: tpot
- name: Add instance to inventory
ansible.builtin.add_host:
hostname: "{{ tpot_name }}"
ansible_host: "{{ tpot.server.public_v4 }}"
groups: tpot

View File

@ -2,4 +2,4 @@ availability_zone: eu-de-03
image: Standard_Debian_10_latest
volume_size: 128
key_name: your-KeyPair
flavor: s3.medium.8

View File

@ -1,5 +1,5 @@
- name: Copy ews configuration file
  ansible.builtin.template:
    src: ews.cfg
    dest: /data/ews/conf
    owner: root
@ -7,7 +7,7 @@
    mode: 0644
- name: Patching tpot.yml with custom ews configuration file
  ansible.builtin.lineinfile:
    path: /opt/tpot/etc/tpot.yml
    insertafter: "/opt/ewsposter/ews.ip"
    line: " - /data/ews/conf/ews.cfg:/opt/ewsposter/ews.cfg"

View File

@ -1,5 +1,5 @@
- name: Copy hpfeeds configuration file
  ansible.builtin.copy:
    src: hpfeeds.cfg
    dest: /data/ews/conf
    owner: tpot
@ -8,5 +8,5 @@
  register: config
- name: Applying hpfeeds settings
  ansible.builtin.command: /opt/tpot/bin/hpfeeds_optin.sh --conf=/data/ews/conf/hpfeeds.cfg
  when: config.changed == true

View File

@ -1,58 +0,0 @@
- name: Create T-Pot name
shell: echo t-pot-ansible-$(pwgen -ns 6 -1)
register: tpot_name
- name: Create security group
os_security_group:
cloud: open-telekom-cloud
name: sg-tpot-any
description: tpot any-any
- name: Add rules to security group
os_security_group_rule:
cloud: open-telekom-cloud
security_group: sg-tpot-any
remote_ip_prefix: 0.0.0.0/0
- name: Create network
os_network:
cloud: open-telekom-cloud
name: network-tpot
- name: Create subnet
os_subnet:
cloud: open-telekom-cloud
network_name: network-tpot
name: subnet-tpot
cidr: 192.168.0.0/24
dns_nameservers:
- 1.1.1.1
- 8.8.8.8
- name: Create router
os_router:
cloud: open-telekom-cloud
name: router-tpot
interfaces:
- subnet-tpot
- name: Launch an instance
os_server:
cloud: open-telekom-cloud
name: "{{ tpot_name.stdout }}"
availability_zone: "{{ availability_zone }}"
image: "{{ image }}"
boot_from_volume: yes
volume_size: "{{ volume_size }}"
key_name: "{{ key_name }}"
timeout: 200
flavor: "{{ flavor }}"
security_groups: sg-tpot-any
network: network-tpot
register: tpot
- name: Add instance to inventory
add_host:
hostname: "{{ tpot_name.stdout }}"
ansible_host: "{{ tpot.server.public_v4 }}"
groups: TPOT

View File

@ -1,29 +1,29 @@
- name: Waiting for SSH connection
  ansible.builtin.wait_for_connection:
- name: Gathering facts
  ansible.builtin.setup:
- name: Cloning T-Pot install directory
  ansible.builtin.git:
    repo: "https://github.com/telekom-security/tpotce.git"
    dest: /root/tpot
- name: Prepare to set user password
  ansible.builtin.set_fact:
    user_name: "{{ ansible_user }}"
    user_salt: "s0mew1ck3dTpoT"
  no_log: true
- name: Changing password for user {{ user_name }}
  ansible.builtin.user:
    name: "{{ ansible_user }}"
    password: "{{ user_password | password_hash('sha512', user_salt) }}"
    state: present
    shell: /bin/bash
- name: Copy T-Pot configuration file
  ansible.builtin.copy:
    src: ../../../../../../iso/installer/tpot.conf.dist
    dest: /root/tpot.conf
    owner: root
@ -31,15 +31,15 @@
    mode: 0644
- name: Install T-Pot on instance - be patient, this might take 15 to 30 minutes depending on the connection speed.
  ansible.builtin.command: /root/tpot/iso/installer/install.sh --type=auto --conf=/root/tpot.conf
- name: Delete T-Pot configuration file
  ansible.builtin.file:
    path: /root/tpot.conf
    state: absent
- name: Change unattended-upgrades to take default action
  ansible.builtin.blockinfile:
    dest: /etc/apt/apt.conf.d/50unattended-upgrades
    block: |
      Dpkg::Options {

View File

@ -1,10 +1,10 @@
- name: Finally rebooting T-Pot
  ansible.builtin.command: shutdown -r now
  async: 1
  poll: 0
- name: Next login options
  ansible.builtin.debug:
    msg:
      - "***** SSH Access:"
      - "***** ssh {{ ansible_user }}@{{ ansible_host }} -p 64295"

View File

@ -1,2 +0,0 @@
**/.terraform
**/terraform.*

View File

@ -1,7 +1,7 @@
# T-Pot Terraform
This [Terraform](https://www.terraform.io/) configuration can be used to launch a virtual machine, bootstrap any dependencies and install T-Pot in a single step.
Configuration for Amazon Web Services (AWS) and Open Telekom Cloud (OTC) is currently included.
This can easily be extended to support other [Terraform providers](https://registry.terraform.io/browse/providers?category=public-cloud%2Ccloud-automation%2Cinfrastructure).
[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) is used to bootstrap the instance and install T-Pot on startup.
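To give an idea of the direction, a hypothetical sketch for another provider (here DigitalOcean; `var.do_token` and the resource definitions for the instance itself are not part of this repository and would still have to be written):
```
terraform {
  required_version = ">= 0.13"
  required_providers {
    digitalocean = {
      source  = "digitalocean/digitalocean"
      version = "~> 2.0"
    }
  }
}

provider "digitalocean" {
  # API token, e.g. supplied via a variable or environment
  token = var.do_token
}
```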
@ -9,7 +9,7 @@ This can easily be extended to support other [Terraform providers](https://www.t
- [What get's created](#what-created)
- [Amazon Web Services (AWS)](#what-created-aws)
- [Open Telekom Cloud (OTC)](#what-created-otc)
- [Prerequisites](#pre)
- [Amazon Web Services (AWS)](#pre-aws)
- [Open Telekom Cloud (OTC)](#pre-otc)
- [Terraform Variables](#variables)
@ -37,16 +37,17 @@ This can easily be extended to support other [Terraform providers](https://www.t
<a name="what-created-otc"></a>
### Open Telekom Cloud (OTC)
* ECS instance:
  * s3.medium.8 (1 vCPU, 8 GB RAM)
  * 128 GB disk
  * Debian 10
  * Public EIP
* Security Group
  * All TCP/UDP ports are open to the Internet
* Virtual Private Cloud (VPC) and Subnet
<a name="pre"></a>
## Prerequisites
* [Terraform](https://www.terraform.io/) 0.13
<a name="pre-aws"></a>
### Amazon Web Services (AWS)
@ -90,12 +91,13 @@ In `aws/variables.tf`, you can change the additional variables:
<a name="variables-otc"></a>
### Open Telekom Cloud (OTC)
In `otc/variables.tf`, you can change the additional variables:
* `ecs_flavor`
* `ecs_disk_size`
* `availability_zone`
* `key_pair` - Specify an existing SSH key pair
* `eip_size`
... and some more, but these are the most relevant.
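As an illustration, a minimal `terraform.tfvars` overriding some of these defaults could look like this (values are examples only; `key_pair` must name a key pair that already exists in your OTC project):
```
ecs_flavor        = "s3.medium.8"
ecs_disk_size     = "128"
availability_zone = "eu-de-01"
key_pair          = "your-KeyPair"
eip_size          = "100"
```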
<a name="initialising"></a> <a name="initialising"></a>
## Initialising ## Initialising
@ -124,4 +126,4 @@ If you want the remove the built infrastructure, you can run [`terraform destroy
<a name="connecting"></a> <a name="connecting"></a>
## Connecting to the Instance ## Connecting to the Instance
When the installation is completed, you can proceed with connecting/logging in to the T-Pot according to the [documentation](https://github.com/dtag-dev-sec/tpotce#ssh-and-web-access). When the installation is completed, you can proceed with connecting/logging in to the T-Pot according to the [documentation](https://github.com/telekom-security/tpotce#ssh-and-web-access).

cloud/terraform/aws/.terraform.lock.hcl (generated, 20 lines)

@ -0,0 +1,20 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/aws" {
version = "3.26.0"
constraints = "3.26.0"
hashes = [
"h1:0i78FItlPeiomd+4ThZrtm56P5K33k7/6dnEe4ZePI0=",
"zh:26043eed36d070ca032cf04bc980c654a25821a8abc0c85e1e570e3935bbfcbb",
"zh:2fe68f3f78d23830a04d7fac3eda550eef1f627dfc130486f70a65dc5c254300",
"zh:3d66484c608c64678e639db25d63872783ce60363a1246e30317f21c9c23b84b",
"zh:46ffd755cfd4cf94fe66342797b5afdcef010a24e126c67fee141b357d393535",
"zh:5e96f24357e945c9067cf5e032ad1d003609629c956c2f9f642fefe714e74587",
"zh:60c27aca36bb63bf3e865c2193be80ca83b376581d00f9c220af4b013e163c4d",
"zh:896f0f22d19d41e71b22f9240b261714c3915b165ddefeb771e7734d69dc47ea",
"zh:90de9966cb2fd3e2f326df291595e55d2dd2d90e7d6dd085c2c8691dce82bdb4",
"zh:ad05a91a88ceb1d6de5a568f7cc0b0e5bc0a79f3da70bc28c1e7f3750e362d58",
"zh:e8c63f59c6465329e1f3357498face3dd7ef10a033df3c366a33aa9e94b46c01",
]
}

View File

@ -60,7 +60,7 @@ resource "aws_instance" "tpot" {
    volume_size           = 128
    delete_on_termination = true
  }
  user_data                   = templatefile("../cloud-init.yaml", { timezone = var.timezone, password = var.linux_password, tpot_flavor = var.tpot_flavor, web_user = var.web_user, web_password = var.web_password })
  vpc_security_group_ids      = [aws_security_group.tpot.id]
  associate_public_ip_address = true
}

View File

@ -32,28 +32,31 @@ variable "ec2_instance_type" {
variable "ec2_ami" { variable "ec2_ami" {
type = map(string) type = map(string)
default = { default = {
"ap-east-1" = "ami-f9c58188" "af-south-1" = "ami-0272d4f5fb1b98a0d"
"ap-northeast-1" = "ami-0fae5501ae428f9d7" "ap-east-1" = "ami-00d242e2f23abf6d2"
"ap-northeast-2" = "ami-0522874b039290246" "ap-northeast-1" = "ami-001c6b4d627e8be53"
"ap-south-1" = "ami-03b4e18f70aca8973" "ap-northeast-2" = "ami-0d841ed4bf80e764c"
"ap-southeast-1" = "ami-0852293c17f5240b3" "ap-northeast-3" = "ami-01b0a01d770321320"
"ap-southeast-2" = "ami-03ea2db714f1f6acf" "ap-south-1" = "ami-04ba7e5bd7c6f6929"
"ca-central-1" = "ami-094511e5020cdea18" "ap-southeast-1" = "ami-0dca3eabb09c32ae2"
"eu-central-1" = "ami-0394acab8c5063f6f" "ap-southeast-2" = "ami-03ff8684dc585ddae"
"eu-north-1" = "ami-0c82d9a7f5674320a" "ca-central-1" = "ami-08af22d7c0382fd83"
"eu-west-1" = "ami-006d280940ad4a96c" "eu-central-1" = "ami-0f41e297b3c53fab8"
"eu-west-2" = "ami-08fe9ea08db6f1258" "eu-north-1" = "ami-0bbc6a00971c77d6d"
"eu-west-3" = "ami-04563f5eab11f2b87" "eu-south-1" = "ami-03ff8684dc585ddae"
"me-south-1" = "ami-0492a01b319d1f052" "eu-west-1" = "ami-080684ad73d431a05"
"sa-east-1" = "ami-05e16feea94258a69" "eu-west-2" = "ami-04b259723891dfc53"
"us-east-1" = "ami-04d70e069399af2e9" "eu-west-3" = "ami-00662eead74f66895"
"us-east-2" = "ami-04100f1cdba76b497" "me-south-1" = "ami-021a6c6047091ab5b"
"us-west-1" = "ami-014c78f266c5b7163" "sa-east-1" = "ami-0aac091cce68a049c"
"us-west-2" = "ami-023b7a69b9328e1f9" "us-east-1" = "ami-05ad4ed7f9c48178b"
"us-east-2" = "ami-07640f3f27c0ad3d3"
"us-west-1" = "ami-0c053f1d5f22eb09f"
"us-west-2" = "ami-090cd3aed687b1ee1"
} }
} }
# cloud-init configuration ## cloud-init configuration ##
variable "timezone" { variable "timezone" {
default = "UTC" default = "UTC"
} }
@ -61,20 +64,30 @@ variable "timezone" {
variable "linux_password" { variable "linux_password" {
#default = "LiNuXuSeRPaSs#" #default = "LiNuXuSeRPaSs#"
description = "Set a password for the default user" description = "Set a password for the default user"
validation {
condition = length(var.linux_password) > 0
error_message = "Please specify a password for the default user."
}
} }
# These will go in the generated tpot.conf file ## These will go in the generated tpot.conf file ##
variable "tpot_flavor" { variable "tpot_flavor" {
default = "STANDARD" default = "STANDARD"
description = "Specify your tpot flavor [STANDARD, SENSOR, INDUSTRIAL, COLLECTOR, NEXTGEN]" description = "Specify your tpot flavor [STANDARD, SENSOR, INDUSTRIAL, COLLECTOR, NEXTGEN, MEDICAL]"
} }
variable "web_user" { variable "web_user" {
default = "webuser" default = "webuser"
description = "Set a username for the web user" description = "Set a username for the web user"
} }
variable "web_password" { variable "web_password" {
#default = "w3b$ecret" #default = "w3b$ecret"
description = "Set a password for the web user" description = "Set a password for the web user"
validation {
condition = length(var.web_password) > 0
error_message = "Please specify a password for the web user."
}
} }

View File

@ -1,3 +1,9 @@
terraform {
  required_version = ">= 0.13"
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "3.26.0"
    }
  }
}

View File

@ -5,7 +5,8 @@ packages:
  - git
runcmd:
  - curl -sS --retry 5 https://github.com
  - git clone https://github.com/telekom-security/tpotce /root/tpot
  - /root/tpot/iso/installer/install.sh --type=auto --conf=/root/tpot.conf
  - rm /root/tpot.conf
  - /sbin/shutdown -r now

cloud/terraform/otc/.terraform.lock.hcl (generated, 38 lines)

@ -0,0 +1,38 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/random" {
version = "3.1.0"
constraints = "~> 3.1.0"
hashes = [
"h1:BZMEPucF+pbu9gsPk0G0BHx7YP04+tKdq2MrRDF1EDM=",
"zh:2bbb3339f0643b5daa07480ef4397bd23a79963cc364cdfbb4e86354cb7725bc",
"zh:3cd456047805bf639fbf2c761b1848880ea703a054f76db51852008b11008626",
"zh:4f251b0eda5bb5e3dc26ea4400dba200018213654b69b4a5f96abee815b4f5ff",
"zh:7011332745ea061e517fe1319bd6c75054a314155cb2c1199a5b01fe1889a7e2",
"zh:738ed82858317ccc246691c8b85995bc125ac3b4143043219bd0437adc56c992",
"zh:7dbe52fac7bb21227acd7529b487511c91f4107db9cc4414f50d04ffc3cab427",
"zh:a3a9251fb15f93e4cfc1789800fc2d7414bbc18944ad4c5c98f466e6477c42bc",
"zh:a543ec1a3a8c20635cf374110bd2f87c07374cf2c50617eee2c669b3ceeeaa9f",
"zh:d9ab41d556a48bd7059f0810cf020500635bfc696c9fc3adab5ea8915c1d886b",
"zh:d9e13427a7d011dbd654e591b0337e6074eef8c3b9bb11b2e39eaaf257044fd7",
"zh:f7605bd1437752114baf601bdf6931debe6dc6bfe3006eb7e9bb9080931dca8a",
]
}
provider "registry.terraform.io/opentelekomcloud/opentelekomcloud" {
version = "1.23.6"
constraints = "~> 1.23.4"
hashes = [
"h1:B/1Md957jWaDgFqsJDzmJc75KwL0eC/PCVuZ8HV5xSc=",
"zh:1aa79010869d082157fb44fc83c3bff4e40938ec0ca916f704d974c7f7ca39e4",
"zh:3155b8366828ce50231f69962b55df1e2261ed63c44bb64e2c950dd68769df1b",
"zh:4a909617aa96a6d8aead14f56996ad94e0a1cae9d28e8df1ddae19c2095ed337",
"zh:4f71046719632b4b90f88d29d8ba88915ee6ad66cd9d7ebe84a7459013e5003a",
"zh:67e4d10b2db79ad78ae2ec8d9dfac53c4721028f97f4436a7aa45e80b1beefd3",
"zh:7f12541fc5a3513e5522ff2bd5fee17d1e67bfe64f9ef59d03863fc7389e12ce",
"zh:86fadabfc8307cf6084a412ffc9c797ec94932d08bc663a3fcebf98101e951f6",
"zh:98744b39c2bfe3e8e6f929f750a689971071b257f3f066f669f93c8e0b76d179",
"zh:c363d41debb060804e2c6bd9cb50b4e8daa37362299e3ea74e187265cd85f2ca",
]
}

View File

@ -1,5 +1,6 @@
clouds:
  open-telekom-cloud:
    region_name: eu-de
    auth:
      project_name: eu-de_your_project
      username: your_api_user

View File

@ -1,3 +1,7 @@
data "opentelekomcloud_images_image_v2" "debian" {
name = "Standard_Debian_10_latest"
}
resource "opentelekomcloud_networking_secgroup_v2" "secgroup_1" { resource "opentelekomcloud_networking_secgroup_v2" "secgroup_1" {
name = var.secgroup_name name = var.secgroup_name
description = var.secgroup_desc description = var.secgroup_desc
@ -10,24 +14,18 @@ resource "opentelekomcloud_networking_secgroup_rule_v2" "secgroup_rule_1" {
security_group_id = opentelekomcloud_networking_secgroup_v2.secgroup_1.id security_group_id = opentelekomcloud_networking_secgroup_v2.secgroup_1.id
} }
resource "opentelekomcloud_networking_network_v2" "network_1" { resource "opentelekomcloud_vpc_v1" "vpc_1" {
name = var.network_name name = var.vpc_name
cidr = var.vpc_cidr
} }
resource "opentelekomcloud_networking_subnet_v2" "subnet_1" { resource "opentelekomcloud_vpc_subnet_v1" "subnet_1" {
name = var.subnet_name name = var.subnet_name
network_id = opentelekomcloud_networking_network_v2.network_1.id cidr = var.subnet_cidr
cidr = "192.168.0.0/24" vpc_id = opentelekomcloud_vpc_v1.vpc_1.id
dns_nameservers = ["1.1.1.1", "8.8.8.8"]
}
resource "opentelekomcloud_networking_router_v2" "router_1" { gateway_ip = var.subnet_gateway_ip
name = var.router_name dns_list = ["100.125.4.25", "100.125.129.199"]
}
resource "opentelekomcloud_networking_router_interface_v2" "router_interface_1" {
router_id = opentelekomcloud_networking_router_v2.router_1.id
subnet_id = opentelekomcloud_networking_subnet_v2.subnet_1.id
} }
resource "random_id" "tpot" { resource "random_id" "tpot" {
@ -35,33 +33,36 @@ resource "random_id" "tpot" {
prefix = var.ecs_prefix prefix = var.ecs_prefix
} }
resource "opentelekomcloud_compute_instance_v2" "ecs_1" { resource "opentelekomcloud_ecs_instance_v1" "ecs_1" {
availability_zone = var.availabiliy_zone name = random_id.tpot.b64_url
name = random_id.tpot.b64 image_id = data.opentelekomcloud_images_image_v2.debian.id
flavor_name = var.flavor flavor = var.ecs_flavor
key_pair = var.key_pair vpc_id = opentelekomcloud_vpc_v1.vpc_1.id
security_groups = [opentelekomcloud_networking_secgroup_v2.secgroup_1.name]
user_data = templatefile("../cloud-init.yaml", {timezone = var.timezone, password = var.linux_password, tpot_flavor = var.tpot_flavor, web_user = var.web_user, web_password = var.web_password})
network { nics {
name = opentelekomcloud_networking_network_v2.network_1.name network_id = opentelekomcloud_vpc_subnet_v1.subnet_1.id
} }
block_device { system_disk_size = var.ecs_disk_size
uuid = var.image_id system_disk_type = "SAS"
source_type = "image" security_groups = [opentelekomcloud_networking_secgroup_v2.secgroup_1.id]
volume_size = var.volume_size availability_zone = var.availability_zone
destination_type = "volume" key_name = var.key_pair
delete_on_termination = "true" user_data = templatefile("../cloud-init.yaml", { timezone = var.timezone, password = var.linux_password, tpot_flavor = var.tpot_flavor, web_user = var.web_user, web_password = var.web_password })
}
resource "opentelekomcloud_vpc_eip_v1" "eip_1" {
publicip {
type = "5_bgp"
}
bandwidth {
name = "bandwidth-${random_id.tpot.b64_url}"
size = var.eip_size
share_type = "PER"
} }
depends_on = [opentelekomcloud_networking_router_interface_v2.router_interface_1]
} }
resource "opentelekomcloud_networking_floatingip_v2" "floatip_1" { resource "opentelekomcloud_compute_floatingip_associate_v2" "fip_1" {
} floating_ip = opentelekomcloud_vpc_eip_v1.eip_1.publicip.0.ip_address
instance_id = opentelekomcloud_ecs_instance_v1.ecs_1.id
resource "opentelekomcloud_compute_floatingip_associate_v2" "fip_2" {
floating_ip = opentelekomcloud_networking_floatingip_v2.floatip_1.address
instance_id = opentelekomcloud_compute_instance_v2.ecs_1.id
} }

View File

@ -1,11 +1,11 @@
output "Admin_UI" {
  value = "https://${opentelekomcloud_vpc_eip_v1.eip_1.publicip.0.ip_address}:64294"
}
output "SSH_Access" {
  value = "ssh -p 64295 linux@${opentelekomcloud_vpc_eip_v1.eip_1.publicip.0.ip_address}"
}
output "Web_UI" {
  value = "https://${opentelekomcloud_vpc_eip_v1.eip_1.publicip.0.ip_address}:64297"
}


@@ -1,3 +1,3 @@
 provider "opentelekomcloud" {
   cloud = "open-telekom-cloud"
 }


@@ -1,4 +1,4 @@
-# cloud-init configuration
+## cloud-init configuration ##
 variable "timezone" {
   default = "UTC"
 }
@@ -6,71 +6,93 @@ variable "timezone" {
 variable "linux_password" {
   #default = "LiNuXuSeRPaSs#"
   description = "Set a password for the default user"
+  validation {
+    condition = length(var.linux_password) > 0
+    error_message = "Please specify a password for the default user."
+  }
 }
-# Cloud resources name configuration
+## Security Group ##
 variable "secgroup_name" {
-  default = "tpot-secgroup"
+  default = "sg-tpot"
 }
 variable "secgroup_desc" {
-  default = "T-Pot Security Group"
+  default = "Security Group for T-Pot"
 }
-variable "network_name" {
-  default = "tpot-network"
+## Virtual Private Cloud ##
+variable "vpc_name" {
+  default = "vpc-tpot"
 }
+variable "vpc_cidr" {
+  default = "192.168.0.0/16"
+}
+## Subnet ##
 variable "subnet_name" {
-  default = "tpot-subnet"
+  default = "subnet-tpot"
 }
-variable "router_name" {
-  default = "tpot-router"
+variable "subnet_cidr" {
+  default = "192.168.0.0/24"
 }
+variable "subnet_gateway_ip" {
+  default = "192.168.0.1"
+}
+## Elastic Cloud Server ##
 variable "ecs_prefix" {
   default = "tpot-"
 }
-# ECS configuration
-variable "availabiliy_zone" {
-  default = "eu-de-03"
-  description = "Select an availability zone"
+variable "ecs_flavor" {
+  default = "s3.medium.8"
 }
-variable "flavor" {
-  default = "s2.medium.8"
-  description = "Select a compute flavor"
+variable "ecs_disk_size" {
+  default = "128"
+}
+variable "availability_zone" {
+  default = "eu-de-03"
 }
 variable "key_pair" {
   #default = ""
   description = "Specify your SSH key pair"
+  validation {
+    condition = length(var.key_pair) > 0
+    error_message = "Please specify a Key Pair."
+  }
 }
-variable "image_id" {
-  default = "d97dd29c-9318-4e4c-8d3a-7307d1513b77"
-  description = "Select a Debian 10 base image id"
-}
-variable "volume_size" {
-  default = "128"
-  description = "Set the volume size"
-}
-# These will go in the generated tpot.conf file
+## Elastic IP ##
+variable "eip_size" {
+  default = "100"
+}
+## These will go in the generated tpot.conf file ##
 variable "tpot_flavor" {
   default = "STANDARD"
-  description = "Specify your tpot flavor [STANDARD, SENSOR, INDUSTRIAL, COLLECTOR, NEXTGEN]"
+  description = "Specify your tpot flavor [STANDARD, SENSOR, INDUSTRIAL, COLLECTOR, NEXTGEN, MEDICAL]"
 }
 variable "web_user" {
   default = "webuser"
   description = "Set a username for the web user"
 }
 variable "web_password" {
   #default = "w3b$ecret"
   description = "Set a password for the web user"
+  validation {
+    condition = length(var.web_password) > 0
+    error_message = "Please specify a password for the web user."
+  }
 }


@@ -1,3 +1,13 @@
 terraform {
-  required_version = ">= 0.12"
+  required_version = ">= 0.13"
+  required_providers {
+    opentelekomcloud = {
+      source  = "opentelekomcloud/opentelekomcloud"
+      version = "~> 1.23.4"
+    }
+    random = {
+      source  = "hashicorp/random"
+      version = "~> 3.1.0"
+    }
+  }
 }
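For orientation, a minimal sketch of how this Open Telekom Cloud configuration is typically applied (assuming credentials are already configured for the "open-telekom-cloud" profile referenced in the provider block, and that the key pair named below exists in OTC; the key pair name is a made-up placeholder and the passwords reuse the sample values from the commented defaults in variables.tf):

terraform init
terraform apply \
  -var 'linux_password=LiNuXuSeRPaSs#' \
  -var 'key_pair=my-otc-keypair' \
  -var 'web_user=webuser' \
  -var 'web_password=w3b$ecret'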

Binary file not shown.



@@ -1,11 +1,10 @@
-FROM alpine:latest
+FROM alpine:3.14
 #
 # Include dist
 ADD dist/ /root/dist/
 #
 # Install packages
-RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
-    apk -U add \
+RUN apk -U add \
     git \
     libcap \
     py3-pip \
@@ -13,7 +12,9 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
     python3-dev && \
 #
 # Install adbhoney from git
-    git clone --depth=1 https://github.com/huuck/ADBHoney /opt/adbhoney && \
+    git clone https://github.com/huuck/ADBHoney /opt/adbhoney && \
+    cd /opt/adbhoney && \
+    git checkout ad7c17e78d01f6860d58ba826a4b6a4e4f83acbd && \
     cp /root/dist/adbhoney.cfg /opt/adbhoney && \
     sed -i 's/dst_ip/dest_ip/' /opt/adbhoney/adbhoney/core.py && \
     sed -i 's/dst_port/dest_port/' /opt/adbhoney/adbhoney/core.py && \
@@ -22,7 +23,7 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
     addgroup -g 2000 adbhoney && \
     adduser -S -H -s /bin/ash -u 2000 -D -g 2000 adbhoney && \
     chown -R adbhoney:adbhoney /opt/adbhoney && \
-    setcap cap_net_bind_service=+ep /usr/bin/python3.8 && \
+    setcap cap_net_bind_service=+ep /usr/bin/python3.9 && \
 #
 # Clean up
     apk del --purge git \


@@ -14,6 +14,7 @@ services:
     - adbhoney_local
    ports:
     - "5555:5555"
+#   image: "dtagdevsec/adbhoney:2006"
    image: "dtagdevsec/adbhoney:2006"
    read_only: true
    volumes:


@@ -1,17 +1,17 @@
-FROM alpine:latest
+FROM alpine:3.14
 #
 # Include dist
 ADD dist/ /root/dist/
 #
 # Setup env and apt
-RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
-    apk -U upgrade && \
+RUN apk -U upgrade && \
     apk add build-base \
     git \
     libffi \
     libffi-dev \
     openssl \
     openssl-dev \
+    py3-cryptography \
     py3-pip \
     python3 \
     python3-dev && \
@@ -23,8 +23,9 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
 # Get and install packages
     mkdir -p /opt/ && \
     cd /opt/ && \
-    git clone --depth=1 https://github.com/cymmetria/ciscoasa_honeypot && \
+    git clone https://github.com/cymmetria/ciscoasa_honeypot && \
     cd ciscoasa_honeypot && \
+    git checkout d6e91f1aab7fe6fc01fabf2046e76b68dd6dc9e2 && \
     pip3 install --no-cache-dir -r requirements.txt && \
     cp /root/dist/asa_server.py /opt/ciscoasa_honeypot && \
     chown -R ciscoasa:ciscoasa /opt/ciscoasa_honeypot && \


@@ -1,21 +1,20 @@
-FROM alpine:latest
+FROM alpine:3.14
 #
 # Install packages
-RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
-    apk -U add \
+RUN apk -U add \
     git \
     libcap \
     openssl \
     py3-pip \
     python3 \
     python3-dev && \
 #
     pip3 install --no-cache-dir python-json-logger && \
 #
 # Install CitrixHoneypot from GitHub
-# git clone --depth=1 https://github.com/malwaretech/citrixhoneypot /opt/citrixhoneypot && \
-# git clone --depth=1 https://github.com/vorband/CitrixHoneypot /opt/citrixhoneypot && \
-    git clone --depth=1 https://github.com/t3chn0m4g3/CitrixHoneypot /opt/citrixhoneypot && \
+    git clone https://github.com/t3chn0m4g3/CitrixHoneypot /opt/citrixhoneypot && \
+    cd /opt/citrixhoneypot && \
+    git checkout f59ad7320dc5bbb8c23c8baa5f111b52c52fbef3 && \
 #
 # Setup user, groups and configs
     mkdir -p /opt/citrixhoneypot/logs /opt/citrixhoneypot/ssl && \
@@ -30,7 +29,7 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
     addgroup -g 2000 citrixhoneypot && \
     adduser -S -H -s /bin/ash -u 2000 -D -g 2000 citrixhoneypot && \
     chown -R citrixhoneypot:citrixhoneypot /opt/citrixhoneypot && \
-    setcap cap_net_bind_service=+ep /usr/bin/python3.8 && \
+    setcap cap_net_bind_service=+ep /usr/bin/python3.9 && \
 #
 # Clean up
     apk del --purge git \


@@ -1,11 +1,10 @@
-FROM alpine:latest
+FROM alpine:3.14
 #
 # Include dist
 ADD dist/ /root/dist/
 #
 # Setup apt
-RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
-    apk -U add \
+RUN apk -U add \
     build-base \
     file \
     git \
@@ -17,19 +16,19 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
     libxslt-dev \
     mariadb-dev \
     pkgconfig \
+    py3-pip \
     python3 \
     python3-dev \
-    py-cffi \
-    py-cryptography \
-    py3-pip \
+    py3-cffi \
+    py3-cryptography \
+    py3-gevent \
     tcpdump \
     wget && \
 #
 # Setup ConPot
-    git clone --depth=1 https://github.com/mushorg/conpot /opt/conpot && \
+    git clone https://github.com/mushorg/conpot /opt/conpot && \
     cd /opt/conpot/ && \
-    # Patch to accept ENV for MIB path
-    sed -i "s/tmp_mib_dir = tempfile.mkdtemp()/tmp_mib_dir = tempfile.mkdtemp(dir=os.environ['CONPOT_TMP'])/" /opt/conpot/conpot/protocols/snmp/snmp_server.py && \
+    git checkout 804fd65aa3b7ffa31c07fd4e863d4a5500414cf3 && \
     # Change template default ports if <1024
     sed -i 's/port="2121"/port="21"/' /opt/conpot/conpot/templates/default/ftp/ftp.xml && \
     sed -i 's/port="8800"/port="80"/' /opt/conpot/conpot/templates/default/http/http.xml && \
@@ -42,15 +41,16 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
     sed -i 's/port="6230"/port="623"/' /opt/conpot/conpot/templates/ipmi/ipmi/ipmi.xml && \
     pip3 install --no-cache-dir -U setuptools && \
     pip3 install --no-cache-dir . && \
+    pip3 install --no-cache-dir pysnmp-mibs && \
     cd / && \
     rm -rf /opt/conpot /tmp/* /var/tmp/* && \
-    setcap cap_net_bind_service=+ep /usr/bin/python3.8 && \
+    setcap cap_net_bind_service=+ep /usr/bin/python3.9 && \
 #
 # Get wireshark manuf db for scapy, setup configs, user, groups
     mkdir -p /etc/conpot /var/log/conpot /usr/share/wireshark && \
     wget https://github.com/wireshark/wireshark/raw/master/manuf -o /usr/share/wireshark/manuf && \
     cp /root/dist/conpot.cfg /etc/conpot/conpot.cfg && \
-    cp -R /root/dist/templates /usr/lib/python3.8/site-packages/conpot/ && \
+    cp -R /root/dist/templates /usr/lib/python3.9/site-packages/conpot/ && \
     addgroup -g 2000 conpot && \
     adduser -S -s /bin/ash -u 2000 -D -g 2000 conpot && \
 #
@@ -75,4 +75,4 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
 # Start conpot
 STOPSIGNAL SIGINT
 USER conpot:conpot
-CMD exec /usr/bin/conpot --temp_dir $CONPOT_TMP --template $CONPOT_TEMPLATE --logfile $CONPOT_LOG --config $CONPOT_CONFIG
+CMD exec /usr/bin/conpot --mibcache $CONPOT_TMP --temp_dir $CONPOT_TMP --template $CONPOT_TEMPLATE --logfile $CONPOT_LOG --config $CONPOT_CONFIG


@@ -3,7 +3,7 @@ sensorid = conpot
 [virtual_file_system]
 data_fs_url = %(CONPOT_TMP)s
-fs_url = tar:///usr/lib/python3.8/site-packages/conpot/data.tar
+fs_url = tar:///usr/lib/python3.9/site-packages/conpot/data.tar
 [session]
 timeout = 30


@@ -70,7 +70,7 @@
   <value type="value">100000000</value>
 </key>
 <key name="ifPhysAddress">
-  <value type="value">"\x00\x0e\x8c\x29\xc5\x1a"</value>
+  <value type="value">"0x000e8c29c51a"</value>
 </key>
 <key name="ifAdminStatus">
   <value type="value">1</value>
@@ -347,6 +347,10 @@
 <!-- IEC104 Protocol parameter -->
+<!-- Common (Object) Address, aka COA, Station Address -->
+<key name="CommonAddress">
+  <value type="value">"0x1e28"</value>
+</key>
 <!-- Timeout of connection establishment -->
 <key name="T_0">
   <value type="value">30</value>


@@ -26,15 +26,15 @@ services:
    networks:
     - conpot_local_default
    ports:
-   # - "69:69"
+   # - "69:69/udp"
     - "80:80"
     - "102:102"
-    - "161:161"
+    - "161:161/udp"
     - "502:502"
-   # - "623:623"
+   # - "623:623/udp"
     - "2121:21"
     - "44818:44818"
-    - "47808:47808"
+    - "47808:47808/udp"
    image: "dtagdevsec/conpot:2006"
    read_only: true
    volumes:
@@ -56,7 +56,7 @@ services:
    networks:
     - conpot_local_IEC104
    ports:
-   # - "161:161"
+   # - "161:161/udp"
     - "2404:2404"
    image: "dtagdevsec/conpot:2006"
    read_only: true
@@ -101,7 +101,7 @@ services:
    networks:
     - conpot_local_ipmi
    ports:
-    - "623:623"
+    - "623:623/udp"
    image: "dtagdevsec/conpot:2006"
    read_only: true
    volumes:


@@ -1,28 +1,23 @@
-FROM alpine:latest
+FROM alpine:3.14
 #
 # Include dist
 ADD dist/ /root/dist/
 #
 # Get and install dependencies & packages
-RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
-    apk -U add \
+RUN apk -U add \
     bash \
     build-base \
     git \
     gmp-dev \
     libcap \
     libffi-dev \
     mpc1-dev \
     mpfr-dev \
     openssl \
     openssl-dev \
     py3-pip \
     python3 \
-    python3-dev \
-    py3-bcrypt \
-    py3-mysqlclient \
-    py3-requests \
-    py3-setuptools && \
+    python3-dev && \
 #
 # Setup user
     addgroup -g 2000 cowrie && \
@@ -31,10 +26,13 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
 # Install cowrie
     mkdir -p /home/cowrie && \
     cd /home/cowrie && \
-    git clone --depth=1 https://github.com/micheloosterhof/cowrie -b v2.1.0 && \
+    git clone --depth=1 https://github.com/micheloosterhof/cowrie -b v2.3.0 && \
     cd cowrie && \
+    # git checkout 6b1e82915478292f1e77ed776866771772b48f2e && \
+    # sed -i s/logfile.DailyLogFile/logfile.LogFile/g src/cowrie/python/logfile.py && \
     mkdir -p log && \
-    cp /root/dist/requirements.txt . && \
+    sed -i '/packaging.*/d' requirements.txt && \
+    pip3 install --upgrade pip && \
     pip3 install -r requirements.txt && \
 #
 # Setup configs


@@ -36,6 +36,11 @@ rsa_public_key = etc/ssh_host_rsa_key.pub
 rsa_private_key = etc/ssh_host_rsa_key
 dsa_public_key = etc/ssh_host_dsa_key.pub
 dsa_private_key = etc/ssh_host_dsa_key
+ecdsa_public_key = etc/ssh_host_ecdsa_key.pub
+ecdsa_private_key = etc/ssh_host_ecdsa_key
+ed25519_public_key = etc/ssh_host_ed25519_key.pub
+ed25519_private_key = etc/ssh_host_ed25519_key
+public_key_auth = ssh-rsa,ssh-dss,ecdsa-sha2-nistp256,ssh-ed25519
 #version = SSH-2.0-OpenSSH_7.2p2 Ubuntu-4ubuntu2.2
 version = SSH-2.0-OpenSSH_7.9p1
 ciphers = aes128-ctr,aes192-ctr,aes256-ctr,aes256-cbc,aes192-cbc,aes128-cbc,3des-cbc,blowfish-cbc,cast128-cbc


@@ -1,13 +0,0 @@
attrs==19.3.0
bcrypt==3.1.7
configparser==4.0.2
cryptography==2.9.2
packaging==20.3
pyasn1_modules==0.2.8
pyopenssl==19.1.0
pyparsing==2.4.7
python-dateutil==2.8.1
service_identity==18.1.0
tftpy==0.8.0
treq==20.4.1
twisted==20.3.0


@@ -1,37 +1,34 @@
-FROM alpine:3.10
+FROM node:10.24.1-alpine3.11 as builder
-#
-# Get and install dependencies & packages
-RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
-    apk -U --no-cache add \
-    curl \
-    git \
-    npm \
-    nodejs && \
-    npm install -g grunt-cli && \
-    npm install -g http-server && \
-    npm install npm@latest -g && \
 #
 # Install CyberChef
-    cd /root && \
-    git clone https://github.com/gchq/cyberchef --depth=1 && \
-    chown -R nobody:nobody cyberchef && \
-    cd cyberchef && \
-    npm install && \
-    grunt prod && \
-    mkdir -p /opt/cyberchef && \
-    mv build/prod/* /opt/cyberchef && \
-    cd / && \
+RUN apk -U --no-cache add git
+RUN chown -R node:node /srv
+RUN npm install -g grunt-cli
+WORKDIR /srv
+USER node
+RUN git clone https://github.com/gchq/cyberchef -b v9.32.3 .
+ENV NODE_OPTIONS=--max_old_space_size=2048
+RUN npm install
+RUN grunt prod
+#
+# Move from builder
+FROM alpine:3.14
+#
+RUN apk -U --no-cache add \
+    curl \
+    npm && \
+    npm install -g http-server && \
 #
 # Clean up
-    apk del --purge git \
-    npm && \
     rm -rf /root/* && \
     rm -rf /var/cache/apk/*
 #
+COPY --from=builder /srv/build/prod /opt/cyberchef
+#
 # Healthcheck
 HEALTHCHECK --retries=10 CMD curl -s -XGET 'http://127.0.0.1:8000'
 #
-# Set user, workdir and start spiderfoot
+# Set user, workdir and start cyberchef
 USER nobody:nobody
 WORKDIR /opt/cyberchef
 CMD ["http-server", "-p", "8000"]

docker/ddospot/Dockerfile (new file)

@@ -0,0 +1,52 @@
FROM alpine:3.14
#
# Install packages
RUN apk -U add \
build-base \
git \
libcap \
py3-pip \
python3 \
python3-dev && \
#
# Install ddospot from GitHub and setup
mkdir -p /opt && \
cd /opt/ && \
git clone https://github.com/aelth/ddospot && \
cd ddospot && \
git checkout 49f515237bd2d5744290ed21dcca9b53def243ba && \
# We only want JSON events, setting logger format to ('') ...
sed -i "/handler.setFormatter(logging.Formatter(/{n;N;d}" /opt/ddospot/ddospot/core/potloader.py && \
sed -i "s#handler.setFormatter(logging.Formatter(#handler.setFormatter(logging.Formatter(''))#g" /opt/ddospot/ddospot/core/potloader.py && \
# ... and remove msg from log message for individual honeypots
sed -i "s#self.logger.info('\%s - \%s' \% (msg, raw_json))#self.logger.info(raw_json)#g" /opt/ddospot/ddospot/pots/chargen/chargen.py && \
sed -i "s#self.logger.info('New DNS query - \%s' \% (raw_json))#self.logger.info(raw_json)#g" /opt/ddospot/ddospot/pots/dns/dns.py && \
sed -i "s#self.logger.info('\%s - \%s' \% (msg, raw_json))#self.logger.info(raw_json)#g" /opt/ddospot/ddospot/pots/generic/generic.py && \
sed -i "s#self.logger.info('\%s - \%s' \% (msg, raw_json))#self.logger.info(raw_json)#g" /opt/ddospot/ddospot/pots/ntp/ntp.py && \
sed -i "s#self.logger.info('\%s - \%s' \% (msg, raw_json))#self.logger.info(raw_json)#g" /opt/ddospot/ddospot/pots/ssdp/ssdp.py && \
# We are using logrotate
sed -i "s#rotate_size = 10#rotate_size = 9999#g" /opt/ddospot/ddospot/pots/chargen/chargenpot.conf && \
sed -i "s#rotate_size = 10#rotate_size = 9999#g" /opt/ddospot/ddospot/pots/dns/dnspot.conf && \
sed -i "s#rotate_size = 10#rotate_size = 9999#g" /opt/ddospot/ddospot/pots/generic/genericpot.conf && \
sed -i "s#rotate_size = 10#rotate_size = 9999#g" /opt/ddospot/ddospot/pots/ntp/ntpot.conf && \
sed -i "s#rotate_size = 10#rotate_size = 9999#g" /opt/ddospot/ddospot/pots/ssdp/ssdpot.conf && \
pip3 install -r ddospot/requirements.txt && \
setcap cap_net_bind_service=+ep /usr/bin/python3.9 && \
#
# Setup user, groups and configs
addgroup -g 2000 ddospot && \
adduser -S -H -s /bin/ash -u 2000 -D -g 2000 ddospot && \
chown ddospot:ddospot -R /opt/ddospot && \
#
# Clean up
apk del --purge build-base \
git \
python3-dev && \
rm -rf /root/* && \
rm -rf /var/cache/apk/*
#
# Start ddospot
STOPSIGNAL SIGINT
USER ddospot:ddospot
WORKDIR /opt/ddospot/ddospot/
CMD ["/usr/bin/python3","ddospot.py", "-n"]


@@ -0,0 +1,26 @@
version: '2.3'
networks:
ddospot_local:
services:
# Ddospot service
ddospot:
build: .
container_name: ddospot
restart: always
networks:
- ddospot_local
ports:
- "19:19/udp"
- "53:53/udp"
- "123:123/udp"
# - "161:161/udp"
- "1900:1900/udp"
image: "dtagdevsec/ddospot:2006"
read_only: true
volumes:
- /data/ddospot/log:/opt/ddospot/ddospot/logs
- /data/ddospot/bl:/opt/ddospot/ddospot/bl
- /data/ddospot/db:/opt/ddospot/ddospot/db


@@ -1,10 +1,10 @@
-[![](https://images.microbadger.com/badges/version/dtagdevsec/elasticpot:1903.svg)](https://microbadger.com/images/dtagdevsec/elasticpot:1903 "Get your own version badge on microbadger.com") [![](https://images.microbadger.com/badges/image/dtagdevsec/elasticpot:1903.svg)](https://microbadger.com/images/dtagdevsec/elasticpot:1903 "Get your own image badge on microbadger.com")
+[![](https://images.microbadger.com/badges/version/ghcr.io/telekom-security/elasticpot:1903.svg)](https://microbadger.com/images/ghcr.io/telekom-security/elasticpot:1903 "Get your own version badge on microbadger.com") [![](https://images.microbadger.com/badges/image/ghcr.io/telekom-security/elasticpot:1903.svg)](https://microbadger.com/images/ghcr.io/telekom-security/elasticpot:1903 "Get your own image badge on microbadger.com")
 # elasticpot
 [elasticpot](https://github.com/schmalle/ElasticPot) is a simple elastic search honeypot.
-This dockerized version is part of the **[T-Pot community honeypot](http://dtag-dev-sec.github.io/)** of Deutsche Telekom AG.
+This dockerized version is part of the **[T-Pot community honeypot](http://telekom-security.github.io/)** of Deutsche Telekom AG.
 The `Dockerfile` contains the blueprint for the dockerized elasticpot and will be used to setup the docker image.


@@ -14,7 +14,7 @@ services:
    - elasticpot_local
   ports:
    - "9200:9200"
-  image: "dtagdevsec/elasticpot:2006"
+  image: "ghcr.io/telekom-security/elasticpot:2006"
   read_only: true
   volumes:
    - /data/elasticpot/log:/opt/ElasticpotPY/log


@@ -1,10 +1,10 @@
-[![](https://images.microbadger.com/badges/version/dtagdevsec/glastopf:1903.svg)](https://microbadger.com/images/dtagdevsec/glastopf:1903 "Get your own version badge on microbadger.com") [![](https://images.microbadger.com/badges/image/dtagdevsec/glastopf:1903.svg)](https://microbadger.com/images/dtagdevsec/glastopf:1903 "Get your own image badge on microbadger.com")
+[![](https://images.microbadger.com/badges/version/ghcr.io/telekom-security/glastopf:1903.svg)](https://microbadger.com/images/ghcr.io/telekom-security/glastopf:1903 "Get your own version badge on microbadger.com") [![](https://images.microbadger.com/badges/image/ghcr.io/telekom-security/glastopf:1903.svg)](https://microbadger.com/images/ghcr.io/telekom-security/glastopf:1903 "Get your own image badge on microbadger.com")
 # glastopf (deprecated)
 [glastopf](https://github.com/mushorg/glastopf) is a python web application honeypot.
-This dockerized version is part of the **[T-Pot community honeypot](http://dtag-dev-sec.github.io/)** of Deutsche Telekom AG.
+This dockerized version is part of the **[T-Pot community honeypot](http://telekom-security.github.io/)** of Deutsche Telekom AG.
 The `Dockerfile` contains the blueprint for the dockerized glastopf and will be used to setup the docker image.


@@ -16,7 +16,7 @@ services:
    - glastopf_local
   ports:
    - "8081:80"
-  image: "dtagdevsec/glastopf:1903"
+  image: "ghcr.io/telekom-security/glastopf:1903"
   read_only: true
   volumes:
    - /data/glastopf/db:/tmp/glastopf/db


@@ -16,4 +16,4 @@ services:
    - hpfeeds_local
   ports:
    - "20000:20000"
-  image: "dtagdevsec/hpfeeds:latest"
+  image: "ghcr.io/telekom-security/hpfeeds:latest"


@@ -17,7 +17,7 @@ services:
   network_mode: "host"
   ports:
    - "64297:64297"
-  image: "dtagdevsec/nginx:1903"
+  image: "ghcr.io/telekom-security/nginx:1903"
   read_only: true
   volumes:
    - /data/nginx/cert/:/etc/nginx/cert/:ro


@@ -1,8 +1,7 @@
-FROM alpine:latest
+FROM alpine:3.14
 #
 # Setup apk
-RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
-    apk -U add \
+RUN apk -U add --no-cache \
     build-base \
     git \
     g++ && \
@@ -14,6 +13,7 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
     cd /opt/go/ && \
     git clone https://github.com/nsmfoo/dicompot.git && \
     cd dicompot && \
+    git checkout 41331194156bbb17078bcc1594f4952ac06a731e && \
     go mod download && \
     go install -a -x github.com/nsmfoo/dicompot/server && \
 #


@@ -1,4 +1,4 @@
-FROM debian:buster-slim
+FROM ubuntu:20.04
 ENV DEBIAN_FRONTEND noninteractive
 #
 # Include dist
@@ -6,6 +6,9 @@ ADD dist/ /root/dist/
 #
 # Install dependencies and packages
 RUN apt-get update -y && \
+    apt-get install wget -y && \
+    wget http://archive.ubuntu.com/ubuntu/pool/universe/libe/libemu/libemu2_0.2.0+git20120122-1.2build1_amd64.deb http://archive.ubuntu.com/ubuntu/pool/universe/libe/libemu/libemu-dev_0.2.0+git20120122-1.2build1_amd64.deb && \
+    apt install ./libemu2_0.2.0+git20120122-1.2build1_amd64.deb ./libemu-dev_0.2.0+git20120122-1.2build1_amd64.deb -y && \
     apt-get dist-upgrade -y && \
     apt-get install -y --no-install-recommends \
     build-essential \
@@ -16,7 +19,7 @@ RUN apt-get update -y && \
     git \
     libcap2-bin \
     libcurl4-openssl-dev \
-    libemu-dev \
+    # libemu-dev \
     libev-dev \
     libglib2.0-dev \
     libloudmouth1-dev \
@@ -36,7 +39,7 @@ RUN apt-get update -y && \
 #
 # Get and install dionaea
 # Latest master is unstable, SIP causes crashing
-    git clone --depth=1 https://github.com/dinotools/dionaea -b 0.8.0 /root/dionaea/ && \
+    git clone --depth=1 https://github.com/dinotools/dionaea -b 0.11.0 /root/dionaea/ && \
     cd /root/dionaea && \
     #git checkout 1426750b9fd09c5bfeae74d506237333cd8505e2 && \
     mkdir build && \
@@ -78,7 +81,8 @@ RUN apt-get update -y && \
     python3-dev \
     python3-boto3 \
     python3-bson \
-    python3-yaml && \
+    python3-yaml \
+    wget && \
 #
     apt-get install -y \
     ca-certificates \
@@ -93,7 +97,8 @@ RUN apt-get update -y && \
     libnetfilter-queue1 \
     libnl-3-200 \
     libpcap0.8 \
-    libpython3.7 \
+    # libpython3.6 \
+    libpython3.8 \
     libudns0 && \
 #
     apt-get autoremove --purge -y && \


@@ -1,17 +1,18 @@
-FROM alpine:latest
+FROM alpine:3.14
 #
 # Include dist
 ADD dist/ /root/dist/
 #
 # Install packages
-RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
-    apk -U add \
+RUN apk -U add \
     build-base \
     ca-certificates \
     git \
     libffi-dev \
     openssl \
     openssl-dev \
+    postgresql-dev \
+    py3-cryptography \
     py3-mysqlclient \
     py3-requests \
     py3-pip \
@@ -19,8 +20,9 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
     python3-dev && \
     mkdir -p /opt && \
     cd /opt/ && \
-    git clone --depth=1 https://gitlab.com/bontchev/elasticpot.git/ && \
+    git clone https://gitlab.com/bontchev/elasticpot.git/ && \
     cd elasticpot && \
+    git checkout d12649730d819bd78ea622361b6c65120173ad45 && \
     pip3 install -r requirements.txt && \
 #
 # Setup user, groups and configs
@@ -33,6 +35,7 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
     git \
     libffi-dev \
     openssl-dev \
+    postgresql-dev \
     python3-dev && \
     rm -rf /root/* && \
     rm -rf /var/cache/apk/*


@@ -10,7 +10,7 @@ services:
   restart: always
   environment:
    - bootstrap.memory_lock=true
-   - ES_JAVA_OPTS=-Xms2048m -Xmx2048m
+#   - ES_JAVA_OPTS=-Xms2048m -Xmx2048m
    - ES_TMPDIR=/tmp
   cap_add:
    - IPC_LOCK
@@ -21,7 +21,7 @@ services:
    nofile:
     soft: 65536
     hard: 65536
-  mem_limit: 4g
+#  mem_limit: 4g
   ports:
    - "127.0.0.1:64298:9200"
   image: "dtagdevsec/elasticsearch:2006"
@@ -46,6 +46,8 @@ services:
   build: logstash/.
   container_name: logstash
   restart: always
+#  environment:
+#   - LS_JAVA_OPTS=-Xms2048m -Xmx2048m
   depends_on:
    elasticsearch:
     condition: service_healthy


@@ -1,25 +1,28 @@
-FROM alpine
+FROM alpine:3.14
 #
 # VARS
-ENV ES_VER=7.8.0 \
-    JAVA_HOME=/usr/lib/jvm/java-11-openjdk
+ENV ES_VER=7.17.0 \
+    ES_JAVA_HOME=/usr/lib/jvm/java-16-openjdk
 # Include dist
 ADD dist/ /root/dist/
 #
-# Setup env and apt
-RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
-    apk -U --no-cache add \
+RUN apk -U --no-cache add \
     aria2 \
     bash \
     curl \
-    nss \
-    openjdk11-jre && \
+    nss && \
+    apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community openjdk16-jre && \
 #
 # Get and install packages
     cd /root/dist/ && \
     mkdir -p /usr/share/elasticsearch/ && \
     aria2c -s 16 -x 16 https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-$ES_VER-linux-x86_64.tar.gz && \
     tar xvfz elasticsearch-$ES_VER-linux-x86_64.tar.gz --strip-components=1 -C /usr/share/elasticsearch/ && \
+    rm -rf /usr/share/elasticsearch/jdk && \
+    rm -rf /usr/share/elasticsearch/modules/x-pack-ml && \
+    # For some reason Alpine 3.14 does not report the -x flag correctly and thus elasticsearch does not find java
+    sed -i 's/! -x/! -e/g' /usr/share/elasticsearch/bin/elasticsearch-env && \
 #
 # Add and move files
     cd /root/dist/ && \
@@ -30,7 +33,6 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
     addgroup -g 2000 elasticsearch && \
     adduser -S -H -s /bin/ash -u 2000 -D -g 2000 elasticsearch && \
     chown -R elasticsearch:elasticsearch /usr/share/elasticsearch/ && \
-    rm -rf /usr/share/elasticsearch/modules/x-pack-ml && \
 #
 # Clean up
     apk del --purge aria2 && \


@@ -2,7 +2,6 @@ cluster.name: tpotcluster
 node.name: "tpotcluster-node-01"
 xpack.ml.enabled: false
 xpack.security.enabled: false
-#xpack.ilm.enabled: false
 path:
   logs: /data/elk/log
   data: /data/elk/data
@@ -10,7 +9,5 @@ http.host: 0.0.0.0
 http.cors.enabled: true
 http.cors.allow-origin: "*"
 indices.query.bool.max_clause_count: 2000
-cluster.initial_master_nodes:
-  - "tpotcluster-node-01"
-discovery.zen.ping.unicast.hosts:
-  - localhost
+cluster.routing.allocation.disk.watermark.enable_for_single_data_node: true
+discovery.type: single-node
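With discovery.type set to single-node, a quick way to confirm the cluster came up is a health request from the host, assuming the default 127.0.0.1:64298 port mapping from the ELK docker-compose file above:

curl -s 'http://127.0.0.1:64298/_cluster/health?pretty'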


@@ -1,16 +1,18 @@
-FROM alpine:latest
+FROM alpine:3.14
 #
 # Setup env and apt
 RUN apk -U add \
     curl \
     git \
     nodejs \
-    nodejs-npm && \
+    #nodejs-npm && \
+    npm && \
 #
 # Get and install packages
     mkdir -p /usr/src/app/ && \
     cd /usr/src/app/ && \
-    git clone --depth=1 https://github.com/mobz/elasticsearch-head . && \
+    git clone https://github.com/mobz/elasticsearch-head . && \
+    git checkout 2d51fecac2980d350fcd3319fd9fe2999f63c9db && \
     npm install http-server && \
     sed -i "s#\"http\:\/\/localhost\:9200\"#window.location.protocol \+ \'\/\/\' \+ window.location.hostname \+ \'\:\' \+ window.location.port \+ \'\/es\/\'#" /usr/src/app/_site/app.js && \
 #


@@ -1,16 +1,15 @@
-FROM node:10.21.0-alpine
+FROM node:16.13.2-alpine3.14
 #
 # VARS
-ENV KB_VER=7.8.0
+ENV KB_VER=7.17.0
 #
 # Include dist
 ADD dist/ /root/dist/
 #
-# Setup env and apt
-RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
-    apk -U --no-cache add \
+RUN apk -U --no-cache add \
     aria2 \
-    curl && \
+    curl \
+    gcompat && \
 #
 # Get and install packages
     cd /root/dist/ && \
@@ -24,32 +23,17 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
 #
 # Add and move files
     cd /root/dist/ && \
-    # cp kibana.svg /usr/share/kibana/src/ui/public/images/kibana.svg && \
-    # cp kibana.svg /usr/share/kibana/src/ui/public/icons/kibana.svg && \
-    # cp elk.ico /usr/share/kibana/src/ui/public/assets/favicons/favicon.ico && \
-    # cp elk.ico /usr/share/kibana/src/ui/public/assets/favicons/favicon-16x16.png && \
-    # cp elk.ico /usr/share/kibana/src/ui/public/assets/favicons/favicon-32x32.png && \
 #
 # Setup user, groups and configs
     sed -i 's/#server.basePath: ""/server.basePath: "\/kibana"/' /usr/share/kibana/config/kibana.yml && \
-    sed -i 's/#kibana.defaultAppId: "home"/kibana.defaultAppId: "dashboards"/' /usr/share/kibana/config/kibana.yml && \
     sed -i 's/#server.host: "localhost"/server.host: "0.0.0.0"/' /usr/share/kibana/config/kibana.yml && \
     sed -i 's/#elasticsearch.hosts: \["http:\/\/localhost:9200"\]/elasticsearch.hosts: \["http:\/\/elasticsearch:9200"\]/' /usr/share/kibana/config/kibana.yml && \
     sed -i 's/#server.rewriteBasePath: false/server.rewriteBasePath: false/' /usr/share/kibana/config/kibana.yml && \
-    # sed -i "s/#005571/#e20074/g" /usr/share/kibana/built_assets/css/plugins/kibana/index.css && \
-    # sed -i "s/#007ba4/#9e0051/g" /usr/share/kibana/built_assets/css/plugins/kibana/index.css && \
-    # sed -i "s/#00465d/#4f0028/g" /usr/share/kibana/built_assets/css/plugins/kibana/index.css && \
-    echo "xpack.infra.enabled: false" >> /usr/share/kibana/config/kibana.yml && \
-    echo "xpack.logstash.enabled: false" >> /usr/share/kibana/config/kibana.yml && \
-    echo "xpack.canvas.enabled: false" >> /usr/share/kibana/config/kibana.yml && \
-    echo "xpack.spaces.enabled: false" >> /usr/share/kibana/config/kibana.yml && \
-    echo "xpack.apm.enabled: false" >> /usr/share/kibana/config/kibana.yml && \
-    echo "xpack.security.enabled: false" >> /usr/share/kibana/config/kibana.yml && \
-    echo "xpack.uptime.enabled: false" >> /usr/share/kibana/config/kibana.yml && \
-    echo "xpack.siem.enabled: false" >> /usr/share/kibana/config/kibana.yml && \
-    echo "xpack.ml.enabled: false" >> /usr/share/kibana/config/kibana.yml && \
+    echo "xpack.reporting.roles.enabled: false" >> /usr/share/kibana/config/kibana.yml && \
     echo "elasticsearch.requestTimeout: 60000" >> /usr/share/kibana/config/kibana.yml && \
     echo "elasticsearch.shardTimeout: 60000" >> /usr/share/kibana/config/kibana.yml && \
-    echo "kibana.autocompleteTimeout: 60000" >> /usr/share/kibana/config/kibana.yml && \
-    echo "kibana.autocompleteTerminateAfter: 1000000" >> /usr/share/kibana/config/kibana.yml && \
     rm -rf /usr/share/kibana/optimize/bundles/* && \
     /usr/share/kibana/bin/kibana --optimize --allow-root && \
     addgroup -g 2000 kibana && \


@@ -1,21 +1,23 @@
-FROM alpine
+FROM alpine:3.14
 #
 # VARS
-ENV LS_VER=7.8.0
+ENV LS_VER=7.17.0
 # Include dist
 ADD dist/ /root/dist/
 #
 # Setup env and apt
-RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
-    apk -U --no-cache add \
+#RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
+RUN apk -U --no-cache add \
     aria2 \
+    autossh \
     bash \
     bzip2 \
     curl \
     libc6-compat \
     libzmq \
     nss \
-    openjdk11-jre && \
+    openssh && \
+    apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community openjdk16-jre && \
 #
 # Get and install packages
     mkdir -p /etc/listbot && \
@@ -25,10 +27,16 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
     bunzip2 *.bz2 && \
     cd /root/dist/ && \
     mkdir -p /usr/share/logstash/ && \
-    aria2c -s 16 -x 16 https://artifacts.elastic.co/downloads/logstash/logstash-$LS_VER.tar.gz && \
-    tar xvfz logstash-$LS_VER.tar.gz --strip-components=1 -C /usr/share/logstash/ && \
-    /usr/share/logstash/bin/logstash-plugin install logstash-filter-translate && \
-    /usr/share/logstash/bin/logstash-plugin install logstash-output-syslog && \
+    aria2c -s 16 -x 16 https://artifacts.elastic.co/downloads/logstash/logstash-$LS_VER-linux-x86_64.tar.gz && \
+    tar xvfz logstash-$LS_VER-linux-x86_64.tar.gz --strip-components=1 -C /usr/share/logstash/ && \
+    rm -rf /usr/share/logstash/jdk && \
+    # For some reason Alpine 3.14 does not report the -x flag correctly and thus elasticsearch does not find java
+    sed -i 's/! -x/! -e/g' /usr/share/logstash/bin/logstash.lib.sh && \
+    /usr/share/logstash/bin/logstash-plugin install --preserve --no-verify logstash-filter-translate && \
+    /usr/share/logstash/bin/logstash-plugin install --preserve --no-verify logstash-input-http && \
+    /usr/share/logstash/bin/logstash-plugin install --preserve --no-verify logstash-output-gelf && \
+    /usr/share/logstash/bin/logstash-plugin install --preserve --no-verify logstash-output-http && \
+    /usr/share/logstash/bin/logstash-plugin install --preserve --no-verify logstash-output-syslog && \
 #
 # Add and move files
     cd /root/dist/ && \
@@ -36,8 +44,11 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
     chmod u+x /usr/bin/update.sh && \
     mkdir -p /etc/logstash/conf.d && \
     cp logstash.conf /etc/logstash/conf.d/ && \
-    cp elasticsearch-template-es7x.json /usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-output-elasticsearch-10.5.1-java/lib/logstash/outputs/elasticsearch/ && \
-    cp common_configs.rb /usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-output-elasticsearch-10.5.1-java/lib/logstash/outputs/elasticsearch/ && \
+    cp http_input.conf /etc/logstash/conf.d/ && \
+    cp http_output.conf /etc/logstash/conf.d/ && \
+    cp pipelines.yml /usr/share/logstash/config/pipelines.yml && \
+    cp pipelines_pot.yml /usr/share/logstash/config/pipelines_pot.yml && \
+    cp tpot_es_template.json /etc/logstash/ && \
 #
 # Setup user, groups and configs
     addgroup -g 2000 logstash && \
@@ -56,4 +67,6 @@ HEALTHCHECK --retries=10 CMD curl -s -XGET 'http://127.0.0.1:9600'
 #
 # Start logstash
 #USER logstash:logstash
-CMD update.sh && exec /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash.conf --config.reload.automatic --java-execution
+#CMD update.sh && exec /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash.conf --config.reload.automatic --java-execution --log.level debug
+#CMD update.sh && exec /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/http_output.conf --config.reload.automatic --java-execution
+CMD update.sh && exec /usr/share/logstash/bin/logstash --config.reload.automatic --java-execution


@@ -0,0 +1,68 @@
FROM alpine:3.14
#
# VARS
ENV LS_VER=7.15.1
# Include dist
ADD dist/ /root/dist/
#
# Setup env and apt
#RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
RUN apk -U --no-cache add \
aria2 \
bash \
bzip2 \
curl \
libc6-compat \
libzmq \
nss && \
apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community openjdk16-jre && \
#
# Get and install packages
mkdir -p /etc/listbot && \
cd /etc/listbot && \
aria2c -s16 -x 16 https://listbot.sicherheitstacho.eu/cve.yaml.bz2 && \
aria2c -s16 -x 16 https://listbot.sicherheitstacho.eu/iprep.yaml.bz2 && \
bunzip2 *.bz2 && \
cd /root/dist/ && \
mkdir -p /usr/share/logstash/ && \
aria2c -s 16 -x 16 https://artifacts.elastic.co/downloads/logstash/logstash-$LS_VER-linux-x86_64.tar.gz && \
tar xvfz logstash-$LS_VER-linux-x86_64.tar.gz --strip-components=1 -C /usr/share/logstash/ && \
rm -rf /usr/share/logstash/jdk && \
# For some reason Alpine 3.14 does not report the -x flag correctly and thus elasticsearch does not find java
sed -i 's/! -x/! -e/g' /usr/share/logstash/bin/logstash.lib.sh && \
/usr/share/logstash/bin/logstash-plugin install logstash-filter-translate && \
/usr/share/logstash/bin/logstash-plugin install logstash-input-http && \
/usr/share/logstash/bin/logstash-plugin install logstash-output-gelf && \
/usr/share/logstash/bin/logstash-plugin install logstash-output-http && \
/usr/share/logstash/bin/logstash-plugin install logstash-output-syslog && \
#
# Add and move files
cd /root/dist/ && \
cp update.sh /usr/bin/ && \
chmod u+x /usr/bin/update.sh && \
mkdir -p /etc/logstash/conf.d && \
cp logstash.conf /etc/logstash/conf.d/ && \
cp http.conf /etc/logstash/conf.d/ && \
cp pipelines.yml /usr/share/logstash/config/pipelines.yml && \
cp tpot_es_template.json /etc/logstash/ && \
#
# Setup user, groups and configs
addgroup -g 2000 logstash && \
adduser -S -H -s /bin/bash -u 2000 -D -g 2000 logstash && \
chown -R logstash:logstash /usr/share/logstash && \
chown -R logstash:logstash /etc/listbot && \
chmod 755 /usr/bin/update.sh && \
#
# Clean up
rm -rf /root/* && \
rm -rf /tmp/* && \
rm -rf /var/cache/apk/*
#
# Healthcheck
HEALTHCHECK --retries=10 CMD curl -s -XGET 'http://127.0.0.1:9600'
#
# Start logstash
#USER logstash:logstash
#CMD update.sh && exec /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash.conf --config.reload.automatic --java-execution --log.level debug
#CMD update.sh && exec /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash.conf --config.reload.automatic --java-execution
CMD update.sh && exec /usr/share/logstash/bin/logstash --config.reload.automatic --java-execution


@@ -1,167 +0,0 @@
require 'forwardable' # Needed for logstash core SafeURI. We need to patch this in core: https://github.com/elastic/logstash/pull/5978
module LogStash; module Outputs; class ElasticSearch
module CommonConfigs
DEFAULT_INDEX_NAME = "logstash-%{+yyyy.MM.dd}"
DEFAULT_POLICY = "logstash-policy"
DEFAULT_ROLLOVER_ALIAS = 'logstash'
DEFAULT_HOST = ::LogStash::Util::SafeURI.new("//127.0.0.1")
def self.included(mod)
# The index to write events to. This can be dynamic using the `%{foo}` syntax.
# The default value will partition your indices by day so you can more easily
# delete old data or only search specific date ranges.
# Indexes may not contain uppercase characters.
# For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}.
# LS uses Joda to format the index pattern from event timestamp.
# Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
mod.config :index, :validate => :string, :default => DEFAULT_INDEX_NAME
mod.config :document_type,
:validate => :string,
:deprecated => "Document types are being deprecated in Elasticsearch 6.0, and removed entirely in 7.0. You should avoid this feature"
# From Logstash 1.3 onwards, a template is applied to Elasticsearch during
# Logstash's startup if one with the name `template_name` does not already exist.
# By default, the contents of this template is the default template for
# `logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
# `logstash-*`. Should you require support for other index names, or would like
# to change the mappings in the template in general, a custom template can be
# specified by setting `template` to the path of a template file.
#
# Setting `manage_template` to false disables this feature. If you require more
# control over template creation, (e.g. creating indices dynamically based on
# field names) you should set `manage_template` to false and use the REST
# API to apply your templates manually.
mod.config :manage_template, :validate => :boolean, :default => true
# This configuration option defines how the template is named inside Elasticsearch.
# Note that if you have used the template management features and subsequently
# change this, you will need to prune the old template manually, e.g.
#
# `curl -XDELETE <http://localhost:9200/_template/OldTemplateName?pretty>`
#
# where `OldTemplateName` is whatever the former setting was.
mod.config :template_name, :validate => :string, :default => "logstash"
# You can set the path to your own template here, if you so desire.
# If not set, the included template will be used.
mod.config :template, :validate => :path
# The template_overwrite option will always overwrite the indicated template
# in Elasticsearch with either the one indicated by template or the included one.
# This option is set to false by default. If you always want to stay up to date
# with the template provided by Logstash, this option could be very useful to you.
# Likewise, if you have your own template file managed by puppet, for example, and
# you wanted to be able to update it regularly, this option could help there as well.
#
# Please note that if you are using your own customized version of the Logstash
# template (logstash), setting this to true will make Logstash to overwrite
# the "logstash" template (i.e. removing all customized settings)
mod.config :template_overwrite, :validate => :boolean, :default => true
# The document ID for the index. Useful for overwriting existing entries in
# Elasticsearch with the same ID.
mod.config :document_id, :validate => :string
# The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
# See https://www.elastic.co/blog/elasticsearch-versioning-support.
mod.config :version, :validate => :string
# The version_type to use for indexing.
# See https://www.elastic.co/blog/elasticsearch-versioning-support.
# See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
mod.config :version_type, :validate => ["internal", 'external', "external_gt", "external_gte", "force"]
# A routing override to be applied to all processed events.
# This can be dynamic using the `%{foo}` syntax.
mod.config :routing, :validate => :string
# For child documents, ID of the associated parent.
# This can be dynamic using the `%{foo}` syntax.
mod.config :parent, :validate => :string, :default => nil
# For child documents, name of the join field
mod.config :join_field, :validate => :string, :default => nil
# Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
# Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300).
# `"127.0.0.1"`
# `["127.0.0.1:9200","127.0.0.2:9200"]`
# `["http://127.0.0.1"]`
# `["https://127.0.0.1:9200"]`
# `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
# It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
# to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
#
# Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
mod.config :hosts, :validate => :uri, :default => [ DEFAULT_HOST ], :list => true
# Cloud ID, from the Elastic Cloud web console. If set `hosts` should not be used.
#
# For more details, check out the https://www.elastic.co/guide/en/logstash/current/connecting-to-cloud.html#_cloud_id[cloud documentation]
mod.config :cloud_id, :validate => :string
# Set upsert content for update mode.s
# Create a new document with this parameter as json string if `document_id` doesn't exists
mod.config :upsert, :validate => :string, :default => ""
# Enable `doc_as_upsert` for update mode.
# Create a new document with source if `document_id` doesn't exist in Elasticsearch
mod.config :doc_as_upsert, :validate => :boolean, :default => false
# Set script name for scripted update mode
mod.config :script, :validate => :string, :default => ""
# Define the type of script referenced by "script" variable
# inline : "script" contains inline script
# indexed : "script" contains the name of script directly indexed in elasticsearch
# file : "script" contains the name of script stored in elasticseach's config directory
mod.config :script_type, :validate => ["inline", 'indexed', "file"], :default => ["inline"]
# Set the language of the used script. If not set, this defaults to painless in ES 5.0
mod.config :script_lang, :validate => :string, :default => "painless"
# Set variable name passed to script (scripted update)
mod.config :script_var_name, :validate => :string, :default => "event"
# if enabled, script is in charge of creating non-existent document (scripted update)
mod.config :scripted_upsert, :validate => :boolean, :default => false
# Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`
mod.config :retry_initial_interval, :validate => :number, :default => 2
# Set max interval in seconds between bulk retries.
mod.config :retry_max_interval, :validate => :number, :default => 64
# The number of times Elasticsearch should internally retry an update/upserted document
# See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
# for more info
mod.config :retry_on_conflict, :validate => :number, :default => 1
# Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
# here like `pipeline => "%{INGEST_PIPELINE}"`
mod.config :pipeline, :validate => :string, :default => nil
# -----
# ILM configurations (beta)
# -----
# Flag for enabling Index Lifecycle Management integration.
mod.config :ilm_enabled, :validate => [true, false, 'true', 'false', 'auto'], :default => 'auto'
# Rollover alias used for indexing data. If rollover alias doesn't exist, Logstash will create it and map it to the relevant index
mod.config :ilm_rollover_alias, :validate => :string, :default => DEFAULT_ROLLOVER_ALIAS
# appends “{now/d}-000001” by default for new index creation, subsequent rollover indices will increment based on this pattern i.e. “000002”
# {now/d} is date math, and will insert the appropriate value automatically.
mod.config :ilm_pattern, :validate => :string, :default => '{now/d}-000001'
# ILM policy to use, if undefined the default policy will be used.
mod.config :ilm_policy, :validate => :string, :default => DEFAULT_POLICY
end
end
end end end


@@ -0,0 +1,19 @@
# Input section
input {
http {
id => "tpot"
host => "0.0.0.0"
port => "80"
}
}
# Output section
output {
elasticsearch {
hosts => ["elasticsearch:9200"]
# With templates now being legacy and ILM in place, we need to set the daily index with its template manually. Otherwise a new index might be created with different settings configured through Kibana.
index => "logstash-%{+YYYY.MM.dd}"
template => "/etc/logstash/tpot_es_template.json"
}
}
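This input/output pair appears to be the receiving end for the distributed setup: events arrive over HTTP and are written to the daily logstash-* index using the bundled template. A quick way to verify the listener, assuming the container's port 80 is reachable on localhost (the JSON body is a made-up sample event, not a real honeypot record):

curl -s -XPOST 'http://127.0.0.1:80/' -H 'Content-Type: application/json' -d '{"type":"Cowrie","src_ip":"192.0.2.1","dest_port":22}'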


@@ -0,0 +1,756 @@
# Input section
input {
# Fatt
file {
path => ["/data/fatt/log/fatt.log"]
codec => json
type => "Fatt"
}
# Suricata
file {
path => ["/data/suricata/log/eve.json"]
codec => json
type => "Suricata"
}
# P0f
file {
path => ["/data/p0f/log/p0f.json"]
codec => json
type => "P0f"
}
# Adbhoney
file {
path => ["/data/adbhoney/log/adbhoney.json"]
codec => json
type => "Adbhoney"
}
# Ciscoasa
file {
path => ["/data/ciscoasa/log/ciscoasa.log"]
codec => plain
type => "Ciscoasa"
}
# CitrixHoneypot
file {
path => ["/data/citrixhoneypot/logs/server.log"]
codec => json
type => "CitrixHoneypot"
}
# Conpot
file {
path => ["/data/conpot/log/*.json"]
codec => json
type => "ConPot"
}
# Cowrie
file {
path => ["/data/cowrie/log/cowrie.json"]
codec => json
type => "Cowrie"
}
# Dionaea
file {
path => ["/data/dionaea/log/dionaea.json"]
codec => json
type => "Dionaea"
}
# Dicompot
file {
path => ["/data/dicompot/log/dicompot.log"]
codec => json
type => "Dicompot"
}
# Ddospot
file {
path => ["/data/ddospot/log/*.log"]
codec => json
type => "Ddospot"
}
# ElasticPot
file {
path => ["/data/elasticpot/log/elasticpot.json"]
codec => json
type => "ElasticPot"
}
# Endlessh
file {
path => ["/data/endlessh/log/endlessh.log"]
codec => plain
type => "Endlessh"
}
# Glutton
file {
path => ["/data/glutton/log/glutton.log"]
codec => json
type => "Glutton"
}
# Hellpot
file {
path => ["/data/hellpot/log/hellpot.log"]
codec => json
type => "Hellpot"
}
# Heralding
file {
path => ["/data/heralding/log/auth.csv"]
type => "Heralding"
}
# Honeypots
file {
path => ["/data/honeypots/log/*.log"]
codec => json
type => "Honeypots"
}
# Honeypy
file {
path => ["/data/honeypy/log/json.log"]
codec => json
type => "Honeypy"
}
# Honeysap
file {
path => ["/data/honeysap/log/honeysap-external.log"]
codec => json
type => "Honeysap"
}
# Honeytrap
file {
path => ["/data/honeytrap/log/attackers.json"]
codec => json
type => "Honeytrap"
}
# Ipphoney
file {
path => ["/data/ipphoney/log/ipphoney.json"]
codec => json
type => "Ipphoney"
}
# Log4pot
file {
path => ["/data/log4pot/log/log4pot.log"]
codec => json
type => "Log4pot"
}
# Mailoney
file {
path => ["/data/mailoney/log/commands.log"]
codec => json
type => "Mailoney"
}
# Medpot
file {
path => ["/data/medpot/log/medpot.log"]
codec => json
type => "Medpot"
}
# Rdpy
file {
path => ["/data/rdpy/log/rdpy.log"]
type => "Rdpy"
}
# Redishoneypot
file {
path => ["/data/redishoneypot/log/redishoneypot.log"]
codec => json
type => "Redishoneypot"
}
# Host NGINX
file {
path => ["/data/nginx/log/access.log"]
codec => json
type => "NGINX"
}
# Tanner
file {
path => ["/data/tanner/log/tanner_report.json"]
codec => json
type => "Tanner"
}
}
# Filter Section
filter {
# Fatt
if [type] == "Fatt" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"sourceIp" => "src_ip"
"destinationIp" => "dest_ip"
"sourcePort" => "src_port"
"destinationPort" => "dest_port"
"gquic" => "fatt_gquic"
"http" => "fatt_http"
"rdp" => "fatt_rdp"
"ssh" => "fatt_ssh"
"tls" => "fatt_tls"
}
}
}
# Suricata
if [type] == "Suricata" {
date {
match => [ "timestamp", "ISO8601" ]
}
translate {
refresh_interval => 86400
field => "[alert][signature_id]"
destination => "[alert][cve_id]"
dictionary_path => "/etc/listbot/cve.yaml"
# fallback => "-"
}
}
# P0f
if [type] == "P0f" {
date {
match => [ "timestamp", "yyyy'/'MM'/'dd HH:mm:ss" ]
remove_field => ["timestamp"]
}
mutate {
rename => {
"server_port" => "dest_port"
"server_ip" => "dest_ip"
"client_port" => "src_port"
"client_ip" => "src_ip"
}
}
}
# Adbhoney
if [type] == "Adbhoney" {
date {
match => [ "timestamp", "ISO8601" ]
remove_field => ["unixtime"]
}
}
# Ciscoasa
if [type] == "Ciscoasa" {
kv {
remove_char_key => " '{}"
remove_char_value => "'{}"
value_split => ":"
field_split => ","
}
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
add_field => {
"dest_ip" => "${MY_EXTIP}"
}
}
}
# CitrixHoneypot
if [type] == "CitrixHoneypot" {
grok {
match => {
"message" => [ "\A\(%{IPV4:src_ip:string}:%{INT:src_port:integer}\): %{JAVAMETHOD:http.http_method:string}%{SPACE}%{CISCO_REASON:fileinfo.state:string}: %{UNIXPATH:fileinfo.filename:string}",
"\A\(%{IPV4:src_ip:string}:%{INT:src_port:integer}\): %{JAVAMETHOD:http.http_method:string}%{SPACE}%{CISCO_REASON:fileinfo.state:string}: %{GREEDYDATA:payload:string}",
"\A\(%{IPV4:src_ip:string}:%{INT:src_port:integer}\): %{S3_REQUEST_LINE:msg:string} %{CISCO_REASON:fileinfo.state:string}: %{GREEDYDATA:payload:string:string}",
"\A\(%{IPV4:src_ip:string}:%{INT:src_port:integer}\): %{GREEDYDATA:msg:string}" ]
}
}
date {
match => [ "asctime", "ISO8601" ]
remove_field => ["asctime"]
remove_field => ["message"]
}
mutate {
add_field => {
"dest_port" => "443"
}
rename => {
"levelname" => "level"
}
}
}
# Conpot
if [type] == "ConPot" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"dst_port" => "dest_port"
"dst_ip" => "dest_ip"
}
}
}
# Cowrie
if [type] == "Cowrie" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"dst_port" => "dest_port"
"dst_ip" => "dest_ip"
}
}
}
# Ddospot
if [type] == "Ddospot" {
date {
match => [ "time", "yyyy-MM-dd HH:mm:ss.SSSSSS" ]
remove_field => ["time"]
}
if [path] == "/data/ddospot/log/chargenpot.log" {
mutate {
add_field => {
"dest_port" => "19"
"dest_ip" => "${MY_EXTIP}"
}
}
}
if [path] == "/data/ddospot/log/dnspot.log" {
mutate {
add_field => {
"dest_port" => "53"
"dest_ip" => "${MY_EXTIP}"
}
}
}
if [path] == "/data/ddospot/log/ntpot.log" {
mutate {
add_field => {
"dest_port" => "123"
"dest_ip" => "${MY_EXTIP}"
}
}
}
if [path] == "/data/ddospot/log/ssdpot.log" {
mutate {
add_field => {
"dest_port" => "1900"
"dest_ip" => "${MY_EXTIP}"
}
}
}
}
# Dionaea
if [type] == "Dionaea" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"dst_port" => "dest_port"
"dst_ip" => "dest_ip"
}
gsub => [
"src_ip", "::ffff:", "",
"dest_ip", "::ffff:", ""
]
}
if [credentials] {
mutate {
add_field => {
"username" => "%{[credentials][username]}"
"password" => "%{[credentials][password]}"
}
remove_field => "[credentials]"
}
}
}
# Dicompot
if [type] == "Dicompot" {
date {
match => [ "time", "yyyy-MM-dd HH:mm:ss" ]
remove_field => ["time"]
remove_field => ["timestamp"]
}
mutate {
rename => {
"ID" => "id"
"IP" => "src_ip"
"Port" => "src_port"
"AETitle" => "aetitle"
"Command" => "input"
"Files" => "files"
"Identifier" => "identifier"
"Matches" => "matches"
"Status" => "session"
"Version" => "version"
}
}
}
# ElasticPot
if [type] == "ElasticPot" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"content_type" => "http.http_content_type"
"dst_port" => "dest_port"
"dst_ip" => "dest_ip"
"message" => "event_type"
"request" => "request_method"
"user_agent" => "http_user_agent"
"url" => "http.url"
}
}
}
# Endlessh
# Example: 2021-10-29T21:08:31.026Z CLOSE host=1.2.3.4 port=12345 fd=4 time=20.015 bytes=24
# Example: 2021-10-29T21:08:11.011Z ACCEPT host=1.2.3.4 port=12346 fd=4 n=1/4096
if [type] == "Endlessh" {
grok { match => { "message" => [ "\A%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:reason}%{SPACE}host=%{IPV4:src_ip}%{SPACE}port=%{INT:src_port}%{SPACE}fd=%{INT}%{SPACE}time=%{SECOND:duration}%{SPACE}bytes=%{NUMBER:bytes}", "\A%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:reason}%{SPACE}host=%{IPV4:src_ip}%{SPACE}port=%{INT:src_port}%{SPACE}fd=%{INT}%{SPACE}n=%{INT}/%{INT}" ] } }
date {
match => [ "timestamp", "ISO8601" ]
remove_field => ["timestamp"]
}
mutate {
add_field => {
"dest_port" => "22"
"dest_ip" => "${MY_EXTIP}"
}
}
}
# Glutton
if [type] == "Glutton" {
date {
match => [ "ts", "UNIX" ]
remove_field => ["ts"]
}
}
# Hellpot
if [type] == "Hellpot" {
date {
match => [ "time", "ISO8601" ]
remove_field => ["time"]
remove_field => ["timestamp"]
}
mutate {
add_field => {
"dest_port" => "80"
"dest_ip" => "${MY_EXTIP}"
}
rename => {
"BYTES" => "bytes"
"DURATION" => "duration"
"REMOTE_ADDR" => "src_ip"
"URL" => "url"
"USERAGENT" => "http_user_agent"
"message" => "reason"
}
}
}
# Heralding
if [type] == "Heralding" {
csv {
columns => ["timestamp","auth_id","session_id","src_ip","src_port","dest_ip","dest_port","proto","username","password"] separator => ","
}
date {
match => [ "timestamp", "yyyy-MM-dd HH:mm:ss.SSSSSS" ]
remove_field => ["timestamp"]
}
}
# Honeypy
if [type] == "Honeypy" {
date {
match => [ "timestamp", "ISO8601" ]
remove_field => ["timestamp"]
remove_field => ["date"]
remove_field => ["time"]
remove_field => ["millisecond"]
}
}
# Honeypots
if [type] == "Honeypots" {
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Honeysap
if [type] == "Honeysap" {
date {
match => [ "timestamp", "yyyy-MM-dd HH:mm:ss.SSSSSS" ]
remove_field => ["timestamp"]
}
mutate {
rename => {
"[data][error_msg]" => "event_type"
"service" => "sensor"
"source_port" => "src_port"
"source_ip" => "src_ip"
"target_port" => "dest_port"
"target_ip" => "dest_ip"
}
remove_field => "event"
remove_field => "return_code"
}
if [data] {
mutate {
remove_field => "[data]"
}
}
}
# Honeytrap
if [type] == "Honeytrap" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"[attack_connection][local_port]" => "dest_port"
"[attack_connection][local_ip]" => "dest_ip"
"[attack_connection][remote_port]" => "src_port"
"[attack_connection][remote_ip]" => "src_ip"
}
}
}
# Ipphoney
if [type] == "Ipphoney" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"query" => "ipp_query"
"content_type" => "http.http_content_type"
"dst_port" => "dest_port"
"dst_ip" => "dest_ip"
"request" => "request_method"
"operation" => "data"
"user_agent" => "http_user_agent"
"url" => "http.url"
}
}
}
# Log4pot
if [type] == "Log4pot" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"request" => "request_uri"
"server_port" => "dest_port"
"port" => "src_port"
"client" => "src_ip"
}
}
}
# Mailoney
if [type] == "Mailoney" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
add_field => { "dest_port" => "25" }
}
}
# Medpot
if [type] == "Medpot" {
mutate {
add_field => {
"dest_port" => "2575"
"dest_ip" => "${MY_EXTIP}"
}
}
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Rdpy
if [type] == "Rdpy" {
grok { match => { "message" => [ "\A%{TIMESTAMP_ISO8601:timestamp},domain:%{CISCO_REASON:domain},username:%{CISCO_REASON:username},password:%{CISCO_REASON:password},hostname:%{GREEDYDATA:hostname}", "\A%{TIMESTAMP_ISO8601:timestamp},Connection from %{IPV4:src_ip}:%{INT:src_port:integer}" ] } }
date {
match => [ "timestamp", "ISO8601" ]
remove_field => ["timestamp"]
}
mutate {
add_field => { "dest_port" => "3389" }
}
}
# Redishoneypot
if [type] == "Redishoneypot" {
date {
match => [ "time", "yyyy-MM-dd HH:mm:ss" ]
remove_field => ["time"]
remove_field => ["timestamp"]
}
mutate {
split => { "addr" => ":" }
add_field => {
"src_ip" => "%{[addr][0]}"
"src_port" => "%{[addr][1]}"
"dest_port" => "6379"
"dest_ip" => "${MY_EXTIP}"
}
remove_field => ["addr"]
}
}
# NGINX
if [type] == "NGINX" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"request" => "request_data"
}
}
}
# Tanner
if [type] == "Tanner" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"[peer][ip]" => "src_ip"
"[peer][port]" => "src_port"
}
add_field => { "dest_port" => "80" }
}
}
# Drop if parse fails
if "_grokparsefailure" in [tags] { drop {} }
if "_jsonparsefailure" in [tags] { drop {} }
# Add T-Pot hostname and external IP
mutate {
add_field => {
"t-pot_ip_ext" => "${MY_EXTIP}"
"t-pot_ip_int" => "${MY_INTIP}"
"t-pot_hostname" => "${MY_HOSTNAME}"
}
}
# Add geo coordinates / ASN info / IP rep.
if [src_ip] {
geoip {
cache_size => 10000
source => "src_ip"
database => "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-7.2.11-java/vendor/GeoLite2-City.mmdb"
}
geoip {
cache_size => 10000
source => "src_ip"
database => "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-7.2.11-java/vendor/GeoLite2-ASN.mmdb"
}
translate {
refresh_interval => 86400
field => "src_ip"
destination => "ip_rep"
dictionary_path => "/etc/listbot/iprep.yaml"
}
}
if [t-pot_ip_ext] {
geoip {
cache_size => 10000
source => "t-pot_ip_ext"
target => "geoip_ext"
database => "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-7.2.11-java/vendor/GeoLite2-City.mmdb"
}
geoip {
cache_size => 10000
source => "t-pot_ip_ext"
target => "geoip_ext"
database => "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-7.2.11-java/vendor/GeoLite2-ASN.mmdb"
}
}
# In some rare conditions dest_port, src_port, status are indexed as strings, forcing integer for now
if [dest_port] {
mutate {
convert => { "dest_port" => "integer" }
}
}
if [src_port] {
mutate {
convert => { "src_port" => "integer" }
}
}
if [status] {
mutate {
convert => { "status" => "integer" }
}
}
if [id] {
mutate {
convert => { "id" => "string" }
}
}
if [request] {
mutate {
convert => { "request" => "string" }
}
}
}
# Output section
output {
http {
http_method => "post"
http_compression => true
id => "${MY_HOSTNAME}"
codec => "json"
format => "json_batch"
url => "http://127.0.0.1:64305"
}
}
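Note: the 127.0.0.1:64305 target above is the POT-local end of the autossh tunnel opened in the Logstash entrypoint (see further below), so the json_batch events are forwarded to the hive's http_input pipeline.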

View File

@ -71,6 +71,13 @@ input {
type => "Dicompot" type => "Dicompot"
} }
# Ddospot
file {
path => ["/data/ddospot/log/*.log"]
codec => json
type => "Ddospot"
}
# ElasticPot # ElasticPot
file { file {
path => ["/data/elasticpot/log/elasticpot.json"] path => ["/data/elasticpot/log/elasticpot.json"]
@ -78,6 +85,13 @@ input {
type => "ElasticPot" type => "ElasticPot"
} }
# Endlessh
file {
path => ["/data/endlessh/log/endlessh.log"]
codec => plain
type => "Endlessh"
}
# Glutton # Glutton
file { file {
path => ["/data/glutton/log/glutton.log"] path => ["/data/glutton/log/glutton.log"]
@ -85,12 +99,26 @@ input {
type => "Glutton" type => "Glutton"
} }
# Hellpot
file {
path => ["/data/hellpot/log/hellpot.log"]
codec => json
type => "Hellpot"
}
# Heralding # Heralding
file { file {
path => ["/data/heralding/log/auth.csv"] path => ["/data/heralding/log/auth.csv"]
type => "Heralding" type => "Heralding"
} }
# Honeypots
file {
path => ["/data/honeypots/log/*.log"]
codec => json
type => "Honeypots"
}
# Honeypy # Honeypy
file { file {
path => ["/data/honeypy/log/json.log"] path => ["/data/honeypy/log/json.log"]
@ -112,6 +140,20 @@ input {
type => "Honeytrap" type => "Honeytrap"
} }
# Ipphoney
file {
path => ["/data/ipphoney/log/ipphoney.json"]
codec => json
type => "Ipphoney"
}
# Log4pot
file {
path => ["/data/log4pot/log/log4pot.log"]
codec => json
type => "Log4pot"
}
# Mailoney # Mailoney
file { file {
path => ["/data/mailoney/log/commands.log"] path => ["/data/mailoney/log/commands.log"]
@ -132,6 +174,13 @@ input {
type => "Rdpy" type => "Rdpy"
} }
# Redishoneypot
file {
path => ["/data/redishoneypot/log/redishoneypot.log"]
codec => json
type => "Redishoneypot"
}
# Host NGINX # Host NGINX
file { file {
path => ["/data/nginx/log/access.log"] path => ["/data/nginx/log/access.log"]
@ -279,6 +328,46 @@ filter {
} }
} }
# Ddospot
if [type] == "Ddospot" {
date {
match => [ "time", "yyyy-MM-dd HH:mm:ss.SSSSSS" ]
remove_field => ["time"]
}
if [path] == "/data/ddospot/log/chargenpot.log" {
mutate {
add_field => {
"dest_port" => "19"
"dest_ip" => "${MY_EXTIP}"
}
}
}
if [path] == "/data/ddospot/log/dnspot.log" {
mutate {
add_field => {
"dest_port" => "53"
"dest_ip" => "${MY_EXTIP}"
}
}
}
if [path] == "/data/ddospot/log/ntpot.log" {
mutate {
add_field => {
"dest_port" => "123"
"dest_ip" => "${MY_EXTIP}"
}
}
}
if [path] == "/data/ddospot/log/ssdpot.log" {
mutate {
add_field => {
"dest_port" => "1900"
"dest_ip" => "${MY_EXTIP}"
}
}
}
}
# Dionaea # Dionaea
if [type] == "Dionaea" { if [type] == "Dionaea" {
date { date {
@ -314,6 +403,7 @@ filter {
} }
mutate { mutate {
rename => { rename => {
"ID" => "id"
"IP" => "src_ip" "IP" => "src_ip"
"Port" => "src_port" "Port" => "src_port"
"AETitle" => "aetitle" "AETitle" => "aetitle"
@ -345,6 +435,23 @@ filter {
} }
} }
# Endlessh
# Example: 2021-10-29T21:08:31.026Z CLOSE host=1.2.3.4 port=12345 fd=4 time=20.015 bytes=24
# Example: 2021-10-29T21:08:11.011Z ACCEPT host=1.2.3.4 port=12346 fd=4 n=1/4096
if [type] == "Endlessh" {
grok { match => { "message" => [ "\A%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:reason}%{SPACE}host=%{IPV4:src_ip}%{SPACE}port=%{INT:src_port}%{SPACE}fd=%{INT}%{SPACE}time=%{SECOND:duration}%{SPACE}bytes=%{NUMBER:bytes}", "\A%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:reason}%{SPACE}host=%{IPV4:src_ip}%{SPACE}port=%{INT:src_port}%{SPACE}fd=%{INT}%{SPACE}n=%{INT}/%{INT}" ] } }
date {
match => [ "timestamp", "ISO8601" ]
remove_field => ["timestamp"]
}
mutate {
add_field => {
"dest_port" => "22"
"dest_ip" => "${MY_EXTIP}"
}
}
}
# Glutton # Glutton
if [type] == "Glutton" { if [type] == "Glutton" {
date { date {
@ -353,6 +460,29 @@ filter {
} }
} }
# Hellpot
if [type] == "Hellpot" {
date {
match => [ "time", "ISO8601" ]
remove_field => ["time"]
remove_field => ["timestamp"]
}
mutate {
add_field => {
"dest_port" => "80"
"dest_ip" => "${MY_EXTIP}"
}
rename => {
"BYTES" => "bytes"
"DURATION" => "duration"
"REMOTE_ADDR" => "src_ip"
"URL" => "url"
"USERAGENT" => "http_user_agent"
"message" => "reason"
}
}
}
# Heralding # Heralding
if [type] == "Heralding" { if [type] == "Heralding" {
csv { csv {
@ -375,6 +505,13 @@ filter {
} }
} }
# Honeypots
if [type] == "Honeypots" {
date {
match => [ "timestamp", "ISO8601" ]
}
}
# Honeysap # Honeysap
if [type] == "Honeysap" { if [type] == "Honeysap" {
date { date {
@ -415,15 +552,47 @@ filter {
} }
} }
# Ipphoney
if [type] == "Ipphoney" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"query" => "ipp_query"
"content_type" => "http.http_content_type"
"dst_port" => "dest_port"
"dst_ip" => "dest_ip"
"request" => "request_method"
"operation" => "data"
"user_agent" => "http_user_agent"
"url" => "http.url"
}
}
}
# Log4pot
if [type] == "Log4pot" {
date {
match => [ "timestamp", "ISO8601" ]
}
mutate {
rename => {
"request" => "request_uri"
"server_port" => "dest_port"
"port" => "src_port"
"client" => "src_ip"
}
}
}
# Mailoney # Mailoney
if [type] == "Mailoney" { if [type] == "Mailoney" {
date { date {
match => [ "timestamp", "ISO8601" ] match => [ "timestamp", "ISO8601" ]
} }
mutate { mutate {
add_field => { add_field => { "dest_port" => "25" }
"dest_port" => "25"
}
} }
} }
@ -448,9 +617,26 @@ filter {
remove_field => ["timestamp"] remove_field => ["timestamp"]
} }
mutate { mutate {
add_field => { "dest_port" => "3389" }
}
}
# Redishoneypot
if [type] == "Redishoneypot" {
date {
match => [ "time", "yyyy-MM-dd HH:mm:ss" ]
remove_field => ["time"]
remove_field => ["timestamp"]
}
mutate {
split => { "addr" => ":" }
add_field => { add_field => {
"dest_port" => "3389" "src_ip" => "%{[addr][0]}"
"src_port" => "%{[addr][1]}"
"dest_port" => "6379"
"dest_ip" => "${MY_EXTIP}"
} }
remove_field => ["addr"]
} }
} }
@ -459,6 +645,11 @@ filter {
date { date {
match => [ "timestamp", "ISO8601" ] match => [ "timestamp", "ISO8601" ]
} }
mutate {
rename => {
"request" => "request_data"
}
}
} }
# Tanner # Tanner
@ -471,26 +662,34 @@ filter {
"[peer][ip]" => "src_ip" "[peer][ip]" => "src_ip"
"[peer][port]" => "src_port" "[peer][port]" => "src_port"
} }
add_field => { add_field => { "dest_port" => "80" }
"dest_port" => "80"
}
} }
} }
# Drop if parse fails # Drop if parse fails
if "_grokparsefailure" in [tags] { drop {} } if "_grokparsefailure" in [tags] { drop {} }
if "_jsonparsefailure" in [tags] { drop {} }
# Add T-Pot hostname and external IP
mutate {
add_field => {
"t-pot_ip_ext" => "${MY_EXTIP}"
"t-pot_ip_int" => "${MY_INTIP}"
"t-pot_hostname" => "${MY_HOSTNAME}"
}
}
# Add geo coordinates / ASN info / IP rep. # Add geo coordinates / ASN info / IP rep.
if [src_ip] { if [src_ip] {
geoip { geoip {
cache_size => 10000 cache_size => 10000
source => "src_ip" source => "src_ip"
database => "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-6.0.3-java/vendor/GeoLite2-City.mmdb" database => "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-7.2.11-java/vendor/GeoLite2-City.mmdb"
} }
geoip { geoip {
cache_size => 10000 cache_size => 10000
source => "src_ip" source => "src_ip"
database => "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-6.0.3-java/vendor/GeoLite2-ASN.mmdb" database => "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-7.2.11-java/vendor/GeoLite2-ASN.mmdb"
} }
translate { translate {
refresh_interval => 86400 refresh_interval => 86400
@ -499,6 +698,20 @@ if "_grokparsefailure" in [tags] { drop {} }
dictionary_path => "/etc/listbot/iprep.yaml" dictionary_path => "/etc/listbot/iprep.yaml"
} }
} }
if [t-pot_ip_ext] {
geoip {
cache_size => 10000
source => "t-pot_ip_ext"
target => "geoip_ext"
database => "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-7.2.11-java/vendor/GeoLite2-City.mmdb"
}
geoip {
cache_size => 10000
source => "t-pot_ip_ext"
target => "geoip_ext"
database => "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-7.2.11-java/vendor/GeoLite2-ASN.mmdb"
}
}
# In some rare conditions dest_port, src_port, status are indexed as strings, forcing integer for now # In some rare conditions dest_port, src_port, status are indexed as strings, forcing integer for now
if [dest_port] { if [dest_port] {
@ -516,15 +729,14 @@ if "_grokparsefailure" in [tags] { drop {} }
convert => { "status" => "integer" } convert => { "status" => "integer" }
} }
} }
if [id] {
# Add T-Pot hostname and external IP
if [type] == "Adbhoney" or [type] == "Ciscoasa" or [type] == "CitrixHoneypot" or [type] == "ConPot" or [type] == "Cowrie" or [type] == "Dicompot" or [type] == "Dionaea" or [type] == "ElasticPot" or [type] == "Fatt" or [type] == "Glutton" or [type] == "Honeysap" or [type] == "Honeytrap" or [type] == "Heralding" or [type] == "Honeypy" or [type] == "Mailoney" or [type] == "Medpot" or [type] == "P0f" or [type] == "Rdpy" or [type] == "Suricata" or [type] == "Tanner" {
mutate { mutate {
add_field => { convert => { "id" => "string" }
"t-pot_ip_ext" => "${MY_EXTIP}" }
"t-pot_ip_int" => "${MY_INTIP}" }
"t-pot_hostname" => "${MY_HOSTNAME}" if [request] {
} mutate {
convert => { "request" => "string" }
} }
} }
@ -534,9 +746,10 @@ if "_grokparsefailure" in [tags] { drop {} }
output { output {
elasticsearch { elasticsearch {
hosts => ["elasticsearch:9200"] hosts => ["elasticsearch:9200"]
# With ILM in place we need to set the daily index manually, if not => FUBAR # With templates now being legacy and ILM in place we need to set the daily index with its template manually. Otherwise a new index might be created with different settings configured through Kibana.
index => "logstash-%{+YYYY.MM.dd}" index => "logstash-%{+YYYY.MM.dd}"
# document_type => "doc" template => "/etc/logstash/tpot_es_template.json"
#document_type => "doc"
} }
#if [type] == "Suricata" { #if [type] == "Suricata" {

View File

@ -0,0 +1,4 @@
- pipeline.id: logstash
path.config: "/etc/logstash/conf.d/logstash.conf"
- pipeline.id: http_input
path.config: "/etc/logstash/conf.d/http_input.conf"

View File

@ -0,0 +1,2 @@
- pipeline.id: http_output
path.config: "/etc/logstash/conf.d/http_output.conf"
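These two pipeline files complement each other: the first (with the logstash and http_input pipelines) is presumably the hive-side pipelines.yml, while this one is pipelines_pot.yml, which the entrypoint below copies over pipelines.yml on a distributed POT so that only the http_output pipeline runs there.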

View File

@ -43,6 +43,15 @@
"latitude" : { "type" : "half_float" }, "latitude" : { "type" : "half_float" },
"longitude" : { "type" : "half_float" } "longitude" : { "type" : "half_float" }
} }
},
"geoip_ext" : {
"dynamic": true,
"properties" : {
"ip": { "type": "ip" },
"location" : { "type" : "geo_point" },
"latitude" : { "type" : "half_float" },
"longitude" : { "type" : "half_float" }
}
} }
} }
} }

View File

@ -35,11 +35,88 @@ if [ "$myCHECK" == "0" ];
echo "Cannot reach Listbot, starting Logstash without latest translation maps." echo "Cannot reach Listbot, starting Logstash without latest translation maps."
fi fi
# Make sure logstash can put latest logstash template by deleting the old one first # Distributed T-Pot installation needs a different pipeline config and autossh tunnel.
if [ "$MY_TPOT_TYPE" == "POT" ];
then
echo
echo "Distributed T-Pot setup, sending T-Pot logs to $MY_HIVE_IP."
echo
echo "T-Pot type: $MY_TPOT_TYPE"
echo "Keyfile used: $MY_POT_PRIVATEKEYFILE"
echo "Hive username: $MY_HIVE_USERNAME"
echo "Hive IP: $MY_HIVE_IP"
echo
cp /usr/share/logstash/config/pipelines_pot.yml /usr/share/logstash/config/pipelines.yml
autossh -f -M 0 -4 -l $MY_HIVE_USERNAME -i $MY_POT_PRIVATEKEYFILE -p 64295 -N -L64305:127.0.0.1:64305 $MY_HIVE_IP -o "ServerAliveInterval 30" -o "ServerAliveCountMax 3" -o "StrictHostKeyChecking=no" -o "UserKnownHostsFile=/dev/null"
exit 0
fi
# We do want to enforce our es_template, thus we always need to delete the default template and put our own in place afterwards
# This is now done via common_configs.rb => overwrite default logstash template # This is now done via common_configs.rb => overwrite default logstash template
#echo "Removing logstash template." echo "Removing logstash template."
#curl -XDELETE http://elasticsearch:9200/_template/logstash curl -s -XDELETE http://elasticsearch:9200/_template/logstash
#echo echo
#echo "Checking if empty." echo "Checking if empty."
#curl -XGET http://elasticsearch:9200/_template/logstash curl -s -XGET http://elasticsearch:9200/_template/logstash
#echo echo
echo "Putting default template."
curl -XPUT "http://elasticsearch:9200/_template/logstash" -H 'Content-Type: application/json' -d'
{
"index_patterns" : "logstash-*",
"version" : 60001,
"settings" : {
"index.refresh_interval" : "5s",
"number_of_shards" : 1,
"index.number_of_replicas" : "0",
"index.mapping.total_fields.limit" : "2000",
"index.query": {
"default_field": "*"
}
},
"mappings" : {
"dynamic_templates" : [ {
"message_field" : {
"path_match" : "message",
"match_mapping_type" : "string",
"mapping" : {
"type" : "text",
"norms" : false
}
}
}, {
"string_fields" : {
"match" : "*",
"match_mapping_type" : "string",
"mapping" : {
"type" : "text", "norms" : false,
"fields" : {
"keyword" : { "type": "keyword", "ignore_above": 256 }
}
}
}
} ],
"properties" : {
"@timestamp": { "type": "date"},
"@version": { "type": "keyword"},
"geoip" : {
"dynamic": true,
"properties" : {
"ip": { "type": "ip" },
"location" : { "type" : "geo_point" },
"latitude" : { "type" : "half_float" },
"longitude" : { "type" : "half_float" }
}
},
"geoip_ext" : {
"dynamic": true,
"properties" : {
"ip": { "type": "ip" },
"location" : { "type" : "geo_point" },
"latitude" : { "type" : "half_float" },
"longitude" : { "type" : "half_float" }
}
}
}
}
}'
echo
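To confirm the template was accepted, a quick check along these lines should work (a sketch, assuming Elasticsearch is reachable under the same hostname as in the script above):

curl -s -XGET "http://elasticsearch:9200/_template/logstash?pretty"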

View File

@ -7,12 +7,17 @@ services:
build: . build: .
container_name: logstash container_name: logstash
restart: always restart: always
# environment:
# - LS_JAVA_OPTS=-Xms2048m -Xmx2048m
# depends_on: # depends_on:
# elasticsearch: # elasticsearch:
# condition: service_healthy # condition: service_healthy
env_file: env_file:
- /opt/tpot/etc/compose/elk_environment - /opt/tpot/etc/compose/elk_environment
ports:
- "127.0.0.1:64305:80"
image: "dtagdevsec/logstash:2006" image: "dtagdevsec/logstash:2006"
volumes: volumes:
- /data:/data - /data:/data
- /root/tpotce/docker/elk/logstash/dist/logstash.conf:/etc/logstash/conf.d/logstash.conf # - /root/tpotce/docker/elk/logstash/dist/logstash.conf:/etc/logstash/conf.d/logstash.conf
# - /root/tpotce/docker/elk/logstash/dist/http.conf:/etc/logstash/conf.d/http.conf
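With the 127.0.0.1:64305:80 mapping above in place on the hive, the http_input pipeline can be smoke-tested locally; a minimal sketch (the JSON payload is purely illustrative):

curl -s -XPOST "http://127.0.0.1:64305" \
     -H "Content-Type: application/json" \
     -d '{"type":"Cowrie","src_ip":"192.0.2.1","message":"ingest smoke test"}'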

View File

@ -0,0 +1,42 @@
FROM alpine:3.13 as builder
#
# Include dist
ADD dist/ /root/dist/
#
# Install packages
RUN apk -U add --no-cache \
build-base \
git \
libcap && \
#
# Install endlessh from git
git clone https://github.com/skeeto/endlessh /opt/endlessh && \
cd /opt/endlessh && \
git checkout dfe44eb2c5b6fc3c48a39ed826fe0e4459cdf6ef && \
make && \
mv /opt/endlessh/endlessh /root/dist
#
FROM alpine:3.14
#
COPY --from=builder /root/dist/* /opt/endlessh/
#
# Install packages
RUN apk -U add --no-cache \
libcap && \
#
# Setup user, groups and configs
mkdir -p /var/log/endlessh && \
addgroup -g 2000 endlessh && \
adduser -S -H -s /bin/ash -u 2000 -D -g 2000 endlessh && \
chown -R endlessh:endlessh /opt/endlessh && \
#setcap cap_net_bind_service=+ep /usr/bin/python3.8 && \
#
# Clean up
rm -rf /root/* && \
rm -rf /var/cache/apk/*
#
# Set workdir and start endlessh
STOPSIGNAL SIGINT
USER endlessh:endlessh
WORKDIR /opt/endlessh/
CMD ./endlessh -f endlessh.conf >/var/log/endlessh/endlessh.log

docker/endlessh/dist/endlessh.conf
View File

@ -0,0 +1,27 @@
# The port on which to listen for new SSH connections.
Port 2222
# The endless banner is sent one line at a time. This is the delay
# in milliseconds between individual lines.
Delay 10000
# The length of each line is randomized. This controls the maximum
# length of each line. Shorter lines may keep clients on for longer if
# they give up after a certain number of bytes.
MaxLineLength 32
# Maximum number of connections to accept at a time. Connections beyond
# this are not immediately rejected, but will wait in the queue.
MaxClients 4096
# Set the detail level for the log.
# 0 = Quiet
# 1 = Standard, useful log messages
# 2 = Very noisy debugging information
LogLevel 1
# Set the family of the listening socket
# 0 = Use IPv4 Mapped IPv6 (Both v4 and v6, default)
# 4 = Use IPv4 only
# 6 = Use IPv6 only
BindFamily 4

View File

@ -0,0 +1,20 @@
version: '2.3'
networks:
endlessh_local:
services:
# Endlessh service
endlessh:
build: .
container_name: endlessh
restart: always
networks:
- endlessh_local
ports:
- "22:2222"
image: "dtagdevsec/endlessh:2006"
read_only: true
volumes:
- /data/endlessh/log:/var/log/endlessh
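A simple way to see the tarpit at work, assuming the 22:2222 mapping above and with <honeypot-ip> as a placeholder; with Delay 10000 a random banner line should trickle in roughly every ten seconds:

timeout 35 nc -v <honeypot-ip> 22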

View File

@ -1,11 +1,10 @@
FROM alpine:latest FROM alpine:3.14
# #
# Include dist # Include dist
ADD dist/ /root/dist/ ADD dist/ /root/dist/
# #
# Install packages # Install packages
RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \ RUN apk -U --no-cache add \
apk -U --no-cache add \
build-base \ build-base \
git \ git \
libffi-dev \ libffi-dev \
@ -14,16 +13,20 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
python3 \ python3 \
python3-dev \ python3-dev \
py3-cffi \ py3-cffi \
py3-cryptography \
py3-ipaddress \ py3-ipaddress \
py3-lxml \ py3-lxml \
py3-mysqlclient \ py3-mysqlclient \
py3-requests \ py3-requests \
py3-pip \ py3-pip \
py3-setuptools && \ py3-setuptools && \
pip3 install --no-cache-dir configparser hpfeeds3 pyOpenSSL xmljson && \ pip3 install --no-cache-dir configparser hpfeeds3 influxdb influxdb-client pyOpenSSL xmljson && \
# #
# Setup ewsposter # Setup ewsposter
git clone --depth=1 https://github.com/dtag-dev-sec/ewsposter /opt/ewsposter && \ git clone https://github.com/telekom-security/ewsposter /opt/ewsposter && \
cd /opt/ewsposter && \
# git checkout 11ab4c8a0a1b63d4bca8c52c07f2eab520d0b257 && \
git checkout 17c08f3ae500d838c1528c9700e4430d5f6ad214 && \
mkdir -p /opt/ewsposter/spool /opt/ewsposter/log && \ mkdir -p /opt/ewsposter/spool /opt/ewsposter/log && \
# #
# Setup user and groups # Setup user and groups

View File

@ -4,10 +4,11 @@ spooldir = /opt/ewsposter/spool/
logdir = /opt/ewsposter/log/ logdir = /opt/ewsposter/log/
del_malware_after_send = false del_malware_after_send = false
send_malware = false send_malware = false
sendlimit = 500 sendlimit = 5000
contact = your_email_address contact = your_email_address
proxy = proxy = None
ip = ip_int = None
ip_ext = None
[EWS] [EWS]
ews = true ews = true
@ -33,30 +34,22 @@ hpfformat = %(EWS_HPFEEDS_FORMAT)s
json = false json = false
jsondir = /data/ews/json/ jsondir = /data/ews/json/
[INFLUXDB]
influxdb = false
host = http://localhost
port = 8086
username = <your username for influx 1.8>
password = <your password for influx 1.8>
token = <your token for influx 2.0>
bucket = <your bucket/database for 2.0/1.8>
org = <your org for influx 2.0>
[GLASTOPFV3] [GLASTOPFV3]
glastopfv3 = true glastopfv3 = false
nodeid = glastopfv3-community-01 nodeid = glastopfv3-community-01
sqlitedb = /data/glastopf/db/glastopf.db sqlitedb = /data/glastopf/db/glastopf.db
malwaredir = /data/glastopf/data/files/ malwaredir = /data/glastopf/data/files/
[GLASTOPFV2]
glastopfv2 = false
nodeid =
mysqlhost =
mysqldb =
mysqluser =
mysqlpw =
malwaredir =
[KIPPO]
kippo = false
nodeid =
mysqlhost =
mysqldb =
mysqluser =
mysqlpw =
malwaredir =
[COWRIE] [COWRIE]
cowrie = true cowrie = true
nodeid = cowrie-community-01 nodeid = cowrie-community-01
@ -75,12 +68,6 @@ newversion = true
payloaddir = /data/honeytrap/attacks/ payloaddir = /data/honeytrap/attacks/
attackerfile = /data/honeytrap/log/attacker.log attackerfile = /data/honeytrap/log/attacker.log
[RDPDETECT]
rdpdetect = false
nodeid =
iptableslog =
targetip =
[EMOBILITY] [EMOBILITY]
eMobility = false eMobility = false
nodeid = emobility-community-01 nodeid = emobility-community-01
@ -92,12 +79,12 @@ nodeid = conpot-community-01
logfile = /data/conpot/log/conpot*.json logfile = /data/conpot/log/conpot*.json
[ELASTICPOT] [ELASTICPOT]
elasticpot = false elasticpot = true
nodeid = elasticpot-community-01 nodeid = elasticpot-community-01
logfile = /data/elasticpot/log/elasticpot.json logfile = /data/elasticpot/log/elasticpot.json
[SURICATA] [SURICATA]
suricata = true suricata = false
nodeid = suricata-community-01 nodeid = suricata-community-01
logfile = /data/suricata/log/eve.json logfile = /data/suricata/log/eve.json
@ -112,7 +99,7 @@ nodeid = rdpy-community-01
logfile = /data/rdpy/log/rdpy.log logfile = /data/rdpy/log/rdpy.log
[VNCLOWPOT] [VNCLOWPOT]
vnclowpot = true vnclowpot = false
nodeid = vnclowpot-community-01 nodeid = vnclowpot-community-01
logfile = /data/vnclowpot/log/vnclowpot.log logfile = /data/vnclowpot/log/vnclowpot.log
@ -135,3 +122,43 @@ logfile = /data/tanner/log/tanner_report.json
glutton = true glutton = true
nodeid = glutton-community-01 nodeid = glutton-community-01
logfile = /data/glutton/log/glutton.log logfile = /data/glutton/log/glutton.log
[HONEYSAP]
honeysap = true
nodeid = honeysap-community-01
logfile = /data/honeysap/log/honeysap-external.log
[ADBHONEY]
adbhoney = true
nodeid = adbhoney-community-01
logfile = /data/adbhoney/log/adbhoney.json
[FATT]
fatt = false
nodeid = fatt-community-01
logfile = /data/fatt/log/fatt.log
[IPPHONEY]
ipphoney = true
nodeid = ipphoney-community-01
logfile = /data/ipphoney/log/ipphoney.json
[DICOMPOT]
dicompot = true
nodeid = dicompot-community-01
logfile = /data/dicompot/log/dicompot.log
[MEDPOT]
medpot = true
nodeid = medpot-community-01
logfile = /data/medpot/log/medpot.log
[HONEYPY]
honeypy = true
nodeid = honeypy-community-01
logfile = /data/honeypy/log/json.log
[CITRIX]
citrix = true
nodeid = citrix-community-01
logfile = /data/citrixhoneypot/logs/server.log

View File

@ -26,5 +26,4 @@ services:
image: "dtagdevsec/ewsposter:2006" image: "dtagdevsec/ewsposter:2006"
volumes: volumes:
- /data:/data - /data:/data
- /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip # - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip

View File

@ -1,18 +1,14 @@
FROM alpine:latest FROM alpine:3.14
#
# Include dist
#ADD dist/ /root/dist/
# #
# Get and install dependencies & packages # Get and install dependencies & packages
RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \ RUN apk -U add \
apk -U add \
git \ git \
py3-libxml2 \ py3-libxml2 \
py3-lxml \ py3-lxml \
py3-pip \ py3-pip \
python3 \ python3 \
python3-dev && \ python3-dev \
apk -U add tshark --repository http://dl-3.alpinelinux.org/alpine/edge/community/ && \ tshark && \
# #
# Setup user # Setup user
addgroup -g 2000 fatt && \ addgroup -g 2000 fatt && \
@ -21,10 +17,12 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
# Install fatt # Install fatt
mkdir -p /opt && \ mkdir -p /opt && \
cd /opt && \ cd /opt && \
git clone --depth=1 https://github.com/0x4D31/fatt && \ git clone https://github.com/0x4D31/fatt && \
cd fatt && \ cd fatt && \
git checkout 314cd1ff7873b5a145a51ec4e85f6107828a2c79 && \
mkdir -p log && \ mkdir -p log && \
pip3 install pyshark==0.4.2.2 && \ # pyshark >= 0.4.3 breaks fatt
pip3 install pyshark==0.4.2.11 && \
# #
# Setup configs # Setup configs
chown fatt:fatt -R /opt/fatt/* && \ chown fatt:fatt -R /opt/fatt/* && \
@ -39,4 +37,4 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
STOPSIGNAL SIGINT STOPSIGNAL SIGINT
ENV PYTHONPATH /opt/fatt ENV PYTHONPATH /opt/fatt
WORKDIR /opt/fatt WORKDIR /opt/fatt
CMD python3 fatt.py -i $(/sbin/ip address | grep '^2: ' | awk '{ print $2 }' | tr -d [:punct:]) --print_output --json_logging -o log/fatt.log CMD python3 fatt.py -i $(/sbin/ip address show | /usr/bin/awk '/inet.*brd/{ print $NF; exit }') --print_output --json_logging -o log/fatt.log
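The new CMD derives the capture interface from the first `ip address show` line that carries both an inet address and a broadcast address (awk prints the interface name in the last field and exits), instead of assuming the interface has index 2 as before.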

View File

@ -1,11 +1,10 @@
FROM alpine:latest FROM alpine:3.13
# #
# Include dist # Include dist
ADD dist/ /root/dist/ ADD dist/ /root/dist/
# #
# Setup apk # Setup apk
RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \ RUN apk -U --no-cache add \
apk -U --no-cache add \
build-base \ build-base \
git \ git \
go \ go \
@ -22,6 +21,7 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
cd /opt/go/ && \ cd /opt/go/ && \
git clone https://github.com/mushorg/glutton && \ git clone https://github.com/mushorg/glutton && \
cd /opt/go/glutton/ && \ cd /opt/go/glutton/ && \
git checkout c25045b95b43ed9bfee89b2d14a50f5794a9cf2b && \
mv /root/dist/system.go /opt/go/glutton/ && \ mv /root/dist/system.go /opt/go/glutton/ && \
go mod download && \ go mod download && \
make build && \ make build && \
@ -52,4 +52,4 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
# Start glutton # Start glutton
WORKDIR /opt/glutton WORKDIR /opt/glutton
USER glutton:glutton USER glutton:glutton
CMD exec bin/server -i $(/sbin/ip address | grep '^2: ' | awk '{ print $2 }' | tr -d [:punct:]) -l /var/log/glutton/glutton.log > /dev/null 2>&1 CMD exec bin/server -i $(/sbin/ip address show | /usr/bin/awk '/inet.*brd/{ print $NF; exit }') -l /var/log/glutton/glutton.log > /dev/null 2>&1

View File

@ -1,11 +1,10 @@
FROM alpine:latest FROM alpine:3.14
# #
# Include dist # Include dist
ADD dist/ /root/dist/ ADD dist/ /root/dist/
# #
# Get and install dependencies & packages # Get and install dependencies & packages
RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \ RUN apk -U --no-cache add \
apk -U --no-cache add \
git \ git \
nginx \ nginx \
nginx-mod-http-headers-more \ nginx-mod-http-headers-more \
@ -28,11 +27,16 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
# #
# Clone and setup Heimdall, Nginx # Clone and setup Heimdall, Nginx
git clone https://github.com/linuxserver/heimdall && \ git clone https://github.com/linuxserver/heimdall && \
cd heimdall && \
git checkout 61a5a1a8b023771e0ff7c056add5537d20737e51 && \
cd .. && \
cp -R heimdall/. /var/lib/nginx/html && \ cp -R heimdall/. /var/lib/nginx/html && \
rm -rf heimdall && \ rm -rf heimdall && \
cd /var/lib/nginx/html && \ cd /var/lib/nginx/html && \
cp .env.example .env && \ cp .env.example .env && \
php artisan key:generate && \ # Fix error for ArrayInput in symfony with regard to PHP7.4 (https://github.com/symfony/symfony/pull/32806/files)
sed -i "135s/.*/} elseif (0 === strpos(\$key, '-')) {/" /var/lib/nginx/html/vendor/symfony/console/Input/ArrayInput.php && \
php7 artisan key:generate && \
# #
## Add previously configured content ## Add previously configured content
mkdir -p /var/lib/nginx/html/storage/app/public/backgrounds/ && \ mkdir -p /var/lib/nginx/html/storage/app/public/backgrounds/ && \
@ -60,6 +64,7 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
sed -i "s/APP_NAME=Heimdall/APP_NAME=T-Pot/g" /var/lib/nginx/html/.env && \ sed -i "s/APP_NAME=Heimdall/APP_NAME=T-Pot/g" /var/lib/nginx/html/.env && \
## Add Nginx / T-Pot specific configs ## Add Nginx / T-Pot specific configs
rm -rf /etc/nginx/conf.d/* /usr/share/nginx/html/* && \ rm -rf /etc/nginx/conf.d/* /usr/share/nginx/html/* && \
mkdir -p /etc/nginx/conf.d && \
cp /root/dist/conf/nginx.conf /etc/nginx/ && \ cp /root/dist/conf/nginx.conf /etc/nginx/ && \
cp -R /root/dist/conf/ssl /etc/nginx/ && \ cp -R /root/dist/conf/ssl /etc/nginx/ && \
cp /root/dist/conf/tpotweb.conf /etc/nginx/conf.d/ && \ cp /root/dist/conf/tpotweb.conf /etc/nginx/conf.d/ && \

View File

@ -149,4 +149,8 @@ server {
proxy_pass http://127.0.0.1:64303/spiderfoot/scandelete; proxy_pass http://127.0.0.1:64303/spiderfoot/scandelete;
} }
location /scaninfo {
proxy_pass http://127.0.0.1:64303/spiderfoot/scaninfo;
}
} }

docker/hellpot/Dockerfile
View File

@ -0,0 +1,48 @@
FROM alpine:3.14
#
# Include dist
ADD dist/ /root/dist/
#
# Setup apk
RUN apk -U --no-cache add \
build-base \
git \
go \
g++ && \
#
# Setup go, hellpot
cd /root && \
export GOPATH=/opt/go/ && \
mkdir -p /opt/hellpot && \
mkdir -p /opt/go && \
git clone https://github.com/yunginnanet/HellPot && \
cd HellPot && \
git checkout f87b1f17e21b36edae41b7f49d4a54ae420a9bf8 && \
# Hellpot ignores setting the logpath, need to hardcode this :(
sed -i 's#logDir = snek.GetString("logger.directory")#logDir = "/var/log/hellpot/"#g' config/logger.go && \
sed -i 's#tnow := "HellPot"#tnow := "hellpot"#g' config/logger.go && \
go build cmd/HellPot/HellPot.go && \
mv /root/HellPot/HellPot /opt/hellpot/ && \
#
# Setup user, groups and configs
addgroup -g 2000 hellpot && \
adduser -S -s /bin/ash -u 2000 -D -g 2000 hellpot && \
mkdir -p /var/log/hellpot && \
# Hellpot always wants to create a .config folder in the user's home
mkdir -p /home/hellpot/.config/HellPot/logs && \
mv /root/dist/config.toml /home/hellpot/.config/HellPot/ && \
chown hellpot:hellpot -R /home/hellpot && \
#
# Clean up
apk del --purge build-base \
git \
go \
g++ && \
rm -rf /var/cache/apk/* \
/opt/go \
/root/dist
#
# Start hellpot
WORKDIR /opt/hellpot
USER hellpot:hellpot
CMD ["./HellPot"]

docker/hellpot/dist/config.toml
View File

@ -0,0 +1,23 @@
[http]
bind_addr = "0.0.0.0"
bind_port = "8080"
paths = ["wp-login.php","wp-login","wp-json/omapp/v1/support"]
# Unix Socket Listener (will override default)
use_unix_socket = false
unix_socket = "/var/run/hellpot"
[logger]
debug = true
log_directory = "/var/log/hellpot/"
nocolor = true
use_date_filename = false
[performance]
# max_workers is only valid if restrict_concurrency is true
restrict_concurrency = false
max_workers = 256
[deception]
# Used as "Server: " header (if not proxied)
server_name = "nginx"

View File

@ -0,0 +1,20 @@
version: '2.3'
networks:
hellpot_local:
services:
# hellpot service
hellpot:
build: .
container_name: hellpot
restart: always
networks:
- hellpot_local
ports:
- "80:8080"
image: "dtagdevsec/hellpot:2006"
read_only: true
volumes:
- /data/hellpot/log:/var/log/hellpot
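To verify HellPot is responding on one of the configured paths (see config.toml above), assuming the 80:8080 mapping and with <honeypot-ip> as a placeholder; the response should keep streaming until the timeout fires:

curl -s --max-time 15 "http://<honeypot-ip>/wp-login.php" | head -c 300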

View File

@ -1,28 +1,28 @@
FROM alpine:latest FROM alpine:3.14
# #
# Include dist # Include dist
ADD dist/ /root/dist/ ADD dist/ /root/dist/
# #
# Install packages # Install packages
RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \ RUN apk -U --no-cache add \
apk -U --no-cache add \
build-base \ build-base \
git \ git \
libcap \ libcap \
libffi-dev \ libffi-dev \
openssl-dev \ openssl-dev \
libzmq \ py3-pyzmq \
postgresql-dev \ postgresql-dev \
py3-pip \ py3-pip \
python3 \ python3 \
python3-dev \ python3-dev && \
py-virtualenv && \
# #
# Setup heralding # Setup heralding
mkdir -p /opt && \ mkdir -p /opt && \
cd /opt/ && \ cd /opt/ && \
git clone --depth=1 https://github.com/johnnykv/heralding && \ git clone https://github.com/johnnykv/heralding && \
cd heralding && \ cd heralding && \
git checkout c31f99c55c7318c09272d8d9998e560c3d4de9aa && \
pip3 install --upgrade pip && \
pip3 install --no-cache-dir -r requirements.txt && \ pip3 install --no-cache-dir -r requirements.txt && \
pip3 install --no-cache-dir . && \ pip3 install --no-cache-dir . && \
# #
@ -31,7 +31,7 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
adduser -S -H -s /bin/ash -u 2000 -D -g 2000 heralding && \ adduser -S -H -s /bin/ash -u 2000 -D -g 2000 heralding && \
mkdir -p /var/log/heralding/ /etc/heralding && \ mkdir -p /var/log/heralding/ /etc/heralding && \
mv /root/dist/heralding.yml /etc/heralding/ && \ mv /root/dist/heralding.yml /etc/heralding/ && \
setcap cap_net_bind_service=+ep /usr/bin/python3.8 && \ setcap cap_net_bind_service=+ep /usr/bin/python3.9 && \
chown -R heralding:heralding /var/log/heralding && \ chown -R heralding:heralding /var/log/heralding && \
# #
# Clean up # Clean up
@ -42,8 +42,7 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
libffi-dev \ libffi-dev \
libressl-dev \ libressl-dev \
postgresql-dev \ postgresql-dev \
python3-dev \ python3-dev && \
py-virtualenv && \
rm -rf /root/* \ rm -rf /root/* \
/var/cache/apk/* \ /var/cache/apk/* \
/opt/heralding /opt/heralding

View File

@ -62,6 +62,7 @@ capabilities:
timeout: 30 timeout: 30
protocol_specific_data: protocol_specific_data:
max_attempts: 3 max_attempts: 3
banner: "+OK POP3 server ready"
pop3s: pop3s:
enabled: true enabled: true
@ -69,6 +70,7 @@ capabilities:
timeout: 30 timeout: 30
protocol_specific_data: protocol_specific_data:
max_attempts: 3 max_attempts: 3
banner: "+OK POP3 server ready"
# if a .pem file is not found in work dir, a new pem file will be created # if a .pem file is not found in work dir, a new pem file will be created
# using these values # using these values
cert: cert:
@ -157,6 +159,25 @@ capabilities:
# If the fqdn option is commented out or empty, then fqdn of the host will be used # If the fqdn option is commented out or empty, then fqdn of the host will be used
fqdn: "" fqdn: ""
smtps:
enabled: true
port: 465
timeout: 30
protocol_specific_data:
banner: "Microsoft ESMTP MAIL service ready"
# If the fqdn option is commented out or empty, then fqdn of the host will be used
fqdn: ""
cert:
common_name: "*"
country: "US"
state: None
locality: None
organization: None
organizational_unit: None
# how many days should the certificate be valid for
valid_days: 365
serial_number: 0
vnc: vnc:
enabled: true enabled: true
port: 5900 port: 5900

View File

@ -23,6 +23,7 @@ services:
- "110:110" - "110:110"
- "143:143" - "143:143"
- "443:443" - "443:443"
- "465:465"
- "993:993" - "993:993"
- "995:995" - "995:995"
- "1080:1080" - "1080:1080"

View File

@ -0,0 +1,65 @@
FROM alpine:3.14
#
# Include dist
ADD dist/ /root/dist/
#
# Install packages
RUN apk -U add \
build-base \
freetds \
freetds-dev \
gcc \
git \
hiredis \
jpeg-dev \
libcap \
libffi-dev \
libpq \
musl-dev \
openssl \
openssl-dev \
postgresql-dev \
py3-pip \
python3 \
python3-dev \
zlib-dev && \
#
# Install honeypots from GitHub and setup
mkdir -p /opt \
/var/log/honeypots && \
cd /opt/ && \
#git clone https://github.com/qeeqbox/honeypots && \
git clone https://github.com/t3chn0m4g3/honeypots && \
cd honeypots && \
#git checkout 7c654a3ef2c564ae6f1247bf302d652037080163 && \
pip3 install --upgrade pip && \
pip3 install --ignore-installed hiredis packaging && \
pip3 install . && \
setcap cap_net_bind_service=+ep /usr/bin/python3.9 && \
#
# Setup user, groups and configs
addgroup -g 2000 honeypots && \
adduser -S -H -s /bin/ash -u 2000 -D -g 2000 honeypots && \
chown honeypots:honeypots -R /opt/honeypots && \
chown honeypots:honeypots -R /var/log/honeypots && \
mv /root/dist/config.json /opt/honeypots/ && \
#
# Clean up
apk del --purge build-base \
freetds-dev \
git \
jpeg-dev \
libffi-dev \
openssl-dev \
postgresql-dev \
python3-dev \
zlib-dev && \
rm -rf /root/* && \
rm -rf /var/cache/apk/*
#
# Start honeypots
STOPSIGNAL SIGINT
USER honeypots:honeypots
WORKDIR /opt/honeypots/
CMD python3 -m honeypots --setup all --config config.json
#CMD python3 -m honeypots --setup telnet --config config.json

docker/honeypots/dist/config.json
View File

@ -0,0 +1,144 @@
{
"logs":"file,terminal",
"logs_location":"/var/log/honeypots/",
"honeypots": {
"dns": {
"port": 53,
"ip": "0.0.0.0",
"username": "administrator",
"password": "123456"
},
"ftp": {
"port": 21,
"ip": "0.0.0.0",
"username": "ftp",
"password": "anonymous"
},
"httpproxy": {
"port": 8080,
"ip": "0.0.0.0",
"username": "admin",
"password": "admin"
},
"http": {
"port": 80,
"ip": "0.0.0.0",
"username": "admin",
"password": "admin"
},
"https": {
"port": 443,
"ip": "0.0.0.0",
"username": "admin",
"password": "admin"
},
"imap": {
"port": 143,
"ip": "0.0.0.0",
"username": "root",
"password": "123456"
},
"mysql": {
"port": 3306,
"ip": "0.0.0.0",
"username": "root",
"password": "123456"
},
"pop3": {
"port": 110,
"ip": "0.0.0.0",
"username": "root",
"password": "123456"
},
"postgres": {
"port": 5432,
"ip": "0.0.0.0",
"username": "postgres",
"password": "123456"
},
"redis": {
"port": 6379,
"ip": "0.0.0.0",
"username": "root",
"password": ""
},
"smb": {
"port": 445,
"ip": "0.0.0.0",
"username": "administrator",
"password": "123456"
},
"smtp": {
"port": 25,
"ip": "0.0.0.0",
"username": "root",
"password": "123456"
},
"socks5": {
"port": 1080,
"ip": "0.0.0.0",
"username": "admin",
"password": "admin"
},
"ssh": {
"port": 22,
"ip": "0.0.0.0",
"username": "root",
"password": "123456"
},
"telnet": {
"port": 23,
"ip": "0.0.0.0",
"username": "root",
"password": "123456"
},
"vnc": {
"port": 5900,
"ip": "0.0.0.0",
"username": "administrator",
"password": "123456"
},
"elastic": {
"port": 9200,
"ip": "0.0.0.0",
"username": "elastic",
"password": "123456"
},
"mssql": {
"port": 1433,
"ip": "0.0.0.0",
"username": "sa",
"password": ""
},
"ldap": {
"port": 389,
"ip": "0.0.0.0",
"username": "administrator",
"password": "123456"
},
"ntp": {
"port": 123,
"ip": "0.0.0.0",
"username": "administrator",
"password": "123456"
},
"memcache": {
"port": 11211,
"ip": "0.0.0.0",
"username": "admin",
"password": "123456"
},
"oracle": {
"port": 1521,
"ip": "0.0.0.0",
"username": "bi",
"password": "123456"
},
"snmp": {
"port": 161,
"ip": "0.0.0.0",
"username": "privUser",
"password": "123456"
}
}
}

View File

@ -0,0 +1,42 @@
version: '2.3'
networks:
honeypots_local:
services:
# Honeypots service
honeypots:
build: .
container_name: honeypots
stdin_open: true
tty: true
restart: always
tmpfs:
- /tmp:uid=2000,gid=2000
networks:
- honeypots_local
ports:
- "21:21"
- "22:22"
- "23:23"
- "25:25"
- "53:53/udp"
- "80:80"
- "110:110"
- "143:143"
- "389:389"
- "443:443"
- "445:445"
- "1080:1080"
- "1433:1433"
- "3306:3306"
- "5432:5432"
- "5900:5900"
- "6379:6379"
- "8080:8080"
- "9200:9200"
image: "dtagdevsec/honeypots:2006"
read_only: true
volumes:
- /data/honeypots/log:/var/log/honeypots

View File

@ -17,8 +17,9 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
pip install --no-cache-dir virtualenv && \ pip install --no-cache-dir virtualenv && \
# #
# Clone honeypy from git # Clone honeypy from git
git clone --depth=1 https://github.com/foospidy/HoneyPy /opt/honeypy && \ git clone https://github.com/foospidy/HoneyPy /opt/honeypy && \
cd /opt/honeypy && \ cd /opt/honeypy && \
git checkout feccab56ca922bcab01cac4ffd82f588d61ab1c5 && \
sed -i 's/local_host/dest_ip/g' /opt/honeypy/loggers/file/honeypy_file.py && \ sed -i 's/local_host/dest_ip/g' /opt/honeypy/loggers/file/honeypy_file.py && \
sed -i 's/local_port/dest_port/g' /opt/honeypy/loggers/file/honeypy_file.py && \ sed -i 's/local_port/dest_port/g' /opt/honeypy/loggers/file/honeypy_file.py && \
sed -i 's/remote_host/src_ip/g' /opt/honeypy/loggers/file/honeypy_file.py && \ sed -i 's/remote_host/src_ip/g' /opt/honeypy/loggers/file/honeypy_file.py && \
@ -48,7 +49,7 @@ RUN sed -i 's/dl-cdn/dl-2/g' /etc/apk/repositories && \
rm -rf /root/* && \ rm -rf /root/* && \
rm -rf /var/cache/apk/* rm -rf /var/cache/apk/*
# #
# Set workdir and start mailoney # Set workdir and start honeypy
USER honeypy:honeypy USER honeypy:honeypy
WORKDIR /opt/honeypy WORKDIR /opt/honeypy
CMD ["/opt/honeypy/env/bin/python2", "/opt/honeypy/Honey.py", "-d"] CMD ["/opt/honeypy/env/bin/python2", "/opt/honeypy/Honey.py", "-d"]

Some files were not shown because too many files have changed in this diff.