play with layout
@ -1,38 +0,0 @@
#!/bin/bash
# Backup all ES relevant folders
# Make sure ES is available
myES="http://127.0.0.1:64298/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
  then
    echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
    exit
  else
    echo "### Elasticsearch is available, now continuing."
    echo
fi

# Set vars
myCOUNT=1
myDATE=$(date +%Y%m%d%H%M)
myELKPATH="/data/elk/data"
myKIBANAINDEXNAME=$(curl -s -XGET ''$myES'_cat/indices/' | grep .kibana | awk '{ print $4 }')
myKIBANAINDEXPATH=$myELKPATH/nodes/0/indices/$myKIBANAINDEXNAME

# Let's ensure normal operation on exit or if interrupted ...
function fuCLEANUP {
  ### Start T-Pot
  systemctl start tpot
  echo "### Now starting T-Pot ..."
}
trap fuCLEANUP EXIT

# Stop T-Pot to lift db lock
echo "### Now stopping T-Pot"
systemctl stop tpot
sleep 2

# Backup DB in 2 flavors
echo "### Now backing up Elasticsearch folders ..."
tar cvfz "elkall_"$myDATE".tgz" $myELKPATH
tar cvfz "elkbase_"$myDATE".tgz" $myKIBANAINDEXPATH
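The two archives serve different purposes: elkall_<date>.tgz captures the complete Elasticsearch data path, while elkbase_<date>.tgz covers only the .kibana index folder. A minimal restore sketch, assuming GNU tar (which strips the leading / on create) and that T-Pot is stopped first so the db lock is released:

    # restore sketch -- assumes an archive created by the script above
    systemctl stop tpot
    tar xvfz elkall_<date>.tgz -C /
    systemctl start tpot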
@ -1,219 +0,0 @@
#!/bin/bash
# T-Pot Container Data Cleaner & Log Rotator

# Set colors
myRED=$'\e[0;31m'
myGREEN=$'\e[0;32m'
myWHITE=$'\e[0;0m'

# Set persistence
myPERSISTENCE=$1

# Let's create a function to check if folder is empty
fuEMPTY () {
  local myFOLDER=$1

  echo $(ls $myFOLDER | wc -l)
}

# Let's create a function to rotate and compress logs
fuLOGROTATE () {
  local mySTATUS="/etc/tpot/logrotate/status"
  local myCONF="/etc/tpot/logrotate/logrotate.conf"
  local myCOWRIETTYLOGS="/data/cowrie/log/tty/"
  local myCOWRIETTYTGZ="/data/cowrie/log/ttylogs.tgz"
  local myCOWRIEDL="/data/cowrie/downloads/"
  local myCOWRIEDLTGZ="/data/cowrie/downloads.tgz"
  local myDIONAEABI="/data/dionaea/bistreams/"
  local myDIONAEABITGZ="/data/dionaea/bistreams.tgz"
  local myDIONAEABIN="/data/dionaea/binaries/"
  local myDIONAEABINTGZ="/data/dionaea/binaries.tgz"
  local myHONEYTRAPATTACKS="/data/honeytrap/attacks/"
  local myHONEYTRAPATTACKSTGZ="/data/honeytrap/attacks.tgz"
  local myHONEYTRAPDL="/data/honeytrap/downloads/"
  local myHONEYTRAPDLTGZ="/data/honeytrap/downloads.tgz"

  # Ensure correct permissions and ownerships for logrotate to run without issues
  chmod 760 /data/ -R
  chown tpot:tpot /data -R

  # Run logrotate with force (-f) first, so the status file can be written and race conditions (with tar) be avoided
  logrotate -f -s $mySTATUS $myCONF

  # Compress some folders first and rotate them later
  if [ "$(fuEMPTY $myCOWRIETTYLOGS)" != "0" ]; then tar cvfz $myCOWRIETTYTGZ $myCOWRIETTYLOGS; fi
  if [ "$(fuEMPTY $myCOWRIEDL)" != "0" ]; then tar cvfz $myCOWRIEDLTGZ $myCOWRIEDL; fi
  if [ "$(fuEMPTY $myDIONAEABI)" != "0" ]; then tar cvfz $myDIONAEABITGZ $myDIONAEABI; fi
  if [ "$(fuEMPTY $myDIONAEABIN)" != "0" ]; then tar cvfz $myDIONAEABINTGZ $myDIONAEABIN; fi
  if [ "$(fuEMPTY $myHONEYTRAPATTACKS)" != "0" ]; then tar cvfz $myHONEYTRAPATTACKSTGZ $myHONEYTRAPATTACKS; fi
  if [ "$(fuEMPTY $myHONEYTRAPDL)" != "0" ]; then tar cvfz $myHONEYTRAPDLTGZ $myHONEYTRAPDL; fi

  # Ensure correct permissions and ownership for previously created archives
  chmod 760 $myCOWRIETTYTGZ $myCOWRIEDLTGZ $myDIONAEABITGZ $myDIONAEABINTGZ $myHONEYTRAPATTACKSTGZ $myHONEYTRAPDLTGZ
  chown tpot:tpot $myCOWRIETTYTGZ $myCOWRIEDLTGZ $myDIONAEABITGZ $myDIONAEABINTGZ $myHONEYTRAPATTACKSTGZ $myHONEYTRAPDLTGZ

  # Need to remove subfolders since too many files cause rm to exit with errors
  rm -rf $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL

  # Recreate subfolders with correct permissions and ownership
  mkdir -p $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL
  chmod 760 $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL
  chown tpot:tpot $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL

  # Run logrotate again to account for previously created archives - DO NOT FORCE HERE!
  logrotate -s $mySTATUS $myCONF
}

# Let's create a function to clean up and prepare conpot data
fuCONPOT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/conpot/*; fi
  mkdir -p /data/conpot/log
  chmod 760 /data/conpot -R
  chown tpot:tpot /data/conpot -R
}

# Let's create a function to clean up and prepare cowrie data
fuCOWRIE () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/cowrie/*; fi
  mkdir -p /data/cowrie/log/tty/ /data/cowrie/downloads/ /data/cowrie/keys/ /data/cowrie/misc/
  chmod 760 /data/cowrie -R
  chown tpot:tpot /data/cowrie -R
}

# Let's create a function to clean up and prepare dionaea data
fuDIONAEA () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/dionaea/*; fi
  mkdir -p /data/dionaea/log /data/dionaea/bistreams /data/dionaea/binaries /data/dionaea/rtp /data/dionaea/roots/ftp /data/dionaea/roots/tftp /data/dionaea/roots/www /data/dionaea/roots/upnp
  chmod 760 /data/dionaea -R
  chown tpot:tpot /data/dionaea -R
}

# Let's create a function to clean up and prepare elasticpot data
fuELASTICPOT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/elasticpot/*; fi
  mkdir -p /data/elasticpot/log
  chmod 760 /data/elasticpot -R
  chown tpot:tpot /data/elasticpot -R
}

# Let's create a function to clean up and prepare elk data
fuELK () {
  # ELK data will be kept for <= 90 days, check /etc/crontab for curator modification
  # ELK daemon log files will be removed
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/elk/log/*; fi
  mkdir -p /data/elk
  chmod 760 /data/elk -R
  chown tpot:tpot /data/elk -R
}

# Let's create a function to clean up and prepare emobility data
fuEMOBILITY () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/emobility/*; fi
  mkdir -p /data/emobility/log
  chmod 760 /data/emobility -R
  chown tpot:tpot /data/emobility -R
}

# Let's create a function to clean up and prepare glastopf data
fuGLASTOPF () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/glastopf/*; fi
  mkdir -p /data/glastopf
  chmod 760 /data/glastopf -R
  chown tpot:tpot /data/glastopf -R
}

# Let's create a function to clean up and prepare honeytrap data
fuHONEYTRAP () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/honeytrap/*; fi
  mkdir -p /data/honeytrap/log/ /data/honeytrap/attacks/ /data/honeytrap/downloads/
  chmod 760 /data/honeytrap/ -R
  chown tpot:tpot /data/honeytrap/ -R
}

# Let's create a function to clean up and prepare mailoney data
fuMAILONEY () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/mailoney/*; fi
  mkdir -p /data/mailoney/log/
  chmod 760 /data/mailoney/ -R
  chown tpot:tpot /data/mailoney/ -R
}

# Let's create a function to clean up and prepare rdpy data
fuRDPY () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/rdpy/*; fi
  mkdir -p /data/rdpy/log/
  chmod 760 /data/rdpy/ -R
  chown tpot:tpot /data/rdpy/ -R
}

# Let's create a function to prepare spiderfoot db
fuSPIDERFOOT () {
  mkdir -p /data/spiderfoot
  touch /data/spiderfoot/spiderfoot.db
  chmod 760 -R /data/spiderfoot
  chown tpot:tpot -R /data/spiderfoot
}

# Let's create a function to clean up and prepare suricata data
fuSURICATA () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/suricata/*; fi
  mkdir -p /data/suricata/log
  chmod 760 -R /data/suricata
  chown tpot:tpot -R /data/suricata
}

# Let's create a function to clean up and prepare p0f data
fuP0F () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/p0f/*; fi
  mkdir -p /data/p0f/log
  chmod 760 -R /data/p0f
  chown tpot:tpot -R /data/p0f
}

# Let's create a function to clean up and prepare vnclowpot data
fuVNCLOWPOT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/vnclowpot/*; fi
  mkdir -p /data/vnclowpot/log/
  chmod 760 /data/vnclowpot/ -R
  chown tpot:tpot /data/vnclowpot/ -R
}


# Avoid unwanted cleaning
if [ "$myPERSISTENCE" = "" ];
  then
    echo $myRED"!!! WARNING !!! - This will delete ALL honeypot logs. "$myWHITE
    while [ "$myQST" != "y" ] && [ "$myQST" != "n" ];
      do
        read -p "Continue? (y/n) " myQST
      done
    if [ "$myQST" = "n" ];
      then
        echo $myGREEN"Puuh! That was close! Aborting!"$myWHITE
        exit
    fi
fi

# Check persistence, if enabled compress and rotate logs
if [ "$myPERSISTENCE" = "on" ];
  then
    echo "Persistence enabled, now rotating and compressing logs."
    fuLOGROTATE
  else
    echo "Cleaning up and preparing data folders."
    fuCONPOT
    fuCOWRIE
    fuDIONAEA
    fuELASTICPOT
    fuELK
    fuEMOBILITY
    fuGLASTOPF
    fuHONEYTRAP
    fuMAILONEY
    fuRDPY
    fuSPIDERFOOT
    fuSURICATA
    fuP0F
    fuVNCLOWPOT
fi
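The persistence flag is taken verbatim from $1, so the cleaner supports three modes; typical invocations might look like this (the script's install path is not shown in this diff, so the name is an assumption):

    ./clean.sh on    # persistence on: rotate and compress logs via fuLOGROTATE
    ./clean.sh off   # any value other than "on": wipe and re-create the data folders
    ./clean.sh       # no argument: ask "Continue? (y/n)" before wiping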
@ -1,71 +0,0 @@
#!/bin/bash
# Show current status of all running containers
myPARAM="$1"
myIMAGES="$(cat /etc/tpot/tpot.yml | grep -v '#' | grep container_name | cut -d: -f2)"
myRED=$'\e[1;31m'
myGREEN=$'\e[1;32m'
myBLUE=$'\e[1;34m'
myWHITE=$'\e[0;0m'
myMAGENTA=$'\e[1;35m'

function fuCONTAINERSTATUS {
  local myNAME="$1"
  local mySTATUS="$(/usr/bin/docker ps -f name=$myNAME --format "table {{.Status}}" -f status=running -f status=exited | tail -n 1)"
  myDOWN="$(echo "$mySTATUS" | grep -o -E "(STATUS|NAMES|Exited)")"

  case "$myDOWN" in
    STATUS)
      mySTATUS="$myRED"DOWN"$myWHITE"
      ;;
    NAMES)
      mySTATUS="$myRED"DOWN"$myWHITE"
      ;;
    Exited)
      mySTATUS="$myRED$mySTATUS$myWHITE"
      ;;
    *)
      mySTATUS="$myGREEN$mySTATUS$myWHITE"
      ;;
  esac

  printf "$mySTATUS"
}

function fuCONTAINERPORTS {
  local myNAME="$1"
  local myPORTS="$(/usr/bin/docker ps -f name=$myNAME --format "table {{.Ports}}" -f status=running -f status=exited | tail -n 1 | sed s/","/",\n\t\t\t\t\t\t\t"/g)"

  if [ "$myPORTS" != "PORTS" ];
    then
      printf "$myBLUE$myPORTS$myWHITE"
  fi
}

function fuGETSYS {
  printf "========| System |========\n"
  printf "%+10s %-20s\n" "Date: " "$(date)"
  printf "%+10s %-20s\n" "Uptime: " "$(uptime | cut -b 2-)"
  printf "%+10s %-20s\n" "CPU temp: " "$(sensors | grep 'Physical' | awk '{ print $4" " }' | tr -d [:cntrl:])"
  echo
}

while true
  do
    fuGETSYS
    printf "%-19s %-36s %s\n" "NAME" "STATUS" "PORTS"
    for i in $myIMAGES; do
      myNAME="$myMAGENTA$i$myWHITE"
      printf "%-32s %-49s %s" "$myNAME" "$(fuCONTAINERSTATUS $i)" "$(fuCONTAINERPORTS $i)"
      echo
      if [ "$myPARAM" = "vv" ];
        then
          /usr/bin/docker exec -t "$i" /bin/ps awfuwfxwf | egrep -v -E "awfuwfxwf|/bin/ps"
      fi
    done
    if [[ $myPARAM =~ ^([1-9]|[1-9][0-9]|[1-9][0-9][0-9])$ ]];
      then
        sleep "$myPARAM"
      else
        break
    fi
  done
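Given the parameter handling above, the status viewer can be driven in three ways (the script name is an assumption; it is not part of this diff):

    ./dps.sh        # one-shot listing of name, status and ports
    ./dps.sh vv     # additionally print the process tree inside each container
    ./dps.sh 30     # refresh every 30 seconds (any value from 1 to 999 matches the regex)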
@ -1,45 +0,0 @@
#!/bin/bash
# Dump all ES data
# Make sure ES is available
myES="http://127.0.0.1:64298/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
  then
    echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
    exit
  else
    echo "### Elasticsearch is available, now continuing."
    echo
fi

# Let's ensure normal operation on exit or if interrupted ...
function fuCLEANUP {
  rm -rf tmp
}
trap fuCLEANUP EXIT

# Set vars
myDATE=$(date +%Y%m%d%H%M)
myINDICES=$(curl -s -XGET ''$myES'_cat/indices/' | grep logstash | awk '{ print $3 }' | sort | grep -v 1970)
myES="http://127.0.0.1:64298/"
myCOL1=$'\e[0;34m'
myCOL0=$'\e[0;0m'

# Dumping all ES data
echo $myCOL1"### The following indices will be dumped: "$myCOL0
echo $myINDICES
echo

mkdir tmp
for i in $myINDICES;
  do
    echo $myCOL1"### Now dumping: "$i $myCOL0
    elasticdump --input=$myES$i --output="tmp/"$i --limit 7500
    echo $myCOL1"### Now compressing: tmp/$i" $myCOL0
    gzip -f "tmp/"$i
  done;

# Build tar archive
echo $myCOL1"### Now building tar archive: es_dump_"$myDATE".tar" $myCOL0
tar cvf es_dump_$myDATE.tar tmp/*
echo $myCOL1"### Done."$myCOL0
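The resulting es_dump_<date>.tar pairs with the folder-based restore script further below, whose usage line names it explicitly:

    ./restore-elk.sh es_dump_<date>.tar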
@ -1,77 +0,0 @@
#!/bin/bash
# Export all Kibana objects
# Make sure ES is available
myES="http://127.0.0.1:64298/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
  then
    echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
    exit
  else
    echo "### Elasticsearch is available, now continuing."
    echo
fi

# Set vars
myDATE=$(date +%Y%m%d%H%M)
myINDEXCOUNT=$(curl -s -XGET ''$myES'.kibana/index-pattern/logstash-*' | tr '\\' '\n' | grep "scripted" | wc -w)
myDASHBOARDS=$(curl -s -XGET ''$myES'.kibana/dashboard/_search?filter_path=hits.hits._id&pretty&size=10000' | jq '.hits.hits[] | {_id}' | jq -r '._id')
myVISUALIZATIONS=$(curl -s -XGET ''$myES'.kibana/visualization/_search?filter_path=hits.hits._id&pretty&size=10000' | jq '.hits.hits[] | {_id}' | jq -r '._id')
mySEARCHES=$(curl -s -XGET ''$myES'.kibana/search/_search?filter_path=hits.hits._id&pretty&size=10000' | jq '.hits.hits[] | {_id}' | jq -r '._id')
myCOL1=$'\e[0;34m'
myCOL0=$'\e[0;0m'

# Let's ensure normal operation on exit or if interrupted ...
function fuCLEANUP {
  rm -rf patterns/ dashboards/ visualizations/ searches/
}
trap fuCLEANUP EXIT

# Export index patterns
mkdir -p patterns
echo $myCOL1"### Now exporting"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
curl -s -XGET ''$myES'.kibana/index-pattern/logstash-*?' | jq '._source' > patterns/index-patterns.json
echo

# Export dashboards
mkdir -p dashboards
echo $myCOL1"### Now exporting"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"dashboards." $myCOL0
for i in $myDASHBOARDS;
  do
    echo $myCOL1"###### "$i $myCOL0
    curl -s -XGET ''$myES'.kibana/dashboard/'$i'' | jq '._source' > dashboards/$i.json
  done;
echo

# Export visualizations
mkdir -p visualizations
echo $myCOL1"### Now exporting"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1"visualizations." $myCOL0
for i in $myVISUALIZATIONS;
  do
    echo $myCOL1"###### "$i $myCOL0
    curl -s -XGET ''$myES'.kibana/visualization/'$i'' | jq '._source' > visualizations/$i.json
  done;
echo

# Export searches
mkdir -p searches
echo $myCOL1"### Now exporting"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searches." $myCOL0
for i in $mySEARCHES;
  do
    echo $myCOL1"###### "$i $myCOL0
    curl -s -XGET ''$myES'.kibana/search/'$i'' | jq '._source' > searches/$i.json
  done;
echo

# Build tar archive
echo $myCOL1"### Now building archive"$myCOL0 "kibana-objects_"$myDATE".tgz"
tar cvfz kibana-objects_$myDATE.tgz patterns dashboards visualizations searches > /dev/null

# Stats
echo
echo $myCOL1"### Statistics"
echo $myCOL1"###### Exported"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
echo $myCOL1"###### Exported"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"dashboards." $myCOL0
echo $myCOL1"###### Exported"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1"visualizations." $myCOL0
echo $myCOL1"###### Exported"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searches." $myCOL0
echo
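The kibana-objects_<date>.tgz archive pairs with the import script right below; per its own usage line:

    ./restore-kibana-objects.sh kibana-objects_<date>.tgz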
@ -1,91 +0,0 @@
#!/bin/bash
# Import Kibana objects
# Make sure ES is available
myES="http://127.0.0.1:64298/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
  then
    echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
    exit
  else
    echo "### Elasticsearch is available, now continuing."
    echo
fi

# Set vars
myDUMP=$1
myCOL1=$'\e[0;34m'
myCOL0=$'\e[0;0m'

# Let's ensure normal operation on exit or if interrupted ...
function fuCLEANUP {
  rm -rf patterns/ dashboards/ visualizations/ searches/
}
trap fuCLEANUP EXIT

# Check if parameter is given and file exists
if [ "$myDUMP" = "" ];
  then
    echo $myCOL1"### Please provide a backup file name."$myCOL0
    echo $myCOL1"### restore-kibana-objects.sh <kibana-objects.tgz>"$myCOL0
    echo
    exit
fi
if ! [ -a $myDUMP ];
  then
    echo $myCOL1"### File not found."$myCOL0
    exit
fi

# Unpack tar
tar xvfz $myDUMP > /dev/null

# Restore index patterns
myINDEXCOUNT=$(cat patterns/index-patterns.json | tr '\\' '\n' | grep "scripted" | wc -w)
echo $myCOL1"### Now importing"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
curl -s -XDELETE ''$myES'.kibana/index-pattern/logstash-*' > /dev/null
curl -s -XPUT ''$myES'.kibana/index-pattern/logstash-*' -T patterns/index-patterns.json > /dev/null
echo

# Restore dashboards
myDASHBOARDS=$(ls dashboards/*.json | cut -c 12- | rev | cut -c 6- | rev)
echo $myCOL1"### Now importing "$myCOL0$(echo $myDASHBOARDS | wc -w)$myCOL1 "dashboards." $myCOL0
for i in $myDASHBOARDS;
  do
    echo $myCOL1"###### "$i $myCOL0
    curl -s -XDELETE ''$myES'.kibana/dashboard/'$i'' > /dev/null
    curl -s -XPUT ''$myES'.kibana/dashboard/'$i'' -T dashboards/$i.json > /dev/null
  done;
echo

# Restore visualizations
myVISUALIZATIONS=$(ls visualizations/*.json | cut -c 16- | rev | cut -c 6- | rev)
echo $myCOL1"### Now importing "$myCOL0$(echo $myVISUALIZATIONS | wc -w)$myCOL1 "visualizations." $myCOL0
for i in $myVISUALIZATIONS;
  do
    echo $myCOL1"###### "$i $myCOL0
    curl -s -XDELETE ''$myES'.kibana/visualization/'$i'' > /dev/null
    curl -s -XPUT ''$myES'.kibana/visualization/'$i'' -T visualizations/$i.json > /dev/null
  done;
echo

# Restore searches
mySEARCHES=$(ls searches/*.json | cut -c 10- | rev | cut -c 6- | rev)
echo $myCOL1"### Now importing "$myCOL0$(echo $mySEARCHES | wc -w)$myCOL1 "searches." $myCOL0
for i in $mySEARCHES;
  do
    echo $myCOL1"###### "$i $myCOL0
    curl -s -XDELETE ''$myES'.kibana/search/'$i'' > /dev/null
    curl -s -XPUT ''$myES'.kibana/search/'$i'' -T searches/$i.json > /dev/null
  done;
echo

# Stats
echo
echo $myCOL1"### Statistics"
echo $myCOL1"###### Imported"$myCOL0 $myINDEXCOUNT $myCOL1"index patterns." $myCOL0
echo $myCOL1"###### Imported"$myCOL0 $(echo $myDASHBOARDS | wc -w) $myCOL1"dashboards." $myCOL0
echo $myCOL1"###### Imported"$myCOL0 $(echo $myVISUALIZATIONS | wc -w) $myCOL1"visualizations." $myCOL0
echo $myCOL1"###### Imported"$myCOL0 $(echo $mySEARCHES | wc -w) $myCOL1"searches." $myCOL0
echo
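The `ls | cut -c N- | rev | cut -c 6- | rev` pipelines above strip the directory prefix and the .json suffix to recover the object IDs; an equivalent, offset-free sketch using basename would be:

    for f in dashboards/*.json; do basename "$f" .json; done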
@ -1,88 +0,0 @@
#!/bin/bash

## Get my external IP

timeout=2 # seconds to wait for a reply before trying next server
verbose=1 # prints which server was used to STDERR

dnslist=(
  "dig +short myip.opendns.com @resolver1.opendns.com"
  "dig +short myip.opendns.com @resolver2.opendns.com"
  "dig +short myip.opendns.com @resolver3.opendns.com"
  "dig +short myip.opendns.com @resolver4.opendns.com"
  "dig +short -4 -t a whoami.akamai.net @ns1-1.akamaitech.net"
  "dig +short whoami.akamai.net @ns1-1.akamaitech.net"
)

httplist=(
  alma.ch/myip.cgi
  api.infoip.io/ip
  api.ipify.org
  bot.whatismyipaddress.com
  canhazip.com
  checkip.amazonaws.com
  eth0.me
  icanhazip.com
  ident.me
  ipecho.net/plain
  ipinfo.io/ip
  ipof.in/txt
  ip.tyk.nu
  l2.io/ip
  smart-ip.net/myip
  wgetip.com
  whatismyip.akamai.com
)

# function to shuffle the global array "array"
shuffle() {
  local i tmp size max rand
  size=${#array[*]}
  max=$(( 32768 / size * size ))
  for ((i=size-1; i>0; i--)); do
    while (( (rand=$RANDOM) >= max )); do :; done
    rand=$(( rand % (i+1) ))
    tmp=${array[i]} array[i]=${array[rand]} array[rand]=$tmp
  done
}

# if we have dig and a list of dns methods, try that first
if hash dig 2>/dev/null && [ ${#dnslist[*]} -gt 0 ]; then
  eval array=( \"\${dnslist[@]}\" )
  shuffle

  for cmd in "${array[@]}"; do
    [ "$verbose" == 1 ] && echo Trying: $cmd 1>&2
    ip=$(timeout $timeout $cmd)
    if [ -n "$ip" ]; then
      echo $ip
      exit
    fi
  done
fi

# if we haven't succeeded with DNS, try HTTP
if [ ${#httplist[*]} == 0 ]; then
  echo "No hosts in httplist array!" >&2
  exit 1
fi

# use curl or wget, depending on which one we find
curl_or_wget=$(if hash curl 2>/dev/null; then echo curl; elif hash wget 2>/dev/null; then echo "wget -qO-"; fi);

if [ -z "$curl_or_wget" ]; then
  echo "Neither curl nor wget found. Cannot use http method." >&2
  exit 1
fi

eval array=( \"\${httplist[@]}\" )
shuffle

for url in "${array[@]}"; do
  [ "$verbose" == 1 ] && echo Trying: $curl_or_wget -s "$url" 1>&2
  ip=$(timeout $timeout $curl_or_wget -s "$url")
  if [ -n "$ip" ]; then
    echo $ip
    exit
  fi
done
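The IP update script further below calls this helper at a fixed path, so standalone use looks like:

    myEXTIP=$(/usr/share/tpot/bin/myip.sh)
    echo "External IP: $myEXTIP"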
@ -1,61 +0,0 @@
#!/bin/bash
# Restore folder based ES backup
# Make sure ES is available
myES="http://127.0.0.1:64298/"
myESSTATUS=$(curl -s -XGET ''$myES'_cluster/health' | jq '.' | grep -c green)
if ! [ "$myESSTATUS" = "1" ]
  then
    echo "### Elasticsearch is not available, try starting via 'systemctl start elk'."
    exit
  else
    echo "### Elasticsearch is available, now continuing."
fi

# Let's ensure normal operation on exit or if interrupted ...
function fuCLEANUP {
  rm -rf tmp
}
trap fuCLEANUP EXIT

# Set vars
myDUMP=$1
myCOL1=$'\e[0;34m'
myCOL0=$'\e[0;0m'

# Check if parameter is given and file exists
if [ "$myDUMP" = "" ];
  then
    echo $myCOL1"### Please provide a backup file name."$myCOL0
    echo $myCOL1"### restore-elk.sh <es_dump.tar>"$myCOL0
    echo
    exit
fi
if ! [ -a $myDUMP ];
  then
    echo $myCOL1"### File not found."$myCOL0
    exit
fi

# Unpack tar archive
echo $myCOL1"### Now unpacking tar archive: "$myDUMP $myCOL0
tar xvf $myDUMP

# Build indices list
myINDICES=$(ls tmp/logstash*.gz | cut -c 5- | rev | cut -c 4- | rev)
echo $myCOL1"### The following indices will be restored: "$myCOL0
echo $myINDICES
echo

# Restore indices
for i in $myINDICES;
  do
    # Delete index if it already exists
    curl -s -XDELETE $myES$i > /dev/null
    echo $myCOL1"### Now uncompressing: tmp/$i.gz" $myCOL0
    gunzip -f tmp/$i.gz
    # Restore index to ES
    echo $myCOL1"### Now restoring: "$i $myCOL0
    elasticdump --input=tmp/$i --output=$myES$i --limit 7500
    rm tmp/$i
  done;
echo $myCOL1"### Done."$myCOL0
@ -1,24 +0,0 @@
#!/bin/bash
# Let's add the first local ip to the /etc/issue and external ip to ews.ip file
# If the external IP cannot be detected, the internal IP will be inherited.
source /etc/environment
myLOCALIP=$(hostname -I | awk '{ print $1 }')
myEXTIP=$(/usr/share/tpot/bin/myip.sh)
if [ "$myEXTIP" = "" ];
  then
    myEXTIP=$myLOCALIP
fi
# ANSI reset, appended so the banner colors do not bleed into the login prompt
myRESET=$'\e[0m'
sed -i "s#IP:.*#IP: $myLOCALIP ($myEXTIP)$myRESET#" /etc/issue
sed -i "s#SSH:.*#SSH: ssh -l tsec -p 64295 $myLOCALIP$myRESET#" /etc/issue
sed -i "s#WEB:.*#WEB: https://$myLOCALIP:64297$myRESET#" /etc/issue
tee /data/ews/conf/ews.ip << EOF
[MAIN]
ip = $myEXTIP
EOF
tee /etc/tpot/elk/environment << EOF
MY_EXTIP=$myEXTIP
MY_INTIP=$myLOCALIP
MY_HOSTNAME=$HOSTNAME
EOF
chown tpot:tpot /data/ews/conf/ews.ip
chmod 760 /data/ews/conf/ews.ip
@ -1,144 +0,0 @@
#
# Run-time configuration file for dialog
#
# Automatically generated by "dialog --create-rc <file>"
#
#
# Types of values:
#
# Number - <number>
# String - "string"
# Boolean - <ON|OFF>
# Attribute - (foreground,background,highlight?)

# Set aspect-ratio.
aspect = 0

# Set separator (for multiple widgets output).
separate_widget = ""

# Set tab-length (for textbox tab-conversion).
tab_len = 0

# Make tab-traversal for checklist, etc., include the list.
visit_items = OFF

# Shadow dialog boxes? This also turns on color.
use_shadow = ON

# Turn color support ON or OFF
use_colors = ON

# Screen color
screen_color = (WHITE,MAGENTA,ON)

# Shadow color
shadow_color = (BLACK,BLACK,ON)

# Dialog box color
dialog_color = (BLACK,WHITE,OFF)

# Dialog box title color
title_color = (MAGENTA,WHITE,OFF)

# Dialog box border color
border_color = (WHITE,WHITE,ON)

# Active button color
button_active_color = (WHITE,MAGENTA,OFF)

# Inactive button color
button_inactive_color = dialog_color

# Active button key color
button_key_active_color = button_active_color

# Inactive button key color
button_key_inactive_color = (RED,WHITE,OFF)

# Active button label color
button_label_active_color = (YELLOW,MAGENTA,ON)

# Inactive button label color
button_label_inactive_color = (BLACK,WHITE,OFF)

# Input box color
inputbox_color = dialog_color

# Input box border color
inputbox_border_color = dialog_color

# Search box color
searchbox_color = dialog_color

# Search box title color
searchbox_title_color = title_color

# Search box border color
searchbox_border_color = border_color

# File position indicator color
position_indicator_color = title_color

# Menu box color
menubox_color = dialog_color

# Menu box border color
menubox_border_color = border_color

# Item color
item_color = dialog_color

# Selected item color
item_selected_color = button_active_color

# Tag color
tag_color = title_color

# Selected tag color
tag_selected_color = button_label_active_color

# Tag key color
tag_key_color = button_key_inactive_color

# Selected tag key color
tag_key_selected_color = (RED,MAGENTA,ON)

# Check box color
check_color = dialog_color

# Selected check box color
check_selected_color = button_active_color

# Up arrow color
uarrow_color = (MAGENTA,WHITE,ON)

# Down arrow color
darrow_color = uarrow_color

# Item help-text color
itemhelp_color = (WHITE,BLACK,OFF)

# Active form text color
form_active_text_color = button_active_color

# Form text color
form_text_color = (WHITE,CYAN,ON)

# Readonly form item color
form_item_readonly_color = (CYAN,WHITE,ON)

# Dialog box gauge color
gauge_color = title_color

# Dialog box border2 color
border2_color = dialog_color

# Input box border2 color
inputbox_border2_color = dialog_color

# Search box border2 color
searchbox_border2_color = dialog_color

# Menu box border2 color
menubox_border2_color = dialog_color
@ -1,20 +0,0 @@
[H[2J
[0;35m┌───────────────[1;35m────────────────[0;37m───────────────┐[0m
[0;35m│[0m [0;35m_____[0m [1;35m____[0m [1;35m_[0m [0;37m_[0m [0;37m_____[0m [0;37m_[0m [0;1;30;90m___[0m [0;1;30;90m│[0m
[1;35m│|_[0m [1;35m_|[0m [1;35m|[0m [1;35m_[0m [0;37m\\[0m [0;37m___[0m [0;37m|[0m [0;37m|_[0m [0;37m/[0m [0;1;30;90m|___[0m [0;1;30;90m/[0m [0;1;30;90m|/[0m [0;1;30;90m_[0m [0;1;30;90m\\[0m [0;1;30;90m│[0m
[1;35m│[0m [1;35m|[0m [1;35m|__[0;37m___|[0m [0;37m|_)[0m [0;37m/[0m [0;37m_[0m [0;37m\\|[0m [0;1;30;90m__|[0m [0;1;30;90m|[0m [0;1;30;90m|[0m [0;1;30;90m/[0m [0;1;30;90m/|[0m [0;35m|[0m [0;35m|[0m [0;35m|[0m [0;35m|│[0m
[0;37m│[0m [0;37m|[0m [0;37m|_____|[0m [0;37m__[0;1;30;90m/[0m [0;1;30;90m(_)[0m [0;1;30;90m|[0m [0;1;30;90m|_[0m [0;1;30;90m|[0m [0;35m|[0m [0;35m/[0m [0;35m/_|[0m [0;35m|[0m [0;35m|_|[0m [0;35m|│[0m
[0;37m│[0m [0;37m|_|[0m [0;1;30;90m|_|[0m [0;1;30;90m\\___/[0m [0;1;30;90m\\[0;35m__|[0m [0;35m|_|/_/(_)_[1;35m|\\___/[0m [1;35m│[0m
[0;1;30;90m│[0m [1;35m│[0m
[0;1;30;90m└───────[0;35m────────────────[1;35m────────────────[0;37m───────┘[0m

,---- [ [1;35m\n[0m ] [ [0;35m\d[0m ] [ [1;30m\t[0m ]
|
| [1;35mIP:[0m
| [0;35mSSH:[0m
| [1;30mWEB:[0m
|
`----

@ -1,96 +0,0 @@
user www-data;
worker_processes auto;
pid /run/nginx.pid;

events {
	worker_connections 768;
	# multi_accept on;
}

http {

	##
	# Basic Settings
	##

	sendfile on;
	tcp_nopush on;
	tcp_nodelay on;
	keepalive_timeout 65;
	types_hash_max_size 2048;
	# server_tokens off;

	# server_names_hash_bucket_size 64;
	# server_name_in_redirect off;

	include /etc/nginx/mime.types;
	default_type application/octet-stream;

	##
	# SSL Settings
	##

	ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
	ssl_prefer_server_ciphers on;

	##
	# Logging Settings
	##

	log_format le_json '{ "timestamp": "$time_iso8601", '
		'"src_ip": "$remote_addr", '
		'"remote_user": "$remote_user", '
		'"body_bytes_sent": "$body_bytes_sent", '
		'"request_time": "$request_time", '
		'"status": "$status", '
		'"request": "$request", '
		'"request_method": "$request_method", '
		'"http_referrer": "$http_referer", '
		'"http_user_agent": "$http_user_agent" }';

	access_log /var/log/nginx/access.log le_json;
	error_log /var/log/nginx/error.log;

	##
	# Gzip Settings
	##

	gzip on;
	gzip_disable "msie6";

	# gzip_vary on;
	# gzip_proxied any;
	# gzip_comp_level 6;
	# gzip_buffers 16 8k;
	# gzip_http_version 1.1;
	# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;

	##
	# Virtual Host Configs
	##

	include /etc/nginx/conf.d/*.conf;
	include /etc/nginx/sites-enabled/*;
}


#mail {
#	# See sample authentication script at:
#	# http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
#	# auth_http localhost/auth.php;
#	# pop3_capabilities "TOP" "USER";
#	# imap_capabilities "IMAP4rev1" "UIDPLUS";
#
#	server {
#		listen localhost:110;
#		protocol pop3;
#		proxy on;
#	}
#
#	server {
#		listen localhost:143;
#		protocol imap;
#		proxy on;
#	}
#}
@ -1,13 +0,0 @@
-----BEGIN DH PARAMETERS-----
MIICCAKCAgEAiHmfakVLOStSULBdaTbZY/zeFyEeQ19GY9Z5CJg06dIIgIzhxk9L
4xsQdQk8giKOjP6SfX0ZgF5CYaurQ3ljYlP0UlAQQo9+fEErbqj3hCzAxtIpd6Yj
SV6zFdnSjwxWuKAPPywiQNljnHH+Y1KBdbl5VQ9gC3ehtaLo1A4y8q96f6fC5rGU
nfgw4lTxLvPD7NwaOdFTCyK8tTxvUGNJIvf7805IxZ0BvAiBuVaXStaMcqf5BHLP
fYpvIiVaCrtto4elu18nL0tf2CN5n9ai4hlr0nPmNrE/Zrrur78Re5F4Ien9kr4d
xabXvVJJQa9j2NdQO7vk7Cz/dAIiqt/1XKFhll4TTYBqrFVXIwF+FNx636zyOjcO
nlZk/V+IL/UTPnZOv2PGt5+WetvJJubi6B9XgOgVLduI07woAp5qnRJJt6fJW1aA
M86By6WLy5P31Py6eFj8nYgj1V703XgQ5lESKYpeVgqA0bh7daNzOCoGQvvUKlTP
RTu6fs7clw5ta4yYUyvuIKTngH5yGBNdTuP0GWo6Y+Dy1BctVwl2xSw+FhYeuIf/
EB2A3129H59HhbWyNH337+1dfntHfQRXBsT0YSyDxPurI5/FNGcmw+GZEYk4BB8j
g7TwH3GBjbKnjnr7SnhanqmWgybgQw6oR9gDC399eR4LiOk9sbxpX1MCAQI=
-----END DH PARAMETERS-----
@ -1,12 +0,0 @@
#!/bin/bash

# Got root?
myWHOAMI=$(whoami)
if [ "$myWHOAMI" != "root" ]
  then
    echo "Need to run as root ..."
    exit
fi

openssl req -nodes -x509 -sha512 -newkey rsa:8192 -keyout "nginx.key" -out "nginx.crt" -days 3650
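A quick sanity check on the generated self-signed certificate (a sketch; run from the same directory):

    openssl x509 -in nginx.crt -noout -subject -dates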
@ -1,16 +0,0 @@
#!/bin/bash

# Got root?
myWHOAMI=$(whoami)
if [ "$myWHOAMI" != "root" ]
  then
    echo "Need to run as root ..."
    exit
fi

if [ "$1" = "2048" ] || [ "$1" = "4096" ] || [ "$1" = "8192" ]
  then
    openssl dhparam -outform PEM -out dhparam$1.pem $1
  else
    echo "Usage: ./gen-dhparam [2048, 4096, 8192]..."
fi
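The T-Pot nginx config below expects /etc/nginx/ssl/dhparam4096.pem, so the matching invocation is:

    ./gen-dhparam 4096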
@ -1,155 +0,0 @@
############################################
### NGINX T-Pot configuration file by mo ###
############################################

###################################
### Allow for 60 reloads per minute
###################################
limit_req_zone $binary_remote_addr zone=base:1m rate=1r/s;

server {

    #########################
    ### Basic server settings
    #########################
    listen 64297 ssl http2;
    index tpotweb.html;
    ssl_protocols TLSv1.2;
    server_name example.com;
    error_page 300 301 302 400 401 402 403 404 500 501 502 503 504 /error.html;


    ##############################################
    ### Remove version number, add different header
    ##############################################
    server_tokens off;
    more_set_headers 'Server: apache';


    ##############################################
    ### SSL settings and Cipher Suites
    ##############################################
    ssl_certificate /etc/nginx/ssl/nginx.crt;
    ssl_certificate_key /etc/nginx/ssl/nginx.key;

    ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH:!DHE:!SHA:!SHA256';
    ssl_ecdh_curve secp384r1;
    ssl_dhparam /etc/nginx/ssl/dhparam4096.pem;

    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;


    ####################################
    ### OWASP recommendations / settings
    ####################################

    ### Size Limits & Buffer Overflows
    ### The size may be configured based on your needs.
    client_body_buffer_size 100K;
    client_header_buffer_size 1k;
    client_max_body_size 100k;
    large_client_header_buffers 2 1k;

    ### Mitigate Slow HTTP DoS Attack
    ### Timeouts definition ##
    client_body_timeout 10;
    client_header_timeout 10;
    keepalive_timeout 5 5;
    send_timeout 10;

    ### X-Frame-Options is to prevent from clickjacking attack
    add_header X-Frame-Options SAMEORIGIN;

    ### Disable content-type sniffing on some browsers.
    add_header X-Content-Type-Options nosniff;

    ### This header enables the Cross-site scripting (XSS) filter
    add_header X-XSS-Protection "1; mode=block";

    ### This will enforce HTTP browsing into HTTPS and avoid ssl stripping attack
    add_header Strict-Transport-Security "max-age=31536000; includeSubdomains;";


    ##################################
    ### Restrict access and basic auth
    ##################################

    # satisfy all;
    satisfy any;

    # allow 10.0.0.0/8;
    # allow 172.16.0.0/12;
    # allow 192.168.0.0/16;
    allow 127.0.0.1;
    allow ::1;
    deny all;

    auth_basic "closed site";
    auth_basic_user_file /etc/nginx/nginxpasswd;


    ##############################
    ### Limit brute-force attempts
    ##############################
    location = / {
        limit_req zone=base burst=1 nodelay;
    }


    #################
    ### Proxied sites
    #################

    ### Kibana
    location /kibana/ {
        proxy_pass http://localhost:64296;
        rewrite /kibana/(.*)$ /$1 break;
    }

    ### ES
    location /es/ {
        proxy_pass http://localhost:64298/;
        rewrite /es/(.*)$ /$1 break;
    }

    ### head standalone
    location /myhead/ {
        proxy_pass http://localhost:64302/;
        rewrite /myhead/(.*)$ /$1 break;
    }

    ### portainer
    location /ui {
        proxy_pass http://127.0.0.1:64299;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $http_connection;
        proxy_set_header Host $host;
        proxy_redirect off;
        rewrite /ui/(.*)$ /$1 break;
    }

    ### web tty
    location /wetty {
        proxy_pass http://127.0.0.1:64300/wetty;
    }

    ### netdata
    location /netdata/ {
        proxy_pass http://localhost:64301;
        rewrite /netdata/(.*)$ /$1 break;
    }

    ### spiderfoot
    location /spiderfoot {
        proxy_pass http://127.0.0.1:64303;
    }

    location /static {
        proxy_pass http://127.0.0.1:64303/spiderfoot/static;
    }

    location /scanviz {
        proxy_pass http://127.0.0.1:64303/spiderfoot/scanviz;
    }
}
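The config references /etc/nginx/nginxpasswd for basic auth; a sketch for creating it (htpasswd ships with apache2-utils on Debian; the user name here is only an example):

    htpasswd -c /etc/nginx/nginxpasswd tsec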
@ -1,2 +0,0 @@
#!/bin/bash
exit 0
@ -1,313 +0,0 @@
# T-Pot (Everything)
# For docker-compose ...
version: '2.1'

networks:
  conpot_local:
  cowrie_local:
  dionaea_local:
  elasticpot_local:
  emobility_local:
  ewsposter_local:
  glastopf_local:
  mailoney_local:
  rdpy_local:
  spiderfoot_local:
  ui-for-docker_local:
  vnclowpot_local:

services:

# Conpot service
  conpot:
    container_name: conpot
    restart: always
    networks:
      - conpot_local
    ports:
      - "1025:1025"
      - "50100:50100"
    image: "dtagdevsec/conpot:1710"
    volumes:
      - /data/conpot/log:/var/log/conpot

# Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    networks:
      - cowrie_local
    cap_add:
      - NET_BIND_SERVICE
    ports:
      - "22:2222"
      - "23:2223"
    image: "dtagdevsec/cowrie:1710"
    volumes:
      - /data/cowrie/downloads:/home/cowrie/cowrie/dl
      - /data/cowrie/keys:/home/cowrie/cowrie/etc
      - /data/cowrie/log:/home/cowrie/cowrie/log
      - /data/cowrie/log/tty:/home/cowrie/cowrie/log/tty

# Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    restart: always
    networks:
      - dionaea_local
    cap_add:
      - NET_BIND_SERVICE
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "8081:80"
      - "135:135"
      - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "1900:1900/udp"
      - "3306:3306"
      - "5060:5060"
      - "5060:5060/udp"
      - "5061:5061"
      - "27017:27017"
    image: "dtagdevsec/dionaea:1710"
    volumes:
      - /data/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - /data/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - /data/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - /data/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - /data/dionaea:/opt/dionaea/var/dionaea
      - /data/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - /data/dionaea/log:/opt/dionaea/var/log
      - /data/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

# Elasticpot service
  elasticpot:
    container_name: elasticpot
    restart: always
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: "dtagdevsec/elasticpot:1710"
    volumes:
      - /data/elasticpot/log:/opt/ElasticpotPY/log

# ELK services
## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    environment:
      - bootstrap.memory_lock=true
#      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
#    mem_limit: 2g
    ports:
      - "127.0.0.1:64298:9200"
    image: "dtagdevsec/elasticsearch:1710"
    volumes:
      - /data:/data

## Kibana service
  kibana:
    container_name: kibana
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    ports:
      - "127.0.0.1:64296:5601"
    image: "dtagdevsec/kibana:1710"

## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    env_file:
      - /etc/tpot/elk/environment
    image: "dtagdevsec/logstash:1710"
    volumes:
      - /data:/data
      - /var/log:/data/host/log

## Elasticsearch-head service
  head:
    container_name: head
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    ports:
      - "127.0.0.1:64302:9100"
    image: "dtagdevsec/head:1710"

# Emobility service
  emobility:
    container_name: emobility
    restart: always
    networks:
      - emobility_local
    cap_add:
      - NET_ADMIN
    ports:
      - "8080:8080"
    image: "dtagdevsec/emobility:1710"
    volumes:
      - /data/emobility:/data/eMobility
      - /data/ews:/data/ews

# Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    networks:
      - ewsposter_local
    image: "dtagdevsec/ewsposter:1710"
    volumes:
      - /data:/data
      - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip

# Glastopf service
  glastopf:
    container_name: glastopf
    restart: always
    networks:
      - glastopf_local
    ports:
      - "80:80"
    image: "dtagdevsec/glastopf:1710"
    volumes:
      - /data/glastopf/db:/opt/glastopf/db
      - /data/glastopf/log:/opt/glastopf/log

# Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: "dtagdevsec/honeytrap:1710"
    volumes:
      - /data/honeytrap/attacks:/opt/honeytrap/var/attacks
      - /data/honeytrap/downloads:/opt/honeytrap/var/downloads
      - /data/honeytrap/log:/opt/honeytrap/var/log

# Mailoney service
  mailoney:
    container_name: mailoney
    restart: always
    networks:
      - mailoney_local
    ports:
      - "25:2525"
    image: "dtagdevsec/mailoney:1710"
    volumes:
      - /data/mailoney/log:/opt/mailoney/logs

# Netdata service
  netdata:
    container_name: netdata
    restart: always
    network_mode: "host"
    depends_on:
      elasticsearch:
        condition: service_healthy
    cap_add:
      - SYS_PTRACE
    security_opt:
      - apparmor=unconfined
    image: "dtagdevsec/netdata:1710"
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /var/run/docker.sock:/var/run/docker.sock

# Rdpy service
  rdpy:
    container_name: rdpy
    restart: always
    networks:
      - rdpy_local
    ports:
      - "3389:3389"
    image: "dtagdevsec/rdpy:1710"
    volumes:
      - /data/rdpy/log:/var/log/rdpy

# Spiderfoot service
  spiderfoot:
    container_name: spiderfoot
    restart: always
    networks:
      - spiderfoot_local
    ports:
      - "127.0.0.1:64303:8080"
    image: "dtagdevsec/spiderfoot:1710"
    volumes:
      - /data/spiderfoot/spiderfoot.db:/home/spiderfoot/spiderfoot.db

# Ui-for-docker service
  ui-for-docker:
    container_name: ui-for-docker
    command: -H unix:///var/run/docker.sock --no-auth
    restart: always
    networks:
      - ui-for-docker_local
    ports:
      - "127.0.0.1:64299:9000"
    image: "dtagdevsec/ui-for-docker:1710"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock

# Suricata service
  suricata:
    container_name: suricata
    restart: always
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: "dtagdevsec/suricata:1710"
    volumes:
      - /data/suricata/log:/var/log/suricata

# P0f service
  p0f:
    container_name: p0f
    restart: always
    network_mode: "host"
    image: "dtagdevsec/p0f:1710"
    volumes:
      - /data/p0f/log:/var/log/p0f

# Vnclowpot service
  vnclowpot:
    container_name: vnclowpot
    restart: always
    networks:
      - vnclowpot_local
    ports:
      - "5900:5900"
    image: "dtagdevsec/vnclowpot:1710"
    volumes:
      - /data/vnclowpot/log:/var/log/vnclowpot
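dps.sh above reads the running configuration from /etc/tpot/tpot.yml; assuming one of these compose files is installed there, a manual start amounts to (a sketch; T-Pot normally starts the stack via its systemd unit):

    docker-compose -f /etc/tpot/tpot.yml up -d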
@ -1,156 +0,0 @@
# T-Pot (HP)
# For docker-compose ...
version: '2.1'

networks:
  cowrie_local:
  dionaea_local:
  elasticpot_local:
  ewsposter_local:
  glastopf_local:
  mailoney_local:
  rdpy_local:
  vnclowpot_local:

services:

# Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    networks:
      - cowrie_local
    cap_add:
      - NET_BIND_SERVICE
    ports:
      - "22:2222"
      - "23:2223"
    image: "dtagdevsec/cowrie:1710"
    volumes:
      - /data/cowrie/downloads:/home/cowrie/cowrie/dl
      - /data/cowrie/keys:/home/cowrie/cowrie/etc
      - /data/cowrie/log:/home/cowrie/cowrie/log
      - /data/cowrie/log/tty:/home/cowrie/cowrie/log/tty

# Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    restart: always
    networks:
      - dionaea_local
    cap_add:
      - NET_BIND_SERVICE
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "8081:80"
      - "135:135"
      - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "1900:1900/udp"
      - "3306:3306"
      - "5060:5060"
      - "5060:5060/udp"
      - "5061:5061"
      - "27017:27017"
    image: "dtagdevsec/dionaea:1710"
    volumes:
      - /data/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - /data/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - /data/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - /data/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - /data/dionaea:/opt/dionaea/var/dionaea
      - /data/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - /data/dionaea/log:/opt/dionaea/var/log
      - /data/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

# Elasticpot service
  elasticpot:
    container_name: elasticpot
    restart: always
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: "dtagdevsec/elasticpot:1710"
    volumes:
      - /data/elasticpot/log:/opt/ElasticpotPY/log

# Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    networks:
      - ewsposter_local
    image: "dtagdevsec/ewsposter:1710"
    volumes:
      - /data:/data
      - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip

# Glastopf service
  glastopf:
    container_name: glastopf
    restart: always
    networks:
      - glastopf_local
    ports:
      - "80:80"
    image: "dtagdevsec/glastopf:1710"
    volumes:
      - /data/glastopf/db:/opt/glastopf/db
      - /data/glastopf/log:/opt/glastopf/log

# Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: "dtagdevsec/honeytrap:1710"
    volumes:
      - /data/honeytrap/attacks:/opt/honeytrap/var/attacks
      - /data/honeytrap/downloads:/opt/honeytrap/var/downloads
      - /data/honeytrap/log:/opt/honeytrap/var/log

# Mailoney service
  mailoney:
    container_name: mailoney
    restart: always
    networks:
      - mailoney_local
    ports:
      - "25:2525"
    image: "dtagdevsec/mailoney:1710"
    volumes:
      - /data/mailoney/log:/opt/mailoney/logs

# Rdpy service
  rdpy:
    container_name: rdpy
    restart: always
    networks:
      - rdpy_local
    ports:
      - "3389:3389"
    image: "dtagdevsec/rdpy:1710"
    volumes:
      - /data/rdpy/log:/var/log/rdpy

# Vnclowpot service
  vnclowpot:
    container_name: vnclowpot
    restart: always
    networks:
      - vnclowpot_local
    ports:
      - "5900:5900"
    image: "dtagdevsec/vnclowpot:1710"
    volumes:
      - /data/vnclowpot/log:/var/log/vnclowpot
@ -1,176 +0,0 @@
# T-Pot (Industrial)
# For docker-compose ...
version: '2.1'

networks:
  conpot_local:
  emobility_local:
  ewsposter_local:
  spiderfoot_local:
  ui-for-docker_local:

services:

# Conpot service
  conpot:
    container_name: conpot
    restart: always
    networks:
      - conpot_local
    ports:
      - "1025:1025"
      - "50100:50100"
    image: "dtagdevsec/conpot:1710"
    volumes:
      - /data/conpot/log:/var/log/conpot

# ELK services
## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    environment:
      - bootstrap.memory_lock=true
#      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
#    mem_limit: 2g
    ports:
      - "127.0.0.1:64298:9200"
    image: "dtagdevsec/elasticsearch:1710"
    volumes:
      - /data:/data

## Kibana service
  kibana:
    container_name: kibana
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    ports:
      - "127.0.0.1:64296:5601"
    image: "dtagdevsec/kibana:1710"

## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    env_file:
      - /etc/tpot/elk/environment
    image: "dtagdevsec/logstash:1710"
    volumes:
      - /data:/data
      - /var/log:/data/host/log

## Elasticsearch-head service
  head:
    container_name: head
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    ports:
      - "127.0.0.1:64302:9100"
    image: "dtagdevsec/head:1710"

# Emobility service
  emobility:
    container_name: emobility
    restart: always
    networks:
      - emobility_local
    cap_add:
      - NET_ADMIN
    ports:
      - "8080:8080"
    image: "dtagdevsec/emobility:1710"
    volumes:
      - /data/emobility:/data/eMobility
      - /data/ews:/data/ews

# Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    networks:
      - ewsposter_local
    image: "dtagdevsec/ewsposter:1710"
    volumes:
      - /data:/data
      - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip

# Netdata service
  netdata:
    container_name: netdata
    restart: always
    network_mode: "host"
    depends_on:
      elasticsearch:
        condition: service_healthy
    cap_add:
      - SYS_PTRACE
    security_opt:
      - apparmor=unconfined
    image: "dtagdevsec/netdata:1710"
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /var/run/docker.sock:/var/run/docker.sock

# Spiderfoot service
  spiderfoot:
    container_name: spiderfoot
    restart: always
    networks:
      - spiderfoot_local
    ports:
      - "127.0.0.1:64303:8080"
    image: "dtagdevsec/spiderfoot:1710"
    volumes:
      - /data/spiderfoot/spiderfoot.db:/home/spiderfoot/spiderfoot.db

# Ui-for-docker service
  ui-for-docker:
    container_name: ui-for-docker
    command: -H unix:///var/run/docker.sock --no-auth
    restart: always
    networks:
      - ui-for-docker_local
    ports:
      - "127.0.0.1:64299:9000"
    image: "dtagdevsec/ui-for-docker:1710"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock

# Suricata service
  suricata:
    container_name: suricata
    restart: always
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: "dtagdevsec/suricata:1710"
    volumes:
      - /data/suricata/log:/var/log/suricata

# P0f service
  p0f:
    container_name: p0f
    restart: always
    network_mode: "host"
    image: "dtagdevsec/p0f:1710"
    volumes:
      - /data/p0f/log:/var/log/p0f
@ -1,283 +0,0 @@
# T-Pot (Standard)
# For docker-compose ...
version: '2.1'

networks:
  cowrie_local:
  dionaea_local:
  elasticpot_local:
  ewsposter_local:
  glastopf_local:
  mailoney_local:
  rdpy_local:
  spiderfoot_local:
  ui-for-docker_local:
  vnclowpot_local:

services:

  # Cowrie service
  cowrie:
    container_name: cowrie
    restart: always
    networks:
      - cowrie_local
    cap_add:
      - NET_BIND_SERVICE
    ports:
      - "22:2222"
      - "23:2223"
    image: "dtagdevsec/cowrie:1710"
    volumes:
      - /data/cowrie/downloads:/home/cowrie/cowrie/dl
      - /data/cowrie/keys:/home/cowrie/cowrie/etc
      - /data/cowrie/log:/home/cowrie/cowrie/log
      - /data/cowrie/log/tty:/home/cowrie/cowrie/log/tty

  # Dionaea service
  dionaea:
    container_name: dionaea
    stdin_open: true
    restart: always
    networks:
      - dionaea_local
    cap_add:
      - NET_BIND_SERVICE
    ports:
      - "20:20"
      - "21:21"
      - "42:42"
      - "69:69/udp"
      - "8081:80"
      - "135:135"
      - "443:443"
      - "445:445"
      - "1433:1433"
      - "1723:1723"
      - "1883:1883"
      - "1900:1900/udp"
      - "3306:3306"
      - "5060:5060"
      - "5060:5060/udp"
      - "5061:5061"
      - "27017:27017"
    image: "dtagdevsec/dionaea:1710"
    volumes:
      - /data/dionaea/roots/ftp:/opt/dionaea/var/dionaea/roots/ftp
      - /data/dionaea/roots/tftp:/opt/dionaea/var/dionaea/roots/tftp
      - /data/dionaea/roots/www:/opt/dionaea/var/dionaea/roots/www
      - /data/dionaea/roots/upnp:/opt/dionaea/var/dionaea/roots/upnp
      - /data/dionaea:/opt/dionaea/var/dionaea
      - /data/dionaea/binaries:/opt/dionaea/var/dionaea/binaries
      - /data/dionaea/log:/opt/dionaea/var/log
      - /data/dionaea/rtp:/opt/dionaea/var/dionaea/rtp

  # Elasticpot service
  elasticpot:
    container_name: elasticpot
    restart: always
    networks:
      - elasticpot_local
    ports:
      - "9200:9200"
    image: "dtagdevsec/elasticpot:1710"
    volumes:
      - /data/elasticpot/log:/opt/ElasticpotPY/log

  # ELK services
  ## Elasticsearch service
  elasticsearch:
    container_name: elasticsearch
    restart: always
    environment:
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    cap_add:
      - IPC_LOCK
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    # mem_limit: 2g
    ports:
      - "127.0.0.1:64298:9200"
    image: "dtagdevsec/elasticsearch:1710"
    volumes:
      - /data:/data

  ## Kibana service
  kibana:
    container_name: kibana
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
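    # condition: service_healthy relies on the elasticsearch service having a
    # healthcheck, presumably baked into the dtagdevsec/elasticsearch image.
    # A hypothetical inline equivalent, for illustration only:
    #   healthcheck:
    #     test: ["CMD-SHELL", "curl -sf http://127.0.0.1:9200/_cluster/health || exit 1"]
    #     interval: 30s
    #     retries: 5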
    ports:
      - "127.0.0.1:64296:5601"
    image: "dtagdevsec/kibana:1710"

  ## Logstash service
  logstash:
    container_name: logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    env_file:
      - /etc/tpot/elk/environment
    image: "dtagdevsec/logstash:1710"
    volumes:
      - /data:/data
      - /var/log:/data/host/log

  ## Elasticsearch-head service
  head:
    container_name: head
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    ports:
      - "127.0.0.1:64302:9100"
    image: "dtagdevsec/head:1710"

  # Ewsposter service
  ewsposter:
    container_name: ewsposter
    restart: always
    networks:
      - ewsposter_local
    image: "dtagdevsec/ewsposter:1710"
    volumes:
      - /data:/data
      - /data/ews/conf/ews.ip:/opt/ewsposter/ews.ip

  # Glastopf service
  glastopf:
    container_name: glastopf
    restart: always
    networks:
      - glastopf_local
    ports:
      - "80:80"
    image: "dtagdevsec/glastopf:1710"
    volumes:
      - /data/glastopf/db:/opt/glastopf/db
      - /data/glastopf/log:/opt/glastopf/log

  # Honeytrap service
  honeytrap:
    container_name: honeytrap
    restart: always
    network_mode: "host"
    cap_add:
      - NET_ADMIN
    image: "dtagdevsec/honeytrap:1710"
    volumes:
      - /data/honeytrap/attacks:/opt/honeytrap/var/attacks
      - /data/honeytrap/downloads:/opt/honeytrap/var/downloads
      - /data/honeytrap/log:/opt/honeytrap/var/log

  # Mailoney service
  mailoney:
    container_name: mailoney
    restart: always
    networks:
      - mailoney_local
    ports:
      - "25:2525"
    image: "dtagdevsec/mailoney:1710"
    volumes:
      - /data/mailoney/log:/opt/mailoney/logs

  # Netdata service
  netdata:
    container_name: netdata
    restart: always
    network_mode: "host"
    depends_on:
      elasticsearch:
        condition: service_healthy
    cap_add:
      - SYS_PTRACE
    security_opt:
      - apparmor=unconfined
    image: "dtagdevsec/netdata:1710"
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /var/run/docker.sock:/var/run/docker.sock

  # Rdpy service
  rdpy:
    container_name: rdpy
    restart: always
    networks:
      - rdpy_local
    ports:
      - "3389:3389"
    image: "dtagdevsec/rdpy:1710"
    volumes:
      - /data/rdpy/log:/var/log/rdpy

  # Spiderfoot service
  spiderfoot:
    container_name: spiderfoot
    restart: always
    networks:
      - spiderfoot_local
    ports:
      - "127.0.0.1:64303:8080"
    image: "dtagdevsec/spiderfoot:1710"
    volumes:
      - /data/spiderfoot/spiderfoot.db:/home/spiderfoot/spiderfoot.db

  # Ui-for-docker service
  ui-for-docker:
    container_name: ui-for-docker
    command: -H unix:///var/run/docker.sock --no-auth
    restart: always
    networks:
      - ui-for-docker_local
    ports:
      - "127.0.0.1:64299:9000"
    image: "dtagdevsec/ui-for-docker:1710"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock

  # Suricata service
  suricata:
    container_name: suricata
    restart: always
    network_mode: "host"
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - NET_RAW
    image: "dtagdevsec/suricata:1710"
    volumes:
      - /data/suricata/log:/var/log/suricata

  # P0f service
  p0f:
    container_name: p0f
    restart: always
    network_mode: "host"
    image: "dtagdevsec/p0f:1710"
    volumes:
      - /data/p0f/log:/var/log/p0f

  # Vnclowpot service
  vnclowpot:
    container_name: vnclowpot
    restart: always
    networks:
      - vnclowpot_local
    ports:
      - "5900:5900"
    image: "dtagdevsec/vnclowpot:1710"
    volumes:
      - /data/vnclowpot/log:/var/log/vnclowpot
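
# To bring this edition up by hand (a sketch; the tpot systemd unit further
# below normally drives it):
#   docker-compose -f /etc/tpot/tpot.yml up --no-color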
@ -1,26 +0,0 @@
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete indices older than 90 days (based on index name), for logstash-
      prefixed indices. Ignore the error if the filter does not result in an
      actionable list of indices (ignore_empty_list) and exit cleanly.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: prefix
      value: logstash-
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: 90
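
# Dry-run sketch (assuming Elasticsearch Curator is installed and the two
# curator files live under /etc/tpot/curator/):
#   curator --dry-run --config /etc/tpot/curator/curator.yml /etc/tpot/curator/actions.yml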
@ -1,21 +0,0 @@
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
client:
  hosts:
    - 127.0.0.1
  port: 64298
  url_prefix:
  use_ssl: False
  certificate:
  client_cert:
  client_key:
  ssl_no_validate: False
  http_auth:
  timeout: 30
  master_only: False

logging:
  loglevel: INFO
  logfile: /var/log/curator.log
  logformat: default
  blacklist: ['elasticsearch', 'urllib3']
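
# To preview the indices the delete_indices action would consider (a sketch,
# using the host and port configured above):
#   curl -s 'http://127.0.0.1:64298/_cat/indices/logstash-*?v'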
Binary file not shown.
Binary file not shown.
@ -1,38 +0,0 @@
/data/conpot/log/conpot.json
/data/conpot/log/conpot.log
/data/cowrie/log/cowrie.json
/data/cowrie/log/cowrie-textlog.log
/data/cowrie/log/lastlog.txt
/data/cowrie/log/ttylogs.tgz
/data/cowrie/downloads.tgz
/data/dionaea/log/dionaea.json
/data/dionaea/log/dionaea.sqlite
/data/dionaea/bistreams.tgz
/data/dionaea/binaries.tgz
/data/dionaea/dionaea-errors.log
/data/elasticpot/log/elasticpot.log
/data/elk/log/*.log
/data/emobility/log/centralsystem.log
/data/emobility/log/centralsystemEWS.log
/data/glastopf/log/glastopf.log
/data/glastopf/db/glastopf.db
/data/honeytrap/log/*.log
/data/honeytrap/log/*.json
/data/honeytrap/attacks.tgz
/data/honeytrap/downloads.tgz
/data/mailoney/log/commands.log
/data/p0f/log/p0f.json
/data/rdpy/log/rdpy.log
/data/suricata/log/*.log
/data/suricata/log/*.json
/data/vnclowpot/log/vnclowpot.log
{
  su tpot tpot
  copytruncate
  create 760 tpot tpot
  daily
  missingok
  notifempty
  rotate 30
  compress
}
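
# Config-check sketch (debug mode makes no changes; paths assumed to match
# the T-Pot install):
#   logrotate -d -s /etc/tpot/logrotate/status /etc/tpot/logrotate/logrotate.conf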
@ -1,57 +0,0 @@
[Unit]
Description=tpot
Requires=docker.service
After=docker.service

[Service]
Restart=always
RestartSec=5

# Get and set internal, external IP infos, but ignore errors
ExecStartPre=-/usr/share/tpot/bin/updateip.sh

# Clear state or, if persistence is enabled, rotate and compress logs from /data
ExecStartPre=-/bin/bash -c '/usr/share/tpot/bin/clean.sh on'

# Remove old containers, images and volumes
ExecStartPre=-/usr/local/bin/docker-compose -f /etc/tpot/tpot.yml down -v
ExecStartPre=-/usr/local/bin/docker-compose -f /etc/tpot/tpot.yml rm -v
ExecStartPre=-/bin/bash -c 'docker volume rm $(docker volume ls -q)'
ExecStartPre=-/bin/bash -c 'docker rm -v $(docker ps -aq)'
ExecStartPre=-/bin/bash -c 'docker rmi $(docker images | grep "<none>" | awk \'{print $3}\')'

# Get the interface, disable offloading and enable promiscuous mode for p0f and suricata
ExecStartPre=/bin/bash -c '/sbin/ethtool --offload $(/sbin/ip address | grep "^2: " | awk \'{ print $2 }\' | tr -d [:punct:]) rx off tx off'
ExecStartPre=/bin/bash -c '/sbin/ethtool -K $(/sbin/ip address | grep "^2: " | awk \'{ print $2 }\' | tr -d [:punct:]) gso off gro off'
ExecStartPre=/bin/bash -c '/sbin/ip link set $(/sbin/ip address | grep "^2: " | awk \'{ print $2 }\' | tr -d [:punct:]) promisc on'

# Modify access rights on docker.sock for netdata
ExecStartPre=-/bin/chmod 666 /var/run/docker.sock

# Set iptables accept rules to avoid forwarding to honeytrap / NFQUEUE
# Forward all other connections to honeytrap / NFQUEUE
ExecStartPre=/sbin/iptables -w -A INPUT -s 127.0.0.1 -j ACCEPT
ExecStartPre=/sbin/iptables -w -A INPUT -d 127.0.0.1 -j ACCEPT
ExecStartPre=/sbin/iptables -w -A INPUT -p tcp -m multiport --dports 64295:64303,7634 -j ACCEPT
ExecStartPre=/sbin/iptables -w -A INPUT -p tcp -m multiport --dports 20:23,25,42,69,80,135,443,445,1433,1723,1883,1900 -j ACCEPT
ExecStartPre=/sbin/iptables -w -A INPUT -p tcp -m multiport --dports 3306,3389,5060,5061,5601,5900,27017 -j ACCEPT
ExecStartPre=/sbin/iptables -w -A INPUT -p tcp -m multiport --dports 1025,50100,8080,8081,9200 -j ACCEPT
ExecStartPre=/sbin/iptables -w -A INPUT -p tcp --syn -m state --state NEW -j NFQUEUE
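
# To inspect the rules added above while the unit is running (a sketch):
#   iptables -w -L INPUT -n --line-numbers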

# Compose T-Pot up
ExecStart=/usr/local/bin/docker-compose -f /etc/tpot/tpot.yml up --no-color

# Compose T-Pot down, remove containers and volumes
ExecStop=/usr/local/bin/docker-compose -f /etc/tpot/tpot.yml down -v

# Remove only the previously set iptables rules
ExecStopPost=/sbin/iptables -w -D INPUT -s 127.0.0.1 -j ACCEPT
ExecStopPost=/sbin/iptables -w -D INPUT -d 127.0.0.1 -j ACCEPT
ExecStopPost=/sbin/iptables -w -D INPUT -p tcp -m multiport --dports 64295:64303,7634 -j ACCEPT
ExecStopPost=/sbin/iptables -w -D INPUT -p tcp -m multiport --dports 20:23,25,42,69,80,135,443,445,1433,1723,1883,1900 -j ACCEPT
ExecStopPost=/sbin/iptables -w -D INPUT -p tcp -m multiport --dports 3306,3389,5060,5061,5601,5900,27017 -j ACCEPT
ExecStopPost=/sbin/iptables -w -D INPUT -p tcp -m multiport --dports 1025,50100,8080,8081,9200 -j ACCEPT
ExecStopPost=/sbin/iptables -w -D INPUT -p tcp --syn -m state --state NEW -j NFQUEUE

[Install]
WantedBy=multi-user.target
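
# Activation sketch after installing or editing this unit:
#   systemctl daemon-reload && systemctl enable tpot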
@ -1,13 +0,0 @@
[Unit]
Description=wetty
Requires=sshd.service
After=sshd.service

[Service]
Restart=always
User=tsec
Group=tsec
ExecStart=/usr/bin/node /usr/local/lib/node_modules/wetty/app.js -p 64300 --host 127.0.0.1 --sshhost 127.0.0.1 --sshport 64295

[Install]
WantedBy=multi-user.target
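
# Wetty then serves the web terminal on 127.0.0.1:64300 (see ExecStart above).
# Activation sketch: systemctl daemon-reload && systemctl enable wetty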
@ -1 +0,0 @@
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
Binary file not shown.
@ -1,21 +0,0 @@
<!DOCTYPE html>
<html lang="en-US">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>T-Pot</title>
  <link href="style.css" rel="stylesheet" type="text/css"/>
</head>

<body bgcolor="#E20074">
  <center>
    <a href="/tpotweb.html" target="_top" class="btn">Home</a>
    <a href="/kibana" target="main" class="btn">Kibana</a>
    <a href="/myhead/" target="main" class="btn">ES Head</a>
    <a href="/netdata/" target="_blank" class="btn">Netdata</a>
    <a href="/spiderfoot/" target="main" class="btn">Spiderfoot</a>
    <a href="/ui/" target="main" class="btn">Portainer</a>
    <a href="/wetty/ssh/tsec" target="main" class="btn">WebTTY</a>
  </center>
</body>
</html>
@ -1,17 +0,0 @@
.btn {
  -webkit-border-radius: 0;
  -moz-border-radius: 0;
  border-radius: 0;
  font-family: Arial;
  color: #ffffff;
  font-size: 12px;
  background: #E20074;
  padding: 2px 30px 2px 30px;
  text-decoration: none;
}

.btn:hover {
  background: #c2c2c2;
  text-decoration: none;
}
@ -1,15 +0,0 @@
<!DOCTYPE html>
<html lang="en-US">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>T-Pot</title>
</head>

<frameset rows='20,*' border='0' frameborder='0' framespacing='0'>
  <frame src='navbar.html' name='navbar' marginwidth='0' marginheight='0' scrolling='no' noresize>
  <frame src='/kibana' name='main' marginwidth='0' marginheight='0' scrolling='auto' noresize>
  <noframes>
  </noframes>
</frameset>
</html>