prepare for T-Pot 16.03

This commit is contained in:
marco
2015-12-08 15:47:39 +01:00
parent 0701b5f2f4
commit f06935fe63
72 changed files with 29029 additions and 459 deletions

View File

@ -1,10 +1,10 @@
#!/bin/bash
########################################################
# T-Pot Community Edition #
# T-Pot #
# Check container and services script #
# #
# v0.14 by mo, DTAG, 2015-08-07 #
# v0.02 by mo, DTAG, 2015-08-08 #
########################################################
if [ -a /var/run/check.lock ];
then exit

View File

@ -1,15 +1,33 @@
#!/bin/bash
########################################################
# T-Pot Community Edition #
# T-Pot #
# Container and services restart script #
# #
# v0.14 by mo, DTAG, 2015-08-07 #
# v0.03 by mo, DTAG, 2015-11-02 #
########################################################
myCOUNT=1
if [ -a /var/run/check.lock ];
then exit
fi
while true
do
if ! [ -a /var/run/check.lock ];
then break
fi
sleep 0.1
if [ "$myCOUNT" = "1" ];
then
echo -n "Waiting for services "
else echo -n .
fi
if [ "$myCOUNT" = "6000" ];
then
echo
echo "Overriding check.lock"
rm /var/run/check.lock
break
fi
myCOUNT=$[$myCOUNT +1]
done
myIMAGES=$(cat /data/images.conf)

View File

@ -1,10 +1,10 @@
#!/bin/bash
########################################################
# T-Pot Community Edition #
# T-Pot #
# Container and services status script #
# #
# v0.11 by mo, DTAG, 2015-06-12 #
# v0.04 by mo, DTAG, 2015-08-20 #
########################################################
myCOUNT=1
myIMAGES=$(cat /data/images.conf)
@ -29,12 +29,13 @@ do
done
echo
echo
echo "****************** $(date) ******************"
echo
echo "======| System |======"
echo Date:" "$(date)
echo Uptime:" "$(uptime)
echo CPU temp: $(sensors | grep "Physical" | awk '{ print $4 }')
echo
for i in $myIMAGES
do
echo
echo "======| Container:" $i "|======"
docker exec $i supervisorctl status | GREP_COLORS='mt=01;32' egrep --color=always "(RUNNING)|$" | GREP_COLORS='mt=01;31' egrep --color=always "(STOPPED|FATAL)|$"
echo

View File

@ -0,0 +1,311 @@
[2015-11-16 09:29:38,922][INFO ][node ] [Veil] initialized
[2015-11-16 09:29:38,923][INFO ][node ] [Veil] starting ...
[2015-11-16 09:29:39,081][INFO ][transport ] [Veil] publish_address {127.0.0.1:9300}, bound_addresses {127.0.0.1:9300}, {[::1]:9300}
[2015-11-16 09:29:39,096][INFO ][discovery ] [Veil] elasticsearch/uYwNByX2TxSVe55Pzdbb0g
[2015-11-16 09:29:42,201][INFO ][cluster.service ] [Veil] new_master {Veil}{uYwNByX2TxSVe55Pzdbb0g}{127.0.0.1}{127.0.0.1:9300}, reason: zen-disco-join(elected_as_master, [0] joins received)
[2015-11-16 09:29:42,294][INFO ][gateway ] [Veil] recovered [2] indices into cluster_state
[2015-11-16 09:29:42,311][INFO ][http ] [Veil] publish_address {127.0.0.1:9200}, bound_addresses {127.0.0.1:9200}, {[::1]:9200}
[2015-11-16 09:29:42,311][INFO ][node ] [Veil] started
[2015-11-16 09:30:24,102][INFO ][cluster.metadata ] [Veil] [logstash-2015.11.16] creating index, cause [auto(bulk api)], templates [logstash], shards [5]/[1], mappings [SuricataIDPS-logs, _default_]
[2015-11-16 09:30:24,229][INFO ][cluster.metadata ] [Veil] [logstash-2015.11.16] update_mapping [SuricataIDPS-logs]
[2015-11-16 09:30:24,813][INFO ][cluster.metadata ] [Veil] [logstash-2015.11.16] update_mapping [SuricataIDPS-logs]
[2015-11-16 09:30:31,124][INFO ][cluster.metadata ] [Veil] [logstash-2015.11.16] update_mapping [SuricataIDPS-logs]
[2015-11-16 09:53:30,514][INFO ][cluster.metadata ] [Veil] [logstash-2015.11.16] update_mapping [SuricataIDPS-logs]
[2015-11-16 10:03:55,575][INFO ][cluster.metadata ] [Veil] [logstash-2015.11.16] update_mapping [SuricataIDPS-logs]
[2015-11-16 10:03:59,745][INFO ][cluster.metadata ] [Veil] [logstash-2015.11.16] update_mapping [SuricataIDPS-logs]
[2015-11-16 10:03:59,762][INFO ][cluster.metadata ] [Veil] [logstash-2015.11.16] update_mapping [SuricataIDPS-logs]
[2015-11-16 10:04:03,891][INFO ][cluster.metadata ] [Veil] [logstash-2015.11.16] update_mapping [SuricataIDPS-logs]
[2015-11-16 10:10:48,444][INFO ][cluster.metadata ] [Veil] [logstash-2015.11.16] update_mapping [SuricataIDPS-logs]
[2015-11-16 10:29:23,286][INFO ][cluster.metadata ] [Veil] [logstash-2015.11.16] update_mapping [SuricataIDPS-logs]
[2015-11-16 10:29:23,307][INFO ][cluster.metadata ] [Veil] [logstash-2015.11.16] update_mapping [SuricataIDPS-logs]
[2015-11-16 11:21:34,996][INFO ][rest.suppressed ] /.kibana/visualization/Destination-Ports Params: {id=Destination-Ports, index=.kibana, op_type=create, type=visualization}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[visualization][Destination-Ports]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 11:22:20,042][INFO ][rest.suppressed ] /.kibana/visualization/Destination-Ports Params: {id=Destination-Ports, index=.kibana, op_type=create, type=visualization}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[visualization][Destination-Ports]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 11:26:11,386][INFO ][cluster.metadata ] [Veil] [logstash-2015.11.16] create_mapping [ews-logs]
[2015-11-16 11:30:22,723][INFO ][rest.suppressed ] /.kibana/index-pattern/[logstash-]YYYY.MM.DD Params: {id=[logstash-]YYYY.MM.DD, index=.kibana, op_type=create, type=index-pattern}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[index-pattern][[logstash-]YYYY.MM.DD]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 12:07:18,928][INFO ][rest.suppressed ] /.kibana/visualization/Destination-Ports Params: {id=Destination-Ports, index=.kibana, op_type=create, type=visualization}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[visualization][Destination-Ports]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 12:18:27,537][INFO ][rest.suppressed ] /.kibana/visualization/SSH-Software-Version Params: {id=SSH-Software-Version, index=.kibana, op_type=create, type=visualization}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[visualization][SSH-Software-Version]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 12:22:37,298][INFO ][rest.suppressed ] /.kibana/visualization/SSH-Software-Version Params: {id=SSH-Software-Version, index=.kibana, op_type=create, type=visualization}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[visualization][SSH-Software-Version]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 12:43:41,414][INFO ][rest.suppressed ] /.kibana/dashboard/Default Params: {id=Default, index=.kibana, op_type=create, type=dashboard}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[dashboard][Default]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 14:33:42,067][INFO ][rest.suppressed ] /.kibana/dashboard/Default Params: {id=Default, index=.kibana, op_type=create, type=dashboard}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[dashboard][Default]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 14:48:17,447][INFO ][rest.suppressed ] /.kibana/dashboard/Default Params: {id=Default, index=.kibana, op_type=create, type=dashboard}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[dashboard][Default]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 14:55:11,489][INFO ][rest.suppressed ] /.kibana/dashboard/Default Params: {id=Default, index=.kibana, op_type=create, type=dashboard}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[dashboard][Default]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 14:58:51,689][INFO ][rest.suppressed ] /.kibana/dashboard/Default Params: {id=Default, index=.kibana, op_type=create, type=dashboard}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[dashboard][Default]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 15:01:17,546][INFO ][rest.suppressed ] /.kibana/dashboard/Default Params: {id=Default, index=.kibana, op_type=create, type=dashboard}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[dashboard][Default]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 15:13:10,208][INFO ][rest.suppressed ] /.kibana/dashboard/Default Params: {id=Default, index=.kibana, op_type=create, type=dashboard}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[dashboard][Default]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 15:21:57,533][INFO ][rest.suppressed ] /.kibana/visualization/Fileinfo-Magic Params: {id=Fileinfo-Magic, index=.kibana, op_type=create, type=visualization}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[visualization][Fileinfo-Magic]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 15:23:22,710][INFO ][rest.suppressed ] /.kibana/dashboard/Default Params: {id=Default, index=.kibana, op_type=create, type=dashboard}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[dashboard][Default]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 16:10:54,364][INFO ][rest.suppressed ] /.kibana/dashboard/Default Params: {id=Default, index=.kibana, op_type=create, type=dashboard}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[dashboard][Default]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 16:14:13,496][INFO ][rest.suppressed ] /.kibana/dashboard/Default Params: {id=Default, index=.kibana, op_type=create, type=dashboard}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[dashboard][Default]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 16:32:20,483][INFO ][rest.suppressed ] /.kibana/dashboard/Default Params: {id=Default, index=.kibana, op_type=create, type=dashboard}
[.kibana][[.kibana][0]] DocumentAlreadyExistsException[[dashboard][Default]: document already exists]
at org.elasticsearch.index.engine.InternalEngine.innerCreateNoLock(InternalEngine.java:411)
at org.elasticsearch.index.engine.InternalEngine.innerCreate(InternalEngine.java:369)
at org.elasticsearch.index.engine.InternalEngine.create(InternalEngine.java:341)
at org.elasticsearch.index.shard.IndexShard.create(IndexShard.java:517)
at org.elasticsearch.index.engine.Engine$Create.execute(Engine.java:789)
at org.elasticsearch.action.support.replication.TransportReplicationAction.executeIndexRequestOnPrimary(TransportReplicationAction.java:1073)
at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:170)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.performOnPrimary(TransportReplicationAction.java:579)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase$1.doRun(TransportReplicationAction.java:452)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
[2015-11-16 16:34:42,196][INFO ][node ] [Veil] stopping ...
[2015-11-16 16:34:42,288][INFO ][node ] [Veil] stopped
[2015-11-16 16:34:42,289][INFO ][node ] [Veil] closing ...
[2015-11-16 16:34:42,297][INFO ][node ] [Veil] closed
[2015-11-16 16:35:06,696][INFO ][node ] [Famine] version[2.0.0], pid[8], build[de54438/2015-10-22T08:09:48Z]
[2015-11-16 16:35:06,697][INFO ][node ] [Famine] initializing ...
[2015-11-16 16:35:06,798][INFO ][plugins ] [Famine] loaded [], sites []
[2015-11-16 16:35:06,915][INFO ][env ] [Famine] using [1] data paths, mounts [[/data/elk (/dev/sda5)]], net usable_space [6.9gb], net total_space [7.3gb], spins? [possibly], types [ext4]
[2015-11-16 16:35:08,561][INFO ][node ] [Famine] initialized
[2015-11-16 16:35:08,561][INFO ][node ] [Famine] starting ...
[2015-11-16 16:35:08,752][INFO ][transport ] [Famine] publish_address {127.0.0.1:9300}, bound_addresses {127.0.0.1:9300}, {[::1]:9300}
[2015-11-16 16:35:08,758][INFO ][discovery ] [Famine] elasticsearch/viSYKHsKRYar5tp5Av8fLQ
[2015-11-16 16:35:11,809][INFO ][cluster.service ] [Famine] new_master {Famine}{viSYKHsKRYar5tp5Av8fLQ}{127.0.0.1}{127.0.0.1:9300}, reason: zen-disco-join(elected_as_master, [0] joins received)
[2015-11-16 16:35:11,897][INFO ][gateway ] [Famine] recovered [3] indices into cluster_state
[2015-11-16 16:35:11,945][INFO ][http ] [Famine] publish_address {127.0.0.1:9200}, bound_addresses {127.0.0.1:9200}, {[::1]:9200}
[2015-11-16 16:35:11,945][INFO ][node ] [Famine] started
[2015-11-16 16:39:06,106][INFO ][node ] [Famine] stopping ...
[2015-11-16 16:39:06,223][INFO ][node ] [Famine] stopped
[2015-11-16 16:39:06,223][INFO ][node ] [Famine] closing ...
[2015-11-16 16:39:06,239][INFO ][node ] [Famine] closed

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,4 @@
[2015-11-14 03:43:15,837][INFO ][node ] [Veil] version[2.0.0], pid[8], build[de54438/2015-10-22T08:09:48Z]
[2015-11-14 03:43:15,838][INFO ][node ] [Veil] initializing ...
[2015-11-14 03:43:15,973][INFO ][plugins ] [Veil] loaded [], sites []
[2015-11-14 03:43:16,175][INFO ][env ] [Veil] using [1] data paths, mounts [[/data/elk (/dev/sda5)]], net usable_space [6.9gb], net total_space [7.3gb], spins? [possibly], types [ext4]

View File

@ -1,7 +1,6 @@
dionaea
glastopf
honeytrap
kippo
cowrie
suricata
ews
elk

View File

@ -0,0 +1,4 @@
dionaea
glastopf
honeytrap
cowrie

View File

@ -1,6 +1,6 @@
T-Pot Community Edition (Beta)
Hostname: \n
IP:
T-Pot 16.03 (Alpha)
Hostname: \n
IP:
___________ _____________________________
@ -13,4 +13,3 @@ ___________ _____________________________
CTRL+ALT+F2 - Display current container status
CTRL+ALT+F1 - Return to this screen

View File

@ -1,6 +1,6 @@
#!/bin/sh -e
# Let's add the first local ip to the /etc/issue and ews.ip file
# export http_proxy=http://your.proxy.server:port/
#!/bin/bash
# Let's add the first local ip to the /etc/issue and external ip to ews.ip file
source /etc/environment
myLOCALIP=$(hostname -I | awk '{ print $1 }')
myEXTIP=$(curl myexternalip.com/raw)
sed -i "s#IP:.*#IP: $myLOCALIP, $myEXTIP#" /etc/issue
@ -12,5 +12,3 @@ chown tpot:tpot /data/ews/conf/ews.ip
if [ -f /var/run/check.lock ];
then rm /var/run/check.lock
fi
setupcon
exit 0

View File

@ -1,14 +1,14 @@
#!/bin/bash
########################################################
# T-Pot Community Edition #
# T-Pot #
# Two-Factor authentication enable script #
# #
# v0.20 by mo, DTAG, 2015-01-27 #
# v0.01 by mo, DTAG, 2015-06-15 #
########################################################
echo "### This script will enable Two-Factor-Authentication based on Google Authenticator for SSH."
while true
while true
do
echo -n "### Do you want to continue (y/n)? "; read myANSWER;
case $myANSWER in

View File

@ -1,10 +1,10 @@
#!/bin/bash
########################################################
# T-Pot Community Edition #
# T-Pot #
# SSH enable script #
# #
# v0.21 by mo, DTAG, 2015-01-27 #
# v0.01 by mo, DTAG, 2015-06-15 #
########################################################
if ! [ -f /etc/init/ssh.override ];
@ -15,7 +15,7 @@ fi
echo "### This script will enable the ssh service (default port tcp/64295)."
echo "### Password authentication is disabled by default."
while true
while true
do
echo -n "### Do you want to continue (y/n)? "; read myANSWER;
case $myANSWER in

312
installer/install.sh Executable file
View File

@ -0,0 +1,312 @@
#!/bin/bash
########################################################
# T-Pot post install script #
# Ubuntu server 14.04.3, x64 #
# #
# v0.10 by mo, DTAG, 2015-10-06 #
########################################################
# Type of install, SENSOR or FULL?
myFLAVOR="FULL"
# Some global vars
myPROXYFILEPATH="/root/tpot/etc/proxy"
myNTPCONFPATH="/root/tpot/etc/ntp"
myPFXPATH="/root/tpot/keys/8021x.pfx"
myPFXPWPATH="/root/tpot/keys/8021x.pw"
myPFXHOSTIDPATH="/root/tpot/keys/8021x.id"
# Let's create a function for colorful output
fuECHO () {
local myRED=1
local myWHT=7
tput setaf $myRED
echo $1 "$2"
tput setaf $myWHT
}
# Let's make sure there is a warning if running for a second time
if [ -f install.log ];
then fuECHO "### Running more than once may complicate things. Erase install.log if you are really sure."
exit 1;
fi
# Let's log for the beauty of it
set -e
exec 2> >(tee "install.err")
exec > >(tee "install.log")
# Let's setup the proxy for env
if [ -f $myPROXYFILEPATH ];
then fuECHO "### Setting up the proxy."
myPROXY=$(cat $myPROXYFILEPATH)
tee -a /etc/environment <<EOF
export http_proxy=$myPROXY
export https_proxy=$myPROXY
export HTTP_PROXY=$myPROXY
export HTTPS_PROXY=$myPROXY
export no_proxy=localhost,127.0.0.1,.sock
EOF
source /etc/environment
# Let's setup the proxy for apt
tee /etc/apt/apt.conf <<EOF
Acquire::http::Proxy "$myPROXY";
Acquire::https::Proxy "$myPROXY";
EOF
fi
# Let's setup the ntp server
if [ -f $myNTPCONFPATH ];
then
fuECHO "### Setting up the ntp server."
cp $myNTPCONFPATH /etc/ntp.conf
fi
# Let's setup 802.1x networking
if [ -f $myPFXPATH ];
then
fuECHO "### Setting up 802.1x networking."
cp $myPFXPATH /etc/wpa_supplicant/
if [ -f $myPFXPWPATH ];
then
fuECHO "### Setting up 802.1x password."
myPFXPW=$(cat $myPFXPWPATH)
fi
myPFXHOSTID=$(cat $myPFXHOSTIDPATH)
tee -a /etc/network/interfaces <<EOF
wpa-driver wired
wpa-conf /etc/wpa_supplicant/wired8021x.conf
### Example wireless config for 802.1x
### This configuration was tested with the IntelNUC series
### If problems occur you can try and change wpa-driver to "iwlwifi"
### Do not forget to enter a ssid in /etc/wpa_supplicant/wireless8021x.conf
#
#auto wlan0
#iface wlan0 inet dhcp
# wpa-driver wext
# wpa-conf /etc/wpa_supplicant/wireless8021x.conf
EOF
tee /etc/wpa_supplicant/wired8021x.conf <<EOF
ctrl_interface=/var/run/wpa_supplicant
ctrl_interface_group=root
eapol_version=1
ap_scan=1
network={
key_mgmt=IEEE8021X
eap=TLS
identity="host/$myPFXHOSTID"
private_key="/etc/wpa_supplicant/8021x.pfx"
private_key_passwd="$myPFXPW"
}
EOF
tee /etc/wpa_supplicant/wireless8021x.conf <<EOF
ctrl_interface=/var/run/wpa_supplicant
ctrl_interface_group=root
eapol_version=1
ap_scan=1
network={
ssid="<your_ssid_here_without_brackets>"
key_mgmt=WPA-EAP
pairwise=CCMP
group=CCMP
eap=TLS
identity="host/$myPFXHOSTID"
private_key="/etc/wpa_supplicant/8021x.pfx"
private_key_passwd="$myPFXPW"
}
EOF
fi
# Let's provide a wireless example config ...
fuECHO "### Providing a wireless example config."
tee -a /etc/network/interfaces <<EOF
### Example wireless config without 802.1x
### This configuration was tested with the IntelNUC series
### If problems occur you can try and change wpa-driver to "iwlwifi"
#
#auto wlan0
#iface wlan0 inet dhcp
# wpa-driver wext
# wpa-ssid <your_ssid_here_without_brackets>
# wpa-ap-scan 1
# wpa-proto RSN
# wpa-pairwise CCMP
# wpa-group CCMP
# wpa-key-mgmt WPA-PSK
# wpa-psk "<your_password_here_without_brackets>"
EOF
# Let's modify the sources list
sed -i '/cdrom/d' /etc/apt/sources.list
# Let's pull some updates
fuECHO "### Pulling Updates."
apt-get update -y
fuECHO "### Installing Upgrades."
apt-get dist-upgrade -y
# Let's install docker
fuECHO "### Installing docker."
wget -qO- https://get.docker.com/gpg | apt-key add -
wget -qO- https://get.docker.com/ | sh
# Let's add proxy settings to docker defaults
if [ -f $myPROXYFILEPATH ];
then fuECHO "### Setting up the proxy for docker."
myPROXY=$(cat $myPROXYFILEPATH)
tee -a /etc/default/docker <<EOF
export http_proxy=$myPROXY
export https_proxy=$myPROXY
export HTTP_PROXY=$myPROXY
export HTTPS_PROXY=$myPROXY
export no_proxy=localhost,127.0.0.1,.sock
EOF
fi
# Let's add a new user
fuECHO "### Adding new user."
addgroup --gid 2000 tpot
adduser --system --no-create-home --uid 2000 --disabled-password --disabled-login --gid 2000 tpot
# Let's set the hostname
fuECHO "### Setting a new hostname."
myHOST=ce$(date +%s)$RANDOM
hostnamectl set-hostname $myHOST
sed -i 's#127.0.1.1.*#127.0.1.1\t'"$myHOST"'#g' /etc/hosts
# Let's patch sshd_config
fuECHO "### Patching sshd_config to listen on port 64295 and deny password authentication."
sed -i 's#Port 22#Port 64295#' /etc/ssh/sshd_config
sed -i 's#\#PasswordAuthentication yes#PasswordAuthentication no#' /etc/ssh/sshd_config
# Let's disable ssh service
echo "manual" >> /etc/init/ssh.override
# Let's patch docker defaults, so we can run images as service
fuECHO "### Patching docker defaults."
tee -a /etc/default/docker <<EOF
DOCKER_OPTS="-r=false"
EOF
# Let's make sure only myFLAVOR images will be downloaded and started
if [ "$myFLAVOR" = "SENSOR" ]
then
cp /root/tpot/data/sensor_images.conf /root/tpot/data/images.conf
echo "manual" >> /etc/init/suricata.override
echo "manual" >> /etc/init/elk.override
else
cp /root/tpot/data/full_images.conf /root/tpot/data/images.conf
fi
# Let's load docker images
fuECHO "### Loading docker images. Please be patient, this may take a while."
if [ -d /root/tpot/images ];
then
fuECHO "### Found cached images and will load from local."
for name in $(cat /root/tpot/data/images.conf)
do
fuECHO "### Now loading dtagdevsec/$name:latest1603"
docker load -i /root/tpot/images/$name:latest1603.img
done
else
for name in $(cat /root/tpot/data/images.conf)
do
docker pull dtagdevsec/$name:latest1603
done
fi
# Let's add the daily update check with a weekly clean interval
fuECHO "### Modifying update checks."
tee /etc/apt/apt.conf.d/10periodic <<EOF
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Download-Upgradeable-Packages "0";
APT::Periodic::AutocleanInterval "7";
EOF
# Let's make sure to reboot the system after a kernel panic
fuECHO "### Reboot after kernel panic."
tee -a /etc/sysctl.conf <<EOF
# Reboot after kernel panic, check via /proc/sys/kernel/panic[_on_oops]
kernel.panic = 1
kernel.panic_on_oops = 1
EOF
# Let's add some cronjobs
fuECHO "### Adding cronjobs."
tee -a /etc/crontab <<EOF
# Show running containers every 60s via /dev/tty2
*/2 * * * * root status.sh > /dev/tty2
# Check if containers and services are up
*/5 * * * * root check.sh
# Check if updated images are available and download them
27 1 * * * root for i in \$(cat /data/images.conf); do docker pull dtagdevsec/\$i:latest1603; done
# Restart docker service and containers
27 3 * * * root dcres.sh
# Delete elastic indices older than 30 days
27 4 * * * root docker exec elk bash -c '/usr/local/bin/curator --host 127.0.0.1 delete --older-than 30'
# Update IP and erase check.lock if it exists
27 15 * * * root /etc/rc.local
# Check for updated packages every sunday, upgrade and reboot
27 16 * * 0 root sleep \$((RANDOM %600)); apt-get autoclean -y; apt-get autoremove -y; apt-get update -y; apt-get upgrade -y; apt-get upgrade docker-engine -y; sleep 5; reboot
EOF
# Let's take care of some files and permissions before copying
chmod 500 /root/tpot/bin/*
chmod 600 /root/tpot/data/*
chmod 644 /root/tpot/etc/issue
chmod 755 /root/tpot/etc/rc.local
chmod 700 /root/tpot/home/*
chown tsec:tsec /root/tpot/home/*
chmod 644 /root/tpot/upstart/*
# Let's create some files and folders
fuECHO "### Creating some files and folders."
mkdir -p /data/ews/log /data/ews/conf /data/elk/data /data/elk/log /home/tsec/.ssh/
# Let's copy some files
cp -R /root/tpot/bin/* /usr/bin/
cp -R /root/tpot/data/* /data/
cp -R /root/tpot/etc/issue /etc/
cp -R /root/tpot/home/* /home/tsec/
cp -R /root/tpot/upstart/* /etc/init/
cp /root/tpot/keys/authorized_keys /home/tsec/.ssh/authorized_keys
# Let's take care of some files and permissions
chmod 760 -R /data
chown tpot:tpot -R /data
chmod 600 /home/tsec/.ssh/authorized_keys
chown tsec:tsec /home/tsec/*.sh /home/tsec/.ssh /home/tsec/.ssh/authorized_keys
# Let's clean up apt
apt-get autoclean -y
apt-get autoremove -y
# Let's replace "quiet splash" options, set a console font for more screen canvas and update grub
sed -i 's#GRUB_CMDLINE_LINUX_DEFAULT="quiet splash"#GRUB_CMDLINE_LINUX_DEFAULT="consoleblank=0"#' /etc/default/grub
sed -i 's#\#GRUB_GFXMODE=640x480#GRUB_GFXMODE=800x600x32#' /etc/default/grub
tee -a /etc/default/grub <<EOF
GRUB_GFXPAYLOAD=800x600x32
GRUB_GFXPAYLOAD_LINUX=800x600x32
EOF
update-grub
cp /usr/share/consolefonts/Uni2-Terminus12x6.psf.gz /etc/console-setup/
gunzip /etc/console-setup/Uni2-Terminus12x6.psf.gz
sed -i 's#FONTFACE=".*#FONTFACE="Terminus"#' /etc/default/console-setup
sed -i 's#FONTSIZE=".*#FONTSIZE="12x6"#' /etc/default/console-setup
update-initramfs -u
# Final steps
fuECHO "### Thanks for your patience. Now rebooting."
mv /root/tpot/etc/rc.local /etc/rc.local && rm -rf /root/tpot/ && chage -d 0 tsec && sleep 2 && reboot

View File

@ -1,18 +0,0 @@
#!/bin/bash
#############################################################
# T-Pot Community Edition - disable splash boot #
# and consoleblank permanently #
# Ubuntu server 14.04.1, x64 #
# #
# v0.12 by mo, DTAG, 2015-02-15 #
#############################################################
# Let's replace "quiet splash" options and update grub
sed -i 's#GRUB_CMDLINE_LINUX_DEFAULT="quiet splash"#GRUB_CMDLINE_LINUX_DEFAULT="consoleblank=0"#' /etc/default/grub
sed -i 's#\#GRUB_GFXMODE=640x480#GRUB_GFXMODE=800x600#' /etc/default/grub
update-grub
sed -i 's#FONTFACE=".*#FONTFACE="Terminus"#' /etc/default/console-setup
sed -i 's#FONTSIZE=".*#FONTSIZE="12x6"#' /etc/default/console-setup
# Let's move the install script to rc.local and reboot
mv /root/tpotce/install2.sh /etc/rc.local && sleep 2 && reboot

View File

@ -1,152 +0,0 @@
#!/bin/bash
########################################################
# T-Pot Community Edition post install script #
# Ubuntu server 14.04, x64 #
# #
# v0.49 by mo, DTAG, 2015-08-14 #
########################################################
# Let's make sure there is a warning if running for a second time
if [ -f install.log ];
then fuECHO "### Running more than once may complicate things. Erase install.log if you are really sure."
exit 1;
fi
# Let's log for the beauty of it
set -e
exec 2> >(tee "install.err")
exec > >(tee "install.log")
# Let's create a function for colorful output
fuECHO () {
local myRED=1
local myWHT=7
tput setaf $myRED
echo $1 "$2"
tput setaf $myWHT
}
# Let's modify the sources list
sed -i '/cdrom/d' /etc/apt/sources.list
# Let's pull some updates
fuECHO "### Pulling Updates."
apt-get update -y
fuECHO "### Installing Upgrades."
apt-get dist-upgrade -y
# Let's install docker
fuECHO "### Installing docker."
wget -qO- https://get.docker.com/gpg | apt-key add -
wget -qO- https://get.docker.com/ | sh
# Let's install all the packages we need
fuECHO "### Installing packages."
apt-get install curl ethtool git ntp libpam-google-authenticator vim -y
# Let's add a new user
fuECHO "### Adding new user."
addgroup --gid 2000 tpot
adduser --system --no-create-home --uid 2000 --disabled-password --disabled-login --gid 2000 tpot
# Let's set the hostname
fuECHO "### Setting a new hostname."
myHOST=ce$(date +%s)$RANDOM
hostnamectl set-hostname $myHOST
sed -i 's#127.0.1.1.*#127.0.1.1\t'"$myHOST"'#g' /etc/hosts
# Let's patch sshd_config
fuECHO "### Patching sshd_config to listen on port 64295 and deny password authentication."
sed -i 's#Port 22#Port 64295#' /etc/ssh/sshd_config
sed -i 's#\#PasswordAuthentication yes#PasswordAuthentication no#' /etc/ssh/sshd_config
# Let's disable ssh service
echo "manual" >> /etc/init/ssh.override
# Let's patch docker defaults, so we can run images as service
fuECHO "### Patching docker defaults."
tee -a /etc/default/docker <<EOF
DOCKER_OPTS="-r=false"
EOF
# Let's load docker images from remote
fuECHO "### Downloading docker images from DockerHub. Please be patient, this may take a while."
for name in $(cat /root/tpotce/data/images.conf)
do
docker pull dtagdevsec/$name
done
# Let's add the daily update check with a weekly clean interval
fuECHO "### Modifying update checks."
tee /etc/apt/apt.conf.d/10periodic <<EOF
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Download-Upgradeable-Packages "0";
APT::Periodic::AutocleanInterval "7";
EOF
# Let's wait no longer for network than 60 seconds
fuECHO "### Wait no longer for network than 60 seconds."
sed -i.bak 's#sleep 60#sleep 30#' /etc/init/failsafe.conf
# Let's make sure to reboot the system after a kernel panic
fuECHO "### Reboot after kernel panic."
tee -a /etc/sysctl.conf <<EOF
# Reboot after kernel panic, check via /proc/sys/kernel/panic[_on_oops]
kernel.panic = 1
kernel.panic_on_oops = 1
EOF
# Let's add some cronjobs
fuECHO "### Adding cronjobs."
tee -a /etc/crontab <<EOF
# Show running containers every 60s via /dev/tty2
*/2 * * * * root /usr/bin/status.sh > /dev/tty2
# Check if containers and services are up
*/5 * * * * root /usr/bin/check.sh
# Check if updated images are available and download them
27 1 * * * root for i in \$(cat /data/images.conf); do /usr/bin/docker pull dtagdevsec/\$i:latest; done
# Restart docker service and containers
27 3 * * * root /usr/bin/dcres.sh
# Delete elastic indices older than 30 days
27 4 * * * root /usr/bin/docker exec elk bash -c '/usr/local/bin/curator --host 127.0.0.1 delete --older-than 30'
# Update IP and erase check.lock if it exists
27 15 * * * root /etc/rc.local
# Check for updated packages every sunday, upgrade and reboot
27 16 * * 0 root sleep \$((RANDOM %600)); apt-get autoclean -y; apt-get autoremove -y; apt-get update -y; apt-get upgrade -y; apt-get upgrade docker-engine -y; sleep 5; reboot
EOF
# Let's take care of some files and permissions
chmod 500 /root/tpotce/bin/*
chmod 600 /root/tpotce/data/*
chmod 644 /root/tpotce/etc/issue
chmod 755 /root/tpotce/etc/rc.local
chmod 700 /root/tpotce/home/*
chown tsec:tsec /root/tpotce/home/*
chmod 644 /root/tpotce/upstart/*
# Let's create some files and folders
fuECHO "### Creating some files and folders."
mkdir -p /data/ews/log /data/ews/conf /data/elk/data /data/elk/log
# Let's move some files
cp -R /root/tpotce/bin/* /usr/bin/
cp -R /root/tpotce/data/* /data/
cp -R /root/tpotce/etc/issue /etc/
cp -R /root/tpotce/home/* /home/tsec/
cp -R /root/tpotce/upstart/* /etc/init/
# Let's take care of some files and permissions
chmod 660 -R /data
chown tpot:tpot -R /data
chown tsec:tsec /home/tsec/*.sh
# Final steps
fuECHO "### Thanks for your patience. Now rebooting."
mv /root/tpotce/etc/rc.local /etc/rc.local && rm -rf /root/tpotce/ && chage -d 0 tsec && sleep 2 && reboot

View File

@ -0,0 +1 @@

View File

@ -0,0 +1,4 @@
#!/bin/bash
# Boot-time wrapper: hand the console over to the interactive T-Pot installer.
# Stop plymouth to allow for terminal interaction
plymouth quit
# Run the installer on a fresh virtual terminal:
#   -w waits for the command to finish, -s switches the display to the
#   new terminal first so the installer output is visible on the console.
openvt -w -s /root/tpot/install.sh

View File

@ -0,0 +1,24 @@
########################################################
# T-Pot #
# Cowrie upstart script #
# #
# v0.04 by av, DTAG, 2015-10-07 #
########################################################
description "cowrie"
author "av"
# Start once docker and the filesystem are up; stop outside multi-user
# runlevels. respawn restarts the container job if it dies.
start on started docker and filesystem
stop on runlevel [!2345]
respawn
pre-start script
# Remove any existing cowrie containers
# NOTE(review): grep matches any listing line containing "cowrie"
# (image name or container name), not only this job's container —
# confirm no other container name can collide.
myCID=$(docker ps -a | grep cowrie | awk '{ print $1 }')
if [ "$myCID" != "" ];
then docker rm $myCID;
fi
end script
script
# Delayed start to avoid rapid respawning
sleep $(((RANDOM % 5)+5))
# Expose the SSH honeypot: host port 22 maps to the container's 2222.
# --rm=true discards the container on exit; logs persist via the
# /data bind mount.
/usr/bin/docker run --name cowrie --rm=true -p 22:2222 -v /data:/data dtagdevsec/cowrie:latest1603
end script

View File

@ -1,13 +1,13 @@
########################################################
# T-Pot Community Edition #
# T-Pot #
# Dionaea upstart script #
# #
# v0.53 by mo, DTAG, 2015-11-02 #
# v0.04 by mo, DTAG, 2015-12-08 #
########################################################
description "Dionaea"
author "mo"
start on (started docker and filesystem)
start on started docker and filesystem
stop on runlevel [!2345]
respawn
pre-start script
@ -20,7 +20,7 @@ end script
script
# Delayed start to avoid rapid respawning
sleep $(((RANDOM % 5)+5))
/usr/bin/docker run --name dionaea --cap-add=NET_BIND_SERVICE --rm=true -p 21:21 -p 42:42 -p 8080:80 -p 135:135 -p 443:443 -p 445:445 -p 1433:1433 -p 3306:3306 -p 5061:5061 -p 5060:5060 -p 69:69/udp -p 5060:5060/udp -v /data/dionaea dtagdevsec/dionaea
/usr/bin/docker run --name dionaea --cap-add=NET_BIND_SERVICE --rm=true -p 21:21 -p 42:42 -p 8080:80 -p 135:135 -p 443:443 -p 445:445 -p 1433:1433 -p 3306:3306 -p 5061:5061 -p 5060:5060 -p 69:69/udp -p 5060:5060/udp -v /data:/data dtagdevsec/dionaea:latest1603
end script
post-start script
sleep $(((RANDOM % 5)+5))

View File

@ -1,13 +1,13 @@
########################################################
# T-Pot Community Edition #
# T-Pot #
# ELK upstart script #
# #
# v0.53 by mo, DTAG, 2015-11-02 #
# v0.04 by mo, DTAG, 2015-12-08 #
########################################################
description "ELK"
author "mo"
start on (started docker and filesystem and started ews and started dionaea and started glastopf and started honeytrap and started kippo and started suricata)
start on started docker and filesystem
stop on runlevel [!2345]
respawn
pre-start script
@ -20,7 +20,7 @@ end script
script
# Delayed start to avoid rapid respawning
sleep $(((RANDOM % 5)+5))
/usr/bin/docker run --name=elk --volumes-from ews --volumes-from suricata -v /data/elk/:/data/elk/ -p 127.0.0.1:64296:8080 --rm=true dtagdevsec/elk
/usr/bin/docker run --name=elk -v /data:/data -p 127.0.0.1:64296:8080 --rm=true dtagdevsec/elk:latest1603
end script
post-start script
sleep $(((RANDOM % 5)+5))

View File

@ -1,27 +0,0 @@
########################################################
# T-Pot Community Edition #
# EWS upstart script #
# #
# v0.53 by mo, DTAG, 2015-11-02 #
########################################################
description "EWS"
author "mo"
# EWS aggregates the honeypot logs, so it only starts after docker, the
# filesystem and all honeypot jobs it reads from have started.
start on (started docker and filesystem and started dionaea and started glastopf and started honeytrap and started kippo)
stop on runlevel [!2345]
respawn
pre-start script
# Remove any existing ews containers
# NOTE(review): grep matches any listing line containing "ews" —
# confirm no other container/image name can collide.
myCID=$(docker ps -a | grep ews | awk '{ print $1 }')
if [ "$myCID" != "" ];
then docker rm -v $myCID;
fi
end script
script
# Delayed start to avoid rapid respawning
sleep $(((RANDOM % 5)+5))
# Mount the honeypot containers' data volumes read access via
# --volumes-from, bind the ews config from the host, and link kippo so
# ews can resolve it by hostname. --rm=true discards the container on exit.
/usr/bin/docker run --name ews --volumes-from dionaea --volumes-from glastopf --volumes-from honeytrap --volumes-from kippo --rm=true -v /data/ews/conf/:/data/ews/conf/ -v /data/ews/ --link kippo:kippo dtagdevsec/ews
end script
post-start script
sleep $(((RANDOM % 5)+5))
end script

View File

@ -1,13 +1,13 @@
########################################################
# T-Pot Community Edition #
# T-Pot #
# Glastopf upstart script #
# #
# v0.53 by mo, DTAG, 2015-11-02 #
# v0.04 by mo, DTAG, 2015-12-08 #
########################################################
description "Glastopf"
author "mo"
start on (started docker and filesystem)
start on started docker and filesystem
stop on runlevel [!2345]
respawn
pre-start script
@ -20,7 +20,7 @@ end script
script
# Delayed start to avoid rapid respawning
sleep $(((RANDOM % 5)+5))
/usr/bin/docker run --name glastopf --rm=true -p 80:80 -v /data/glastopf dtagdevsec/glastopf
/usr/bin/docker run --name glastopf --rm=true -v /data:/data -p 80:80 dtagdevsec/glastopf:latest1603
end script
post-start script
sleep $(((RANDOM % 5)+5))

View File

@ -1,8 +1,8 @@
########################################################
# T-Pot Community Edition #
# T-Pot #
# Honeytrap upstart script #
# #
# v0.53 by mo, DTAG, 2015-11-02 #
# v0.04 by mo, DTAG, 2015-12-08 #
########################################################
description "Honeytrap"
@ -21,7 +21,7 @@ end script
script
# Delayed start to avoid rapid respawning
sleep $(((RANDOM % 5)+5))
/usr/bin/docker run --name honeytrap --cap-add=NET_ADMIN --net=host --rm=true -v /data/honeytrap dtagdevsec/honeytrap
/usr/bin/docker run --name honeytrap --cap-add=NET_ADMIN --net=host --rm=true -v /data:/data dtagdevsec/honeytrap:latest1603
end script
post-start script
sleep $(((RANDOM % 5)+5))

View File

@ -1,27 +0,0 @@
########################################################
# T-Pot Community Edition #
# Kippo upstart script #
# #
# v0.53 by mo, DTAG, 2015-11-02 #
########################################################
description "Kippo"
author "mo"
# Start once docker and the filesystem are up; stop outside multi-user
# runlevels. respawn restarts the container job if it dies.
start on (started docker and filesystem)
stop on runlevel [!2345]
respawn
pre-start script
# Remove any existing kippo containers
# NOTE(review): grep matches any listing line containing "kippo" —
# confirm no other container/image name can collide.
myCID=$(docker ps -a | grep kippo | awk '{ print $1 }')
if [ "$myCID" != "" ];
then docker rm -v $myCID;
fi
end script
script
# Delayed start to avoid rapid respawning
sleep $(((RANDOM % 5)+5))
# Expose the SSH honeypot: host port 22 maps to the container's 2222.
# The anonymous /data/kippo volume holds the honeypot's state/logs.
/usr/bin/docker run --name kippo --rm=true -p 22:2222 -v /data/kippo dtagdevsec/kippo
end script
post-start script
sleep $(((RANDOM % 5)+5))
end script

View File

@ -1,8 +1,8 @@
########################################################
# T-Pot Community Edition #
# T-Pot #
# Suricata upstart script #
# #
# v0.53 by mo, DTAG, 2015-11-02 #
# v0.04 by mo, DTAG, 2015-12-08 #
########################################################
description "Suricata"
@ -24,7 +24,7 @@ end script
script
# Delayed start to avoid rapid respawning
sleep $(((RANDOM % 5)+5))
/usr/bin/docker run --name suricata --cap-add=NET_ADMIN --net=host --rm=true -v /data/suricata/ dtagdevsec/suricata
/usr/bin/docker run --name suricata --cap-add=NET_ADMIN --net=host --rm=true -v /data:/data dtagdevsec/suricata:latest1603
end script
post-start script
sleep $(((RANDOM % 5)+5))