3 Commits

cea5dc28a2 (2025-01-27 13:20:41 -05:00)
New command line options for prompts and config files (example usage after this list):
* --prompt-file to specify a file from which to read the prompt
* --prompt to specify a prompt string on the command line
* --config to specify an alternate config file
545d50f294 (2025-01-23 11:16:53 -05:00)
Added DECEIVE image to README

32441dc4c0 (2025-01-17 19:37:52 +00:00)
Merge pull request #1 from splunk/user-system-prompt
Streamline the prompting
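
A quick sketch of the new options in use. The script name and file paths here are invented for illustration; only the flags themselves come from the commit:

    ./ssh_server.py --config my-config.ini --prompt-file my-prompt.txt
    ./ssh_server.py --prompt "You are a lightly used internal file server."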
3 changed files with 35 additions and 5 deletions

DECEIVE.png (new binary file, 600 KiB; contents not shown)

README.md

@@ -1,5 +1,7 @@
 # DECEIVE
+<img align="right" src="DECEIVE.png" alt="A cybercriminal interacts with a ghostly, AI-driven honeypot system">
 DECEIVE, the **DECeption with Evaluative Integrated Validation Engine**, is a high-interaction, low-effort honeypot system. Unlike most high-interaction honeypots, DECEIVE doesn't provide attackers with access to any actual system. AI does all the work of simulating a realistic honeypot system, based on a configurable system prompt that describes what type of system you want to simulate. Unlike many other high-interaction honeypots, which require substantial effort to seed with realistic users, data, and applications, DECEIVE's AI backend does all this for you, automatically.
 This version of DECEIVE simulates a Linux server via the SSH protocol. It will log all user input, the output returned by the LLM backend, and a summary of each session after it ends. It'll even tell you whether it thinks a user's session was benign, suspicious, or outright malicious.

SSH honeypot server script (filename not preserved in this view)

@@ -1,11 +1,13 @@
 #!/usr/bin/env python3
 from configparser import ConfigParser
+import argparse
 import asyncio
 import asyncssh
 import threading
 import sys
 import json
+import os
 from typing import Optional
 import logging
 import datetime
@@ -285,10 +287,24 @@ def choose_llm():
     return llm_model
 
-def get_prompts() -> dict:
+def get_prompts(prompt: Optional[str], prompt_file: Optional[str]) -> dict:
     system_prompt = config['llm']['system_prompt']
-    with open("prompt.txt", "r") as f:
-        user_prompt = f.read()
+    if prompt is not None:
+        if not prompt.strip():
+            print("Error: The prompt text cannot be empty.", file=sys.stderr)
+            sys.exit(1)
+        user_prompt = prompt
+    elif prompt_file:
+        if not os.path.exists(prompt_file):
+            print(f"Error: The specified prompt file '{prompt_file}' does not exist.", file=sys.stderr)
+            sys.exit(1)
+        with open(prompt_file, "r") as f:
+            user_prompt = f.read()
+    elif os.path.exists("prompt.txt"):
+        with open("prompt.txt", "r") as f:
+            user_prompt = f.read()
+    else:
+        raise ValueError("Either prompt or prompt_file must be provided.")
     return {
         "system_prompt": system_prompt,
         "user_prompt": user_prompt
@@ -296,12 +312,24 @@ def get_prompts() -> dict:
 
 #### MAIN ####
 
+# Parse command line arguments
+parser = argparse.ArgumentParser(description='Start the SSH honeypot server.')
+parser.add_argument('-c', '--config', type=str, default='config.ini', help='Path to the configuration file')
+parser.add_argument('-p', '--prompt', type=str, help='The entire text of the prompt')
+parser.add_argument('-f', '--prompt-file', type=str, default='prompt.txt', help='Path to the prompt file')
+args = parser.parse_args()
+
+# Check if the config file exists
+if not os.path.exists(args.config):
+    print(f"Error: The specified config file '{args.config}' does not exist.", file=sys.stderr)
+    sys.exit(1)
+
 # Always use UTC for logging
 logging.Formatter.formatTime = (lambda self, record, datefmt=None: datetime.datetime.fromtimestamp(record.created, datetime.timezone.utc).isoformat(sep="T",timespec="milliseconds"))
 
 # Read our configuration file
 config = ConfigParser()
-config.read("config.ini")
+config.read(args.config)
 
 # Read the user accounts from the configuration file
 accounts = get_user_accounts()
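
One detail worth noting: ConfigParser.read() silently skips files it cannot open, so the explicit os.path.exists() check above is what turns a bad --config path into a hard error rather than a server that quietly starts with empty settings. For example (script name and path are hypothetical; the error text is taken from the patch):

    $ ./ssh_server.py -c /tmp/missing.ini
    Error: The specified config file '/tmp/missing.ini' does not exist.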
@@ -320,7 +348,7 @@ logger.addFilter(f)
 
 # Now get access to the LLM
-prompts = get_prompts()
+prompts = get_prompts(args.prompt, args.prompt_file)
 llm_system_prompt = prompts["system_prompt"]
 llm_user_prompt = prompts["user_prompt"]
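
Putting the pieces together, the parsed arguments feed straight into the new get_prompts() signature. A small sketch of that flow (the argv values and prompt text are invented for illustration):

    # Mirrors the argparse setup from the patch
    args = parser.parse_args(['--config', 'config.ini',
                              '--prompt', 'You are a lightly used internal file server.'])
    prompts = get_prompts(args.prompt, args.prompt_file)

    # The inline --prompt wins, even though --prompt-file defaulted to 'prompt.txt'
    assert prompts["user_prompt"] == 'You are a lightly used internal file server.'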