8 Commits

SHA1 Message Date
0197b8b1df Update config.ini.TEMPLATE to support non-interactive commands
Oops, I forgot to commit this before.
2025-02-04 14:55:02 -05:00
5f27aeeabb Correctly handle both interactive and non-interactive SSH sessions
SSH servers can take user commands from an interactive session as normal, but users can also include commands on the ssh client command line, which are then executed on the server (e.g., "ssh <hostname> 'uname -a'"). We now execute these non-interactive commands properly as well.

Also added a new "interactive" flag (true/false) to all logged user commands to show which type of command execution each was.
2025-02-04 12:29:12 -05:00
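A minimal sketch of the distinction this commit implements. asyncssh really does expose the remote command via process.command (None when the client requested an interactive shell); the handle() and respond() helpers below are hypothetical stand-ins, not the project's code:

```python
import asyncssh

async def respond(process: asyncssh.SSHServerProcess, cmd: str, interactive: bool) -> None:
    # Hypothetical stand-in for the LLM round-trip; just echoes the command.
    process.stdout.write(f"[interactive={interactive}] would emulate: {cmd}\n")

async def handle(process: asyncssh.SSHServerProcess) -> None:
    if process.command:
        # Non-interactive: the client ran e.g.  ssh <hostname> 'uname -a'
        await respond(process, process.command, interactive=False)
        process.exit(0)
    else:
        # Interactive session: read commands line by line from stdin.
        async for line in process.stdin:
            await respond(process, line.rstrip('\n'), interactive=True)
```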
585ee66009 Don't print ConnectionLost exceptions to the console
These are far too frequent. We still log them, though; we just don't print them.
2025-01-28 10:48:29 -05:00
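A self-contained sketch of that behavior (the logger setup is illustrative; the isinstance check mirrors the diff further down): ConnectionLost is logged but produces no console traceback, while any other exception is both logged and printed.

```python
import logging
import traceback
from typing import Optional

from asyncssh.misc import ConnectionLost

logger = logging.getLogger("honeypot")  # illustrative logger name

def on_connection_lost(exc: Optional[Exception]) -> None:
    if exc:
        # Always log the error...
        logger.error("SSH connection error", extra={"error": str(exc)})
        # ...but only print a traceback for unexpected exception types.
        if not isinstance(exc, ConnectionLost):
            traceback.print_exception(exc)  # single-arg form needs Python 3.10+
    else:
        logger.info("SSH connection closed")
```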
7be73a7dff Make peername and sockname calls more robust across platforms
For whatever reason, macOS returns 4 values from conn.get_extra_info('peername') and conn.get_extra_info('sockname'), but Linux systems return only 2. On the Mac, it's only the first two that we need anyway. Now we retrieve them all, no matter how many there are, and use just the first two, so it works on both platforms.
2025-01-28 10:39:12 -05:00
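The 4-value form matches Python's IPv6 socket address tuple, (host, port, flowinfo, scope_id), which is the likely reason macOS reports it; IPv4 addresses are plain (host, port) 2-tuples. A quick illustration with made-up values:

```python
# IPv4 sockaddr is a 2-tuple; IPv6 sockaddr is (host, port, flowinfo, scope_id).
for peername in [('203.0.113.7', 52114), ('2001:db8::7', 52114, 0, 0)]:
    src_ip, src_port = peername[:2]  # slicing works for either length
    print(src_ip, src_port)
```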
788bd26845 Now print exceptions to console when SSH connection is lost 2025-01-28 10:21:27 -05:00
cea5dc28a2 New command line options for prompts and config files.
* --prompt-file to specify a file from which to read the prompt.
* --prompt to specify a prompt string on the command line.
* --config to specify an alternate config file (example invocations below).
2025-01-27 13:20:41 -05:00
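Hypothetical invocations showing the new flags (the script name ssh_server.py and all paths here are assumed for illustration):

```sh
python3 ssh_server.py --config /etc/deceive/config.ini
python3 ssh_server.py --prompt "You are a production Ubuntu 22.04 web server."
python3 ssh_server.py --prompt-file prompts/ubuntu-web.txt
```

Per get_prompts() in the diff below, --prompt takes precedence over --prompt-file when both are given.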
545d50f294 Added DECEIVE image to README 2025-01-23 11:16:53 -05:00
32441dc4c0 Merge pull request #1 from splunk/user-system-prompt
Streamline the prompting
2025-01-17 19:37:52 +00:00
4 changed files with 167 additions and 98 deletions

DECEIVE.png: new binary file, 600 KiB (image not shown)

README.md

@@ -1,5 +1,7 @@
# DECEIVE
+<img align="right" src="DECEIVE.png" alt="A cybercriminal interacts with a ghostly, AI-driven honeypot system">
DECEIVE, the **DECeption with Evaluative Integrated Validation Engine**, is a high-interaction, low-effort honeypot system. Unlike most high-interaction honeypots, DECEIVE doesn't provide attackers with access to any actual system. An AI does all the work of simulating a realistic system, based on a configurable system prompt that describes what type of system you want to simulate. Unlike many other high-interaction honeypots, which require substantial effort to seed with realistic users, data, and applications, DECEIVE's AI backend does all this for you, automatically.
This version of DECEIVE simulates a Linux server via the SSH protocol. It logs all user input and the output returned by the LLM backend, as well as a summary of each session after it ends. It'll even tell you if it thinks a user's session was benign, suspicious, or outright malicious.
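To illustrate that logging, here is the shape of one JSON log record, using the field names visible in JSONFormatter later in this diff; every value is invented (the details string is base64 for "uname -a"):

```json
{
  "src_ip": "203.0.113.7",
  "src_port": 52114,
  "dst_ip": "192.0.2.10",
  "dst_port": 2222,
  "message": "User input",
  "details": "dW5hbWUgLWE=",
  "interactive": true
}
```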

config.ini.TEMPLATE

@@ -51,10 +51,11 @@ system_prompt = Interpret all inputs as though they were SSH commands and provide
responses do not indicate that the user is on a virtual machine, container,
or emulation at all.
-On the first call, be sure to include a realistic MOTD.
+If {interactive} is True, make sure to include a realistic MOTD on the
+first call, and to end all responses with a realistic shell prompt to display to
+the user, including a space at the end.
-End all responses with a realistic shell prompt to display to the user,
-including a space at the end.
+If {interactive} is False, do not include a shell prompt or MOTD.
Include ANSI color codes for the terminal with the output of ls commands
(including any flags), or in any other situation where it is appropriate, but
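A sketch of how an {interactive} placeholder like the one above gets filled at call time. LangChain's ChatPromptTemplate treats {interactive} as an input variable, so the True/False value passed to invoke()/ainvoke() (as in the Python diff below) lands directly in the rendered system prompt; the names here are illustrative:

```python
from langchain_core.messages import HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

# System text containing the placeholder, as in config.ini.TEMPLATE above.
system_text = (
    "If {interactive} is True, include a MOTD and end with a shell prompt. "
    "If {interactive} is False, do not include a shell prompt or MOTD."
)

prompt = ChatPromptTemplate.from_messages([
    ("system", system_text),
    MessagesPlaceholder(variable_name="messages"),
])

# Render without calling any LLM: the flag fills the placeholder.
rendered = prompt.invoke({
    "messages": [HumanMessage(content="uname -a")],
    "interactive": True,
})
print(rendered.messages[0].content)  # system prompt with "True" substituted
```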

SSH server script (Python)

@@ -1,11 +1,14 @@
#!/usr/bin/env python3
from configparser import ConfigParser
+import argparse
import asyncio
import asyncssh
import threading
import sys
import json
import os
+import traceback
+from typing import Optional
import logging
import datetime
@@ -20,6 +23,7 @@ from langchain_core.chat_history import BaseChatMessageHistory, InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnablePassthrough
+from asyncssh.misc import ConnectionLost
class JSONFormatter(logging.Formatter):
def format(self, record):
@@ -33,6 +37,8 @@ class JSONFormatter(logging.Formatter):
"dst_port": record.dst_port,
"message": record.getMessage()
}
+if hasattr(record, 'interactive'):
+log_record["interactive"] = record.interactive
# Include any additional fields from the extra dictionary
for key, value in record.__dict__.items():
if key not in log_record and key != 'args' and key != 'msg':
@@ -46,8 +52,18 @@ class MySSHServer(asyncssh.SSHServer):
def connection_made(self, conn: asyncssh.SSHServerConnection) -> None:
# Get the source and destination IPs and ports
-(src_ip, src_port, _, _) = conn.get_extra_info('peername')
-(dst_ip, dst_port, _, _) = conn.get_extra_info('sockname')
+peername = conn.get_extra_info('peername')
+sockname = conn.get_extra_info('sockname')
+if peername is not None:
+src_ip, src_port = peername[:2]
+else:
+src_ip, src_port = '-', '-'
+if sockname is not None:
+dst_ip, dst_port = sockname[:2]
+else:
+dst_ip, dst_port = '-', '-'
# Store the connection details in thread-local storage
thread_local.src_ip = src_ip
@@ -61,6 +77,8 @@
def connection_lost(self, exc: Optional[Exception]) -> None:
if exc:
logger.error('SSH connection error', extra={"error": str(exc)})
+if not isinstance(exc, ConnectionLost):
+traceback.print_exception(exc)
else:
logger.info("SSH connection closed")
# Ensure session summary is called on connection loss if attributes are set
@@ -134,7 +152,8 @@ representative examples.
llm_response = await session.ainvoke(
{
"messages": [HumanMessage(content=prompt)],
"username": process.get_extra_info('username')
"username": process.get_extra_info('username'),
"interactive": True # Ensure interactive flag is passed
},
config=llm_config
)
@@ -162,32 +181,47 @@ async def handle_client(process: asyncssh.SSHServerProcess, server: MySSHServer)
llm_config = {"configurable": {"session_id": task_uuid}}
try:
+if process.command:
+# Handle non-interactive command execution
+command = process.command
+logger.info("User input", extra={"details": b64encode(command.encode('utf-8')).decode('utf-8'), "interactive": False})
+llm_response = await with_message_history.ainvoke(
+{
+"messages": [HumanMessage(content=command)],
+"username": process.get_extra_info('username'),
+"interactive": False
+},
+config=llm_config
+)
+process.stdout.write(f"{llm_response.content}")
+logger.info("LLM response", extra={"details": b64encode(llm_response.content.encode('utf-8')).decode('utf-8'), "interactive": False})
+await session_summary(process, llm_config, with_message_history, server)
+process.exit(0)
+else:
+# Handle interactive session
llm_response = await with_message_history.ainvoke(
{
"messages": [HumanMessage(content="ignore this message")],
"username": process.get_extra_info('username')
"username": process.get_extra_info('username'),
"interactive": True
},
config=llm_config
)
process.stdout.write(f"{llm_response.content}")
logger.info("LLM response", extra={"details": b64encode(llm_response.content.encode('utf-8')).decode('utf-8')})
logger.info("LLM response", extra={"details": b64encode(llm_response.content.encode('utf-8')).decode('utf-8'), "interactive": True})
# Store process, llm_config, and session in the MySSHServer instance
server._process = process
server._llm_config = llm_config
server._session = with_message_history
try:
async for line in process.stdin:
line = line.rstrip('\n')
logger.info("User input", extra={"details": b64encode(line.encode('utf-8')).decode('utf-8')})
logger.info("User input", extra={"details": b64encode(line.encode('utf-8')).decode('utf-8'), "interactive": True})
# Send the command to the LLM and give the response to the user
llm_response = await with_message_history.ainvoke(
{
"messages": [HumanMessage(content=line)],
"username": process.get_extra_info('username')
"username": process.get_extra_info('username'),
"interactive": True
},
config=llm_config
)
@@ -197,7 +231,7 @@ async def handle_client(process: asyncssh.SSHServerProcess, server: MySSHServer)
return
else:
process.stdout.write(f"{llm_response.content}")
logger.info("LLM response", extra={"details": b64encode(llm_response.content.encode('utf-8')).decode('utf-8')})
logger.info("LLM response", extra={"details": b64encode(llm_response.content.encode('utf-8')).decode('utf-8'), "interactive": True})
except asyncssh.BreakReceived:
pass
@@ -285,10 +319,24 @@ def choose_llm():
return llm_model
-def get_prompts() -> dict:
+def get_prompts(prompt: Optional[str], prompt_file: Optional[str]) -> dict:
system_prompt = config['llm']['system_prompt']
+if prompt is not None:
+if not prompt.strip():
+print("Error: The prompt text cannot be empty.", file=sys.stderr)
+sys.exit(1)
+user_prompt = prompt
+elif prompt_file:
+if not os.path.exists(prompt_file):
+print(f"Error: The specified prompt file '{prompt_file}' does not exist.", file=sys.stderr)
+sys.exit(1)
+with open(prompt_file, "r") as f:
+user_prompt = f.read()
elif os.path.exists("prompt.txt"):
with open("prompt.txt", "r") as f:
user_prompt = f.read()
else:
raise ValueError("Either prompt or prompt_file must be provided.")
return {
"system_prompt": system_prompt,
"user_prompt": user_prompt
@@ -296,12 +344,25 @@ def get_prompts() -> dict:
#### MAIN ####
try:
+# Parse command line arguments
+parser = argparse.ArgumentParser(description='Start the SSH honeypot server.')
+parser.add_argument('-c', '--config', type=str, default='config.ini', help='Path to the configuration file')
+parser.add_argument('-p', '--prompt', type=str, help='The entire text of the prompt')
+parser.add_argument('-f', '--prompt-file', type=str, default='prompt.txt', help='Path to the prompt file')
+args = parser.parse_args()
+# Check if the config file exists
+if not os.path.exists(args.config):
+print(f"Error: The specified config file '{args.config}' does not exist.", file=sys.stderr)
+sys.exit(1)
# Always use UTC for logging
logging.Formatter.formatTime = (lambda self, record, datefmt=None: datetime.datetime.fromtimestamp(record.created, datetime.timezone.utc).isoformat(sep="T",timespec="milliseconds"))
# Read our configuration file
config = ConfigParser()
config.read("config.ini")
config.read(args.config)
# Read the user accounts from the configuration file
accounts = get_user_accounts()
@@ -320,7 +381,7 @@ logger.addFilter(f)
# Now get access to the LLM
-prompts = get_prompts()
+prompts = get_prompts(args.prompt, args.prompt_file)
llm_system_prompt = prompts["system_prompt"]
llm_user_prompt = prompts["user_prompt"]
@@ -371,3 +432,8 @@ asyncio.set_event_loop(loop)
loop.run_until_complete(start_server())
loop.run_forever()
+except Exception as e:
+print(f"Error: {e}", file=sys.stderr)
+traceback.print_exc()
+sys.exit(1)
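One piece of plumbing the hunks above only hint at (thread_local.src_ip and logger.addFilter(f)): a logging.Filter can copy the per-connection details stashed in thread-local storage onto every record, which is how JSONFormatter can emit src_ip/dst_ip without each log call passing them. A hedged sketch with hypothetical names, not the project's exact filter:

```python
import logging
import threading

thread_local = threading.local()

class ConnectionDetailsFilter(logging.Filter):
    # Hypothetical reconstruction of the filter added via logger.addFilter(f).
    def filter(self, record: logging.LogRecord) -> bool:
        record.src_ip = getattr(thread_local, "src_ip", "-")
        record.src_port = getattr(thread_local, "src_port", "-")
        record.dst_ip = getattr(thread_local, "dst_ip", "-")
        record.dst_port = getattr(thread_local, "dst_port", "-")
        return True  # never drop records; we only annotate them

logger = logging.getLogger("honeypot")
logger.addFilter(ConnectionDetailsFilter())
```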