21 Commits

Author SHA1 Message Date
e2e47c4e6c Improved --config handling and user accounts from the command line
If all of the necessary options are passed as command line flags, you may no longer even need a config file. In this case, don't complain that a config file wasn't provided.  As part of this, allow the user to set the user account(s) using the -u flag.
2025-02-13 14:13:21 -05:00
87aa843116 Created command-line flags for almost all of the options specified in the config file. 2025-02-13 13:23:48 -05:00
d9ba6b3b61 Fixed server_version_string setting
The config template specified the default server version string as "SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3" but the SSH module automatically prepends "SSH-2.0-" to the beginning. This gave the version string returned to the client a potential fingerprint that could be used to easily identify DECEIVE honeypots. Updated the default value and added comments to document this behavior.
2025-02-13 13:19:21 -05:00
9844f2f59b Merge pull request #4 from Paulslewis66/main
Added Ollama Local LLMs
2025-02-12 10:05:51 -05:00
7ca56b86a5 Update config.ini.TEMPLATE
comment out llama config
2025-02-11 21:49:56 +00:00
ab6296e956 Revert "Update config.ini.TEMPLATE"
This reverts commit 2df4358356.
2025-02-11 21:44:44 +00:00
da0513f526 Revert "Update config.ini.TEMPLATE"
This reverts commit 92ad118de2.
2025-02-11 21:44:31 +00:00
2851120d67 Revert "Added LogViewer"
This reverts commit e60f33b8ea.
2025-02-11 21:44:27 +00:00
ae8c130a1b Revert "Added JSON and removed Base64"
This reverts commit 12fd8aeb70.
2025-02-11 21:44:18 +00:00
17ec8619e0 Revert "Update .gitignore"
This reverts commit f929f4b41d.
2025-02-11 21:44:11 +00:00
e9cdd22b34 Revert "Update .gitignore"
This reverts commit 95f1c5935f.
2025-02-11 21:44:00 +00:00
95f1c5935f Update .gitignore 2025-02-11 16:58:50 +00:00
f929f4b41d Update .gitignore
Added Logviewer .gitignore
2025-02-11 16:58:08 +00:00
12fd8aeb70 Added JSON and removed Base64 2025-02-09 19:17:44 +00:00
e60f33b8ea Added LogViewer 2025-02-09 16:57:00 +00:00
92ad118de2 Update config.ini.TEMPLATE
Update for incept5/llama3.1-claude LLM
2025-02-08 21:38:36 +00:00
2df4358356 Update config.ini.TEMPLATE
commented out Ollama
2025-02-08 21:17:22 +00:00
4bd3cfcdc2 Update config.ini.TEMPLATE
Added Ollama Config Option
2025-02-08 21:05:03 +00:00
f1f6c294e9 Update ssh_server.py
Added Ollama Model
2025-02-08 21:04:02 +00:00
c980fc6167 Update requirements.txt
Update for Ollama
2025-02-08 21:02:31 +00:00
1d0e046924 Added 'sensor_name' and 'sensor_protocol' to logs
* 'sensor_name' is an arbitrary string that identifies the specific honeypot sensor that generated the log. Set it in the config.ini file. If not set, it will default to the honeypot system's hostname.
* 'sensor_protocol' identifies the specific protocol this honeypot sensor uses. For SSH, it's always "ssh" but as other protocols are added to DECEIVE in the future, this will have different values for their logs.
2025-02-06 10:16:42 -05:00
3 changed files with 100 additions and 22 deletions

View File

@ -6,6 +6,10 @@
# The name of the file you wish to write the honeypot log to. # The name of the file you wish to write the honeypot log to.
log_file = ssh_log.log log_file = ssh_log.log
# The name of the sensor, used to identify this honeypot in the logs.
# If you leave this blank, the honeypot will use the system's hostname.
sensor_name = deceive
# Settings for the SSH honeypot # Settings for the SSH honeypot
[ssh] [ssh]
# The port the SSH honeypot will listen on. You will probably want to set # The port the SSH honeypot will listen on. You will probably want to set
@ -14,8 +18,10 @@ port = 8022
# The host key to use for the SSH server. This should be a private key. # The host key to use for the SSH server. This should be a private key.
# See the README for how to generate this key. # See the README for how to generate this key.
host_priv_key = ssh_host_key host_priv_key = ssh_host_key
# The server version string to send to clients. # The server version string to send to clients. The SSH server automatically
server_version_string = SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3 # prepends "SSH-2.0-" to this. So "OpenSSH_8.2p1 Ubuntu-4ubuntu0.3" will
# be transformed to "SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3".
server_version_string = OpenSSH_8.2p1 Ubuntu-4ubuntu0.3
# Settings to configure which LLM backend to use. Only one stanza # Settings to configure which LLM backend to use. Only one stanza
# should be uncommented at a time. # should be uncommented at a time.
@ -24,6 +30,10 @@ server_version_string = SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3
llm_provider = openai llm_provider = openai
model_name = gpt-4o model_name = gpt-4o
##### ollama llama3
#llm_provider = ollama
#model_name = llama3.3
##### Any model via Amazon Bedrock ##### Any model via Amazon Bedrock
# Valid AWS model names can be found here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html # Valid AWS model names can be found here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
#llm_provider = AWS #llm_provider = AWS

View File

@ -18,14 +18,20 @@ from operator import itemgetter
from langchain_openai import ChatOpenAI from langchain_openai import ChatOpenAI
from langchain_aws import ChatBedrock, ChatBedrockConverse from langchain_aws import ChatBedrock, ChatBedrockConverse
from langchain_google_genai import ChatGoogleGenerativeAI from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_ollama import ChatOllama
from langchain_core.messages import HumanMessage, SystemMessage, trim_messages from langchain_core.messages import HumanMessage, SystemMessage, trim_messages
from langchain_core.chat_history import BaseChatMessageHistory, InMemoryChatMessageHistory from langchain_core.chat_history import BaseChatMessageHistory, InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnablePassthrough from langchain_core.runnables import RunnablePassthrough
from asyncssh.misc import ConnectionLost from asyncssh.misc import ConnectionLost
import socket
class JSONFormatter(logging.Formatter): class JSONFormatter(logging.Formatter):
def __init__(self, sensor_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.sensor_name = sensor_name
def format(self, record): def format(self, record):
log_record = { log_record = {
"timestamp": datetime.datetime.fromtimestamp(record.created, datetime.timezone.utc).isoformat(sep="T", timespec="milliseconds"), "timestamp": datetime.datetime.fromtimestamp(record.created, datetime.timezone.utc).isoformat(sep="T", timespec="milliseconds"),
@ -35,7 +41,9 @@ class JSONFormatter(logging.Formatter):
"src_port": record.src_port, "src_port": record.src_port,
"dst_ip": record.dst_ip, "dst_ip": record.dst_ip,
"dst_port": record.dst_port, "dst_port": record.dst_port,
"message": record.getMessage() "message": record.getMessage(),
"sensor_name": self.sensor_name,
"sensor_protocol": "ssh"
} }
if hasattr(record, 'interactive'): if hasattr(record, 'interactive'):
log_record["interactive"] = record.interactive log_record["interactive"] = record.interactive
@ -168,6 +176,7 @@ representative examples.
judgement = "MALICIOUS" judgement = "MALICIOUS"
logger.info("Session summary", extra={"details": llm_response.content, "judgement": judgement}) logger.info("Session summary", extra={"details": llm_response.content, "judgement": judgement})
server.summary_generated = True server.summary_generated = True
async def handle_client(process: asyncssh.SSHServerProcess, server: MySSHServer) -> None: async def handle_client(process: asyncssh.SSHServerProcess, server: MySSHServer) -> None:
@ -269,7 +278,7 @@ class ContextFilter(logging.Filter):
if task: if task:
task_name = task.get_name() task_name = task.get_name()
else: else:
task_name = "-" task_name = thread_local.__dict__.get('session_id', '-')
record.src_ip = thread_local.__dict__.get('src_ip', '-') record.src_ip = thread_local.__dict__.get('src_ip', '-')
record.src_port = thread_local.__dict__.get('src_port', '-') record.src_port = thread_local.__dict__.get('src_port', '-')
@ -277,7 +286,7 @@ class ContextFilter(logging.Filter):
record.dst_port = thread_local.__dict__.get('dst_port', '-') record.dst_port = thread_local.__dict__.get('dst_port', '-')
record.task_name = task_name record.task_name = task_name
return True return True
def llm_get_session_history(session_id: str) -> BaseChatMessageHistory: def llm_get_session_history(session_id: str) -> BaseChatMessageHistory:
@ -296,15 +305,19 @@ def get_user_accounts() -> dict:
return accounts return accounts
def choose_llm(): def choose_llm(llm_provider: Optional[str] = None, model_name: Optional[str] = None):
llm_provider_name = config['llm'].get("llm_provider", "openai") llm_provider_name = llm_provider or config['llm'].get("llm_provider", "openai")
llm_provider_name = llm_provider_name.lower() llm_provider_name = llm_provider_name.lower()
model_name = config['llm'].get("model_name", "gpt-3.5-turbo") model_name = model_name or config['llm'].get("model_name", "gpt-3.5-turbo")
if llm_provider_name == 'openai': if llm_provider_name == 'openai':
llm_model = ChatOpenAI( llm_model = ChatOpenAI(
model=model_name model=model_name
) )
elif llm_provider_name == 'ollama':
llm_model = ChatOllama(
model=model_name
)
elif llm_provider_name == 'aws': elif llm_provider_name == 'aws':
llm_model = ChatBedrockConverse( llm_model = ChatBedrockConverse(
model=model_name, model=model_name,
@ -347,25 +360,79 @@ def get_prompts(prompt: Optional[str], prompt_file: Optional[str]) -> dict:
try: try:
# Parse command line arguments # Parse command line arguments
parser = argparse.ArgumentParser(description='Start the SSH honeypot server.') parser = argparse.ArgumentParser(description='Start the SSH honeypot server.')
parser.add_argument('-c', '--config', type=str, default='config.ini', help='Path to the configuration file') parser.add_argument('-c', '--config', type=str, default=None, help='Path to the configuration file')
parser.add_argument('-p', '--prompt', type=str, help='The entire text of the prompt') parser.add_argument('-p', '--prompt', type=str, help='The entire text of the prompt')
parser.add_argument('-f', '--prompt-file', type=str, default='prompt.txt', help='Path to the prompt file') parser.add_argument('-f', '--prompt-file', type=str, default='prompt.txt', help='Path to the prompt file')
parser.add_argument('-l', '--llm-provider', type=str, help='The LLM provider to use')
parser.add_argument('-m', '--model-name', type=str, help='The model name to use')
parser.add_argument('-t', '--trimmer-max-tokens', type=int, help='The maximum number of tokens to send to the LLM backend in a single request')
parser.add_argument('-s', '--system-prompt', type=str, help='System prompt for the LLM')
parser.add_argument('-P', '--port', type=int, help='The port the SSH honeypot will listen on')
parser.add_argument('-k', '--host-priv-key', type=str, help='The host key to use for the SSH server')
parser.add_argument('-v', '--server-version-string', type=str, help='The server version string to send to clients')
parser.add_argument('-L', '--log-file', type=str, help='The name of the file you wish to write the honeypot log to')
parser.add_argument('-S', '--sensor-name', type=str, help='The name of the sensor, used to identify this honeypot in the logs')
parser.add_argument('-u', '--user-account', action='append', help='User account in the form username=password. Can be repeated.')
args = parser.parse_args() args = parser.parse_args()
# Check if the config file exists # Determine which config file to load
if not os.path.exists(args.config): config = ConfigParser()
print(f"Error: The specified config file '{args.config}' does not exist.", file=sys.stderr) if args.config is not None:
sys.exit(1) # User explicitly set a config file; error if it doesn't exist.
if not os.path.exists(args.config):
print(f"Error: The specified config file '{args.config}' does not exist.", file=sys.stderr)
sys.exit(1)
config.read(args.config)
else:
default_config = "config.ini"
if os.path.exists(default_config):
config.read(default_config)
else:
# Use defaults when no config file found.
config['honeypot'] = {'log_file': 'ssh_log.log', 'sensor_name': socket.gethostname()}
config['ssh'] = {'port': '8022', 'host_priv_key': 'ssh_host_key', 'server_version_string': 'SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3'}
config['llm'] = {'llm_provider': 'openai', 'model_name': 'gpt-3.5-turbo', 'trimmer_max_tokens': '64000', 'system_prompt': ''}
config['user_accounts'] = {}
# Override config values with command line arguments if provided
if args.llm_provider:
config['llm']['llm_provider'] = args.llm_provider
if args.model_name:
config['llm']['model_name'] = args.model_name
if args.trimmer_max_tokens:
config['llm']['trimmer_max_tokens'] = str(args.trimmer_max_tokens)
if args.system_prompt:
config['llm']['system_prompt'] = args.system_prompt
if args.port:
config['ssh']['port'] = str(args.port)
if args.host_priv_key:
config['ssh']['host_priv_key'] = args.host_priv_key
if args.server_version_string:
config['ssh']['server_version_string'] = args.server_version_string
if args.log_file:
config['honeypot']['log_file'] = args.log_file
if args.sensor_name:
config['honeypot']['sensor_name'] = args.sensor_name
# Merge command-line user accounts into the config
if args.user_account:
if 'user_accounts' not in config:
config['user_accounts'] = {}
for account in args.user_account:
if '=' in account:
key, value = account.split('=', 1)
config['user_accounts'][key.strip()] = value.strip()
else:
config['user_accounts'][account.strip()] = ''
# Read the user accounts from the configuration
accounts = get_user_accounts()
# Always use UTC for logging # Always use UTC for logging
logging.Formatter.formatTime = (lambda self, record, datefmt=None: datetime.datetime.fromtimestamp(record.created, datetime.timezone.utc).isoformat(sep="T",timespec="milliseconds")) logging.Formatter.formatTime = (lambda self, record, datefmt=None: datetime.datetime.fromtimestamp(record.created, datetime.timezone.utc).isoformat(sep="T",timespec="milliseconds"))
# Read our configuration file # Get the sensor name from the config or use the system's hostname
config = ConfigParser() sensor_name = config['honeypot'].get('sensor_name', socket.gethostname())
config.read(args.config)
# Read the user accounts from the configuration file
accounts = get_user_accounts()
# Set up the honeypot logger # Set up the honeypot logger
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -374,7 +441,7 @@ try:
log_file_handler = logging.FileHandler(config['honeypot'].get("log_file", "ssh_log.log")) log_file_handler = logging.FileHandler(config['honeypot'].get("log_file", "ssh_log.log"))
logger.addHandler(log_file_handler) logger.addHandler(log_file_handler)
log_file_handler.setFormatter(JSONFormatter()) log_file_handler.setFormatter(JSONFormatter(sensor_name))
f = ContextFilter() f = ContextFilter()
logger.addFilter(f) logger.addFilter(f)
@ -385,7 +452,7 @@ try:
llm_system_prompt = prompts["system_prompt"] llm_system_prompt = prompts["system_prompt"]
llm_user_prompt = prompts["user_prompt"] llm_user_prompt = prompts["user_prompt"]
llm = choose_llm() llm = choose_llm(config['llm'].get("llm_provider"), config['llm'].get("model_name"))
llm_sessions = dict() llm_sessions = dict()

View File

@ -7,7 +7,8 @@ langchain_community
langchain_openai langchain_openai
# For Google's Gemini models # For Google's Gemini models
langchain_google_genai langchain_google_genai
# For Ollama models
langchain_ollama
# For AWS # For AWS
langchain_aws langchain_aws
transformers transformers