6 Commits

SHA1 Message Date
847e7bce48 No longer send an "ignore this message" at start time. 2025-06-13 10:43:52 -04:00
8c0c3eb81f format cleanup on the default prompt 2025-05-30 14:13:41 -04:00
681ab58750 Changed default LLM to 'gpt-4o-mini'
Only used if the model isn't specified in the config or on the command line
2025-04-25 09:28:08 -04:00
e738379fc4 Updated default temperature to 0.2
This is only used if no temperature is specified on the command line or in the config file.
2025-04-25 09:17:21 -04:00
10e2f11599 Added 'temperature' parameter to control randomness in LLM responses.
Lower temps mean less randomness in the responses, which increases the chances of consistency between sessions. Not a guarantee, though. (A short sketch illustrating this follows the commit list.)
2025-04-25 09:12:40 -04:00
a3c14bbf15 Preliminary support for Azure OpenAI models, plus "porn fix"
This version adds support for Azure OpenAI models. I'm not entirely happy with how each LLM provider has its own set of params, and am investigating how to make these seem a little more unified, so this support may change in the future. (A side-by-side sketch of the OpenAI and Azure constructors appears at the end of this diff.)

Also, Azure's content filter flags the "XXX-END-OF-SESSION-XXX" token as "sexual content", so I changed it to use "YYY" instead. I feel so protected!
2025-03-20 15:21:07 -04:00
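The two temperature commits above only change a default and pass the value through to the model constructors shown in the diff below. As a quick standalone illustration of the consistency claim (a minimal sketch, not part of this change set; it assumes an OPENAI_API_KEY in the environment and uses an arbitrary placeholder prompt):

    # Minimal sketch (not from this repo): send the same prompt twice at two
    # temperatures and see whether the replies match. Lower temperature makes
    # identical replies more likely, but does not guarantee them.
    from langchain_openai import ChatOpenAI

    prompt = "ls -la /home/developer"  # placeholder prompt

    for temp in (0.2, 0.8):
        llm = ChatOpenAI(model="gpt-4o-mini", temperature=temp)
        first = llm.invoke(prompt).content
        second = llm.invoke(prompt).content
        print(f"temperature={temp}: identical replies? {first == second}")
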
3 changed files with 42 additions and 11 deletions

View File

@@ -30,6 +30,13 @@ server_version_string = OpenSSH_8.2p1 Ubuntu-4ubuntu0.3
llm_provider = openai
model_name = gpt-4o
##### Azure OpenAI
#llm_provider = azure
#azure_deployment = gpt-4o
#azure_api_version = 2025-01-01-preview
#azure_endpoint = <your endpoint url>
#model_name = gpt-4o
##### ollama llama3
#llm_provider = ollama
#model_name = llama3.3
@@ -46,6 +53,12 @@ model_name = gpt-4o
#llm_provider = gemini
#model_name = gemini-1.5-pro
# Temperature controls randomness in LLM responses. Values usually range from 0.0 to 2.0.
# Lower values (e.g., 0.2) make responses more focused and deterministic.
# Higher values (e.g., 0.8) make responses more creative and variable.
# Default is 0.2.
temperature = 0.2
# The maximum number of tokens to send to the LLM backend in a single
# request. This includes the message history for the session, so should
# be fairly high. Not all models support large token counts, so be sure

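The comment above describes trimmer_max_tokens, but the trimming code itself lies outside this diff's context lines. As a rough sketch of how such a limit is typically applied with LangChain's trim_messages helper (an assumption about the wiring, not code from this repository):

    # Sketch only: typical LangChain message trimming driven by a
    # trimmer_max_tokens-style setting. The honeypot's actual trimming
    # code is not shown in this diff.
    from langchain_core.messages import trim_messages
    from langchain_openai import ChatOpenAI

    llm_model = ChatOpenAI(model="gpt-4o-mini", temperature=0.2)

    trimmer = trim_messages(
        max_tokens=64000,         # e.g. the config's trimmer_max_tokens value
        strategy="last",          # keep the most recent messages
        token_counter=llm_model,  # let the model count its own tokens
        include_system=True,      # never trim away the system prompt
    )

Called without messages, trim_messages returns a runnable trimmer that can be chained in front of the model so each request stays under the limit.
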
View File

@@ -1,2 +1 @@
You are a video game developer's system. Include realistic video game source
and asset files.
You are a video game developer's system. Include realistic video game source and asset files.

View File

@@ -15,7 +15,7 @@ import datetime
import uuid
from base64 import b64encode
from operator import itemgetter
from langchain_openai import ChatOpenAI
from langchain_openai import ChatOpenAI, AzureChatOpenAI
from langchain_aws import ChatBedrock, ChatBedrockConverse
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_ollama import ChatOllama
@@ -211,7 +211,7 @@ async def handle_client(process: asyncssh.SSHServerProcess, server: MySSHServer)
# Handle interactive session
llm_response = await with_message_history.ainvoke(
{
"messages": [HumanMessage(content="ignore this message")],
"messages": [HumanMessage(content="")],
"username": process.get_extra_info('username'),
"interactive": True
},
@@ -234,7 +234,7 @@ async def handle_client(process: asyncssh.SSHServerProcess, server: MySSHServer)
},
config=llm_config
)
if llm_response.content == "XXX-END-OF-SESSION-XXX":
if llm_response.content == "YYY-END-OF-SESSION-YYY":
await session_summary(process, llm_config, with_message_history, server)
process.exit(0)
return
@@ -308,24 +308,40 @@ def get_user_accounts() -> dict:
def choose_llm(llm_provider: Optional[str] = None, model_name: Optional[str] = None):
llm_provider_name = llm_provider or config['llm'].get("llm_provider", "openai")
llm_provider_name = llm_provider_name.lower()
model_name = model_name or config['llm'].get("model_name", "gpt-3.5-turbo")
model_name = model_name or config['llm'].get("model_name", "gpt-4o-mini")
# Get temperature parameter from config, default to 0.2 if not specified
temperature = config['llm'].getfloat("temperature", 0.2)
if llm_provider_name == 'openai':
llm_model = ChatOpenAI(
model=model_name
model=model_name,
temperature=temperature
)
elif llm_provider_name == 'azure':
llm_model = AzureChatOpenAI(
azure_deployment=config['llm'].get("azure_deployment"),
azure_endpoint=config['llm'].get("azure_endpoint"),
api_version=config['llm'].get("azure_api_version"),
model=config['llm'].get("model_name"), # Ensure model_name is passed here
temperature=temperature
)
elif llm_provider_name == 'ollama':
llm_model = ChatOllama(
model=model_name
llm_model = ChatOllama(
model=model_name,
temperature=temperature
)
elif llm_provider_name == 'aws':
llm_model = ChatBedrockConverse(
model=model_name,
region_name=config['llm'].get("aws_region", "us-east-1"),
credentials_profile_name=config['llm'].get("aws_credentials_profile", "default") )
credentials_profile_name=config['llm'].get("aws_credentials_profile", "default"),
temperature=temperature
)
elif llm_provider_name == 'gemini':
llm_model = ChatGoogleGenerativeAI(
model=model_name,
temperature=temperature
)
else:
raise ValueError(f"Invalid LLM provider {llm_provider_name}.")
@@ -367,6 +383,7 @@ try:
parser.add_argument('-m', '--model-name', type=str, help='The model name to use')
parser.add_argument('-t', '--trimmer-max-tokens', type=int, help='The maximum number of tokens to send to the LLM backend in a single request')
parser.add_argument('-s', '--system-prompt', type=str, help='System prompt for the LLM')
parser.add_argument('-r', '--temperature', type=float, help='Temperature parameter for controlling randomness in LLM responses (0.0-2.0)')
parser.add_argument('-P', '--port', type=int, help='The port the SSH honeypot will listen on')
parser.add_argument('-k', '--host-priv-key', type=str, help='The host key to use for the SSH server')
parser.add_argument('-v', '--server-version-string', type=str, help='The server version string to send to clients')
@@ -391,7 +408,7 @@ try:
# Use defaults when no config file found.
config['honeypot'] = {'log_file': 'ssh_log.log', 'sensor_name': socket.gethostname()}
config['ssh'] = {'port': '8022', 'host_priv_key': 'ssh_host_key', 'server_version_string': 'SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3'}
config['llm'] = {'llm_provider': 'openai', 'model_name': 'gpt-3.5-turbo', 'trimmer_max_tokens': '64000', 'system_prompt': ''}
config['llm'] = {'llm_provider': 'openai', 'model_name': 'gpt-3.5-turbo', 'trimmer_max_tokens': '64000', 'temperature': '0.7', 'system_prompt': ''}
config['user_accounts'] = {}
# Override config values with command line arguments if provided
@@ -403,6 +420,8 @@ try:
config['llm']['trimmer_max_tokens'] = str(args.trimmer_max_tokens)
if args.system_prompt:
config['llm']['system_prompt'] = args.system_prompt
if args.temperature is not None:
config['llm']['temperature'] = str(args.temperature)
if args.port:
config['ssh']['port'] = str(args.port)
if args.host_priv_key:
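
As the Azure commit notes, each provider branch in choose_llm() takes its own set of constructor parameters. Pulled out side by side (a sketch distilled from the code above; the endpoint is a placeholder and API keys are expected in the environment):

    # Distilled from choose_llm() above; values are illustrative placeholders.
    from langchain_openai import ChatOpenAI, AzureChatOpenAI

    # Plain OpenAI: the model name and temperature are enough.
    openai_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.2)

    # Azure OpenAI: the deployment, endpoint, and API version come from the
    # [llm] section of the config (see the Azure block in the first file).
    azure_llm = AzureChatOpenAI(
        azure_deployment="gpt-4o",
        azure_endpoint="https://<your-resource>.openai.azure.com/",  # placeholder
        api_version="2025-01-01-preview",
        model="gpt-4o",
        temperature=0.2,
    )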