From a3c14bbf15c3bf9c326bafaacace112c51f2d896 Mon Sep 17 00:00:00 2001 From: "David J. Bianco" Date: Thu, 20 Mar 2025 15:21:07 -0400 Subject: [PATCH] Preliminary support for Azure OpenAI models, plus "porn fix" This version adds support for Azure OpenAI models. I'm not entirely happy with how each LLM provider has its own set of params, and am investigating how to make these seem a little more unified, so this support may change in the future. Also, Azure's content filter flags the "XXX-END-OF-SESSION-XXX" token as "sexual content", so I changed it to use "YYY" instead. I feel so protected! --- SSH/config.ini.TEMPLATE | 7 +++++++ SSH/ssh_server.py | 13 ++++++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/SSH/config.ini.TEMPLATE b/SSH/config.ini.TEMPLATE index 16ac131..903d4cb 100644 --- a/SSH/config.ini.TEMPLATE +++ b/SSH/config.ini.TEMPLATE @@ -30,6 +30,13 @@ server_version_string = OpenSSH_8.2p1 Ubuntu-4ubuntu0.3 llm_provider = openai model_name = gpt-4o +##### Azure OpenAI +#llm_provider = azure +#azure_deployment = gpt-4o +#azure_api_version = 2025-01-01-preview +#azure_endpoint = +#model_name = gpt-4o + ##### ollama llama3 #llm_provider = ollama #model_name = llama3.3 diff --git a/SSH/ssh_server.py b/SSH/ssh_server.py index 2d72982..3bb75c0 100755 --- a/SSH/ssh_server.py +++ b/SSH/ssh_server.py @@ -15,7 +15,7 @@ import datetime import uuid from base64 import b64encode from operator import itemgetter -from langchain_openai import ChatOpenAI +from langchain_openai import ChatOpenAI, AzureChatOpenAI from langchain_aws import ChatBedrock, ChatBedrockConverse from langchain_google_genai import ChatGoogleGenerativeAI from langchain_ollama import ChatOllama @@ -234,7 +234,7 @@ async def handle_client(process: asyncssh.SSHServerProcess, server: MySSHServer) }, config=llm_config ) - if llm_response.content == "XXX-END-OF-SESSION-XXX": + if llm_response.content == "YYY-END-OF-SESSION-YYY": await session_summary(process, llm_config, 
with_message_history, server) process.exit(0) return @@ -314,8 +314,15 @@ def choose_llm(llm_provider: Optional[str] = None, model_name: Optional[str] = N llm_model = ChatOpenAI( model=model_name ) + elif llm_provider_name == 'azure': + llm_model = AzureChatOpenAI( + azure_deployment=config['llm'].get("azure_deployment"), + azure_endpoint=config['llm'].get("azure_endpoint"), + api_version=config['llm'].get("azure_api_version"), + model=config['llm'].get("model_name") # Ensure model_name is passed here + ) elif llm_provider_name == 'ollama': - llm_model = ChatOllama( + llm_model = ChatOllama( model=model_name ) elif llm_provider_name == 'aws':