mirror of
https://github.com/splunk/DECEIVE.git
synced 2025-07-01 16:47:28 -04:00
Update ssh_server.py
Added Ollama Model
@@ -18,6 +18,7 @@ from operator import itemgetter
 from langchain_openai import ChatOpenAI
 from langchain_aws import ChatBedrock, ChatBedrockConverse
 from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_ollama import ChatOllama
 from langchain_core.messages import HumanMessage, SystemMessage, trim_messages
 from langchain_core.chat_history import BaseChatMessageHistory, InMemoryChatMessageHistory
 from langchain_core.runnables.history import RunnableWithMessageHistory
@@ -313,6 +314,10 @@ def choose_llm():
         llm_model = ChatOpenAI(
             model=model_name
         )
+    elif llm_provider_name == 'ollama':
+        llm_model = ChatOllama(
+            model=model_name
+        )
     elif llm_provider_name == 'aws':
         llm_model = ChatBedrockConverse(
             model=model_name,
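
A minimal sketch of how the new branch could be exercised, assuming the langchain-ollama package is installed and a local Ollama daemon is serving the named model. The provider and model variable names mirror the diff, but the surrounding setup and the invocation at the end are illustrative, not taken from the repository.

    # Sketch only: assumes `pip install langchain-ollama` and a running Ollama
    # daemon with the named model already pulled (model name is hypothetical).
    from langchain_ollama import ChatOllama
    from langchain_core.messages import HumanMessage, SystemMessage

    llm_provider_name = "ollama"   # would normally come from the honeypot's config
    model_name = "llama3"          # hypothetical model available in Ollama

    if llm_provider_name == "ollama":
        # Mirrors the new elif branch added to choose_llm() in the diff above.
        llm_model = ChatOllama(
            model=model_name
        )

    # Illustrative call showing the model object in use; prompt text is made up.
    response = llm_model.invoke([
        SystemMessage(content="You are simulating a Linux SSH session."),
        HumanMessage(content="ls -la"),
    ])
    print(response.content)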