Merge pull request #4 from Paulslewis66/main

Added Ollama local LLMs
DavidJBianco
2025-02-12 10:05:51 -05:00
committed by GitHub
3 changed files with 11 additions and 1 deletion

View File

@@ -28,6 +28,10 @@ server_version_string = SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3
llm_provider = openai
model_name = gpt-4o
##### ollama llama3
#llm_provider = ollama
#model_name = llama3.3
##### Any model via Amazon Bedrock
# Valid AWS model names can be found here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
#llm_provider = AWS

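Not part of the diff, but as a usage note: selecting the new provider should just be a matter of flipping the comments in this block so that the Ollama lines are active, roughly as sketched below (assuming a local Ollama install with the llama3.3 model already pulled):

```
##### ollama llama3
llm_provider = ollama
model_name = llama3.3
```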
View File

@@ -18,6 +18,7 @@ from operator import itemgetter
from langchain_openai import ChatOpenAI
from langchain_aws import ChatBedrock, ChatBedrockConverse
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_ollama import ChatOllama
from langchain_core.messages import HumanMessage, SystemMessage, trim_messages
from langchain_core.chat_history import BaseChatMessageHistory, InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
@@ -313,6 +314,10 @@ def choose_llm():
        llm_model = ChatOpenAI(
            model=model_name
        )
    elif llm_provider_name == 'ollama':
        llm_model = ChatOllama(
            model=model_name
        )
    elif llm_provider_name == 'aws':
        llm_model = ChatBedrockConverse(
            model=model_name,

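For a quick check that the new branch behaves as expected, here is a minimal, self-contained sketch of the same ChatOllama construction outside of choose_llm() (assuming a local Ollama server on its default port with the llama3.3 model pulled; the model name and prompt are illustrative):

```python
# Minimal sketch: assumes `ollama serve` is running locally and
# `ollama pull llama3.3` has already been done.
from langchain_ollama import ChatOllama
from langchain_core.messages import HumanMessage, SystemMessage

# Mirrors the new choose_llm() branch: ChatOllama only needs a model
# name when talking to a default local Ollama server.
llm_model = ChatOllama(model="llama3.3")

# Invoke the chat model with a simple system/human message pair.
response = llm_model.invoke([
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="Say hello in one short sentence."),
])
print(response.content)
```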
View File

@@ -7,7 +7,8 @@ langchain_community
langchain_openai
# For Google's Gemini models
langchain_google_genai
# For Ollama models
langchain_ollama
# For AWS
langchain_aws
transformers