# LLM provider factory helpers (Groq, Azure OpenAI, Google Gemini, Ollama).
import os | |
from getpass import getpass | |
from langchain_groq import ChatGroq | |
from langchain_google_genai import ChatGoogleGenerativeAI | |
from langchain_openai import AzureChatOpenAI | |
from langchain_community.llms import Ollama | |
from langchain_openai.chat_models.base import BaseChatOpenAI | |
def azure_openai_service(
    key,
    max_retries=3,
    endpoint="https://indus.api.michelin.com/openai-key-weu",
    azure_deployment="gpt-4o",
    api_version="2023-06-01-preview",
):
    """Build an Azure OpenAI chat model client.

    Sets the Azure credentials in the process environment (required by the
    langchain Azure client) and returns a configured ``AzureChatOpenAI``.

    Args:
        key: Azure OpenAI API key; exported as AZURE_OPENAI_API_KEY.
        max_retries: Number of retries the client performs on failed calls.
        endpoint: Azure OpenAI endpoint URL; exported as AZURE_OPENAI_ENDPOINT.
            Defaults to the previously hard-coded deployment endpoint.
        azure_deployment: Name of the Azure deployment to target.
        api_version: Azure OpenAI API version string.

    Returns:
        A configured ``AzureChatOpenAI`` instance (temperature 0, no token
        cap, no request timeout).
    """
    # NOTE: mutates process-wide environment — any other Azure client in this
    # process will see these values.
    os.environ["AZURE_OPENAI_API_KEY"] = key
    os.environ["AZURE_OPENAI_ENDPOINT"] = endpoint
    model = AzureChatOpenAI(
        azure_deployment=azure_deployment,
        api_version=api_version,
        temperature=0,
        max_tokens=None,
        timeout=None,
        max_retries=max_retries,
    )
    return model
def get_ollama():
    """Return a local Ollama client for the ``mistral`` model.

    Expects an Ollama server already running on localhost:11434
    (start it with ``ollama start`` in a terminal).
    """
    return Ollama(base_url="http://localhost:11434", model="mistral")
def get_googleGemini(key):
    """Build a Google Gemini chat model client.

    Exports the API key as GOOGLE_API_KEY (read by the langchain Google
    client) and returns a ``ChatGoogleGenerativeAI`` configured for
    ``gemini-1.5-pro`` with temperature 0, no token cap, no timeout,
    and 2 retries.

    Args:
        key: Google API key.

    Returns:
        A configured ``ChatGoogleGenerativeAI`` instance.
    """
    os.environ["GOOGLE_API_KEY"] = key
    gemini = ChatGoogleGenerativeAI(
        model="gemini-1.5-pro",
        temperature=0,
        max_tokens=None,
        timeout=None,
        max_retries=2,
    )
    return gemini
def get_groq_model(key, model_name="gemma2-9b-it"):
    """Build a Groq-hosted chat model client.

    Exports the key as GROQ_API_KEY (read by ``ChatGroq``) and returns a
    client for the requested model.

    Args:
        key: Groq API key.
        model_name: Groq model identifier (default ``gemma2-9b-it``).

    Returns:
        A configured ``ChatGroq`` instance.
    """
    os.environ["GROQ_API_KEY"] = key
    return ChatGroq(model=model_name)
def get_llm(option, key):
    """Return an initialized LLM client for the selected provider option.

    Args:
        option: Selector string — either a Groq model name
            (e.g. ``'gemma2-9b-it'``) or one of ``'Openai'``, ``'Google'``,
            ``'Ollama'``.
        key: API key forwarded to the provider factory (unused for Ollama).

    Returns:
        The instantiated LLM client, or ``None`` when ``option`` is not
        recognized (preserves the original fall-through behavior).
    """
    # All Groq options share the same factory call, so dispatch by membership
    # instead of one elif branch per model name.
    groq_models = {
        "deepseek-r1-distill-llama-70b",
        "gemma2-9b-it",
        "llama-3.2-3b-preview",
        "llama-3.2-1b-preview",
        "llama3-8b-8192",
    }
    if option in groq_models:
        return get_groq_model(key, model_name=option)
    if option == "Openai":
        return azure_openai_service(key)
    if option == "Google":
        return get_googleGemini(key)
    if option == "Ollama":
        return get_ollama()
    return None