datasciencedojo committed on
Commit
419e7a3
1 Parent(s): 416760a

Create agents/agents.py

Browse files
Files changed (1) hide show
  1. agents/agents.py +62 -0
agents/agents.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_community.chat_models import ChatOpenAI
2
+ from langchain_groq import ChatGroq
3
+ import os
4
+ from langchain_huggingface.llms import HuggingFacePipeline
5
+ from dotenv import load_dotenv
6
+ # Import Azure OpenAI
7
+ from langchain_openai import AzureOpenAI
8
+
9
# Load environment variables from a local .env file into os.environ.
# The agents below read these keys lazily via os.getenv at call time:
#   GROQ_API_KEY, HF_token, AZURE_OPENAI_ENDPOINT,
#   AZURE_OPENAI_API_KEY, OPENAI_API_VERSION
# (The original bare os.getenv(...) calls here discarded their return
# values and had no effect, so they were removed.)
load_dotenv()
17
def get_agent_groq(temperature=0.7, model="llama3-70b-8192"):
    """Build a Groq-hosted chat agent.

    Args:
        temperature: Sampling temperature forwarded to the model.
        model: Name of a model available on Groq. Defaults to Llama 3 70B,
            preserving the original behavior for existing callers.

    Returns:
        A configured ``ChatGroq`` instance.
    """
    # The API key is expected in the environment (loaded via dotenv at
    # module import); ChatGroq receives it explicitly here.
    return ChatGroq(
        temperature=temperature,
        model=model,
        api_key=os.getenv("GROQ_API_KEY"),
    )
27
+
28
def get_agent_groq_mixt(temperature=0.7, model="mixtral-8x7b-32768"):
    """Build a Groq-hosted chat agent backed by Mixtral by default.

    Args:
        temperature: Sampling temperature forwarded to the model.
        model: Name of a model available on Groq. Defaults to
            Mixtral 8x7B, preserving the original behavior.

    Returns:
        A configured ``ChatGroq`` instance.
    """
    # The API key is expected in the environment (loaded via dotenv at
    # module import); ChatGroq receives it explicitly here.
    return ChatGroq(
        temperature=temperature,
        model=model,
        api_key=os.getenv("GROQ_API_KEY"),
    )
38
def get_agent_groq_llama8(temperature=0.7, model="llama3-8b-8192"):
    """Build a Groq-hosted chat agent backed by Llama 3 8B by default.

    Args:
        temperature: Sampling temperature forwarded to the model.
        model: Name of a model available on Groq. Defaults to
            Llama 3 8B, preserving the original behavior.

    Returns:
        A configured ``ChatGroq`` instance.
    """
    # The API key is expected in the environment (loaded via dotenv at
    # module import); ChatGroq receives it explicitly here.
    return ChatGroq(
        temperature=temperature,
        model=model,
        api_key=os.getenv("GROQ_API_KEY"),
    )
48
def agent_openai():
    """Return an Azure OpenAI completion LLM instance.

    Endpoint, API key and API version are read by the client itself from
    the AZURE_OPENAI_* / OPENAI_API_VERSION environment variables loaded
    at module import.
    """
    # The deployment name is hard-coded; replace it with your own Azure
    # deployment if it differs.
    # NOTE(review): both `name` and `deployment_name` are passed the same
    # value — confirm `name` is actually an accepted AzureOpenAI kwarg.
    deployment = "gpt-35-turbo-instruct"
    return AzureOpenAI(name=deployment, deployment_name=deployment)
55
def get_agent_llama():
    """Return a local HuggingFace text-generation pipeline wrapping GPT-2."""
    # Small model plus a 10-token generation cap keeps this agent cheap;
    # presumably intended as a lightweight local fallback — TODO confirm.
    generation_options = {"max_new_tokens": 10}
    return HuggingFacePipeline.from_model_id(
        model_id="gpt2",
        task="text-generation",
        pipeline_kwargs=generation_options,
    )