Shashank1406 committed on
Commit df8d28a
1 Parent(s): 925601e

Update Prompts_and_Chains.py

Files changed (1)
  1. Prompts_and_Chains.py +45 -63
Prompts_and_Chains.py CHANGED
@@ -1,63 +1,45 @@
- # ******* THIS FILE CONTAINS ALL THE PROMPTS & CHAINS USED IN Functions.py ***********
- from Templates import *
- from langchain import PromptTemplate
- from langchain.chains import LLMChain
- from langchain.llms import OpenAI
- from dotenv import load_dotenv
- import os
- import streamlit as st
-
-
- class PromptTemplates:
-     def __init__(self):
-
-         self.legal_adviser_bot_prompt = PromptTemplate(
-             input_variables=["chat_history","input",], template=legal_adviser_template
-         )
-
-         self.case_summary_prompt = PromptTemplate(
-             input_variables=["case_name", "case_info"], template=case_summary_template
-         )
-
-         self.legal_case_bot_prompt = PromptTemplate(
-             input_variables=["case_summary", "context","input"], template=legal_case_bot_template
-         )
-
-         self.lawyer_recommendations_prompt = PromptTemplate(
-             input_variables=["user_inputs", "matching_lawyers", "additional_info"], template=lawyer_recommendation_template
-         )
-
-
- class LLMChains:
-     def __init__(self):
-         load_dotenv()
-         openai_api_key = os.getenv("OPENAI_API_KEY")
-         obj = PromptTemplates()
-         model_name = st.session_state["selected_model"]
-
-         # generate summary chain
-         self.legal_adviser_bot_chain = LLMChain(
-             llm=OpenAI(model_name='gpt-3.5-turbo-16k', temperature=0.7),
-             prompt=obj.legal_adviser_bot_prompt,
-             verbose="true",
-         )
-
-         # genrate bot conversastion
-         self.case_summary_chain = LLMChain(
-             llm=OpenAI(model_name=model_name, temperature=0.7),
-             prompt=obj.case_summary_prompt,
-             verbose="true",
-         )
-
-         # genrate bot conversastion
-         self.legal_case_bot_chain = LLMChain(
-             llm=OpenAI(model_name=model_name, temperature=0.7),
-             prompt=obj.legal_case_bot_prompt,
-             verbose="true",
-         )
-
-         self.lawyer_recommendations_chain = LLMChain(
-             llm=OpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.7),
-             prompt=obj.lawyer_recommendations_prompt,
-             verbose="true",
-         )
 
+ # ******* THIS FILE CONTAINS ALL THE PROMPTS & CHAINS USED IN Functions.py ***********
+ from Templates import *
+ from langchain import PromptTemplate
+ from langchain.chains import LLMChain
+ from langchain.llms import OpenAI
+ from dotenv import load_dotenv
+ import os
+ import streamlit as st
+
+
+ class PromptTemplates:
+     def __init__(self):
+
+         self.case_summary_prompt = PromptTemplate(
+             input_variables=["case_name", "case_info"], template=case_summary_template
+         )
+
+         self.legal_case_bot_prompt = PromptTemplate(
+             input_variables=["case_summary", "context","input"], template=legal_case_bot_template
+         )
+
+
+ class LLMChains:
+     def __init__(self):
+         load_dotenv()
+         openai_api_key = os.getenv("OPENAI_API_KEY")
+         obj = PromptTemplates()
+         model_name = st.session_state["selected_model"]
+
+         # genrate bot conversastion
+         self.case_summary_chain = LLMChain(
+             llm=OpenAI(model_name=model_name,api_key=os.environ.get('OPEN_API_KEY'), temperature=0.7),
+             prompt=obj.case_summary_prompt,
+             verbose="true",
+         )
+
+         # genrate bot conversastion
+         self.legal_case_bot_chain = LLMChain(
+             llm=OpenAI(model_name=model_name,api_key=os.environ.get('OPEN_API_KEY'), temperature=0.7),
+             prompt=obj.legal_case_bot_prompt,
+             verbose="true",
+         )
+
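For context, a minimal caller-side sketch of the two chains the updated file keeps, roughly as Functions.py might invoke them. This is an illustrative sketch, not part of the commit: it assumes Templates.py defines case_summary_template and legal_case_bot_template with the input variables listed above, that a Streamlit session has set st.session_state["selected_model"], and that the OpenAI key is available in the environment; the case name, case info, and question values are made up.

```python
# Hypothetical usage sketch of the committed LLMChains class (not in the diff).
import streamlit as st
from Prompts_and_Chains import LLMChains

# LLMChains.__init__ reads st.session_state["selected_model"], so set it first.
st.session_state.setdefault("selected_model", "gpt-3.5-turbo-16k")

chains = LLMChains()

# Case summary: the prompt takes "case_name" and "case_info".
summary = chains.case_summary_chain.run(
    case_name="Example v. Example",  # illustrative value
    case_info="Plaintiff alleges breach of contract over late delivery ...",  # illustrative value
)

# Follow-up Q&A: the prompt takes "case_summary", "context", and "input".
answer = chains.legal_case_bot_chain.run(
    case_summary=summary,
    context="",  # e.g. retrieved passages, if any
    input="What remedies is the plaintiff seeking?",
)

print(answer)
```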