Ishaan Shah committed on
Commit 051dc03 · 1 Parent(s): 997488c
Files changed (1)
  1. main.py +42 -35
main.py CHANGED
@@ -15,6 +15,8 @@ import textwrap
 from flask_cors import CORS
 import socket;
 
+import gradio as gr
+
 app = Flask(__name__)
 cors = CORS(app)
 
@@ -66,40 +68,45 @@ def default():
     return "Hello World!"
 
 
-if __name__ == '__main__':
-    ip=get_local_ip()
-    os.environ["OPENAI_API_KEY"] = "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"
-    # Embed and store the texts
-    # if(torch.cuda.is_available() == False):
-    #     print("No GPU available")
-    #     exit(1)
+# if __name__ == '__main__':
+#     ip=get_local_ip()
+#     os.environ["OPENAI_API_KEY"] = "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"
+#     # Embed and store the texts
+#     # if(torch.cuda.is_available() == False):
+#     #     print("No GPU available")
+#     #     exit(1)
 
-    torch.cuda.empty_cache()
-    torch.max_split_size_mb = 100
-    instructor_embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl",
-                                                          model_kwargs={"device": "cpu"})
-    # Supplying a persist_directory will store the embeddings on disk
-    persist_directory = 'db'
-    vectordb2 = Chroma(persist_directory=persist_directory,
-                       embedding_function=instructor_embeddings,
-                       )
-    retriever = vectordb2.as_retriever(search_kwargs={"k": 3})
-    vectordb2.persist()
+#     torch.cuda.empty_cache()
+#     torch.max_split_size_mb = 100
+#     instructor_embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl",
+#                                                           model_kwargs={"device": "cpu"})
+#     # Supplying a persist_directory will store the embeddings on disk
+#     persist_directory = 'db'
+#     vectordb2 = Chroma(persist_directory=persist_directory,
+#                        embedding_function=instructor_embeddings,
+#                        )
+#     retriever = vectordb2.as_retriever(search_kwargs={"k": 3})
+#     vectordb2.persist()
 
-    # Set up the turbo LLM
-    turbo_llm = ChatOpenAI(
-        temperature=0,
-        model_name='gpt-3.5-turbo'
-    )
-    qa_chain = RetrievalQA.from_chain_type(llm=turbo_llm,
-                                           chain_type="stuff",
-                                           retriever=retriever,
-                                           return_source_documents=True)
-    qa_chain.combine_documents_chain.llm_chain.prompt.messages[0].prompt.template= """
-Use only the following pieces of context and think step by step to answer. Answer the users question only if they are related to the context given.
-If you don't know the answer, just say that you don't know, don't try to make up an answer. Make your answer very detailed and long.
-Use bullet points to explain when required.
-Use only text found in the context as your knowledge source for the answer.
-----------------
-{context}"""
-    app.run(host=ip, port=5000)
+#     # Set up the turbo LLM
+#     turbo_llm = ChatOpenAI(
+#         temperature=0,
+#         model_name='gpt-3.5-turbo'
+#     )
+#     qa_chain = RetrievalQA.from_chain_type(llm=turbo_llm,
+#                                            chain_type="stuff",
+#                                            retriever=retriever,
+#                                            return_source_documents=True)
+#     qa_chain.combine_documents_chain.llm_chain.prompt.messages[0].prompt.template= """
+# Use only the following pieces of context and think step by step to answer. Answer the users question only if they are related to the context given.
+# If you don't know the answer, just say that you don't know, don't try to make up an answer. Make your answer very detailed and long.
+# Use bullet points to explain when required.
+# Use only text found in the context as your knowledge source for the answer.
+# ----------------
+# {context}"""
+#     app.run(host=ip, port=5000)
+
+def greet(name):
+    return "Hello " + name + "!!"
+iface = gr.Interface(fn=greet, inputs="text", outputs="text")
+iface.launch()
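The block this commit comments out builds a LangChain RetrievalQA pipeline (instructor embeddings, a persisted Chroma store, a k=3 retriever, and a custom system prompt), but the hunk only shows setup, never a query. As a minimal sketch, not part of the commit, of how such a chain is invoked, assuming the pre-0.1 langchain APIs main.py already imports (HuggingFaceInstructEmbeddings, Chroma, ChatOpenAI, RetrievalQA), an existing Chroma index in ./db, and OPENAI_API_KEY set in the environment; the question string is a placeholder:

from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.vectorstores import Chroma

# Rebuild the retriever the same way the commented-out block does.
embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl",
                                           model_kwargs={"device": "cpu"})
vectordb = Chroma(persist_directory="db", embedding_function=embeddings)
retriever = vectordb.as_retriever(search_kwargs={"k": 3})

qa_chain = RetrievalQA.from_chain_type(llm=ChatOpenAI(temperature=0,
                                                      model_name="gpt-3.5-turbo"),
                                       chain_type="stuff",
                                       retriever=retriever,
                                       return_source_documents=True)

# With return_source_documents=True the chain returns a dict holding the
# generated answer ("result") and the retrieved chunks ("source_documents").
result = qa_chain({"query": "What topics does the indexed material cover?"})  # placeholder question
print(result["result"])
for doc in result["source_documents"]:
    print(doc.metadata)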
 
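The replacement entrypoint is a stock Gradio hello-world: gr.Interface wires greet to a text input and a text output, and iface.launch() starts the demo server as soon as the module runs, which suits how Hugging Face Spaces execute the app file. A standalone sketch of the same app, with a __main__ guard added purely for illustration (the commit itself launches at import time):

import gradio as gr

def greet(name):
    # Echo a greeting for the submitted name.
    return "Hello " + name + "!!"

# The "text" shorthand creates a single-line Textbox for both input and output.
iface = gr.Interface(fn=greet, inputs="text", outputs="text")

# Guarding launch() keeps the module importable without starting a server;
# running the file directly still serves the demo.
if __name__ == "__main__":
    iface.launch()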