# mediHelp / main.py
from flask import Flask, request
from flask_cors import CORS
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.chat_models import ChatOpenAI
import os
import socket
import json
import textwrap
import torch
import gradio as gr

app = Flask(__name__)
cors = CORS(app)

def get_local_ip():
    """Return the machine's LAN IP by opening a UDP socket towards a public host."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        s.close()

def wrap_text_preserve_newlines(text, width=110):
    # Split the input text into lines based on newline characters
    lines = text.split('\n')

    # Wrap each line individually
    wrapped_lines = [textwrap.fill(line, width=width) for line in lines]

    # Join the wrapped lines back together using newline characters
    wrapped_text = '\n'.join(wrapped_lines)

    return wrapped_text

def process_llm_response(llm_response):
    """Format the chain output as a JSON string with the answer text and its source documents."""
    response_data = {
        'result': wrap_text_preserve_newlines(llm_response['result']),
        'sources': []
    }
    print(wrap_text_preserve_newlines(llm_response['result']))
    print('\n\nSources:')
    for source in llm_response["source_documents"]:
        print(source.metadata['source'] + ", Page Number: " + str(source.metadata['page']))
        response_data['sources'].append({"book": source.metadata['source'], "page": source.metadata['page']})
    return json.dumps(response_data)
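
# Illustrative shape of the JSON string produced above (the values are made up):
# {
#     "result": "wrapped answer text",
#     "sources": [
#         {"book": "some_textbook.pdf", "page": 42}
#     ]
# }
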
def get_answer(question):
    """Run the question through the RetrievalQA chain and return the formatted JSON response."""
    # NOTE: qa_chain is created in the setup block further down, which is currently commented out.
    llm_response = qa_chain(question)
    response = process_llm_response(llm_response)
    return response

@app.route('/question', methods=['POST'])
def answer():
    content_type = request.headers.get('Content-Type')
    if content_type == 'application/json':
        data = request.json
        question = data['question']
        response = get_answer(question)
        return response
    else:
        return 'Content-Type not supported!', 415
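
# Illustrative client call (assumes the server is reachable on localhost:5000,
# matching the commented-out app.run(...) below; the question text is made up):
#
#     import requests
#     r = requests.post("http://localhost:5000/question",
#                       json={"question": "What are the symptoms of anaemia?"})
#     print(r.json()["result"])
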
@app.route('/', methods=['GET'])
def default():
    return "Hello World!"

# The block below wires up the retrieval pipeline and serves the Flask app.
# It is currently commented out; the Gradio demo at the bottom runs instead.
# if __name__ == '__main__':
#     ip = get_local_ip()
#     os.environ["OPENAI_API_KEY"] = "<your OpenAI API key>"  # do not hard-code real keys
#
#     # Embed and store the texts
#     # if not torch.cuda.is_available():
#     #     print("No GPU available")
#     #     exit(1)
#     torch.cuda.empty_cache()
#     torch.max_split_size_mb = 100
#     instructor_embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl",
#                                                            model_kwargs={"device": "cpu"})
#
#     # Supplying a persist_directory will store the embeddings on disk
#     persist_directory = 'db'
#     vectordb2 = Chroma(persist_directory=persist_directory,
#                        embedding_function=instructor_embeddings)
#     retriever = vectordb2.as_retriever(search_kwargs={"k": 3})
#     vectordb2.persist()
#
#     # Set up the turbo LLM
#     turbo_llm = ChatOpenAI(
#         temperature=0,
#         model_name='gpt-3.5-turbo'
#     )
#     qa_chain = RetrievalQA.from_chain_type(llm=turbo_llm,
#                                            chain_type="stuff",
#                                            retriever=retriever,
#                                            return_source_documents=True)
#     qa_chain.combine_documents_chain.llm_chain.prompt.messages[0].prompt.template = """
#     Use only the following pieces of context and think step by step to answer. Answer the user's question only if it is related to the given context.
#     If you don't know the answer, just say that you don't know; don't try to make up an answer. Make your answer very detailed and long.
#     Use bullet points to explain when required.
#     Use only text found in the context as your knowledge source for the answer.
#     ----------------
#     {context}"""
#
#     app.run(host=ip, port=5000)

def greet(name):
    return "Hello " + name + "!!"


iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()
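
# Note: iface.launch() serves only this Gradio demo; the Flask routes defined above
# are not exposed unless the commented-out app.run(...) block is re-enabled.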