|
from flask import Flask, request
|
|
import os
|
|
import requests
|
|
from langchain.vectorstores import Chroma
|
|
from langchain.llms import OpenAI
|
|
from langchain.chains import RetrievalQA
|
|
from InstructorEmbedding import INSTRUCTOR
|
|
from langchain.embeddings import HuggingFaceInstructEmbeddings
|
|
from langchain.chat_models import ChatOpenAI
|
|
|
|
import numpy
|
|
import torch
|
|
import json
|
|
import textwrap
|
|
from flask_cors import CORS
|
|
import socket;
|
|
|
|
import gradio as gr
|
|
|
|
# Flask application instance for the QA HTTP API.
app = Flask(__name__)

# Enable CORS on all routes so a browser front-end served from a
# different origin can call this API. (Default flask_cors config:
# allows all origins — presumably fine for a demo; verify for prod.)
cors = CORS(app)
|
|
|
|
|
|
def get_local_ip():
    """Return this machine's outbound (LAN) IP address as a string.

    Opens a UDP socket and connect()s it to a public address. No packet
    is actually sent — connect() on a datagram socket only selects the
    local interface, whose address getsockname() then reports.

    Raises:
        OSError: if no route to the target address exists (e.g. offline).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        # BUG FIX: the original never closed the socket, leaking a file
        # descriptor on every call.
        s.close()
|
|
|
|
def wrap_text_preserve_newlines(text, width=110):
    """Hard-wrap *text* to *width* columns, keeping existing line breaks.

    Each original line is wrapped independently with textwrap.fill, so
    the newlines the author placed are preserved in the output.

    Args:
        text: the string to wrap.
        width: maximum column width per output line (default 110).

    Returns:
        The re-wrapped string.
    """
    wrapped = []
    for segment in text.split('\n'):
        wrapped.append(textwrap.fill(segment, width=width))
    return '\n'.join(wrapped)
|
|
|
|
def process_llm_response(llm_response):
    """Format a LangChain RetrievalQA response as a JSON string.

    Args:
        llm_response: mapping with 'result' (the answer text) and
            'source_documents' (documents exposing .metadata['source']
            and .metadata['page']).

    Returns:
        JSON string with 'result' (width-wrapped answer text) and
        'sources' (list of {"book": ..., "page": ...} dicts).

    Side effects:
        Prints the wrapped answer and the source list to stdout.
    """
    result_text = wrap_text_preserve_newlines(llm_response['result'])
    sources = [
        {"book": doc.metadata['source'], "page": doc.metadata['page']}
        for doc in llm_response["source_documents"]
    ]

    print(result_text)
    print('\n\nSources:')
    for doc in llm_response["source_documents"]:
        # BUG FIX: the original concatenated the source name directly
        # onto "Page Number:" with no separator, printing e.g.
        # "book.pdfPage Number: 3".
        print(doc.metadata['source'] + " Page Number: " + str(doc.metadata['page']))

    return json.dumps({'result': result_text, 'sources': sources})
|
|
|
|
def get_answer(question):
    """Run *question* through the retrieval QA chain and return the
    formatted JSON answer string.

    NOTE(review): qa_chain is not defined anywhere in the visible part
    of this file — presumably a module-level RetrievalQA chain built
    elsewhere; confirm before deploying.
    """
    raw = qa_chain(question)
    return process_llm_response(raw)
|
|
|
|
@app.route('/question', methods=['POST'])
def answer():
    """POST /question — answer a JSON-encoded question.

    Expects a JSON body of the form {"question": "..."} with an
    application/json Content-Type. Returns the QA chain's answer as a
    JSON string, or a 415 error for non-JSON requests.
    """
    content_type = request.headers.get('Content-Type')
    # BUG FIX: the original compared with '==', which rejects valid
    # headers such as "application/json; charset=utf-8".
    if content_type and content_type.startswith('application/json'):
        data = request.json
        question = data['question']
        response = get_answer(question)
        return response
    else:
        # BUG FIX: the original returned this error with HTTP 200;
        # 415 Unsupported Media Type is the correct status.
        return 'Content-Type not supported!', 415
|
|
|
|
@app.route('/', methods=['GET'])
def default():
    """GET / — trivial liveness/health-check endpoint."""
    greeting = "Hello World!"
    return greeting
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def greet(name):
    """Return a greeting for *name*, e.g. greet("Ada") -> "Hello Ada!!"."""
    # str.join (like the original '+' concatenation) raises TypeError
    # for non-string input, so edge behavior is preserved.
    return "".join(["Hello ", name, "!!"])
|
|
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
|
|
iface.launch() |