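"""FastAPI service for a small RAG pipeline: PDF ingestion into Pinecone and
answer generation (optionally streamed) via the LangChain helpers in rag.py."""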
import asyncio
import json
import os
import re
import time
from typing import Optional
from uuid import uuid4

import async_timeout
import pymupdf
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException, UploadFile
from fastapi.responses import StreamingResponse
from pinecone import Pinecone, ServerlessSpec
from pydantic import BaseModel, Json

from prompts import *
from rag import *

load_dotenv()

pinecone_api_key = os.environ.get("PINECONE_API_KEY")
common_namespace = os.environ.get("COMMON_NAMESPACE")
pc = Pinecone(api_key=pinecone_api_key)

index_name = os.environ.get("INDEX_NAME")  # change if desired
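
# Create the serverless index on first run and block until it reports ready.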
existing_indexes = [index_info["name"] for index_info in pc.list_indexes()]
if index_name not in existing_indexes:
    pc.create_index(
        name=index_name,
        dimension=3072,
        metric="cosine",
        spec=ServerlessSpec(cloud="aws", region="us-east-1"),
    )
    while not pc.describe_index(index_name).status["ready"]:
        time.sleep(1)

index = pc.Index(index_name)

app = FastAPI()
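
# Request payload models.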
class StyleWriter(BaseModel):
    style: str
    tonality: str


class UserInput(BaseModel):
    prompt: str
    enterprise_id: str
    stream: Optional[bool] = False
    messages: Optional[list[dict]] = []
    style_tonality: Optional[StyleWriter] = None


class EnterpriseData(BaseModel):
    name: str
    id: Optional[str] = None
    filename: Optional[str] = None


tasks = []
@app.get("/")
def greet_json():
return {"Hello": "World!"}
@app.post("/upload")
async def upload_file(file: UploadFile, enterprise_data: Json[EnterpriseData]):
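    """Read a PDF upload, chunk its text, and upsert the chunks into Pinecone
    under the enterprise's namespace."""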
    try:
        # Read the uploaded file
        contents = await file.read()
        # Normalize the enterprise name for use in ids (spaces and path-like
        # characters become underscores)
        enterprise_name = re.sub(r"[ \-./\\]", "_", enterprise_data.name).strip()
        if enterprise_data.filename is not None:
            filename = enterprise_data.filename
        else:
            filename = file.filename
        # Assign a new UUID if id is not provided
        if enterprise_data.id is None:
            clean_name = remove_non_standard_ascii(enterprise_name)
            enterprise_data.id = f"{clean_name}_{uuid4()}"
        # Open the file with PyMuPDF
        pdf_document = pymupdf.open(stream=contents, filetype="pdf")
        # Extract all text from the document
        text = ""
        for page in pdf_document:
            text += page.get_text()
        # Split the text into chunks
        text_chunks = get_text_chunks(text)
        # Create a vector store
        vector_store = get_vectorstore(
            text_chunks,
            filename=filename,
            file_type="pdf",
            namespace=enterprise_data.id,
            index=index,
            enterprise_name=enterprise_name,
        )
        if vector_store:
            return {
                "file_name": filename,
                "enterprise_id": enterprise_data.id,
                "number_of_chunks": len(text_chunks),
                "filename_id": vector_store["filename_id"],
                "enterprise_name": enterprise_name,
            }
        else:
            raise HTTPException(status_code=500, detail="Could not create vector store")
    except HTTPException:
        # Let deliberate HTTP errors pass through instead of re-wrapping them below.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")
    finally:
        await file.close()
@app.get("/documents/{enterprise_id}")
def get_documents(enterprise_id: str):
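    """List the distinct document ids stored under an enterprise namespace."""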
    try:
        docs_names = []
        # Vector ids follow the pattern "<filename_id>_<chunk_index>"; drop the
        # trailing chunk index to recover the per-document id.
        for ids in index.list(namespace=enterprise_id):
            for vec_id in ids:
                name_doc = "_".join(vec_id.split("_")[:-1])
                if name_doc not in docs_names:
                    docs_names.append(name_doc)
        return docs_names
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")
@app.delete("/documents/{enterprise_id}/{filename_id}")
def delete_document(enterprise_id: str, filename_id: str):
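    """Delete every chunk of one document, matching vector ids by their filename_id prefix."""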
    try:
        deleted_ids = []
        # index.list yields pages of ids; collect everything we delete so the
        # response reflects all pages, not just the last one (the original also
        # raised a NameError when no ids matched).
        for ids in index.list(prefix=f"{filename_id}_", namespace=enterprise_id):
            index.delete(ids=ids, namespace=enterprise_id)
            deleted_ids.extend(ids)
        return {"message": "Document deleted", "chunks_deleted": deleted_ids}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")
@app.delete("/documents/all/{enterprise_id}")
def delete_all_documents(enterprise_id: str):
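    """Delete all vectors in an enterprise namespace."""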
    try:
        index.delete(namespace=enterprise_id, delete_all=True)
        return {"message": "All documents deleted"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")

GENERATION_TIMEOUT_SEC = 60


async def stream_generator(response, prompt):
    try:
        async with async_timeout.timeout(GENERATION_TIMEOUT_SEC):
            async for chunk in response:
                if isinstance(chunk, bytes):
                    chunk = chunk.decode("utf-8")  # Convert bytes to str if needed
                yield json.dumps({"prompt": prompt, "content": chunk})
    except asyncio.TimeoutError:
        # async_timeout raises TimeoutError on exit from the context manager, so
        # the handler must sit outside the `async with` block. By this point the
        # response headers have already been sent, so surface the failure as a
        # final error chunk rather than an HTTPException the client would never see.
        yield json.dumps({"prompt": prompt, "error": "Stream timed out"})
@app.post("/generate-answer/")
def generate_answer(user_input: UserInput):
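    """Retrieve RAG context for the prompt, then generate an answer (streamed when requested)."""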
    try:
        prompt = user_input.prompt
        enterprise_id = user_input.enterprise_id
        template_prompt = base_template
        context = get_retreive_answer(enterprise_id, prompt, index, common_namespace)
        # final_prompt_simplified = prompt_formatting(prompt, template, context)
        if not context:
            context = ""
        if user_input.style_tonality is None:
            prompt_formated = prompt_reformatting(template_prompt, context, prompt)
            answer = generate_response_via_langchain(
                prompt, model="gpt-4o", stream=user_input.stream, context=context,
                messages=user_input.messages, template=template_prompt,
            )
        else:
            prompt_formated = prompt_reformatting(
                template_prompt, context, prompt,
                style=user_input.style_tonality.style,
                tonality=user_input.style_tonality.tonality,
            )
            answer = generate_response_via_langchain(
                prompt, model="gpt-4o", stream=user_input.stream, context=context,
                messages=user_input.messages, style=user_input.style_tonality.style,
                tonality=user_input.style_tonality.tonality, template=template_prompt,
            )
        if user_input.stream:
            return StreamingResponse(stream_generator(answer, prompt_formated), media_type="application/json")
        return {
            "prompt": prompt_formated,
            "answer": answer,
            "context": context,
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")
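
# Example call (hypothetical values; `enterprise_id` comes from the /upload response):
#   curl -X POST http://localhost:8000/generate-answer/ \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Summarize the report", "enterprise_id": "Acme_<uuid>", "stream": false}'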