# NOTE(review): the three lines that were here ("Spaces:" / "Runtime error" x2)
# were platform-log text accidentally pasted into the source file — not valid
# Python. Converted to this comment so the module parses.
# --- Imports (deduplicated; stdlib / third-party / local groups) -------------
import json
import os
import sys
from pathlib import Path

import yaml
from twilio.rest import Client
from langchain.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma

from helper import retrieve_relevant_context, generate_response_with_context

# Make the project root importable (this file lives one level below the root).
# Renamed from `file` to avoid shadowing the builtin.
this_file = Path(__file__).resolve()
parent, root = this_file.parent, this_file.parents[1]
sys.path.append(str(root))
print("str(root) :", str(root))
print("parent :", parent)
print("CWD :", os.getcwd())
# --- Configuration -----------------------------------------------------------
# Load API keys from the YAML config.
# NOTE(review): path is relative to the process CWD — confirm the app is always
# launched from the directory that sits next to Config/.
file_path = '../Config/API_KEYS.yml'
with open(file_path, 'r') as config_file:  # renamed: `file` shadowed the builtin
    api_keys = yaml.safe_load(config_file)

# OpenAI: export the key via the environment so OpenAIEmbeddings picks it up.
openai_key = api_keys['OPEN_AI']['Key']
os.environ["OPENAI_API_KEY"] = openai_key

# Twilio credentials (removed the redundant `x = x` self-assignments).
account_sid = api_keys['TWILIO']['account_sid']
auth_token = api_keys['TWILIO']['auth_token']
# SECURITY NOTE(review): consider removing this credential print before deploy.
print("====account_sid:=====", account_sid)
# --- Vector store & Twilio client --------------------------------------------
# Directory where the Chroma collection was persisted by the indexing step.
persist_directory = './vector_db/chroma_v01'

# Embeddings model used at query time — must match the one used at index time.
embedding_model = OpenAIEmbeddings()

# Load the persisted Chroma vector store.
# (Removed the duplicate `from langchain_community.vectorstores import Chroma`;
# the name is already imported at the top of the file.)
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_model)

# Twilio REST client used for messaging.
client = Client(account_sid, auth_token)
# --- Flask app ---------------------------------------------------------------
# NOTE(review): imports kept mid-file to match the original statement order;
# ideally they would move to the top-of-file import block.
from flask import Flask, request, redirect
from twilio.twiml.messaging_response import MessagingResponse

print("flask app is running")
app = Flask(__name__)
@app.route("/sms", methods=["POST"])  # FIX: the route was never registered, so Twilio's webhook 404'd
def incoming_sms():
    """Webhook: send a dynamic, RAG-generated reply to an incoming SMS.

    Twilio POSTs the inbound message here; relevant context is retrieved from
    the Chroma vector store and an LLM-generated answer is returned as TwiML.

    Returns:
        str: TwiML XML that Twilio renders as the reply message (empty
        response for blank/missing message bodies).
    """
    # Text of the SMS the user sent to our Twilio number (may be absent).
    body = request.values.get('Body', None)
    print("body :", body)

    ##### Process incoming text #############
    # FIX: `body` defaults to None, so `body.strip()` could raise
    # AttributeError; guard before stripping.
    incoming_msg = body.strip() if body else ""
    if not incoming_msg:
        # Blank or missing message: reply with an empty (no-op) TwiML document.
        return str(MessagingResponse())

    # Generate response using the RAG-powered system: retrieve relevant
    # chunks, then answer grounded in that context.
    retrieved_texts = retrieve_relevant_context(vectordb, incoming_msg)
    context = "\n".join(retrieved_texts)
    response = generate_response_with_context(incoming_msg, context)
    print("response :", response)
    ##### Process incoming text Done #############

    # Wrap the generated answer in a TwiML response.
    resp = MessagingResponse()
    print("TwiML resp :", resp)
    resp.message(response)
    return str(resp)
if __name__ == "__main__":
    # Dev server only — point the Twilio webhook at this host's /sms endpoint.
    # NOTE(review): debug=True enables the Werkzeug debugger; do not use in
    # production.
    app.run(port=5000, debug=True)