import gradio as gr
import os
from huggingface_hub import login
from dotenv import load_dotenv

from embedding import embeddings
from db.chroma import load_and_setup_db, search_cases
from chat.hermes_llm import ChatManager

# Load environment variables
load_dotenv()

# Login to Hugging Face (uncomment if gated models or private repos are needed)
# login(token=os.getenv("HUGGINGFACEHUB_API_TOKEN"), add_to_git_credential=True)

# Initialize components
VECTOR_DB_PATH = os.getenv("VECTOR_DB_PATH")
vector_store = load_and_setup_db(VECTOR_DB_PATH, embeddings)
legal_chat = ChatManager(temperature=0.1)


def process_query(query, chat_history):
    """Retrieve the most relevant case and answer the query against it."""
    try:
        # Search for the single most relevant case
        results = search_cases(vectorstore=vector_store, query=query, k=1)

        if results:
            # Ground the LLM response in the retrieved case text
            response = legal_chat.get_response(results[0]["content"], query=query)
            case_id = results[0]["metadata"]["case_id"]
            response_final = f"{response}\n\nSource: [case_id: {case_id}]"
        else:
            response_final = "No matching documents found."

        # Update chat history and clear the input box
        chat_history.append((query, response_final))
        return "", chat_history
    except Exception as e:
        return "", chat_history + [(query, f"Error: {str(e)}")]


# Create Gradio interface
with gr.Blocks(title="Legal Chat Assistant") as demo:
    gr.Markdown("# Legal Chat Assistant")
    gr.Markdown("Ask questions about legal cases and get AI-powered responses.")

    chatbot = gr.Chatbot(
        [],
        elem_id="chatbot",
        bubble_full_width=False,
        height=400,
    )

    with gr.Row():
        query_input = gr.Textbox(
            placeholder="Enter your query here...",
            show_label=False,
            scale=4,
        )
        submit_btn = gr.Button("Send", scale=1)

    # Both the Send button and pressing Enter submit the query
    submit_btn.click(
        process_query,
        inputs=[query_input, chatbot],
        outputs=[query_input, chatbot],
    )
    query_input.submit(
        process_query,
        inputs=[query_input, chatbot],
        outputs=[query_input, chatbot],
    )

if __name__ == "__main__":
    demo.launch(share=True)
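
# Example .env consumed by load_dotenv() above (a minimal sketch; the values
# are illustrative assumptions, not taken from the source repo):
#
#   VECTOR_DB_PATH=./chroma_db            # path passed to load_and_setup_db
#   HUGGINGFACEHUB_API_TOKEN=hf_xxxxxxxx  # only needed if login() is re-enabled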