import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from deep_translator import GoogleTranslator
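
# Dependencies (assumed pip package names): streamlit, transformers, torch,
# deep-translator. Note that the gated meta-llama checkpoints also require
# accepting the license on Hugging Face and logging in (e.g. `huggingface-cli login`).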
# Load model directly from Hugging Face; cache it so Streamlit does not
# reload the weights on every rerun of the script.
@st.cache_resource
def load_model():
    model_name = "meta-llama/Llama-3.1-70B-Instruct"  # Replace with your preferred model name
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16,  # Use float16 for faster inference (requires a compatible GPU)
        device_map="auto"           # Automatically assigns model layers to available devices
    )
    return tokenizer, model

# Translate text
def translate_text(text, target_language):
    try:
        translator = GoogleTranslator(source="auto", target=target_language)
        return translator.translate(text)
    except Exception as e:
        return f"Translation Error: {e}"

# Generate a response using the model
def generate_response(query, tokenizer, model, max_new_tokens=500):
    # Move inputs to whatever device the model was placed on (works on CPU or GPU).
    inputs = tokenizer(query, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_new_tokens,  # Cap generated tokens so long prompts are not truncated
        num_beams=5,
        early_stopping=True
    )
    # Decode only the newly generated tokens, not the echoed prompt.
    return tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)

# Main App
def main():
    st.set_page_config(page_title="Sanatana Dharma Explorer", page_icon="🕉️", layout="wide")
    st.title("Sanatana Dharma Explorer: Wisdom for Everyone 🕉️")
    st.write(
        "Ask questions across multiple Hindu scriptures, and I will provide tailored, beginner-friendly, or expert-level answers in your preferred language."
    )
    # Load model
    tokenizer, model = load_model()
    scriptures = ["Bhagavad Geeta", "Vedas", "Upanishads", "Ramayana", "Mahabharata", "Puranas"]
    selected_scriptures = st.multiselect("Select the scriptures you want to query:", scriptures)
    languages = {"English": "en", "Hindi": "hi", "Tamil": "ta", "Telugu": "te", "Bengali": "bn", "Punjabi": "pa", "Gujarati": "gu", "Kannada": "kn"}
    selected_language = st.selectbox("Select your preferred output language:", list(languages.keys()))
    understanding_level = st.slider("Select your level of understanding (1 = Beginner, 10 = Expert):", min_value=1, max_value=10, value=5)
    user_input = st.text_area("Enter your question (in any language):", height=150)
    if st.button("Get Answers"):
        if not user_input.strip():
            st.warning("Please enter a valid question.")
            return
        if not selected_scriptures:
            st.warning("Please select at least one scripture.")
            return
        with st.spinner("Translating your question..."):
            translated_query = translate_text(user_input, "en")
        results = []
        with st.spinner("Generating answers..."):
            for scripture in selected_scriptures:
                scriptural_query = f"Answer based on {scripture}: {translated_query}. Provide details at level {understanding_level}."
                response = generate_response(scriptural_query, tokenizer, model)
                results.append((scripture, response))
        with st.spinner(f"Translating answers to {selected_language}..."):
            translated_results = [
                (scripture, translate_text(response, languages[selected_language]))
                for scripture, response in results
            ]
        for scripture, response in translated_results:
            st.write(f"### {scripture} Answer:")
            st.write(response)
        st.write("### Additional Information:")
        st.write(f"**Original Question:** {user_input}")
        st.write(f"**Translated to English:** {translated_query}")


if __name__ == "__main__":
    main()
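
# To run locally (assuming this file is saved as app.py):
#   streamlit run app.py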