import os
import gradio as gr
import requests
# Retrieve the API token from environment variables
api_token = os.getenv("API_TOKEN")
if not api_token:
    raise ValueError("API token not found. Please set the API_TOKEN environment variable.")

# Hugging Face Inference API details
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-1B-Instruct"
HEADERS = {"Authorization": f"Bearer {api_token}"}

def generate_exegesis(passage):
    if not passage.strip():
        return "Please enter a Bible passage."

    prompt = (
        "You are a professional Bible scholar. Provide a detailed exegesis of the following "
        "biblical verse, including: the original Greek text and transliteration with a "
        "word-by-word analysis and meanings, the historical and cultural context, and the "
        f"theological significance.\n\n{passage}\n\nExegesis:"
    )
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": 250,  # the Inference API text-generation task uses max_new_tokens rather than max_length
            "temperature": 0.7,
            "do_sample": True
        }
    }
    try:
        # A timeout keeps the Gradio request from hanging if the API is slow to respond
        response = requests.post(API_URL, headers=HEADERS, json=payload, timeout=60)
        response.raise_for_status()  # Raise an error for bad responses (4xx, 5xx)
        result = response.json()

        if isinstance(result, list) and len(result) > 0:
            return result[0].get("generated_text", "Error: No response from model.")
        else:
            return "Error: Unexpected response format."
    except requests.exceptions.RequestException as e:
        return f"API Error: {e}"

# Gradio interface
demo = gr.Interface(
    fn=generate_exegesis,
    inputs=gr.Textbox(label="Enter Bible Passage", placeholder="e.g., John 3:16"),
    outputs=gr.Textbox(label="Exegesis Commentary"),
    title="JR Study Bible",
    description="Enter a Bible passage to receive insightful exegesis commentary using the Hugging Face API."
)

if __name__ == "__main__":
    demo.launch()