Update app.py
app.py CHANGED
@@ -1,21 +1,34 @@
+import os
 import gradio as gr
-
+import requests
 
-#
-
-model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")
+# Run the Hugging Face CLI login command.
+os.system("huggingface-cli login")
 
+# Retrieve the API token from environment variables
+model_repo_id = os.getenv("MODEL_REPO_ID")
+if not model_repo_id:
+    raise ValueError("MODEL_REPO_ID token not found. Please set the MODEL_REPO_ID environment variable.")
+
+# Hugging Face Inference API Details
+API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-1B-Instruct"
+HEADERS = {"Authorization": f"Bearer {model_repo_id}"}
 
 def generate_exegesis(passage):
     if not passage.strip():
         return "Please enter a Bible passage."
 
-    prompt =
+    prompt = (
+        "You are a professional Bible Scholar. Provide a detailed exegesis of the following biblical verse, including:"
+        " The original Greek text and transliteration with word-by-word analysis and meanings, historical and cultural context,"
+        " and theological significance for:\n\n"
+        f"{passage}\n\nExegesis:"
+    )
 
     payload = {
         "inputs": prompt,
         "parameters": {
-            "max_length":
+            "max_length": 500,
             "temperature": 0.7,
             "do_sample": True
         }
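The hunk ends inside the parameters dict, so the rest of the file is not shown in this commit view. Below is a minimal sketch of how generate_exegesis might finish and how the Gradio app might be wired up, assuming the usual requests.post call against the Inference API and a plain gr.Interface; the response parsing, error handling, interface labels, and title are assumptions, not part of the commit.

    # --- sketch: possible continuation of generate_exegesis, inside the function body ---
    # API_URL, HEADERS, prompt, and payload come from the code above.
    response = requests.post(API_URL, headers=HEADERS, json=payload)
    if response.status_code != 200:
        return f"Error {response.status_code}: {response.text}"
    result = response.json()
    # Text-generation endpoints typically return a list like [{"generated_text": "..."}].
    if isinstance(result, list) and result and "generated_text" in result[0]:
        # Drop the echoed prompt so only the model's exegesis is returned.
        return result[0]["generated_text"].replace(prompt, "").strip()
    return str(result)

# --- sketch: Gradio wiring (labels and title are assumed, not from the commit) ---
demo = gr.Interface(
    fn=generate_exegesis,
    inputs=gr.Textbox(label="Bible passage", placeholder="e.g. John 3:16"),
    outputs=gr.Textbox(label="Exegesis"),
    title="Bible Exegesis Generator",
)

if __name__ == "__main__":
    demo.launch()

One design note on the commit itself: since every request already carries the token in the Authorization header, the interactive huggingface-cli login call is arguably redundant in a Space; the MODEL_REPO_ID secret (which, despite its name, holds the API token) is what actually authenticates the calls.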