Update app.py
app.py CHANGED
@@ -7,24 +7,26 @@ os.system("huggingface-cli login")
 
 # Retrieve the API token from environment variables
 model_repo_id = os.getenv("MODEL_REPO_ID")
+
 if not model_repo_id:
     raise ValueError("MODEL_REPO_ID token not found. Please set the MODEL_REPO_ID environment variable.")
 
 # Hugging Face Inference API Details
-API_URL = "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3"
+API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"  # Corrected URL
+
 HEADERS = {"Authorization": f"Bearer {model_repo_id}"}
 
 def generate_exegesis(passage):
     if not passage.strip():
         return "Please enter a Bible passage."
-
+
     prompt = (
         "You are a professional Bible Scholar. Provide a detailed exegesis of the following biblical verse, including:"
         " The original Greek text and transliteration with word-by-word analysis and meanings, historical and cultural context,"
         " and theological significance for:\n\n"
         f"{passage}\n\nExegesis:"
     )
-
+
     payload = {
         "inputs": prompt,
         "parameters": {
@@ -33,12 +35,12 @@ def generate_exegesis(passage):
             "do_sample": True
         }
     }
-
+
     try:
         response = requests.post(API_URL, headers=HEADERS, json=payload)
         response.raise_for_status()  # Raise an error for bad responses (4xx, 5xx)
         result = response.json()
-
+
         if isinstance(result, list) and len(result) > 0:
             return result[0].get("generated_text", "Error: No response from model.")
         else:
@@ -55,5 +57,5 @@ demo = gr.Interface(
     description="Enter a Bible passage to receive insightful exegesis commentary using the Hugging Face API."
 )
 
-if __name__ == "__main__":
-    demo.launch()
+if __name__ == "__main__":  # Corrected this line
+    demo.launch()
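The substantive fix is the `API_URL`: the old value pointed at the model's repository page on huggingface.co, which serves HTML rather than a JSON inference endpoint, so `requests.post` could never return usable generations. Below is a minimal standalone sketch for smoke-testing the corrected endpoint outside the Gradio app. It reuses the token, payload, and response shapes visible in this diff; the sample passage and the `max_new_tokens` value are illustrative assumptions not shown here.

```python
# Minimal smoke test for the corrected Inference API endpoint.
# Assumes the token is exposed as MODEL_REPO_ID, as in app.py.
import os

import requests

API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
HEADERS = {"Authorization": f"Bearer {os.getenv('MODEL_REPO_ID')}"}

payload = {
    "inputs": "John 3:16\n\nExegesis:",  # illustrative passage
    "parameters": {"max_new_tokens": 256, "do_sample": True},  # max_new_tokens is assumed
}

response = requests.post(API_URL, headers=HEADERS, json=payload)
response.raise_for_status()  # surfaces 4xx/5xx, e.g. a bad token or a model still loading
result = response.json()

# The text-generation task returns a list of {"generated_text": ...} objects.
if isinstance(result, list) and result:
    print(result[0].get("generated_text", "Error: No response from model."))
```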