Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,12 +1,13 @@
|
|
1 |
import streamlit as st
|
2 |
import requests
|
3 |
import os
|
|
|
4 |
|
5 |
# Fetch Hugging Face and Groq API keys from secrets
|
6 |
-
Transalate_token = os.getenv('
|
7 |
-
Image_Token = os.getenv('
|
8 |
-
Content_Token = os.getenv('
|
9 |
-
Image_prompt_token = os.getenv('
|
10 |
|
11 |
# API Headers
|
12 |
Translate = {"Authorization": f"Bearer {Transalate_token}"}
|
@@ -44,18 +45,25 @@ content_models = {
|
|
44 |
# Default content generation model
|
45 |
default_content_model = "llama-3.1-70b-versatile"
|
46 |
|
47 |
-
# Function to query Hugging Face translation model
|
48 |
def translate_text(text):
|
49 |
payload = {"inputs": text}
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
59 |
|
60 |
# Function to query Groq content generation model
|
61 |
def generate_content(english_text, max_tokens, temperature, model):
|
@@ -105,8 +113,26 @@ def generate_image(image_prompt, model_url):
|
|
105 |
st.error(f"Image Generation Error {response.status_code}: {response.text}")
|
106 |
return None
|
107 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
108 |
# Main Streamlit app
|
109 |
def main():
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
110 |
# Custom CSS for background, borders, and other styling
|
111 |
st.markdown(
|
112 |
"""
|
@@ -153,7 +179,7 @@ def main():
|
|
153 |
st.sidebar.header("Settings")
|
154 |
temperature = st.sidebar.slider("Select Temperature", 0.1, 1.0, 0.7)
|
155 |
max_tokens = st.sidebar.slider("Max Tokens for Content Generation", 100, 400, 200)
|
156 |
-
|
157 |
# Content generation model selection
|
158 |
content_model = st.sidebar.selectbox("Select Content Generation Model", list(content_models.keys()), index=0)
|
159 |
|
@@ -161,11 +187,11 @@ def main():
|
|
161 |
image_model = st.sidebar.selectbox("Select Image Generation Model", list(image_generation_urls.keys()), index=0)
|
162 |
|
163 |
# Reminder about model availability
|
164 |
-
st.sidebar.warning("Note: Based on availability, some models might not work. Please try another model if an error occurs.")
|
165 |
|
166 |
# Suggested inputs
|
167 |
st.write("## Suggested Inputs")
|
168 |
-
suggestions = ["தரவு அறிவியல்", "
|
169 |
selected_suggestion = st.selectbox("Select a suggestion or enter your own:", [""] + suggestions)
|
170 |
|
171 |
# Input box for user
|
@@ -177,22 +203,27 @@ def main():
|
|
177 |
st.write("### Translated English Text:")
|
178 |
english_text = translate_text(tamil_input)
|
179 |
if english_text:
|
180 |
-
st.
|
181 |
-
|
182 |
-
# Step 2:
|
183 |
-
st.write("###
|
184 |
-
|
185 |
-
|
186 |
-
|
187 |
-
|
188 |
-
|
189 |
-
|
190 |
-
st.write("### Generated Image:")
|
191 |
-
with st.spinner('Generating image...'):
|
192 |
image_prompt = generate_image_prompt(english_text)
|
193 |
-
|
194 |
-
|
195 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
196 |
|
197 |
if __name__ == "__main__":
|
198 |
main()
|
|
|
1 |
import streamlit as st
|
2 |
import requests
|
3 |
import os
|
4 |
+
import time # Import time module for implementing delay
|
5 |
|
6 |
# Fetch Hugging Face and Groq API keys from secrets
# NOTE(review): "Transalate_token" is a typo for "Translate_token"; kept as-is
# because the module-level name may be referenced elsewhere in the file.
Transalate_token = os.getenv('Translate')            # Hugging Face translation API key
Image_Token = os.getenv('Image_generation')          # Hugging Face image-generation API key
Content_Token = os.getenv('ContentGeneration')       # Groq content-generation API key
Image_prompt_token = os.getenv('Prompt_generation')  # key for image-prompt generation

# API Headers
# Bearer-token header for the translation endpoint; will read "Bearer None"
# if the 'Translate' secret is unset — TODO confirm secrets are configured.
Translate = {"Authorization": f"Bearer {Transalate_token}"}
|
|
|
45 |
# Default content generation model
|
46 |
default_content_model = "llama-3.1-70b-versatile"
|
47 |
|
48 |
+
# Function to query Hugging Face translation model with retry mechanism
def translate_text(text):
    """Translate Tamil *text* to English via the Hugging Face inference API.

    Retries up to ``max_retries`` times on any non-200 response, waiting
    ``delay`` seconds between attempts.  Returns the translated string on
    success, or None (after showing a Streamlit error) if every attempt fails.
    """
    payload = {"inputs": text}
    max_retries = 3  # maximum number of attempts
    delay = 2        # delay between retries, in seconds

    for attempt in range(max_retries):
        response = requests.post(translation_url, headers=Translate, json=payload)
        if response.status_code == 200:
            result = response.json()
            # The inference API returns a list of generations; the translated
            # sentence is the 'generated_text' field of the first item.
            return result[0]['generated_text']
        # Only warn and sleep when another attempt actually remains.
        # (Previously the code claimed "Retrying..." and slept for 2 seconds
        # even after the final failure, delaying the error for no benefit.)
        if attempt < max_retries - 1:
            st.warning(f"Translation failed (Attempt {attempt+1}/{max_retries}) - Retrying in {delay} seconds...")
            time.sleep(delay)

    # All retries exhausted: surface a terminal error to the user.
    st.error(f"Translation failed after {max_retries} attempts. Please reload the page and try again later.")
    return None
|
67 |
|
68 |
# Function to query Groq content generation model
|
69 |
def generate_content(english_text, max_tokens, temperature, model):
|
|
|
113 |
st.error(f"Image Generation Error {response.status_code}: {response.text}")
|
114 |
return None
|
115 |
|
116 |
+
# User Guide Section
def show_user_guide():
    """Render the static FusionMind user-guide page.

    Pure presentation: writes a title and a markdown help text to the
    Streamlit page; takes no arguments and returns nothing.
    """
    st.title("FusionMind User Guide")
    st.write("""
    ### Welcome to the FusionMind User Guide!

    ### How to use this app:
    ... (omitted for brevity)
    """)
|
125 |
+
|
126 |
# Main Streamlit app
|
127 |
def main():
|
128 |
+
# Sidebar Menu
|
129 |
+
st.sidebar.title("FusionMind Options")
|
130 |
+
page = st.sidebar.radio("Select a page:", ["Main App", "User Guide"])
|
131 |
+
|
132 |
+
if page == "User Guide":
|
133 |
+
show_user_guide()
|
134 |
+
return
|
135 |
+
|
136 |
# Custom CSS for background, borders, and other styling
|
137 |
st.markdown(
|
138 |
"""
|
|
|
179 |
st.sidebar.header("Settings")
|
180 |
temperature = st.sidebar.slider("Select Temperature", 0.1, 1.0, 0.7)
|
181 |
max_tokens = st.sidebar.slider("Max Tokens for Content Generation", 100, 400, 200)
|
182 |
+
|
183 |
# Content generation model selection
|
184 |
content_model = st.sidebar.selectbox("Select Content Generation Model", list(content_models.keys()), index=0)
|
185 |
|
|
|
187 |
image_model = st.sidebar.selectbox("Select Image Generation Model", list(image_generation_urls.keys()), index=0)
|
188 |
|
189 |
# Reminder about model availability
|
190 |
+
st.sidebar.warning("Note: Based on availability, some models might not work. Please try another model if an error occurs.By default the perfect model is selected try with it and then experiment with different models")
|
191 |
|
192 |
# Suggested inputs
|
193 |
st.write("## Suggested Inputs")
|
194 |
+
suggestions = ["தரவு அறிவியல்", "உளவியல்", "ராக்கெட் எப்படி வேலை செய்கிறது"]
|
195 |
selected_suggestion = st.selectbox("Select a suggestion or enter your own:", [""] + suggestions)
|
196 |
|
197 |
# Input box for user
|
|
|
203 |
st.write("### Translated English Text:")
|
204 |
english_text = translate_text(tamil_input)
|
205 |
if english_text:
|
206 |
+
st.write(english_text)
|
207 |
+
|
208 |
+
# Step 2: Content Generation
|
209 |
+
st.write("### Educational Content Generated:")
|
210 |
+
content = generate_content(english_text, max_tokens, temperature, content_models[content_model])
|
211 |
+
if content:
|
212 |
+
st.write(content)
|
213 |
+
|
214 |
+
# Step 3: Generate Image Prompt
|
215 |
+
st.write("### Image Prompt:")
|
|
|
|
|
216 |
image_prompt = generate_image_prompt(english_text)
|
217 |
+
if image_prompt:
|
218 |
+
st.write(image_prompt)
|
219 |
+
|
220 |
+
# Step 4: Image Generation
|
221 |
+
st.write("### Generated Image:")
|
222 |
+
image = generate_image(image_prompt, image_generation_urls[image_model])
|
223 |
+
if image:
|
224 |
+
st.image(image)
|
225 |
+
else:
|
226 |
+
st.error("Please enter or select Tamil text.")
|
227 |
|
228 |
# Script entry point: launch the Streamlit app.
if __name__ == "__main__":
    main()
|