Update app.py
Browse files
app.py
CHANGED
@@ -49,6 +49,37 @@ def get_predictions(audio_input):
|
|
49 |
emotion_prediction = predict_emotion_from_audio(audio_input)
|
50 |
return emotion_prediction # Return a single prediction instead of a list
|
51 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
52 |
# Create the Gradio interface
|
53 |
with gr.Blocks() as interface:
|
54 |
gr.Markdown("Emotional Machines test: Load or Record an audio file to speech emotion analysis")
|
|
|
49 |
emotion_prediction = predict_emotion_from_audio(audio_input)
|
50 |
return emotion_prediction # Return a single prediction instead of a list
|
51 |
|
52 |
+
|
53 |
+
|
54 |
+
###
|
55 |
+
|
56 |
+
# Define the image generation function using the Stable Diffusion API
# Endpoint for the text2img REST call made by get_image() below.
url = "https://stablediffusionapi.com/api/v3/text2img"
# UI strings shown in the Gradio interface.
title = "<h2><center>Text to Image Generation with Stable Diffusion API</center></h2>"
description = "Get the API key by signing up here [Stable Diffusion API](https://stablediffusionapi.com)."
|
60 |
+
|
61 |
+
def get_image(key, prompt, inference_steps, filter):
    """Generate one 512x512 image via the Stable Diffusion text2img API.

    Parameters
    ----------
    key : str
        Stable Diffusion API key (sign-up link is in ``description``).
    prompt : str
        Positive text prompt for the image.
    inference_steps : int or str
        Forwarded to the API as ``num_inference_steps``.
    filter :
        Forwarded to the API as ``safety_checker``. The name shadows the
        ``filter`` builtin but is kept for interface compatibility.

    Returns
    -------
    PIL.Image.Image
        The first generated image, downloaded from the URL the API returns.

    Raises
    ------
    requests.HTTPError
        If either the generation call or the image download fails.
    """
    payload = {
        "key": key,
        "prompt": prompt,
        "negative_prompt": "((out of frame)), ((extra fingers)), mutated hands, ((poorly drawn hands)), ((poorly drawn face)), (((mutation))), (((deformed))), (((tiling))), ((naked)), ((tile)), ((fleshpile)), ((ugly)), (((abstract))), blurry, ((bad anatomy)), ((bad proportions)), ((extra limbs)), cloned face, (((skinny))), glitchy, ((extra breasts)), ((double torso)), ((extra arms)), ((extra hands)), ((mangled fingers)), ((missing breasts)), (missing lips), ((ugly face)), ((fat)), ((extra legs)), anime",
        "width": "512",
        "height": "512",
        "samples": "1",
        "num_inference_steps": inference_steps,
        "safety_checker": filter,
        "enhance_prompt": "yes",
        "guidance_scale": 7.5
    }
    # NOTE(review): this posts the payload form-encoded (data=) with no
    # Content-Type header; the provider's examples send JSON — confirm which
    # the server accepts before changing to json=payload.
    headers = {}
    # timeout prevents the Gradio worker from hanging forever on a stuck API.
    response = requests.post(url, headers=headers, data=payload, timeout=120)
    # Fail fast with a clear HTTPError instead of an opaque KeyError below.
    response.raise_for_status()
    # The API returns a JSON body whose 'output' list holds generated image URLs.
    image_url = str(response.json()['output'][0])
    image_response = requests.get(image_url, timeout=120)
    image_response.raise_for_status()
    return Image.open(BytesIO(image_response.content))
|
80 |
+
|
81 |
+
####
|
82 |
+
|
83 |
# Create the Gradio interface
|
84 |
with gr.Blocks() as interface:
|
85 |
gr.Markdown("Emotional Machines test: Load or Record an audio file to speech emotion analysis")
|