Update app.py
app.py CHANGED
@@ -1,13 +1,13 @@
 import os
 import re
 import gradio as gr
-import edge_tts
 import asyncio
 import time
 import tempfile
 from huggingface_hub import InferenceClient
+from gtts import gTTS
 
-DESCRIPTION = """ # <center><b>JARVIS⚡</b></center>
+DESCRIPTION = """ # <center><b>JARVIS⚡ revised</b></center>
 ### <center>A personal Assistant of Tony Stark for YOU
 ### <center>Currently It supports text input, But If this space completes 1k hearts than I starts working on Audio Input.</center>
 """
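This commit swaps the edge_tts dependency for gTTS. A minimal, self-contained sketch of the pattern the new import enables, assuming `gtts` is installed (`pip install gTTS`); the sample phrase is a placeholder, and note that lang='ko' (copied from the diff) selects Korean pronunciation:

import tempfile

from gtts import gTTS

# Synthesize a short phrase and write it to a temporary MP3 file.
tts = gTTS(text="At your service.", lang="ko")
with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
    tmp_path = tmp_file.name
    tts.save(tmp_path)  # gTTS fetches the audio from Google's TTS endpoint here
print(tmp_path)  # path to a playable MP3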
@@ -43,11 +43,11 @@ async def generate1(prompt):
     for response in stream:
         output += response.token.text
 
-    communicate = edge_tts.Communicate(output)
-    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
-        tmp_path = tmp_file.name
-        await communicate.save(tmp_path)
-    yield tmp_path
+    tts = gTTS(text=output, lang='ko')
+    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
+        tmp_path = tmp_file.name
+        tts.save(tmp_path)
+    yield tmp_path
 
 client2 = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
 
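All three generate functions now end with the same synthesize-and-yield tail. NamedTemporaryFile is opened with delete=False so the .mp3 path outlives the handle and Gradio can still serve the file after the with block closes. A standalone sketch of just that tail as a helper, with placeholder text:

import tempfile

from gtts import gTTS

def speak(text: str) -> str:
    # Synthesize `text` to a temporary MP3 and return the file path.
    tts = gTTS(text=text, lang="ko")
    # delete=False reserves a unique path that survives the handle closing,
    # so the caller (e.g. a Gradio Audio component) can read it afterwards.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
        tmp_path = tmp_file.name
        tts.save(tmp_path)
    return tmp_path

One behavioral difference worth noting: edge_tts's save is a coroutine and was awaited, while gTTS.save() is a blocking network call, so inside these async functions it will hold up the event loop while synthesis runs.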
@@ -62,17 +62,17 @@ async def generate2(prompt):
         do_sample=True,
     )
     formatted_prompt = system_instructions2 + prompt + "[ASSISTANT]"
-    stream = client2.text_generation(
+    stream = client3.text_generation(
         formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
     output = ""
     for response in stream:
         output += response.token.text
 
-    communicate = edge_tts.Communicate(output)
-    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
-        tmp_path = tmp_file.name
-        await communicate.save(tmp_path)
-    yield tmp_path
+    tts = gTTS(text=output, lang='ko')
+    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
+        tmp_path = tmp_file.name
+        tts.save(tmp_path)
+    yield tmp_path
 
 client3 = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
 
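Besides the TTS tail, this hunk points generate2 at client3 instead of client2. Since client3 is created at module level just below, the name is already bound by the time a request reaches generate2, and both clients wrap the same model ID anyway. The streaming loop uses huggingface_hub's token-streaming interface: with stream=True and details=True, text_generation yields events whose .token.text carries each new token. A minimal sketch, with a placeholder prompt:

from huggingface_hub import InferenceClient

client = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")

output = ""
# Each streamed event is a TextGenerationStreamOutput with a .token field.
for response in client.text_generation("Hello, JARVIS.", max_new_tokens=64,
                                       stream=True, details=True):
    output += response.token.text
print(output)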
@@ -93,11 +93,11 @@ async def generate3(prompt):
     for response in stream:
         output += response.token.text
 
-    communicate = edge_tts.Communicate(output)
-    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
-        tmp_path = tmp_file.name
-        await communicate.save(tmp_path)
-    yield tmp_path
+    tts = gTTS(text=output, lang='ko')
+    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
+        tmp_path = tmp_file.name
+        tts.save(tmp_path)
+    yield tmp_path
 
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
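The diff ends just as the UI begins, so the wiring that consumes the yielded MP3 paths is not shown. A hedged sketch of how an async generator like generate1 is typically hooked to an autoplaying audio component in Gradio Blocks; the component names, labels, and stub generator here are illustrative, not taken from app.py:

import gradio as gr

DESCRIPTION = "# JARVIS"  # placeholder for the Markdown header in app.py

async def generate1(prompt):
    # Stand-in for the real generator: yield the path of a synthesized MP3.
    yield "reply.mp3"  # placeholder path

with gr.Blocks() as demo:
    gr.Markdown(DESCRIPTION)
    user_input = gr.Textbox(label="Prompt")
    output_audio = gr.Audio(label="JARVIS", autoplay=True)
    # Every path yielded by generate1 updates the audio player.
    user_input.submit(generate1, inputs=user_input, outputs=output_audio)

demo.queue().launch()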