Ubuntu committed on
Commit
bea8fb1
1 Parent(s): 4f3068f
Files changed (1) hide show
  1. app.py +154 -0
app.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import hmac
import os
import uuid

import gradio as gr
import requests
# Remote service endpoints: speech recognition plus the two-step TTS flow
# (speak -> returns a filename, wave -> serves the rendered audio).
ASR_API = "http://astarwiz.com:9998/asr"
TTS_SPEAK_SERVICE = 'http://astarwiz.com:9603/speak'
TTS_WAVE_SERVICE = 'http://astarwiz.com:9603/wave'
8
+
# Human-readable names for the UI's supported language codes.
# NOTE(review): "ma" is the app's own code for Malay; the ASR backend expects
# "ms" and transcribe_and_speak maps it there.
LANGUAGE_MAP = {
    "en": "English",
    "ma": "Malay",
    "ta": "Tamil",
    "zh": "Chinese",
}

# Password gating the Developer tab, read once at startup from the environment.
DEVELOPER_PASSWORD = os.getenv("DEV_PWD")
18
+
def inference_via_llm_api(input_text, min_new_tokens=2, max_new_tokens=64):
    """Send *input_text* to the vLLM completions endpoint and return the reply.

    The prompt is wrapped in the model's chat template with a fixed
    "translation expert" system message.

    Args:
        input_text: user-visible prompt text (here: a translation request).
        min_new_tokens / max_new_tokens: generation length bounds forwarded
            to the vLLM API.

    Returns:
        The generated text (stripped), or a fixed error message string when
        the request fails or the response carries no choices.
    """
    print(input_text)
    one_vllm_input = f"<|im_start|>system\nYou are a translation expert.<|im_end|>\n<|im_start|>user\n{input_text}<|im_end|>\n<|im_start|>assistant"
    vllm_api = 'http://astarwiz.com:2333/' + "v1/completions"
    data = {
        "prompt": one_vllm_input,
        'model': "./Edu-4B-NewTok-V2-20240904/",
        'min_tokens': min_new_tokens,
        'max_tokens': max_new_tokens,
        'temperature': 0.1,
        'top_p': 0.75,
        'repetition_penalty': 1.1,
        # 151645 is the chat end-of-turn token id for this model family.
        "stop_token_ids": [151645, ],
    }
    try:
        # Bound the call so a hung backend cannot freeze the UI forever;
        # the original had no timeout and let network errors propagate.
        response = requests.post(
            vllm_api,
            headers={"Content-Type": "application/json"},
            json=data,
            timeout=60,
        ).json()
    except (requests.RequestException, ValueError) as exc:
        print(exc)
        return "The system got some error during vLLM generation. Please try it again."
    print(response)
    if "choices" in response:
        return response["choices"][0]['text'].strip()
    return "The system got some error during vLLM generation. Please try it again."
39
+
def transcribe_and_speak(audio, source_lang, target_lang):
    """Run the full ASR -> LLM translation -> TTS pipeline.

    Args:
        audio: filepath of the recorded/uploaded clip, or falsy when absent.
        source_lang / target_lang: codes from LANGUAGE_MAP ("en"/"ma"/"ta"/"zh").

    Returns:
        A 3-tuple (transcription, translated_text, audio_url). On failure the
        failed stage is reported in the corresponding slot ("ASR failed" /
        "TTS failed") so callers always receive three values.
    """
    if not audio:
        return "Please provide an audio input.", None, None

    # --- ASR ---
    data = {
        # The ASR backend uses ISO code 'ms' for Malay; the UI uses 'ma'.
        'language': 'ms' if source_lang == 'ma' else source_lang,
        'model_name': 'whisper-large-v2-local-cs'
    }
    # Context manager closes the upload handle (the original leaked it);
    # timeout keeps a stuck backend from hanging the request thread.
    with open(audio, 'rb') as audio_fh:
        asr_response = requests.post(
            ASR_API, files={'file': audio_fh}, data=data, timeout=120
        )
    # Check the status BEFORE decoding: error responses may not be JSON.
    if asr_response.status_code != 200:
        return "ASR failed", None, None
    asr_payload = asr_response.json()
    print(asr_payload)
    transcription = asr_payload['text']

    # --- Translation ---
    translation_prompt = f"Translate the following text from {LANGUAGE_MAP[source_lang]} to {LANGUAGE_MAP[target_lang]}: {transcription}"
    translated_text = inference_via_llm_api(translation_prompt)
    print(f"Translation: {translated_text}")

    # --- TTS ---
    tts_params = {
        'language': target_lang,
        'speed': 1.1,
        # One fixed voice per target language.
        'speaker': 'MS' if target_lang == 'en' else 'msFemale' if target_lang == 'ma' else 'ta_female1' if target_lang == 'ta' else 'childChinese2',
        'text': translated_text
    }
    tts_response = requests.get(TTS_SPEAK_SERVICE, params=tts_params, timeout=120)
    if tts_response.status_code != 200:
        return transcription, translated_text, "TTS failed"
    # The speak service answers with a server-side filename; build the URL
    # the browser can fetch the rendered wave from.
    audio_file = tts_response.text.strip()
    audio_url = f"{TTS_WAVE_SERVICE}?file={audio_file}"
    return transcription, translated_text, audio_url
78
+
def check_password(password):
    """Return True iff *password* matches the configured developer password.

    Uses a constant-time comparison so the check does not leak information
    through response timing.
    """
    # DEV_PWD unset -> developer mode is explicitly locked (the original's
    # `password == None` comparison only locked it by accident).
    if DEVELOPER_PASSWORD is None:
        return False
    return hmac.compare_digest(str(password), DEVELOPER_PASSWORD)
81
+
def user_interface(audio, source_lang, target_lang):
    """User-mode wrapper: run the full pipeline, expose only the speech URL."""
    # Index 2 of the pipeline tuple is the TTS audio URL; the transcription
    # and translation are deliberately hidden from end users.
    return transcribe_and_speak(audio, source_lang, target_lang)[2]
85
+
# ---------------------------------------------------------------------------
# Gradio UI: a public "User Mode" tab and a password-gated "Developer Mode"
# tab that also exposes the intermediate transcription and translation.
# ---------------------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# ASR and TTS Demo")

    with gr.Tab("User Mode"):
        gr.Markdown("Speak into the microphone or upload an audio file. The app will translate and speak it back to you.")

        with gr.Row():
            user_audio_input = gr.Audio(sources=["microphone", "upload"], type="filepath")
            user_source_lang = gr.Dropdown(choices=["en", "ma", "ta", "zh"], label="Source Language", value="en")
            user_target_lang = gr.Dropdown(choices=["en", "ma", "ta", "zh"], label="Target Language", value="zh")
        with gr.Row():
            user_button = gr.Button("Translate and Speak")
        with gr.Row():
            user_audio_output = gr.Audio(label="Translated Speech")

        user_button.click(
            fn=user_interface,
            inputs=[user_audio_input, user_source_lang, user_target_lang],
            outputs=[user_audio_output],
        )

    with gr.Tab("Developer Mode"):
        password_input = gr.Textbox(type="password", label="Enter Developer Password")
        login_button = gr.Button("Login")
        login_error = gr.Markdown(visible=False)

        # The whole developer panel stays hidden until login succeeds.
        with gr.Column(visible=False) as dev_interface:
            gr.Markdown("Developer Mode: Transcription, Translation, and TTS")

            with gr.Row():
                dev_audio_input = gr.Audio(sources=["microphone", "upload"], type="filepath")
                dev_source_lang = gr.Dropdown(choices=["en", "ma", "ta", "zh"], label="Source Language", value="en")
                dev_target_lang = gr.Dropdown(choices=["en", "ma", "ta", "zh"], label="Target Language", value="zh")
            with gr.Row():
                dev_button = gr.Button("Transcribe, Translate, and Speak")
            with gr.Row():
                dev_text_output = gr.Textbox(label="Transcription")
            with gr.Row():
                dev_translation_output = gr.Textbox(label="Translation")
            with gr.Row():
                dev_audio_output = gr.Audio(label="Translated Speech")

            dev_button.click(
                fn=transcribe_and_speak,
                inputs=[dev_audio_input, dev_source_lang, dev_target_lang],
                outputs=[dev_text_output, dev_translation_output, dev_audio_output],
            )

        def unlock_developer_mode(password):
            # Reveal the dev column and clear any stale error on success;
            # otherwise keep it hidden and surface the error message.
            if check_password(password):
                return gr.Column(visible=True), gr.Markdown(visible=False)
            return gr.Column(visible=False), gr.Markdown("Incorrect password. Please try again.", visible=True)

        login_button.click(
            fn=unlock_developer_mode,
            inputs=[password_input],
            outputs=[dev_interface, login_error],
        )

demo.launch()