emirhanbilgic committed on
Commit
7a3e6dc
·
verified ·
1 Parent(s): ea15001

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -15
app.py CHANGED
@@ -37,10 +37,10 @@ def split_text_into_sentences(text):
37
  @spaces.GPU(duration=120)
38
  # Translation function
39
  def translate(source_text, source_lang, target_lang, batch_size=16):
40
- if source_lang== 'en' and target_lang == 'tr':
41
- model_name = f"Helsinki-NLP/opus-mt-tc-big-en-tr"
42
  else:
43
- model_name = f"Helsinki-NLP/opus-mt-{source_lang}-{target_lang}"
44
  tokenizer = MarianTokenizer.from_pretrained(model_name)
45
  model = MarianMTModel.from_pretrained(model_name).to(device)
46
 
@@ -124,25 +124,26 @@ with gr.Blocks() as demo:
124
  combined_audio = combine_audio_arrays(all_audio)
125
  all_text += f"**Sentence**: {sentence}\n\n"
126
  yield (sample_rate, combined_audio), all_text
 
127
  examples = [
128
- [
129
- "Once upon a time, in the depth of winter, when the flakes of snow fell like feathers from the clouds, a queen sat sewing at her pal-ace window, which had a carved frame of black wood.",
130
- "In an inferior recording quality, a female speaker delivers her slightly expressive and animated words with a fast pace. There's high level of background noise and a very distant-sounding reverberation. Her voice is slightly higher pitched than average.",
131
- None,
132
- ],
133
- [
134
- "The Importance of AI Safety.pdf",
135
- "Gary's voice is monotone yet slightly fast in delivery, with a very close recording that has no background noise.",
136
- None
137
- ]
138
- ]
139
 
140
  input_mode.change(
141
  fn=lambda choice: [gr.update(visible=choice == "Upload PDF"), gr.update(visible=choice == "Type Text")],
142
  inputs=input_mode,
143
  outputs=[pdf_input, text_input],
144
  )
145
- gr.Examples(examples=examples, fn=gen_tts, inputs=inputs, outputs=outputs, cache_examples=True)
146
  source_lang.change(update_target_lang, inputs=source_lang, outputs=target_lang)
147
 
148
  run_button.click(run_pipeline, inputs=[input_mode, pdf_input, text_input, translate_checkbox, source_lang, target_lang, description], outputs=[audio_output, markdown_output])
 
37
  @spaces.GPU(duration=120)
38
  # Translation function
39
  def translate(source_text, source_lang, target_lang, batch_size=16):
40
+ if source_lang == 'en' and target_lang == 'tr':
41
+ model_name = f"Helsinki-NLP/opus-mt-tc-big-en-tr"
42
  else:
43
+ model_name = f"Helsinki-NLP/opus-mt-{source_lang}-{target_lang}"
44
  tokenizer = MarianTokenizer.from_pretrained(model_name)
45
  model = MarianMTModel.from_pretrained(model_name).to(device)
46
 
 
124
  combined_audio = combine_audio_arrays(all_audio)
125
  all_text += f"**Sentence**: {sentence}\n\n"
126
  yield (sample_rate, combined_audio), all_text
127
+
128
  examples = [
129
+ [
130
+ "Once upon a time, in the depth of winter, when the flakes of snow fell like feathers from the clouds, a queen sat sewing at her palace window, which had a carved frame of black wood.",
131
+ "In an inferior recording quality, a female speaker delivers her slightly expressive and animated words with a fast pace. There's a high level of background noise and a very distant-sounding reverberation. Her voice is slightly higher pitched than average.",
132
+ None,
133
+ ],
134
+ [
135
+ "The Importance of AI Safety.pdf",
136
+ "Gary's voice is monotone yet slightly fast in delivery, with a very close recording that has no background noise.",
137
+ None
138
+ ]
139
+ ]
140
 
141
  input_mode.change(
142
  fn=lambda choice: [gr.update(visible=choice == "Upload PDF"), gr.update(visible=choice == "Type Text")],
143
  inputs=input_mode,
144
  outputs=[pdf_input, text_input],
145
  )
146
+ gr.Examples(examples=examples, fn=run_pipeline, inputs=[input_mode, text_input, description], outputs=[audio_output, markdown_output], cache_examples=True)
147
  source_lang.change(update_target_lang, inputs=source_lang, outputs=target_lang)
148
 
149
  run_button.click(run_pipeline, inputs=[input_mode, pdf_input, text_input, translate_checkbox, source_lang, target_lang, description], outputs=[audio_output, markdown_output])