fantaxy committed on
Commit
ccac5a3
•
1 Parent(s): 69c11af

Update back-cod.py

Files changed (1)
  1. back-cod.py +11 -3
back-cod.py CHANGED
@@ -17,7 +17,10 @@ from transformers import CLIPTextModel, T5EncoderModel, AutoModel, T5Tokenizer,
 from typing import Union
 from diffusers.utils.torch_utils import randn_tensor
 from tqdm import tqdm
+from transformers import pipeline
+translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 
+
 class Tango2Pipeline(DiffusionPipeline):
     def __init__(
         self,
@@ -243,9 +246,14 @@ pipe = Tango2Pipeline(vae=tango.vae,
 
 @spaces.GPU(duration=60)
 def gradio_generate(prompt, output_format, steps, guidance):
-    output_wave = pipe(prompt, steps, guidance)  ## Using the pipeline automatically uses flash attention on torch 2.0 and above
-    #output_wave = tango.generate(prompt, steps, guidance)
-    # output_filename = f"{prompt.replace(' ', '_')}_{steps}_{guidance}"[:250] + ".wav"
+    # Check whether the prompt contains Korean (Hangul) characters
+    if any(ord('가') <= ord(char) <= ord('힣') for char in prompt):
+        # Translate the Korean prompt to English
+        translation = translator(prompt)[0]['translation_text']
+        prompt = translation
+        print(f"Translated prompt: {prompt}")
+
+    output_wave = pipe(prompt, steps, guidance)
     output_wave = output_wave.audios[0]
     output_filename = "temp.wav"
     wavio.write(output_filename, output_wave, rate=16000, sampwidth=2)
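For reference, a minimal standalone sketch of the Korean-detection and translation step this commit adds, assuming the same Helsinki-NLP/opus-mt-ko-en checkpoint and the Hangul syllable range U+AC00 ('가') to U+D7A3 ('힣') used in the diff; the helper name detect_and_translate and the sample prompt are illustrative, not part of the repository.

# Sketch only: mirrors the logic added in this commit, not the exact contents of back-cod.py
from transformers import pipeline

# Korean -> English translator, same checkpoint as in the diff above
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

def detect_and_translate(prompt):
    # Translate only if the prompt contains Hangul syllables (U+AC00 .. U+D7A3)
    if any(ord('가') <= ord(char) <= ord('힣') for char in prompt):
        return translator(prompt)[0]['translation_text']
    return prompt

print(detect_and_translate("파도 소리"))  # hypothetical input; prints the model's English translation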