Respair committed (verified)
Commit 28ccf35 · Parent(s): 7524a65

Delete app.py

Files changed (1):
  1. app.py +0 -561

app.py DELETED
@@ -1,561 +0,0 @@
INTROTXT = """#
Repo -> [Hugging Face - 🤗](https://huggingface.co/Respair/Project_Kanade_SpeechModel)
This space uses Tsukasa (24khz).
**Check the Read me tabs down below.** <br>
Enjoy!
"""
import gradio as gr
import random
import importable
import torch
import os
from cotlet_phon import phonemize
import numpy as np
import pickle


voices = {}
example_texts = {}
prompts = []
inputs = []


theme = gr.themes.Base(
    font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
)

from Modules.diffusion.sampler import DiffusionSampler, ADPM2Sampler, KarrasSchedule

voicelist = [v for v in os.listdir("/home/ubuntu/Kanade_Project/gradio/Tsukasa_Speech/reference_sample_wavs")]


for v in voicelist:
    voices[v] = importable.compute_style_through_clip(f'reference_sample_wavs/{v}')


with open('Inference/random_texts.txt', 'r') as r:
    random_texts = [line.strip() for line in r]

example_texts = {f"{text[:30]}...": text for text in random_texts}

def update_text_input(preview):
    return example_texts[preview]

def get_random_text():
    return random.choice(random_texts)


with open('Inference/prompt.txt', 'r') as p:
    prompts = [line.strip() for line in p]

with open('Inference/input_for_prompt.txt', 'r') as i:
    inputs = [line.strip() for line in i]


last_idx = None

def get_random_prompt_pair():
    global last_idx
    max_idx = min(len(prompts), len(inputs)) - 1

    random_idx = random.randint(0, max_idx)
    while random_idx == last_idx:
        random_idx = random.randint(0, max_idx)

    last_idx = random_idx
    return inputs[random_idx], prompts[random_idx]

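# Note on the data files loaded above: 'Inference/input_for_prompt.txt' and
# 'Inference/prompt.txt' are read line by line and paired by index, so line k of
# the first file is a text to synthesize and line k of the second is the matching
# style description that get_random_prompt_pair() returns alongside it.
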
def Synthesize_Audio(text, voice, voice2, vcsteps, embscale, alpha, beta, ros, progress=gr.Progress()):

    text = phonemize(text)

    if voice2:
        voice_style = importable.compute_style_through_clip(voice2)
    else:
        voice_style = voices[voice]

    wav = importable.inference(
        text,
        voice_style,
        alpha=alpha,
        beta=beta,
        diffusion_steps=vcsteps,
        embedding_scale=embscale,
        rate_of_speech=ros
    )

    return (24000, wav)

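# Hypothetical usage sketch of the handler above, bypassing the Gradio UI.
# The text and all parameter values are illustrative only.
def _example_voice_guided_call():
    sr, wav = Synthesize_Audio(
        "こんにちは、調子はどうですか?",  # text to synthesize
        voicelist[0],                      # key into the precomputed `voices` dict
        None,                              # no uploaded reference clip
        5,                                 # diffusion steps
        1.0,                               # intensity (embedding scale)
        0.1,                               # alpha
        0.5,                               # beta
        1.0,                               # rate of speech
    )
    return sr, wav
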
def LongformSynth_Text(text, s_prev, Kotodama, alpha, beta, t, diffusion_steps, embedding_scale, rate_of_speech, progress=gr.Progress()):

    japanese = text

    # raw_jpn = japanese[japanese.find(":") + 2:]
    # speaker = japanese[:japanese.find(":") + 2]

    if ":" in japanese[:10]:
        raw_jpn = japanese[japanese.find(":") + 2:]
        speaker = japanese[:japanese.find(":") + 2]
    else:
        raw_jpn = japanese
        speaker = ""

    sentences = importable.sent_tokenizer.tokenize(raw_jpn)
    sentences = importable.merging_sentences(sentences)

    silence = 24000 * 0.5  # 500 ms of silence between outputs for a more natural transition
    # sentences = sent_tokenize(text)
    print(sentences)
    wavs = []
    s_prev = None
    for text in sentences:

        text_input = phonemize(text)
        print('phonemes -> ', text_input)

        Kotodama = importable.Kotodama_Sampler(importable.model, text=speaker + text, device=importable.device)

        wav, s_prev = importable.Longform(text_input,
                                          s_prev,
                                          Kotodama,
                                          alpha=alpha,
                                          beta=beta,
                                          t=t,
                                          diffusion_steps=diffusion_steps, embedding_scale=embedding_scale, rate_of_speech=rate_of_speech)
        wavs.append(wav)
        wavs.append(np.zeros(int(silence)))

    print('Synthesized: ')
    return (24000, np.concatenate(wavs))

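# Hypothetical usage sketch of the long-form handler above. The "Speaker:" prefix
# is optional; when present it is prepended to each sentence before Kotodama
# sampling. The speaker name and parameter values are illustrative only.
def _example_longform_call():
    sr, wav = LongformSynth_Text(
        "奏: 今日はいい天気ですね。散歩にでも行きましょうか。",  # hypothetical "Speaker: Text" input
        None,   # s_prev (tracked internally)
        None,   # Kotodama (sampled internally per sentence)
        0.0,    # alpha
        0.0,    # beta
        0.8,    # t
        10,     # diffusion steps
        1.25,   # intensity (embedding scale)
        1.0,    # rate of speech
    )
    return sr, wav
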
def Inference_Synth_Prompt(text, description, Kotodama, alpha, beta, diffusion_steps, embedding_scale, rate_of_speech, progress=gr.Progress()):

    prompt = f"""{description} \n text: {text}"""

    print('prompt ->: ', prompt)

    text = phonemize(text)

    print('phonemes ->: ', text)

    Kotodama = importable.Kotodama_Prompter(importable.model, text=prompt, device=importable.device)

    wav = importable.inference(text,
                               Kotodama,
                               alpha=alpha,
                               beta=beta,
                               diffusion_steps=diffusion_steps, embedding_scale=embedding_scale, rate_of_speech=rate_of_speech)

    wav = importable.trim_long_silences(wav)

    print('Synthesized: ')
    return (24000, wav)

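# Hypothetical usage sketch of the prompt-guided handler above. The description
# is joined with the text (as "{description} \n text: {text}") and fed to the
# Kotodama prompt encoder. Both strings below are made-up illustrations.
def _example_prompt_guided_call():
    sr, wav = Inference_Synth_Prompt(
        "明日も一緒に遊ぼうね!",                                        # text to synthesize
        "元気で明るい若い女性の声。高めのピッチで、楽しそうに話す。",  # style description
        None,   # Kotodama (computed internally from the prompt)
        0.0,    # alpha
        0.0,    # beta
        10,     # diffusion steps
        1.0,    # intensity (embedding scale)
        1.0,    # rate of speech
    )
    return sr, wav
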
with gr.Blocks() as audio_inf:
    with gr.Row():
        with gr.Column(scale=1):
            inp = gr.Textbox(label="Text", info="Enter the text", value="きみの存在は、私の心の中で燃える小さな光のよう。きみがいない時、世界は白黒の写真みたいに寂しくて、何も輝いてない。きみの笑顔だけが、私の灰色の日々に色を塗ってくれる。離れてる時間は、めちゃくちゃ長く感じられて、きみへの想いは風船みたいにどんどん膨らんでいく。きみなしの世界なんて、想像できないよ。", interactive=True, scale=5)
            voice = gr.Dropdown(voicelist, label="Voice", info="Select a default voice.", value=voicelist[-1], interactive=True)
            voice_2 = gr.Audio(label="Upload your own Audio", interactive=True, type='filepath', max_length=300, waveform_options={'waveform_color': '#a3ffc3', 'waveform_progress_color': '#e972ab'})

            with gr.Accordion("Advanced Parameters", open=False):

                alpha = gr.Slider(minimum=0, maximum=1, value=0.1, step=0.1, label="Alpha", info="A diffusion sampler parameter handling the timbre; higher means less affected by the reference | 0 = diffusion is disabled", interactive=True)
                beta = gr.Slider(minimum=0, maximum=1, value=0.5, step=0.1, label="Beta", info="A diffusion sampler parameter; higher means less affected by the reference | 0 = diffusion is disabled", interactive=True)
                multispeakersteps = gr.Slider(minimum=3, maximum=15, value=5, step=1, label="Diffusion Steps", interactive=True)
                embscale = gr.Slider(minimum=1, maximum=5, value=1, step=0.1, label="Intensity", info="Impacts the expressiveness; raising it too much will break the output.", interactive=True)
                rate_of_speech = gr.Slider(minimum=0.5, maximum=2, value=1, step=0.1, label="Rate of Speech", info="Higher -> Faster", interactive=True)

        with gr.Column(scale=1):
            btn = gr.Button("Synthesize", variant="primary")
            audio = gr.Audio(interactive=False, label="Synthesized Audio", waveform_options={'waveform_color': '#a3ffc3', 'waveform_progress_color': '#e972ab'})
            btn.click(Synthesize_Audio, inputs=[inp, voice, voice_2, multispeakersteps, embscale, alpha, beta, rate_of_speech], outputs=[audio], concurrency_limit=4)

# Kotodama Text sampler Synthesis Block
with gr.Blocks() as longform:
    with gr.Row():
        with gr.Column(scale=1):
            inp_longform = gr.Textbox(
                label="Text",
                info="Enter the text [Speaker: Text] | Also works without any name.",
                value=list(example_texts.values())[0],
                interactive=True,
                scale=5
            )

            with gr.Row():
                example_dropdown = gr.Dropdown(
                    choices=list(example_texts.keys()),
                    label="Example Texts [pick one!]",
                    value=list(example_texts.keys())[0],
                    interactive=True
                )

            example_dropdown.change(
                fn=update_text_input,
                inputs=[example_dropdown],
                outputs=[inp_longform]
            )

            with gr.Accordion("Advanced Parameters", open=False):

                alpha_longform = gr.Slider(minimum=0, maximum=1, value=0.0, step=0.1,
                                           label="Alpha",
                                           info="A diffusion parameter handling the timbre; higher means less affected by the reference | 0 = diffusion is disabled",
                                           interactive=True)
                beta_longform = gr.Slider(minimum=0, maximum=1, value=0.0, step=0.1,
                                          label="Beta",
                                          info="A diffusion parameter; higher means less affected by the reference | 0 = diffusion is disabled",
                                          interactive=True)
                diffusion_steps_longform = gr.Slider(minimum=3, maximum=15, value=10, step=1,
                                                     label="Diffusion Steps",
                                                     interactive=True)
                embedding_scale_longform = gr.Slider(minimum=1, maximum=5, value=1.25, step=0.1,
                                                     label="Intensity",
                                                     info="A diffusion parameter; it impacts the expressiveness, and raising it too much will break the output.",
                                                     interactive=True)

                rate_of_speech_longform = gr.Slider(minimum=0.5, maximum=2, value=1, step=0.1,
                                                    label="Rate of Speech",
                                                    info="Higher = Faster",
                                                    interactive=True)

        with gr.Column(scale=1):
            btn_longform = gr.Button("Synthesize", variant="primary")
            audio_longform = gr.Audio(interactive=False,
                                      label="Synthesized Audio",
                                      waveform_options={'waveform_color': '#a3ffc3', 'waveform_progress_color': '#e972ab'})

            btn_longform.click(LongformSynth_Text,
                               inputs=[inp_longform,
                                       gr.State(None),  # s_prev
                                       gr.State(None),  # Kotodama
                                       alpha_longform,
                                       beta_longform,
                                       gr.State(.8),    # t parameter
                                       diffusion_steps_longform,
                                       embedding_scale_longform,
                                       rate_of_speech_longform],
                               outputs=[audio_longform],
                               concurrency_limit=4)

# Kotodama prompt sampler Inference Block
with gr.Blocks() as prompt_inference:
    with gr.Row():
        with gr.Column(scale=1):
            text_prompt = gr.Textbox(
                label="Text",
                info="Enter the text to synthesize. This text will also be fed to the encoder. Make sure to see the Read Me for more details!",
                value=inputs[0],
                interactive=True,
                scale=5
            )
            description_prompt = gr.Textbox(
                label="Description",
                info="Enter a highly detailed, descriptive prompt that matches the vibe of your text to guide the synthesis.",
                value=prompts[0],
                interactive=True,
                scale=7
            )

            with gr.Row():
                random_btn = gr.Button('Random Example', variant='secondary')

            with gr.Accordion("Advanced Parameters", open=True):
                embedding_scale_prompt = gr.Slider(minimum=1, maximum=5, value=1, step=0.25,
                                                   label="Intensity",
                                                   info="Impacts the expressiveness; raising it too much will break the output.",
                                                   interactive=True)
                alpha_prompt = gr.Slider(minimum=0, maximum=1, value=0.0, step=0.1,
                                         label="Alpha",
                                         info="A diffusion sampler parameter handling the timbre; higher means less affected by the reference | 0 = diffusion is disabled",
                                         interactive=True)
                beta_prompt = gr.Slider(minimum=0, maximum=1, value=0.0, step=0.1,
                                        label="Beta",
                                        info="A diffusion sampler parameter; higher means less affected by the reference | 0 = diffusion is disabled",
                                        interactive=True)
                diffusion_steps_prompt = gr.Slider(minimum=3, maximum=15, value=10, step=1,
                                                   label="Diffusion Steps",
                                                   interactive=True)
                rate_of_speech_prompt = gr.Slider(minimum=0.5, maximum=2, value=1, step=0.1,
                                                  label="Rate of Speech",
                                                  info="Higher = Faster",
                                                  interactive=True)
        with gr.Column(scale=1):
            btn_prompt = gr.Button("Synthesize with Prompt", variant="primary")
            audio_prompt = gr.Audio(interactive=False,
                                    label="Prompt-based Synthesized Audio",
                                    waveform_options={'waveform_color': '#a3ffc3', 'waveform_progress_color': '#e972ab'})

    random_btn.click(
        fn=get_random_prompt_pair,
        inputs=[],
        outputs=[text_prompt, description_prompt]
    )

    btn_prompt.click(Inference_Synth_Prompt,
                     inputs=[text_prompt,
                             description_prompt,
                             gr.State(None),
                             alpha_prompt,
                             beta_prompt,
                             diffusion_steps_prompt,
                             embedding_scale_prompt,
                             rate_of_speech_prompt],
                     outputs=[audio_prompt],
                     concurrency_limit=4)

notes = """
<h1>Notes</h1>

<p>
This work is somewhat different from your typical speech model. It offers a high degree of control<br>
over the generation process, which means it's easy to inadvertently produce unimpressive outputs.
</p>

<p>
<b>Kotodama</b> and the <b>Diffusion sampler</b> can significantly help guide the generation towards<br>
something that aligns with your input, but they aren't foolproof.
</p>

<p>
The model's peak performance is achieved when the Diffusion sampler and Kotodama work seamlessly together.<br>
However, we won't see that level of performance here because this checkpoint is somewhat undertrained<br>
due to my time and resource constraints. (Tsumugi should be better in this regard,<br>
provided the diffusion works at all on your hardware.)<br>
Hopefully, you can further fine-tune this model (or train from scratch) to achieve even better results!
</p>

<p>
The prompt encoder is also highly experimental and should be treated as a proof of concept. Due to the<br>
overwhelming ratio of female to male speakers and the wide variation in both speakers and their expressions,<br>
the prompt encoder may occasionally produce subpar or contradictory outputs. For example, high expressiveness alongside<br>
high pitch has been associated with female speakers simply because I had orders of magnitude more of them in the dataset.<br>
</p>

<p>
________________________________________________________ <br>
<strong>A useful note about voice design and prompting:</strong><br>\n
The vibe of the dialogue impacts the generated voice, since the Japanese dialogue<br>
and the prompts were jointly trained. This is a peculiar feature of the Japanese language.<br>
For example, if you use 俺 (ore) or 僕 (boku), or your input is overall masculine,<br>
you may get a guy's voice, even if you describe it as female in the prompt. <br> \n
The Japanese text that is fed to the prompt doesn't necessarily have to be<br>
the same as your input, but we can't do that in this demo<br>
without making the page too convoluted. In a real-world scenario, you can just use a<br>
prompt with a suitable Japanese text to guide the model, get the style,<br>
then apply it to whatever dialogue you want the model to speak.<br>

</p>
________________________________________________________ <br>
<p>
The pitch information in my data was accurately calculated, but it is only meaningful relative to the other speakers,<br>
so a pitch labelled "deep" may not sound especially deep in isolation, even though it is<br>
when compared to others within the same data. Also, some of the gender labels<br>
are inaccurate, since we used a model to annotate them. <br> \n
The main goal of this inference method is to demonstrate that style can be mapped to a description's embeddings,<br>
yielding reasonably good results.
</p>

<p>
Overall, I'm confident that with a bit of experimentation, you can achieve reasonably good results. <br>
The model should work well out of the box 90% of the time without the need for extensive tweaking.<br>
However, here are some tips in case you encounter issues:
</p>

<h2>Tips:</h2>

<ul>
<li>
Ensure that your input closely matches your reference (audio or text prompt) in terms of tone,<br>
non-verbal cues, duration, etc.
</li>

<li>
If your audio is too long but the input is too short, the speech rate will be slow, and vice versa.
</li>

<li>
Experiment with the <b>alpha</b>, <b>beta</b>, and <b>Intensity</b> parameters. The Diffusion<br>
sampler is non-deterministic, so regenerate a few times if you're not satisfied with the output.
</li>

<li>
A speaker's share of the dataset and their expressive distribution significantly impact the quality;<br>
you won't necessarily get perfect results with all speakers.
</li>

<li>
Punctuation is very important; for example, adding a «!» mark will raise the voice or make it more intense.
</li>

<li>
Not all speakers are equal. Less-represented speakers or out-of-distribution inputs may result<br>
in artifacts.
</li>

<li>
If the Diffusion sampler works but the speaker didn't have a certain expression (e.g., extreme anger)<br>
in the dataset, try raising the diffusion sampler's parameters and let it handle everything. Though<br>
it may result in less speaker similarity, the ideal way to handle this is to cook new vectors by<br>
transferring an emotion from one speaker to another. But you can't do that in this space.
</li>

<li>
For voice-based inference, you can use litagin's awesome <a href="https://huggingface.co/datasets/litagin/Moe-speech" target="_blank">Moe-speech dataset</a>,<br>
since the training data includes a portion of it.
</li>

<li>
You may also want to tweak the phonemes if you're going for something wild.<br>
I have used cutlet in the backend, but it doesn't seem to like some of my mappings.
</li>

</ul>
"""


notes_jp = """
<h1>メモ</h1>

<p>
この作業は、典型的なスピーチモデルとは少し異なります。生成プロセスに対して高い制御を提供するため、意図せずに<br>
比較的にクオリティーの低い出力を生成してしまうことが容易です。
</p>

<p>
<b>Kotodama</b>と<b>Diffusionサンプラー</b>は、入力に沿ったものを生成するための大きな助けとなりますが、<br>
万全というわけではありません。
</p>

<p>
モデルの最高性能は、DiffusionサンプラーとKotodamaがシームレスに連携することで達成されます。しかし、<br>
このチェックポイントは時間とリソースの制約からややTrain不足であるため、そのレベルの性能はここでは見られません。<br>
(この件について、「紬」のチェックポイントの方がいいかもしれません。でもまぁ、みなさんのハードに互換性があればね。)<br>
おそらく、このモデルをさらにFinetuningする(または最初からTrainする)ことで、より良い結果が得られるでしょう。
</p>

_____________________________________________<br>\n
<strong>音声デザインとプロンプトに関する有用なメモ:</strong><br>
ダイアログの雰囲気は、日本語のダイアログとプロンプトが共同でTrainされたため、生成される音声に影響を与えます。<br>
これは日本語の特徴的な機能です。例えば、「俺」や「僕」を使用したり、全体的に男性らしい入力をすると、<br>
プロンプトで女性と記述していても、男性の声が得られる可能性があります。<br>
プロンプトに入力される日本語のテキストは、必ずしも入力内容と同じである必要はありませんが、<br>
このデモではページが複雑になりすぎないようにそれを行うことはできません。<br>
実際のシナリオでは、適切な日本語のテキストを含むプロンプトを使用してモデルを導き、<br>
スタイルを取得した後、それを希望するダイアログに適用することができます。<br>

_____________________________________________<br>\n

<p>
プロンプトエンコーダも非常に実験的であり、概念実証として扱うべきです。女性話者対男性話者の比率が圧倒的で、<br>
また話者とその表現に大きなバリエーションがあるため、エンコーダは質の低い出力を生成する可能性があります。<br>
例えば、高い表現力は、データセットに多く含まれていた女性話者と関連付けられています。<br>
それに、データのピッチ情報は正確に計算されましたが、それは他のスピーカーとの比較でしか機能しません...<br>
だから、深いピッチが必ずしも深すぎるわけではないことに気づくかもしれません。<br>
ただし、実際には、同じデータ内の他の人と比較すると、深すぎます。このインフレンスの主な目的は、<br>
スタイルベクトルを記述にマッピングし、合理的に良い結果を得ることにあります。
</p>

<p>
全体として、少しの実験でほぼ望む結果を達成できると自信を持っています。90%のケースで、大幅な調整を必要とせず、<br>
そのままでうまく動作するはずです。しかし、問題が発生した場合のためにいくつかのヒントがあります:
</p>

<h2>ヒント:</h2>

<ul>
<li>
入力がリファレンス(音声またはテキストプロンプト)とトーン、非言語的な手がかり、<br>
長さなどで密接に一致していることを確認してください。
</li>

<li>
音声が長すぎるが入力が短すぎる場合、話速が遅くなります。その逆もまた同様です。
</li>

<li>
アルファ、ベータ、および埋め込みスケールのパラメータを試行錯誤してください。Diffusionサンプラーは<br>
非決定的なので、満足のいく出力が得られない場合は何度か再生成してください。
</li>

<li>
データセット内の話者の分布と表現力の分布は品質に大きく影響します。<br>
すべての話者で必ずしも完璧な結果が得られるわけではありません。
</li>

<li>
句読点は重要です。たとえば、「!」を使えば、スタイルのインテンシティが上がります。
</li>

<li>
すべての話者が平等に表現されているわけではありません。少ない表現の話者や<br>
分布外の入力はアーティファクトを生じさせる可能性があります。
</li>

<li>
Diffusionサンプラーが機能しているが、データセット内で特定の表現(例:極度の怒り)がない場合、<br>
Diffusionサンプラーのパラメータを引き上げ、サンプラーにすべてを任せてください。ただし、それにより<br>
話者の類似性が低下する可能性があります。この問題を理想的に解決する方法は、ある話者から別の話者に<br>
感情を転送し新しいベクトルを作成することですが、ここではできません。
</li>

<li>
音声ベースのインフレンスには、トレーニングデータの一部としてMoe-speechデータセットの一部を含む<br>
<a href="https://huggingface.co/datasets/litagin/Moe-speech" target="_blank">litaginの素晴らしいデータセット</a>を使用できます。
</li>

<li>
たまには音素の調整が必要になる場合もあります。バックエンドではcutletを使っているのですが、<br>
いくつかのOODマッピングがcutletと相性が良くないみたいです。
</li>
</ul>

"""
with gr.Blocks() as read_me:
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown(notes)

with gr.Blocks() as read_me_jp:
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown(notes_jp)


custom_css = """
.tab-label {
    color: #FFD700 !important;
}
"""


with gr.Blocks(title="Tsukasa 司", css=custom_css + "footer{display:none !important}", theme="Respair/[email protected]") as demo:
    # gr.DuplicateButton("Duplicate Space")
    gr.Markdown(INTROTXT)

    gr.TabbedInterface([longform, audio_inf, prompt_inference, read_me, read_me_jp],
                       ['Kotodama Text Inference', 'Voice-guided Inference', 'Prompt-guided Inference [Highly Experimental - not optimized]', 'Read Me! [English]', 'Read Me! [日本語]'])

if __name__ == "__main__":
    demo.queue(api_open=False, max_size=15).launch(show_api=False, share=True)