Spaces: Running on Zero
txya900619 committed
Commit: fe80c45
Parent(s): b2ae074
Update app.py
app.py
CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 import torch
 from omegaconf import OmegaConf
 from transformers import pipeline
+import spaces
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
@@ -22,6 +23,7 @@ OmegaConf.register_new_resolver("load_pipe", load_pipe)
 
 models_config = OmegaConf.to_object(OmegaConf.load("configs/models.yaml"))
 
+@spaces.GPU
 def automatic_speech_recognition(model_id: str, dialect_id: str, audio_file: str):
     model = models_config[model_id]["model"]
     generate_kwargs = {
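The commit wires the Space up for ZeroGPU: `import spaces` is added and the inference entry point `automatic_speech_recognition` is decorated with `@spaces.GPU`, so a GPU is allocated only while that function runs. Below is a minimal sketch of the same pattern; the `transcribe` function, the `openai/whisper-small` checkpoint, and the Gradio wiring are illustrative assumptions, not the actual contents of this Space's app.py, which builds its pipelines from configs/models.yaml.

# Minimal sketch of the ZeroGPU pattern this commit applies (assumptions noted above).
import gradio as gr
import spaces
import torch
from transformers import pipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Hypothetical checkpoint for illustration only; the real app loads models from configs/models.yaml.
asr_pipe = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-small",
    torch_dtype=torch_dtype,
    device=device,
)

@spaces.GPU  # ZeroGPU attaches a GPU only for the duration of this call
def transcribe(audio_file: str) -> str:
    # Run speech-to-text on the uploaded audio file and return the transcript.
    return asr_pipe(audio_file)["text"]

demo = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath"),
    outputs="text",
)

if __name__ == "__main__":
    demo.launch()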