Tags: Automatic Speech Recognition · Transformers · Safetensors · Japanese · whisper · audio · hf-asr-leaderboard · Eval Results · Inference Endpoints
Commit b20891c (verified) by asahi417 · 1 parent: 13bd1c8

Update README.md

Files changed (1)
  1. README.md +4 -4
README.md CHANGED
@@ -127,7 +127,7 @@ from datasets import load_dataset
 
 # config
 model_id = "kotoba-tech/kotoba-whisper-v1.0"
-torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 model_kwargs = {"attn_implementation": "sdpa"} if torch.cuda.is_available() else {}
 generate_kwargs = {"language": "japanese", "task": "transcribe"}
@@ -189,7 +189,7 @@ from datasets import load_dataset
 
 # config
 model_id = "kotoba-tech/kotoba-whisper-v1.0"
-torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 model_kwargs = {"attn_implementation": "sdpa"} if torch.cuda.is_available() else {}
 generate_kwargs = {"language": "japanese", "task": "transcribe"}
@@ -225,7 +225,7 @@ from datasets import load_dataset
 
 # config
 model_id = "kotoba-tech/kotoba-whisper-v1.0"
-torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 model_kwargs = {"attn_implementation": "sdpa"} if torch.cuda.is_available() else {}
 generate_kwargs = {"language": "japanese", "task": "transcribe"}
@@ -303,7 +303,7 @@ from evaluate import load
 
 # model config
 model_id = "kotoba-tech/kotoba-whisper-v1.0"
-torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 model_kwargs = {"attn_implementation": "sdpa"} if torch.cuda.is_available() else {}
 generate_kwargs = {"language": "japanese", "task": "transcribe"}