Upload classify.py
classify.py
ADDED
@@ -0,0 +1,66 @@
from typing import List, Optional

import torch
import torch.nn.functional as F
from whisper.audio import N_FRAMES, N_MELS, log_mel_spectrogram, pad_or_trim
from whisper.model import Whisper
from whisper.tokenizer import Tokenizer


@torch.no_grad()
def calculate_audio_features(audio_path: Optional[str], model: Whisper) -> torch.Tensor:
    if audio_path is None:
        segment = torch.zeros((N_MELS, N_FRAMES), dtype=torch.float32).to(model.device)
    else:
        mel = log_mel_spectrogram(audio_path)
        segment = pad_or_trim(mel, N_FRAMES).to(model.device)
    return model.embed_audio(segment.unsqueeze(0))


@torch.no_grad()
def calculate_average_logprobs(
    model: Whisper,
    audio_features: torch.Tensor,
    class_names: List[str],
    tokenizer: Tokenizer,
) -> torch.Tensor:
    initial_tokens = (
        torch.tensor(tokenizer.sot_sequence_including_notimestamps).unsqueeze(0).to(model.device)
    )
    eot_token = torch.tensor([tokenizer.eot]).unsqueeze(0).to(model.device)

    average_logprobs = torch.zeros(len(class_names))
    for i, class_name in enumerate(class_names):
        class_name_tokens = (
            torch.tensor(tokenizer.encode(" " + class_name)).unsqueeze(0).to(model.device)
        )
        input_tokens = torch.cat([initial_tokens, class_name_tokens, eot_token], dim=1)

        logits = model.logits(input_tokens, audio_features)  # (1, T, V)
        logprobs = F.log_softmax(logits, dim=-1).squeeze(0)  # (T, V)
        logprobs = logprobs[len(tokenizer.sot_sequence_including_notimestamps) - 1 : -1]  # (T', V)
        logprobs = torch.gather(logprobs, dim=-1, index=class_name_tokens.view(-1, 1))  # (T', 1)
        average_logprob = logprobs.mean().item()
        average_logprobs[i] = average_logprob

    return average_logprobs


def calculate_internal_lm_average_logprobs(
    model: Whisper,
    class_names: List[str],
    tokenizer: Tokenizer,
    verbose: bool = False,
) -> torch.Tensor:
    audio_features_from_empty_input = calculate_audio_features(None, model)
    average_logprobs = calculate_average_logprobs(
        model=model,
        audio_features=audio_features_from_empty_input,
        class_names=class_names,
        tokenizer=tokenizer,
    )
    if verbose:
        print("Internal LM average log probabilities for each class:")
        for i, class_name in enumerate(class_names):
            print(f"  {class_name}: {average_logprobs[i]:.3f}")
    return average_logprobs
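
The file itself ships no driver code, so below is a minimal usage sketch of how these helpers could be combined for zero-shot audio classification. The model size, class names, and audio path are placeholders, and subtracting the internal-LM scores is one optional way to keep classes that the decoder's language model favors on its own from dominating; whisper.load_model and get_tokenizer are standard openai-whisper APIs, everything else here is an assumption about intended use.

import whisper
from whisper.tokenizer import get_tokenizer

# Placeholder inputs; replace with your own labels and audio file.
class_names = ["dog barking", "cat meowing", "bird singing"]
audio_path = "example.wav"

model = whisper.load_model("small")
tokenizer = get_tokenizer(model.is_multilingual, language="en", task="transcribe")

# Score each class name conditioned on the audio.
audio_features = calculate_audio_features(audio_path, model)
avg_logprobs = calculate_average_logprobs(
    model=model,
    audio_features=audio_features,
    class_names=class_names,
    tokenizer=tokenizer,
)

# Optionally calibrate by subtracting the scores obtained from an all-zeros
# mel input, i.e. the decoder's internal LM preference for each class name.
internal_lm_logprobs = calculate_internal_lm_average_logprobs(
    model=model,
    class_names=class_names,
    tokenizer=tokenizer,
    verbose=True,
)
scores = avg_logprobs - internal_lm_logprobs

predicted = class_names[scores.argmax().item()]
print(f"Predicted class: {predicted}")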