leenag committed on
Commit df0824e
1 Parent(s): b09e54b

Upload test.py

Files changed (1)
  1. test.py +187 -0
test.py ADDED
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Evaluate an already trained model: on data the model never saw, compute the RMSE and
Pearson correlation of its predictions.
"""

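# Make the repository root importable so that the Training package resolves
# regardless of the directory this script is called from.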
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)

import numpy as np
import argparse
import torch
import csv
from Training.tools_learning import load_np_ema_and_mfcc, load_filenames, give_me_common_articulators, criterion_pearson_no_reduction
import random
from scipy import signal
import matplotlib.pyplot as plt
from Training.model import my_ac2art_model
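
# root_folder is assumed to be the parent of the current working directory, i.e. the
# script is expected to be launched from a sub-folder of the repository such as Training/.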
root_folder = os.path.dirname(os.getcwd())
fileset_path = os.path.join(root_folder, "Preprocessed_data", "fileset")

print(sys.argv)
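# Names of the 18 articulatory trajectories; the order is the one used for the
# per-articulator results written to the CSV at the end of this script.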
articulators = ['tt_x', 'tt_y', 'td_x', 'td_y', 'tb_x', 'tb_y', 'li_x', 'li_y',
                'ul_x', 'ul_y', 'll_x', 'll_y', 'la', 'lp', 'ttcl', 'tbcl', 'v_x', 'v_y']


def test_model(test_on, model_name, test_on_per_default=False):
    """
    :param test_on: the speaker to test on
    :param model_name: the name of the model (the .txt file, without the ".txt")
    :param test_on_per_default: if True, replace test_on with the test speaker parsed from the model name (when available)
    The weights of the model must be saved in a .txt file located in Training/saved_models/,
    for example F01_speaker_indep_Haskins__loss_both_90_filter_fix_0.txt.
    The test speaker has to be specified (it is in fact readable at the beginning of the filename ==> future work).
    Depending on the configuration (read from the filename), it tests on the parts of the test speaker the model
    was not trained on.
    It also saves the graphs of the predicted and true articulatory trajectories for one sentence.
    """
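    # The model filename encodes the experimental setup. When the model was trained only on
    # the articulators common to its speakers ('only_arti_common'), recover those speakers
    # from the name and the corresponding articulator indexes.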
    arti_indexes = []
    # Defaults, so that train/valid/test are always defined even if the model name
    # does not match any of the patterns below.
    train, valid, test = [], [], []
    if 'only_arti_common' in model_name:
        if "train_indep" in model_name:
            name = model_name.split('train_indep')
            test = name[0].split('_')[3]
            try:
                train = [sp for sp in name[1].split('valid')[0].split('_') if (sp != '' and sp != 'train')]
            except:
                train = []
            try:
                valid = [sp for sp in name[1].split('valid')[1].split('loss')[0].split('_') if (sp != '' and sp != 'train')]
            except:
                valid = []
            arti_indexes = give_me_common_articulators([test] + train + valid)
        if "spec" in model_name:
            test = model_name.split('_')[3]
            train = []
            valid = []
            arti_indexes = give_me_common_articulators([test] + train + valid)
        if 'valid__' in model_name and 'indep' in model_name:
            test = model_name.split('_')[3]
            train = [model_name.split('_')[6]]
            valid = []
            arti_indexes = give_me_common_articulators([test] + train + valid)

        if test_on_per_default:
            test_on = test
    else:
        train = []
        valid = []
        test = []

    print(model_name)
    print('train on', train)
    print('valid on', valid)
    print('tested on', test)
    print('here test on', test_on)
    batch_norma = False
    filter_type = "fix"
    to_plot = True

    cuda_avail = torch.cuda.is_available()
    if cuda_avail:
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

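    # Rebuild a network whose architecture matches the one used at training time, then load
    # the saved weights (a torch checkpoint stored with a .txt extension in saved_models/).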
    hidden_dim = 300
    input_dim = 429
    batch_size = 10
    output_dim = len(arti_indexes) if arti_indexes != [] else 18

    model = my_ac2art_model(hidden_dim=hidden_dim, input_dim=input_dim, output_dim=output_dim,
                            batch_size=batch_size, cuda_avail=cuda_avail, name_file=model_name,
                            filter_type=filter_type, batch_norma=batch_norma)
    model = model.double()

    file_weights = os.path.join("saved_models", model_name + ".txt")

    if cuda_avail:
        model = model.to(device=device)

    loaded_state = torch.load(file_weights, map_location=device)

    model.load_state_dict(loaded_state)

    if "indep" in model_name:  # the model was not trained on the test speaker
        files_for_test = load_filenames([test_on], part=["train", "valid", "test"])
    else:  # specific or dependent learning
        files_for_test = load_filenames([test_on], part=["test"])

    random.shuffle(files_for_test)
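    # Load the MFCC features (inputs) and EMA trajectories (targets) for those files, plus
    # the speaker's EMA standard deviation used by the evaluation and the list of
    # articulators available for this speaker.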
    x, y = load_np_ema_and_mfcc(files_for_test)
    print("evaluation on speaker {}".format(test_on))
    std_speaker = np.load(os.path.join(root_folder, "Preprocessing", "norm_values", "std_ema_" + test_on + ".npy"))
    arti_per_speaker = os.path.join(root_folder, "Preprocessing", "articulators_per_speaker.csv")
    csv.register_dialect('myDialect', delimiter=';')
    weight_apres = model.lowpass.weight.data[0, 0, :]

    with open(arti_per_speaker, 'r') as csvFile:
        reader = csv.reader(csvFile, dialect="myDialect")
        next(reader)
        for row in reader:
            if row[0] == test_on:
                arti_to_consider = row[1:19]
                arti_to_consider = [int(x) for x in arti_to_consider]
    if arti_indexes != []:
        arti_to_consider = [1 for k in range(len(arti_indexes))]

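    # Run the evaluation: per-articulator RMSE (with and without the speaker's std) and
    # Pearson correlation. With to_plot=True the predicted and real trajectories of one
    # sentence are also plotted.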
    rmse_per_arti_mean, rmse_per_arti_mean_without_std, pearson_per_arti_mean = model.evaluate_on_test_modified(
        x, y, std_speaker=std_speaker, to_plot=to_plot, to_consider=arti_to_consider,
        verbose=False, index_common=arti_indexes)

    show_filter = False  # TODO: expose this as a command-line argument
    if show_filter:
        weight_apres = model.lowpass.weight.data[0, 0, :]
        print("GAIN", sum(weight_apres.cpu()))
        freqs, h = signal.freqz(weight_apres.cpu())
        freqs = freqs * 100 / (2 * np.pi)  # freq in Hz (sampling rate of 100 Hz)
        plt.plot(freqs, 20 * np.log10(abs(h)), 'r')
        plt.title("Low-pass filter frequency response at the end of training")
        plt.ylabel('Amplitude [dB]')
        plt.xlabel('Frequency [Hz]')
        plt.show()

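    # Append the results to model_results_test.csv: one row per metric (rmse, rmse without
    # std, pearson), one column per articulator, plus the epoch of the loaded checkpoint.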
    with open('model_results_test.csv', 'a', newline="") as f:
        writer = csv.writer(f, delimiter=",")
        try:
            row_arti = ['model', 'test on', 'value'] + [articulators[i] for i in arti_indexes] + ['epoch']
            writer.writerow(row_arti)
        except Exception as e:
            print('could not write the header row:', e)
        row_rmse = [model_name, test_on, "rmse"] + rmse_per_arti_mean.tolist() + [model.epoch_ref]
        writer.writerow(row_rmse)
        row_rmse_without_std = [model_name, test_on, "rmse without std"] + rmse_per_arti_mean_without_std.tolist() + [model.epoch_ref]
        writer.writerow(row_rmse_without_std)
        row_pearson = [model_name, test_on, "pearson"] + pearson_per_arti_mean.tolist() + [model.epoch_ref]
        print(row_pearson)
        writer.writerow(row_pearson)

    return rmse_per_arti_mean, pearson_per_arti_mean


if __name__ == '__main__':
    # For the moment the std is not included in the results
    parser = argparse.ArgumentParser(description='Evaluate a trained model on a test speaker.')

    parser.add_argument('test_on', type=str,
                        help='the speaker we want to test on')

    parser.add_argument('model_name', type=str,
                        help='name of the model (without .txt)')

    args = parser.parse_args()

    rmse, pearson = test_model(test_on=args.test_on, model_name=args.model_name)
    print("results for model", args.model_name)
    print("rmse", rmse)
    print("pearson", pearson)
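

# Hypothetical usage example (the speaker and model names below are only illustrative;
# the corresponding weights file must exist in saved_models/ and the script is assumed
# to be launched from the Training/ folder):
#     python test.py F01 F01_speaker_indep_Haskins__loss_both_90_filter_fix_0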