import os
import io
import subprocess
import numpy as np
from difflib import SequenceMatcher
from datetime import datetime
from typing import List, Tuple, Dict
import asyncio
import base64
import string
import re
import urllib.request
import gzip
import tempfile

# Set cache environment
os.environ['HF_HOME'] = '/tmp/hf'
os.environ['TORCH_HOME'] = '/tmp/torch'
os.environ['TRANSFORMERS_CACHE'] = '/tmp/hf'
os.environ['XDG_CACHE_HOME'] = '/tmp/hf'
os.environ['MPLCONFIGDIR'] = '/tmp/matplotlib'  # Fix matplotlib permission issue
os.environ['PULSE_CONFIG_PATH'] = '/tmp/pulse'  # Fix PulseAudio errors
os.environ['PULSE_RUNTIME_PATH'] = '/tmp/pulse'
os.makedirs('/tmp/hf', exist_ok=True)
os.makedirs('/tmp/torch', exist_ok=True)
os.makedirs('/tmp/matplotlib', exist_ok=True)
os.makedirs('/tmp/pulse', exist_ok=True)

from fastapi import FastAPI, UploadFile, File, Form
from fastapi.middleware.cors import CORSMiddleware
import torchaudio
import torch
from phonemizer import phonemize
import whisperx  # New: WhisperX for precise alignment
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
import edge_tts

def log(msg):
    print(f"[{datetime.now().strftime('%H:%M:%S')}] {msg}")

app = FastAPI()
app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"])

def normalize_phoneme_string(s: str) -> str:
    """Normalize phoneme string for comparison - remove spaces and extra chars"""
    if not s:
        return s
    
    # Convert to lowercase and remove spaces, stress marks, and length markers
    normalized = s.lower().strip()
    normalized = normalized.replace(' ', '')  # Remove spaces between phonemes
    normalized = normalized.replace('ː', '')  # Remove length markers
    normalized = normalized.replace('ˈ', '')  # Remove primary stress
    normalized = normalized.replace('ˌ', '')  # Remove secondary stress
    normalized = normalized.replace('.', '')  # Remove syllable boundaries
    
    # CRITICAL: Explode affricate ligatures into their component parts
    # These single-character affricates need to be expanded to match decomposed forms
    affricate_ligatures = {
        'ʧ': 'tʃ',    # Voiceless postalveolar affricate (chip)
    'ʤ': 'dʒ',    # Voiced postalveolar affricate (jump)
    'ʦ': 'ts',    # Voiceless alveolar affricate (German Zeit)
    'ʣ': 'dz',    # Voiced alveolar affricate (Italian mezzo)
    'ʨ': 'tɕ',    # Voiceless alveolo-palatal affricate (Polish ć)
    'ʥ': 'dʑ',    # Voiced alveolo-palatal affricate (Polish dź)
    'ƛ': 'tɬ',    # Voiceless alveolar lateral affricate (Nahuatl tl)
    'ꜩ': 'tɕ',    # Variant for voiceless alveolo-palatal affricate
    }
    
    for ligature, expanded in affricate_ligatures.items():
        normalized = normalized.replace(ligature, expanded)
    
    # CRITICAL: Normalize ASCII symbols to proper IPA equivalents
    # Convert all wav2vec2 ASCII characters to standard IPA
    ascii_to_ipa = {
        'g': 'ɡ',    # ASCII g → IPA script g (voiced velar stop)
        'b': 'b',    # ASCII b → IPA b (already correct, but explicit)
        'd': 'd',    # ASCII d → IPA d (already correct, but explicit)
        'f': 'f',    # ASCII f → IPA f (already correct, but explicit)
        'h': 'h',    # ASCII h → IPA h (already correct, but explicit)
        'i': 'i',    # ASCII i → IPA i (already correct, but explicit)
        # Note: Most ASCII phonetic chars are already valid IPA, except 'g'
    }
    
    # Normalize variant IPA symbols to consistent forms
    # Handle different representations of the same sounds
    ipa_variants = {
        'ɜ': 'ɝ',    # Open-mid central → r-colored (American English "er")
        'ɚ': 'ɝ',    # R-colored schwa → r-colored vowel (both "er" sounds)
        'ʌ': 'ə',    # Open-mid back → schwa (both unstressed "uh" sounds)
        'ð': 'θ',    # Voiced th → voiceless th (accent training - treat as equivalent)
        'ɹ': 'r',    # Alveolar approximant → regular r (espeak vs CMUdict difference)
    }
    
    for ascii_char, ipa_char in ascii_to_ipa.items():
        normalized = normalized.replace(ascii_char, ipa_char)
        
    for variant_char, standard_char in ipa_variants.items():
        normalized = normalized.replace(variant_char, standard_char)
    
    return normalized
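
# Illustrative examples of the normalization above (hand-picked inputs; actual
# phonemizer/model output will vary):
#   normalize_phoneme_string("ˈhɛ loʊ")  ->  "hɛloʊ"   (stress mark and spaces removed)
#   normalize_phoneme_string("ʧɪp")      ->  "tʃɪp"    (affricate ligature expanded)
#   normalize_phoneme_string("goʊ")      ->  "ɡoʊ"     (ASCII g mapped to IPA ɡ)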

# Load models once at startup
phoneme_processor = Wav2Vec2Processor.from_pretrained("vitouphy/wav2vec2-xls-r-300m-timit-phoneme")
phoneme_model = Wav2Vec2ForCTC.from_pretrained("vitouphy/wav2vec2-xls-r-300m-timit-phoneme")

# Model inspection complete - wav2vec2 uses ASCII 'g' (token 15), not IPA 'ɡ'
log("✅ Phoneme models loaded - using ASCII/IPA normalization")

# WhisperX models - loaded lazily
whisperx_model = None
whisperx_align_model = None
whisperx_metadata = None

# Simple caches
phoneme_cache = {}
tts_cache = {}

# TTS configuration
TTS_VOICE = "en-US-AriaNeural"

# Phoneme to English letter sounds mapping
PHONEME_TO_ENGLISH = {
    # Vowels (monophthongs)
    'ɪ': 'IH',      # "bit"
    'ɛ': 'EH',      # "bed"
    'æ': 'AE',      # "cat"
    'ʌ': 'UH',      # "but"
    'ɑ': 'AH',      # "father"
    'ɔ': 'AW',      # "law"
    'ʊ': 'UU',      # "book"
    'u': 'OO',      # "boot"
    'i': 'EE',      # "beat"
    'ə': 'UH',      # "about" (schwa)
    'ɝ': 'ER',      # "bird"
    'ɚ': 'ER',      # "letter"
    
    # Diphthongs
    'eɪ': 'AY',     # "day"
    'aɪ': 'EYE',    # "my"
    'ɔɪ': 'OY',     # "boy"
    'aʊ': 'OW',     # "now"
    'oʊ': 'OH',     # "go"
    
    # R-colored vowels
    'ɪr': 'EER',    # "near"
    'ɛr': 'AIR',    # "care"
    'ɑr': 'AR',     # "car"
    'ɔr': 'OR',     # "for"
    'ʊr': 'OOR',    # "tour"
    'ər': 'ER',     # "letter"
    'ɚ': 'ER',     # alternate schwa-r
    
    # Consonants
    'p': 'P',       # "pat"
    'b': 'B',       # "bat"
    't': 'T',       # "tap"
    'd': 'D',       # "dap"
    'k': 'K',       # "cat"
    'g': 'G',       # "gap" (wav2vec2 uses ASCII g)
    'ɡ': 'G',       # "gap" (IPA script g - normalize to same)
    'f': 'F',       # "fat"
    'v': 'V',       # "vat"
    'θ': 'TH',      # "think"
    'ð': 'TH',      # "this"
    's': 'S',       # "sap"
    'z': 'Z',       # "zap"
    'ʃ': 'SH',      # "ship"
    'ʒ': 'ZH',      # "measure"
    'h': 'H',       # "hat"
    'm': 'M',       # "mat"
    'n': 'N',       # "nat"
    'ŋ': 'NG',      # "sing"
    'l': 'L',       # "lap"
    'r': 'R',       # "rap"
    'j': 'Y',       # "yes"
    'w': 'W',       # "wet"
    
    # Affricates
    'tʃ': 'CH',     # "chip"
    'dʒ': 'J',      # "jump"
    
    # Common combinations that might appear
    ' ': '-',       # space becomes dash
    'ː': '',       # length marker (remove)
    'ˈ': '',       # primary stress (remove)
    'ˌ': '',       # secondary stress (remove)
}

# Phoneme example words - showing the sound in context
PHONEME_EXAMPLES = {
    # Vowels (monophthongs)
    'ɪ': 'bit',     # IH sound
    'ɛ': 'bed',     # EH sound
    'æ': 'cat',     # AE sound
    'ʌ': 'but',     # UH sound (stressed)
    'ɑ': 'father',  # AH sound
    'ɔ': 'law',     # AW sound
    'ʊ': 'book',    # UU sound
    'u': 'boot',    # OO sound
    'i': 'beat',    # EE sound
    'ə': 'about',   # schwa (unstressed)
    'ɝ': 'bird',    # ER sound (stressed)
    'ɚ': 'letter',  # ER sound (unstressed)
    
    # Diphthongs
    'eɪ': 'day',    # AY sound
    'aɪ': 'my',     # EYE sound
    'ɔɪ': 'boy',    # OY sound
    'aʊ': 'now',    # OW sound
    'oʊ': 'go',     # OH sound
    
    # R-colored vowels
    'ɪr': 'near',   # EER sound
    'ɛr': 'care',   # AIR sound
    'ɑr': 'car',    # AR sound
    'ɔr': 'for',    # OR sound
    'ʊr': 'tour',   # OOR sound
    'ər': 'letter', # ER sound
    
    # Consonants
    'p': 'pat',     # P sound
    'b': 'bat',     # B sound
    't': 'tap',     # T sound
    'd': 'dap',     # D sound
    'k': 'cat',     # K sound
    'g': 'gap',     # G sound (ASCII)
    'ɡ': 'gap',     # G sound (IPA)
    'f': 'fat',     # F sound
    'v': 'vat',     # V sound
    'θ': 'think',   # TH sound (voiceless)
    'ð': 'this',    # TH sound (voiced)
    's': 'sap',     # S sound
    'z': 'zap',     # Z sound
    'ʃ': 'ship',    # SH sound
    'ʒ': 'measure', # ZH sound
    'h': 'hat',     # H sound
    'm': 'mat',     # M sound
    'n': 'nat',     # N sound
    'ŋ': 'sing',    # NG sound
    'l': 'lap',     # L sound
    'r': 'rap',     # R sound
    'j': 'yes',     # Y sound
    'w': 'wet',     # W sound
    
    # Affricates
    'tʃ': 'chip',   # CH sound
    'dʒ': 'jump',   # J sound
}

def clean_word_for_phonemes(word: str) -> str:
    """
    Clean word by removing punctuation and extra spaces for phoneme processing.
    Keeps only alphabetical characters.
    """
    # Remove punctuation and extra whitespace
    cleaned = word.strip().translate(str.maketrans('', '', string.punctuation))
    cleaned = ''.join(cleaned.split())  # Remove all whitespace
    
    log(f"Word cleaning: '{word}' → '{cleaned}'")
    return cleaned
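
# Examples (illustrative): clean_word_for_phonemes("Hello!") returns "Hello", and
# clean_word_for_phonemes("don't") returns "dont" (the apostrophe counts as punctuation).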

def convert_digits_to_words(text: str) -> str:
    """Convert digits to word form for better phoneme analysis"""
    
    # Dictionary for number conversion
    number_words = {
        '0': 'zero', '1': 'one', '2': 'two', '3': 'three', '4': 'four',
        '5': 'five', '6': 'six', '7': 'seven', '8': 'eight', '9': 'nine',
        '10': 'ten', '11': 'eleven', '12': 'twelve', '13': 'thirteen', '14': 'fourteen',
        '15': 'fifteen', '16': 'sixteen', '17': 'seventeen', '18': 'eighteen', '19': 'nineteen',
        '20': 'twenty', '30': 'thirty', '40': 'forty', '50': 'fifty',
        '60': 'sixty', '70': 'seventy', '80': 'eighty', '90': 'ninety',
        '100': 'one hundred', '1000': 'one thousand'
    }
    
    def convert_number(match):
        num_str = match.group()
        num = int(num_str)
        
        # Direct lookup for common numbers
        if num_str in number_words:
            return number_words[num_str]
        
        # Handle numbers 21-99
        if 21 <= num <= 99:
            tens = (num // 10) * 10
            ones = num % 10
            if ones == 0:
                return number_words[str(tens)]
            else:
                return number_words[str(tens)] + " " + number_words[str(ones)]
        
        # Handle numbers 101-999 (basic implementation)
        if 101 <= num <= 999:
            hundreds = num // 100
            remainder = num % 100
            result = number_words[str(hundreds)] + " hundred"
            if remainder > 0:
                if remainder < 21:
                    result += " " + number_words[str(remainder)]
                else:
                    tens = (remainder // 10) * 10
                    ones = remainder % 10
                    result += " " + number_words[str(tens)]
                    if ones > 0:
                        result += " " + number_words[str(ones)]
            return result
        
        # For larger numbers or edge cases, return original
        return num_str
    
    # Replace standalone digits/numbers with word equivalents
    converted = re.sub(r'\b\d+\b', convert_number, text)
    log(f"Number conversion: '{text}' → '{converted}'")
    return converted
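
# Examples (illustrative):
#   convert_digits_to_words("wait 5 minutes") -> "wait five minutes"
#   convert_digits_to_words("room 342")       -> "room three hundred forty two"
#   convert_digits_to_words("year 2024")      -> "year 2024"  (not covered by the basic converter, left unchanged)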

def load_whisperx_models():
    """Load WhisperX models lazily with English-only configuration"""
    global whisperx_model, whisperx_align_model, whisperx_metadata
    
    if whisperx_model is None:
        log("Loading WhisperX models for English-only processing...")
        
        # First, try to set environment variable to disable executable stack
        os.environ['LD_BIND_NOW'] = '1'
        
        try:
            # Try loading with base.en first
            whisperx_model = whisperx.load_model("base.en", device="cpu", compute_type="float32", language="en")
            log("WhisperX base.en model loaded successfully")
            
            # Load alignment model for English
            whisperx_align_model, whisperx_metadata = whisperx.load_align_model(language_code="en", device="cpu")
            log("WhisperX English alignment model loaded successfully")
            
        except ImportError as ie:
            log(f"Import error loading WhisperX models: {ie}")
            
            # Try to use regular Whisper as fallback
            try:
                log("Attempting to use standard Whisper instead of WhisperX...")
                import whisper
                
                # Load standard whisper model
                whisper_model = whisper.load_model("base.en", device="cpu")
                
                # Create a wrapper to make it compatible with WhisperX interface
                class WhisperWrapper:
                    def __init__(self, model):
                        self.model = model
                    
                    def transcribe(self, audio, batch_size=16, language="en"):
                        result = self.model.transcribe(audio, language=language)
                        # Convert to WhisperX format
                        return {
                            "segments": [{
                                "text": result["text"],
                                "start": 0.0,
                                "end": len(audio) / 16000.0,  # Approximate based on sample rate
                                "words": []  # Will need to handle word-level timing differently
                            }],
                            "language": language
                        }
                
                whisperx_model = WhisperWrapper(whisper_model)
                log("Using standard Whisper as fallback (limited word-level timing)")
                
                # For alignment, we'll need to handle this differently
                whisperx_align_model = None
                whisperx_metadata = None
                
            except Exception as whisper_error:
                log(f"Standard Whisper fallback failed: {whisper_error}")
                
                # Last resort: Create a minimal mock that at least returns something
                class MinimalWhisperMock:
                    def transcribe(self, audio, batch_size=16, language="en"):
                        # Return a minimal valid structure
                        return {
                            "segments": [{
                                "text": "[Audio processing unavailable - WhisperX loading failed]",
                                "start": 0.0,
                                "end": 1.0,
                                "words": []
                            }],
                            "language": language
                        }
                
                whisperx_model = MinimalWhisperMock()
                whisperx_align_model = None
                whisperx_metadata = None
                log("WARNING: Using minimal mock - transcription will be limited")
                
        except Exception as e:
            log(f"Error loading WhisperX models: {e}")
            raise RuntimeError(f"Unable to load speech recognition models: {e}")

def convert_webm_to_wav(bts):
    p = subprocess.run(["ffmpeg", "-i", "pipe:0", "-f", "wav", "-ar", "16000", "-ac", "1", "pipe:1"],
                       input=bts, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if p.returncode != 0:
        raise RuntimeError(p.stderr.decode())
    return io.BytesIO(p.stdout)

def calculate_similarity(detected: str, expected: str) -> float:
    """Calculate similarity between detected and expected phonemes"""
    detected_norm = normalize_phoneme_string(detected)
    expected_norm = normalize_phoneme_string(expected)
    return SequenceMatcher(None, detected_norm, expected_norm).ratio()
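
# Example (illustrative): for detected "hɛlo" vs expected "hɛloʊ", SequenceMatcher finds
# 4 matching characters out of 4 + 5 total, so the ratio is 2 * 4 / 9 ≈ 0.89.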

def detect_word_boundary_overlap(audio_segment: torch.Tensor, sample_rate: int, word: str) -> float:
    """
    Analyze first 1/3 of audio segment for: [noise] → [silence] → [noise] pattern
    Returns: offset in seconds to skip initial noise, or 0.0 if no pattern found
    """
    if audio_segment.shape[-1] == 0:
        return 0.0
    
    # Analyze only first 1/3 of segment
    first_third_samples = audio_segment.shape[-1] // 3
    if first_third_samples < sample_rate * 0.1:  # Less than 100ms total
        return 0.0
    
    first_third = audio_segment[:, :first_third_samples]
    
    # WORKAROUND: Audio segment appears to be reversed for unknown reason
    # This flip corrects the chronological order for proper boundary detection
    first_third = torch.flip(first_third, [-1])
    
    # Calculate energy in small windows (50ms chunks)
    window_size = int(0.05 * sample_rate)  # 50ms windows
    if window_size <= 0:
        return 0.0
        
    energy_levels = []
    
    for i in range(0, first_third_samples - window_size, window_size):
        window = first_third[:, i:i + window_size]
        energy = torch.mean(window ** 2).item()  # RMS energy
        energy_levels.append(energy)
    
    if len(energy_levels) < 3:
        return 0.0
    
    # Look for pattern: [high energy] → [low energy] → [high energy]
    silence_threshold = np.percentile(energy_levels, 20)  # Bottom 20%
    noise_threshold = silence_threshold * 3
    
    # Find sustained silence (2+ consecutive low-energy windows)
    for i in range(len(energy_levels) - 1):
        if (energy_levels[i] < silence_threshold and 
            energy_levels[i + 1] < silence_threshold):
            
            # Check if there was noise before silence
            noise_before = any(e > noise_threshold for e in energy_levels[:i])
            # Check if there's noise after silence  
            noise_after = any(e > noise_threshold for e in energy_levels[i + 2:])
            
            if noise_before and noise_after:
                # Found the pattern! Return offset to end of silence
                silence_end_sample = (i + 2) * window_size
                offset_seconds = silence_end_sample / sample_rate
                log(f"🔧 Word '{word}': detected boundary overlap, trimming {offset_seconds:.3f}s from start")
                return offset_seconds
    
    return 0.0  # No pattern detected
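
# Rough numbers behind the windowing above (assuming the usual 16 kHz input): each 50 ms
# window is 800 samples, so a 1.2 s word segment has a first third of 6400 samples,
# i.e. roughly 7 energy windows to scan for the noise -> silence -> noise pattern.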

def extract_audio_segment(waveform: torch.Tensor, sample_rate: int, 
                         start_time: float, end_time: float, word: str, 
                         verbose: bool = True) -> torch.Tensor:
    """Extract audio segment for a specific word"""
    # Convert to samples
    start_sample = int(start_time * sample_rate)
    end_sample = int(end_time * sample_rate)
    end_sample = min(waveform.shape[-1], end_sample)
    
    if end_sample <= start_sample:
        if verbose:
            log(f"Invalid segment for '{word}': {start_time:.3f}s-{end_time:.3f}s")
        return torch.zeros((1, 1600))  # Return 100ms of silence
    
    segment = waveform[:, start_sample:end_sample]
    if verbose:
        log(f"Extracted '{word}': {start_time:.3f}s-{end_time:.3f}s ({segment.shape[-1]} samples)")
    
    return segment

def detect_phoneme_from_audio(audio_segment: torch.Tensor, sample_rate: int, word: str) -> str:
    """Detect phoneme from audio segment using phoneme model"""
    log(f"🔍 Starting phoneme detection for '{word}'...")
    
    if audio_segment.shape[-1] == 0:
        log(f"⚠️ Empty audio segment for '{word}'")
        return ""
    
    log(f"🔊 Original audio segment: {audio_segment.shape[-1]} samples")
    
    # Pad or truncate to standard length for model
    target_length = 16000  # 1 second
    if audio_segment.shape[-1] < target_length:
        log(f"🔧 Padding audio from {audio_segment.shape[-1]} to {target_length} samples")
        audio_segment = torch.nn.functional.pad(audio_segment, (0, target_length - audio_segment.shape[-1]))
    elif audio_segment.shape[-1] > target_length:
        # Don't truncate long segments - keep full audio for complex words
        log(f"⚠️ Audio longer than target ({audio_segment.shape[-1]} > {target_length}), keeping full length")
        log(f"    This preserves all phonemes for long words like 'sophisticated'")
    else:
        log(f"✅ Audio segment already correct length: {target_length} samples")
    
    log(f"🎛️ Processing through phoneme processor...")
    start_time = datetime.now()
    
    # Process through phoneme model
    try:
        input_values = phoneme_processor(audio_segment.squeeze(), sampling_rate=sample_rate, return_tensors="pt").input_values
        processor_time = (datetime.now() - start_time).total_seconds()
        log(f"⏱️ Phoneme processor took: {processor_time:.3f}s")
        
        log(f"🧠 Running through phoneme model...")
        model_start_time = datetime.now()
        
        with torch.no_grad():
            logits = phoneme_model(input_values).logits
            predicted_ids = torch.argmax(logits, dim=-1)
            detected_phoneme = phoneme_processor.decode(predicted_ids[0])
        
        model_time = (datetime.now() - model_start_time).total_seconds()
        log(f"⏱️ Phoneme model inference took: {model_time:.3f}s")
        
        total_time = (datetime.now() - start_time).total_seconds()
        log(f"⏱️ Total phoneme detection time: {total_time:.3f}s")
        
    except Exception as e:
        log(f"❌ Error in phoneme detection: {e}")
        return ""
    
    log(f"🎯 Phoneme detection for '{word}': '{detected_phoneme}'")
    return detected_phoneme

def sliding_window_phoneme_match(detected_phoneme: str, expected_phoneme: str, word: str) -> Tuple[str, float, int, int]:
    """
    Find the best matching substring in detected phoneme using sliding window.
    For zero scores, intelligently selects which phoneme substring to return.
    Returns: (best_match_substring, best_score, start_index, end_index)
    """
    detected_norm = normalize_phoneme_string(detected_phoneme)
    expected_norm = normalize_phoneme_string(expected_phoneme)
    
    log(f"🔍 Sliding window analysis for '{word}':")
    log(f"  Expected (norm): '{expected_norm}' (length: {len(expected_norm)})")
    log(f"  Detected (norm): '{detected_norm}' (length: {len(detected_norm)})")
    
    # If detected is shorter than expected, just compare directly
    if len(detected_norm) < len(expected_norm):
        score = calculate_similarity(detected_norm, expected_norm)
        log(f"  Direct comparison (detected < expected): score = {score:.3f}")
        return detected_norm, score, 0, len(detected_norm)
    
    # Sliding window: detected is longer than expected
    expected_len = len(expected_norm)
    best_score = 0
    best_match = ""
    best_start = 0
    best_end = expected_len
    
    log(f"  Sliding window search (window size: {expected_len}):")
    
    # Slide through all possible positions
    for i in range(len(detected_norm) - expected_len + 1):
        substring = detected_norm[i:i + expected_len]
        score = calculate_similarity(substring, expected_norm)
        
        log(f"    Position {i}: '{substring}' vs '{expected_norm}' = {score:.3f}")
        
        if score > best_score:  # Changed back from >= to > to prefer earlier matches
            best_score = score
            best_match = substring
            best_start = i
            best_end = i + expected_len
            log(f"      ✅ New best match!")
            
            # Exit early on perfect match
            if score >= 1.0:
                log(f"      🎯 Perfect match found, stopping search")
                break
    
    # Handle zero score case - aim for middle substring when possible
    if best_score == 0:
        log(f"  ⚠️ Zero score detected, selecting middle substring for audio alignment")
        total_detected_len = len(detected_norm)
        
        if total_detected_len == expected_len:
            # Same length - use the whole string
            best_start = 0
            best_end = expected_len
            best_match = detected_norm
            log(f"    🔍 Same length: using full string")
        else:
            # Longer detected - aim for middle
            middle_start = max(0, (total_detected_len - expected_len) // 2)
            best_start = middle_start
            best_end = middle_start + expected_len
            best_match = detected_norm[best_start:best_end]
            log(f"    🔍 Aiming for middle: position {best_start}-{best_end}")
    
    log(f"  🏆 Final selection: '{best_match}' at position {best_start}-{best_end} (score: {best_score:.3f})")
    return best_match, best_score, best_start, best_end
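
# Worked example (illustrative): if the detected string normalizes to "əəhɛloʊ" and the
# expected phonemes normalize to "hɛloʊ", the length-5 window slides over positions 0-2;
# at position 2 the substring "hɛloʊ" scores 1.0, the search exits early, and the
# function returns ("hɛloʊ", 1.0, 2, 7).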

def create_word_phoneme_mapping_v2(word: str, expected_phoneme: str) -> Dict[int, str]:
    """
    Create mapping from phoneme positions to original word letters.
    Simplified version that handles common cases more reliably.
    
    Args:
        word: The original word (already cleaned, no punctuation)
        expected_phoneme: The expected phoneme string
    
    Returns:
        Dictionary mapping phoneme index to word letter(s)
    """
    word_lower = word.lower()
    phoneme_norm = normalize_phoneme_string(expected_phoneme)
    
    log(f"🗺️ Creating mapping for '{word}' → '{phoneme_norm}'")
    log(f"   Word length: {len(word_lower)}, Phoneme length: {len(phoneme_norm)}")
    
    if not phoneme_norm:
        return {}
    
    # Simple cases first
    if len(word_lower) == len(phoneme_norm):
        # Direct 1:1 mapping
        mapping = {i: word[i] for i in range(len(phoneme_norm))}  # Preserve original case
        log(f"  Direct mapping (equal lengths): {mapping}")
        return mapping
    
    # For length mismatches, use proportional distribution
    mapping = {}
    
    if len(phoneme_norm) > len(word_lower):
        # More phonemes than letters (diphthongs, etc.)
        # Distribute letters across phonemes without duplication
        phonemes_per_letter = len(phoneme_norm) / len(word_lower)
        
        for phoneme_idx in range(len(phoneme_norm)):
            # Find which letter this phoneme belongs to
            letter_idx = min(int(phoneme_idx / phonemes_per_letter), len(word_lower) - 1)
            
            # Only assign each letter once (to its first phoneme)
            start_phoneme_for_letter = int(letter_idx * phonemes_per_letter)
            
            if phoneme_idx == start_phoneme_for_letter:
                mapping[phoneme_idx] = word[letter_idx]  # Preserve case
            else:
                mapping[phoneme_idx] = ''  # Empty for additional phonemes
        
    else:
        # More letters than phonemes (silent letters)
        # Distribute letters across available phonemes
        letters_per_phoneme = len(word_lower) / len(phoneme_norm)
        
        for phoneme_idx in range(len(phoneme_norm)):
            # Calculate range of letters for this phoneme
            start_letter = int(phoneme_idx * letters_per_phoneme)
            end_letter = int((phoneme_idx + 1) * letters_per_phoneme)
            
            # Collect all letters for this phoneme
            letter_group = word[start_letter:end_letter]
            mapping[phoneme_idx] = letter_group
    
    log(f"  Final mapping: {mapping}")
    return mapping
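
# Illustrative mappings (hedged; real phoneme strings come from the phonemizer):
#   "cat"  with phonemes "kæt" (equal lengths)  -> {0: 'c', 1: 'a', 2: 't'}
#   "knee" with phonemes "ni"  (more letters)   -> {0: 'kn', 1: 'ee'}
# With more phonemes than letters, phoneme positions beyond a letter's first phoneme
# map to empty strings so letters are not duplicated.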

def create_character_level_feedback_v2(word: str, expected_norm: str, 
                                       detected_norm: str, 
                                       mapping: Dict[int, str]) -> str:
    """
    Create character-level feedback with simplified logic.
    
    Args:
        word: Original word (for display purposes)
        expected_norm: Normalized expected phonemes
        detected_norm: Normalized detected phonemes  
        mapping: Phoneme position to letter mapping
        
    Returns:
        HTML string with properly formatted feedback
    """
    result = []
    
    log(f"📝 Character feedback for '{word}':")
    log(f"  Expected: '{expected_norm}' (len={len(expected_norm)})")
    log(f"  Detected: '{detected_norm}' (len={len(detected_norm)})")
    
    # Ensure both strings are same length for comparison
    max_len = max(len(expected_norm), len(detected_norm))
    expected_padded = expected_norm.ljust(max_len)
    detected_padded = detected_norm.ljust(max_len)
    
    # Track which word positions have been used
    used_positions = set()
    
    for i in range(min(len(expected_norm), max_len)):
        expected_char = expected_padded[i] if i < len(expected_padded) else ' '
        detected_char = detected_padded[i] if i < len(detected_padded) else ' '
        
        # Get the word letter(s) for this phoneme position
        word_letters = mapping.get(i, '')
        
        # Skip empty mappings (extra phonemes in diphthongs)
        if not word_letters:
            continue
            
        # Check if we've already used these letters
        letter_key = (word_letters, i)
        if letter_key in used_positions:
            continue
        used_positions.add(letter_key)
        
        if expected_char == detected_char:
            # Correct pronunciation - show original letters
            result.append(word_letters)
        else:
            # Incorrect - create error span with tooltip
            expected_english = PHONEME_TO_ENGLISH.get(expected_char, expected_char)
            expected_example = PHONEME_EXAMPLES.get(expected_char, '')
            
            detected_english = PHONEME_TO_ENGLISH.get(detected_char, 'silence' if detected_char == ' ' else detected_char)
            detected_example = PHONEME_EXAMPLES.get(detected_char, '')
            
            # Build tooltip text
            if expected_example and detected_example:
                tooltip = f"Expected '{expected_english}' as in '{expected_example}'<br>You said '{detected_english}' as in '{detected_example}'"
            elif expected_example:
                tooltip = f"Expected '{expected_english}' as in '{expected_example}'<br>You said '{detected_english}'"
            else:
                tooltip = f"Expected '{expected_english}'<br>You said '{detected_english}'"
            
            # Create error span
            error_html = f'<span class="phoneme-error" data-tooltip-html="{tooltip}"><strong><u>{word_letters}</u></strong></span>'
            result.append(error_html)
    
    feedback = ''.join(result)
    log(f"  Final feedback: {feedback}")
    return feedback

def format_output_word_v2(word_original: str, word_clean: str, 
                          similarity_score: float, detected_phoneme: str,
                          expected_phoneme: str, similarity_threshold: float) -> Tuple[str, str]:
    """
    Format word output with cleaner logic.
    
    Args:
        word_original: Original word with punctuation (for display)
        word_clean: Cleaned word (for phoneme processing)
        similarity_score: Similarity score between detected and expected
        detected_phoneme: Detected phoneme string
        expected_phoneme: Expected phoneme string
        similarity_threshold: User's threshold for acceptable pronunciation
        
    Returns:
        Tuple of (display_text, colored_html)
    """
    # Determine color based on score
    if similarity_score < similarity_threshold:
        color = "red"
        needs_feedback = True
    elif similarity_score >= similarity_threshold + (1.0 - similarity_threshold) * 0.3:
        color = "green"
        needs_feedback = False
    else:
        color = "orange"
        needs_feedback = False
    
    score_percentage = int(similarity_score * 100)
    
    if needs_feedback:
        # Poor pronunciation - show character-level feedback
        
        # Create phoneme mapping using cleaned word
        mapping = create_word_phoneme_mapping_v2(word_clean, expected_phoneme)
        
        # Normalize phonemes for comparison
        expected_norm = normalize_phoneme_string(expected_phoneme)
        detected_norm = normalize_phoneme_string(detected_phoneme)
        
        # Generate character-level feedback
        feedback_html = create_character_level_feedback_v2(
            word_clean, expected_norm, detected_norm, mapping
        )
        
        # Preserve original punctuation if present
        if word_original != word_clean:
            # Find trailing punctuation
            punct = ''
            for i in range(len(word_original) - 1, -1, -1):
                if word_original[i] in string.punctuation:
                    punct = word_original[i] + punct
                else:
                    break
            
            # Find leading punctuation
            lead_punct = ''
            for char in word_original:
                if char in string.punctuation:
                    lead_punct += char
                else:
                    break
            
            display_text = lead_punct + feedback_html + punct
        else:
            display_text = feedback_html
            
        # For tooltip, use the cleaned word
        tooltip_text = word_clean
        
    else:
        # Good pronunciation - show original word
        display_text = word_original
        tooltip_text = word_original
    
    # Create final colored HTML with embedded data
    colored_html = f'<span style="color:{color}" data-score="{score_percentage}" data-word="{word_original}" data-tooltip="{tooltip_text}">{display_text}</span>'
    
    return display_text, colored_html
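
# Color bands implied by the thresholds above, e.g. with the default threshold of 0.4:
# scores below 0.40 are red (with character-level feedback), 0.40 up to just under
# 0.58 are orange, and 0.58 or higher are green, since 0.4 + (1.0 - 0.4) * 0.3 = 0.58.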

def trim_audio_segment_by_phoneme_position(audio_segment: torch.Tensor, 
                                         detected_phoneme_full: str, 
                                         best_start: int, best_end: int, 
                                         word: str) -> torch.Tensor:
    """
    Trim audio segment based on the position of best matching phoneme substring.
    Uses 85% of calculated trim percentages to be less aggressive.
    Ensures final segment is never shorter than 0.1 seconds.
    Returns None when no trimming is needed, signalling the caller to fall back to the original WhisperX timing.
    """
    detected_norm = normalize_phoneme_string(detected_phoneme_full)
    total_phoneme_len = len(detected_norm)
    
    if total_phoneme_len == 0 or (best_start == 0 and best_end == total_phoneme_len):
        log(f"🎡 No audio trimming needed for '{word}' (using original segment)")
        return None  # Signal to use original WhisperX timing instead of expanded
    
    # Calculate initial trim percentages
    start_trim_pct = best_start / total_phoneme_len
    end_trim_pct = (total_phoneme_len - best_end) / total_phoneme_len
    
    # Apply 85% factor to be less aggressive
    start_trim_pct_adjusted = start_trim_pct * 0.85
    end_trim_pct_adjusted = end_trim_pct * 0.85
    
    # Calculate samples and duration
    total_samples = audio_segment.shape[-1]
    sample_rate = 16000  # Known sample rate
    original_duration = total_samples / sample_rate
    
    # Calculate initial trim amounts
    start_trim_samples = int(total_samples * start_trim_pct_adjusted)
    end_trim_samples = int(total_samples * end_trim_pct_adjusted)
    
    # Calculate resulting duration
    trimmed_samples = total_samples - start_trim_samples - end_trim_samples
    trimmed_duration = trimmed_samples / sample_rate
    
    log(f"🎡 Audio trimming for '{word}':")
    log(f"  Original duration: {original_duration:.3f}s ({total_samples} samples)")
    log(f"  Phoneme position: {best_start}-{best_end-1} of {total_phoneme_len} chars")
    log(f"  Initial trim: start={start_trim_pct_adjusted:.1%} ({start_trim_samples} samples), end={end_trim_pct_adjusted:.1%} ({end_trim_samples} samples)")
    log(f"  Resulting duration: {trimmed_duration:.3f}s")
    
    # MINIMUM DURATION CHECK: Ensure result is at least 0.1 seconds
    min_duration = 0.1
    min_samples = int(min_duration * sample_rate)
    
    if trimmed_samples < min_samples:
        log(f"  ⚠️ Trimmed duration ({trimmed_duration:.3f}s) below minimum ({min_duration}s)")
        
        # Calculate how much we need to preserve
        samples_to_preserve = min_samples
        total_trim_needed = total_samples - samples_to_preserve
        
        if total_trim_needed <= 0:
            log(f"  ⚠️ Original segment already at minimum length, no trimming")
            return None  # Use original WhisperX timing
        
        # Redistribute the trimming proportionally while respecting minimum duration
        original_total_trim = start_trim_samples + end_trim_samples
        
        if original_total_trim > 0:
            # Scale down both trims proportionally
            scale_factor = total_trim_needed / original_total_trim
            start_trim_samples = int(start_trim_samples * scale_factor)
            end_trim_samples = int(end_trim_samples * scale_factor)
            
            # Ensure we don't exceed total available trim
            if start_trim_samples + end_trim_samples > total_trim_needed:
                excess = (start_trim_samples + end_trim_samples) - total_trim_needed
                # Remove excess from the larger trim
                if start_trim_samples > end_trim_samples:
                    start_trim_samples -= excess
                else:
                    end_trim_samples -= excess
            
            log(f"  🔧 Adjusted trim: start={start_trim_samples} samples, end={end_trim_samples} samples")
            log(f"  🔧 Scale factor applied: {scale_factor:.3f}")
        else:
            # Shouldn't happen, but safety check
            start_trim_samples = 0
            end_trim_samples = 0
    
    # Apply final trimming
    trimmed_start = start_trim_samples
    trimmed_end = total_samples - end_trim_samples
    
    if trimmed_end <= trimmed_start:
        log(f"  ⚠️ Invalid trim range after adjustment, using original segment")
        return None  # Signal to use original WhisperX timing
    
    trimmed_segment = audio_segment[:, trimmed_start:trimmed_end]
    final_duration = trimmed_segment.shape[-1] / sample_rate
    
    log(f"  ✅ Final result: {trimmed_segment.shape[-1]} samples ({final_duration:.3f}s)")
    log(f"  ✅ Trimmed: {start_trim_samples} from start, {end_trim_samples} from end")
    
    return trimmed_segment
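
# Worked example (illustrative): for a 1 s segment (16000 samples) whose detected phonemes
# span 10 characters with the best match at positions 2-7, the raw trim fractions are
# 0.2 (start) and 0.3 (end); after the 0.85 factor they become 0.17 and 0.255, i.e.
# 2720 and 4080 samples, leaving 9200 samples (~0.575 s), which clears the 0.1 s minimum
# so no rescaling is needed.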

def get_expected_phonemes(words: List[str]) -> List[str]:
    """Get expected phonemes using espeak phonemizer"""
    cache_key = tuple(words)
    if cache_key in phoneme_cache:
        log(f"📚 Using cached phonemes for: {words}")
        cached_result = phoneme_cache[cache_key]
        log(f"   Cached phonemes: {list(zip(words, cached_result))}")
        return cached_result
    
    log(f"🔀 Getting expected phonemes using phonemizer for: {words}")
    
    try:
        # Use espeak phonemizer to get IPA phonemes
        phonemes = phonemize(words, language='en-us', backend='espeak', strip=True)
        
        # Cache the results
        phoneme_cache[cache_key] = phonemes
        
        # Log the phoneme results
        log(f"✅ Phonemizer results:")
        for word, phoneme in zip(words, phonemes):
            log(f"   '{word}' → '{phoneme}'")
        
        return phonemes
        
    except Exception as e:
        log(f"❌ Error in phonemizer: {e}")
        log(f"   Returning empty phonemes for all words")
        # Return empty strings as fallback
        empty_results = [""] * len(words)
        phoneme_cache[cache_key] = empty_results
        return empty_results

async def generate_tts_audio(word: str) -> str:
    """Generate TTS audio for a word with silence padding"""
    if word in tts_cache:
        return tts_cache[word]
    
    try:
        communicate = edge_tts.Communicate(word, TTS_VOICE)
        audio_data = b""
        async for chunk in communicate.stream():
            if chunk["type"] == "audio":
                audio_data += chunk["data"]
        
        if audio_data:
            # Add silence padding to TTS audio as well
            # First decode the MP3 to get raw audio
            import tempfile
            with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as tmp_mp3:
                tmp_mp3.write(audio_data)
                tmp_mp3_path = tmp_mp3.name
            
            try:
                # Load the TTS audio
                tts_waveform, tts_sample_rate = torchaudio.load(tmp_mp3_path)
                
                # Resample if needed to match our standard rate
                if tts_sample_rate != 16000:
                    tts_waveform = torchaudio.transforms.Resample(tts_sample_rate, 16000)(tts_waveform)
                    tts_sample_rate = 16000
                
                # Add 0.25s silence padding on each end
                padding_samples = int(0.25 * tts_sample_rate)
                silence_shape = list(tts_waveform.shape)
                silence_shape[-1] = padding_samples
                silence_padding = torch.zeros(silence_shape)
                
                # Concatenate: silence + audio + silence
                padded_waveform = torch.cat([silence_padding, tts_waveform, silence_padding], dim=-1)
                
                # Convert back to base64
                buffer = io.BytesIO()
                torchaudio.save(buffer, padded_waveform, tts_sample_rate, format="wav")
                buffer.seek(0)
                audio_b64 = base64.b64encode(buffer.read()).decode('utf-8')
                
                tts_cache[word] = audio_b64
                log(f"🔇 TTS for '{word}': Added 0.25s silence padding on each end")
                return audio_b64
                
            finally:
                # Clean up temp file
                if os.path.exists(tmp_mp3_path):
                    os.remove(tmp_mp3_path)
                    
    except Exception as e:
        log(f"TTS failed for '{word}': {e}")
    
    return ""

def audio_to_base64(audio_segment: torch.Tensor, sample_rate: int, add_padding: bool = True) -> str:
    """
    Convert audio tensor to base64 string.
    
    Args:
        audio_segment: The audio tensor to convert
        sample_rate: Sample rate of the audio
        add_padding: If True, adds 0.25s of silence on each end to prevent audio processor lag
    
    Returns:
        Base64 encoded audio string
    """
    try:
        if add_padding:
            # Add 0.25 seconds of silence on each end
            padding_samples = int(0.25 * sample_rate)  # 0.25 seconds worth of samples
            
            # Create silence padding (zeros with same shape as audio segment)
            silence_shape = list(audio_segment.shape)
            silence_shape[-1] = padding_samples
            silence_padding = torch.zeros(silence_shape)
            
            # Concatenate: silence + audio + silence
            padded_segment = torch.cat([silence_padding, audio_segment, silence_padding], dim=-1)
            
            log(f"🔇 Added silence padding: {padding_samples} samples (0.25s) on each end")
            log(f"   Original: {audio_segment.shape[-1]} samples → Padded: {padded_segment.shape[-1]} samples")
            
            audio_segment = padded_segment
        
        buffer = io.BytesIO()
        torchaudio.save(buffer, audio_segment, sample_rate, format="wav")
        buffer.seek(0)
        return base64.b64encode(buffer.read()).decode('utf-8')
    except Exception as e:
        log(f"Audio conversion failed: {e}")
        return ""
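
# Note: at the 16 kHz rate used throughout this service, the optional 0.25 s padding adds
# 4000 samples of silence on each side before the WAV is base64-encoded.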

@app.post("/api/transcribe")
async def transcribe(audio: UploadFile = File(...), similarity_threshold: float = Form(0.4)):
    log("=== STARTING WHISPERX ENGLISH-ONLY PHONEME ANALYSIS ===")
    
    # Use similarity threshold from frontend (default 0.4)
    similarity = max(0.0, min(1.0, similarity_threshold))  # Clamp between 0 and 1
    log(f"Using similarity threshold: {similarity:.2f}")
    
    try:
        # Load WhisperX models if needed
        load_whisperx_models()
        
        # 1. Convert and load audio
        data = await audio.read()
        wav_io = convert_webm_to_wav(data)
        
        # Save to temporary file for WhisperX
        temp_audio_path = "/tmp/temp_audio.wav"
        with open(temp_audio_path, "wb") as f:
            f.write(wav_io.getvalue())
        
        # Load audio with WhisperX
        audio_data = whisperx.load_audio(temp_audio_path)
        
        log(f"Audio loaded for WhisperX: {len(audio_data)} samples")
        
        # 2. Get transcription with WhisperX - EXPLICITLY SET TO ENGLISH
        result = whisperx_model.transcribe(audio_data, batch_size=16, language="en")
        
        # 3. Get precise word alignments with WhisperX (if alignment model available)
        if whisperx_align_model is not None:
            aligned_result = whisperx.align(result["segments"], whisperx_align_model, whisperx_metadata, audio_data, device="cpu")
        else:
            log("WARNING: Alignment model not available, using basic word splitting")
            # Fallback: split text into words with approximate timing
            aligned_result = {"segments": []}
            for segment in result["segments"]:
                text = segment.get("text", "").strip()
                if not text:
                    continue
                    
                words = text.split()
                duration = segment["end"] - segment["start"]
                time_per_word = duration / len(words) if words else 0
                
                word_list = []
                for i, word in enumerate(words):
                    word_start = segment["start"] + (i * time_per_word)
                    word_end = segment["start"] + ((i + 1) * time_per_word)
                    word_list.append({
                        "word": word,
                        "start": word_start,
                        "end": word_end,
                        "score": 0.9  # Default confidence
                    })
                
                aligned_result["segments"].append({
                    "text": text,
                    "start": segment["start"],
                    "end": segment["end"],
                    "words": word_list
                })
        
        # Extract word-level data from WhisperX results
        words = []
        word_texts = []  # Original with punctuation for display
        word_texts_clean = []  # Cleaned for phoneme processing
        word_timings = []
        
        for segment in aligned_result["segments"]:
            if "words" in segment:
                for word_info in segment["words"]:
                    if "start" in word_info and "end" in word_info and word_info["word"]:
                        original_word = word_info["word"].strip()
                        
                        # Convert digits to words for better phoneme analysis
                        word_converted = convert_digits_to_words(original_word)
                        cleaned_word = clean_word_for_phonemes(word_converted)
                        
                        # Only process words that have alphabetical content after cleaning
                        if cleaned_word:
                            words.append(word_info)
                            word_texts.append(word_converted)  # Use converted form for display
                            word_texts_clean.append(cleaned_word)  # Clean for processing
                            word_timings.append((word_info["start"], word_info["end"]))
        
        if not words:
            return {"resolved": "", "resolved_colored": "", "audio_data": []}
        
        log(f"Found {len(words)} words with precise WhisperX timings")
        
        # Log WhisperX timings
        log("=== WHISPERX PRECISE TIMINGS ===")
        for i, (word_original, word_clean, (start, end)) in enumerate(zip(word_texts, word_texts_clean, word_timings)):
            gap = ""
            if i > 0:
                prev_end = word_timings[i-1][1]
                gap_duration = start - prev_end
                gap = f" | gap: {gap_duration:.3f}s"
            log(f"Word {i}: '{word_original}' (clean: '{word_clean}') at {start:.3f}s-{end:.3f}s{gap}")
        
        # 4. Get expected phonemes using CLEANED words
        expected_phonemes = get_expected_phonemes(word_texts_clean)
        
        # 5. Load audio as tensor for phoneme analysis
        waveform, sample_rate = torchaudio.load(temp_audio_path)
        
        if sample_rate != 16000:
            waveform = torchaudio.transforms.Resample(sample_rate, 16000)(waveform)
            sample_rate = 16000
        
        # 6. Process each word using expanded timing with sliding window matching
        results = []
        audio_data_list = []
        
        # Generate TTS for all words concurrently (using CLEANED words)
        log("Generating TTS audio...")
        tts_tasks = [generate_tts_audio(word_clean) for word_clean in word_texts_clean]
        tts_results = await asyncio.gather(*tts_tasks)
        
        log("\n=== PROCESSING WORDS WITH EXPANDED TIMING + SLIDING WINDOW ===")
        
        for i, (word_info, word_original, word_clean, (start_time, end_time)) in enumerate(zip(words, word_texts, word_texts_clean, word_timings)):
            expected_phoneme = expected_phonemes[i] if i < len(expected_phonemes) else ""
            
            log(f"\n--- Processing word {i}: '{word_original}' (clean: '{word_clean}') ---")
            log(f"πŸ”Š WhisperX timing: {start_time:.3f}s - {end_time:.3f}s (duration: {end_time - start_time:.3f}s)")
            log(f"🎯 Expected phoneme: '{expected_phoneme}'")
            
            # DEBUGGING: Special attention to problematic words
            if word_clean.lower() in ['go', 'no', 'so', 'to', 'do'] or len(expected_phoneme) > len(word_clean):
                log(f"⚠️ SPECIAL CASE: Word '{word_clean}' has {len(word_clean)} letters but {len(expected_phoneme)} phonemes")
                log(f"    This may be a diphthong case requiring special handling")
            
            # For very short words, expand the WhisperX timing itself before processing
            original_duration = end_time - start_time
            if original_duration < 0.1:
                log(f"πŸ” Ultra-short word detected ({original_duration:.3f}s), expanding WhisperX timing")
                audio_duration = waveform.shape[-1] / sample_rate
                
                # Expand WhisperX boundaries by Β±0.05s
                start_time = max(0, start_time - 0.05)
                end_time = min(audio_duration, end_time + 0.05)
                
                log(f"   Expanded WhisperX timing: {start_time:.3f}s - {end_time:.3f}s (new duration: {end_time - start_time:.3f}s)")
            
            # Show gaps between words
            if i > 0:
                prev_end = word_timings[i-1][1]
                gap = start_time - prev_end
                if gap > 0:
                    log(f"⏸️ Gap from previous word: {gap:.3f}s")
                elif gap < 0:
                    log(f"⚠️ OVERLAP with previous word: {gap:.3f}s")
                else:
                    log(f"πŸ”— No gap (continuous)")
            
            # Calculate expanded timing (Β±0.125s with boundary protection)
            expansion_seconds = 0.125
            audio_duration = waveform.shape[-1] / sample_rate
            
            expanded_start = max(0, start_time - expansion_seconds)
            expanded_end = min(audio_duration, end_time + expansion_seconds)
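            # The expansion gives the phoneme detector extra context around the
            # WhisperX boundaries; the sliding-window match below then narrows the
            # segment back down to the best-matching region.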
            
            log(f"πŸ” Timing expansion: {start_time:.3f}s-{end_time:.3f}s β†’ {expanded_start:.3f}s-{expanded_end:.3f}s")
            
            # Extract expanded audio segment
            expanded_audio_segment = extract_audio_segment(waveform, sample_rate, expanded_start, expanded_end, word_clean, verbose=True)
            
            # Check for word boundary overlap and trim if needed
            log(f"πŸ” Checking word boundary overlap for '{word_clean}'...")
            boundary_offset = detect_word_boundary_overlap(expanded_audio_segment, sample_rate, word_clean)
            if boundary_offset > 0:
                log(f"πŸ”§ Detected word overlap, trimming {boundary_offset:.3f}s from start")
                trim_samples = int(boundary_offset * sample_rate)
                expanded_audio_segment = expanded_audio_segment[:, trim_samples:]
                # Update expanded_start for accurate timing logs
                expanded_start += boundary_offset
                log(f"   Updated expanded start: {expanded_start:.3f}s")
                
                # ALSO apply the boundary offset to WhisperX timing
                original_start_time = start_time
                start_time = max(0, start_time + boundary_offset)
                end_time = max(start_time + 0.01, end_time)  # Ensure minimum 10ms duration
                log(f"   Updated WhisperX timing: {original_start_time:.3f}s β†’ {start_time:.3f}s (shifted +{boundary_offset:.3f}s)")
            
            # Also extract WhisperX original timing for comparison (now using updated start_time)
            whisperx_audio_segment = extract_audio_segment(waveform, sample_rate, start_time, end_time, word_clean, verbose=False)
            
            # Detect phoneme from expanded audio segment
            detected_phoneme_raw = detect_phoneme_from_audio(expanded_audio_segment, sample_rate, word_clean)
            
            # Get expected phoneme and normalize both
            detected_phoneme_norm = normalize_phoneme_string(detected_phoneme_raw)
            expected_phoneme_norm = normalize_phoneme_string(expected_phoneme)
            
            log(f"πŸ”Š Raw detected phoneme (expanded): '{detected_phoneme_raw}'")
            log(f"🧹 Normalized detected: '{detected_phoneme_norm}'")
            log(f"🧹 Normalized expected: '{expected_phoneme_norm}'")
            
            # Find best matching substring using sliding window
            best_match_phoneme, similarity_score, match_start, match_end = sliding_window_phoneme_match(
                detected_phoneme_raw, expected_phoneme, word_clean
            )
            
            log(f"πŸ”Š Final similarity score: {similarity_score:.3f}")
            
            # Trim audio segment based on best phoneme match position
            trimmed_audio_segment = trim_audio_segment_by_phoneme_position(
                expanded_audio_segment, detected_phoneme_raw, match_start, match_end, word_clean
            )
            
            # Use original WhisperX timing if no trimming was needed, otherwise use trimmed
            if trimmed_audio_segment is None:
                final_audio_segment = whisperx_audio_segment
                log(f"🎡 Using original WhisperX timing (no trimming needed)")
                log(f"   Final segment: WhisperX original ({whisperx_audio_segment.shape[-1]} samples, {whisperx_audio_segment.shape[-1]/sample_rate:.3f}s)")
                log(f"   Segment timing: {start_time:.3f}s - {end_time:.3f}s")
            else:
                final_audio_segment = trimmed_audio_segment
                log(f"🎡 Using trimmed segment from expanded timing")
                log(f"   Final segment: Processed ({trimmed_audio_segment.shape[-1]} samples, {trimmed_audio_segment.shape[-1]/sample_rate:.3f}s)")
                
                # Calculate the actual timing of the processed segment
                final_duration = trimmed_audio_segment.shape[-1] / sample_rate
                expanded_duration = expanded_end - expanded_start
                
                # Calculate trim amounts based on phoneme positions
                # (detected_phoneme_norm was already computed above for this word)
                total_phoneme_len = len(detected_phoneme_norm)
                
                if total_phoneme_len > 0:
                    start_trim_pct = match_start / total_phoneme_len * 0.85  # Apply 85% factor
                    end_trim_pct = (total_phoneme_len - match_end) / total_phoneme_len * 0.85
                    
                    time_trimmed_from_start = expanded_duration * start_trim_pct
                    time_trimmed_from_end = expanded_duration * end_trim_pct
                    
                    final_start_time = expanded_start + time_trimmed_from_start
                    final_end_time = expanded_end - time_trimmed_from_end
                    
                    log(f"   Segment timing: {final_start_time:.3f}s - {final_end_time:.3f}s")
                else:
                    log(f"   Segment timing: {expanded_start:.3f}s - {expanded_end:.3f}s (no phoneme-based calculation)")
            
            log(f"πŸ”Š Audio segments returned to user:")
            log(f"   1️⃣ Expected (TTS): Generated speech")
            log(f"   2️⃣ User audio: {'WhisperX original' if trimmed_audio_segment is None else 'Processed/trimmed'} ({final_audio_segment.shape[-1]} samples)")
            log(f"   3️⃣ WhisperX raw: Original timing ({whisperx_audio_segment.shape[-1]} samples)")
            
            if trimmed_audio_segment is not None and final_audio_segment.shape[-1] != whisperx_audio_segment.shape[-1]:
                sample_diff = final_audio_segment.shape[-1] - whisperx_audio_segment.shape[-1]
                time_diff = sample_diff / sample_rate
                log(f"   πŸ“Š Segment difference: {sample_diff:+d} samples ({time_diff:+.3f}s) processed vs WhisperX")
            
            log(f"πŸ”Š Final similarity score: {similarity_score:.3f}")
            log(f"🎨 Final audio segment samples: {final_audio_segment.shape[-1]} (duration: {final_audio_segment.shape[-1]/sample_rate:.3f}s)")
            log(f"🎀 WhisperX original segment samples: {whisperx_audio_segment.shape[-1]} (duration: {whisperx_audio_segment.shape[-1]/sample_rate:.3f}s)")
            log(f"⏰ Timing comparison:")
            log(f"   WhisperX original: {start_time:.3f}s - {end_time:.3f}s (duration: {end_time - start_time:.3f}s)")
            log(f"   Expanded timing: {expanded_start:.3f}s - {expanded_end:.3f}s (duration: {expanded_end - expanded_start:.3f}s)")
            
            # Store results - now with both original and clean versions
            results.append({
                'word_original': word_original,  # Original with punctuation for display
                'word_clean': word_clean,  # Cleaned version for phoneme processing
                'detected_phoneme': best_match_phoneme,  # Use best matching substring
                'expected_phoneme': expected_phoneme,
                'similarity_score': float(similarity_score),
                'start_time': float(start_time),
                'end_time': float(end_time),
                'whisperx_confidence': float(word_info.get('score', 1.0))
            })
            
            # Prepare audio data with all three segments (use ORIGINAL word for display)
            # All three audio segments will have 0.25s silence padding added automatically
            user_audio_b64 = audio_to_base64(final_audio_segment, sample_rate)  # Padded
            whisperx_audio_b64 = audio_to_base64(whisperx_audio_segment, sample_rate)  # Padded
            expected_audio_b64 = tts_results[i]  # Already padded in generate_tts_audio
            
            audio_data_list.append({
                "word": word_original,  # Original with punctuation for display
                "expected_audio": expected_audio_b64,  # TTS with padding
                "user_audio": user_audio_b64,  # User's pronunciation with padding
                "whisperx_audio": whisperx_audio_b64,  # WhisperX original with padding
                "start_time": float(start_time),
                "end_time": float(end_time),
                "similarity_score": float(similarity_score),
                "detected_phoneme": best_match_phoneme,  # Use best matching substring
                "expected_phoneme": expected_phoneme,
                "whisperx_confidence": float(word_info.get('score', 1.0))
            })
        
        # 7. Format output using the refactored v2 functions
        resolved_output = []
        resolved_colored = []
        
        for result in results:
            output_text, colored_text = format_output_word_v2(
                result['word_original'],     # Pass both versions
                result['word_clean'],
                result['similarity_score'],
                result['detected_phoneme'],
                result['expected_phoneme'],
                similarity
            )
            resolved_output.append(output_text)
            resolved_colored.append(colored_text)
        
        # Clean up temporary file
        os.remove(temp_audio_path)
        
        log("=== WHISPERX ENGLISH-ONLY PHONEME ANALYSIS COMPLETE ===")
        
        return {
            "resolved": " ".join(resolved_output),
            "resolved_colored": " ".join(resolved_colored),
            "audio_data": audio_data_list,
            "debug_info": {
                "total_words": len(words),
                "similarity_threshold": similarity,
                "alignment_method": "WhisperX English-only + Sliding Window",
                "results_summary": [
                    {
                        "word": r['word_original'],
                        "score": float(r['similarity_score']),
                        "detected": r['detected_phoneme'],
                        "expected": r['expected_phoneme'],
                        "whisperx_confidence": r['whisperx_confidence']
                    }
                    for r in results
                ]
            }
        }
    
    except Exception as e:
        log(f"ERROR in transcribe: {str(e)}")
        import traceback
        log(f"Traceback: {traceback.format_exc()}")
        return {
            "resolved": "Error occurred", 
            "resolved_colored": "Error occurred",
            "audio_data": [],
            "debug_info": {"error": str(e)}
        }

@app.get("/")
def root():
    return "Clean Fonetik with WhisperX English-only + Character-Level Feedback running"

@app.post("/api/clear-cache")
def clear_cache():
    global phoneme_cache, tts_cache
    phoneme_cache.clear()
    tts_cache.clear()
    return {"message": "Cache cleared"}