Spaces:
Running
Running
File size: 1,717 Bytes
166575b f2b616f 166575b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 |
# Automatic Speech Recognition (ASR) test sets.
# Maps dataset name -> short identifier (values look like placeholders — TODO confirm).
# NOTE: "asr_datsets" is a historical typo kept for backward compatibility;
# new code should use the correctly spelled alias "asr_datasets" below.
asr_datsets = {
    'LibriSpeech-Test-Clean': 'aa',
    'LibriSpeech-Test-Other': 'bb',
    'Common-Voice-15-En-Test': 'cc',
    'Peoples-Speech-Test': 'dd',
    'GigaSpeech-Test': 'ee',
    'Earnings21-Test': 'ff',
    'Earnings22-Test': 'gg',
    'Tedlium3-Test': 'hh',
    'Tedlium3-Longform-Test': 'ii',
    'IMDA-Part1-ASR-Test': 'jj',
    'IMDA-Part2-ASR-Test': 'kk',
}
# Correctly spelled alias (same object); existing callers of asr_datsets still work.
asr_datasets = asr_datsets
# Spoken Question Answering (SQA) test sets: dataset name -> short identifier.
sqa_datasets = dict([
    ('CN-College-Listen-MCQ-Test', 'aa'),
    ('DREAM-TTS-MCQ-Test', 'bb'),
    ('SLUE-P2-SQA5-Test', 'cc'),
    ('Public-SG-Speech-QA-Test', 'dd'),
    ('Spoken-Squad-v1', 'ee'),
])
# Speech Instruction (SI) test sets: dataset name -> short identifier.
si_datasets = dict([
    ('OpenHermes-Audio-Test', 'aa'),
    ('ALPACA-Audio-Test', 'bb'),
])
# Audio Captioning (AC) test sets: dataset name -> short identifier.
ac_datasets = dict([
    ('WavCaps-Test', 'aa'),
    ('AudioCaps-Test', 'bb'),
])
# Audio-Scene Question Answering (ASQA) test sets: dataset name -> short identifier.
asqa_datasets = dict([
    ('Clotho-AQA-Test', 'aa'),
    ('WavCaps-QA-Test', 'bb'),
    ('AudioCaps-QA-Test', 'cc'),
])
# Emotion Recognition (ER) test sets: dataset name -> short identifier.
er_datasets = dict([
    ('IEMOCAP-Emotion-Test', 'aa'),
    ('MELD-Sentiment-Test', 'bb'),
    ('MELD-Emotion-Test', 'cc'),
])
# Accent Recognition (AR) test sets: dataset name -> short identifier.
# NOTE: "ar_datsets" is a historical typo kept for backward compatibility;
# new code should use the correctly spelled alias "ar_datasets" below.
ar_datsets = {
    'VoxCeleb-Accent-Test': 'aa',
}
# Correctly spelled alias (same object); existing callers of ar_datsets still work.
ar_datasets = ar_datsets
# Gender Recognition (GR) test sets: dataset name -> short identifier.
gr_datasets = dict([
    ('VoxCeleb-Gender-Test', 'aa'),
    ('IEMOCAP-Gender-Test', 'bb'),
])
# Speech Translation (SPT) test sets (CoVoST2 language pairs): name -> short identifier.
spt_datasets = dict([
    ('Covost2-EN-ID-test', 'aa'),
    ('Covost2-EN-ZH-test', 'bb'),
    ('Covost2-EN-TA-test', 'cc'),
    ('Covost2-ID-EN-test', 'dd'),
    ('Covost2-ZH-EN-test', 'ee'),
    ('Covost2-TA-EN-test', 'ff'),
])
# Chinese ASR test sets: dataset name -> short identifier.
cnasr_datasets = dict([
    ('Aishell-ASR-ZH-Test', 'aa'),
])
# Evaluation metrics: metric name -> short identifier
# (values look like placeholders — TODO confirm what they map to).
# Fix: the original closing brace carried a stray trailing " |" (copy/scrape
# artifact) that made the file a SyntaxError; it is removed here.
metrics = {
    'wer': '11',
    'llama3_70b_judge_binary': '22',
    'llama3_70b_judge': '33',
    'meteor': '44',
    'bleu': '55',
}