j committed
Commit 2e37cc0 · 1 Parent(s): 0fe6d3b

first commit

README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: indigo
  colorTo: gray
  sdk: gradio
  sdk_version: 5.5.0
- app_file: app.py
+ app_file: demo/app.py
  pinned: false
  ---

assets/framework.png ADDED
assets/logo.png ADDED
assets/radar_compare_qwen_audio.png ADDED
demo/demo.sh ADDED
@@ -0,0 +1,14 @@
+ echo $CUDA_VISIBLE_DEVICES
+ SERVER_PORT=9001
+ MASTER_ADDR=localhost
+ MASTER_PORT="3${SERVER_PORT}"
+ NNODES=${WORLD_SIZE:-1}
+ NODE_RANK=${RANK:-0}
+ GPUS_PER_NODE=1
+ python -m torch.distributed.launch --use_env \
+     --nproc_per_node $GPUS_PER_NODE --nnodes $NNODES \
+     --node_rank $NODE_RANK \
+     --master_addr=${MASTER_ADDR:-127.0.0.1} \
+     --master_port=$MASTER_PORT \
+     web_demo_audio.py \
+     --server-port ${SERVER_PORT}
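
A side note for readers: `torch.distributed.launch` is deprecated in recent PyTorch releases in favor of `torchrun`. A roughly equivalent single-node invocation (a sketch assuming PyTorch >= 1.10, untested against this Space) would be:

```bash
SERVER_PORT=9001
torchrun --nproc_per_node 1 --nnodes ${WORLD_SIZE:-1} \
    --node_rank ${RANK:-0} \
    --master_addr ${MASTER_ADDR:-127.0.0.1} \
    --master_port "3${SERVER_PORT}" \
    web_demo_audio.py --server-port ${SERVER_PORT}
```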
demo/requirements_web_demo.txt ADDED
@@ -0,0 +1,2 @@
+ gradio==4.31.3
+ modelscope-studio
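
Note that `demo/web_demo_audio.py` also imports `transformers` and `librosa` (and, via transformers, `torch`), which this file does not pin. A fuller local install might look like the following sketch (versions left unpinned; verify against your environment):

```bash
pip install -r demo/requirements_web_demo.txt
pip install transformers librosa torch
```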
demo/web_demo_audio.py ADDED
@@ -0,0 +1,164 @@
+ import gradio as gr
+ import modelscope_studio as mgr
+ import librosa
+ from transformers import AutoProcessor, Qwen2AudioForConditionalGeneration
+ from argparse import ArgumentParser
+
+ DEFAULT_CKPT_PATH = 'Qwen/Qwen2-Audio-7B-Instruct'
+
+
+ def _get_args():
+     parser = ArgumentParser()
+     parser.add_argument("-c", "--checkpoint-path", type=str, default=DEFAULT_CKPT_PATH,
+                         help="Checkpoint name or path, default to %(default)r")
+     parser.add_argument("--cpu-only", action="store_true", help="Run demo with CPU only")
+     parser.add_argument("--inbrowser", action="store_true", default=False,
+                         help="Automatically launch the interface in a new tab on the default browser.")
+     parser.add_argument("--server-port", type=int, default=8000,
+                         help="Demo server port.")
+     parser.add_argument("--server-name", type=str, default="127.0.0.1",
+                         help="Demo server name.")
+
+     args = parser.parse_args()
+     return args
+
+
+ def add_text(chatbot, task_history, input):
+     text_content = input.text
+     content = []
+     if len(input.files) > 0:
+         for i in input.files:
+             content.append({'type': 'audio', 'audio_url': i.path})
+     if text_content:
+         content.append({'type': 'text', 'text': text_content})
+     task_history.append({"role": "user", "content": content})
+
+     chatbot.append([{
+         "text": input.text,
+         "files": input.files,
+     }, None])
+     return chatbot, task_history, None
+
+
+ def add_file(chatbot, task_history, audio_file):
+     """Add audio file to the chat history."""
+     task_history.append({"role": "user", "content": [{"audio": audio_file.name}]})
+     chatbot.append((f"[Audio file: {audio_file.name}]", None))
+     return chatbot, task_history
+
+
+ def reset_user_input():
+     """Reset the user input field."""
+     return gr.Textbox.update(value='')
+
+
+ def reset_state(task_history):
+     """Reset the chat history."""
+     return [], []
+
+
+ def regenerate(chatbot, task_history):
+     """Regenerate the last bot response."""
+     if task_history and task_history[-1]['role'] == 'assistant':
+         task_history.pop()
+         chatbot.pop()
+     if task_history:
+         chatbot, task_history = predict(chatbot, task_history)
+     return chatbot, task_history
+
+
+ def predict(chatbot, task_history):
+     """Generate a response from the model."""
+     print(f"{task_history=}")
+     print(f"{chatbot=}")
+     text = processor.apply_chat_template(task_history, add_generation_prompt=True, tokenize=False)
+     audios = []
+     for message in task_history:
+         if isinstance(message["content"], list):
+             for ele in message["content"]:
+                 if ele["type"] == "audio":
+                     audios.append(
+                         librosa.load(ele['audio_url'], sr=processor.feature_extractor.sampling_rate)[0]
+                     )
+
+     if len(audios) == 0:
+         audios = None
+     print(f"{text=}")
+     print(f"{audios=}")
+     inputs = processor(text=text, audios=audios, return_tensors="pt", padding=True)
+     if not _get_args().cpu_only:
+         inputs["input_ids"] = inputs.input_ids.to("cuda")
+
+     generate_ids = model.generate(**inputs, max_length=256)
+     generate_ids = generate_ids[:, inputs.input_ids.size(1):]
+
+     response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+     print(f"{response=}")
+     task_history.append({'role': 'assistant',
+                          'content': response})
+     chatbot.append((None, response))  # Add the response to chatbot
+     return chatbot, task_history
+
+
+ def _launch_demo(args):
+     with gr.Blocks() as demo:
+         gr.Markdown(
+             """<p align="center"><img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/assets/blog/qwenaudio/qwen2audio_logo.png" style="height: 80px"/><p>""")
+         gr.Markdown("""<center><font size=8>Qwen2-Audio-Instruct Bot</center>""")
+         gr.Markdown(
+             """\
+ <center><font size=3>This WebUI is based on Qwen2-Audio-Instruct, developed by Alibaba Cloud. \
+ (本WebUI基于Qwen2-Audio-Instruct打造,实现聊天机器人功能。)</center>""")
+         gr.Markdown("""\
+ <center><font size=4>Qwen2-Audio <a href="https://modelscope.cn/models/qwen/Qwen2-Audio-7B">🤖 </a>
+ | <a href="https://huggingface.co/Qwen/Qwen2-Audio-7B">🤗</a>&nbsp |
+ Qwen2-Audio-Instruct <a href="https://modelscope.cn/models/qwen/Qwen2-Audio-7B-Instruct">🤖 </a> |
+ <a href="https://huggingface.co/Qwen/Qwen2-Audio-7B-Instruct">🤗</a>&nbsp |
+ &nbsp<a href="https://github.com/QwenLM/Qwen2-Audio">Github</a></center>""")
+         chatbot = mgr.Chatbot(label='Qwen2-Audio-7B-Instruct', elem_classes="control-height", height=750)
+
+         user_input = mgr.MultimodalInput(
+             interactive=True,
+             sources=['microphone', 'upload'],
+             submit_button_props=dict(value="🚀 Submit (发送)"),
+             upload_button_props=dict(value="📁 Upload (上传文件)", show_progress=True),
+         )
+         task_history = gr.State([])
+
+         with gr.Row():
+             empty_bin = gr.Button("🧹 Clear History (清除历史)")
+             regen_btn = gr.Button("🤔️ Regenerate (重试)")
+
+         user_input.submit(fn=add_text,
+                           inputs=[chatbot, task_history, user_input],
+                           outputs=[chatbot, task_history, user_input]).then(
+             predict, [chatbot, task_history], [chatbot, task_history], show_progress=True
+         )
+         empty_bin.click(reset_state, outputs=[chatbot, task_history], show_progress=True)
+         regen_btn.click(regenerate, [chatbot, task_history], [chatbot, task_history], show_progress=True)
+
+     demo.queue().launch(
+         share=False,
+         inbrowser=args.inbrowser,
+         server_port=args.server_port,
+         server_name=args.server_name,
+     )
+
+
+ if __name__ == "__main__":
+     args = _get_args()
+     if args.cpu_only:
+         device_map = "cpu"
+     else:
+         device_map = "auto"
+
+     model = Qwen2AudioForConditionalGeneration.from_pretrained(
+         args.checkpoint_path,
+         torch_dtype="auto",
+         device_map=device_map,
+         resume_download=True,
+     ).eval()
+     model.generation_config.max_new_tokens = 2048  # For chat.
+     print("generation_config", model.generation_config)
+     processor = AutoProcessor.from_pretrained(args.checkpoint_path, resume_download=True)
+     _launch_demo(args)
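
For reference, the inference path that `predict()` wraps can be exercised without the Gradio plumbing. A minimal sketch (assuming the `Qwen/Qwen2-Audio-7B-Instruct` checkpoint and a hypothetical local `sample.wav`; this snippet is not part of the commit):

```python
import librosa
from transformers import AutoProcessor, Qwen2AudioForConditionalGeneration

processor = AutoProcessor.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct")
model = Qwen2AudioForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2-Audio-7B-Instruct", torch_dtype="auto", device_map="auto"
).eval()

# One user turn with an audio file plus a text question, mirroring what
# add_text() appends to task_history above.
conversation = [
    {"role": "user", "content": [
        {"type": "audio", "audio_url": "sample.wav"},  # hypothetical local file
        {"type": "text", "text": "What does the speaker say?"},
    ]},
]
text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
audio, _ = librosa.load("sample.wav", sr=processor.feature_extractor.sampling_rate)

inputs = processor(text=text, audios=[audio], return_tensors="pt", padding=True).to(model.device)
generate_ids = model.generate(**inputs, max_length=256)
generate_ids = generate_ids[:, inputs.input_ids.size(1):]  # strip the prompt tokens
print(processor.batch_decode(generate_ids, skip_special_tokens=True)[0])
```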
eval_audio/EVALUATION.md ADDED
@@ -0,0 +1,176 @@
+ ## Evaluation
+
+ ### Dependencies
+
+ ```bash
+ apt-get update
+ apt-get install openjdk-8-jdk
+ pip install evaluate
+ pip install sacrebleu==1.5.1
+ pip install edit_distance
+ pip install editdistance
+ pip install jiwer
+ pip install scikit-image
+ pip install textdistance
+ pip install sed_eval
+ pip install more_itertools
+ pip install zhconv
+ ```
+
+ ### ASR
+
+ - Data
+
+ > LibriSpeech: https://www.openslr.org/12
+
+ > Aishell2: https://www.aishelltech.com/aishell_2
+
+ > Common Voice 15: https://commonvoice.mozilla.org/en/datasets
+
+ > FLEURS: https://huggingface.co/datasets/google/fleurs
+
+ ```bash
+ mkdir -p data/asr && cd data/asr
+
+ # download audios from the above links
+
+ # download converted files
+ wget https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/evaluation/librispeech_eval.jsonl
+ wget https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/evaluation/aishell2_eval.jsonl
+ wget https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/evaluation/cv15_asr_en_eval.jsonl
+ wget https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/evaluation/cv15_asr_zh_eval.jsonl
+ wget https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/evaluation/cv15_asr_yue_eval.jsonl
+ wget https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/evaluation/cv15_asr_fr_eval.jsonl
+ wget https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/evaluation/fleurs_asr_zh_eval.jsonl
+ cd ../..
+ ```
+
+ - Evaluate
+
+ ```bash
+ for ds in "librispeech" "aishell2" "cv15_en" "cv15_zh" "cv15_yue" "cv15_fr" "fluers_zh"
+ do
+     python -m torch.distributed.launch --use_env \
+         --nproc_per_node ${NPROC_PER_NODE:-8} --nnodes 1 \
+         evaluate_asr.py \
+         --checkpoint $checkpoint \
+         --dataset $ds \
+         --batch-size 20 \
+         --num-workers 2
+ done
+ ```
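
The ASR task is scored with WER/CER over the decoded outputs. As a rough sketch of that scoring step using the `jiwer` package installed above (the results-file name and the `gt`/`response` field names are assumptions, not taken from `evaluate_asr.py`):

```python
import json

import jiwer
from whisper_normalizer.english import EnglishTextNormalizer

normalizer = EnglishTextNormalizer()

refs, hyps = [], []
with open("librispeech_results.jsonl") as f:  # hypothetical results dump
    for line in f:
        item = json.loads(line)
        refs.append(normalizer(item["gt"]))        # assumed field name
        hyps.append(normalizer(item["response"]))  # assumed field name

print(f"WER: {jiwer.wer(refs, hyps):.2%}")
```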
+ ### S2TT
+
+ - Data
+
+ > CoVoST 2: https://github.com/facebookresearch/covost
+
+ ```bash
+ mkdir -p data/st && cd data/st
+
+ # download audios from https://commonvoice.mozilla.org/en/datasets
+
+ # download converted files
+ wget https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/evaluation/covost2_eval.jsonl
+
+ cd ../..
+ ```
+
+ - Evaluate
+
+ ```bash
+ ds="covost2"
+ python -m torch.distributed.launch --use_env \
+     --nproc_per_node ${NPROC_PER_NODE:-8} --nnodes 1 \
+     evaluate_st.py \
+     --checkpoint $checkpoint \
+     --dataset $ds \
+     --batch-size 8 \
+     --num-workers 2
+ ```
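
S2TT is conventionally scored with BLEU; a minimal sketch with the `sacrebleu==1.5.1` pinned in the dependencies (file name and field names are hypothetical, not taken from `evaluate_st.py`):

```python
import json

import sacrebleu

refs, hyps = [], []
with open("covost2_results.jsonl") as f:  # hypothetical results dump
    for line in f:
        item = json.loads(line)
        refs.append(item["gt"])        # assumed field name
        hyps.append(item["response"])  # assumed field name

# corpus_bleu takes the hypotheses and a list of reference streams
bleu = sacrebleu.corpus_bleu(hyps, [refs])
print(f"BLEU: {bleu.score:.2f}")
```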
+
+ ### SER
+
+ - Data
+
+ > MELD: https://affective-meld.github.io/
+
+ ```bash
+ mkdir -p data/ser && cd data/ser
+
+ # download MELD datasets from the above link
+
+ # download converted files
+ wget https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/evaluation/meld_eval.jsonl
+
+ cd ../..
+ ```
+
+ - Evaluate
+
+ ```bash
+ ds="meld"
+ python -m torch.distributed.launch --use_env \
+     --nproc_per_node ${NPROC_PER_NODE:-8} --nnodes 1 \
+     evaluate_emotion.py \
+     --checkpoint $checkpoint \
+     --dataset $ds \
+     --batch-size 8 \
+     --num-workers 2
+ ```
+
+ ### VSC
+
+ - Data
+
+ > VocalSound: https://github.com/YuanGongND/vocalsound
+
+ ```bash
+ mkdir -p data/vsc && cd data/vsc
+
+ # download dataset from the above link
+ # download converted files
+ wget https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/evaluation/vocalsound_eval.jsonl
+
+ cd ../..
+ ```
+
+ - Evaluate
+
+ ```bash
+ ds="vocalsound"
+ python -m torch.distributed.launch --use_env \
+     --nproc_per_node ${NPROC_PER_NODE:-8} --nnodes 1 \
+     evaluate_aqa.py \
+     --checkpoint $checkpoint \
+     --dataset $ds \
+     --batch-size 8 \
+     --num-workers 2
+ ```
+
+ ### AIR-BENCH
+
+ - Data
+
+ > AIR-BENCH: https://huggingface.co/datasets/qyang1021/AIR-Bench-Dataset
+
+ ```bash
+ mkdir -p data/airbench && cd data/airbench
+
+ # download dataset from the above link
+ # download converted files
+ wget https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/evaluation/airbench_level_3_eval.jsonl
+
+ cd ../..
+ ```
+
+ - Evaluate
+
+ ```bash
+ ds="airbench_level3"
+ python -m torch.distributed.launch --use_env \
+     --nproc_per_node ${NPROC_PER_NODE:-8} --nnodes 1 \
+     evaluate_chat.py \
+     --checkpoint $checkpoint \
+     --dataset $ds \
+     --batch-size 8 \
+     --num-workers 2
+ ```
+
+ ### Acknowledgement
+
+ Part of this code is borrowed from [Whisper](https://github.com/openai/whisper) and [speechio](https://github.com/speechio/chinese_text_normalization); thanks for their wonderful work.
eval_audio/cn_tn.py ADDED
@@ -0,0 +1,1204 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # coding=utf-8
3
+ # copied from https://github.com/speechio/chinese_text_normalization/blob/master/python/cn_tn.py
4
+ # Authors:
5
+ # 2019.5 Zhiyang Zhou (https://github.com/Joee1995/chn_text_norm.git)
6
+ # 2019.9 - 2022 Jiayu DU
7
+ #
8
+ # requirements:
9
+ # - python 3.X
10
+ # notes: python 2.X WILL fail or produce misleading results
11
+
12
+ import sys, os, argparse
13
+ import string, re
14
+ import csv
15
+
16
+ # ================================================================================ #
17
+ # basic constant
18
+ # ================================================================================ #
19
+ CHINESE_DIGIS = u'零一二三四五六七八九'
20
+ BIG_CHINESE_DIGIS_SIMPLIFIED = u'零壹贰叁肆伍陆柒捌玖'
21
+ BIG_CHINESE_DIGIS_TRADITIONAL = u'零壹貳參肆伍陸柒捌玖'
22
+ SMALLER_BIG_CHINESE_UNITS_SIMPLIFIED = u'十百千万'
23
+ SMALLER_BIG_CHINESE_UNITS_TRADITIONAL = u'拾佰仟萬'
24
+ LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'亿兆京垓秭穰沟涧正载'
25
+ LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'億兆京垓秭穰溝澗正載'
26
+ SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'十百千万'
27
+ SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'拾佰仟萬'
28
+
29
+ ZERO_ALT = u'〇'
30
+ ONE_ALT = u'幺'
31
+ TWO_ALTS = [u'两', u'兩']
32
+
33
+ POSITIVE = [u'正', u'正']
34
+ NEGATIVE = [u'负', u'負']
35
+ POINT = [u'点', u'點']
36
+ # PLUS = [u'加', u'加']
37
+ # SIL = [u'杠', u'槓']
38
+
39
+ FILLER_CHARS = ['呃', '啊']
40
+
41
+ ER_WHITELIST = '(儿女|儿子|儿孙|女儿|儿媳|妻儿|' \
42
+ '胎儿|婴儿|新生儿|婴幼儿|幼儿|少儿|小儿|儿歌|儿童|儿科|托儿所|孤儿|' \
43
+ '儿戏|儿化|台儿庄|鹿儿岛|正儿八经|吊儿郎当|生儿育女|托儿带女|养儿防老|痴儿呆女|' \
44
+ '佳儿佳妇|儿怜兽扰|儿无常父|儿不嫌母丑|儿行千里母担忧|儿大不由爷|苏乞儿)'
45
+ ER_WHITELIST_PATTERN = re.compile(ER_WHITELIST)
46
+
47
+ # 中文数字系统类型
48
+ NUMBERING_TYPES = ['low', 'mid', 'high']
49
+
50
+ CURRENCY_NAMES = '(人民币|美元|日元|英镑|欧元|马克|法郎|加拿大元|澳元|港币|先令|芬兰马克|爱尔兰镑|' \
51
+ '里拉|荷兰盾|埃斯库多|比塞塔|印尼盾|林吉特|新西兰元|比索|卢布|新加坡元|韩元|泰铢)'
52
+ CURRENCY_UNITS = '((亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)'
53
+ COM_QUANTIFIERS = '(匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|' \
54
+ '砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|' \
55
+ '针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|' \
56
+ '毫|厘|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|' \
57
+ '盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|旬|' \
58
+ '纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块)'
59
+
60
+
61
+ # Punctuation information are based on Zhon project (https://github.com/tsroten/zhon.git)
62
+ CN_PUNCS_STOP = '!?。。'
63
+ CN_PUNCS_NONSTOP = '"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏·〈〉-'
64
+ CN_PUNCS = CN_PUNCS_STOP + CN_PUNCS_NONSTOP
65
+
66
+ PUNCS = CN_PUNCS + string.punctuation
67
+ PUNCS_TRANSFORM = str.maketrans(PUNCS, ' ' * len(PUNCS), '') # replace puncs with space
68
+
69
+
70
+ # https://zh.wikipedia.org/wiki/全行和半行
71
+ QJ2BJ = {
72
+ ' ': ' ',
73
+ '!': '!',
74
+ '"': '"',
75
+ '#': '#',
76
+ '$': '$',
77
+ '%': '%',
78
+ '&': '&',
79
+ ''': "'",
80
+ '(': '(',
81
+ ')': ')',
82
+ '*': '*',
83
+ '+': '+',
84
+ ',': ',',
85
+ '-': '-',
86
+ '.': '.',
87
+ '/': '/',
88
+ '0': '0',
89
+ '1': '1',
90
+ '2': '2',
91
+ '3': '3',
92
+ '4': '4',
93
+ '5': '5',
94
+ '6': '6',
95
+ '7': '7',
96
+ '8': '8',
97
+ '9': '9',
98
+ ':': ':',
99
+ ';': ';',
100
+ '<': '<',
101
+ '=': '=',
102
+ '>': '>',
103
+ '?': '?',
104
+ '@': '@',
105
+ 'A': 'A',
106
+ 'B': 'B',
107
+ 'C': 'C',
108
+ 'D': 'D',
109
+ 'E': 'E',
110
+ 'F': 'F',
111
+ 'G': 'G',
112
+ 'H': 'H',
113
+ 'I': 'I',
114
+ 'J': 'J',
115
+ 'K': 'K',
116
+ 'L': 'L',
117
+ 'M': 'M',
118
+ 'N': 'N',
119
+ 'O': 'O',
120
+ 'P': 'P',
121
+ 'Q': 'Q',
122
+ 'R': 'R',
123
+ 'S': 'S',
124
+ 'T': 'T',
125
+ 'U': 'U',
126
+ 'V': 'V',
127
+ 'W': 'W',
128
+ 'X': 'X',
129
+ 'Y': 'Y',
130
+ 'Z': 'Z',
131
+ '[': '[',
132
+ '\': '\\',
133
+ ']': ']',
134
+ '^': '^',
135
+ '_': '_',
136
+ '`': '`',
137
+ 'a': 'a',
138
+ 'b': 'b',
139
+ 'c': 'c',
140
+ 'd': 'd',
141
+ 'e': 'e',
142
+ 'f': 'f',
143
+ 'g': 'g',
144
+ 'h': 'h',
145
+ 'i': 'i',
146
+ 'j': 'j',
147
+ 'k': 'k',
148
+ 'l': 'l',
149
+ 'm': 'm',
150
+ 'n': 'n',
151
+ 'o': 'o',
152
+ 'p': 'p',
153
+ 'q': 'q',
154
+ 'r': 'r',
155
+ 's': 's',
156
+ 't': 't',
157
+ 'u': 'u',
158
+ 'v': 'v',
159
+ 'w': 'w',
160
+ 'x': 'x',
161
+ 'y': 'y',
162
+ 'z': 'z',
163
+ '{': '{',
164
+ '|': '|',
165
+ '}': '}',
166
+ '~': '~',
167
+ }
168
+ QJ2BJ_TRANSFORM = str.maketrans(''.join(QJ2BJ.keys()), ''.join(QJ2BJ.values()), '')
169
+
170
+
171
+ # 2013 China National Standard: https://zh.wikipedia.org/wiki/通用规范汉字表, raw resources:
172
+ # https://github.com/mozillazg/pinyin-data/blob/master/kMandarin_8105.txt with 8105 chinese chars in total
173
+ CN_CHARS_COMMON = (
174
+ '一丁七万丈三上下不与丏丐丑专且丕世丘丙业丛东丝丞丢两严丧个丫中丰串临丸丹为主丽举'
175
+ '乂乃久么义之乌乍乎乏乐乒乓乔乖乘乙乜九乞也习乡书乩买乱乳乸乾了予争事二亍于亏云互'
176
+ '亓五井亘亚些亟亡亢交亥亦产亨亩享京亭亮亲亳亵亶亸亹人亿什仁仂仃仄仅仆仇仉今介仍从'
177
+ '仑仓仔仕他仗付仙仝仞仟仡代令以仨仪仫们仰仲仳仵件价任份仿企伈伉伊伋伍伎伏伐休众优'
178
+ '伙会伛伞伟传伢伣伤伥伦伧伪伫伭伯估伲伴伶伸伺似伽伾佁佃但位低住佐佑体何佖佗佘余佚'
179
+ '佛作佝佞佟你佣佤佥佩佬佯佰佳佴佶佸佺佻佼佽佾使侁侂侃侄侈侉例侍侏侑侔侗侘供依侠侣'
180
+ '侥侦侧侨侩侪侬侮侯侴侵侹便促俄俅俊俍俎俏俐俑俗俘俙俚俜保俞俟信俣俦俨俩俪俫俭修俯'
181
+ '俱俳俵俶俸俺俾倌倍倏倒倓倔倕倘候倚倜倞借倡倥倦倧倨倩倪倬倭倮倴债倻值倾偁偃假偈偌'
182
+ '偎偏偓偕做停偡健偬偭偰偲偶偷偻偾偿傀傃傅傈傉傍傒傕傣傥傧储傩催傲傺傻僇僎像僔僖僚'
183
+ '僦僧僬僭僮僰僳僵僻儆儇儋儒儡儦儳儴儿兀允元兄充兆先光克免兑兔兕兖党兜兢入全八公六'
184
+ '兮兰共关兴兵其具典兹养兼兽冀冁内冈冉册再冏冒冔冕冗写军农冠冢冤冥冬冮冯冰冱冲决况'
185
+ '冶冷冻冼冽净凄准凇凉凋凌减凑凓凘凛凝几凡凤凫凭凯凰凳凶凸凹出击凼函凿刀刁刃分切刈'
186
+ '刊刍刎刑划刖列刘则刚创初删判刨利别刬刭刮到刳制刷券刹刺刻刽刿剀剁剂剃剅削剋剌前剐'
187
+ '剑剔剕剖剜剞剟剡剥剧剩剪副割剽剿劁劂劄劈劐劓力劝办功加务劢劣动助努劫劬劭励劲劳劼'
188
+ '劾势勃勇勉勋勍勐勒勔勖勘勚募勠勤勰勺勾勿匀包匆匈匍匏匐匕化北匙匜匝匠匡匣匦匪匮匹'
189
+ '区医匼匾匿十千卅升午卉半华协卑卒卓单卖南博卜卞卟占卡卢卣卤卦卧卫卬卮卯印危即却卵'
190
+ '卷卸卺卿厂厄厅历厉压厌厍厕厖厘厚厝原厢厣厥厦厨厩厮去厾县叁参叆叇又叉及友双反发叔'
191
+ '叕取受变叙叚叛叟叠口古句另叨叩只叫召叭叮可台叱史右叵叶号司叹叻叼叽吁吃各吆合吉吊'
192
+ '同名后吏吐向吒吓吕吖吗君吝吞吟吠吡吣否吧吨吩含听吭吮启吱吲吴吵吸吹吻吼吽吾呀呃呆'
193
+ '呇呈告呋呐呒呓呔呕呖呗员呙呛呜呢呣呤呦周呱呲味呵呶呷呸呻呼命咀咂咄咆咇咉咋和咍咎'
194
+ '咏咐咒咔咕咖咙咚咛咝咡咣咤咥咦咧咨咩咪咫咬咯咱咳咴咸咺咻咽咿哀品哂哃哄哆哇哈哉哌'
195
+ '响哎哏哐哑哒哓哔哕哗哙哚哝哞哟哢哥哦哧哨哩哪哭哮哱哲哳哺哼哽哿唁唆唇唉唏唐唑唔唛'
196
+ '唝唠唢唣唤唧唪唬售唯唰唱唳唵唷唼唾唿啁啃啄商啉啊啐啕啖啜啡啤啥啦啧啪啫啬啭啮啰啴'
197
+ '啵啶啷啸啻啼啾喀喁喂喃善喆喇喈喉喊喋喏喑喔喘喙喜喝喟喤喧喱喳喵喷喹喻喽喾嗄嗅嗉嗌'
198
+ '嗍嗐嗑嗒嗓嗔嗖嗜嗝嗞嗟嗡嗣嗤嗥嗦嗨嗪嗫嗬嗯嗲嗳嗵嗷嗽嗾嘀嘁嘈嘉嘌嘎嘏嘘嘚嘛嘞嘟嘡'
199
+ '嘣嘤嘧嘬嘭嘱嘲嘴嘶嘹嘻嘿噀噂噇噌噍噎噔噗噘噙噜噢噤器噩噪噫噬噱噶噻噼嚄嚅嚆嚎嚏嚓'
200
+ '嚚嚣嚭嚯嚷嚼囊囔囚四回囟因囡团囤囫园困囱围囵囷囹固国图囿圃圄圆圈圉圊圌圐圙圜土圢'
201
+ '圣在圩圪圫圬圭圮圯地圲圳圹场圻圾址坂均坉坊坋坌坍坎坏坐坑坒块坚坛坜坝坞坟坠坡坤坥'
202
+ '坦坨坩坪坫坬坭坯坰坳坷坻坼坽垂垃垄垆垈型垌垍垎垏垒垓垕垙垚垛垞垟垠垡垢垣垤垦垧垩'
203
+ '垫垭垮垯垱垲垴垵垸垺垾垿埂埃埆埇埋埌城埏埒埔埕埗埘埙埚埝域埠埤埪埫埭埯埴埵埸培基'
204
+ '埼埽堂堃堆堇堉堋堌堍堎堐堑堕堙堞堠堡堤堧堨堪堰堲堵堼堽堾塄塅塆塌塍塑塔塘塝塞塥填'
205
+ '塬塱塾墀墁境墅墈墉墐墒墓墕墘墙墚增墟墡墣墦墨墩墼壁壅壑壕壤士壬壮声壳壶壸壹处备复'
206
+ '夏夐夔夕外夙多夜够夤夥大天太夫夬夭央夯失头夷夸夹夺夼奁奂奄奇奈奉奋奎奏契奓奔奕奖'
207
+ '套奘奚奠奡奢奥奭女奴奶奸她好妁如妃妄妆妇妈妊妍妒妓妖妗妘妙妞妣妤妥妧妨妩妪妫妭妮'
208
+ '妯妲妹妻妾姆姈姊始姐姑姒姓委姗姘姚姜姝姞姣姤姥姨姬姮姱姶姹姻姽姿娀威娃娄娅娆娇娈'
209
+ '娉娌娑娓娘娜娟娠娣娥娩娱娲娴娵娶娼婀婆婉婊婌婍婕婘婚婞婠婢婤婧婪婫婳婴婵婶婷婺婻'
210
+ '婼婿媂媄媆媒媓媖媚媛媞媪媭媱媲媳媵媸媾嫁嫂嫄嫉嫌嫒嫔嫕嫖嫘嫚嫜嫠嫡嫣嫦嫩嫪嫫嫭嫱'
211
+ '嫽嬉嬖嬗嬛嬥嬬嬴嬷嬿孀孅子孑孓孔孕孖字存孙孚孛孜孝孟孢季孤孥学孩孪孬孰孱孳孵孺孽'
212
+ '宁它宄宅宇守安宋完宏宓宕宗官宙定宛宜宝实宠审客宣室宥宦宧宪宫宬宰害宴宵家宸容宽宾'
213
+ '宿寁寂寄寅密寇富寐寒寓寝寞察寡寤寥寨寮寰寸对寺寻导寿封射将尉尊小少尔尕尖尘尚尜尝'
214
+ '尢尤尥尧尨尪尬就尴尸尹尺尻尼尽尾尿局屁层屃居屈屉届屋屎屏屐屑展屙属屠屡屣履屦屯山'
215
+ '屹屺屼屾屿岁岂岈岊岌岍岐岑岔岖岗岘岙岚岛岜岞岠岢岣岨岩岫岬岭岱岳岵岷岸岽岿峁峂峃'
216
+ '峄峋峒峗峘峙峛峡峣峤峥峦峧峨峪峭峰峱峻峿崀崁崂崃崄崆崇崌崎崒崔崖崚崛崞崟崡崤崦崧'
217
+ '崩崭崮崴崶崽崾崿嵁嵅嵇嵊嵋嵌嵎嵖嵘嵚嵛嵝嵩嵫嵬嵯嵲嵴嶂嶅嶍嶒嶓嶙嶝嶟嶦嶲嶷巅巇巉'
218
+ '巍川州巡巢工左巧巨巩巫差巯己已巳巴巷巽巾币市布帅帆师希帏帐帑帔帕帖帘帙帚帛帜帝帡'
219
+ '带帧帨席帮帱帷常帻帼帽幂幄幅幌幔幕幖幛幞幡幢幪干平年并幸幺幻幼幽广庄庆庇床庋序庐'
220
+ '庑库应底庖店庙庚府庞废庠庤庥度座庭庱庳庵庶康庸庹庼庾廆廉廊廋廑廒廓廖廙廛廨廪延廷'
221
+ '建廿开弁异弃弄弆弇弈弊弋式弑弓引弗弘弛弟张弢弥弦弧弨弩弭弯弱弶弸弹强弼彀归当录彖'
222
+ '彗彘彝彟形彤彦彧彩彪彬彭彰影彳彷役彻彼往征徂径待徇很徉徊律徐徒徕得徘徙徛徜御徨循'
223
+ '徭微徵德徼徽心必忆忉忌忍忏忐忑忒忖志忘忙忝忞忠忡忤忧忪快忭忮忱忳念忸忺忻忽忾忿怀'
224
+ '态怂怃怄怅怆怊怍怎怏怒怔怕怖怙怛怜思怠怡急怦性怨怩怪怫怯怵总怼怿恁恂恃恋恍恐恒恓'
225
+ '恔恕恙恚恝恢恣恤恧恨恩恪恫恬恭息恰恳恶恸恹恺恻恼恽恿悃悄悆悈悉悌悍悒悔悖悚悛悝悟'
226
+ '悠悢患悦您悫悬悭悯悰悱悲悴悸悻悼情惆惇惊惋惎惑惔惕惘惙惚惛惜惝惟惠惦惧惨惩惫惬惭'
227
+ '惮惯惰想惴惶惹惺愀愁愃愆愈愉愍愎意愐愔愕愚感愠愣愤愦愧愫愭愿慆慈慊慌慎慑慕慝慢慥'
228
+ '慧慨慬慭慰慵慷憋憎憔憕憙憧憨憩憬憭憷憺憾懂懈懊懋懑懒懔懦懵懿戆戈戊戋戌戍戎戏成我'
229
+ '戒戕或戗战戚戛戟戡戢戣戤戥截戬戭戮戳戴户戽戾房所扁扂扃扅扆扇扈扉扊手才扎扑扒打扔'
230
+ '托扛扞扣扦执扩扪扫扬扭扮扯扰扳扶批扺扼扽找承技抃抄抉把抑抒抓抔投抖抗折抚抛抟抠抡'
231
+ '抢护报抨披抬抱抵抹抻押抽抿拂拃拄担拆拇拈拉拊拌拍拎拐拒拓拔拖拗拘拙招拜拟拢拣拤拥'
232
+ '拦拧拨择括拭拮拯拱拳拴拶拷拼拽拾拿持挂指挈按挎挑挓挖挚挛挝挞挟挠挡挣挤挥挦挨挪挫'
233
+ '振挲挹挺挽捂捃捅捆捉捋捌捍捎捏捐捕捞损捡换捣捧捩捭据捯捶捷捺捻捽掀掂掇授掉掊掌掎'
234
+ '掏掐排掖掘掞掠探掣接控推掩措掬掭掮掰掳掴掷掸掺掼掾揄揆揉揍描提插揕揖揠握揣揩揪揭'
235
+ '揳援揶揸揽揿搀搁搂搅搋搌搏搐搒搓搔搛搜搞搠搡搦搪搬搭搴携搽摁摄摅摆摇摈摊摏摒摔摘'
236
+ '摛摞摧摩摭摴摸摹摽撂撄撅撇撑撒撕撖撙撞撤撩撬播撮撰撵撷撸撺撼擀擂擅操擎擐擒擘擞擢'
237
+ '擤擦擿攀攉攒攘攥攫攮支收攸改攻攽放政故效敉敌敏救敔敕敖教敛敝敞敢散敦敩敫敬数敲整'
238
+ '敷文斋斌斐斑斓斗料斛斜斝斟斠斡斤斥斧斩斫断斯新斶方於施旁旃旄旅旆旋旌旎族旐旒旖旗'
239
+ '旞无既日旦旧旨早旬旭旮旯旰旱旴旵时旷旸旺旻旿昀昂昃昄昆昇昈昉昊昌明昏昒易昔昕昙昝'
240
+ '星映昡昣昤春昧昨昪昫昭是昱昳昴昵昶昺昼昽显晁晃晅晊晋晌晏晐晒晓晔晕晖晗晙晚晞晟晡'
241
+ '晢晤晦晨晪晫普景晰晱晴晶晷智晾暂暄暅暇暌暑暕暖暗暝暧暨暮暲暴暵暶暹暾暿曈曌曙曛曜'
242
+ '曝曦曩曰曲曳更曷曹曼曾替最月有朋服朏朐朓朔朕朗望朝期朦木未末本札术朱朳朴朵朸机朽'
243
+ '杀杂权杄杆杈杉杌李杏材村杓杕杖杙杜杞束杠条来杧杨杩杪杭杯杰杲杳杵杷杻杼松板极构枅'
244
+ '枇枉枋枍析枕林枘枚果枝枞枢枣枥枧枨枪枫枭枯枰枲枳枵架枷枸枹柁柃柄柈柊柏某柑柒染柔'
245
+ '柖柘柙柚柜柝柞柠柢查柩柬柯柰柱柳柴柷柽柿栀栅标栈栉栊栋栌栎栏栐树栒栓栖栗栝栟校栩'
246
+ '株栲栳栴样核根栻格栽栾桀桁桂桃桄桅框案桉桊桌桎桐桑桓桔桕桠桡桢档桤桥桦桧桨桩桫桯'
247
+ '桲桴桶桷桹梁梃梅梆梌梏梓梗梠梢梣梦梧梨梭梯械梳梴梵梼梽梾梿检棁棂棉棋棍棐棒棓棕棘'
248
+ '棚棠棣棤棨棪棫棬森棰棱棵棹棺棻棼棽椀椁椅椆椋植椎椐椑椒椓椟椠椤椪椭椰椴椸椹椽椿楂'
249
+ '楒楔楗楙楚楝楞楠楣楦楩楪楫楮楯楷楸楹楼概榃榄榅榆榇榈榉榍榑榔榕榖榛榜榧榨榫榭榰榱'
250
+ '榴榷榻槁槃槊槌槎槐槔槚槛槜槟槠槭槱槲槽槿樊樗樘樟模樨横樯樱樵樽樾橄橇橐橑橘橙橛橞'
251
+ '橡橥橦橱橹橼檀檄檎檐檑檗檞檠檩檫檬櫆欂欠次欢欣欤欧欲欸欹欺欻款歃歅歆歇歉歌歙止正'
252
+ '此步武歧歪歹死歼殁殂殃殄殆殇殉殊残殍殒殓殖殚殛殡殣殪殳殴段殷殿毁毂毅毋毌母每毐毒'
253
+ '毓比毕毖毗毙毛毡毪毫毯毳毵毹毽氅氆氇氍氏氐民氓气氕氖氘氙氚氛氟氡氢氤氦氧氨氩氪氮'
254
+ '氯氰氲水永氾氿汀汁求汆汇汈汉汊汋汐汔汕汗汛汜汝汞江池污汤汧汨汩汪汫汭汰汲汴汶汹汽'
255
+ '汾沁沂沃沄沅沆沇沈沉沌沏沐沓沔沘沙沚沛沟没沣沤沥沦沧沨沩沪沫沭沮沱河沸油沺治沼沽'
256
+ '沾沿泂泃泄泅泇泉泊泌泐泓泔法泖泗泙泚泛泜泞泠泡波泣泥注泪泫泮泯泰泱泳泵泷泸泺泻泼'
257
+ '泽泾洁洄洇洈洋洌洎洑洒洓洗洘洙洚洛洞洢洣津洧洨洪洫洭洮洱洲洳洴洵洸洹洺活洼洽派洿'
258
+ '流浃浅浆浇浈浉浊测浍济浏浐浑浒浓浔浕浙浚浛浜浞浟浠浡浣浥浦浩浪浬浭浮浯浰浲浴海浸'
259
+ '浼涂涄涅消涉涌涍涎涐涑涓涔涕涘涛涝涞涟涠涡涢涣涤润涧涨涩涪涫涮涯液涴涵涸涿淀淄淅'
260
+ '淆淇淋淌淏淑淖淘淙淜淝淞淟淠淡淤淦淫淬淮淯深淳淴混淹添淼清渊渌渍渎渐渑渔渗渚渝渟'
261
+ '渠渡渣渤渥温渫渭港渰渲渴游渺渼湃湄湉湍湎湑湓湔湖湘湛湜湝湟湣湫湮湲湴湾湿溁溃溅溆'
262
+ '溇溉溍溏源溘溚溜溞溟溠溢溥溦溧溪溯溱溲溴溵溶溷溹溺溻溽滁滂滃滆滇滉滋滍滏滑滓滔滕'
263
+ '滗滘滚滞滟滠满滢滤滥滦滧滨滩滪滫滴滹漂漆漈漉漋漏漓演漕漖漠漤漦漩漪漫漭漯漱漳漴漶'
264
+ '漷漹漻漼漾潆潇潋潍潏潖潘潜潞潟潢潦潩潭潮潲潴潵潸潺潼潽潾澂澄澈澉澌澍澎澛澜澡澥澧'
265
+ '澪澭澳澴澶澹澼澽激濂濉濋濑濒濞濠濡濩濮濯瀌瀍瀑瀔瀚瀛瀣瀱瀵瀹瀼灈灌灏灞火灭灯灰灵'
266
+ '灶灸灼灾灿炀炅炆炉炊炌炎炒炔炕炖炘炙炜炝炟炣炫炬炭炮炯炱炳炷炸点炻炼炽烀烁烂烃烈'
267
+ '烊烔烘烙烛烜烝烟烠烤烦烧烨烩烫烬热烯烶烷烹烺烻烽焆焉焊焌焐焓焕焖焗焘焙焚焜焞焦焯'
268
+ '焰焱然煁煃煅煊煋煌煎煓煜煞煟煤煦照煨煮煲煳煴煸煺煽熄熇熊熏熔熘熙熛熜熟熠熥熨熬熵'
269
+ '熹熻燃燊燋燎燏燔燕燚燠燥燧燮燹爆爇爔爚爝爟爨爪爬爰爱爵父爷爸爹爻爽爿牁牂片版牌牍'
270
+ '牒牖牙牚牛牝牟牡牢牤牥牦牧物牮牯牲牵特牺牻牾牿犀犁犄犇犊犋犍犏犒犟犨犬犯犰犴状犷'
271
+ '犸犹狁狂狃狄狈狉狍狎狐狒狗狙狝狞狠狡狨狩独狭狮狯狰狱狲狳狴狷狸狺狻狼猁猃猄猇猊猎'
272
+ '猕猖猗猛猜猝猞猡猢猥猩猪猫猬献猯猰猱猴猷猹猺猾猿獍獐獒獗獠獬獭獯獴獾玃玄率玉王玎'
273
+ '玑玒玓玕玖玘玙玚玛玞玟玠玡玢玤玥玦玩玫玭玮环现玱玲玳玶玷玹玺玻玼玿珀珂珅珇珈珉珊'
274
+ '珋珌珍珏珐珑珒珕珖珙珛珝珞珠珢珣珥珦珧珩珪珫班珰珲珵珷珸珹珺珽琀球琄琅理琇琈琉琊'
275
+ '琎琏琐琔琚琛琟琡琢琤琥琦琨琪琫琬琭琮琯琰琲琳琴琵琶琼瑀瑁瑂瑃瑄瑅瑆瑑瑓瑔瑕瑖瑗瑙'
276
+ '瑚瑛瑜瑝瑞瑟瑢瑧瑨瑬瑭瑰瑱瑳瑶瑷瑾璀璁璃璆璇璈璋璎璐璒璘璜璞璟璠璥璧璨璩璪璬璮璱'
277
+ '璲璺瓀瓒瓖瓘瓜瓞瓠瓢瓣瓤瓦瓮瓯瓴瓶瓷瓻瓿甄甍甏甑甓甗甘甚甜生甡甥甦用甩甪甫甬甭甯'
278
+ '田由甲申电男甸町画甾畀畅畈畋界畎畏畔畖留畚畛畜畤略畦番畬畯畲畴畸畹畿疁疃疆疍疏疐'
279
+ '疑疔疖疗疙疚疝疟疠疡疢疣疤疥疫疬疭疮疯疰疱疲疳疴疵疸疹疼疽疾痂痃痄病症痈痉痊痍痒'
280
+ '痓痔痕痘痛痞痢痣痤痦痧痨痪痫痰痱痴痹痼痿瘀瘁瘃瘅瘆瘊瘌瘐瘕瘗瘘瘙瘛瘟瘠瘢瘤瘥瘦瘩'
281
+ '瘪瘫瘭瘰瘳瘴瘵瘸瘼瘾瘿癀癃癌癍癔癖癗癜癞癣癫癯癸登白百癿皂的皆皇皈皋皎皑皓皕皖皙'
282
+ '皛皞皤皦皭皮皱皲皴皿盂盅盆盈盉益盍盎盏盐监盒盔盖盗盘盛盟盥盦目盯盱盲直盷相盹盼盾'
283
+ '省眄眇眈眉眊看眍眙眚真眠眢眦眨眩眬眭眯眵眶眷眸眺眼着睁睃睄睇睎睐睑睚睛睡睢督睥睦'
284
+ '睨睫睬睹睽睾睿瞀瞄瞅瞋瞌瞍瞎瞑瞒瞟瞠瞢瞥瞧瞩瞪瞫瞬瞭瞰瞳瞵瞻瞽瞿矍矗矛矜矞矢矣知'
285
+ '矧矩矫矬短矮矰石矶矸矻矼矾矿砀码砂砄砆砉砌砍砑砒研砖砗砘砚砜砝砟砠砣砥砧砫砬砭砮'
286
+ '砰破砵砷砸砹砺砻砼砾础硁硅硇硊硌硍硎硐硒硔硕硖硗硙硚硝硪硫硬硭确硼硿碃碇碈碉碌碍'
287
+ '碎碏碑碓碗碘碚碛碜碟碡碣碥碧碨碰碱碲碳碴碶碹碾磁磅磉磊磋磏磐磔磕磙磜磡磨磬磲磴磷'
288
+ '磹磻礁礅礌礓礞礴礵示礼社祀祁祃祆祇祈祉祊祋祎祏祐祓祕祖祗祚祛祜祝神祟祠祢祥祧票祭'
289
+ '祯祲祷祸祺祼祾禀禁禄禅禊禋福禒禔禘禚禛禤禧禳禹禺离禽禾秀私秃秆秉秋种科秒秕秘租秣'
290
+ '秤秦秧秩秫秬秭积称秸移秽秾稀稂稃稆程稌稍税稑稔稗稙稚稞稠稣稳稷稹稻稼稽稿穄穆穑穗'
291
+ '穙穜穟穰穴究穷穸穹空穿窀突窃窄窅窈窊窍窎窑窒窕窖窗窘窜窝窟窠窣窥窦窨窬窭窳窸窿立'
292
+ '竑竖竘站竞竟章竣童竦竫竭端竹竺竽竿笃笄笆笈笊笋笏笑笔笕笙笛笞笠笤笥符笨笪笫第笮笯'
293
+ '笱笳笸笺笼笾筀筅筇等筋筌筏筐筑筒答策筘筚筛筜筝筠筢筤筥筦筮筱筲筵筶筷筹筻筼签简箅'
294
+ '箍箐箓箔箕箖算箜管箢箦箧箨箩箪箫箬箭箱箴箸篁篆篇篌篑篓篙篚篝篡篥篦篪篮篯篱篷篼篾'
295
+ '簃簇簉簋簌簏簕簖簝簟簠簧簪簰簸簿籀籁籍籥米籴类籼籽粉粑粒粕粗粘粜粝粞粟粢粤粥粪粮'
296
+ '粱粲粳粹粼粽精粿糁糅糇糈糊糌糍糒糕糖糗糙糜糟糠糨糯糵系紊素索紧紫累絜絮絷綦綮縠縢'
297
+ '縻繁繄繇纂纛纠纡红纣纤纥约级纨纩纪纫纬纭纮纯纰纱纲纳纴纵纶纷纸纹纺纻纼纽纾线绀绁'
298
+ '绂练组绅细织终绉绊绋绌绍绎经绐绑绒结绔绕绖绗绘给绚绛络绝绞统绠绡绢绣绤绥绦继绨绩'
299
+ '绪绫续绮绯绰绱绲绳维绵绶绷绸绹绺绻综绽绾绿缀缁缂缃缄缅缆缇缈缉缊缌缎缐缑缒缓缔缕'
300
+ '编缗缘缙缚缛缜缝缞缟缠缡缢缣缤缥缦缧缨缩缪缫缬缭缮缯缰缱缲缳缴缵缶缸缺罂罄罅罍罐'
301
+ '网罔罕罗罘罚罟罡罢罨罩罪置罱署罴罶罹罽罾羁羊羌美羑羓羔羕羖羚羝羞羟羡群羧羯羰羱羲'
302
+ '羸羹羼羽羿翀翁翂翃翅翈翊翌翎翔翕翘翙翚翛翟翠翡翥翦翩翮翯翰翱翳翷翻翼翾耀老考耄者'
303
+ '耆耇耋而耍耏耐耑耒耔耕耖耗耘耙耜耠耢耤耥耦耧耨耩耪耰耱耳耵耶耷耸耻耽耿聂聃聆聊聋'
304
+ '职聍聒联聘聚聩聪聱聿肃肄肆肇肉肋肌肓肖肘肚肛肝肟肠股肢肤肥肩肪肫肭肮肯肱育肴肷肸'
305
+ '肺肼肽肾肿胀胁胂胃胄胆胈背胍胎胖胗胙胚胛胜胝胞胠胡胣胤胥胧胨胩胪胫胬胭胯胰胱胲胳'
306
+ '胴胶胸胺胼能脂脆脉脊脍脎脏脐脑脒脓脔脖脘脚脞脟脩脬脯脱脲脶脸脾脿腆腈腊腋腌腐腑腒'
307
+ '腓腔腕腘腙腚腠腥腧腨腩腭腮腯腰腱腴腹腺腻腼腽腾腿膀膂膈膊膏膑膘膙膛膜膝膦膨膳膺膻'
308
+ '臀臂臃臆臊臌臑臜臣臧自臬臭至致臻臼臾舀舁舂舄舅舆舌舍舐舒舔舛舜舞舟舠舢舣舥航舫般'
309
+ '舭舯舰舱舲舳舴舵舶舷舸船舻舾艄艅艇艉艋艎艏艘艚艟艨艮良艰色艳艴艺艽艾艿节芃芄芈芊'
310
+ '芋芍芎芏芑芒芗芘芙芜芝芟芠芡芣芤芥芦芨芩芪芫芬芭芮芯芰花芳芴芷芸芹芼芽芾苁苄苇苈'
311
+ '苉苊苋苌苍苎苏苑苒苓苔苕苗苘苛苜苞苟苠苡苣苤若苦苧苫苯英苴苷苹苻苾茀茁茂范茄茅茆'
312
+ '茈茉茋茌茎茏茑茓茔茕茗茚茛茜茝茧茨茫茬茭茯茱茳茴茵茶茸茹茺茼茽荀荁荃荄荆荇草荏荐'
313
+ '荑荒荓荔荖荙荚荛荜荞荟荠荡荣荤荥荦荧荨荩荪荫荬荭荮药荷荸荻荼荽莅莆莉莎莒莓莘莙莛'
314
+ '莜莝莞莠莨莩莪莫莰莱莲莳莴莶获莸莹莺莼莽莿菀菁菂菅菇菉菊菌菍菏菔菖菘菜菝菟菠菡菥'
315
+ '菩菪菰菱菲菹菼菽萁萃萄萆萋萌萍萎萏萑萘萚萜萝萣萤营萦萧萨萩萱萳萸萹萼落葆葎葑葖著'
316
+ '葙葚葛葜葡董葩葫葬葭葰葱葳葴葵葶葸葺蒂蒄蒇蒈蒉蒋蒌蒎蒐蒗蒙蒜蒟蒡蒨蒯蒱蒲蒴蒸蒹蒺'
317
+ '蒻蒽蒿蓁蓂蓄蓇蓉蓊蓍蓏蓐蓑蓓蓖蓝蓟蓠蓢蓣蓥蓦蓬蓰蓼蓿蔀蔃蔈蔊蔌蔑蔓蔗蔚蔟蔡蔫蔬蔷'
318
+ '蔸蔹蔺蔻蔼蔽蕃蕈蕉蕊蕖蕗蕙蕞蕤蕨蕰蕲蕴蕹蕺蕻蕾薁薄薅薇薏薛薜薢薤薨薪薮薯薰薳薷薸'
319
+ '薹薿藁藉藏藐藓藕藜藟藠藤藦藨藩藻藿蘅蘑蘖蘘蘧蘩蘸蘼虎虏虐虑虒虓虔虚虞虢虤虫虬虮虱'
320
+ '虷虸虹虺虻虼虽虾虿蚀蚁蚂蚄蚆蚊蚋蚌蚍蚓蚕蚜蚝蚣蚤蚧蚨蚩蚪蚬蚯蚰蚱蚲蚴蚶蚺蛀蛃蛄蛆'
321
+ '蛇蛉蛊蛋蛎蛏蛐蛑蛔蛘蛙蛛蛞蛟蛤蛩蛭蛮蛰蛱蛲蛳蛴蛸蛹蛾蜀蜂蜃蜇蜈蜉蜊蜍蜎蜐蜒蜓蜕蜗'
322
+ '蜘蜚蜜蜞蜡蜢蜣蜥蜩蜮蜱蜴蜷蜻蜾蜿蝇蝈蝉蝌蝎蝓蝗蝘蝙蝠蝣蝤蝥蝮蝰蝲蝴蝶蝻蝼蝽蝾螂螃'
323
+ '螅螈螋融螗螟螠螣螨螫螬螭螯螱螳螵螺螽蟀蟆蟊蟋蟏蟑蟒蟛蟠蟥蟪蟫蟮蟹蟾蠃蠊蠋蠓蠕蠖蠡'
324
+ '蠢蠲蠹蠼血衃衄衅行衍衎衒衔街衙衠衡衢衣补表衩衫衬衮衰衲衷衽衾衿袁袂袄袅袆袈袋袍袒'
325
+ '袖袗袜袢袤袪被袭袯袱袷袼裁裂装裆裈裉裎裒裔裕裘裙裛裟裢裣裤裥裨裰裱裳裴裸裹裼裾褂'
326
+ '褊褐褒褓褕褙褚褛褟褡褥褪褫褯褰褴褶襁襄襕襚襜襞襟襦襫襻西要覃覆见观觃规觅视觇览觉'
327
+ '觊觋觌觎觏觐觑角觖觚觜觞觟解觥触觫觭觯觱觳觿言訄訇訚訾詈詟詹誉誊誓謇警譬计订讣认'
328
+ '讥讦讧讨让讪讫训议讯记讱讲讳讴讵讶讷许讹论讻讼讽设访诀证诂诃评诅识诇诈诉诊诋诌词'
329
+ '诎诏诐译诒诓诔试诖诗诘诙诚诛诜话诞诟诠诡询诣诤该详诧诨诩诫诬语诮误诰诱诲诳说诵请'
330
+ '诸诹诺读诼诽课诿谀谁谂调谄谅谆谇谈谊谋谌谍谎谏谐谑谒谓谔谕谖谗谙谚谛谜谝谞谟谠谡'
331
+ '谢谣谤谥谦谧谨谩谪谫谬谭谮谯谰谱谲谳谴谵谶谷谼谿豁豆豇豉豌豕豚象豢豨豪豫豮豳豸豹'
332
+ '豺貂貅貆貉貊貌貔貘贝贞负贡财责贤败账货质贩贪贫贬购贮贯贰贱贲贳贴贵贶贷贸费贺贻贼'
333
+ '贽贾贿赀赁赂赃资赅赆赇赈赉赊赋赌赍赎赏赐赑赒赓赔赕赖赗赘赙赚赛赜赝赞赟赠赡赢赣赤'
334
+ '赦赧赪赫赭走赳赴赵赶起趁趄超越趋趑趔趟趣趯趱足趴趵趸趺趼趾趿跂跃跄跆跋跌跎跏跐跑'
335
+ '跖跗跚跛距跞跟跣跤跨跪跬路跱跳践跶跷跸跹跺跻跽踅踉踊踌踏踒踔踝踞踟踢踣踦踩踪踬踮'
336
+ '踯踱踵踶踹踺踽蹀蹁蹂蹄蹅蹇蹈蹉蹊蹋蹐蹑蹒蹙蹚蹜蹢蹦蹩蹬蹭蹯蹰蹲蹴蹶蹼蹽蹾蹿躁躅躇'
337
+ '躏躐躔躜躞身躬躯躲躺车轧轨轩轪轫转轭轮软轰轱轲轳轴轵轶轷轸轹轺轻轼载轾轿辀辁辂较'
338
+ '辄辅辆辇辈辉辊辋辌辍辎辏辐辑辒输辔辕辖辗辘辙辚辛辜辞辟辣辨辩辫辰辱边辽达辿迁迂迄'
339
+ '迅过迈迎运近迓返迕还这进远违连迟迢迤迥迦迨迩迪迫迭迮述迳迷迸迹迺追退送适逃逄逅逆'
340
+ '选逊逋逍透逐逑递途逖逗通逛逝逞速造逡逢逦逭逮逯逴逵逶逸逻逼逾遁遂遄遆遇遍遏遐遑遒'
341
+ '道遗遘遛遢遣遥遨遭遮遴遵遹遽避邀邂邃邈邋邑邓邕邗邘邙邛邝邠邡邢那邦邨邪邬邮邯邰邱'
342
+ '邲邳邴邵邶邸邹邺邻邽邾邿郁郃郄郅郇郈郊郎郏郐郑郓郗郚郛郜郝郡郢郤郦郧部郪郫郭郯郴'
343
+ '郸都郾郿鄀鄂鄃鄄鄅鄌鄑鄗鄘鄙鄚鄜鄞鄠鄢鄣鄫鄯鄱鄹酂酃酅酆酉酊酋酌配酎酏酐酒酗酚酝'
344
+ '酞酡酢酣酤酥酦酩酪酬酮酯酰酱酲酴酵酶酷酸酹酺酽酾酿醅醇醉醋醌醍醐醑醒醚醛醢醨醪醭'
345
+ '醮醯醴醵醺醾采釉释里重野量釐金釜鉴銎銮鋆鋈錾鍪鎏鏊鏖鐾鑫钆钇针钉钊钋钌钍钎钏钐钒'
346
+ '钓钔钕钖钗钘钙钚钛钜钝钞钟钠钡钢钣钤钥钦钧钨钩钪钫钬钭钮钯钰钱钲钳钴钵钷钹钺钻钼'
347
+ '钽钾钿铀铁铂铃铄铅铆铈铉铊铋铌铍铎铏铐铑铒铕铖铗铘铙铚铛铜铝铞铟铠铡铢铣铤铥铧铨'
348
+ '铩铪铫铬铭铮铯铰铱铲铳铴铵银铷铸铹铺铻铼铽链铿销锁锂锃锄锅锆锇锈锉锊锋锌锍锎锏锐'
349
+ '锑锒锓锔锕锖锗锘错锚锛锜锝锞锟锡锢锣锤锥锦锧锨锩锪锫锬锭键锯锰锱锲锳锴锵锶锷锸锹'
350
+ '锺锻锼锽锾锿镀镁镂镃镄镅镆镇镈镉镊镋镌镍镎镏镐镑镒镓镔镕镖镗镘镚镛镜镝镞镠镡镢镣'
351
+ '镤镥镦镧镨镩镪镫镬镭镮镯镰镱镲镳镴镵镶长门闩闪闫闭问闯闰闱闲闳间闵闶闷闸闹闺闻闼'
352
+ '闽闾闿阀阁阂阃阄阅阆阇阈阉阊阋阌阍阎阏阐阑阒阔阕阖阗阘阙阚阜队阡阪阮阱防阳阴阵阶'
353
+ '阻阼阽阿陀陂附际陆陇陈陉陋陌降陎限陑陔陕陛陞陟陡院除陧陨险陪陬陲陴陵陶陷隃隅隆隈'
354
+ '隋隍随隐隔隗隘隙障隧隩隰隳隶隹隺隼隽难雀雁雄雅集雇雉雊雌雍雎雏雒雕雠雨雩雪雯雱雳'
355
+ '零雷雹雾需霁霄霅霆震霈霉霍霎霏霓霖霜霞霨霪霭霰露霸霹霾青靓靖静靛非靠靡面靥革靬靰'
356
+ '靳靴靶靸靺靼靽靿鞁鞅鞋鞍鞑鞒鞔鞘鞠鞡鞣鞧鞨鞫鞬鞭鞮鞯鞲鞳鞴韂韦韧韨韩韪韫韬韭音韵'
357
+ '韶页顶顷顸项顺须顼顽顾顿颀颁颂颃预颅领颇颈颉颊颋颌颍颎颏颐频颓颔颖颗题颙颚颛颜额'
358
+ '颞颟颠颡颢颤颥颦颧风飏飐飑飒飓飔飕飗飘飙飞食飧飨餍餐餮饔饕饥饧饨饩饪饫饬饭饮饯饰'
359
+ '饱饲饳饴饵饶饷饸饹饺饻饼饽饿馁馃馄馅馆馇馈馉馊馋馌馍馏馐馑馒馓馔馕首馗馘香馝馞馥'
360
+ '馧馨马驭驮驯驰驱驲驳驴驵驶驷驸驹驺驻驼驽驾驿骀骁骂骃骄骅骆骇骈骉骊骋验骍骎骏骐骑'
361
+ '骒骓骕骖骗骘骙骚骛骜骝骞骟骠骡骢骣骤骥骦骧骨骰骱骶骷骸骺骼髀髁髂髃髅髋髌髎髑髓高'
362
+ '髡髢髦髫髭髯髹髻髽鬃鬈鬏鬒鬓鬘鬟鬣鬯鬲鬶鬷鬻鬼魁魂魃魄魅魆魇魈魉魋魍魏魑魔鱼鱽鱾'
363
+ '鱿鲀鲁鲂鲃鲅鲆鲇鲈鲉鲊鲋鲌鲍鲎鲏鲐鲑鲒鲔鲕鲖鲗鲘鲙鲚鲛鲜鲝鲞鲟鲠鲡鲢鲣鲤鲥鲦鲧鲨'
364
+ '鲩鲪鲫鲬鲭鲮鲯鲰鲱鲲鲳鲴鲵鲷鲸鲹鲺鲻鲼鲽鲾鲿鳀鳁鳂鳃鳄鳅鳇鳈鳉鳊鳌鳍鳎鳏鳐鳑鳒鳓'
365
+ '鳔鳕鳖鳗鳘鳙鳚鳛鳜鳝鳞鳟鳠鳡鳢鳣鳤鸟鸠鸡鸢鸣鸤鸥鸦鸧鸨鸩鸪鸫鸬鸭鸮鸯鸰鸱鸲鸳鸵鸶'
366
+ '鸷鸸鸹鸺鸻鸼鸽鸾鸿鹀鹁鹂鹃鹄鹅鹆鹇鹈鹉鹊鹋鹌鹍鹎鹏鹐鹑鹒鹔鹕鹖鹗鹘鹙鹚鹛鹜鹝鹞鹟'
367
+ '鹠鹡鹢鹣鹤鹦鹧鹨鹩鹪鹫鹬鹭鹮鹯鹰鹱鹲鹳鹴鹾鹿麀麂麇麈麋麑麒麓麖麝麟麦麸麹麻麽麾黄'
368
+ '黇黉黍黎黏黑黔默黛黜黝黟黠黡黢黥黧黩黪黯黹黻黼黾鼋鼍鼎鼐鼒鼓鼗鼙鼠鼢鼩鼫鼬鼯鼱鼷'
369
+ '鼹鼻鼽鼾齁齇齉齐齑齿龀龁龂龃龄龅龆龇龈龉龊龋龌龙龚龛龟龠龢鿍鿎鿏㑇㑊㕮㘎㙍㙘㙦㛃'
370
+ '㛚㛹㟃㠇㠓㤘㥄㧐㧑㧟㫰㬊㬎㬚㭎㭕㮾㰀㳇㳘㳚㴔㵐㶲㸆㸌㺄㻬㽏㿠䁖䂮䃅䃎䅟䌹䎃䎖䏝䏡'
371
+ '䏲䐃䓖䓛䓨䓫䓬䗖䗛䗪䗴䜣䝙䢺䢼䣘䥽䦃䲟䲠䲢䴓䴔䴕䴖䴗䴘䴙䶮𠅤𠙶𠳐𡎚𡐓𣗋𣲗𣲘𣸣𤧛𤩽'
372
+ '𤫉𥔲𥕢𥖨𥻗𦈡𦒍𦙶𦝼𦭜𦰡𧿹𨐈𨙸𨚕𨟠𨭉𨱇𨱏𨱑𨱔𨺙𩽾𩾃𩾌𪟝𪣻𪤗𪨰𪨶𪩘𪾢𫄧𫄨𫄷𫄸𫇭𫌀𫍣𫍯'
373
+ '𫍲𫍽𫐄𫐐𫐓𫑡𫓧𫓯𫓶𫓹𫔍𫔎𫔶𫖮𫖯𫖳𫗧𫗴𫘜𫘝𫘦𫘧𫘨𫘪𫘬𫚕𫚖𫚭𫛭𫞩𫟅𫟦𫟹𫟼𫠆𫠊𫠜𫢸𫫇𫭟'
374
+ '𫭢𫭼𫮃𫰛𫵷𫶇𫷷𫸩𬀩𬀪𬂩𬃊𬇕𬇙𬇹𬉼𬊈𬊤𬌗𬍛𬍡𬍤𬒈𬒔𬒗𬕂𬘓𬘘𬘡𬘩𬘫𬘬𬘭𬘯𬙂𬙊𬙋𬜬𬜯𬞟'
375
+ '𬟁𬟽𬣙𬣞𬣡𬣳𬤇𬤊𬤝𬨂𬨎𬩽𬪩𬬩𬬭𬬮𬬱𬬸𬬹𬬻𬬿𬭁𬭊𬭎𬭚𬭛𬭤𬭩𬭬𬭯𬭳𬭶𬭸𬭼𬮱𬮿𬯀𬯎𬱖𬱟'
376
+ '𬳵𬳶𬳽𬳿𬴂𬴃𬴊𬶋𬶍𬶏𬶐𬶟𬶠𬶨𬶭𬶮𬷕𬸘𬸚𬸣𬸦𬸪𬹼𬺈𬺓'
377
+ )
378
+ CN_CHARS_EXT = '吶诶屌囧飚屄'
379
+
380
+ CN_CHARS = CN_CHARS_COMMON + CN_CHARS_EXT
381
+ IN_CH_CHARS = { c : True for c in CN_CHARS }
382
+
383
+ EN_CHARS = string.ascii_letters + string.digits
384
+ IN_EN_CHARS = { c : True for c in EN_CHARS }
385
+
386
+ VALID_CHARS = CN_CHARS + EN_CHARS + ' '
387
+ IN_VALID_CHARS = { c : True for c in VALID_CHARS }
388
+
389
+ # ================================================================================ #
390
+ # basic class
391
+ # ================================================================================ #
392
+ class ChineseChar(object):
393
+ """
394
+ 中文字符
395
+ 每个字符对应简体和繁体,
396
+ e.g. 简体 = '负', 繁体 = '負'
397
+ 转换时可转换为简体或繁体
398
+ """
399
+
400
+ def __init__(self, simplified, traditional):
401
+ self.simplified = simplified
402
+ self.traditional = traditional
403
+ #self.__repr__ = self.__str__
404
+
405
+ def __str__(self):
406
+ return self.simplified or self.traditional or None
407
+
408
+ def __repr__(self):
409
+ return self.__str__()
410
+
411
+
412
+ class ChineseNumberUnit(ChineseChar):
413
+ """
414
+ 中文数字/数位字符
415
+ 每个字符除繁简体外还有一个额外的大写字符
416
+ e.g. '陆' 和 '陸'
417
+ """
418
+
419
+ def __init__(self, power, simplified, traditional, big_s, big_t):
420
+ super(ChineseNumberUnit, self).__init__(simplified, traditional)
421
+ self.power = power
422
+ self.big_s = big_s
423
+ self.big_t = big_t
424
+
425
+ def __str__(self):
426
+ return '10^{}'.format(self.power)
427
+
428
+ @classmethod
429
+ def create(cls, index, value, numbering_type=NUMBERING_TYPES[1], small_unit=False):
430
+
431
+ if small_unit:
432
+ return ChineseNumberUnit(power=index + 1,
433
+ simplified=value[0], traditional=value[1], big_s=value[1], big_t=value[1])
434
+ elif numbering_type == NUMBERING_TYPES[0]:
435
+ return ChineseNumberUnit(power=index + 8,
436
+ simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
437
+ elif numbering_type == NUMBERING_TYPES[1]:
438
+ return ChineseNumberUnit(power=(index + 2) * 4,
439
+ simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
440
+ elif numbering_type == NUMBERING_TYPES[2]:
441
+ return ChineseNumberUnit(power=pow(2, index + 3),
442
+ simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
443
+ else:
444
+ raise ValueError(
445
+ 'Counting type should be in {0} ({1} provided).'.format(NUMBERING_TYPES, numbering_type))
446
+
447
+
448
+ class ChineseNumberDigit(ChineseChar):
449
+ """
450
+ 中文数字字符
451
+ """
452
+
453
+ def __init__(self, value, simplified, traditional, big_s, big_t, alt_s=None, alt_t=None):
454
+ super(ChineseNumberDigit, self).__init__(simplified, traditional)
455
+ self.value = value
456
+ self.big_s = big_s
457
+ self.big_t = big_t
458
+ self.alt_s = alt_s
459
+ self.alt_t = alt_t
460
+
461
+ def __str__(self):
462
+ return str(self.value)
463
+
464
+ @classmethod
465
+ def create(cls, i, v):
466
+ return ChineseNumberDigit(i, v[0], v[1], v[2], v[3])
467
+
468
+
469
+ class ChineseMath(ChineseChar):
470
+ """
471
+ 中文数位字符
472
+ """
473
+
474
+ def __init__(self, simplified, traditional, symbol, expression=None):
475
+ super(ChineseMath, self).__init__(simplified, traditional)
476
+ self.symbol = symbol
477
+ self.expression = expression
478
+ self.big_s = simplified
479
+ self.big_t = traditional
480
+
481
+
482
+ CC, CNU, CND, CM = ChineseChar, ChineseNumberUnit, ChineseNumberDigit, ChineseMath
483
+
484
+
485
+ class NumberSystem(object):
486
+ """
487
+ 中文数字系统
488
+ """
489
+ pass
490
+
491
+
492
+ class MathSymbol(object):
493
+ """
494
+ 用于中文数字系统的数学符号 (繁/简体), e.g.
495
+ positive = ['正', '正']
496
+ negative = ['负', '負']
497
+ point = ['点', '點']
498
+ """
499
+
500
+ def __init__(self, positive, negative, point):
501
+ self.positive = positive
502
+ self.negative = negative
503
+ self.point = point
504
+
505
+ def __iter__(self):
506
+ for v in self.__dict__.values():
507
+ yield v
508
+
509
+
510
+ # class OtherSymbol(object):
511
+ # """
512
+ # 其他符号
513
+ # """
514
+ #
515
+ # def __init__(self, sil):
516
+ # self.sil = sil
517
+ #
518
+ # def __iter__(self):
519
+ # for v in self.__dict__.values():
520
+ # yield v
521
+
522
+
523
+ # ================================================================================ #
524
+ # basic utils
525
+ # ================================================================================ #
526
+ def create_system(numbering_type=NUMBERING_TYPES[1]):
527
+ """
528
+ 根据数字系统类型返回创建相应的数字系统,默认为 mid
529
+ NUMBERING_TYPES = ['low', 'mid', 'high']: 中文数字系统类型
530
+ low: '兆' = '亿' * '十' = $10^{9}$, '京' = '兆' * '十', etc.
531
+ mid: '兆' = '亿' * '万' = $10^{12}$, '京' = '兆' * '万', etc.
532
+ high: '兆' = '亿' * '亿' = $10^{16}$, '京' = '兆' * '兆', etc.
533
+ 返回对应的数字系统
534
+ """
535
+
536
+ # chinese number units of '亿' and larger
537
+ all_larger_units = zip(
538
+ LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED, LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL)
539
+ larger_units = [CNU.create(i, v, numbering_type, False)
540
+ for i, v in enumerate(all_larger_units)]
541
+ # chinese number units of '十, 百, 千, 万'
542
+ all_smaller_units = zip(
543
+ SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED, SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL)
544
+ smaller_units = [CNU.create(i, v, small_unit=True)
545
+ for i, v in enumerate(all_smaller_units)]
546
+ # digis
547
+ chinese_digis = zip(CHINESE_DIGIS, CHINESE_DIGIS,
548
+ BIG_CHINESE_DIGIS_SIMPLIFIED, BIG_CHINESE_DIGIS_TRADITIONAL)
549
+ digits = [CND.create(i, v) for i, v in enumerate(chinese_digis)]
550
+ digits[0].alt_s, digits[0].alt_t = ZERO_ALT, ZERO_ALT
551
+ digits[1].alt_s, digits[1].alt_t = ONE_ALT, ONE_ALT
552
+ digits[2].alt_s, digits[2].alt_t = TWO_ALTS[0], TWO_ALTS[1]
553
+
554
+ # symbols
555
+ positive_cn = CM(POSITIVE[0], POSITIVE[1], '+', lambda x: x)
556
+ negative_cn = CM(NEGATIVE[0], NEGATIVE[1], '-', lambda x: -x)
557
+ point_cn = CM(POINT[0], POINT[1], '.', lambda x,
558
+ y: float(str(x) + '.' + str(y)))
559
+ # sil_cn = CM(SIL[0], SIL[1], '-', lambda x, y: float(str(x) + '-' + str(y)))
560
+ system = NumberSystem()
561
+ system.units = smaller_units + larger_units
562
+ system.digits = digits
563
+ system.math = MathSymbol(positive_cn, negative_cn, point_cn)
564
+ # system.symbols = OtherSymbol(sil_cn)
565
+ return system
566
+
567
+
568
+ def chn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]):
569
+
570
+ def get_symbol(char, system):
571
+ for u in system.units:
572
+ if char in [u.traditional, u.simplified, u.big_s, u.big_t]:
573
+ return u
574
+ for d in system.digits:
575
+ if char in [d.traditional, d.simplified, d.big_s, d.big_t, d.alt_s, d.alt_t]:
576
+ return d
577
+ for m in system.math:
578
+ if char in [m.traditional, m.simplified]:
579
+ return m
580
+
581
+ def string2symbols(chinese_string, system):
582
+ int_string, dec_string = chinese_string, ''
583
+ for p in [system.math.point.simplified, system.math.point.traditional]:
584
+ if p in chinese_string:
585
+ int_string, dec_string = chinese_string.split(p)
586
+ break
587
+ return [get_symbol(c, system) for c in int_string], \
588
+ [get_symbol(c, system) for c in dec_string]
589
+
590
+ def correct_symbols(integer_symbols, system):
591
+ """
592
+ 一百八 to 一百八十
593
+ 一亿一千三百万 to 一亿 一千万 三百万
594
+ """
595
+
596
+ if integer_symbols and isinstance(integer_symbols[0], CNU):
597
+ if integer_symbols[0].power == 1:
598
+ integer_symbols = [system.digits[1]] + integer_symbols
599
+
600
+ if len(integer_symbols) > 1:
601
+ if isinstance(integer_symbols[-1], CND) and isinstance(integer_symbols[-2], CNU):
602
+ integer_symbols.append(
603
+ CNU(integer_symbols[-2].power - 1, None, None, None, None))
604
+
605
+ result = []
606
+ unit_count = 0
607
+ for s in integer_symbols:
608
+ if isinstance(s, CND):
609
+ result.append(s)
610
+ unit_count = 0
611
+ elif isinstance(s, CNU):
612
+ current_unit = CNU(s.power, None, None, None, None)
613
+ unit_count += 1
614
+
615
+ if unit_count == 1:
616
+ result.append(current_unit)
617
+ elif unit_count > 1:
618
+ for i in range(len(result)):
619
+ if isinstance(result[-i - 1], CNU) and result[-i - 1].power < current_unit.power:
620
+ result[-i - 1] = CNU(result[-i - 1].power +
621
+ current_unit.power, None, None, None, None)
622
+ return result
623
+
624
+ def compute_value(integer_symbols):
625
+ """
626
+ Compute the value.
627
+ When current unit is larger than previous unit, current unit * all previous units will be used as all previous units.
628
+ e.g. '两千万' = 2000 * 10000 not 2000 + 10000
629
+ """
630
+ value = [0]
631
+ last_power = 0
632
+ for s in integer_symbols:
633
+ if isinstance(s, CND):
634
+ value[-1] = s.value
635
+ elif isinstance(s, CNU):
636
+ value[-1] *= pow(10, s.power)
637
+ if s.power > last_power:
638
+ value[:-1] = list(map(lambda v: v *
639
+ pow(10, s.power), value[:-1]))
640
+ last_power = s.power
641
+ value.append(0)
642
+ return sum(value)
643
+
644
+ system = create_system(numbering_type)
645
+ int_part, dec_part = string2symbols(chinese_string, system)
646
+ int_part = correct_symbols(int_part, system)
647
+ int_str = str(compute_value(int_part))
648
+ dec_str = ''.join([str(d.value) for d in dec_part])
649
+ if dec_part:
650
+ return '{0}.{1}'.format(int_str, dec_str)
651
+ else:
652
+ return int_str
653
+
654
+
655
+ def num2chn(number_string, numbering_type=NUMBERING_TYPES[1], big=False,
656
+ traditional=False, alt_zero=False, alt_one=False, alt_two=True,
657
+ use_zeros=True, use_units=True):
658
+
659
+ def get_value(value_string, use_zeros=True):
660
+
661
+ striped_string = value_string.lstrip('0')
662
+
663
+ # record nothing if all zeros
664
+ if not striped_string:
665
+ return []
666
+
667
+ # record one digits
668
+ elif len(striped_string) == 1:
669
+ if use_zeros and len(value_string) != len(striped_string):
670
+ return [system.digits[0], system.digits[int(striped_string)]]
671
+ else:
672
+ return [system.digits[int(striped_string)]]
673
+
674
+ # recursively record multiple digits
675
+ else:
676
+ result_unit = next(u for u in reversed(
677
+ system.units) if u.power < len(striped_string))
678
+ result_string = value_string[:-result_unit.power]
679
+ return get_value(result_string) + [result_unit] + get_value(striped_string[-result_unit.power:])
680
+
681
+ system = create_system(numbering_type)
682
+
683
+ int_dec = number_string.split('.')
684
+ if len(int_dec) == 1:
685
+ int_string = int_dec[0]
686
+ dec_string = ""
687
+ elif len(int_dec) == 2:
688
+ int_string = int_dec[0]
689
+ dec_string = int_dec[1]
690
+ else:
691
+ raise ValueError(
692
+ "invalid input num string with more than one dot: {}".format(number_string))
693
+
694
+ if use_units and len(int_string) > 1:
695
+ result_symbols = get_value(int_string)
696
+ else:
697
+ result_symbols = [system.digits[int(c)] for c in int_string]
698
+ dec_symbols = [system.digits[int(c)] for c in dec_string]
699
+ if dec_string:
700
+ result_symbols += [system.math.point] + dec_symbols
701
+
702
+ if alt_two:
703
+ liang = CND(2, system.digits[2].alt_s, system.digits[2].alt_t,
704
+ system.digits[2].big_s, system.digits[2].big_t)
705
+ for i, v in enumerate(result_symbols):
706
+ if isinstance(v, CND) and v.value == 2:
707
+ next_symbol = result_symbols[i +
708
+ 1] if i < len(result_symbols) - 1 else None
709
+ previous_symbol = result_symbols[i - 1] if i > 0 else None
710
+ if isinstance(next_symbol, CNU) and isinstance(previous_symbol, (CNU, type(None))):
711
+ if next_symbol.power != 1 and ((previous_symbol is None) or (previous_symbol.power != 1)):
712
+ result_symbols[i] = liang
713
+
714
+ # if big is True, '两' will not be used and `alt_two` has no impact on output
715
+ if big:
716
+ attr_name = 'big_'
717
+ if traditional:
718
+ attr_name += 't'
719
+ else:
720
+ attr_name += 's'
721
+ else:
722
+ if traditional:
723
+ attr_name = 'traditional'
724
+ else:
725
+ attr_name = 'simplified'
726
+
727
+ result = ''.join([getattr(s, attr_name) for s in result_symbols])
728
+
729
+ # if not use_zeros:
730
+ # result = result.strip(getattr(system.digits[0], attr_name))
731
+
732
+ if alt_zero:
733
+ result = result.replace(
734
+ getattr(system.digits[0], attr_name), system.digits[0].alt_s)
735
+
736
+ if alt_one:
737
+ result = result.replace(
738
+ getattr(system.digits[1], attr_name), system.digits[1].alt_s)
739
+
740
+ for i, p in enumerate(POINT):
741
+ if result.startswith(p):
742
+ return CHINESE_DIGIS[0] + result
743
+
744
+ # ^10, 11, .., 19
745
+ if len(result) >= 2 and result[1] in [SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED[0],
746
+ SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL[0]] and \
747
+ result[0] in [CHINESE_DIGIS[1], BIG_CHINESE_DIGIS_SIMPLIFIED[1], BIG_CHINESE_DIGIS_TRADITIONAL[1]]:
748
+ result = result[1:]
749
+
750
+ return result
751
+
752
+
753
+ # ================================================================================ #
754
+ # different types of rewriters
755
+ # ================================================================================ #
756
+ class Cardinal:
757
+ """
758
+ CARDINAL类
759
+ """
760
+
761
+ def __init__(self, cardinal=None, chntext=None):
762
+ self.cardinal = cardinal
763
+ self.chntext = chntext
764
+
765
+ def chntext2cardinal(self):
766
+ return chn2num(self.chntext)
767
+
768
+ def cardinal2chntext(self):
769
+ return num2chn(self.cardinal)
770
+
771
+ class Digit:
772
+ """
773
+ DIGIT类
774
+ """
775
+
776
+ def __init__(self, digit=None, chntext=None):
777
+ self.digit = digit
778
+ self.chntext = chntext
779
+
780
+ # def chntext2digit(self):
781
+ # return chn2num(self.chntext)
782
+
783
+ def digit2chntext(self):
784
+ return num2chn(self.digit, alt_two=False, use_units=False)
785
+
786
+
787
+ class TelePhone:
788
+ """
789
+ TELEPHONE类
790
+ """
791
+
792
+ def __init__(self, telephone=None, raw_chntext=None, chntext=None):
793
+ self.telephone = telephone
794
+ self.raw_chntext = raw_chntext
795
+ self.chntext = chntext
796
+
797
+ # def chntext2telephone(self):
798
+ # sil_parts = self.raw_chntext.split('<SIL>')
799
+ # self.telephone = '-'.join([
800
+ # str(chn2num(p)) for p in sil_parts
801
+ # ])
802
+ # return self.telephone
803
+
804
+ def telephone2chntext(self, fixed=False):
805
+
806
+ if fixed:
807
+ sil_parts = self.telephone.split('-')
808
+ self.raw_chntext = '<SIL>'.join([
809
+ num2chn(part, alt_two=False, use_units=False) for part in sil_parts
810
+ ])
811
+ self.chntext = self.raw_chntext.replace('<SIL>', '')
812
+ else:
813
+ sp_parts = self.telephone.strip('+').split()
814
+ self.raw_chntext = '<SP>'.join([
815
+ num2chn(part, alt_two=False, use_units=False) for part in sp_parts
816
+ ])
817
+ self.chntext = self.raw_chntext.replace('<SP>', '')
818
+ return self.chntext
819
+
820
+
821
+ class Fraction:
822
+ """
823
+ FRACTION类
824
+ """
825
+
826
+ def __init__(self, fraction=None, chntext=None):
827
+ self.fraction = fraction
828
+ self.chntext = chntext
829
+
830
+ def chntext2fraction(self):
831
+ denominator, numerator = self.chntext.split('分之')
832
+ return chn2num(numerator) + '/' + chn2num(denominator)
833
+
834
+ def fraction2chntext(self):
835
+ numerator, denominator = self.fraction.split('/')
836
+ return num2chn(denominator) + '分之' + num2chn(numerator)
837
+
838
+
839
+ class Date:
840
+ """
841
+ DATE类
842
+ """
843
+
844
+ def __init__(self, date=None, chntext=None):
845
+ self.date = date
846
+ self.chntext = chntext
847
+
848
+ # def chntext2date(self):
849
+ # chntext = self.chntext
850
+ # try:
851
+ # year, other = chntext.strip().split('年', maxsplit=1)
852
+ # year = Digit(chntext=year).digit2chntext() + '年'
853
+ # except ValueError:
854
+ # other = chntext
855
+ # year = ''
856
+ # if other:
857
+ # try:
858
+ # month, day = other.strip().split('月', maxsplit=1)
859
+ # month = Cardinal(chntext=month).chntext2cardinal() + '月'
860
+ # except ValueError:
861
+ # day = chntext
862
+ # month = ''
863
+ # if day:
864
+ # day = Cardinal(chntext=day[:-1]).chntext2cardinal() + day[-1]
865
+ # else:
866
+ # month = ''
867
+ # day = ''
868
+ # date = year + month + day
869
+ # self.date = date
870
+ # return self.date
871
+
872
+ def date2chntext(self):
873
+ date = self.date
874
+ try:
875
+ year, other = date.strip().split('年', 1)
876
+ year = Digit(digit=year).digit2chntext() + '年'
877
+ except ValueError:
878
+ other = date
879
+ year = ''
880
+ if other:
881
+ try:
882
+ month, day = other.strip().split('月', 1)
883
+ month = Cardinal(cardinal=month).cardinal2chntext() + '月'
884
+ except ValueError:
885
+ day = date
886
+ month = ''
887
+ if day:
888
+ day = Cardinal(cardinal=day[:-1]).cardinal2chntext() + day[-1]
889
+ else:
890
+ month = ''
891
+ day = ''
892
+ chntext = year + month + day
893
+ self.chntext = chntext
894
+ return self.chntext
895
+
896
+
897
+ class Money:
898
+ """
899
+ MONEY类
900
+ """
901
+
902
+ def __init__(self, money=None, chntext=None):
903
+ self.money = money
904
+ self.chntext = chntext
905
+
906
+ # def chntext2money(self):
907
+ # return self.money
908
+
909
+ def money2chntext(self):
910
+ money = self.money
911
+ pattern = re.compile(r'(\d+(\.\d+)?)')
912
+ matchers = pattern.findall(money)
913
+ if matchers:
914
+ for matcher in matchers:
915
+ money = money.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext())
916
+ self.chntext = money
917
+ return self.chntext
918
+
919
+
920
+ class Percentage:
921
+ """
922
+ PERCENTAGE类
923
+ """
924
+
925
+ def __init__(self, percentage=None, chntext=None):
926
+ self.percentage = percentage
927
+ self.chntext = chntext
928
+
929
+ def chntext2percentage(self):
930
+ return chn2num(self.chntext.strip().strip('百分之')) + '%'
931
+
932
+ def percentage2chntext(self):
933
+ return '百分之' + num2chn(self.percentage.strip().strip('%'))
934
+
935
+
936
+ def normalize_nsw(raw_text):
937
+ text = '^' + raw_text + '$'
938
+
939
+ # 规范化日期
940
+ pattern = re.compile(r"\D+((([089]\d|(19|20)\d{2})年)?(\d{1,2}月(\d{1,2}[日号])?)?)")
941
+ matchers = pattern.findall(text)
942
+ if matchers:
943
+ #print('date')
944
+ for matcher in matchers:
945
+ text = text.replace(matcher[0], Date(date=matcher[0]).date2chntext(), 1)
946
+
947
+ # 规范化金钱
948
+ pattern = re.compile(r"\D+((\d+(\.\d+)?)[多余几]?" + CURRENCY_UNITS + r"(\d" + CURRENCY_UNITS + r"?)?)")
949
+ matchers = pattern.findall(text)
950
+ if matchers:
951
+ #print('money')
952
+ for matcher in matchers:
953
+ text = text.replace(matcher[0], Money(money=matcher[0]).money2chntext(), 1)
954
+
955
+ # 规范化固话/手机号码
956
+ # 手机
957
+ # http://www.jihaoba.com/news/show/13680
958
+ # 移动:139、138、137、136、135、134、159、158、157、150、151、152、188、187、182、183、184、178、198
959
+ # 联通:130、131、132、156、155、186、185、176
960
+ # 电信:133、153、189、180、181、177
961
+ pattern = re.compile(r"\D((\+?86 ?)?1([38]\d|5[0-35-9]|7[678]|9[89])\d{8})\D")
962
+ matchers = pattern.findall(text)
963
+ if matchers:
964
+ #print('telephone')
965
+ for matcher in matchers:
966
+ text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(), 1)
967
+ # 固话
968
+ pattern = re.compile(r"\D((0(10|2[1-3]|[3-9]\d{2})-?)?[1-9]\d{6,7})\D")
969
+ matchers = pattern.findall(text)
970
+ if matchers:
971
+ # print('fixed telephone')
972
+ for matcher in matchers:
973
+ text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(fixed=True), 1)
974
+
975
+ # 规范化分数
976
+ pattern = re.compile(r"(\d+/\d+)")
977
+ matchers = pattern.findall(text)
978
+ if matchers:
979
+ #print('fraction')
980
+ for matcher in matchers:
981
+ text = text.replace(matcher, Fraction(fraction=matcher).fraction2chntext(), 1)
982
+
983
+ # 规范化百分数
984
+ text = text.replace('%', '%')
985
+ pattern = re.compile(r"(\d+(\.\d+)?%)")
986
+ matchers = pattern.findall(text)
987
+ if matchers:
988
+ #print('percentage')
989
+ for matcher in matchers:
990
+ text = text.replace(matcher[0], Percentage(percentage=matcher[0]).percentage2chntext(), 1)
991
+
992
+ # 规范化纯数+量词
993
+ pattern = re.compile(r"(\d+(\.\d+)?)[多余几]?" + COM_QUANTIFIERS)
994
+ matchers = pattern.findall(text)
995
+ if matchers:
996
+ #print('cardinal+quantifier')
997
+ for matcher in matchers:
998
+ text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1)
999
+
1000
+ # 规范化数字编号
1001
+ pattern = re.compile(r"(\d{4,32})")
1002
+ matchers = pattern.findall(text)
1003
+ if matchers:
1004
+ #print('digit')
1005
+ for matcher in matchers:
1006
+ text = text.replace(matcher, Digit(digit=matcher).digit2chntext(), 1)
1007
+
1008
+ # 规范化纯数
1009
+ pattern = re.compile(r"(\d+(\.\d+)?)")
1010
+ matchers = pattern.findall(text)
1011
+ if matchers:
1012
+ #print('cardinal')
1013
+ for matcher in matchers:
1014
+ text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1)
1015
+
1016
+
1017
+ # restore P2P, O2O, B2C, B2B etc
1018
+ pattern = re.compile(r"(([a-zA-Z]+)二([a-zA-Z]+))")
1019
+ matchers = pattern.findall(text)
1020
+ if matchers:
1021
+ # print('particular')
1022
+ for matcher in matchers:
1023
+ text = text.replace(matcher[0], matcher[1]+'2'+matcher[2], 1)
1024
+
1025
+ return text.lstrip('^').rstrip('$')
1026
+
1027
+
1028
+ def remove_erhua(text):
1029
+ """
1030
+ 去除儿化音词中的儿:
1031
+ 他女儿在那边儿 -> 他女儿在那边
1032
+ """
1033
+
1034
+ new_str=''
1035
+ while re.search('儿',text):
1036
+ a = re.search('儿',text).span()
1037
+ remove_er_flag = 0
1038
+
1039
+ if ER_WHITELIST_PATTERN.search(text):
1040
+ b = ER_WHITELIST_PATTERN.search(text).span()
1041
+ if b[0] <= a[0]:
1042
+ remove_er_flag = 1
1043
+
1044
+ if remove_er_flag == 0 :
1045
+ new_str = new_str + text[0:a[0]]
1046
+ text = text[a[1]:]
1047
+ else:
1048
+ new_str = new_str + text[0:b[1]]
1049
+ text = text[b[1]:]
1050
+
1051
+ text = new_str + text
1052
+ return text
1053
+
1054
+
1055
+ def remove_space(text):
1056
+ tokens = text.split()
1057
+ new = []
1058
+ for k,t in enumerate(tokens):
1059
+ if k != 0:
1060
+ if IN_EN_CHARS.get(tokens[k-1][-1]) and IN_EN_CHARS.get(t[0]):
1061
+ new.append(' ')
1062
+ new.append(t)
1063
+ return ''.join(new)
1064
+
1065
+
1066
+ class TextNorm:
1067
+ def __init__(self,
1068
+ to_banjiao:bool = False,
1069
+ to_upper:bool = False,
1070
+ to_lower:bool = False,
1071
+ remove_fillers:bool = False,
1072
+ remove_erhua:bool = False,
1073
+ check_chars:bool = False,
1074
+ remove_space:bool = False,
1075
+ cc_mode:str = '',
1076
+ ) :
1077
+ self.to_banjiao = to_banjiao
1078
+ self.to_upper = to_upper
1079
+ self.to_lower = to_lower
1080
+ self.remove_fillers = remove_fillers
1081
+ self.remove_erhua = remove_erhua
1082
+ self.check_chars = check_chars
1083
+ self.remove_space = remove_space
1084
+
1085
+ self.cc = None
1086
+ if cc_mode:
1087
+ from opencc import OpenCC # Open Chinese Convert: pip install opencc
1088
+ self.cc = OpenCC(cc_mode)
1089
+
1090
+ def __call__(self, text):
1091
+ if self.cc:
1092
+ text = self.cc.convert(text)
1093
+
1094
+ if self.to_banjiao:
1095
+ text = text.translate(QJ2BJ_TRANSFORM)
1096
+
1097
+ if self.to_upper:
1098
+ text = text.upper()
1099
+
1100
+ if self.to_lower:
1101
+ text = text.lower()
1102
+
1103
+ if self.remove_fillers:
1104
+ for c in FILLER_CHARS:
1105
+ text = text.replace(c, '')
1106
+
1107
+ if self.remove_erhua:
1108
+ text = remove_erhua(text)
1109
+
1110
+ text = normalize_nsw(text)
1111
+ 
+         text = text.translate(PUNCS_TRANSFORM)
+ 
+         if self.check_chars:
+             for c in text:
+                 if not IN_VALID_CHARS.get(c):
+                     print(f'WARNING: illegal char {c} in: {text}', file=sys.stderr)
+                     return ''
+ 
+         if self.remove_space:
+             text = remove_space(text)
+ 
+         return text
+ 
+ 
+ if __name__ == '__main__':
+     p = argparse.ArgumentParser()
+ 
+     # normalizer options
+     p.add_argument('--to_banjiao', action='store_true', help='convert quanjiao chars to banjiao')
+     p.add_argument('--to_upper', action='store_true', help='convert to upper case')
+     p.add_argument('--to_lower', action='store_true', help='convert to lower case')
+     p.add_argument('--remove_fillers', action='store_true', help='remove filler chars such as "呃, 啊"')
+     p.add_argument('--remove_erhua', action='store_true', help='remove erhua chars such as "他女儿在那边儿 -> 他女儿在那边"')
+     p.add_argument('--check_chars', action='store_true', help='skip sentences containing illegal chars')
+     p.add_argument('--remove_space', action='store_true', help='remove whitespace')
+     p.add_argument('--cc_mode', choices=['', 't2s', 's2t'], default='', help='convert between traditional and simplified Chinese')
+ 
+     # I/O options
+     p.add_argument('--log_interval', type=int, default=10000, help='log interval in number of processed lines')
+     p.add_argument('--has_key', action='store_true', help='will be deprecated, set --format ark instead')
+     p.add_argument('--format', type=str, choices=['txt', 'ark', 'tsv'], default='txt', help='input format')
+     p.add_argument('ifile', help='input filename, assumes utf-8 encoding')
+     p.add_argument('ofile', help='output filename')
+ 
+     args = p.parse_args()
+ 
+     if args.has_key:
+         args.format = 'ark'
+ 
+     normalizer = TextNorm(
+         to_banjiao=args.to_banjiao,
+         to_upper=args.to_upper,
+         to_lower=args.to_lower,
+         remove_fillers=args.remove_fillers,
+         remove_erhua=args.remove_erhua,
+         check_chars=args.check_chars,
+         remove_space=args.remove_space,
+         cc_mode=args.cc_mode,
+     )
+ 
+     ndone = 0
+     with open(args.ifile, 'r', encoding='utf8') as istream, open(args.ofile, 'w+', encoding='utf8') as ostream:
+         if args.format == 'tsv':
+             reader = csv.DictReader(istream, delimiter='\t')
+             assert 'TEXT' in reader.fieldnames
+             print('\t'.join(reader.fieldnames), file=ostream)
+ 
+             for item in reader:
+                 text = item['TEXT']
+ 
+                 if text:
+                     text = normalizer(text)
+ 
+                 if text:
+                     item['TEXT'] = text
+                     print('\t'.join([item[f] for f in reader.fieldnames]), file=ostream)
+ 
+                 ndone += 1
+                 if ndone % args.log_interval == 0:
+                     print(f'text norm: {ndone} lines done.', file=sys.stderr, flush=True)
+         else:
+             for l in istream:
+                 key, text = '', ''
+                 if args.format == 'ark':  # KALDI archive, line format: "key text"
+                     cols = l.strip().split(maxsplit=1)
+                     key, text = cols[0], cols[1] if len(cols) == 2 else ''
+                 else:
+                     text = l.strip()
+ 
+                 if text:
+                     text = normalizer(text)
+ 
+                 if text:
+                     if args.format == 'ark':
+                         print(key + '\t' + text, file=ostream)
+                     else:
+                         print(text, file=ostream)
+ 
+                 ndone += 1
+                 if ndone % args.log_interval == 0:
+                     print(f'text norm: {ndone} lines done.', file=sys.stderr, flush=True)
+     print(f'text norm: {ndone} lines done in total.', file=sys.stderr, flush=True)
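A minimal invocation sketch for the normalizer CLI above (file names are illustrative; the flags are the ones registered with the parser):

    python cn_tn.py --to_upper --remove_space input.txt output.txt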
eval_audio/evaluate_asr.py ADDED
@@ -0,0 +1,269 @@
+ import argparse
+ import itertools
+ import json
+ import os
+ import random
+ import time
+ from functools import partial
+ import re
+ from evaluate_tokenizer import EvaluationTokenizer
+ import editdistance as ed
+ import torch
+ from transformers.pipelines.audio_utils import ffmpeg_read
+ import requests
+ from whisper_normalizer.english import EnglishTextNormalizer
+ from whisper_normalizer.basic import BasicTextNormalizer
+ from cn_tn import TextNorm
+ import zhconv
+ 
+ english_normalizer = EnglishTextNormalizer()
+ chinese_normalizer = TextNorm(
+     to_banjiao=False,
+     to_upper=False,
+     to_lower=False,
+     remove_fillers=False,
+     remove_erhua=False,
+     check_chars=False,
+     remove_space=False,
+     cc_mode='',
+ )
+ basic_normalizer = BasicTextNormalizer()
+ 
+ from tqdm import tqdm
+ 
+ from transformers import AutoProcessor, Qwen2AudioForConditionalGeneration
+ 
+ PUNCS = '!,.?;:'
+ 
+ ds_collections = {
+     'librispeech': {'path': 'asr/librispeech_eval.jsonl', 'language': 'en'},
+     'aishell2': {'path': 'asr/aishell2_eval.jsonl', 'language': 'zh'},
+     'cv15_en': {'path': 'asr/cv15_asr_en_eval.jsonl', 'language': 'en'},
+     'cv15_zh': {'path': 'asr/cv15_asr_zh_eval.jsonl', 'language': 'zh'},
+     'cv15_yue': {'path': 'asr/cv15_asr_yue_eval.jsonl', 'language': 'yue'},
+     'cv15_fr': {'path': 'asr/cv15_asr_fr_eval.jsonl', 'language': 'fr'},
+     'fluers_zh': {'path': 'asr/fleurs_asr_zh_eval.jsonl', 'language': 'zh'},
+ }
+ 
+ 
+ class AudioDataset(torch.utils.data.Dataset):
+ 
+     def __init__(self, ds):
+         path = ds['path']
+         self.datas = open(path).readlines()
+ 
+     def __len__(self):
+         return len(self.datas)
+ 
+     def __getitem__(self, idx):
+         data = json.loads(self.datas[idx].strip())
+         audio = data['audio']
+         source = data['source']
+         prompt = "<|audio_bos|><|AUDIO|><|audio_eos|>" + data['prompt']
+         gt = data['gt']
+ 
+         return {
+             'audio': audio,
+             'prompt': prompt,
+             'source': source,
+             'gt': gt
+         }
+ 
+ 
+ def read_audio(audio_path):
+     if audio_path.startswith("http://") or audio_path.startswith("https://"):
+         # We need to actually check for a real protocol, otherwise it's impossible to use a local file
+         # like http_huggingface_co.png
+         inputs = requests.get(audio_path).content
+     else:
+         with open(audio_path, "rb") as f:
+             inputs = f.read()
+     return inputs
+ 
+ 
+ def collate_fn(inputs, processor):
+     input_texts = [_['prompt'] for _ in inputs]
+     source = [_['source'] for _ in inputs]
+     gt = [_['gt'] for _ in inputs]
+     audio_path = [_['audio'] for _ in inputs]
+     input_audios = [ffmpeg_read(read_audio(_['audio']), sampling_rate=processor.feature_extractor.sampling_rate) for _ in inputs]
+     inputs = processor(text=input_texts, audios=input_audios, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt", padding=True)
+     return inputs, audio_path, source, gt
+ 
+ 
+ class InferenceSampler(torch.utils.data.sampler.Sampler):
+ 
+     def __init__(self, size):
+         self._size = int(size)
+         assert size > 0
+         self._rank = torch.distributed.get_rank()
+         self._world_size = torch.distributed.get_world_size()
+         self._local_indices = self._get_local_indices(size, self._world_size, self._rank)
+ 
+     @staticmethod
+     def _get_local_indices(total_size, world_size, rank):
+         shard_size = total_size // world_size
+         left = total_size % world_size
+         shard_sizes = [shard_size + int(r < left) for r in range(world_size)]
+ 
+         begin = sum(shard_sizes[:rank])
+         end = min(sum(shard_sizes[:rank + 1]), total_size)
+         return range(begin, end)
+ 
+     def __iter__(self):
+         yield from self._local_indices
+ 
+     def __len__(self):
+         return len(self._local_indices)
+ 
+ 
+ def remove_sp(text, language):
+     gt = re.sub(r"<\|.*?\|>", " ", text)
+     gt = re.sub(r"\s+", " ", gt)  # collapse runs of whitespace into a single space
+     gt = re.sub(f" ?([{PUNCS}])", r"\1", gt)
+     gt = gt.lstrip(" ")
+     if language == "zh":
+         gt = re.sub(r"\s+", "", gt)
+     return gt
+ 
+ 
+ def compute_wer(refs, hyps, language):
+     distance = 0
+     ref_length = 0
+     tokenizer = EvaluationTokenizer(
+         tokenizer_type="none",
+         lowercase=True,
+         punctuation_removal=True,
+         character_tokenization=False,
+     )
+     for i in range(len(refs)):
+         ref = refs[i]
+         pred = hyps[i]
+         if language in ["yue"]:
+             ref = zhconv.convert(ref, 'zh-cn')
+             pred = zhconv.convert(pred, 'zh-cn')
+         if language in ["en"]:
+             ref = english_normalizer(ref)
+             pred = english_normalizer(pred)
+         if language in ["zh"]:
+             ref = chinese_normalizer(ref)
+             pred = chinese_normalizer(pred)
+         else:
+             # note: every non-"zh" language (including "en") also passes through the basic normalizer
+             ref = basic_normalizer(ref)
+             pred = basic_normalizer(pred)
+         ref_items = tokenizer.tokenize(ref).split()
+         pred_items = tokenizer.tokenize(pred).split()
+         if language in ["zh", "yue"]:
+             # character-level error rate for Chinese and Cantonese
+             ref_items = [x for x in "".join(ref_items)]
+             pred_items = [x for x in "".join(pred_items)]
+         if i == 0:
+             print(f"ref: {ref}")
+             print(f"pred: {pred}")
+             print(f"ref_items:\n{ref_items}\n{len(ref_items)}\n{ref_items[0]}")
+             print(f"pred_items:\n{pred_items}\n{len(pred_items)}\n{pred_items[0]}")
+         distance += ed.eval(ref_items, pred_items)
+         ref_length += len(ref_items)
+     return distance / ref_length
+ 
+ 
+ if __name__ == '__main__':
+ 
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--checkpoint', type=str, default='Qwen/Qwen2-Audio')
+     parser.add_argument('--dataset', type=str, default='')
+     parser.add_argument('--batch-size', type=int, default=1)
+     parser.add_argument('--num-workers', type=int, default=1)
+     parser.add_argument('--seed', type=int, default=0)
+     args = parser.parse_args()
+ 
+     torch.distributed.init_process_group(
+         backend='nccl',
+         world_size=int(os.getenv('WORLD_SIZE', '1')),
+         rank=int(os.getenv('RANK', '0')),
+     )
+ 
+     torch.cuda.set_device(int(os.getenv('LOCAL_RANK', 0)))
+ 
+     model = Qwen2AudioForConditionalGeneration.from_pretrained(
+         args.checkpoint, device_map='cuda', torch_dtype='auto', trust_remote_code=True).eval()
+ 
+     processor = AutoProcessor.from_pretrained(args.checkpoint)
+     processor.tokenizer.padding_side = 'left'
+ 
+     random.seed(args.seed)
+     dataset = AudioDataset(
+         ds=ds_collections[args.dataset],
+     )
+     data_loader = torch.utils.data.DataLoader(
+         dataset=dataset,
+         sampler=InferenceSampler(len(dataset)),
+         batch_size=args.batch_size,
+         num_workers=args.num_workers,
+         pin_memory=True,
+         drop_last=False,
+         collate_fn=partial(collate_fn, processor=processor),
+     )
+ 
+     gts = []
+     sources = []
+     rets = []
+     audio_paths = []
+     for _, (inputs, audio_path, source, gt) in tqdm(enumerate(data_loader)):
+         inputs['input_ids'] = inputs['input_ids'].to('cuda')
+         output_ids = model.generate(**inputs, max_new_tokens=256, min_new_tokens=1, do_sample=False)
+         output_ids = output_ids[:, inputs.input_ids.size(1):]
+         output = processor.batch_decode(output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
+         gts.extend(gt)
+         rets.extend(output)
+         sources.extend(source)
+         audio_paths.extend(audio_path)
+ 
+     torch.distributed.barrier()
+ 
+     world_size = torch.distributed.get_world_size()
+     merged_gts = [None for _ in range(world_size)]
+     merged_sources = [None for _ in range(world_size)]
+     merged_responses = [None for _ in range(world_size)]
+     merged_audio_paths = [None for _ in range(world_size)]
+     torch.distributed.all_gather_object(merged_gts, gts)
+     torch.distributed.all_gather_object(merged_sources, sources)
+     torch.distributed.all_gather_object(merged_responses, rets)
+     torch.distributed.all_gather_object(merged_audio_paths, audio_paths)
+ 
+     merged_gts = [_ for _ in itertools.chain.from_iterable(merged_gts)]
+     merged_sources = [_ for _ in itertools.chain.from_iterable(merged_sources)]
+     merged_audio_paths = [_ for _ in itertools.chain.from_iterable(merged_audio_paths)]
+     merged_responses = [_ for _ in itertools.chain.from_iterable(merged_responses)]
+ 
+     if torch.distributed.get_rank() == 0:
+         print(f"Evaluating {args.dataset} ...")
+ 
+         results = []
+         for gt, response, source, audio_path in zip(merged_gts, merged_responses, merged_sources, merged_audio_paths):
+             results.append({
+                 'gt': gt,
+                 'response': response,
+                 'source': source,
+                 'audio_path': audio_path,
+             })
+         time_prefix = time.strftime('%y%m%d%H%M%S', time.localtime())
+         results_file = f'{args.dataset}_{time_prefix}.json'
+         json.dump(results, open(results_file, 'w'))
+         results_dict = {}
+         for item in tqdm(results):
+             source = item["source"]
+             results_dict.setdefault(source, []).append(item)
+         lan = ds_collections[args.dataset]['language']
+         for source in results_dict:
+             refs, hyps = [], []
+             results_list = results_dict[source]
+             for result in results_list:
+                 gt = result["gt"]
+                 response = result["response"]
+                 gt = remove_sp(gt, lan)
+                 response = remove_sp(response, lan)
+                 refs.append(gt)
+                 hyps.append(response)
+             wer = compute_wer(refs, hyps, lan)
+             print(f"source: {source} cnt: {len(refs)} wer: {wer:.4f}")
+ 
+     torch.distributed.barrier()
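Note: this script (like the other evaluate_*.py scripts in this commit) initializes torch.distributed with the NCCL backend and reads WORLD_SIZE, RANK, and LOCAL_RANK from the environment, so it is meant to be run under a distributed launcher. A minimal launch sketch (GPU count and dataset name are illustrative):

    torchrun --nproc_per_node 8 evaluate_asr.py --dataset librispeech --batch-size 8

Left padding (processor.tokenizer.padding_side = 'left') keeps each generated continuation adjacent to its prompt when decoding batched inputs.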
eval_audio/evaluate_chat.py ADDED
@@ -0,0 +1,182 @@
+ import argparse
+ import itertools
+ import json
+ import os
+ import random
+ import time
+ from functools import partial
+ import torch
+ import requests
+ from tqdm import tqdm
+ from transformers import AutoProcessor, Qwen2AudioForConditionalGeneration
+ from transformers.pipelines.audio_utils import ffmpeg_read
+ 
+ 
+ ds_collections = {
+     'airbench_level3': {'path': 'chat/airbench-level-3.jsonl'}
+ }
+ 
+ 
+ class AudioChatDataset(torch.utils.data.Dataset):
+ 
+     def __init__(self, ds):
+         path = ds['path']
+         self.datas = open(path).readlines()
+ 
+     def __len__(self):
+         return len(self.datas)
+ 
+     def __getitem__(self, idx):
+         data = json.loads(self.datas[idx].strip())
+         audio = data['audio']
+         data_idx = data['id']
+         query = data['query']
+ 
+         return {
+             'audio': audio,
+             'data_idx': data_idx,
+             'query': query,
+         }
+ 
+ 
+ def read_audio(audio_path):
+     if audio_path.startswith("http://") or audio_path.startswith("https://"):
+         # We need to actually check for a real protocol, otherwise it's impossible to use a local file
+         # like http_huggingface_co.png
+         inputs = requests.get(audio_path).content
+     else:
+         with open(audio_path, "rb") as f:
+             inputs = f.read()
+     return inputs
+ 
+ 
+ def collate_fn(inputs, processor):
+     text_list = []
+     for _ in inputs:
+         query = _['query']
+         conversation = [
+             {'role': 'system', 'content': 'You are a helpful assistant.'},
+             {'role': 'user', 'content': query}
+         ]
+         text = processor.tokenizer.apply_chat_template(
+             conversation,
+             add_generation_prompt=True,
+             tokenize=False
+         )
+         text_list.append(text)
+ 
+     audio_path = [_['audio'] for _ in inputs]
+     data_idxs = [_['data_idx'] for _ in inputs]
+     input_audios = [ffmpeg_read(read_audio(_['audio']), sampling_rate=processor.feature_extractor.sampling_rate) for _ in inputs]
+     inputs = processor(text=text_list, audios=input_audios, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt", padding=True)
+     return inputs, audio_path, data_idxs
+ 
+ 
+ class InferenceSampler(torch.utils.data.sampler.Sampler):
+ 
+     def __init__(self, size):
+         self._size = int(size)
+         assert size > 0
+         self._rank = torch.distributed.get_rank()
+         self._world_size = torch.distributed.get_world_size()
+         self._local_indices = self._get_local_indices(size, self._world_size, self._rank)
+ 
+     @staticmethod
+     def _get_local_indices(total_size, world_size, rank):
+         shard_size = total_size // world_size
+         left = total_size % world_size
+         shard_sizes = [shard_size + int(r < left) for r in range(world_size)]
+ 
+         begin = sum(shard_sizes[:rank])
+         end = min(sum(shard_sizes[:rank + 1]), total_size)
+         return range(begin, end)
+ 
+     def __iter__(self):
+         yield from self._local_indices
+ 
+     def __len__(self):
+         return len(self._local_indices)
+ 
+ 
+ if __name__ == '__main__':
+ 
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--checkpoint', type=str, default='Qwen/Qwen2-Audio-7B-Instruct')
+     parser.add_argument('--dataset', type=str, default='')
+     parser.add_argument('--batch-size', type=int, default=1)
+     parser.add_argument('--num-workers', type=int, default=1)
+     parser.add_argument('--seed', type=int, default=0)
+     args = parser.parse_args()
+ 
+     torch.distributed.init_process_group(
+         backend='nccl',
+         world_size=int(os.getenv('WORLD_SIZE', '1')),
+         rank=int(os.getenv('RANK', '0')),
+     )
+ 
+     torch.cuda.set_device(int(os.getenv('LOCAL_RANK', 0)))
+ 
+     model = Qwen2AudioForConditionalGeneration.from_pretrained(
+         args.checkpoint, device_map='cuda', torch_dtype='auto', trust_remote_code=True).eval()
+ 
+     processor = AutoProcessor.from_pretrained(args.checkpoint)
+     processor.tokenizer.padding_side = 'left'
+ 
+     random.seed(args.seed)
+     dataset = AudioChatDataset(
+         ds=ds_collections[args.dataset],
+     )
+     data_loader = torch.utils.data.DataLoader(
+         dataset=dataset,
+         sampler=InferenceSampler(len(dataset)),
+         batch_size=args.batch_size,
+         num_workers=args.num_workers,
+         pin_memory=True,
+         drop_last=False,
+         collate_fn=partial(collate_fn, processor=processor),
+     )
+ 
+     idxs = []
+     rets = []
+     audio_paths = []
+     for _, (inputs, audio_path, data_idxs) in tqdm(enumerate(data_loader)):
+         inputs['input_ids'] = inputs['input_ids'].to('cuda')
+         output_ids = model.generate(**inputs, max_new_tokens=256, min_new_tokens=1, do_sample=False)
+         output_ids = output_ids[:, inputs.input_ids.size(1):]
+         output = processor.batch_decode(output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
+         rets.extend(output)
+         audio_paths.extend(audio_path)
+         idxs.extend(data_idxs)
+ 
+     torch.distributed.barrier()
+ 
+     world_size = torch.distributed.get_world_size()
+     merged_idxs = [None for _ in range(world_size)]
+     merged_responses = [None for _ in range(world_size)]
+     merged_audio_paths = [None for _ in range(world_size)]
+     torch.distributed.all_gather_object(merged_idxs, idxs)
+     torch.distributed.all_gather_object(merged_responses, rets)
+     torch.distributed.all_gather_object(merged_audio_paths, audio_paths)
+ 
+     merged_idxs = [_ for _ in itertools.chain.from_iterable(merged_idxs)]
+     merged_audio_paths = [_ for _ in itertools.chain.from_iterable(merged_audio_paths)]
+     merged_responses = [_ for _ in itertools.chain.from_iterable(merged_responses)]
+ 
+     if torch.distributed.get_rank() == 0:
+         print(f"Evaluating {args.dataset} ...")
+ 
+         results = []
+         for idx, response, audio_path in zip(merged_idxs, merged_responses, merged_audio_paths):
+             results.append({
+                 'idx': idx,
+                 'response': response,
+                 'audio_path': audio_path,
+             })
+         time_prefix = time.strftime('%y%m%d%H%M%S', time.localtime())
+         results_file = f'{args.dataset}_{time_prefix}.json'
+         json.dump(results, open(results_file, 'w'))
+ 
+     torch.distributed.barrier()
eval_audio/evaluate_emotion.py ADDED
@@ -0,0 +1,195 @@
+ import argparse
+ import itertools
+ import json
+ import os
+ import random
+ import time
+ from functools import partial
+ import torch
+ import requests
+ 
+ from tqdm import tqdm
+ from transformers import AutoProcessor, Qwen2AudioForConditionalGeneration
+ from transformers.pipelines.audio_utils import ffmpeg_read
+ from sklearn.metrics import accuracy_score
+ 
+ 
+ ds_collections = {
+     'meld': {'path': 'ser/meld_eval.jsonl'}
+ }
+ 
+ 
+ class AudioDataset(torch.utils.data.Dataset):
+ 
+     def __init__(self, ds):
+         path = ds['path']
+         self.datas = open(path).readlines()
+ 
+     def __len__(self):
+         return len(self.datas)
+ 
+     def __getitem__(self, idx):
+         data = json.loads(self.datas[idx].strip())
+         audio = data['audio']
+         source = data['source']
+         prompt = "<|audio_bos|><|AUDIO|><|audio_eos|>" + data['prompt']
+         gt = data['gt']
+ 
+         return {
+             'audio': audio,
+             'prompt': prompt,
+             'source': source,
+             'gt': gt
+         }
+ 
+ 
+ def read_audio(audio_path):
+     if audio_path.startswith("http://") or audio_path.startswith("https://"):
+         # We need to actually check for a real protocol, otherwise it's impossible to use a local file
+         # like http_huggingface_co.png
+         inputs = requests.get(audio_path).content
+     else:
+         with open(audio_path, "rb") as f:
+             inputs = f.read()
+     return inputs
+ 
+ 
+ def collate_fn(inputs, processor):
+     input_texts = [_['prompt'] for _ in inputs]
+     source = [_['source'] for _ in inputs]
+     gt = [_['gt'] for _ in inputs]
+     audio_path = [_['audio'] for _ in inputs]
+     input_audios = [ffmpeg_read(read_audio(_['audio']), sampling_rate=processor.feature_extractor.sampling_rate) for _ in inputs]
+     inputs = processor(text=input_texts, audios=input_audios, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt", padding=True)
+     return inputs, audio_path, source, gt
+ 
+ 
+ class InferenceSampler(torch.utils.data.sampler.Sampler):
+ 
+     def __init__(self, size):
+         self._size = int(size)
+         assert size > 0
+         self._rank = torch.distributed.get_rank()
+         self._world_size = torch.distributed.get_world_size()
+         self._local_indices = self._get_local_indices(size, self._world_size, self._rank)
+ 
+     @staticmethod
+     def _get_local_indices(total_size, world_size, rank):
+         shard_size = total_size // world_size
+         left = total_size % world_size
+         shard_sizes = [shard_size + int(r < left) for r in range(world_size)]
+ 
+         begin = sum(shard_sizes[:rank])
+         end = min(sum(shard_sizes[:rank + 1]), total_size)
+         return range(begin, end)
+ 
+     def __iter__(self):
+         yield from self._local_indices
+ 
+     def __len__(self):
+         return len(self._local_indices)
+ 
+ 
+ if __name__ == '__main__':
+ 
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--checkpoint', type=str, default='Qwen/Qwen2-Audio-7B')
+     parser.add_argument('--dataset', type=str, default='')
+     parser.add_argument('--batch-size', type=int, default=1)
+     parser.add_argument('--num-workers', type=int, default=1)
+     parser.add_argument('--seed', type=int, default=0)
+     args = parser.parse_args()
+ 
+     torch.distributed.init_process_group(
+         backend='nccl',
+         world_size=int(os.getenv('WORLD_SIZE', '1')),
+         rank=int(os.getenv('RANK', '0')),
+     )
+ 
+     torch.cuda.set_device(int(os.getenv('LOCAL_RANK', 0)))
+ 
+     model = Qwen2AudioForConditionalGeneration.from_pretrained(
+         args.checkpoint, device_map='cuda', trust_remote_code=True, torch_dtype='auto').eval()
+ 
+     processor = AutoProcessor.from_pretrained(args.checkpoint)
+     processor.tokenizer.padding_side = 'left'
+ 
+     random.seed(args.seed)
+     dataset = AudioDataset(
+         ds=ds_collections[args.dataset],
+     )
+     data_loader = torch.utils.data.DataLoader(
+         dataset=dataset,
+         sampler=InferenceSampler(len(dataset)),
+         batch_size=args.batch_size,
+         num_workers=args.num_workers,
+         pin_memory=True,
+         drop_last=False,
+         collate_fn=partial(collate_fn, processor=processor),
+     )
+ 
+     gts = []
+     sources = []
+     rets = []
+     audio_paths = []
+     for _, (inputs, audio_path, source, gt) in tqdm(enumerate(data_loader)):
+         inputs['input_ids'] = inputs['input_ids'].to('cuda')
+         output_ids = model.generate(**inputs, max_new_tokens=256, min_new_tokens=1, do_sample=False)
+         output_ids = output_ids[:, inputs.input_ids.size(1):]
+         output = processor.batch_decode(output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
+         gts.extend(gt)
+         rets.extend(output)
+         sources.extend(source)
+         audio_paths.extend(audio_path)
+ 
+     torch.distributed.barrier()
+ 
+     world_size = torch.distributed.get_world_size()
+     merged_gts = [None for _ in range(world_size)]
+     merged_sources = [None for _ in range(world_size)]
+     merged_responses = [None for _ in range(world_size)]
+     merged_audio_paths = [None for _ in range(world_size)]
+     torch.distributed.all_gather_object(merged_gts, gts)
+     torch.distributed.all_gather_object(merged_sources, sources)
+     torch.distributed.all_gather_object(merged_responses, rets)
+     torch.distributed.all_gather_object(merged_audio_paths, audio_paths)
+ 
+     merged_gts = [_ for _ in itertools.chain.from_iterable(merged_gts)]
+     merged_sources = [_ for _ in itertools.chain.from_iterable(merged_sources)]
+     merged_audio_paths = [_ for _ in itertools.chain.from_iterable(merged_audio_paths)]
+     merged_responses = [_ for _ in itertools.chain.from_iterable(merged_responses)]
+ 
+     if torch.distributed.get_rank() == 0:
+         print(f"Evaluating {args.dataset} ...")
+ 
+         results = []
+         for gt, response, source, audio_path in zip(merged_gts, merged_responses, merged_sources, merged_audio_paths):
+             results.append({
+                 'gt': gt,
+                 'response': response,
+                 'source': source,
+                 'audio_path': audio_path,
+             })
+         time_prefix = time.strftime('%y%m%d%H%M%S', time.localtime())
+         results_file = f'{args.dataset}_{time_prefix}.json'
+         json.dump(results, open(results_file, 'w'))
+         results_dict = {}
+         for item in tqdm(results):
+             source = item["source"]
+             results_dict.setdefault(source, []).append(item)
+ 
+         for source in results_dict:
+             refs, hyps = [], []
+             results_list = results_dict[source]
+             for result in results_list:
+                 gt = result["gt"]
+                 response = result["response"].lstrip()
+                 refs.append(gt)
+                 hyps.append(response)
+             score = accuracy_score(refs, hyps)
+             print(f"{source} ACC_score:", score, len(hyps))
+ 
+     torch.distributed.barrier()
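The emotion score above is plain exact-match accuracy over label strings: sklearn's accuracy_score here is equivalent to (sketch, not part of the script)

    sum(r == h for r, h in zip(refs, hyps)) / len(refs)

so a response only counts as correct if it reproduces the ground-truth label string exactly (after lstrip).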
eval_audio/evaluate_st.py ADDED
@@ -0,0 +1,200 @@
+ import argparse
+ import itertools
+ import json
+ import os
+ import random
+ import time
+ from functools import partial
+ import sacrebleu
+ import torch
+ import requests
+ from tqdm import tqdm
+ from transformers import AutoProcessor, Qwen2AudioForConditionalGeneration
+ from transformers.pipelines.audio_utils import ffmpeg_read
+ 
+ 
+ ds_collections = {
+     'covost2': {'path': 'st/covost2_eval.jsonl'}
+ }
+ 
+ 
+ class AudioDataset(torch.utils.data.Dataset):
+ 
+     def __init__(self, ds):
+         path = ds['path']
+         self.datas = open(path).readlines()
+ 
+     def __len__(self):
+         return len(self.datas)
+ 
+     def __getitem__(self, idx):
+         data = json.loads(self.datas[idx].strip())
+         audio = data['audio']
+         source = data['source']
+         prompt = "<|audio_bos|><|AUDIO|><|audio_eos|>" + data['prompt']
+         gt = data['gt']
+ 
+         return {
+             'audio': audio,
+             'prompt': prompt,
+             'source': source,
+             'gt': gt
+         }
+ 
+ 
+ def read_audio(audio_path):
+     if audio_path.startswith("http://") or audio_path.startswith("https://"):
+         # We need to actually check for a real protocol, otherwise it's impossible to use a local file
+         # like http_huggingface_co.png
+         inputs = requests.get(audio_path).content
+     else:
+         with open(audio_path, "rb") as f:
+             inputs = f.read()
+     return inputs
+ 
+ 
+ def collate_fn(inputs, processor):
+     input_texts = [_['prompt'] for _ in inputs]
+     source = [_['source'] for _ in inputs]
+     gt = [_['gt'] for _ in inputs]
+     audio_path = [_['audio'] for _ in inputs]
+     input_audios = [ffmpeg_read(read_audio(_['audio']), sampling_rate=processor.feature_extractor.sampling_rate) for _ in inputs]
+     inputs = processor(text=input_texts, audios=input_audios, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt", padding=True)
+     return inputs, audio_path, source, gt
+ 
+ 
+ class InferenceSampler(torch.utils.data.sampler.Sampler):
+ 
+     def __init__(self, size):
+         self._size = int(size)
+         assert size > 0
+         self._rank = torch.distributed.get_rank()
+         self._world_size = torch.distributed.get_world_size()
+         self._local_indices = self._get_local_indices(size, self._world_size, self._rank)
+ 
+     @staticmethod
+     def _get_local_indices(total_size, world_size, rank):
+         shard_size = total_size // world_size
+         left = total_size % world_size
+         shard_sizes = [shard_size + int(r < left) for r in range(world_size)]
+ 
+         begin = sum(shard_sizes[:rank])
+         end = min(sum(shard_sizes[:rank + 1]), total_size)
+         return range(begin, end)
+ 
+     def __iter__(self):
+         yield from self._local_indices
+ 
+     def __len__(self):
+         return len(self._local_indices)
+ 
+ 
+ if __name__ == '__main__':
+ 
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--checkpoint', type=str, default='Qwen/Qwen2-Audio-7B')
+     parser.add_argument('--dataset', type=str, default='')
+     parser.add_argument('--batch-size', type=int, default=1)
+     parser.add_argument('--num-workers', type=int, default=1)
+     parser.add_argument('--seed', type=int, default=0)
+     args = parser.parse_args()
+ 
+     torch.distributed.init_process_group(
+         backend='nccl',
+         world_size=int(os.getenv('WORLD_SIZE', '1')),
+         rank=int(os.getenv('RANK', '0')),
+     )
+ 
+     torch.cuda.set_device(int(os.getenv('LOCAL_RANK', 0)))
+ 
+     model = Qwen2AudioForConditionalGeneration.from_pretrained(
+         args.checkpoint, device_map='cuda', trust_remote_code=True, torch_dtype='auto').eval()
+ 
+     processor = AutoProcessor.from_pretrained(args.checkpoint)
+     processor.tokenizer.padding_side = 'left'
+ 
+     random.seed(args.seed)
+     dataset = AudioDataset(
+         ds=ds_collections[args.dataset],
+     )
+     data_loader = torch.utils.data.DataLoader(
+         dataset=dataset,
+         sampler=InferenceSampler(len(dataset)),
+         batch_size=args.batch_size,
+         num_workers=args.num_workers,
+         pin_memory=True,
+         drop_last=False,
+         collate_fn=partial(collate_fn, processor=processor),
+     )
+ 
+     gts = []
+     sources = []
+     rets = []
+     audio_paths = []
+     for _, (inputs, audio_path, source, gt) in tqdm(enumerate(data_loader)):
+         inputs['input_ids'] = inputs['input_ids'].to('cuda')
+         output_ids = model.generate(**inputs, max_new_tokens=256, min_new_tokens=1, do_sample=False)
+         output_ids = output_ids[:, inputs.input_ids.size(1):]
+         output = processor.batch_decode(output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
+         gts.extend(gt)
+         rets.extend(output)
+         sources.extend(source)
+         audio_paths.extend(audio_path)
+ 
+     torch.distributed.barrier()
+ 
+     world_size = torch.distributed.get_world_size()
+     merged_gts = [None for _ in range(world_size)]
+     merged_sources = [None for _ in range(world_size)]
+     merged_responses = [None for _ in range(world_size)]
+     merged_audio_paths = [None for _ in range(world_size)]
+     torch.distributed.all_gather_object(merged_gts, gts)
+     torch.distributed.all_gather_object(merged_sources, sources)
+     torch.distributed.all_gather_object(merged_responses, rets)
+     torch.distributed.all_gather_object(merged_audio_paths, audio_paths)
+ 
+     merged_gts = [_ for _ in itertools.chain.from_iterable(merged_gts)]
+     merged_sources = [_ for _ in itertools.chain.from_iterable(merged_sources)]
+     merged_audio_paths = [_ for _ in itertools.chain.from_iterable(merged_audio_paths)]
+     merged_responses = [_ for _ in itertools.chain.from_iterable(merged_responses)]
+ 
+     if torch.distributed.get_rank() == 0:
+         print(f"Evaluating {args.dataset} ...")
+ 
+         results = []
+         for gt, response, source, audio_path in zip(merged_gts, merged_responses, merged_sources, merged_audio_paths):
+             results.append({
+                 'gt': gt,
+                 'response': response,
+                 'source': source,
+                 'audio_path': audio_path,
+             })
+         time_prefix = time.strftime('%y%m%d%H%M%S', time.localtime())
+         results_file = f'{args.dataset}_{time_prefix}.json'
+         json.dump(results, open(results_file, 'w'))
+         results_dict = {}
+         for item in tqdm(results):
+             source = item["source"]
+             results_dict.setdefault(source, []).append(item)
+         for source in results_dict:
+             # pick the sacreBLEU tokenizer from the target-language tag embedded in the source name
+             text_lan = source.split("_")[-2]
+             if text_lan == "ja":
+                 text_lan = "ja-mecab"
+             elif text_lan == "zh":
+                 text_lan = "zh"
+             else:
+                 text_lan = "13a"
+             refs, hyps = [], []
+             results_list = results_dict[source]
+             for result in results_list:
+                 gt = result["gt"]
+                 response = result["response"]
+                 refs.append(gt)
+                 hyps.append(response)
+             bleu = sacrebleu.corpus_bleu(hyps, [refs], tokenize=text_lan).score
+             print(f"source: {source} cnt: {len(refs)} bleu score: {bleu:.4f}")
+ 
+     torch.distributed.barrier()
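BLEU above is computed with a per-language sacreBLEU tokenizer ("ja-mecab" for Japanese, "zh" for Chinese, the default "13a" otherwise). A standalone sketch of the same call (hypothesis and reference strings are illustrative):

    import sacrebleu
    bleu = sacrebleu.corpus_bleu(["这是一个测试"], [["这是一次测试"]], tokenize="zh").score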
eval_audio/evaluate_tokenizer.py ADDED
@@ -0,0 +1,61 @@
+ # Copyright 2022 The OFA-Sys Team. All rights reserved.
+ # This source code is licensed under the Apache 2.0 license
+ # found in the LICENSE file in the root directory.
+ 
+ import unicodedata
+ 
+ 
+ class EvaluationTokenizer(object):
+     """A generic evaluation-time tokenizer, which leverages built-in tokenizers
+     in sacreBLEU (https://github.com/mjpost/sacrebleu). It additionally provides
+     lowercasing, punctuation removal and character tokenization, which are
+     applied after sacreBLEU tokenization.
+ 
+     Args:
+         tokenizer_type (str): the type of sacreBLEU tokenizer to apply.
+         lowercase (bool): lowercase the text.
+         punctuation_removal (bool): remove punctuation (based on unicode
+             category) from text.
+         character_tokenization (bool): tokenize the text to characters.
+     """
+ 
+     SPACE = chr(32)
+     SPACE_ESCAPE = chr(9601)
+     # ALL_TOKENIZER_TYPES = ChoiceEnum(["none", "13a", "intl", "zh", "ja-mecab"])
+ 
+     def __init__(
+         self,
+         tokenizer_type: str = "13a",
+         lowercase: bool = False,
+         punctuation_removal: bool = False,
+         character_tokenization: bool = False,
+     ):
+         from sacrebleu.tokenizers import TOKENIZERS
+ 
+         assert tokenizer_type in TOKENIZERS, f"{tokenizer_type}, {TOKENIZERS}"
+         self.lowercase = lowercase
+         self.punctuation_removal = punctuation_removal
+         self.character_tokenization = character_tokenization
+         self.tokenizer = TOKENIZERS[tokenizer_type]
+ 
+     @classmethod
+     def remove_punctuation(cls, sent: str):
+         """Remove punctuation based on Unicode category."""
+         return cls.SPACE.join(
+             t for t in sent.split(cls.SPACE) if not all(unicodedata.category(c)[0] == "P" for c in t)
+         )
+ 
+     def tokenize(self, sent: str):
+         tokenized = self.tokenizer()(sent)
+ 
+         if self.punctuation_removal:
+             tokenized = self.remove_punctuation(tokenized)
+ 
+         if self.character_tokenization:
+             tokenized = self.SPACE.join(list(tokenized.replace(self.SPACE, self.SPACE_ESCAPE)))
+ 
+         if self.lowercase:
+             tokenized = tokenized.lower()
+ 
+         return tokenized
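A quick usage sketch of the class above (assumes a sacrebleu version that still exposes sacrebleu.tokenizers.TOKENIZERS, as the import requires; the input string is illustrative):

    from evaluate_tokenizer import EvaluationTokenizer
    tok = EvaluationTokenizer(tokenizer_type="none", lowercase=True, punctuation_removal=True)
    print(tok.tokenize("Hello , world !"))  # -> "hello world"

Note that remove_punctuation only drops whitespace-separated tokens made up entirely of punctuation; punctuation attached to a word is kept.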
eval_audio/evaluate_vocal_sound.py ADDED
@@ -0,0 +1,193 @@
+ import argparse
+ import itertools
+ import json
+ import os
+ import random
+ import time
+ from functools import partial
+ import torch
+ import requests
+ from tqdm import tqdm
+ from transformers import AutoProcessor, Qwen2AudioForConditionalGeneration
+ from transformers.pipelines.audio_utils import ffmpeg_read
+ from sklearn.metrics import accuracy_score
+ 
+ ds_collections = {
+     'vocalsound': {'path': 'vsc/vocalsound_eval.jsonl'}
+ }
+ 
+ 
+ class AudioDataset(torch.utils.data.Dataset):
+ 
+     def __init__(self, ds):
+         path = ds['path']
+         self.datas = open(path).readlines()
+ 
+     def __len__(self):
+         return len(self.datas)
+ 
+     def __getitem__(self, idx):
+         data = json.loads(self.datas[idx].strip())
+         audio = data['audio']
+         source = data['source']
+         prompt = "<|audio_bos|><|AUDIO|><|audio_eos|>" + data['prompt']
+         gt = data['gt']
+ 
+         return {
+             'audio': audio,
+             'prompt': prompt,
+             'source': source,
+             'gt': gt
+         }
+ 
+ 
+ def read_audio(audio_path):
+     if audio_path.startswith("http://") or audio_path.startswith("https://"):
+         # We need to actually check for a real protocol, otherwise it's impossible to use a local file
+         # like http_huggingface_co.png
+         inputs = requests.get(audio_path).content
+     else:
+         with open(audio_path, "rb") as f:
+             inputs = f.read()
+     return inputs
+ 
+ 
+ def collate_fn(inputs, processor):
+     input_texts = [_['prompt'] for _ in inputs]
+     source = [_['source'] for _ in inputs]
+     gt = [_['gt'] for _ in inputs]
+     audio_path = [_['audio'] for _ in inputs]
+     input_audios = [ffmpeg_read(read_audio(_['audio']), sampling_rate=processor.feature_extractor.sampling_rate) for _ in inputs]
+     inputs = processor(text=input_texts, audios=input_audios, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt", padding=True)
+     return inputs, audio_path, source, gt
+ 
+ 
+ class InferenceSampler(torch.utils.data.sampler.Sampler):
+ 
+     def __init__(self, size):
+         self._size = int(size)
+         assert size > 0
+         self._rank = torch.distributed.get_rank()
+         self._world_size = torch.distributed.get_world_size()
+         self._local_indices = self._get_local_indices(size, self._world_size, self._rank)
+ 
+     @staticmethod
+     def _get_local_indices(total_size, world_size, rank):
+         shard_size = total_size // world_size
+         left = total_size % world_size
+         shard_sizes = [shard_size + int(r < left) for r in range(world_size)]
+ 
+         begin = sum(shard_sizes[:rank])
+         end = min(sum(shard_sizes[:rank + 1]), total_size)
+         return range(begin, end)
+ 
+     def __iter__(self):
+         yield from self._local_indices
+ 
+     def __len__(self):
+         return len(self._local_indices)
+ 
+ 
+ if __name__ == '__main__':
+ 
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--checkpoint', type=str, default='Qwen/Qwen2-Audio-7B')
+     parser.add_argument('--dataset', type=str, default='')
+     parser.add_argument('--batch-size', type=int, default=1)
+     parser.add_argument('--num-workers', type=int, default=1)
+     parser.add_argument('--seed', type=int, default=0)
+     args = parser.parse_args()
+ 
+     torch.distributed.init_process_group(
+         backend='nccl',
+         world_size=int(os.getenv('WORLD_SIZE', '1')),
+         rank=int(os.getenv('RANK', '0')),
+     )
+ 
+     torch.cuda.set_device(int(os.getenv('LOCAL_RANK', 0)))
+ 
+     model = Qwen2AudioForConditionalGeneration.from_pretrained(
+         args.checkpoint, device_map='cuda', trust_remote_code=True, torch_dtype='auto').eval()
+ 
+     processor = AutoProcessor.from_pretrained(args.checkpoint)
+     processor.tokenizer.padding_side = 'left'
+ 
+     random.seed(args.seed)
+     dataset = AudioDataset(
+         ds=ds_collections[args.dataset],
+     )
+     data_loader = torch.utils.data.DataLoader(
+         dataset=dataset,
+         sampler=InferenceSampler(len(dataset)),
+         batch_size=args.batch_size,
+         num_workers=args.num_workers,
+         pin_memory=True,
+         drop_last=False,
+         collate_fn=partial(collate_fn, processor=processor),
+     )
+ 
+     gts = []
+     sources = []
+     rets = []
+     audio_paths = []
+     for _, (inputs, audio_path, source, gt) in tqdm(enumerate(data_loader)):
+         inputs['input_ids'] = inputs['input_ids'].to('cuda')
+         output_ids = model.generate(**inputs, max_new_tokens=256, min_new_tokens=1, do_sample=False)
+         output_ids = output_ids[:, inputs.input_ids.size(1):]
+         output = processor.batch_decode(output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
+         gts.extend(gt)
+         rets.extend(output)
+         sources.extend(source)
+         audio_paths.extend(audio_path)
+ 
+     torch.distributed.barrier()
+ 
+     world_size = torch.distributed.get_world_size()
+     merged_gts = [None for _ in range(world_size)]
+     merged_sources = [None for _ in range(world_size)]
+     merged_responses = [None for _ in range(world_size)]
+     merged_audio_paths = [None for _ in range(world_size)]
+     torch.distributed.all_gather_object(merged_gts, gts)
+     torch.distributed.all_gather_object(merged_sources, sources)
+     torch.distributed.all_gather_object(merged_responses, rets)
+     torch.distributed.all_gather_object(merged_audio_paths, audio_paths)
+ 
+     merged_gts = [_ for _ in itertools.chain.from_iterable(merged_gts)]
+     merged_sources = [_ for _ in itertools.chain.from_iterable(merged_sources)]
+     merged_audio_paths = [_ for _ in itertools.chain.from_iterable(merged_audio_paths)]
+     merged_responses = [_ for _ in itertools.chain.from_iterable(merged_responses)]
+ 
+     if torch.distributed.get_rank() == 0:
+         print(f"Evaluating {args.dataset} ...")
+ 
+         results = []
+         for gt, response, source, audio_path in zip(merged_gts, merged_responses, merged_sources, merged_audio_paths):
+             results.append({
+                 'gt': gt,
+                 'response': response,
+                 'source': source,
+                 'audio_path': audio_path,
+             })
+         time_prefix = time.strftime('%y%m%d%H%M%S', time.localtime())
+         results_file = f'{args.dataset}_{time_prefix}.json'
+         json.dump(results, open(results_file, 'w'))
+         results_dict = {}
+         for item in tqdm(results):
+             source = item["source"]
+             results_dict.setdefault(source, []).append(item)
+ 
+         for source in results_dict:
+             refs, hyps = [], []
+             results_list = results_dict[source]
+             for result in results_list:
+                 gt = result["gt"]
+                 response = result["response"].lstrip()
+                 refs.append(gt)
+                 hyps.append(response)
+             score = accuracy_score(refs, hyps)
+             print(f"{source} ACC_score:", score, len(hyps))
+ 
+     torch.distributed.barrier()
eval_audio/whisper_normalizer/basic.py ADDED
@@ -0,0 +1,76 @@
+ import re
+ import unicodedata
+ 
+ import regex
+ 
+ # non-ASCII letters that are not separated by "NFKD" normalization
+ ADDITIONAL_DIACRITICS = {
+     "œ": "oe",
+     "Œ": "OE",
+     "ø": "o",
+     "Ø": "O",
+     "æ": "ae",
+     "Æ": "AE",
+     "ß": "ss",
+     "ẞ": "SS",
+     "đ": "d",
+     "Đ": "D",
+     "ð": "d",
+     "Ð": "D",
+     "þ": "th",
+     "Þ": "th",
+     "ł": "l",
+     "Ł": "L",
+ }
+ 
+ 
+ def remove_symbols_and_diacritics(s: str, keep=""):
+     """
+     Replace any other markers, symbols, and punctuations with a space,
+     and drop any diacritics (category 'Mn' and some manual mappings)
+     """
+     return "".join(
+         c
+         if c in keep
+         else ADDITIONAL_DIACRITICS[c]
+         if c in ADDITIONAL_DIACRITICS
+         else ""
+         if unicodedata.category(c) == "Mn"
+         else " "
+         if unicodedata.category(c)[0] in "MSP"
+         else c
+         for c in unicodedata.normalize("NFKD", s)
+     )
+ 
+ 
+ def remove_symbols(s: str):
+     """
+     Replace any other markers, symbols, punctuations with a space, keeping diacritics
+     """
+     return "".join(
+         " " if unicodedata.category(c)[0] in "MSP" else c
+         for c in unicodedata.normalize("NFKC", s)
+     )
+ 
+ 
+ class BasicTextNormalizer:
+     def __init__(self, remove_diacritics: bool = False, split_letters: bool = False):
+         self.clean = (
+             remove_symbols_and_diacritics if remove_diacritics else remove_symbols
+         )
+         self.split_letters = split_letters
+ 
+     def __call__(self, s: str):
+         s = s.lower()
+         s = re.sub(r"[<\[][^>\]]*[>\]]", "", s)  # remove words between brackets
+         s = re.sub(r"\(([^)]+?)\)", "", s)  # remove words between parenthesis
+         s = self.clean(s).lower()
+ 
+         if self.split_letters:
+             s = " ".join(regex.findall(r"\X", s, regex.U))
+ 
+         s = re.sub(
+             r"\s+", " ", s
+         )  # replace any successive whitespace characters with a space
+ 
+         return s
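A short sketch of BasicTextNormalizer in action (the input string is illustrative):

    from whisper_normalizer.basic import BasicTextNormalizer
    norm = BasicTextNormalizer()
    print(norm("Hello, [noise] world"))  # -> "hello world"

Bracketed spans are stripped first, then symbols and punctuation are replaced by spaces and whitespace is collapsed; with remove_diacritics=True, accents are also folded via NFKD (e.g. "é" -> "e").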
eval_audio/whisper_normalizer/english.json ADDED
@@ -0,0 +1,1741 @@
+ {
+     "accessorise": "accessorize",
+     "accessorised": "accessorized",
+     "accessorises": "accessorizes",
+     "accessorising": "accessorizing",
+     "acclimatisation": "acclimatization",
+     "acclimatise": "acclimatize",
+     "acclimatised": "acclimatized",
+     "acclimatises": "acclimatizes",
+     "acclimatising": "acclimatizing",
+     "accoutrements": "accouterments",
+     "aeon": "eon",
+     "aeons": "eons",
+     "aerogramme": "aerogram",
+     "aerogrammes": "aerograms",
+     "aeroplane": "airplane",
+     "aeroplanes": "airplanes",
+     "aesthete": "esthete",
+     "aesthetes": "esthetes",
+     "aesthetic": "esthetic",
+     "aesthetically": "esthetically",
+     "aesthetics": "esthetics",
+     "aetiology": "etiology",
+     "ageing": "aging",
+     "aggrandisement": "aggrandizement",
+     "agonise": "agonize",
+     "agonised": "agonized",
+     "agonises": "agonizes",
+     "agonising": "agonizing",
+     "agonisingly": "agonizingly",
+     "almanack": "almanac",
+     "almanacks": "almanacs",
+     "aluminium": "aluminum",
+     "amortisable": "amortizable",
+     "amortisation": "amortization",
+     "amortisations": "amortizations",
+     "amortise": "amortize",
+     "amortised": "amortized",
+     "amortises": "amortizes",
+     "amortising": "amortizing",
+     "amphitheatre": "amphitheater",
+     "amphitheatres": "amphitheaters",
+     "anaemia": "anemia",
+     "anaemic": "anemic",
+     "anaesthesia": "anesthesia",
+     "anaesthetic": "anesthetic",
+     "anaesthetics": "anesthetics",
+     "anaesthetise": "anesthetize",
+     "anaesthetised": "anesthetized",
+     "anaesthetises": "anesthetizes",
+     "anaesthetising": "anesthetizing",
+     "anaesthetist": "anesthetist",
+     "anaesthetists": "anesthetists",
+     "anaesthetize": "anesthetize",
+     "anaesthetized": "anesthetized",
+     "anaesthetizes": "anesthetizes",
+     "anaesthetizing": "anesthetizing",
+     "analogue": "analog",
+     "analogues": "analogs",
+     "analyse": "analyze",
+     "analysed": "analyzed",
+     "analyses": "analyzes",
+     "analysing": "analyzing",
+     "anglicise": "anglicize",
+     "anglicised": "anglicized",
+     "anglicises": "anglicizes",
+     "anglicising": "anglicizing",
+     "annualised": "annualized",
+     "antagonise": "antagonize",
+     "antagonised": "antagonized",
+     "antagonises": "antagonizes",
+     "antagonising": "antagonizing",
+     "apologise": "apologize",
+     "apologised": "apologized",
+     "apologises": "apologizes",
+     "apologising": "apologizing",
+     "appal": "appall",
+     "appals": "appalls",
+     "appetiser": "appetizer",
+     "appetisers": "appetizers",
+     "appetising": "appetizing",
+     "appetisingly": "appetizingly",
+     "arbour": "arbor",
+     "arbours": "arbors",
+     "archeological": "archaeological",
+     "archaeologically": "archeologically",
+     "archaeologist": "archeologist",
+     "archaeologists": "archeologists",
+     "archaeology": "archeology",
+     "ardour": "ardor",
+     "armour": "armor",
+     "armoured": "armored",
+     "armourer": "armorer",
+     "armourers": "armorers",
+     "armouries": "armories",
+     "armoury": "armory",
+     "artefact": "artifact",
+     "artefacts": "artifacts",
+     "authorise": "authorize",
+     "authorised": "authorized",
+     "authorises": "authorizes",
+     "authorising": "authorizing",
+     "axe": "ax",
+     "backpedalled": "backpedaled",
+     "backpedalling": "backpedaling",
+     "bannister": "banister",
+     "bannisters": "banisters",
+     "baptise": "baptize",
+     "baptised": "baptized",
+     "baptises": "baptizes",
+     "baptising": "baptizing",
+     "bastardise": "bastardize",
+     "bastardised": "bastardized",
+     "bastardises": "bastardizes",
+     "bastardising": "bastardizing",
+     "battleax": "battleaxe",
+     "baulk": "balk",
+     "baulked": "balked",
+     "baulking": "balking",
+     "baulks": "balks",
+     "bedevilled": "bedeviled",
+     "bedevilling": "bedeviling",
+     "behaviour": "behavior",
+     "behavioural": "behavioral",
+     "behaviourism": "behaviorism",
+     "behaviourist": "behaviorist",
+     "behaviourists": "behaviorists",
+     "behaviours": "behaviors",
+     "behove": "behoove",
+     "behoved": "behooved",
+     "behoves": "behooves",
+     "bejewelled": "bejeweled",
+     "belabour": "belabor",
+     "belaboured": "belabored",
+     "belabouring": "belaboring",
+     "belabours": "belabors",
+     "bevelled": "beveled",
+     "bevvies": "bevies",
+     "bevvy": "bevy",
+     "biassed": "biased",
+     "biassing": "biasing",
+     "bingeing": "binging",
+     "bougainvillaea": "bougainvillea",
+     "bougainvillaeas": "bougainvilleas",
+     "bowdlerise": "bowdlerize",
+     "bowdlerised": "bowdlerized",
+     "bowdlerises": "bowdlerizes",
+     "bowdlerising": "bowdlerizing",
+     "breathalyse": "breathalyze",
+     "breathalysed": "breathalyzed",
+     "breathalyser": "breathalyzer",
+     "breathalysers": "breathalyzers",
+     "breathalyses": "breathalyzes",
+     "breathalysing": "breathalyzing",
+     "brutalise": "brutalize",
+     "brutalised": "brutalized",
+     "brutalises": "brutalizes",
+     "brutalising": "brutalizing",
+     "busses": "buses",
+     "bussing": "busing",
+     "caesarean": "cesarean",
+     "caesareans": "cesareans",
+     "calibre": "caliber",
+     "calibres": "calibers",
+     "calliper": "caliper",
+     "callipers": "calipers",
+     "callisthenics": "calisthenics",
+     "canalise": "canalize",
+     "canalised": "canalized",
+     "canalises": "canalizes",
+     "canalising": "canalizing",
+     "cancelation": "cancellation",
+     "cancelations": "cancellations",
+     "cancelled": "canceled",
+     "cancelling": "canceling",
+     "candour": "candor",
+     "cannibalise": "cannibalize",
+     "cannibalised": "cannibalized",
+     "cannibalises": "cannibalizes",
+     "cannibalising": "cannibalizing",
+     "canonise": "canonize",
+     "canonised": "canonized",
+     "canonises": "canonizes",
+     "canonising": "canonizing",
+     "capitalise": "capitalize",
+     "capitalised": "capitalized",
+     "capitalises": "capitalizes",
+     "capitalising": "capitalizing",
+     "caramelise": "caramelize",
+     "caramelised": "caramelized",
+     "caramelises": "caramelizes",
+     "caramelising": "caramelizing",
+     "carbonise": "carbonize",
+     "carbonised": "carbonized",
+     "carbonises": "carbonizes",
+     "carbonising": "carbonizing",
+     "carolled": "caroled",
+     "carolling": "caroling",
+     "catalogue": "catalog",
+     "catalogued": "cataloged",
+     "catalogues": "catalogs",
+     "cataloguing": "cataloging",
+     "catalyse": "catalyze",
+     "catalysed": "catalyzed",
+     "catalyses": "catalyzes",
+     "catalysing": "catalyzing",
+     "categorise": "categorize",
+     "categorised": "categorized",
+     "categorises": "categorizes",
+     "categorising": "categorizing",
+     "cauterise": "cauterize",
+     "cauterised": "cauterized",
+     "cauterises": "cauterizes",
+     "cauterising": "cauterizing",
+     "cavilled": "caviled",
+     "cavilling": "caviling",
+     "centigramme": "centigram",
+     "centigrammes": "centigrams",
+     "centilitre": "centiliter",
+     "centilitres": "centiliters",
+     "centimetre": "centimeter",
+     "centimetres": "centimeters",
+     "centralise": "centralize",
+     "centralised": "centralized",
+     "centralises": "centralizes",
+     "centralising": "centralizing",
+     "centre": "center",
+     "centred": "centered",
+     "centrefold": "centerfold",
+     "centrefolds": "centerfolds",
+     "centrepiece": "centerpiece",
+     "centrepieces": "centerpieces",
+     "centres": "centers",
+     "channelled": "channeled",
+     "channelling": "channeling",
+     "characterise": "characterize",
+     "characterised": "characterized",
+     "characterises": "characterizes",
+     "characterising": "characterizing",
+     "cheque": "check",
+     "chequebook": "checkbook",
+     "chequebooks": "checkbooks",
+     "chequered": "checkered",
+     "cheques": "checks",
+     "chilli": "chili",
+     "chimaera": "chimera",
+     "chimaeras": "chimeras",
+     "chiselled": "chiseled",
+     "chiselling": "chiseling",
+     "circularise": "circularize",
+     "circularised": "circularized",
+     "circularises": "circularizes",
+     "circularising": "circularizing",
+     "civilise": "civilize",
+     "civilised": "civilized",
+     "civilises": "civilizes",
+     "civilising": "civilizing",
+     "clamour": "clamor",
+     "clamoured": "clamored",
+     "clamouring": "clamoring",
+     "clamours": "clamors",
+     "clangour": "clangor",
+     "clarinettist": "clarinetist",
+     "clarinettists": "clarinetists",
+     "collectivise": "collectivize",
+     "collectivised": "collectivized",
+     "collectivises": "collectivizes",
+     "collectivising": "collectivizing",
+     "colonisation": "colonization",
+     "colonise": "colonize",
+     "colonised": "colonized",
+     "coloniser": "colonizer",
+     "colonisers": "colonizers",
+     "colonises": "colonizes",
+     "colonising": "colonizing",
+     "colour": "color",
+     "colourant": "colorant",
+     "colourants": "colorants",
+     "coloured": "colored",
+     "coloureds": "coloreds",
+     "colourful": "colorful",
+     "colourfully": "colorfully",
+     "colouring": "coloring",
+     "colourize": "colorize",
+     "colourized": "colorized",
+     "colourizes": "colorizes",
+     "colourizing": "colorizing",
+     "colourless": "colorless",
+     "colours": "colors",
+     "commercialise": "commercialize",
+     "commercialised": "commercialized",
+     "commercialises": "commercializes",
+     "commercialising": "commercializing",
+     "compartmentalise": "compartmentalize",
+     "compartmentalised": "compartmentalized",
+     "compartmentalises": "compartmentalizes",
+     "compartmentalising": "compartmentalizing",
+     "computerise": "computerize",
+     "computerised": "computerized",
+     "computerises": "computerizes",
+     "computerising": "computerizing",
+     "conceptualise": "conceptualize",
+     "conceptualised": "conceptualized",
+     "conceptualises": "conceptualizes",
+     "conceptualising": "conceptualizing",
+     "connexion": "connection",
+     "connexions": "connections",
+     "contextualise": "contextualize",
+     "contextualised": "contextualized",
+     "contextualises": "contextualizes",
+     "contextualising": "contextualizing",
+     "cosier": "cozier",
+     "cosies": "cozies",
+     "cosiest": "coziest",
+     "cosily": "cozily",
+     "cosiness": "coziness",
+     "cosy": "cozy",
+     "councillor": "councilor",
+     "councillors": "councilors",
+     "counselled": "counseled",
+     "counselling": "counseling",
+     "counsellor": "counselor",
+     "counsellors": "counselors",
+     "crenelated": "crenellated",
+     "criminalise": "criminalize",
+     "criminalised": "criminalized",
+     "criminalises": "criminalizes",
+     "criminalising": "criminalizing",
+     "criticise": "criticize",
+     "criticised": "criticized",
+     "criticises": "criticizes",
+     "criticising": "criticizing",
+     "crueller": "crueler",
+     "cruellest": "cruelest",
+     "crystallisation": "crystallization",
+     "crystallise": "crystallize",
+     "crystallised": "crystallized",
+     "crystallises": "crystallizes",
+     "crystallising": "crystallizing",
+     "cudgelled": "cudgeled",
+     "cudgelling": "cudgeling",
+     "customise": "customize",
+     "customised": "customized",
+     "customises": "customizes",
+     "customising": "customizing",
+     "cypher": "cipher",
+     "cyphers": "ciphers",
+     "decentralisation": "decentralization",
+     "decentralise": "decentralize",
+     "decentralised": "decentralized",
+     "decentralises": "decentralizes",
+     "decentralising": "decentralizing",
+     "decriminalisation": "decriminalization",
+     "decriminalise": "decriminalize",
+     "decriminalised": "decriminalized",
+     "decriminalises": "decriminalizes",
+     "decriminalising": "decriminalizing",
+     "defence": "defense",
+     "defenceless": "defenseless",
+     "defences": "defenses",
+     "dehumanisation": "dehumanization",
+     "dehumanise": "dehumanize",
+     "dehumanised": "dehumanized",
+     "dehumanises": "dehumanizes",
+     "dehumanising": "dehumanizing",
+     "demeanour": "demeanor",
+     "demilitarisation": "demilitarization",
+     "demilitarise": "demilitarize",
+     "demilitarised": "demilitarized",
+     "demilitarises": "demilitarizes",
+     "demilitarising": "demilitarizing",
+     "demobilisation": "demobilization",
+     "demobilise": "demobilize",
+     "demobilised": "demobilized",
+     "demobilises": "demobilizes",
+     "demobilising": "demobilizing",
+     "democratisation": "democratization",
+     "democratise": "democratize",
+     "democratised": "democratized",
+     "democratises": "democratizes",
+     "democratising": "democratizing",
+     "demonise": "demonize",
+     "demonised": "demonized",
+     "demonises": "demonizes",
+     "demonising": "demonizing",
+     "demoralisation": "demoralization",
+     "demoralise": "demoralize",
+     "demoralised": "demoralized",
+     "demoralises": "demoralizes",
390
+ "demoralising": "demoralizing",
391
+ "denationalisation": "denationalization",
392
+ "denationalise": "denationalize",
393
+ "denationalised": "denationalized",
394
+ "denationalises": "denationalizes",
395
+ "denationalising": "denationalizing",
396
+ "deodorise": "deodorize",
397
+ "deodorised": "deodorized",
398
+ "deodorises": "deodorizes",
399
+ "deodorising": "deodorizing",
400
+ "depersonalise": "depersonalize",
401
+ "depersonalised": "depersonalized",
402
+ "depersonalises": "depersonalizes",
403
+ "depersonalising": "depersonalizing",
404
+ "deputise": "deputize",
405
+ "deputised": "deputized",
406
+ "deputises": "deputizes",
407
+ "deputising": "deputizing",
408
+ "desensitisation": "desensitization",
409
+ "desensitise": "desensitize",
410
+ "desensitised": "desensitized",
411
+ "desensitises": "desensitizes",
412
+ "desensitising": "desensitizing",
413
+ "destabilisation": "destabilization",
414
+ "destabilise": "destabilize",
415
+ "destabilised": "destabilized",
416
+ "destabilises": "destabilizes",
417
+ "destabilising": "destabilizing",
418
+ "dialled": "dialed",
419
+ "dialling": "dialing",
420
+ "dialogue": "dialog",
421
+ "dialogues": "dialogs",
422
+ "diarrhoea": "diarrhea",
423
+ "digitise": "digitize",
424
+ "digitised": "digitized",
425
+ "digitises": "digitizes",
426
+ "digitising": "digitizing",
427
+ "disc": "disk",
428
+ "discolour": "discolor",
429
+ "discoloured": "discolored",
430
+ "discolouring": "discoloring",
431
+ "discolours": "discolors",
432
+ "discs": "disks",
433
+ "disembowelled": "disemboweled",
434
+ "disembowelling": "disemboweling",
435
+ "disfavour": "disfavor",
436
+ "dishevelled": "disheveled",
437
+ "dishonour": "dishonor",
438
+ "dishonourable": "dishonorable",
439
+ "dishonourably": "dishonorably",
440
+ "dishonoured": "dishonored",
441
+ "dishonouring": "dishonoring",
442
+ "dishonours": "dishonors",
443
+ "disorganisation": "disorganization",
444
+ "disorganised": "disorganized",
445
+ "distil": "distill",
446
+ "distils": "distills",
447
+ "dramatisation": "dramatization",
448
+ "dramatisations": "dramatizations",
449
+ "dramatise": "dramatize",
450
+ "dramatised": "dramatized",
451
+ "dramatises": "dramatizes",
452
+ "dramatising": "dramatizing",
453
+ "draught": "draft",
454
+ "draughtboard": "draftboard",
455
+ "draughtboards": "draftboards",
456
+ "draughtier": "draftier",
457
+ "draughtiest": "draftiest",
458
+ "draughts": "drafts",
459
+ "draughtsman": "draftsman",
460
+ "draughtsmanship": "draftsmanship",
461
+ "draughtsmen": "draftsmen",
462
+ "draughtswoman": "draftswoman",
463
+ "draughtswomen": "draftswomen",
464
+ "draughty": "drafty",
465
+ "drivelled": "driveled",
466
+ "drivelling": "driveling",
467
+ "duelled": "dueled",
468
+ "duelling": "dueling",
469
+ "economise": "economize",
470
+ "economised": "economized",
471
+ "economises": "economizes",
472
+ "economising": "economizing",
473
+ "edoema": "edema",
474
+ "editorialise": "editorialize",
475
+ "editorialised": "editorialized",
476
+ "editorialises": "editorializes",
477
+ "editorialising": "editorializing",
478
+ "empathise": "empathize",
479
+ "empathised": "empathized",
480
+ "empathises": "empathizes",
481
+ "empathising": "empathizing",
482
+ "emphasise": "emphasize",
483
+ "emphasised": "emphasized",
484
+ "emphasises": "emphasizes",
485
+ "emphasising": "emphasizing",
486
+ "enamelled": "enameled",
487
+ "enamelling": "enameling",
488
+ "enamoured": "enamored",
489
+ "encyclopaedia": "encyclopedia",
490
+ "encyclopaedias": "encyclopedias",
491
+ "encyclopaedic": "encyclopedic",
492
+ "endeavour": "endeavor",
493
+ "endeavoured": "endeavored",
494
+ "endeavouring": "endeavoring",
495
+ "endeavours": "endeavors",
496
+ "energise": "energize",
497
+ "energised": "energized",
498
+ "energises": "energizes",
499
+ "energising": "energizing",
500
+ "enrol": "enroll",
501
+ "enrols": "enrolls",
502
+ "enthral": "enthrall",
503
+ "enthrals": "enthralls",
504
+ "epaulette": "epaulet",
505
+ "epaulettes": "epaulets",
506
+ "epicentre": "epicenter",
507
+ "epicentres": "epicenters",
508
+ "epilogue": "epilog",
509
+ "epilogues": "epilogs",
510
+ "epitomise": "epitomize",
511
+ "epitomised": "epitomized",
512
+ "epitomises": "epitomizes",
513
+ "epitomising": "epitomizing",
514
+ "equalisation": "equalization",
515
+ "equalise": "equalize",
516
+ "equalised": "equalized",
517
+ "equaliser": "equalizer",
518
+ "equalisers": "equalizers",
519
+ "equalises": "equalizes",
520
+ "equalising": "equalizing",
521
+ "eulogise": "eulogize",
522
+ "eulogised": "eulogized",
523
+ "eulogises": "eulogizes",
524
+ "eulogising": "eulogizing",
525
+ "evangelise": "evangelize",
526
+ "evangelised": "evangelized",
527
+ "evangelises": "evangelizes",
528
+ "evangelising": "evangelizing",
529
+ "exorcise": "exorcize",
530
+ "exorcised": "exorcized",
531
+ "exorcises": "exorcizes",
532
+ "exorcising": "exorcizing",
533
+ "extemporisation": "extemporization",
534
+ "extemporise": "extemporize",
535
+ "extemporised": "extemporized",
536
+ "extemporises": "extemporizes",
537
+ "extemporising": "extemporizing",
538
+ "externalisation": "externalization",
539
+ "externalisations": "externalizations",
540
+ "externalise": "externalize",
541
+ "externalised": "externalized",
542
+ "externalises": "externalizes",
543
+ "externalising": "externalizing",
544
+ "factorise": "factorize",
545
+ "factorised": "factorized",
546
+ "factorises": "factorizes",
547
+ "factorising": "factorizing",
548
+ "faecal": "fecal",
549
+ "faeces": "feces",
550
+ "familiarisation": "familiarization",
551
+ "familiarise": "familiarize",
552
+ "familiarised": "familiarized",
553
+ "familiarises": "familiarizes",
554
+ "familiarising": "familiarizing",
555
+ "fantasise": "fantasize",
556
+ "fantasised": "fantasized",
557
+ "fantasises": "fantasizes",
558
+ "fantasising": "fantasizing",
559
+ "favour": "favor",
560
+ "favourable": "favorable",
561
+ "favourably": "favorably",
562
+ "favoured": "favored",
563
+ "favouring": "favoring",
564
+ "favourite": "favorite",
565
+ "favourites": "favorites",
566
+ "favouritism": "favoritism",
567
+ "favours": "favors",
568
+ "feminise": "feminize",
569
+ "feminised": "feminized",
570
+ "feminises": "feminizes",
571
+ "feminising": "feminizing",
572
+ "fertilisation": "fertilization",
573
+ "fertilise": "fertilize",
574
+ "fertilised": "fertilized",
575
+ "fertiliser": "fertilizer",
576
+ "fertilisers": "fertilizers",
577
+ "fertilises": "fertilizes",
578
+ "fertilising": "fertilizing",
579
+ "fervour": "fervor",
580
+ "fibre": "fiber",
581
+ "fibreglass": "fiberglass",
582
+ "fibres": "fibers",
583
+ "fictionalisation": "fictionalization",
584
+ "fictionalisations": "fictionalizations",
585
+ "fictionalise": "fictionalize",
586
+ "fictionalised": "fictionalized",
587
+ "fictionalises": "fictionalizes",
588
+ "fictionalising": "fictionalizing",
589
+ "fillet": "filet",
590
+ "filleted": "fileted",
591
+ "filleting": "fileting",
592
+ "fillets": "filets",
593
+ "finalisation": "finalization",
594
+ "finalise": "finalize",
595
+ "finalised": "finalized",
596
+ "finalises": "finalizes",
597
+ "finalising": "finalizing",
598
+ "flautist": "flutist",
599
+ "flautists": "flutists",
600
+ "flavour": "flavor",
601
+ "flavoured": "flavored",
602
+ "flavouring": "flavoring",
603
+ "flavourings": "flavorings",
604
+ "flavourless": "flavorless",
605
+ "flavours": "flavors",
606
+ "flavoursome": "flavorsome",
607
+ "flyer / flier": "flier / flyer",
608
+ "foetal": "fetal",
609
+ "foetid": "fetid",
610
+ "foetus": "fetus",
611
+ "foetuses": "fetuses",
612
+ "formalisation": "formalization",
613
+ "formalise": "formalize",
614
+ "formalised": "formalized",
615
+ "formalises": "formalizes",
616
+ "formalising": "formalizing",
617
+ "fossilisation": "fossilization",
618
+ "fossilise": "fossilize",
619
+ "fossilised": "fossilized",
620
+ "fossilises": "fossilizes",
621
+ "fossilising": "fossilizing",
622
+ "fraternisation": "fraternization",
623
+ "fraternise": "fraternize",
624
+ "fraternised": "fraternized",
625
+ "fraternises": "fraternizes",
626
+ "fraternising": "fraternizing",
627
+ "fulfil": "fulfill",
628
+ "fulfilment": "fulfillment",
629
+ "fulfils": "fulfills",
630
+ "funnelled": "funneled",
631
+ "funnelling": "funneling",
632
+ "galvanise": "galvanize",
633
+ "galvanised": "galvanized",
634
+ "galvanises": "galvanizes",
635
+ "galvanising": "galvanizing",
636
+ "gambolled": "gamboled",
637
+ "gambolling": "gamboling",
638
+ "gaol": "jail",
639
+ "gaolbird": "jailbird",
640
+ "gaolbirds": "jailbirds",
641
+ "gaolbreak": "jailbreak",
642
+ "gaolbreaks": "jailbreaks",
643
+ "gaoled": "jailed",
644
+ "gaoler": "jailer",
645
+ "gaolers": "jailers",
646
+ "gaoling": "jailing",
647
+ "gaols": "jails",
648
+ "gasses": "gases",
649
+ "gage": "gauge",
650
+ "gaged": "gauged",
651
+ "gages": "gauges",
652
+ "gaging": "gauging",
653
+ "generalisation": "generalization",
654
+ "generalisations": "generalizations",
655
+ "generalise": "generalize",
656
+ "generalised": "generalized",
657
+ "generalises": "generalizes",
658
+ "generalising": "generalizing",
659
+ "ghettoise": "ghettoize",
660
+ "ghettoised": "ghettoized",
661
+ "ghettoises": "ghettoizes",
662
+ "ghettoising": "ghettoizing",
663
+ "gipsies": "gypsies",
664
+ "glamorise": "glamorize",
665
+ "glamorised": "glamorized",
666
+ "glamorises": "glamorizes",
667
+ "glamorising": "glamorizing",
668
+ "glamor": "glamour",
669
+ "globalisation": "globalization",
670
+ "globalise": "globalize",
671
+ "globalised": "globalized",
672
+ "globalises": "globalizes",
673
+ "globalising": "globalizing",
674
+ "glueing": "gluing",
675
+ "goitre": "goiter",
676
+ "goitres": "goiters",
677
+ "gonorrhoea": "gonorrhea",
678
+ "gramme": "gram",
679
+ "grammes": "grams",
680
+ "gravelled": "graveled",
681
+ "grey": "gray",
682
+ "greyed": "grayed",
683
+ "greying": "graying",
684
+ "greyish": "grayish",
685
+ "greyness": "grayness",
686
+ "greys": "grays",
687
+ "grovelled": "groveled",
688
+ "grovelling": "groveling",
689
+ "groyne": "groin",
690
+ "groynes": "groins",
691
+ "gruelling": "grueling",
692
+ "gruellingly": "gruelingly",
693
+ "gryphon": "griffin",
694
+ "gryphons": "griffins",
695
+ "gynaecological": "gynecological",
696
+ "gynaecologist": "gynecologist",
697
+ "gynaecologists": "gynecologists",
698
+ "gynaecology": "gynecology",
699
+ "haematological": "hematological",
700
+ "haematologist": "hematologist",
701
+ "haematologists": "hematologists",
702
+ "haematology": "hematology",
703
+ "haemoglobin": "hemoglobin",
704
+ "haemophilia": "hemophilia",
705
+ "haemophiliac": "hemophiliac",
706
+ "haemophiliacs": "hemophiliacs",
707
+ "haemorrhage": "hemorrhage",
708
+ "haemorrhaged": "hemorrhaged",
709
+ "haemorrhages": "hemorrhages",
710
+ "haemorrhaging": "hemorrhaging",
711
+ "haemorrhoids": "hemorrhoids",
712
+ "harbour": "harbor",
713
+ "harboured": "harbored",
714
+ "harbouring": "harboring",
715
+ "harbours": "harbors",
716
+ "harmonisation": "harmonization",
717
+ "harmonise": "harmonize",
718
+ "harmonised": "harmonized",
719
+ "harmonises": "harmonizes",
720
+ "harmonising": "harmonizing",
721
+ "homoeopath": "homeopath",
722
+ "homoeopathic": "homeopathic",
723
+ "homoeopaths": "homeopaths",
724
+ "homoeopathy": "homeopathy",
725
+ "homogenise": "homogenize",
726
+ "homogenised": "homogenized",
727
+ "homogenises": "homogenizes",
728
+ "homogenising": "homogenizing",
729
+ "honour": "honor",
730
+ "honourable": "honorable",
731
+ "honourably": "honorably",
732
+ "honoured": "honored",
733
+ "honouring": "honoring",
734
+ "honours": "honors",
735
+ "hospitalisation": "hospitalization",
736
+ "hospitalise": "hospitalize",
737
+ "hospitalised": "hospitalized",
738
+ "hospitalises": "hospitalizes",
739
+ "hospitalising": "hospitalizing",
740
+ "humanise": "humanize",
741
+ "humanised": "humanized",
742
+ "humanises": "humanizes",
743
+ "humanising": "humanizing",
744
+ "humour": "humor",
745
+ "humoured": "humored",
746
+ "humouring": "humoring",
747
+ "humourless": "humorless",
748
+ "humours": "humors",
749
+ "hybridise": "hybridize",
750
+ "hybridised": "hybridized",
751
+ "hybridises": "hybridizes",
752
+ "hybridising": "hybridizing",
753
+ "hypnotise": "hypnotize",
754
+ "hypnotised": "hypnotized",
755
+ "hypnotises": "hypnotizes",
756
+ "hypnotising": "hypnotizing",
757
+ "hypothesise": "hypothesize",
758
+ "hypothesised": "hypothesized",
759
+ "hypothesises": "hypothesizes",
760
+ "hypothesising": "hypothesizing",
761
+ "idealisation": "idealization",
762
+ "idealise": "idealize",
763
+ "idealised": "idealized",
764
+ "idealises": "idealizes",
765
+ "idealising": "idealizing",
766
+ "idolise": "idolize",
767
+ "idolised": "idolized",
768
+ "idolises": "idolizes",
769
+ "idolising": "idolizing",
770
+ "immobilisation": "immobilization",
771
+ "immobilise": "immobilize",
772
+ "immobilised": "immobilized",
773
+ "immobiliser": "immobilizer",
774
+ "immobilisers": "immobilizers",
775
+ "immobilises": "immobilizes",
776
+ "immobilising": "immobilizing",
777
+ "immortalise": "immortalize",
778
+ "immortalised": "immortalized",
779
+ "immortalises": "immortalizes",
780
+ "immortalising": "immortalizing",
781
+ "immunisation": "immunization",
782
+ "immunise": "immunize",
783
+ "immunised": "immunized",
784
+ "immunises": "immunizes",
785
+ "immunising": "immunizing",
786
+ "impanelled": "impaneled",
787
+ "impanelling": "impaneling",
788
+ "imperilled": "imperiled",
789
+ "imperilling": "imperiling",
790
+ "individualise": "individualize",
791
+ "individualised": "individualized",
792
+ "individualises": "individualizes",
793
+ "individualising": "individualizing",
794
+ "industrialise": "industrialize",
795
+ "industrialised": "industrialized",
796
+ "industrialises": "industrializes",
797
+ "industrialising": "industrializing",
798
+ "inflexion": "inflection",
799
+ "inflexions": "inflections",
800
+ "initialise": "initialize",
801
+ "initialised": "initialized",
802
+ "initialises": "initializes",
803
+ "initialising": "initializing",
804
+ "initialled": "initialed",
805
+ "initialling": "initialing",
806
+ "instal": "install",
807
+ "instalment": "installment",
808
+ "instalments": "installments",
809
+ "instals": "installs",
810
+ "instil": "instill",
811
+ "instils": "instills",
812
+ "institutionalisation": "institutionalization",
813
+ "institutionalise": "institutionalize",
814
+ "institutionalised": "institutionalized",
815
+ "institutionalises": "institutionalizes",
816
+ "institutionalising": "institutionalizing",
817
+ "intellectualise": "intellectualize",
818
+ "intellectualised": "intellectualized",
819
+ "intellectualises": "intellectualizes",
820
+ "intellectualising": "intellectualizing",
821
+ "internalisation": "internalization",
822
+ "internalise": "internalize",
823
+ "internalised": "internalized",
824
+ "internalises": "internalizes",
825
+ "internalising": "internalizing",
826
+ "internationalisation": "internationalization",
827
+ "internationalise": "internationalize",
828
+ "internationalised": "internationalized",
829
+ "internationalises": "internationalizes",
830
+ "internationalising": "internationalizing",
831
+ "ionisation": "ionization",
832
+ "ionise": "ionize",
833
+ "ionised": "ionized",
834
+ "ioniser": "ionizer",
835
+ "ionisers": "ionizers",
836
+ "ionises": "ionizes",
837
+ "ionising": "ionizing",
838
+ "italicise": "italicize",
839
+ "italicised": "italicized",
840
+ "italicises": "italicizes",
841
+ "italicising": "italicizing",
842
+ "itemise": "itemize",
843
+ "itemised": "itemized",
844
+ "itemises": "itemizes",
845
+ "itemising": "itemizing",
846
+ "jeopardise": "jeopardize",
847
+ "jeopardised": "jeopardized",
848
+ "jeopardises": "jeopardizes",
849
+ "jeopardising": "jeopardizing",
850
+ "jewelled": "jeweled",
851
+ "jeweller": "jeweler",
852
+ "jewellers": "jewelers",
853
+ "jewellery": "jewelry",
854
+ "judgement": "judgment",
855
+ "kilogramme": "kilogram",
856
+ "kilogrammes": "kilograms",
857
+ "kilometre": "kilometer",
858
+ "kilometres": "kilometers",
859
+ "labelled": "labeled",
860
+ "labelling": "labeling",
861
+ "labour": "labor",
862
+ "laboured": "labored",
863
+ "labourer": "laborer",
864
+ "labourers": "laborers",
865
+ "labouring": "laboring",
866
+ "labours": "labors",
867
+ "lacklustre": "lackluster",
868
+ "legalisation": "legalization",
869
+ "legalise": "legalize",
870
+ "legalised": "legalized",
871
+ "legalises": "legalizes",
872
+ "legalising": "legalizing",
873
+ "legitimise": "legitimize",
874
+ "legitimised": "legitimized",
875
+ "legitimises": "legitimizes",
876
+ "legitimising": "legitimizing",
877
+ "leukaemia": "leukemia",
878
+ "levelled": "leveled",
879
+ "leveller": "leveler",
880
+ "levellers": "levelers",
881
+ "levelling": "leveling",
882
+ "libelled": "libeled",
883
+ "libelling": "libeling",
884
+ "libellous": "libelous",
885
+ "liberalisation": "liberalization",
886
+ "liberalise": "liberalize",
887
+ "liberalised": "liberalized",
888
+ "liberalises": "liberalizes",
889
+ "liberalising": "liberalizing",
890
+ "licence": "license",
891
+ "licenced": "licensed",
892
+ "licences": "licenses",
893
+ "licencing": "licensing",
894
+ "likeable": "likable",
895
+ "lionisation": "lionization",
896
+ "lionise": "lionize",
897
+ "lionised": "lionized",
898
+ "lionises": "lionizes",
899
+ "lionising": "lionizing",
900
+ "liquidise": "liquidize",
901
+ "liquidised": "liquidized",
902
+ "liquidiser": "liquidizer",
903
+ "liquidisers": "liquidizers",
904
+ "liquidises": "liquidizes",
905
+ "liquidising": "liquidizing",
906
+ "litre": "liter",
907
+ "litres": "liters",
908
+ "localise": "localize",
909
+ "localised": "localized",
910
+ "localises": "localizes",
911
+ "localising": "localizing",
912
+ "louvre": "louver",
913
+ "louvred": "louvered",
914
+ "louvres": "louvers",
915
+ "lustre": "luster",
916
+ "magnetise": "magnetize",
917
+ "magnetised": "magnetized",
918
+ "magnetises": "magnetizes",
919
+ "magnetising": "magnetizing",
920
+ "manoeuvrability": "maneuverability",
921
+ "manoeuvrable": "maneuverable",
922
+ "manoeuvre": "maneuver",
923
+ "manoeuvred": "maneuvered",
924
+ "manoeuvres": "maneuvers",
925
+ "manoeuvring": "maneuvering",
926
+ "manoeuvrings": "maneuverings",
927
+ "marginalisation": "marginalization",
928
+ "marginalise": "marginalize",
929
+ "marginalised": "marginalized",
930
+ "marginalises": "marginalizes",
931
+ "marginalising": "marginalizing",
932
+ "marshalled": "marshaled",
933
+ "marshalling": "marshaling",
934
+ "marvelled": "marveled",
935
+ "marvelling": "marveling",
936
+ "marvellous": "marvelous",
937
+ "marvellously": "marvelously",
938
+ "materialisation": "materialization",
939
+ "materialise": "materialize",
940
+ "materialised": "materialized",
941
+ "materialises": "materializes",
942
+ "materialising": "materializing",
943
+ "maximisation": "maximization",
944
+ "maximise": "maximize",
945
+ "maximised": "maximized",
946
+ "maximises": "maximizes",
947
+ "maximising": "maximizing",
948
+ "meagre": "meager",
949
+ "mechanisation": "mechanization",
950
+ "mechanise": "mechanize",
951
+ "mechanised": "mechanized",
952
+ "mechanises": "mechanizes",
953
+ "mechanising": "mechanizing",
954
+ "mediaeval": "medieval",
955
+ "memorialise": "memorialize",
956
+ "memorialised": "memorialized",
957
+ "memorialises": "memorializes",
958
+ "memorialising": "memorializing",
959
+ "memorise": "memorize",
960
+ "memorised": "memorized",
961
+ "memorises": "memorizes",
962
+ "memorising": "memorizing",
963
+ "mesmerise": "mesmerize",
964
+ "mesmerised": "mesmerized",
965
+ "mesmerises": "mesmerizes",
966
+ "mesmerising": "mesmerizing",
967
+ "metabolise": "metabolize",
968
+ "metabolised": "metabolized",
969
+ "metabolises": "metabolizes",
970
+ "metabolising": "metabolizing",
971
+ "metre": "meter",
972
+ "metres": "meters",
973
+ "micrometre": "micrometer",
974
+ "micrometres": "micrometers",
975
+ "militarise": "militarize",
976
+ "militarised": "militarized",
977
+ "militarises": "militarizes",
978
+ "militarising": "militarizing",
979
+ "milligramme": "milligram",
980
+ "milligrammes": "milligrams",
981
+ "millilitre": "milliliter",
982
+ "millilitres": "milliliters",
983
+ "millimetre": "millimeter",
984
+ "millimetres": "millimeters",
985
+ "miniaturisation": "miniaturization",
986
+ "miniaturise": "miniaturize",
987
+ "miniaturised": "miniaturized",
988
+ "miniaturises": "miniaturizes",
989
+ "miniaturising": "miniaturizing",
990
+ "minibusses": "minibuses",
991
+ "minimise": "minimize",
992
+ "minimised": "minimized",
993
+ "minimises": "minimizes",
994
+ "minimising": "minimizing",
995
+ "misbehaviour": "misbehavior",
996
+ "misdemeanour": "misdemeanor",
997
+ "misdemeanours": "misdemeanors",
998
+ "misspelt": "misspelled",
999
+ "mitre": "miter",
1000
+ "mitres": "miters",
1001
+ "mobilisation": "mobilization",
1002
+ "mobilise": "mobilize",
1003
+ "mobilised": "mobilized",
1004
+ "mobilises": "mobilizes",
1005
+ "mobilising": "mobilizing",
1006
+ "modelled": "modeled",
1007
+ "modeller": "modeler",
1008
+ "modellers": "modelers",
1009
+ "modelling": "modeling",
1010
+ "modernise": "modernize",
1011
+ "modernised": "modernized",
1012
+ "modernises": "modernizes",
1013
+ "modernising": "modernizing",
1014
+ "moisturise": "moisturize",
1015
+ "moisturised": "moisturized",
1016
+ "moisturiser": "moisturizer",
1017
+ "moisturisers": "moisturizers",
1018
+ "moisturises": "moisturizes",
1019
+ "moisturising": "moisturizing",
1020
+ "monologue": "monolog",
1021
+ "monologues": "monologs",
1022
+ "monopolisation": "monopolization",
1023
+ "monopolise": "monopolize",
1024
+ "monopolised": "monopolized",
1025
+ "monopolises": "monopolizes",
1026
+ "monopolising": "monopolizing",
1027
+ "moralise": "moralize",
1028
+ "moralised": "moralized",
1029
+ "moralises": "moralizes",
1030
+ "moralising": "moralizing",
1031
+ "motorised": "motorized",
1032
+ "mould": "mold",
1033
+ "moulded": "molded",
1034
+ "moulder": "molder",
1035
+ "mouldered": "moldered",
1036
+ "mouldering": "moldering",
1037
+ "moulders": "molders",
1038
+ "mouldier": "moldier",
1039
+ "mouldiest": "moldiest",
1040
+ "moulding": "molding",
1041
+ "mouldings": "moldings",
1042
+ "moulds": "molds",
1043
+ "mouldy": "moldy",
1044
+ "moult": "molt",
1045
+ "moulted": "molted",
1046
+ "moulting": "molting",
1047
+ "moults": "molts",
1048
+ "moustache": "mustache",
1049
+ "moustached": "mustached",
1050
+ "moustaches": "mustaches",
1051
+ "moustachioed": "mustachioed",
1052
+ "multicoloured": "multicolored",
1053
+ "nationalisation": "nationalization",
1054
+ "nationalisations": "nationalizations",
1055
+ "nationalise": "nationalize",
1056
+ "nationalised": "nationalized",
1057
+ "nationalises": "nationalizes",
1058
+ "nationalising": "nationalizing",
1059
+ "naturalisation": "naturalization",
1060
+ "naturalise": "naturalize",
1061
+ "naturalised": "naturalized",
1062
+ "naturalises": "naturalizes",
1063
+ "naturalising": "naturalizing",
1064
+ "neighbour": "neighbor",
1065
+ "neighbourhood": "neighborhood",
1066
+ "neighbourhoods": "neighborhoods",
1067
+ "neighbouring": "neighboring",
1068
+ "neighbourliness": "neighborliness",
1069
+ "neighbourly": "neighborly",
1070
+ "neighbours": "neighbors",
1071
+ "neutralisation": "neutralization",
1072
+ "neutralise": "neutralize",
1073
+ "neutralised": "neutralized",
1074
+ "neutralises": "neutralizes",
1075
+ "neutralising": "neutralizing",
1076
+ "normalisation": "normalization",
1077
+ "normalise": "normalize",
1078
+ "normalised": "normalized",
1079
+ "normalises": "normalizes",
1080
+ "normalising": "normalizing",
1081
+ "odour": "odor",
1082
+ "odourless": "odorless",
1083
+ "odours": "odors",
1084
+ "oesophagus": "esophagus",
1085
+ "oesophaguses": "esophaguses",
1086
+ "oestrogen": "estrogen",
1087
+ "offence": "offense",
1088
+ "offences": "offenses",
1089
+ "omelette": "omelet",
1090
+ "omelettes": "omelets",
1091
+ "optimise": "optimize",
1092
+ "optimised": "optimized",
1093
+ "optimises": "optimizes",
1094
+ "optimising": "optimizing",
1095
+ "organisation": "organization",
1096
+ "organisational": "organizational",
1097
+ "organisations": "organizations",
1098
+ "organise": "organize",
1099
+ "organised": "organized",
1100
+ "organiser": "organizer",
1101
+ "organisers": "organizers",
1102
+ "organises": "organizes",
1103
+ "organising": "organizing",
1104
+ "orthopaedic": "orthopedic",
1105
+ "orthopaedics": "orthopedics",
1106
+ "ostracise": "ostracize",
1107
+ "ostracised": "ostracized",
1108
+ "ostracises": "ostracizes",
1109
+ "ostracising": "ostracizing",
1110
+ "outmanoeuvre": "outmaneuver",
1111
+ "outmanoeuvred": "outmaneuvered",
1112
+ "outmanoeuvres": "outmaneuvers",
1113
+ "outmanoeuvring": "outmaneuvering",
1114
+ "overemphasise": "overemphasize",
1115
+ "overemphasised": "overemphasized",
1116
+ "overemphasises": "overemphasizes",
1117
+ "overemphasising": "overemphasizing",
1118
+ "oxidisation": "oxidization",
1119
+ "oxidise": "oxidize",
1120
+ "oxidised": "oxidized",
1121
+ "oxidises": "oxidizes",
1122
+ "oxidising": "oxidizing",
1123
+ "paederast": "pederast",
1124
+ "paederasts": "pederasts",
1125
+ "paediatric": "pediatric",
1126
+ "paediatrician": "pediatrician",
1127
+ "paediatricians": "pediatricians",
1128
+ "paediatrics": "pediatrics",
1129
+ "paedophile": "pedophile",
1130
+ "paedophiles": "pedophiles",
1131
+ "paedophilia": "pedophilia",
1132
+ "palaeolithic": "paleolithic",
1133
+ "palaeontologist": "paleontologist",
1134
+ "palaeontologists": "paleontologists",
1135
+ "palaeontology": "paleontology",
1136
+ "panelled": "paneled",
1137
+ "panelling": "paneling",
1138
+ "panellist": "panelist",
1139
+ "panellists": "panelists",
1140
+ "paralyse": "paralyze",
1141
+ "paralysed": "paralyzed",
1142
+ "paralyses": "paralyzes",
1143
+ "paralysing": "paralyzing",
1144
+ "parcelled": "parceled",
1145
+ "parcelling": "parceling",
1146
+ "parlour": "parlor",
1147
+ "parlours": "parlors",
1148
+ "particularise": "particularize",
1149
+ "particularised": "particularized",
1150
+ "particularises": "particularizes",
1151
+ "particularising": "particularizing",
1152
+ "passivisation": "passivization",
1153
+ "passivise": "passivize",
1154
+ "passivised": "passivized",
1155
+ "passivises": "passivizes",
1156
+ "passivising": "passivizing",
1157
+ "pasteurisation": "pasteurization",
1158
+ "pasteurise": "pasteurize",
1159
+ "pasteurised": "pasteurized",
1160
+ "pasteurises": "pasteurizes",
1161
+ "pasteurising": "pasteurizing",
1162
+ "patronise": "patronize",
1163
+ "patronised": "patronized",
1164
+ "patronises": "patronizes",
1165
+ "patronising": "patronizing",
1166
+ "patronisingly": "patronizingly",
1167
+ "pedalled": "pedaled",
1168
+ "pedalling": "pedaling",
1169
+ "pedestrianisation": "pedestrianization",
1170
+ "pedestrianise": "pedestrianize",
1171
+ "pedestrianised": "pedestrianized",
1172
+ "pedestrianises": "pedestrianizes",
1173
+ "pedestrianising": "pedestrianizing",
1174
+ "penalise": "penalize",
1175
+ "penalised": "penalized",
1176
+ "penalises": "penalizes",
1177
+ "penalising": "penalizing",
1178
+ "pencilled": "penciled",
1179
+ "pencilling": "penciling",
1180
+ "personalise": "personalize",
1181
+ "personalised": "personalized",
1182
+ "personalises": "personalizes",
1183
+ "personalising": "personalizing",
1184
+ "pharmacopoeia": "pharmacopeia",
1185
+ "pharmacopoeias": "pharmacopeias",
1186
+ "philosophise": "philosophize",
1187
+ "philosophised": "philosophized",
1188
+ "philosophises": "philosophizes",
1189
+ "philosophising": "philosophizing",
1190
+ "philtre": "filter",
1191
+ "philtres": "filters",
1192
+ "phoney": "phony",
1193
+ "plagiarise": "plagiarize",
1194
+ "plagiarised": "plagiarized",
1195
+ "plagiarises": "plagiarizes",
1196
+ "plagiarising": "plagiarizing",
1197
+ "plough": "plow",
1198
+ "ploughed": "plowed",
1199
+ "ploughing": "plowing",
1200
+ "ploughman": "plowman",
1201
+ "ploughmen": "plowmen",
1202
+ "ploughs": "plows",
1203
+ "ploughshare": "plowshare",
1204
+ "ploughshares": "plowshares",
1205
+ "polarisation": "polarization",
1206
+ "polarise": "polarize",
1207
+ "polarised": "polarized",
1208
+ "polarises": "polarizes",
1209
+ "polarising": "polarizing",
1210
+ "politicisation": "politicization",
1211
+ "politicise": "politicize",
1212
+ "politicised": "politicized",
1213
+ "politicises": "politicizes",
1214
+ "politicising": "politicizing",
1215
+ "popularisation": "popularization",
1216
+ "popularise": "popularize",
1217
+ "popularised": "popularized",
1218
+ "popularises": "popularizes",
1219
+ "popularising": "popularizing",
1220
+ "pouffe": "pouf",
1221
+ "pouffes": "poufs",
1222
+ "practise": "practice",
1223
+ "practised": "practiced",
1224
+ "practises": "practices",
1225
+ "practising": "practicing",
1226
+ "praesidium": "presidium",
1227
+ "praesidiums": "presidiums",
1228
+ "pressurisation": "pressurization",
1229
+ "pressurise": "pressurize",
1230
+ "pressurised": "pressurized",
1231
+ "pressurises": "pressurizes",
1232
+ "pressurising": "pressurizing",
1233
+ "pretence": "pretense",
1234
+ "pretences": "pretenses",
1235
+ "primaeval": "primeval",
1236
+ "prioritisation": "prioritization",
1237
+ "prioritise": "prioritize",
1238
+ "prioritised": "prioritized",
1239
+ "prioritises": "prioritizes",
1240
+ "prioritising": "prioritizing",
1241
+ "privatisation": "privatization",
1242
+ "privatisations": "privatizations",
1243
+ "privatise": "privatize",
1244
+ "privatised": "privatized",
1245
+ "privatises": "privatizes",
1246
+ "privatising": "privatizing",
1247
+ "professionalisation": "professionalization",
1248
+ "professionalise": "professionalize",
1249
+ "professionalised": "professionalized",
1250
+ "professionalises": "professionalizes",
1251
+ "professionalising": "professionalizing",
1252
+ "programme": "program",
1253
+ "programmes": "programs",
1254
+ "prologue": "prolog",
1255
+ "prologues": "prologs",
1256
+ "propagandise": "propagandize",
1257
+ "propagandised": "propagandized",
1258
+ "propagandises": "propagandizes",
1259
+ "propagandising": "propagandizing",
1260
+ "proselytise": "proselytize",
1261
+ "proselytised": "proselytized",
1262
+ "proselytiser": "proselytizer",
1263
+ "proselytisers": "proselytizers",
1264
+ "proselytises": "proselytizes",
1265
+ "proselytising": "proselytizing",
1266
+ "psychoanalyse": "psychoanalyze",
1267
+ "psychoanalysed": "psychoanalyzed",
1268
+ "psychoanalyses": "psychoanalyzes",
1269
+ "psychoanalysing": "psychoanalyzing",
1270
+ "publicise": "publicize",
1271
+ "publicised": "publicized",
1272
+ "publicises": "publicizes",
1273
+ "publicising": "publicizing",
1274
+ "pulverisation": "pulverization",
1275
+ "pulverise": "pulverize",
1276
+ "pulverised": "pulverized",
1277
+ "pulverises": "pulverizes",
1278
+ "pulverising": "pulverizing",
1279
+ "pummelled": "pummel",
1280
+ "pummelling": "pummeled",
1281
+ "pyjama": "pajama",
1282
+ "pyjamas": "pajamas",
1283
+ "pzazz": "pizzazz",
1284
+ "quarrelled": "quarreled",
1285
+ "quarrelling": "quarreling",
1286
+ "radicalise": "radicalize",
1287
+ "radicalised": "radicalized",
1288
+ "radicalises": "radicalizes",
1289
+ "radicalising": "radicalizing",
1290
+ "rancour": "rancor",
1291
+ "randomise": "randomize",
1292
+ "randomised": "randomized",
1293
+ "randomises": "randomizes",
1294
+ "randomising": "randomizing",
1295
+ "rationalisation": "rationalization",
1296
+ "rationalisations": "rationalizations",
1297
+ "rationalise": "rationalize",
1298
+ "rationalised": "rationalized",
1299
+ "rationalises": "rationalizes",
1300
+ "rationalising": "rationalizing",
1301
+ "ravelled": "raveled",
1302
+ "ravelling": "raveling",
1303
+ "realisable": "realizable",
1304
+ "realisation": "realization",
1305
+ "realisations": "realizations",
1306
+ "realise": "realize",
1307
+ "realised": "realized",
1308
+ "realises": "realizes",
1309
+ "realising": "realizing",
1310
+ "recognisable": "recognizable",
1311
+ "recognisably": "recognizably",
1312
+ "recognisance": "recognizance",
1313
+ "recognise": "recognize",
1314
+ "recognised": "recognized",
1315
+ "recognises": "recognizes",
1316
+ "recognising": "recognizing",
1317
+ "reconnoitre": "reconnoiter",
1318
+ "reconnoitred": "reconnoitered",
1319
+ "reconnoitres": "reconnoiters",
1320
+ "reconnoitring": "reconnoitering",
1321
+ "refuelled": "refueled",
1322
+ "refuelling": "refueling",
1323
+ "regularisation": "regularization",
1324
+ "regularise": "regularize",
1325
+ "regularised": "regularized",
1326
+ "regularises": "regularizes",
1327
+ "regularising": "regularizing",
1328
+ "remodelled": "remodeled",
1329
+ "remodelling": "remodeling",
1330
+ "remould": "remold",
1331
+ "remoulded": "remolded",
1332
+ "remoulding": "remolding",
1333
+ "remoulds": "remolds",
1334
+ "reorganisation": "reorganization",
1335
+ "reorganisations": "reorganizations",
1336
+ "reorganise": "reorganize",
1337
+ "reorganised": "reorganized",
1338
+ "reorganises": "reorganizes",
1339
+ "reorganising": "reorganizing",
1340
+ "revelled": "reveled",
1341
+ "reveller": "reveler",
1342
+ "revellers": "revelers",
1343
+ "revelling": "reveling",
1344
+ "revitalise": "revitalize",
1345
+ "revitalised": "revitalized",
1346
+ "revitalises": "revitalizes",
1347
+ "revitalising": "revitalizing",
1348
+ "revolutionise": "revolutionize",
1349
+ "revolutionised": "revolutionized",
1350
+ "revolutionises": "revolutionizes",
1351
+ "revolutionising": "revolutionizing",
1352
+ "rhapsodise": "rhapsodize",
1353
+ "rhapsodised": "rhapsodized",
1354
+ "rhapsodises": "rhapsodizes",
1355
+ "rhapsodising": "rhapsodizing",
1356
+ "rigour": "rigor",
1357
+ "rigours": "rigors",
1358
+ "ritualised": "ritualized",
1359
+ "rivalled": "rivaled",
1360
+ "rivalling": "rivaling",
1361
+ "romanticise": "romanticize",
1362
+ "romanticised": "romanticized",
1363
+ "romanticises": "romanticizes",
1364
+ "romanticising": "romanticizing",
1365
+ "rumour": "rumor",
1366
+ "rumoured": "rumored",
1367
+ "rumours": "rumors",
1368
+ "sabre": "saber",
1369
+ "sabres": "sabers",
1370
+ "saltpetre": "saltpeter",
1371
+ "sanitise": "sanitize",
1372
+ "sanitised": "sanitized",
1373
+ "sanitises": "sanitizes",
1374
+ "sanitising": "sanitizing",
1375
+ "satirise": "satirize",
1376
+ "satirised": "satirized",
1377
+ "satirises": "satirizes",
1378
+ "satirising": "satirizing",
1379
+ "saviour": "savior",
1380
+ "saviours": "saviors",
1381
+ "savour": "savor",
1382
+ "savoured": "savored",
1383
+ "savouries": "savories",
1384
+ "savouring": "savoring",
1385
+ "savours": "savors",
1386
+ "savoury": "savory",
1387
+ "scandalise": "scandalize",
1388
+ "scandalised": "scandalized",
1389
+ "scandalises": "scandalizes",
1390
+ "scandalising": "scandalizing",
1391
+ "sceptic": "skeptic",
1392
+ "sceptical": "skeptical",
1393
+ "sceptically": "skeptically",
1394
+ "scepticism": "skepticism",
1395
+ "sceptics": "skeptics",
1396
+ "sceptre": "scepter",
1397
+ "sceptres": "scepters",
1398
+ "scrutinise": "scrutinize",
1399
+ "scrutinised": "scrutinized",
1400
+ "scrutinises": "scrutinizes",
1401
+ "scrutinising": "scrutinizing",
1402
+ "secularisation": "secularization",
1403
+ "secularise": "secularize",
1404
+ "secularised": "secularized",
1405
+ "secularises": "secularizes",
1406
+ "secularising": "secularizing",
1407
+ "sensationalise": "sensationalize",
1408
+ "sensationalised": "sensationalized",
1409
+ "sensationalises": "sensationalizes",
1410
+ "sensationalising": "sensationalizing",
1411
+ "sensitise": "sensitize",
1412
+ "sensitised": "sensitized",
1413
+ "sensitises": "sensitizes",
1414
+ "sensitising": "sensitizing",
1415
+ "sentimentalise": "sentimentalize",
1416
+ "sentimentalised": "sentimentalized",
1417
+ "sentimentalises": "sentimentalizes",
1418
+ "sentimentalising": "sentimentalizing",
1419
+ "sepulchre": "sepulcher",
1420
+ "sepulchres": "sepulchers",
1421
+ "serialisation": "serialization",
1422
+ "serialisations": "serializations",
1423
+ "serialise": "serialize",
1424
+ "serialised": "serialized",
1425
+ "serialises": "serializes",
1426
+ "serialising": "serializing",
1427
+ "sermonise": "sermonize",
1428
+ "sermonised": "sermonized",
1429
+ "sermonises": "sermonizes",
1430
+ "sermonising": "sermonizing",
1431
+ "sheikh": "sheik",
1432
+ "shovelled": "shoveled",
1433
+ "shovelling": "shoveling",
1434
+ "shrivelled": "shriveled",
1435
+ "shrivelling": "shriveling",
1436
+ "signalise": "signalize",
1437
+ "signalised": "signalized",
1438
+ "signalises": "signalizes",
1439
+ "signalising": "signalizing",
1440
+ "signalled": "signaled",
1441
+ "signalling": "signaling",
1442
+ "smoulder": "smolder",
1443
+ "smouldered": "smoldered",
1444
+ "smouldering": "smoldering",
1445
+ "smoulders": "smolders",
1446
+ "snivelled": "sniveled",
1447
+ "snivelling": "sniveling",
1448
+ "snorkelled": "snorkeled",
1449
+ "snorkelling": "snorkeling",
1450
+ "snowplough": "snowplow",
1451
+ "snowploughs": "snowplow",
1452
+ "socialisation": "socialization",
1453
+ "socialise": "socialize",
1454
+ "socialised": "socialized",
1455
+ "socialises": "socializes",
1456
+ "socialising": "socializing",
1457
+ "sodomise": "sodomize",
1458
+ "sodomised": "sodomized",
1459
+ "sodomises": "sodomizes",
1460
+ "sodomising": "sodomizing",
1461
+ "solemnise": "solemnize",
1462
+ "solemnised": "solemnized",
1463
+ "solemnises": "solemnizes",
1464
+ "solemnising": "solemnizing",
1465
+ "sombre": "somber",
1466
+ "specialisation": "specialization",
1467
+ "specialisations": "specializations",
1468
+ "specialise": "specialize",
1469
+ "specialised": "specialized",
1470
+ "specialises": "specializes",
1471
+ "specialising": "specializing",
1472
+ "spectre": "specter",
1473
+ "spectres": "specters",
1474
+ "spiralled": "spiraled",
1475
+ "spiralling": "spiraling",
1476
+ "splendour": "splendor",
1477
+ "splendours": "splendors",
1478
+ "squirrelled": "squirreled",
1479
+ "squirrelling": "squirreling",
1480
+ "stabilisation": "stabilization",
1481
+ "stabilise": "stabilize",
1482
+ "stabilised": "stabilized",
1483
+ "stabiliser": "stabilizer",
1484
+ "stabilisers": "stabilizers",
1485
+ "stabilises": "stabilizes",
1486
+ "stabilising": "stabilizing",
1487
+ "standardisation": "standardization",
1488
+ "standardise": "standardize",
1489
+ "standardised": "standardized",
1490
+ "standardises": "standardizes",
1491
+ "standardising": "standardizing",
1492
+ "stencilled": "stenciled",
1493
+ "stencilling": "stenciling",
1494
+ "sterilisation": "sterilization",
1495
+ "sterilisations": "sterilizations",
1496
+ "sterilise": "sterilize",
1497
+ "sterilised": "sterilized",
1498
+ "steriliser": "sterilizer",
1499
+ "sterilisers": "sterilizers",
1500
+ "sterilises": "sterilizes",
1501
+ "sterilising": "sterilizing",
1502
+ "stigmatisation": "stigmatization",
1503
+ "stigmatise": "stigmatize",
1504
+ "stigmatised": "stigmatized",
1505
+ "stigmatises": "stigmatizes",
1506
+ "stigmatising": "stigmatizing",
1507
+ "storey": "story",
1508
+ "storeys": "stories",
1509
+ "subsidisation": "subsidization",
1510
+ "subsidise": "subsidize",
1511
+ "subsidised": "subsidized",
1512
+ "subsidiser": "subsidizer",
1513
+ "subsidisers": "subsidizers",
1514
+ "subsidises": "subsidizes",
1515
+ "subsidising": "subsidizing",
1516
+ "succour": "succor",
1517
+ "succoured": "succored",
1518
+ "succouring": "succoring",
1519
+ "succours": "succors",
1520
+ "sulphate": "sulfate",
1521
+ "sulphates": "sulfates",
1522
+ "sulphide": "sulfide",
1523
+ "sulphides": "sulfides",
1524
+ "sulphur": "sulfur",
1525
+ "sulphurous": "sulfurous",
1526
+ "summarise": "summarize",
1527
+ "summarised": "summarized",
1528
+ "summarises": "summarizes",
1529
+ "summarising": "summarizing",
1530
+ "swivelled": "swiveled",
1531
+ "swivelling": "swiveling",
1532
+ "symbolise": "symbolize",
1533
+ "symbolised": "symbolized",
1534
+ "symbolises": "symbolizes",
1535
+ "symbolising": "symbolizing",
1536
+ "sympathise": "sympathize",
1537
+ "sympathised": "sympathized",
1538
+ "sympathiser": "sympathizer",
1539
+ "sympathisers": "sympathizers",
1540
+ "sympathises": "sympathizes",
1541
+ "sympathising": "sympathizing",
1542
+ "synchronisation": "synchronization",
1543
+ "synchronise": "synchronize",
1544
+ "synchronised": "synchronized",
1545
+ "synchronises": "synchronizes",
1546
+ "synchronising": "synchronizing",
1547
+ "synthesise": "synthesize",
1548
+ "synthesised": "synthesized",
1549
+ "synthesiser": "synthesizer",
1550
+ "synthesisers": "synthesizers",
1551
+ "synthesises": "synthesizes",
1552
+ "synthesising": "synthesizing",
1553
+ "syphon": "siphon",
1554
+ "syphoned": "siphoned",
1555
+ "syphoning": "siphoning",
1556
+ "syphons": "siphons",
1557
+ "systematisation": "systematization",
1558
+ "systematise": "systematize",
1559
+ "systematised": "systematized",
1560
+ "systematises": "systematizes",
1561
+ "systematising": "systematizing",
1562
+ "tantalise": "tantalize",
1563
+ "tantalised": "tantalized",
1564
+ "tantalises": "tantalizes",
1565
+ "tantalising": "tantalizing",
1566
+ "tantalisingly": "tantalizingly",
1567
+ "tasselled": "tasseled",
1568
+ "technicolour": "technicolor",
1569
+ "temporise": "temporize",
1570
+ "temporised": "temporized",
1571
+ "temporises": "temporizes",
1572
+ "temporising": "temporizing",
1573
+ "tenderise": "tenderize",
1574
+ "tenderised": "tenderized",
1575
+ "tenderises": "tenderizes",
1576
+ "tenderising": "tenderizing",
1577
+ "terrorise": "terrorize",
1578
+ "terrorised": "terrorized",
1579
+ "terrorises": "terrorizes",
1580
+ "terrorising": "terrorizing",
1581
+ "theatre": "theater",
1582
+ "theatregoer": "theatergoer",
1583
+ "theatregoers": "theatergoers",
1584
+ "theatres": "theaters",
1585
+ "theorise": "theorize",
1586
+ "theorised": "theorized",
1587
+ "theorises": "theorizes",
1588
+ "theorising": "theorizing",
1589
+ "tonne": "ton",
1590
+ "tonnes": "tons",
1591
+ "towelled": "toweled",
1592
+ "towelling": "toweling",
1593
+ "toxaemia": "toxemia",
1594
+ "tranquillise": "tranquilize",
1595
+ "tranquillised": "tranquilized",
1596
+ "tranquilliser": "tranquilizer",
1597
+ "tranquillisers": "tranquilizers",
1598
+ "tranquillises": "tranquilizes",
1599
+ "tranquillising": "tranquilizing",
1600
+ "tranquillity": "tranquility",
1601
+ "tranquillize": "tranquilize",
1602
+ "tranquillized": "tranquilized",
1603
+ "tranquillizer": "tranquilizer",
1604
+ "tranquillizers": "tranquilizers",
1605
+ "tranquillizes": "tranquilizes",
1606
+ "tranquillizing": "tranquilizing",
1607
+ "tranquilly": "tranquility",
1608
+ "transistorised": "transistorized",
1609
+ "traumatise": "traumatize",
1610
+ "traumatised": "traumatized",
1611
+ "traumatises": "traumatizes",
1612
+ "traumatising": "traumatizing",
1613
+ "travelled": "traveled",
1614
+ "traveller": "traveler",
1615
+ "travellers": "travelers",
1616
+ "travelling": "traveling",
1617
+ "travelog": "travelogue",
1618
+ "travelogs": "travelogues",
1619
+ "trialled": "trialed",
1620
+ "trialling": "trialing",
1621
+ "tricolour": "tricolor",
1622
+ "tricolours": "tricolors",
1623
+ "trivialise": "trivialize",
1624
+ "trivialised": "trivialized",
1625
+ "trivialises": "trivializes",
1626
+ "trivialising": "trivializing",
1627
+ "tumour": "tumor",
1628
+ "tumours": "tumors",
1629
+ "tunnelled": "tunneled",
1630
+ "tunnelling": "tunneling",
1631
+ "tyrannise": "tyrannize",
1632
+ "tyrannised": "tyrannized",
1633
+ "tyrannises": "tyrannizes",
1634
+ "tyrannising": "tyrannizing",
1635
+ "tyre": "tire",
1636
+ "tyres": "tires",
1637
+ "unauthorised": "unauthorized",
1638
+ "uncivilised": "uncivilized",
1639
+ "underutilised": "underutilized",
1640
+ "unequalled": "unequaled",
1641
+ "unfavourable": "unfavorable",
1642
+ "unfavourably": "unfavorably",
1643
+ "unionisation": "unionization",
1644
+ "unionise": "unionize",
1645
+ "unionised": "unionized",
1646
+ "unionises": "unionizes",
1647
+ "unionising": "unionizing",
1648
+ "unorganised": "unorganized",
1649
+ "unravelled": "unraveled",
1650
+ "unravelling": "unraveling",
1651
+ "unrecognisable": "unrecognizable",
1652
+ "unrecognised": "unrecognized",
1653
+ "unrivalled": "unrivaled",
1654
+ "unsavoury": "unsavory",
1655
+ "untrammelled": "untrammeled",
1656
+ "urbanisation": "urbanization",
1657
+ "urbanise": "urbanize",
1658
+ "urbanised": "urbanized",
1659
+ "urbanises": "urbanizes",
1660
+ "urbanising": "urbanizing",
1661
+ "utilisable": "utilizable",
1662
+ "utilisation": "utilization",
1663
+ "utilise": "utilize",
1664
+ "utilised": "utilized",
1665
+ "utilises": "utilizes",
1666
+ "utilising": "utilizing",
1667
+ "valour": "valor",
1668
+ "vandalise": "vandalize",
1669
+ "vandalised": "vandalized",
1670
+ "vandalises": "vandalizes",
1671
+ "vandalising": "vandalizing",
1672
+ "vaporisation": "vaporization",
1673
+ "vaporise": "vaporize",
1674
+ "vaporised": "vaporized",
1675
+ "vaporises": "vaporizes",
1676
+ "vaporising": "vaporizing",
1677
+ "vapour": "vapor",
1678
+ "vapours": "vapors",
1679
+ "verbalise": "verbalize",
1680
+ "verbalised": "verbalized",
1681
+ "verbalises": "verbalizes",
1682
+ "verbalising": "verbalizing",
1683
+ "victimisation": "victimization",
1684
+ "victimise": "victimize",
1685
+ "victimised": "victimized",
1686
+ "victimises": "victimizes",
1687
+ "victimising": "victimizing",
1688
+ "videodisc": "videodisk",
1689
+ "videodiscs": "videodisks",
1690
+ "vigour": "vigor",
1691
+ "visualisation": "visualization",
1692
+ "visualisations": "visualizations",
1693
+ "visualise": "visualize",
1694
+ "visualised": "visualized",
1695
+ "visualises": "visualizes",
1696
+ "visualising": "visualizing",
1697
+ "vocalisation": "vocalization",
1698
+ "vocalisations": "vocalizations",
1699
+ "vocalise": "vocalize",
1700
+ "vocalised": "vocalized",
1701
+ "vocalises": "vocalizes",
1702
+ "vocalising": "vocalizing",
1703
+ "vulcanised": "vulcanized",
1704
+ "vulgarisation": "vulgarization",
1705
+ "vulgarise": "vulgarize",
1706
+ "vulgarised": "vulgarized",
1707
+ "vulgarises": "vulgarizes",
1708
+ "vulgarising": "vulgarizing",
1709
+ "waggon": "wagon",
1710
+ "waggons": "wagons",
1711
+ "watercolour": "watercolor",
1712
+ "watercolours": "watercolors",
1713
+ "weaselled": "weaseled",
1714
+ "weaselling": "weaseling",
1715
+ "westernisation": "westernization",
1716
+ "westernise": "westernize",
1717
+ "westernised": "westernized",
1718
+ "westernises": "westernizes",
1719
+ "westernising": "westernizing",
1720
+ "womanise": "womanize",
1721
+ "womanised": "womanized",
1722
+ "womaniser": "womanizer",
1723
+ "womanisers": "womanizers",
1724
+ "womanises": "womanizes",
1725
+ "womanising": "womanizing",
1726
+ "woollen": "woolen",
1727
+ "woollens": "woolens",
1728
+ "woollies": "woolies",
1729
+ "woolly": "wooly",
1730
+ "worshipped": "worshiped",
1731
+ "worshipping": "worshiping",
1732
+ "worshipper": "worshiper",
1733
+ "yodelled": "yodeled",
1734
+ "yodelling": "yodeling",
1735
+ "yoghourt": "yogurt",
1736
+ "yoghourts": "yogurts",
1737
+ "yoghurt": "yogurt",
1738
+ "yoghurts": "yogurts",
1739
+ "mhm": "hmm",
1740
+ "mmm": "hmm"
1741
+ }
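A note on how this mapping is consumed (an illustrative sketch, not part of the commit): the normalizer loads the JSON into a plain dict and replaces each lower-cased word that has an entry, leaving all other tokens untouched. The helper name standardize_spellings and the relative path english.json below are assumptions made for the example.

import json
import re


def standardize_spellings(text: str, mapping_path: str = "english.json") -> str:
    """Map British spellings to their American forms, word by word."""
    with open(mapping_path) as f:
        mapping = json.load(f)  # e.g. {"colour": "color", "theatre": "theater", ...}
    # Substitute per word so punctuation and spacing survive unchanged.
    return re.sub(
        r"[A-Za-z]+",
        lambda m: mapping.get(m.group(0).lower(), m.group(0)),
        text,
    )

# standardize_spellings("the colour of the theatre") -> "the color of the theater"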
eval_audio/whisper_normalizer/english.py ADDED
@@ -0,0 +1,550 @@
1
+ import json
2
+ import os
3
+ import re
4
+ from fractions import Fraction
5
+ from typing import Iterator, List, Match, Optional, Union
6
+
7
+ from more_itertools import windowed
8
+
9
+ from .basic import remove_symbols_and_diacritics
10
+
11
+
12
+ class EnglishNumberNormalizer:
13
+ """
14
+ Convert any spelled-out numbers into arabic numbers, while handling:
15
+
16
+ - remove any commas
17
+ - keep the suffixes such as: `1960s`, `274th`, `32nd`, etc.
18
+ - spell out currency symbols after the number. e.g. `$20 million` -> `20000000 dollars`
19
+ - spell out `one` and `ones`
20
+ - interpret successive single-digit numbers as nominal: `one oh one` -> `101`
21
+ """
22
+
23
+ def __init__(self):
24
+ super().__init__()
25
+
26
+ self.zeros = {"o", "oh", "zero"}
27
+ self.ones = {
28
+ name: i
29
+ for i, name in enumerate(
30
+ [
31
+ "one",
32
+ "two",
33
+ "three",
34
+ "four",
35
+ "five",
36
+ "six",
37
+ "seven",
38
+ "eight",
39
+ "nine",
40
+ "ten",
41
+ "eleven",
42
+ "twelve",
43
+ "thirteen",
44
+ "fourteen",
45
+ "fifteen",
46
+ "sixteen",
47
+ "seventeen",
48
+ "eighteen",
49
+ "nineteen",
50
+ ],
51
+ start=1,
52
+ )
53
+ }
54
+ self.ones_plural = {
55
+ "sixes" if name == "six" else name + "s": (value, "s")
56
+ for name, value in self.ones.items()
57
+ }
58
+ self.ones_ordinal = {
59
+ "zeroth": (0, "th"),
60
+ "first": (1, "st"),
61
+ "second": (2, "nd"),
62
+ "third": (3, "rd"),
63
+ "fifth": (5, "th"),
64
+ "twelfth": (12, "th"),
65
+ **{
66
+ name + ("h" if name.endswith("t") else "th"): (value, "th")
67
+ for name, value in self.ones.items()
68
+ if value > 3 and value != 5 and value != 12
69
+ },
70
+ }
71
+ self.ones_suffixed = {**self.ones_plural, **self.ones_ordinal}
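+ # Note (editor's comment, not in the original file): the comprehension above
+ # produces "nineth" rather than "ninth", so the spoken ordinal "ninth" is not
+ # matched here and falls through to the non-numeric branch in process_words.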
72
+
73
+ self.tens = {
74
+ "twenty": 20,
75
+ "thirty": 30,
76
+ "forty": 40,
77
+ "fifty": 50,
78
+ "sixty": 60,
79
+ "seventy": 70,
80
+ "eighty": 80,
81
+ "ninety": 90,
82
+ }
83
+ self.tens_plural = {
84
+ name.replace("y", "ies"): (value, "s") for name, value in self.tens.items()
85
+ }
86
+ self.tens_ordinal = {
87
+ name.replace("y", "ieth"): (value, "th")
88
+ for name, value in self.tens.items()
89
+ }
90
+ self.tens_suffixed = {**self.tens_plural, **self.tens_ordinal}
91
+
92
+ self.multipliers = {
93
+ "hundred": 100,
94
+ "thousand": 1_000,
95
+ "million": 1_000_000,
96
+ "billion": 1_000_000_000,
97
+ "trillion": 1_000_000_000_000,
98
+ "quadrillion": 1_000_000_000_000_000,
99
+ "quintillion": 1_000_000_000_000_000_000,
100
+ "sextillion": 1_000_000_000_000_000_000_000,
101
+ "septillion": 1_000_000_000_000_000_000_000_000,
102
+ "octillion": 1_000_000_000_000_000_000_000_000_000,
103
+ "nonillion": 1_000_000_000_000_000_000_000_000_000_000,
104
+ "decillion": 1_000_000_000_000_000_000_000_000_000_000_000,
105
+ }
106
+ self.multipliers_plural = {
107
+ name + "s": (value, "s") for name, value in self.multipliers.items()
108
+ }
109
+ self.multipliers_ordinal = {
110
+ name + "th": (value, "th") for name, value in self.multipliers.items()
111
+ }
112
+ self.multipliers_suffixed = {
113
+ **self.multipliers_plural,
114
+ **self.multipliers_ordinal,
115
+ }
116
+ self.decimals = {*self.ones, *self.tens, *self.zeros}
117
+
118
+ self.preceding_prefixers = {
119
+ "minus": "-",
120
+ "negative": "-",
121
+ "plus": "+",
122
+ "positive": "+",
123
+ }
124
+ self.following_prefixers = {
125
+ "pound": "£",
126
+ "pounds": "£",
127
+ "euro": "€",
128
+ "euros": "€",
129
+ "dollar": "$",
130
+ "dollars": "$",
131
+ "cent": "¢",
132
+ "cents": "¢",
133
+ }
134
+ self.prefixes = set(
135
+ list(self.preceding_prefixers.values())
136
+ + list(self.following_prefixers.values())
137
+ )
138
+ self.suffixers = {
139
+ "per": {"cent": "%"},
140
+ "percent": "%",
141
+ }
142
+ self.specials = {"and", "double", "triple", "point"}
143
+
144
+ self.words = set(
145
+ [
146
+ key
147
+ for mapping in [
148
+ self.zeros,
149
+ self.ones,
150
+ self.ones_suffixed,
151
+ self.tens,
152
+ self.tens_suffixed,
153
+ self.multipliers,
154
+ self.multipliers_suffixed,
155
+ self.preceding_prefixers,
156
+ self.following_prefixers,
157
+ self.suffixers,
158
+ self.specials,
159
+ ]
160
+ for key in mapping
161
+ ]
162
+ )
163
+ self.literal_words = {"one", "ones"}
164
+
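+ # Editor's comment (not in the original file): process_words scans a
+ # (prev, current, next) window over the tokens via more_itertools.windowed,
+ # accumulating `value` as an int or a digit string and flushing it through
+ # output() whenever a non-numeric word or a suffixed form ends the number.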
165
+     def process_words(self, words: List[str]) -> Iterator[str]:
+         prefix: Optional[str] = None
+         value: Optional[Union[str, int]] = None
+         skip = False
+
+         def to_fraction(s: str):
+             try:
+                 return Fraction(s)
+             except ValueError:
+                 return None
+
+         def output(result: Union[str, int]):
+             nonlocal prefix, value
+             result = str(result)
+             if prefix is not None:
+                 result = prefix + result
+             value = None
+             prefix = None
+             return result
+
+         if len(words) == 0:
+             return
+
+         for prev, current, next in windowed([None] + words + [None], 3):
+             if skip:
+                 skip = False
+                 continue
+
+             next_is_numeric = next is not None and re.match(r"^\d+(\.\d+)?$", next)
+             has_prefix = current[0] in self.prefixes
+             current_without_prefix = current[1:] if has_prefix else current
+             if re.match(r"^\d+(\.\d+)?$", current_without_prefix):
+                 # arabic numbers (potentially with signs and fractions)
+                 f = to_fraction(current_without_prefix)
+                 assert f is not None
+                 if value is not None:
+                     if isinstance(value, str) and value.endswith("."):
+                         # concatenate decimals / ip address components
+                         value = str(value) + str(current)
+                         continue
+                     else:
+                         yield output(value)
+
+                 prefix = current[0] if has_prefix else prefix
+                 if f.denominator == 1:
+                     value = f.numerator  # store integers as int
+                 else:
+                     value = current_without_prefix
+             elif current not in self.words:
+                 # non-numeric words
+                 if value is not None:
+                     yield output(value)
+                 yield output(current)
+             elif current in self.zeros:
+                 value = str(value or "") + "0"
+             elif current in self.ones:
+                 ones = self.ones[current]
+
+                 if value is None:
+                     value = ones
+                 elif isinstance(value, str) or prev in self.ones:
+                     if (
+                         prev in self.tens and ones < 10
+                     ):  # replace the last zero with the digit
+                         assert value[-1] == "0"
+                         value = value[:-1] + str(ones)
+                     else:
+                         value = str(value) + str(ones)
+                 elif ones < 10:
+                     if value % 10 == 0:
+                         value += ones
+                     else:
+                         value = str(value) + str(ones)
+                 else:  # eleven to nineteen
+                     if value % 100 == 0:
+                         value += ones
+                     else:
+                         value = str(value) + str(ones)
+             elif current in self.ones_suffixed:
+                 # ordinal or cardinal; yield the number right away
+                 ones, suffix = self.ones_suffixed[current]
+                 if value is None:
+                     yield output(str(ones) + suffix)
+                 elif isinstance(value, str) or prev in self.ones:
+                     if prev in self.tens and ones < 10:
+                         assert value[-1] == "0"
+                         yield output(value[:-1] + str(ones) + suffix)
+                     else:
+                         yield output(str(value) + str(ones) + suffix)
+                 elif ones < 10:
+                     if value % 10 == 0:
+                         yield output(str(value + ones) + suffix)
+                     else:
+                         yield output(str(value) + str(ones) + suffix)
+                 else:  # eleven to nineteen
+                     if value % 100 == 0:
+                         yield output(str(value + ones) + suffix)
+                     else:
+                         yield output(str(value) + str(ones) + suffix)
+                 value = None
+             elif current in self.tens:
+                 tens = self.tens[current]
+                 if value is None:
+                     value = tens
+                 elif isinstance(value, str):
+                     value = str(value) + str(tens)
+                 else:
+                     if value % 100 == 0:
+                         value += tens
+                     else:
+                         value = str(value) + str(tens)
+             elif current in self.tens_suffixed:
+                 # ordinal or cardinal; yield the number right away
+                 tens, suffix = self.tens_suffixed[current]
+                 if value is None:
+                     yield output(str(tens) + suffix)
+                 elif isinstance(value, str):
+                     yield output(str(value) + str(tens) + suffix)
+                 else:
+                     if value % 100 == 0:
+                         yield output(str(value + tens) + suffix)
+                     else:
+                         yield output(str(value) + str(tens) + suffix)
+             elif current in self.multipliers:
+                 multiplier = self.multipliers[current]
+                 if value is None:
+                     value = multiplier
+                 elif isinstance(value, str) or value == 0:
+                     f = to_fraction(value)
+                     p = f * multiplier if f is not None else None
+                     if f is not None and p.denominator == 1:
+                         value = p.numerator
+                     else:
+                         yield output(value)
+                         value = multiplier
+                 else:
+                     before = value // 1000 * 1000
+                     residual = value % 1000
+                     value = before + residual * multiplier
+             elif current in self.multipliers_suffixed:
+                 multiplier, suffix = self.multipliers_suffixed[current]
+                 if value is None:
+                     yield output(str(multiplier) + suffix)
+                 elif isinstance(value, str):
+                     f = to_fraction(value)
+                     p = f * multiplier if f is not None else None
+                     if f is not None and p.denominator == 1:
+                         yield output(str(p.numerator) + suffix)
+                     else:
+                         yield output(value)
+                         yield output(str(multiplier) + suffix)
+                 else:  # int
+                     before = value // 1000 * 1000
+                     residual = value % 1000
+                     value = before + residual * multiplier
+                     yield output(str(value) + suffix)
+                 value = None
+             elif current in self.preceding_prefixers:
+                 # apply prefix (positive, minus, etc.) if it precedes a number
+                 if value is not None:
+                     yield output(value)
+
+                 if next in self.words or next_is_numeric:
+                     prefix = self.preceding_prefixers[current]
+                 else:
+                     yield output(current)
+             elif current in self.following_prefixers:
+                 # apply prefix (dollars, cents, etc.) only after a number
+                 if value is not None:
+                     prefix = self.following_prefixers[current]
+                     yield output(value)
+                 else:
+                     yield output(current)
+             elif current in self.suffixers:
+                 # apply suffix symbols (percent -> '%')
+                 if value is not None:
+                     suffix = self.suffixers[current]
+                     if isinstance(suffix, dict):
+                         if next in suffix:
+                             yield output(str(value) + suffix[next])
+                             skip = True
+                         else:
+                             yield output(value)
+                             yield output(current)
+                     else:
+                         yield output(str(value) + suffix)
+                 else:
+                     yield output(current)
+             elif current in self.specials:
+                 if next not in self.words and not next_is_numeric:
+                     # apply special handling only if the next word can be numeric
+                     if value is not None:
+                         yield output(value)
+                     yield output(current)
+                 elif current == "and":
+                     # ignore "and" after hundreds, thousands, etc.
+                     if prev not in self.multipliers:
+                         if value is not None:
+                             yield output(value)
+                         yield output(current)
+                 elif current == "double" or current == "triple":
+                     if next in self.ones or next in self.zeros:
+                         repeats = 2 if current == "double" else 3
+                         ones = self.ones.get(next, 0)
+                         value = str(value or "") + str(ones) * repeats
+                         skip = True
+                     else:
+                         if value is not None:
+                             yield output(value)
+                         yield output(current)
+                 elif current == "point":
+                     if next in self.decimals or next_is_numeric:
+                         value = str(value or "") + "."
+                 else:
+                     # should all have been covered at this point
+                     raise ValueError(f"Unexpected token: {current}")
+             else:
+                 # all should have been covered at this point
+                 raise ValueError(f"Unexpected token: {current}")
+
+         if value is not None:
+             yield output(value)
+
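+     # A rough sketch of the expected behavior (with n = EnglishNumberNormalizer();
+     # inputs are whitespace-split word lists, each yield is one normalized token):
+     #     list(n.process_words(["one", "hundred", "twenty", "three"]))
+     #         -> ["123"]
+     #     list(n.process_words(["three", "dollars", "and", "fifty", "cents"]))
+     #         -> ["$3", "and", "¢50"]    # postprocess() merges this into "$3.50"
+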
+     def preprocess(self, s: str):
+         # replace "<number> and a half" with "<number> point five"
+         results = []
+
+         segments = re.split(r"\band\s+a\s+half\b", s)
+         for i, segment in enumerate(segments):
+             if len(segment.strip()) == 0:
+                 continue
+             if i == len(segments) - 1:
+                 results.append(segment)
+             else:
+                 results.append(segment)
+                 last_word = segment.rsplit(maxsplit=2)[-1]
+                 if last_word in self.decimals or last_word in self.multipliers:
+                     results.append("point five")
+                 else:
+                     results.append("and a half")
+
+         s = " ".join(results)
+
+         # put a space at number/letter boundary
+         s = re.sub(r"([a-z])([0-9])", r"\1 \2", s)
+         s = re.sub(r"([0-9])([a-z])", r"\1 \2", s)
+
+         # but remove the space again when it splits an ordinal/plural suffix off a number
+         s = re.sub(r"([0-9])\s+(st|nd|rd|th|s)\b", r"\1\2", s)
+
+         return s
+
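+     # For example, preprocess("two and a half million") should give
+     # "two  point five  million" (the doubled spaces are harmless, since
+     # __call__ re-splits on whitespace), and "101st" survives the round trip:
+     # "101st" -> "101 st" -> "101st".
+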
+
417
+ def postprocess(self, s: str):
418
+ def combine_cents(m: Match):
419
+ try:
420
+ currency = m.group(1)
421
+ integer = m.group(2)
422
+ cents = int(m.group(3))
423
+ return f"{currency}{integer}.{cents:02d}"
424
+ except ValueError:
425
+ return m.string
426
+
427
+ def extract_cents(m: Match):
428
+ try:
429
+ return f"¢{int(m.group(1))}"
430
+ except ValueError:
431
+ return m.string
432
+
433
+ # apply currency postprocessing; "$2 and ¢7" -> "$2.07"
434
+ s = re.sub(r"([€£$])([0-9]+) (?:and )?¢([0-9]{1,2})\b", combine_cents, s)
435
+ s = re.sub(r"[€£$]0.([0-9]{1,2})\b", extract_cents, s)
436
+
437
+ # write "one(s)" instead of "1(s)", just for the readability
438
+ s = re.sub(r"\b1(s?)\b", r"one\1", s)
439
+
440
+ return s
441
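+     # For example: postprocess("$3 and ¢50") -> "$3.50",
+     # postprocess("€0.5") -> "¢5", and postprocess("1s") -> "ones".
+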
+
442
+ def __call__(self, s: str):
443
+ s = self.preprocess(s)
444
+ s = " ".join(word for word in self.process_words(s.split()) if word is not None)
445
+ s = self.postprocess(s)
446
+
447
+ return s
448
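+
+     # End-to-end sketch of the number normalizer alone:
+     #     EnglishNumberNormalizer()("one hundred and one")   -> "101"
+     #     EnglishNumberNormalizer()("twenty sixth of june")  -> "26th of june"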
+
+
+ class EnglishSpellingNormalizer:
+     """
+     Applies British-American spelling mappings as listed in [1].
+
+     [1] https://www.tysto.com/uk-us-spelling-list.html
+     """
+
+     def __init__(self):
+         mapping_path = os.path.join(os.path.dirname(__file__), "english.json")
+         self.mapping = json.load(open(mapping_path))
+
+     def __call__(self, s: str):
+         return " ".join(self.mapping.get(word, word) for word in s.split())
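+
+     # For example, with the tysto mapping loaded, "colour" -> "color" and
+     # "authorise" -> "authorize"; words absent from the mapping pass through.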
+
+
+ class EnglishTextNormalizer:
+     def __init__(self):
+         self.ignore_patterns = r"\b(hmm|mm|mhm|mmm|uh|um)\b"
+         self.replacers = {
+             # common contractions
+             r"\bwon't\b": "will not",
+             r"\bcan't\b": "can not",
+             r"\blet's\b": "let us",
+             r"\bain't\b": "aint",  # ambiguous expansion; keep as a single token
+             r"\by'all\b": "you all",
+             r"\bwanna\b": "want to",
+             r"\bgotta\b": "got to",
+             r"\bgonna\b": "going to",
+             r"\bi'ma\b": "i am going to",
+             r"\bimma\b": "i am going to",
+             r"\bwoulda\b": "would have",
+             r"\bcoulda\b": "could have",
+             r"\bshoulda\b": "should have",
+             r"\bma'am\b": "madam",
+             # contractions in titles/prefixes
+             r"\bmr\b": "mister ",
+             r"\bmrs\b": "missus ",
+             r"\bst\b": "saint ",
+             r"\bdr\b": "doctor ",
+             r"\bprof\b": "professor ",
+             r"\bcapt\b": "captain ",
+             r"\bgov\b": "governor ",
+             r"\bald\b": "alderman ",
+             r"\bgen\b": "general ",
+             r"\bsen\b": "senator ",
+             r"\brep\b": "representative ",
+             r"\bpres\b": "president ",
+             r"\brev\b": "reverend ",
+             r"\bhon\b": "honorable ",
+             r"\basst\b": "assistant ",
+             r"\bassoc\b": "associate ",
+             r"\blt\b": "lieutenant ",
+             r"\bcol\b": "colonel ",
+             r"\bjr\b": "junior ",
+             r"\bsr\b": "senior ",
+             r"\besq\b": "esquire ",
+             # perfect tenses; ideally this would cover any past participle, but that is harder
+             r"'d been\b": " had been",
+             r"'s been\b": " has been",
+             r"'d gone\b": " had gone",
+             r"'s gone\b": " has gone",
+             r"'d done\b": " had done",  # "'s done" is ambiguous
+             r"'s got\b": " has got",
+             # general contractions
+             r"n't\b": " not",
+             r"'re\b": " are",
+             r"'s\b": " is",
+             r"'d\b": " would",
+             r"'ll\b": " will",
+             r"'t\b": " not",
+             r"'ve\b": " have",
+             r"'m\b": " am",
+         }
+         self.standardize_numbers = EnglishNumberNormalizer()
+         self.standardize_spellings = EnglishSpellingNormalizer()
+
+     def __call__(self, s: str):
+         s = s.lower()
+
+         s = re.sub(r"[<\[][^>\]]*[>\]]", "", s)  # remove words between brackets
+         s = re.sub(r"\(([^)]+?)\)", "", s)  # remove words between parentheses
+         s = re.sub(self.ignore_patterns, "", s)
+         s = re.sub(r"\s+'", "'", s)  # when there's a space before an apostrophe
+
+         for pattern, replacement in self.replacers.items():
+             s = re.sub(pattern, replacement, s)
+
+         s = re.sub(r"(\d),(\d)", r"\1\2", s)  # remove commas between digits
+         s = re.sub(r"\.([^0-9]|$)", r" \1", s)  # remove periods not followed by numbers
+         s = remove_symbols_and_diacritics(s, keep=".%$¢€£")  # keep numeric symbols
+
+         s = self.standardize_numbers(s)
+         s = self.standardize_spellings(s)
+
+         # now remove prefix/suffix symbols that are not preceded/followed by numbers
+         s = re.sub(r"[.$¢€£]([^0-9])", r" \1", s)
+         s = re.sub(r"([^0-9])%", r"\1 ", s)
+
+         s = re.sub(r"\s+", " ", s)  # replace any successive whitespaces with a space
+
+         return s
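+
+
+ # A minimal usage sketch (assuming this module is importable and english.json
+ # sits next to it, as EnglishSpellingNormalizer.__init__ above expects):
+ #
+ #     normalizer = EnglishTextNormalizer()
+ #     normalizer("I'm a little teapot")                       # -> "i am a little teapot"
+ #     normalizer("It costs three dollars and fifty cents.")   # -> "it costs $3.50"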
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ gradio==4.31.3
+ modelscope-studio