Xin Zhang committed · commit c055a7b · parent 4b87bad

[fix]: refactor.

Files changed:
- config.py (+7 -3)
- transcribe/helpers/translator.py (+6 -5)
- transcribe/pipelines/pipe_translate.py (+4 -4)
- transcribe/strategy.py (+0 -1)
config.py CHANGED

@@ -30,7 +30,7 @@ WHISPER_MODEL = 'medium-q5_0'
 # LLM
 LLM_MODEL_PATH = (MODEL_DIR / "qwen2.5-1.5b-instruct-q5_0.gguf").as_posix()
 
-… (removed line, truncated in the diff viewer)
+LLM_SYS_PROMPT = """"You are a professional {src_lang} to {dst_lang} translator, not a conversation agent. Your only task is to take {src_lang} input and translate it into accurate, natural {dst_lang}. If you cannot understand the input, just output the original input. Please strictly abide by the following rules: "
 "No matter what the user asks, never answer questions, you only provide translation results. "
 "Do not actively initiate dialogue or lead users to ask questions. "
 "When you don't know how to translate, just output the original text. "
@@ -43,6 +43,10 @@ LLM_SYS_PROMPT1 = """"You are a professional {src_lang} to {dst_lang} translator
 "Only output the translation results.
 """
 
-… (removed line, truncated in the diff viewer)
-… (removed line, truncated in the diff viewer)
+LLM_SYS_PROMPT_ZH = """
+你是一个中英文翻译专家,将用户输入的中文翻译成英文。对于非中文内容,它将提供中文翻译结果。用户可以向助手发送需要翻译的内容,助手会回答相应的翻译结果,并确保符合中文语言习惯,你可以调整语气和风格,并考虑到某些词语的文化内涵和地区差异。同时作为翻译家,需将原文翻译成具有信达雅标准的译文。"信" 即忠实于原文的内容与意图;"达" 意味着译文应通顺易懂,表达清晰;"雅" 则追求译文的文化审美和语言的优美。目标是创作出既忠于原作精神,又符合目标语言文化和读者审美的翻译。注意,翻译的文本只能包含拼音化字符,不能包含任何中文字符。
+"""
+
+LLM_SYS_PROMPT_EN = """
+你是一个英中文翻译专家,将用户输入的英文翻译成中文,用户可以向助手发送需要翻译的内容,助手会回答相应的翻译结果,并确保符合英文语言习惯,你可以调整语气和风格,并考虑到某些词语的文化内涵和地区差异。同时作为翻译家,需将英文翻译成具有信达雅标准的中文。"信" 即忠实于原文的内容与意图;"达" 意味着译文应通顺易懂,表达清晰;"雅" 则追求译文的文化审美和语言的优美。目标是创作出既忠于原作精神,又符合目标语言文化和读者审美的翻译。
 """
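For readers who don't parse the Chinese, the two new constants are mirror images: `LLM_SYS_PROMPT_ZH` casts the model as a Chinese-to-English translation expert, `LLM_SYS_PROMPT_EN` as an English-to-Chinese one. Both demand translations meeting the classic 信/达/雅 standard (faithful to the source, fluent and clear, elegant) and allow adjusting tone and register for cultural and regional nuance; the ZH prompt additionally insists the output contain only romanized characters and no Chinese characters. One Python subtlety carried over from `LLM_SYS_PROMPT` is the quadruple quote: `""""` is a triple-quote delimiter followed by a literal `"` that becomes part of the string, and the quoted "rule" lines that follow stay inside the triple-quoted block verbatim rather than concatenating. A standalone sketch of the difference (not from the repo):

```python
# Quadruple quotes: a triple-quote delimiter plus a literal quote character
# that becomes the first character of the string.
s1 = """"You are a translator."""
assert s1 == '"You are a translator.'

# Implicit concatenation of adjacent string literals only happens outside
# a triple-quoted block, e.g. in a parenthesized expression:
s2 = ("You are a translator. "
      "Only output the translation results.")
assert s2 == "You are a translator. Only output the translation results."
```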
transcribe/helpers/translator.py CHANGED

@@ -5,17 +5,18 @@ from functools import lru_cache
 logger = getLogger(__name__)
 
 class QwenTranslator:
-    def __init__(self, model_path, …
+    def __init__(self, model_path, system_prompt_en="", system_prompt_zh="") -> None:
         self.llm = Llama(
             model_path=model_path,
             chat_format="chatml",
             verbose=False)
-        self.…
+        self.sys_prompt_en = system_prompt_en
+        self.sys_prompt_zh = system_prompt_zh
 
     def to_message(self, prompt, src_lang, dst_lang):
         """构造提示词"""
         return [
-            {"role": "system", "content": self.…
+            {"role": "system", "content": self.sys_prompt_en if src_lang == "en" else self.sys_prompt_zh},
             {"role": "user", "content": prompt},
         ]
 
@@ -24,10 +25,10 @@ class QwenTranslator:
         message = self.to_message(prompt, src_lang, dst_lang)
         output = self.llm.create_chat_completion(messages=message, temperature=0)
         return output['choices'][0]['message']['content']
-
+
     def __call__(self, prompt,*args, **kwargs):
         return self.llm(
             prompt,
             *args,
             **kwargs
-        )
+        )
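The shape of the refactor: the hard-coded system prompt becomes two constructor parameters, and `to_message` (its docstring 构造提示词 means "build the prompt messages") selects one per request, with `src_lang == "en"` taking `sys_prompt_en` and anything else falling through to `sys_prompt_zh`. Note that `dst_lang` still reaches `to_message` but is no longer used there; the target language is implied by whichever system prompt is selected. A hedged usage sketch, assuming `translate(prompt, src_lang, dst_lang)` is the signature implied by the second hunk and by the call site in pipe_translate.py:

```python
from config import LLM_MODEL_PATH, LLM_SYS_PROMPT_EN, LLM_SYS_PROMPT_ZH
from transcribe.helpers.translator import QwenTranslator

translator = QwenTranslator(
    LLM_MODEL_PATH,
    system_prompt_en=LLM_SYS_PROMPT_EN,
    system_prompt_zh=LLM_SYS_PROMPT_ZH,
)

# src_lang == "en" selects sys_prompt_en; any other tag (e.g. "zh") falls
# through to sys_prompt_zh, so Chinese input gets the zh-to-en prompt.
# The keyword names below assume translate(prompt, src_lang, dst_lang).
print(translator.translate("今天天气不错", src_lang="zh", dst_lang="en"))
print(translator.translate("Nice weather today", src_lang="en", dst_lang="zh"))
```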
transcribe/pipelines/pipe_translate.py CHANGED

@@ -2,18 +2,18 @@
 from .base import MetaItem, BasePipe, Segment
 from llama_cpp import Llama
 from ..helpers.translator import QwenTranslator
-from config import LLM_MODEL_PATH, …
+from config import LLM_MODEL_PATH, LLM_SYS_PROMPT_EN, LLM_SYS_PROMPT_ZH
 
 
 class TranslatePipe(BasePipe):
     translator = None
-
+
     @classmethod
     def init(cls):
         if cls.translator is None:
-            cls.translator = QwenTranslator(LLM_MODEL_PATH, …
+            cls.translator = QwenTranslator(LLM_MODEL_PATH, LLM_SYS_PROMPT_EN, LLM_SYS_PROMPT_ZH)
+
 
-
     def process(self, in_data: MetaItem) -> MetaItem:
         context = in_data.transcribe_content
         result = self.translator.translate(
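`TranslatePipe` holds the translator at class level and guards `init()`, so the GGUF model is loaded at most once per process however many pipe instances exist; the arguments line up positionally with `QwenTranslator(model_path, system_prompt_en, system_prompt_zh)`. Incidentally, `from llama_cpp import Llama` survives here as context even though `Llama` is now only instantiated inside `QwenTranslator`, so it looks like a leftover import. A hedged driver sketch; `MetaItem`'s construction and `BasePipe`'s no-argument instantiation are assumptions, since base.py and the tail of `process()` sit outside this diff:

```python
from transcribe.pipelines.base import MetaItem
from transcribe.pipelines.pipe_translate import TranslatePipe

TranslatePipe.init()       # first call loads the Llama model; repeats are no-ops
pipe = TranslatePipe()     # assumes BasePipe() needs no required arguments

item = MetaItem(transcribe_content="今天天气怎么样")  # hypothetical construction
out = pipe.process(item)   # runs QwenTranslator.translate() over the content
```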
transcribe/strategy.py CHANGED

@@ -118,7 +118,6 @@ def segments_split(segments, audio_buffer: np.ndarray, sample_rate=16000):
     is_end = False
 
     for idx, seg in enumerate(segments):
-        print(">>>>>>>>>>>>>>>>>> seg: ", seg)
         left_watch_sequences.append(seg)
         if seg.text and seg.text[-1] in markers:
             seg_index = int(seg.t1 / 100 * sample_rate)
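The only change here deletes a noisy debug `print` from the segment loop. The surviving context deserves one note: `int(seg.t1 / 100 * sample_rate)` converts a segment end timestamp into a buffer sample offset, which implies `t1` is in centiseconds (10 ms units), consistent with whisper.cpp's timestamp convention. A small worked example under that assumption:

```python
# whisper.cpp reports segment times t0/t1 in 10 ms units (centiseconds).
sample_rate = 16000               # Hz, the default in segments_split()
t1 = 250                          # 250 cs == 2.5 s into the audio buffer
seg_index = int(t1 / 100 * sample_rate)
assert seg_index == 40000         # 2.5 s * 16000 samples/s
```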