david committed on
Commit 5acd018 · 1 Parent(s): 68d9ae0

rewrite segments analysis

config.py CHANGED
@@ -3,7 +3,7 @@ import re
 import logging
 
 logging.basicConfig(
-    level=logging.INFO,
+    level=logging.DEBUG,
     format="%(asctime)s - %(levelname)s - %(message)s",
     datefmt="%H:%M:%S"
 )
@@ -14,6 +14,8 @@ logging.getLogger("pywhispercpp").setLevel(logging.WARNING)
 BASE_DIR = pathlib.Path(__file__).parent
 MODEL_DIR = BASE_DIR / "moyoyo_asr_models"
 ASSERT_DIR = BASE_DIR / "assets"
+
+SAMPLE_RATE = 16000
 # Punctuation
 SENTENCE_END_MARKERS = ['.', '!', '?', '。', '!', '?', ';', ';', ':', ':']
 PAUSE_END_MARKERS = [',', ',', '、']
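
The new SAMPLE_RATE constant is what transcribe/strategy.py uses to turn Whisper token timestamps, reported in hundredths of a second, into sample offsets in the audio buffer. A minimal sketch of that conversion (it mirrors TranscriptToken.buffer_index() below and the expectation in tests/test_transcript_chunk.py; the helper name is illustrative only):

    SAMPLE_RATE = 16000

    def to_sample_index(t1_centiseconds: float) -> int:
        # t1 / 100 gives seconds; multiplying by the sample rate gives the buffer index.
        return int(t1_centiseconds / 100 * SAMPLE_RATE)

    assert to_sample_index(400) == 64000  # 4 s of audio at 16 kHz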
tests/test_transcript_buffer.py ADDED
@@ -0,0 +1,59 @@
+import unittest
+from transcribe.strategy import TranscriptBuffer  # replace with your actual module name
+
+class TestTranscriptBuffer(unittest.TestCase):
+
+    def setUp(self):
+        self.buffer = TranscriptBuffer()
+
+    def test_update_pending_text(self):
+        self.buffer.update_pending_text("Hello")
+        self.assertEqual(self.buffer.pending_text, "Hello")
+
+    def test_commit_line(self):
+        self.buffer.update_pending_text("This is a line.")
+        self.buffer.commit_line()
+        self.assertEqual(self.buffer.paragraph, "This is a line.")
+        self.assertEqual(self.buffer.pending_text, "")
+
+    def test_commit_paragraph(self):
+        self.buffer.update_pending_text("Sentence 1.")
+        self.buffer.commit_line()
+        self.buffer.update_pending_text("Sentence 2.")
+        self.buffer.commit_line()
+        self.buffer.commit_paragraph(end_of_sentence=True)
+
+        self.assertEqual(self.buffer.get_seg_id(), 1)
+        self.assertEqual(self.buffer.latest_paragraph, "Sentence 1.Sentence 2.")
+        self.assertEqual(self.buffer.paragraph, "")
+        self.assertEqual(self.buffer.pending_text, "")
+
+    def test_commit_paragraph_without_end(self):
+        self.buffer.update_pending_text("Incomplete sentence.")
+        self.buffer.commit_line()
+        self.buffer.commit_paragraph(end_of_sentence=False)
+
+        # the paragraph should not be committed
+        self.assertEqual(self.buffer.get_seg_id(), 0)
+        self.assertEqual(self.buffer.paragraph, "Incomplete sentence.")
+
+    def test_update_and_commit_end_sentence(self):
+        self.buffer.update_and_commit("Stable.", "Remaining", is_end_sentence=True)
+
+        self.assertEqual(self.buffer.latest_paragraph, "Stable.")
+        self.assertEqual(self.buffer.pending_text, "Remaining")
+        self.assertEqual(self.buffer.paragraph, "")
+
+    def test_update_and_commit_partial_sentence(self):
+        self.buffer.update_and_commit("Partial", "New Buffer", is_end_sentence=False)
+
+        self.assertEqual(self.buffer.paragraph, "Partial")
+        self.assertEqual(self.buffer.pending_text, "New Buffer")
+        self.assertEqual(self.buffer.get_seg_id(), 0)
+
+    def test_current_not_commit_text(self):
+        self.buffer.update_and_commit("Part 1.", "Live text", is_end_sentence=False)
+        self.assertEqual(self.buffer.current_not_commit_text, "Part 1.Live text")
+
+if __name__ == "__main__":
+    unittest.main()
tests/test_transcript_chunk.py ADDED
@@ -0,0 +1,57 @@
+import unittest
+from transcribe.strategy import TranscriptChunk, TranscriptToken, SplitMode
+
+class TestTranscriptChunk(unittest.TestCase):
+
+    def setUp(self):
+        self.tokens = [
+            TranscriptToken(text="Hello", t0=0, t1=100),
+            TranscriptToken(text=",", t0=100, t1=200),
+            TranscriptToken(text="world", t0=200, t1=300),
+            TranscriptToken(text=".", t0=300, t1=400),
+        ]
+        self.chunk = TranscriptChunk(items=self.tokens, separator=" ")
+
+    def test_split_by_punctuation(self):
+        chunks = self.chunk.split_by(SplitMode.PUNCTUATION)
+        self.assertEqual(len(chunks), 3)
+        self.assertEqual(chunks[0].join(), "Hello ,")
+        self.assertEqual(chunks[1].join(), "world .")
+        self.assertEqual(chunks[2].join(), "")
+
+    def test_get_split_first_rest(self):
+        first, rest = self.chunk.get_split_first_rest(SplitMode.PUNCTUATION)
+        self.assertEqual(first.join(), "Hello ,")
+        self.assertEqual(len(rest), 2)
+        self.assertEqual(rest[0].join(), "world .")
+        self.assertEqual(rest[1].join(), "")
+
+    def test_punctuation_numbers(self):
+        self.assertEqual(self.chunk.puncation_numbers(), 2)
+
+    def test_length(self):
+        self.assertEqual(self.chunk.length(), 4)
+
+    def test_join(self):
+        self.assertEqual(self.chunk.join(), "Hello , world .")
+
+    def test_compare(self):
+        other_chunk = TranscriptChunk(items=[
+            TranscriptToken(text="Hello", t0=0, t1=100),
+            TranscriptToken(text="!", t0=100, t1=200),
+        ], separator=" ")
+        similarity = self.chunk.compare(other_chunk)
+        self.assertTrue(0 < similarity < 1)
+
+    def test_has_punctuation(self):
+        self.assertTrue(self.chunk.has_punctuation())
+
+    def test_get_buffer_index(self):
+        # t1 = 400 -> index = 400 / 100 * 16000 = 64000
+        self.assertEqual(self.chunk.get_buffer_index(), 64000)
+
+    def test_is_end_sentence(self):
+        self.assertTrue(self.chunk.is_end_sentence())
+
+if __name__ == '__main__':
+    unittest.main()
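
Assuming the repository root is on PYTHONPATH (it is when invoking Python from the project root), both new test modules can be run with, for example, python -m unittest tests.test_transcript_buffer tests.test_transcript_chunk.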
transcribe/strategy.py CHANGED
@@ -3,303 +3,291 @@ import collections
 import logging
 from difflib import SequenceMatcher
 from itertools import chain
-from dataclasses import dataclass
-from typing import List, Tuple, Optional, Deque, Any, Iterator
-from config import SENTENCE_END_MARKERS, ALL_MARKERS, SENTENCE_END_PATTERN, REGEX_MARKERS
 import numpy as np
-
 logger = logging.getLogger("TranscriptionStrategy")
 
 
 @dataclass
-class TranscriptSegment:
     """Represents a transcript segment with text and timing information."""
-    text: str
     t0: float  # start time (hundredths of a second)
     t1: float  # end time (hundredths of a second)
 
-def join_segment_text(segments: List[TranscriptSegment], separator: str = "") -> str:
-    """Join the text of multiple segments."""
-    return separator.join(seg.text for seg in segments)
-
-class TextStabilityBuffer:
-    """
-    Determines transcript stability by comparing the similarity of consecutive text samples.
-    The text is considered stable once consecutive samples exceed a similarity threshold.
-    """
-    def __init__(self, max_history: int = 2):
-        self.history: Deque[Tuple[str, int]] = collections.deque(maxlen=max_history)
-
-    def add_entry(self, text: str, index: int) -> None:
-        """
-        Add a new text sample and its index to the history.
-
-        Args:
-            text: the text content
-            index: index relative to the current buffer
-        """
-        self.history.append((text, index))
-
-    def get_stable_index(self, similarity_threshold: float = 0.7) -> Optional[int]:
-        """
-        Decide from text similarity whether the text is stable and return the stable text's index.
-
-        Args:
-            similarity_threshold: similarity above which the text is considered stable
-
-        Returns:
-            Index of the stable text, or None if no stable text was found.
-        """
-        if len(self.history) < 2:
-            return None
 
-        text1, _ = self.history[0]
-        text2, idx2 = self.history[1]
 
-        similarity = self._calculate_similarity(text1, text2)
-
-        if similarity >= similarity_threshold:
-            self.history.clear()
-            return idx2
-        return None
 
     @staticmethod
     def _calculate_similarity(text1: str, text2: str) -> float:
         """Compute the similarity of two pieces of text."""
         return SequenceMatcher(None, text1, text2).ratio()
 
 
-class TranscriptionManager:
-    """
-    Manages the tiered transcript structure: temporary string -> short sentence -> full paragraph.
-
-    |-- confirmed text --|-- observation window --|-- new input --|
-    """
-    def __init__(self):
-        self._committed_segments: List[str] = []   # confirmed full paragraphs
-        self._committed_sentences: List[str] = []  # confirmed short sentences
-        self._temp_string: str = ""                # temporary string buffer
-
-    def check_line_break(self, min_length: int = 20) -> bool:
-        """Check whether the current sentence is long enough to break the line."""
-        return self.sentence_length >= min_length
-
-    def force_line_break(self) -> None:
-        """Force a line break: keep the current content but start a new paragraph."""
-        if self.current_sentence:
-            self._committed_segments.append(self.current_sentence)
-        self._committed_sentences = []
-
-    @property
-    def current_sentence(self) -> str:
-        """The confirmed short sentences joined together."""
-        return "".join(self._committed_sentences)
 
-    @property
-    def remaining_text(self) -> str:
-        return self._temp_string
 
-    @property
-    def latest_segment(self) -> str:
-        """The most recently confirmed paragraph."""
-        return self._committed_segments[-1] if self._committed_segments else ""
 
-    @property
-    def segment_count(self) -> int:
-        """Number of confirmed paragraphs."""
-        return len(self._committed_segments)
 
-    @property
-    def sentence_length(self) -> int:
-        """Total character length of the current short sentences."""
-        return sum(len(s) for s in self._committed_sentences)
 
-    def update_temp(self, text: str) -> 'TranscriptionManager':
-        """Update the temporary string."""
-        self._temp_string = text
-        return self
 
-    def commit_sentence(self) -> None:
-        """Commit the temporary string to the sentence list."""
-        if self._temp_string:
-            self._committed_sentences.append(self._temp_string)
-        self._temp_string = ""
 
-    def commit_segment(self, is_end_of_sentence: bool = False) -> None:
         """
-        Commit the current content to the appropriate level.
 
         Args:
-            is_end_of_sentence: whether this is the end of a complete sentence
-        """
-        self.commit_sentence()
-        if is_end_of_sentence and self._committed_sentences:
-            self._committed_segments.append(self.current_sentence)
-            self._committed_sentences = []
-
-    def get_all_text(self) -> str:
-        """Return all committed text."""
-        all_segments = self._committed_segments.copy()
-        if self.current_sentence:
-            all_segments.append(self.current_sentence)
-        if self._temp_string:
-            all_segments.append(self._temp_string)
-        return "\n".join(all_segments)
-
-
-class TranscriptionSplitter:
-    """Splits transcript segments based on speech and text features."""
-
-    @staticmethod
-    def group_by_sentences(segments: List[TranscriptSegment]) -> List[List[TranscriptSegment]]:
-        """Group segments into complete sentences."""
-        sequences = []
-        temp_seq = []
-
-        for seg in segments:
-            temp_seq.append(seg)
-            if any(marker in seg.text for marker in SENTENCE_END_MARKERS):
-                sequences.append(temp_seq.copy())
-                temp_seq = []
-
-        if temp_seq:
-            sequences.append(temp_seq)
-        return sequences
-
-    @staticmethod
-    def split_by_punctuation(
-        segments: List[TranscriptSegment],
-        sample_rate: int = 16000,
-        segment_skip_index=0
-    ) -> Tuple[int, List[TranscriptSegment], List[TranscriptSegment], bool]:
-        """
-        Split segments at punctuation into a left (confirmed) part and a right (pending) part.
-
-        Returns:
-            (split index, left segments, right segments, whether it ends a sentence)
         """
-        left_segments = []
-        right_segments = []
-        split_index = 0
-        is_sentence_end = False
-
-        # # short audio uses every punctuation mark as a split point
-        # buffer_duration = len(audio_buffer) / sample_rate
-        # markers = ALL_MARKERS if buffer_duration < 12 else SENTENCE_END_MARKERS
-        skip_segments = segments[:segment_skip_index + 1]
-        skipped_segments = segments[segment_skip_index:]
-
-        markers = ALL_MARKERS
-        for idx, seg in enumerate(skipped_segments):
-            left_segments.append(seg)
-            if seg.text and seg.text[-1] in markers:
-                split_index = int(seg.t1 / 100 * sample_rate)
-                is_sentence_end = bool(SENTENCE_END_PATTERN.search(seg.text))
-                right_segments = skipped_segments[min(idx + 1, len(skipped_segments)):]
-                break
-        left_segments = skip_segments + left_segments
-        return split_index, left_segments, right_segments, is_sentence_end
-
-    @staticmethod
-    def split_by_sequences(
-        segments: List[TranscriptSegment],
-        sample_rate: int = 16000
-    ) -> Tuple[int, Iterator[TranscriptSegment], Iterator[TranscriptSegment], bool]:
-        """
-        For long text, keep only the two most recent sentence groups.
-
-        Returns:
-            (split index, left segments, right segments, whether it ends a sentence)
-        """
-        sequences = TranscriptionSplitter.group_by_sentences(segments)
-
-        if len(sequences) > 2:
-            logger.info(f"Buffer clip via sequence, current length: {len(sequences)}")
-            left_segments = chain(*sequences[:-2])
-            right_segments = chain(*sequences[-2:])
-
-            # determine the cut point
-            last_sequence = sequences[-3]
-            last_segment = last_sequence[-1]
-            split_index = int(last_segment.t1 / 100 * sample_rate)
-
-            return split_index, left_segments, right_segments, True
-
-        return 0, iter([]), iter(segments), False
-
-
-class TranscriptionStabilizer(TranscriptionSplitter):
-    """
-    Stabilizes transcription results: confirms and manages transcript segments.
-    """
-    def __init__(self, sample_rate: int = 16000):
-        self.text_manager = TranscriptionManager()
-        self.sample_rate = sample_rate
 
     @property
-    def latest_segment(self):
-        return self.text_manager.latest_segment
 
     @property
-    def segment_count(self):
-        return self.text_manager.segment_count
-
 
     @property
-    def remaining_text(self):
-        return self.text_manager.remaining_text
 
-    @property
-    def stable_string(self):
-        return self.text_manager.current_sentence
 
-    def process_segments(self, segments: List[TranscriptSegment]) -> Tuple[Optional[int], bool]:
-        """
-        Process transcript segments and confirm the stable text.
-
-        Args:
-            segments: list of transcript segments
-
-        Returns:
-            (audio split index, whether the text is long enough to need a line break)
-        """
-        # find the first segment containing punctuation to use as the split point
-        split_index = None
-        stable_segments = []
-        force_split = False
-        if len(segments) < 20:
-            remaining_text = join_segment_text(segments)
-            self.text_manager.update_temp(remaining_text)
-            return split_index, False, join_segment_text(segments), self.text_manager.remaining_text
-
-        # look for punctuation after the first 20 segments
-        split_index, left_segments, right_segments, is_sentence_end = self.split_by_punctuation(segments[20:], sample_rate=self.sample_rate)
-
-        if split_index is not None:  # punctuation found: confirm the content before it
-            stable_text = join_segment_text(left_segments)
-            self.text_manager.update_temp(stable_text).commit_sentence()
-
-            # update the remaining text
-            remaining_text = join_segment_text(right_segments)
-            self.text_manager.update_temp(remaining_text)
         else:
-            # no punctuation, but more than 22 segments have accumulated: cut at segment 20
-            if len(segments) > 22 and not REGEX_MARKERS.search(join_segment_text(segments)):
-                split_index = int(segments[20].t1 / 100 * self.sample_rate)
-                stable_idx = 21  # use the index of the 22nd segment directly
-                force_split = True
-                stable_text = join_segment_text(segments[:stable_idx])
-                self.text_manager.update_temp(stable_text).commit_sentence()
-                self.text_manager.update_temp(join_segment_text(segments[stable_idx:]))
-            else:
-                # no punctuation found: keep everything as temporary text
-                self.text_manager.update_temp(join_segment_text(segments))
-
-        # check whether the line-break threshold is reached
-        should_linebreak = self.text_manager.sentence_length >= 20 or force_split
-
-        return split_index, should_linebreak, join_segment_text(stable_segments), self.text_manager.remaining_text
-
 import logging
 from difflib import SequenceMatcher
 from itertools import chain
+from dataclasses import dataclass, field
+from typing import List, Tuple, Optional, Deque, Any, Iterator, Literal
+from config import SENTENCE_END_MARKERS, ALL_MARKERS, SENTENCE_END_PATTERN, REGEX_MARKERS, PAUSEE_END_PATTERN, SAMPLE_RATE
 import numpy as np
+from enum import Enum
 logger = logging.getLogger("TranscriptionStrategy")
 
 
+class SplitMode(Enum):
+    PUNCTUATION = "punctuation"
+    PAUSE = "pause"
+    END = "end"
+
+
+@dataclass
+class TranscriptResult:
+    seg_id: int = 0
+    cut_index: int = 0
+    is_end_sentence: bool = False
+    context: str = ""
+
+    def partial(self):
+        return not self.is_end_sentence
+
 @dataclass
+class TranscriptToken:
     """Represents a transcript segment with text and timing information."""
+    text: str  # transcribed text content
     t0: float  # start time (hundredths of a second)
     t1: float  # end time (hundredths of a second)
 
+    def is_punctuation(self):
+        """Check whether the text contains punctuation."""
+        return REGEX_MARKERS.search(self.text) is not None
+
+    def is_end(self):
+        """Check whether the text is a sentence-end marker."""
+        return SENTENCE_END_PATTERN.search(self.text) is not None
+
+    def is_pause(self):
+        """Check whether the text is a pause marker."""
+        return PAUSEE_END_PATTERN.search(self.text) is not None
 
+    def buffer_index(self) -> int:
+        return int(self.t1 / 100 * SAMPLE_RATE)
 
+@dataclass
+class TranscriptChunk:
+    """A group of transcript tokens that supports splitting and comparison."""
+    separator: str = ""  # separator used to join tokens
+    items: list[TranscriptToken] = field(default_factory=list)  # list of transcript tokens
 
     @staticmethod
     def _calculate_similarity(text1: str, text2: str) -> float:
         """Compute the similarity of two pieces of text."""
         return SequenceMatcher(None, text1, text2).ratio()
 
+    def split_by(self, mode: SplitMode) -> list['TranscriptChunk']:
+        """Split the token list at the markers selected by the given mode."""
+        if mode == SplitMode.PUNCTUATION:
+            indexes = [i for i, seg in enumerate(self.items) if seg.is_punctuation()]
+        elif mode == SplitMode.PAUSE:
+            indexes = [i for i, seg in enumerate(self.items) if seg.is_pause()]
+        elif mode == SplitMode.END:
+            indexes = [i for i, seg in enumerate(self.items) if seg.is_end()]
+        else:
+            raise ValueError(f"Unsupported mode: {mode}")
 
+        # shift each cut point forward by one index so the separator belongs to the preceding chunk
+        cut_points = [0] + sorted(i + 1 for i in indexes) + [len(self.items)]
+        return [
+            TranscriptChunk(items=self.items[start:end], separator=self.separator)
+            for start, end in zip(cut_points, cut_points[1:])
+        ]
 
+    def get_split_first_rest(self, mode: SplitMode):
+        chunks = self.split_by(mode)
+        fisrt_chunk = chunks[0] if chunks else self
+        rest_chunks = chunks[1:] if chunks else []
+        return fisrt_chunk, rest_chunks
 
+    def puncation_numbers(self) -> int:
+        """Count the punctuation marks in the chunk."""
+        return sum(1 for seg in self.items if REGEX_MARKERS.search(seg.text))
+
+    def length(self) -> int:
+        """Return the number of tokens in the chunk."""
+        return len(self.items)
+
+    def join(self) -> str:
+        """Join the tokens into a single string."""
+        return self.separator.join(seg.text for seg in self.items)
 
+    def compare(self, chunk: Optional['TranscriptChunk'] = None) -> float:
+        """Return the similarity between this chunk and another chunk."""
+        if not chunk:
+            return 0
+        return self._calculate_similarity(self.join(), chunk.join())
 
+    def has_punctuation(self) -> bool:
+        return any(seg.is_punctuation() for seg in self.items)
 
+    def get_buffer_index(self) -> int:
+        return self.items[-1].buffer_index()
 
+    def is_end_sentence(self) -> bool:
+        return self.items[-1].is_end()
+
+
+class TranscriptHistory:
+    """Keeps a short history of transcript chunks."""
+
+    def __init__(self) -> None:
+        self.history = collections.deque(maxlen=2)  # the two most recent chunks
 
+    def add(self, chunk: TranscriptChunk):
+        """Add a new chunk to the history."""
+        self.history.appendleft(chunk)
+
+    def previous_chunk(self) -> Optional[TranscriptChunk]:
+        """Return the previous chunk, if there is one."""
+        return self.history[1] if len(self.history) == 2 else None
+
+    def lastest_chunk(self):
+        """Return the most recent chunk."""
+        return self.history[-1]
+
+
+class TranscriptBuffer:
+    """
+    Manages the tiered transcript structure: pending text -> line -> paragraph.
+
+    |-- confirmed text --|-- observation window --|-- new input --|
+
+    Implements the pending -> line -> paragraph buffering logic.
+    """
+
+    def __init__(self):
+        self._segments: List[str] = []   # confirmed full paragraphs
+        self._sentences: List[str] = []  # short sentences in the current paragraph
+        self._buffer: str = ""           # text currently in the buffer
+
+    def update_pending_text(self, text: str) -> None:
+        """Update the pending buffer string."""
+        self._buffer = text
+
+    def commit_line(self) -> None:
+        """Commit the buffered string as a short sentence."""
+        if self._buffer:
+            self._sentences.append(self._buffer)
+        self._buffer = ""
+
+    def commit_paragraph(self, end_of_sentence: bool = False) -> None:
         """
+        Commit the current sentences as a full paragraph (e.g. when a sentence ends).
 
         Args:
+            end_of_sentence: whether this is a sentence end (e.g. a period was detected)
         """
+        self.commit_line()
+        if end_of_sentence and self._sentences:
+            self._segments.append("".join(self._sentences))
+            self._sentences.clear()
+
+    def update_and_commit(self, stable_string: str, remaining_string: str, is_end_sentence=False):
+        self.update_pending_text(stable_string)
+        if is_end_sentence:
+            self.commit_paragraph(end_of_sentence=True)
+        else:
+            self.commit_line()
+        self.update_pending_text(remaining_string)
+
+    def get_seg_id(self) -> int:
+        return len(self._segments)
+
     @property
+    def paragraph(self) -> str:
+        """The short sentences of the current paragraph joined together."""
+        return "".join(self._sentences)
 
+    @property
+    def pending_text(self) -> str:
+        """The current buffer content."""
+        return self._buffer
 
     @property
+    def latest_paragraph(self) -> str:
+        """The most recently confirmed paragraph."""
+        return self._segments[-1] if self._segments else ""
 
     @property
+    def current_not_commit_text(self) -> str:
+        return self.paragraph + self.pending_text
+
+
+class TranscriptStabilityAnalyzer:
+    def __init__(self) -> None:
+        self._transcript_buffer = TranscriptBuffer()
+        self._transcript_history = TranscriptHistory()
+
+    def merge_chunks(self, chunks: List[TranscriptChunk]) -> str:
+        return "".join(r.join() for r in chunks)
 
+    def analysis(self, separator, current: List[TranscriptToken], buffer_duration: float) -> Iterator[TranscriptResult]:
+        current = TranscriptChunk(items=current, separator=separator)
+        self._transcript_history.add(current)
+
+        prev = self._transcript_history.previous_chunk()
+
+        if not prev:
+            yield TranscriptResult(context=current.join())
+            return
 
+        self._transcript_buffer.update_pending_text(current.join())
+
+        if buffer_duration <= 12:
+            yield from self._handle_short_buffer(current, prev)
         else:
+            yield from self._handle_long_buffer(current)
+
+    def _handle_short_buffer(self, curr: TranscriptChunk, prev: TranscriptChunk) -> Iterator[TranscriptResult]:
+        curr_first, curr_rest = curr.get_split_first_rest(SplitMode.PUNCTUATION)
+        prev_first, _ = prev.get_split_first_rest(SplitMode.PUNCTUATION)
+        core = curr_first.compare(prev_first)
+        has_punctuation = curr_first.has_punctuation()
+        logger.debug(f"Compare with prev score: {core}, is end: {curr_first.is_end_sentence()}, has_punctuation: {has_punctuation}, current_first: {curr_first.join()}")
+        if core >= 0.8:
+            yield from self._yield_commit_results(curr_first, curr_rest, curr_first.is_end_sentence())
+        else:
+            yield TranscriptResult(
+                seg_id=self._transcript_buffer.get_seg_id(),
+                context=self._transcript_buffer.current_not_commit_text
+            )
+
+    def _handle_long_buffer(self, curr: TranscriptChunk) -> Iterator[TranscriptResult]:
+        chunks = curr.split_by(SplitMode.PUNCTUATION)
+        if len(chunks) > 2:
+            stable, remaining = chunks[:-2], chunks[-2:]
+            stable_str = self.merge_chunks(stable)
+            remaining_str = self.merge_chunks(remaining)
+            yield from self._yield_commit_results(
+                stable[-1], remaining, is_end_sentence=True  # hard-coded to True for now
+            )
+        else:
+            yield TranscriptResult(
+                seg_id=self._transcript_buffer.get_seg_id(),
+                context=self._transcript_buffer.current_not_commit_text
+            )
+
+    def _yield_commit_results(self, stable_chunk, remaining_chunks, is_end_sentence: bool) -> Iterator[TranscriptResult]:
+        stable_str = stable_chunk.join() if hasattr(stable_chunk, "join") else self.merge_chunks([stable_chunk])
+        remaining_str = self.merge_chunks(remaining_chunks)
+
+        frame_cut_index = stable_chunk.get_buffer_index()
+        logger.debug(f"Current cut index: {frame_cut_index}, Stable string: {stable_str}, Remaining_str: {remaining_str}")
+
+        prev_seg_id = self._transcript_buffer.get_seg_id()
+        self._transcript_buffer.update_and_commit(stable_str, remaining_str, is_end_sentence)
+        curr_seg_id = self._transcript_buffer.get_seg_id()
+
+        logger.debug(f"current buffer: {self._transcript_buffer.__dict__}")
+
+        if curr_seg_id > prev_seg_id:
+            # a new paragraph was produced
+            yield TranscriptResult(
+                seg_id=prev_seg_id,
+                cut_index=frame_cut_index,
+                context=self._transcript_buffer.latest_paragraph,
+                is_end_sentence=True
+            )
+
+        # if there is still pending text
+        if (pending_text := self._transcript_buffer.pending_text.strip()):
+            yield TranscriptResult(
+                seg_id=self._transcript_buffer.get_seg_id(),
+                cut_index=frame_cut_index,
+                context=pending_text
+            )
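
To see how the new pieces fit together, here is a minimal sketch (assuming transcribe.strategy and the config patterns it imports are available). It feeds the same hypothesis twice so the 0.8 similarity gate in _handle_short_buffer commits the first clause on the second pass; in real use the second call would carry a fresh re-transcription of the remaining audio rather than a repeat:

    from transcribe.strategy import TranscriptToken, TranscriptStabilityAnalyzer

    tokens = [
        TranscriptToken(text="Hello", t0=0, t1=100),
        TranscriptToken(text=",", t0=100, t1=200),
        TranscriptToken(text="world", t0=200, t1=300),
    ]

    analyzer = TranscriptStabilityAnalyzer()
    for attempt in range(2):
        # buffer_duration <= 12 s takes the short-buffer path that compares against the previous hypothesis
        for result in analyzer.analysis(" ", list(tokens), buffer_duration=3.0):
            print(attempt, result.seg_id, result.cut_index, repr(result.context), result.partial())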
transcribe/whisper_llm_serve.py CHANGED
@@ -13,12 +13,7 @@ from .server import ServeClientBase
 from .utils import log_block, save_to_wave
 from .translatepipes import TranslatePipes
 from .strategy import (
-    TextStabilityBuffer,
-    TranscriptionManager,
-    TranscriptionSplitter,
-    TranscriptSegment,
-    TranscriptionStabilizer,
-    join_segment_text)
 
 logger = getLogger("TranscriptionService")
 
@@ -34,8 +29,7 @@ class WhisperTranscriptionService(ServeClientBase):
         self.target_language = dst_lang  # target translation language
 
         # transcription stability management
-        self._text_stability_buffer = TextStabilityBuffer()
-        self._transcription_manager = TranscriptionManager()
         self._translate_pipe = pipe
 
         # audio processing
@@ -56,7 +50,6 @@ class WhisperTranscriptionService(ServeClientBase):
         self.translate_thread = self._start_thread(self._transcription_processing_loop)
         self.frame_processing_thread = self._start_thread(self._frame_processing_loop)
 
-        self.text_stablizer = TranscriptionStabilizer()
 
     def _start_thread(self, target_function) -> threading.Thread:
         """Start a daemon thread running the given function."""
@@ -138,7 +131,7 @@ class WhisperTranscriptionService(ServeClientBase):
 
         return frames.copy()
 
-    def _transcribe_audio(self, audio_buffer: np.ndarray) -> List[TranscriptSegment]:
         """Transcribe the audio and return transcript segments."""
         log_block("Audio buffer length", f"{audio_buffer.shape[0]/self.sample_rate:.2f}", "s")
         start_time = time.perf_counter()
@@ -149,7 +142,10 @@ class WhisperTranscriptionService(ServeClientBase):
         log_block("Whisper transcription output", f"{''.join(seg.text for seg in segments)}", "")
         log_block("Whisper transcription time", f"{(time.perf_counter() - start_time):.3f}", "s")
 
-        return segments
 
     def _translate_text(self, text: str) -> str:
         """Translate the text into the target language."""
@@ -167,66 +163,7 @@ class WhisperTranscriptionService(ServeClientBase):
 
         return translated_text
 
-    def _find_best_split_position(self, segments: list, target_length: int = 20) -> int:
-        """Find the best split position: as close to the target length as possible and at a word/character boundary."""
-        if len(segments) <= target_length:
-            return 0
-
-        # search forward from the target length for a suitable split point
-        for i in range(target_length, min(target_length + 10, len(segments))):
-            # for Chinese, every character can serve as a split point
-            # for English, split at spaces
-            if self.source_language == "zh" or segments[i] == " ":
-                return i
-
-        # if no ideal split point is found, split at the target length
-        return target_length
-
-    def _analyze_segments(self, segments: List[TranscriptSegment], audio_buffer: np.ndarray) -> Tuple[Optional[int], str, str, bool]:
-        """
-        Analyze transcript segments and decide which part is stable and which part still needs observation.
-
-        Returns:
-            (split index, left stable text, right pending text, whether it ends a sentence)
-        """
-        # try splitting at punctuation
-        left_idx, left_segments, right_segments, is_end = TranscriptionSplitter.split_by_punctuation(
-            segments, audio_buffer, self.sample_rate
-        )
-
-        left_text = join_segment_text(left_segments, self.text_separator)
-        right_text = join_segment_text(right_segments, self.text_separator)
-
-        # if a split point was found, check the stability of the left text
-        if left_idx != 0:
-            self._text_stability_buffer.add_entry(left_text, left_idx)
-            stable_idx = self._text_stability_buffer.get_stable_index()
-            if stable_idx:
-                should_break = True if (self._transcription_manager.sentence_length >= 20) else False
-                return stable_idx, left_text, right_text, should_break
-
-        # if punctuation did not yield a stable point, check the sentence length
-        if len(segments) >= 20:  # a longer threshold, to ensure there is enough content to split
-            # try to split at a word boundary near 20 characters
-            split_pos = self._find_best_split_position(segments)
-            if split_pos > 0:
-                left_text = join_segment_text(segments[:split_pos], self.text_separator)
-                right_text = join_segment_text(segments[split_pos:], self.text_separator)
-                audio_pos = int(segments[split_pos].t1 / 100 * self.sample_rate)
-                return audio_pos, left_text, right_text, True
 
-        # if punctuation did not yield a stable point, fall back to the sentence-sequence method
-        left_idx, left_segments, right_segments, is_end = TranscriptionSplitter.split_by_sequences(
-            segments, self.sample_rate
-        )
-
-        if left_idx != 0:
-            left_text = join_segment_text(left_segments, self.text_separator)
-            right_text = join_segment_text(right_segments, self.text_separator)
-            return left_idx, left_text, right_text, is_end
-
-        # no split point found at all
-        return None, left_text, right_text, is_end
 
     def _transcription_processing_loop(self) -> None:
         """Main transcription processing loop."""
@@ -248,79 +185,49 @@ class WhisperTranscriptionService(ServeClientBase):
                 time.sleep(0.2)
                 continue
 
-            c += 1
-            save_to_wave(f"dev-{c}.wav", audio_buffer)
 
             # try:
             segments = self._transcribe_audio(audio_buffer)
 
             # process the transcription results and send them to the client
            for result in self._process_transcription_results(segments, audio_buffer):
                 self._send_result_to_client(result)
 
             # except Exception as e:
             #     logger.error(f"Error processing audio: {e}")
 
-    def _process_transcription_results(self, segments: List[TranscriptSegment], audio_buffer: np.ndarray) -> Iterator[TransResult]:
         """
         Process transcription results and produce translation results.
 
         Returns:
             An iterator of TransResult objects.
         """
-        # join the text of all segments
-        full_text = self.text_separator.join(seg.text for seg in segments)
-        if not full_text:
             return
-
-        cut_index, is_sentence_end, stable_text, remaining_text = self.text_stablizer.process_segments(segments)
-        # if a stable split point was found
-        if cut_index:
-            # update the audio buffer, dropping the processed part
-            self._update_audio_buffer(cut_index)
-
-            # commit the stable text
-            log_block("Stable transcription", f"{stable_text}")
 
-            # if it ends a sentence, send the translation of the complete sentence
-            if is_sentence_end:
-                segment_text = self.text_stablizer.latest_segment
-                segment_id = self.text_stablizer.segment_count - 1
-
-                # produce the translation result for the confirmed sentence
-                yield TransResult(
-                    seg_id=segment_id,
-                    context=segment_text,
-                    from_=self.source_language,
-                    to=self.target_language,
-                    tran_content=self._translate_text(segment_text),
-                    partial=False
-                )
-
-                # if there is remaining text, produce a provisional translation result
-                if self.text_stablizer.remaining_text.strip():
-                    yield TransResult(
-                        seg_id=segment_id + 1,
-                        context=self.text_stablizer.remaining_text,
-                        from_=self.source_language,
-                        to=self.target_language,
-                        tran_content=self._translate_text(self.text_stablizer.remaining_text.strip()),
-                        partial=True
-                    )
-        else:
-            # no stable point found: send a provisional translation of everything so far
-            segment_id = self.text_stablizer.segment_count
-            current_text = self.text_stablizer.stable_string + self.text_stablizer.remaining_text
-
             yield TransResult(
-                seg_id=segment_id,
-                context=current_text,
                 from_=self.source_language,
                 to=self.target_language,
-                tran_content=self._translate_text(current_text),
-                partial=True
             )
 
     def _send_result_to_client(self, result: TransResult) -> None:
         """Send the translation result to the client."""
         try:
 
 from .utils import log_block, save_to_wave
 from .translatepipes import TranslatePipes
 from .strategy import (
+    TranscriptStabilityAnalyzer, TranscriptToken)
 
 logger = getLogger("TranscriptionService")
 
 
         self.target_language = dst_lang  # target translation language
 
         # transcription stability management
+        self._transcrible_analysis = TranscriptStabilityAnalyzer()
         self._translate_pipe = pipe
 
         # audio processing
 
         self.translate_thread = self._start_thread(self._transcription_processing_loop)
         self.frame_processing_thread = self._start_thread(self._frame_processing_loop)
 
 
     def _start_thread(self, target_function) -> threading.Thread:
         """Start a daemon thread running the given function."""
 
 
         return frames.copy()
 
+    def _transcribe_audio(self, audio_buffer: np.ndarray) -> List[TranscriptToken]:
         """Transcribe the audio and return transcript segments."""
         log_block("Audio buffer length", f"{audio_buffer.shape[0]/self.sample_rate:.2f}", "s")
         start_time = time.perf_counter()
 
         log_block("Whisper transcription output", f"{''.join(seg.text for seg in segments)}", "")
         log_block("Whisper transcription time", f"{(time.perf_counter() - start_time):.3f}", "s")
 
+        return [
+            TranscriptToken(text=s.text, t0=s.t0, t1=s.t1)
+            for s in segments
+        ]
 
     def _translate_text(self, text: str) -> str:
         """Translate the text into the target language."""
 
 
         return translated_text
 
 
     def _transcription_processing_loop(self) -> None:
         """Main transcription processing loop."""
 
                 time.sleep(0.2)
                 continue
 
+            # c += 1
+            # save_to_wave(f"dev-{c}.wav", audio_buffer)
 
             # try:
             segments = self._transcribe_audio(audio_buffer)
 
             # process the transcription results and send them to the client
             for result in self._process_transcription_results(segments, audio_buffer):
+                print(result)
                 self._send_result_to_client(result)
 
             # except Exception as e:
             #     logger.error(f"Error processing audio: {e}")
 
+    def _process_transcription_results(self, segments: List[TranscriptToken], audio_buffer: np.ndarray) -> Iterator[TransResult]:
         """
         Process transcription results and produce translation results.
 
         Returns:
             An iterator of TransResult objects.
         """
+
+        if not segments:
             return
+
+        for ana_result in self._transcrible_analysis.analysis(
+                self.text_separator, segments, len(audio_buffer) / self.sample_rate):
+            if (cut_index := ana_result.cut_index) > 0:
+                # update the audio buffer, dropping the processed part
+                self._update_audio_buffer(cut_index)
 
+            translated_context = self._translate_text(ana_result.context)
+            log_block("Translated context:", translated_context)
             yield TransResult(
+                seg_id=ana_result.seg_id,
+                context=ana_result.context,
                 from_=self.source_language,
                 to=self.target_language,
+                tran_content=translated_context,
+                partial=ana_result.partial()
             )
 
+
     def _send_result_to_client(self, result: TransResult) -> None:
         """Send the translation result to the client."""
         try:
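
For reference, a sketch of how a consumer of this stream can tell provisional output from final output (the field names are the ones passed to TransResult above; the handler itself is assumed, not part of the repository):

    def handle(result) -> None:
        # partial=True: provisional text for segment seg_id; it may still be revised on a later pass.
        # partial=False: the paragraph identified by seg_id is final and will not change again.
        tag = "live" if result.partial else "final"
        print(f"[{tag}] #{result.seg_id} {result.context} -> {result.tran_content}")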