Upload folder using huggingface_hub

Files changed:
- improvisation_lab/application/interval_practice/web_interval_app.py +60 -11
- improvisation_lab/presentation/interval_practice/web_interval_view.py +46 -6
- improvisation_lab/presentation/view_text_manager.py +7 -2
- tests/application/interval_practice/test_web_interval_app.py +27 -2
- tests/presentation/interval_practice/test_web_interval_view.py +2 -0
- tests/presentation/test_view_text_manager.py +5 -0
improvisation_lab/application/interval_practice/web_interval_app.py  CHANGED

@@ -1,6 +1,7 @@
 """Web application for interval practice."""
 
-from typing import Tuple
+import time
+from typing import Any, List, Tuple
 
 import numpy as np
 
@@ -39,6 +40,10 @@ class WebIntervalPracticeApp(BasePracticeApp):
             config=config,
         )
         self.base_note = "-"
+        self.results_table: List[List[Any]] = []
+        self.progress_timer: float = 0.0
+        self.is_auto_advance = False
+        self.note_duration = 1.5
 
     def _process_audio_callback(self, audio_data: np.ndarray):
         """Process incoming audio data and update the application state.
@@ -56,10 +61,15 @@ class WebIntervalPracticeApp(BasePracticeApp):
         result = self.service.process_audio(audio_data, current_note)
 
         # Update status display
-        self.text_manager.update_pitch_result(result)
+        self.text_manager.update_pitch_result(result, self.is_auto_advance)
 
         # Progress to next note if current note is complete
-        if result.remaining_time <= 0:
+        if self.is_auto_advance:
+            current_time = time.time()
+            if current_time - self.progress_timer >= self.note_duration:
+                self._advance_to_next_note()
+                self.progress_timer = current_time
+        elif result.remaining_time <= 0:
             self._advance_to_next_note()
 
         self.text_manager.update_phrase_text(self.current_phrase_idx, self.phrases)
@@ -68,6 +78,8 @@ class WebIntervalPracticeApp(BasePracticeApp):
         """Advance to the next note or phrase."""
        if self.phrases is None:
            return
+
+        self.update_results_table()
        self.current_note_idx += 1
        if self.current_note_idx >= len(self.phrases[self.current_phrase_idx]):
            self.current_note_idx = 0
@@ -78,41 +90,50 @@ class WebIntervalPracticeApp(BasePracticeApp):
            self.current_note_idx
        ].value
 
-    def handle_audio(self, audio: Tuple[int, np.ndarray]) -> Tuple[str, str, str]:
+    def handle_audio(self, audio: Tuple[int, np.ndarray]) -> Tuple[str, str, str, List]:
        """Handle audio input from Gradio interface.
 
        Args:
            audio: Audio data to process.
 
        Returns:
-            Tuple[str, str, str]:
+            Tuple[str, str, str, List]:
                The current base note including the next base note,
-                target note, and result text.
+                target note, result text, and results table.
        """
        if not self.is_running:
-            return "-", "Not running", "Start the session first"
+            return "-", "Not running", "Start the session first", []
 
        self.audio_processor.process_audio(audio)
+
        return (
            self.base_note,
            self.text_manager.phrase_text,
            self.text_manager.result_text,
+            self.results_table,
        )
 
    def start(
-        self,
+        self,
+        interval: str,
+        direction: str,
+        number_problems: int,
+        is_auto_advance: bool,
+        note_duration: float,
+    ) -> Tuple[str, str, str, List]:
        """Start a new practice session.
 
        Args:
            interval: Interval to move to and back.
            direction: Direction to move to and back.
            number_problems: Number of problems to generate.
+            is_auto_advance: Whether to automatically advance to the next note.
+            note_duration: Duration of each note in seconds.
 
        Returns:
-            Tuple[str, str, str]:
+            Tuple[str, str, str, List]:
                The current base note including the next base note,
-                target note, and result text.
+                target note, result text, and results table.
        """
        semitone_interval = Intervals.INTERVALS_MAP.get(interval, 0)
 
@@ -133,11 +154,17 @@ class WebIntervalPracticeApp(BasePracticeApp):
        self.audio_processor.start_recording()
 
        self.text_manager.update_phrase_text(self.current_phrase_idx, self.phrases)
+        self.results_table = []
+
+        self.is_auto_advance = is_auto_advance
+        self.note_duration = note_duration
+        self.progress_timer = time.time()
 
        return (
            self.base_note,
            self.text_manager.phrase_text,
            self.text_manager.result_text,
+            self.results_table,
        )
 
    def stop(self) -> Tuple[str, str, str]:
@@ -162,3 +189,25 @@ class WebIntervalPracticeApp(BasePracticeApp):
    def launch(self, **kwargs):
        """Launch the application."""
        self.ui.launch(**kwargs)
+
+    def update_results_table(self):
+        """Update the results table with the latest result."""
+        if not self.is_auto_advance:
+            return
+
+        target_note = self.phrases[self.current_phrase_idx][self.current_note_idx].value
+        if self.base_note == target_note:
+            return
+        detected_note = self.text_manager.result_text.split("|")[1].strip()
+        detected_note = detected_note.replace("Your note: ", "").replace(" ", "")
+        # Result determination
+        result = "⭕️" if detected_note == target_note else "X"
+        new_result = [
+            self.current_phrase_idx + 1,
+            self.base_note,
+            target_note,
+            detected_note,
+            result,
+        ]
+
+        self.results_table.append(new_result)
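The behavioural change in this file is the timer-driven auto-advance branch in _process_audio_callback plus the string parsing in update_results_table. Below is a minimal standalone sketch of that pattern; the PitchResult stand-in, the AutoAdvanceTracker name, and the sample values are illustrative assumptions, not code from the repository.

import time
from dataclasses import dataclass
from typing import Any, List


@dataclass
class PitchResult:
    """Illustrative stand-in for the service's pitch-detection result."""

    target_note: str
    current_base_note: str
    remaining_time: float


class AutoAdvanceTracker:
    """Sketch of the timer-based advance and results-table bookkeeping."""

    def __init__(self, note_duration: float = 1.5) -> None:
        self.note_duration = note_duration
        self.progress_timer = time.time()
        self.results_table: List[List[Any]] = []

    def should_advance(self) -> bool:
        # Advance once the configured duration has elapsed, then reset the timer.
        now = time.time()
        if now - self.progress_timer >= self.note_duration:
            self.progress_timer = now
            return True
        return False

    def record(self, problem_idx: int, base_note: str, target_note: str, result_text: str) -> None:
        # Parse "Target: X | Your note: Y" the same way update_results_table does.
        detected = result_text.split("|")[1].strip()
        detected = detected.replace("Your note: ", "").replace(" ", "")
        mark = "⭕️" if detected == target_note else "X"
        self.results_table.append([problem_idx + 1, base_note, target_note, detected, mark])


tracker = AutoAdvanceTracker(note_duration=1.5)
tracker.record(0, "C", "C#", "Target: C# | Your note: C#")
print(tracker.results_table)  # [[1, 'C', 'C#', 'C#', '⭕️']]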
improvisation_lab/presentation/interval_practice/web_interval_view.py  CHANGED

@@ -4,7 +4,7 @@ This module provides a web interface using Gradio for visualizing
 and interacting with interval practice sessions.
 """
 
-from typing import Callable, Tuple
+from typing import Callable, List, Tuple
 
 import gradio as gr
 import numpy as np
@@ -19,9 +19,11 @@ class WebIntervalPracticeView(WebPracticeView):
 
    def __init__(
        self,
-        on_generate_melody: Callable[[str, str, int], Tuple[str, str, str]],
+        on_generate_melody: Callable[
+            [str, str, int, bool, float], Tuple[str, str, str, List]
+        ],
        on_end_practice: Callable[[], Tuple[str, str, str]],
-        on_audio_input: Callable[[Tuple[int, np.ndarray]], Tuple[str, str, str]],
+        on_audio_input: Callable[[Tuple[int, np.ndarray]], Tuple[str, str, str, List]],
        config: Config,
    ):
        """Initialize the UI with callback functions.
@@ -78,6 +80,15 @@ class WebIntervalPracticeView(WebPracticeView):
        self.number_problems_box = gr.Number(
            label="Number of Problems", value=self.init_num_problems
        )
+        with gr.Row():
+            self.auto_advance_checkbox = gr.Checkbox(
+                label="Auto Advance Mode",
+                value=True,
+            )
+            self.note_duration_box = gr.Number(
+                label="Note Duration (seconds)",
+                value=1.5,
+            )
 
        self.generate_melody_button = gr.Button("Generate Melody")
        self.base_note_box = gr.Textbox(
@@ -86,6 +97,19 @@ class WebIntervalPracticeView(WebPracticeView):
        with gr.Row():
            self.phrase_info_box = gr.Textbox(label="Problem Information", value="")
            self.pitch_result_box = gr.Textbox(label="Pitch Result", value="")
+        self.results_table = gr.DataFrame(
+            headers=[
+                "Problem Number",
+                "Base Note",
+                "Target Note",
+                "Detected Note",
+                "Result",
+            ],
+            datatype=["number", "str", "str", "str", "str"],
+            value=[],
+            label="Result History",
+        )
+
        self._add_audio_input()
        self.end_practice_button = gr.Button("End Practice")
 
@@ -142,8 +166,19 @@ class WebIntervalPracticeView(WebPracticeView):
        # Connect button callbacks
        self.generate_melody_button.click(
            fn=self.on_generate_melody,
-            inputs=[self.interval_box, self.direction_box, self.number_problems_box],
-            outputs=[self.base_note_box, self.phrase_info_box, self.pitch_result_box],
+            inputs=[
+                self.interval_box,
+                self.direction_box,
+                self.number_problems_box,
+                self.auto_advance_checkbox,
+                self.note_duration_box,
+            ],
+            outputs=[
+                self.base_note_box,
+                self.phrase_info_box,
+                self.pitch_result_box,
+                self.results_table,
+            ],
        )
 
        self.end_practice_button.click(
@@ -166,7 +201,12 @@ class WebIntervalPracticeView(WebPracticeView):
        audio_input.stream(
            fn=self.on_audio_input,
            inputs=audio_input,
-            outputs=[self.base_note_box, self.phrase_info_box, self.pitch_result_box],
+            outputs=[
+                self.base_note_box,
+                self.phrase_info_box,
+                self.pitch_result_box,
+                self.results_table,
+            ],
            show_progress=False,
            stream_every=0.1,
        )
improvisation_lab/presentation/view_text_manager.py  CHANGED

@@ -31,17 +31,22 @@ class ViewTextManager(ABC):
        """Set the text to waiting for audio."""
        self.result_text = "Waiting for audio..."
 
-    def update_pitch_result(self, pitch_result: PitchResult):
+    def update_pitch_result(
+        self, pitch_result: PitchResult, is_auto_advance: bool = False
+    ):
        """Update the pitch result text.
 
        Args:
            pitch_result: The result of the pitch detection.
+            is_auto_advance:
+                Whether to automatically advance to the next note.
+                Default is False.
        """
        result_text = (
            f"Target: {pitch_result.target_note} | "
            f"Your note: {pitch_result.current_base_note or '---'}"
        )
-        if pitch_result.current_base_note is not None:
+        if pitch_result.current_base_note is not None and not is_auto_advance:
            result_text += f" | Remaining: {pitch_result.remaining_time:.1f}s"
        self.result_text = result_text
 
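The only effect of the new is_auto_advance flag is to drop the countdown from the status line, as the updated tests further down verify. A quick illustration, using a minimal stand-in for PitchResult:

from dataclasses import dataclass


@dataclass
class PitchResult:
    """Illustrative stand-in for the real PitchResult."""

    target_note: str
    current_base_note: str
    is_correct: bool
    remaining_time: float


def format_pitch_result(pitch_result: PitchResult, is_auto_advance: bool = False) -> str:
    # Mirrors ViewTextManager.update_pitch_result: the remaining time is only
    # appended when auto-advance is off.
    text = (
        f"Target: {pitch_result.target_note} | "
        f"Your note: {pitch_result.current_base_note or '---'}"
    )
    if pitch_result.current_base_note is not None and not is_auto_advance:
        text += f" | Remaining: {pitch_result.remaining_time:.1f}s"
    return text


result = PitchResult("C", "A", False, 2.5)
print(format_pitch_result(result))                        # Target: C | Your note: A | Remaining: 2.5s
print(format_pitch_result(result, is_auto_advance=True))  # Target: C | Your note: A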
tests/application/interval_practice/test_web_interval_app.py  CHANGED

@@ -65,11 +65,14 @@ class TestWebIntervalPracticeApp:
        with patch.object(
            self.app.audio_processor, "process_audio", return_value=None
        ) as mock_process_audio:
-            base_note, phrase_text, result_text = self.app.handle_audio(audio_data)
+            base_note, phrase_text, result_text, results_table = self.app.handle_audio(
+                audio_data
+            )
            mock_process_audio.assert_called_once_with(audio_data)
            assert base_note == self.app.base_note
            assert phrase_text == self.app.text_manager.phrase_text
            assert result_text == self.app.text_manager.result_text
+            assert results_table == self.app.results_table
 
    @pytest.mark.usefixtures("init_module")
    def test_start(self):
@@ -78,12 +81,15 @@ class TestWebIntervalPracticeApp:
        with patch.object(
            self.app.audio_processor, "start_recording", return_value=None
        ) as mock_start_recording:
-            base_note, phrase_text, result_text = self.app.start("minor 2nd", "Up", 10)
+            base_note, phrase_text, result_text, results_table = self.app.start(
+                "minor 2nd", "Up", 10, True, 1.5
+            )
            mock_start_recording.assert_called_once()
            assert self.app.is_running
            assert base_note == self.app.base_note
            assert phrase_text == self.app.text_manager.phrase_text
            assert result_text == self.app.text_manager.result_text
+            assert results_table == self.app.results_table
 
    @pytest.mark.usefixtures("init_module")
    def test_stop(self):
@@ -98,3 +104,22 @@ class TestWebIntervalPracticeApp:
        assert base_note == "-"
        assert phrase_text == self.app.text_manager.phrase_text
        assert result_text == self.app.text_manager.result_text
+
+    @pytest.mark.usefixtures("init_module")
+    @pytest.mark.parametrize(
+        "detected_note, expected_result",
+        [("C#", "⭕️"), ("D", "X")],  # Correct case  # Incorrect case
+    )
+    def test_update_results_table(self, detected_note, expected_result):
+        """Test updating the results table with correct and incorrect results."""
+        self.app.phrases = [[Notes.C, Notes.C_SHARP, Notes.C]]
+        self.app.current_phrase_idx = 0
+        self.app.current_note_idx = 1
+        self.app.base_note = "C"
+        self.app.text_manager.result_text = f"Target: C# | Your note: {detected_note}"
+
+        self.app.is_auto_advance = True
+        self.app.update_results_table()
+
+        expected_entry = [1, "C", "C#", detected_note, expected_result]
+        assert self.app.results_table[-1] == expected_entry
tests/presentation/interval_practice/test_web_interval_view.py  CHANGED

@@ -57,6 +57,8 @@ class TestWebIntervalPracticeView:
        assert isinstance(self.web_view.base_note_box, gr.Textbox)
        assert isinstance(self.web_view.phrase_info_box, gr.Textbox)
        assert isinstance(self.web_view.pitch_result_box, gr.Textbox)
+        assert isinstance(self.web_view.auto_advance_checkbox, gr.Checkbox)
+        assert isinstance(self.web_view.note_duration_box, gr.Number)
 
    @pytest.mark.usefixtures("init_module")
    def test_create_control_buttons(self):
tests/presentation/test_view_text_manager.py  CHANGED

@@ -46,8 +46,13 @@ class TestViewTextManager:
        pitch_result = PitchResult(
            target_note="C", current_base_note="A", is_correct=False, remaining_time=2.5
        )
+        # Test without auto advance
        self.text_manager.update_pitch_result(pitch_result)
        assert (
            self.text_manager.result_text
            == "Target: C | Your note: A | Remaining: 2.5s"
        )
+
+        # Test with auto advance
+        self.text_manager.update_pitch_result(pitch_result, is_auto_advance=True)
+        assert self.text_manager.result_text == "Target: C | Your note: A"