Commit 091f6f5
Parent(s): 56bd1e9

Hide record button while AI is answering

Files changed: ui/coding.py (+5 -3)
ui/coding.py  CHANGED

@@ -312,15 +312,17 @@ def get_problem_solving_ui(
         TIME_STEP = 0.1
         STEPS = int(WAIT_TIME / TIME_STEP)

-        stop_audio_recording = audio_input.stop_recording(fn=lambda
-        for _ in range(STEPS
+        stop_audio_recording = audio_input.stop_recording(fn=lambda: gr.update(visible=False), outputs=[audio_input])
+        for _ in range(STEPS):
             stop_audio_recording = stop_audio_recording.success(fn=lambda x: time.sleep(TIME_STEP) if x else None, inputs=[is_transcribing])

         stop_audio_recording.success(
             fn=send_request_partial,
             inputs=[code, previous_code, chat_history, chat],
             outputs=[chat_history, chat, previous_code, audio_output],
-        ).then(fn=lambda: (np.array([], dtype=np.int16), "", False), outputs=[audio_buffer, hidden_text, is_transcribing])
+        ).then(fn=lambda: (np.array([], dtype=np.int16), "", False), outputs=[audio_buffer, hidden_text, is_transcribing]).then(
+            fn=lambda: gr.update(visible=True), outputs=[audio_input]
+        )

         interview_type_select.change(
             fn=lambda x: gr.update(choices=topic_lists[x], value=np.random.choice(topic_lists[x])),
