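"""Gradio demo: classifies baby-cry audio with a fine-tuned DistilHuBERT model
("cry-detector") and provides a Llama-3 based chat assistant for baby health and care."""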
import gradio as gr
from huggingface_hub import InferenceClient
import os
from transformers import pipeline
import numpy as np
from model import SAMPLING_RATE, FEATURE_EXTRACTOR
token = os.getenv("HF_TOKEN")
# modelo = "mixed-data"
modelo = "cry-detector"
# Audio-classification pipeline fine-tuned on the selected dataset ("cry-detector")
pipe = pipeline(
    "audio-classification",
    model=f"A-POR-LOS-8000/distilhubert-finetuned-{modelo}",
    use_auth_token=token,
)
client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token=token)
# client = InferenceClient("mistralai/Mistral-Nemo-Instruct-2407", token=token)
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Rebuild the conversation: system prompt, then alternating user/assistant turns.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Stream the completion and yield the accumulated text so the UI updates live.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        delta = chunk.choices[0].delta.content
        if delta:  # the final chunk may carry no content
            response += delta
            yield response
my_theme = gr.themes.Soft(
    primary_hue="emerald",
    secondary_hue="green",
    shadow_spread='*button_shadow_active',
)
# Each "page" is a gr.Column whose visibility is toggled by the navigation buttons.
def mostrar_pagina_1():
    # Hide the start screen, show page 1 (Predictor + Assistant)
    return gr.update(visible=False), gr.update(visible=True)

def mostrar_pagina_2():
    # Hide the start screen, show page 2 (Monitor)
    return gr.update(visible=False), gr.update(visible=True)

def redirigir_a_pantalla_inicial():
    # Show the start screen again, hide the current page
    return gr.update(visible=True), gr.update(visible=False)
def transcribe(audio):
    # gr.Audio delivers (sample_rate, samples); the model's own SAMPLING_RATE is used below.
    _, y = audio
    y = y.astype(np.float32)  # casting with torch.float32 raises an error, so stay in numpy
    peak = np.max(np.abs(y))
    if peak > 0:  # avoid dividing by zero on silent clips
        y /= peak  # normalize to [-1, 1]
    results = pipe({"sampling_rate": SAMPLING_RATE, "raw": y})
    top_result = results[0]  # top result = most likely classification
    label = top_result["label"]
    return label
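# UI layout: a start screen (pantalla_inicial) with navigation buttons, page 1 with the
# cry Predictor and the chat Assistant, and page 2 with a Monitor placeholder.
# Pages are swapped by toggling Column visibility.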
with gr.Blocks(theme=my_theme) as demo:
    # Start screen. The original gr.HTML(...) welcome markup is missing from this copy,
    # so only the navigation buttons (wired up at the bottom of the file) are created here;
    # their labels are assumed.
    with gr.Column(visible=True, elem_id="pantalla-inicial") as pantalla_inicial:
        boton_pagina_1 = gr.Button("Predictor")  # label assumed
        boton_pagina_2 = gr.Button("Monitor")  # label assumed

    # Page 1: cry predictor + chat assistant
    with gr.Column(visible=False) as pagina_1:
        gr.Markdown("<h2>Predictor</h2>")
        audio_input = gr.Audio(
            min_length=1.0,
            # max_length=10.0,
            format="wav",
            # type="numpy",
            label="Baby recorder",
        )
        classify_btn = gr.Button("¿Por qué llora?")
        classification_output = gr.Textbox(label="Tu bebé llora por:")
        classify_btn.click(transcribe, inputs=audio_input, outputs=classification_output)

        with gr.Column():
            gr.Markdown("<h2>Assistant</h2>")
            system_message = "You are a Chatbot specialized in baby health and care."
            max_tokens = 512  # value assumed; it was not defined in this copy
            temperature = 0.7
            top_p = 0.95
            chatbot = gr.ChatInterface(
                respond,
                additional_inputs=[
                    gr.State(value=system_message),
                    gr.State(value=max_tokens),
                    gr.State(value=temperature),
                    gr.State(value=top_p),
                ],
            )
            gr.Markdown("Este chatbot no sustituye a un profesional de la salud. Ante cualquier preocupación o duda, consulta con tu pediatra.")

        boton_volver_inicio_1 = gr.Button("Volver a la pantalla inicial")
        boton_volver_inicio_1.click(redirigir_a_pantalla_inicial, inputs=None, outputs=[pantalla_inicial, pagina_1])
    # Page 2: monitor placeholder
    with gr.Column(visible=False) as pagina_2:
        gr.Markdown("<h2>Monitor</h2>")
        gr.Markdown("Contenido de la Página 2")
        boton_volver_inicio_2 = gr.Button("Volver a la pantalla inicial")
        boton_volver_inicio_2.click(redirigir_a_pantalla_inicial, inputs=None, outputs=[pantalla_inicial, pagina_2])

    # Navigation wiring: each button hides the start screen and shows its page.
    boton_pagina_1.click(mostrar_pagina_1, inputs=None, outputs=[pantalla_inicial, pagina_1])
    boton_pagina_2.click(mostrar_pagina_2, inputs=None, outputs=[pantalla_inicial, pagina_2])

demo.launch()
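# Optional: demo.launch(share=True) would additionally expose a temporary public URL.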