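"""Gradio chat app "GUI".

Replies with an exact match from the wendellast/GUI-Ban dataset when one
exists; otherwise streams a completion from meta-llama/Llama-3.2-3B-Instruct
through the Hugging Face InferenceClient.
"""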
from datetime import datetime
from typing import Iterator, List, Optional, Tuple

import gradio as gr
from datasets import load_dataset
from huggingface_hub import InferenceClient

from config.prompt_gui import (
    prompt_template_gui,
    template_gui,
)
from util.data_config import extrair_dados_config

# Load the bot's rules and developer/app metadata from the config file.
regras, desenvolvedor_name, country, name_gui, desenvolvedor_description = (
    extrair_dados_config()
)

try:
    with open("static/assets/js/script.js", "r", encoding="UTF-8") as js_file:
        js_code = js_file.read()
except OSError as exc:
    # Fail fast with a proper exception if the UI script cannot be read.
    raise RuntimeError("Failed to load JS code") from exc


now: datetime = datetime.now()
model: str = "meta-llama/Llama-3.2-3B-Instruct"

template = template_gui()
prompt_template = prompt_template_gui(template)


client: InferenceClient = InferenceClient(model=model)


# Canned dialog pairs, checked for an exact match before querying the model.
dataset = load_dataset("wendellast/GUI-Ban")


def get_response_from_huggingface_dataset(message: str, dataset) -> Optional[str]:
    """Return the stored reply whose first dialog turn equals `message`
    (case-insensitive), or None when there is no match."""
    for data in dataset["train"]:
        if "dialog" in data and len(data["dialog"]) > 1:
            input_text: str = data["dialog"][0].lower()
            response_text: str = data["dialog"][1]

            if input_text == message.lower():
                return response_text
    return None


def respond(
    message: str,
    history: List[Tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
) -> Iterator[str]:
    # `system_message` is wired in by the Gradio additional_inputs but unused:
    # the system prompt is built from the config template below.

    # Short-circuit with a canned reply when the dataset has an exact match.
    response: Optional[str] = get_response_from_huggingface_dataset(message, dataset)
    if response:
        yield response
        return

    # Flatten the history into the "Usuário:"/"IA:" lines the template expects.
    historico: str = ""
    for user_msg, bot_reply in history:
        if user_msg:
            historico += f"Usuário: {user_msg}\n"
        if bot_reply:
            historico += f"IA: {bot_reply}\n"

    prompt: str = prompt_template.format(
        name=name_gui,
        data_atual=now.strftime("%d/%m/%Y %H:%M:%S"),
        regras=regras,
        desenvolvedor_name=desenvolvedor_name,
        desenvolvedor_description=desenvolvedor_description,
        pais=country,
        historico=historico.strip(),
        mensagem=message,
    )

    messages: List[dict] = [{"role": "system", "content": prompt}]
    response = ""

    # Stream the completion, yielding the accumulated text so Gradio can
    # render the reply progressively.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token: Optional[str] = chunk.choices[0].delta.content
        if token:  # delta.content may be None on some stream chunks
            response += token
        yield response


demo: gr.ChatInterface = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    theme="gstaff/xkcd",
    title="GUI",
    js=js_code,
)

# Launch the application
if __name__ == "__main__":
    demo.launch()