RachidAb02 committed on
Commit
6e8b96e
·
verified ·
1 Parent(s): 9a8766a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +125 -14
app.py CHANGED
@@ -1,14 +1,125 @@
1
- import torch
2
- from peft import PeftModel, PeftConfig
3
- from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline
4
- bnb_config = BitsAndBytesConfig(load_in_4bit=True,
5
- bnb_4bit_quant_type="nf4",
6
- bnb_4bit_compute_dtype=torch.bfloat16,
7
- bnb_4bit_use_double_quant=False)
8
-
9
- model_id = "meta-llama/Meta-Llama-3-8B"
10
- tokenizer = AutoTokenizer.from_pretrained(model_id)
11
- model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config = bnb_config,device_map={"":0})
12
-
13
-
14
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import cohere
3
+ import os
4
+ import re
5
+ import uuid
6
+ import secrets
7
+
8
+
9
+
10
# SECURITY FIX: the previous revision hard-coded the Cohere API key in source
# (and the variable was misleadingly named "Mistralai"). Read the key from the
# environment instead; the leaked key should be rotated.
COHERE_API_KEY = os.environ.get("COHERE_API_KEY", "")
# Cohere client used by generate_response for streaming chat completions.
co = cohere.Client(COHERE_API_KEY, client_name="huggingface-rp")
12
+
13
+
14
def trigger_example(example):
    """Run a clicked example prompt through the chat pipeline.

    BUG FIX: the original called ``generate_response(example)`` even though
    that function requires ``cid`` and ``token`` (TypeError), and it is a
    generator, so unpacking its return into two values also failed. Supply a
    fresh conversation id and session token, and yield only the chat pairs,
    matching the single ``outputs=[chatbot]`` wired up in gr.Examples.
    """
    for chat, _history, _cid in generate_response(
        example, str(uuid.uuid4()), secrets.token_hex(16)
    ):
        yield chat
17
+
18
def generate_response(user_message, cid, token, history=None):
    """Stream a chat reply from Cohere for *user_message*.

    Yields ``(chat_pairs, history, cid)`` after each text chunk so the Gradio
    chatbot updates incrementally.

    Args:
        user_message: Prompt typed (or example clicked) by the user.
        cid: Conversation id; a fresh UUID is generated when empty or None.
        token: Per-session token set by ``demo.load``; a falsy value means the
            page did not finish loading, so abort with a UI error.
        history: Flat list alternating [user, bot, user, bot, ...]; mutated
            in place and also yielded back to Gradio state.

    Raises:
        gr.Error: when *token* is missing.
    """
    if not token:
        raise gr.Error("Error loading.")

    if history is None:
        history = []
    # BUG FIX: the original test was `if cid == "" or None:`, which parses as
    # `(cid == "") or None` — `or None` is always falsy, so a None cid never
    # received a fresh UUID. `not cid` covers both "" and None.
    if not cid:
        cid = str(uuid.uuid4())

    print(f"cid: {cid} prompt:{user_message}")

    history.append(user_message)

    stream = co.chat_stream(
        message=user_message,
        conversation_id=cid,
        model='Codestral-22B-v0.1',
        connectors=[],
        temperature=0.3,
    )

    output = ""
    # BUG FIX: the original used `idx == 0` to detect the first text chunk,
    # but the stream's first event is usually not "text-generation" (e.g. a
    # stream-start event), so the bot slot was never appended and history[-1]
    # overwrote the user message. Track the first text chunk explicitly.
    first_chunk = True
    for response in stream:
        if response.event_type == "text-generation":
            output += response.text
            if first_chunk:
                history.append(" " + output)
                first_chunk = False
            else:
                history[-1] = output
            # Pair up the flat history into (user, bot) tuples for gr.Chatbot.
            chat = [
                (history[i].strip(), history[i + 1].strip())
                for i in range(0, len(history) - 1, 2)
            ]
            yield chat, history, cid

    return chat, history, cid
50
+
51
+
52
def clear_chat():
    """Reset the conversation: empty chat display, empty history, new conversation id."""
    fresh_cid = str(uuid.uuid4())
    return [], [], fresh_cid
54
+
55
+
56
# Canned example prompts (French labour-law questions) shown under the chat
# box via gr.Examples; clicking one fills the input textbox.
examples = [
    "Pouvez-vous expliquer les règles relatives aux heures supplémentaires selon le Code du travail ?",
    "Quels sont les droits et obligations concernant les congés payés selon le Code du travail ?",
    "Quelles sont les conditions requises pour un licenciement pour motif économique en vertu du Code du travail ?",
    "Quelles sont les mesures prévues par le Code du travail en cas de harcèlement au travail ?",
]
63
+
64
# CSS overrides passed to gr.Blocks: drop the border around the logo image
# and give the chat message area a minimum height with a smaller font.
custom_css = """
#logo-img {
border: none !important;
}
#chat-message {
font-size: 14px;
min-height: 300px;
}
"""
73
+
74
with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
    # Per-session state: conversation id and an opaque session token
    # (checked by generate_response as a page-loaded liveness signal).
    cid = gr.State("")
    token = gr.State(value=None)

    with gr.Row():
        with gr.Column(scale=1):
            gr.Image("logohf1.jpg", elem_id="logo-img", show_label=False,
                     show_share_button=False, show_download_button=False)
        with gr.Column(scale=3):
            gr.Markdown("""##### Bienvenue sur votre Assistant Code du Travail Français ! """)

    with gr.Column():
        with gr.Row():
            chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True)

        with gr.Row():
            user_message = gr.Textbox(lines=1, placeholder="Question ...", label="Input", show_label=False)

        with gr.Row():
            submit_button = gr.Button("Envoyer")
            clear_button = gr.Button("Supprimer la discussion")

        # Flat [user, bot, user, bot, ...] message list shared with generate_response.
        history = gr.State([])

        user_message.submit(fn=generate_response, inputs=[user_message, cid, token, history],
                            outputs=[chatbot, history, cid], concurrency_limit=32)
        submit_button.click(fn=generate_response, inputs=[user_message, cid, token, history],
                            outputs=[chatbot, history, cid], concurrency_limit=32)

        clear_button.click(fn=clear_chat, inputs=None, outputs=[chatbot, history, cid], concurrency_limit=32)

        # BUG FIX: with inputs=None Gradio invokes the callback with zero
        # arguments, so the original `lambda x: ...` raised a TypeError on
        # every submit/click. Use zero-argument lambdas to clear the textbox.
        user_message.submit(lambda: gr.update(value=""), None, [user_message], queue=False)
        submit_button.click(lambda: gr.update(value=""), None, [user_message], queue=False)
        clear_button.click(lambda: gr.update(value=""), None, [user_message], queue=False)

    with gr.Row():
        gr.Examples(
            examples=examples,
            inputs=user_message,
            cache_examples=False,
            fn=trigger_example,
            outputs=[chatbot],
            examples_per_page=100,
        )

    # Issue a random per-session token on page load; generate_response refuses
    # to run until this has been set.
    demo.load(lambda: secrets.token_hex(16), None, token)
119
+
120
if __name__ == "__main__":
    # demo.launch(debug=True)
    # Queue limits concurrent requests; the REST API surface stays hidden.
    try:
        queued = demo.queue(api_open=False, max_size=40)
        queued.launch(show_api=False)
    except Exception as e:
        print(f"Error: {e}")