JustKiddo committed (verified)
Commit 801c513 · Parent: ae63702

Create app.bak

Files changed (1): app.bak (+71, -0)
app.bak ADDED
@@ -0,0 +1,71 @@
+import gradio as gr
+from huggingface_hub import InferenceClient
+from datasets import load_dataset
+
+"""
+For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+"""
+
+# Update: using a new base model
+client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+# client = InferenceClient("HuggingFaceH4/zephyr-7b-gemma-v0.1")
+# topic_model = BERTopic.load("MaartenGr/BERTopic_Wikipedia")
+# Train model
+# topic_model = BERTopic("english")
+# topics, probs = topic_model.fit_transform(docs)
+# Loaded at startup; not yet referenced by respond() below.
+dataset = load_dataset("JustKiddo/KiddosVault")
+
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+):
+    # Rebuild the conversation in the role/content format the chat API expects.
+    messages = [{"role": "system", "content": system_message}]
+
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
+
+    messages.append({"role": "user", "content": message})
+
+    response = ""
+
+    # Stream the completion and yield the partial response as it grows.
+    for chunk in client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
+    ):
+        token = chunk.choices[0].delta.content
+
+        if token:  # some stream chunks carry no text
+            response += token
+        yield response
+
+"""
+For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+"""
+demo = gr.ChatInterface(
+    respond,
+    additional_inputs=[
+        gr.Textbox(value="You are a professional Mental Healthcare Chatbot.", label="System message"),
+        gr.Slider(minimum=1, maximum=6144, value=6144, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=1, step=0.1, label="Temperature"),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (nucleus sampling)",
+        ),
+    ],
+)
+
+
+if __name__ == "__main__":
+    demo.launch(debug=True)
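
For context, the streaming loop in respond() follows the usual `huggingface_hub` pattern. Below is a minimal standalone sketch of that pattern outside Gradio, assuming network access to the Hugging Face Inference API and a configured HF token; the prompt text is illustrative only.

from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
messages = [
    {"role": "system", "content": "You are a professional Mental Healthcare Chatbot."},
    {"role": "user", "content": "Hello"},
]

response = ""
# Iterate over streamed chunks; a chunk's delta may or may not carry text.
for chunk in client.chat_completion(messages, max_tokens=128, stream=True):
    token = chunk.choices[0].delta.content
    if token:
        response += token
print(response)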