Upload 4 files
- README.txt +13 -0
- TriageScript.txt +6 -0
- app.py +134 -0
- requirements.txt +6 -0
README.txt
ADDED
@@ -0,0 +1,13 @@
---
title: 💬ChatBack - Chatbot🧠 Memory💾
emoji: 💬🧠💾
colorFrom: yellow
colorTo: blue
sdk: gradio
sdk_version: 3.4
app_file: app.py
pinned: false
license: apache-2.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
TriageScript.txt
ADDED
@@ -0,0 +1,6 @@
How do you treat a serious allergic reaction?
I take an allergy medication, but it doesn't work as well as I'd like.
How do you treat serious shock?
I have to take an epi-pen every time I go to the doctor.
What do you do if you are confused?
I just have to keep an eye on my throat and try not to sneeze.
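These six lines are sample prompts for exercising the chatbot. Below is a small, hypothetical smoke test (not part of the upload) that feeds them through the chat function defined in app.py below. It assumes chat() can be imported without starting the Gradio server, e.g. if the Interface(...).launch() call at the bottom of app.py were guarded by `if __name__ == "__main__":`; that guard is an assumption, not how app.py is written.

# Hypothetical smoke test for TriageScript.txt.
# Assumes chat() is importable from app.py without triggering .launch().
from app import chat

history = None
with open("TriageScript.txt") as f:
    for line in f:
        prompt = line.strip()
        if prompt:
            history, _ = chat(prompt, history)  # chat() threads history as Gradio "state"

# Print each (user, bot) turn accumulated in the history.
for user_turn, bot_turn in history:
    print(f"> {user_turn}\n{bot_turn}\n")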
app.py
ADDED
@@ -0,0 +1,134 @@
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
import torch
import gradio as gr


# PersistDataset -----
import os
import csv
import gradio as gr
from gradio import inputs, outputs
import huggingface_hub
from huggingface_hub import Repository, hf_hub_download, upload_file
from datetime import datetime


# -------------------------------------------- For Memory - you will need to set up a dataset and HF_TOKEN ---------
#DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/ChatbotMemory.csv"
#DATASET_REPO_ID = "awacke1/ChatbotMemory.csv"
#DATA_FILENAME = "ChatbotMemory.csv"
#DATA_FILE = os.path.join("data", DATA_FILENAME)
#HF_TOKEN = os.environ.get("HF_TOKEN")

#SCRIPT = """
#<script>
#if (!window.hasBeenRun) {
#  window.hasBeenRun = true;
#  console.log("should only happen once");
#  document.querySelector("button.submit").click();
#}
#</script>
#"""

#try:
#    hf_hub_download(
#        repo_id=DATASET_REPO_ID,
#        filename=DATA_FILENAME,
#        cache_dir=DATA_DIRNAME,
#        force_filename=DATA_FILENAME
#    )
#except:
#    print("file not found")
#repo = Repository(
#    local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
#)

#def store_message(name: str, message: str):
#    if name and message:
#        with open(DATA_FILE, "a") as csvfile:
#            writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"])
#            writer.writerow(
#                {"name": name.strip(), "message": message.strip(), "time": str(datetime.now())}
#            )
# Uncomment the line below to begin saving. If you create your own copy, add an access token named "HF_TOKEN"
# to your profile, then add it to this repo as a secret with the same name. For the CSV, copy the header and
# first few rows into your own dataset, then update the paths above so chats are saved to your own dataset
# repository. A runnable sketch of this helper appears after this file listing.
#        commit_url = repo.push_to_hub()
#    return ""

#iface = gr.Interface(
#    store_message,
#    [
#        inputs.Textbox(placeholder="Your name"),
#        inputs.Textbox(placeholder="Your message", lines=2),
#    ],
#    "html",
#    css="""
#    .message {background-color:cornflowerblue;color:white; padding:4px;margin:4px;border-radius:4px; }
#    """,
#    title="Reading/writing to a HuggingFace dataset repo from Spaces",
#    description=f"This is a demo of how to do simple *shared data persistence* in a Gradio Space, backed by a dataset repo.",
#    article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL})",
#)
# --------------------------------------------------- For Memory

mname = "facebook/blenderbot-400M-distill"
model = BlenderbotForConditionalGeneration.from_pretrained(mname)
tokenizer = BlenderbotTokenizer.from_pretrained(mname)

def take_last_tokens(inputs, note_history, history):
    """Filter the last 128 tokens"""
    if inputs['input_ids'].shape[1] > 128:
        inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()])
        inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()])
        note_history = ['</s> <s>'.join(note_history[0].split('</s> <s>')[2:])]
        history = history[1:]
    return inputs, note_history, history

def add_note_to_history(note, note_history):
    """Add a note to the historical information"""
    note_history.append(note)
    note_history = '</s> <s>'.join(note_history)
    return [note_history]

title = "State of the Art Chatbot with Memory Dataset"
description = """Chatbot With Memory"""

def chat(message, history):
    history = history or []
    if history:
        history_useful = ['</s> <s>'.join([str(a[0])+'</s> <s>'+str(a[1]) for a in history])]
    else:
        history_useful = []
    history_useful = add_note_to_history(message, history_useful)
    inputs = tokenizer(history_useful, return_tensors="pt")
    inputs, history_useful, history = take_last_tokens(inputs, history_useful, history)
    reply_ids = model.generate(**inputs)
    response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]
    history_useful = add_note_to_history(response, history_useful)
    list_history = history_useful[0].split('</s> <s>')
    history.append((list_history[-2], list_history[-1]))
    # store_message(message, response)  # Save to dataset -- uncomment with code above, create a dataset to store and add your HF_TOKEN from profile to this repo to use.
    return history, history

gr.Interface(
    fn=chat,
    theme="huggingface",
    css=".footer {display:none !important}",
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    title=title,
    allow_flagging="never",
    description=f"Gradio chatbot backed by memory in a dataset repository.",
    # article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL})"
).launch(debug=True)

#demo = gr.Blocks()
#with demo:
#    audio_file = gr.inputs.Audio(source="microphone", type="filepath")
#    text = gr.Textbox(label="Speech to Text")
#    TTSchoice = gr.inputs.Radio( label="Pick a Text to Speech Model", choices=MODEL_NAMES, )
#    audio = gr.Audio(label="Output", interactive=False)
#    b1 = gr.Button("Recognize Speech")
#    b5 = gr.Button("Read It Back Aloud")
#    b1.click(speech_to_text, inputs=audio_file, outputs=text)
#    b5.click(tts, inputs=[text,TTSchoice], outputs=audio)
#demo.launch(share=True)
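The long commented-out block near the top of app.py, together with the line-53 note, describes how chats could be persisted to a Hugging Face dataset repo. The following is a minimal sketch of that helper, assuming a writable dataset repo and an HF_TOKEN secret on the Space; the repo URL and file name are the ones from the comments and would need to be replaced with your own.

# Minimal sketch of the dataset-backed memory described in the comments of app.py.
# Assumes a dataset repo you can write to and an HF_TOKEN secret on this Space.
import csv
import os
from datetime import datetime

from huggingface_hub import Repository

DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/ChatbotMemory.csv"  # replace with your own dataset
DATA_FILENAME = "ChatbotMemory.csv"
DATA_FILE = os.path.join("data", DATA_FILENAME)
HF_TOKEN = os.environ.get("HF_TOKEN")

# Clone the dataset repo next to the app; each push sends new rows back to the Hub.
repo = Repository(local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN)

def store_message(name: str, message: str) -> str:
    """Append one exchange to the CSV and push the commit to the dataset repo."""
    if name and message:
        with open(DATA_FILE, "a", newline="") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"])
            writer.writerow(
                {"name": name.strip(), "message": message.strip(), "time": str(datetime.now())}
            )
        repo.push_to_hub()
    return ""

With this in place, the commented-out store_message(message, response) call inside chat() can be uncommented so every exchange is appended to the dataset.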
requirements.txt
ADDED
@@ -0,0 +1,6 @@
transformers
torch
gradio
Werkzeug
huggingface_hub
Pillow
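None of the packages above are pinned. app.py imports the legacy gradio.inputs/gradio.outputs modules, which exist only in Gradio 3.x; on Spaces the Gradio version is taken from sdk_version: 3.4 in README.txt, but for running app.py locally a pinned requirements file keeps those imports resolvable. A hedged variant is sketched below (the pin mirrors the README; no other versions are asserted):

transformers
torch
gradio==3.4
Werkzeug
huggingface_hub
Pillow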