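# app.py for the "Esther" Hugging Face Space: loads the WICKED4950/Esther_V1
# BlenderBot model with TensorFlow, logs every question/answer pair to
# question_answer.json, and serves a Gradio chat interface.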
import gradio as gr
from transformers import AutoTokenizer, TFBlenderbotForConditionalGeneration
import tensorflow as tf
import json
import os
from datetime import datetime
from huggingface_hub import login, create_repo, upload_file
login(os.environ.get("hf_token"))
data = {"Interactions":[]}
with open("question_answer.json", "w") as file:
json.dump(data, file, indent=4)
print("Loading the model......")
model_name = "WICKED4950/Esther_V1"
strategy = tf.distribute.MirroredStrategy()
tf.config.optimizer.set_jit(True) # Enable XLA
tokenizer = AutoTokenizer.from_pretrained(model_name)
with strategy.scope():
    model = TFBlenderbotForConditionalGeneration.from_pretrained(model_name)
def save_question(question, answer, path="question_answer.json"):
    # Append the new interaction to the JSON log with a timestamp
    with open(path, "r") as file:
        data = json.load(file)
    data["Interactions"].append({"Question:": question, "Answer:": answer, "Time:": datetime.now().strftime("%Y-%m-%d %H:%M:%S")})
    with open(path, "w") as file:
        json.dump(data, file, indent=4)
print("Interface getting done....")
# Define the chatbot function
def predict(user_input):
    if user_input == "Print_data_hmm":
        # Special command: dump the logged interactions to the console
        with open("question_answer.json", "r") as file:
            print(json.load(file))
        print()
        return "Done"
    else:
        inputs = tokenizer(user_input, return_tensors="tf", padding=True, truncation=True, max_length=128)
        # Generate the response using the model
        response_id = model.generate(
            inputs['input_ids'],
            max_length=128,   # Maximum length of the generated response
            do_sample=True,   # Sampling for variability
            top_k=20,         # Consider only the top 20 tokens
            top_p=0.90,       # Nucleus sampling
            temperature=0.8,  # Adjusts creativity of the response
        )
        # Decode the response
        response = tokenizer.decode(response_id[0], skip_special_tokens=True)
        save_question(question=user_input, answer=response)
        print("Q:", user_input)
        print("A:", response)
        print("T:", datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        print()
        return response
# Gradio interface
gr.Interface(
    fn=predict,
    inputs=gr.Textbox(label="Ask anything!"),
    outputs=gr.Textbox(label="Response"),
    examples=[
        ["Hey! What is your name?"],
        ["Who created you? And why?"],
    ],
    description="A chatbot trained to provide friendly and comforting responses. Type your question below and let Esther help!",
    title="Esther - Your Friendly Mental Health Chatbot",
).launch()