# app.py -- Gradio demo for a mental-health text classifier.
#
# The live code path loads a locally pickled scikit-learn model.
# An alternative path using the Hugging Face Hub model
# "tabibu-ai/mental-health-chatbot" with transformers is kept in the
# comments further down.
import pickle

import numpy as np
import gradio as gr
# transformers and torch are only required for the Hub-model path below;
# add them to requirements.txt if that path is enabled.
# from transformers import AutoTokenizer, AutoModelForSequenceClassification
# from sklearn.feature_extraction.text import TfidfVectorizer
# tokenizer = AutoTokenizer.from_pretrained("tabibu-ai/mental-health-chatbot")
# tokenizer = AutoTokenizer.from_pretrained("rabiaqayyum/autotrain-mental-health-analysis-752423172")
# Load the locally pickled classifier; model.pkl must sit next to app.py.
model = pickle.load(open("model.pkl", "rb"))
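# Sketch (an assumption, not taken from the original repo) of how such a
# model.pkl could have been produced: a fitted scikit-learn Pipeline that
# maps raw text to a label, e.g.
#   from sklearn.pipeline import Pipeline
#   from sklearn.linear_model import LogisticRegression
#   pipe = Pipeline([("tfidf", TfidfVectorizer()), ("clf", LogisticRegression())])
#   pipe.fit(train_texts, train_labels)
#   pickle.dump(pipe, open("model.pkl", "wb"))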
def classify_text(inp):
    # Alternative path using the Hugging Face model instead of the pickle
    # (with `model` loaded via AutoModelForSequenceClassification.from_pretrained):
    #   encoded_input = tokenizer(inp, return_tensors="pt")
    #   output = model(**encoded_input)
    #   return output.logits.argmax().item()
    #
    # Live path: model.pkl is assumed to be a full scikit-learn Pipeline
    # (vectorizer + classifier), so predict() can take the raw string directly.
    # If it instead expects a numeric feature matrix, the input must first be
    # transformed with the same fitted vectorizer used at training time.
    prediction = model.predict([inp])[0]
    return str(prediction)
iface = gr.Interface(
    fn=classify_text,
    inputs="text",
    outputs="label",
    interpretation="default",  # only supported in older Gradio releases (pre-4.x)
    examples=[
        ["I am feeling depressed"],
        ["I am feeling anxious"],
        ["I am feeling stressed"],
        ["I am feeling sad"],
    ],
)
iface.launch()
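# To run locally (assuming model.pkl is present alongside this file):
#   pip install gradio numpy scikit-learn
#   python app.py
# Gradio then serves the interface on http://127.0.0.1:7860 by default.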