# Hugging Face Space: StableLM Zephyr 3B chat demo (Gradio UI).
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr
import os

# Pick the preferred device for any manual tensor placement.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load tokenizer and model for stabilityai/stablelm-zephyr-3b.
tokenizer = AutoTokenizer.from_pretrained('stabilityai/stablelm-zephyr-3b')
model = AutoModelForCausalLM.from_pretrained(
    'stabilityai/stablelm-zephyr-3b',
    trust_remote_code=True,
    device_map="auto"  # accelerate decides where each weight lives (GPU/CPU offload)
)
# NOTE: do NOT call model.to(device) here. With device_map="auto" the weights
# are already placed (and possibly sharded/offloaded) by accelerate; moving the
# module afterwards conflicts with that placement. Downstream code should use
# model.device for input placement, as predict() below already does.
class ChatBot:
    """Thin stateful wrapper that generates replies with the module-level
    `tokenizer` and `model` loaded above."""

    def __init__(self):
        # Placeholder for conversation history; not yet consulted by predict().
        self.history = []

    def predict(self, user_input, system_prompt="You are an expert analyst and provide assessment:"):
        """Generate a single reply for `user_input`.

        Parameters:
            user_input: the user's message text.
            system_prompt: instruction text appended to the user message
                (kept as a single 'user' turn, matching the original prompt shape).

        Returns:
            The model's generated reply as a plain string (prompt not echoed,
            special tokens stripped).
        """
        prompt = [{'role': 'user', 'content': user_input + "\n" + system_prompt + ":"}]
        inputs = tokenizer.apply_chat_template(
            prompt,
            add_generation_prompt=True,
            return_tensors='pt'
        )
        prompt_length = inputs.shape[-1]
        # Greedy decoding. The original passed temperature=0.8 alongside
        # do_sample=False; temperature is ignored under greedy decoding (and
        # transformers warns about it), so the dead argument is dropped.
        tokens = model.generate(
            inputs.to(model.device),
            max_new_tokens=250,
            do_sample=False
        )
        # Decode only the newly generated tokens so the user's prompt is not
        # echoed back, and strip special tokens (e.g. <|endoftext|>) so they
        # never reach the UI.
        response_text = tokenizer.decode(tokens[0][prompt_length:], skip_special_tokens=True)
        # Free up memory between requests.
        del tokens
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        return response_text

bot = ChatBot()
# --- Static UI copy for the Gradio interface -------------------------------
# (The emoji in these strings arrive mojibake-encoded from the original file
# and are preserved byte-for-byte; altering them would change the rendered UI.)

# One sample (question, instruction) pair shown under the input boxes.
examples = [["What is the proper treatment for buccal herpes?", "Please provide information on the most effective antiviral medications and home remedies for treating buccal herpes."]]

# Page heading.
title = "๐๐ปWelcome to ๐Tonic's๐ฝStable๐LM 3B๐Chat"

# Markdown/HTML blurb rendered beneath the heading.
description = """
You can use this Space to test out the current model [stabilityai/stablelm-zephyr-3b](https://huggingface.co/stabilityai/stablelm-zephyr-3b)
You can also use ๐ทStableMedโ๏ธ on your laptop & by cloning this space. ๐งฌ๐ฌ๐ Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/TonicsStableLM3B?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
Join us : ๐TeamTonic๐ is always making cool demos! Join our active builder's๐ ๏ธcommunity on ๐ปDiscord: [Discord](https://discord.gg/GWpVpekp) On ๐คHuggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On ๐Github: [Polytonic](https://github.com/tonic-ai) & contribute to ๐ [PolyGPT](https://github.com/tonic-ai/polygpt-alpha)
"""
# Wire the chat handler into a two-field Gradio form (question + instruction)
# and start serving. Inputs map positionally onto bot.predict's parameters.
iface = gr.Interface(
    fn=bot.predict,
    inputs=["text", "text"],
    outputs="text",
    title=title,
    description=description,
    examples=examples,
    theme="ParityError/Anime"
)

iface.launch()