# Hugging Face Spaces page header (scrape artifact): "Spaces: Sleeping"
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Korean chit-chat DialoGPT checkpoint on the Hugging Face Hub.
MODEL_NAME = "lcw99/ko-dialoGPT-korean-chit-chat"

# Load tokenizer and model once at startup (downloads on first run).
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
model.eval()  # inference only — disable dropout
# Chatbot response function
def chat_with_ai(history, message):
    """Generate one chat turn with Ko-DialoGPT.

    Args:
        history: list of (user, bot) tuples maintained by the gr.Chatbot widget.
        message: the user's new message text.

    Returns:
        (updated history, "") — the empty string clears the input textbox.

    NOTE(review): only the current message is fed to the model; prior turns in
    `history` are not used as generation context — confirm this is intended.
    """
    # Append EOS so the model sees the user turn as complete.
    input_ids = tokenizer.encode(message + tokenizer.eos_token, return_tensors="pt")
    # Inference only — no gradient tracking needed.
    with torch.no_grad():
        response_ids = model.generate(
            input_ids,
            max_length=100,  # counts prompt + generated tokens
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the newly generated tokens (slice off the prompt).
    response_text = tokenizer.decode(
        response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True
    )
    history.append((message, response_text))  # keep Gradio Chatbot tuple format
    return history, ""  # second value clears the input textbox
# Gradio interface setup
with gr.Blocks() as demo:
    gr.Markdown("## 🗨️ Ko-DialoGPT Chatbot")
    chatbot = gr.Chatbot(label="Ko-DialoGPT Chatbot")
    message = gr.Textbox(label="입력 메시지", placeholder="메시지를 입력하세요...")
    clear_btn = gr.Button("초기화")

    # On submit: update chat history and clear the input box.
    message.submit(chat_with_ai, [chatbot, message], [chatbot, message])
    # On clear-button click: wipe the chat history.
    clear_btn.click(lambda: [], [], chatbot)

# Bind to all interfaces on the standard Spaces port.
demo.launch(server_name="0.0.0.0", server_port=7860)