import streamlit as st
# Alternative (left commented out): load the model and tokenizer explicitly
# instead of using the pipeline helper below.
#
# from transformers import AutoModelForCausalLM, AutoTokenizer
#
# Full DeepSeek-R1 model:
# model_name = "deepseek-ai/DeepSeek-R1"
# tokenizer = AutoTokenizer.from_pretrained(model_name)
# model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, quantization_config=None)
#
# Distilled 1.5B model:
# model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
# tokenizer = AutoTokenizer.from_pretrained(model_id)
# model = AutoModelForCausalLM.from_pretrained(model_id)
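# A minimal sketch (not part of the original file) of how generation could look
# with the explicitly loaded model above, assuming `model` and `tokenizer` had
# been created as in the commented lines:
#
# input_ids = tokenizer.apply_chat_template(
#     [{"role": "user", "content": "Hello"}],
#     add_generation_prompt=True,
#     return_tensors="pt",
# )
# output_ids = model.generate(input_ids, max_new_tokens=256)
# reply = tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)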
# Use a pipeline as a high-level helper
from transformers import pipeline
st.title("DeepSeek Chatbot")
prompt = st.text_input("Enter your message:")
if st.button("Run"):
    messages = [{"role": "user", "content": prompt}]
    pipe = pipeline("text-generation", model="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", trust_remote_code=True)
    outputs = pipe(messages, max_new_tokens=512)
    # With chat-style input, the assistant reply is the last message in generated_text.
    response = outputs[0]["generated_text"][-1]["content"]
    st.text_area("Response", response, height=1000)
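
# A minimal sketch (not part of the original file): caching the pipeline with
# st.cache_resource would load the model once per session instead of on every
# button click. Assumes the same model id as above.
#
# @st.cache_resource
# def load_pipe():
#     return pipeline("text-generation", model="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", trust_remote_code=True)
#
# pipe = load_pipe()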