# NOTE(review): the three lines below were residue from the Hugging Face
# Spaces page header ("Spaces:" / "Sleeping") accidentally captured in a
# copy-paste; kept only as this comment so the file parses.
"""Minimal Streamlit chat UI around a Hugging Face text-generation pipeline.

Type a message, press Run, and the model's reply appears in the text area.
"""
import streamlit as st
from transformers import pipeline

MODEL_ID = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"


@st.cache_resource(show_spinner="Loading model…")
def _load_pipeline():
    """Build the text-generation pipeline once and reuse it across reruns.

    Streamlit re-executes the entire script on every interaction; without
    caching, the (multi-GB) model would be reloaded on each button press.
    """
    return pipeline("text-generation", model=MODEL_ID, trust_remote_code=True)


st.title("DeepSeek Chatbot")
prompt = st.text_input("Enter your message:")

if st.button("Run"):
    if not prompt:
        # Guard: don't invoke the model on an empty prompt.
        st.warning("Please enter a message first.")
    else:
        messages = [{"role": "user", "content": prompt}]
        pipe = _load_pipeline()
        result = pipe(messages)
        # The pipeline returns a list of candidates; "generated_text" holds
        # either the full chat transcript (list of role/content dicts) or a
        # plain string, depending on the transformers version. Extract the
        # assistant's reply rather than dumping the raw Python repr.
        generated = result[0]["generated_text"]
        if isinstance(generated, list):
            # Chat format: the last message is the model's reply.
            reply = generated[-1].get("content", "")
        else:
            reply = str(generated)
        st.text_area("Long Text Box", reply, height=1000)