Spaces:
Sleeping
Sleeping
File size: 3,574 Bytes
3b55d4b 2da3f27 9ab0176 2da3f27 94576e1 2da3f27 76792d2 e72ed81 51e9476 e72ed81 2da3f27 e72ed81 2da3f27 76792d2 e72ed81 76792d2 789e9e5 2da3f27 e72ed81 76792d2 e72ed81 2da3f27 e72ed81 76792d2 e72ed81 2da3f27 e72ed81 2da3f27 e72ed81 2da3f27 e72ed81 2da3f27 e72ed81 2da3f27 e72ed81 76792d2 e72ed81 2da3f27 e72ed81 f150754 b1c2780 2da3f27 b1c2780 2da3f27 b1c2780 2da3f27 b1c2780 2da3f27 e72ed81 f150754 76792d2 f150754 2da3f27 77c7a99 b1c2780 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 |
# app.py
import os
import requests
import streamlit as st
from models import get_hf_api
# --- API configuration ---
# Model endpoint comes from the local models module; the HF token is read
# from Streamlit secrets (set HF_TOKEN in .streamlit/secrets.toml).
API_URL = get_hf_api()
headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}

# --- Page setup ---
st.set_page_config(
    page_title="DeepSeek Chatbot - ruslanmv.com",
    page_icon="🤖",
    layout="centered",
)

# --- Session state ---
# The chat transcript survives reruns; create it once per session.
if "messages" not in st.session_state:
    st.session_state.messages = []
# --- Sidebar: generation settings ---
# Widgets return the current value on every rerun, so these plain
# assignments always reflect the user's latest choices.
with st.sidebar:
    st.header("Model Configuration")
    st.markdown("[Get HuggingFace Token](https://huggingface.co/settings/tokens)")

    # System prompt prepended to every request.
    system_message = st.text_area(
        "System Message",
        value="You are a friendly Chatbot created by ruslanmv.com",
        height=100,
    )

    # Sampling parameters: (label, min, max, default).
    max_tokens = st.slider("Max Tokens", 1, 4000, 512)
    temperature = st.slider("Temperature", 0.1, 4.0, 0.7)
    top_p = st.slider("Top-p", 0.1, 1.0, 0.9)
# --- Chat interface header ---
st.title("🤖 DeepSeek Chatbot")
st.caption("Powered by Hugging Face Inference API - Configure in sidebar")

# Replay the stored transcript; Streamlit re-executes the whole script on
# each interaction, so the history must be re-rendered every run.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])
# --- Handle a new user message ---
if prompt := st.chat_input("Type your message..."):
    # Record and echo the user's turn immediately.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Sentinel: set only when the API returns a usable generation.
    # BUGFIX: the original used bare `return` at module level here, which is
    # a SyntaxError (return outside a function) — the script could not run.
    # Restructured with if/else + this sentinel instead of early returns.
    assistant_response = None
    try:
        with st.spinner("Generating response..."):
            # Single-string prompt format expected by text-generation models.
            full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"

            response = requests.post(
                API_URL,
                headers=headers,
                json={
                    "inputs": full_prompt,
                    "parameters": {
                        "max_new_tokens": max_tokens,
                        "temperature": temperature,
                        "top_p": top_p,
                        "return_full_text": False
                    }
                },
                timeout=60,  # requests has no default timeout; don't hang the app
            )

            if response.status_code != 200:
                # Error payloads are JSON objects with an "error" field.
                error_msg = response.json().get('error', 'Unknown API error')
                st.error(f"API Error: {error_msg}")
                if "loading" in error_msg.lower():
                    st.info("Please wait a moment and try again. The model might be loading.")
            else:
                result = response.json()
                if isinstance(result, list):
                    # Normal response format: [{"generated_text": ...}]
                    assistant_response = result[0].get('generated_text', 'No response generated')
                    # Strip any echoed prompt up to the last "Assistant:" marker.
                    if "Assistant:" in assistant_response:
                        assistant_response = assistant_response.split("Assistant:")[-1].strip()
                elif isinstance(result, dict) and 'error' in result:
                    # Some failures come back with HTTP 200 but an error body.
                    st.error(f"API Error: {result['error']}")
                else:
                    st.error("Unexpected response format from API")

        # Only render and store the assistant turn on success.
        if assistant_response is not None:
            with st.chat_message("assistant"):
                st.markdown(assistant_response)
            st.session_state.messages.append({"role": "assistant", "content": assistant_response})

    except Exception as e:
        # Top-level boundary: surface network/JSON errors in the UI.
        st.error(f"Application Error: {str(e)}")