# Flask chat app for Hugging Face Spaces (Llama-3.2-3B-Instruct-Frog).
# NOTE(review): removed copy-paste residue from the Spaces page header
# ("Spaces: / Sleeping / Sleeping") — it was not part of the program.
import datetime

from flask import Flask, render_template, request, jsonify
from transformers import AutoModelForCausalLM, AutoTokenizer

app = Flask(__name__)

# Load the model and tokenizer once at startup (slow on first run: the
# weights are downloaded from the Hugging Face Hub).
model_path = "phamhai/Llama-3.2-3B-Instruct-Frog"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)

# Store chat history.
# NOTE(review): module-level state shared by ALL requests/users; fine for a
# single-user Spaces demo, not for multi-user deployment — confirm intent.
chat_history = []
@app.route('/')
def index():
    """Render the chat page with the conversation so far.

    Passes the module-level ``chat_history`` to ``index.html`` as ``history``.
    """
    return render_template('index.html', history=chat_history)
@app.route('/chat', methods=['POST'])
def chat():
    """Handle one chat turn: record the message, run the model, return the reply.

    Expects a JSON body ``{'message': str}``; returns JSON
    ``{'timestamp': 'HH:MM:SS', 'response': str}`` and appends both the user
    message and the bot reply to the shared ``chat_history``.
    """
    user_message = request.json['message']

    # Append user message to chat history.
    timestamp = datetime.datetime.now().strftime("%H:%M:%S")
    chat_history.append({'timestamp': timestamp, 'user': user_message})

    # Generate a response.
    # NOTE(review): the raw message is tokenized without a chat template; for
    # an -Instruct model, tokenizer.apply_chat_template would likely yield
    # better replies — confirm intended behavior before changing.
    input_text = user_message
    inputs = tokenizer(input_text, return_tensors='pt')
    outputs = model.generate(**inputs, max_new_tokens=128)

    # Decode ONLY the newly generated tokens. Decoding outputs[0] in full
    # would echo the user's prompt back at the start of the bot reply, since
    # generate() returns prompt + continuation.
    prompt_len = inputs['input_ids'].shape[1]
    bot_response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

    # Append bot response to chat history.
    chat_history.append({'timestamp': timestamp, 'bot': bot_response})
    return jsonify({'timestamp': timestamp, 'response': bot_response})
@app.route('/clear', methods=['POST'])
def clear():
    """Reset the conversation by emptying the shared chat history.

    Returns JSON ``{'success': true}``.
    """
    global chat_history
    chat_history = []  # Clear chat history for all subsequent requests.
    return jsonify(success=True)
if __name__ == '__main__':
    # Bind to all interfaces on port 7860 — the host/port Hugging Face Spaces
    # expects a containerized app to listen on.
    app.run(host='0.0.0.0', port=7860)