import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
import torch
import spaces
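
# `spaces` provides the @spaces.GPU decorator used below; on a Hugging Face
# ZeroGPU Space it requests a GPU only for the duration of each decorated call.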

# Model checkpoints for the two-stage pipeline: a 7B "brainstormer" and a
# 4-bit pre-quantized 32B code generator.
model_ids = {
    "7B": "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
    "32B-Unsloth": "unsloth/DeepSeek-R1-Distill-Qwen-32B-bnb-4bit",
}

# Caches for lazily loaded models and tokenizers, keyed by size label.
models = {}
tokenizers = {}

# 4-bit NF4 quantization config used when loading the 32B model.
bnb_config_4bit = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
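
# Rough sizing: 32B parameters take ~64 GB in bf16 but only ~16 GB as 4-bit
# NF4 weights (plus runtime overhead), which is what makes the 32B stage
# feasible on a single GPU.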


def get_model_and_tokenizer(size):
    """Load the requested model/tokenizer on first use and cache it."""
    if size not in models:
        model_id = model_ids[size]
        print(f"Loading {size} model: {model_id} on demand")
        tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
        if size == "32B-Unsloth":
            # The Unsloth checkpoint is pre-quantized; load it with the 4-bit config.
            model = AutoModelForCausalLM.from_pretrained(
                model_id,
                quantization_config=bnb_config_4bit,
                torch_dtype=torch.bfloat16,
                device_map="auto",
                trust_remote_code=True,
            )
        else:
            model = AutoModelForCausalLM.from_pretrained(
                model_id,
                torch_dtype=torch.bfloat16,
                device_map="auto",
                trust_remote_code=True,
            )
        models[size] = model
        tokenizers[size] = tokenizer
        print(f"Loaded {size} model on demand.")
    return models[size], tokenizers[size]
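
# Note: device_map="auto" relies on the `accelerate` package for weight
# placement; on a ZeroGPU Space it resolves to the single allocated GPU.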


# Prompt template for the brainstorming stage (7B model).
default_prompt_brainstorm = """**Code Analysis Task**
As a Senior Code Analyst, analyze this programming problem:

**User Request:**
{user_prompt}

**Relevant Context:**
{context}

**Analysis Required:**
1. Briefly break down the problem, including key constraints and edge cases.
2. Suggest 2-3 potential approaches (algorithms/data structures).
3. Recommend ONE primary strategy and briefly justify your choice.
4. Provide a very brief initial pseudocode sketch of the core logic."""


# Prompt template for the final code-generation stage (32B Unsloth model).
default_prompt_codegen = """**Code Implementation Task**
As a Principal Software Engineer, provide production-ready Streamlit/Python code based on this analysis:

**Initial Analysis:**
{analysis}

**Relevant Context:**
{context}

**Code Requirements:**
1. Generate concise, production-grade Python code for a Streamlit app.
2. Include necessary imports, UI elements, and basic functionality.
3. Add comments for clarity.
"""

# Both templates are filled with str.format(), so templates edited in the UI
# must keep these placeholder names ({user_prompt}/{context} above,
# {analysis}/{context} here); any other {name} field raises a KeyError.


# Simple shared memory: a list of strings the two stages read from and write to.
shared_memory = []


def store_in_memory(memory_item):
    shared_memory.append(memory_item)
    print(f"\n[Memory Stored]: {memory_item[:50]}...")


def retrieve_from_memory(query, top_k=2):
    """Return up to top_k stored items that contain the query as a substring."""
    relevant_memories = []
    query_lower = query.lower()
    for memory_item in shared_memory:
        if query_lower in memory_item.lower():
            relevant_memories.append(memory_item)

    if not relevant_memories:
        print("\n[Memory Retrieval]: No relevant memories found.")
        return []

    print(f"\n[Memory Retrieval]: Found {len(relevant_memories)} relevant memories.")
    return relevant_memories[:top_k]
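
# The matcher above is plain substring search, so the "RAG" here is keyword
# retrieval rather than semantic retrieval. A drop-in alternative (a sketch,
# assuming the optional `sentence-transformers` package) could rank memories
# by embedding similarity instead:
#
#   from sentence_transformers import SentenceTransformer, util
#   _embedder = SentenceTransformer("all-MiniLM-L6-v2")
#
#   def retrieve_from_memory(query, top_k=2):
#       if not shared_memory:
#           return []
#       scores = util.cos_sim(_embedder.encode(query), _embedder.encode(shared_memory))[0]
#       ranked = sorted(zip(shared_memory, scores.tolist()), key=lambda p: p[1], reverse=True)
#       return [mem for mem, _ in ranked[:top_k]]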


@spaces.GPU
def swarm_agent_sequential_rag(user_prompt, brainstorm_template, codegen_template,
                               temperature=0.5, top_p=0.9, max_new_tokens=300):
    """Two-stage pipeline: 7B brainstorming, then 32B final code generation."""
    global shared_memory
    shared_memory = []  # Start each request with a fresh memory.

    print("\n--- Swarm Agent Processing with Shared Memory (RAG) - GPU ACCELERATED - Final Model: 32B Unsloth ---")

    # Stage 1: brainstorming with the 7B model.
    print("\n[7B Model - Brainstorming] - GPU Accelerated")
    model_7b, tokenizer_7b = get_model_and_tokenizer("7B")
    retrieved_memory_7b = retrieve_from_memory(user_prompt)
    context_7b = "\n".join([f"- {mem}" for mem in retrieved_memory_7b]) if retrieved_memory_7b else "No relevant context found in memory."

    prompt_brainstorm = brainstorm_template.format(user_prompt=user_prompt, context=context_7b)

    inputs_7b = tokenizer_7b(prompt_brainstorm, return_tensors="pt").to(model_7b.device)
    output_7b = model_7b.generate(
        **inputs_7b,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
    )
    # Decode only the newly generated tokens so the echoed prompt is not fed
    # into the next stage or stored in memory.
    response_7b = tokenizer_7b.decode(output_7b[0][inputs_7b["input_ids"].shape[-1]:], skip_special_tokens=True)
    print(f"7B Response (Brainstorming):\n{response_7b}")
    store_in_memory(f"7B Model Initial Response: {response_7b[:200]}...")

    # Stage 2: final code generation with the 32B Unsloth model.
    final_model, final_tokenizer = get_model_and_tokenizer("32B-Unsloth")
    print("\n[32B Unsloth Model - Final Code Generation] - GPU Accelerated")
    model_stage_name = "32B Unsloth Model - Final Code"
    final_max_new_tokens = max_new_tokens + 200  # Extra headroom for code output.

    retrieved_memory_final = retrieve_from_memory(response_7b)
    context_final = "\n".join([f"- {mem}" for mem in retrieved_memory_final]) if retrieved_memory_final else "No relevant context found in memory."

    prompt_final = codegen_template.format(analysis=response_7b, context=context_final)

    inputs_final = final_tokenizer(prompt_final, return_tensors="pt").to(final_model.device)
    output_final = final_model.generate(
        **inputs_final,
        max_new_tokens=final_max_new_tokens,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
    )
    response_final = final_tokenizer.decode(output_final[0][inputs_final["input_ids"].shape[-1]:], skip_special_tokens=True)
    print(f"{model_stage_name} Response:\n{response_final}")
    store_in_memory(f"{model_stage_name} Response: {response_final[:200]}...")

    return response_final
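
# Memory note: loaded models stay cached in `models` for the lifetime of the
# process, so the 7B model is still resident during the 32B stage. If both do
# not fit on the allocated GPU together, one option (untested sketch) is to
# evict the brainstormer between the stages:
#
#   models.pop("7B", None); tokenizers.pop("7B", None)
#   torch.cuda.empty_cache()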


def gradio_interface(message, history, temp, top_p, max_tokens, brainstorm_prompt_text, codegen_prompt_text):
    # `history` is supplied by gr.ChatInterface but unused: each request runs
    # the full two-stage pipeline from scratch.
    response = swarm_agent_sequential_rag(
        message,
        brainstorm_template=brainstorm_prompt_text,
        codegen_template=codegen_prompt_text,
        temperature=temp,
        top_p=top_p,
        max_new_tokens=int(max_tokens),
    )
    return response


iface = gr.ChatInterface(
    fn=gradio_interface,
    additional_inputs=[
        gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.5, label="Temperature"),
        gr.Slider(minimum=0.01, maximum=1.0, step=0.05, value=0.9, label="Top P"),
        gr.Number(value=300, label="Max Tokens", precision=0),
        gr.Textbox(value=default_prompt_brainstorm, lines=10, label="Brainstorming Prompt Template (7B Model)"),
        gr.Textbox(value=default_prompt_codegen, lines=10, label="Code Generation Prompt Template (32B Unsloth Model)"),
    ],
    title="DeepSeek Agent Swarm Chat (ZeroGPU Demo - Fixed Models: 7B + 32B Unsloth)",
    description="Chat with a DeepSeek agent swarm (7B + 32B Unsloth) with shared memory, adjustable settings, **and customizable prompts!** **GPU accelerated using ZeroGPU!** (Requires a Pro Space.)",
)


if __name__ == "__main__":
    iface.launch()
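
# Quick smoke test without the UI (a sketch; assumes an environment where the
# models fit, and that outside a Space the @spaces.GPU decorator is a no-op):
#
#   print(swarm_agent_sequential_rag(
#       "Write a Streamlit app that plots an uploaded CSV",
#       default_prompt_brainstorm,
#       default_prompt_codegen,
#   ))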