# NOTE: This file was recovered from a Hugging Face Spaces page whose build
# status was "Runtime error" (likely caused by loading the very large
# DeepSeek-R1 checkpoint on limited Space hardware — see note in __init__).
# Install the required dependencies. The notebook magic `!pip install ...` is
# not valid Python in a plain .py file; run this in a terminal instead:
#   pip install transformers torch gradio datasets
import torch | |
from transformers import AutoModelForCausalLM, AutoTokenizer, TextDataset, DataCollatorForLanguageModeling, Trainer, TrainingArguments | |
import gradio as gr | |
import json | |
import os | |
class GameAIAssistant:
    """Game-development chat assistant backed by a causal language model.

    Keeps a short rolling conversation memory, persists user-supplied "game
    knowledge" to a text file, and can fine-tune the underlying model on new
    training text.
    """

    def __init__(self, model_name="deepseek-ai/DeepSeek-R1"):
        # NOTE(review): DeepSeek-R1 is an extremely large checkpoint; loading
        # it on small/free hardware is a likely cause of the Space's runtime
        # error. Consider a distilled variant if resources are limited.
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(model_name)
        self.memory = []        # alternating {"role": ..., "content": ...} dicts
        self.max_memory = 10    # keep only the most recent N messages
        self.game_knowledge_file = "game_knowledge.txt"
        # Load any previously persisted game knowledge.
        self.load_game_knowledge()

    def load_game_knowledge(self):
        """Read persisted game knowledge from disk into ``self.game_knowledge``.

        Falls back to an empty string when the file does not exist yet.
        """
        if os.path.exists(self.game_knowledge_file):
            with open(self.game_knowledge_file, 'r', encoding='utf-8') as f:
                self.game_knowledge = f.read()
        else:
            self.game_knowledge = ""

    def save_game_knowledge(self, new_knowledge):
        """Append ``new_knowledge`` to both the on-disk file and the in-memory copy."""
        with open(self.game_knowledge_file, 'a', encoding='utf-8') as f:
            f.write(new_knowledge + "\n")
        self.game_knowledge += new_knowledge + "\n"

    def generate_response(self, user_input):
        """Generate a reply to ``user_input`` and record the exchange in memory.

        The prompt combines the stored game knowledge, the last five memory
        entries, and the new user message.
        """
        # Build the prompt (runtime strings kept in Portuguese — they are part
        # of the model's prompt contract).
        context = f"""Conhecimento do Jogo:
{self.game_knowledge}
Histórico de Conversas:
{' '.join([f'{m["role"]}: {m["content"]}' for m in self.memory[-5:]])}
Usuário: {user_input}
Assistente:"""
        # Generate the response.
        inputs = self.tokenizer(context, return_tensors="pt", max_length=1024, truncation=True)
        outputs = self.model.generate(
            inputs["input_ids"],
            # Fix: pass the attention mask explicitly instead of letting the
            # model infer it (avoids a warning and wrong behavior with padding).
            attention_mask=inputs["attention_mask"],
            # Fix: the original used max_length=2048, which counts the prompt
            # tokens; max_new_tokens bounds only the generated continuation.
            max_new_tokens=1024,
            # Fix: temperature/top_p are silently ignored unless sampling is
            # enabled — the original was doing greedy decoding.
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            pad_token_id=self.tokenizer.eos_token_id,
        )
        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        # The decoded text includes the prompt; keep only the assistant's part.
        response = response.split("Assistente:")[-1].strip()
        # Update the rolling memory with the new exchange.
        self.memory.append({"role": "user", "content": user_input})
        self.memory.append({"role": "assistant", "content": response})
        # Keep only the last N messages.
        if len(self.memory) > self.max_memory:
            self.memory = self.memory[-self.max_memory:]
        return response

    def train_on_new_data(self, training_text):
        """Fine-tune the model on ``training_text`` and persist it as game knowledge.

        Returns a (Portuguese) status string for the UI.
        """
        # Write the training data where TextDataset can read it.
        with open("train_data.txt", "w", encoding="utf-8") as f:
            f.write(training_text)
        # NOTE(review): TextDataset is deprecated in recent transformers
        # releases; migrate to `datasets.load_dataset("text", ...)` + tokenize
        # when upgrading the library.
        dataset = TextDataset(
            tokenizer=self.tokenizer,
            file_path="train_data.txt",
            block_size=128
        )
        # Causal LM objective, so no masked-language-modeling collation.
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=self.tokenizer,
            mlm=False
        )
        # Training configuration.
        training_args = TrainingArguments(
            output_dir="./game_ai_model",
            overwrite_output_dir=True,
            num_train_epochs=3,
            per_device_train_batch_size=4,
            save_steps=10_000,
            save_total_limit=2,
        )
        # Run the fine-tuning pass.
        trainer = Trainer(
            model=self.model,
            args=training_args,
            data_collator=data_collator,
            train_dataset=dataset,
        )
        trainer.train()
        # Persist the text as game knowledge too.
        self.save_game_knowledge(training_text)
        return "Treinamento concluído e conhecimento salvo!"
# Initialize the assistant (loads the model and any saved knowledge).
assistant = GameAIAssistant()

# Build the Gradio UI: chat column on the left, training column on the right.
with gr.Blocks() as interface:
    gr.Markdown("# Assistente de IA para Desenvolvimento de Jogos")
    with gr.Row():
        with gr.Column():
            chatbot = gr.Textbox(label="Chat")
            msg = gr.Textbox(label="Sua mensagem")
            send = gr.Button("Enviar")
        with gr.Column():
            training_data = gr.Textbox(label="Dados de Treinamento", lines=10)
            train = gr.Button("Treinar IA")

    # Callback: generate a model reply for the user's message.
    def chat(message):
        response = assistant.generate_response(message)
        return response

    # Callback: fine-tune the model on the pasted text; returns a status string.
    def train_model(text):
        return assistant.train_on_new_data(text)

    # Wire the buttons to their callbacks.
    send.click(chat, inputs=msg, outputs=chatbot)
    train.click(train_model, inputs=training_data, outputs=chatbot)

# Fix: guard the launch so importing this module does not start a server as a
# side effect. Spaces and `python app.py` both run the file as __main__, so
# behavior there is unchanged. share=True creates a public link when run
# locally (it is ignored on HF Spaces).
if __name__ == "__main__":
    interface.launch(share=True)