# SHMT / multi_modal_model.py
import random

import numpy as np
import resampy
import soundfile as sf
import torch
import torch.nn as nn
from transformers import (
    BartForConditionalGeneration,
    AutoModelForCausalLM,
    BertModel,
    Wav2Vec2ForCTC,
    CLIPModel,
    AutoTokenizer,
    Wav2Vec2Processor,
    CLIPProcessor,
)


class MultiModalModel(nn.Module):
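    """Bundles five pretrained Hugging Face models (BART, GPT-2, BERT,
    Wav2Vec2, CLIP) behind a single task-dispatching forward()."""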
def __init__(self):
super(MultiModalModel, self).__init__()
        # Initialize the pretrained sub-models
self.text_generator = BartForConditionalGeneration.from_pretrained('facebook/bart-base')
self.code_generator = AutoModelForCausalLM.from_pretrained('gpt2')
self.nlp_encoder = BertModel.from_pretrained('bert-base-uncased')
self.speech_encoder = Wav2Vec2ForCTC.from_pretrained('facebook/wav2vec2-base-960h')
self.vision_encoder = CLIPModel.from_pretrained('openai/clip-vit-base-patch32')
        # Initialize the tokenizers and processors
self.text_tokenizer = AutoTokenizer.from_pretrained('facebook/bart-base')
self.code_tokenizer = AutoTokenizer.from_pretrained('gpt2')
self.nlp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
        # AutoTokenizer cannot preprocess audio or images; use the dedicated processors
        self.speech_processor = Wav2Vec2Processor.from_pretrained('facebook/wav2vec2-base-960h')
        self.vision_processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
        # A 5-layer feed-forward head (applied to BERT features in text_understanding)
self.neural_network = nn.Sequential(
nn.Linear(768, 1024),
nn.ReLU(),
nn.Linear(1024, 2048),
nn.ReLU(),
nn.Linear(2048, 1024),
nn.ReLU(),
nn.Linear(1024, 512),
nn.ReLU(),
nn.Linear(512, 256)
)
def forward(self, task, inputs):
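        """Dispatch `inputs` to the sub-model selected by `task`.

        Expected inputs: a tokenizer BatchEncoding for the text/code/NLP tasks,
        a raw 16 kHz waveform (tensor or array) for speech_recognition, and
        CLIPProcessor output for vision_understanding.
        """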
if task == 'text_generation':
attention_mask = inputs.attention_mask
outputs = self.text_generator.generate(
inputs.input_ids,
max_new_tokens=50,
pad_token_id=self.text_tokenizer.eos_token_id,
attention_mask=attention_mask,
top_p=0.95,
top_k=50,
temperature=1.2,
do_sample=True
)
return self.text_tokenizer.decode(outputs[0], skip_special_tokens=True)
elif task == 'code_generation':
attention_mask = inputs.attention_mask
outputs = self.code_generator.generate(
inputs.input_ids,
max_new_tokens=50,
pad_token_id=self.code_tokenizer.eos_token_id,
attention_mask=attention_mask,
top_p=0.95,
top_k=50,
temperature=1.2,
do_sample=True
)
return self.code_tokenizer.decode(outputs[0], skip_special_tokens=True)
elif task == 'text_understanding':
outputs = self.nlp_encoder(**inputs)
return self.neural_network(outputs.last_hidden_state)
        elif task == 'speech_recognition':
            # The processor expects a raw 1-D waveform (numpy) at 16 kHz
            if isinstance(inputs, torch.Tensor):
                inputs = inputs.squeeze().cpu().numpy()
            processed = self.speech_processor(inputs, sampling_rate=16000, return_tensors="pt", padding=True)
            processed = processed.to(self.speech_encoder.device)
            outputs = self.speech_encoder(**processed).logits
            predicted_ids = torch.argmax(outputs, dim=-1)
            transcription = self.speech_processor.batch_decode(predicted_ids)[0]
            return transcription
elif task == 'vision_understanding':
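            # Assumes `inputs` is the output of CLIPProcessor(images=..., return_tensors="pt")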
outputs = self.vision_encoder.get_image_features(**inputs)
return outputs
class EvolutionaryMultiModalNetwork(nn.Module):
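    """Wraps a MultiModalModel and "trains" it by random Gaussian weight mutation.

    Note: this is mutation-only evolution; there is no selection or rollback,
    so the tracked performance is never used to keep the best candidate.
    """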
def __init__(self, device='cuda' if torch.cuda.is_available() else 'cpu'):
super(EvolutionaryMultiModalNetwork, self).__init__()
self.device = device
self.multi_modal_model = MultiModalModel().to(self.device)
self.mutation_params = {
'mutation_rate': 0.2,
'mutation_scale': 0.05
}
def mutate_model(self, model):
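        """With probability `mutation_rate`, add zero-mean Gaussian noise
        (std = `mutation_scale`) to each trainable parameter tensor."""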
        for param in model.parameters():
            if param.requires_grad and random.random() < self.mutation_params['mutation_rate']:
                noise = torch.normal(
                    mean=torch.zeros_like(param.data),
                    std=self.mutation_params['mutation_scale']
                ).to(self.device)
                param.data.add_(noise)
        return model
def evaluate_model(self, model, task, test_input):
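        """Return (parameter count, placeholder performance score) for one task."""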
try:
with torch.no_grad():
output = model(task, test_input)
complexity = sum(p.numel() for p in model.parameters())
                performance = len(output)  # placeholder metric: length of the decoded output
return complexity, performance
except Exception as e:
print(f"模型评估错误: {e}")
return 0, 0
def evolutionary_training(self, epochs=5):
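        """Run `epochs` generations of mutate-then-evaluate (no selection step)."""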
print("🧬 开始进化训练...")
for epoch in range(epochs):
print(f"\n🌟 第 {epoch+1} 代:")
# 模型变异
self.multi_modal_model = self.mutate_model(self.multi_modal_model)
# 模型评估
test_input_text = self.multi_modal_model.text_tokenizer("Hello, how are you?", return_tensors='pt').to(self.device)
test_input_code = self.multi_modal_model.code_tokenizer("def add(a, b): return a + b", return_tensors='pt').to(self.device)
# 加载音频文件并处理
            audio_path = "C:/Users/baby7/Desktop/推理/sample-3s.wav"  # hardcoded local sample; point this at your own file
            audio_input, sample_rate = sf.read(audio_path)
            if audio_input.ndim > 1:
                audio_input = np.mean(audio_input, axis=1)  # downmix to mono
            if sample_rate != 16000:
                audio_input = resampy.resample(audio_input, sample_rate, 16000)  # resample to 16 kHz
            test_input_audio = torch.tensor(audio_input).unsqueeze(0)  # add batch dimension; forward() converts for the processor
complexity_text, performance_text = self.evaluate_model(self.multi_modal_model, 'text_generation', test_input_text)
complexity_code, performance_code = self.evaluate_model(self.multi_modal_model, 'code_generation', test_input_code)
complexity_audio, performance_audio = self.evaluate_model(self.multi_modal_model, 'speech_recognition', test_input_audio)
print(f"多模态模型 (文本生成) - 复杂度: {complexity_text}, 性能: {performance_text:.4f}")
print(f"多模态模型 (代码生成) - 复杂度: {complexity_code}, 性能: {performance_code:.4f}")
print(f"多模态模型 (语音识别) - 复杂度: {complexity_audio}, 性能: {performance_audio:.4f}")
def print_model_info(self):
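        """Print the wrapped model's structure and its parameter counts."""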
print(f"\n多模态模型结构:")
print(self.multi_modal_model)
print("\n参数统计:")
total_params = sum(p.numel() for p in self.multi_modal_model.parameters())
trainable_params = sum(p.numel() for p in self.multi_modal_model.parameters() if p.requires_grad)
print(f"总参数: {total_params}")
print(f"可训练参数: {trainable_params}")
def main():
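    """Entry point: seed the RNGs, build the network, print info, run training."""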
    # Set random seeds for reproducibility
    torch.manual_seed(42)
    np.random.seed(42)
    random.seed(42)
    # Create the evolutionary multi-modal network
    evolutionary_network = EvolutionaryMultiModalNetwork()
    # Print model info
    evolutionary_network.print_model_info()
    # Run evolutionary training
    evolutionary_network.evolutionary_training(epochs=5)

if __name__ == "__main__":
main()
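
# Example usage (assumes torch, transformers, soundfile, and resampy are installed):
#   $ python multi_modal_model.py
# The first run downloads the five pretrained checkpoints from the Hugging Face
# Hub, prints the structure and parameter counts, then runs five generations of
# mutate-and-evaluate. The sample .wav referenced above must exist locally.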