zeroMN committed on
Commit 6471d73 · verified · 1 Parent(s): 0a66d43

Upload 24 files

Files changed (2)
  1. SJMT_model.pth +2 -2
  2. app.py +176 -0
SJMT_model.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b83992690213c479a00be25af7fa6bfea7526861094cff5b6e97ee44d89f1cbb
- size 2493181678
+ oid sha256:3acba7b1a7aac988188da7e06a48d7db0ea263e2c9bf984412ad45f126b88f99
+ size 2476637590
app.py ADDED
@@ -0,0 +1,176 @@
+ import torch
+ import torch.nn as nn
+ from transformers import (
+     BartForConditionalGeneration,
+     AutoModelForCausalLM,
+     BertModel,
+     Wav2Vec2ForCTC,
+     CLIPModel,
+     AutoTokenizer,
+     Wav2Vec2Processor,
+     CLIPProcessor
+ )
+ import numpy as np
+ import random
+ import soundfile as sf
+ import resampy
+
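+ # MultiModalModel bundles five pretrained Hugging Face models (BART, GPT-2,
+ # BERT, Wav2Vec2, CLIP) behind a single forward() that dispatches on a task
+ # string, so one module covers text, code, speech and vision inputs.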
+ class MultiModalModel(nn.Module):
+     def __init__(self):
+         super(MultiModalModel, self).__init__()
+         # Initialize the sub-models
+         self.text_generator = BartForConditionalGeneration.from_pretrained('facebook/bart-base')
+         self.code_generator = AutoModelForCausalLM.from_pretrained('gpt2')
+         self.nlp_encoder = BertModel.from_pretrained('bert-base-uncased')
+         self.speech_encoder = Wav2Vec2ForCTC.from_pretrained('facebook/wav2vec2-base-960h')
+         self.vision_encoder = CLIPModel.from_pretrained('openai/clip-vit-base-patch32')
+
+         # Initialize tokenizers and processors; speech and vision need
+         # Wav2Vec2Processor/CLIPProcessor rather than AutoTokenizer, since
+         # they take raw audio and images, not text
+         self.text_tokenizer = AutoTokenizer.from_pretrained('facebook/bart-base')
+         self.code_tokenizer = AutoTokenizer.from_pretrained('gpt2')
+         self.nlp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
+         self.speech_processor = Wav2Vec2Processor.from_pretrained('facebook/wav2vec2-base-960h')
+         self.vision_processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
+
+         # A five-layer fully connected head (768 -> 1024 -> 2048 -> 1024 -> 512 -> 256)
+         self.neural_network = nn.Sequential(
+             nn.Linear(768, 1024),
+             nn.ReLU(),
+             nn.Linear(1024, 2048),
+             nn.ReLU(),
+             nn.Linear(2048, 1024),
+             nn.ReLU(),
+             nn.Linear(1024, 512),
+             nn.ReLU(),
+             nn.Linear(512, 256)
+         )
+
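+     # Route the input to the sub-model matching the requested task; generation
+     # tasks return decoded strings, understanding tasks return feature tensors.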
+     def forward(self, task, inputs):
+         if task == 'text_generation':
+             attention_mask = inputs.attention_mask
+             outputs = self.text_generator.generate(
+                 inputs.input_ids,
+                 max_new_tokens=50,
+                 pad_token_id=self.text_tokenizer.eos_token_id,
+                 attention_mask=attention_mask,
+                 top_p=0.95,
+                 top_k=50,
+                 temperature=1.2,
+                 do_sample=True
+             )
+             return self.text_tokenizer.decode(outputs[0], skip_special_tokens=True)
+         elif task == 'code_generation':
+             attention_mask = inputs.attention_mask
+             outputs = self.code_generator.generate(
+                 inputs.input_ids,
+                 max_new_tokens=50,
+                 pad_token_id=self.code_tokenizer.eos_token_id,
+                 attention_mask=attention_mask,
+                 top_p=0.95,
+                 top_k=50,
+                 temperature=1.2,
+                 do_sample=True
+             )
+             return self.code_tokenizer.decode(outputs[0], skip_special_tokens=True)
+         elif task == 'text_understanding':
+             outputs = self.nlp_encoder(**inputs)
+             return self.neural_network(outputs.last_hidden_state)
+         elif task == 'speech_recognition':
+             # `inputs` is a raw 1-D waveform; move the processed features onto
+             # the same device as the encoder before the forward pass
+             inputs = self.speech_processor(audio=inputs, sampling_rate=16000,
+                                            return_tensors="pt", padding=True)
+             inputs = inputs.to(self.speech_encoder.device)
+             outputs = self.speech_encoder(**inputs).logits
+             predicted_ids = torch.argmax(outputs, dim=-1)
+             transcription = self.speech_processor.batch_decode(predicted_ids)[0]
+             return transcription
+         elif task == 'vision_understanding':
+             outputs = self.vision_encoder.get_image_features(**inputs)
+             return outputs
+
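+ # EvolutionaryMultiModalNetwork trains by mutation rather than gradient
+ # descent: each generation perturbs the weights with Gaussian noise and then
+ # scores the mutated model on a few probe inputs.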
+ class EvolutionaryMultiModalNetwork(nn.Module):
+     def __init__(self, device='cuda' if torch.cuda.is_available() else 'cpu'):
+         super(EvolutionaryMultiModalNetwork, self).__init__()
+         self.device = device
+         self.multi_modal_model = MultiModalModel().to(self.device)
+         self.mutation_params = {
+             'mutation_rate': 0.2,
+             'mutation_scale': 0.05
+         }
+
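+     # With probability mutation_rate, add zero-mean Gaussian noise
+     # (std = mutation_scale) to each trainable parameter tensor in place.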
+     def mutate_model(self, model):
+         for param in model.parameters():
+             if param.requires_grad:
+                 noise = torch.normal(
+                     mean=torch.zeros_like(param.data),
+                     std=self.mutation_params['mutation_scale']
+                 ).to(self.device)
+                 if random.random() < self.mutation_params['mutation_rate']:
+                     param.data.add_(noise)
+         return model
+
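+     # Score a model on one task: complexity is the parameter count and
+     # performance is just the output length, a placeholder fitness metric.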
+     def evaluate_model(self, model, task, test_input):
+         try:
+             with torch.no_grad():
+                 output = model(task, test_input)
+             complexity = sum(p.numel() for p in model.parameters())
+             performance = len(output)  # toy performance metric
+             return complexity, performance
+         except Exception as e:
+             print(f"Model evaluation error: {e}")
+             return 0, 0
+
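+     # One generation = mutate the current model, then re-score it on fixed
+     # text, code, and audio probes.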
+     def evolutionary_training(self, epochs=5):
+         print("Starting evolutionary training...")
+
+         for epoch in range(epochs):
+             print(f"\n🌟 Generation {epoch+1}:")
+
+             # Mutate the model
+             self.multi_modal_model = self.mutate_model(self.multi_modal_model)
+
+             # Build evaluation inputs
+             test_input_text = self.multi_modal_model.text_tokenizer("Hello, how are you?", return_tensors='pt').to(self.device)
+             test_input_code = self.multi_modal_model.code_tokenizer("def add(a, b): return a + b", return_tensors='pt').to(self.device)
+
+             # Load and preprocess the audio file
+             audio_path = "C:/Users/baby7/Desktop/推理/sample-3s.wav"
+             audio_input, sample_rate = sf.read(audio_path)
+             if audio_input.ndim > 1:
+                 audio_input = np.mean(audio_input, axis=1)  # downmix to mono
+             if sample_rate != 16000:
+                 audio_input = resampy.resample(audio_input, sample_rate, 16000)  # resample to the 16 kHz rate wav2vec2 expects
+             test_input_audio = audio_input.astype(np.float32)  # keep as a NumPy array; the speech processor batches it in forward()
+
+             complexity_text, performance_text = self.evaluate_model(self.multi_modal_model, 'text_generation', test_input_text)
+             complexity_code, performance_code = self.evaluate_model(self.multi_modal_model, 'code_generation', test_input_code)
+             complexity_audio, performance_audio = self.evaluate_model(self.multi_modal_model, 'speech_recognition', test_input_audio)
+
+             print(f"Multi-modal model (text generation) - complexity: {complexity_text}, performance: {performance_text:.4f}")
+             print(f"Multi-modal model (code generation) - complexity: {complexity_code}, performance: {performance_code:.4f}")
+             print(f"Multi-modal model (speech recognition) - complexity: {complexity_audio}, performance: {performance_audio:.4f}")
+
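+     # Dump the module tree and parameter counts for a quick sanity check.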
+     def print_model_info(self):
+         print("\nMulti-modal model structure:")
+         print(self.multi_modal_model)
+         print("\nParameter statistics:")
+         total_params = sum(p.numel() for p in self.multi_modal_model.parameters())
+         trainable_params = sum(p.numel() for p in self.multi_modal_model.parameters() if p.requires_grad)
+         print(f"Total parameters: {total_params}")
+         print(f"Trainable parameters: {trainable_params}")
+
160
+ def main():
161
+ # 设置随机种子
162
+ torch.manual_seed(42)
163
+ np.random.seed(42)
164
+ random.seed(42)
165
+
166
+ # 创建进化多模态网络实例
167
+ evolutionary_network = EvolutionaryMultiModalNetwork()
168
+
169
+ # 打印模型信息
170
+ evolutionary_network.print_model_info()
171
+
172
+ # 进行进化训练
173
+ evolutionary_network.evolutionary_training(epochs=5)
174
+
175
+ if __name__ == "__main__":
176
+ main()