| | import csv |
| | import json |
| | import random |
| |
|
| | |
# --- File paths: inputs, generated dataset outputs, and meta-config files ---

# Input CSV mapping each meme image file name to its target text.
csv_file_path = '/mnt/afs/xueyingyi/meme/generate/E_text_1.csv'
# Input JSONL: one {"file_name", "user_input"} record per line.
user_input_jsonl_path = '/mnt/afs/xueyingyi/meme/generate/user_input_all.jsonl'
# Combined dataset output (all samples, before the train/eval split).
output_jsonl_path = '/mnt/afs/xueyingyi/meme/data/Cjson/C_generate_multi_all_item.jsonl'
# 90/10 train/eval split outputs.
train_jsonl_path = '/mnt/afs/xueyingyi/meme/data/Cjson/C_generate_train_multi_all_item.jsonl'
eval_jsonl_path = '/mnt/afs/xueyingyi/meme/data/Cjson/C_generate_eval_multi_all_item.jsonl'
# Dataset meta-config files (JSON content; NOTE(review): written with a
# .jsonl extension and into a different directory — confirm this is intended).
train_config_path = '/mnt/afs/xueyingyi/meme/data/C_generate_train_multi_all_item.jsonl'
eval_config_path = '/mnt/afs/xueyingyi/meme/data/C_generate_eval_multi_all_item.jsonl'
| |
|
| | |
# Load the CSV mapping of image file name -> target (gpt-side) text.
# newline='' is required by the csv module so that newlines embedded in
# quoted fields are parsed correctly (the original omitted it).
csv_data = {}
with open(csv_file_path, 'r', encoding='utf-8', newline='') as csv_file:
    csv_reader = csv.DictReader(csv_file)
    for row in csv_reader:
        # Keyed by file_name; a duplicate name keeps the last row seen.
        csv_data[row['file_name']] = row['text'].strip()
| |
|
| | |
# Load every user-input record: one JSON object per line of the JSONL file.
with open(user_input_jsonl_path, 'r', encoding='utf-8') as f:
    user_input_data = [json.loads(line.strip()) for line in f]
| |
|
| | |
# The two prompt files are loop-invariant: read them once up front instead
# of re-opening and re-reading them on every iteration (as the original
# did).  encoding='utf-8' added for consistency with the rest of the file.
with open('/mnt/afs/xueyingyi/vl2.5/InternVL/inference/text_new.txt', 'r',
          encoding='utf-8') as prompt_file:
    PROMPT = prompt_file.read()
with open('/mnt/afs/xueyingyi/vl2.5/InternVL/inference/text_example.txt', 'r',
          encoding='utf-8') as prompt_file:
    PROMPT_example = prompt_file.read()

# Build one training sample per user-input record.  Each sample pairs the
# fixed example image with the meme image named by the record; the gpt-side
# answer is the text looked up from the CSV.
jsonl_data = []
for idx, item in enumerate(user_input_data):
    file_name = item['file_name']
    user_input = item['user_input']

    # Skip records whose image has no corresponding text in the CSV.
    # (Skipped records leave gaps in the "id" sequence, as before.)
    if file_name not in csv_data:
        print(f"警告: {file_name} 在CSV文件中未找到,跳过此条数据")
        continue

    text = csv_data[file_name]

    # Two <image> placeholders: the first for the example image, the
    # second for the actual meme image listed in "image" below.
    conversations = [
        {
            "from": "human",
            "value": f"{PROMPT}<image>\n{PROMPT_example}\n<image>\n{user_input}"
        },
        {
            "from": "gpt",
            "value": text
        }
    ]

    jsonl_data.append({
        "id": idx,
        "image": [
            "/mnt/afs/xueyingyi/vl2.5/InternVL/inference/example.jpg",
            f"/mnt/afs/xueyingyi/image_vague/inpainting_demo/{file_name}"
        ],
        "conversations": conversations
    })
| |
|
| | |
# Persist the full combined dataset, one JSON object per line.
with open(output_jsonl_path, 'w', encoding='utf-8') as f:
    f.writelines(json.dumps(record, ensure_ascii=False) + '\n'
                 for record in jsonl_data)
| |
|
| | |
# Deterministic shuffle (fixed seed), then a 90% train / 10% eval split.
random.seed(42)
random.shuffle(jsonl_data)
split_at = int(len(jsonl_data) * 0.9)
train_data, eval_data = jsonl_data[:split_at], jsonl_data[split_at:]
| |
|
| | |
# Write the training split as JSONL (print appends the newline).
with open(train_jsonl_path, 'w', encoding='utf-8') as f:
    for record in train_data:
        print(json.dumps(record, ensure_ascii=False), file=f)
| |
|
| | |
# Write the evaluation split as JSONL (print appends the newline).
with open(eval_jsonl_path, 'w', encoding='utf-8') as f:
    for record in eval_data:
        print(json.dumps(record, ensure_ascii=False), file=f)
| |
|
| | |
# Meta-config pointing the trainer at the training annotation file.
with open(train_config_path, 'w', encoding='utf-8') as f:
    json.dump(
        {
            "classification_C": {
                "root": "/mnt/afs/xueyingyi/image_vague/inpainting_demo",
                "annotation": train_jsonl_path,
                "data_augment": False,
                "repeat_time": 1,
                "length": len(train_data),
            }
        },
        f,
        ensure_ascii=False,
        indent=4,
    )
| |
|
| | |
# Meta-config pointing the trainer at the evaluation annotation file.
with open(eval_config_path, 'w', encoding='utf-8') as f:
    json.dump(
        {
            "classification_C": {
                "root": "/mnt/afs/xueyingyi/image_vague/inpainting_demo",
                "annotation": eval_jsonl_path,
                "data_augment": False,
                "repeat_time": 1,
                "length": len(eval_data),
            }
        },
        f,
        ensure_ascii=False,
        indent=4,
    )
| |
|
# Final summary — a single write producing the same three stdout lines.
summary_lines = (
    "数据处理完成!",
    f"训练集大小: {len(train_data)}",
    f"测试集大小: {len(eval_data)}",
)
print('\n'.join(summary_lines))