ardalan.mehrani committed
Commit 5e0f625 · 1 Parent(s): 5be1614

add examples

examples/image_chat.py ADDED
@@ -0,0 +1,31 @@
+
+ import torch
+ from transformers import AutoModel, AutoTokenizer
+ from utils import load_image
+
+ if __name__ == "__main__":
+
+     model_path, rev = 'morpheushoc/InternVL2_5-2B', 'main'
+     # model_path = 'OpenGVLab/InternVL2_5-2B'
+     model = AutoModel.from_pretrained(model_path,
+                                       torch_dtype=torch.bfloat16,
+                                       load_in_8bit=False,
+                                       low_cpu_mem_usage=True,
+                                       use_flash_attn=True,
+                                       trust_remote_code=True,
+                                       revision=rev).eval().cuda()
+     tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=False)
+     generation_config = dict(max_new_tokens=1024, do_sample=False)
+
+     fp, question = 'image1.jpg', 'Describe this image in great detail'
+
+     # tile the image into 448x448 patches, move them to the GPU in bf16, and record the tile count
+     pixel_values = load_image(fp, max_num=12).to(torch.bfloat16).cuda()
+     num_patches_list = [len(pixel_values)]
+
+     # prepend the <image> placeholder expected by the chat template
+     question = '<image>\n' + question
+     response, history = model.chat(tokenizer, pixel_values, question, generation_config,
+                                    num_patches_list=num_patches_list, history=None, return_history=True)
+     print(f'User: {question}\nAssistant: {response}')
+
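The script requests return_history=True but stops after a single turn. A minimal follow-up-turn sketch, not part of the commit, reusing the returned history with the same model.chat call as above (the second question is illustrative only):

    # hypothetical second turn: pass the returned history back in instead of history=None
    follow_up = 'What colors dominate the image?'
    response, history = model.chat(tokenizer, pixel_values, follow_up, generation_config,
                                   num_patches_list=num_patches_list, history=history,
                                   return_history=True)
    print(f'User: {follow_up}\nAssistant: {response}')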
examples/image_chat_batch.py ADDED
@@ -0,0 +1,40 @@
+
+ import torch
+ from transformers import AutoModel, AutoTokenizer
+ from utils import load_image
+
+ if __name__ == "__main__":
+
+     model_path, rev = 'morpheushoc/InternVL2_5-2B', 'main'
+     model = AutoModel.from_pretrained(model_path,
+                                       torch_dtype=torch.bfloat16,
+                                       load_in_8bit=False,
+                                       low_cpu_mem_usage=True,
+                                       use_flash_attn=True,
+                                       trust_remote_code=True,
+                                       revision=rev).eval().cuda()
+     tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=False)
+     generation_config = dict(max_new_tokens=1024, do_sample=False)
+     paths = [
+         'image1.jpg',
+         'image1.jpg'
+     ]
+
+     questions = [
+         'Describe this image in great detail',
+         'Describe this image in great detail'
+     ]
+
+     # tile each image, prefix its question with the <image> placeholder, and track tiles per image
+     pixel_values, num_patches_list, l_questions = [], [], []
+     for path, q in zip(paths, questions):
+         pxl_val = load_image(path, max_num=12).to(torch.bfloat16).cuda()
+         pixel_values.append(pxl_val)
+         num_patches_list.append(len(pxl_val))
+         l_questions.append('<image>\n{}'.format(q))
+     pixel_values = torch.cat(pixel_values)
+
+     responses = model.batch_chat(tokenizer, pixel_values, num_patches_list=num_patches_list,
+                                  questions=l_questions, generation_config=generation_config)
+     for question, response in zip(questions, responses):
+         print(f'User: {question}\nAssistant: {response}')
examples/utils.py ADDED
@@ -0,0 +1,113 @@
+ import numpy as np
+ import torch
+ import torchvision.transforms as T
+ from decord import VideoReader, cpu
+ from PIL import Image
+ from torchvision.transforms.functional import InterpolationMode
+
+ IMAGENET_MEAN = (0.485, 0.456, 0.406)
+ IMAGENET_STD = (0.229, 0.224, 0.225)
+
+ def build_transform(input_size):
+     MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
+     transform = T.Compose([
+         T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
+         T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
+         T.ToTensor(),
+         T.Normalize(mean=MEAN, std=STD)
+     ])
+     return transform
+
+ def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
+     best_ratio_diff = float('inf')
+     best_ratio = (1, 1)
+     area = width * height
+     for ratio in target_ratios:
+         target_aspect_ratio = ratio[0] / ratio[1]
+         ratio_diff = abs(aspect_ratio - target_aspect_ratio)
+         if ratio_diff < best_ratio_diff:
+             best_ratio_diff = ratio_diff
+             best_ratio = ratio
+         elif ratio_diff == best_ratio_diff:
+             if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
+                 best_ratio = ratio
+     return best_ratio
+
+ def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
+     orig_width, orig_height = image.size
+     aspect_ratio = orig_width / orig_height
+
+     # enumerate the candidate tiling grids (columns x rows) allowed by min_num/max_num
+     target_ratios = set(
+         (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
+         i * j <= max_num and i * j >= min_num)
+     target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
+
+     # find the closest aspect ratio to the target
+     target_aspect_ratio = find_closest_aspect_ratio(
+         aspect_ratio, target_ratios, orig_width, orig_height, image_size)
+
+     # calculate the target width and height
+     target_width = image_size * target_aspect_ratio[0]
+     target_height = image_size * target_aspect_ratio[1]
+     blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
+
+     # resize the image
+     resized_img = image.resize((target_width, target_height))
+     processed_images = []
+     for i in range(blocks):
+         box = (
+             (i % (target_width // image_size)) * image_size,
+             (i // (target_width // image_size)) * image_size,
+             ((i % (target_width // image_size)) + 1) * image_size,
+             ((i // (target_width // image_size)) + 1) * image_size
+         )
+         # split the image
+         split_img = resized_img.crop(box)
+         processed_images.append(split_img)
+     assert len(processed_images) == blocks
+     if use_thumbnail and len(processed_images) != 1:
+         thumbnail_img = image.resize((image_size, image_size))
+         processed_images.append(thumbnail_img)
+     return processed_images
+
+ def load_image(image_file, input_size=448, max_num=12):
+     image = Image.open(image_file).convert('RGB')
+     transform = build_transform(input_size=input_size)
+     images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
+     pixel_values = [transform(image) for image in images]
+     pixel_values = torch.stack(pixel_values)
+     return pixel_values
+
+ # helpers for the video example: sample frame indices, then tile and normalize each frame
+ def get_index(bound, fps, max_frame, first_idx=0, num_segments=32):
+     if bound:
+         start, end = bound[0], bound[1]
+     else:
+         start, end = -100000, 100000
+     start_idx = max(first_idx, round(start * fps))
+     end_idx = min(round(end * fps), max_frame)
+     seg_size = float(end_idx - start_idx) / num_segments
+     frame_indices = np.array([
+         int(start_idx + (seg_size / 2) + np.round(seg_size * idx))
+         for idx in range(num_segments)
+     ])
+     return frame_indices
+
+ def load_video(video_path, bound=None, input_size=448, max_num=1, num_segments=32):
+     vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
+     max_frame = len(vr) - 1
+     fps = float(vr.get_avg_fps())
+
+     pixel_values_list, num_patches_list = [], []
+     transform = build_transform(input_size=input_size)
+     frame_indices = get_index(bound, fps, max_frame, first_idx=0, num_segments=num_segments)
+     for frame_index in frame_indices:
+         img = Image.fromarray(vr[frame_index].asnumpy()).convert('RGB')
+         img = dynamic_preprocess(img, image_size=input_size, use_thumbnail=True, max_num=max_num)
+         pixel_values = [transform(tile) for tile in img]
+         pixel_values = torch.stack(pixel_values)
+         num_patches_list.append(pixel_values.shape[0])
+         pixel_values_list.append(pixel_values)
+     pixel_values = torch.cat(pixel_values_list)
+     return pixel_values, num_patches_list
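For reference, load_image returns a stacked tensor of shape [num_tiles, 3, input_size, input_size], and with use_thumbnail=True a full-image thumbnail tile is appended whenever the image is split into more than one tile. A small usage sketch, not part of the commit; the 1600x900 example resolution in the comment is made up for illustration:

    from utils import load_image

    # illustrative only: 'image1.jpg' stands in for any local image
    pixel_values = load_image('image1.jpg', input_size=448, max_num=12)
    # one normalized 3x448x448 tensor per tile, plus the thumbnail tile when the image is split;
    # e.g. a hypothetical 1600x900 photo maps to a 4x2 grid (8 tiles) + thumbnail -> [9, 3, 448, 448]
    print(pixel_values.shape)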
examples/video_chat.py ADDED
@@ -0,0 +1,28 @@
+
+ import torch
+ from transformers import AutoModel, AutoTokenizer
+ from utils import load_video
+
+ if __name__ == "__main__":
+
+     model_path, rev = 'morpheushoc/InternVL2_5-2B', 'main'
+     model = AutoModel.from_pretrained(model_path,
+                                       torch_dtype=torch.bfloat16,
+                                       load_in_8bit=False,
+                                       low_cpu_mem_usage=True,
+                                       use_flash_attn=True,
+                                       trust_remote_code=True,
+                                       revision=rev).eval().cuda()
+     tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=False)
+     generation_config = dict(max_new_tokens=1024, do_sample=False)
+
+     fp, question = 'red-panda.mp4', 'Describe this video in great detail'
+
+     # sample 8 frames, then prefix the question with one <image> placeholder per frame
+     pixel_values, num_patches_list = load_video(fp, num_segments=8, max_num=1)
+     prefix = ''.join([f'Frame{i+1}: <image>\n' for i in range(len(num_patches_list))])
+     question = prefix + question
+     pixel_values = pixel_values.to(torch.bfloat16).cuda()
+     response, history = model.chat(tokenizer, pixel_values, question, generation_config,
+                                    num_patches_list=num_patches_list, history=None, return_history=True)
+     print(f'User: {question}\nAssistant: {response}')
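A note on load_video: its bound argument, left unused above, is a (start_sec, end_sec) tuple that get_index converts to frame indices, so sampling can be restricted to a time window. A hedged sketch of that call, not part of the commit (the 10-second window is arbitrary):

    # sample the 8 frames from the first 10 seconds of the clip only (bound is in seconds)
    pixel_values, num_patches_list = load_video(fp, bound=(0, 10), num_segments=8, max_num=1)
    pixel_values = pixel_values.to(torch.bfloat16).cuda()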