File size: 2,110 Bytes
11ef54b
 
 
 
 
 
 
b4837db
11ef54b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53

import torch
from transformers import AutoModel, AutoTokenizer
from utils import load_image, load_video

if __name__ == "__main__":

    # Demo: single-turn and multi-turn chat with InternVL2_5-2B over images and a video.
    # NOTE(review): weights come from the HF Hub via trust_remote_code and the script
    # assumes a CUDA GPU with bfloat16 support — confirm before running.
    model_path, revision = 'morpheushoc/InternVL2_5-2B', 'main'

    # path = 'OpenGVLab/InternVL2_5-2B'
    model = AutoModel.from_pretrained(model_path,
                                      torch_dtype=torch.bfloat16,
                                      load_in_8bit=False,
                                      low_cpu_mem_usage=True,
                                      use_flash_attn=True,
                                      trust_remote_code=True,
                                      revision=revision).eval().cuda()
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=False)
    generation_config = dict(max_new_tokens=1024, do_sample=False)

    # Inputs and their first-turn prompts, paired positionally.
    paths = [
        'image1.jpg',
        'image1.jpg',
        'image2.jpg',
        'red-panda.mp4',
    ]

    questions = [
        'describe this image',
        'describe this image',
        'describe this image',
        'describe this video',
    ]

    for fp, question in zip(paths, questions):
        if fp.endswith('mp4'):
            # Video: sample 8 frames; one <image> placeholder per sampled frame.
            pixel_values, num_patches_list = load_video(fp, num_segments=8, max_num=1)
            prefix = ''.join(f'Frame{i+1}: <image>\n' for i in range(len(num_patches_list)))
        else:
            # Image: single <image> placeholder; patch count is the tensor's first dim.
            # Device/dtype transfer is done once below for both branches.
            pixel_values = load_image(fp, max_num=12)
            num_patches_list = [len(pixel_values)]
            prefix = '<image>\n'

        question = prefix + question
        pixel_values = pixel_values.to(torch.bfloat16).cuda()
        response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                                       num_patches_list=num_patches_list, history=None, return_history=True)
        print(f'User: {question}\nAssistant: {response}')

        # Follow-up turn reusing the conversation history from the first turn.
        question = 'How many animals ?'
        response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                                       history=history, return_history=True)
        print(f'User: {question}\nAssistant: {response}')