aekpic877 committed on
Commit
934f8a8
1 Parent(s): 551337f
Files changed (1) hide show
  1. app.py +40 -0
app.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # test.py
2
+ import torch
3
+ from PIL import Image
4
+ from transformers import AutoModel, AutoTokenizer
5
+
6
+ model = AutoModel.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5', trust_remote_code=True, torch_dtype=torch.float16)
7
+ model = model.to(device='cuda')
8
+
9
+ tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5', trust_remote_code=True)
10
+ model.eval()
11
+
12
+ image = Image.open('xx.jpg').convert('RGB')
13
+ question = 'What is in the image?'
14
+ msgs = [{'role': 'user', 'content': question}]
15
+
16
+ res = model.chat(
17
+ image=image,
18
+ msgs=msgs,
19
+ tokenizer=tokenizer,
20
+ sampling=True, # if sampling=False, beam_search will be used by default
21
+ temperature=0.7,
22
+ # system_prompt='' # pass system_prompt if needed
23
+ )
24
+ print(res)
25
+
26
+ ## if you want to use streaming, please make sure sampling=True and stream=True
27
+ ## the model.chat will return a generator
28
+ res = model.chat(
29
+ image=image,
30
+ msgs=msgs,
31
+ tokenizer=tokenizer,
32
+ sampling=True,
33
+ temperature=0.7,
34
+ stream=True
35
+ )
36
+
37
+ generated_text = ""
38
+ for new_text in res:
39
+ generated_text += new_text
40
+ print(new_text, flush=True, end='')