zamal committed on
Commit ab9eef9
1 Parent(s): 2a7d76a

Update app.py

Files changed (1)
  1. app.py +13 -10
app.py CHANGED
@@ -3,11 +3,14 @@ from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
  from PIL import Image
  import requests
  from io import BytesIO
- import subprocess
- import sys
+
  # Load the model and processor
  repo_name = "cyan2k/molmo-7B-O-bnb-4bit"
- arguments = {"device_map": "auto", "torch_dtype": "auto", "trust_remote_code": True}
+ arguments = {
+     "device_map": "auto",        # Let Accelerate pick device placement automatically
+     "torch_dtype": "auto",       # Use the dtype stored in the checkpoint
+     "trust_remote_code": True    # Allow the repo's custom Molmo code to load
+ }

  # Load the processor and model
  processor = AutoProcessor.from_pretrained(repo_name, **arguments)
@@ -17,23 +20,23 @@ def describe_image(image):
      # Process the uploaded image
      inputs = processor.process(
          images=[image],
-         text="Describe this image."
+         text="Describe this image in great detail without missing any piece of information"
      )
-
+
      # Move inputs to model device
      inputs = {k: v.to(model.device).unsqueeze(0) for k, v in inputs.items()}
-
+
      # Generate output
      output = model.generate_from_batch(
          inputs,
-         GenerationConfig(max_new_tokens=200, stop_strings="<|endoftext|>"),
+         GenerationConfig(max_new_tokens=1024, stop_strings="<|endoftext|>"),
          tokenizer=processor.tokenizer,
      )
-
+
      # Decode the generated tokens
      generated_tokens = output[0, inputs["input_ids"].size(1):]
      generated_text = processor.tokenizer.decode(generated_tokens, skip_special_tokens=True)
-
+
      return generated_text


@@ -50,7 +53,7 @@ def gradio_app():
          title="Image Description App",
          description="Upload an image and get a detailed description using the Molmo 7B model"
      )
-
+
      # Launch the interface
      interface.launch()
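
For context, here is a minimal sketch of how app.py plausibly reads after this commit, assembled from the fragments visible in the hunks above. The gradio import, the AutoModelForCausalLM.from_pretrained call, the gr.Interface(...) construction, and the __main__ guard fall outside the hunks, so those parts are assumptions rather than code taken from the commit.

import gradio as gr                                       # assumed: import not shown in the diff
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
from PIL import Image
import requests
from io import BytesIO

# Load the model and processor
repo_name = "cyan2k/molmo-7B-O-bnb-4bit"
arguments = {
    "device_map": "auto",        # let Accelerate pick device placement
    "torch_dtype": "auto",       # use the dtype stored in the checkpoint
    "trust_remote_code": True    # the repo ships custom Molmo modelling code
}

# Load the processor and model
processor = AutoProcessor.from_pretrained(repo_name, **arguments)
model = AutoModelForCausalLM.from_pretrained(repo_name, **arguments)  # assumed: line not visible in the hunks


def describe_image(image):
    # Process the uploaded image
    inputs = processor.process(
        images=[image],
        text="Describe this image in great detail without missing any piece of information"
    )

    # Move inputs to the model's device and add a batch dimension
    inputs = {k: v.to(model.device).unsqueeze(0) for k, v in inputs.items()}

    # Generate output
    output = model.generate_from_batch(
        inputs,
        GenerationConfig(max_new_tokens=1024, stop_strings="<|endoftext|>"),
        tokenizer=processor.tokenizer,
    )

    # Decode only the newly generated tokens, skipping the prompt
    generated_tokens = output[0, inputs["input_ids"].size(1):]
    generated_text = processor.tokenizer.decode(generated_tokens, skip_special_tokens=True)

    return generated_text


def gradio_app():
    # assumed: the Interface construction sits outside the hunks shown above
    interface = gr.Interface(
        fn=describe_image,
        inputs=gr.Image(type="pil"),
        outputs="text",
        title="Image Description App",
        description="Upload an image and get a detailed description using the Molmo 7B model"
    )

    # Launch the interface
    interface.launch()


if __name__ == "__main__":   # assumed entry point
    gradio_app()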
 
 
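A quick way to exercise describe_image without launching the Gradio UI, reusing the requests and BytesIO imports app.py already has; the URL below is only a placeholder.

# Smoke test for describe_image outside Gradio; swap in any reachable image URL.
resp = requests.get("https://example.com/sample.jpg", timeout=30)   # placeholder URL
img = Image.open(BytesIO(resp.content)).convert("RGB")
print(describe_image(img))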