xzerus committed on
Commit 0e6e727 · verified · 1 Parent(s): 3107743

Update app.py

Files changed (1)
  1. app.py +59 -43
app.py CHANGED
@@ -7,94 +7,110 @@ import gradio as gr
 from gradio import FileData
 import time
 import spaces
+import os
+
+# Load Hugging Face token from environment variables
+hf_token = os.getenv("HF_AUTH_TOKEN")
+if not hf_token:
+    raise ValueError("Hugging Face token not found. Set HF_AUTH_TOKEN in your Space settings.")
+
+# Model checkpoint
 ckpt = "meta-llama/Llama-3.2-11B-Vision-Instruct"
-model = MllamaForConditionalGeneration.from_pretrained(ckpt,
-    torch_dtype=torch.bfloat16).to("cuda")
-processor = AutoProcessor.from_pretrained(ckpt)
+
+# Load model and processor with authentication
+model = MllamaForConditionalGeneration.from_pretrained(
+    ckpt,
+    torch_dtype=torch.bfloat16,
+    token=hf_token
+).to("cuda")
+
+processor = AutoProcessor.from_pretrained(ckpt, token=hf_token)
 
 
 @spaces.GPU
 def bot_streaming(message, history, max_new_tokens=250):
-
     txt = message["text"]
     ext_buffer = f"{txt}"
 
-    messages= []
+    messages = []
     images = []
 
-
+    # Process conversation history
     for i, msg in enumerate(history):
         if isinstance(msg[0], tuple):
             messages.append({"role": "user", "content": [{"type": "text", "text": history[i+1][0]}, {"type": "image"}]})
             messages.append({"role": "assistant", "content": [{"type": "text", "text": history[i+1][1]}]})
             images.append(Image.open(msg[0][0]).convert("RGB"))
         elif isinstance(history[i-1], tuple) and isinstance(msg[0], str):
-            # messages are already handled
+            # Messages are already handled
             pass
-        elif isinstance(history[i-1][0], str) and isinstance(msg[0], str): # text only turn
+        elif isinstance(history[i-1][0], str) and isinstance(msg[0], str):  # Text-only turn
             messages.append({"role": "user", "content": [{"type": "text", "text": msg[0]}]})
             messages.append({"role": "assistant", "content": [{"type": "text", "text": msg[1]}]})
 
-    # add current message
+    # Add current message
     if len(message["files"]) == 1:
-
-        if isinstance(message["files"][0], str): # examples
+        if isinstance(message["files"][0], str):  # Examples
            image = Image.open(message["files"][0]).convert("RGB")
-        else: # regular input
+        else:  # Regular input
            image = Image.open(message["files"][0]["path"]).convert("RGB")
         images.append(image)
         messages.append({"role": "user", "content": [{"type": "text", "text": txt}, {"type": "image"}]})
     else:
         messages.append({"role": "user", "content": [{"type": "text", "text": txt}]})
 
-
+    # Prepare inputs
     texts = processor.apply_chat_template(messages, add_generation_prompt=True)
-
     if images == []:
         inputs = processor(text=texts, return_tensors="pt").to("cuda")
     else:
         inputs = processor(text=texts, images=images, return_tensors="pt").to("cuda")
-    streamer = TextIteratorStreamer(processor, skip_special_tokens=True, skip_prompt=True)
 
+    streamer = TextIteratorStreamer(processor, skip_special_tokens=True, skip_prompt=True)
     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens)
     generated_text = ""
 
+    # Stream generation in a separate thread
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
     thread.start()
     buffer = ""
 
     for new_text in streamer:
         buffer += new_text
-        generated_text_without_prompt = buffer
         time.sleep(0.01)
         yield buffer
 
 
-demo = gr.ChatInterface(fn=bot_streaming, title="Multimodal Llama", examples=[
-    [{"text": "Which era does this piece belong to? Give details about the era.", "files":["./examples/rococo.jpg"]},
-    200],
-    [{"text": "Where do the droughts happen according to this diagram?", "files":["./examples/weather_events.png"]},
-    250],
-    [{"text": "What happens when you take out white cat from this chain?", "files":["./examples/ai2d_test.jpg"]},
-    250],
-    [{"text": "How long does it take from invoice date to due date? Be short and concise.", "files":["./examples/invoice.png"]},
-    250],
-    [{"text": "Where to find this monument? Can you give me other recommendations around the area?", "files":["./examples/wat_arun.jpg"]},
-    250],
+# Gradio Interface
+demo = gr.ChatInterface(
+    fn=bot_streaming,
+    title="Multimodal Llama",
+    examples=[
+        [{"text": "Which era does this piece belong to? Give details about the era.", "files":["./examples/rococo.jpg"]}, 200],
+        [{"text": "Where do the droughts happen according to this diagram?", "files":["./examples/weather_events.png"]}, 250],
+        [{"text": "What happens when you take out white cat from this chain?", "files":["./examples/ai2d_test.jpg"]}, 250],
+        [{"text": "How long does it take from invoice date to due date? Be short and concise.", "files":["./examples/invoice.png"]}, 250],
+        [{"text": "Where to find this monument? Can you give me other recommendations around the area?", "files":["./examples/wat_arun.jpg"]}, 250],
     ],
-    textbox=gr.MultimodalTextbox(),
-    additional_inputs = [gr.Slider(
-        minimum=10,
-        maximum=500,
-        value=250,
-        step=10,
-        label="Maximum number of new tokens to generate",
-    )
-    ],
-    cache_examples=False,
-    description="Try Multimodal Llama by Meta with transformers in this demo. Upload an image, and start chatting about it, or simply try one of the examples below. To learn more about Llama Vision, visit [our blog post](https://huggingface.co/blog/llama32). ",
-    stop_btn="Stop Generation",
-    fill_height=True,
-    multimodal=True)
-
-demo.launch(debug=True)
+    textbox=gr.MultimodalTextbox(),
+    additional_inputs=[
+        gr.Slider(
+            minimum=10,
+            maximum=500,
+            value=250,
+            step=10,
+            label="Maximum number of new tokens to generate",
+        )
+    ],
+    cache_examples=False,
+    description=(
+        "Try Multimodal Llama by Meta with transformers in this demo. "
+        "Upload an image, and start chatting about it, or simply try one of the examples below. "
+        "To learn more about Llama Vision, visit [our blog post](https://huggingface.co/blog/llama32)."
+    ),
+    stop_btn="Stop Generation",
+    fill_height=True,
+    multimodal=True
+)
+
+demo.launch(debug=True)
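
After this change, app.py raises a ValueError at startup unless the HF_AUTH_TOKEN environment variable is set, since the Llama 3.2 Vision checkpoint is gated. A minimal sketch of providing that token for a local run follows; the HF_AUTH_TOKEN name comes from the diff above, while the optional huggingface_hub.login call is an assumption for convenience and is not part of the committed code.

    # Set the token before launching the app, e.g. `export HF_AUTH_TOKEN=hf_xxx` in the shell,
    # or add HF_AUTH_TOKEN as a secret in the Space settings.
    import os
    from huggingface_hub import login

    token = os.getenv("HF_AUTH_TOKEN")
    if not token:
        raise SystemExit("Set HF_AUTH_TOKEN before launching the demo.")

    # Optional (assumption, not in the commit): authenticate the whole process so
    # later from_pretrained() calls could also omit an explicit token= argument.
    login(token=token)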