RaushanTurganbay committed
Commit
3c6d41a
1 Parent(s): 437026d

update processor kwargs

Files changed (1):
  README.md +2 -2
README.md CHANGED
@@ -89,7 +89,7 @@ def read_video_pyav(container, indices):
     return np.stack([x.to_ndarray(format="rgb24") for x in frames])
 
 
-# define a chat histiry and use `apply_chat_template` to get correctly formatted prompt
+# define a chat history and use `apply_chat_template` to get correctly formatted prompt
 # Each value in "content" has to be a list of dicts with types ("text", "image", "video")
 conversation = [
     {
@@ -138,7 +138,7 @@ prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
 
 image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"
 raw_image = Image.open(requests.get(image_file, stream=True).raw)
-inputs_image = processor(prompt, images=raw_image, return_tensors='pt').to(0, torch.float16)
+inputs_image = processor(text=prompt, images=raw_image, return_tensors='pt').to(0, torch.float16)
 
 output = model.generate(**inputs_video, max_new_tokens=100, do_sample=False)
 print(processor.decode(output[0][2:], skip_special_tokens=True))
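For context on the kwargs change, below is a minimal, self-contained sketch of the image path with the prompt passed as an explicit `text=` keyword, as in the updated line. The checkpoint id `llava-hf/LLaVA-NeXT-Video-7B-hf`, the `AutoProcessor`/`LlavaNextVideoForConditionalGeneration` classes, and the conversation contents are illustrative assumptions, not part of this commit; substitute whatever this repository's README actually uses.

# Minimal sketch of the image path with the updated processor kwargs.
# Assumptions (not from this commit): the checkpoint id and the model/processor
# classes below; replace them with the ones this repository documents.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, LlavaNextVideoForConditionalGeneration

model_id = "llava-hf/LLaVA-NeXT-Video-7B-hf"  # hypothetical checkpoint, for illustration only
processor = AutoProcessor.from_pretrained(model_id)
model = LlavaNextVideoForConditionalGeneration.from_pretrained(
    model_id, torch_dtype=torch.float16
).to(0)

# Each value in "content" is a list of dicts with types ("text", "image", "video").
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What is shown in this image?"},
            {"type": "image"},
        ],
    },
]
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)

image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"
raw_image = Image.open(requests.get(image_file, stream=True).raw)

# The prompt is passed as an explicit `text=` keyword, matching the diff above.
inputs_image = processor(text=prompt, images=raw_image, return_tensors="pt").to(0, torch.float16)

output = model.generate(**inputs_image, max_new_tokens=100, do_sample=False)
print(processor.decode(output[0][2:], skip_special_tokens=True))

In this sketch, `model.generate(**inputs_image, ...)` consumes the same batch the processor produced for the image prompt; the video path would build its own `inputs_video` batch from sampled frames in the same way.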