Fix code typo for Simple and Video Inference examples

#9
Files changed (1)
  1. README.md +2 -2
README.md CHANGED
@@ -106,7 +106,7 @@ inputs = processor.apply_chat_template(
     tokenize=True,
     return_dict=True,
     return_tensors="pt",
-).to(model.device)
+).to(model.device, dtype=torch.bfloat16)
 
 generated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64)
 generated_texts = processor.batch_decode(
@@ -137,7 +137,7 @@ inputs = processor.apply_chat_template(
     tokenize=True,
     return_dict=True,
     return_tensors="pt",
-).to(model.device)
+).to(model.device, dtype=torch.bfloat16)
 
 generated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64)
 generated_texts = processor.batch_decode(
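
For context, below is a minimal, self-contained sketch of what the Simple Inference example reads like after this change. Only the `apply_chat_template(...).to(model.device, dtype=torch.bfloat16)`, `generate`, and `batch_decode` calls come from the diff; the checkpoint ID, chat message, image URL, and model-loading arguments are illustrative assumptions, not quotes from the README. The cast matters when the model weights are loaded in bfloat16: feeding float32 pixel values into a bfloat16 model typically fails with a dtype mismatch.

```python
import torch
from transformers import AutoModelForImageTextToText, AutoProcessor

# Assumed checkpoint for illustration only; the README's actual model ID may differ.
model_id = "HuggingFaceTB/SmolVLM2-2.2B-Instruct"

processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForImageTextToText.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # weights in bfloat16, hence the input cast below
    device_map="auto",
)

# Illustrative chat message; the image URL is a placeholder.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://example.com/sample.jpg"},
            {"type": "text", "text": "Describe this image."},
        ],
    }
]

inputs = processor.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device, dtype=torch.bfloat16)
# The fix: floating-point tensors (e.g. pixel_values) are cast to bfloat16 to match
# the model weights; integer tensors such as input_ids only move to the device.

generated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64)
generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
print(generated_texts[0])
```

The Video Inference example fixed in the second hunk follows the same pattern; only the message content differs (a video entry in place of the image item), so the identical cast applies there as well.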