# LTX-Video-0.9.1-HFIE / example.py

import requests
import base64
# Important: the NVIDIA L40S only supports small resolutions, short clips, and no post-processing.
# If you want those features, you might need to use the NVIDIA A100.
# Use your own Inference Endpoint URL
API_URL = "https://<use your own Inference Endpoint here>.endpoints.huggingface.cloud"
# Use your own API token
API_TOKEN = "hf_<replace by your own Hugging Face token>"
def query(payload):
    response = requests.post(API_URL, headers={
        "Accept": "application/json",
        "Authorization": f"Bearer {API_TOKEN}",
        "Content-Type": "application/json"
    }, json=payload)
    return response.json()
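
# Note: response.json() assumes the endpoint returned a JSON body. On an
# error status, the body is typically a JSON error payload, which the error
# path in save_video() below will print. To fail fast on transport errors
# instead, you could call response.raise_for_status() before .json().
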
def save_video(json_response):
    try:
        # Extract the video data URI from the response
        video_data_uri = json_response["video"]
    except KeyError:
        # No "video" key: the endpoint returned an error payload instead
        message = str(json_response)
        print(message)
        raise ValueError(message)
    # Remove the data URI prefix to get just the base64 data
    # Assumes a format like "data:video/mp4;base64,<actual_base64_data>"
    base64_data = video_data_uri.split(",", 1)[1]
    # Decode the base64 data
    video_data = base64.b64decode(base64_data)
    # Write the binary data to an MP4 file
    with open("video.mp4", "wb") as f:
        f.write(video_data)

# Make the API call
output = query({
    "inputs": {
        "prompt": "Portrait photo, selfie of a beautiful young caucasian woman called Charlotte, wearing a pastel-blue hoodie. She is livestreaming from NYC streets. She looks straight into the camera, looking serious, and she talks. The camera is fixed, static, a medium-shot centered on her face. 4K webcam footage. Intricate details, super resolution, sharp image, award winning."
    },
    "parameters": {
        # ------------------- settings for LTX-Video -----------------------
        # for a vertical video look
        "width": 480,
        "height": 768,
        # LTX-Video requires a frame number divisible by 8, plus one frame
        # note: glitches might appear if you use more than 168 frames
        "num_frames": (8 * 14) + 1,
        # 30 steps seem to be enough for most cases; use 50 for best quality
        # note: a large number of steps (> 30) might create some overexposure and saturation
        "num_inference_steps": 40,
        # values between 3.0 and 4.0 work well
        "guidance_scale": 3.5,
        # "seed": -1,
        # ------------------- settings for Varnish -----------------------
        # This will double the number of frames.
        # You can activate this if you want:
        # - a slow-motion effect (in that case use double_num_frames=True and fps=24, 25 or 30)
        # - an HD soap / video game effect (in that case use double_num_frames=True and fps=60)
        "double_num_frames": True,
        # controls the number of frames per second
        # use this in combination with num_frames and double_num_frames to control the duration and "feel" of your video
        "fps": 60, # typical values are: 24, 25, 30, 60
        # upscale the video using Real-ESRGAN.
        # This upscaling algorithm is relatively fast,
        # but might create an uncanny "3D render" or "drawing" effect.
        "super_resolution": True,
        # For cosmetic purposes and to get a "cinematic" feel, you can optionally add some film grain.
        # Film grain is not recommended if it doesn't match your theme (it works best for black & white or retro looks),
        # and adding more than 12% will start to negatively impact file size (video codecs aren't great at compressing film grain).
        # 0% = no grain
        # 10% = a bit of grain
        "grain_amount": 10, # value between 0-100
        # the following parameters are a work in progress
        "enable_audio": False,
        #"audio_prompt": "voices, voice, talking, speaking, speech",
        #"audio_negative_prompt": "",
    }
})
# Save the video
save_video(output)
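
# Optional helper (a sketch, not part of the endpoint API above): pick the
# nearest valid LTX-Video frame count (a multiple of 8, plus one) for a
# target clip duration, accounting for Varnish's optional frame doubling.
def frames_for_duration(seconds: float, fps: int = 60, doubled: bool = True) -> int:
    # Frames the model must generate: frames needed at the output fps,
    # halved if Varnish doubles them afterwards
    raw = seconds * fps / (2 if doubled else 1)
    # Round to the nearest multiple of 8, then add the extra frame
    n = max(1, round(raw / 8))
    return 8 * n + 1

# For example, frames_for_duration(3.8, fps=60, doubled=True) returns 113,
# matching the num_frames used above.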