// NOTE(review): removed non-code page artifacts that preceded this file
// ("Spaces: Running", file-size line, commit hash, line-number gutter) —
// they were scrape residue and not valid TypeScript.
import { generateSeed, getValidNumber } from "@aitube/clap"
import { getClusterMachine, token } from "./cluster"
export async function render(request: {
prompt?: string
seed?: number
width?: number
height?: number
nbFrames?: number
nbFPS?: number
nbSteps?: number
debug?: boolean
}): Promise<string> {
const prompt = request.prompt || ""
if (!prompt) {
throw new Error(`missing prompt`)
}
const debug = !!request.debug
const seed = request?.seed || generateSeed()
// see https://huggingface.co/spaces/jbilcke-hf/ai-tube-model-animatediff-lightning/blob/main/app.py#L15-L18
const baseModel = "epiCRealism"
// the motion LoRA - could be useful one day
const motion = ""
// can be 1, 2, 4 or 8
// but values below 4 look bad
const nbSteps = getValidNumber(request.nbSteps, 1, 8, 4)
const width = getValidNumber(request.width, 256, 1024, 512)
const height = getValidNumber(request.height, 256, 1024, 288)
const nbFrames = getValidNumber(request.nbFrames, 10, 120, 10)
const nbFPS = getValidNumber(request.nbFPS, 10, 120, 10)
// by default AnimateDiff generates about 2 seconds of video at 10 fps
// the Gradio API now has some code to optional fix that using FFmpeg,
// but this will add some delay overhead, so use with care!
const durationInSec = Math.round(nbFrames / nbFPS)
const framesPerSec = nbFPS
const machine = await getClusterMachine()
try {
if (debug) {
console.log(`calling AnimateDiff Lightning API with params (some are hidden):`, {
baseModel,
motion,
nbSteps,
width,
height,
nbFrames,
nbFPS,
durationInSec,
framesPerSec,
})
}
const res = await fetch(machine.url + (machine.url.endsWith("/") ? "" : "/") + "api/predict", {
method: "POST",
headers: {
"Content-Type": "application/json",
// Authorization: `Bearer ${token}`,
},
body: JSON.stringify({
fn_index: 0, // <- important! it is currently 4, not 1!
data: [
token,
prompt,
baseModel,
width,
height,
motion,
nbSteps,
durationInSec,
framesPerSec,
],
}),
// necessary since we are using the fetch() provided by NextJS
cache: "no-store",
// we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
// next: { revalidate: 1 }
})
// console.log("res:", res)
const { data } = await res.json()
// console.log("data:", data)
// Recommendation: handle errors
if (res.status !== 200 || !Array.isArray(data)) {
// This will activate the closest `error.js` Error Boundary
throw new Error(`Failed to fetch data (status: ${res.status})`)
}
// console.log("data:", data.slice(0, 50))
const base64Content = (data?.[0] || "") as string
if (!base64Content) {
throw new Error(`invalid response (no content)`)
}
// this API already emits a data-uri with a content type
// addBase64HeaderToMp4(base64Content)
return base64Content
} catch (err) {
if (debug) {
console.error(`failed to call the AnimateDiff Lightning API:`)
console.error(err)
}
throw err
} finally {
// important: we need to free up the machine!
machine.busy = false
}
} |