"use server"

// TODO add a system to mark failed instances as "unavailable" for a couple of minutes

// console.log("process.env:", process.env)

import { generateSeed } from "@/lib/generateSeed";
import { getValidNumber } from "@/lib/getValidNumber";

// note: to reduce costs I use the small A10s (not the large)
// anyway, we will soon not need to use this cloud anymore
// since we will be able to leverage the Inference API
const instance = `${process.env.FAST_IMAGE_SERVER_API_GRADIO_URL || ""}`
const secretToken = `${process.env.FAST_IMAGE_SERVER_API_SECRET_TOKEN || ""}`

// console.log("DEBUG:", JSON.stringify({ instances, secretToken }, null, 2))

export async function generateImage(options: {
  positivePrompt: string;
  negativePrompt?: string;
  seed?: number;
  width?: number;
  height?: number;
  nbSteps?: number;
}): Promise<string> {
  // console.log("querying " + instance)
  const positivePrompt = options?.positivePrompt || ""
  if (!positivePrompt) {
    throw new Error("missing prompt")
  }

  // the negative prompt CAN be missing, since we use a trick
  // where we make the interface mandatory in the TS doc,
  // but browsers might send something partial
  const negativePrompt = options?.negativePrompt || ""

  // we treat 0 as meaning "random seed"
  const seed = (options?.seed ? options.seed : 0) || generateSeed()

  const width = getValidNumber(options?.width, 256, 1024, 512)
  const height = getValidNumber(options?.height, 256, 1024, 512)
  const nbSteps = getValidNumber(options?.nbSteps, 1, 8, 4)
  // console.log("SEED:", seed)

  const positive = [
    // oh well.. is it too late to move this to the bottom?
    "beautiful",
    // too opinionated, so let's remove it
    // "intricate details",
    positivePrompt,
    "award winning",
    "high resolution"
  ].filter(word => word)
  .join(", ")

  const negative = [
    negativePrompt,
    "watermark",
    "copyright",
    "blurry",
    // "artificial",
    // "cropped",
    "low quality",
    "ugly"
  ].filter(word => word)
  .join(", ")

  const res = await fetch(instance + (instance.endsWith("/") ? "" : "/") + "api/predict", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      // Authorization: `Bearer ${token}`,
    },
    body: JSON.stringify({
      fn_index: 0, // <- important!
      data: [
        positive, // string in 'Prompt' Textbox component
        negative, // string in 'Negative prompt' Textbox component
        seed, // number (numeric value between 0 and 2147483647) in 'Seed' Slider component
        width, // number (numeric value between 256 and 1024) in 'Width' Slider component
        height, // number (numeric value between 256 and 1024) in 'Height' Slider component
        0.0, // can be disabled for LCM SDXL
        nbSteps, // number (numeric value between 2 and 8) in 'Number of inference steps for base' Slider component
        secretToken
      ]
    }),
    cache: "no-store",
  })

  const { data } = await res.json()

  if (res.status !== 200 || !Array.isArray(data)) {
    // This will activate the closest `error.js` Error Boundary
    throw new Error(`Failed to fetch data (status: ${res.status})`)
  }

  if (!data[0]) {
    throw new Error(`the returned image was empty`)
  }

  return data[0] as string
}