"use server" import { Story, StoryLine, TTSVoice } from "@/types" const instance = `${process.env.AI_BEDTIME_STORY_API_GRADIO_URL || ""}` const secretToken = `${process.env.AI_BEDTIME_STORY_API_SECRET_TOKEN || ""}` export async function generateStoryLines(prompt: string, voice: TTSVoice): Promise<StoryLine[]> { if (!prompt?.length) { throw new Error(`prompt is too short!`) } const cropped = prompt.slice(0, 30) console.log(`user requested "${cropped}${cropped !== prompt ? "..." : ""}"`) // positivePrompt = filterOutBadWords(positivePrompt) const res = await fetch(instance + (instance.endsWith("/") ? "" : "/") + "api/predict", { method: "POST", headers: { "Content-Type": "application/json", // Authorization: `Bearer ${token}`, }, body: JSON.stringify({ fn_index: 0, // <- important! data: [ secretToken, prompt, voice, ], }), cache: "no-store", // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache) // next: { revalidate: 1 } }) const rawJson = await res.json() const data = rawJson.data as StoryLine[][] const stories = data?.[0] || [] if (res.status !== 200) { throw new Error('Failed to fetch data') } return stories.map(line => ({ text: line.text.replaceAll(" .", ".").replaceAll(" ?", "?").replaceAll(" !", "!").trim(), audio: line.audio })) } |