Julian Bilcke committed

Commit: 80dd4ba · 1 parent: 72f62be

sorry folks - that's a wrap
src/providers/video-generation/generateVideoWithAnimateDiffLightning.mts
CHANGED
@@ -5,12 +5,14 @@ import { getValidNumber } from "../../utils/validators/getValidNumber.mts"
 
 const accessToken = `${process.env.VC_MICROSERVICE_SECRET_TOKEN || ""}`
 
+// @deprecated This endpoint has been decommissioned. Please use the AiTube API instead (check aitube.at/api/v1/render)
 export const generateVideoWithAnimateDiffLightning = async (
   request: RenderRequest,
   response: RenderedScene,
 ): Promise<RenderedScene> => {
 
-
+  throw new Error(`This endpoint has been decommissioned. Please use the AiTube API instead (check aitube.at/api/v1/render)`)
+  const debug = true
 
 
   const actualFunction = async (): Promise<RenderedScene> => {
@@ -32,17 +34,19 @@ export const generateVideoWithAnimateDiffLightning = async (
 
   // can be 1, 2, 4 or 8
   // but values below 4 look bad
-  const nbSteps = getValidNumber(request.nbSteps, 1, 8, 4)
-  const width = getValidNumber(request.width, 256, 1024, 512)
-  const height = getValidNumber(request.height, 256, 1024, 256)
+  const nbSteps = 4// getValidNumber(request.nbSteps, 1, 8, 4)
+  const width = 512 // getValidNumber(request.width, 256, 1024, 512)
+  const height = 288 // getValidNumber(request.height, 256, 1024, 256)
 
-  const nbFrames = getValidNumber(request.nbFrames, 10, 60, 10)
-  const nbFPS = getValidNumber(request.nbFPS, 10, 60, 10)
+  const nbFrames = 16 // getValidNumber(request.nbFrames, 10, 60, 10)
+  const nbFPS = 10 // getValidNumber(request.nbFPS, 10, 60, 10)
 
   // by default AnimateDiff generates about 2 seconds of video at 10 fps
   // the Gradio API now has some code to optional fix that using FFmpeg,
   // but this will add some delay overhead, so use with care!
-  const durationInSec = Math.round(nbFrames / nbFPS)
+  const durationInSec = nbFrames / nbFPS
+  // no, we need decimals
+  // const durationInSec = Math.round(nbFrames / nbFPS)
   const framesPerSec = nbFPS
 
   try {
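A quick numeric check of the durationInSec change above: with the hard-coded nbFrames = 16 and nbFPS = 10 from this commit, the removed rounded formula would report 2 seconds while the clip is really 1.6 seconds long, which is why the plain division is kept. A minimal standalone TypeScript sketch (not part of the repo, values copied from the diff):

// standalone illustration of the duration change, using the values from the diff above
const nbFrames = 16
const nbFPS = 10

// removed behaviour: Math.round(1.6) === 2, overstating the clip length
const roundedDurationInSec = Math.round(nbFrames / nbFPS)

// committed behaviour: plain division keeps the decimals (1.6 seconds)
const durationInSec = nbFrames / nbFPS

console.log({ roundedDurationInSec, durationInSec }) // { roundedDurationInSec: 2, durationInSec: 1.6 }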
src/providers/video-generation/generateVideoWithAnimateLCM.mts
DELETED
@@ -1,169 +0,0 @@
-import { VideoGenerationParams } from "../../types.mts"
-import { generateSeed } from "../../utils/misc/generateSeed.mts"
-import { tryApiCalls } from "../../utils/misc/tryApiCall.mts"
-import { addBase64HeaderToMp4 } from "./addBase64HeaderToMp4.mts"
-
-import { getNegativePrompt, getPositivePrompt } from "./defaultPrompts.mts"
-
-// const gradioApi = `${process.env.AI_TUBE_MODEL_ANIMATELCM_GRADIO_URL || ""}`
-const gradioApi = "https://jbilcke-hf-ai-tube-model-animatelcm.hf.space"
-const accessToken = `${process.env.VC_MICROSERVICE_SECRET_TOKEN || ""}`
-
-export const generateVideoWithAnimateLCM = async ({
-  prompt,
-  orientation,
-  projection,
-  width,
-  height,
-  style = "",
-  nbSteps = 4,
-  nbFrames = 20,
-  seed,
-  debug,
-}: VideoGenerationParams): Promise<string> => {
-
-
-  const actualFunction = async () => {
-
-    // seed = seed || generateSeed()
-    seed = generateSeed()
-
-    // label="Sampling steps", value=6, minimum=1, maximum=25, step=1
-    // we wanna keep this one low (this is LCM after all)
-    // but values like 10 also give nice results
-    const nbSteps = 6 // 25
-
-
-    // label="LoRA alpha", value=0.8, minimum=0, maximum=2
-    const loraAlpha = 0.8 // lora_alpha_slider,
-
-
-    // label="LCM LoRA alpha", value=0.8, minimum=0.0, maximum=1.0
-    const lcmLoraAlpha = 0.8 // spatial_lora_slider,
-
-    // label="Width", value=512, minimum=256, maximum=1024, step=64)
-
-    // label="Animation length", value=16, minimum=12, maximum=20, step=1)
-    const nbFrames = 16
-
-    // label="Height", value=512, minimum=256, maximum=1024, step=64)
-
-    // label="CFG Scale", value=1.5, minimum=1, maximum=2)
-    const cfgScale = 1.5
-
-    // pimp the prompt
-
-    /*
-    // we put it at the start, to make sure it is always part of the prompt
-    const positivePrompt = getPositivePrompt([
-      style,
-      prompt
-    ].map(x => x.trim()).filter(x => x).join(", "))
-
-    const negativePrompt = getNegativePrompt(negPrompt)
-    */
-    const positivePrompt = "Close-up of the dancing duo as they take a bow. The orange tabby cat wears a sequined top hat, and the Siamese cat hugs their shoulders, also wearing a sequined outfit"
-    const negativePrompt = ""
-
-    try {
-      if (debug) {
-        console.log(`calling AnimateLCM API with params (some are hidden):`, {
-          loraAlpha,
-          lcmLoraAlpha,
-          positivePrompt,
-          negativePrompt,
-          width,
-          height,
-          nbSteps,
-          nbFrames,
-          cfgScale,
-          seed,
-        })
-      }
-
-      const res = await fetch(gradioApi + (gradioApi.endsWith("/") ? "" : "/") + "api/predict", {
-        method: "POST",
-        headers: {
-          "Content-Type": "application/json",
-          // Authorization: `Bearer ${token}`,
-        },
-        body: JSON.stringify({
-          fn_index: 4, // <- important! it is currently 4, not 1!
-          data: [
-            accessToken,
-
-            // label="LoRA alpha", value=0.8, minimum=0, maximum=2
-            loraAlpha, // lora_alpha_slider,
-
-
-            // label="LCM LoRA alpha", value=0.8, minimum=0.0, maximum=1.0
-            lcmLoraAlpha, // spatial_lora_slider,
-
-            //
-            positivePrompt, // prompt_textbox,
-
-            negativePrompt, // negative_prompt_textbox,
-
-            // this is the scheduler
-            // so.. LCM, it is
-            "LCM", // sampler_dropdown,
-
-            // label="Sampling steps", value=6, minimum=1, maximum=25, step=1
-            // we wanna keep this one low (this is LCM after all)
-            // but values like 10 also give nice results
-            nbSteps, // sample_step_slider,
-
-            // label="Width", value=512, minimum=256, maximum=1024, step=64)
-            width, // width_slider,
-
-            // label="Animation length", value=16, minimum=12, maximum=20, step=1)
-            nbFrames, // length_slider,
-
-            // label="Height", value=512, minimum=256, maximum=1024, step=64)
-            height, // height_slider,
-
-            // label="CFG Scale", value=1.5, minimum=1, maximum=2)
-            cfgScale, // cfg_scale_slider,
-
-            seed, // seed_textbox,
-          ],
-        }),
-        cache: "no-store",
-        // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
-        // next: { revalidate: 1 }
-      })
-
-      // console.log("res:", res)
-
-      const { data } = await res.json()
-
-      // console.log("data:", data)
-      // Recommendation: handle errors
-      if (res.status !== 200 || !Array.isArray(data)) {
-        // This will activate the closest `error.js` Error Boundary
-        throw new Error(`Failed to fetch data (status: ${res.status})`)
-      }
-      // console.log("data:", data.slice(0, 50))
-
-      const base64Content = (data?.[0] || "") as string
-
-      if (!base64Content) {
-        throw new Error(`invalid response (no content)`)
-      }
-
-      return addBase64HeaderToMp4(base64Content)
-    } catch (err) {
-      if (debug) {
-        console.error(`failed to call the AnimateLCM API:`)
-        console.error(err)
-      }
-      throw err
-    }
-  }
-
-  return tryApiCalls({
-    func: actualFunction,
-    debug,
-    failureMessage: "failed to call the AnimateLCM endpoint"
-  })
-}