Update app.py
app.py CHANGED
@@ -1,7 +1,6 @@
 import discord
 import logging
 import os
-import asyncio
 import subprocess
 from transformers import PaliGemmaForConditionalGeneration, PaliGemmaProcessor
 import torch
@@ -20,8 +19,8 @@ intents.messages = True
 intents.guilds = True
 intents.guild_messages = True
 
-# PaliGemma model setup
-model = PaliGemmaForConditionalGeneration.from_pretrained("gokaygokay/sd3-long-captioner").to("cuda")
+# PaliGemma model setup (CPU mode)
+model = PaliGemmaForConditionalGeneration.from_pretrained("gokaygokay/sd3-long-captioner").to("cpu").eval()
 processor = PaliGemmaProcessor.from_pretrained("gokaygokay/sd3-long-captioner")
 
 def modify_caption(caption: str) -> str:
@@ -39,8 +38,8 @@ def modify_caption(caption: str) -> str:
 
 def create_captions_rich(image: Image.Image) -> str:
     prompt = "caption en"
-    image_tensor = processor(image, return_tensors="pt").pixel_values.to("cuda")
-    model_inputs = processor(text=prompt, images=image_tensor, return_tensors="pt").to("cuda")
+    image_tensor = processor(image, return_tensors="pt").pixel_values.to("cpu")
+    model_inputs = processor(text=prompt, images=image_tensor, return_tensors="pt").to("cpu")
     input_len = model_inputs["input_ids"].shape[-1]
 
     with torch.inference_mode():
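For context, the changed function corresponds to the standard transformers PaliGemma captioning loop, just pinned to CPU. Below is a minimal standalone sketch of that loop, assuming the usual generate/decode API; the file name sample.jpg is a hypothetical test image, and max_new_tokens=256 is an illustrative choice, not a value from this commit.

# Minimal CPU-only captioning sketch with the same model.
# Assumes the standard transformers PaliGemma API; "sample.jpg" is hypothetical.
import torch
from PIL import Image
from transformers import PaliGemmaForConditionalGeneration, PaliGemmaProcessor

model_id = "gokaygokay/sd3-long-captioner"
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id).to("cpu").eval()
processor = PaliGemmaProcessor.from_pretrained(model_id)

def caption_image(image: Image.Image) -> str:
    prompt = "caption en"
    # One processor call builds both the text and image inputs; preprocessing
    # the image separately first (as the diff above does) is not required.
    inputs = processor(text=prompt, images=image, return_tensors="pt").to("cpu")
    input_len = inputs["input_ids"].shape[-1]
    with torch.inference_mode():
        output = model.generate(**inputs, max_new_tokens=256, do_sample=False)
    # Drop the prompt tokens and decode only the generated continuation.
    return processor.decode(output[0][input_len:], skip_special_tokens=True)

print(caption_image(Image.open("sample.jpg").convert("RGB")))

Note that .to("cpu").eval() in the commit disables dropout and keeps the model off the GPU, which trades speed for running on Spaces hardware without CUDA; dropping the unused import asyncio is a tidy-up alongside that change.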