seawolf2357 committed
Commit
265c54d
·
verified ·
1 Parent(s): aa7be7c

Update web.py

Files changed (1)
  1. web.py +28 -56
web.py CHANGED
@@ -1,61 +1,33 @@
  import gradio as gr
- from transformers import PaliGemmaForConditionalGeneration, PaliGemmaProcessor
- import torch
- import re
-
- # Load the model and set up preprocessing
- model = PaliGemmaForConditionalGeneration.from_pretrained("gokaygokay/sd3-long-captioner").to("cpu").eval()
- processor = PaliGemmaProcessor.from_pretrained("gokaygokay/sd3-long-captioner")
-
- def modify_caption(caption: str) -> str:
-     prefix_substrings = [
-         ('captured from ', ''),
-         ('captured at ', '')
-     ]
-     pattern = '|'.join([re.escape(opening) for opening, _ in prefix_substrings])
-     replacers = {opening: replacer for opening, replacer in prefix_substrings}
-
-     def replace_fn(match):
-         return replacers[match.group(0)]
-
-     return re.sub(pattern, replace_fn, caption, count=1, flags=re.IGNORECASE)
-
- def create_captions_rich(image):
-     prompt = "caption en"
-     image_tensor = processor(images=image, return_tensors="pt").pixel_values.to("cpu")
-     image_tensor = (image_tensor * 255).type(torch.uint8)
-     model_inputs = processor(text=prompt, images=image_tensor, return_tensors="pt").to("cpu")
-     input_len = model_inputs["input_ids"].shape[-1]
-
-     with torch.no_grad():
-         generation = model.generate(**model_inputs, max_new_tokens=256, do_sample=False)
-         generation = generation[0][input_len:]
-         decoded = processor.decode(generation, skip_special_tokens=True)
-         modified_caption = modify_caption(decoded)
-         return modified_caption
-
- css = """
- #mkd {
-   height: 500px;
-   overflow: auto;
-   border: 1px solid #ccc;
- }
- """
-
- with gr.Blocks(css=css) as demo:
-     gr.HTML("<h1><center>PaliGemma Fine-tuned for Long Captioning<center><h1>")
-     with gr.Tab(label="PaliGemma Long Captioner"):
-         with gr.Row():
-             with gr.Column():
-                 input_img = gr.Image(label="Input Picture")
-                 submit_btn = gr.Button(value="Submit")
-                 output = gr.Text(label="Caption")
-
-         submit_btn.click(create_captions_rich, [input_img], [output])
-
- # Change the port and adjust the launch call
- demo.launch(
-     server_name="0.0.0.0",
-     server_port=int(os.getenv("GRADIO_SERVER_PORT", 7861)),
-     inbrowser=True
- )
+ import datetime
+ import asyncio
+
+ def update_live_message():
+     """Return the current time together with a 'live' marker."""
+     current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+     return f"{current_time} - live"
+
+ async def periodic_update(interval=60):
+     """Yield a fresh live message every `interval` seconds (async generator streamed by Gradio)."""
+     while True:
+         yield update_live_message()
+         await asyncio.sleep(interval)
+
+ def run_gradio():
+     """Set up and launch the Gradio web interface."""
+     with gr.Blocks() as demo:
+         gr.Markdown("## Live Server Output")
+         live_block = gr.Textbox(label="Live Output", value="Starting...", elem_id="live_output")
+         # Start the asynchronous update loop as soon as the page loads;
+         # each yielded value is pushed into the textbox.
+         demo.load(periodic_update, inputs=None, outputs=live_block)
+
+     demo.launch(server_name="0.0.0.0", server_port=7860, inbrowser=True)
+
+ if __name__ == "__main__":
+     run_gradio()
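
For comparison, a minimal sketch of the same live textbox driven by polling rather than a streaming generator, assuming a Gradio release that supports the every= parameter on Blocks.load; the names live_message and live_box are illustrative only:

import datetime
import gradio as gr

def live_message():
    # Current timestamp plus the "live" marker, matching the format used in web.py.
    return f"{datetime.datetime.now():%Y-%m-%d %H:%M:%S} - live"

with gr.Blocks() as demo:
    gr.Markdown("## Live Server Output")
    live_box = gr.Textbox(label="Live Output", value="Starting...")
    # Re-run live_message every 60 seconds while a browser tab stays connected
    # (every= is an assumption about the installed Gradio version).
    demo.load(live_message, inputs=None, outputs=live_box, every=60)

demo.launch(server_name="0.0.0.0", server_port=7860)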