mylessss committed on
Commit
b95cd6c
·
0 Parent(s):

initial commit

Browse files
Files changed (6) hide show
  1. .gitattributes +35 -0
  2. Dockerfile +16 -0
  3. README.md +13 -0
  4. app.py +248 -0
  5. development.md +8 -0
  6. requirements.txt +12 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.12
2
+ COPY --from=ghcr.io/astral-sh/uv:0.4.20 /uv /bin/uv
3
+
4
+ RUN useradd -m -u 1000 user
5
+ ENV PATH="/home/user/.local/bin:$PATH"
6
+ ENV UV_SYSTEM_PYTHON=1
7
+
8
+ WORKDIR /app
9
+
10
+ COPY --chown=user ./requirements.txt requirements.txt
11
+ RUN uv pip install -r requirements.txt
12
+
13
+ COPY --chown=user . /app
14
+ USER user
15
+
16
+ CMD ["marimo", "run", "app.py", "--host", "0.0.0.0", "--port", "7860"]
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: marimo text-to-image template
3
+ emoji: 🎨
4
+ colorFrom: blue
5
+ colorTo: green
6
+ sdk: docker
7
+ pinned: true
8
+ license: mit
9
+ short_description: Template for deploying a marimo text-to-image app to HF
10
+ ---
11
+
12
+ Check out marimo at <https://github.com/marimo-team/marimo>
13
+ Check out the configuration reference at <https://huggingface.co/docs/hub/spaces-config-reference>
app.py ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import marimo
2
+
3
+ __generated_with = "0.9.14"
4
+ app = marimo.App(width="medium")
5
+
6
+
7
@app.cell(hide_code=True)
def __():
    import random

    import marimo as mo
    import numpy as np
    import tqdm
    import transformers

    # Route tqdm's auto progress bars through the notebook widget so they
    # render inside marimo cells. NOTE(review): assumes tqdm.auto /
    # tqdm.notebook are already importable at this point (transformers,
    # imported above, appears to pull them in) — confirm.
    tqdm.auto.tqdm = tqdm.notebook.tqdm

    import torch
    from diffusers import DiffusionPipeline

    # Upper bounds for the seed slider and the width/height sliders.
    MAX_SEED = np.iinfo(np.int32).max
    MAX_IMAGE_SIZE = 1024

    model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
    return (
        DiffusionPipeline,
        MAX_IMAGE_SIZE,
        MAX_SEED,
        mo,
        model_repo_id,
        np,
        random,
        torch,
        tqdm,
        transformers,
    )
40
+
41
+
42
@app.cell
def __(mo, model_repo_id):
    # Page title showing which checkpoint the app is serving.
    mo.md(f"""# HuggingFace Text-to-Image: **{model_repo_id}**""")
    return
46
+
47
+
48
@app.cell(hide_code=True)
def __():
    # Canned prompts surfaced as one-click example buttons in the UI.
    examples = [
        "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
        "An astronaut riding a green horse",
        "A delicious ceviche cheesecake slice",
    ]
    return (examples,)
56
+
57
+
58
@app.cell
def __(
    DiffusionPipeline,
    MAX_SEED,
    mo,
    model_repo_id,
    random,
    torch,
):
    # Prefer the GPU when one is visible; fp16 halves memory there, while
    # CPU inference stays in full precision.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    torch_dtype = torch.float16 if device == "cuda" else torch.float32

    pipe = DiffusionPipeline.from_pretrained(
        model_repo_id, torch_dtype=torch_dtype
    ).to(device)

    def infer(
        prompt,
        negative_prompt,
        seed,
        randomize_seed,
        width,
        height,
        guidance_scale,
        num_inference_steps,
    ):
        """Run one text-to-image generation; return (PIL image, seed used)."""
        if randomize_seed:
            seed = random.randint(0, MAX_SEED)

        # Seed a CPU generator so a given seed reproduces the same image.
        gen = torch.Generator().manual_seed(seed)

        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            generator=gen,
        ).images[0]

        return image, seed

    mo.output.clear()  # Hide the tqdm output emitted while the model loads
    return device, infer, pipe, torch_dtype
106
+
107
+
108
@app.cell
def __(mo):
    # Shared prompt state: the text area reads it and the example buttons
    # write to it.
    get_prompt, set_prompt = mo.state("")
    return get_prompt, set_prompt
112
+
113
+
114
@app.cell
def __(get_prompt, mo, set_prompt):
    # Prompt input wired to the shared state, so clicking an example button
    # updates the text shown here as well.
    prompt = mo.ui.text_area(
        placeholder="Enter your prompt",
        label="Prompt",
        full_width=True,
        value=get_prompt(),
        on_change=set_prompt,
    )
    return (prompt,)
124
+
125
+
126
@app.cell
def __(examples, mo, set_prompt):
    # Factory that binds one example string per handler; the handler
    # ignores the button's current value and just copies the example into
    # the shared prompt state (returns None, leaving the button value as-is).
    def _make_handler(text):
        def _handler(_value):
            set_prompt(text)

        return _handler

    buttons = mo.ui.array(
        [
            mo.ui.button(label=example, on_click=_make_handler(example))
            for example in examples
        ]
    )

    example_options = mo.vstack(buttons)
    return buttons, example_options
143
+
144
+
145
@app.cell
def __(MAX_IMAGE_SIZE, MAX_SEED, example_options, mo, prompt):
    # Primary action button.
    run_button = mo.ui.run_button(label="Run", kind="success", full_width=True)

    # Generation controls, shown inside the "Advanced Settings" accordion.
    negative_prompt = mo.ui.text_area(
        placeholder="Enter a negative prompt", label="Negative prompt"
    )

    seed = mo.ui.slider(start=0, stop=MAX_SEED, value=0, label="Seed")

    randomize_seed = mo.ui.checkbox(label="Randomize seed", value=True)

    # Image dimensions in pixels, stepped to multiples of 32.
    width = mo.ui.slider(
        start=256, stop=MAX_IMAGE_SIZE, step=32, value=1024, label="Width"
    )
    height = mo.ui.slider(
        start=256, stop=MAX_IMAGE_SIZE, step=32, value=1024, label="Height"
    )

    guidance_scale = mo.ui.slider(
        start=0.0, stop=10.0, step=0.1, value=0.0, label="Guidance scale"
    )

    num_inference_steps = mo.ui.slider(
        start=1, stop=50, step=1, value=2, label="Number of inference steps"
    )

    # Collapsible sections: example prompts and the advanced controls.
    advanced_settings = mo.accordion(
        {
            "::lucide:list:: Examples": example_options,
            "::lucide:settings:: Advanced Settings": mo.hstack(
                [
                    mo.vstack([negative_prompt, seed, randomize_seed]),
                    mo.vstack(
                        [width, height, guidance_scale, num_inference_steps],
                        align="end",
                    ),
                ]
            ).style(padding="10px"),
        },
    )

    # Main interface layout: prompt on top, then the run button and settings.
    mo.vstack([prompt, run_button, advanced_settings])
    return (
        advanced_settings,
        guidance_scale,
        height,
        negative_prompt,
        num_inference_steps,
        randomize_seed,
        run_button,
        seed,
        width,
    )
201
+
202
+
203
@app.cell
def __(mo):
    # Holds the most recently generated image; None until the first run.
    get_image, set_image = mo.state(None)
    return get_image, set_image
207
+
208
+
209
@app.cell
def __(
    guidance_scale,
    height,
    infer,
    mo,
    negative_prompt,
    num_inference_steps,
    prompt,
    randomize_seed,
    run_button,
    seed,
    set_image,
    width,
):
    # Do nothing until the Run button has been pressed.
    mo.stop(not run_button.value)

    _image, _seed = infer(
        prompt=prompt.value,
        negative_prompt=negative_prompt.value,
        seed=seed.value,
        randomize_seed=randomize_seed.value,
        width=width.value,
        height=height.value,
        guidance_scale=guidance_scale.value,
        num_inference_steps=num_inference_steps.value,
    )
    set_image(_image)
    mo.output.clear()  # Hide the tqdm progress output from inference
    return
239
+
240
+
241
@app.cell
def __(get_image):
    # Render the latest generated image (shows nothing while it is None).
    get_image()
    return
245
+
246
+
247
# Serve the notebook as an app when executed directly (as the Dockerfile does).
if __name__ == "__main__":
    app.run()
development.md ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Development
2
+
3
+ ## Testing your Dockerfile locally
4
+
5
+ ```bash
6
+ docker build -t marimo-app .
7
+ docker run -it --rm -p 7860:7860 marimo-app
8
+ ```
requirements.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ accelerate==1.1.0
2
+ diffusers==0.31.0
3
+ numpy==2.1.3
4
+ torch==2.5.1
5
+ tqdm==4.66.6
6
+ transformers==4.46.1
7
+ huggingface-hub==0.26.2
8
+ marimo
9
+ # Or a specific version
10
+ # marimo>=0.9.0
11
+
12
+ # Add other dependencies as needed