not-lain committed on
Commit
08e3abb
·
1 Parent(s): ad68db4

first commit

Browse files
Files changed (3) hide show
  1. README.md +6 -5
  2. app.py +68 -0
  3. requirements.txt +5 -0
README.md CHANGED
@@ -1,11 +1,12 @@
1
  ---
2
- title: Stable Diffusion From A Ckpt File
3
- emoji: 💻
4
- colorFrom: yellow
5
- colorTo: indigo
6
  sdk: gradio
7
- sdk_version: 4.8.0
8
  app_file: app.py
 
9
  pinned: false
10
  ---
11
 
 
1
  ---
2
+ title: Running Stable Diffusion From A Ckpt File
3
+ emoji: 🥰
4
+ colorFrom: gray
5
+ colorTo: gray
6
  sdk: gradio
7
+ sdk_version: 4.7.1
8
  app_file: app.py
9
+ models: ["waifu-research-department/Rem"]
10
  pinned: false
11
  ---
12
 
app.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from diffusers import StableDiffusionPipeline
2
+ import torch
3
+ work_around_for_hugging_face_gradio_sdk_bug = "/blob/main/rem_3k.ckpt"
4
+ model_url = "https://huggingface.co/waifu-research-department/Rem" + work_around_for_hugging_face_gradio_sdk_bug
5
+ pipeline = StableDiffusionPipeline.from_single_file(
6
+ model_url,
7
+ torch_dtype=torch.float16,
8
+ )
9
+
10
+
11
+
12
+ import gradio as gr
13
+
14
+
15
# Markdown shown above the Gradio interface. Fixes vs. the original text:
# the sample snippet's `negative_prompt` line was invalid Python (the
# "👈 things you hate here" note was not commented out), and several typos
# ("kernal", "thenchange", "an meet", "postive_prompt") are corrected.
description = """

# running stable diffusion from a ckpt file

## NOTICE ⚠️:
- this space does not work rn because it needs GPU, feel free to **clone this space** and set your own with GPU and meet your waifu **ヽ(≧□≦)ノ**


if you do not have money (just like me **(┬┬﹏┬┬)** ) you can always :
* **run the code in your PC** if you have a good GPU and a good internet connection (to download the ai model, only a 1 time thing)
* **run the model in the cloud** (colab and kaggle are good alternatives and they have a pretty good internet connection )
### minimalistic code to run a ckpt model
* enable GPU (click runtime then change runtime type)
* install the following libraries
```
!pip install -q diffusers gradio omegaconf
```
* **restart your kernel** 👈 (click runtime then click restart session)
* run the following code
```python

from diffusers import StableDiffusionPipeline
import torch
pipeline = StableDiffusionPipeline.from_single_file(
    "https://huggingface.co/waifu-research-department/Rem/blob/main/rem_3k.ckpt", # put your model url here
    torch_dtype=torch.float16,
).to("cuda")

positive_prompt = "anime girl prompt here" # 👈 change this
negative_prompt = "3D" # 👈 things you hate here
image = pipeline(positive_prompt,negative_prompt=negative_prompt).images[0]

image # your image is saved in this PIL variable
```
"""
50
+
51
+
52
# Probe for a usable GPU once at startup. If moving the pipeline to CUDA
# fails, record it so text2img can report the problem instead of crashing.
log = "GPU available"
try:
    pipeline.to("cuda")
except Exception:
    # Narrowed from a bare `except:` which would also swallow
    # KeyboardInterrupt / SystemExit.
    log = "no GPU available"
57
+
58
+
59
def text2img(positive_prompt, negative_prompt):
    """Generate one image from the global Stable Diffusion pipeline.

    Args:
        positive_prompt: text describing what the image should contain.
        negative_prompt: text describing what the image should avoid.

    Returns:
        (log, image): on success, a dict echoing both prompts and the PIL
        image; when no GPU is available, the error string and None.
    """
    # BUG FIX: the original assigned to `log` inside this function, which made
    # `log` a local name for the whole body and raised UnboundLocalError on the
    # very first read below. `log` is now only read (the module-level global),
    # and the per-call info uses its own local. Also fixes the misspelled
    # "postive_prompt" key in the returned dict.
    if log == "no GPU available":
        return log, None
    image = pipeline(positive_prompt, negative_prompt=negative_prompt).images[0]
    run_info = {"positive_prompt": positive_prompt, "negative_prompt": negative_prompt}
    return run_info, image
67
+
68
# Two text inputs (positive / negative prompt) mapped to (log, image) outputs.
demo = gr.Interface(
    fn=text2img,
    inputs=["text", "text"],
    outputs=["text", "image"],
    examples=[["rem", "3D"]],
    description=description,
)
demo.launch()
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ torch
2
+ diffusers
3
+ gradio
4
+ omegaconf
5
+ transformers