vilarin committed
Commit 8c0b352
Parent: 86579ec

Update app.py

Files changed (1): app.py (+9 −15)
app.py CHANGED
@@ -8,7 +8,6 @@ import requests
 import re
 import asyncio
 from PIL import Image
-from glob import glob
 
 translator = Translator()
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
@@ -29,7 +28,7 @@ JS = """function () {
     }
 }"""
 
-client = AsyncInferenceClient()
+
 
 def enable_lora(lora_in, lora_add):
     if not lora_in and not lora_add:
@@ -39,12 +38,6 @@ def enable_lora(lora_in, lora_add):
         lora_in = lora_add
     return lora_in
 
-def imagename():
-    os.makedirs("output", exist_ok=True)
-    base_count = len(glob(os.path.join("output", "*.webp")))
-    image_path = os.path.join("output", f"{base_count:06d}.webp")
-    return image_path
-
 async def generate_image(
     prompt:str,
     model:str,
@@ -61,9 +54,9 @@ async def generate_image(
 
     text = str(translator.translate(prompt, 'English'))
 
-    #generator = torch.Generator().manual_seed(seed)
-
-    image1 = await client.text_to_image(
+    client1 = AsyncInferenceClient(basemodel)
+
+    image1 = await client1.text_to_image(
         prompt=text,
         height=height,
         width=width,
@@ -71,10 +64,11 @@ async def generate_image(
         num_inference_steps=steps,
         model=basemodel,
     )
-    image1=image1.save(imagename())
     print(image1)
-
-    image2 = await client.text_to_image(
+
+    client2 = AsyncInferenceClient(model)
+
+    image2 = await client2.text_to_image(
         prompt=text,
         height=height,
         width=width,
@@ -82,8 +76,8 @@ async def generate_image(
         num_inference_steps=steps,
         model=model,
    )
-    image2=image2.save(imagename())
     print(image2)
+
     return image1, image2, seed
 
 async def gen(
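
For context, the net effect of this commit is that generate_image no longer shares one parameter-less AsyncInferenceClient and no longer numbers .webp files into an output/ directory; it builds one client per model and returns the PIL images that text_to_image yields directly. The following is a minimal sketch of that pattern using huggingface_hub.AsyncInferenceClient; the compare() wrapper, the example prompt, and the FLUX model IDs are placeholders for illustration, not code from app.py.

import asyncio

from huggingface_hub import AsyncInferenceClient


async def compare(prompt: str, basemodel: str, model: str, steps: int = 28):
    # One client per model, as in the updated app.py; the model id passed to the
    # constructor is used for the inference call.
    client1 = AsyncInferenceClient(basemodel)
    client2 = AsyncInferenceClient(model)

    # text_to_image() returns a PIL.Image.Image, so nothing is written to disk.
    image1 = await client1.text_to_image(prompt, num_inference_steps=steps)
    image2 = await client2.text_to_image(prompt, num_inference_steps=steps)
    return image1, image2


if __name__ == "__main__":
    # Placeholder model ids, chosen only to make the sketch runnable.
    img_a, img_b = asyncio.run(
        compare(
            "a cat in a spacesuit",
            basemodel="black-forest-labs/FLUX.1-dev",
            model="black-forest-labs/FLUX.1-schnell",
        )
    )
    print(img_a.size, img_b.size)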