John6666 committed
Commit
4557bc9
1 Parent(s): 0fcc824

Upload 3 files

Files changed (2)
  1. model.py +3 -0
  2. multit2i.py +13 -11
model.py CHANGED
@@ -2,6 +2,7 @@ from multit2i import find_model_list
 
 
 models = [
+    'yodayo-ai/clandestine-xl-1.0',
     'yodayo-ai/kivotos-xl-2.0',
     'yodayo-ai/holodayo-xl-2.1',
     'cagliostrolab/animagine-xl-3.1',
@@ -18,6 +19,8 @@ models = [
     'Raelina/Raemu-XL-V4',
 ]
 
+#models = find_model_list("Disty0", [], "", "last_modified", 100)
+
 
 # Examples:
 #models = ['yodayo-ai/kivotos-xl-2.0', 'yodayo-ai/holodayo-xl-2.1'] # specific models
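A note on the commented-out helper: `find_model_list` is imported from multit2i.py on the first line of model.py, and the new commented call would replace the hand-curated list with the 100 most recently modified models from the "Disty0" account. Its implementation is not part of this diff, so the following is only a minimal sketch of what such a helper might look like, built on the real `huggingface_hub.HfApi.list_models` API; the meanings of the `tags` and `not_tag` parameters are assumptions.

# Hypothetical sketch of the find_model_list() helper imported from
# multit2i.py; the real implementation is not shown in this diff.
from huggingface_hub import HfApi

def find_model_list(author: str = "", tags: list | None = None, not_tag: str = "",
                    sort: str = "last_modified", limit: int = 30) -> list:
    api = HfApi()
    # Pull text-to-image models by this author, newest first; over-fetch
    # so there is room to filter by tags below.
    found = api.list_models(author=author or None, filter="text-to-image",
                            sort=sort, direction=-1, limit=limit * 5)
    models = []
    for m in found:
        if tags and not set(tags) <= set(m.tags or []): continue  # assumed: require all tags
        if not_tag and not_tag in (m.tags or []): continue        # assumed: exclude one tag
        models.append(m.id)
        if len(models) >= limit: break
    return models

# The call added (commented out) in this commit:
# models = find_model_list("Disty0", [], "", "last_modified", 100)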
multit2i.py CHANGED
@@ -124,7 +124,7 @@ def load_from_model(model_name: str, hf_token: str = None):
             f"Could not find model: {model_name}. If it is a private or gated model, please provide your Hugging Face access token (https://huggingface.co/settings/tokens) as the argument for the `hf_token` parameter."
         )
     headers["X-Wait-For-Model"] = "true"
-    client = huggingface_hub.InferenceClient(model=model_name, headers=headers, token=hf_token)
+    client = huggingface_hub.InferenceClient(model=model_name, headers=headers, token=hf_token, timeout=120)
     inputs = gr.components.Textbox(label="Input")
     outputs = gr.components.Image(label="Output")
     fn = client.text_to_image
@@ -325,19 +325,20 @@ async def infer_multi(prompt: str, neg_prompt: str, results: list, image_num: fl
     images = results if results else []
     image_num_offset = len(images)
     prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
-    tasks = [asyncio.to_thread(infer, prompt, neg_prompt, model_name) for i in range(image_num)]
+    tasks = [asyncio.create_task(asyncio.to_thread(infer, prompt, neg_prompt, model_name)) for i in range(image_num)]
+    await asyncio.sleep(0)
     for task in tasks:
         progress(float(len(images) - image_num_offset) / float(image_num), desc="Running inference.")
         try:
-            result = await task
-        except Exception as e:
+            result = await asyncio.wait_for(task, timeout=120)
+        except (Exception, asyncio.TimeoutError) as e:
             print(e)
-            task.cancel()
+            if not task.done(): task.cancel()
             result = None
             image_num_offset += 1
         with lock:
             if result and len(result) == 2 and result[1]: images.append(result)
-        await asyncio.sleep(0.05)
+        await asyncio.sleep(0)
         yield images
 
 
@@ -351,17 +352,18 @@ async def infer_multi_random(prompt: str, neg_prompt: str, results: list, image_
     random.seed()
     model_names = random.choices(list(loaded_models.keys()), k = image_num)
     prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
-    tasks = [asyncio.to_thread(infer, prompt, neg_prompt, model_name) for model_name in model_names]
+    tasks = [asyncio.create_task(asyncio.to_thread(infer, prompt, neg_prompt, model_name)) for model_name in model_names]
+    await asyncio.sleep(0)
     for task in tasks:
         progress(float(len(images) - image_num_offset) / float(image_num), desc="Running inference.")
         try:
-            result = await task
-        except Exception as e:
+            result = await asyncio.wait_for(task, timeout=120)
+        except (Exception, asyncio.TimeoutError) as e:
             print(e)
-            task.cancel()
+            if not task.done(): task.cancel()
             result = None
             image_num_offset += 1
         with lock:
             if result and len(result) == 2 and result[1]: images.append(result)
-        await asyncio.sleep(0.05)
+        await asyncio.sleep(0)
         yield images
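The first multit2i.py hunk passes `timeout=120` to the client. `timeout` is a regular `InferenceClient` constructor argument (seconds to wait for the server's response), and together with the `X-Wait-For-Model` header it bounds how long a cold model may block before the request fails instead of hanging. A minimal usage sketch, reusing a model name from model.py:

import huggingface_hub

# timeout=120 caps each request at two minutes; with X-Wait-For-Model the
# Inference API blocks while the model loads, so an unbounded client could
# otherwise hang indefinitely on a cold model.
client = huggingface_hub.InferenceClient(
    model="yodayo-ai/kivotos-xl-2.0",
    headers={"X-Wait-For-Model": "true"},
    timeout=120,
)
try:
    image = client.text_to_image("1girl, solo, looking at viewer")  # returns a PIL image
except Exception as e:  # raised once the 120 s budget is exhausted
    print(f"inference failed: {e}")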
 
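The two remaining hunks apply the same fix to `infer_multi` and `infer_multi_random`. A bare `asyncio.to_thread(...)` only creates a coroutine, which does not start until it is awaited, so the old loop ran the inferences strictly one after another. Wrapping each coroutine in `asyncio.create_task(...)` schedules it on the event loop immediately, and the added `await asyncio.sleep(0)` yields one tick so the tasks can actually start. A self-contained sketch of the difference, with `infer` standing in for the blocking inference call:

import asyncio, time

def infer(i: int) -> int:
    time.sleep(1)          # stand-in for a blocking inference request
    return i

async def sequential():
    # Bare to_thread() coroutines do not run until awaited: ~4 s total.
    coros = [asyncio.to_thread(infer, i) for i in range(4)]
    return [await c for c in coros]

async def concurrent():
    # create_task() submits every job to the default thread pool up front,
    # so the awaits just collect finished results: ~1 s total.
    tasks = [asyncio.create_task(asyncio.to_thread(infer, i)) for i in range(4)]
    await asyncio.sleep(0)  # give the loop one tick to start the tasks
    return [await t for t in tasks]

print(asyncio.run(sequential()))  # [0, 1, 2, 3] in about 4 seconds
print(asyncio.run(concurrent()))  # [0, 1, 2, 3] in about 1 second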
 
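The same hunks also replace the bare `await task` with `asyncio.wait_for(task, timeout=120)`, which raises `asyncio.TimeoutError` and cancels the task if it misses the deadline. On current Pythons `asyncio.TimeoutError` is already an `Exception` subclass, so naming it separately in the `except` clause is defensive rather than required, and the same goes for the `if not task.done(): task.cancel()` guard. One caveat the sketch below notes in a comment: cancelling a task that wraps `asyncio.to_thread` does not interrupt the worker thread, which runs to completion in the background.

import asyncio, time

def slow_infer() -> str:
    time.sleep(5)           # stand-in for a hung inference call
    return "image"

async def main():
    task = asyncio.create_task(asyncio.to_thread(slow_infer))
    try:
        # wait_for bounds the await; on timeout it raises and cancels `task`.
        result = await asyncio.wait_for(task, timeout=1)
    except (Exception, asyncio.TimeoutError) as e:
        print(type(e).__name__)            # prints TimeoutError after ~1 s
        if not task.done(): task.cancel()  # extra safeguard, as in the diff
        result = None
    # Cancellation stops the awaiting task, but the thread running
    # slow_infer() is not interrupted and finishes its sleep in the background.
    return result

print(asyncio.run(main()))  # None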