MohamedRashad committed
Commit 87af913 · 1 Parent(s): a5e543c

Update image generation to use InferenceClient and adjust requirements

Files changed (2)
  1. app.py +22 -13
  2. requirements.txt +1 -1
app.py CHANGED
@@ -19,7 +19,7 @@ from diffusers import FluxPipeline
 from huggingface_hub import InferenceClient
 
 llm_client = Client("Qwen/Qwen2.5-72B-Instruct")
-client = InferenceClient("black-forest-labs/FLUX.1-dev")
+t2i_client = Client("black-forest-labs/FLUX.1-dev")
 
 # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 # pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to("cpu")
@@ -54,18 +54,6 @@ Focus on the item itself, ensuring it is fully described, and specify a plain, w
 
     return object_t2i_prompt
 
-def generate_item_image(object_t2i_prompt):
-    # image = pipe(prompt=object_t2i_prompt, guidance_scale=3.5, num_inference_steps=28, width=1024, height=1024, generator=torch.Generator("cpu").manual_seed(0), output_type="pil").images[0]
-    image = client.text_to_image(object_t2i_prompt, guidance_scale=3.5, num_inference_steps=28, width=1024, height=1024)
-    trial_id, processed_image = preprocess_pil_image(image)
-    return trial_id, processed_image
-
-MAX_SEED = np.iinfo(np.int32).max
-TMP_DIR = "/tmp/Trellis-demo"
-
-os.makedirs(TMP_DIR, exist_ok=True)
-
-
 def preprocess_pil_image(image: Image.Image) -> Tuple[str, Image.Image]:
     """
     Preprocess the input image.
@@ -82,6 +70,27 @@ def preprocess_pil_image(image: Image.Image) -> Tuple[str, Image.Image]:
     processed_image.save(f"{TMP_DIR}/{trial_id}.png")
     return trial_id, processed_image
 
+def generate_item_image(object_t2i_prompt):
+    # image = pipe(prompt=object_t2i_prompt, guidance_scale=3.5, num_inference_steps=28, width=1024, height=1024, generator=torch.Generator("cpu").manual_seed(0), output_type="pil").images[0]
+    # image = client.text_to_image(object_t2i_prompt, guidance_scale=3.5, num_inference_steps=28, width=1024, height=1024)
+    img_path = t2i_client.predict(
+        prompt=object_t2i_prompt,
+        seed=0,
+        randomize_seed=True,
+        width=1024,
+        height=1024,
+        guidance_scale=3.5,
+        num_inference_steps=8,
+        api_name="/infer"
+    )[0]
+    image = Image.open(img_path)
+    trial_id, processed_image = preprocess_pil_image(image)
+    return trial_id, processed_image
+
+MAX_SEED = np.iinfo(np.int32).max
+TMP_DIR = "/tmp/Trellis-demo"
+
+os.makedirs(TMP_DIR, exist_ok=True)
 
 def pack_state(gs: Gaussian, mesh: MeshExtractResult, trial_id: str) -> dict:
     return {
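
In practical terms, the change routes image generation through the public FLUX.1-dev Gradio Space instead of huggingface_hub's InferenceClient.text_to_image, so the result arrives as a file path rather than a PIL image, hence the added Image.open call. Below is a minimal standalone sketch of that new path, assuming Client here is gradio_client.Client (as the requirements change suggests) and that the Space's /infer endpoint returns a tuple whose first element is a local path to the generated image, as the diff's [0] indexing implies; the prompt and output filename are purely illustrative.

```python
from gradio_client import Client
from PIL import Image

# Connect to the hosted FLUX.1-dev Gradio Space (same client the diff introduces).
t2i_client = Client("black-forest-labs/FLUX.1-dev")

# Call the /infer endpoint with the same parameters used in generate_item_image().
# The first element of the returned tuple is assumed to be a local file path
# to the generated image.
result = t2i_client.predict(
    prompt="A single red ceramic mug on a plain white background",  # illustrative prompt
    seed=0,
    randomize_seed=True,
    width=1024,
    height=1024,
    guidance_scale=3.5,
    num_inference_steps=8,
    api_name="/infer",
)
img_path = result[0]

# Load the image exactly as generate_item_image() does before preprocessing.
image = Image.open(img_path)
image.save("generated_item.png")  # illustrative output path
```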
requirements.txt CHANGED
@@ -4,7 +4,7 @@
 accelerate
 sentencepiece
 diffusers
-gradio_client
+gradio_client==1.4.0
 huggingface-hub==0.26.5
 torch==2.4.0
 torchvision==0.19.0
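
The requirements change pins gradio_client to 1.4.0 instead of leaving it unpinned. If it helps to confirm the pin took effect in the running environment, a small standard-library-only check (purely illustrative):

```python
from importlib.metadata import version

# requirements.txt now pins gradio_client==1.4.0; print what the environment
# actually resolved, to confirm the pin is in effect.
print("gradio_client ==", version("gradio_client"))
```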