import os import math import gradio as gr import numpy as np import requests import json import base64 from PIL import Image from io import BytesIO import runpod from enum import Enum api_key = os.getenv("FAI_API_KEY") api = os.getenv("FAI_API") def image_to_base64(image): # Open the image file with image: # Create a buffer to hold the binary data buffered = BytesIO() # Save the image in its original format to the buffer #print(image.format) image.save(buffered, format="PNG") # Get the byte data from the buffer binary_image_data = buffered.getvalue() # Encode the binary data to a base64 string base64_image = base64.b64encode(binary_image_data).decode("utf-8") return base64_image def process(data, api, api_key): runpod.api_key = api_key input_payload = {"input": data } try: endpoint = runpod.Endpoint(api) run_request = endpoint.run(input_payload) # Initial check without blocking, useful for quick tasks status = run_request.status() print(f"Initial job status: {status}") if status=="IN_QUEUE": gr.Info("Queued 🚶🚶🚶🚶!", duration=15) if status != "COMPLETED": # Polling with timeout for long-running tasks output = run_request.output(timeout=120) else: output = run_request.output() print(f"Job output: {output}") except Exception as e: print(f"An error occurred: {e}") status = run_request.status() if status=="FAILED": raise gr.Error(f"An error occured 💥! {e}", duration=5) if status=="TIMED_OUT": raise gr.Error("Sorry we could not secure a worker for you ⏳! Try again", duration=5) image_data = output['image'] # Decode the Base64 string image_bytes = base64.b64decode(image_data) # Convert binary data to image image = Image.open(BytesIO(image_bytes)) return image def resize_to_fit(max_size, original_size): """ Calculate the new size for an image to fit within max_size while maintaining the aspect ratio. :param max_size: Maximum allowed size as a tuple (width, height). :param original_size: Original size of the image as a tuple (width, height). :return: New size as a tuple (new_width, new_height) that fits within max_size while maintaining the aspect ratio. """ original_width, original_height = original_size max_width, max_height = max_size # Calculate the scaling factor to maintain aspect ratio width_ratio = max_width / original_width height_ratio = max_height / original_height scaling_factor = min(width_ratio, height_ratio) # Calculate the new size while maintaining the aspect ratio new_width = int(original_width * scaling_factor) new_height = int(original_height * scaling_factor) return new_width, new_height def process_generate(fore, prompt, intensity, mode, refprompt, bg): size = fore.size image_width = size[0] image_height = size[1] if size[0]*size[1]<=(768*768): gr.Warning("ℹ️ The input image resolution is low, it might lead to some deformation!") if size[0]*size[1]>(1500*1500): gr.Warning("ℹ️ The input image size is too big, I will lower it!") image_width, image_height = resize_to_fit((1500,1500), (image_width, image_height)) forestr = image_to_base64(fore.convert("RGBA")) data = { "foreground_image64": forestr, "prompt" : prompt, "mode" : mode, "intensity" : float(intensity), "width" : int(image_width), "height" : int(image_height), "refprompt" : refprompt } print(f"DATA: {data}") ''' data = { "foreground_image64": forestr, "prompt" : "There is Perfume, nestled on a crystalline cliff of glistening snow, under a celestial night sky adorned with constellations and swirling galaxies, framed by ethereal, blue flames that dance gracefully in the icy air", "mode" : "full", #refiner, full "intensity" : 3.0, "width" : 1000, "height" : 1000, "refprompt" : " transparent glass " } ''' image = process(data, api, api_key) return image def update_value(val): return val class Stage(Enum): FIRST_STAGE = "first-stage" SECOND_STAGE = "refiner" FULL = "full" css="""#disp_image { text-align: center; /* Horizontally center the content */ } #share-btn-container {padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; max-width: 13rem; margin-left: auto;} div#share-btn-container > div {flex-direction: row;background: black;align-items: center} #share-btn-container:hover {background-color: #060606} #share-btn {all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.5rem !important; padding-bottom: 0.5rem !important;right:0;} #share-btn * {all: unset} #share-btn-container div:nth-child(-n+2){width: auto !important;min-height: 0px !important;} #share-btn-container .wrap {display: none !important} #share-btn-container.hidden {display: none!important} #duplicate-button { margin-left: auto; color: #fff; background: #1565c0; } """ block = gr.Blocks(css=css, title="## F.ai Fuzer").queue() with block: gr.HTML("""

Fotographer AI Fuzer

""") gr.HTML('''
Check out our AppFotographer.ai!
''') with gr.Row(): gr.Markdown("### F.ai Fuzer: Real Composite Photography in 2 minutes!") with gr.Row(): fore = gr.Image(image_mode='RGBA', type="pil", label="Foreground Image", height=400) with gr.Column(): result_gallery = gr.Image(label='Output') #gr.Gallery(height=400, object_fit='contain', label='Outputs') with gr.Row(): prompt = gr.Textbox(label="Prompt") with gr.Column(): refprompt = gr.Textbox(label="Refiner Prompt") with gr.Row(): mode = gr.Radio(choices=[e.value for e in Stage], value=Stage.FULL.value, label="Generation Mode", type='value') mode.change(fn=update_value, inputs=mode, outputs=mode) with gr.Column(): bg = gr.Checkbox(info="Remove Backgroung") bg.change(fn=update_value, inputs=bg, outputs=bg) with gr.Column(): gr.HTML('''
Check out our AppFotographer.ai!
''') with gr.Row(): intensity = gr.Slider(label="Refiner Strength", minimum=1.0, maximum=7.0, value=3.0, step=0.5) intensity.change(fn=update_value, inputs=intensity, outputs=intensity) generate_button = gr.Button(value="Generate") ips = [fore, prompt, intensity, mode, refprompt, bg] generate_button.click(fn=process_generate, inputs=ips, outputs=[result_gallery]) block.launch()