import os
import time

import gradio as gr
import spaces
import torch

from pipeline_bria import BriaPipeline

# Hugging Face auth token, read from the environment if available.
hf_token = os.environ.get("HF_TOKEN")

# Supported "width height" resolutions; the first entry is the default.
resolutions = ["1024 1024", "1280 768", "1344 768", "768 1344", "768 1280"]

# Default negative prompt used to suppress common generation artifacts.
default_negative_prompt = "Logo,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers"

# Load the BRIA 3.1 pipeline once at startup and move it to the GPU.
pipe = BriaPipeline.from_pretrained("briaai/BRIA-3.1", torch_dtype=torch.bfloat16, trust_remote_code=True)
pipe.to(device="cuda")


@spaces.GPU(enable_queue=True)
def infer(prompt, negative_prompt, seed, resolution):
    print(f"Prompt:\n{prompt}")

    t = time.time()

    # A seed of -1 (or anything non-numeric) means "random": no fixed generator.
    if seed == "-1":
        generator = None
    else:
        try:
            seed = int(seed)
            generator = torch.Generator("cuda").manual_seed(seed)
        except (TypeError, ValueError):
            generator = None

    # Resolution strings are "width height".
    w, h = (int(x) for x in resolution.split())

    image = pipe(
        prompt,
        num_inference_steps=30,
        negative_prompt=negative_prompt,
        generator=generator,
        width=w,
        height=h,
    ).images[0]

    print(f"gen time is {time.time() - t:.2f} secs")

    # Future:
    # - expose the number of inference steps as a UI control
    # if nsfw:
    #     raise gr.Error("Generated image is NSFW")

    return image


css = """
#col-container {
    margin: 0 auto;
    max-width: 580px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("## BRIA 3.1")
        gr.HTML('''
        <p style="margin-bottom: 10px; font-size: 94%">
        This is a demo for
        <a href="https://huggingface.co/briaai/BRIA-3.1" target="_blank">BRIA 3.1 text-to-image</a>,
        our new text-to-image model that achieves high-quality generation while being trained exclusively on fully licensed data.
        We offer both API access and direct access to the model weights, making integration seamless for developers.
        </p>
        ''')
        with gr.Group():
            with gr.Column():
                prompt_in = gr.Textbox(label="Prompt", value="""photo of mystical dragon eating sushi, text bubble says "Sushi Time".""")
                resolution = gr.Dropdown(value=resolutions[0], show_label=True, label="Resolution", choices=resolutions)
                seed = gr.Textbox(label="Seed", value=-1)
                negative_prompt = gr.Textbox(label="Negative Prompt", value=default_negative_prompt)
                submit_btn = gr.Button("Generate")
        result = gr.Image(label="BRIA-3.1 Result")

        # gr.Examples(
        #     examples=[
        #         "Dragon, digital art, by Greg Rutkowski",
        #         "Armored knight holding sword",
        #         "A flat roof villa near a river with black walls and huge windows",
        #         "A calm and peaceful office",
        #         "Pirate guinea pig",
        #     ],
        #     fn=infer,
        #     inputs=[prompt_in],
        #     outputs=[result],
        # )

    submit_btn.click(
        fn=infer,
        inputs=[prompt_in, negative_prompt, seed, resolution],
        outputs=[result],
    )

demo.queue().launch(show_api=False)