import ast
import io

import replicate
import requests
import streamlit as st
from PIL import Image
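
# ZB-Tech/Text-to-Image on the Hugging Face Inference API, used below to turn each
# recommended accessory description into an image. Note: the hosted Inference API may
# require an Authorization header carrying a Hugging Face token; anonymous requests
# can be rate limited or rejected.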
API_URL = "https://api-inference.huggingface.co/models/ZB-Tech/Text-to-Image"


def query(payload):
    # Send the prompt payload to the text-to-image endpoint and return the raw image bytes.
    response = requests.post(API_URL, json=payload)
    return response.content


st.header("Mirror: AI Stylist")

uploaded_file = st.file_uploader("Upload Your Photo", type=["jpg", "jpeg", "png"])
image = ""
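
# Run the styling pipeline once a photo has been uploaded.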
if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image.", use_column_width=True)
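
    # Ask the LLaVA vision model on Replicate for accessory suggestions that match the photo.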
    st.markdown("Generating Fashion Accessories For You...")
    with st.spinner("Loading..."):
        prompt = '''
        You are a personal stylist recommending fashion advice and clothing combinations.
        Use the person's dressing style, colour of clothes and appearance from the given
        image to generate three fashion accessories according to the gender of the person
        in an array format.
        Examples:
        Example 1: ["accessory1 name with specific detail"]
        '''

        # Rewind the upload before handing it to Replicate: PIL has already read the
        # stream to display the photo, so the file pointer sits at the end.
        uploaded_file.seek(0)
        input = {
            "image": uploaded_file,
            "prompt": prompt
        }
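
        # LLaVA-13B describes the person's outfit and returns the accessory list as
        # streamed text; the replicate client authenticates via the REPLICATE_API_TOKEN
        # environment variable.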
        output = replicate.run(
            "yorickvp/llava-13b:b5f6212d032508382d61ff00469ddda3e32fd8a0e75dc39d8a4191bb742157fb",
            input=input,
        )
        # Join the streamed chunks and parse the array literal requested in the prompt.
        arr = "".join(output)
        output = ast.literal_eval(arr)
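
        # Generate and display an image for each recommended accessory.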
        for item in output:
            image_bytes = query({
                "inputs": item,
            })

            image = Image.open(io.BytesIO(image_bytes))
            st.image(image, caption=item, use_column_width=True)

        st.success("Generated Fashion Accessories!")