File size: 8,078 Bytes
b1532b0 2abf116 ff6bf82 2abf116 a4dc223 f30185d 2abf116 f30185d 2abf116 ff6bf82 2abf116 b1532b0 f932259 b1532b0 ff6bf82 b1532b0 f932259 6e0ffb7 2abf116 210730f b1532b0 67fb9c5 b1532b0 2abf116 210730f 2abf116 210730f 2abf116 210730f 2abf116 210730f e09fed0 ff6bf82 3bda041 4724a1e 3bda041 1f79208 6845b01 b1532b0 ff6bf82 3bda041 2abf116 f30185d a792718 2abf116 64af198 2abf116 64af198 2abf116 210730f b3333a0 210730f 7557387 210730f 540630a 210730f 540630a 0fdfbbc 540630a 210730f 2abf116 ca2d13e b1532b0 ca2d13e e09fed0 ca2d13e e09fed0 b1532b0 a4dc223 ff6bf82 5012113 a4dc223 2abf116 a4dc223 e3ccbe7 0fdfbbc a4dc223 2abf116 a4dc223 f30185d a4dc223 5012113 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 |
import ast
import json
import base64
import spaces
import requests
import numpy as np
import gradio as gr
from PIL import Image
from io import BytesIO
import face_recognition
from turtle import title
from openai import OpenAI
from collections import Counter
from transformers import pipeline
# OpenAI client used by get_openAI_tags; picks up OPENAI_API_KEY from the environment.
client = OpenAI()
# Zero-shot image classifier fine-tuned on fashion imagery; drives colour/attribute prediction.
pipe = pipeline("zero-shot-image-classification", model="patrickjohncyh/fashion-clip")
# JSON config files mapping colours and per-category attributes to candidate labels.
color_file_path = 'color_config.json'
attributes_file_path = 'attributes_config.json'
import os
# NOTE(review): OPENAI_API_KEY is read here but never referenced again; the
# OpenAI() client above reads the key from the environment on its own.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
# Open and read the COLOR JSON file
with open(color_file_path, 'r') as file:
    color_data = json.load(file)
# Open and read the ATTRIBUTES JSON file
with open(attributes_file_path, 'r') as file:
    attributes_data = json.load(file)
# main colour name -> list of sub-colour names (see get_colour)
COLOURS_DICT = color_data['color_mapping']
# category -> {attribute name -> list of candidate values} (see get_predicted_attributes)
ATTRIBUTES_DICT = attributes_data['attribute_mapping']
def shot(input, category):
    """Run the full product-tagging flow for a set of image URLs.

    Args:
        input: String containing a Python-literal list of image URLs,
            e.g. "['https://...', 'https://...']" (as entered in the Gradio textbox).
        category: Clothing category string (e.g. "women-top-shirt") used to
            build candidate labels for the classifier.

    Returns:
        dict with colour prediction ("colors"), attribute predictions
        ("attributes"), per-image camera-angle mapping ("image_mapping"),
        and per-image face encodings ("face_embeddings").
    """
    # Parse the URL list exactly once instead of re-evaluating the same
    # string for every downstream call.
    image_urls = ast.literal_eval(str(input))
    subColour, mainColour, score = get_colour(image_urls, category)
    common_result = get_predicted_attributes(image_urls, category)
    openai_parsed_response = get_openAI_tags(image_urls)
    face_embeddings = get_face_embeddings(image_urls)
    return {
        "colors": {
            "main": mainColour,
            "sub": subColour,
            # get_colour returns (None, None, None) when the predicted colour
            # is not in COLOURS_DICT; guard against round(None * 100).
            "score": round(score * 100, 2) if score is not None else None,
        },
        "attributes": common_result,
        "image_mapping": openai_parsed_response,
        "face_embeddings": face_embeddings,
    }
@spaces.GPU
def get_colour(image_urls, category):
    """Predict the main and sub colour of the garment in the images.

    Args:
        image_urls: List of image URLs fed to the zero-shot classifier.
        category: Clothing category appended to each candidate label.

    Returns:
        (subColour, mainColour, score) for the first image, or
        (None, None, None) if the predicted main colour is unknown.
    """
    # Build candidate labels in the form "<colour> clothing: <category>"
    # without touching COLOURS_DICT itself.
    colour_labels = [f"{colour} clothing: {category}" for colour in COLOURS_DICT]
    responses = pipe(image_urls, candidate_labels=colour_labels)
    # Highest-scoring colour for the first image.
    mainColour = responses[0][0]['label'].split(" clothing:")[0]
    if mainColour not in COLOURS_DICT:
        return None, None, None
    # BUG FIX: the original aliased COLOURS_DICT[mainColour] and appended the
    # category suffix to the shared list in place, so every call after the
    # first ran with doubly-suffixed labels. Build a fresh list instead.
    sub_labels = [f"{label} clothing: {category}" for label in COLOURS_DICT[mainColour]]
    # Run pipeline in one go over all images.
    responses = pipe(image_urls, candidate_labels=sub_labels)
    subColour = responses[0][0]['label'].split(" clothing:")[0]
    return subColour, mainColour, responses[0][0]['score']
@spaces.GPU
def get_predicted_attributes(image_urls, category):
    """Predict the most likely value for each attribute of the category.

    Labels are phrased as "{attribute}: {value}, clothing: {category}" for the
    zero-shot classifier; the most common prediction across the images wins
    (top two for the "details" attribute).

    Returns:
        dict mapping attribute name -> predicted value string.
    """
    category_attrs = ATTRIBUTES_DICT.get(category, {})
    picked = []  # one "attr: value" summary string per attribute
    for attr_name, attr_values in category_attrs.items():
        if not attr_values:
            continue
        # Normalise internal attribute keys into readable label prefixes.
        label_name = (
            attr_name
            .replace("colartype", "collar")
            .replace("sleevelength", "sleeve length")
            .replace("fabricstyle", "fabric")
        )
        candidates = [f"{label_name}: {value}, clothing: {category}" for value in attr_values]
        # Classify every image against this attribute's candidate values.
        responses = pipe(image_urls, candidate_labels=candidates)
        votes = [resp[0]['label'].split(", clothing:")[0] for resp in responses]
        if label_name == "details":
            # "details" also counts each image's runner-up label and keeps
            # the two most common predictions overall.
            votes.extend(resp[1]['label'].split(", clothing:")[0] for resp in responses)
            top = Counter(votes).most_common(2)
        else:
            top = Counter(votes).most_common(1)
        picked.append(", ".join(label for label, _count in top))
    # Split each "attr: value" summary into a dictionary entry.
    parsed = {}
    for entry in picked:
        key, value = entry.split(': ', 1)
        parsed[key] = value
    return parsed
def get_openAI_tags(image_urls):
    """Ask GPT-4o which camera angle each product image was taken from.

    Args:
        image_urls: List of image URLs sent to the model as image_url parts.

    Returns:
        dict parsed from the model's reply, mapping stringified URL index
        to one of the angle labels it was instructed to use.
    """
    # One image_url content part per URL, in input order.
    image_parts = [
        {"type": "image_url", "image_url": {"url": url}} for url in image_urls
    ]
    system_message = {
        "role": "system",
        "content": [
            {
                "type": "text",
                "text": "You're a tagging assistant, you will help label and tag product pictures for my online e-commerce platform. Your tasks will be to return which angle the product images were taken from. You will have to choose from 'full-body', 'half-body', 'side', 'back', or 'zoomed' angles. You should label each of the images with one of these labels depending on which you think fits best (ideally, every label should be used at least once, but only if there are 5 or more images), and should respond with an unformatted dictionary where the key is a string representation of the url index of the url and the value is the assigned label."
            }
        ]
    }
    user_message = {"role": "user", "content": image_parts}
    openai_response = client.chat.completions.create(
        model="gpt-4o",
        messages=[system_message, user_message],
        temperature=1,
        max_tokens=500,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    # The model is instructed to reply with a bare dict literal; parse it.
    return json.loads(openai_response.choices[0].message.content)
@spaces.GPU
def get_face_embeddings(image_urls):
    """Compute a face encoding for each image URL.

    Args:
        image_urls: List of image URLs to download and scan for faces.

    Returns:
        dict mapping image index -> first face encoding as a list of floats,
        [] when no face is detected, or an error-message string if download
        or processing failed (best-effort per image; one failure does not
        abort the rest).
    """
    # Initialize a dictionary to store the face encodings or errors
    results = {}
    # Loop through each image URL
    for index, url in enumerate(image_urls):
        try:
            # BUG FIX: requests.get without a timeout can hang forever on a
            # dead URL and stall the whole request; bound it explicitly.
            response = requests.get(url, timeout=30)
            # Raise an exception if the response is not successful
            response.raise_for_status()
            # Load the image using face_recognition
            image = face_recognition.load_image_file(BytesIO(response.content))
            # Get the face encodings for all faces in the image
            face_encodings = face_recognition.face_encodings(image)
            # If no faces are detected, store an empty list
            if not face_encodings:
                results[index] = []
            else:
                # Otherwise, store the first face encoding as a list
                results[index] = face_encodings[0].tolist()
        except Exception as e:
            # Best-effort: record the failure for this image and continue
            results[index] = f"Error processing image: {str(e)}"
    return results
# Define the Gradio interface with the updated components
iface = gr.Interface(
    fn=shot,
    inputs=[
        # First textbox expects a Python-literal list of URLs; it is parsed
        # with ast.literal_eval inside shot().
        gr.Textbox(label="Image URLs (starting with http/https) comma seperated "),
        gr.Textbox(label="Category")
    ],
    outputs="text" ,
    # Example: three CloudFront product shots plus their category.
    examples=[
        [['https://d2q1sfov6ca7my.cloudfront.net/eyJidWNrZXQiOiAiaGljY3VwLWltYWdlLWhvc3RpbmciLCAia2V5IjogIlc4MDAwMDAwMTM0LU9SL1c4MDAwMDAwMTM0LU9SLTEuanBnIiwgImVkaXRzIjogeyJyZXNpemUiOiB7IndpZHRoIjogODAwLCAiaGVpZ2h0IjogMTIwMC4wLCAiZml0IjogIm91dHNpZGUifX19',
        'https://d2q1sfov6ca7my.cloudfront.net/eyJidWNrZXQiOiAiaGljY3VwLWltYWdlLWhvc3RpbmciLCAia2V5IjogIlc4MDAwMDAwMTM0LU9SL1c4MDAwMDAwMTM0LU9SLTIuanBnIiwgImVkaXRzIjogeyJyZXNpemUiOiB7IndpZHRoIjogODAwLCAiaGVpZ2h0IjogMTIwMC4wLCAiZml0IjogIm91dHNpZGUifX19',
        'https://d2q1sfov6ca7my.cloudfront.net/eyJidWNrZXQiOiAiaGljY3VwLWltYWdlLWhvc3RpbmciLCAia2V5IjogIlc4MDAwMDAwMTM0LU9SL1c4MDAwMDAwMTM0LU9SLTMuanBnIiwgImVkaXRzIjogeyJyZXNpemUiOiB7IndpZHRoIjogODAwLCAiaGVpZ2h0IjogMTIwMC4wLCAiZml0IjogIm91dHNpZGUifX19'], "women-top-shirt"]],
    description="Add an image URL (starting with http/https) or upload a picture, and provide a list of labels separated by commas.",
    title="Full product flow"
)
# Launch the interface
iface.launch()
|