import ast
import json
import os
from collections import Counter

import gradio as gr
import spaces
from openai import OpenAI
from transformers import pipeline

# The Space stores the key under OPENAI_KEY rather than the default
# OPENAI_API_KEY, so pass it to the client explicitly
client = OpenAI(api_key=os.getenv("OPENAI_KEY"))

# Zero-shot image classifier fine-tuned on fashion imagery
pipe = pipeline("zero-shot-image-classification", model="patrickjohncyh/fashion-clip")

color_file_path = 'color_config.json'
attributes_file_path = 'attributes_config.json'

# Open and read the COLOR JSON file
with open(color_file_path, 'r') as file:
    color_data = json.load(file)

# Open and read the ATTRIBUTES JSON file
with open(attributes_file_path, 'r') as file:
    attributes_data = json.load(file)

COLOURS_DICT = color_data['color_mapping']
ATTRIBUTES_DICT = attributes_data['attribute_mapping']
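
# A minimal sketch of the assumed config shape (illustrative values only,
# not the real files):
#   color_config.json      -> {"color_mapping": {"white": ["ivory", "cream", ...], ...}}
#   attributes_config.json -> {"attribute_mapping": {"women-top-shirt": {"colartype": [...], "details": [...], ...}, ...}}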

def shot(input, category):
    # The textbox delivers the URL list as a string; parse it back into a Python list once
    image_urls = ast.literal_eval(str(input))
    subColour, mainColour, score = get_colour(image_urls, category)
    common_result = get_predicted_attributes(image_urls, category)
    openai_parsed_response = get_openAI_tags(image_urls)
    return {
        "colors": {
            "main": mainColour,
            "sub": subColour,
            "score": round(score * 100, 2) if score is not None else None
        },
        "attributes": common_result,
        "image_mapping": openai_parsed_response
    }
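
# Example of the dictionary shot() returns (hypothetical values):
# {
#     "colors": {"main": "white", "sub": "ivory", "score": 91.27},
#     "attributes": {"collar": "round neck", "sleeve length": "short sleeve"},
#     "image_mapping": ["full-body", "back", "zoomed"]
# }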

@spaces.GPU
def get_colour(image_urls, category):
    # Append the category so each label reads e.g. "red clothing: women-top-shirt"
    colourLabels = [f"{colour} clothing: {category}" for colour in COLOURS_DICT]
    responses = pipe(image_urls, candidate_labels=colourLabels)
    # Take the top-scoring colour for the first image as the main colour
    mainColour = responses[0][0]['label'].split(" clothing:")[0]
    if mainColour not in COLOURS_DICT:
        return None, None, None
    # Build the sub-colour labels on a copy so COLOURS_DICT is not mutated in place
    labels = [f"{label} clothing: {category}" for label in COLOURS_DICT[mainColour]]
    # Run the pipeline once over all images
    responses = pipe(image_urls, candidate_labels=labels)
    subColour = responses[0][0]['label'].split(" clothing:")[0]
    return subColour, mainColour, responses[0][0]['score']
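
# With a list of images, the zero-shot pipeline returns one ranked label list
# per image, roughly (hypothetical scores):
# [[{"label": "white clothing: women-top-shirt", "score": 0.91}, ...], ...]
# which is why responses[0][0] is the best label for the first image.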

@spaces.GPU
def get_predicted_attributes(image_urls, category):
    # Attributes configured for this category
    attributes = list(ATTRIBUTES_DICT.get(category, {}).keys())
    result = {}
    for attribute in attributes:
        # Possible values for this attribute
        values = ATTRIBUTES_DICT.get(category, {}).get(attribute, [])
        if len(values) == 0:
            continue
        # Adjust labels for the pipeline to be in format: "{attr}: {value}, clothing: {category}"
        attribute = attribute.replace("colartype", "collar").replace("sleevelength", "sleeve length").replace("fabricstyle", "fabric")
        labels = [f"{attribute}: {value}, clothing: {category}" for value in values]
        # Top predicted value per image, e.g. "collar: round neck"
        responses = pipe(image_urls, candidate_labels=labels)
        predictions = [response[0]['label'].split(", clothing:")[0] for response in responses]
        # For "details", also keep each image's second guess and report the two most common labels
        if attribute == "details":
            predictions += [response[1]['label'].split(", clothing:")[0] for response in responses]
            top = Counter(predictions).most_common(2)
        else:
            top = Counter(predictions).most_common(1)
        # Strip the "{attr}: " prefix and join the winning values into one string
        result[attribute] = ", ".join(label.split(": ", 1)[1] for label, _ in top)
    return result
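
# get_predicted_attributes() returns something like (hypothetical values):
# {"collar": "round neck", "sleeve length": "short sleeve", "details": "lace, ruffle"}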

def get_openAI_tags(image_urls):
    # Create list containing JSONs of each image URL
    imageList = []
    for image in image_urls:
        imageList.append({"type": "image_url", "image_url": {"url": image}})
    openai_response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {
                "role": "system",
                "content": [
                    {
                        "type": "text",
                        "text": "You're a tagging assistant, you will help label and tag product pictures for my online e-commerce platform. Your tasks will be to return which angle the product images were taken from. You will have to choose from 'full-body', 'half-body', 'side', 'back', or 'zoomed' angles. You should label each of the images with one of these labels depending on which you think fits best (ideally, every label should be used at least once, but only if there are 5 or more images), and should respond with nothing but the labels separated by a comma in the order of the images without any other text. You should label every picture, no more, no less."
                    }
                ]
            },
            {
                "role": "user",
                "content": imageList
            },
        ],
        temperature=1,
        max_tokens=500,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    # The prompt asks for plain comma-separated labels, so split the reply
    # rather than JSON-parsing it
    return [label.strip() for label in openai_response.choices[0].message.content.split(",")]

# Define the Gradio interface with the updated components
iface = gr.Interface(
    fn=shot,
    inputs=[
        gr.Textbox(label="Image URLs (starting with http/https), comma separated"),
        gr.Textbox(label="Category")
    ],
    outputs="text",
    examples=[
        [['https://d2q1sfov6ca7my.cloudfront.net/eyJidWNrZXQiOiAiaGljY3VwLWltYWdlLWhvc3RpbmciLCAia2V5IjogIlc4MDAwMDAwMTM0LU9SL1c4MDAwMDAwMTM0LU9SLTEuanBnIiwgImVkaXRzIjogeyJyZXNpemUiOiB7IndpZHRoIjogODAwLCAiaGVpZ2h0IjogMTIwMC4wLCAiZml0IjogIm91dHNpZGUifX19',
          'https://d2q1sfov6ca7my.cloudfront.net/eyJidWNrZXQiOiAiaGljY3VwLWltYWdlLWhvc3RpbmciLCAia2V5IjogIlc4MDAwMDAwMTM0LU9SL1c4MDAwMDAwMTM0LU9SLTIuanBnIiwgImVkaXRzIjogeyJyZXNpemUiOiB7IndpZHRoIjogODAwLCAiaGVpZ2h0IjogMTIwMC4wLCAiZml0IjogIm91dHNpZGUifX19',
          'https://d2q1sfov6ca7my.cloudfront.net/eyJidWNrZXQiOiAiaGljY3VwLWltYWdlLWhvc3RpbmciLCAia2V5IjogIlc4MDAwMDAwMTM0LU9SL1c4MDAwMDAwMTM0LU9SLTMuanBnIiwgImVkaXRzIjogeyJyZXNpemUiOiB7IndpZHRoIjogODAwLCAiaGVpZ2h0IjogMTIwMC4wLCAiZml0IjogIm91dHNpZGUifX19'], "women-top-shirt"]],
    description="Provide a list of product image URLs (starting with http/https) separated by commas, plus the product category.",
    title="Full product flow"
)

# Launch the interface
iface.launch()