import streamlit as st
from PIL import Image
from transformers import AutoFeatureExtractor, AutoModelForImageClassification
import torch
from datetime import datetime
import openai
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import LabelEncoder
# Initialize OpenAI API key
openai.api_key = st.secrets["GPT_TOKEN"]
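# The GPT_TOKEN secret is assumed to be defined in .streamlit/secrets.toml (or in the
# Space's secrets settings); the app will fail at startup if it is missing.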
# Function to classify the car image using pre-trained model
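# Note: the model and feature extractor below are reloaded on every call; wrapping the
# loading step in a helper decorated with @st.cache_resource would avoid repeated loads
# (left as a suggestion, not applied here).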
def classify_image(image):
    try:
        # Load the model and feature extractor
        model_name = "dima806/car_models_image_detection"
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
        model = AutoModelForImageClassification.from_pretrained(model_name)

        # Preprocess the image
        inputs = feature_extractor(images=image, return_tensors="pt")

        # Perform inference
        with torch.no_grad():
            outputs = model(**inputs)

        # Get the predicted class
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()

        # Get the class label and score
        predicted_class_label = model.config.id2label[predicted_class_idx]
        score = torch.nn.functional.softmax(logits, dim=-1)[0, predicted_class_idx].item()

        # Return the top prediction
        return [{'label': predicted_class_label, 'score': score}]
    except Exception as e:
        st.error(f"Classification error: {e}")
        return None
# Function to get an overview of the car using OpenAI
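# Note: openai.ChatCompletion is the legacy (pre-1.0) openai SDK interface; with
# openai>=1.0 this call would need to be migrated to the client.chat.completions API.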
def get_car_overview(brand, model, year):
    prompt = f"Provide an overview of the following car:\nYear: {year}\nMake: {brand}\nModel: {model}\n"
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}]
    )
    return response.choices[0].message['content']
# Load and preprocess the car data once (globally for the session)
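# The CSV is expected to provide at least the columns referenced below: make, model, year,
# price, condition, trim, fuel, title_status, transmission, drive, size, type, paint_color.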
def load_car_data():
    try:
        df = pd.read_csv('CTP_Model1.csv')  # Replace with the path to your actual CSV file
        return df
    except Exception as e:
        st.error(f"Error loading CSV file: {e}")
        return None
# Preprocess car data and encode categorical features
def preprocess_car_data(df):
    label_encoders = {}

    # Encode categorical columns (make, model, trim, fuel, title_status, etc.)
    for col in ['make', 'model', 'trim', 'fuel', 'title_status', 'transmission', 'drive', 'size', 'type', 'paint_color']:
        le = LabelEncoder()
        df[col] = le.fit_transform(df[col])
        label_encoders[col] = le

    # Handle NaN values by filling them with a placeholder (e.g., -1 for categorical columns)
    df.fillna(-1, inplace=True)
    return df, label_encoders
# Calculate similarity between the classified car and entries in the CSV
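# Note: LabelEncoder.transform() raises a ValueError if the classifier's make or model
# string is not present in the CSV, so unseen vehicles will surface as an error here.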
def find_closest_car(df, label_encoders, make, model, year):
    # Encode the user-provided make and model
    make_encoded = label_encoders['make'].transform([make])[0]
    model_encoded = label_encoders['model'].transform([model])[0]

    # Create a feature vector for the classified car (make, model, year)
    classified_car_vector = np.array([make_encoded, model_encoded, year]).reshape(1, -1)

    # Prepare the data for similarity calculation
    feature_columns = ['make', 'model', 'year']
    df_feature_vectors = df[feature_columns].values

    # Handle NaN values before calculating similarity
    df_feature_vectors = np.nan_to_num(df_feature_vectors)  # Converts NaN to 0

    # Compute cosine similarity between the classified car and all entries in the CSV
    similarity_scores = cosine_similarity(classified_car_vector, df_feature_vectors)

    # Get the index of the closest match
    closest_match_idx = similarity_scores.argmax()

    # Return the closest match details
    return df.iloc[closest_match_idx]
# Streamlit App
st.title("Auto Appraise")
st.write("Upload a car image or take a picture to get its brand, model, and overview!")
# Initialize session_state image attribute if it doesn't exist
if 'image' not in st.session_state:
    st.session_state.image = None
# File uploader for image
uploaded_file = st.file_uploader("Choose a car image", type=["jpg", "jpeg", "png"])
# Camera input as an alternative (optional)
camera_image = st.camera_input("Or take a picture of the car")
# Process the image (either uploaded or from camera)
if uploaded_file is not None:
    st.write("Attempting to open uploaded file...")
    try:
        st.session_state.image = Image.open(uploaded_file)
        st.write("Image uploaded successfully.")
    except Exception as e:
        st.error(f"Error opening uploaded file: {str(e)}")
elif camera_image is not None:
    st.write("Attempting to open camera image...")
    try:
        st.session_state.image = Image.open(camera_image)
        st.write("Image captured successfully.")
    except Exception as e:
        st.error(f"Error opening camera image: {str(e)}")
# Display the processed image
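# Note: use_container_width on st.image assumes a reasonably recent Streamlit release;
# older versions only support use_column_width.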
if st.session_state.image is not None:
    st.image(st.session_state.image, caption='Processed Image', use_container_width=True)

    current_year = datetime.now().year

    # Classify the car image
    with st.spinner('Analyzing image...'):
        car_classifications = classify_image(st.session_state.image)

    if car_classifications:
        st.write("Image classification successful.")
        st.subheader("Car Classification Results:")
        # for classification in car_classifications:
        #     st.write(f"Model: {classification['label']}")
        #     st.write(f"Confidence: {classification['score'] * 100:.2f}%")

        # Separate make and model from the classification result
        # (assumes the predicted label is formatted as "<Make> <Model ...>")
        top_prediction = car_classifications[0]['label']
        make_name, model_name = top_prediction.split(' ', 1)

        col1, col2 = st.columns(2)
        col1.metric("Identified Car Make", make_name)
        col2.metric("Identified Car Model", model_name)
        # st.write(f"Identified Car Make: {make_name}")
        # st.write(f"Identified Car Model: {model_name}")

        # Find the closest match in the CSV based on the classification
        car_data = load_car_data()
        if car_data is not None:
            processed_car_data, label_encoders = preprocess_car_data(car_data)
            closest_car = find_closest_car(processed_car_data, label_encoders, make_name, model_name, current_year)

            a, b, c, d = st.columns(4)
            e, f, g = st.columns(3)
            a.metric("Year", closest_car['year'])
            b.metric("Price", closest_car['price'])
            c.metric("Condition", closest_car['condition'])
            d.metric("Fuel", closest_car['fuel'])
            e.metric("Transmission", closest_car['transmission'])
            f.metric("Drive", closest_car['drive'])
            g.metric("Type", closest_car['type'])
            # st.write("Closest match in database:")
            # st.write(f"Year: {closest_car['year']}")
            # st.write(f"Make: {label_encoders['make'].inverse_transform([closest_car['make']])[0]}")
            # st.write(f"Model: {label_encoders['model'].inverse_transform([closest_car['model']])[0]}")
            # st.write(f"Price: ${closest_car['price']}")
            # st.write(f"Condition: {closest_car['condition']}")
            # st.write(f"Fuel: {closest_car['fuel']}")
            # st.write(f"Transmission: {closest_car['transmission']}")
            # st.write(f"Drive: {closest_car['drive']}")
            # st.write(f"Type: {closest_car['type']}")

        st.divider()

        # Get additional information using GPT-3.5-turbo
        overview = get_car_overview(make_name, model_name, current_year)
        st.subheader("Car Overview:")
        st.write(overview)
    else:
        st.error("Could not classify the image. Please try again with a different image.")
else:
    st.write("Please upload an image or take a picture to proceed.")