Upload app.py
Browse files
app.py
ADDED
@@ -0,0 +1,200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import random

import cv2
import matplotlib.pyplot as plt
import numpy as np
import readability
import streamlit as st
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq
from PIL import Image, ImageFilter
from textblob import TextBlob
11 |
+
|
# Initialize the ChatGroq model used by every chain in this app.
# SECURITY FIX: the API key was previously hard-coded in source (and is now
# public — it must be revoked/rotated). Read it from the environment instead;
# set GROQ_API_KEY before launching the app.
chat = ChatGroq(
    temperature=0.7,
    model="llama3-70b-8192",
    api_key=os.environ.get("GROQ_API_KEY"),
)
# Prompt template for the assistant: a fixed system instruction describing the
# optimization task, plus a human message carrying the user's post text.
system = """
You are a helpful assistant specialized in social media optimization. When given a post and an image, suggest relevant keywords and tags to maximize engagement, regenerate the post incorporating those suggestions, categorize the post, provide the best posting time based on social media trends, and offer content improvement tips. Also, add relevant emojis and image enhancement suggestions.
"""
human = """
Post: {text}
"""

# Assemble the (role, template) pairs first, then build the prompt object.
_message_pairs = [("system", system), ("human", human)]
prompt = ChatPromptTemplate.from_messages(_message_pairs)
# Chain that pipes the shared prompt into the Groq chat model for text posts.
chain_text = prompt | chat


def chat_with_groq_text(user_message):
    """Run the user's post through the text-optimization chain.

    Returns the model's reply as a plain string.
    """
    result = chain_text.invoke({"text": user_message})
    return result.content
# Chain for image-aware optimization (same prompt/model as the text chain).
chain_image = prompt | chat


def chat_with_groq_image(user_message, image_features):
    """Optimize the post while letting the model see the measured image metrics.

    BUG FIX: the prompt template only references {text}, so passing the image
    features as extra template variables (`**image_features`) silently dropped
    them — the model never saw them. They are now rendered into the post text
    itself so they actually reach the LLM.
    """
    feature_lines = "\n".join(
        f"- {name}: {value}" for name, value in image_features.items()
    )
    message = f"{user_message}\n\nImage features:\n{feature_lines}"
    response = chain_image.invoke({"text": message})
    return response.content
def predict_engagement():
    """Return a simulated (likes, comments, reposts) triple.

    The values are random placeholders standing in for a real engagement
    model; each count is drawn uniformly from a fixed range.
    """
    bounds = [(20, 200), (2, 20), (1, 13)]
    likes, comments, reposts = (random.randint(lo, hi) for lo, hi in bounds)
    return likes, comments, reposts
def plot_engagement(likes, comments, reposts):
    """Build a bar chart of the predicted engagement counts.

    Returns the matplotlib Figure so the caller can hand it to st.pyplot.
    """
    categories = ['Likes', 'Comments', 'Reposts']
    counts = [likes, comments, reposts]
    bar_colors = ['#ff9999','#66b3ff','#99ff99']

    fig, ax = plt.subplots()
    ax.bar(categories, counts, color=bar_colors)
    # Annotate each bar with its count, floated slightly above the bar top.
    for position, count in enumerate(counts):
        ax.text(position, count + 10, str(count), color='black', ha='center')
    ax.set_ylabel('Count')
    return fig
def analyze_sentiment(text):
    """Classify text as "Positive", "Negative", or "Neutral".

    Uses TextBlob's polarity score: > 0 positive, < 0 negative, 0 neutral.
    """
    polarity = TextBlob(text).sentiment.polarity
    if polarity == 0:
        return "Neutral"
    return "Positive" if polarity > 0 else "Negative"
def calculate_readability(text):
    """Return the Flesch Reading Ease score for text (higher = easier to read)."""
    measures = readability.getmeasures(text, lang='en')
    grades = measures['readability grades']
    return grades['FleschReadingEase']
def analyze_image(image):
    """Compute simple quality metrics for a PIL image via OpenCV/NumPy.

    Returns a dict with:
      brightness - mean of all RGB values
      contrast   - std dev of the grayscale image
      sharpness  - variance of the Laplacian (low values suggest blur)
      mean_color - per-channel RGB mean
      noise      - global RGB std dev (a rough proxy, not denoise-aware)
    """
    rgb = np.array(image.convert('RGB'))
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)

    # Edge response via the Laplacian; its variance is a standard blur metric.
    laplacian = cv2.Laplacian(gray, cv2.CV_64F)

    return {
        "brightness": np.mean(rgb),
        "contrast": np.std(gray),
        "sharpness": np.var(laplacian),
        "mean_color": np.mean(rgb, axis=(0, 1)),
        "noise": np.std(rgb),
    }
def generate_image_suggestions(image_features):
    """Translate measured image metrics into human-readable enhancement tips.

    Each check pairs a message with its trigger condition; thresholds are
    heuristic and may need tuning per platform.
    """
    checks = [
        ("Brightness is low. Consider increasing it.",
         image_features['brightness'] < 100),
        ("Contrast is low. Consider increasing it.",
         image_features['contrast'] < 30),
        ("Sharpness is low. Consider increasing it.",
         image_features['sharpness'] < 100),
        ("Image may be noisy. Consider reducing noise.",
         image_features['noise'] > 50),
    ]
    return [message for message, triggered in checks if triggered]
def optimize_post_and_image(user_input, image):
    """Run the full optimization pipeline and render every section to Streamlit.

    Side effects only: writes markdown, a chart, and images to the page.
    """
    # --- Text post optimization ---
    optimized_text = chat_with_groq_text(user_input)
    st.markdown("### Optimized Post and Suggestions:")
    st.markdown(f"<div style='font-size:20px;'>{optimized_text}</div>", unsafe_allow_html=True)

    # --- Image-aware optimization and derived metrics ---
    image_features = analyze_image(image)
    optimized_with_image = chat_with_groq_image(user_input, image_features)
    likes, comments, reposts = predict_engagement()
    sentiment = analyze_sentiment(optimized_with_image)
    readability_score = calculate_readability(optimized_with_image)
    char_count = len(optimized_with_image)

    st.markdown("### Engagement Prediction:")
    for line in (
        f"**Predicted Likes:** {likes}",
        f"**Predicted Comments:** {comments}",
        f"**Predicted Reposts:** {reposts}",
        f"**Sentiment:** {sentiment}",
        f"**Readability Score:** {readability_score:.2f}",
        f"**Character Count:** {char_count}",
    ):
        st.markdown(line)

    st.pyplot(plot_engagement(likes, comments, reposts))

    # --- Original image and its measured parameters ---
    st.markdown("### Original Image:")
    st.image(image, caption='Original Image', use_column_width=True)

    st.markdown("### Image Parameters:")
    for line in (
        f"**Brightness:** {image_features['brightness']:.2f}",
        f"**Contrast:** {image_features['contrast']:.2f}",
        f"**Sharpness:** {image_features['sharpness']:.2f}",
        f"**Mean Color:** {image_features['mean_color']}",
        f"**Noise:** {image_features['noise']:.2f}",
    ):
        st.markdown(line)

    # --- Heuristic enhancement tips ---
    st.markdown("### Image Enhancement Suggestions:")
    for suggestion in generate_image_suggestions(image_features):
        st.markdown(f"- {suggestion}")
# ---- Streamlit app UI (script entry) ----
st.title("Social Media Post Optimizer")

user_input = st.text_area("Enter your post:", "")
uploaded_file = st.file_uploader("Upload an image:", type=["jpg", "jpeg", "png"])

if st.button("Optimize Post and Image"):
    if user_input and uploaded_file:
        image = Image.open(uploaded_file)
        optimize_post_and_image(user_input, image)
    else:
        # ROBUSTNESS FIX: previously a click with missing input did nothing
        # silently; tell the user exactly what is required.
        st.warning("Please enter a post and upload an image before optimizing.")

# UI enhancements: larger fonts/padding for inputs, buttons, and headings.
st.markdown(
    """
    <style>
    .stTextInput > div > div > textarea {
        padding: 10px;
        font-size: 18px;
    }
    .stButton > button {
        padding: 10px 20px;
        font-size: 18px;
    }
    .stMarkdown h1, h2, h3, h4 {
        font-size: 28px;
        margin-top: 20px;
    }
    .stMarkdown div {
        font-size: 18px;
    }
    </style>
    """,
    unsafe_allow_html=True
)
|