Spaces: Sleeping
Pragya Jatav committed on
Commit · fef7f72
1 Parent(s): 0a4f4cb
m1
Browse files
- Model_Result_Overview.py +49 -9
- Overview_data_test_panel@#prospects.xlsx +0 -0
- Streamlit_functions.py +197 -107
- __pycache__/Streamlit_functions.cpython-310.pyc +0 -0
- __pycache__/response_curves_model_quality.cpython-310.pyc +0 -0
- __pycache__/response_curves_model_quality_base.cpython-310.pyc +0 -0
- pages/1_Model_Quality.py +6 -4
- pages/2_Scenario_Planner.py +134 -81
- pages/3_Saved_Scenarios.py +3 -3
- response_curves_model_quality.py +17 -7
- response_curves_model_quality_base.py +15 -4
- summary_df.pkl +1 -1
Model_Result_Overview.py
CHANGED
@@ -4,7 +4,7 @@ import pandas as pd
 from sklearn.preprocessing import MinMaxScaler
 import pickle
 import Streamlit_functions as sf
-from utilities import load_authenticator
+from utilities import (load_authenticator)
 
 from utilities_with_panel import (set_header,
 overview_test_data_prep_panel,
@@ -23,7 +23,7 @@ import streamlit_authenticator as stauth
 import yaml
 from yaml import SafeLoader
 import time
-from datetime import datetime
+from datetime import datetime,timedelta
 
 st.set_page_config(layout='wide')
 load_local_css('styles.css')
@@ -116,14 +116,21 @@ if auth_status:
 
 # now = datetime.now()
 # us_format = now.strftime("%m/%d/%Y")
-
+
+# Define the minimum and maximum dates
+min_date,max_date = sf.get_date_range()
+# st.write(min_date,max_date)
+# min_date = datetime(2023, 1, 1)
+# max_date = datetime(2024, 12, 31)
+default_date1,default_date2 = sf.get_default_dates()
+# st.write(default_date1,default_date2)
 with col1:
-
-
+    start_date = st.date_input("Start Date: ",value=default_date1,min_value=min_date,
+                               max_value=max_date)
 
 with col2:
-
-
+    end_date = st.date_input("End Date: ",value = default_date2,min_value=min_date,
+                             max_value=max_date)
 
 # col1, col2 = st.columns(2)
 # with col1:
@@ -137,6 +144,12 @@ if auth_status:
 st.plotly_chart(fig,use_container_width=True)
 
 # Dropdown menu options
+st.markdown("<h1 style='font-size:28px;'>Change in MMM Estimated Prospect Contributions</h1>", unsafe_allow_html=True)
+# st.header("Change in MMM Estimated Prospect Contributions")
+
+
+
+
 options = [
 "Month on Month",
 "Year on Year"]
@@ -166,11 +179,38 @@ if auth_status:
 unsafe_allow_html=True
 )
 # Waterfall chart
-
+
+def get_month_year_list(start_date, end_date):
+    # Generate a range of dates from start_date to end_date with a monthly frequency
+    dates = pd.date_range(start=start_date, end=end_date, freq='MS') # 'MS' is month start frequency
+
+    # Extract month and year from each date and create a list of tuples
+    month_year_list = [(date.month, date.year) for date in dates]
+
+    return month_year_list
+def get_start_end_dates(month, year):
+    start_date = datetime(year, month, 1).date()
+
+    if month == 12:
+        end_date = datetime(year + 1, 1, 1).date() - timedelta(days=1)
+    else:
+        end_date = datetime(year, month + 1, 1).date() - timedelta(days=1)
+
+    return start_date, end_date
+
+month_year_list = get_month_year_list(start_date, end_date)
+dropdown_options = [f"{date.strftime('%B %Y')}" for date in pd.date_range(start=start_date, end=end_date, freq='MS')]
+waterfall_option = st.selectbox("Select a month:", dropdown_options)
+waterfall_date = datetime.strptime(waterfall_option, "%B %Y")
+waterfall_month = waterfall_date.month
+waterfall_year = waterfall_date.year
+waterfall_start_date, waterfall_end_date = get_start_end_dates(waterfall_month, waterfall_year)
+
+fig = sf.waterfall(waterfall_start_date,waterfall_end_date,selected_option)
 st.plotly_chart(fig,use_container_width=True)
 
 # Waterfall table
-shares_df = sf.shares_df_func(
+shares_df = sf.shares_df_func(waterfall_start_date,waterfall_end_date)
 st.table(sf.waterfall_table_func(shares_df).style.format("{:.0%}"))
 
 ## Channel Contribution Bar Chart
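A minimal sketch, not part of the diff, of what the new waterfall controls above do: the "Month YYYY" dropdown value is parsed with strptime and expanded to the first and last day of that month via get_start_end_dates. The sample selection below is made up.

from datetime import datetime, timedelta

def get_start_end_dates(month, year):
    # First day of the selected month ...
    start_date = datetime(year, month, 1).date()
    # ... and the day before the first day of the following month
    if month == 12:
        end_date = datetime(year + 1, 1, 1).date() - timedelta(days=1)
    else:
        end_date = datetime(year, month + 1, 1).date() - timedelta(days=1)
    return start_date, end_date

selected = datetime.strptime("February 2024", "%B %Y")   # example dropdown value
print(get_start_end_dates(selected.month, selected.year))
# (datetime.date(2024, 2, 1), datetime.date(2024, 2, 29))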
Overview_data_test_panel@#prospects.xlsx
CHANGED
Binary files a/Overview_data_test_panel@#prospects.xlsx and b/Overview_data_test_panel@#prospects.xlsx differ
Streamlit_functions.py
CHANGED
@@ -106,6 +106,13 @@ contribution_cols = [
 'Audio_Prospects',
 'Email_Prospects']
 
+def get_date_range():
+    return df['Date'].min(),df['Date'].max()+ timedelta(days=7)
+
+def get_default_dates():
+    return df['Date'].max()- timedelta(days=21),df['Date'].max()+ timedelta(days=6)
+
+
 def pie_charts(start_date,end_date):
 start_date = pd.to_datetime(start_date)
 end_date = pd.to_datetime(end_date)
@@ -129,7 +136,7 @@ def pie_charts(start_date,end_date):
 hoverinfo='label+percent',
 textinfo= 'label+percent',
 showlegend= False,textfont=dict(size =10),
-
+title="Distribution of Spends"
 ), 1, 1)
 
 fig.add_trace(go.Pie(labels=channels,
@@ -139,10 +146,35 @@ def pie_charts(start_date,end_date):
 textinfo= 'label+percent',
 showlegend= False,
 textfont=dict(size = 10),
-
+title = "Distribution of Prospect Contributions"
 ), 1, 2)
+# fig.update_layout(
+#     title="Distribution Of Spends And Prospect Contributions"
+# )
+
 fig.update_layout(
-title="Distribution Of Spends
+    # title="Distribution Of Spends"
+    title={
+        'text': "Distribution Of Spends And Prospects",
+        'font': {
+            'size': 24,
+            'family': 'Arial',
+            'color': 'black',
+            # 'bold': True
+        }
+    }
+
+)
+
+fig.add_annotation(
+    text=f"{start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}",
+    x=0,
+    y=1.15,
+    xref="x domain",
+    yref="y domain",
+    showarrow=False,
+    font=dict(size=18),
+    # align='left'
 )
 
 return fig
@@ -166,8 +198,29 @@ def pie_spend(start_date,end_date):
 )])
 
 # Customize the layout
-fig.update_layout(
-
+# fig.update_layout(
+#     # title="Distribution Of Spends"
+#     title={
+#         'text': "Distribution Of Spends",
+#         'font': {
+#             'size': 24,
+#             'family': 'Arial',
+#             'color': 'black',
+#             # 'bold': True
+#         }
+#     }
+
+# )
+
+fig.add_annotation(
+    text=f"Distribution Of Spends </br> {start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}",
+    x=0,
+    y=1.15,
+    xref="x domain",
+    yref="y domain",
+    showarrow=False,
+    font=dict(size=18),
+    # align='left'
 )
 
 # Show the figure
@@ -222,6 +275,8 @@ def waterfall(start_date,end_date,btn_chart):
 end_date_prev = start_date_prev +timedelta(weeks=4) +timedelta(days=-1)
 
 
+# if start_date_prev < df['Date'].min() :
+#     return "a"
 
 prev_data = df[(df['Date'] >= start_date_prev) & (df['Date'] <= end_date_prev)]
 cur_data = df[(df['Date'] >= start_date) & (df['Date'] <= end_date)]
@@ -319,8 +374,7 @@ def waterfall(start_date,end_date,btn_chart):
 # )
 else :
 fig.update_layout(
-
-,showlegend=False,
+showlegend=False,
 # plot_bgcolor='black',
 # paper_bgcolor='black',
 # font=dict(color='white'), # Changing font color to white for better contrast
@@ -453,10 +507,21 @@ def channel_contribution(start_date,end_date):
 
 # Updating layout for better visualization
 fig.update_layout(
-
+
+# title=f"Media Contribution",
 # plot_bgcolor='black',
 # paper_bgcolor='black',
 # font=dict(color='white'), # Changing font color to white for better contrast
+title=
+{
+    'text': "Media Contribution",
+    'font': {
+        'size': 28,
+        'family': 'Arial',
+        'color': 'black',
+        # 'bold': True
+    }
+},
 xaxis=dict(
 showgrid=False,
 gridcolor='gray', # Setting x-axis gridline color to gray
@@ -470,6 +535,8 @@ def channel_contribution(start_date,end_date):
 zeroline=False, # Hiding the y-axis zero line
 )
 )
+
+
 fig.add_annotation(
 text=f"{cur_data['Date'].min().strftime('%m-%d-%Y')} to {cur_data['Date'].max().strftime('%m-%d-%Y')}",
 x=0,
@@ -511,10 +578,20 @@ def chanel_spends(start_date,end_date):
 
 # Updating layout for better visualization
 fig.update_layout(
-title=f"Media Spends",
+# title=f"Media Spends",
 # plot_bgcolor='black',
 # paper_bgcolor='black',
 # font=dict(color='white'), # Changing font color to white for better contrast
+title=
+{
+    'text': "Media Spends",
+    'font': {
+        'size': 28,
+        'family': 'Arial',
+        'color': 'black',
+        # 'bold': True
+    }
+},
 xaxis=dict(
 showgrid=False,
 gridcolor='gray', # Setting x-axis gridline color to gray
@@ -634,8 +711,18 @@ def cpp(start_date,end_date):
 
 # Update layout for better visualization
 fig.update_layout(
-title=f"CPP Distribution"
-,
+# title=f"CPP Distribution"
+# ,
+title=
+{
+    'text': "CPP Distribution",
+    'font': {
+        'size': 28,
+        'family': 'Arial',
+        'color': 'black',
+        # 'bold': True
+    }
+},
 # plot_bgcolor='black',
 # paper_bgcolor='black',
 # font=dict(color='white'), # Changing font color to white for better contrast
@@ -683,12 +770,22 @@ def base_decomp():
 
 # Update layout for better visualization
 fig.update_layout(
-title=f"Base Decomposition"
+# title=f"Base Decomposition"
 # <br>{cur_data['Date'].min().strftime('%m-%d-%Y')} to {cur_data['Date'].max().strftime('%m-%d-%Y')}"
-,
+# ,
 # plot_bgcolor='black',
 # paper_bgcolor='black',
 # font=dict(color='white'), # Changing font color to white for better contrast
+title=
+{
+    'text': "Base Decomposition",
+    'font': {
+        'size': 28,
+        'family': 'Arial',
+        'color': 'black',
+        # 'bold': True
+    }
+},
 xaxis=dict(
 showgrid=False,
 gridcolor='gray', # Setting x-axis gridline color to gray
@@ -703,7 +800,16 @@ def base_decomp():
 ),
 hovermode='x' # Show hover info for all lines at a single point
 )
-
+fig.add_annotation(
+    text=f"{base_decomp_df['Date'].min().strftime('%m-%d-%Y')} to {(base_decomp_df['Date'].max()+timedelta(days=6)).strftime('%m-%d-%Y')}",
+    x=0,
+    y=1.15,
+    xref="x domain",
+    yref="y domain",
+    showarrow=False,
+    font=dict(size=16),
+    # align='left'
+)
 return fig
 
 def media_decomp():
@@ -776,7 +882,17 @@ def media_decomp():
 
 # Updating layout for better visualization
 fig.update_layout(
-title=f"Media Decomposition",# <br>{cur_data['Date'].min().strftime('%m-%d-%Y')} to {cur_data['Date'].max().strftime('%m-%d-%Y')}",
+# title=f"Media Decomposition",# <br>{cur_data['Date'].min().strftime('%m-%d-%Y')} to {cur_data['Date'].max().strftime('%m-%d-%Y')}",
+title=
+{
+    'text': "Media Decomposition",
+    'font': {
+        'size': 28,
+        'family': 'Arial',
+        'color': 'black',
+        # 'bold': True
+    }
+},
 # plot_bgcolor='black',
 # paper_bgcolor='black',
 # font=dict(color='white'), # Changing font color to white for better contrast
@@ -793,7 +909,16 @@ def media_decomp():
 zeroline=False, # Hiding the y-axis zero line
 )
 )
-
+fig.add_annotation(
+    text=f"{media_decomp_df['Date'].min().strftime('%m-%d-%Y')} to {(media_decomp_df['Date'].max()+timedelta(days=6)).strftime('%m-%d-%Y')}",
+    x=0,
+    y=1.15,
+    xref="x domain",
+    yref="y domain",
+    showarrow=False,
+    font=dict(size=16),
+    # align='left'
+)
 return fig
 
 def mmm_model_quality():
@@ -807,7 +932,16 @@ def mmm_model_quality():
 
 # Update layout for better visualization
 fig.update_layout(
-title=
+title={
+    'text': "Model Predicted v/s Actual Prospects",
+    'font': {
+        'size': 24,
+        'family': 'Arial',
+        'color': 'black',
+        # 'bold': True
+    }
+}
+# title=f"Model Predicted v/s Actual Prospects"
 ,
 # plot_bgcolor='black',
 # paper_bgcolor='black',
@@ -898,12 +1032,23 @@ def elasticity(media_df):
 orientation='h', # Setting the orientation to horizontal
 marker_color='rgba(75, 136, 257, 1)',
 text= media_df['coeff'].round(2),
-textposition="
+textposition="outside"
 ))
 
 # Updating layout for better visualization
 fig.update_layout(
-title=
+title={
+    'text': "Media And Baseline Elasticity",
+    'font': {
+        'size': 24,
+        'family': 'Arial',
+        'color': 'black',
+        # 'bold': True
+    }
+}
+
+,
+# title="Media And Baseline Elasticity",
 xaxis=dict(
 title="Elasticity (coefficient)",
 showgrid=True,
@@ -939,12 +1084,22 @@ def half_life(media_df):
 orientation='h', # Setting the orientation to horizontal
 marker_color='rgba(75, 136, 257, 1)',
 text= media_df['coeff'].round(2),
-textposition="
+textposition="outside"
 ))
 
 # Updating layout for better visualization
 fig.update_layout(
-title=
+title={
+    'text': "Media Half-life",
+    'font': {
+        'size': 24,
+        'family': 'Arial',
+        'color': 'black',
+        # 'bold': True
+    }
+}
+
+,
 xaxis=dict(
 title="Weeks",
 showgrid=True,
@@ -1009,7 +1164,11 @@ def model_metrics_table_func():
 # model_metrics_df.index = model_metrics_df["R-squared"]
 # model_metrics_df = model_metrics_df.drop(columns=["R-squared"])
 model_metrics_df2 = pd.DataFrame(model_metrics_df.values,columns=["R-squared","Adjusted R-squared","MAPE","AIC","BIC"] )
-
+
+# model_metrics_df2 = model_metrics_df2.round(2)
+model_metrics_df2["R-squared"] = model_metrics_df2["R-squared"].apply(lambda x: "{:.2%}".format(x))
+model_metrics_df2["Adjusted R-squared"] = model_metrics_df2["Adjusted R-squared"].apply(lambda x: "{:.2%}".format(x))
+model_metrics_df2["MAPE"] = (model_metrics_df2["MAPE"]/100).apply(lambda x: "{:.2%}".format(x))
 model_metrics_df2["AIC"] = model_metrics_df2["AIC"].round(0)
 model_metrics_df2["BIC"] = model_metrics_df2["BIC"].round(0)
 model_metrics_df2.index = [" "]
@@ -1062,16 +1221,20 @@ def scenario_spend_forecasting(delta_df,start_date,end_date):
 df_modified = delta_df.merge(key_df,on = "Channel_name",how = "inner")
 df_modified2 = df_modified.merge(data1,on = "Channels",how ="outer")
 # df_modified2["Forecasted Spends"] =( df_modified2["last_year_spends"]*(1+df_modified2["Delta_percent"]/100)).astype(int)
-df_modified2["Forecasted Spends"] =( df_modified2["last_year_spends"]*(1+df_modified2["Delta_percent"]/100)).
-
+df_modified2["Forecasted Spends"] =( df_modified2["last_year_spends"]*(1+df_modified2["Delta_percent"]/100)).apply(lambda x: "${:,.0f}".format(x))
 df_modified2.index = df_modified2["Channels"]
-df_modified2["Spend Change"] = df_modified2["Delta_percent"].
+df_modified2["Spend Change"] = (df_modified2["Delta_percent"]/100).apply(lambda x: "{:.0%}".format(x))
 # df_modified2["Forecasted Spends"] = df_modified2["Forecasted Spends"].astype(int)
-df_modified2["Last Year Spends"] = df_modified2["last_year_spends"].
+df_modified2["Last Year Spends"] = df_modified2["last_year_spends"].apply(lambda x: "${:,.0f}".format(x))
 df_modified3 = df_modified2[["Last Year Spends","Forecasted Spends","Spend Change"]].transpose()
 # df_modified2["forecasted_spends"] =
 # # df_modified = delta_percent
 # # df_modified["Optimised Spends"] = df_modified["Current Spends"]*
+df_modified3 = df_modified3[['BROADCAST TV', 'CABLE TV',
+'CONNECTED & OTT TV', 'VIDEO', 'DISPLAY PROSPECTING',
+'DISPLAY RETARGETING', 'SOCIAL PROSPECTING', 'SOCIAL RETARGETING',
+'SEARCH BRAND', 'SEARCH NON-BRAND', 'DIGITAL PARTNERS', 'AUDIO',
+'EMAIL']]
 
 return df_modified3
 
@@ -1152,96 +1315,23 @@ def scenario_spend_forecasting2(delta_df,start_date,end_date):
 
 data2["Month year"] = data2["Month"].apply(get_month_name) + ' ' +(data2["Date"].dt.year+1).astype(str)
 print(data2.columns)
-data2 = data2[['Month year' ,'
+data2 = data2[['Month year' ,'BROADCAST TV', 'CABLE TV',
 'CONNECTED & OTT TV', 'VIDEO', 'DISPLAY PROSPECTING',
 'DISPLAY RETARGETING', 'SOCIAL PROSPECTING', 'SOCIAL RETARGETING',
 'SEARCH BRAND', 'SEARCH NON-BRAND', 'DIGITAL PARTNERS', 'AUDIO',
 'EMAIL']]
-data2.columns = ['Month ','
+data2.columns = ['Month ','BROADCAST TV', 'CABLE TV',
 'CONNECTED & OTT TV', 'VIDEO', 'DISPLAY PROSPECTING',
 'DISPLAY RETARGETING', 'SOCIAL PROSPECTING', 'SOCIAL RETARGETING',
 'SEARCH BRAND', 'SEARCH NON-BRAND', 'DIGITAL PARTNERS', 'AUDIO',
 'EMAIL']
-data2['Base Data Start Date'] = data2['Base Data Start Date'].dt.date
-data2['Base Data End Date'] = data2['Base Data End Date'].dt.date
-#.transpose()
-# st.dataframe(data2)
-# st.dataframe(data1)
-# months_list = cur_data["Month"].unique()
-# data1["Channels"]=data1.index
-# df_modified = delta_df.merge(key_df,on = "Channel_name",how = "inner")
-# df_modified2 = df_modified.merge(data1,on = "Channels",how ="outer")
-# df_modified2.index = df_modified2["Channels"]
-
-# data3 = pd.DataFrame(index = data1.index)
-# for c in months_list:
-#     data3[c] = df_modified2[c]*(1+df_modified2["Delta_percent"]/100)
-
-# df1 = df_modified2[months_list].transpose()
-# df1["Metrics"] = "Last Year Spends"
-
-# data3 = data3.transpose()
-# data3 = data3.astype(int)
-# # data2.index = data2["Date_diff_months"]
-# data2.columns = ["start date","end date"]
-# data3["start date"] = data2["start date"].dt.date
-# data3["end date"] = data2["end date"].dt.date
-# data3["Month"] = data3.index
-# cols = ["Month","start date","end date",'BROADCAST TV',
-# 'CABLE TV',
-# 'CONNECTED & OTT TV',
-# 'VIDEO',
-# 'DISPLAY PROSPECTING',
-# 'DISPLAY RETARGETING',
-# 'SOCIAL PROSPECTING',
-# 'SOCIAL RETARGETING',
-# 'SEARCH BRAND',
-# 'SEARCH NON-BRAND',
-# 'DIGITAL PARTNERS',
-# 'AUDIO',
-# 'EMAIL']
-# data3["Metrics"] = "Forecasted Year Spends"
-
-# df2 = df_modified2["Delta_percent"].transpose()
-# df2["Metrics"] = "Percent Change"
-# df_modified2["last_year_spends"] =
-
-# data3 = pd.DataFrame(index = data1.index)
-# for c in months_list:
-#     for idx in data3.index:
-#         data3[c][idx] = df_modified2[c][idx]*df_modified2["Delta_percent"]
-
 
-# data1 = data1[['Date',"Date2",'BROADCAST TV',
-# 'CABLE TV',
-# 'CONNECTED & OTT TV',
-# 'VIDEO','DISPLAY PROSPECTING',
-# 'DISPLAY RETARGETING',
-# 'SOCIAL PROSPECTING',
-# 'SOCIAL RETARGETING',
-# 'SEARCH BRAND',
-# 'SEARCH NON-BRAND',
-# 'DIGITAL PARTNERS',
-# 'AUDIO',
-# 'EMAIL',
-# ]]
-# data1[channels] = data1[channels].astype(int)
-# data1["Date"] = data1["Date"].dt.date
-# data1["Date2"] = data1["Date2"].dt.date
-# # pd.DataFrame(cur_data[channels].groupby("Date_diff_months").sum().transpose()).reset_index()
-# # # data1.columns = ["Channels","last_year_spends"]
-
-# # df_modified = delta_df.merge(key_df,on = "Channel_name",how = "inner")
-# # df_modified2 = df_modified.merge(data1,on = "Channels",how ="outer")
-# # df_modified2["Forecasted Spends"] =( df_modified2["last_year_spends"]*(1+df_modified2["Delta_percent"]/100)).apply(numerize)
-# # df_modified2.index = df_modified2["Channels"]
-# # df_modified2["Spend Change"] = df_modified2["Delta_percent"]
-# # df_modified2["Last Year Spends"] = df_modified2["last_year_spends"].apply(numerize)
-# # df_modified3 = df_modified2[["Last Year Spends","Forecasted Spends","Spend Change"]].transpose()
-# # # df_modified2["forecasted_spends"] =
-# # # # df_modified = delta_percent
-# # # # df_modified["Optimised Spends"] = df_modified["Current Spends"]*
-# # spend_cols1 = pd.DataFrame(spend_cols)[0].to_list()
 data2.set_index('Month ', inplace=True)
+for c in ['BROADCAST TV', 'CABLE TV',
+'CONNECTED & OTT TV', 'VIDEO', 'DISPLAY PROSPECTING',
+'DISPLAY RETARGETING', 'SOCIAL PROSPECTING', 'SOCIAL RETARGETING',
+'SEARCH BRAND', 'SEARCH NON-BRAND', 'DIGITAL PARTNERS', 'AUDIO',
+'EMAIL']:
+    data2[c] = data2[c].apply(lambda x: "${:,.0f}".format(x))
 return data2
 
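A minimal sketch, not part of the diff, of the pattern most of these Streamlit_functions.py edits repeat: a plain string title is replaced by a Plotly title dict that also pins the font, and a date-range annotation is attached above the plot area. The figure data and dates below are placeholders.

import plotly.graph_objects as go

fig = go.Figure(go.Bar(x=["Channel A", "Channel B"], y=[10, 20]))   # placeholder data
fig.update_layout(
    title={
        "text": "Media Contribution",                                # chart title
        "font": {"size": 28, "family": "Arial", "color": "black"},   # fixed title font
    }
)
fig.add_annotation(
    text="01-01-2024 to 01-28-2024",    # placeholder date range
    x=0, y=1.15, xref="x domain", yref="y domain",
    showarrow=False, font=dict(size=16),
)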
__pycache__/Streamlit_functions.cpython-310.pyc
CHANGED
Binary files a/__pycache__/Streamlit_functions.cpython-310.pyc and b/__pycache__/Streamlit_functions.cpython-310.pyc differ
__pycache__/response_curves_model_quality.cpython-310.pyc
CHANGED
Binary files a/__pycache__/response_curves_model_quality.cpython-310.pyc and b/__pycache__/response_curves_model_quality.cpython-310.pyc differ
__pycache__/response_curves_model_quality_base.cpython-310.pyc
CHANGED
Binary files a/__pycache__/response_curves_model_quality_base.cpython-310.pyc and b/__pycache__/response_curves_model_quality_base.cpython-310.pyc differ
pages/1_Model_Quality.py
CHANGED
@@ -16,14 +16,16 @@ st.plotly_chart(sf.mmm_model_quality(),use_container_width=True)
 
 media_df = sf.media_data()
 # Create two columns for start date and end date input
-col1, col2 = st.columns(
-
-st.dataframe(
+col1, col2 , col3 = st.columns([1,0.01,1])
+df1 = sf.model_metrics_table_func()
+st.dataframe(df1,hide_index = True,use_container_width=True)
 
-st.plotly_chart(sf.elasticity_and_media(media_df))
+# st.plotly_chart(sf.elasticity_and_media(media_df))
 with col1:
 st.plotly_chart(sf.elasticity(media_df))
 with col2:
+st.write("")
+with col3:
 st.plotly_chart(sf.half_life(media_df))
 
 
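A minimal sketch, not part of the diff, of the layout change above: the page moves from two columns to three, where the narrow middle column only acts as a spacer between the elasticity and half-life charts. The chart calls are placeholders.

import streamlit as st

col1, col2, col3 = st.columns([1, 0.01, 1])   # chart | thin spacer | chart
with col1:
    st.write("elasticity chart goes here")     # placeholder for st.plotly_chart(...)
with col2:
    st.write("")                               # spacer column stays empty
with col3:
    st.write("half-life chart goes here")      # placeholder for st.plotly_chart(...)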
pages/2_Scenario_Planner.py
CHANGED
@@ -48,6 +48,34 @@ for k, v in st.session_state.items():
 # ======================= Functions ====================== #
 # ======================================================== #
 
+def first_day_of_next_year(date):
+    next_year = date.year + 1
+    first_day = datetime(next_year, 1, 1).date()
+
+    # Calculate the last day of the next year
+    last_day = (first_day + relativedelta(years=1, days=-1))
+
+    return first_day, last_day
+
+def first_day_of_next_quarter(date):
+    current_quarter = (date.month - 1) // 3 + 1
+    next_quarter_first_month = ((current_quarter % 4) * 3) + 1
+    next_quarter_year = date.year if next_quarter_first_month > 1 else date.year + 1
+    # Ensure month is within valid range
+    if next_quarter_first_month < 1 or next_quarter_first_month > 12:
+        raise ValueError("Calculated month is out of range: {}".format(next_quarter_first_month))
+    # st.write(next_quarter_first_month)
+    first_day_next_quarter = datetime(next_quarter_year, next_quarter_first_month, 1).date()
+    last_day_next_quarter = (first_day_next_quarter + relativedelta(months=3)) - relativedelta(days=1)
+
+    return first_day_next_quarter, last_day_next_quarter
+
+
+def first_day_of_next_month(date):
+    next_month_date = date + relativedelta(months=1)
+    first_day_next_month = next_month_date.replace(day=1)
+    last_day_next_month = (first_day_next_month + relativedelta(months=1)) - relativedelta(days=1)
+    return first_day_next_month, last_day_next_month
 
 def optimize(key, status_placeholder):
 """
@@ -984,6 +1012,9 @@ if auth_status == True:
 )
 st.session_state["initialized"] = True
 st.session_state["first_time"] = False
+save_scenario("current scenario")
+
+
 
 # initialize_data(
 # panel=panel_selected,
@@ -1894,7 +1925,14 @@ if auth_status == True:
 scenario_planner_plots()
 
 with st.expander ("View Forecasted spends"):
-
+# st.write("Select Time Period")
+
+options = ["Next Month","Next Quarter","Next Year","Custom Time Period"]
+
+# # Create the radio button
+forecast_btn_op = st.radio("Select Time Period", options)
+
+
 # List of 12 months
 months_start = ["January", "February", "March", "April", "May", "June",
 "July", "August", "September", "October", "November", "December"]
@@ -1902,74 +1940,85 @@ if auth_status == True:
 months_end = ["January", "February", "March", "April", "May", "June",
 "July", "August", "September", "October", "November", "December"]
 years_end = range(2022,2025)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
 
-
-
-
-with col2:
-st.write ("Select End Time Period")
-ec1,ec2 = st.columns([1,1])
-with ec1:
-end_date_mon = st.selectbox("Select End Month:", months_end,index=1)
-with ec2:
-end_date_year = st.selectbox("Select End Year:", years_end,index=2)
-end_date1 = date(end_date_year, months_end.index(end_date_mon)+2, 1)- relativedelta(years=1)
-# st.write(end_date1)
-# default_Month = "February"
-# end_date_mon = st.text_input("Select End Month: ",value=default_Month)
 
-
-
-
-
-
-
-
-
-
-
-end_date1 = current_date + relativedelta(months=1)- relativedelta(years=1)
-with c2:
-if st.button('Next Quarter'):
-start_date1 = current_date- relativedelta(years=1)
-end_date1 = current_date + relativedelta(months = 3)- relativedelta(years=1)
-with c3:
-if st.button('Next Year'):
-start_date1 = current_date- relativedelta(years=1)
-end_date1 = current_date + relativedelta(months = 12)- relativedelta(years=1)
-
-forecasted_table_df = sf.scenario_spend_forecasting(summary_df_sorted,start_date1,end_date1)
-st.dataframe(forecasted_table_df)
-
-forecasted_table_df2 = sf.scenario_spend_forecasting2(summary_df_sorted,start_date1,end_date1)
-st.dataframe(forecasted_table_df2)
-
-st.markdown("""
-<style>
-.yellow-container {
-background-color: #FFFF99;
-border: 1px solid #FFD700;
-padding: 10px;
-border-radius: 5px;
-margin-bottom: 10px;
-}
-</style>
-""", unsafe_allow_html=True)
-
+
+if forecast_btn_op == "Custom Time Period":
+col1, col2, col3 = st.columns([1,1,0.75])
+
+with col1:
+from datetime import date
+st.write ("Select Start Time Period")
+sc1,sc2 = st.columns([1,1])
+
+with sc1:
+# Create a dropdown (selectbox) for months
+start_date_mon = st.selectbox("Select Start Month:", months_start)
+with sc2:
+start_date_year = st.selectbox("Select Start Year:", years_start,index=2)
+start_date1 = date(start_date_year, months_start.index(start_date_mon)+1, 1)
+# - relativedelta(years=1)
+# st.write(start_date1)
+# default_Month = "January"
+# start_date_mon = st.text_input("Select Start Month: ",value=default_Month)
+
+# default_Year = 2024
+# start_date_year = st.number_input("Select Start Year: ",value=default_Year)
+
+with col2:
+st.write ("Select End Time Period")
+ec1,ec2 = st.columns([1,1])
+with ec1:
+end_date_mon = st.selectbox("Select End Month:", months_end,index=1)
+with ec2:
+end_date_year = st.selectbox("Select End Year:", years_end,index=2)
+end_date1 = date(end_date_year, months_end.index(end_date_mon)+1, 1)+ relativedelta(months=1) - relativedelta(days=1)
+# - relativedelta(years=1)
+# st.write(end_date1)
+# default_Month = "February"
+# end_date_mon = st.text_input("Select End Month: ",value=default_Month)
+
+# default_Year = 2024
+# end_date_year = st.number_input("Select End Year: ",value=default_Year)
+# end_date1 = st.date_input("Select End Date: ",value=default_date) - relativedelta(years=1)
+elif forecast_btn_op == 'Next Month':
+# current_date = datetime.now()
+# start_date1 = current_date- relativedelta(years=1)
+# end_date1 = current_date + relativedelta(months=1)- relativedelta(years=1)
+start_date1,end_date1 = first_day_of_next_month(datetime.now())
+# start_date1 = start_date1- relativedelta(years=1)
+# end_date1 = end_date1 - relativedelta(years=1)
+elif forecast_btn_op == 'Next Quarter':
+# current_date = datetime.now()
+# start_date1 = current_date- relativedelta(years=1)
+# end_date1 = current_date + relativedelta(months = 3)- relativedelta(years=1)
+start_date1,end_date1 = first_day_of_next_quarter(datetime.now())
+# start_date1 = start_date1- relativedelta(years=1)
+# end_date1 = end_date1 - relativedelta(years=1)
+elif forecast_btn_op == 'Next Year':
+# current_date = datetime.now()
+# start_date1 = current_date- relativedelta(years=1)
+# end_date1 = current_date + relativedelta(months = 12)- relativedelta(years=1)
+start_date1,end_date1 = first_day_of_next_year(datetime.now())
+# start_date1 = start_date1- relativedelta(years=1)
+# end_date1 = end_date1 - relativedelta(years=1)
+st.write(f"Forecasted Spends Time Period : {start_date1.strftime('%m-%d-%Y')} to {end_date1.strftime('%m-%d-%Y')}")
+if end_date1 < start_date1 :
+st.error("End date cannot be less than start date")
+forecasted_table_df2 = pd.DataFrame()
+try:
+
+st.write("Forecasted Spends wrt. Channels ")
+forecasted_table_df = sf.scenario_spend_forecasting(summary_df_sorted,start_date1- relativedelta(years=1),end_date1- relativedelta(years=1))
+
+forecasted_table_df2 = sf.scenario_spend_forecasting2(summary_df_sorted,start_date1- relativedelta(years=1),end_date1- relativedelta(years=1))
+st.dataframe(forecasted_table_df)
+
+st.write("Monthly Breakdown Of Forecasted Spends wrt. Channels ")
+st.dataframe(forecasted_table_df2)
+
+
+except:
+st.warning("Please make sure the base data is updated")
+
 def save_report_forecast(forecasted_table_df,forecasted_table_df2):
 
 # Convert the DataFrame to an Excel file in memory
@@ -1980,24 +2029,28 @@ if auth_status == True:
 # Seek to the beginning of the BytesIO buffer
 excel_file.seek(0)
 return excel_file
-
+
 st.subheader("Download Report")
 report_name = st.text_input(
-
-
-
-
-
-
+"Report name",
+key="report_input",
+placeholder="Report name",
+label_visibility="collapsed",
+)
 
+
 st.download_button(
-
-
-
-
-
-
-
+"Download Report",
+data = save_report_forecast(forecasted_table_df,forecasted_table_df2),
+file_name = report_name+".xlsx",
+mime="application/vnd.ms-excel",
+# on_click=lambda: save_report_forecast(forecasted_table_df,report_name),
+disabled=len(st.session_state["report_input"]) == 0,#use_container_width=True
+)
+
+
+
+
 
 # filename = st.text_input("Save Report: ",placeholder="Report name")
 # if st.button("Download Report",disabled= (filename != "Report name")):
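A minimal sketch, not part of the diff, of the new "next period" helpers used by the forecasting expander above; the planner then shifts the chosen window back one year (relativedelta(years=1)) before querying last year's spends. The function body is copied from the hunk; the reference date is made up.

from datetime import datetime
from dateutil.relativedelta import relativedelta

def first_day_of_next_quarter(date):
    # Quarter of the reference date (1-4), then the first month of the next quarter
    current_quarter = (date.month - 1) // 3 + 1
    next_quarter_first_month = ((current_quarter % 4) * 3) + 1
    next_quarter_year = date.year if next_quarter_first_month > 1 else date.year + 1
    first_day = datetime(next_quarter_year, next_quarter_first_month, 1).date()
    last_day = (first_day + relativedelta(months=3)) - relativedelta(days=1)
    return first_day, last_day

print(first_day_of_next_quarter(datetime(2024, 5, 15)))
# (datetime.date(2024, 7, 1), datetime.date(2024, 9, 30))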
pages/3_Saved_Scenarios.py
CHANGED
@@ -109,7 +109,7 @@ def plot_comparison_chart(df,metric):
 y=df[column],
 name=column,
 text=df[column].apply(numerize), # Adding text for each point
-textposition='
+textposition='auto',
 hoverinfo='x+y+text',
 ))
 
@@ -465,8 +465,8 @@
 ]).to_html(),unsafe_allow_html=True)
 st.markdown("<br><br>", unsafe_allow_html=True)
 
-with st.expander('Scenario
-st.header("Scenario
+with st.expander('Scenario Comparison'):
+st.header("Scenario Comparison")
 if len(scenarios_to_compare)== 0:
 st.write("")
 else:
response_curves_model_quality.py
CHANGED
@@ -434,7 +434,7 @@ for i in range(1,13):
 plotly_data = plotly_data.merge(pdf,on = ["Date","MAT"],how = "left")
 
 def response_curves(channel,x_modified,y_modified):
-
+
 # Initialize the Plotly figure
 fig = go.Figure()
 
@@ -447,10 +447,10 @@ def response_curves(channel,x_modified,y_modified):
 # mode='markers',
 # name=x_col.replace('_Spends', '')
 # ))
-
+plotly_data1 = plotly_data[plotly_data["MAT"]!="ext"]
 fig.add_trace(go.Scatter(
-x=
-y=
+x=plotly_data1.sort_values(by=x_col, ascending=True)[x_col],
+y=plotly_data1.sort_values(by=x_col, ascending=True)[y_col],
 mode='lines',
 marker=dict(color = 'blue'),
 name=x_col.replace('_Spends', '')
@@ -458,10 +458,11 @@ def response_curves(channel,x_modified,y_modified):
 
 plotly_data2 = plotly_data.copy()
 plotly_data2 = plotly_data[plotly_data[x_col].isnull()==False]
+plotly_data2 = plotly_data2[plotly_data2["MAT"]!="ext"]
 # .dropna(subset=[x_col]).reset_index(inplace = True)
 fig.add_trace(go.Scatter(
-x=plotly_data2[
-y=plotly_data2[
+x=np.array(plotly_data2[x_col].mean()),
+y=np.array(plotly_data2[y_col].mean()),
 mode='markers',
 marker=dict(
 size=13 # Adjust the size value to make the markers larger or smaller
@@ -483,7 +484,16 @@ def response_curves(channel,x_modified,y_modified):
 
 # Update layout with titles
 fig.update_layout(
-
+title={
+    'text': channel_name_formating(channel)+' Response Curve',
+    'font': {
+        'size': 24,
+        'family': 'Arial',
+        'color': 'black',
+        # 'bold': True
+    }
+},
+# title=channel_name_formating(channel)+' Response Curve',
 xaxis_title='Weekly Spends',
 yaxis_title='Prospects'
 )
response_curves_model_quality_base.py
CHANGED
@@ -244,23 +244,34 @@ def response_curves(channel,chart_typ):
 
 
 plotly_data2 = plotly_data[plotly_data[x_col].isnull()==False]
+plotly_data2 = plotly_data2[plotly_data2["MAT"]!="ext"]
+# print(plotly_data2[x_col].mean(),plotly_data2[y_col].mean())
 # import steamlit as st
 # st.dataframe()
 fig.add_trace(go.Scatter(
-x=plotly_data2[
-y=plotly_data2[
+x=np.array(plotly_data2[x_col].mean()),
+y=np.array(plotly_data2['Fit_Data_'+channel].mean()),
 mode='markers',
 marker=dict(
 size=13 # Adjust the size value to make the markers larger or smaller
 , color = 'green'
 ),
-name="
+name="Average Weekly Spends"
 ))
 
 # Update layout with titles
 fig.update_layout(
 width=700, height=500,
-
+title={
+    'text': channel_name_formating(channel)+' Response Curve',
+    'font': {
+        'size': 24,
+        'family': 'Arial',
+        'color': 'black',
+        # 'bold': True
+    }
+},
+# title=channel_name_formating(channel)+' Response Curve',
 xaxis_title='Weekly Spends',
 yaxis_title='Prospects'
 )
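A minimal sketch, not part of the diff, of the idea both response-curve modules now apply: drop the extrapolated rows (MAT == "ext") before plotting and overlay a single marker at the mean weekly spend. Column names and data below are made up; the commit wraps the means in np.array(...), while a one-element list is used here for simplicity.

import pandas as pd
import plotly.graph_objects as go

df = pd.DataFrame({
    "Spends":    [100, 200, 300, 400],
    "Prospects": [10, 18, 24, 28],
    "MAT":       ["2023", "2023", "2024", "ext"],   # "ext" marks extrapolated points
})
curve = df[df["MAT"] != "ext"].sort_values("Spends")  # keep only observed points

fig = go.Figure()
fig.add_trace(go.Scatter(x=curve["Spends"], y=curve["Prospects"], mode="lines"))
fig.add_trace(go.Scatter(
    x=[curve["Spends"].mean()], y=[curve["Prospects"].mean()],
    mode="markers", marker=dict(size=13, color="green"),
    name="Average Weekly Spends",
))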
summary_df.pkl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ea149846bb9c8341d1a26a5e74dfbfea9df62ce4a7bd05ff07922a9169932a5d
 size 1822