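# NBA DFS range-of-outcomes app: loads DraftKings and FanDuel projection tables
# from Google Sheets and simulates player scoring distributions for the slate.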
import streamlit as st

st.set_page_config(layout="wide")

# Clear any module-level names left over from a previous rerun, then re-import
# the libraries the app actually uses.
for name in dir():
    if not name.startswith('_'):
        del globals()[name]

import numpy as np
import pandas as pd
import streamlit as st
import gspread
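
# Google Sheets connection. The service-account credentials below are embedded
# verbatim in the source; a safer pattern (sketch, assuming a [gcp_service_account]
# table has been added to .streamlit/secrets.toml) would be:
#     credentials = dict(st.secrets["gcp_service_account"])
#     gc = gspread.service_account_from_dict(credentials)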
def init_conn():
    scope = ['https://www.googleapis.com/auth/spreadsheets',
             "https://www.googleapis.com/auth/drive"]
    credentials = {
        "type": "service_account",
        "project_id": "sheets-api-connect-378620",
        "private_key_id": "1005124050c80d085e2c5b344345715978dd9cc9",
        "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCtKa01beXwc88R\nnPZVQTNPVQuBnbwoOfc66gW3547ja/UEyIGAF112dt/VqHprRafkKGmlg55jqJNt\na4zceLKV+wTm7vBu7lDISTJfGzCf2TrxQYNqwMKE2LOjI69dBM8u4Dcb4k0wcp9v\ntW1ZzLVVuwTvmrg7JBHjiSaB+x5wxm/r3FOiJDXdlAgFlytzqgcyeZMJVKKBQHyJ\njEGg/1720A0numuOCt71w/2G0bDmijuj1e6tH32MwRWcvRNZ19K9ssyDz2S9p68s\nYDhIxX69OWxwScTIHLY6J2t8txf/XMivL/636fPlDADvBEVTdlT606n8CcKUVQeq\npUVdG+lfAgMBAAECggEAP38SUA7B69eTfRpo658ycOs3Amr0JW4H/bb1rNeAul0K\nZhwd/HnU4E07y81xQmey5kN5ZeNrD5EvqkZvSyMJHV0EEahZStwhjCfnDB/cxyix\nZ+kFhv4y9eK+kFpUAhBy5nX6T0O+2T6WvzAwbmbVsZ+X8kJyPuF9m8ldcPlD0sce\ntj8NwVq1ys52eosqs7zi2vjt+eMcaY393l4ls+vNq8Yf27cfyFw45W45CH/97/Nu\n5AmuzlCOAfFF+z4OC5g4rei4E/Qgpxa7/uom+BVfv9G0DIGW/tU6Sne0+37uoGKt\nW6DzhgtebUtoYkG7ZJ05BTXGp2lwgVcNRoPwnKJDxQKBgQDT5wYPUBDW+FHbvZSp\nd1m1UQuXyerqOTA9smFaM8sr/UraeH85DJPEIEk8qsntMBVMhvD3Pw8uIUeFNMYj\naLmZFObsL+WctepXrVo5NB6RtLB/jZYxiKMatMLUJIYtcKIp+2z/YtKiWcLnwotB\nWdCjVnPTxpkurmF2fWP/eewZ+wKBgQDRMtJg7etjvKyjYNQ5fARnCc+XsI3gkBe1\nX9oeXfhyfZFeBXWnZzN1ITgFHplDznmBdxAyYGiQdbbkdKQSghviUQ0igBvoDMYy\n1rWcy+a17Mj98uyNEfmb3X2cC6WpvOZaGHwg9+GY67BThwI3FqHIbyk6Ko09WlTX\nQpRQjMzU7QKBgAfi1iflu+q0LR+3a3vvFCiaToskmZiD7latd9AKk2ocsBd3Woy9\n+hXXecJHPOKV4oUJlJgvAZqe5HGBqEoTEK0wyPNLSQlO/9ypd+0fEnArwFHO7CMF\nycQprAKHJXM1eOOFFuZeQCaInqdPZy1UcV5Szla4UmUZWkk1m24blHzXAoGBAMcA\nyH4qdbxX9AYrC1dvsSRvgcnzytMvX05LU0uF6tzGtG0zVlub4ahvpEHCfNuy44UT\nxRWW/oFFaWjjyFxO5sWggpUqNuHEnRopg3QXx22SRRTGbN45li/+QAocTkgsiRh1\nqEcYZsO4mPCsQqAy6E2p6RcK+Xa+omxvSnVhq0x1AoGAKr8GdkCl4CF6rieLMAQ7\nLNBuuoYGaHoh8l5E2uOQpzwxVy/nMBcAv+2+KqHEzHryUv1owOi6pMLv7A9mTFoS\n18B0QRLuz5fSOsVnmldfC9fpUc6H8cH1SINZpzajqQA74bPwELJjnzrCnH79TnHG\nJuElxA33rFEjbgbzdyrE768=\n-----END PRIVATE KEY-----\n",
        "client_email": "gspread-connection@sheets-api-connect-378620.iam.gserviceaccount.com",
        "client_id": "106625872877651920064",
        "auth_uri": "https://accounts.google.com/o/oauth2/auth",
        "token_uri": "https://oauth2.googleapis.com/token",
        "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
        "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/gspread-connection%40sheets-api-connect-378620.iam.gserviceaccount.com"
    }
    gc = gspread.service_account_from_dict(credentials)
    return gc

gspreadcon = init_conn()

dk_player_url = 'https://docs.google.com/spreadsheets/d/1Yq0vGriWK-bS79e-bD6_u9pqrYE6Yrlbb_wEkmH-ot0/edit#gid=172632260'
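
# Pull every table the app needs from the workbook above: DK/FD main-slate builds,
# DK/FD secondary-slate builds, the player-level range-of-outcomes sheet, and the
# last-update timestamp. The "Load/Reset Data" buttons call st.cache_data.clear(),
# which suggests this loader is intended to be cached (e.g. with @st.cache_data),
# though no decorator appears here.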
def load_overall_stats():
    sh = gspreadcon.open_by_url(dk_player_url)

    # DraftKings main-slate build
    worksheet = sh.worksheet('DK_Build_Up')
    raw_display = pd.DataFrame(worksheet.get_all_records())
    raw_display.rename(columns={"Name": "Player", "Nickname": "Player", "Fantasy": "Median"}, inplace=True)
    raw_display.replace("", 'Welp', inplace=True)
    raw_display = raw_display.loc[raw_display['Player'] != 'Welp']
    raw_display = raw_display.loc[raw_display['Salary'] > 0]
    raw_display = raw_display.loc[raw_display['Median'] > 0]
    raw_display = raw_display.apply(pd.to_numeric, errors='ignore')
    dk_raw = raw_display.sort_values(by='Median', ascending=False)

    # FanDuel main-slate build
    worksheet = sh.worksheet('FD_Build_Up')
    raw_display = pd.DataFrame(worksheet.get_all_records())
    raw_display.rename(columns={"Name": "Player", "Nickname": "Player", "Fantasy": "Median"}, inplace=True)
    raw_display.replace("", 'Welp', inplace=True)
    raw_display = raw_display.loc[raw_display['Player'] != 'Welp']
    raw_display = raw_display.loc[raw_display['Median'] > 0]
    raw_display = raw_display.apply(pd.to_numeric, errors='ignore')
    fd_raw = raw_display.sort_values(by='Median', ascending=False)

    # DraftKings secondary-slate build
    worksheet = sh.worksheet('Secondary_DK_Build')
    raw_display = pd.DataFrame(worksheet.get_all_records())
    raw_display.rename(columns={"Name": "Player", "Nickname": "Player", "Fantasy": "Median"}, inplace=True)
    raw_display.replace("", 'Welp', inplace=True)
    raw_display = raw_display.loc[raw_display['Player'] != 'Welp']
    raw_display = raw_display.loc[raw_display['Median'] > 0]
    raw_display = raw_display.apply(pd.to_numeric, errors='ignore')
    dk_raw_sec = raw_display.sort_values(by='Median', ascending=False)

    # FanDuel secondary-slate build
    worksheet = sh.worksheet('Secondary_FD_Build')
    raw_display = pd.DataFrame(worksheet.get_all_records())
    raw_display.rename(columns={"Name": "Player", "Nickname": "Player", "Fantasy": "Median"}, inplace=True)
    raw_display.replace("", 'Welp', inplace=True)
    raw_display = raw_display.loc[raw_display['Player'] != 'Welp']
    raw_display = raw_display.loc[raw_display['Median'] > 0]
    raw_display = raw_display.apply(pd.to_numeric, errors='ignore')
    fd_raw_sec = raw_display.sort_values(by='Median', ascending=False)

    # Player-level range-of-outcomes sheet
    worksheet = sh.worksheet('Player_Level_ROO')
    raw_display = pd.DataFrame(worksheet.get_all_records())
    raw_display.replace("", 'Welp', inplace=True)
    raw_display = raw_display.loc[raw_display['Player'] != 'Welp']
    raw_display = raw_display.loc[raw_display['Median'] > 0]
    raw_display = raw_display.apply(pd.to_numeric, errors='ignore')
    roo_raw = raw_display.sort_values(by='Median', ascending=False)

    # Last-update timestamp
    worksheet = sh.worksheet('Timestamp')
    timestamp = worksheet.acell('A1').value

    return dk_raw, fd_raw, dk_raw_sec, fd_raw_sec, roo_raw, timestamp

def convert_df_to_csv(df):
    return df.to_csv().encode('utf-8')

dk_raw, fd_raw, dk_raw_sec, fd_raw_sec, roo_raw, timestamp = load_overall_stats()
t_stamp = f"Last Update: {timestamp} CST"
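
# UI layout: three tabs (projection uploads, the precomputed range of outcomes,
# and a custom range of outcomes simulated from Paydirt or user-supplied data).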
tab1, tab2, tab3 = st.tabs(['Uploads and Info', 'Range of Outcomes', 'Custom Range of Outcomes'])

with tab1:
    st.info("The Projections file can have any columns in any order, but must contain columns explicitly named: 'Player', 'Salary', 'Position', 'Team', 'Opp', 'Minutes', 'Median', 'Own'.")
    col1, col2 = st.columns([1, 5])

    with col1:
        proj_file = st.file_uploader("Upload Projections File", key='proj_uploader')
        if proj_file is not None:
            try:
                proj_dataframe = pd.read_csv(proj_file)
            except Exception:
                # read_csv advances the upload buffer, so rewind before trying Excel
                proj_file.seek(0)
                proj_dataframe = pd.read_excel(proj_file)

    with col2:
        if proj_file is not None:
            st.dataframe(proj_dataframe.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(precision=2), use_container_width=True)

with tab2:
    col1, col2 = st.columns([1, 9])

    with col1:
        st.info(t_stamp)
        if st.button("Load/Reset Data", key='reset1'):
            st.cache_data.clear()
            dk_raw, fd_raw, dk_raw_sec, fd_raw_sec, roo_raw, timestamp = load_overall_stats()
            t_stamp = f"Last Update: {timestamp} CST"
            # materialize the keys first so the state dict does not change size mid-iteration
            for key in list(st.session_state.keys()):
                del st.session_state[key]
        site_var2 = st.radio("What table would you like to display?", ('Draftkings', 'Fanduel'), key='site_var2')
        if site_var2 == 'Draftkings':
            site_baselines = roo_raw[roo_raw['Site'] == 'Draftkings']
        elif site_var2 == 'Fanduel':
            site_baselines = roo_raw[roo_raw['Site'] == 'Fanduel']
        slate_split = st.radio("Are you viewing the main slate or the secondary slate?", ('Main Slate', 'Secondary'), key='slate_split')
        if slate_split == 'Main Slate':
            raw_baselines = site_baselines[site_baselines['Slate'] == 'Main Slate']
        elif slate_split == 'Secondary':
            raw_baselines = site_baselines[site_baselines['Slate'] == 'Secondary']
        split_var2 = st.radio("Are you running the full slate or certain games?", ('Full Slate Run', 'Specific Games'), key='split_var2')
        if split_var2 == 'Specific Games':
            team_var2 = st.multiselect('Which teams would you like to include in the ROO?', options=raw_baselines['Team'].unique(), key='team_var2')
        elif split_var2 == 'Full Slate Run':
            team_var2 = raw_baselines.Team.values.tolist()
        pos_var2 = st.selectbox('View specific position?', options=['All', 'PG', 'SG', 'SF', 'PF', 'C'], key='pos_var2')

    with col2:
        display_container_1 = st.empty()
        display_dl_container_1 = st.empty()

        display_proj = raw_baselines[raw_baselines['Team'].isin(team_var2)]
        st.session_state.display_proj = display_proj

        with display_container_1:
            display_container = st.empty()
            if 'display_proj' in st.session_state:
                if pos_var2 != 'All':
                    st.session_state.display_proj = st.session_state.display_proj[st.session_state.display_proj['Position'].str.contains(pos_var2)]
                st.dataframe(st.session_state.display_proj.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(precision=2), use_container_width=True)

        with display_dl_container_1:
            display_dl_container = st.empty()
            if 'display_proj' in st.session_state:
                st.download_button(
                    label="Export Tables",
                    data=convert_df_to_csv(st.session_state.display_proj),
                    file_name='NBA_ROO_export.csv',
                    mime='text/csv',
                )

with tab3:
    col1, col2 = st.columns([1, 9])

    with col1:
        st.info(t_stamp)
        if st.button("Load/Reset Data", key='reset2'):
            st.cache_data.clear()
            dk_raw, fd_raw, dk_raw_sec, fd_raw_sec, roo_raw, timestamp = load_overall_stats()
            t_stamp = f"Last Update: {timestamp} CST"
            for key in list(st.session_state.keys()):
                del st.session_state[key]
        slate_var1 = st.radio("Which data are you loading?", ('Paydirt', 'User'), key='slate_var1')
        site_var1 = st.radio("What table would you like to display?", ('Draftkings', 'Fanduel'), key='site_var1')
        slate_split2 = st.radio("Are you viewing the main slate or the secondary slate?", ('Main Slate', 'Secondary'), key='slate_split2')
        # 'User' assumes a projections file was uploaded on the Uploads and Info tab
        if site_var1 == 'Draftkings':
            if slate_var1 == 'User':
                raw_baselines = proj_dataframe
            else:
                if slate_split2 == 'Main Slate':
                    raw_baselines = dk_raw
                elif slate_split2 == 'Secondary':
                    raw_baselines = dk_raw_sec
        elif site_var1 == 'Fanduel':
            if slate_var1 == 'User':
                raw_baselines = proj_dataframe
            else:
                if slate_split2 == 'Main Slate':
                    raw_baselines = fd_raw
                elif slate_split2 == 'Secondary':
                    raw_baselines = fd_raw_sec
        split_var1 = st.radio("Are you running the full slate or certain games?", ('Full Slate Run', 'Specific Games'), key='split_var1')
        if split_var1 == 'Specific Games':
            team_var1 = st.multiselect('Which teams would you like to include in the ROO?', options=raw_baselines['Team'].unique(), key='team_var1')
        elif split_var1 == 'Full Slate Run':
            team_var1 = raw_baselines.Team.values.tolist()
        pos_var1 = st.selectbox('View specific position?', options=['All', 'PG', 'SG', 'SF', 'PF', 'C'])

    with col2:
        display_container = st.empty()
        display_dl_container = st.empty()
        hold_container = st.empty()

        if st.button('Create Range of Outcomes for Slate'):
            with hold_container:
                working_roo = raw_baselines
                working_roo = working_roo[working_roo['Team'].isin(team_var1)]

                own_dict = dict(zip(working_roo.Player, working_roo.Own))
                min_dict = dict(zip(working_roo.Player, working_roo.Minutes))
                team_dict = dict(zip(working_roo.Player, working_roo.Team))
                total_sims = 1000

                flex_file = working_roo[['Player', 'Position', 'Salary', 'Median', 'Minutes']]
                flex_file.rename(columns={"Agg": "Median"}, inplace=True)
                flex_file['Floor'] = (flex_file['Median'] * .25) + (flex_file['Minutes'] * .25)
                flex_file['Ceiling'] = flex_file['Median'] + 10 + (flex_file['Minutes'] * .25)
                flex_file['STD'] = (flex_file['Median'] / 4)
                flex_file = flex_file[['Player', 'Position', 'Salary', 'Floor', 'Median', 'Ceiling', 'STD']]

                # hold_file, overall_file and salary_file all point at the same frame;
                # the per-sim columns added below are intentionally shared between them
                hold_file = flex_file
                overall_file = flex_file
                salary_file = flex_file
                overall_players = overall_file[['Player']]
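
                # Monte Carlo step: each of the total_sims columns is one simulated
                # slate. Salaries are repeated per sim and scaled to thousands, fantasy
                # scores are drawn from Normal(Median, STD), and each sim is then
                # ranked across players to get finishing positions.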
                for x in range(0, total_sims):
                    salary_file[x] = salary_file['Salary']
                salary_file = salary_file.drop(['Player', 'Position', 'Salary', 'Floor', 'Median', 'Ceiling', 'STD'], axis=1)
                salary_file.astype('int').dtypes
                salary_file = salary_file.div(1000)

                for x in range(0, total_sims):
                    overall_file[x] = np.random.normal(overall_file['Median'], overall_file['STD'])
                overall_file = overall_file.drop(['Player', 'Position', 'Salary', 'Floor', 'Median', 'Ceiling', 'STD'], axis=1)
                overall_file.astype('int').dtypes

                players_only = hold_file[['Player']]
                raw_lineups_file = players_only
                for x in range(0, total_sims):
                    maps_dict = {'proj_map': dict(zip(hold_file.Player, hold_file[x]))}
                    raw_lineups_file[x] = sum([raw_lineups_file['Player'].map(maps_dict['proj_map'])])
                    players_only[x] = raw_lineups_file[x].rank(ascending=False)
                players_only = players_only.drop(['Player'], axis=1)
                players_only.astype('int').dtypes
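
                # Per-sim value checks: salary_file holds salary in thousands, so
                # overall_file - salary_file * 4 is positive whenever a simulated score
                # beats 4x salary; the *5 and *6 checks and the GPP check (5x salary
                # plus 10 points) follow the same pattern.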
                salary_2x_check = (overall_file - (salary_file * 4))
                salary_3x_check = (overall_file - (salary_file * 5))
                salary_4x_check = (overall_file - (salary_file * 6))
                gpp_check = (overall_file - ((salary_file * 5) + 10))

                players_only['Average_Rank'] = players_only.mean(axis=1)
                players_only['Top_finish'] = players_only[players_only == 1].count(axis=1) / total_sims
                players_only['Top_5_finish'] = players_only[players_only <= 5].count(axis=1) / total_sims
                players_only['Top_10_finish'] = players_only[players_only <= 10].count(axis=1) / total_sims
                players_only['20+%'] = overall_file[overall_file >= 20].count(axis=1) / float(total_sims)
                players_only['3x%'] = salary_2x_check[salary_2x_check >= 1].count(axis=1) / float(total_sims)
                players_only['4x%'] = salary_3x_check[salary_3x_check >= 1].count(axis=1) / float(total_sims)
                players_only['5x%'] = salary_4x_check[salary_4x_check >= 1].count(axis=1) / float(total_sims)
                players_only['GPP%'] = salary_4x_check[gpp_check >= 1].count(axis=1) / float(total_sims)
                players_only['Player'] = hold_file[['Player']]

                final_outcomes = players_only[['Player', 'Top_finish', 'Top_5_finish', 'Top_10_finish', '20+%', '3x%', '4x%', '5x%', 'GPP%']]
                final_Proj = pd.merge(hold_file, final_outcomes, on="Player")
                final_Proj = final_Proj[['Player', 'Position', 'Salary', 'Floor', 'Median', 'Ceiling', 'Top_finish', 'Top_5_finish', 'Top_10_finish', '20+%', '3x%', '4x%', '5x%', 'GPP%']]
                final_Proj['Own'] = final_Proj['Player'].map(own_dict)
                final_Proj['Minutes Proj'] = final_Proj['Player'].map(min_dict)
                final_Proj['Team'] = final_Proj['Player'].map(team_dict)
                final_Proj['Own'] = final_Proj['Own'].astype('float')
                final_Proj['LevX'] = ((final_Proj[['Top_finish', '4x%', 'Top_5_finish']].mean(axis=1)) * 100) - final_Proj['Own']
                final_Proj['ValX'] = ((final_Proj[['4x%', '5x%']].mean(axis=1)) * 100) + final_Proj['LevX']
                final_Proj = final_Proj[['Player', 'Minutes Proj', 'Position', 'Team', 'Salary', 'Floor', 'Median', 'Ceiling', 'Top_finish', 'Top_5_finish', 'Top_10_finish', '20+%', '3x%', '4x%', '5x%', 'GPP%', 'Own', 'LevX', 'ValX']]
                final_Proj = final_Proj.set_index('Player')
                final_Proj = final_Proj.sort_values(by='Median', ascending=False)

                st.session_state.final_Proj = final_Proj
                hold_container = st.empty()

        with display_container:
            display_container = st.empty()
            if 'final_Proj' in st.session_state:
                if pos_var1 != 'All':
                    st.session_state.final_Proj = st.session_state.final_Proj[st.session_state.final_Proj['Position'].str.contains(pos_var1)]
                st.dataframe(st.session_state.final_Proj.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(precision=2), use_container_width=True)

        with display_dl_container:
            display_dl_container = st.empty()
            if 'final_Proj' in st.session_state:
                st.download_button(
                    label="Export Tables",
                    data=convert_df_to_csv(st.session_state.final_Proj),
                    file_name='Custom_NBA_export.csv',
                    mime='text/csv',
                )