import pandas as pd
import os


def get_data_path_for_config(config_name):
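    """Return the relative path to the raw data directory for the given config name."""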
    data_dir = "../data"
    return os.path.join(data_dir, config_name)


def coalesce_columns(
    df,
):
    """Fold merge-suffixed duplicates (e.g. "Price_1", "Price_2") back into their
    base column (e.g. "Price"), then drop the suffixed columns."""
    # Base columns are those without an underscore (i.e. no merge suffix).
    columns_to_coalesce = [col for col in df.columns if "_" not in col]
    for index, row in df.iterrows():
        for col in df.columns:
            for column_to_coalesce in columns_to_coalesce:
                # Only suffixed variants of a base column are folded in;
                # the last non-NaN value encountered wins.
                if column_to_coalesce in col and "_" in col:
                    if not pd.isna(row[col]):
                        df.at[index, column_to_coalesce] = row[col]

    # Keep only the base columns, dropping the suffixed duplicates.
    combined_df = df[columns_to_coalesce]
    return combined_df


def set_home_type(cur_df, filename):
    """Tag the frame with a "Home Type" derived from slugs in the source filename."""
    if "_sfrcondo_" in filename:
        cur_df["Home Type"] = "all homes"
    elif "_sfrcondomfr_" in filename:
        cur_df["Home Type"] = "all homes plus multifamily"
    elif "_sfr_" in filename:
        cur_df["Home Type"] = "SFR"
    elif "_condo_" in filename:
        cur_df["Home Type"] = "condo/co-op"
    elif "_mfr_" in filename:
        cur_df["Home Type"] = "multifamily"

    return cur_df


def get_combined_df(data_frames, on):
    """Outer-merge a list of DataFrames on the given key columns, then coalesce
    the suffixed duplicate columns produced by the merges."""
    if not data_frames:
        raise ValueError("data_frames must contain at least one DataFrame")

    combined_df = data_frames[0]
    # Merge each remaining frame in turn; overlapping non-key columns get an
    # index-based suffix so coalesce_columns can fold them back together.
    for i in range(1, len(data_frames)):
        cur_df = data_frames[i]
        combined_df = pd.merge(
            combined_df,
            cur_df,
            on=on,
            how="outer",
            suffixes=("", "_" + str(i)),
        )

    combined_df = coalesce_columns(combined_df)

    return combined_df


def get_melted_df(
    df,
    exclude_columns,
    columns_to_pivot,
    col_name,
    filename,
):
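    """Melt the wide date columns into long format; the value column is named
    after col_name, annotated with "(Smoothed)" / "(Seasonally Adjusted)" when
    the source filename carries the _sm_ / _sa_ slugs."""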
    smoothed = "_sm_" in filename
    seasonally_adjusted = "_sa_" in filename

    if smoothed:
        col_name += " (Smoothed)"
    if seasonally_adjusted:
        col_name += " (Seasonally Adjusted)"

    df = pd.melt(
        df,
        id_vars=exclude_columns,
        value_vars=columns_to_pivot,
        var_name="Date",
        value_name=col_name,
    )

    return df


def save_final_df_as_jsonl(config_name, df):
    """Write the processed DataFrame to ../processed/<config_name>.jsonl as JSON Lines."""
    processed_dir = "../processed/"

    os.makedirs(processed_dir, exist_ok=True)

    full_path = os.path.join(processed_dir, config_name + ".jsonl")

    df.to_json(full_path, orient="records", lines=True)


def handle_slug_column_mappings(
    data_frames, slug_column_mappings, exclude_columns, filename, cur_df
):
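    """If the filename matches one of the slug keys, melt cur_df using that slug's
    column name and append the result to data_frames; otherwise leave it unchanged."""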
    # Identify columns to pivot
    columns_to_pivot = [col for col in cur_df.columns if col not in exclude_columns]

    for slug, col_name in slug_column_mappings.items():
        if slug in filename:
            cur_df = get_melted_df(
                cur_df,
                exclude_columns,
                columns_to_pivot,
                col_name,
                filename,
            )

            data_frames.append(cur_df)
            break

    return data_frames
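

# Hypothetical usage sketch (an assumption, not part of the original pipeline):
# it only illustrates how the helpers above might be chained for one config.
# The config name, slug-to-column mapping, key/exclude columns, and CSV layout
# below are illustrative guesses, not the project's real values.
if __name__ == "__main__":
    config_name = "home_values"  # assumed config directory under ../data
    slug_column_mappings = {"_price_": "Price"}  # assumed slug -> value column name
    exclude_columns = ["RegionID", "RegionName", "Home Type"]  # assumed id columns

    data_frames = []
    data_dir = get_data_path_for_config(config_name)
    for filename in os.listdir(data_dir):
        if not filename.endswith(".csv"):
            continue
        cur_df = pd.read_csv(os.path.join(data_dir, filename))
        cur_df = set_home_type(cur_df, filename)
        data_frames = handle_slug_column_mappings(
            data_frames, slug_column_mappings, exclude_columns, filename, cur_df
        )

    # Outer-merge the per-file frames on region and date, then write JSONL.
    combined_df = get_combined_df(data_frames, on=["RegionID", "Date"])
    save_final_df_as_jsonl(config_name, combined_df)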