misikoff committed
Commit 69c22e0
1 Parent(s): 9c27965

fix:update
processors/days_on_market.py ADDED
@@ -0,0 +1,107 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # In[1]:
+
+
+ import pandas as pd
+ import os
+
+ from helpers import (
+     get_combined_df,
+     save_final_df_as_jsonl,
+     handle_slug_column_mappings,
+ )
+
+
+ # In[2]:
+
+
+ DATA_DIR = "../data"
+ PROCESSED_DIR = "../processed/"
+ FACET_DIR = "days_on_market/"
+ FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)
+ FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)
+
+
+ # In[3]:
+
+
+ data_frames = []
+
+ exclude_columns = [
+     "RegionID",
+     "SizeRank",
+     "RegionName",
+     "RegionType",
+     "StateName",
+     "Home Type",
+ ]
+
+ slug_column_mappings = {
+     "_mean_listings_price_cut_amt_": "Mean Listings Price Cut Amount",
+     "_med_doz_pending_": "Median Days on Pending",
+     "_median_days_to_pending_": "Median Days to Close",
+     "_perc_listings_price_cut_": "Percent Listings Price Cut",
+ }
+
+
+ for filename in os.listdir(FULL_DATA_DIR_PATH):
+     if filename.endswith(".csv"):
+         print("processing " + filename)
+         # skip month files for now since they are redundant
+         if "month" in filename:
+             continue
+
+         cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))
+
+         if "_uc_sfrcondo_" in filename:
+             cur_df["Home Type"] = "all homes (SFR + Condo)"
+             # change column type to string
+             cur_df["RegionName"] = cur_df["RegionName"].astype(str)
+         elif "_uc_sfr_" in filename:
+             cur_df["Home Type"] = "SFR"
+
+         data_frames = handle_slug_column_mappings(
+             data_frames, slug_column_mappings, exclude_columns, filename, cur_df
+         )
+
+
+ combined_df = get_combined_df(
+     data_frames,
+     [
+         "RegionID",
+         "SizeRank",
+         "RegionName",
+         "RegionType",
+         "StateName",
+         "Home Type",
+         "Date",
+     ],
+ )
+
+ combined_df
+
+
+ # In[9]:
+
+
+ # Adjust column names
+ final_df = combined_df.rename(
+     columns={
+         "RegionID": "Region ID",
+         "SizeRank": "Size Rank",
+         "RegionName": "Region",
+         "RegionType": "Region Type",
+         "StateName": "State",
+     }
+ )
+
+ final_df
+
+
+ # In[5]:
+
+
+ save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)
+
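The helper `handle_slug_column_mappings` is imported by every processor in this commit but is not itself shown. Here is a minimal sketch of what it appears to do, inferred from its call sites and from the `get_melted_df` fragment visible in the `processors/helpers.py` hunks below; the signature matches the calls, but the body is an assumption, not the committed implementation.

# Hypothetical reconstruction of the unshown helper; details may differ.
def handle_slug_column_mappings(
    data_frames, slug_column_mappings, exclude_columns, filename, cur_df
):
    # Pick the readable column name whose slug appears in the filename,
    # melt the wide per-date columns into (Date, value) rows, and collect
    # the melted frame for get_combined_df.
    for slug, col_name in slug_column_mappings.items():
        if slug in filename:
            melted = cur_df.melt(
                id_vars=[c for c in exclude_columns if c in cur_df.columns],
                var_name="Date",
                value_name=col_name,
            )
            data_frames.append(melted)
            break
    return data_frames

If matching is first-slug-wins like this, dict insertion order matters whenever one slug is a substring of another; the mappings in the processors below are ordered accordingly.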
processors/for_sale_listings.py ADDED
@@ -0,0 +1,106 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # In[1]:
+
+
+ import pandas as pd
+ import os
+
+ from helpers import (
+     get_combined_df,
+     save_final_df_as_jsonl,
+     handle_slug_column_mappings,
+ )
+
+
+ # In[2]:
+
+
+ DATA_DIR = "../data"
+ PROCESSED_DIR = "../processed/"
+ FACET_DIR = "for_sale_listings/"
+ FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)
+ FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)
+
+
+ # In[3]:
+
+
+ exclude_columns = [
+     "RegionID",
+     "SizeRank",
+     "RegionName",
+     "RegionType",
+     "StateName",
+     "Home Type",
+ ]
+
+ slug_column_mappings = {
+     "_mlp_": "Median Listing Price",
+     "_new_listings_": "New Listings",
+     "new_pending": "New Pending",
+ }
+
+
+ data_frames = []
+
+ for filename in os.listdir(FULL_DATA_DIR_PATH):
+     if filename.endswith(".csv"):
+         print("processing " + filename)
+         cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))
+
+         # ignore monthly data for now since it is redundant
+         if "month" in filename:
+             continue
+
+         if "sfrcondo" in filename:
+             cur_df["Home Type"] = "all homes"
+         elif "sfr" in filename:
+             cur_df["Home Type"] = "SFR"
+         elif "condo" in filename:
+             cur_df["Home Type"] = "condo/co-op only"
+
+         data_frames = handle_slug_column_mappings(
+             data_frames, slug_column_mappings, exclude_columns, filename, cur_df
+         )
+
+
+ combined_df = get_combined_df(
+     data_frames,
+     [
+         "RegionID",
+         "SizeRank",
+         "RegionName",
+         "RegionType",
+         "StateName",
+         "Home Type",
+         "Date",
+     ],
+ )
+
+ combined_df
+
+
+ # In[4]:
+
+
+ # Adjust column names
+ final_df = combined_df.rename(
+     columns={
+         "RegionID": "Region ID",
+         "SizeRank": "Size Rank",
+         "RegionName": "Region",
+         "RegionType": "Region Type",
+         "StateName": "State",
+     }
+ )
+
+ final_df
+
+
+ # In[5]:
+
+
+ save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)
+
processors/helpers.py CHANGED
@@ -12,6 +12,7 @@ def coalesce_columns(
             if column_to_coalesce in col and "_" in col:
                 if not pd.isna(row[col]):
                     df.at[index, column_to_coalesce] = row[col]
+                    continue
 
     # remove columns with underscores
     combined_df = df[columns_to_coalesce]
@@ -62,6 +63,7 @@ def get_melted_df(
         var_name="Date",
         value_name=col_name,
     )
+
     return df
 
 
@@ -70,7 +72,7 @@ def save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df):
         os.makedirs(FULL_PROCESSED_DIR_PATH)
 
     final_df.to_json(
-        FULL_PROCESSED_DIR_PATH + "final.jsonl", orient="records", lines=True
+        FULL_PROCESSED_DIR_PATH + "final2.jsonl", orient="records", lines=True
    )
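`get_combined_df`, which every processor calls with a list of identifier columns, is also absent from the diff; only the `coalesce_columns` fragment above hints at its job. A rough sketch of how the two could fit together, purely an assumption from the call sites and the hunk:

# Hypothetical sketch; only the coalesce_columns fragment above is committed.
from functools import reduce

import pandas as pd


def get_combined_df(data_frames, identifier_columns):
    # Outer-merge all melted frames on the shared identifier columns.
    # Where the same value column arrives from several frames, pandas
    # emits suffixed duplicates (e.g. "Rent_x"/"Rent_y"), which
    # coalesce_columns (shown in part above) would then fold back into
    # their base columns, skipping on once a non-NA value is found
    # (the apparent role of the newly added `continue`).
    combined = reduce(
        lambda left, right: pd.merge(left, right, on=identifier_columns, how="outer"),
        data_frames,
    )
    return combined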
processors/home_value_forecasts.py ADDED
@@ -0,0 +1,96 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # In[1]:
+
+
+ import pandas as pd
+ import os
+
+ from helpers import get_combined_df, save_final_df_as_jsonl
+
+
+ # In[2]:
+
+
+ DATA_DIR = "../data/"
+ PROCESSED_DIR = "../processed/"
+ FACET_DIR = "home_values_forecasts/"
+ FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)
+ FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)
+
+
+ # In[3]:
+
+
+ data_frames = []
+
+ for filename in os.listdir(FULL_DATA_DIR_PATH):
+     if filename.endswith(".csv"):
+         print("processing " + filename)
+         cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))
+
+         cols = ["Month Over Month %", "Quarter Over Quarter %", "Year Over Year %"]
+         if filename.endswith("sm_sa_month.csv"):
+             # print('Smoothed')
+             cur_df.columns = list(cur_df.columns[:-3]) + [
+                 x + " (Smoothed) (Seasonally Adjusted)" for x in cols
+             ]
+         else:
+             # print('Raw')
+             cur_df.columns = list(cur_df.columns[:-3]) + cols
+             cur_df["RegionName"] = cur_df["RegionName"].astype(str)
+
+         data_frames.append(cur_df)
+
+
+ combined_df = get_combined_df(
+     data_frames,
+     [
+         "RegionID",
+         "RegionType",
+         "SizeRank",
+         "StateName",
+         "BaseDate",
+     ],
+ )
+
+ combined_df
+
+
+ # In[4]:
+
+
+ # Adjust columns
+ final_df = combined_df
+ final_df = combined_df.drop("StateName", axis=1)
+ final_df = final_df.rename(
+     columns={
+         "CountyName": "County",
+         "BaseDate": "Date",
+         "RegionName": "Region",
+         "RegionID": "Region ID",
+         "SizeRank": "Size Rank",
+     }
+ )
+
+ # iterate over rows of final_df and populate State and City columns if the RegionType is msa
+ for index, row in final_df.iterrows():
+     if row["RegionType"] == "msa":
+         regionName = row["Region"]
+         # final_df.at[index, 'Metro'] = regionName
+
+         city = regionName.split(", ")[0]
+         final_df.at[index, "City"] = city
+
+         state = regionName.split(", ")[1]
+         final_df.at[index, "State"] = state
+
+ final_df
+
+
+ # In[9]:
+
+
+ save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)
+
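The iterrows pass above splits the "City, ST" region names one row at a time; the same effect is available in one vectorized step. A sketch of the equivalent, assuming the same final_df as above:

# Vectorized equivalent of the iterrows loop above (a sketch, not the commit).
is_msa = final_df["RegionType"] == "msa"

# "Austin, TX" -> City "Austin", State "TX"
parts = final_df.loc[is_msa, "Region"].str.split(", ", n=1, expand=True)
final_df.loc[is_msa, "City"] = parts[0]
final_df.loc[is_msa, "State"] = parts[1]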
processors/home_values.py ADDED
@@ -0,0 +1,181 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # In[1]:
+
+
+ import pandas as pd
+ import os
+
+ from helpers import (
+     get_combined_df,
+     save_final_df_as_jsonl,
+     handle_slug_column_mappings,
+ )
+
+
+ # In[2]:
+
+
+ DATA_DIR = "../data"
+ PROCESSED_DIR = "../processed/"
+ FACET_DIR = "home_values/"
+ FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)
+ FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)
+
+
+ # In[5]:
+
+
+ data_frames = []
+
+ slug_column_mappings = {
+     "_tier_0.0_0.33_": "Bottom Tier ZHVI",
+     "_tier_0.33_0.67_": "Mid Tier ZHVI",
+     "_tier_0.67_1.0_": "Top Tier ZHVI",
+     "": "ZHVI",
+ }
+
+ for filename in os.listdir(FULL_DATA_DIR_PATH):
+     if filename.endswith(".csv"):
+         print("processing " + filename)
+         cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))
+         exclude_columns = [
+             "RegionID",
+             "SizeRank",
+             "RegionName",
+             "RegionType",
+             "StateName",
+             "Bedroom Count",
+             "Home Type",
+         ]
+
+         if "Zip" in filename:
+             continue
+         if "Neighborhood" in filename:
+             continue
+         if "City" in filename:
+             continue
+         if "Metro" in filename:
+             continue
+         if "County" in filename:
+             continue
+
+         if "City" in filename:
+             exclude_columns = exclude_columns + ["State", "Metro", "CountyName"]
+         elif "Zip" in filename:
+             exclude_columns = exclude_columns + [
+                 "State",
+                 "City",
+                 "Metro",
+                 "CountyName",
+             ]
+         elif "County" in filename:
+             exclude_columns = exclude_columns + [
+                 "State",
+                 "Metro",
+                 "StateCodeFIPS",
+                 "MunicipalCodeFIPS",
+             ]
+         elif "Neighborhood" in filename:
+             exclude_columns = exclude_columns + [
+                 "State",
+                 "City",
+                 "Metro",
+                 "CountyName",
+             ]
+
+         if "_bdrmcnt_1_" in filename:
+             cur_df["Bedroom Count"] = "1-Bedroom"
+         elif "_bdrmcnt_2_" in filename:
+             cur_df["Bedroom Count"] = "2-Bedrooms"
+         elif "_bdrmcnt_3_" in filename:
+             cur_df["Bedroom Count"] = "3-Bedrooms"
+         elif "_bdrmcnt_4_" in filename:
+             cur_df["Bedroom Count"] = "4-Bedrooms"
+         elif "_bdrmcnt_5_" in filename:
+             cur_df["Bedroom Count"] = "5+-Bedrooms"
+         else:
+             cur_df["Bedroom Count"] = "All Bedrooms"
+
+         if "_uc_sfr_" in filename:
+             cur_df["Home Type"] = "SFR"
+         elif "_uc_sfrcondo_" in filename:
+             cur_df["Home Type"] = "all homes (SFR/condo)"
+         elif "_uc_condo_" in filename:
+             cur_df["Home Type"] = "condo"
+
+         cur_df["StateName"] = cur_df["StateName"].astype(str)
+         cur_df["RegionName"] = cur_df["RegionName"].astype(str)
+
+         data_frames = handle_slug_column_mappings(
+             data_frames, slug_column_mappings, exclude_columns, filename, cur_df
+         )
+
+
+ combined_df = get_combined_df(
+     data_frames,
+     [
+         "RegionID",
+         "SizeRank",
+         "RegionName",
+         "RegionType",
+         "StateName",
+         "Bedroom Count",
+         "Home Type",
+         "Date",
+     ],
+ )
+
+ combined_df
+
+
+ # In[11]:
+
+
+ final_df = combined_df
+
+ for index, row in final_df.iterrows():
+     if row["RegionType"] == "city":
+         final_df.at[index, "City"] = row["RegionName"]
+     elif row["RegionType"] == "county":
+         final_df.at[index, "County"] = row["RegionName"]
+     if row["RegionType"] == "state":
+         final_df.at[index, "StateName"] = row["RegionName"]
+
+ # coalesce State and StateName columns
+ # final_df["State"] = final_df["State"].combine_first(final_df["StateName"])
+ # final_df["County"] = final_df["County"].combine_first(final_df["CountyName"])
+
+ # final_df = final_df.drop(
+ #     columns=[
+ #         "StateName",
+ #         # "CountyName"
+ #     ]
+ # )
+ final_df
+
+
+ # In[12]:
+
+
+ final_df = final_df.rename(
+     columns={
+         "RegionID": "Region ID",
+         "SizeRank": "Size Rank",
+         "RegionName": "Region",
+         "RegionType": "Region Type",
+         "StateCodeFIPS": "State Code FIPS",
+         "StateName": "State",
+         "MunicipalCodeFIPS": "Municipal Code FIPS",
+     }
+ )
+
+ final_df
+
+
+ # In[13]:
+
+
+ save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)
+
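One subtlety in the slug_column_mappings used here (and again in processors/rentals.py): the empty-string key is a substring of every filename, so it can only act as a fallback because dicts preserve insertion order and the tier slugs are tried first, assuming the first-match lookup sketched earlier. A tiny self-contained demonstration, with made-up filenames:

# First-match slug lookup with an empty-string catch-all.
slug_column_mappings = {
    "_tier_0.0_0.33_": "Bottom Tier ZHVI",
    "_tier_0.33_0.67_": "Mid Tier ZHVI",
    "_tier_0.67_1.0_": "Top Tier ZHVI",
    "": "ZHVI",  # "" matches any filename, so it must come last
}


def first_match(filename):
    return next(col for slug, col in slug_column_mappings.items() if slug in filename)


assert first_match("Metro_zhvi_tier_0.33_0.67_month.csv") == "Mid Tier ZHVI"
assert first_match("Metro_zhvi_uc_sfrcondo_month.csv") == "ZHVI"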
processors/new_construction.py ADDED
@@ -0,0 +1,101 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # In[1]:
+
+
+ import pandas as pd
+ import os
+
+ from helpers import (
+     get_combined_df,
+     save_final_df_as_jsonl,
+     handle_slug_column_mappings,
+ )
+
+
+ # In[2]:
+
+
+ DATA_DIR = "../data"
+ PROCESSED_DIR = "../processed/"
+ FACET_DIR = "new_construction/"
+ FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)
+ FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)
+
+
+ # In[3]:
+
+
+ exclude_columns = [
+     "RegionID",
+     "SizeRank",
+     "RegionName",
+     "RegionType",
+     "StateName",
+     "Home Type",
+ ]
+
+ slug_column_mappings = {
+     "_median_sale_price_per_sqft": "Median Sale Price per Sqft",
+     "_median_sale_price": "Median Sale Price",
+     "sales_count": "Sales Count",
+ }
+
+ data_frames = []
+
+ for filename in os.listdir(FULL_DATA_DIR_PATH):
+     if filename.endswith(".csv"):
+         print("processing " + filename)
+         cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))
+
+         if "sfrcondo" in filename:
+             cur_df["Home Type"] = "all homes"
+         elif "sfr" in filename:
+             cur_df["Home Type"] = "SFR"
+         elif "condo" in filename:
+             cur_df["Home Type"] = "condo/co-op only"
+
+         data_frames = handle_slug_column_mappings(
+             data_frames, slug_column_mappings, exclude_columns, filename, cur_df
+         )
+
+
+ combined_df = get_combined_df(
+     data_frames,
+     [
+         "RegionID",
+         "SizeRank",
+         "RegionName",
+         "RegionType",
+         "StateName",
+         "Home Type",
+         "Date",
+     ],
+ )
+
+ combined_df
+
+
+ # In[4]:
+
+
+ final_df = combined_df
+ final_df = final_df.rename(
+     columns={
+         "RegionID": "Region ID",
+         "SizeRank": "Size Rank",
+         "RegionName": "Region",
+         "RegionType": "Region Type",
+         "StateName": "State",
+     }
+ )
+
+ final_df.sort_values(by=["Region ID", "Home Type", "Date"])
+
+
+ # In[5]:
+
+
+ save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)
+
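Ordering matters in the mapping above as well: "_median_sale_price_per_sqft" is listed before "_median_sale_price", which is required under first-substring-wins matching because a per-sqft filename also contains the shorter slug. A quick check with a made-up filename:

# Why the per-sqft slug must be tried first (hypothetical filename).
filename = "Metro_median_sale_price_per_sqft_uc_sfrcondo_week.csv"
assert "_median_sale_price" in filename  # the shorter slug matches too
assert "_median_sale_price_per_sqft" in filename

Note also that final_df.sort_values(...) at the end of the cell returns a sorted copy for display only; the frame passed to save_final_df_as_jsonl keeps its original row order.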
processors/rentals.py ADDED
@@ -0,0 +1,160 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # In[2]:
+
+
+ import pandas as pd
+ import os
+
+ from helpers import (
+     get_combined_df,
+     save_final_df_as_jsonl,
+     handle_slug_column_mappings,
+ )
+
+
+ # In[3]:
+
+
+ DATA_DIR = "../data"
+ PROCESSED_DIR = "../processed/"
+ FACET_DIR = "rentals/"
+ FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)
+ FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)
+
+
+ # In[7]:
+
+
+ data_frames = []
+
+ slug_column_mappings = {"": "Rent"}
+
+ for filename in os.listdir(FULL_DATA_DIR_PATH):
+     if filename.endswith(".csv"):
+         # print("processing " + filename)
+         cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))
+         exclude_columns = [
+             "RegionID",
+             "SizeRank",
+             "RegionName",
+             "RegionType",
+             "StateName",
+             "Home Type",
+         ]
+
+         if "_sfrcondomfr_" in filename:
+             cur_df["Home Type"] = "all homes plus multifamily"
+             # change column type to string
+             cur_df["RegionName"] = cur_df["RegionName"].astype(str)
+             if "City" in filename:
+                 exclude_columns = [
+                     "RegionID",
+                     "SizeRank",
+                     "RegionName",
+                     "RegionType",
+                     "StateName",
+                     "Home Type",
+                     # City Specific
+                     "State",
+                     "Metro",
+                     "CountyName",
+                 ]
+             elif "Zip" in filename:
+                 exclude_columns = [
+                     "RegionID",
+                     "SizeRank",
+                     "RegionName",
+                     "RegionType",
+                     "StateName",
+                     "Home Type",
+                     # Zip Specific
+                     "State",
+                     "City",
+                     "Metro",
+                     "CountyName",
+                 ]
+             elif "County" in filename:
+                 exclude_columns = [
+                     "RegionID",
+                     "SizeRank",
+                     "RegionName",
+                     "RegionType",
+                     "StateName",
+                     "Home Type",
+                     # County Specific
+                     "State",
+                     "Metro",
+                     "StateCodeFIPS",
+                     "MunicipalCodeFIPS",
+                 ]
+
+         elif "_sfr_" in filename:
+             cur_df["Home Type"] = "SFR"
+         elif "_mfr_" in filename:
+             cur_df["Home Type"] = "multifamily"
+
+         data_frames = handle_slug_column_mappings(
+             data_frames, slug_column_mappings, exclude_columns, filename, cur_df
+         )
+
+
+ combined_df = get_combined_df(
+     data_frames,
+     [
+         "RegionID",
+         "SizeRank",
+         "RegionName",
+         "RegionType",
+         "StateName",
+         "Home Type",
+         "Date",
+     ],
+ )
+
+ combined_df
+
+
+ # In[8]:
+
+
+ final_df = combined_df
+
+ for index, row in final_df.iterrows():
+     if row["RegionType"] == "city":
+         final_df.at[index, "City"] = row["RegionName"]
+     elif row["RegionType"] == "county":
+         final_df.at[index, "County"] = row["RegionName"]
+
+ # coalesce State/StateName and County/CountyName columns
+ final_df["State"] = final_df["State"].combine_first(final_df["StateName"])
+ final_df["County"] = final_df["County"].combine_first(final_df["CountyName"])
+
+ final_df = final_df.drop(columns=["StateName", "CountyName"])
+ final_df
+
+
+ # In[6]:
+
+
+ # Adjust column names
+ final_df = final_df.rename(
+     columns={
+         "RegionID": "Region ID",
+         "SizeRank": "Size Rank",
+         "RegionName": "Region",
+         "RegionType": "Region Type",
+         "StateCodeFIPS": "State Code FIPS",
+         "MunicipalCodeFIPS": "Municipal Code FIPS",
+     }
+ )
+
+ final_df
+
+
+ # In[7]:
+
+
+ save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)
+
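combine_first is what makes the coalescing step above work: it keeps the calling Series's values and fills its missing entries from the argument, position by position. A minimal illustration:

import pandas as pd

state = pd.Series(["TX", None, "CA"])
state_name = pd.Series(["Texas", "NY", None])

# Missing slots in `state` are filled from `state_name`; existing values win.
print(state.combine_first(state_name).tolist())  # ['TX', 'NY', 'CA']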
processors/sales.py ADDED
@@ -0,0 +1,106 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # In[1]:
+
+
+ import pandas as pd
+ import os
+
+ from helpers import (
+     get_combined_df,
+     save_final_df_as_jsonl,
+     handle_slug_column_mappings,
+ )
+
+
+ # In[2]:
+
+
+ DATA_DIR = "../data"
+ PROCESSED_DIR = "../processed/"
+ FACET_DIR = "sales/"
+ FULL_DATA_DIR_PATH = os.path.join(DATA_DIR, FACET_DIR)
+ FULL_PROCESSED_DIR_PATH = os.path.join(PROCESSED_DIR, FACET_DIR)
+
+
+ # In[3]:
+
+
+ exclude_columns = [
+     "RegionID",
+     "SizeRank",
+     "RegionName",
+     "RegionType",
+     "StateName",
+     "Home Type",
+ ]
+
+ slug_column_mappings = {
+     "_median_sale_to_list_": "Median Sale to List Ratio",
+     "_mean_sale_to_list_": "Mean Sale to List Ratio",
+     "_median_sale_price_": "Median Sale Price",
+     "_pct_sold_above_list_": "% Sold Above List",
+     "_pct_sold_below_list_": "% Sold Below List",
+     "_sales_count_now_": "Nowcast",
+ }
+
+ data_frames = []
+
+ for filename in os.listdir(FULL_DATA_DIR_PATH):
+     if filename.endswith(".csv"):
+         print("processing " + filename)
+         # ignore monthly data for now since it is redundant
+         if "month" in filename:
+             continue
+
+         cur_df = pd.read_csv(os.path.join(FULL_DATA_DIR_PATH, filename))
+
+         if "_sfrcondo_" in filename:
+             cur_df["Home Type"] = "all homes"
+         elif "_sfr_" in filename:
+             cur_df["Home Type"] = "SFR"
+
+         data_frames = handle_slug_column_mappings(
+             data_frames, slug_column_mappings, exclude_columns, filename, cur_df
+         )
+
+
+ combined_df = get_combined_df(
+     data_frames,
+     [
+         "RegionID",
+         "SizeRank",
+         "RegionName",
+         "RegionType",
+         "StateName",
+         "Home Type",
+         "Date",
+     ],
+ )
+
+ combined_df
+
+
+ # In[4]:
+
+
+ # Adjust column names
+ final_df = combined_df.rename(
+     columns={
+         "RegionID": "Region ID",
+         "SizeRank": "Size Rank",
+         "RegionName": "Region",
+         "RegionType": "Region Type",
+         "StateName": "State",
+     }
+ )
+
+ final_df.sort_values(by=["Region ID", "Home Type", "Date"])
+
+
+ # In[5]:
+
+
+ save_final_df_as_jsonl(FULL_PROCESSED_DIR_PATH, final_df)
+
zillow.py CHANGED
@@ -299,7 +299,7 @@ class Zillow(datasets.GeneratorBasedBuilder):
299
  )
300
 
301
  def _split_generators(self, dl_manager):
302
- file_path = os.path.join("processed", self.config.name, "final1.jsonl")
303
  file_train = dl_manager.download(file_path)
304
  # file_test = dl_manager.download(os.path.join(self.config.name, "test.csv"))
305
  # file_eval = dl_manager.download(os.path.join(self.config.name, "valid.csv"))
 
299
  )
300
 
301
  def _split_generators(self, dl_manager):
302
+ file_path = os.path.join("processed", self.config.name, "final2.jsonl")
303
  file_train = dl_manager.download(file_path)
304
  # file_test = dl_manager.download(os.path.join(self.config.name, "test.csv"))
305
  # file_eval = dl_manager.download(os.path.join(self.config.name, "valid.csv"))