misikoff committed
Commit
f2c7521
1 Parent(s): 2ab32a9
process_home_value_forecasts.ipynb CHANGED
@@ -415,7 +415,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 34,
+ "execution_count": 49,
 "metadata": {},
 "outputs": [
 {
@@ -446,7 +446,7 @@
 " <th>State</th>\n",
 " <th>City</th>\n",
 " <th>Metro</th>\n",
- " <th>CountyName</th>\n",
+ " <th>County</th>\n",
 " <th>BaseDate</th>\n",
 " <th>Month Over Month % (Smoothed)</th>\n",
 " <th>Quarter Over Quarter % (Smoothed)</th>\n",
@@ -483,7 +483,7 @@
 " <td>1</td>\n",
 " <td>NY</td>\n",
 " <td>New York</td>\n",
- " <td>New York, NY</td>\n",
+ " <td>NaN</td>\n",
 " <td>NaN</td>\n",
 " <td>2023-12-31</td>\n",
 " <td>0.2</td>\n",
@@ -501,7 +501,7 @@
 " <td>2</td>\n",
 " <td>CA</td>\n",
 " <td>Los Angeles</td>\n",
- " <td>Los Angeles, CA</td>\n",
+ " <td>NaN</td>\n",
 " <td>NaN</td>\n",
 " <td>2023-12-31</td>\n",
 " <td>-0.1</td>\n",
@@ -519,7 +519,7 @@
 " <td>3</td>\n",
 " <td>IL</td>\n",
 " <td>Chicago</td>\n",
- " <td>Chicago, IL</td>\n",
+ " <td>NaN</td>\n",
 " <td>NaN</td>\n",
 " <td>2023-12-31</td>\n",
 " <td>0.1</td>\n",
@@ -537,7 +537,7 @@
 " <td>4</td>\n",
 " <td>TX</td>\n",
 " <td>Dallas</td>\n",
- " <td>Dallas, TX</td>\n",
+ " <td>NaN</td>\n",
 " <td>NaN</td>\n",
 " <td>2023-12-31</td>\n",
 " <td>-0.1</td>\n",
@@ -674,12 +674,12 @@
 "20165 92811 79078 zip 39992 TX NaN \n",
 "20166 98183 95419 zip 39992 CA Camp Meeker \n",
 "\n",
- " Metro CountyName BaseDate \\\n",
+ " Metro County BaseDate \\\n",
 "0 NaN NaN 2023-12-31 \n",
- "1 New York, NY NaN 2023-12-31 \n",
- "2 Los Angeles, CA NaN 2023-12-31 \n",
- "3 Chicago, IL NaN 2023-12-31 \n",
- "4 Dallas, TX NaN 2023-12-31 \n",
+ "1 NaN NaN 2023-12-31 \n",
+ "2 NaN NaN 2023-12-31 \n",
+ "3 NaN NaN 2023-12-31 \n",
+ "4 NaN NaN 2023-12-31 \n",
 "... ... ... ... \n",
 "20162 Faribault-Northfield, MN Rice County 2023-12-31 \n",
 "20163 St. Louis, MO-IL Macoupin County 2023-12-31 \n",
@@ -729,7 +729,7 @@
 "[21062 rows x 15 columns]"
 ]
 },
- "execution_count": 34,
+ "execution_count": 49,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -743,13 +743,13 @@
 "\n",
 "all_cols = ['RegionID', 'RegionName', 'RegionType', 'SizeRank', 'StateName', 'State', 'City', 'Metro', 'CountyName',\n",
 " 'BaseDate'] + result_cols\n",
- "all_cols\n",
 "\n",
 "if not os.path.exists(FULL_PROCESSED_DIR_PATH):\n",
 " os.makedirs(FULL_PROCESSED_DIR_PATH)\n",
 "\n",
 "final_df = combined_df[all_cols]\n",
 "final_df = final_df.drop('StateName', axis=1)\n",
+ "final_df = final_df.rename(columns={\"CountyName\": \"County\"})\n",
 "\n",
 "# iterate over rows of final_df and populate State and City columns if the regionType is msa\n",
 "for index, row in final_df.iterrows():\n",
@@ -768,11 +768,12 @@
 },
 {
 "cell_type": "code",
- "execution_count": 36,
+ "execution_count": 53,
 "metadata": {},
 "outputs": [],
 "source": [
- "final_df.to_csv(FULL_PROCESSED_DIR_PATH + 'final.csv', index=False)"
+ "# final_df.to_csv(FULL_PROCESSED_DIR_PATH + 'final.csv', index=False)\n",
+ "final_df.to_json(FULL_PROCESSED_DIR_PATH + 'final.jsonl', orient='records', lines=True)"
 ]
 }
 ],
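Taken together, the notebook changes rename the `CountyName` column to `County` and replace the CSV export with JSON Lines. As plain Python, the updated cell amounts to the sketch below (assuming `combined_df`, `result_cols`, and `FULL_PROCESSED_DIR_PATH` are defined earlier in the notebook, as the diff context suggests):

import os

# Columns carried into the final frame; result_cols holds the forecast columns.
all_cols = ['RegionID', 'RegionName', 'RegionType', 'SizeRank', 'StateName',
            'State', 'City', 'Metro', 'CountyName', 'BaseDate'] + result_cols

os.makedirs(FULL_PROCESSED_DIR_PATH, exist_ok=True)

final_df = combined_df[all_cols]
final_df = final_df.drop('StateName', axis=1)
final_df = final_df.rename(columns={'CountyName': 'County'})  # align with zillow.py features

# One JSON object per row; pandas serializes NaN as null, which the datasets
# loader reads back as None for string and float features.
final_df.to_json(FULL_PROCESSED_DIR_PATH + 'final.jsonl', orient='records', lines=True)

The lines=True output is what makes the file move from final.csv to final.jsonl below possible: each row is independently parseable, so the dataset script can yield examples line by line.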
processed/final.csv DELETED
The diff for this file is too large to render. See raw diff
 
processed/home_value_forecasts/final.csv DELETED
The diff for this file is too large to render. See raw diff
 
processed/{test.jsonl → home_value_forecasts/final.jsonl} RENAMED
The diff for this file is too large to render. See raw diff
 
processed/test.json DELETED
The diff for this file is too large to render. See raw diff
 
zillow.py CHANGED
@@ -47,10 +47,10 @@ _LICENSE = ""
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URLS = {
- "first_domain": "https://files.zillowstatic.com/research/public_csvs/zhvf_growth/Metro_zhvf_growth_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv",
- # "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
- }
+ # _URLS = {
+ # "first_domain": "https://files.zillowstatic.com/research/public_csvs/zhvf_growth/Metro_zhvf_growth_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv",
+ # # "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
+ # }


 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
@@ -92,9 +92,22 @@ class NewDataset(datasets.GeneratorBasedBuilder):
 ): # This is the name of the configuration selected in BUILDER_CONFIGS above
 features = datasets.Features(
 {
- # "RegionID": datasets.Value("RegionID"),
+ "RegionID": datasets.Value(dtype='string', id="RegionID"),
 "SizeRank": datasets.Value(dtype='int32', id="SizeRank"),
- # "RegionName": datasets.Value("RegionName"),
+ "RegionName": datasets.Value(dtype='string', id="RegionName"),
+ "RegionType": datasets.Value(dtype='string', id="RegionType"),
+ 'State': datasets.Value(dtype='string', id="State"),
+ 'City': datasets.Value(dtype='string', id="City"),
+ 'Metro': datasets.Value(dtype='string', id="Metro"),
+ 'County': datasets.Value(dtype='string', id="County"),
+ 'BaseDate': datasets.Value(dtype='string', id="BaseDate"),
+ 'Month Over Month % (Smoothed)': datasets.Value(dtype='float32', id="Month Over Month % (Smoothed)"),
+ 'Quarter Over Quarter % (Smoothed)': datasets.Value(dtype='float32', id="Quarter Over Quarter % (Smoothed)"),
+ 'Year Over Year % (Smoothed)': datasets.Value(dtype='float32', id="Year Over Year % (Smoothed)"),
+ 'Month Over Month % (Raw)': datasets.Value(dtype='float32', id="Month Over Month % (Raw)"),
+ 'Quarter Over Quarter % (Raw)': datasets.Value(dtype='float32', id="Quarter Over Quarter % (Raw)"),
+ 'Year Over Year % (Raw)': datasets.Value(dtype='float32', id="Year Over Year % (Raw)"),
+
 # These are the features of your dataset like images, labels ...
 }
 )
@@ -130,12 +143,14 @@ class NewDataset(datasets.GeneratorBasedBuilder):
 # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
 # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
 # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+
 # urls = _URLS[self.config.name]
 # data_dir = dl_manager.download_and_extract(urls)
 # file_train = dl_manager.download(os.path.join('./data/home_value_forecasts', "Metro_zhvf_growth_uc_sfrcondo_tier_0.33_0.67_month.csv"))
- file_path = os.path.join('processed', "test.jsonl")
- print('*********************')
- print(file_path)
+ file_path = os.path.join('processed/home_value_forecasts', "final.jsonl")
+ # print('*********************')
+ # print(file_path)
+
 file_train = dl_manager.download(file_path)
 # file_test = dl_manager.download(os.path.join(self.config.name, "test.csv"))
 # file_eval = dl_manager.download(os.path.join(self.config.name, "valid.csv"))
@@ -176,9 +191,21 @@ class NewDataset(datasets.GeneratorBasedBuilder):
 if self.config.name == "first_domain":
 # Yields examples as (key, example) tuples
 yield key, {
- # "RegionID": data["RegionID"],
+ "RegionID": data["RegionID"],
 "SizeRank": data["SizeRank"],
- # "RegionName": data["RegionName"],
+ "RegionName": data["RegionName"],
+ "RegionType": data["RegionType"],
+ 'State': data["State"],
+ 'City': data["City"],
+ 'Metro': data["Metro"],
+ 'County': data["County"],
+ 'BaseDate': data["BaseDate"],
+ 'Month Over Month % (Smoothed)': data["Month Over Month % (Smoothed)"],
+ 'Quarter Over Quarter % (Smoothed)': data["Quarter Over Quarter % (Smoothed)"],
+ 'Year Over Year % (Smoothed)': data["Year Over Year % (Smoothed)"],
+ 'Month Over Month % (Raw)': data["Month Over Month % (Raw)"],
+ 'Quarter Over Quarter % (Raw)': data["Quarter Over Quarter % (Raw)"],
+ 'Year Over Year % (Raw)': data["Year Over Year % (Raw)"],
 # "answer": "" if split == "test" else data["answer"],
 }
 # else:
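With the declared features and the yielded fields now matching final.jsonl, a minimal smoke test of the loader could look like this sketch (hypothetical usage: it assumes the script is invoked from the repository root and that `_split_generators` maps `file_train` to a train split, as the surrounding code suggests):

from datasets import load_dataset

# "first_domain" is the config name checked in _generate_examples above;
# trust_remote_code may be required on recent versions of the datasets library.
ds = load_dataset("./zillow.py", "first_domain", split="train", trust_remote_code=True)

print(ds.features["County"])   # Value(dtype='string', id='County')
print(ds[0]["RegionName"], ds[0]["Year Over Year % (Smoothed)"])

Declaring every forecast column as float32 means pandas-written nulls come back as None rather than NaN strings, and the string-typed RegionID preserves leading zeros that an integer cast would drop.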