Refine CSV glob in validation
- validation.py +0 -28
- {{cookiecutter.repo_name}}/cli.py +5 -3
validation.py
DELETED
@@ -1,28 +0,0 @@
-from pathlib import Path
-
-from datasets import load_dataset
-
-
-def main():
-    tasks = ["safety_or_not"]
-
-    # check that all the expected files exist
-    prediction_files = list(Path(".").glob("*.csv"))
-    mismatched_files = set(tasks).symmetric_difference(set([f.stem for f in prediction_files]))
-    if mismatched_files:
-        raise ValueError(f"Incorrect number of files! Expected {len(tasks)} files, but got {len(prediction_files)}.")
-
-    # check we can load the dataset for each task
-    load_errors = []
-    for task in tasks:
-        try:
-            dset = load_dataset("../raft_submission", task)
-        except Exception as e:
-            load_errors.append(e)
-
-    if load_errors:
-        raise ValueError(f"Could not load predictions! Errors: {load_errors}")
-
-
-if __name__ == "__main__":
-    main()
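For reference, the deleted script's presence check pairs a glob with set.symmetric_difference, which flags missing and unexpected files in a single comparison. A minimal sketch of the idiom, using hypothetical task names in place of the real task list:

# Sketch of the presence check from the deleted script; the task names
# here are hypothetical stand-ins.
from pathlib import Path

tasks = ["task_a", "task_b"]
prediction_files = list(Path(".").glob("*.csv"))
# Names in exactly one of the two sets are either missing or unexpected files.
mismatched_files = set(tasks).symmetric_difference(f.stem for f in prediction_files)
if mismatched_files:
    raise ValueError(f"File mismatch: {sorted(mismatched_files)}")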
{{cookiecutter.repo_name}}/cli.py
CHANGED
@@ -25,16 +25,17 @@ app = typer.Typer()
 
 @app.command()
 def validate():
+    # TODO(lewtun): Consider using great_expectations for the data validation
     tasks = get_dataset_config_names("ought/raft")
 
     # Check that all the expected files exist
-    prediction_files = list(Path("data").rglob("*.csv"))
+    prediction_files = list(Path("data").rglob("predictions.csv"))
     mismatched_files = set(tasks).symmetric_difference(set([f.parent.name for f in prediction_files]))
     if mismatched_files:
         raise ValueError(f"Incorrect number of files! Expected {len(tasks)} files, but got {len(prediction_files)}.")
 
     # Check all files have the expected shape (number of rows, number of columns)
-    # TODO(lewtun): Add a check for the IDs per file
+    # TODO(lewtun): Add a check for the specific IDs per file
     shape_errors = []
     column_errors = []
     for prediction_file in prediction_files:
@@ -69,8 +70,9 @@ def validate():
 
 @app.command()
 def submit():
+    # TODO(lewtun): Replace with subprocess.run and only add the exact files we need for evaluation
     subprocess.call("git pull origin main".split())
-    subprocess.call(["git", "add", "
+    subprocess.call(["git", "add", "data"])
     subprocess.call(["git", "commit", "-m", "Submission"])
     subprocess.call(["git", "push"])
 
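The refined glob only matches files named predictions.csv, so stray CSVs under data/ no longer trip the presence check, and each task name is recovered from the parent directory. A short sketch of the layout this implies (the two task names are public RAFT configs, but the tree itself is a hypothetical example):

# Sketch of how the refined glob maps files back to task names.
from pathlib import Path

# Assumed layout:
# data/
#   ade_corpus_v2/predictions.csv
#   banking_77/predictions.csv
prediction_files = list(Path("data").rglob("predictions.csv"))
# Each parent directory name doubles as the dataset config (task) name.
tasks_found = {f.parent.name for f in prediction_files}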
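The TODO on submit points at two follow-ups: subprocess.run(..., check=True) raises on a non-zero exit status where subprocess.call only returns it, and staging individual prediction files keeps unrelated artifacts out of the submission commit. One possible shape, a sketch rather than the repo's actual implementation:

# Hypothetical refactor along the lines of the TODO; subprocess.run with
# check=True raises CalledProcessError if a git command fails.
import subprocess
from pathlib import Path

def submit():
    subprocess.run(["git", "pull", "origin", "main"], check=True)
    # Stage only the prediction files instead of the whole data directory.
    for prediction_file in Path("data").rglob("predictions.csv"):
        subprocess.run(["git", "add", str(prediction_file)], check=True)
    subprocess.run(["git", "commit", "-m", "Submission"], check=True)
    subprocess.run(["git", "push"], check=True)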