feat: add generate scripts for survey
Browse files- .gitignore +4 -0
- configs/generate.yaml +31 -0
- pcbm_metashift.py +193 -0
- poetry.lock +0 -0
- pyproject.toml +22 -0
- scripts/__init__.py +0 -0
- scripts/generate.py +275 -0
- scripts/test_generate.py +37 -0
.gitignore
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.venv
|
2 |
+
.python-version
|
3 |
+
**/__pycache__
|
4 |
+
.DS_Store
|
configs/generate.yaml
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
tasks:
|
2 |
+
- name: task_abcck_u
|
3 |
+
seed: 42
|
4 |
+
num_images_per_class_train: 30
|
5 |
+
num_images_per_class_test: 5
|
6 |
+
selected_classes:
|
7 |
+
- airplane
|
8 |
+
- bed
|
9 |
+
- car
|
10 |
+
- cow
|
11 |
+
- keyboard
|
12 |
+
experiments:
|
13 |
+
- name: bed_dog_dog
|
14 |
+
spurious_class: bed
|
15 |
+
train_context: dog
|
16 |
+
test_context: dog
|
17 |
+
- name: task_bbakb_u
|
18 |
+
seed: 42
|
19 |
+
num_images_per_class_train: 50
|
20 |
+
num_images_per_class_test: 50
|
21 |
+
selected_classes:
|
22 |
+
- beach
|
23 |
+
- bus
|
24 |
+
- airplane
|
25 |
+
- keyboard
|
26 |
+
- bird
|
27 |
+
experiments:
|
28 |
+
- name: keyboard_cat_cat
|
29 |
+
spurious_class: keyboard
|
30 |
+
train_context: cat
|
31 |
+
test_context: cat
|
pcbm_metashift.py
ADDED
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import datasets
|
2 |
+
import json
|
3 |
+
from string import Template
|
4 |
+
from pathlib import Path
|
5 |
+
|
6 |
+
# Dataset-card boilerplate; intentionally empty for now.
_HOMEPAGE = ""
_CITATION = ""
_LICENSE = ""
# Rendered per-config in PCBMMetashift._info() with the task's classes/contexts.
_DESCRIPTION_TEMPLATE = Template(
    "$num_classes-way image classification task "
    "to test domain shift of class $spurious_class from "
    "context $source_context to $target_context. "
    "Selected classes: $selected_classes"
)
# Base URL of the HF dataset repo hosting the image archive and split metadata.
_REPO = "https://huggingface.co/datasets/dgcnz/pcbm-metashift/resolve/main"
# Relative directory holding the images.
# NOTE(review): appears unused in this module -- confirm before removing.
_IMAGES_DIR = Path("data")
|
17 |
+
|
18 |
+
|
19 |
+
class PCBMMetashiftConfig(datasets.BuilderConfig):
    """BuilderConfig for a single PCBM-Metashift domain-shift task."""

    def __init__(
        self,
        metadata_path: str,
        selected_classes: list[str],
        spurious_class: str,
        source_context: str,
        target_context: str,
        **kwargs,
    ):
        """BuilderConfig for PCBM-Metashift.

        Args:
            metadata_path: repo-relative path to the JSON file describing the
                train/test splits of this task.
            selected_classes: class names making up the classification task.
            spurious_class: the class whose visual context shifts between
                train and test.
            source_context: context of ``spurious_class`` at train time.
            target_context: context of ``spurious_class`` at test time.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.metadata_path = metadata_path
        self.selected_classes = selected_classes
        self.spurious_class = spurious_class
        self.source_context = source_context
        self.target_context = target_context
|
45 |
+
|
46 |
+
|
47 |
+
class PCBMMetashift(datasets.GeneratorBasedBuilder):
    """PCBM-Metashift: image classification under controlled domain shift."""

    # (task_id, task_label, selected_classes, [(spurious_class, src_ctx, tgt_ctx), ...])
    # Each shift expands to one builder config named
    # ``task_<task_id>_<spurious_class>_<src_ctx>_<tgt_ctx>``.
    _TASK_SPECS = [
        (
            "abcck",
            "Task 1",
            ["airplane", "bed", "car", "cow", "keyboard"],
            [
                ("bed", "cat", "dog"),
                ("bed", "dog", "cat"),
                ("car", "cat", "dog"),
                ("car", "dog", "cat"),
            ],
        ),
        (
            "bcmst",
            "Task 2",
            ["beach", "computer", "motorcycle", "stove", "table"],
            [
                ("table", "books", "cat"),
                ("table", "books", "dog"),
                ("table", "cat", "dog"),
                ("table", "dog", "cat"),
            ],
        ),
    ]

    # Generated instead of hand-duplicated: the eight original configs all
    # followed the same name/description/metadata_path pattern.
    BUILDER_CONFIGS = [
        PCBMMetashiftConfig(
            name=f"task_{task_id}_{spurious}_{src}_{tgt}",
            description=f"{task_label}: {spurious}({src}) -> {spurious}({tgt})",
            metadata_path=f"configs/task_{task_id}_{spurious}_{src}_{tgt}.json",
            selected_classes=classes,
            spurious_class=spurious,
            source_context=src,
            target_context=tgt,
        )
        for task_id, task_label, classes, shifts in _TASK_SPECS
        for spurious, src, tgt in shifts
    ]

    def _info(self):
        """Dataset metadata; the description is rendered for this config."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION_TEMPLATE.substitute(
                num_classes=len(self.config.selected_classes),
                spurious_class=self.config.spurious_class,
                source_context=self.config.source_context,
                target_context=self.config.target_context,
                selected_classes=", ".join(self.config.selected_classes),
            ),
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.ClassLabel(names=self.config.selected_classes),
                }
            ),
            supervised_keys=("image", "label"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            task_templates=[
                datasets.ImageClassification(image_column="image", label_column="label")
            ],
        )

    def _split_generators(self, dl_manager):
        """Download the shared image archive plus this config's split metadata."""
        archive_path = dl_manager.download(f"{_REPO}/data/images.tar.gz")
        metadata_path = dl_manager.download(f"{_REPO}/{self.config.metadata_path}")
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    # iter_archive is single-pass, so each split needs its own iterator.
                    "images": dl_manager.iter_archive(archive_path),
                    "metadata_path": metadata_path,
                    "split": split,
                },
            )
            for split_name, split in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, images, metadata_path: str, split: str):
        """Generate (key, example) pairs for ``split``.

        The archive iterator is single-pass, so the bytes of every image
        referenced by the split metadata are buffered in memory first, then
        yielded in metadata order.
        """
        with open(metadata_path, encoding="utf-8") as f:
            metadata = json.load(f)
        split_data = metadata["data_splits"][split]
        ids_to_keep = set()
        for ids in split_data.values():
            ids_to_keep.update(Path(_id).stem for _id in ids)

        files = {}
        for file_path, file_obj in images:
            image_id = Path(file_path).stem
            if image_id in ids_to_keep:
                files[image_id] = (file_obj.read(), file_path)

        for cls, ids in split_data.items():
            for raw_id in ids:
                image_id = Path(raw_id).stem
                if image_id not in files:
                    # Fail loudly with context instead of a bare KeyError.
                    raise FileNotFoundError(
                        f"Image {image_id!r} listed in metadata for class {cls!r} "
                        "was not found in the downloaded archive."
                    )
                image_bytes, file_path = files[image_id]
                yield f"{cls}_{image_id}", {
                    "image": {"path": file_path, "bytes": image_bytes},
                    "label": cls,
                }
|
poetry.lock
ADDED
The diff for this file is too large to render.
See raw diff
|
|
pyproject.toml
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[tool.poetry]
|
2 |
+
name = "pcbm-metashift"
|
3 |
+
version = "0.1.0"
|
4 |
+
description = ""
|
5 |
+
authors = ["Your Name <[email protected]>"]
|
6 |
+
|
7 |
+
readme = "README.md"
|
8 |
+
|
9 |
+
[tool.poetry.dependencies]
|
10 |
+
python = "^3.9"
|
11 |
+
datasets = "^2.16.1"
|
12 |
+
pillow = "^10.2.0"
|
13 |
+
|
14 |
+
|
15 |
+
[tool.poetry.group.dev.dependencies]
|
16 |
+
omegaconf = "^2.3.0"
|
17 |
+
pydantic = "^2.5.3"
|
18 |
+
pytest = "^7.4.4"
|
19 |
+
|
20 |
+
[build-system]
|
21 |
+
requires = ["poetry-core"]
|
22 |
+
build-backend = "poetry.core.masonry.api"
|
scripts/__init__.py
ADDED
File without changes
|
scripts/generate.py
ADDED
@@ -0,0 +1,275 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import argparse
|
2 |
+
import shutil
|
3 |
+
import pickle
|
4 |
+
import logging
|
5 |
+
from omegaconf import OmegaConf
|
6 |
+
import re
|
7 |
+
import random
|
8 |
+
import tarfile
|
9 |
+
from pydantic import BaseModel
|
10 |
+
from pathlib import Path
|
11 |
+
|
12 |
+
# Configure logging once at import time; INFO so per-domain counts are visible.
# NOTE(review): basicConfig at import is fine for a script, but reconsider if
# this module is ever imported as a library.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
|
14 |
+
|
15 |
+
|
16 |
+
def setup_parser():
    """Build the CLI argument parser for the dataset-generation script."""
    parser = argparse.ArgumentParser(description="Generate a domain shift dataset")
    # (flag, help text) for every required string option, in CLI order.
    required_options = (
        ("--config", "Path to config file"),
        ("--output_dir", "Path to output directory"),
        ("--full_candidate_subsets_path", "Path to full-candidate-subsets.pkl"),
        (
            "--visual_genome_images_dir",
            "Path to VisualGenome images directory allImages/images",
        ),
    )
    for flag, help_text in required_options:
        parser.add_argument(flag, type=str, required=True, help=help_text)
    return parser
|
35 |
+
|
36 |
+
|
37 |
+
def get_ms_domain_name(obj: str, context: str) -> str:
    """Return the MetaShift domain key, e.g. ``table(dog)`` for ``table``/``dog``."""
    return "{}({})".format(obj, context)
|
39 |
+
|
40 |
+
|
41 |
+
class DataSplits(BaseModel):
    """Per-split mapping from class name to the image ids chosen for it."""

    # class name -> image identifiers selected for training.
    # NOTE(review): ids start as raw VisualGenome image ids and are rewritten
    # to .jpg paths by MetashiftFactory._replace_ids_with_paths.
    train: dict[str, list[str]]
    # class name -> image identifiers selected for testing.
    test: dict[str, list[str]]
|
44 |
+
|
45 |
+
|
46 |
+
class MetashiftData(BaseModel):
    """Full description of one generated domain-shift dataset."""

    # All class names participating in the classification task.
    selected_classes: list[str]
    # The class whose visual context differs between train and test.
    spurious_class: str
    # Context of the spurious class at train time.
    train_context: str
    # Context of the spurious class at test time.
    test_context: str
    # The sampled train/test image ids per class.
    data_splits: DataSplits
|
52 |
+
|
53 |
+
|
54 |
+
class MetashiftFactory(object):
    """Builds domain-shift image-classification datasets from MetaShift data.

    Attributes:
        object_context_to_id: maps a domain key ``"object(context)"`` to the
            set of VisualGenome image ids in that domain. NOTE(review): the
            pickle stores collections that are used with set operations below,
            so the previous ``dict[str, list[int]]`` annotation was wrong.
        visual_genome_images_dir: directory containing ``<image_id>.jpg``.
    """

    object_context_to_id: dict[str, set[str]]
    visual_genome_images_dir: str

    def __init__(
        self,
        full_candidate_subsets_path: str,
        visual_genome_images_dir: str,
    ):
        """
        Args:
            full_candidate_subsets_path: Path to `full-candidate-subsets.pkl`.
            visual_genome_images_dir: Path to VisualGenome images directory
                `allImages/images`.
        """
        with open(full_candidate_subsets_path, "rb") as f:
            self.object_context_to_id = pickle.load(f)
        self.visual_genome_images_dir = visual_genome_images_dir

    def _get_all_domains_with_object(self, obj: str) -> set[str]:
        """Get all domains with the given object and any context.

        Example:
            - _get_all_domains_with_object("table") => {"table(dog)", "table(cat)", ...}
        """
        # re.escape guards against object names containing regex metacharacters.
        pattern = re.compile(rf"^{re.escape(obj)}\(.*\)$")
        return {key for key in self.object_context_to_id if pattern.match(key)}

    def _get_all_image_ids_with_object(self, obj: str) -> set[str]:
        """Get all image ids with the given object and any context.

        Example:
            - _get_all_image_ids_with_object("table") => union of the ids of
              table(dog), table(cat), ...
        """
        domains = self._get_all_domains_with_object(obj)
        return {_id for domain in domains for _id in self.object_context_to_id[domain]}

    def _get_image_ids(
        self,
        obj: str,
        context: "str | None",
        exclude_context: "str | None" = None,
    ) -> set[str]:
        """Return image ids for ``obj``.

        - ``exclude_context`` given: every image of ``obj`` except that context.
        - ``context`` given: exactly the domain ``obj(context)``.
        - neither: every image of ``obj``.

        Annotations are quoted because pyproject declares python ^3.9, where
        ``str | None`` is not evaluable at runtime.
        """
        if exclude_context is not None:
            all_ids = self._get_all_image_ids_with_object(obj)
            excluded = self.object_context_to_id[get_ms_domain_name(obj, exclude_context)]
            return all_ids - set(excluded)
        if context is not None:
            # Copy so callers cannot mutate the cached candidate sets.
            return set(self.object_context_to_id[get_ms_domain_name(obj, context)])
        return self._get_all_image_ids_with_object(obj)

    def _get_class_domains(
        self, domains_specification: "dict[str, tuple[str | None, str | None]]"
    ) -> "dict[str, tuple[set[str], set[str]]]":
        """Get train and test image id pools for the given specification.

        A context of ``None`` means "any context". When a class's train and
        test contexts are equal, the test pool is drawn from every *other*
        context so the two domains never coincide.
        """
        domain_ids = {}
        for cls, (train_context, test_context) in domains_specification.items():
            train_ids = self._get_image_ids(cls, context=train_context)
            if train_context == test_context:
                test_ids = self._get_image_ids(
                    cls, context=None, exclude_context=test_context
                )
            else:
                test_ids = self._get_image_ids(cls, context=test_context)
            domain_ids[cls] = (train_ids, test_ids)
            logger.info(
                f"{get_ms_domain_name(cls, train_context or '*')}: {len(train_ids)}"
                " -> "
                f"{get_ms_domain_name(cls, test_context or '*')}: {len(test_ids)}"
            )
        return domain_ids

    def _sample_from_domains(
        self,
        seed: int,
        domains: "dict[str, tuple[set[str], set[str]]]",
        num_train_images_per_class: int,
        num_test_images_per_class: int,
    ) -> "dict[str, tuple[list[str], list[str]]]":
        """Return per-class (train, test) samples drawn from the full domains.

        Raises:
            ValueError: if a class has fewer candidates than requested.
        """
        # TODO: Do we have to ensure that there's no overlap between classes?
        # For example, we could have repeated files in training for different classes.
        sampled_domains = {}
        for cls, (train_ids, test_ids) in domains.items():
            try:
                # sorted() (not list()) so sampling is reproducible across
                # processes; set iteration order depends on str hash seeding.
                sampled_train_ids = random.Random(seed).sample(
                    sorted(train_ids), num_train_images_per_class
                )
                # Never test on an image that was sampled for training.
                remaining_test_ids = set(test_ids) - set(sampled_train_ids)
                sampled_test_ids = random.Random(seed).sample(
                    sorted(remaining_test_ids), num_test_images_per_class
                )
            except ValueError as err:
                logger.error(
                    f"{cls}: {len(train_ids)} train images, {len(test_ids)} test images"
                )
                raise ValueError(f"Not enough images for class {cls!r}") from err
            sampled_domains[cls] = (sampled_train_ids, sampled_test_ids)
        return sampled_domains

    def create(
        self,
        seed: int,
        selected_classes: list[str],
        spurious_class: str,
        train_spurious_context: str,
        test_spurious_context: str,
        num_train_images_per_class: int,
        num_test_images_per_class: int,
    ) -> "MetashiftData":
        """Return the sampled dataset for the given domain shift.

        Every class is sampled context-free except ``spurious_class``, which
        is drawn from ``train_spurious_context`` at train time and
        ``test_spurious_context`` at test time.
        """
        domains_specification = {
            **{cls: (None, None) for cls in selected_classes},
            # Overwrites the context-free entry for spurious_class.
            spurious_class: (train_spurious_context, test_spurious_context),
        }
        domains = self._get_class_domains(domains_specification)
        sampled_domains = self._sample_from_domains(
            seed=seed,
            domains=domains,
            num_train_images_per_class=num_train_images_per_class,
            num_test_images_per_class=num_test_images_per_class,
        )
        train_split = {cls: ids for cls, (ids, _) in sampled_domains.items()}
        test_split = {cls: ids for cls, (_, ids) in sampled_domains.items()}
        return MetashiftData(
            selected_classes=selected_classes,
            spurious_class=spurious_class,
            train_context=train_spurious_context,
            test_context=test_spurious_context,
            data_splits=DataSplits(
                train=train_split,
                test=test_split,
            ),
        )

    def _get_unique_ids_from_info(self, info: "dict[str, MetashiftData]"):
        """Collect every image id referenced by any dataset in ``info``."""
        unique_ids = set()
        for data in info.values():
            for ids in data.data_splits.train.values():
                unique_ids.update(ids)
            for ids in data.data_splits.test.values():
                unique_ids.update(ids)
        return unique_ids

    def _replace_ids_with_paths(
        self, info: "dict[str, MetashiftData]", data_path: Path
    ) -> "dict[str, MetashiftData]":
        """Rewrite every image id in ``info`` to ``<data_path>/<id>.jpg``.

        NOTE: mutates the MetashiftData objects in ``info`` in place and
        returns the same mapping for convenience.
        """
        for data in info.values():
            for split in (data.data_splits.train, data.data_splits.test):
                for cls, ids in split.items():
                    split[cls] = [str(data_path / f"{_id}.jpg") for _id in ids]
        return info

    def save_all(self, out_dir: str, info: "dict[str, MetashiftData]"):
        """Save all datasets to the given directory.

        Writes one ``<dataset_name>.json`` metadata file per dataset and a
        single ``data/images.tar.gz`` archive containing every referenced
        image. (Previously this method computed everything and wrote nothing:
        the output code was commented out.)
        """
        out_path = Path(out_dir)
        data_path = out_path / "data"
        data_path.mkdir(parents=True, exist_ok=True)

        # Collect raw ids before they are rewritten into paths below.
        unique_ids = self._get_unique_ids_from_info(info)
        info = self._replace_ids_with_paths(info, data_path)
        for dataset_name, data in info.items():
            with open(out_path / f"{dataset_name}.json", "w") as f:
                f.write(data.model_dump_json(indent=2))

        with tarfile.open(data_path / "images.tar.gz", "w:gz") as tar:
            for _id in unique_ids:
                src = Path(self.visual_genome_images_dir) / f"{_id}.jpg"
                # Flat member names: consumers key archive entries on the stem.
                tar.add(src, arcname=src.name)
|
241 |
+
|
242 |
+
|
243 |
+
def get_dataset_name(task_name: str, experiment_name: str) -> str:
    """Compose the unique dataset identifier from task and experiment names."""
    return "_".join((task_name, experiment_name))
|
245 |
+
|
246 |
+
|
247 |
+
def main():
    """Entry point: build every configured dataset and save it to disk."""
    args = setup_parser().parse_args()
    config = OmegaConf.load(args.config)
    factory = MetashiftFactory(
        full_candidate_subsets_path=args.full_candidate_subsets_path,
        visual_genome_images_dir=args.visual_genome_images_dir,
    )
    info: dict[str, MetashiftData] = {}
    # One dataset per (task, experiment) pair declared in the config.
    for task in config.tasks:
        for experiment in task.experiments:
            data = factory.create(
                seed=task.seed,
                selected_classes=task.selected_classes,
                spurious_class=experiment.spurious_class,
                train_spurious_context=experiment.train_context,
                test_spurious_context=experiment.test_context,
                num_test_images_per_class=task.num_images_per_class_test,
                num_train_images_per_class=task.num_images_per_class_train,
            )
            dataset_name = get_dataset_name(task.name, experiment.name)
            assert dataset_name not in info
            info[dataset_name] = data

    factory.save_all(args.output_dir, info)


if __name__ == "__main__":
    main()
|
scripts/test_generate.py
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from scripts.generate import MetashiftFactory, MetashiftData, get_dataset_name
|
2 |
+
from omegaconf import OmegaConf
|
3 |
+
import random
|
4 |
+
|
5 |
+
CONFIG_PATH = "configs/generate.yaml"
CANDIDATE_SUBSETS_PATH = "scripts/artifacts/csp.pkl"


def _build_info(factory, config) -> dict:
    """Create one MetashiftData per (task, experiment) pair in the config."""
    info = {}
    for task_config in config.tasks:
        for experiment_config in task_config.experiments:
            data = factory.create(
                seed=task_config.seed,
                selected_classes=task_config.selected_classes,
                spurious_class=experiment_config.spurious_class,
                train_spurious_context=experiment_config.train_context,
                test_spurious_context=experiment_config.test_context,
                num_test_images_per_class=task_config.num_images_per_class_test,
                num_train_images_per_class=task_config.num_images_per_class_train,
            )
            dataset_name = get_dataset_name(task_config.name, experiment_config.name)
            assert dataset_name not in info
            info[dataset_name] = data
    return info


def test_generate():
    """Generation must be reproducible regardless of global random state.

    The previous version built ``info`` once and compared the unique ids of
    the *same* object twice, so the assertion was vacuously true. Now the
    datasets are built twice, with the global RNG perturbed differently
    before each build: ``create`` seeds its own ``random.Random``, so the
    resulting ids must be identical.
    """
    config = OmegaConf.load(CONFIG_PATH)
    metashift_factory = MetashiftFactory(
        full_candidate_subsets_path=CANDIDATE_SUBSETS_PATH,
        visual_genome_images_dir=".",
    )

    random.seed(2)
    info = _build_info(metashift_factory, config)
    unique_ids = metashift_factory._get_unique_ids_from_info(info)

    random.seed(10000)
    info_2 = _build_info(metashift_factory, config)
    unique_ids_2 = metashift_factory._get_unique_ids_from_info(info_2)

    assert unique_ids == unique_ids_2
|
36 |
+
|
37 |
+
|