Initialize (#1)

* add files
* update script
* add README.md
* add CI
* update
- .github/workflows/ci.yaml +49 -0
- .github/workflows/push_to_hub.yaml +26 -0
- .gitignore +176 -0
- PosterErase.py +348 -0
- README.md +184 -0
- poetry.lock +0 -0
- pyproject.toml +21 -0
- tests/PosterErase_test.py +33 -0
- tests/__init__.py +0 -0
.github/workflows/ci.yaml
ADDED
@@ -0,0 +1,49 @@
name: CI

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
    paths-ignore:
      - "README.md"

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.9", "3.10"]

    steps:
      - uses: actions/checkout@v3

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        run: |
          pip install -U pip setuptools wheel poetry
          poetry install

      - name: Format
        run: |
          poetry run black --check .

      - name: Lint
        run: |
          poetry run ruff .

      - name: Type check
        run: |
          poetry run mypy . \
            --ignore-missing-imports \
            --no-strict-optional \
            --no-site-packages \
            --cache-dir=/dev/null

      - name: Run tests
        run: |
          poetry run pytest --color=yes -rf
.github/workflows/push_to_hub.yaml
ADDED
@@ -0,0 +1,26 @@
name: Sync to Hugging Face Hub

on:
  workflow_run:
    workflows:
      - CI
    branches:
      - main
    types:
      - completed

jobs:
  push_to_hub:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Push to Huggingface hub
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          HF_USERNAME: ${{ secrets.HF_USERNAME }}
        run: |
          git fetch --unshallow
          git push --force https://${HF_USERNAME}:${HF_TOKEN}@huggingface.co/datasets/${HF_USERNAME}/PosterErase main
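The push step above force-pushes the checkout to the Hub over HTTPS with the token embedded in the URL. The same sync could also be done with the `huggingface_hub` client; a minimal sketch, not what this workflow uses, with the target `repo_id` assumed to be `shunk031/PosterErase`:

```python
# Hedged alternative to the git-push step above, using huggingface_hub.
# Assumes a write-scoped token; the workflow itself shells out to git instead.
from huggingface_hub import HfApi

api = HfApi(token="hf_...")  # replace with a real token, e.g. the HF_TOKEN secret
api.upload_folder(
    folder_path=".",                 # the repository checkout to sync
    repo_id="shunk031/PosterErase",  # assumed target dataset repo
    repo_type="dataset",
)
```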
.gitignore
ADDED
@@ -0,0 +1,176 @@
# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python

### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml

# ruff
.ruff_cache/

# LSP config files
pyrightconfig.json

# End of https://www.toptal.com/developers/gitignore/api/python
PosterErase.py
ADDED
@@ -0,0 +1,348 @@
import json
import os
import pathlib
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Optional, Tuple

import datasets as ds
import pandas as pd
from datasets.utils.logging import get_logger
from PIL import Image
from PIL.Image import Image as PilImage

logger = get_logger(__name__)

JsonDict = Dict[str, Any]


_DESCRIPTION = """\
PosterErase is a new dataset that contains 60K high-resolution posters with text and is more challenging for the text erasing task.
"""

_CITATION = """
@inproceedings{jiang2022self,
    title={Self-supervised text erasing with controllable image synthesis},
    author={Jiang, Gangwei and Wang, Shiyao and Ge, Tiezheng and Jiang, Yuning and Wei, Ying and Lian, Defu},
    booktitle={Proceedings of the 30th ACM International Conference on Multimedia},
    pages={1973--1983},
    year={2022}
}
"""

_HOMEPAGE = "https://github.com/alimama-creative/Self-supervised-Text-Erasing"

_LICENSE = """\
The dataset is distributed under the CC BY-SA 4.0 license.
"""

_URL_BASE = (
    "https://huggingface.co/datasets/shunk031/PosterErase-private/resolve/main/{}"
)
_ZIP_FILES = [f"erase_{i}.zip" for i in range(1, 7)]
_URLS = [_URL_BASE.format(zip_file) for zip_file in _ZIP_FILES]


def load_image(file_path: pathlib.Path) -> PilImage:
    return Image.open(file_path)


@dataclass
class ColorData(object):
    c1: Optional[int]
    c2: Optional[int]
    c3: Optional[int]

    @classmethod
    def from_string(cls, s: str) -> "ColorData":
        assert isinstance(s, str)
        cs = s.split(",")
        if len(cs) == 3:
            return ColorData(*list(map(lambda s: int(s), cs)))
        elif len(cs) == 1:
            return ColorData(*[None] * 3)
        else:
            raise ValueError(f"Invalid value: {cs}")


@dataclass
class TextData(object):
    x: int
    y: int
    cs: List[ColorData]

    @classmethod
    def from_text_tuple(cls, text_tuple: Tuple[int, int, List[str]]) -> "TextData":
        x, y, cs = text_tuple
        assert isinstance(x, int) and isinstance(y, int)
        return cls(x=x, y=y, cs=[ColorData.from_string(c) for c in cs])


@dataclass
class ObjectData(object):
    text: Optional[str]
    size: Optional[int]
    direction: Optional[int]

    @classmethod
    def from_string(cls, s: str) -> "ObjectData":
        assert isinstance(s, str)
        ss = s.split(",")
        if len(ss) == 3:
            return cls(text=ss[0], size=int(ss[1]), direction=int(ss[2]))
        elif len(ss) == 1:
            return cls(*[None] * 3)
        else:
            raise ValueError(f"Invalid value: {ss}")


@dataclass
class PlaceData(object):
    objs: List[ObjectData]
    texts: List[List[TextData]]

    @classmethod
    def from_dict(cls, json_dict: JsonDict) -> "PlaceData":
        objs = [
            ObjectData.from_string(s) for s in json_dict["obj"].strip(";").split(";")
        ]
        texts = [
            [TextData.from_text_tuple(tt) for tt in tts] for tts in json_dict["text"]
        ]
        return cls(objs=objs, texts=texts)


@dataclass
class MaskData(object):
    x1: Optional[int]
    x2: Optional[int]
    y1: Optional[int]
    y2: Optional[int]

    @classmethod
    def from_string(cls, s: str) -> "MaskData":
        assert isinstance(s, str)
        ss = s.split(",")

        if len(ss) == 4:
            return cls(*list(map(lambda s: int(s), ss)))
        elif len(ss) == 1:
            return cls(*[None] * 4)
        else:
            raise ValueError(f"Invalid value: {ss}")


@dataclass
class Annotation(object):
    masks: List[MaskData]
    place: Optional[PlaceData]

    @classmethod
    def from_dict(cls, json_dict: JsonDict) -> "Annotation":
        masks = [
            MaskData.from_string(s) for s in json_dict["mask"].strip(";").split(";")
        ]

        place_json = json_dict.get("place")
        place = (
            PlaceData.from_dict(json_dict["place"]) if place_json is not None else None
        )
        return cls(masks=masks, place=place)


@dataclass
class EraseData(object):
    number: int
    path: str
    annotation: Annotation

    @classmethod
    def from_dict(cls, json_dict: JsonDict) -> "EraseData":
        number = int(json_dict["number"])
        path = json_dict["path"]
        annotation = Annotation.from_dict(json_dict["json"])
        return cls(number=number, path=path, annotation=annotation)


def _load_annotation(file_path: pathlib.Path, columns: List[str]) -> pd.DataFrame:
    df = pd.read_csv(file_path, delimiter="\t", names=columns)
    df["json"] = df["json"].apply(json.loads)
    return df


def _load_tng_annotation(file_path: pathlib.Path) -> pd.DataFrame:
    return _load_annotation(file_path=file_path, columns=["number", "path", "json"])


def _load_val_annotation(file_path: pathlib.Path) -> pd.DataFrame:
    return _load_annotation(
        file_path=file_path, columns=["number", "path", "json", "gt_path"]
    )


def _load_tst_annotation(file_path: pathlib.Path) -> pd.DataFrame:
    return _load_val_annotation(file_path=file_path)


class PosterEraseDataset(ds.GeneratorBasedBuilder):
    VERSION = ds.Version("1.0.0")
    BUILDER_CONFIGS = [ds.BuilderConfig(version=VERSION, description=_DESCRIPTION)]

    @property
    def _manual_download_instructions(self) -> str:
        return (
            "To use PosterErase dataset, you need to download the dataset "
            "via [Alibaba Cloud](https://tianchi.aliyun.com/dataset/134810)."
        )

    def _info(self) -> ds.DatasetInfo:
        masks = ds.Sequence(
            {
                "x1": ds.Value("int32"),
                "x2": ds.Value("int32"),
                "y1": ds.Value("int32"),
                "y2": ds.Value("int32"),
            }
        )
        objs = ds.Sequence(
            {
                "text": ds.Value("string"),
                "size": ds.Value("int32"),
                "direction": ds.Value("int8"),
            }
        )
        color = {
            "c1": ds.Value("int32"),
            "c2": ds.Value("int32"),
            "c3": ds.Value("int32"),
        }
        text_feature = {
            "x": ds.Value("int32"),
            "y": ds.Value("int32"),
            "cs": ds.Sequence(color),
        }
        texts = ds.Sequence(ds.Sequence(text_feature))
        place = {"objs": objs, "texts": texts}
        annotation = {"masks": masks, "place": place}
        features = ds.Features(
            {
                "number": ds.Value("int32"),
                "path": ds.Value("string"),
                "image": ds.Image(),
                "gt_image": ds.Image(),
                "annotation": annotation,
            }
        )
        return ds.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=features,
        )

    def _download_from_hf(self, dl_manager: ds.DownloadManager) -> List[str]:
        return dl_manager.download_and_extract(_URLS)

    def _download_from_local(self, dl_manager: ds.DownloadManager) -> List[str]:
        assert dl_manager.manual_dir is not None, dl_manager.manual_dir
        dir_path = os.path.expanduser(dl_manager.manual_dir)

        if not os.path.exists(dir_path):
            raise FileNotFoundError(
                "Make sure you have downloaded and placed the PosterErase dataset correctly. "
                "Also, pass a manual dir that contains the downloaded zip files "
                'via `datasets.load_dataset("shunk031/PosterErase", data_dir=...)`. '
                f"Manual download instructions: {self._manual_download_instructions}"
            )

        return dl_manager.extract(
            path_or_paths=[os.path.join(dir_path, zip_file) for zip_file in _ZIP_FILES]
        )

    def _split_generators(
        self, dl_manager: ds.DownloadManager
    ) -> List[ds.SplitGenerator]:
        base_dir_paths = (
            self._download_from_hf(dl_manager)
            if dl_manager.download_config.token
            else self._download_from_local(dl_manager)
        )
        dir_paths = [pathlib.Path(dir_path) for dir_path in base_dir_paths]
        dir_paths = [dir_path / f"erase_{i+1}" for i, dir_path in enumerate(dir_paths)]
        dir_path, *sub_dir_paths = dir_paths

        tng_df = _load_tng_annotation(dir_path / "train.txt")
        val_df = _load_val_annotation(dir_path / "ps_valid.txt")
        tst_df = _load_tst_annotation(dir_path / "ps_test.txt")

        tng_image_files = {
            f"{f.parent.name}/{f.name}": f for f in dir_path.glob("train/*.png")
        }
        val_image_files = {
            f"{f.parent.name}/{f.name}": f for f in dir_path.glob("valid/*.png")
        }
        val_gt_image_files = {
            f"{f.parent.name}/{f.name}": f for f in dir_path.glob("valid/*_gt.png")
        }
        tst_image_files = {
            f"{f.parent.name}/{f.name}": f for f in dir_path.glob("test/*.png")
        }
        tst_gt_image_files = {
            f"{f.parent.name}/{f.name}": f for f in dir_path.glob("test/*_gt.png")
        }
        for sub_dir_path in sub_dir_paths:
            tng_image_files.update(
                {
                    f"{f.parent.name}/{f.name}": f
                    for f in sub_dir_path.glob("train/*.png")
                }
            )
        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,
                gen_kwargs={
                    "annotation_df": tng_df,
                    "image_files": tng_image_files,
                },
            ),
            ds.SplitGenerator(
                name=ds.Split.VALIDATION,
                gen_kwargs={
                    "annotation_df": val_df,
                    "image_files": val_image_files,
                    "gt_image_files": val_gt_image_files,
                },
            ),
            ds.SplitGenerator(
                name=ds.Split.TEST,
                gen_kwargs={
                    "annotation_df": tst_df,
                    "image_files": tst_image_files,
                    "gt_image_files": tst_gt_image_files,
                },
            ),
        ]

    def _generate_examples(
        self,
        annotation_df: pd.DataFrame,
        image_files: Dict[str, pathlib.Path],
        gt_image_files: Optional[Dict[str, pathlib.Path]] = None,
    ):
        ann_dicts = annotation_df.to_dict(orient="records")
        for i, ann_dict in enumerate(ann_dicts):
            image_path = image_files[ann_dict["path"]]
            image = load_image(image_path)
            erase_data = EraseData.from_dict(json_dict=ann_dict)

            example = asdict(erase_data)
            example["image"] = image

            if gt_image_files is not None and "gt_path" in ann_dict:
                gt_image_path = gt_image_files[ann_dict["gt_path"]]
                gt_image = load_image(gt_image_path)
                example["gt_image"] = gt_image
            else:
                example["gt_image"] = None

            yield i, example
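To make the annotation string formats concrete, here is a small sketch of what the `from_string` parsers above accept; the sample values are hypothetical, since real annotations come from the tab-separated files inside the zip archives:

```python
# Requires the script's dependencies (datasets, pandas, Pillow) to be installed
# so that PosterErase.py is importable.
from PosterErase import ColorData, MaskData, ObjectData

# "c1,c2,c3" parses to three ints; a single field parses to all-None.
print(ColorData.from_string("255,128,0"))  # ColorData(c1=255, c2=128, c3=0)
print(ColorData.from_string("0"))          # ColorData(c1=None, c2=None, c3=None)

# "text,size,direction" parses to the typed fields; a single field to all-None.
print(ObjectData.from_string("hello,24,0"))  # ObjectData(text='hello', size=24, direction=0)

# "x1,x2,y1,y2" parses to a mask rectangle.
print(MaskData.from_string("10,200,30,400"))  # MaskData(x1=10, x2=200, y1=30, y2=400)
```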
README.md
ADDED
@@ -0,0 +1,184 @@
---
annotations_creators:
- machine-generated
language:
- zh
language_creators:
- found
license:
- cc-by-sa-4.0
multilinguality:
- monolingual
pretty_name: PosterErase
size_categories: []
source_datasets:
- original
tags:
- graphic design
task_categories:
- other
task_ids: []
---

# Dataset Card for PosterErase

[![CI](https://github.com/shunk031/huggingface-datasets_PosterErase/actions/workflows/ci.yaml/badge.svg)](https://github.com/shunk031/huggingface-datasets_PosterErase/actions/workflows/ci.yaml)

## Table of Contents
- [Dataset Card for PosterErase](#dataset-card-for-postererase)
  - [Table of Contents](#table-of-contents)
  - [Dataset Description](#dataset-description)
    - [Dataset Summary](#dataset-summary)
    - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
    - [Languages](#languages)
  - [Dataset Structure](#dataset-structure)
    - [Data Instances](#data-instances)
    - [Data Fields](#data-fields)
    - [Data Splits](#data-splits)
  - [Dataset Creation](#dataset-creation)
    - [Curation Rationale](#curation-rationale)
    - [Source Data](#source-data)
      - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
      - [Who are the source language producers?](#who-are-the-source-language-producers)
    - [Annotations](#annotations)
      - [Annotation process](#annotation-process)
      - [Who are the annotators?](#who-are-the-annotators)
    - [Personal and Sensitive Information](#personal-and-sensitive-information)
  - [Considerations for Using the Data](#considerations-for-using-the-data)
    - [Social Impact of Dataset](#social-impact-of-dataset)
    - [Discussion of Biases](#discussion-of-biases)
    - [Other Known Limitations](#other-known-limitations)
  - [Additional Information](#additional-information)
    - [Dataset Curators](#dataset-curators)
    - [Licensing Information](#licensing-information)
    - [Citation Information](#citation-information)
    - [Contributions](#contributions)

## Dataset Description

- **Homepage:** https://github.com/alimama-creative/Self-supervised-Text-Erasing
- **Repository:** https://github.com/shunk031/huggingface-datasets_PosterErase
- **Paper (Preprint):** https://arxiv.org/abs/2204.12743
- **Paper (ACMMM2022):** https://dl.acm.org/doi/abs/10.1145/3503161.3547905

### Dataset Summary

PosterErase is a new dataset that contains 60K high-resolution posters with text and is more challenging for the text erasing task.

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

The language data in PosterErase is in Chinese (BCP-47 zh).

## Dataset Structure

### Data Instances

To use the PosterErase dataset, you need to download it via [Alibaba Cloud](https://tianchi.aliyun.com/dataset/134810).
Then place the downloaded files in the following structure and specify the directory's path.

```
/path/to/datasets
├── erase_1.zip
├── erase_2.zip
├── erase_3.zip
├── erase_4.zip
├── erase_5.zip
└── erase_6.zip
```

```python
import datasets as ds

dataset = ds.load_dataset(
    path="shunk031/PosterErase",
    data_dir="/path/to/datasets/",
)
```

### Data Fields

[More Information Needed]

### Data Splits

[More Information Needed]

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

[More Information Needed]

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

[More Information Needed]

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

You can find the following statement in [the license section](https://tianchi.aliyun.com/dataset/134810#license) of [the dataset distribution location](https://tianchi.aliyun.com/dataset/134810).

> The dataset is distributed under the CC BY-SA 4.0 license.

However, the license setting on that page appears to be set to [CC-BY-SA-NC 4.0](http://creativecommons.org/licenses/by-sa/4.0/?spm=a2c22.12282016.0.0.7abc5a92qnyxdR).

### Citation Information

```bibtex
@inproceedings{jiang2022self,
    title={Self-supervised text erasing with controllable image synthesis},
    author={Jiang, Gangwei and Wang, Shiyao and Ge, Tiezheng and Jiang, Yuning and Wei, Ying and Lian, Defu},
    booktitle={Proceedings of the 30th ACM International Conference on Multimedia},
    pages={1973--1983},
    year={2022}
}
```

### Contributions

Thanks to [alimama-creative](https://github.com/alimama-creative) for creating this dataset.
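Given the features declared in `PosterErase.py`, each loaded record exposes `number`, `path`, `image`, `gt_image`, and a nested `annotation`; a minimal sketch of inspecting one example, assuming the zip files are placed as described above:

```python
import datasets as ds

# Assumes the six erase_*.zip files sit under /path/to/datasets/ as shown above.
dataset = ds.load_dataset(path="shunk031/PosterErase", data_dir="/path/to/datasets/")

example = dataset["train"][0]
print(example["number"], example["path"])
print(example["image"])                # PIL image of the poster
print(example["annotation"]["masks"])  # text-region rectangles: x1, x2, y1, y2
print(example["gt_image"])             # erased ground truth; None for the train split
```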
poetry.lock
ADDED
The diff for this file is too large to render.
pyproject.toml
ADDED
@@ -0,0 +1,21 @@
[tool.poetry]
name = "huggingface-datasets-postererase"
version = "0.1.0"
description = ""
authors = ["Shunsuke KITADA <[email protected]>"]
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.9"
datasets = {extras = ["vision"], version = "^2.14.6"}


[tool.poetry.group.dev.dependencies]
ruff = "^0.1.4"
black = "^23.10.1"
mypy = "^1.6.1"
pytest = "^7.4.3"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
tests/PosterErase_test.py
ADDED
@@ -0,0 +1,33 @@
import os

import datasets as ds
import pytest


@pytest.fixture
def dataset_path() -> str:
    return "PosterErase.py"


@pytest.mark.skipif(
    condition=bool(os.environ.get("CI", False)),
    reason=(
        "Because this loading script downloads a large dataset, "
        "we will skip running it on CI."
    ),
)
@pytest.mark.parametrize(
    argnames=("expected_num_train", "expected_num_valid", "expected_num_test"),
    argvalues=((58114, 148, 146),),
)
def test_load_dataset(
    dataset_path: str,
    expected_num_train: int,
    expected_num_valid: int,
    expected_num_test: int,
):
    dataset = ds.load_dataset(path=dataset_path, token=True)

    assert dataset["train"].num_rows == expected_num_train
    assert dataset["validation"].num_rows == expected_num_valid
    assert dataset["test"].num_rows == expected_num_test
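One subtlety in the skip condition above: `bool(os.environ.get("CI", False))` is truthy for any non-empty string, so the test is skipped whenever `CI` is set at all (GitHub Actions sets `CI=true`):

```python
import os

# Any non-empty value for CI triggers the skip, even the string "false",
# because bool("false") is True in Python.
os.environ["CI"] = "false"
print(bool(os.environ.get("CI", False)))  # True
```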
tests/__init__.py
ADDED
File without changes