Tasks: Text Generation
Formats: parquet
Sub-tasks: language-modeling
Languages: Danish
Size: 1M - 10M
License:

KennethEnevoldsen committed:

moved tests to allow for imports
- .vscode/settings.json +1 -1
- README.md +3 -0
- makefile +1 -1
- {scripts → src}/bump_version.py +0 -0
- {scripts → src}/git_utilities.py +0 -0
- {tests → src/tests}/__init__.py +0 -0
- {tests → src/tests}/conftest.py +3 -1
- {tests → src/tests}/readme_parsing.py +0 -0
- {tests → src/tests}/test_dataset_schema.py +2 -1
- {tests → src/tests}/test_load.py +3 -4
- {tests → src/tests}/test_unique_ids.py +0 -0
- {scripts → src}/update_descriptive_statistics.py +4 -0
.vscode/settings.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "python.testing.pytestArgs": [
-    "tests"
+    "src/tests"
   ],
   "python.testing.unittestEnabled": false,
   "python.testing.pytestEnabled": true
README.md
CHANGED
@@ -252,6 +252,9 @@ This data generally contains no annotation besides the metadata attached to each
 
 Below follows a brief overview of the sources in the corpus along with their individual license.
 
+<!-- START-MAIN TABLE -->
+<!-- END-MAIN TABLE -->
+
 | Source              | License              |
 | ------------------- | -------------------- |
 | [adl]               | [CC-0]               |
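The empty marker pair reserves a spot for an auto-generated dataset overview table, giving the update script a stable anchor to splice into. A minimal sketch of such a splice, assuming a plain-text search for the exact marker strings (the helper name `splice_main_table` is hypothetical, not from the repository):

```python
from pathlib import Path

START = "<!-- START-MAIN TABLE -->"
END = "<!-- END-MAIN TABLE -->"

def splice_main_table(readme: Path, table_md: str) -> None:
    # Hypothetical helper: replace whatever sits between the two
    # markers with the freshly generated markdown table.
    text = readme.read_text()
    start = text.index(START) + len(START)
    end = text.index(END)
    readme.write_text(text[:start] + "\n" + table_md + "\n" + text[end:])
```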
makefile
CHANGED
@@ -4,7 +4,7 @@ install:
 
 test:
 	@echo "--- 🧪 Running tests ---"
-	uv run pytest tests/
+	uv run pytest src/tests/
 
 
 lint:
{scripts → src}/bump_version.py
RENAMED
File without changes

{scripts → src}/git_utilities.py
RENAMED
File without changes

{tests → src/tests}/__init__.py
RENAMED
File without changes
{tests → src/tests}/conftest.py
RENAMED
@@ -4,10 +4,12 @@ from typing import Any
 import pytest
 import yaml
 
+root_path = Path(__file__).parent.parent.parent
+
 
 @pytest.fixture()
 def repo_path() -> Path:
-    return
+    return root_path
 
 
 def readme_yaml_header(repo_path: Path) -> dict[str, Any]:
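The three `.parent` hops walk from the conftest file up to the repository root, one hop more than the old top-level `tests/` location would have needed:

```python
# src/tests/conftest.py
Path(__file__)                       # <repo>/src/tests/conftest.py
Path(__file__).parent                # <repo>/src/tests
Path(__file__).parent.parent         # <repo>/src
Path(__file__).parent.parent.parent  # <repo>  (the repository root)
```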
{tests → src/tests}/readme_parsing.py
RENAMED
File without changes
{tests → src/tests}/test_dataset_schema.py
RENAMED
@@ -7,9 +7,10 @@ from datasets import load_dataset
 from pydantic import AfterValidator, BaseModel, BeforeValidator
 from typing_extensions import Annotated
 
+from .conftest import root_path
 from .readme_parsing import get_tag_idx, read_frontmatter_and_body
 
-main_readme =
+main_readme = root_path / "README.md"
 
 frontmatter, _ = read_frontmatter_and_body(main_readme)
 DATASET_NAMES = [
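The new relative import is presumably what the commit title, "moved tests to allow for imports", refers to. For context, a sketch of what `read_frontmatter_and_body` plausibly does, assuming the README's YAML header is fenced by `---` lines (the real implementation lives in `readme_parsing.py` and is not shown in this diff):

```python
from pathlib import Path
from typing import Any

import yaml

def read_frontmatter_and_body(path: Path) -> tuple[dict[str, Any], str]:
    # Assumed behaviour: split off the YAML frontmatter between the
    # first two "---" fences, parse it, and return the rest as the body.
    _, frontmatter, body = path.read_text().split("---", 2)
    return yaml.safe_load(frontmatter), body
```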
{tests → src/tests}/test_load.py
RENAMED
@@ -3,15 +3,14 @@ from pathlib import Path
 from datasets import load_dataset
 
 
-def test_dataset_loads():
+def test_dataset_loads(repo_path: Path):
     """Ensures that the dataset can load as intended"""
-
-    name = str(repo.resolve())
+    name = str(repo_path.resolve())
     ds = load_dataset(name, split="train", streaming=True)
     sample = next(iter(ds))
     assert isinstance(sample, dict)
 
-
+# TODO
 # def test_all_datasets_in_yaml(repo_path: Path, readme_yaml_header: dict[str, Any]):
 #     configs = readme_yaml_header["configs"]
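Because the test loads in streaming mode, only the first record is materialised rather than the full corpus. The same pattern works outside pytest as a quick smoke test of a local checkout (the path below is a placeholder):

```python
from datasets import load_dataset

# Stream the train split straight from a local clone of the dataset repo.
ds = load_dataset("/path/to/local/checkout", split="train", streaming=True)
print(next(iter(ds)))  # first document as a plain dict
```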
{tests → src/tests}/test_unique_ids.py
RENAMED
File without changes
{scripts → src}/update_descriptive_statistics.py
RENAMED
@@ -183,6 +183,10 @@ def create_parser():
     )
     return parser
 
+def update_main_table(repo_path: Path = repo_path):
+    main_readme = repo_path / "README.md"
+    get_tag_idx()
+
 
 def main(
     dataset: str | None = None,