import datetime
import json

import datasets

_CITATION = """
@inproceedings{zellersluhessel2021merlot,
  title={MERLOT: Multimodal Neural Script Knowledge Models},
  author={Zellers, Rowan and Lu, Ximing and Hessel, Jack and Yu, Youngjae and Park, Jae Sung and Cao, Jize and Farhadi, Ali and Choi, Yejin},
  booktitle={Advances in Neural Information Processing Systems 34},
  year={2021}
}
"""

_DESCRIPTION = """\
YT-Temporal-180M, a large-scale dataset of 6 million YouTube videos (spanning 180M
extracted frames) covering a diverse range of topics.
"""

_URL_BASE = "https://rowanzellers.com/merlot/#data"

# Zero-pad shard indices to three digits: "000", "001", ..., "099".
url_numbers = [f"{i:03d}" for i in range(100)]
_DL_URLS = [
    f"https://storage.googleapis.com/merlot/yttemporal180m/yttemporal180m_{num}of100.jsonl.gz"
    for num in url_numbers
]
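# Example shard URL produced above (the first of 100):
# https://storage.googleapis.com/merlot/yttemporal180m/yttemporal180m_000of100.jsonl.gz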


def json_serializer(o):
    """Fallback serializer so json.dumps can handle datetime objects in metadata."""
    if isinstance(o, datetime.datetime):
        return str(o)

    raise TypeError(f"Object of type {o.__class__.__name__} is not JSON serializable")
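
# Example: json.dumps({"upload_date": datetime.datetime(2021, 5, 1)}, default=json_serializer)
# returns '{"upload_date": "2021-05-01 00:00:00"}'.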


class yttemporal180mConfig(datasets.BuilderConfig):
    """BuilderConfig for ActivityNet Captions."""

    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version("2.1.0", ""), **kwargs)


class yttemporal180m(datasets.GeneratorBasedBuilder):
    """Builder for the YT-Temporal-180M video-caption dataset (MERLOT, Zellers et al., 2021)."""

    DEFAULT_CONFIG_NAME = "default"
    BUILDER_CONFIGS = [
        yttemporal180mConfig(
            name="default", description="Default full yttemporal180m dataset"
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "video_id": datasets.Value("string"),
                    "video_url": datasets.Value("string"),
                    "caption": datasets.Value("string"),
                    "timestamp_start": datasets.Value("float32"),
                    "timestamp_stop": datasets.Value("float32"),
                    "meta": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL_BASE,
            citation=_CITATION,
        )
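
    # An illustrative record yielded by this builder (all values are hypothetical):
    # {"video_id": "abc123XYZ",
    #  "video_url": "https://www.youtube.com/watch?v=abc123XYZ",
    #  "caption": "hey guys welcome back ",
    #  "timestamp_start": 0.0, "timestamp_stop": 15.0, "meta": "{...}"}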

    def _split_generators(self, dl_manager):
        # Download and decompress all 100 jsonl shards; the dataset ships a
        # single train split.
        archive_paths = [dl_manager.download_and_extract(url) for url in _DL_URLS]

        train_split = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"jsonl_files": archive_paths},
            )
        ]

        return train_split

    def _generate_examples(self, jsonl_files):
        """Yields one example per caption segment of at most 15 seconds."""
        idx = 0
        for file in jsonl_files:
            with open(file, encoding="utf-8") as jsonl_file:
                # Iterate lazily instead of materializing the whole shard in memory.
                for json_str in jsonl_file:
                    infos = json.loads(json_str)

                    video_id = infos["info"]["display_id"]
                    url = "https://www.youtube.com/watch?v=" + video_id

                    # Divide the video into segments of at most 15 seconds, using
                    # the ASR word timestamps as chunk boundaries.
                    max_sec_per_segment = 15
                    last_caption_timestamp = infos["subtitles"][-1]["time"]
                    num_chunks = (
                        int(divmod(last_caption_timestamp, max_sec_per_segment)[0]) + 1
                    )
                    time_chunks = [
                        i * max_sec_per_segment for i in range(num_chunks + 1)
                    ]
                    time_chunk_idx = 0
                    caption = ""
                    for el in infos["subtitles"]:
                        is_last = el["time"] == last_caption_timestamp
                        if el["time"] > time_chunks[time_chunk_idx + 1] or is_last:
                            if is_last:
                                # Keep the final word in the last segment instead
                                # of dropping it.
                                caption += el["word"] + " "
                            timestamp_start = float(time_chunks[time_chunk_idx])
                            timestamp_stop = (
                                float(last_caption_timestamp)
                                if is_last
                                else float(time_chunks[time_chunk_idx + 1])
                            )
                            time_chunk_idx += 1

                            metadata_dict = {
                                "asr_info": infos["denoised"],
                                "info": infos["info"],
                                "subtitles": infos["subtitles"],
                                "title": infos["info"]["title"],
                            }
                            yield idx, {
                                "video_id": video_id,
                                "video_url": url,
                                "caption": caption,
                                "timestamp_start": timestamp_start,
                                "timestamp_stop": timestamp_stop,
                                "meta": json.dumps(
                                    metadata_dict, default=json_serializer, indent=2
                                ),
                            }
                            idx += 1
                            # A word past the chunk boundary opens the next segment.
                            caption = "" if is_last else el["word"] + " "
                        else:
                            caption += el["word"] + " "
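

if __name__ == "__main__":
    # Minimal local smoke test (a sketch, not part of the original script).
    # It assumes this file is saved as "yttemporal180m.py" (the filename is
    # illustrative) and that the shard URLs are reachable. Streaming avoids
    # downloading all 100 shards up front.
    dset = datasets.load_dataset("yttemporal180m.py", split="train", streaming=True)
    print(next(iter(dset)))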