jglaser committed
Commit 5680ee8 · 1 Parent(s): 88fe72c

fix skip download feature

Files changed (1)
  1. binding_affinity.py +9 -26
binding_affinity.py CHANGED
@@ -34,7 +34,7 @@ year={2021}
 # TODO: Add description of the dataset here
 # You can copy an official description
 _DESCRIPTION = """\
-A dataset to refine language models on protein-ligand binding affinity prediction.
+A dataset to fine-tune language models on protein-ligand binding affinity prediction.
 """

 # TODO: Add a link to an official homepage for the dataset here
@@ -47,9 +47,9 @@ _LICENSE = "BSD two-clause"
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URL = "https://huggingface.co/datasets/jglaser/binding_affinity/resolve/main/"
-
-_file_names = {'default': 'data/all.parquet',
-               'no_kras': 'data/all_nokras.parquet'}
+_data_dir = "data/"
+_file_names = {'default': _data_dir+'all.parquet',
+               'no_kras': _data_dir+'all_nokras.parquet'}

 _URLs = {name: _URL+_file_names[name] for name in _file_names}

@@ -60,23 +60,6 @@ class BindingAffinity(datasets.ArrowBasedBuilder):

     VERSION = datasets.Version("1.1.0")

-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
-    # BUILDER_CONFIGS = [
-    #    datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
-    #    datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
-    #]
-
-    #DEFAULT_CONFIG_NAME = "affinities"  # It's not mandatory to have a default configuration. Just use one if it make sense.
-
     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
         #if self.config.name == "first_domain": # This is the name of the configuration selected in BUILDER_CONFIGS above
@@ -125,11 +108,12 @@ class BindingAffinity(datasets.ArrowBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        files = _file_names
-        try:
+        import os
+        if os.path.exists(dl_manager._base_path):
+            # this is a hack to force the use of the local copy
+            files = dl_manager.download_and_extract({fn: os.path.join(dl_manager._base_path, _file_names[fn]) for fn in _file_names})
+        else:
             files = dl_manager.download_and_extract(_URLs)
-        except:
-            pass

         return [
             datasets.SplitGenerator(
@@ -156,5 +140,4 @@ class BindingAffinity(datasets.ArrowBasedBuilder):
         local = fs.LocalFileSystem()

         for i, f in enumerate([filepath]):
-            print(f)
             yield i, pq.read_table(f,filesystem=local)
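
For reference, a short sketch of how the reworked _data_dir/_file_names definitions expand through the unchanged _URLs comprehension; the values are taken directly from the diff above and can be checked in a plain Python session:

_URL = "https://huggingface.co/datasets/jglaser/binding_affinity/resolve/main/"
_data_dir = "data/"
_file_names = {'default': _data_dir + 'all.parquet',
               'no_kras': _data_dir + 'all_nokras.parquet'}

# Same dict comprehension as in the script: one fully qualified download URL per entry.
_URLs = {name: _URL + _file_names[name] for name in _file_names}
# {'default': 'https://huggingface.co/datasets/jglaser/binding_affinity/resolve/main/data/all.parquet',
#  'no_kras': 'https://huggingface.co/datasets/jglaser/binding_affinity/resolve/main/data/all_nokras.parquet'}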
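The _split_generators change is what implements the "skip download" behaviour: dl_manager._base_path (a private DownloadManager attribute, hence the "hack" comment in the diff) resolves to the location the loading script was loaded from, so when that location is an existing local directory containing data/, the parquet files are taken from disk instead of being fetched from the Hub. A minimal usage sketch, assuming a datasets release from around this commit and a placeholder local path:

from datasets import load_dataset

# Remote case: the parquet files are downloaded from the _URLs defined above and cached.
ds = load_dataset("jglaser/binding_affinity")

# Local case (the skip-download path): point load_dataset at a clone of the dataset
# repository that already contains the data/ folder; os.path.exists(dl_manager._base_path)
# is then true and the local parquet files are used directly.
ds_local = load_dataset("/path/to/local/binding_affinity")  # placeholder path to a local clone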
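The generator at the end of the diff yields each parquet file as a whole Arrow table. A standalone sketch of that read path, assuming pyarrow is installed and using a placeholder file path:

import pyarrow.parquet as pq
from pyarrow import fs

# Mirrors the yield in the script: read the parquet file through an explicit
# LocalFileSystem handle and get back a pyarrow.Table.
local = fs.LocalFileSystem()
table = pq.read_table("data/all.parquet", filesystem=local)  # placeholder path
print(table.num_rows, table.column_names)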