Julien 'Lta' BALLET committed on
Commit a49fd36 · unverified · 1 Parent(s): 24048a4

feat: Add upstream dataset processing script

Files changed (2):
  1. datafux.py +155 -0
  2. requirements.txt +2 -0
datafux.py ADDED
@@ -0,0 +1,155 @@
+ #!/usr/bin/env python3
+
+ import os, glob, re
+ import datetime as dt
+
+ import click
+ import zipfile as zf
+ import polars as pl
+
+ GAP_RE = re.compile(r'Gap of (\d+)s found between (\d+) and (\d+)')
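+ # Expected CSV layout (assumed from the schema below and the ";" separator
+ # used when reading): "DateTime;Open;High;Low;Close;Volume", e.g. a
+ # hypothetical row: 20230102 170000;1.070100;1.070250;1.069900;1.070000;0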
+ CSV_SCHEMA = {
+     'ts': pl.String,
+     'open': pl.Float64,
+     'high': pl.Float64,
+     'low': pl.Float64,
+     'close': pl.Float64,
+     'volume': pl.UInt64,
+ }
+
+ @click.group()
+ def cli():
+     pass
+
+
+ @cli.command()
+ @click.argument('folder')
+ @click.option('--only', '-o', multiple=True)
+ @click.option('--exclude', '-e', multiple=True)
+ def extract(folder, only, exclude):
+     # Pair folders are named after their six-letter symbol (e.g. EURUSD)
+     pairs = []
+
+     for entry in os.scandir(folder):
+         if entry.is_dir() and len(entry.name) == 6:
+             pairs.append(entry.path)
+
+     for pair in pairs:
+         name = os.path.basename(pair)
+         if len(only) > 0 and name not in only:
+             continue
+         if len(exclude) > 0 and name in exclude:
+             continue
+
+         print(f"Processing {name}:")
+         gaps, ticks = _extract_and_concatenate_pair(pair)
+
+         gaps_path = os.path.join(pair, 'gaps.parquet')
+         print(f" - Writing {gaps_path}")
+         gaps.write_parquet(gaps_path)
+
+         ticks_path = os.path.join(pair, 'ticks.parquet')
+         print(f" - Writing {ticks_path}")
+         ticks.write_parquet(ticks_path)
+
+
+ def _parse_gaps(lines):
+     """Parse a HistData status report and return the gaps it lists.
+
+     Gap lines look like:
+     "Gap of 3600s found between 20230101170000 and 20230101180000"
+     """
+     data = []
+
+     for line in lines:
+         line = line.decode('utf-8').strip()
+
+         if line == '':
+             continue
+         if 'HistData.com' in line or 'File:' in line or 'tick interval' in line:
+             continue
+
+         md = GAP_RE.match(line)
+         if md:
+             data.append([int(md[1]), md[2], md[3]])
+         else:
+             print(f"Unable to parse line: {line}")
+
+     # Timestamps in the report are EST; normalize both bounds to UTC
+     gaps = pl.DataFrame(
+         schema={"length": pl.UInt64, "start": pl.String, "end": pl.String},
+         data=data,
+         orient='row'
+     ).with_columns(
+         pl.col("start").str.to_datetime(
+             "%Y%m%d%H%M%S"
+         ).dt.replace_time_zone('EST').dt.convert_time_zone('UTC')
+     ).with_columns(
+         pl.col("end").str.to_datetime(
+             "%Y%m%d%H%M%S"
+         ).dt.replace_time_zone('EST').dt.convert_time_zone('UTC')
+     )
+     return gaps
+
+
+ @cli.command()
+ @click.argument('folder')
+ @click.option('--only', '-o', multiple=True)
+ @click.option('--exclude', '-e', multiple=True)
+ def merge(folder, only, exclude):
+     for kind in ['ticks', 'gaps']:
+         print(f'Processing {kind} files: ')
+         files = glob.glob(os.path.join(folder, '*', f'{kind}.parquet'))
+         dataframes = []
+
+         for file in files:
+             # Apply the same --only/--exclude filtering as extract
+             name = os.path.basename(os.path.dirname(file))
+             if len(only) > 0 and name not in only:
+                 continue
+             if len(exclude) > 0 and name in exclude:
+                 continue
+
+             print(f" - {file}")
+             df = pl.read_parquet(file).with_columns(symbols=pl.lit(name))
+             dataframes.append(df)
+
+         result = pl.concat(dataframes)
+         dest = os.path.join(folder, f'all_{kind}.parquet')
+         print(f'-> Writing merged file to {dest}')
+         result.write_parquet(dest)
+
+
+ def _extract_and_concatenate_pair(path, name=None):
+     if name is None:
+         name = os.path.basename(path)
+
+     gaps = []
+     ticks = []
+
+     zipfiles = glob.glob(os.path.join(path, "*.zip"))
+     for zipfile in zipfiles:
+         with zf.ZipFile(zipfile) as archive:
+             for entry in archive.namelist():
+                 # This is the status report; parse it for gaps
+                 if entry.endswith(".txt"):
+                     with archive.open(entry) as txt_file:
+                         lines = txt_file.readlines()
+                         gaps.append(_parse_gaps(lines))
+
+                 # This is the CSV with the ticker data
+                 elif entry.endswith(".csv"):
+                     with archive.open(entry) as csv_file:
+                         ticks.append(pl.read_csv(
+                             csv_file,
+                             has_header=False,
+                             separator=";",
+                             ignore_errors=True,
+                             schema=CSV_SCHEMA,
+                         ))
+                 else:
+                     print(f"Unknown file {entry} in {zipfile}")
+
+     gaps = pl.concat(gaps).sort(by="start", descending=False)
+     # Tick timestamps are EST strings; parse and normalize to UTC
+     ticks = pl.concat(ticks).with_columns(
+         pl.col("ts").str.to_datetime(
+             "%Y%m%d %H%M%S"
+         ).dt.replace_time_zone('EST').dt.convert_time_zone('UTC')
+     ).sort(by="ts", descending=False)
+
+     return gaps, ticks
+
+
+ if __name__ == '__main__':
+     cli()
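
For reference, a typical run might look like the following sketch (the data folder and the EURUSD symbol are hypothetical examples; the script expects one subfolder per pair containing the downloaded HistData zip archives):

    python datafux.py extract data --only EURUSD
    python datafux.py merge data

extract writes gaps.parquet and ticks.parquet into each pair folder; merge then concatenates them into all_gaps.parquet and all_ticks.parquet at the top level of the folder.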
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ polars
+ click
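
The dependencies can be installed in the usual way before running the script:

    pip install -r requirements.txt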