bleugreen committed on
Commit cbb6264
1 Parent(s): 5691e74

added scripts

Files changed (4):
  1. .gitignore +1 -0
  2. package.json +6 -0
  3. parse.py +47 -0
  4. parse_ts.js +45 -0
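
Together, the two scripts form a small extraction pipeline: parse.py iterates over the TypeScript split of the-stack-smol and pipes each file's contents to parse_ts.js over stdin; the Node script uses tsquery to pull out declaration-level 'semantic chunks' (functions, classes, interfaces, enums, type aliases, and methods), which parse.py reads back, tags with repository metadata, and collects into a Hugging Face Dataset saved as ts-chunks.json.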
.gitignore ADDED
@@ -0,0 +1 @@
+ node_modules
package.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "dependencies": {
+     "@phenomnomnominal/tsquery": "^5.0.1",
+     "typescript": "^5.0.4"
+   }
+ }
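
With this manifest in place, npm install fetches tsquery and the TypeScript compiler API that parse_ts.js below depends on (hence the node_modules entry in .gitignore).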
parse.py ADDED
@@ -0,0 +1,47 @@
+ import subprocess
+ from datasets import load_dataset, Dataset
+ import json
+ from tqdm import tqdm
+
+ # load the TypeScript split of the-stack-smol from a local copy
+ ds = load_dataset("../datasets/the-stack-smol", data_dir='data/typescript')
+ print('loaded')
+
+ def split_ts_into_chunks(ts_code):
+     result = subprocess.run(
+         ['node', 'parse_ts.js'],
+         input=ts_code,
+         text=True,
+     )
+
+     if result.returncode != 0:
+         raise Exception('Error in TypeScript parsing')
+
+     with open('semantic_chunks.jsonl', 'r') as file:
+         lines = file.read().splitlines()
+
+     chunks = [json.loads(line) for line in lines]
+     with open('semantic_chunks.jsonl', 'w'):
+         pass
+
+     return chunks
+
+
+ def chunk_ts_file(data):
+     funcs = split_ts_into_chunks(data['content'])
+     for func in funcs:
+         func['repo'] = data['repository_name']
+         func['path'] = data['path']
+         func['language'] = data['lang']
+     return funcs
+
+ chunks = []
+ for i in tqdm(range(len(ds['train']))):
+     chunk = chunk_ts_file(ds['train'][i])
+     chunks += chunk
+     if i % 100 == 0:
+         print(len(chunks))
+
+ dataset = Dataset.from_list(chunks)
+ print(dataset)
+ dataset.to_json('ts-chunks.json')
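
For reference, each record written to ts-chunks.json combines the type/content pair emitted by parse_ts.js with the repo, path, and language fields added in chunk_ts_file. A minimal sketch of reading one back (Dataset.to_json writes JSON Lines by default, so the file can be consumed line by line):

    import json

    # inspect the first record produced by parse.py; the field names
    # below come straight from the two scripts
    with open('ts-chunks.json') as f:
        record = json.loads(f.readline())

    print(sorted(record.keys()))
    # expected: ['content', 'language', 'path', 'repo', 'type']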
parse_ts.js ADDED
@@ -0,0 +1,45 @@
+ // parse_ts.js
+
+ const fs = require('fs');
+ const ts = require('typescript');
+ const { tsquery } = require('@phenomnomnominal/tsquery');
+
+ let data = '';
+
+ process.stdin.on('data', function (chunk) {
+     data += chunk;
+ });
+
+ process.stdin.on('end', function () {
+     const sourceFile = ts.createSourceFile('temp.ts', data.toString(), ts.ScriptTarget.ES2015, true);
+
+     // Define the node types that constitute a 'semantic chunk'
+     const semanticChunkNodeTypes = [
+         'FunctionDeclaration',
+         'ArrowFunction',
+         'ClassDeclaration',
+         'InterfaceDeclaration',
+         'EnumDeclaration',
+         'TypeAliasDeclaration',
+         'MethodDeclaration',
+     ];
+
+     const semanticChunks = semanticChunkNodeTypes.flatMap(nodeType =>
+         tsquery(sourceFile, nodeType)
+     );
+
+     const jsonl = semanticChunks.map(chunk => {
+         const comments = ts.getLeadingCommentRanges(sourceFile.getFullText(), chunk.getFullStart()) || [];
+         const commentTexts = comments.map(comment => sourceFile.getFullText().slice(comment.pos, comment.end)).join('\n');
+
+         // Prepend the leading comments to the chunk's content
+         const contentWithComments = commentTexts + '\n' + chunk.getText(sourceFile);
+
+         return JSON.stringify({
+             type: ts.SyntaxKind[chunk.kind],
+             content: contentWithComments
+         });
+     }).join('\n');
+
+     fs.writeFileSync('semantic_chunks.jsonl', jsonl);
+ });
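
Because parse_ts.js is a plain stdin-to-file filter, it can also be exercised on its own, outside the dataset loop. A minimal sketch, assuming the npm dependencies above are installed; the sample snippet is hypothetical:

    import json
    import subprocess

    # hypothetical one-function input to exercise the parser
    sample = '''
    // adds two numbers
    function add(a: number, b: number): number {
        return a + b;
    }
    '''

    subprocess.run(['node', 'parse_ts.js'], input=sample, text=True, check=True)

    # parse_ts.js writes its output to semantic_chunks.jsonl
    with open('semantic_chunks.jsonl') as f:
        for line in f:
            print(json.loads(line)['type'])  # expected: FunctionDeclaration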