import os
from enum import Enum
import pexpect
import json
import re
from tqdm import tqdm
import git
import pickle
import requests
from collections import defaultdict

from utils import make_lean_repl, send_tactic, send_command_icanon, send_command_zsh, BASE_PATH

class ParseState(Enum):
    defn = 0
    inductive = 1
    block_comment = 2
    prop = 3
    mutual = 4

def loc_tag(fname, line_ind):
    return f'{fname}:{line_ind}'

def parse_file(fname):
    lines = None
    with open(fname, 'r') as f:
        lines = f.readlines()
    imports = []
    defns = []
    props = []
    comments = []

    prev_state = None
    state = None
    data = []

    def change_state(new_state, line_ind, line):
        nonlocal data, state, prev_state
        route_map = {ParseState.defn: defns, ParseState.inductive: defns, 
                     ParseState.prop: props, ParseState.block_comment: comments,
                     ParseState.mutual: defns}
        if state in route_map:
            route_map[state].append(data)
            data = []
        if new_state in route_map:
            data = [(loc_tag(fname, line_ind), line)]
        prev_state = state
        state = new_state

    for line_ind, line in enumerate(lines):
        line_str = line.strip()
        if state == ParseState.block_comment: # end of block comment: reset state
            if line_str.endswith('-/'):
                state = prev_state
                prev_state = None
                continue
        elif line_str.startswith('--'): # inline comment: maintain state
            comments.append((loc_tag(fname, line_ind), line))
            continue
        elif line_str.startswith('/-'): # start block comment
            change_state(ParseState.block_comment, line_ind, line)
            continue
        elif line_str.startswith('mutual'):
            change_state(ParseState.mutual, line_ind, line)
            continue
        elif line_str.startswith('end') and state == ParseState.mutual:
            # manually handle mutual blocks; it's pretty annoying to parse
            data.append((loc_tag(fname, line_ind), line))
            change_state(None, line_ind, line)
            continue
        elif state == ParseState.mutual:
            data.append((loc_tag(fname,line_ind), line))
            continue
        elif line.startswith('import'):
            assert state is None
            imports.append(line)
            continue
        elif line_str.startswith('def prop'): # one of the propositions to prove
            change_state(ParseState.prop, line_ind, line)
        elif line_str.startswith('def') or line_str.startswith('lemma') or line_str.startswith('theorem'): # a function definition
            change_state(ParseState.defn, line_ind, line)
        elif line_str.startswith('inductive'):
            change_state(ParseState.inductive, line_ind, line)
        elif len(line_str) == 0:
            change_state(None, line_ind, line)
        else:
            data.append((loc_tag(fname,line_ind), line))
    
    change_state(None, -1, '') # handle EOF


    return imports, defns, props, comments
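
# Illustrative sketch (hypothetical file 'LeanSrc/Example.lean', laid out the way
# parse_file expects: an import, a blank line, a helper definition, a blank line,
# then a 'def prop_*' proposition). The return values would look roughly like:
#   imports  == ['import Mathlib\n']
#   defns    == [[('LeanSrc/Example.lean:2', 'def myAdd (a b : Nat) : Nat := a + b\n')]]
#   props    == [[('LeanSrc/Example.lean:4', 'def prop_1 := myAdd 1 1 = 2\n')]]
#   comments == []  # any '--' lines would show up here with their location tags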


def process_defns(defns):
    new_defns = []
    for defn in defns:
        inds, lines = zip(*defn)
        prop_text = ''.join(lines)
        ind = min(inds)
        max_ind = max(inds)

        if lines[0].strip().startswith('mutual'):
            # manually process mutual defns
            names = []
            for line in lines:
                if line.strip().startswith('def'):
                    inner_name = [s for s in line.strip().split(' ') if len(s) > 0][1]
                    names.append(inner_name)
                    #names.append(f'_root_.{inner_name}')
        else:
            names = [[s for s in prop_text.split(' ') if len(s) > 0][1].strip()]

        for name in names:
            if name.endswith(':'):
                name = name[:-1]
            new_defns.append(((ind, max_ind), name, prop_text))
    return new_defns

# take in a raw parsed prop (list of lines), output the corresponding lemma for a theorem prover to prove
def process_prop(prop, default_proof=':= by sorry'):
    inds, lines = zip(*prop)
    prop_text = ''.join(lines)
    ind = min(inds)
    max_ind = max(inds)
    name = prop_text.split(' ')[1]
    assert prop_text[:3] == 'def'
    prop2 = 'theorem' + prop_text[3:]
    # TBD what the default proof should be; different setups might want different things, e.g. tactic mode just wants
    # a 'by', proof term generation wants nothing, and a proof rewriter that expects an initial valid state might want
    # 'by sorry'.
    prop2 = prop2.strip().replace(':=', ':') + f'{default_proof}' 
    return ((ind, max_ind), name, prop2)
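
# Illustrative sketch (hypothetical proposition): a parsed prop whose single line is
#   'def prop_1 := ∀ n : Nat, n + 0 = n\n'
# would come back roughly as
#   ((loc, loc), 'prop_1', 'theorem prop_1 : ∀ n : Nat, n + 0 = n:= by sorry')
# i.e. 'def' becomes 'theorem', ':=' becomes ':', and the default proof is appended verbatim.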

#NOTE: if I eventually choose to handle chained dependencies, I'll want this to return a dictionary representing
# the dependency graph of imports. As of now I have the code for adding in other LeanSrc import definitions, but
# I'm not dealing with the import order in my later sort.
#
# collect all the (location_tag, definition_text) definitions from other files in LeanSrc specified 
# by import_names
def collect_import_defns(import_names):
    import_names = import_names[:] # don't modify original list
    defns = []
    seen = set()
    while len(import_names) > 0:
        imp = import_names.pop()
        if imp in seen:
            continue
        seen.add(imp)
        i, d, p, c = parse_file(f'{BASE_PATH}/{imp}.lean')
        import_names += [imp_str.split('.')[-1].strip() for imp_str in i if 'LeanSrc' in imp_str]
        defns += d
    return defns

#errors I have come across: 
# "function expected at\n  <ident>\nterm has type"
# "unknown identifier '<ident>'"
def match_error(err_str):
    m1 = re.search(r'expected at\n\s+(.+)\nterm has', err_str)
    if m1 is not None:
        return m1.group(1)
    m2 = re.search("unknown (identifier|constant) '(.+)'", err_str)
    if m2 is not None:
        return m2.group(2)
    if 'invalid dotted identifier notation' in err_str:
        return err_str.strip().split(' ')[-1]
    print(f'ERROR: err string <<{err_str}>> is not a recognized error pattern')
    exit()
    return None
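
# Illustrative sketch (hypothetical identifiers, error shapes as listed above):
#   match_error("function expected at\n  myAdd\nterm has type ...")  -> 'myAdd'
#   match_error("unknown identifier 'bubbleSort'")                   -> 'bubbleSort'
#   match_error("unknown constant 'Tree.rec'")                       -> 'Tree.rec'
# Anything else hits the unrecognized-pattern branch and exits.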

# These errors are a result of other things not being defined, and don't contain semantic information about what definition to add.
# Note that I AM NOT IGNORING THESE ERRORS IN THE FINAL PROP WITH DEPS, i.e. I only output the prop with deps once I have
# no errors at all. They are just ignored for the purpose of finding new dependencies.
ignore_errs = ['equality or iff proof expected', 'invalid occurrence of universe level',
                'function is not recursive', 'failed to prove termination', 'unsolved goals',
                'invalid field notation', 'result is not type correct',
                'invalid argument, variable is not a proposition', 'tactic failed. Possible reasons']
def collect_relevant_defns(prop, defns, lean_repl, env, import_order):
    """
    Collect the functions and type definitions used in a prop from a list of defns sourced from the 
    current file and potentially from other libraries, although for now I'm not handling mathlib. 
    This will rely on names being unique, so please don't shadow any names in the files you're importing.
    """
    # use _env because we want all definition dependence checks to be based on the original env
    outp, _env = send_command(lean_repl, prop, env=env) # ignore resulting env; we just want to see the error
    errors = [m for m in outp['messages'] if m['severity'] == 'error']
    #print(errors)
    seen = set()
    seen_locs = set()
    all_deps = []

    while True:
        # reset to original environment
        env2 = env
        all_deps = order_deps(all_deps, import_order)
        errors = []
        seen_err = set()
        for defn in all_deps:
            #print()
            #print(defn[1])
            outp, env2 = send_command(lean_repl, defn[1], env=env2)
            tmp = [(m, defn[1]) for m in outp.get('messages', []) if m['severity'] == 'error' and m['data'] not in seen_err]
            errors += tmp
            for m, _ in tmp:
                seen_err.add(m['data'])
        # env2 is the environment after all dependencies have been added.
        #print('new iteration outp:', outp)
        #print('new iteration errs:', errors)
        #errors = [m for m in outp.get('messages', []) if m['severity'] == 'error']

        # if the dependencies are added without error, also add in the prop.
        if len(errors) == 0:
            outp, env2 = send_command(lean_repl, prop, env=env2)
            errors = [(m, prop) for m in outp.get('messages', []) if m['severity'] == 'error']

        if len(errors) == 0: # all dependencies plus prop statement does not error
            break

        while len(errors) > 0:
            err, err_cause = errors.pop()
            if any([uerr in err['data'] for uerr in ignore_errs]):
                continue
            if 'invalid pattern variable, must be atomic' in err['data']:
                found_ind = False
                defn_line = err_cause.split('\n')[0]
                for ident in defn_line.strip().split(' '):
                    if ident in defns and ident not in seen:
                        found_ind = True
                        cp = err.copy()
                        
                        cp['data'] = f"unknown identifier '{ident}'" # spoof a better error message
                        #print('FOUND INDUCTIVE:', cp['data'])
                        errors.append((cp, err_cause))
                if not found_ind:
                    print('ERROR: failed to resolve inductive type pattern var problem')
                    exit()
                continue

            ident_str = match_error(err['data'])
            ident_str = ident_str.replace('_root_.','')
            #print(ident_str, ident_str in defns)
            if ident_str not in defns:
                print(f"ERROR: couldn't find identifier {ident_str}")
                print(err)
                exit()
                continue
            if ident_str in seen:
                continue
                # don't add the same defn twice
                #print(f'ERROR: circular dependency: {ident_str}')
            seen.add(ident_str)

            if defns[ident_str][0] in seen_locs:
                continue
            seen_locs.add(defns[ident_str][0])
            all_deps.append(defns[ident_str])

    return all_deps
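
# Illustrative sketch (hypothetical names): if the prop mentions 'bubbleSort', the first
# replay errors with "unknown identifier 'bubbleSort'", so its definition is pulled in
# from defns; if that definition itself mentions 'swap', the next pass pulls that in too,
# and the loop ends once the dependency list plus the prop replays with no errors.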

def order_deps(defns, import_order):
    if len(defns) == 0:
        return defns
    order_map = {fname: i for i, fname in enumerate(import_order)}
    line_nums = [int(defn[0][0].split(':')[-1]) for defn in defns]
    max_line_num = max(line_nums)
    def import_rank(defn):
        fpath, line_ind = defn[0][0].split(':')
        fname = re.search(BASE_PATH + r'/(\S+)\.lean', fpath).group(1)
        return order_map[fname]*max_line_num + int(line_ind)
    return sorted(defns, key=import_rank)
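
# Illustrative sketch (hypothetical locations): with import_order == ['Defs', 'Sorts'],
# a dependency tagged '<BASE_PATH>/Defs.lean:40' sorts before one tagged
# '<BASE_PATH>/Sorts.lean:12', and dependencies from the same file keep their original
# line order, so definitions are replayed in roughly the order Lean needs them.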

def extract_file_props(fname, full_path, send_command, default_proof=':= by sorry', repl_type='zsh'):
    # imports, definitions (code and types), propositions, and comments.
    # d, p, and c are lists of lists; each sublist contains the original lines of the file that comprise
    # the definition, proposition, or comment.
    i, d, p, c = parse_file(full_path)

    imp_names = [imp_str.split('.')[-1].strip() for imp_str in i if 'LeanSrc' in imp_str]
    imp_d = collect_import_defns(imp_names)

    all_d = imp_d + d 
    import_order = imp_names + [fname] # imports go first
    all_d = process_defns(all_d)
    defns_by_name = {name: (ind, defn) for ind, name, defn in all_d}
    

    props = [process_prop(prop, default_proof=default_proof) for prop in p]
    #TODO
    lemma_props = [(ind, name, defn.split('\n')[0].strip().replace('lemma', 'theorem').replace(':= by', default_proof))
                   for ind, name, defn in all_d if defn.strip().startswith('lemma')]
    props = lemma_props # + props
    #props_by_name = {name: (ind, defn) for ind, name, defn in props}
    lean_repl = make_lean_repl(repl_type=repl_type)

    props_with_deps = {}

    outp, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
    ct = 0
    for prop_loc, prop_name, prop in tqdm(props, desc='analyzing and loading lean code + properties'):
        ct += 1
        env = mathlib_env

        all_deps = collect_relevant_defns(prop, defns_by_name, lean_repl, env, import_order)

        for defn in all_deps:
            print(defn[-1])
            outp, env = send_command(lean_repl, defn[-1], env=env)
        print('final output of deps', outp)

        outp, env = send_command(lean_repl, prop, env=env)
        for message in outp['messages']:
            if message['severity'] == 'error':
                print(f'error at prop {prop_name}')
                print(message)
                print()
                exit()
        props_with_deps[prop_name] = all_deps + [(prop_loc, prop)]

    lean_repl.close()
    #print(lean_repl.exitstatus, lean_repl.signalstatus)
    return props_with_deps, c
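
# Illustrative usage sketch (hypothetical file name; send_command_icanon comes from utils):
#   pwd, comments = extract_file_props('Sorts', f'{BASE_PATH}/Sorts.lean',
#                                      send_command_icanon, repl_type='icanon')
# pwd['prop_1'] would then hold the ordered dependency snippets followed by the
# 'theorem prop_1 ... := by sorry' statement itself, each tagged with its source location.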

def output_prop_with_deps(prop, prop_name, folder='LeanSrc/benchmark'):
    lines = '\n'.join(['import Mathlib'] + [code_lines for _loc, code_lines in prop])
    with open(os.path.join(folder, prop_name + '.lean'), 'w') as f:
        f.write(lines)

def convert_file_props(fname, new_fname):
    i, d, p, c = parse_file(f'{BASE_PATH}/{fname}.lean')
    imp_names = [imp_str.split('.')[-1].strip() for imp_str in i if 'LeanSrc' in imp_str]
    imp_d = collect_import_defns(imp_names)

    all_d = imp_d + d 
    import_order = imp_names + [fname] # imports go first
    all_d = process_defns(all_d)
    defns_by_name = {name: (ind, defn) for ind, name, defn in all_d}

    props = [process_prop(prop) for prop in p]

    with open(new_fname, 'w') as f:
        defn_lines = '\n'.join([defn for _, _, defn in all_d])
        f.write(defn_lines + '\n')

        prop_lines = '\n'.join([prop for _, _, prop in props])
        f.write(prop_lines + '\n')

def format_llema_input(pwd, lean_url, lean_sha):
    dcts = []
    for prop_name in pwd:
        
        lines = '\n'.join(['import Mathlib'] + [code_lines for _loc, code_lines in pwd[prop_name]])
        lines = lines.replace(':= by sorry', '')
        loc, _ = pwd[prop_name][-1] # last line comes from the prop of interest
        fpath = loc.split(':')[0]
        dct = {'full_name': prop_name,
               'statement': lines,
                'url': lean_url,
                'commit': lean_sha,
                'file_path': fpath,
                'split': 'valid'}
        
        dcts.append(json.dumps(dct) + '\n')
    with open('leancb_lemma_inp.jsonl', 'w') as f:
        f.writelines(dcts)

def pwd_to_json(pwd, send_command, loc2comm, repl_type='zsh'):

    lean_repl = make_lean_repl(repl_type=repl_type)

    outp, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
    assert len([m for m in outp.get('messages', []) if m['severity'] == 'error']) == 0, str(outp)
    
    dcts = []
    for prop_name in pwd:
        deps = '\n\n'.join(['import Mathlib'] + [code_lines for _loc, code_lines in pwd[prop_name][:-1]])
        prop_loc, prop_defn = pwd[prop_name][-1] # last line comes from the prop of interest
        fpath = prop_loc[0].split(':')[0]
        cline = int(prop_loc[0].split(':')[1]) - 1
        score = 5
        if cline in loc2comm:
            comm = loc2comm[cline]
            if 'core: ' in comm: # allow for (S/s)core
                score = int(comm.split('core:')[1].strip().split('/')[0].strip())

        env = mathlib_env
        for _loc, code_lines in pwd[prop_name]:
            outp, env = send_command(lean_repl, code_lines, env=env)
        ps = outp['sorries'][0]['goal']

        locs = [loc for loc, _code_lines in pwd[prop_name]]
        fname2line = defaultdict(lambda: 0)
        for loc in locs:
            fpath, line_num = loc[1].split(':')
            fname2line[fpath] = max(fname2line[fpath], int(line_num))

        dct = {'full_name': prop_name,
               'prop_defn': prop_defn,
               'prop_loc': prop_loc[0],
               'score': score,
               'deps': deps,
               'proof_state': ps,
                'file_locs': [(fpath, fname2line[fpath]) for fpath in fname2line]}
        
        dcts.append(json.dumps(dct) + '\n')
    with open('codeprops_bench.jsonl', 'w') as f:
        f.writelines(dcts)
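
# Illustrative sketch of a single codeprops_bench.jsonl record (hypothetical values):
#   {"full_name": "prop_1",
#    "prop_defn": "theorem prop_1 : ... := by sorry",
#    "prop_loc": "<BASE_PATH>/Sorts.lean:12",
#    "score": 5,
#    "deps": "import Mathlib\n\ndef myAdd ...",
#    "proof_state": "⊢ ...",
#    "file_locs": [["<BASE_PATH>/Sorts.lean", 12]]}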


if __name__ == '__main__':
    #main_fname = 'Properties'
    main_fname = 'Sorts'

    #convert_file_props(main_fname, os.path.join(folder,'all_props.lean'))

    main_full_path = f'{BASE_PATH}/{main_fname}.lean'
    """
    pwd = extract_file_props(main_fname, main_full_path) # props with deps
    for prop_name in pwd:
        output_prop_with_deps(pwd[prop_name], prop_name, folder=folder)
    """
    
    use_icanon = True

    if use_icanon:
        send_command = send_command_icanon
        repl_type = 'icanon'
    else:
        send_command = send_command_zsh
        repl_type = 'zsh'


    rerun = True
    if rerun:
        pwd, comments = extract_file_props(main_fname, main_full_path, send_command, repl_type=repl_type) # props with deps
        with open(f'comm_{main_fname}.pkl', 'wb') as f:
            pickle.dump(comments, f)
        with open(f'pwd_{main_fname}.pkl', 'wb') as f:
            pickle.dump(pwd, f)
    else:
        with open(f'pwd_{main_fname}.pkl', 'rb') as f:
            pwd = pickle.load(f)
        with open(f'comm_{main_fname}.pkl', 'rb') as f:
            comments = pickle.load(f)

    loc2comm = {}
    for loc, comm in comments:
        fname, line_str = loc.strip().split(':')
        if fname != main_full_path:
            continue
        loc2comm[int(line_str.strip())] = comm

    # use to test specific props
    #pwd_spec = {}
    #test_pname = 'prop_29'
    #pwd_spec[test_pname] = pwd[test_pname]
    #pwd = pwd_spec

    pwd_to_json(pwd, send_command, loc2comm, repl_type=repl_type)
        
    #data, by_score = parse_benchmark_output('bench_out_pythia.txt', pwd, loc2comm)

    #sorries = outp['sorries']
    #for sorry in sorries:
    #    ps = sorry['proofState']
    #    # also has 'pos', 'endPos'
    #    goal = sorry['goal']
    #    send_tactic