'''
Imports
'''
import guitarpro
from guitarpro import GuitarString, NoteType
import numpy as np

from _NoteData import NoteData


'''
Constants
'''
# PITCH[i] = the pitch associated with MIDI note number i.
# For example, PITCH[69] = 'A4'.
PITCH = {val : str(GuitarString(number=0, value=val)) for val in range(128)}
# MIDI[name] = the MIDI note number associated with the note called name.
# For example, MIDI['A4'] = 69.
MIDI  = {str(GuitarString(number=0, value=val)) : val for val in range(128)}
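# The two tables are inverses of each other by construction; for instance,
# PITCH[MIDI['A4']] == 'A4' and MIDI[PITCH[69]] == 69.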




'''
process_notes function
'''
def process_notes(beat, tuning, as_fingerings=True):
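    '''
    Compress a pyguitarpro Beat into a single NoteData object.

    noteData.value becomes 'rest', 'tied', 'dead', a single pitch, or a
    chord label (root + '_5', '_dim5', or '_4'); duration, isDotted, and
    palmMute record how the beat is played.
    '''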
    
    noteData = NoteData()
    
    duration = (beat.duration.value, beat.duration.isDotted)
    
    # Tuplets are cool but rare.
    # If a tuplet is found, simply halve its play time (by doubling its duration value) to simplify things.
    if beat.duration.tuplet.enters != 1 or beat.duration.tuplet.times != 1:
        duration = (duration[0] * 2, duration[1]) # Tuples aren't mutable, so just re-assign the tuple.
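        # Example: an eighth-note triplet (value=8, enters=3, times=2) would be
        # stored as a plain sixteenth note (value=16).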
        
    noteData.duration = duration[0]
    noteData.isDotted = duration[1]
    
    if len(beat.notes) == 0:
        noteData.value = 'rest'
        return noteData
    
    noteData.palmMute = beat.notes[0].effect.palmMute
    
    
    note_types = [note.type for note in beat.notes]

    
    if all(note_type == NoteType.rest for note_type in note_types):
        noteData.value = 'rest'
        return noteData
    
    if all(note_type == NoteType.tie for note_type in note_types):
        noteData.value = 'tied'
        return noteData
    
    if all(note_type == NoteType.dead for note_type in note_types):
        noteData.value = 'dead'
        return noteData
    
    lowest_string = len(tuning)
    
    if as_fingerings:
        # NEW CODE: Represent each pitch as its distance (in semitones) from the tuning of the lowest string.
        pitches = np.array([note.value + tuning[note.string] - tuning[lowest_string] for note in beat.notes if note.type == NoteType.normal])
    else:
        # OLD CODE: note_number = MIDI note number, where A4 = 440 Hz = note 69.
        pitches = np.array([note.value + tuning[note.string] for note in beat.notes if note.type == NoteType.normal])
    
    # Remove any possible NaN values.
    pitches = pitches[~np.isnan(pitches)]
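    # Example: in standard E tuning (lowest string = MIDI 40), fret 3 on the
    # lowest string becomes 3 with as_fingerings=True, or MIDI note 43
    # (PITCH[43] == 'G2') with as_fingerings=False.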
    
    
    # Pitches are often stored in descending order; sort them to guarantee ascending order.
    pitches = np.sort(pitches)
    
    if len(pitches) == 0:
        noteData.value = 'rest'
        return noteData
    
    if len(pitches) == 1:
        if as_fingerings:
            # NEW CODE:
            noteData.value = str(pitches[0])
            return noteData
        else:
            # OLD CODE:
            noteData.value = PITCH[pitches[0]]
            return noteData
    
    # Look at the pitch intervals among the lowest 3-4 notes that are being played.
    # Usually, chords will start at the lowest 2 notes.
    # However, sometimes players will strum the open lowest string constantly throughout the song
    # (see: 'Be Quiet and Drive', 'Kaiowas'),
    # so the next-highest pairs of notes should also be considered when labeling a chord.
    if len(pitches) == 2:
        note_pairs = [(0, 1)]
    elif len(pitches) == 3:
        note_pairs = [(0, 1), (0, 2), (1, 2)]
    else:  # len(pitches) >= 4; lengths 0 and 1 already returned above.
        note_pairs = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)]
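    # Example: fingering pitches [0, 7, 12] hit interval 7 at pair (0, 1),
    # so the beat is labeled as the power chord '0_5' below.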
        
    for idx1, idx2 in note_pairs:

        interval = pitches[idx2] - pitches[idx1]
        
        if interval == 12 or interval == 7:
            # Return a power chord associated with pitches[idx1].
            if as_fingerings:
                # NEW CODE:
                noteData.value = str(pitches[idx1]) + '_5'
                return noteData
            else:
                # OLD CODE:
                noteData.value = PITCH[pitches[idx1]] + '_5'
                return noteData
                
        if interval == 6:
            # Return a tritone chord associated with pitches[idx1].
            if as_fingerings:
                # NEW CODE:
                noteData.value = str(pitches[idx1]) + '_dim5'
                return noteData
            else:
                # OLD CODE:
                noteData.value = PITCH[pitches[idx1]] + '_dim5'
                return noteData
        
        if interval == 5:
            # Return a P4 chord associated with pitches[idx1].
            if as_fingerings:
                # NEW CODE:
                noteData.value = str(pitches[idx1]) + '_4'
                return noteData
            else:
                # OLD CODE:
                noteData.value = PITCH[pitches[idx1]] + '_4'
                return noteData
        
        
    
    # No recognizable chord interval was found; fall back to the lowest pitch.
    if as_fingerings:
        # NEW CODE:
        noteData.value = str(pitches[0])
        return noteData
    else:
        # OLD CODE:
        noteData.value = PITCH[pitches[0]]
        return noteData
    
    


'''
compress_track function
'''
def compress_track(track, as_fingerings=True):
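    '''
    Compress an entire pyguitarpro Track, measure by measure, into lists of
    NoteData tuples via process_notes.
    '''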
    # 'song' contains the compressed representation of track.
    song = np.empty(len(track.measures), dtype=object)
    
    # Get the tuning and lowest string of the instrument in this track.
    tuning = {string.number : string.value for string in track.strings}
    lowest_string = len(tuning) # Basses have 4-6 strings, while metal guitars have 6-8 strings.

    #print(f'Tuning = {[PITCH[x] for x in tuning.values()]}')

    for m_i, measure in enumerate(track.measures):
        '''
        Upon inspection of some of the most popular Songsterr .gp5 tabs,
        it turns out that each measure always has two Voices.
        The first Voice (index 0) always contains music, while
        the second Voice (index 1) always just contains an empty Beat with no notes.

        Therefore, only the first Voice (index 0) actually matters.
        '''
        song[m_i] = []

        #print(m_i+1)
        for b_i, beat in enumerate(measure.voices[0].beats):
            song[m_i].append(process_notes(beat, tuning, as_fingerings).as_tuple())
            #print('\t', song[m_i][b_i], '\t', beat.duration)
            
    return song
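


'''
Example usage
'''
# A minimal sketch of how these pieces fit together. 'riff.gp5' is a
# hypothetical file path, and the printed tuples assume NoteData.as_tuple()
# returns the (value, duration, isDotted, palmMute) fields set above.
if __name__ == '__main__':
    song = guitarpro.parse('riff.gp5')
    for track in song.tracks:
        compressed = compress_track(track, as_fingerings=True)
        print(f'{track.name}: {len(compressed)} measures, '
              f'first measure = {compressed[0]}')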