import streamlit as st
# Streamlit-Grammar Tokenizer
class GrammarTokenizer:
    """Position-based tokenizer: walks the source text once and records
    token spans in self.tokens, keyed by start position."""

    # '\r\n' is covered by its two characters; membership tests below
    # are per character.
    WHITESPACE = (' ', '\t', '\n', '\r')

    def __init__(self, text):
        self.text = text
        self.pos = 0
        self.tokens = {}
        self.tokenize()  # populate tokens immediately, as callers expect

    def tokenize(self):
        self.tokens = {}
        self.pos = 0
        self.processOther()  # main dispatch loop over the whole input

    def processSignificantWord(self):
        # Flag the word starting here as significant, then consume it.
        if self.isSignificantWord():
            self.tokens.setdefault(self.pos, {}).setdefault('word', {})["significant"] = True
        self.processToken()

    def isSignificantWord(self):
        return (self.pos < len(self.text)
                and self.text[self.pos] not in self.WHITESPACE
                and not self.text.endswith('\\'))

    def processToken(self):
        # Record the span of a run of non-whitespace characters.
        if self.pos < len(self.text) and self.text[self.pos] not in self.WHITESPACE:
            entry = self.tokens.setdefault(self.pos, {}).setdefault('token', {})
            entry["begin"] = self.pos
            while self.pos < len(self.text) and self.text[self.pos] not in self.WHITESPACE:
                self.pos += 1
            entry["end"] = self.pos

    def processWhitespace(self):
        # Record the span of a run of whitespace characters.
        if self.pos < len(self.text) and self.text[self.pos] in self.WHITESPACE:
            entry = self.tokens.setdefault(self.pos, {}).setdefault('whitespace', {})
            entry["begin"] = self.pos
            while self.pos < len(self.text) and self.text[self.pos] in self.WHITESPACE:
                self.pos += 1
            entry["end"] = self.pos

    def processLabel(self):
        # A label starts with ':' and runs to the next whitespace or ':'.
        if self.pos < len(self.text) and self.text[self.pos] == ':':
            entry = self.tokens.setdefault(self.pos, {}).setdefault('label', {})
            entry["begin"] = self.pos
            self.pos += 1
            while self.pos < len(self.text) and self.text[self.pos] not in self.WHITESPACE + (':',):
                self.pos += 1
            entry["end"] = self.pos

    def processEmbeddedView(self):
        # An embedded view is a '{...}' block; step into its body, stopping
        # at ';' so processViewElement can handle empty elements.
        if self.pos < len(self.text) and self.text[self.pos] == '{':
            self.pos += 1
            while self.pos < len(self.text) and self.text[self.pos] not in self.WHITESPACE + (';', '}'):
                self.pos += 1
            self.processViewElement()

    def processViewElement(self):
        # A ';' inside a view marks an empty element; record its span.
        if self.pos < len(self.text) and self.text[self.pos] == ';':
            entry = self.tokens.setdefault(self.pos, {}).setdefault('empty', {})
            entry["begin"] = self.pos
            self.pos += 1
            while self.pos < len(self.text) and self.text[self.pos] not in self.WHITESPACE + ('}',):
                self.pos += 1
            entry["end"] = self.pos

    def processOther(self):
        # Main dispatch loop: route each character class to its processor.
        while self.pos < len(self.text):
            ch = self.text[self.pos]
            if ch in self.WHITESPACE:
                self.processWhitespace()
            elif ch == ':':
                self.processLabel()
            elif ch == '{':
                self.processEmbeddedView()
            elif ch == '}':
                self.pos += 1
            else:
                self.processSignificantWord()
        self.tokens.setdefault('end', {})["done"] = True
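
# A minimal usage sketch (illustrative, not part of the original module):
# tokenizing a short string yields a dict keyed by start position, with an
# 'end' sentinel entry appended after the final span.
def demo_tokenizer():
    tokens = GrammarTokenizer("todo: buy milk").tokens
    # e.g. tokens[0] -> {'word': {'significant': True},
    #                    'token': {'begin': 0, 'end': 5}}
    for key in sorted(tokens, key=str):
        print(key, tokens[key])
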
# Streamlit-Grammar Converter
class GrammarConverter:
    def __init__(self, tokens):
        self.tokens = tokens
        self.convert()

    def convert(self):
        for token in self.tokens.values():
            self.convertToken(token)

    def convertToken(self, token):
        # Rewrite a punctuation token: its raw 'punkt' string becomes a
        # quoted 'text' field, and the intermediate key is dropped.
        if 'punkt' in token:
            punkt = token.pop('punkt')
            token["text"] = '"' + punkt + '"'
# Streamlit-Grammar JSON
class GrammarJSON:
    def __init__(self, text):
        self.text = text
        self.grammar = GrammarTokenizer(text).tokens
        self.converted = GrammarConverter(self.grammar).tokens
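
# Usage sketch: GrammarJSON bundles tokenizing and converting, so callers
# only need to hand it the source text.
def demo_grammar_json():
    parsed = GrammarJSON("todo: buy milk").converted
    print(parsed)  # position-keyed token map, plus the 'end' sentinel
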
# Streamlit-Grammar Styler
class GrammarStyler:
    def __init__(self, parsed):
        # Styling is applied lazily: callers invoke style() once and use
        # its return value.
        self.parsed = parsed

    def style(self):
        self.styleText()
        self.styleWhitespace()
        self.styleLabel()
        return self.parsed

    def styleText(self):
        # Significant words are styled as labels; other text spans as text.
        for token in self.parsed.values():
            if token.get('word', {}).get('significant'):
                token['style'] = self.encodeStyle(token.get('style', 'none'), 'label')
            elif 'token' in token:
                token['style'] = self.encodeStyle(token.get('style', 'none'), 'text')

    def styleWhitespace(self):
        for token in self.parsed.values():
            if 'whitespace' in token:
                token['style'] = self.encodeStyle(token.get('style', 'none'), 'whitespace')

    def styleLabel(self):
        for token in self.parsed.values():
            if 'label' in token:
                token['style'] = self.encodeStyle(token.get('style', 'none'), 'label')

    def encodeStyle(self, style, newStyle):
        # Stack styles: a fresh span takes the new style outright, an
        # already-styled span gets the new style prepended.
        if style == "none":
            return newStyle
        return newStyle + "+" + style
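
# Sketch of how encodeStyle stacks styles when a span is styled twice
# (illustrative values; the styler normally derives styles from token keys):
def demo_styler():
    styler = GrammarStyler({})
    print(styler.encodeStyle("none", "text"))   # 'text'
    print(styler.encodeStyle("text", "label"))  # 'label+text'
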
def main():
    source = "This is a simple todo list app.\n"
    st.write(source)
    # GrammarJSON tokenizes and converts in one step.
    parsed = GrammarJSON(source).converted
    styled = GrammarStyler(parsed).style()
    st.write(styled)
if __name__ == "__main__":
    main()