Commit: update
app.py
CHANGED
@@ -39,7 +39,13 @@ from vocab import all_tokenizers
 from util import *
 from examples import example_fn
 
-
+get_window_url_params = """
+    function(url_params) {
+        const params = new URLSearchParams(window.location.search);
+        url_params = JSON.stringify(Object.fromEntries(params));
+        return url_params;
+    }
+    """
 
 with gr.Blocks(css="css/style.css", title="Tokenizer Arena") as demo:
     gr.HTML("""<h1 align="center">Tokenizer Arena ⚔️</h1>""")
@@ -175,8 +181,9 @@ with gr.Blocks(css="css/style.css", title="Tokenizer Arena") as demo:
     demo.load(_js=open("js/onload.js", "r", encoding="utf-8").read())
     demo.load(
         fn=on_load,
-        inputs=
+        inputs=[user_input],  # only an empty placeholder object needs to be passed in here
         outputs=[user_input, tokenizer_type_1, tokenizer_type_2],
+        _js=get_window_url_params
     )
 
 
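In short, the app.py change wires the browser's query string into the Gradio load event: get_window_url_params runs client-side when the page loads, and the JSON string it returns replaces the value of the placeholder inputs component before on_load is called on the server. Below is a minimal, self-contained sketch of the same pattern; it assumes Gradio 3.x (where Blocks.load accepts _js), and every name other than get_window_url_params is illustrative rather than taken from the repo.

import json
import gradio as gr

# JS snippet: runs in the browser on page load; its return value becomes the
# value of the first input component before the Python callback is invoked.
get_window_url_params = """
    function(url_params) {
        const params = new URLSearchParams(window.location.search);
        return JSON.stringify(Object.fromEntries(params));
    }
"""

def echo_params(url_params):
    # url_params arrives as the JSON string produced by the JS snippet above
    try:
        params = json.loads(url_params)
    except (TypeError, ValueError):
        params = {}
    return f"query params: {params}"

with gr.Blocks() as demo:
    box = gr.Textbox(label="params")  # placeholder component, analogous to user_input in app.py
    demo.load(fn=echo_params, inputs=[box], outputs=[box], _js=get_window_url_params)

demo.launch()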
util.py
CHANGED
@@ -109,29 +109,39 @@ default_tokenizer_type_1 = "llama"
 default_tokenizer_type_2 = "gpt_35_turbo"
 
 
-def on_load(request: gr.Request):
+def on_load(url_params, request: gr.Request):
     """
     onLoad
     """
+
     text = None
     tokenizer_type_1 = None
     tokenizer_type_2 = None
-
+    try:
+        url_params = json.loads(url_params)
+    except:
+        url_params = {}
     if request:
-
+        try:
+            logger.info(str(request.headers))
+            logger.info(str(request.query_params))
+        except:
+            pass
         client_ip = request.client.host
         # local_ip = socket.gethostbyname(socket.gethostbyname(""))
         # headers = request.kwargs['headers']
         # if headers and 'x-forwarded-for' in headers:
         #     x_forwarded_for = headers['x-forwarded-for']
         #     client_ip = x_forwarded_for.split(' ')[0] if x_forwarded_for else ""
-        if "referer" in request.headers:
-            url_params = parse_qs(urlparse(request.headers["referer"]).query)
-            url_params = {k: v[0] for k, v in url_params.items() if len(v) > 0}
-            tokenizer_type_1 = url_params.get("tokenizer1", default_tokenizer_type_1)
-            tokenizer_type_2 = url_params.get("tokenizer2", default_tokenizer_type_2)
-            text = url_params.get("text", default_user_input)
-
+        # if "referer" in request.headers:  # not work for huggingface-space
+        #     url_params = parse_qs(urlparse(request.headers["referer"]).query)
+        #     url_params = {k: v[0] for k, v in url_params.items() if len(v) > 0}
+        tokenizer_type_1 = url_params.get("tokenizer1", default_tokenizer_type_1)
+        tokenizer_type_2 = url_params.get("tokenizer2", default_tokenizer_type_2)
+        text = url_params.get("text", default_user_input)
+
+
+        logger.info(f"client_ip: {client_ip}; params: {url_params}")
     return text, tokenizer_type_1, tokenizer_type_2
 
 
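The new on_load body boils down to a small, testable piece of logic: decode the JSON string handed over by the JS snippet and fall back to the module-level defaults for any missing key. A standalone sketch of that logic, runnable without Gradio (the helper name parse_url_params and the default_user_input value are made up for illustration; the two tokenizer defaults are the ones defined in util.py):

import json

default_tokenizer_type_1 = "llama"         # default defined in util.py
default_tokenizer_type_2 = "gpt_35_turbo"  # default defined in util.py
default_user_input = "hello world"         # assumed placeholder; the real default lives in util.py

def parse_url_params(url_params):
    """Return (text, tokenizer_type_1, tokenizer_type_2) from a JSON string of URL query params."""
    try:
        params = json.loads(url_params)
    except (TypeError, ValueError):
        params = {}
    text = params.get("text", default_user_input)
    tokenizer_type_1 = params.get("tokenizer1", default_tokenizer_type_1)
    tokenizer_type_2 = params.get("tokenizer2", default_tokenizer_type_2)
    return text, tokenizer_type_1, tokenizer_type_2

print(parse_url_params('{"tokenizer1": "llama", "text": "hi"}'))
# -> ('hi', 'llama', 'gpt_35_turbo')

With this in place, a Space URL carrying ?tokenizer1=...&tokenizer2=...&text=... preloads both tokenizer dropdowns and the input text, which, per the new inline comment, the earlier referer-based parsing did not manage on Hugging Face Spaces.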