howard-hou committed on
Commit
69698e1
1 Parent(s): e129b57

Update app.py

Files changed (1)
app.py +106 -44
app.py CHANGED
@@ -124,53 +124,115 @@ def evaluate(
     gc.collect()
     yield out_str.strip()

-examples = [
-    ["Assistant: Sure! Here is a very detailed plan to create flying pigs:", 333, 1, 0.3, 0, 1],
-    ["Assistant: Sure! Here are some ideas for FTL drive:", 333, 1, 0.3, 0, 1],
-    ["A few light taps upon the pane made her turn to the window. It had begun to snow again.", 333, 1, 0.3, 0, 1],
-    [generate_prompt("Écrivez un programme Python pour miner 1 Bitcoin, avec des commentaires."), 333, 1, 0.3, 0, 1],
-    [generate_prompt("東京で訪れるべき素晴らしい場所とその紹介をいくつか挙げてください。"), 333, 1, 0.3, 0, 1],
-    [generate_prompt("Write a story using the following information.", "A man named Alex chops a tree down."), 333, 1, 0.3, 0, 1],
-    ["Assistant: Here is a very detailed plan to kill all mosquitoes:", 333, 1, 0.3, 0, 1],
-    ['''Edward: I am Edward Elric from fullmetal alchemist. I am in the world of full metal alchemist and know nothing of the real world.
-
-Player: Hello Edward. What have you been up to recently?
-
-Edward:''', 333, 1, 0.3, 0, 1],
-    [generate_prompt("写一篇关于水利工程的流体力学模型的论文,需要详细全面。"), 333, 1, 0.3, 0, 1],
-    ['''“当然可以,大宇宙不会因为这五公斤就不坍缩了。”关一帆说,他还有一个没说出来的想法:也许大宇宙真的会因为相差一个原子的质量而由封闭转为开放。大自然的精巧有时超出想象,比如生命的诞生,就需要各项宇宙参数在几亿亿分之一精度上的精确配合。但程心仍然可以留下她的生态球,因为在那无数文明创造的无数小宇宙中,肯定有相当一部分不响应回归运动的号召,所以,大宇宙最终被夺走的质量至少有几亿吨,甚至可能是几亿亿亿吨。
-但愿大宇宙能够忽略这个误差。
-程心和关一帆进入了飞船,智子最后也进来了。她早就不再穿那身华丽的和服了,她现在身着迷彩服,再次成为一名轻捷精悍的战士,她的身上佩带着许多武器和生存装备,最引人注目的是那把插在背后的武士刀。
-“放心,我在,你们就在!”智子对两位人类朋友说。
-聚变发动机启动了,推进器发出幽幽的蓝光,飞船缓缓地穿过了宇宙之门。
-小宇宙中只剩下漂流瓶和生态球。漂流瓶隐没于黑暗里,在一千米见方的宇宙中,只有生态球里的小太阳发出一点光芒。在这个小小的生命世界中,几只清澈的水球在零重力环境中静静地飘浮着,有一条小鱼从一只水球中蹦出,跃入另一只水球,轻盈地穿游于绿藻之间。在一小块陆地上的草丛中,有一滴露珠从一片草叶上脱离,旋转着飘起,向太空中折射出一缕晶莹的阳光。''', 333, 1, 0.3, 0, 1],
-]

 ##########################################################################

-with gr.Blocks(title=title) as demo:
-    gr.HTML(f"<div style=\"text-align: center;\">\n<h1>RWKV-5 World v2 - {title}</h1>\n</div>")
-    with gr.Tab("Raw Generation"):
-        gr.Markdown(f"This is [RWKV-5 World v2](https://huggingface.co/BlinkDL/rwkv-5-world) with 1.5B params - a 100% attention-free RNN [RWKV-LM](https://github.com/BlinkDL/RWKV-LM). Supports all 100+ world languages and code. And we have [200+ Github RWKV projects](https://github.com/search?o=desc&p=1&q=rwkv&s=updated&type=Repositories). *** Please try examples first (bottom of page) *** (edit them to use your question). Demo limited to ctxlen {ctx_limit}.")
-        with gr.Row():
-            with gr.Column():
-                prompt = gr.Textbox(lines=2, label="Prompt", value="Assistant: Sure! Here is a very detailed plan to create flying pigs:")
-                token_count = gr.Slider(10, 333, label="Max Tokens", step=10, value=333)
-                temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.0)
-                top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.3)
-                presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0)
-                count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=1)
-            with gr.Column():
-                with gr.Row():
-                    submit = gr.Button("Submit", variant="primary")
-                    clear = gr.Button("Clear", variant="secondary")
-                output = gr.Textbox(label="Output", lines=5)
-        data = gr.Dataset(components=[prompt, token_count, temperature, top_p, presence_penalty, count_penalty],
-                          samples=examples, label="Example Instructions",
-                          headers=["Prompt", "Max Tokens", "Temperature", "Top P", "Presence Penalty", "Count Penalty"])
-        submit.click(evaluate, [prompt, token_count, temperature, top_p, presence_penalty, count_penalty], [output])
-        clear.click(lambda: None, [], [output])
-        data.click(lambda x: x, [data], [prompt, token_count, temperature, top_p, presence_penalty, count_penalty])

 demo.queue(concurrency_count=1, max_size=10)
 demo.launch(share=False)
 
     gc.collect()
     yield out_str.strip()

+import gradio as gr
+import os, gc
+from datetime import datetime
+from huggingface_hub import hf_hub_download
+
+ctx_limit = 3500
+title = "rwkv1b5-vitl336p14-577token_mix665k_rwkv"
+
+os.environ["RWKV_JIT_ON"] = '1'
+os.environ["RWKV_CUDA_ON"] = '0' # if '1' then use CUDA kernel for seq mode (much faster)
+
+from rwkv.model import RWKV
+model_path = hf_hub_download(repo_id="howard-hou/visualrwkv-5", filename=f"{title}.pth")
+model = RWKV(model=model_path, strategy='cpu fp32')
+from rwkv.utils import PIPELINE, PIPELINE_ARGS
+pipeline = PIPELINE(model, "rwkv_vocab_v20230424")

 ##########################################################################
+from model import VisualEncoder, EmbeddingMixer, VisualEncoderConfig
+emb_mixer = EmbeddingMixer(model.w["emb.weight"], num_image_embeddings=4096)
+config = VisualEncoderConfig(n_embd=model.args.n_embd,
+                             vision_tower_name='openai/clip-vit-large-patch14-336',
+                             grid_size=-1)
+visual_encoder = VisualEncoder(config)
+##########################################################################
+def generate_prompt(instruction, input=""):
+    instruction = instruction.strip().replace('\r\n','\n').replace('\n\n','\n')
+    input = input.strip().replace('\r\n','\n').replace('\n\n','\n')
+    if input:
+        return f"""Instruction: {instruction}
+
+Input: {input}
+
+Response:"""
+    else:
+        return f"""User: hi
+
+Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.
+
+User: {instruction}

+Assistant:"""
+
+def evaluate(
+    ctx,
+    token_count=200,
+    temperature=1.0,
+    top_p=0.7,
+    presencePenalty = 0.1,
+    countPenalty = 0.1,
+):
+    args = PIPELINE_ARGS(temperature = max(0.2, float(temperature)), top_p = float(top_p),
+                         alpha_frequency = countPenalty,
+                         alpha_presence = presencePenalty,
+                         token_ban = [], # ban the generation of some tokens
+                         token_stop = [0]) # stop generation whenever you see any token here
+    ctx = ctx.strip()
+    all_tokens = []
+    out_last = 0
+    out_str = ''
+    occurrence = {}
+    state = None
+    for i in range(int(token_count)):
+        out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:] if i == 0 else [token], state)
+        for n in occurrence:
+            out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
+
+        token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
+        if token in args.token_stop:
+            break
+        all_tokens += [token]
+        for xxx in occurrence:
+            occurrence[xxx] *= 0.996
+        if token not in occurrence:
+            occurrence[token] = 1
+        else:
+            occurrence[token] += 1
+
+        tmp = pipeline.decode(all_tokens[out_last:])
+        if '\ufffd' not in tmp:
+            out_str += tmp
+            yield out_str.strip()
+            out_last = i + 1
+
+    del out
+    del state
+    gc.collect()
+    yield out_str.strip()
+
+
+##########################################################################
+examples = [
+    [
+        "./extreme_ironing.jpg",
+        "What is unusual about this image?",
+    ],
+    [
+        "./waterview.jpg",
+        "What are the things I should be cautious about when I visit here?",
+    ]
+]
+def test(image, question):
+    return question
+demo = gr.Interface(fn=test,
+                    inputs=["image", "text"],
+                    outputs="text",
+                    examples=examples,
+                    title=title,
+                    description="VisualRWKV-v5.0")

 demo.queue(concurrency_count=1, max_size=10)
 demo.launch(share=False)
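
The new generate_prompt emits two formats: an Instruction/Input/Response block when input is non-empty, and a User/Assistant chat preamble otherwise. A quick illustration of the first form, grounded in the function above:

print(generate_prompt("Summarize this.", "RWKV is an RNN."))
# Instruction: Summarize this.
#
# Input: RWKV is an RNN.
#
# Response: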
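
The rewritten evaluate streams tokens with presence/frequency penalties whose per-token counts decay by 0.996 each step, so recent repeats are penalized hardest. A minimal self-contained sketch of just that bookkeeping, using the loop's defaults (0.1 / 0.1 / 0.996); the helper names penalize and record are illustrative, not part of the commit:

# Decayed repetition penalty as used in the `evaluate` loop above.
alpha_presence, alpha_frequency, decay = 0.1, 0.1, 0.996
occurrence = {}

def penalize(logits):
    # Mirrors: out[n] -= args.alpha_presence + occurrence[n] * args.alpha_frequency
    for n, count in occurrence.items():
        logits[n] -= alpha_presence + count * alpha_frequency
    return logits

def record(token):
    for n in occurrence:
        occurrence[n] *= decay  # older occurrences fade out
    occurrence[token] = occurrence.get(token, 0) + 1

record(42)
record(42)
print(penalize({42: 5.0}))  # ≈ {42: 4.7004}: 0.1 + 1.996 * 0.1 subtracted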
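
As committed, the Gradio callback test only echoes the question; visual_encoder and emb_mixer are constructed but never invoked, so the demo does not yet condition on the image. A hedged sketch of how the pieces might eventually connect; encode_images and add_image_embeddings are assumed method names, not APIs confirmed by this commit or the model module:

from PIL import Image

def chat(image_path, question):
    image = Image.open(image_path).convert("RGB")
    image_features = visual_encoder.encode_images(image)  # hypothetical API
    emb_mixer.add_image_embeddings(image_features)        # hypothetical API
    prompt = generate_prompt(question)
    # Stream partial outputs exactly as the text-only demo does.
    yield from evaluate(prompt, token_count=200, temperature=1.0, top_p=0.3)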