import gradio as gr
import os, gc, torch
from huggingface_hub import hf_hub_download
from transformers import CLIPVisionModel
import torch.nn as nn
import torch.nn.functional as F
ctx_limit = 3500
title = "rwkv1b5-vitl336p14-577token_mix665k_rwkv"
os.environ["RWKV_JIT_ON"] = '1'
os.environ["RWKV_CUDA_ON"] = '0' # if '1' then use CUDA kernel for seq mode (much faster)
from rwkv.model import RWKV
model_path = hf_hub_download(repo_id="howard-hou/visualrwkv-5", filename=f"{title}.pth")
model = RWKV(model=model_path, strategy='cpu fp32')
from rwkv.utils import PIPELINE, PIPELINE_ARGS
pipeline = PIPELINE(model, "rwkv_vocab_v20230424")
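# Editorial note: 'cpu fp32' keeps the demo runnable on CPU-only Spaces
# hardware; the rwkv package also accepts strategies like 'cuda fp16',
# which this app does not use. ctx_limit above caps how many prompt
# tokens are fed to the model on the first forward pass.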
class VisualRWKV(nn.Module):
    """Vision front-end: a CLIP ViT encoder plus a linear projection
    into the RWKV embedding space."""
    def __init__(self, args):
        super().__init__()
        self.args = args
        self.vit = CLIPVisionModel.from_pretrained(args.vision_tower_name)
        self.proj = nn.Linear(self.vit.config.hidden_size, args.n_embd, bias=False)

    def encode_images(self, images):
        B, N, C, H, W = images.shape
        images = images.view(B*N, C, H, W)
        image_features = self.vit(images).last_hidden_state
        L, D = image_features.shape[1], image_features.shape[2]
        # rearrange [B*N, L, D] -> [B, N, L, D], then keep the first image per sample
        image_features = image_features.view(B, N, L, D)[:, 0, :, :]
        image_features = self.grid_pooling(image_features)
        return self.proj(image_features)

    def grid_pooling(self, image_features):
        if self.args.grid_size == -1:  # no grid pooling
            return image_features
        if self.args.grid_size == 0:   # take cls token
            return image_features[:, 0:1, :]
        if self.args.grid_size == 1:   # global avg pooling
            return image_features.mean(dim=1, keepdim=True)
        cls_features = image_features[:, 0:1, :]
        image_features = image_features[:, 1:, :]  # drop cls token
        B, L, D = image_features.shape
        H_or_W = int(L**0.5)  # patch tokens form a square H_or_W x H_or_W grid
        image_features = image_features.view(B, H_or_W, H_or_W, D)
        grid_stride = H_or_W // self.args.grid_size
        image_features = F.avg_pool2d(image_features.permute(0, 3, 1, 2),
                                      padding=0,
                                      kernel_size=grid_stride,
                                      stride=grid_stride)
        image_features = image_features.permute(0, 2, 3, 1).view(B, -1, D)
        return torch.cat((cls_features, image_features), dim=1)
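# Hedged usage sketch (not in the original app): how encode_images could be
# smoke-tested in isolation. The args fields mirror what the class reads
# above; n_embd=2048 and the 336x336 input size are assumptions for
# illustration, consistent with ViT-L/14-336 producing 577 output tokens
# (the "vitl336p14-577token" in the checkpoint name).
def _demo_encode_images():
    from types import SimpleNamespace
    _args = SimpleNamespace(vision_tower_name='openai/clip-vit-large-patch14-336',
                            n_embd=2048, grid_size=-1)
    encoder = VisualRWKV(_args)
    dummy = torch.zeros(1, 1, 3, 336, 336)  # [B, N, C, H, W]
    with torch.no_grad():
        feats = encoder.encode_images(dummy)  # grid_size=-1 -> [1, 577, n_embd]
    return feats.shape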
##########################################################################
def generate_prompt(instruction, input=""):
    instruction = instruction.strip().replace('\r\n', '\n').replace('\n\n', '\n')
    input = input.strip().replace('\r\n', '\n').replace('\n\n', '\n')
    if input:
        return f"""Instruction: {instruction}
Input: {input}
Response:"""
    else:
        return f"""User: hi
Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.
User: {instruction}
Assistant:"""
def evaluate(
    ctx,
    token_count=200,
    temperature=1.0,
    top_p=0.7,
    presencePenalty=0.1,
    countPenalty=0.1,
):
    args = PIPELINE_ARGS(temperature=max(0.2, float(temperature)), top_p=float(top_p),
                         alpha_frequency=countPenalty,
                         alpha_presence=presencePenalty,
                         token_ban=[],    # ban the generation of some tokens
                         token_stop=[0])  # stop generation whenever you see any token here
    ctx = ctx.strip()
    all_tokens = []
    out_last = 0
    out_str = ''
    occurrence = {}
    state = None
    for i in range(int(token_count)):
        # Feed the full (truncated) prompt on the first step, then one token at a time.
        out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:] if i == 0 else [token], state)
        # Apply presence/frequency penalties to tokens generated so far.
        for n in occurrence:
            out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
        token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
        if token in args.token_stop:
            break
        all_tokens += [token]
        # Decay old counts so the repetition penalty fades for distant tokens.
        for n in occurrence:
            occurrence[n] *= 0.996
        occurrence[token] = occurrence.get(token, 0) + 1
        # Only emit text once it decodes cleanly (no UTF-8 replacement char).
        tmp = pipeline.decode(all_tokens[out_last:])
        if '\ufffd' not in tmp:
            out_str += tmp
            yield out_str.strip()
            out_last = i + 1
    del out
    del state
    gc.collect()
    yield out_str.strip()
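# Hedged usage sketch (not in the original app): evaluate() is a generator
# that streams progressively longer strings, so a caller prints or displays
# partial output as it arrives. The prompt text is illustrative only.
def _demo_evaluate():
    prompt = generate_prompt("Describe what makes RWKV different from a Transformer.")
    for partial in evaluate(prompt, token_count=32, temperature=1.0, top_p=0.7):
        print(partial)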
##########################################################################
from model import VisualEncoder, EmbeddingMixer, VisualEncoderConfig
emb_mixer = EmbeddingMixer(model.w["emb.weight"], num_image_embeddings=4096)
config = VisualEncoderConfig(n_embd=model.args.n_embd,
                             vision_tower_name='openai/clip-vit-large-patch14-336',
                             grid_size=-1)
visual_encoder = VisualEncoder(config)
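# Notes (editorial): grid_size=-1 matches the "no grid pooling" branch of
# grid_pooling above, so all 577 ViT-L/336 tokens are kept per image.
# EmbeddingMixer is constructed from the RWKV text-embedding table plus
# 4096 image-embedding slots; its exact API lives in the local `model`
# module and is not shown in this file.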
##########################################################################
##########################################################################
examples = [
    [
        "./extreme_ironing.jpg",
        "What is unusual about this image?",
    ],
    [
        "./waterview.jpg",
        "What are the things I should be cautious about when I visit here?",
    ],
]
def test(image, question):
    # Placeholder handler: echoes the question back; VisualRWKV inference
    # is not wired into the Gradio interface in this revision.
    return question
demo = gr.Interface(fn=test,
                    inputs=["image", "text"],
                    outputs="text",
                    examples=examples,
                    title=title,
                    description="VisualRWKV-v5.0")
demo.queue(concurrency_count=1, max_size=10)
demo.launch(share=False)