import gradio as gr
from gradio_client import Client
from huggingface_hub import InferenceClient
import random
from deep_translator import GoogleTranslator
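# Screenshot client (unused below; apparently left over from an earlier version of this Space).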
ss_client = Client("https://omnibus-html-image-current-tab.hf.space/")
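# Gemma checkpoints offered in the model dropdown; one InferenceClient is created per model.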
models=[
"google/gemma-7b",
"google/gemma-7b-it",
"google/gemma-2b",
"google/gemma-2b-it"
]
clients=[
InferenceClient(models[0]),
InferenceClient(models[1]),
InferenceClient(models[2]),
InferenceClient(models[3]),
]
VERBOSE=False
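# Relabel the Chatbot with the newly selected model's name. The InferenceClient
# list above is static, so nothing is actually reloaded here.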
def load_models(inp):
    if VERBOSE:
print(type(inp))
print(inp)
print(models[inp])
#client_z.clear()
#client_z.append(InferenceClient(models[inp]))
return gr.update(label=models[inp])
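# Build the full prompt string: prior turns wrapped in Gemma-style
# <start_of_turn>/<end_of_turn> markers, followed by the user-editable
# template with USER_INPUT replaced by the new message.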
def format_prompt(message, history, cust_p):
prompt = "<s>"
if history:
for user_prompt, bot_response in history:
prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
prompt += f"<start_of_turn>model{bot_response}<end_of_turn></s>"
        if VERBOSE:
print(prompt)
#prompt += f"<start_of_turn>user\n{message}<end_of_turn>\n<start_of_turn>model\n"
prompt+=cust_p.replace("USER_INPUT",message)
return prompt
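# Generator that optionally translates the prompt to English, streams a reply
# from the selected Gemma endpoint, and optionally translates the result to Persian.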
def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p,translate_fa):
#token max=8192
    # deep_translator rejects very long inputs, so prompts over 2000 characters
    # are translated to English in two chunks and rejoined (note the fixed
    # split point can cut a word in half).
    if translate_fa:
        if len(prompt) > 2000:
            translatedtext1 = GoogleTranslator(source='auto', target='en').translate(prompt[:2000])
            translatedtext2 = GoogleTranslator(source='auto', target='en').translate(prompt[2000:])
            prompt = translatedtext1 + translatedtext2
        else:
            prompt = GoogleTranslator(source='auto', target='en').translate(prompt)
    if VERBOSE:
        print(client_choice)
    hist_len=0
    # client_choice arrives as a 0-based index (the Dropdown uses type='index'),
    # so index the client list directly rather than subtracting 1.
    client=clients[int(client_choice)]
    # gr.Number delivers floats; cast once before using chat_mem as a slice index.
    chat_mem=int(chat_mem)
    if not history:
        history = []
        hist_len=0
    if not memory:
        memory = []
    if memory:
        # Count the characters of the retained turns toward the input budget.
        for ea in memory[-chat_mem:]:
            hist_len+=len(str(ea))
    # Rough character-based guard against overflowing the model's context window.
    in_len=len(system_prompt+prompt)+hist_len
    if (in_len+tokens) > 8000:
        history.append((prompt,"That's too many tokens: please reduce the 'Chat Memory' value, or reduce the 'Max new tokens' value"))
yield history,memory
else:
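        # Only max_new_tokens is forwarded; the sampling parameters below are
        # currently commented out, so the matching UI sliders have no effect.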
generate_kwargs = dict(
#temperature=temp,
max_new_tokens=tokens,
#top_p=top_p,
#repetition_penalty=rep_p,
#do_sample=True,
#seed=seed,
)
        if system_prompt:
            formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[-chat_mem:], cust_p)
        else:
            formatted_prompt = format_prompt(prompt, memory[-chat_mem:], cust_p)
        # Note: this chat-style message list is built but never used; the raw
        # formatted_prompt string is what is sent to text_generation() below.
        chat = [
            {"role": "user", "content": f"{formatted_prompt}"},
        ]
        # Stream tokens from the Inference API as they are generated.
stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
output = ""
        for response in stream:
            output += response.token.text
            # Re-emit prior history plus the partial reply so earlier turns stay visible while streaming.
            yield history + [(prompt, output)], memory
        # Translate the finished reply back to Persian for display.
        if translate_fa:
            output = GoogleTranslator(source='auto', target='fa').translate(output)
history.append((prompt,output))
memory.append((prompt,output))
yield history,memory
        if VERBOSE:
print("\n######### HIST "+str(in_len))
print("\n######### TOKENS "+str(tokens))
def clear_fn():
return None,None,None,None
rand_val=random.randint(1,1111111111111111)
def check_rand(inp,val):
    # "Random Seed" checked: draw a fresh seed; unchecked: keep the user's value.
    if inp:
return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=random.randint(1,1111111111111111))
else:
return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=int(val))
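# UI layout: prompt and prompt-format controls on the left, generation settings on the right.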
with gr.Blocks() as app:
memory=gr.State()
chat_b = gr.Chatbot(height=500)
with gr.Group():
with gr.Row():
with gr.Column(scale=3):
inp = gr.Textbox(label="Prompt")
sys_inp = gr.Textbox(label="System Prompt (optional)")
with gr.Row():
with gr.Column(scale=2):
btn = gr.Button("Chat")
with gr.Column(scale=1):
with gr.Group():
                            stop_btn=gr.Button("Stop")  # note: not currently wired to cancel generation
clear_btn=gr.Button("Clear")
client_choice=gr.Dropdown(label="Models",type='index',choices=[c for c in models],value=models[0],interactive=True)
with gr.Accordion("Prompt Format",open=False):
custom_prompt=gr.Textbox(label="Modify Prompt Format", info="For testing purposes. 'USER_INPUT' is where 'SYSTEM_PROMPT, PROMPT' will be placed", lines=5,value="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model")
with gr.Column(scale=1):
with gr.Group():
translate_fa = gr.Checkbox(label="Translate to Persian", value=True)
rand = gr.Checkbox(label="Random Seed", value=True)
seed=gr.Slider(label="Seed", minimum=1, maximum=1111111111111111,step=1, value=rand_val)
                    tokens = gr.Slider(label="Max new tokens",value=1600,minimum=0,maximum=8000,step=64,interactive=True, visible=True,info="Maximum number of new tokens to generate")
temp=gr.Slider(label="Temperature",step=0.01, minimum=0.01, maximum=1.0, value=0.9)
top_p=gr.Slider(label="Top-P",step=0.01, minimum=0.01, maximum=1.0, value=0.9)
rep_p=gr.Slider(label="Repetition Penalty",step=0.1, minimum=0.1, maximum=2.0, value=1.0)
chat_mem=gr.Number(label="Chat Memory", info="Number of previous chats to retain",value=4)
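    # Event wiring: switching models relabels the chat window; submitting the
    # textbox or clicking "Chat" first resolves the seed, then streams the reply.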
client_choice.change(load_models,client_choice,[chat_b])
app.load(load_models,client_choice,[chat_b])
chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt,translate_fa],[chat_b,memory])
go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt,translate_fa],[chat_b,memory])
clear_btn.click(clear_fn,None,[inp,sys_inp,chat_b,memory])
app.queue(default_concurrency_limit=10).launch()