import gradio as gr
from gradio_client import Client
from huggingface_hub import InferenceClient
import random
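
# Remote Space used by get_screenshot() below to render the chat transcript as an image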
ss_client = Client("https://omnibus-html-image-current-tab.hf.space/")

models=[
    "google/gemma-7b",
    "google/gemma-7b-it",
    "google/gemma-2b",
    "google/gemma-2b-it"
]
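
# One InferenceClient per model, in the same order as the models list above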
clients=[
    InferenceClient(models[0]),
    InferenceClient(models[1]),
    InferenceClient(models[2]),
    InferenceClient(models[3]),
]

VERBOSE=False  # set True to print debug information from the handlers below

def load_models(inp):
    """Relabel the chatbot with the newly selected model's name (inp is the dropdown index)."""
    if VERBOSE:
        print(type(inp))
        print(inp)
        print(models[inp])
    return gr.update(label=models[inp])

def format_prompt(message, history, cust_p):
    """Build a Gemma-style prompt from prior turns plus the user's custom turn template."""
    prompt = ""
    if history:
        for user_prompt, bot_response in history:
            prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
            prompt += f"<start_of_turn>model{bot_response}<end_of_turn>"
        if VERBOSE:
            print(prompt)
    # Canonical Gemma turn format, kept for reference:
    # prompt += f"<start_of_turn>user\n{message}<end_of_turn>\n<start_of_turn>model\n"
    prompt += cust_p.replace("USER_INPUT", message)
    return prompt

def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p):
    # Gemma's context window is 8192 tokens; the budget check below keeps some headroom
    if VERBOSE:
        print(client_choice)
    client = clients[int(client_choice)]  # the Models dropdown uses type='index', so the value is already 0-based
    chat_mem = int(chat_mem)  # gr.Number may deliver a float
    if not history:
        history = []
    if not memory:
        memory = []
    hist_len = 0
    for ea in memory[-chat_mem:]:
        hist_len += len(str(ea))
    # Rough budget check: character counts stand in for token counts
    in_len = len(system_prompt + prompt) + hist_len

    if (in_len + tokens) > 8000:
        history.append((prompt, "Wait, that's too many tokens, please reduce the 'Chat Memory' value, or reduce the 'Max new tokens' value"))
        yield history, memory
    else:
        generate_kwargs = dict(
            temperature=temp,
            max_new_tokens=int(tokens),
            top_p=top_p,
            repetition_penalty=rep_p,
            do_sample=True,
            seed=int(seed),
        )
        if system_prompt:
            formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[-chat_mem:], cust_p)
        else:
            formatted_prompt = format_prompt(prompt, memory[-chat_mem:], cust_p)
        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
        output = ""
        for response in stream:
            output += response.token.text
            # Stream the partial reply while keeping earlier turns visible
            yield history + [(prompt, output)], memory
        history.append((prompt, output))
        memory.append((prompt, output))
        yield history, memory

    if VERBOSE:
        print("\n######### HIST " + str(in_len))
        print("\n######### TOKENS " + str(tokens))

def get_screenshot(chat: list, height=5000, width=600, chatblock=None, theme="light", wait=3000, header=True):
    """Render the chat transcript to an image via the helper Space."""
    chatblock = chatblock or []  # avoid a mutable default argument
    if VERBOSE:
        print(chatblock)
    # The helper Space appears to return the screenshot at a different result index
    # when specific chat blocks are requested
    tog = 3 if chatblock else 0
    result = ss_client.predict(str(chat), height, width, chatblock, header, theme, wait, api_name="/run_script")
    out = f'https://omnibus-html-image-current-tab.hf.space/file={result[tog]}'
    if VERBOSE:
        print(out)
    return out

def clear_fn():
    # Reset the prompt box, system prompt box, chat display, and memory state
    return None, None, None, None
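
# Initial value for the Seed slider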
rand_val = random.randint(1, 1111111111111111)

def check_rand(inp, val):
    """Return a fresh random seed when 'Random Seed' is checked; otherwise keep the user's value."""
    if inp:
        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=random.randint(1, 1111111111111111))
    else:
        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=int(val))
    
with gr.Blocks() as app:
    memory=gr.State()  # holds (prompt, response) pairs across turns
    gr.HTML("""<center><h1 style='font-size:xx-large;'>Google Gemma Models</h1><br><h3>running on Hugging Face Inference Client</h3><br><h7>EXPERIMENTAL</h7></center>""")
    chat_b = gr.Chatbot(height=500)
    with gr.Group():
        with gr.Row():
            with gr.Column(scale=3):
                inp = gr.Textbox(label="Prompt")
                sys_inp = gr.Textbox(label="System Prompt (optional)")
                with gr.Row():
                    with gr.Column(scale=2):
                        btn = gr.Button("Chat")
                    with gr.Column(scale=1):
                        with gr.Group():
                            stop_btn=gr.Button("Stop")
                            clear_btn=gr.Button("Clear")                
                client_choice=gr.Dropdown(label="Models",type='index',choices=models,value=models[0],interactive=True)
                with gr.Accordion("Prompt Format",open=False):
                    custom_prompt=gr.Textbox(label="Modify Prompt Format", info="For testing. 'USER_INPUT' marks where 'SYSTEM_PROMPT, PROMPT' is inserted", lines=3,value="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model")
            with gr.Column(scale=1):
                with gr.Group():
                    rand = gr.Checkbox(label="Random Seed", value=True)
                    seed=gr.Slider(label="Seed", minimum=1, maximum=1111111111111111,step=1, value=rand_val)
                    tokens = gr.Slider(label="Max new tokens",value=1600,minimum=0,maximum=8000,step=64,interactive=True, visible=True,info="Maximum number of new tokens to generate")
                    temp=gr.Slider(label="Temperature",step=0.01, minimum=0.01, maximum=1.0, value=0.49)
                    top_p=gr.Slider(label="Top-P",step=0.01, minimum=0.01, maximum=1.0, value=0.49)
                    rep_p=gr.Slider(label="Repetition Penalty",step=0.01, minimum=0.1, maximum=2.0, value=0.99)
                    chat_mem=gr.Number(label="Chat Memory", info="Number of previous chats to retain",value=4)
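
        # Optional: render the conversation to an image using the remote screenshot Space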
        with gr.Accordion(label="Screenshot",open=False):
            with gr.Row():
                with gr.Column(scale=3):
                    im_btn=gr.Button("Screenshot")
                    img=gr.Image(type='filepath')
                with gr.Column(scale=1):
                    with gr.Row():
                        im_height=gr.Number(label="Height",value=5000)
                        im_width=gr.Number(label="Width",value=500)
                    wait_time=gr.Number(label="Wait Time",value=3000)
                    theme=gr.Radio(label="Theme", choices=["light","dark"],value="light")
                    chatblock=gr.Dropdown(label="Chatblocks",info="Choose specific blocks of chat",choices=list(range(1,40)),multiselect=True)

    
    # Relabel the chatbot when the model selection changes, and once at app load
    client_choice.change(load_models,client_choice,[chat_b])
    app.load(load_models,client_choice,[chat_b])
    
    # Render the current chat to an image when the Screenshot button is clicked
    im_go=im_btn.click(get_screenshot,[chat_b,im_height,im_width,chatblock,theme,wait_time],img)
    
    # Enter in the prompt box and the Chat button both resolve the seed, then stream the reply
    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
    go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
    
    # Stop cancels any in-flight generation or screenshot job
    stop_btn.click(None,None,None,cancels=[go,im_go,chat_sub])
    clear_btn.click(clear_fn,None,[inp,sys_inp,chat_b,memory])

app.queue(default_concurrency_limit=10).launch()