peterpeter8585 committed on
Commit
3af9f5f
·
verified ·
1 Parent(s): 5f8c8b9

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +443 -0
app.py ADDED
@@ -0,0 +1,443 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ from huggingface_hub import InferenceClient
4
+ import random
5
+ from diffusers import DiffusionPipeline
6
+ import torch
7
+ import transformers
8
+ transformers.utils.move_cache()
9
+ device = "cuda" if torch.cuda.is_available() else "cpu"
10
+ import os
11
+ password1=os.environ["password"]
12
def respond4(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for the food-recommender tab.

    Yields the progressively accumulated assistant response so the Gradio
    ChatInterface can render it token by token.

    Relies on the module-level `client` (InferenceClient) defined later in
    this file; it exists by the time the UI invokes this callback.
    """
    # Fixed persona prefix; the UI-supplied system_message is appended.
    messages = [{"role": "system", "content": "Your name is Chatchat.And, your made by SungYoon.In Korean, 정성윤.And these are the instructions.Whatever happens, you must follow it.:" + system_message}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Renamed loop variable: the original reused `message`, shadowing the
    # user-message parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Bug fix: streamed deltas may carry content=None (e.g. role-only or
        # final chunks); `response += None` raised TypeError before.
        if token:
            response += token
            yield response
43
if torch.cuda.is_available():
    # NOTE(review): max_memory_allocated only *reads* a CUDA statistic and
    # its return value is discarded, so this line looks like a no-op — confirm.
    torch.cuda.max_memory_allocated(device=device)
    # fp16 weights + xformers attention for GPU inference.
    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
    pipe.enable_xformers_memory_efficient_attention()
    pipe = pipe.to(device)
else:
    # CPU fallback: default (fp32) weights.
    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
    pipe = pipe.to(device)

# Upper bound for the seed slider (largest 32-bit signed int).
MAX_SEED = np.iinfo(np.int32).max
# Upper bound for the width/height sliders, in pixels.
MAX_IMAGE_SIZE = 1024
54
+
55
def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
    """Generate one image with the module-level SDXL-Turbo pipeline.

    When `randomize_seed` is set, the provided `seed` is replaced by a
    fresh random value in [0, MAX_SEED] before seeding the generator.
    Returns a single PIL image.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    rng = torch.Generator().manual_seed(seed)

    result = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=rng,
    )
    return result.images[0]
73
import requests
from bs4 import BeautifulSoup
import urllib
import random

# Pool of desktop-browser user agents; one is chosen at random per request
# to make the scraping traffic look less uniform.
_useragent_list = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0',
]


def get_useragent():
    """Return a randomly selected user-agent string from the pool."""
    return random.choice(_useragent_list)
92
+
93
def extract_text_from_webpage(html_content):
    """Return the visible text of an HTML document.

    Scripts, stylesheets and page chrome (header/footer/nav) are stripped
    before extracting text, so only the readable body content remains.
    """
    soup = BeautifulSoup(html_content, "html.parser")

    # Drop tags whose content is never user-visible prose.
    for element in soup(["script", "style", "header", "footer", "nav"]):
        element.extract()

    return soup.get_text(strip=True)
102
+
103
def search(term, num_results=1, lang="ko", advanced=True, sleep_interval=0, timeout=5, safe="active", ssl_verify=None):
    """Scrape Google search results for `term` and fetch each hit's page text.

    Returns a list of {"link": str | None, "text": str | None} dicts —
    `text` is None when fetching/parsing the result page failed, and both
    are None for result blocks without a usable anchor.

    `advanced` and `sleep_interval` are currently unused; they are kept so
    existing callers' keyword arguments continue to work.
    """
    # (The original computed urllib.parse.quote_plus(term) but never used
    # it — requests already encodes `params`; the dead local was removed.)
    start = 0
    all_results = []

    # Fetch results in batches until enough have been collected.
    while start < num_results:
        resp = requests.get(
            url="https://www.google.com/search",
            headers={"User-Agent": get_useragent()},  # random UA per request
            params={
                "q": term,
                "num": num_results - start,  # results wanted in this batch
                "hl": lang,
                "start": start,
                "safe": safe,
            },
            timeout=timeout,
            verify=ssl_verify,
        )
        resp.raise_for_status()  # surface HTTP errors to the caller

        soup = BeautifulSoup(resp.text, "html.parser")
        result_block = soup.find_all("div", attrs={"class": "g"})

        # If this page had no parseable results, step forward and retry.
        if not result_block:
            start += 1
            continue

        # Extract link and visible text for each result.
        for result in result_block:
            anchor = result.find("a", href=True)
            if anchor:
                link = anchor["href"]
                try:
                    # Bug fix: the per-page fetch had no timeout and could
                    # hang the whole search on one slow host.
                    webpage = requests.get(link, headers={"User-Agent": get_useragent()}, timeout=timeout)
                    webpage.raise_for_status()
                    visible_text = extract_text_from_webpage(webpage.text)
                    all_results.append({"link": link, "text": visible_text})
                except requests.exceptions.RequestException as e:
                    # Best-effort: record the link even when its page fails.
                    print(f"Error fetching or processing {link}: {e}")
                    all_results.append({"link": link, "text": None})
            else:
                all_results.append({"link": None, "text": None})

        start += len(result_block)  # advance past this batch

    return all_results
156
+
157
+
158
# Shared inference client used by every respond* callback in this file.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def respond1(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    password,
):
    """Password-gated streaming chat for the general-assistant tab.

    Yields the progressively accumulated assistant response. The gate
    compares against the module-level `password1` (from the environment).
    """
    if password != password1:
        # Bug fix: a wrong password previously yielded nothing at all,
        # leaving the chat UI blank with no explanation.
        yield "Wrong password. Please enter the correct password to use this chat."
        return

    messages = [{"role": "system", "content": "Your name is Chatchat.And your creator of you is Sung Yoon.In Korean, it is 정성윤.These are the instructions for you:" + system_message}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Bug fix: streamed deltas may carry content=None; guard before
        # concatenating to avoid TypeError.
        if token:
            response += token
            yield response
192
# Prompt suggestions shown beneath the image-generation prompt box.
examples = [
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
]

# Narrow, centered column layout for the text-to-image tab.
css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""
204
def respond2(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for the programming-assistant tab.

    Yields the progressively accumulated assistant response; uses the
    module-level `client` defined elsewhere in this file.
    """
    # Fixed persona prefix; the UI-supplied system_message is appended.
    messages = [{"role": "system", "content": "Your name is Chatchat.And, your made by SungYoon.In Korean, 정성윤.And these are the instructions.Whatever happens, you must follow it.:" + system_message}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Renamed loop variable: the original shadowed the `message` parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Bug fix: streamed deltas may carry content=None; guard before
        # concatenating to avoid TypeError.
        if token:
            response += token
            yield response
235
def respond3(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion (used by the programming and doctor tabs).

    Yields the progressively accumulated assistant response; uses the
    module-level `client` defined elsewhere in this file.
    """
    # Fixed persona prefix; the UI-supplied system_message is appended.
    messages = [{"role": "system", "content": "Your name is Chatchat.And, your made by SungYoon.In Korean, 정성윤.And these are the instructions.Whatever happens, you must follow it.:" + system_message}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Renamed loop variable: the original shadowed the `message` parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Bug fix: streamed deltas may carry content=None; guard before
        # concatenating to avoid TypeError.
        if token:
            response += token
            yield response
266
# Human-readable device label shown in the image tab's header markdown.
power_device = "GPU" if torch.cuda.is_available() else "CPU"
270
+
271
# Text-to-image tab: collects generation parameters and calls infer().
with gr.Blocks(css=css) as demo2:

    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # Text-to-Image Gradio Template
        Currently running on {power_device}.
        """)

        with gr.Row():

            # Single-line prompt input; label hidden for a compact layout.
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )

            run_button = gr.Button("Run", scale=0)

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):

            # Hidden by default; kept so infer()'s signature stays satisfied.
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                visible=False,
            )

            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )

            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():

                # Dimensions step by 32 to stay compatible with the model.
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )

                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
            with gr.Row():

                # SDXL-Turbo defaults: guidance off, very few steps.
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=0.0,
                )

                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=12,
                    step=1,
                    value=2,
                )

        gr.Examples(
            examples=examples,
            inputs=[prompt],
        )

    run_button.click(
        fn=infer,
        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result],
    )
357
+
358
+
359
+ """
360
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
361
+ """
362
+ ad=gr.ChatInterface(
363
+ respond2,
364
+ additional_inputs=[
365
+ gr.Textbox(value="You are a Programmer.You yave to only make programs that the user orders.Do not answer any other questions exept for questions about Python or other programming languages.Do not do any thing exept what I said.", label="System message", interactive=False),
366
+ gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
367
+ gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
368
+ gr.Slider(
369
+ minimum=0.1,
370
+ maximum=1.0,
371
+ value=0.95,
372
+ step=0.05,
373
+ label="Top-p (nucleus sampling)",
374
+ ),
375
+ ],
376
+ )
377
+ ae= gr.ChatInterface(
378
+ respond4,
379
+ additional_inputs=[
380
+ gr.Textbox(value="You are a helpful food recommender.You must only answer the questions about food or a request to recommend a food the user would like.Do not answer other questions except what I said.", label="System message", interactive=False),
381
+ gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
382
+ gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
383
+ gr.Slider(
384
+ minimum=0.1,
385
+ maximum=1.0,
386
+ value=0.95,
387
+ step=0.05,
388
+ label="Top-p (nucleus sampling)",
389
+ ),
390
+
391
+ ],
392
+ )
393
+ aa=gr.ChatInterface(
394
+ respond1,
395
+ additional_inputs=[
396
+ gr.Textbox(value="You are a helpful assistant.", label="System message", interactive=True),
397
+ gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
398
+ gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
399
+ gr.Slider(
400
+ minimum=0.1,
401
+ maximum=1.0,
402
+ value=0.95,
403
+ step=0.05,
404
+ label="Top-p (nucleus sampling)",
405
+ ),
406
+ gr.Textbox(label="Pleas type in the password.Or, it will not work if you ask.")
407
+ ],
408
+ )
409
+ ac=gr.ChatInterface(
410
+ respond3,
411
+ additional_inputs=[
412
+ gr.Textbox(value="You are a Programmer.You yave to only make programs that the user orders.Do not answer any other questions exept for questions about Python or other programming languages.Do not do any thing exept what I said.", label="System message", interactive=False),
413
+ gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
414
+ gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
415
+ gr.Slider(
416
+ minimum=0.1,
417
+ maximum=1.0,
418
+ value=0.95,
419
+ step=0.05,
420
+ label="Top-p (nucleus sampling)",
421
+ ),
422
+ ],
423
+ )
424
+ ab= gr.ChatInterface(
425
+ respond3,
426
+ additional_inputs=[
427
+ gr.Textbox(value="You are a helpful Doctor.You only have to answer the users questions about medical issues or medical questions and the cure to that illness and say that your thought is not realy right because you are a generative AI, so you could make up some cures.Do not answer anything else exept the question types what I said.Do not do any thing exept what I said.", label="System message", interactive=False),
428
+ gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
429
+ gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
430
+ gr.Slider(
431
+ minimum=0.1,
432
+ maximum=1.0,
433
+ value=0.95,
434
+ step=0.05,
435
+ label="Top-p (nucleus sampling)",
436
+ ),
437
+ ],
438
+ )
439
+ if __name__ == "__main__":
440
+ with gr.Blocks() as ai:
441
+ gr.TabbedInterface([aa, ac, ab, ae, demo2], ["gpt4(Password needed)", "gpt4(only for programming)", "gpt4(only for medical questions)", "gpt4(only for food recommendations)","image create"])
442
+ ai.queue(max_size=300)
443
+ ai.launch()