polats committed on
Commit 550a0f7
1 Parent(s): 658fb9d

change versions

Files changed (3):
  1. README.md +1 -1
  2. app.py +57 -28
  3. requirements.txt +1 -1
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 💬
 colorFrom: yellow
 colorTo: purple
 sdk: gradio
-sdk_version: 5.0.1
+sdk_version: 5.14.0
 app_file: app.py
 pinned: false
 ---
app.py CHANGED
@@ -1,44 +1,73 @@
 import gradio as gr
+from huggingface_hub import InferenceClient
 
-python_code = """
-def fib(n):
-    if n <= 0:
-        return 0
-    elif n == 1:
-        return 1
-    else:
-        return fib(n-1) + fib(n-2)
 """
-
-js_code = """
-function fib(n) {
-    if (n <= 0) return 0;
-    if (n === 1) return 1;
-    return fib(n - 1) + fib(n - 2);
-}
+For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
+client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+):
+    messages = [{"role": "system", "content": system_message}]
+
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
+
+    messages.append({"role": "user", "content": message})
+
+    response = ""
 
-def chat(message, history):
-    if "python" in message.lower():
-        return "Type Python or JavaScript to see the code.", gr.Code(language="python", value=python_code)
-    elif "javascript" in message.lower():
-        return "Type Python or JavaScript to see the code.", gr.Code(language="javascript", value=js_code)
-    else:
-        return "Please ask about Python or JavaScript.", None
+    for message in client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
+    ):
+        token = message.choices[0].delta.content
+
+        response += token
+        yield response
+
+
+"""
+For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+"""
 
 with gr.Blocks() as demo:
     code = gr.Code(render=False)
     with gr.Row():
         with gr.Column():
-            gr.Markdown("<center><h1>Write Python or JavaScript</h1></center>")
+            gr.Markdown("<center><h1>Change up Noodle Jump!</h1></center>")
             gr.ChatInterface(
-                chat,
-                examples=["Python", "JavaScript"],
+                respond,
                 additional_outputs=[code],
-                type="messages"
+                additional_inputs=[
+                    gr.Textbox(value="You are a sassy chatbot.", label="System message"),
+                    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+                    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+                    gr.Slider(
+                        minimum=0.1,
+                        maximum=1.0,
+                        value=0.95,
+                        step=0.05,
+                        label="Top-p (nucleus sampling)",
+                    ),
+                ]
             )
         with gr.Column():
-            gr.Markdown("<center><h1>Code Artifacts</h1></center>")
+            gr.Markdown("<center><h1>Code</h1></center>")
             code.render()
 
-demo.launch()
+if __name__ == "__main__":
+    demo.launch()
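The rewritten app.py drops the canned fib() snippets and instead streams completions from the Hugging Face Inference API, with the ChatInterface sliders wired into respond() through additional_inputs. As a minimal sketch of what that streaming loop does, the snippet below drives the same InferenceClient outside of Gradio; the model name and sampling values are taken from the diff, while the HF_TOKEN environment variable and the "if token" guard are assumptions added for illustration.

# Minimal sketch: call the streaming Inference API the same way respond() does.
# Assumes huggingface_hub>=0.26.2 and an HF_TOKEN with Inference API access.
import os

from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.getenv("HF_TOKEN"))

messages = [
    {"role": "system", "content": "You are a sassy chatbot."},
    {"role": "user", "content": "Say hello in one sentence."},
]

# With stream=True, chat_completion yields chunks whose delta.content carries the
# next piece of text, which respond() accumulates and re-yields to Gradio.
response = ""
for chunk in client.chat_completion(
    messages,
    max_tokens=512,
    stream=True,
    temperature=0.7,
    top_p=0.95,
):
    token = chunk.choices[0].delta.content
    if token:  # defensive guard (not in app.py): a chunk may carry no content
        response += token
        print(token, end="", flush=True)
print()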
requirements.txt CHANGED
@@ -1 +1 @@
-huggingface_hub==0.25.2
+huggingface_hub==0.26.2