MiladMola committed
Commit 0ebc354
1 Parent(s): d6dac2d

Update app.py

Files changed (1): app.py (+33 -18)
app.py CHANGED
@@ -74,11 +74,11 @@ def respond(
     message,
     history: list[tuple[str, str]],
     system_message,
-    max_tokens,
+    # max_tokens,
     temperature,
-    top_p,
-    top_k,
-    repeat_penalty,
+    # top_p,
+    # top_k,
+    # repeat_penalty,
     model,
 ):
     chat_template = get_messages_formatter_type(model)
@@ -102,10 +102,10 @@ def respond(
 
     settings = provider.get_provider_default_settings()
     settings.temperature = temperature
-    settings.top_k = top_k
-    settings.top_p = top_p
-    settings.max_tokens = max_tokens
-    settings.repeat_penalty = repeat_penalty
+    # settings.top_k = top_k
+    # settings.top_p = top_p
+    # settings.max_tokens = max_tokens
+    # settings.repeat_penalty = repeat_penalty
     settings.stream = True
 
     messages = BasicChatHistory()
@@ -139,28 +139,43 @@ PLACEHOLDER = """
     <div class="message-bubble-border" style="display:flex; max-width: 600px; border-radius: 8px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); backdrop-filter: blur(10px);">
         <div style="padding: .5rem 1.5rem;">
             <img src="https://avatars.githubusercontent.com/u/39557177?v=4" style="width: 80%; max-width: 550px; height: auto; opacity: 0.80; ">
-            <h2 dir="rtl" style="text-align: center; font-size: 1.5rem; font-weight: 700; margin-bottom: 0.5rem;">با فرمت‌های GGUF درنا چت کنید!</h2>
+            <h2 dir="rtl" style="text-align: right; font-size: 1.5rem; font-weight: 700; margin-bottom: 0.5rem;">با فرمت‌های GGUF درنا چت کنید!</h2>
         </div>
 """
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
         gr.Textbox(value="", label="System message", rtl=False),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.1, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.85,
-            step=0.05,
-            label="Top-p",
-        ),
+        #gr.Slider(minimum=1, maximum=8192, value=2048, step=1, label="Max tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        # gr.Slider(
+        #     minimum=0.1,
+        #     maximum=1.0,
+        #     value=0.85,
+        #     step=0.05,
+        #     label="Top-p",
+        # ),
+        # gr.Slider(
+        #     minimum=0,
+        #     maximum=100,
+        #     value=40,
+        #     step=1,
+        #     label="Top-k",
+        # ),
+        # gr.Slider(
+        #     minimum=0.0,
+        #     maximum=2.0,
+        #     value=1,
+        #     step=0.1,
+        #     label="Repetition penalty",
+        # ),
         gr.Dropdown([
                 'dorna-llama3-8b-instruct.Q8_0.gguf',
                 'dorna-llama3-8b-instruct.Q4_0.gguf',
                 'dorna-llama3-8b-instruct.Q5_0.gguf',
                 'dorna-llama3-8b-instruct.bf16.gguf',
             ],
-            value="dorna-llama3-8b-instruct.Q8_0.gguf",
+            value="dorna-llama3-8b-instruct.Q4_0.gguf",
             label="Model"
         ),
     ],
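In short, the commit comments out the Max tokens, Top-p, Top-k, and Repetition penalty controls in both the respond() signature and the additional_inputs list, changes the Temperature slider default to 0.7, preselects the Q4_0 GGUF instead of Q8_0, and right-aligns the RTL placeholder heading (Persian for "Chat with Dorna in GGUF formats!"). The two halves of the change have to move together: gr.ChatInterface passes the components in additional_inputs positionally to respond after (message, history), so every slider removed from the UI must also disappear from the function signature. Below is a minimal sketch of the post-commit wiring, using only names visible in this diff; the llama-cpp-agent provider setup and the streaming body of respond live elsewhere in app.py and are elided here.

# Sketch of app.py after this commit, reconstructed from the diff above.
# get_messages_formatter_type, provider, and BasicChatHistory are defined
# earlier in app.py (not shown); this is illustrative, not the full file.
import gradio as gr

def respond(
    message,            # chat textbox input
    history: list[tuple[str, str]],
    system_message,     # <- gr.Textbox "System message"
    temperature,        # <- gr.Slider "Temperature" (only sampling knob kept)
    model,              # <- gr.Dropdown "Model"
):
    chat_template = get_messages_formatter_type(model)
    settings = provider.get_provider_default_settings()
    settings.temperature = temperature
    settings.stream = True          # top_k/top_p/max_tokens/repeat_penalty
    messages = BasicChatHistory()   # now stay at the provider defaults
    ...                             # history handling and streaming elided

demo = gr.ChatInterface(
    respond,
    additional_inputs=[  # passed positionally after (message, history)
        gr.Textbox(value="", label="System message", rtl=False),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Dropdown(
            [
                'dorna-llama3-8b-instruct.Q8_0.gguf',
                'dorna-llama3-8b-instruct.Q4_0.gguf',
                'dorna-llama3-8b-instruct.Q5_0.gguf',
                'dorna-llama3-8b-instruct.bf16.gguf',
            ],
            value="dorna-llama3-8b-instruct.Q4_0.gguf",
            label="Model",
        ),
    ],
)

Because the settings assignments are commented out as well, top_k, top_p, max_tokens, and repeat_penalty now fall back to whatever provider.get_provider_default_settings() returns.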