import copy

import mesop as me

import components as mex
import dialogs
import handlers
import llm
from eval_table import prompt_eval_table
from tool_sidebar import tool_sidebar
from helpers import find_prompt, parse_variables
from state import State, Prompt

_INSTRUCTIONS = """
- Write your prompt.
  - You can use variables using this syntax `{{VARIABLE_NAME}}`.
- If you used variables, populate them from the `Set variables` dialog.
- Adjust model settings if necessary from the `Model settings` dialog.
- When you're ready, press the run button.
- If you make adjustments to your prompt or model settings, pressing run will create a
  new version of your prompt.
""".strip()


@me.page(
  security_policy=me.SecurityPolicy(allowed_iframe_parents=["https://huggingface.co"]),
)
def app():
  state = me.state(State)

  mex.snackbar(
    is_visible=state.show_snackbar, label=state.snackbar_message, horizontal_position="start"
  )

  dialogs.update_title()
  dialogs.model_settings()
  dialogs.prompt_variables()
  dialogs.prompt_version_history()
  dialogs.add_comparisons()
  dialogs.generate_prompt()
  dialogs.load_prompt()
  dialogs.add_row()

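  # Layout: the first grid row spans all columns for the header; below it, two wide
  # content columns plus a narrow third column for the tool sidebar.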
  with me.box(
    style=me.Style(
      background="#FDFDFD",
      display="grid",
      grid_template_columns="50fr 50fr 1fr",
      grid_template_rows="1fr 50fr",
      height="100vh",
    )
  ):
    with me.box(style=me.Style(grid_column="1 / -1")):
      with mex.header(max_width=None):
        with mex.header_section():
          with me.box(on_click=on_click_title, style=me.Style(cursor="pointer")):
            me.text(
              state.title,
              style=me.Style(font_size=16, font_weight="bold"),
            )
          if state.version:
            me.text(f"v{state.version}")

        with mex.header_section():
          mex.button_toggle(
            labels=["Prompt", "Eval"], selected=state.mode, on_click=on_click_mode_toggle
          )

    if state.mode == "Prompt":
      # Render prompt creation page
      with me.box(
        style=me.Style(padding=me.Padding(left=15, top=15, bottom=15), overflow_y="scroll")
      ):
        with mex.expanable_card(
          title="System Instructions",
          expanded=state.system_prompt_card_expanded,
          on_click_header=on_click_system_instructions_header,
        ):
          me.native_textarea(
            autosize=True,
            min_rows=2,
            placeholder="Optional tone and style instructions for the model",
            value=state.system_instructions,
            on_blur=handlers.on_update_input,
            style=_STYLE_INVISIBLE_TEXTAREA,
            key="system_instructions",
          )

        with mex.card(title="Prompt"):
          me.native_textarea(
            autosize=True,
            min_rows=2,
            placeholder="Enter your prompt",
            value=state.prompt,
            on_blur=on_update_prompt,
            style=_STYLE_INVISIBLE_TEXTAREA,
            key="prompt",
          )

        with me.box(
          style=me.Style(align_items="center", display="flex", justify_content="space-between")
        ):
          with me.content_button(
            type="flat",
            disabled=not state.prompt,
            on_click=on_click_run,
            style=me.Style(border_radius="10"),
          ):
            with me.tooltip(message="Run prompt"):
              me.icon("play_arrow")
          me.button(
            "Generate prompt",
            disabled=bool(state.prompt),
            style=me.Style(background="#EBF1FD", border_radius="10"),
            on_click=handlers.on_open_dialog,
            key="dialog_show_generate_prompt",
          )

      with me.box(style=me.Style(padding=me.Padding.all(15), overflow_y="scroll")):
        if state.response:
          with mex.card(title="Response", style=me.Style(overflow_y="hidden")):
            me.markdown(state.response)
        else:
          with mex.card(title="Prompt Tuner Instructions"):
            me.markdown(_INSTRUCTIONS)
    else:
      # Render eval page
      with me.box(style=me.Style(grid_column="1 / -2", overflow_y="scroll")):
        prompt = find_prompt(state.prompts, state.version)
        if prompt:
          with me.box(style=me.Style(margin=me.Margin.all(15))):
            compare_prompts = [
              prompt for prompt in state.prompts if prompt.version in state.comparisons
            ]
            prompt_eval_table(
              [prompt] + compare_prompts,
              on_select_rating=on_select_rating,
              on_click_run=on_click_eval_run,
            )
            me.button(
              label="Add row",
              type="flat",
              style=me.Style(
                margin=me.Margin(top=10),
                border_radius="10",
              ),
              key="dialog_show_add_row",
              on_click=handlers.on_open_dialog,
            )
    tool_sidebar()


# Event handlers


def on_click_system_instructions_header(e: me.ClickEvent):
  """Open/close system instructions card."""
  state = me.state(State)
  state.system_prompt_card_expanded = not state.system_prompt_card_expanded


def on_click_eval_run(e: me.ClickEvent):
  """Runs the prompt for a single cell in the eval table."""
  state = me.state(State)
  # The key encodes: <prefix>_<prompt version>_<response index>_<selected prompt response index>.
  _, prompt_version, response_index, selected_prompt_response_index = e.key.split("_")
  prompt = find_prompt(state.prompts, int(prompt_version))
  selected_prompt = find_prompt(state.prompts, state.version)

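  # A response index of -1 means the compared prompt has no response for this row yet,
  # so copy the variables from the selected prompt's response and add a new entry.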
  if response_index != "-1":
    response = prompt.responses[int(response_index)]
  else:
    response = {
      "variables": copy.copy(
        selected_prompt.responses[int(selected_prompt_response_index)]["variables"]
      ),
      "rating": 0,
    }
    prompt.responses.append(response)

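  # Substitute the {{VARIABLE}} placeholders with the stored values and run the prompt.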
  prompt_text = prompt.prompt
  for name, value in response["variables"].items():
    prompt_text = prompt_text.replace("{{" + name + "}}", value)
  response["output"] = llm.run_prompt(prompt_text, prompt.model, prompt.model_temperature)


def on_click_run(e: me.ClickEvent):
  """Runs the prompt with the given variables.

  A new version of the prompt will be created if the prompt, system instructions, or
  model settings have changed.

  A new response will be added if the variables have been updated.
  """
  state = me.state(State)
  num_versions = len(state.prompts)
  if state.version:
    current_prompt_meta = state.prompts[state.version - 1]
  else:
    current_prompt_meta = Prompt()

  variable_names = set(parse_variables(state.prompt))
  prompt_variables = {
    name: value for name, value in state.prompt_variables.items() if name in variable_names
  }

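  # Create a new prompt version if the prompt, system instructions, or model settings
  # changed since the current version.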
  if (
    current_prompt_meta.prompt != state.prompt
    or current_prompt_meta.system_instructions != state.system_instructions
    or current_prompt_meta.model != state.model
    or current_prompt_meta.model_temperature != state.model_temperature
  ):
    new_version = num_versions + 1
    state.prompts.append(
      Prompt(
        version=new_version,
        prompt=state.prompt,
        system_instructions=state.system_instructions,
        model=state.model,
        model_temperature=state.model_temperature,
        variables=list(variable_names),
      )
    )
    state.version = new_version

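  # Substitute the {{VARIABLE}} placeholders before sending the prompt to the model.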
  prompt = state.prompt
  for name, value in prompt_variables.items():
    prompt = prompt.replace("{{" + name + "}}", value)
  state.response = llm.run_prompt(prompt, state.model, state.model_temperature)
  state.prompts[-1].responses.append(dict(output=state.response, variables=prompt_variables))


def on_click_title(e: me.ClickEvent):
  """Show dialog for editing the title of the prompt."""
  state = me.state(State)
  state.temp_title = state.title
  state.dialog_show_title = True


def on_update_prompt(e: me.InputBlurEvent):
  """Saves the prompt.

  Any new variables will be extracted from the prompt and added to prompt variables in
  the variables dialog.
  """
  state = me.state(State)
  state.prompt = e.value.strip()
  variable_names = parse_variables(state.prompt)
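  # Register newly introduced variables with an empty default value so they show up
  # in the variables dialog.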
  for variable_name in variable_names:
    if variable_name not in state.prompt_variables:
      state.prompt_variables[variable_name] = ""


def on_click_mode_toggle(e: me.ClickEvent):
  """Toggle between Prompt and Eval modes."""
  state = me.state(State)
  state.mode = "Eval" if state.mode == "Prompt" else "Prompt"


def on_select_rating(e: me.SelectSelectionChangeEvent):
  """Stores the selected rating on the corresponding response in the eval table."""
  state = me.state(State)
  _, prompt_version, response_index = e.key.split("_")
  prompt = find_prompt(state.prompts, int(prompt_version))
  prompt.responses[int(response_index)]["rating"] = e.value


# Style helpers

_STYLE_INVISIBLE_TEXTAREA = me.Style(
  overflow_y="hidden",
  width="100%",
  outline="none",
  border=me.Border.all(me.BorderSide(style="none")),
)