Richard committed
Commit f3141ae · 1 Parent(s): a951f66

Improve Eval page

Also heavily refactor code to be a bit more maintainable

.gitignore CHANGED
@@ -4,3 +4,7 @@ __pycache__
 
  # Mesop
  .env
+
+
+ # Prompt Tuner App
+ saved_prompts
README.md CHANGED
@@ -9,5 +9,9 @@ app_port: 8080
 
  # Mesop Prompt Tuner
 
- Prompt tuner UI built using [Mesop](https://google.github.io/mesop/). This is a
- work in progress.
+ _This is a work in progress._
+
+ This app is built using [Mesop](https://google.github.io/mesop/). The UI/UX is
+ heavily inspired by the Anthropic Console Workbench.
+
+ You can test out the WIP demo on [Hugging Face Spaces](https://huggingface.co/spaces/richard-to/mesop-prompt-tuner).
components/__init__.py CHANGED
@@ -7,4 +7,3 @@ from components.header import header as header
  from components.header import header_section as header_section
  from components.sidebar import icon_sidebar as icon_sidebar
  from components.sidebar import icon_menu_item as icon_menu_item
- from components.table import prompt_eval_table as prompt_eval_table
components/sidebar.py CHANGED
@@ -14,6 +14,7 @@ def icon_sidebar():
    style=me.Style(
      background="#F5F8FC",
      border=me.Border.symmetric(horizontal=me.BorderSide(width=1, style="solid", color="#DEE2E6")),
+     height="100%",
    )
  ):
    me.slot()
components/table.py DELETED
@@ -1,131 +0,0 @@
- from typing import Callable
-
- import mesop as me
-
- _NUM_REQUIRED_ROWS = 3
-
-
- @me.component
- def prompt_eval_table(prompt, on_select_rating: Callable | None = None):
-   """Creates a grid table for displaying and comparing different prompt version runs."""
-   # Add a row for each variable
-   num_vars = len(prompt.variables)
-   table_size = num_vars + _NUM_REQUIRED_ROWS
-   with me.box(
-     style=me.Style(
-       border=me.Border.all(me.BorderSide(width=1, style="solid", color="#DEE2E6")),
-       display="grid",
-       grid_template_columns=f"1fr repeat({num_vars}, 20fr) 20fr 1fr"
-       if num_vars
-       else "1fr 20fr 1fr",
-       margin=me.Margin.all(15),
-       overflow_x="scroll",
-     )
-   ):
-     # Render first row. This row only displays the Prompt version.
-     for i in range(table_size):
-       with me.box(
-         style=me.Style(
-           background="#FFF",
-           border=me.Border.all(me.BorderSide(width=1, style="solid", color="#DEE2E6")),
-           color="#000",
-           font_size=15,
-           font_weight="bold",
-           padding=me.Padding.all(10),
-         )
-       ):
-         if i == num_vars + 1:
-           me.text(f"Version {prompt.version}")
-         else:
-           me.text("")
-
-     # Render second row. This row only displays the headers of the table:
-     # variable names, model response, avg rating.
-     header_row = [""] + prompt.variables + ["Model response"] + [""]
-     for i, header_text in enumerate(header_row):
-       with me.box(
-         style=me.Style(
-           background="#FFF",
-           border=me.Border.all(me.BorderSide(width=1, style="solid", color="#DEE2E6")),
-           color="#0063FF" if header_text and header_text != "Model response" else "#444",
-           font_size=13,
-           font_weight="bold",
-           padding=me.Padding.all(10),
-         )
-       ):
-         # Handle the variable header case.
-         if header_text and header_text != "Model response":
-           me.text("{{" + header_text + "}}")
-         elif i == table_size - 1:
-           avg_rating = _calculate_avg_rating_from_prompt(prompt)
-           if avg_rating is not None:
-             with me.tooltip(message="Average rating"):
-               me.text(f"{avg_rating:.2f}", style=me.Style(text_align="center"))
-           else:
-             me.text("")
-         else:
-           me.text(header_text)
-
-     # Render the data rows by going through the prompt responses.
-     for row_index, example in enumerate(prompt.responses):
-       content_row = (
-         [row_index]
-         + [example["variables"][v] for v in prompt.variables]
-         + [example["output"], example.get("rating", "")]
-       )
-       for col_index, content in enumerate(content_row):
-         if col_index == len(content_row) - 1:
-           with me.box(
-             style=me.Style(
-               background="#FFF",
-               border=me.Border.all(me.BorderSide(width=1, style="solid", color="#DEE2E6")),
-               color="#000",
-               padding=me.Padding.all(10),
-             )
-           ):
-             me.select(
-               value=content,
-               options=[
-                 me.SelectOption(label="1", value="1"),
-                 me.SelectOption(label="2", value="2"),
-                 me.SelectOption(label="3", value="3"),
-                 me.SelectOption(label="4", value="4"),
-                 me.SelectOption(label="5", value="5"),
-               ],
-               on_selection_change=on_select_rating,
-               key=f"rating_{prompt.version}_{row_index}",
-               style=me.Style(width=60),
-             )
-         elif col_index == 0 or not content:
-           with me.box(
-             style=me.Style(
-               background="#FFF",
-               border=me.Border.all(me.BorderSide(width=1, style="solid", color="#DEE2E6")),
-               color="#000",
-               font_size=14,
-               padding=me.Padding.all(10),
-               text_align="center",
-             )
-           ):
-             me.text(str(content))
-         else:
-           with me.box(
-             style=me.Style(
-               background="#FFF",
-               border=me.Border.all(me.BorderSide(width=1, style="solid", color="#DEE2E6")),
-               color="#000",
-               font_size=14,
-               padding=me.Padding.all(10),
-               max_height=300,
-               min_width=300,
-               overflow_y="scroll",
-             )
-           ):
-             me.markdown(content)
-
-
- def _calculate_avg_rating_from_prompt(prompt) -> float | None:
-   ratings = [int(response["rating"]) for response in prompt.responses if response.get("rating")]
-   if ratings:
-     return sum(ratings) / float(len(ratings))
-   return None
constants.py ADDED
@@ -0,0 +1,6 @@
+ DIALOG_INPUT_WIDTH = 350
+
+ MODEL_TEMPERATURE_MAX = 2
+ MODEL_TEMPERATURE_MIN = 0
+
+ SAVED_PROMPT_DIRECTORY = "saved_prompts"
dialogs/__init__.py ADDED
@@ -0,0 +1,7 @@
+ from dialogs.update_title import update_title as update_title
+ from dialogs.model_settings import model_settings as model_settings
+ from dialogs.prompt_variables import prompt_variables as prompt_variables
+ from dialogs.prompt_version_history import prompt_version_history as prompt_version_history
+ from dialogs.add_comparisons import add_comparisons as add_comparisons
+ from dialogs.generate_prompt import generate_prompt as generate_prompt
+ from dialogs.load_prompt import load_prompt as load_prompt
dialogs/add_comparisons.py ADDED
@@ -0,0 +1,34 @@
+ import mesop as me
+
+ import components as mex
+ import handlers
+ from constants import DIALOG_INPUT_WIDTH
+ from helpers import parse_variables
+ from state import State
+
+
+ @me.component
+ def add_comparisons():
+   state = me.state(State)
+   with mex.dialog(state.dialog_show_add_comparison):
+     variable_names = set(parse_variables(state.prompt))
+     me.text("Add Comparisons", type="headline-6")
+     me.select(
+       label="Select Versions",
+       multiple=True,
+       options=[
+         me.SelectOption(label=f"v{prompt.version}", value=str(prompt.version))
+         for prompt in state.prompts
+         if prompt.version != state.version and set(prompt.variables) == variable_names
+       ],
+       style=me.Style(width=DIALOG_INPUT_WIDTH),
+       on_selection_change=on_select_comparison,
+     )
+     with mex.dialog_actions():
+       me.button("Close", key="dialog_show_add_comparison", on_click=handlers.on_close_dialog)
+
+
+ def on_select_comparison(e: me.SelectSelectionChangeEvent):
+   """Update UI to show the selected prompt version and close the dialog."""
+   state = me.state(State)
+   state.comparisons = list(map(int, e.values))
dialogs/generate_prompt.py ADDED
@@ -0,0 +1,39 @@
+ import mesop as me
+
+ import components as mex
+ import handlers
+ import llm
+ from constants import DIALOG_INPUT_WIDTH
+ from helpers import parse_variables
+ from state import State
+
+
+ @me.component
+ def generate_prompt():
+   state = me.state(State)
+   with mex.dialog(state.dialog_show_generate_prompt):
+     me.text("Generate Prompt", type="headline-6")
+     me.textarea(
+       label="Describe your task",
+       value=state.prompt_gen_task_description,
+       on_blur=handlers.on_update_input,
+       key="prompt_gen_task_description",
+       style=me.Style(width=DIALOG_INPUT_WIDTH),
+     )
+     with mex.dialog_actions():
+       me.button("Close", key="dialog_show_generate_prompt", on_click=handlers.on_close_dialog)
+       me.button("Generate", type="flat", on_click=on_click_generate_prompt)
+
+
+ def on_click_generate_prompt(e: me.ClickEvent):
+   """Generates an improved prompt based on the given task description and closes dialog."""
+   state = me.state(State)
+   state.prompt = llm.generate_prompt(
+     state.prompt_gen_task_description, state.model, state.model_temperature
+   )
+   variable_names = parse_variables(state.prompt)
+   for variable_name in variable_names:
+     if variable_name not in state.prompt_variables:
+       state.prompt_variables[variable_name] = ""
+
+   state.dialog_show_generate_prompt = False
dialogs/load_prompt.py ADDED
@@ -0,0 +1,36 @@
+ import json
+
+ import mesop as me
+
+ import components as mex
+ import handlers
+ from constants import DIALOG_INPUT_WIDTH
+ from state import State
+ from state import Prompt
+
+
+ @me.component
+ def load_prompt():
+   state = me.state(State)
+   with mex.dialog(state.dialog_show_load):
+     with me.box(style=me.Style(width=DIALOG_INPUT_WIDTH)):
+       me.text("Upload saved prompt", type="headline-6")
+       me.uploader(
+         label="Upload",
+         accepted_file_types=["application/json"],
+         type="flat",
+         color="primary",
+         on_upload=on_upload_prompt,
+         style=me.Style(font_weight="bold"),
+       )
+     with mex.dialog_actions():
+       me.button("Close", key="dialog_show_load", on_click=handlers.on_close_dialog)
+
+
+ def on_upload_prompt(e: me.UploadEvent):
+   state = me.state(State)
+   data = json.loads(e.file.getvalue())
+   data["prompts"] = [Prompt(**raw_prompt) for raw_prompt in data["prompts"]]
+   for key, value in data.items():
+     setattr(state, key, value)
+   state.dialog_show_load = False
dialogs/model_settings.py ADDED
@@ -0,0 +1,68 @@
+ import mesop as me
+
+ import components as mex
+ import handlers
+ from constants import DIALOG_INPUT_WIDTH
+ from constants import MODEL_TEMPERATURE_MAX
+ from constants import MODEL_TEMPERATURE_MIN
+ from state import State
+
+
+ @me.component
+ def model_settings():
+   state = me.state(State)
+   with mex.dialog(state.dialog_show_model_settings):
+     me.text("Model Settings", type="headline-6")
+     with me.box():
+       me.select(
+         label="Model",
+         key="model",
+         options=[
+           me.SelectOption(label="Gemini 1.5 Flash", value="gemini-1.5-flash"),
+           me.SelectOption(label="Gemini 1.5 Pro", value="gemini-1.5-pro"),
+         ],
+         value=state.model,
+         style=me.Style(width=DIALOG_INPUT_WIDTH),
+         on_selection_change=handlers.on_update_selection,
+       )
+     with me.box():
+       me.text("Temperature", style=me.Style(font_weight="bold"))
+       with me.box(style=me.Style(display="flex", gap=10, width=DIALOG_INPUT_WIDTH)):
+         me.slider(
+           min=MODEL_TEMPERATURE_MIN,
+           max=MODEL_TEMPERATURE_MAX,
+           step=0.1,
+           style=me.Style(width=260),
+           on_value_change=on_slider_temperature,
+           value=state.model_temperature,
+         )
+         me.input(
+           value=state.model_temperature_input,
+           on_input=on_input_temperature,
+           style=me.Style(width=60),
+         )
+
+     with mex.dialog_actions():
+       me.button(
+         "Close",
+         key="dialog_show_model_settings",
+         on_click=handlers.on_close_dialog,
+       )
+
+
+ def on_slider_temperature(e: me.SliderValueChangeEvent):
+   """Adjust temperature slider value."""
+   state = me.state(State)
+   state.model_temperature = float(e.value)
+   state.model_temperature_input = str(state.model_temperature)
+
+
+ def on_input_temperature(e: me.InputEvent):
+   """Adjust temperature slider value by input."""
+   state = me.state(State)
+   try:
+     model_temperature = float(e.value)
+     if MODEL_TEMPERATURE_MIN <= model_temperature <= MODEL_TEMPERATURE_MAX:
+       state.model_temperature = model_temperature
+   except ValueError:
+     pass
dialogs/prompt_variables.py ADDED
@@ -0,0 +1,60 @@
+ import mesop as me
+
+ import components as mex
+ import handlers
+ import llm
+ from constants import DIALOG_INPUT_WIDTH
+ from helpers import parse_variables
+ from state import State
+
+
+ @me.component
+ def prompt_variables():
+   state = me.state(State)
+
+   with mex.dialog(state.dialog_show_prompt_variables):
+     me.text("Prompt Variables", type="headline-6")
+     if not state.prompt_variables:
+       me.text("No variables defined in prompt.", style=me.Style(width=DIALOG_INPUT_WIDTH))
+     else:
+       with me.box(
+         style=me.Style(display="flex", justify_content="end", margin=me.Margin(bottom=15))
+       ):
+         me.button("Generate", type="flat", on_click=on_click_generate_variables)
+       variable_names = set(parse_variables(state.prompt))
+       with me.box(style=me.Style(display="flex", flex_direction="column")):
+         for name, value in state.prompt_variables.items():
+           if name not in variable_names:
+             continue
+           me.textarea(
+             label=name,
+             value=value,
+             on_blur=on_input_variable,
+             style=me.Style(width=DIALOG_INPUT_WIDTH),
+             key=name,
+           )
+
+     with mex.dialog_actions():
+       me.button("Close", on_click=handlers.on_close_dialog, key="dialog_show_prompt_variables")
+
+
+ def on_input_variable(e: me.InputBlurEvent):
+   """Generic event to save input variables.
+
+   TODO: Probably should prefix the key to avoid key collisions.
+   """
+   state = me.state(State)
+   state.prompt_variables[e.key] = e.value
+
+
+ def on_click_generate_variables(e: me.ClickEvent):
+   """Generates values for the given empty variables."""
+   state = me.state(State)
+   print(state.model)
+   variable_names = set(parse_variables(state.prompt))
+   generated_variables = llm.generate_variables(
+     state.prompt, variable_names, state.model, state.model_temperature
+   )
+   for name in state.prompt_variables:
+     if name in variable_names and name in generated_variables:
+       state.prompt_variables[name] = generated_variables[name]
dialogs/prompt_version_history.py ADDED
@@ -0,0 +1,47 @@
+ import mesop as me
+
+ import components as mex
+ import handlers
+ from constants import DIALOG_INPUT_WIDTH
+ from helpers import find_prompt
+ from state import State
+ from state import Prompt
+
+
+ @me.component
+ def prompt_version_history():
+   state = me.state(State)
+   with mex.dialog(state.dialog_show_version_history):
+     me.text("Version history", type="headline-6")
+     me.select(
+       label="Select Version",
+       options=[
+         me.SelectOption(label=f"v{prompt.version}", value=str(prompt.version))
+         for prompt in state.prompts
+       ],
+       style=me.Style(width=DIALOG_INPUT_WIDTH),
+       on_selection_change=on_select_version,
+     )
+     with mex.dialog_actions():
+       me.button("Close", key="dialog_show_version_history", on_click=handlers.on_close_dialog)
+
+
+ def on_select_version(e: me.SelectSelectionChangeEvent):
+   """Update UI to show the selected prompt version and close the dialog."""
+   state = me.state(State)
+   selected_version = int(e.value)
+   prompt = find_prompt(state.prompts, selected_version)
+   if prompt != Prompt():
+     state.prompt = prompt.prompt
+     state.version = prompt.version
+     state.system_instructions = prompt.system_instructions
+     state.model = prompt.model
+     state.model_temperature = prompt.model_temperature
+     state.model_temperature_input = str(prompt.model_temperature)
+     # If there is an existing response, select the most recent one.
+     if prompt.responses:
+       state.prompt_variables = prompt.responses[-1]["variables"]
+       state.response = prompt.responses[-1]["output"]
+     else:
+       state.response = ""
+   state.dialog_show_version_history = False
dialogs/update_title.py ADDED
@@ -0,0 +1,32 @@
+ import mesop as me
+
+ import components as mex
+ import handlers
+ from constants import DIALOG_INPUT_WIDTH
+ from state import State
+
+
+ @me.component
+ def update_title():
+   state = me.state(State)
+   # Update prompt title dialog
+   with mex.dialog(state.dialog_show_title):
+     me.text("Update Prompt Title", type="headline-6")
+     me.input(
+       label="Title",
+       value=state.temp_title,
+       on_blur=handlers.on_update_input,
+       key="temp_title",
+       style=me.Style(width=DIALOG_INPUT_WIDTH),
+     )
+     with mex.dialog_actions():
+       me.button("Cancel", on_click=handlers.on_close_dialog, key="dialog_show_title")
+       me.button("Save", type="flat", disabled=not state.temp_title.strip(), on_click=on_save_title)
+
+
+ def on_save_title(e: me.InputBlurEvent):
+   """Saves the title and closes the dialog."""
+   state = me.state(State)
+   if state.temp_title:
+     state.title = state.temp_title
+   state.dialog_show_title = False
eval_table.py ADDED
@@ -0,0 +1,179 @@
+ from typing import Callable
+ import hashlib
+
+ import mesop as me
+
+ from state import Prompt
+
+
+ @me.component
+ def prompt_eval_table(prompts: list[Prompt], on_select_rating: Callable | None = None):
+   data = _make_table_meta(prompts)
+   response_map = _make_response_map(prompts)
+
+   with me.box(
+     style=me.Style(
+       display="grid",
+       grid_template_columns=" ".join([row.get("size", "1fr") for row in data]),
+       overflow_x="scroll",
+     )
+   ):
+     # Render first header row. This row only displays the Prompt version.
+     for row in data:
+       with me.box(style=_HEADER_STYLE):
+         me.text(row.get("header_1", ""))
+
+     # Render second header row.
+     for row in data:
+       with me.box(style=_HEADER_STYLE):
+         if row["type"] == "model_rating":
+           avg_rating = _calculate_avg_rating_from_prompt(row["prompt"])
+           if avg_rating is not None:
+             with me.tooltip(message="Average rating"):
+               me.text(
+                 f"{avg_rating:.2f}", style=me.Style(font_size=13, color="#666", text_align="center")
+               )
+         elif row["type"] == "variable":
+           me.text(row.get("header_2", ""), style=me.Style(font_size=13, color="#0063FF"))
+         else:
+           me.text(row.get("header_2", ""), style=me.Style(font_size=13, color="#aaa"))
+
+     # Render examples
+     for row_index, example in enumerate(prompts[0].responses):
+       response_key = _make_variables_key(example["variables"])
+       for row in data:
+         if row["type"] == "index":
+           with me.box(style=_INDEX_STYLE):
+             me.text(str(row_index))
+         elif row["type"] == "variable":
+           with me.box(style=_MARKDOWN_BOX_STYLE):
+             me.markdown(example["variables"][row["variable_name"]])
+         elif row["type"] == "model_response":
+           with me.box(style=_MARKDOWN_BOX_STYLE):
+             prompt_response = response_map[row["prompt"].version].get(response_key)
+             if prompt_response:
+               me.markdown(prompt_response[0]["output"])
+             else:
+               me.text("")
+         elif row["type"] == "model_rating":
+           with me.box(style=_RATING_STYLE):
+             prompt_response = response_map[row["prompt"].version].get(response_key)
+             if prompt_response:
+               me.select(
+                 value=prompt_response[0].get("rating", ""),
+                 options=[
+                   me.SelectOption(label="1", value="1"),
+                   me.SelectOption(label="2", value="2"),
+                   me.SelectOption(label="3", value="3"),
+                   me.SelectOption(label="4", value="4"),
+                   me.SelectOption(label="5", value="5"),
+                 ],
+                 on_selection_change=on_select_rating,
+                 key=f"rating_{row["prompt"].version}_{prompt_response[1]}",
+                 style=me.Style(width=60),
+               )
+
+
+ def _hash_string(string: str) -> str:
+   encoded_string = string.encode("utf-8")
+   result = hashlib.md5(encoded_string)
+   return result.hexdigest()
+
+
+ def _make_variables_key(variables: dict[str, str]) -> str:
+   return _hash_string("--".join(variables.values()))
+
+
+ def _make_response_map(prompts: list[Prompt]) -> dict:
+   prompt_map = {}
+   for prompt in prompts:
+     response_map = {}
+     for response_index, response in enumerate(prompt.responses):
+       key = _make_variables_key(response["variables"])
+       response_map[key] = (response, response_index)
+     prompt_map[prompt.version] = response_map
+   return prompt_map
+
+
+ def _make_table_meta(prompts: list[Prompt]) -> dict:
+   data = [
+     {
+       "type": "index",
+     }
+   ]
+   for variable in prompts[0].variables:
+     data.append(
+       {
+         "type": "variable",
+         "header_2": "{{" + variable + "}}",
+         "size": "20fr",
+         "variable_name": variable,
+       }
+     )
+
+   for i, prompt in enumerate(prompts):
+     data.append(
+       {
+         "type": "model_response",
+         "header_1": "Version " + str(prompt.version),
+         "header_2": "Model response",
+         "index": i,
+         "size": "20fr",
+         "prompt": prompt,
+       }
+     )
+     data.append(
+       {
+         "type": "model_rating",
+         "header_1": "",
+         "header_2": "Rating",
+         "prompt": prompt,
+       }
+     )
+   return data
+
+
+ def _calculate_avg_rating_from_prompt(prompt: Prompt) -> float | None:
+   ratings = [int(response["rating"]) for response in prompt.responses if response.get("rating")]
+   if ratings:
+     return sum(ratings) / float(len(ratings))
+   return None
+
+
+ _BORDER_SIDE = me.BorderSide(width=1, style="solid", color="#DEE2E6")
+
+ _HEADER_STYLE = me.Style(
+   background="#FFF",
+   border=me.Border.all(_BORDER_SIDE),
+   color="#000",
+   font_size=15,
+   font_weight="bold",
+   padding=me.Padding.all(10),
+ )
+
+ _INDEX_STYLE = me.Style(
+   background="#FFF",
+   border=me.Border.all(_BORDER_SIDE),
+   color="#000",
+   font_size=14,
+   padding=me.Padding.all(10),
+   text_align="center",
+ )
+
+ _MARKDOWN_BOX_STYLE = me.Style(
+   background="#FFF",
+   border=me.Border.all(_BORDER_SIDE),
+   color="#000",
+   font_size=14,
+   padding=me.Padding.all(10),
+   max_height=300,
+   min_width=300,
+   overflow_y="scroll",
+ )
+
+ _RATING_STYLE = me.Style(
+   background="#FFF",
+   border=me.Border.all(_BORDER_SIDE),
+   color="#000",
+   padding=me.Padding.all(10),
+ )
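The new table matches responses across prompt versions by hashing each run's variable values. A minimal sketch, not part of the commit, of the lookup structure `_make_response_map` builds (it reaches into the module's private helpers purely for illustration):

```python
# Illustrative only; shows how responses from different versions line up
# when they were run with the same variable values.
from eval_table import _make_response_map, _make_variables_key
from state import Prompt

v1 = Prompt(
  version=1,
  variables=["article"],
  responses=[{"variables": {"article": "Some text"}, "output": "Summary A", "rating": "4"}],
)
response_map = _make_response_map([v1])  # {version: {md5(variable values): (response, index)}}
response, index = response_map[1][_make_variables_key({"article": "Some text"})]
print(response["output"], index)  # Summary A 0
```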
handlers.py ADDED
@@ -0,0 +1,27 @@
+ import mesop as me
+ from state import State
+
+
+ def on_open_dialog(e: me.ClickEvent):
+   """Generic event to open a dialog."""
+   state = me.state(State)
+   setattr(state, e.key, True)
+
+
+ def on_close_dialog(e: me.ClickEvent):
+   """Generic event to close a dialog."""
+   state = me.state(State)
+   setattr(state, e.key, False)
+
+
+ def on_update_input(e: me.InputBlurEvent | me.InputEvent | me.InputEnterEvent):
+   """Generic event to update input values."""
+   state = me.state(State)
+   setattr(state, e.key, e.value)
+
+
+ def on_update_selection(e: me.SelectSelectionChangeEvent):
+   """Generic event to update input values."""
+   state = me.state(State)
+   print("EEE", e)
+   setattr(state, e.key, e.value)
helpers.py ADDED
@@ -0,0 +1,18 @@
+ import re
+ from state import Prompt
+
+
+ _RE_VARIABLES = re.compile(r"\{\{(\w+)\}\}")
+
+
+ def parse_variables(prompt: str) -> list[str]:
+   return _RE_VARIABLES.findall(prompt)
+
+
+ def find_prompt(prompts: list[Prompt], version: int) -> Prompt:
+   # We don't expect too many versions, so we'll just loop through the list to find the
+   # right version.
+   for prompt in prompts:
+     if prompt.version == version:
+       return prompt
+   return Prompt()
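A quick usage sketch of the two extracted helpers (illustrative, not part of the commit):

```python
# Illustrative only; exercises the helpers moved out of main.py in this commit.
from helpers import find_prompt, parse_variables
from state import Prompt

print(parse_variables("Summarize {{article}} for {{audience}}."))  # ['article', 'audience']

prompts = [Prompt(version=1), Prompt(version=2)]
print(find_prompt(prompts, 2).version)  # 2
print(find_prompt(prompts, 5))          # no match: falls back to an empty Prompt()
```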
main.py CHANGED
@@ -1,15 +1,13 @@
- from dataclasses import dataclass, field
- import re
-
  import mesop as me
 
  import components as mex
+ import dialogs
+ import handlers
  import llm
-
- _DIALOG_INPUT_WIDTH = 350
-
- _MODEL_TEMPERATURE_MAX = 2
- _MODEL_TEMPERATURE_MIN = 0
+ from eval_table import prompt_eval_table
+ from tool_sidebar import tool_sidebar
+ from helpers import find_prompt, parse_variables
+ from state import State, Prompt
 
  _INSTRUCTIONS = """
  - Write your prompt.
@@ -21,55 +19,6 @@ _INSTRUCTIONS = """
    new version of your prompt.
  """.strip()
 
- _RE_VARIABLES = re.compile(r"\{\{(\w+)\}\}")
-
-
- @dataclass
- class Prompt:
-   prompt: str = ""
-   model: str = ""
-   model_temperature: float = 0.0
-   system_instructions: str = ""
-   version: int = 0
-   variables: list[str] = field(default_factory=lambda: [])
-   # Storing the responses as a dict to workaround bug with lists
-   # of nested dataclass.
-   responses: list[dict] = field(default_factory=lambda: [])
-
-
- @me.stateclass
- class State:
-   # Main UI variables
-   system_prompt_card_expanded: bool = False
-   title: str = "Untitled Prompt"
-   temp_title: str
-   system_instructions: str
-   prompt: str
-   response: str
-   version: int = 0
-
-   # Prompt variables
-   prompt_variables: dict[str, str]
-
-   # Model info
-   model: str = "gemini-1.5-flash"
-   model_temperature: float = 1.0
-   model_temperature_input: str = "1.0"
-
-   # Dialogs
-   dialog_show_title: bool = False
-   dialog_show_model_settings: bool = False
-   dialog_show_prompt_variables: bool = False
-   dialog_show_generate_prompt: bool = False
-   dialog_show_version_history: bool = False
-   prompts: list[Prompt]
-
-   # LLM Generate functionality
-   prompt_gen_task_description: str
-
-   # Valid modes: Prompt or Eval
-   mode: str = "Prompt"
-
 
  @me.page(
    security_policy=me.SecurityPolicy(allowed_iframe_parents=["https://huggingface.co"]),
@@ -77,114 +26,13 @@ class State:
  def app():
    state = me.state(State)
 
-   # Update prompt title dialog
-   with mex.dialog(state.dialog_show_title):
-     me.text("Update Prompt Title", type="headline-6")
-     me.input(
-       label="Title",
-       value=state.temp_title,
-       on_blur=on_update_input,
-       key="temp_title",
-       style=me.Style(width=_DIALOG_INPUT_WIDTH),
-     )
-     with mex.dialog_actions():
-       me.button("Cancel", on_click=on_close_dialog, key="dialog_show_title")
-       me.button("Save", type="flat", disabled=not state.temp_title.strip(), on_click=on_save_title)
-
-   # Dialog for controlling Model settings
-   with mex.dialog(state.dialog_show_model_settings):
-     me.text("Model Settings", type="headline-6")
-     with me.box():
-       me.select(
-         label="Model",
-         key="model",
-         options=[
-           me.SelectOption(label="Gemini 1.5 Flash", value="gemini-1.5-flash"),
-           me.SelectOption(label="Gemini 1.5 Pro", value="gemini-1.5-pro"),
-         ],
-         value=state.model,
-         style=me.Style(width=_DIALOG_INPUT_WIDTH),
-         on_selection_change=on_update_input,
-       )
-     with me.box():
-       me.text("Temperature", style=me.Style(font_weight="bold"))
-       with me.box(style=me.Style(display="flex", gap=10, width=_DIALOG_INPUT_WIDTH)):
-         me.slider(
-           min=_MODEL_TEMPERATURE_MIN,
-           max=_MODEL_TEMPERATURE_MAX,
-           step=0.1,
-           style=me.Style(width=260),
-           on_value_change=on_slider_temperature,
-           value=state.model_temperature,
-         )
-         me.input(
-           value=state.model_temperature_input,
-           on_input=on_input_temperature,
-           style=me.Style(width=60),
-         )
-
-     with mex.dialog_actions():
-       me.button(
-         "Close",
-         key="dialog_show_model_settings",
-         on_click=on_close_dialog,
-       )
-
-   # Dialog for setting variables
-   with mex.dialog(state.dialog_show_prompt_variables):
-     me.text("Prompt Variables", type="headline-6")
-     if not state.prompt_variables:
-       me.text("No variables defined in prompt.", style=me.Style(width=_DIALOG_INPUT_WIDTH))
-     else:
-       with me.box(
-         style=me.Style(display="flex", justify_content="end", margin=me.Margin(bottom=15))
-       ):
-         me.button("Generate", type="flat", on_click=on_click_generate_variables)
-       variable_names = set(_parse_variables(state.prompt))
-       with me.box(style=me.Style(display="flex", flex_direction="column")):
-         for name, value in state.prompt_variables.items():
-           if name not in variable_names:
-             continue
-           me.textarea(
-             label=name,
-             value=value,
-             on_blur=on_input_variable,
-             style=me.Style(width=_DIALOG_INPUT_WIDTH),
-             key=name,
-           )
-
-     with mex.dialog_actions():
-       me.button("Close", on_click=on_close_dialog, key="dialog_show_prompt_variables")
-
-   # Dialog for showing prompt version history
-   with mex.dialog(state.dialog_show_version_history):
-     me.text("Version history", type="headline-6")
-     me.select(
-       label="Select Version",
-       options=[
-         me.SelectOption(label=f"v{prompt.version}", value=str(prompt.version))
-         for prompt in state.prompts
-       ],
-       style=me.Style(width=_DIALOG_INPUT_WIDTH),
-       on_selection_change=on_select_version,
-     )
-     with mex.dialog_actions():
-       me.button("Close", key="dialog_show_version_history", on_click=on_close_dialog)
-
-   # Dialog for generating a prompt with LLM assistance
-   # TODO: Integrate with LLM
-   with mex.dialog(state.dialog_show_generate_prompt):
-     me.text("Generate Prompt", type="headline-6")
-     me.textarea(
-       label="Describe your task",
-       value=state.prompt_gen_task_description,
-       on_blur=on_update_input,
-       key="prompt_gen_task_description",
-       style=me.Style(width=_DIALOG_INPUT_WIDTH),
-     )
-     with mex.dialog_actions():
-       me.button("Close", key="dialog_show_generate_prompt", on_click=on_close_dialog)
-       me.button("Generate", type="flat", on_click=on_click_generate_prompt)
+   dialogs.update_title()
+   dialogs.model_settings()
+   dialogs.prompt_variables()
+   dialogs.prompt_version_history()
+   dialogs.add_comparisons()
+   dialogs.generate_prompt()
+   dialogs.load_prompt()
 
    with me.box(
      style=me.Style(
@@ -226,7 +74,7 @@ def app():
          min_rows=2,
          placeholder="Optional tone and style instructions for the model",
          value=state.system_instructions,
-         on_blur=on_update_input,
+         on_blur=handlers.on_update_input,
          style=_STYLE_INVISIBLE_TEXTAREA,
          key="system_instructions",
        )
@@ -257,7 +105,7 @@ def app():
          "Generate prompt",
          disabled=bool(state.prompt),
          style=me.Style(background="#EBF1FD", border_radius="10"),
-         on_click=on_open_dialog,
+         on_click=handlers.on_open_dialog,
          key="dialog_show_generate_prompt",
        )
 
@@ -270,33 +118,16 @@ def app():
        me.markdown(_INSTRUCTIONS)
      else:
        # Render eval page
-       with me.box(style=me.Style(grid_column="1 / -2")):
-         prompt = _find_prompt(state.prompts, state.version)
+       with me.box(style=me.Style(grid_column="1 / -2", overflow_y="scroll")):
+         prompt = find_prompt(state.prompts, state.version)
          if prompt:
-           mex.prompt_eval_table(prompt, on_select_rating=on_select_rating)
-
-   with mex.icon_sidebar():
-     if state.mode == "Prompt":
-       mex.icon_menu_item(
-         icon="tune",
-         tooltip="Model settings",
-         key="dialog_show_model_settings",
-         on_click=on_open_dialog,
-       )
-       mex.icon_menu_item(
-         icon="data_object",
-         tooltip="Set variables",
-         key="dialog_show_prompt_variables",
-         on_click=on_open_dialog,
-       )
-       mex.icon_menu_item(
-         icon="history",
-         tooltip="Version history",
-         key="dialog_show_version_history",
-         on_click=on_open_dialog,
-       )
-     if state.mode == "Prompt":
-       mex.icon_menu_item(icon="code", tooltip="Get code")
+           with me.box(style=me.Style(margin=me.Margin.all(15))):
+             compare_prompts = [
+               prompt for prompt in state.prompts if prompt.version in state.comparisons
+             ]
+             prompt_eval_table([prompt] + compare_prompts, on_select_rating=on_select_rating)
+
+   tool_sidebar()
 
 
  # Event handlers
@@ -323,7 +154,7 @@ def on_click_run(e: me.ClickEvent):
    else:
      current_prompt_meta = Prompt()
 
-   variable_names = set(_parse_variables(state.prompt))
+   variable_names = set(parse_variables(state.prompt))
    prompt_variables = {
      name: value for name, value in state.prompt_variables.items() if name in variable_names
    }
@@ -369,94 +200,12 @@ def on_update_prompt(e: me.InputBlurEvent):
    """
    state = me.state(State)
    state.prompt = e.value.strip()
-   variable_names = _parse_variables(state.prompt)
+   variable_names = parse_variables(state.prompt)
    for variable_name in variable_names:
      if variable_name not in state.prompt_variables:
        state.prompt_variables[variable_name] = ""
 
 
- def on_save_title(e: me.InputBlurEvent):
-   """Saves the title and closes the dialog."""
-   state = me.state(State)
-   if state.temp_title:
-     state.title = state.temp_title
-   state.dialog_show_title = False
-
-
- def on_slider_temperature(e: me.SliderValueChangeEvent):
-   """Adjust temperature slider value."""
-   state = me.state(State)
-   state.model_temperature = float(e.value)
-   state.model_temperature_input = str(state.model_temperature)
-
-
- def on_input_temperature(e: me.InputEvent):
-   """Adjust temperature slider value by input."""
-   state = me.state(State)
-   try:
-     model_temperature = float(e.value)
-     if _MODEL_TEMPERATURE_MIN <= model_temperature <= _MODEL_TEMPERATURE_MAX:
-       state.model_temperature = model_temperature
-   except ValueError:
-     pass
-
-
- def on_input_variable(e: me.InputBlurEvent):
-   """Generic event to save input variables.
-
-   TODO: Probably should prefix the key to avoid key collisions.
-   """
-   state = me.state(State)
-   state.prompt_variables[e.key] = e.value
-
-
- def on_select_version(e: me.SelectSelectionChangeEvent):
-   """Update UI to show the selected prompt version and close the dialog."""
-   state = me.state(State)
-   selected_version = int(e.value)
-   prompt = _find_prompt(state.prompts, selected_version)
-   if prompt != Prompt():
-     state.prompt = prompt.prompt
-     state.version = prompt.version
-     state.system_instructions = prompt.system_instructions
-     state.model = prompt.model
-     state.model_temperature = prompt.model_temperature
-     state.model_temperature_input = str(prompt.model_temperature)
-     # If there is an existing response, select the most recent one.
-     if prompt.responses:
-       state.prompt_variables = prompt.responses[-1]["variables"]
-       state.response = prompt.responses[-1]["output"]
-     else:
-       state.response = ""
-   state.dialog_show_version_history = False
-
-
- def on_click_generate_prompt(e: me.ClickEvent):
-   """Generates an improved prompt based on the given task description and closes dialog."""
-   state = me.state(State)
-   state.prompt = llm.generate_prompt(
-     state.prompt_gen_task_description, state.model, state.model_temperature
-   )
-   variable_names = _parse_variables(state.prompt)
-   for variable_name in variable_names:
-     if variable_name not in state.prompt_variables:
-       state.prompt_variables[variable_name] = ""
-
-   state.dialog_show_generate_prompt = False
-
-
- def on_click_generate_variables(e: me.ClickEvent):
-   """Generates values for the given empty variables."""
-   state = me.state(State)
-   variable_names = set(_parse_variables(state.prompt))
-   generated_variables = llm.generate_variables(
-     state.prompt, variable_names, state.model, state.model_temperature
-   )
-   for name in state.prompt_variables:
-     if name in variable_names and name in generated_variables:
-       state.prompt_variables[name] = generated_variables[name]
-
-
  def on_click_mode_toggle(e: me.ClickEvent):
    """Toggle between Prompt and Eval modes."""
    state = me.state(State)
@@ -466,47 +215,10 @@ def on_click_mode_toggle(e: me.ClickEvent):
  def on_select_rating(e: me.SelectSelectionChangeEvent):
    state = me.state(State)
    _, prompt_version, response_index = e.key.split("_")
-   prompt = _find_prompt(state.prompts, int(prompt_version))
+   prompt = find_prompt(state.prompts, int(prompt_version))
    prompt.responses[int(response_index)]["rating"] = e.value
 
 
- # Generic event handlers
-
-
- def on_open_dialog(e: me.ClickEvent):
-   """Generic event to open a dialog."""
-   state = me.state(State)
-   setattr(state, e.key, True)
-
-
- def on_close_dialog(e: me.ClickEvent):
-   """Generic event to close a dialog."""
-   state = me.state(State)
-   setattr(state, e.key, False)
-
-
- def on_update_input(e: me.InputBlurEvent | me.SelectSelectionChangeEvent):
-   """Generic event to update input/select values."""
-   state = me.state(State)
-   setattr(state, e.key, e.value)
-
-
- # Helper functions
-
-
- def _parse_variables(prompt: str) -> list[str]:
-   return _RE_VARIABLES.findall(prompt)
-
-
- def _find_prompt(prompts: list[Prompt], version: int) -> Prompt:
-   # We don't expect too many versions, so we'll just loop through the list to find the
-   # right version.
-   for prompt in prompts:
-     if prompt.version == version:
-       return prompt
-   return Prompt()
-
-
  # Style helpers
 
  _STYLE_INVISIBLE_TEXTAREA = me.Style(
state.py ADDED
@@ -0,0 +1,57 @@
+ from dataclasses import dataclass, field
+
+ import mesop as me
+
+
+ @dataclass
+ class Prompt:
+   prompt: str = ""
+   model: str = ""
+   model_temperature: float = 0.0
+   system_instructions: str = ""
+   version: int = 0
+   variables: list[str] = field(default_factory=list)
+   # Storing the responses as a dict to workaround bug with lists
+   # of nested dataclass.
+   #
+   # Keys: output, variables, rating
+   responses: list[dict] = field(default_factory=list)
+
+
+ @me.stateclass
+ class State:
+   # Main UI variables
+   system_prompt_card_expanded: bool = False
+   title: str = "Untitled Prompt"
+   temp_title: str
+   system_instructions: str
+   prompt: str
+   response: str
+   version: int = 0
+
+   # Prompt variables
+   prompt_variables: dict[str, str]
+
+   # Model info
+   model: str = "gemini-1.5-flash"
+   model_temperature: float = 1.0
+   model_temperature_input: str = "1.0"
+
+   # Dialogs
+   dialog_show_title: bool = False
+   dialog_show_model_settings: bool = False
+   dialog_show_prompt_variables: bool = False
+   dialog_show_generate_prompt: bool = False
+   dialog_show_version_history: bool = False
+   dialog_show_add_comparison: bool = False
+   dialog_show_load: bool = False
+
+   prompts: list[Prompt]
+
+   # LLM Generate functionality
+   prompt_gen_task_description: str
+
+   # Valid modes: Prompt or Eval
+   mode: str = "Prompt"
+
+   comparisons: list[int]
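For reference, a hedged example (not part of the commit) of one `Prompt` record shaped the way `eval_table.py` and `main.py` read it, with a response dict carrying the keys noted in the comment above:

```python
# Illustrative only; response dicts use the keys: output, variables, rating.
from state import Prompt

prompt = Prompt(
  prompt="Summarize {{article}}.",
  model="gemini-1.5-flash",
  model_temperature=1.0,
  version=1,
  variables=["article"],
  responses=[{"variables": {"article": "Some text"}, "output": "A short summary.", "rating": "5"}],
)
```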
tool_sidebar.py ADDED
@@ -0,0 +1,95 @@
+ from dataclasses import asdict
+ import errno
+ import json
+ import os
+ import re
+
+ import mesop as me
+
+ import components as mex
+ import handlers
+ from constants import SAVED_PROMPT_DIRECTORY
+ from state import State
+
+
+ @me.component
+ def tool_sidebar():
+   state = me.state(State)
+   with mex.icon_sidebar():
+     if state.mode == "Prompt":
+       mex.icon_menu_item(
+         icon="tune",
+         tooltip="Model settings",
+         key="dialog_show_model_settings",
+         on_click=handlers.on_open_dialog,
+       )
+       mex.icon_menu_item(
+         icon="data_object",
+         tooltip="Set variables",
+         key="dialog_show_prompt_variables",
+         on_click=handlers.on_open_dialog,
+       )
+       mex.icon_menu_item(
+         icon="history",
+         tooltip="Version history",
+         key="dialog_show_version_history",
+         on_click=handlers.on_open_dialog,
+       )
+
+     if state.mode == "Eval":
+       mex.icon_menu_item(
+         icon="compare",
+         tooltip="Compare versions",
+         key="dialog_show_add_comparison",
+         on_click=handlers.on_open_dialog,
+       )
+
+     mex.icon_menu_item(
+       icon="upload",
+       tooltip="Load prompt data",
+       key="dialog_show_load",
+       on_click=handlers.on_open_dialog,
+     )
+
+     mex.icon_menu_item(
+       icon="download",
+       tooltip="Download prompt data",
+       key="dialog_show_download",
+       on_click=on_click_download,
+     )
+
+
+ def on_click_download(e: me.ClickEvent):
+   state = me.state(State)
+   cleaned_title = _clean_title(state.title)
+   _create_directory(SAVED_PROMPT_DIRECTORY)
+
+   with open(f"{SAVED_PROMPT_DIRECTORY}/prompt-{cleaned_title}.json", "w") as outfile:
+     output = {
+       key: value
+       for key, value in asdict(state).items()
+       if key
+       not in (
+         "temp_title",
+         "mode",
+         "comparisons",
+         "system_prompt_card_expanded",
+         "prompt_gen_task_description",
+       )
+       and not key.startswith("dialog_")
+     }
+     json.dump(output, outfile)
+
+
+ def _clean_title(title: str) -> str:
+   return re.sub(r"[^a-z0-9_]", "", title.lower().replace(" ", "_"))
+
+
+ def _create_directory(directory_path):
+   """Creates a directory if it doesn't exist."""
+   try:
+     os.makedirs(directory_path)
+     print(f"Directory '{directory_path}' created successfully.")
+   except OSError as e:
+     if e.errno != errno.EEXIST:
+       raise
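A small sketch, not part of the commit, of how the download path is derived from the prompt title; the resulting JSON (state minus the `dialog_*` flags and the transient fields filtered above) can be re-imported through the Load prompt dialog:

```python
# Illustrative only; mirrors the filename logic in on_click_download.
from tool_sidebar import _clean_title
from constants import SAVED_PROMPT_DIRECTORY

title = "My Cool Prompt!"
print(f"{SAVED_PROMPT_DIRECTORY}/prompt-{_clean_title(title)}.json")
# saved_prompts/prompt-my_cool_prompt.json
```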