Richard committed
Commit 4be2fb0 · 1 Parent(s): 18f7b1e

Add initial llm integration

Files changed (5)
  1. .gitignore +3 -0
  2. components/table.py +7 -4
  3. llm.py +50 -0
  4. main.py +22 -13
  5. requirements.txt +1 -0
.gitignore CHANGED
@@ -1,3 +1,6 @@
 # Python
 __pycache__
 .pytest_cache
+
+# Mesop
+.env
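
The ignored `.env` file presumably holds the Gemini API key that the new `llm.py` reads via `os.getenv("GOOGLE_API_KEY")`; the `# Mesop` grouping suggests Mesop loads it automatically. A minimal sketch with a placeholder value:

```
# .env (kept out of version control by this commit)
GOOGLE_API_KEY=your-api-key-here
```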
components/table.py CHANGED
@@ -54,13 +54,13 @@ def prompt_eval_table(prompt):
       me.text(header_text)

   # Render the data rows by going through the prompt responses.
-  for index, example in enumerate(prompt.responses):
+  for row_index, example in enumerate(prompt.responses):
     content_row = (
-      [index]
+      [row_index]
       + [example["variables"][v] for v in prompt.variables]
       + [example["output"], example.get("rating", "")]
     )
-    for row in content_row:
+    for col_index, row in enumerate(content_row):
       with me.box(
         style=me.Style(
           background="#fff",
@@ -69,4 +69,7 @@ def prompt_eval_table(prompt):
           padding=me.Padding.all(10),
         )
       ):
-        me.text(row)
+        if col_index == 0 or not row:
+          me.text(row)
+        else:
+          me.markdown(row)
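
To see what the reworked loop produces, here is a standalone sketch of one rendered row; the data is hypothetical, only the shapes match the code above:

```python
# Hypothetical example data mirroring a prompt.responses entry.
prompt_variables = ["EMAIL"]  # stand-in for prompt.variables
example = {"variables": {"EMAIL": "hi@example.com"}, "output": "**Hello!**", "rating": "3"}

row_index = 0
content_row = (
  [row_index]
  + [example["variables"][v] for v in prompt_variables]
  + [example["output"], example.get("rating", "")]
)
print(content_row)  # [0, 'hi@example.com', '**Hello!**', '3']
# Per the diff: column 0 (the index) and empty cells go through me.text,
# everything else through me.markdown, so LLM output keeps its formatting.
```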
llm.py ADDED
@@ -0,0 +1,50 @@
+import os
+
+import google.generativeai as genai
+
+
+GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
+
+genai.configure(api_key=GOOGLE_API_KEY)
+
+# TODO: Improve on this prompt. Just adding something simple for testing.
+_GENERATE_PROMPT = """
+Write a detailed prompt to help complete the task between the <task></task> block.
+
+<task>
+{task}
+</task>
+
+For custom user input, you can leave placeholder variables. For example, if you have a
+variable named EMAIL, it would look like {{{{EMAIL}}}} in the resulting prompt.
+""".strip()
+
+
+def _make_model(model_name: str, temperature: float) -> genai.GenerativeModel:
+  return genai.GenerativeModel(
+    model_name,
+    generation_config={
+      "temperature": temperature,
+      "top_p": 0.95,
+      "top_k": 64,
+      "max_output_tokens": 16384,
+    },
+  )
+
+
+def generate_prompt(task_description: str, model_name: str, temperature: float) -> str:
+  model = _make_model(model_name, temperature)
+  prompt = _GENERATE_PROMPT.format(task=task_description)
+  return model.generate_content(prompt).text
+
+
+def generate_variables(
+  prompt: str, variables: dict[str, str], model_name: str, temperature: float
+) -> dict[str, str]:
+  # model = _make_model(model_name, temperature)
+  pass
+
+
+def run_prompt(prompt_with_variables: str, model_name: str, temperature: float) -> str:
+  model = _make_model(model_name, temperature)
+  return model.generate_content(prompt_with_variables).text
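
A quick way to exercise the new module from a Python shell; the model name `gemini-1.5-flash` and the task text are illustrative assumptions, and `GOOGLE_API_KEY` must be set in the environment:

```python
import llm

# Generate a reusable prompt template for a task (may contain {{VARIABLE}} slots).
template = llm.generate_prompt(
  "Write a polite reply to a customer email", model_name="gemini-1.5-flash", temperature=1.0
)

# Fill in a variable and run it, the same way main.py's on_click_run does
# (assuming the generated template actually contains an {{EMAIL}} placeholder).
filled = template.replace("{{EMAIL}}", "Hi, my order arrived damaged.")
print(llm.run_prompt(filled, model_name="gemini-1.5-flash", temperature=1.0))
```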
main.py CHANGED
@@ -4,6 +4,7 @@ import re
 import mesop as me

 import components as mex
+import llm

 _DIALOG_INPUT_WIDTH = 350

@@ -258,12 +259,12 @@ def app():
         key="dialog_show_generate_prompt",
       )

-      with me.box(style=me.Style(padding=me.Padding.all(15))):
+      with me.box(style=me.Style(padding=me.Padding.all(15), overflow_y="scroll")):
         if state.response:
-          with mex.card(title="Response", style=me.Style(height="100%")):
+          with mex.card(title="Response", style=me.Style(overflow_y="hidden")):
             me.markdown(state.response)
         else:
-          with mex.card(title="Prompt Tuner Instructions", style=me.Style(height="100%")):
+          with mex.card(title="Prompt Tuner Instructions"):
             me.markdown(_INSTRUCTIONS)
   else:
     # Render eval page
@@ -306,6 +307,13 @@ def on_click_system_instructions_header(e: me.ClickEvent):


 def on_click_run(e: me.ClickEvent):
+  """Runs the prompt with the given variables.
+
+  A new version of the prompt will be created if the prompt, system instructions, or
+  model settings have changed.
+
+  A new response will be added if the variables have been updated.
+  """
   state = me.state(State)
   num_versions = len(state.prompts)
   if state.version:
@@ -314,7 +322,9 @@ def on_click_run(e: me.ClickEvent):
     current_prompt_meta = Prompt()

   variable_names = set(_parse_variables(state.prompt))
-  prompt_variables = {k: v for k, v in state.prompt_variables.items() if k in variable_names}
+  prompt_variables = {
+    name: value for name, value in state.prompt_variables.items() if name in variable_names
+  }

   if (
     current_prompt_meta.prompt != state.prompt
@@ -336,9 +346,9 @@ def on_click_run(e: me.ClickEvent):
     state.version = new_version

   prompt = state.prompt
-  for k, v in prompt_variables.items():
-    prompt = prompt.replace("{{" + k + "}}", v)
-  state.response = "Version v" + str(state.version) + "\n\n" + prompt
+  for name, value in prompt_variables.items():
+    prompt = prompt.replace("{{" + name + "}}", value)
+  state.response = llm.run_prompt(prompt, state.model, state.model_temperature)
   state.prompts[-1].responses.append(dict(output=state.response, variables=prompt_variables))


@@ -420,12 +430,11 @@ def on_select_version(e: me.SelectSelectionChangeEvent):


 def on_click_generate_prompt(e: me.ClickEvent):
-  """Generates an improved prompt based on the given task description and closes dialog.
-
-  TODO: Implement this logic.
-  """
+  """Generates an improved prompt based on the given task description and closes dialog."""
   state = me.state(State)
-  state.prompt = state.prompt_gen_task_description + " Improve prompt stuff here"
+  state.prompt = llm.generate_prompt(
+    state.prompt_gen_task_description, state.model, state.model_temperature
+  )
   state.dialog_show_generate_prompt = False


@@ -476,7 +485,7 @@ def _parse_variables(prompt: str) -> list[str]:


 def _find_prompt(prompts: list[Prompt], version: int) -> Prompt:
-  # We don't expected too many versions, so we'll just loop through the list to find the
+  # We don't expect too many versions, so we'll just loop through the list to find the
   # right version.
   for prompt in prompts:
     if prompt.version == version:
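
The variable handling in `on_click_run` is plain string substitution over `{{NAME}}` placeholders. A minimal self-contained sketch (the regex here stands in for `_parse_variables`, whose body is not shown in this diff):

```python
import re

prompt = "Reply politely to this email: {{EMAIL}}"
state_prompt_variables = {"EMAIL": "Where is my refund?", "STALE": "dropped"}

# Keep only variables that still appear in the prompt, as the new dict
# comprehension in the diff does with _parse_variables.
variable_names = set(re.findall(r"\{\{(\w+)\}\}", prompt))
prompt_variables = {
  name: value for name, value in state_prompt_variables.items() if name in variable_names
}

for name, value in prompt_variables.items():
  prompt = prompt.replace("{{" + name + "}}", value)
print(prompt)  # Reply politely to this email: Where is my refund?
```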
requirements.txt CHANGED
@@ -1,2 +1,3 @@
 gunicorn
 mesop
+google-generativeai
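
With the SDK added, local setup would presumably follow the usual Mesop workflow; the run commands below are assumptions, only the dependency list comes from this commit:

```
pip install -r requirements.txt
mesop main.py    # local dev server; gunicorn handles production serving
```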