Richard committed on
Commit
56c5f7a
·
1 Parent(s): c26e218

Fix bug with system instructions not being passed

Browse files
Files changed (2) hide show
  1. llm.py +11 -6
  2. main.py +6 -3
llm.py CHANGED
@@ -38,9 +38,12 @@ Again, please generate outputs for these placeholders: {placeholders}
38
  """.strip()
39
 
40
 
41
- def _make_model(model_name: str, temperature: float) -> genai.GenerativeModel:
 
 
42
  return genai.GenerativeModel(
43
  model_name,
 
44
  generation_config={
45
  "temperature": temperature,
46
  "top_p": 0.95,
@@ -51,7 +54,7 @@ def _make_model(model_name: str, temperature: float) -> genai.GenerativeModel:
51
 
52
 
53
  def generate_prompt(task_description: str, model_name: str, temperature: float) -> str:
54
- model = _make_model(model_name, temperature)
55
  prompt = _GENERATE_PROMPT.format(task=task_description)
56
  return model.generate_content(prompt).text
57
 
@@ -59,7 +62,7 @@ def generate_prompt(task_description: str, model_name: str, temperature: float)
59
  def generate_variables(
60
  prompt: str, variable_names: list[str], model_name: str, temperature: float
61
  ) -> dict[str, str]:
62
- model = _make_model(model_name, temperature)
63
  output = (
64
  model.generate_content(
65
  _GENERATE_VARIABLES_PROMPT.format(placeholders=", ".join(variable_names))
@@ -70,6 +73,8 @@ def generate_variables(
70
  return json.loads(output)
71
 
72
 
73
- def run_prompt(prompt_with_variables: str, model_name: str, temperature: float) -> str:
74
- model = _make_model(model_name, temperature)
75
- return model.generate_content(prompt_with_variables).text
 
 
 
38
  """.strip()
39
 
40
 
41
+ def _make_model(
42
+ model_name: str, system_instruction: str = "", temperature: float = 1.0
43
+ ) -> genai.GenerativeModel:
44
  return genai.GenerativeModel(
45
  model_name,
46
+ system_instruction=system_instruction,
47
  generation_config={
48
  "temperature": temperature,
49
  "top_p": 0.95,
 
54
 
55
 
56
  def generate_prompt(task_description: str, model_name: str, temperature: float) -> str:
57
+ model = _make_model(model_name, temperature=temperature)
58
  prompt = _GENERATE_PROMPT.format(task=task_description)
59
  return model.generate_content(prompt).text
60
 
 
62
  def generate_variables(
63
  prompt: str, variable_names: list[str], model_name: str, temperature: float
64
  ) -> dict[str, str]:
65
+ model = _make_model(model_name, temperature=temperature)
66
  output = (
67
  model.generate_content(
68
  _GENERATE_VARIABLES_PROMPT.format(placeholders=", ".join(variable_names))
 
73
  return json.loads(output)
74
 
75
 
76
+ def run_prompt(
77
+ prompt_with_variables: str, system_instruction: str, model_name: str, temperature: float
78
+ ) -> str:
79
+ model = _make_model(model_name, temperature=temperature, system_instruction=system_instruction)
80
+ return model.generate_content(prompt_with_variables, request_options={"timeout": 120}).text
main.py CHANGED
@@ -163,7 +163,6 @@ def on_click_system_instructions_header(e: me.ClickEvent):
163
 
164
  def on_click_eval_run(e: me.ClickEvent):
165
  state = me.state(State)
166
- print(e.key)
167
  _, prompt_version, response_index, selected_prompt_response_index = e.key.split("_")
168
  prompt = find_prompt(state.prompts, int(prompt_version))
169
  selected_prompt = find_prompt(state.prompts, state.version)
@@ -183,7 +182,9 @@ def on_click_eval_run(e: me.ClickEvent):
183
  prompt_text = prompt.prompt
184
  for name, value in response["variables"].items():
185
  prompt_text = prompt_text.replace("{{" + name + "}}", value)
186
- response["output"] = llm.run_prompt(prompt_text, prompt.model, prompt.model_temperature)
 
 
187
 
188
 
189
  def on_click_run(e: me.ClickEvent):
@@ -228,7 +229,9 @@ def on_click_run(e: me.ClickEvent):
228
  prompt = state.prompt
229
  for name, value in prompt_variables.items():
230
  prompt = prompt.replace("{{" + name + "}}", value)
231
- state.response = llm.run_prompt(prompt, state.model, state.model_temperature)
 
 
232
  state.prompts[-1].responses.append(dict(output=state.response, variables=prompt_variables))
233
 
234
 
 
163
 
164
  def on_click_eval_run(e: me.ClickEvent):
165
  state = me.state(State)
 
166
  _, prompt_version, response_index, selected_prompt_response_index = e.key.split("_")
167
  prompt = find_prompt(state.prompts, int(prompt_version))
168
  selected_prompt = find_prompt(state.prompts, state.version)
 
182
  prompt_text = prompt.prompt
183
  for name, value in response["variables"].items():
184
  prompt_text = prompt_text.replace("{{" + name + "}}", value)
185
+ response["output"] = llm.run_prompt(
186
+ prompt_text, prompt.system_instructions, prompt.model, prompt.model_temperature
187
+ )
188
 
189
 
190
  def on_click_run(e: me.ClickEvent):
 
229
  prompt = state.prompt
230
  for name, value in prompt_variables.items():
231
  prompt = prompt.replace("{{" + name + "}}", value)
232
+ state.response = llm.run_prompt(
233
+ prompt, state.system_instructions, state.model, state.model_temperature
234
+ )
235
  state.prompts[-1].responses.append(dict(output=state.response, variables=prompt_variables))
236
 
237