Spaces:
Running
Running
File size: 5,554 Bytes
193db9d 0bab47c 193db9d 1eeda1d 193db9d 0bab47c 193db9d 0bab47c 193db9d 0bab47c 193db9d 0bab47c 193db9d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 |
# %%
from .structs import (
Buzzer,
BuzzerMethod,
CallType,
InputField,
ModelStep,
OutputField,
TossupWorkflow,
Workflow,
)
# Default system prompt for the simple single-step tossup agent: asks the model
# for a short guess plus a calibrated confidence score.
INITIAL_SYS_PROMPT = """You are a helpful performant question answering bot.
Given a question clue, output your most likely guess in a couple words with a calibrated confidence for the guess.
"""
def create_empty_bonus_workflow():
    """Build a blank bonus-round workflow: leadin/part inputs, unmapped outputs, no steps."""
    unmapped_outputs = {"answer": None, "confidence": None, "explanation": None}
    return Workflow(inputs=["leadin", "part"], outputs=unmapped_outputs, steps={})
def create_empty_tossup_workflow():
    """Build a blank tossup workflow with a single question-text input and no steps."""
    unmapped_outputs = {"answer": None, "confidence": None}
    return TossupWorkflow(inputs=["question_text"], outputs=unmapped_outputs, steps={})
def create_first_step_input_fields() -> list[InputField]:
    """Return the default input fields for the first step of a tossup workflow."""
    question_field = InputField(
        name="question",
        description="The question text progressively revealed to the agent so far.",
        variable="question_text",
    )
    return [question_field]
def create_empty_input_field() -> list[InputField]:
    """Return a single unnamed input field bound to the question-text variable."""
    blank_field = InputField(name="", description="", variable="question_text")
    return [blank_field]
def create_quizbowl_simple_step_initial_setup():
    """Build the initial "simple step" template with blank model/provider fields.

    Uses INITIAL_SYS_PROMPT and wires a single question input to answer/confidence
    outputs; the caller is expected to fill in model and provider.
    """
    inputs = [
        InputField(name="question", description="The question to answer", variable="question"),
    ]
    outputs = [
        OutputField(name="answer", description="The most likely answer", type="str"),
        OutputField(name="confidence", description="The confidence of the answer", type="float"),
    ]
    return ModelStep(
        id="simple_step",
        name="Quizbowl Simple Step",
        model="",
        provider="",
        temperature=0.7,
        call_type="llm",
        system_prompt=INITIAL_SYS_PROMPT,
        input_fields=inputs,
        output_fields=outputs,
    )
def create_new_llm_step(step_id: str, name: str) -> ModelStep:
    """Create a fresh LLM step with default model settings and blank prompt/fields.

    Args:
        step_id: Identifier assigned to the new step.
        name: Display name for the new step.
    """
    step = ModelStep(
        id=step_id,
        name=name,
        model="gpt-4o",
        provider="OpenAI",
        call_type="llm",
        temperature=0.7,
        system_prompt="",
        input_fields=create_empty_input_field(),
        output_fields=[OutputField(name="", description="")],
    )
    return step
def create_first_llm_step() -> ModelStep:
    """Create the initial LLM step (id "A") for a new tossup workflow.

    Returns:
        ModelStep: A default gpt-4o step whose input fields are the standard
        first-step question fields and whose output field is left blank for
        the user to configure.
    """
    return ModelStep(
        id="A",
        name="",
        model="gpt-4o",
        provider="OpenAI",
        call_type="llm",
        temperature=0.7,
        system_prompt="",
        # BUG FIX: create_first_step_input_fields() already returns a
        # list[InputField]; the previous code wrapped it in another list,
        # producing list[list[InputField]]. Sibling create_new_llm_step
        # passes its helper's return value unwrapped — do the same here.
        input_fields=create_first_step_input_fields(),
        output_fields=[OutputField(name="", description="")],
    )
def create_simple_qb_tossup_workflow():
    """Build a one-step tossup workflow backed by gpt-4o-mini.

    The single step "A" reads the revealed question text and emits an answer
    plus a confidence; the buzzer fires when confidence reaches 0.75.
    """
    answer_field = OutputField(
        name="answer",
        description="The best guess at the answer to the question",
        type="str",
    )
    confidence_field = OutputField(
        name="confidence",
        description="The confidence in the answer, ranging from 0 to 1 in increments of 0.05.",
        type="float",
    )
    tossup_step = ModelStep(
        id="A",
        name="Tossup Agent",
        model="gpt-4o-mini",
        provider="OpenAI",
        call_type="llm",
        temperature=0.3,
        system_prompt="You are a helpful assistant that can answer questions.",
        input_fields=[InputField(name="question", description="The question text", variable="question_text")],
        output_fields=[answer_field, confidence_field],
    )
    buzzer = Buzzer(
        confidence_threshold=0.75,
        prob_threshold=None,
        method=BuzzerMethod.AND,
    )
    return TossupWorkflow(
        inputs=["question_text"],
        outputs={"answer": "A.answer", "confidence": "A.confidence"},
        steps={"A": tossup_step},
        buzzer=buzzer,
    )
# System prompt for the default bonus-round agent; instructs the model to emit
# ANSWER / CONFIDENCE / EXPLANATION lines that downstream parsing presumably
# relies on — do not reword without checking the response parser.
BONUS_SYS_PROMPT = """You are a quizbowl player answering bonus questions. For each part:
1. Read the leadin and part carefully
2. Provide a concise answer
3. Rate your confidence (0-1)
4. Explain your reasoning
Format your response as:
ANSWER: <your answer>
CONFIDENCE: <0-1>
EXPLANATION: <your reasoning>"""
def create_simple_qb_bonus_workflow() -> Workflow:
    """Create a simple model step for bonus questions."""
    leadin_field = InputField(
        name="question_leadin",
        description="The leadin text for the bonus question",
        variable="leadin",
    )
    part_field = InputField(
        name="question_part",
        description="The specific part text to answer",
        variable="part",
    )
    bonus_step = ModelStep(
        id="A",
        name="Bonus Agent",
        model="gpt-4o-mini",
        provider="OpenAI",
        temperature=0.3,
        call_type=CallType.LLM,
        system_prompt=BONUS_SYS_PROMPT,
        input_fields=[leadin_field, part_field],
        output_fields=[
            OutputField(name="answer", description="The predicted answer", type="str"),
            OutputField(name="confidence", description="Confidence in the answer (0-1)", type="float"),
            OutputField(name="explanation", description="Short explanation for the answer", type="str"),
        ],
    )
    return Workflow(
        inputs=["leadin", "part"],
        outputs={"answer": "A.answer", "confidence": "A.confidence", "explanation": "A.explanation"},
        steps={"A": bonus_step},
    )
|