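"""Multi-agent debate inference.

Each model in model_list first answers a question independently; in every
subsequent round, gpt-3.5-turbo summarizes all agents' previous responses and
each agent produces an updated answer using that summary as advice.
"""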
import requests
import openai
import json
import time

def load_json(prompt_path, endpoint_path):
    with open(prompt_path, "r") as prompt_file:
        prompt_dict = json.load(prompt_file)

    with open(endpoint_path, "r") as endpoint_file:
        endpoint_dict = json.load(endpoint_file)

    return prompt_dict, endpoint_dict
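
# NOTE (illustrative, not taken from the repo): the expected shape of the two
# JSON files, inferred from how prompt_dict and endpoint_dict are used below.
#
# src/prompt_template.json:
#   {
#     "alpaca": {"prompt_no_input": "...{}...", "response_split": "<delimiter>"},
#     "llama":  {"prompt": "...{}...", "response_split": "<delimiter>"}
#   }
#
# src/inference_endpoint.json:
#   {"alpaca": "https://<endpoint-url>", "llama": "https://<endpoint-url>"}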

def construct_message(agents, instruction, idx):
    # With no other agents to consult, simply ask the model to double-check itself
    if len(agents) == 0:
        prompt = "Can you double check that your answer is correct? Please reiterate your answer, making sure to state your answer at the end of the response."
        return prompt

    contexts = [agent[idx]["content"] for agent in agents]

    # System prompt & user prompt for the gpt-3.5-turbo summarizer
    sys_prompt = "I want you to act as a summarizer. You can look at multiple responses and summarize the main points of them so that the meaning is not lost. Multiple responses will be given, which are responses from several different models to a single question. You should use your excellent summarizing skills to output the best summary."
    response_block = "\n".join(f"[Response {i + 1}]: {context}" for i, context in enumerate(contexts))
    summarize_prompt = f"{response_block}\n\nThese are the responses of each model to a certain question. Summarize comprehensively without compromising the meaning of each response."

    message = [
        {"role": "system", "content": sys_prompt},
        {"role": "user", "content": summarize_prompt},
    ]

    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo-16k-0613",
        messages=message,
        max_tokens=256,
        n=1
    )
    # Extract the summary text instead of embedding the raw completion object
    summary = completion["choices"][0]["message"]["content"]

    prefix_string = f"This is the summarization of recent/updated opinions from other agents: {summary}"
    prefix_string = prefix_string + "\n\nUsing this summarization carefully as additional advice, can you provide an updated answer? Make sure to state your answer at the end of the response." + instruction
    return prefix_string

def generate_question(agents, question):
    agent_contexts = [[{"model": agent, "content": question}] for agent in agents]

    content = agent_contexts[0][0]["content"]

    return agent_contexts, content

def generate_answer(model, formatted_prompt, auth_token):
    # endpoint_dict and prompt_dict are module-level globals set by Inference()
    API_URL = endpoint_dict[model]
    headers = {"Authorization": f"Bearer {auth_token}"}
    payload = {"inputs": formatted_prompt}
    try:
        resp = requests.post(API_URL, json=payload, headers=headers)
        response = resp.json()
        # HuggingFace-style inference endpoints return [{"generated_text": ...}]
        generated_text = response[0]["generated_text"]
    except Exception:
        print("retrying due to an error......")
        time.sleep(5)
        return generate_answer(model, formatted_prompt, auth_token)

    # Keep only the text after the model-specific response delimiter
    return {"model": model, "content": generated_text.split(prompt_dict[model]["response_split"])[-1]}

def prompt_formatting(model, instruction, cot):
    if model in ("alpaca", "orca"):
        prompt = prompt_dict[model]["prompt_no_input"]
    else:
        prompt = prompt_dict[model]["prompt"]

    if cot:
        instruction += " Let's think step by step."

    return {"model": model, "content": prompt.format(instruction)}

def Inference(model_list, question, API_KEY, auth_token, round, cot):
    # prompt_dict / endpoint_dict are shared with generate_answer and prompt_formatting
    global prompt_dict, endpoint_dict

    openai.api_key = API_KEY

    prompt_dict, endpoint_dict = load_json("src/prompt_template.json", "src/inference_endpoint.json")

    rounds = round

    generated_description = []

    agent_contexts, content = generate_question(agents=model_list, question=question)

    # Debate
    for debate in range(rounds + 1):
        # From round 1 on, prepend a summary of the other agents' previous answers
        if debate != 0:
            message = construct_message(agent_contexts, content, 2 * debate - 1)
            for i in range(len(agent_contexts)):
                agent_contexts[i].append(prompt_formatting(agent_contexts[i][-1]["model"], message, cot))

        # Generate a new response based on the summarized previous responses
        for agent_context in agent_contexts:
            if debate == 0:
                formatted_prompt = prompt_formatting(agent_context[-1]["model"], agent_context[-1]["content"], cot)["content"]
            else:
                formatted_prompt = agent_context[-1]["content"]
            completion = generate_answer(agent_context[-1]["model"], formatted_prompt, auth_token)
            agent_context.append(completion)

    # Per-agent history with rounds == 2:
    #   [question, answer0, summary-prompt1, answer1, summary-prompt2, answer2]
    models_response = {
        model_list[0]: [agent_contexts[0][1]["content"], agent_contexts[0][3]["content"], agent_contexts[0][-1]["content"]],
        model_list[1]: [agent_contexts[1][1]["content"], agent_contexts[1][3]["content"], agent_contexts[1][-1]["content"]],
        model_list[2]: [agent_contexts[2][1]["content"], agent_contexts[2][3]["content"], agent_contexts[2][-1]["content"]]
    }
    # The summarization prompts injected before rounds 1 and 2 (assumes rounds == 2)
    response_summarization = [
        agent_contexts[0][2], agent_contexts[0][4]
    ]
    generated_description.append({"question": content, "agent_response": models_response, "summarization": response_summarization})

    return generated_description
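
# Minimal usage sketch (assumptions: the model names below are hypothetical and
# must match keys in src/prompt_template.json and src/inference_endpoint.json;
# API_KEY and auth_token are placeholders).
if __name__ == "__main__":
    description = Inference(
        model_list=["alpaca", "llama", "orca"],  # hypothetical model keys
        question="What is the capital of France?",
        API_KEY="sk-...",       # OpenAI API key (placeholder)
        auth_token="hf_...",    # inference-endpoint bearer token (placeholder)
        round=2,                # two revision rounds after the initial answers;
                                # the bookkeeping above assumes exactly 2
        cot=True,
    )
    print(json.dumps(description, indent=2))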