import gradio as gr
import asyncio
import os
import time
from typing import List, Dict
from openai import OpenAI
from openai import AsyncOpenAI

# Upper bound on parallel subconcept lessons; also fixes how many hidden
# placeholder tabs the UI pre-creates (Gradio cannot add tabs dynamically).
MAX_SUBCONCEPTS = 25

async def call_openai_api(sn_api_key, prompt: str) -> Dict:
    """Send one async chat-completion request to SambaNova's OpenAI-compatible API.

    A fresh async client is created per call; the 3B Llama model answers
    *prompt* in a professor-explaining-to-a-student voice. Returns the raw
    completion response object.
    """
    client = AsyncOpenAI(
        base_url="https://api.sambanova.ai/v1",
        api_key=sn_api_key,
    )

    system_message = {
        "role": "system",
        "content": "You are a knowledge graph generator. Generate detailed answers to questions posed about subconcepts and give an educated response as if you were a professor explaining to a student."
    }
    user_message = {"role": "user", "content": f"{prompt}"}

    return await client.chat.completions.create(
        model="Meta-Llama-3.2-3B-Instruct",
        messages=[system_message, user_message],
    )

async def make_multiple_openai_calls(sn_api_key, prompts: List[str]) -> List[Dict]:
    """Fan out one API call per prompt concurrently; results keep prompt order."""
    pending = (call_openai_api(sn_api_key, p) for p in prompts)
    return await asyncio.gather(*pending)

def process_concept(sn_api_key, concept, num_subconcepts, progress=gr.Progress()) -> tuple:
    """Generate a complete lesson plan for *concept*.

    Flow:
      1. One sync 3B call breaks the concept into subconcepts (one per line).
      2. Parallel async 3B calls produce a detailed explanation per subconcept.
      3. One sync 8B call (longer context) summarizes everything into an intro.

    Returns a tuple shaped for the Gradio outputs: a status string, the intro
    markdown, then MAX_SUBCONCEPTS tab updates followed by MAX_SUBCONCEPTS
    tab-content updates (unused slots are hidden).
    """
    start_time = time.time()

    sn_dev_client = OpenAI(
        base_url="https://api.sambanova.ai/v1",
        api_key=sn_api_key
    )

    progress(0, "Identifying subconcepts")
    # Single API call to break down the concept into subconcepts.
    response = sn_dev_client.chat.completions.create(
        model="Meta-Llama-3.2-3B-Instruct",
        messages=[
            {
                "role": "user", 
                "content": f"""Create a set of subconcepts from this concept: {concept}. 
                Do this by breaking down the concept into multiple subconcepts with each on a new line along with their questions so it is easy to parse in the following way. Note in the example above there is no additional text except for the subconcepts and new lines. 
                
                Example (for the concept "machine learning"):

                gradient descent 
                neuron training
                loss function 
                optimization functions
                backpropagation
                """
            }
        ],
    )

    result = response.choices[0].message.content
    # Clean up the response: one subconcept per line. dict.fromkeys dedupes
    # while preserving the model's ordering (set() shuffled it run-to-run),
    # and blank lines are dropped so we never fan out an empty prompt.
    stripped = (line.strip() for line in result.strip().split('\n'))
    subconcepts = list(dict.fromkeys(line for line in stripped if line))
    num_total_subconcepts = len(subconcepts)

    progress(0.2, "Preparing subconcepts")
    # Cap the parallel fan-out at the user's requested count.
    lmt = min(num_subconcepts, num_total_subconcepts)
    subconcepts = subconcepts[:lmt]

    prompts = [
        f"Please give a detailed explanation of this subconcept: {subconcept}" for subconcept in subconcepts
    ]

    progress(0.3, f"Generating explanations for {len(subconcepts)} subconcepts in parallel")
    # asyncio.run creates and tears down the event loop for us; this handler
    # runs in a plain (non-async) worker thread, so no loop is already running.
    results = asyncio.run(make_multiple_openai_calls(sn_api_key, prompts))

    # Extract the explanation text from each completion.
    responses = [result.choices[0].message.content for result in results]

    progress(0.6, f"Summarizing explanations to create an intro for {len(subconcepts)} subconcepts")
    # Summarize results - using a synchronous call
    content_to_summarize = ""
    for subconcept, response in zip(subconcepts, responses):
        content_to_summarize += f"## {subconcept.title()}\n\n{response}\n\n---\n\n"
    
    response = sn_dev_client.chat.completions.create( 
        model="Meta-Llama-3.1-8B-Instruct", # need longer context
        messages=[
            {
                "role": "user", 
                "content": f"""Summarize the results for the {concept} by creating an introduction for the class that incorporates 
                the subconcepts: {" ".join(subconcepts)}. Here is all of the information you want to summarize:  
                
                {content_to_summarize}

                Please present this as an introduction to a class on
                {concept}. 
                """
            }
        ],
    )
    intro_summary = response.choices[0].message.content

    end_time = time.time()
    total_time = end_time - start_time

    progress(0.9, "Formatting output")
    # Format the output in Markdown. Report the number of 3B calls actually
    # made (len(subconcepts) explanations + 1 breakdown call) — the model can
    # return fewer subconcepts than requested and dedup can shrink the list.
    markdown_intro = f"# Lesson Plan: {concept.title()}\n\n"
    markdown_intro += f"**Number of LLama 3.2 3B calls made to SambaNova's API:** {len(subconcepts) + 1}\n\n"
    markdown_intro += f"**1 LLama 3.1 8B call made to SambaNova's API to summarize**\n\n"
    markdown_intro += f"**Total time taken:** {total_time:.2f} seconds\n\n"
    markdown_intro += f"\n\n---\n\n"
    markdown_intro += intro_summary

    subconcept_markdowns = []
    for subconcept, response in zip(subconcepts, responses):
        subconcept_markdowns.append(f"## {subconcept.title()}\n\n{response}\n\n") 

    progress(1.0, "Complete")

    # Update the tabs (and their contents) for each generated lesson, then
    # hide every remaining pre-built slot up to MAX_SUBCONCEPTS.
    new_tabs = []
    new_tab_contents = []
    for i, markdown in enumerate(subconcept_markdowns):
        new_tabs.append(gr.update(label=f"Lesson {i+1}: {subconcepts[i].title()}", visible=True))
        new_tab_contents.append(gr.Markdown(markdown))
    hidden = MAX_SUBCONCEPTS - len(subconcept_markdowns)
    new_tabs.extend([gr.update(visible=False) for _ in range(hidden)])
    new_tab_contents.extend([gr.update(visible=False) for _ in range(hidden)])

    return "Process complete!", markdown_intro, *new_tabs, *new_tab_contents

with gr.Blocks() as demo:
    # Header and usage instructions.
    gr.Markdown(
        """
        <h1 style="text-align: center;">Lesson Plan Generator</h1>
        <p style="text-align: center; font-size: 20px;">Ever wanted to learn something new? Struggled to break down the concept to more digestible subconcepts? In this demo, we use <a href="https://cloud.sambanova.ai">SambaNova's</a> superfast LLama 3.2 3B and LLama 3.1 8B models to summarize the concept and subconcepts as well as provide a detailed lesson for each of the subconcepts. </p>
        <p style="text-align: center; font-size: 18px;">To use this, follow the instructions below:</p>
        """,
        elem_id="header"
    )

    gr.Markdown(
        """
        <div style="margin: auto; width: 50%; text-align: left; font-size: 16px">
            <ol>
                <li>Navigate to <a href="https://cloud.sambanova.ai">https://cloud.sambanova.ai</a>, login and copy your API Key</li>
                <li>Paste it in the SambaNova API Key box below</li>
                <li>Enter a concept you are interested in (e.g. Variational Autoencoders)</li>
                <li>Choose the number of subconcepts you want to break your lessons into</li>
                <li>Click 'Generate Lesson Plan'</li>
                <li>Wait for a few seconds for multiple LLama 3B and 8B calls to finish</li>
                <li>Read through and enjoy your lesson plans</li>
            </ol>
        </div>
        """,
        elem_id="instructions"
    )
    
    with gr.Column():
        sn_api_key_input = gr.Textbox(label="Enter your SambaNova API Key (https://cloud.sambanova.ai)", type="password")

    with gr.Row():
        concept_input = gr.Textbox(label="Enter a concept", placeholder="e.g., Artificial Intelligence")
        slider = gr.Slider(minimum=1, maximum=MAX_SUBCONCEPTS, value=5, label="Number of subconcepts", step=1)
        generate_btn = gr.Button("Generate Lesson Plan", variant="primary", size="lg")
    
    with gr.Column():
        # Status display only — users should not be able to edit it.
        progress_output = gr.Textbox(label="Progress", interactive=False)
        lesson_intro = gr.Markdown(label="Lesson Intro") 

    tab_contents = []

    # Pre-create the maximum number of (hidden) lesson tabs; process_concept
    # fills and reveals as many as it generates.
    with gr.Tabs() as tabs:
        for i in range(MAX_SUBCONCEPTS):
            with gr.Tab(f"Lesson {i+1}", visible=False):
                # Plain append instead of exec()-built tab_<i> variables —
                # those names were never referenced anywhere else.
                tab_contents.append(gr.Markdown(f"This is content for Lesson {i+1}"))

    generate_btn.click(
        process_concept,
        inputs=[sn_api_key_input, concept_input, slider],
        outputs=[progress_output, lesson_intro] + [tabs.children[i] for i in range(MAX_SUBCONCEPTS)] + tab_contents
    )

if __name__ == "__main__":
    demo.launch()