multimodalart HF staff commited on
Commit
b1c0569
·
verified ·
1 Parent(s): 49d920a

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -0
app.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os

import gradio as gr

from knowledge_storm import STORMWikiRunnerArguments, STORMWikiRunner, STORMWikiLMConfigs
from knowledge_storm.lm import OpenAIModel
from knowledge_storm.rm import YouRM

# --- STORM engine configuration -------------------------------------------
# STORM lets each pipeline stage run on a different LM so cost and quality
# can be balanced per stage: a cheaper/faster model suits `conv_simulator_lm`
# (query splitting, conversational answer synthesis), while a stronger model
# suits `article_gen_lm` (verifiable, citation-bearing text).
lm_configs = STORMWikiLMConfigs()

# Sampling settings shared by every OpenAI-backed model below.
openai_kwargs = dict(
    api_key=os.getenv("OPENAI_API_KEY"),
    temperature=1.0,
    top_p=0.9,
)

gpt_35 = OpenAIModel(model='gpt-3.5-turbo', max_tokens=500, **openai_kwargs)
gpt_4 = OpenAIModel(model='gpt-4o', max_tokens=3000, **openai_kwargs)

# NOTE(review): every stage is wired to gpt_4, so `gpt_35` is currently
# unused — the guidance above suggests it was intended for the conversation
# simulator. Left as-is to preserve behavior; confirm intent before changing.
for assign_lm in (
    lm_configs.set_conv_simulator_lm,
    lm_configs.set_question_asker_lm,
    lm_configs.set_outline_gen_lm,
    lm_configs.set_article_gen_lm,
    lm_configs.set_article_polish_lm,
):
    assign_lm(gpt_4)

# See the STORMWikiRunnerArguments class for the full set of knobs.
engine_args = STORMWikiRunnerArguments("outputs")

# Retrieval module backed by the You.com search API; `k` follows the
# engine's configured search depth.
rm = YouRM(ydc_api_key=os.getenv('YDC_API_KEY'), k=engine_args.search_top_k)

runner = STORMWikiRunner(engine_args, lm_configs, rm)
def generate_article(prompt, progress=gr.Progress(track_tqdm=True)):
    """Run the full STORM pipeline for `prompt` and write results to disk.

    Executes every stage (research, outline generation, article generation,
    article polishing) via the module-level `runner`, then runs its
    post-processing and prints/records a run summary. Output artifacts land
    under the "outputs" directory configured in `engine_args`.

    Args:
        prompt: Topic string to research and write an article about.
        progress: Gradio progress tracker; the `gr.Progress(track_tqdm=True)`
            default is evaluated at definition time on purpose — gradio
            inspects it to mirror tqdm progress bars in the UI.

    Returns:
        None — results are side effects on disk (and whatever
        `runner.summary()` emits; presumably console output — TODO confirm).
    """
    runner.run(
        topic=prompt,
        do_research=True,
        do_generate_outline=True,
        do_generate_article=True,
        do_polish_article=True,
    )
    runner.post_run()
    runner.summary()