shubhamagarwal92 committed
Commit c0c75fc · 1 Parent(s): de4cd5c

Changes for latest gradio

Files changed (1):
  1. app.py +12 -8
app.py CHANGED
@@ -63,12 +63,6 @@ def downvote_last_response(state, request: gr.Request):
     return ""


-# example_abstract = """We explore the zero-shot abilities of recent large language models (LLMs) for the task of writing the literature review of a scientific research paper conditioned on its abstract and the content of related papers.
-# We propose and examine a novel strategy for literature review generation with an LLM in which we first generate a plan for the review, and then use it to generate the actual text. While modern LLMs can easily be trained or prompted to
-# condition on all abstracts of papers to be cited to generate a literature review without such intermediate plans, our empirical study shows that these intermediate plans improve the quality of generated literature reviews over vanilla
-# zero-shot generation. Furthermore, we also create a new test corpus consisting of recent arXiv papers (with full content) posted after both open-sourced and closed-sourced LLMs that were used in our study were released. This allows us
-# to ensure that our zero-shot experiments do not suffer from test set contamination.
-# """

 example_abstract = """We explore the zero-shot abilities of recent large language models (LLMs) for the task of writing the literature review of a scientific research paper conditioned on its abstract and the content of related papers."""

@@ -102,6 +96,16 @@ The service is a research preview intended for non-commercial use only, subject

 block_css = """

+h1 {
+    text-align: center;
+    display:block;
+}
+
+h2 {
+    text-align: center;
+    display:block;
+}
+
 #buttons button {
     min-width: min(120px,100%);
 }
@@ -192,7 +196,7 @@ def format_results_into_markdown(recommendations):
     for index, r in enumerate(recommendations):
         # hub_paper_url = f"https://huggingface.co/papers/{r['externalIds']['ArXiv']}"
         # comment += f"* [{r['title']}]({hub_paper_url}) ({r['year']})\n"
-        comment += f"[{index+1}] [{r['title']}]({r['url']}) ({r['year']}) Cited by {r['citationCount']}\n"
+        comment += f"[{index+1}] [{r['title']}]({r['url']}) ({r['year']}) Cited by {r['citationCount']} <br>"
     return comment

 def find_basis_paper(query, num_papers_api=20):
@@ -487,7 +491,7 @@ class GradioChatApp:
         llm_rerank = gr.Radio(choices=["True", "False"], value="True", interactive=True, label="LLM Re-rank (May override sorting)")
         with gr.Row():
             temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.2, step=0.1, interactive=True, label="Temperature", scale=1)
-            max_tokens = gr.Slider(minimum=0, maximum=3000, value=800, step=64, interactive=True, label="Max output tokens", scale=2)
+            max_tokens = gr.Slider(minimum=0, maximum=3000, value=500, step=64, interactive=True, label="Max output tokens", scale=2)
         display_1 = gr.Markdown(value=f"Retrieved papers", label="Retrieved papers!", elem_id="display_mrkdwn") #, visible=True)
         # with gr.Accordion("Generation Parameters", open=False) as parameter_row:
         # top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Top P")
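For context, a minimal sketch of how the changed pieces typically fit together in a recent Gradio app. The wiring below is an assumption for illustration, not code taken verbatim from app.py: block_css is normally passed to gr.Blocks(css=...), which is why the new h1/h2 rules center the page headings, and the retrieved-paper list is rendered through gr.Markdown, where a trailing "\n" in the f-string is folded into the same paragraph by the Markdown renderer while an explicit "<br>" forces a line break.

# Minimal sketch (assumed wiring, not the actual app.py): how the new CSS and the
# "<br>"-terminated Markdown lines are typically consumed by a recent Gradio build.
import gradio as gr

block_css = """
h1 { text-align: center; display: block; }
h2 { text-align: center; display: block; }
#buttons button { min-width: min(120px,100%); }
"""

def format_results_into_markdown(recommendations):
    # Hypothetical stand-in for the function touched in this commit.
    comment = "Recommended papers:\n"
    for index, r in enumerate(recommendations):
        # "<br>" renders as a line break inside gr.Markdown; a bare "\n" would be
        # collapsed into the same paragraph by the Markdown renderer.
        comment += f"[{index+1}] [{r['title']}]({r['url']}) ({r['year']}) Cited by {r['citationCount']} <br>"
    return comment

with gr.Blocks(css=block_css) as demo:  # custom CSS is applied at the Blocks level
    gr.Markdown("# Literature Review Demo")  # centered by the new h1 rule
    display_1 = gr.Markdown(value="Retrieved papers", elem_id="display_mrkdwn")

if __name__ == "__main__":
    demo.launch()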