baconnier committed · Commit a217992 · verified · 1 Parent(s): 7ed59a1

Update app.py

Files changed (1): app.py (+21 −249)
app.py CHANGED
@@ -1,205 +1,22 @@
-import os
-import json
-import re
-from huggingface_hub import InferenceClient
 import gradio as gr
+from prompt_refiner import PromptRefiner
+from variables import models, explanation_markdown, custom_css
-from pydantic import BaseModel, Field
-from typing import Optional, Literal
-from huggingface_hub.errors import HfHubHTTPError
-
-from custom_css import custom_css
-from variables import *
-
-
-class PromptInput(BaseModel):
-    text: str = Field(..., description="The initial prompt text")
-    meta_prompt_choice: Literal["star","done","physics","morphosis", "verse", "phor","bolism","math","arpe"] = Field(..., description="Choice of meta prompt strategy")
-
-class RefinementOutput(BaseModel):
-    query_analysis: Optional[str] = None
-    initial_prompt_evaluation: Optional[str] = None
-    refined_prompt: Optional[str] = None
-    explanation_of_refinements: Optional[str] = None
-    raw_content: Optional[str] = None
-
-class PromptRefiner:
-    def __init__(self, api_token: str,meta_prompts):
-        self.client = InferenceClient(token=api_token, timeout=120)
-        '''self.meta_prompts = {
-            "morphosis": original_meta_prompt,
-            "verse": new_meta_prompt,
-            "physics": metaprompt1,
-            "bolism": loic_metaprompt,
-            "done": metadone,
-            "star": echo_prompt_refiner,
-            "math": math_meta_prompt,
-            "arpe": autoregressive_metaprompt
-        }'''
-
-        self.meta_prompts = meta_prompts
-
-    def refine_prompt(self, prompt_input: PromptInput) -> tuple:
-        try:
-            # Select meta prompt using dictionary instead of if-elif chain
-            # print(meta_prompts)
-            selected_meta_prompt = self.meta_prompts.get(
-                prompt_input.meta_prompt_choice,
-                advanced_meta_prompt
-            )
-
-            messages = [
-                {
-                    "role": "system",
-                    "content": 'You are an expert at refining and extending prompts. Given a basic prompt, provide a more relevant and detailed prompt.'
-                },
-                {
-                    "role": "user",
-                    "content": selected_meta_prompt.replace("[Insert initial prompt here]", prompt_input.text)
-                }
-            ]
-
-            response = self.client.chat_completion(
-                model=prompt_refiner_model,
-                messages=messages,
-                max_tokens=3000,
-                temperature=0.8
-            )
-
-            response_content = response.choices[0].message.content.strip()
-
-            # Parse the response
-            result = self._parse_response(response_content)
-
-            return (
-                result.get('initial_prompt_evaluation', ''),
-                result.get('refined_prompt', ''),
-                result.get('explanation_of_refinements', ''),
-                result
-            )
-
-        except HfHubHTTPError as e:
-            return (
-                "Error: Model timeout. Please try again later.",
-                "The selected model is currently experiencing high traffic.",
-                "The selected model is currently experiencing high traffic.",
-                {}
-            )
-        except Exception as e:
-            return (
-                f"Error: {str(e)}",
-                "",
-                "An unexpected error occurred.",
-                {}
-            )
-
-    def _parse_response(self, response_content: str) -> dict:
-        try:
-            # Try to find JSON in response
-            json_match = re.search(r'<json>\s*(.*?)\s*</json>', response_content, re.DOTALL)
-            if json_match:
-                json_str = json_match.group(1)
-                json_str = re.sub(r'\n\s*', ' ', json_str)
-                json_str = json_str.replace('"', '\\"')
-                json_output = json.loads(f'"{json_str}"')
-
-                if isinstance(json_output, str):
-                    json_output = json.loads(json_output)
-                output={
-                    key: value.replace('\\"', '"') if isinstance(value, str) else value
-                    for key, value in json_output.items()
-                }
-                output['response_content']=json_output
-                # Clean up JSON values
-                return output
-
-            # Fallback to regex parsing if no JSON found
-            output = {}
-            for key in ["initial_prompt_evaluation", "refined_prompt", "explanation_of_refinements"]:
-                pattern = rf'"{key}":\s*"(.*?)"(?:,|\}})'
-                match = re.search(pattern, response_content, re.DOTALL)
-                output[key] = match.group(1).replace('\\n', '\n').replace('\\"', '"') if match else ""
-            output['response_content']=response_content
-            return output
-
-        except (json.JSONDecodeError, ValueError) as e:
-            print(f"Error parsing response: {e}")
-            print(f"Raw content: {response_content}")
-            return {
-                "initial_prompt_evaluation": "Error parsing response",
-                "refined_prompt": "",
-                "explanation_of_refinements": str(e),
-                'response_content':str(e)
-            }
-
-    def apply_prompt(self, prompt: str, model: str) -> str:
-        try:
-            messages = [
-                {
-                    "role": "system",
-                    "content": """You are a markdown formatting expert. Format your responses with proper spacing and structure following these rules:
-
-1. Paragraph Spacing:
-   - Add TWO blank lines between major sections (##)
-   - Add ONE blank line between subsections (###)
-   - Add ONE blank line between paragraphs within sections
-   - Add ONE blank line before and after lists
-   - Add ONE blank line before and after code blocks
-   - Add ONE blank line before and after blockquotes
-
-2. Section Formatting:
-# Title
-
-## Major Section
-
-[blank line]
-Content paragraph 1
-[blank line]
-Content paragraph 2
-[blank line]"""
-                },
-                {
-                    "role": "user",
-                    "content": prompt
-                }
-            ]
-
-            response = self.client.chat_completion(
-                model=model,
-                messages=messages,
-                max_tokens=3000,
-                temperature=0.8,
-                stream=True  # Enable streaming in the API call
-            )
-
-            # Initialize an empty string to accumulate the response
-            full_response = ""
-
-            # Process the streaming response
-            for chunk in response:
-                if chunk.choices[0].delta.content is not None:
-                    full_response += chunk.choices[0].delta.content
-
-            # Return the complete response
-            return full_response.replace('\n\n', '\n').strip()
-
-        except Exception as e:
-            return f"Error: {str(e)}"
 
 class GradioInterface:
-    def __init__(self, prompt_refiner: PromptRefiner,custom_css):
+    def __init__(self, prompt_refiner: PromptRefiner, custom_css):
         self.prompt_refiner = prompt_refiner
-        custom_css = custom_css
         with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as self.interface:
             with gr.Column(elem_classes=["container", "title-container"]):
                 gr.Markdown("# PROMPT++")
                 gr.Markdown("### Automating Prompt Engineering by Refining your Prompts")
                 gr.Markdown("Learn how to generate an improved version of your prompts.")
+                gr.HTML(
+                    "<p style='text-align: center; color:orange;'>⚠ This space is in progress, and we're actively working on it, so you might find some bugs! Please report any issues you have in the Community tab to help us make it better for all.</p>"
+                )
 
             with gr.Column(elem_classes=["container", "input-container"]):
                 prompt_text = gr.Textbox(
                     label="Type your prompt (or let it empty to see metaprompt)",
-                    # elem_classes="no-background",
-                    #elem_classes="container2",
                     lines=5
                 )
                 meta_prompt_choice = gr.Radio(
@@ -207,11 +24,9 @@ class GradioInterface:
                     label="Choose Meta Prompt",
                     value="star",
                     elem_classes=["no-background", "radio-group"]
-                    # elem_classes=[ "radio-group"]
                 )
                 refine_button = gr.Button("Refine Prompt")
 
-            # Option 1: Put Examples here (before Meta Prompt explanation)
             with gr.Row(elem_classes=["container2"]):
                 with gr.Accordion("Examples", open=False):
                     gr.Examples(
@@ -233,42 +48,31 @@
                 with gr.Accordion("Meta Prompt explanation", open=False):
                     gr.Markdown(explanation_markdown)
 
-
-
-            # Option 2: Or put Examples here (after the button)
-            # with gr.Accordion("Examples", open=False):
-            #     gr.Examples(...)
-
             with gr.Column(elem_classes=["container", "analysis-container"]):
                 gr.Markdown(' ')
                 gr.Markdown("### Initial prompt analysis")
                 analysis_evaluation = gr.Markdown()
                 gr.Markdown("### Refined Prompt")
                 refined_prompt = gr.Textbox(
-                    label="Refined Prompt",
-                    interactive=True,
-                    show_label=True,  # Must be True for copy button to show
-                    show_copy_button=True,  # Adds the copy button
-                    # elem_classes="no-background"
-                )
+                    label="Refined Prompt",
+                    interactive=True,
+                    show_label=True,
+                    show_copy_button=True,
+                )
                 gr.Markdown("### Explanation of Refinements")
                 explanation_of_refinements = gr.Markdown()
 
-
            with gr.Column(elem_classes=["container", "model-container"]):
-                # gr.Markdown("## See MetaPrompt Impact")
                with gr.Row():
                    apply_model = gr.Dropdown(models,
-                        value="meta-llama/Llama-3.1-8B-Instruct",
-                        label="Choose the Model",
-                        container=False,  # This removes the container around the dropdown
-                        scale=1,  # Controls the width relative to other components
-                        min_width=300  # Sets minimum width in pixels
-                        # elem_classes="no-background"
-                    )
+                        value="meta-llama/Llama-3.1-8B-Instruct",
+                        label="Choose the Model",
+                        container=False,
+                        scale=1,
+                        min_width=300
+                    )
                    apply_button = gr.Button("Apply MetaPrompt")
 
-            # with gr.Column(elem_classes=["container", "results-container"]):
            gr.Markdown("### Prompts on choosen model")
            with gr.Tabs():
                with gr.TabItem("Original Prompt Output"):
@@ -284,21 +88,15 @@ class GradioInterface:
                outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
            )
 
-            # In the __init__ method of GradioInterface class:
            apply_button.click(
                fn=self.apply_prompts,
                inputs=[prompt_text, refined_prompt, apply_model],
                outputs=[original_output, refined_output],
-                api_name="apply_prompts"  # Optional: adds API endpoint
-            )
-            gr.HTML(
-                "<p style='text-align: center; color:orange;'>⚠ This space is in progress, and we're actively working on it, so you might find some bugs! Please report any issues you have in the Community tab to help us make it better for all.</p>"
+                api_name="apply_prompts"
            )
+
    def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
-        input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
-        # Since result is a tuple with 4 elements based on the return value of prompt_refiner.refine_prompt
-        initial_prompt_evaluation, refined_prompt, explanation_refinements, full_response = self.prompt_refiner.refine_prompt(input_data)
-
+        initial_prompt_evaluation, refined_prompt, explanation_refinements, full_response = self.prompt_refiner.refine_prompt(prompt, meta_prompt_choice)
        analysis_evaluation = f"\n\n{initial_prompt_evaluation}"
        return (
            analysis_evaluation,
@@ -316,30 +114,4 @@ class GradioInterface:
            return f"Error: {str(e)}", f"Error: {str(e)}"
 
    def launch(self, share=False):
-        self.interface.launch(share=share)
-
-
-#explanation_markdown = "".join([f"- **{key}**: {value}\n" for key, value in metaprompt_explanations.items()])
-'''
-meta_info=""
-api_token = os.getenv('HF_API_TOKEN')
-if not api_token:
-    raise ValueError("HF_API_TOKEN not found in environment variables")
-
-metadone = os.getenv('metadone')
-prompt_refiner_model = os.getenv('prompt_refiner_model')
-echo_prompt_refiner = os.getenv('echo_prompt_refiner')
-metaprompt1 = os.getenv('metaprompt1')
-loic_metaprompt = os.getenv('loic_metaprompt')
-openai_metaprompt = os.getenv('openai_metaprompt')
-original_meta_prompt = os.getenv('original_meta_prompt')
-new_meta_prompt = os.getenv('new_meta_prompt')
-advanced_meta_prompt = os.getenv('advanced_meta_prompt')
-math_meta_prompt = os.getenv('metamath')
-autoregressive_metaprompt = os.getenv('autoregressive_metaprompt')
-'''
-
-if __name__ == '__main__':
-    prompt_refiner = PromptRefiner(api_token,meta_prompts)
-    gradio_interface = GradioInterface(prompt_refiner,custom_css)
-    gradio_interface.launch(share=True)
+        self.interface.launch(share=share)
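
The slimmed-down app.py now imports PromptRefiner from a prompt_refiner module that this commit does not include. Judging from the call sites that remain (construction from an API token and a meta_prompts dict, and self.prompt_refiner.refine_prompt(prompt, meta_prompt_choice) returning a 4-tuple), the extracted module presumably looks roughly like the sketch below. This is a reconstruction from the code removed above, not the committed file: the variables.py import is an assumption, and the apply_prompt helper behind the "Apply MetaPrompt" button is omitted.

# prompt_refiner.py -- hypothetical sketch of the module this commit extracts
# from app.py. Reconstructed from the removed code; the real file may differ.
import re

from huggingface_hub import InferenceClient
from huggingface_hub.errors import HfHubHTTPError

from variables import prompt_refiner_model  # assumed to live here now


class PromptRefiner:
    def __init__(self, api_token: str, meta_prompts: dict):
        self.client = InferenceClient(token=api_token, timeout=120)
        self.meta_prompts = meta_prompts

    def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
        # New signature: plain strings replace the removed PromptInput model.
        try:
            selected_meta_prompt = self.meta_prompts.get(meta_prompt_choice, "")
            messages = [
                {"role": "system",
                 "content": "You are an expert at refining and extending prompts."},
                {"role": "user",
                 "content": selected_meta_prompt.replace("[Insert initial prompt here]", prompt)},
            ]
            response = self.client.chat_completion(
                model=prompt_refiner_model,
                messages=messages,
                max_tokens=3000,
                temperature=0.8,
            )
            result = self._parse_response(response.choices[0].message.content.strip())
            return (
                result.get("initial_prompt_evaluation", ""),
                result.get("refined_prompt", ""),
                result.get("explanation_of_refinements", ""),
                result,
            )
        except HfHubHTTPError:
            return ("Error: Model timeout. Please try again later.",
                    "The selected model is currently experiencing high traffic.",
                    "The selected model is currently experiencing high traffic.", {})

    def _parse_response(self, response_content: str) -> dict:
        # Regex fallback kept from the removed parser: pull the expected keys
        # out of the model's JSON-ish reply.
        output = {}
        for key in ["initial_prompt_evaluation", "refined_prompt",
                    "explanation_of_refinements"]:
            match = re.search(rf'"{key}":\s*"(.*?)"(?:,|\}})',
                              response_content, re.DOTALL)
            output[key] = (match.group(1).replace('\\n', '\n').replace('\\"', '"')
                           if match else "")
        output["response_content"] = response_content
        return output

Keeping the split this way leaves app.py as pure Gradio wiring, while model access, meta-prompt selection, and response parsing stay behind the PromptRefiner interface.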