szili2011 commited on
Commit
6547b17
·
verified ·
1 Parent(s): 6ffa12b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -25
app.py CHANGED
@@ -1,37 +1,40 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
 
4
- # Add a print statement to confirm initialization
5
- print("Initializing model...")
6
-
7
- # Load AI model
8
# Best-effort model load: failures are logged but do not abort module import,
# so the Gradio UI can still be constructed.
try:
    # flan-t5 is a seq2seq model: text2text-generation returns only the answer,
    # not an echo of the prompt.
    model = pipeline("text2text-generation", model="google/flan-t5-base")
    print("Model loaded successfully.")
except Exception as e:
    # NOTE(review): on failure `model` is left undefined — any later call
    # to ai_vote() will raise NameError rather than a clean error.
    print(f"Error loading model: {e}")
 
13
 
14
def ai_vote(poll_title, choices, num_ais):
    """Ask the model to vote `num_ais` times on a poll.

    Args:
        poll_title: title string inserted into the prompt.
        choices: list of option strings.
        num_ais: number of independent generations (votes) to collect.

    Returns:
        (results, explanations): results maps each chosen option to its vote
        count; explanations is a list of (option, explanation) tuples.
        On any failure, returns ({"Error": message}, []) instead of raising.
    """
    try:
        # Tally of votes per option, and one explanation entry per vote.
        results = {}
        explanations = []

        for _ in range(num_ais):
            input_text = f"Poll Title: {poll_title}\nChoices: {', '.join(choices)}\nChoose the best option and explain why."
            response = model(input_text, max_length=100, num_return_sequences=1)[0]['generated_text']

            # Extract the chosen option and explanation
            # (assumes the model emits the option on the first line and the
            # rationale on the following lines — TODO confirm with real output)
            chosen_option = response.split("\n")[0].strip()
            explanation = "\n".join(response.split("\n")[1:]).strip()

            results[chosen_option] = results.get(chosen_option, 0) + 1
            explanations.append((chosen_option, explanation))

        return results, explanations
    except Exception as e:
        # Broad catch keeps the UI responsive; the error is surfaced in the
        # results dict rather than propagated.
        return {"Error": str(e)}, []
 
 
 
 
 
33
 
34
- # Gradio interface
35
  def gradio_interface(title, choices, num_ais):
36
  try:
37
  choices = [choice.strip() for choice in choices.split(",")]
@@ -48,12 +51,11 @@ interface = gr.Interface(
48
  gr.Slider(label="Number of AIs", minimum=1, maximum=10, step=1)
49
  ],
50
  outputs=[
51
- gr.Label(label="Poll Results"),
52
  gr.Textbox(label="AI Explanations")
53
  ]
54
  )
55
 
56
- # Launch app
57
  if __name__ == "__main__":
58
  print("Launching interface...")
59
  interface.launch()
 
1
  import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
 
4
+ # Initialize the model and tokenizer
 
 
 
5
# Initialize the model and tokenizer at import time. On failure the app keeps
# running with `chat_pipeline = None`; ai_vote() checks for that sentinel and
# returns an error payload instead of crashing.
try:
    print("Initializing model...")
    tokenizer = AutoTokenizer.from_pretrained("satvikag/chatbot")
    model = AutoModelForCausalLM.from_pretrained("satvikag/chatbot")
    # text-generation (causal LM) pipeline: generated_text includes the prompt
    # as a prefix by default — downstream parsing must account for that.
    chat_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
    print("Model loaded successfully.")
except Exception as e:
    # Any download/load error (network, missing weights, OOM) lands here.
    print(f"Error during model initialization: {e}")
    chat_pipeline = None
14
 
15
def ai_vote(poll_title, choices, num_ais):
    """Collect `num_ais` votes on a poll from the chat model.

    Args:
        poll_title: title string inserted into the prompt.
        choices: list of option strings.
        num_ais: number of independent generations (votes) to collect.

    Returns:
        (results, explanations): results maps each chosen option to its vote
        count; explanations is a list of (option, explanation) tuples.
        On failure, returns ({"Error": message}, []) instead of raising.
    """
    if chat_pipeline is None:
        return {"Error": "Model not initialized"}, []

    results = {}
    explanations = []

    # The prompt does not depend on the loop variable — build it once.
    # (Concatenation yields the exact same string as the original f-string.)
    input_text = (
        f"Poll Title: {poll_title}\n"
        f"Choices: {', '.join(choices)}\n"
        "Choose the best option and explain why."
    )

    for _ in range(num_ais):
        try:
            response = chat_pipeline(input_text, max_length=150, num_return_sequences=1)[0]['generated_text']
        except Exception as e:
            return {"Error": str(e)}, []

        # BUG FIX: text-generation pipelines return prompt + continuation in
        # 'generated_text' by default, so splitting the raw response on "\n"
        # made the "chosen option" the prompt's first line ("Poll Title: ...").
        # Strip the echoed prompt so only the model's answer is parsed.
        if response.startswith(input_text):
            answer = response[len(input_text):]
        else:
            answer = response
        answer = answer.strip()

        # First line of the answer = chosen option, remainder = explanation.
        chosen_option, _, rest = answer.partition("\n")
        chosen_option = chosen_option.strip()
        explanation = rest.strip()

        results[chosen_option] = results.get(chosen_option, 0) + 1
        explanations.append((chosen_option, explanation))

    return results, explanations
37
 
 
38
  def gradio_interface(title, choices, num_ais):
39
  try:
40
  choices = [choice.strip() for choice in choices.split(",")]
 
51
  gr.Slider(label="Number of AIs", minimum=1, maximum=10, step=1)
52
  ],
53
  outputs=[
54
+ gr.JSON(label="Poll Results"),
55
  gr.Textbox(label="AI Explanations")
56
  ]
57
  )
58
 
 
59
# Launch the Gradio app only when executed as a script (not on import),
# so the module can be imported for testing without starting a server.
if __name__ == "__main__":
    print("Launching interface...")
    interface.launch()