CreitinGameplays committed
Commit 95ac312
1 Parent(s): dfc8a9e

Update app.py

Files changed (1): app.py +8 -3
app.py CHANGED
@@ -1,14 +1,19 @@
 import gradio as gr
 
-gr.load("models/CreitinGameplays/bloom-3b-conversational")
+def conversation(prompt="", max_tokens=128):
+    # Integrate your Bloom 3b model here to generate response based on prompt and max_tokens
+    # Replace this with the actual call to your Bloom 3b model
+    gr.load("models/CreitinGameplays/bloom-3b-conversational")
+    response = "Bloom 3b is currently unavailable. Try again later!"
+    return response
 
 interface = gr.Interface(
-    fn=lambda prompt="", max_tokens=128: gr.Textbox.submit(value=prompt),
+    fn=conversation,
     inputs=[
         gr.Textbox(label="Text Prompt", value="<|system|> You are a helpful AI assistant </s> <|prompter|> What is an AI? </s> <|assistant|>"),
         gr.Slider(minimum=1, maximum=1024, label="Max New Tokens", value=128),
     ],
-    outputs=[],
+    # No outputs are specified as the response will be printed in the console.
     title="Bloom 3b Conversational Assistant",
     description="Talk to Bloom 3b using a text prompt and adjust the maximum number of tokens for response generation.",
 )
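
As committed, conversation is still a stub: it calls gr.load() on every request, discards the result, and always returns the "unavailable" string, and the gr.Interface defines no output component to display a reply. Below is a minimal sketch of one way the app could be wired up, assuming the model can be served with the standard transformers text-generation pipeline (something this commit does not confirm); the Response textbox label and the launch() guard are illustrative additions, not part of the commit.

import gradio as gr
from transformers import pipeline

# Assumption: load the model once at startup with the text-generation
# pipeline instead of calling gr.load() inside the request handler.
generator = pipeline(
    "text-generation",
    model="CreitinGameplays/bloom-3b-conversational",
)

def conversation(prompt="", max_tokens=128):
    # Generate a completion; max_new_tokens caps the length of the reply.
    result = generator(prompt, max_new_tokens=int(max_tokens))
    return result[0]["generated_text"]

interface = gr.Interface(
    fn=conversation,
    inputs=[
        gr.Textbox(label="Text Prompt", value="<|system|> You are a helpful AI assistant </s> <|prompter|> What is an AI? </s> <|assistant|>"),
        gr.Slider(minimum=1, maximum=1024, label="Max New Tokens", value=128),
    ],
    # An output component is required for the reply to appear in the UI.
    outputs=gr.Textbox(label="Response"),
    title="Bloom 3b Conversational Assistant",
    description="Talk to Bloom 3b using a text prompt and adjust the maximum number of tokens for response generation.",
)

if __name__ == "__main__":
    interface.launch()

Loading the pipeline at module level keeps the 3B model in memory across requests; constructing it inside conversation would reload it on every submit.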