File size: 1,944 Bytes
5b60a87
6b10d1a
 
 
 
 
 
5b60a87
46f3679
6b10d1a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46f3679
 
 
 
 
 
 
 
 
 
 
 
 
6b10d1a
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
import gradio as gr
from transformers import pipeline

# Hugging Face model id of the conversational Bloom-3b checkpoint to load.
bloom_model_name = "CreitinGameplays/bloom-3b-conversational"

# Create a pipeline for text generation
# NOTE: this runs at import time and downloads/loads the model weights,
# so module import may take a while on first run.
generator = pipeline("text-generation", model=bloom_model_name, truncation=True)

def conversation(prompt="", max_tokens=128):
  """
  Generates a conversation response using Bloom via the Hugging Face
  text-generation pipeline.

  Args:
      prompt (str, optional): Text prompt for Bloom. Defaults to "".
      max_tokens (int, optional): Maximum number of *new* tokens to generate
          for the response (prompt tokens excluded). Defaults to 128.

  Returns:
      str: Bloom's generated response to the prompt, stripped of leading
      and trailing whitespace, or a fallback message on failure.
  """

  try:
    # FIX: the UI slider is labelled "Max New Tokens", but the original code
    # passed max_length, which counts the prompt tokens too — a long prompt
    # could leave no room for the reply. max_new_tokens limits only the
    # generated tokens. int(...) because Gradio sliders deliver floats.
    response = generator(
        prompt,
        max_new_tokens=int(max_tokens),
        num_return_sequences=1,
    )[0]["generated_text"]
    return response.strip()  # Remove potential leading/trailing whitespace
  except Exception as e:
    # Broad catch is intentional: the UI should degrade gracefully with a
    # friendly message instead of surfacing a traceback to the end user.
    print(f"Error during Bloom interaction: {e}")
    return "Bloom is currently unavailable. Try again later!"

# Wire the conversation() function into a simple Gradio UI:
# a prompt textbox, a token-count slider, and an output textbox.
interface = gr.Interface(
  fn=conversation,
  inputs=[
      # Default prompt demonstrates the chat template this checkpoint expects
      # (<|system|> / <|prompter|> / <|assistant|> markers with </s> separators).
      gr.Textbox(label="Text Prompt", value="<|system|> You are a helpful AI assistant </s> <|prompter|> What is an AI? </s> <|assistant|>"),
      # Passed to conversation() as max_tokens; Gradio sliders emit floats.
      gr.Slider(minimum=1, maximum=1024, label="Max New Tokens", value=128),
  ],
  outputs=gr.Textbox(label="AI Assistant Response"),  # Textbox for the response
  title="Bloom 3b Conversational Assistant",
  description="Talk to Bloom 3b using a text prompt and adjust the maximum number tokens for response generation.",
)

# Blocks here serving the web UI; code below this line only runs after the
# server is shut down.
interface.launch()
# Unused placeholder stub — note it is defined after interface.launch()
# (which blocks), so the running app never reaches or calls it.
def generate_response_from_bloom3b(prompt, max_tokens):
  """Stand-in for a real Bloom 3b call; both arguments are ignored.

  Args:
      prompt: Unused by this stub.
      max_tokens: Unused by this stub.

  Returns:
      str: A fixed placeholder message.
  """
  return "This is a placeholder response from generate_response_from_bloom3b"