CreitinGameplays
committed on
Commit
•
6b10d1a
1
Parent(s):
1e4352a
Update app.py
Browse files
app.py
CHANGED
@@ -1,11 +1,30 @@
|
|
1 |
import gradio as gr
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
|
3 |
def conversation(prompt="", max_tokens=128):
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
|
10 |
interface = gr.Interface(
|
11 |
fn=conversation,
|
@@ -19,3 +38,10 @@ interface = gr.Interface(
|
|
19 |
)
|
20 |
|
21 |
interface.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import gradio as gr
|
2 |
+
from transformers import pipeline
|
3 |
+
|
4 |
+
bloom_model_name = "CreitinGameplays/bloom-3b-conversational"
|
5 |
+
|
6 |
+
# Create a pipeline for text generation
|
7 |
+
generator = pipeline("text-generation", model=bloom_model_name, truncation=True)
|
8 |
|
9 |
def conversation(prompt="", max_tokens=128):
    """
    Generates a conversation response using Bloom with Hugging Face Transformers.

    Args:
        prompt (str, optional): Text prompt for Bloom. Defaults to "".
        max_tokens (int, optional): Maximum number of tokens for response generation. Defaults to 128.

    Returns:
        str: Bloom's generated response to the prompt, or a fallback message
        when generation fails.
    """
    try:
        # Fix: use max_new_tokens instead of max_length. In HF pipelines,
        # max_length caps prompt + continuation combined, so a long prompt
        # could leave no budget for the reply (or raise); max_new_tokens
        # caps only the generated continuation, matching the documented
        # meaning of max_tokens.
        outputs = generator(prompt, max_new_tokens=max_tokens, num_return_sequences=1)
        if not outputs:
            # Defensive: pipeline returned no sequences.
            return "Bloom is currently unavailable. Try again later!"
        response = outputs[0]["generated_text"]
        return response.strip()  # Remove potential leading/trailing whitespace
    except Exception as e:
        # Best-effort UI behavior: log and show a friendly message rather
        # than crashing the Gradio app.
        print(f"Error during Bloom interaction: {e}")
        return "Bloom is currently unavailable. Try again later!"
|
28 |
|
29 |
interface = gr.Interface(
|
30 |
fn=conversation,
|
|
|
38 |
)
|
39 |
|
40 |
interface.launch()
|
41 |
+
# Placeholder — swap in a real Bloom-3b interaction (e.g. via transformers).
def generate_response_from_bloom3b(prompt, max_tokens):
    """Return a canned reply standing in for a real Bloom-3b call.

    Args:
        prompt: Text a real implementation would send to Bloom 3b.
        max_tokens: Response-length budget a real implementation would honor.

    Returns:
        str: A fixed placeholder string; both arguments are currently ignored.
    """
    # A real implementation would invoke Bloom 3b here and post-process
    # its output before returning it as a string.
    canned_reply = "This is a placeholder response from generate_response_from_bloom3b"
    return canned_reply
|