Update app.py
app.py
CHANGED
@@ -4,6 +4,8 @@ import json
 import io
 import random
 import os
+import torch
+from transformers import pipeline
 from PIL import Image
 
 API_BASE_URL = "https://api-inference.huggingface.co/models/"
@@ -28,6 +30,8 @@ MODEL_LIST = [
 API_TOKEN = os.getenv("HF_READ_TOKEN") # Make sure to set your Hugging Face token
 HEADERS = {"Authorization": f"Bearer {API_TOKEN}"}
 
+pipe = pipeline("text-generation", model="isek-ai/SDPrompt-RetNet-300M", trust_remote_code=True)
+
 def select_model(model_name):
     if model_name in MODEL_LIST:
         return f"{API_BASE_URL}{model_name}"
@@ -37,17 +41,12 @@ def extend_prompt(input_text):
         gr.Warning("Input text is empty!")
         return None
 
-
+    seed = random.randint(1, 1000000)
+    torch.manual_seed(seed)
 
-
+    output = pipe(input_text, max_length=(len(input_text) + random.randint(60, 90)), num_return_sequences=4)
 
-
-        response = requests.post(API_URL, headers=HEADERS, json=payload)
-        response.raise_for_status()
-        return response.json()[0].get("generated_text", "")
-    except requests.exceptions.RequestException as e:
-        gr.Warning(f"Error in API request: {e}")
-        return None
+    return output
 
 def generate_image(prompt, selected_model, is_negative=False, steps=1, cfg_scale=6, seed=None):
     if not prompt.strip():