gokaygokay committed
Commit de63645
1 Parent(s): a4f002b

Update app.py

Files changed (1)
  1. app.py +22 -2
app.py CHANGED
@@ -22,6 +22,20 @@ from transformers import AutoProcessor, AutoModelForCausalLM, pipeline
 import requests
 from RealESRGAN import RealESRGAN
 
+import os
+
+from typing import Union
+from transformers.dynamic_module_utils import get_imports
+
+
+def fixed_get_imports(filename: Union[str, os.PathLike]) -> list[str]:
+    """Work around for https://huggingface.co/microsoft/phi-1_5/discussions/72."""
+    if not str(filename).endswith("/modeling_florence2.py"):
+        return get_imports(filename)
+    imports = get_imports(filename)
+    imports.remove("flash_attn")
+    return imports
+
 
 import subprocess
 #subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
@@ -108,8 +122,14 @@ DEFAULT_NEGATIVE_SUFFIX = "Nsfw oversaturated crappy_art low_quality blurry bad_
 
 # Initialize Florence model
 device = "cuda" if torch.cuda.is_available() else "cpu"
-florence_model = AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True).to(device).eval()
-florence_processor = AutoProcessor.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True)
+
+def load_models():
+    with patch("transformers.dynamic_module_utils.get_imports", fixed_get_imports):
+        florence_model = AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True).to(device).eval()
+        florence_processor = AutoProcessor.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True)
+    return florence_model, florence_processor
+
+florence_model, florence_processor = load_models()
 
 # Prompt Enhancer
 enhancer_medium = pipeline("summarization", model="gokaygokay/Lamini-Prompt-Enchance", device=device)
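For reference, below is a minimal self-contained sketch of the same workaround. It assumes patch comes from the standard library's unittest.mock (the diff does not show that import, so it presumably already exists elsewhere in app.py), and it adds a small "flash_attn" in imports guard that is not part of the commit; treat it as an illustration of the technique rather than the Space's exact code.

# Sketch: load Florence-2 on a machine without flash-attn by patching transformers'
# dynamic-module import check. Assumes unittest.mock.patch; the membership guard
# before imports.remove() is a defensive addition not present in the commit above.
import os
from typing import Union
from unittest.mock import patch

import torch
from transformers import AutoModelForCausalLM, AutoProcessor
from transformers.dynamic_module_utils import get_imports


def fixed_get_imports(filename: Union[str, os.PathLike]) -> list[str]:
    """Work around https://huggingface.co/microsoft/phi-1_5/discussions/72 by
    dropping flash_attn from Florence-2's remote-code import list."""
    imports = get_imports(filename)
    if str(filename).endswith("/modeling_florence2.py") and "flash_attn" in imports:
        imports.remove("flash_attn")
    return imports


device = "cuda" if torch.cuda.is_available() else "cpu"

# While the patch is active, the dynamic-module loader calls fixed_get_imports
# instead of get_imports, so a missing flash_attn package no longer aborts
# from_pretrained.
with patch("transformers.dynamic_module_utils.get_imports", fixed_get_imports):
    florence_model = AutoModelForCausalLM.from_pretrained(
        "microsoft/Florence-2-base", trust_remote_code=True
    ).to(device).eval()
    florence_processor = AutoProcessor.from_pretrained(
        "microsoft/Florence-2-base", trust_remote_code=True
    )

The commit wraps the same two from_pretrained calls in load_models() so the patch is only in effect while the models are being loaded.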