MohamedRashad committed (verified)
Commit c3ffb57 · 1 Parent(s): e4aad39

Update app.py

Files changed (1): app.py +35 -45

app.py CHANGED
@@ -7,45 +7,42 @@ from huggingface_hub import snapshot_download
 from dotenv import load_dotenv
 load_dotenv()
 
-# Load models function
-def load_models():
-    # Check if CUDA is available
-    device = "cuda" if torch.cuda.is_available() else "cpu"
 
-    print("Loading SNAC model...")
-    snac_model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz")
-    snac_model = snac_model.to(device)
-
-    model_name = "canopylabs/orpheus-3b-0.1-ft"
-
-    # Download only model config and safetensors
-    snapshot_download(
-        repo_id=model_name,
-        allow_patterns=[
-            "config.json",
-            "*.safetensors",
-            "model.safetensors.index.json",
-        ],
-        ignore_patterns=[
-            "optimizer.pt",
-            "pytorch_model.bin",
-            "training_args.bin",
-            "scheduler.pt",
-            "tokenizer.json",
-            "tokenizer_config.json",
-            "special_tokens_map.json",
-            "vocab.json",
-            "merges.txt",
-            "tokenizer.*"
-        ]
-    )
-
-    model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16)
-    model.to(device)
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    print(f"Orpheus model loaded to {device}")
-
-    return snac_model, model, tokenizer, device
+# Check if CUDA is available
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+print("Loading SNAC model...")
+snac_model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz")
+snac_model = snac_model.to(device)
+
+model_name = "canopylabs/orpheus-3b-0.1-ft"
+
+# Download only model config and safetensors
+snapshot_download(
+    repo_id=model_name,
+    allow_patterns=[
+        "config.json",
+        "*.safetensors",
+        "model.safetensors.index.json",
+    ],
+    ignore_patterns=[
+        "optimizer.pt",
+        "pytorch_model.bin",
+        "training_args.bin",
+        "scheduler.pt",
+        "tokenizer.json",
+        "tokenizer_config.json",
+        "special_tokens_map.json",
+        "vocab.json",
+        "merges.txt",
+        "tokenizer.*"
+    ]
+)
+
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16)
+model.to(device)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+print(f"Orpheus model loaded to {device}")
 
 # Process text prompt
 def process_prompt(prompt, voice, tokenizer, device):
@@ -172,13 +169,6 @@ examples = [
 # Available voices
 VOICES = ["tara", "dan", "josh", "emma"]
 
-# Load models globally
-try:
-    snac_model, model, tokenizer, device = load_models()
-except Exception as e:
-    print(f"Error loading models: {e}")
-    raise
-
 # Create Gradio interface
 with gr.Blocks(title="Orpheus Text-to-Speech") as demo:
     gr.Markdown("""