from typing import Dict, List, Any
from transformers import AutoProcessor, MusicgenForConditionalGeneration
import torch

def create_params(params, fr):
    """Merge user-supplied generation parameters with sensible defaults.

    `fr` is the audio encoder's frame rate (tokens per second), used to
    translate a requested `duration` in seconds into `max_new_tokens`.
    """
    # defaults
    out = {
        "do_sample": True,
        "guidance_scale": 3,
        "max_new_tokens": 256,
    }

    if params is None:
        return out

    # a requested duration (in seconds) takes precedence over max_new_tokens
    has_tokens = False
    if "duration" in params:
        out["max_new_tokens"] = params["duration"] * fr
        has_tokens = True

    for k, p in params.items():
        if k in out:
            if has_tokens and k == "max_new_tokens":
                continue
            out[k] = p

    return out
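
# Worked example (assuming MusicGen's 32 kHz EnCodec audio encoder with a
# 50 Hz frame rate): a payload of {"duration": 8} yields
# max_new_tokens = 8 * 50 = 400, overriding the 256-token default
# (roughly 5 seconds of audio).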

class EndpointHandler:
    def __init__(self, path="pbotsaris/musicgen-small"):
        # load model and processor
        self.processor = AutoProcessor.from_pretrained(path)
        self.model = MusicgenForConditionalGeneration.from_pretrained(
            path, torch_dtype=torch.float16
        )
        self.model.to("cuda")
    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Args:
            data (:obj:`dict`):
                The payload with the text prompt and generation parameters.

        Returns:
            A list with a single dict containing the generated audio samples
            and the sampling rate.
        """
        inputs = data.pop("inputs", data)
        params = data.pop("parameters", None)

        # tokenize the text prompt
        inputs = self.processor(
            text=[inputs],
            padding=True,
            return_tensors="pt",
        ).to("cuda")

        params = create_params(params, self.model.config.audio_encoder.frame_rate)

        # generate audio under mixed precision
        with torch.cuda.amp.autocast():
            outputs = self.model.generate(**inputs, **params)

        pred = outputs[0].cpu().numpy().tolist()

        # fall back to MusicGen's 32 kHz if the config does not expose a sampling rate
        try:
            sr = self.model.config.audio_encoder.sampling_rate
        except AttributeError:
            sr = 32000

        return [{"audio": pred, "sr": sr}]

if __name__ == "__main__":
    handler = EndpointHandler()
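    # Quick local smoke test (a sketch; assumes a CUDA device is available
    # and uses the payload schema handled in __call__ above).
    example_payload = {
        "inputs": "lo-fi hip hop beat with a mellow piano melody",
        "parameters": {"duration": 5, "guidance_scale": 3},
    }
    result = handler(example_payload)
    print(f"generated {len(result[0]['audio'][0])} samples at {result[0]['sr']} Hz")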