from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch


class EndpointHandler:
    """Custom handler for a Hugging Face Inference Endpoint serving krisoei/timgpt."""

    def __init__(self, path="krisoei/timgpt"):
        if not path:
            raise ValueError("A valid model path or name must be provided.")

        # Load tokenizer and model; device_map="auto" places weights across the
        # available device(s), and float16 halves the memory footprint.
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = AutoModelForCausalLM.from_pretrained(
            path,
            torch_dtype=torch.float16,
            device_map="auto",
        )

        # Set up the text-generation pipeline with sampling enabled
        self.pipe = pipeline(
            "text-generation",
            model=self.model,
            tokenizer=self.tokenizer,
            max_new_tokens=512,
            do_sample=True,
            temperature=0.7,
            top_p=0.95,
        )

    def __call__(self, data):
        # Validate input data
        if not isinstance(data, dict):
            return {"error": "Input must be a JSON object."}

        prompt = data.get("inputs", "")
        if not prompt:
            return {"error": "No input provided."}

        try:
            # Generate a response
            outputs = self.pipe(prompt)
            if outputs:
                response = outputs[0]["generated_text"]
                # The pipeline echoes the prompt by default; strip it so only
                # the newly generated text is returned.
                response = response[len(prompt):].strip()
                return {"generated_text": response}
            else:
                return {"error": "No output generated."}
        except Exception as e:
            return {"error": str(e)}