Spaces:
Sleeping
Sleeping
File size: 1,647 Bytes
1fad4a0 ba1082d 1fad4a0 e03f966 7846d6e e03f966 7846d6e 3a5fb85 b69e9a7 58b75c9 f275817 ba1082d 58b75c9 299669a 977090c ba1082d 58b75c9 ba1082d 7846d6e ba1082d b43651d 7846d6e ba1082d b43651d 7846d6e ba1082d b43651d 7846d6e e03f966 7846d6e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 |
import requests
import os
from transformers import Tool
# Import other necessary libraries if needed
class TextGenerationTool(Tool):
    """Tool that generates text by calling the Hugging Face Inference API.

    Sends the user's prompt to the hosted ``openchat/openchat_3.5`` model
    and returns the model's generated text.
    """

    name = "text_generator"
    description = (
        "This is a tool for text generation. It takes a prompt as input and returns the generated text."
    )
    inputs = ["text"]
    outputs = ["text"]

    def __call__(self, prompt: str) -> str:
        """Generate text for *prompt* via the HF Inference API.

        Args:
            prompt: The input text to condition generation on.

        Returns:
            The generated text string.

        Raises:
            KeyError: if the ``hf`` environment variable (API token) is unset.
            requests.RequestException: on network failure.
            RuntimeError: if the API returns an error payload.
        """
        API_URL = "https://api-inference.huggingface.co/models/openchat/openchat_3.5"
        headers = {"Authorization": "Bearer " + os.environ["hf"]}
        # BUG FIX: the original code built this payload and then immediately
        # overwrote it with a hard-coded test string, so the caller's prompt
        # was silently ignored.
        payload = {"inputs": prompt}

        response = requests.post(API_URL, headers=headers, json=payload).json()

        # A successful Inference API text-generation response is a list of
        # {"generated_text": ...} dicts; the original code indexed a "text"
        # key that this API does not return.
        if isinstance(response, list) and response and "generated_text" in response[0]:
            return response[0]["generated_text"]
        # Error responses arrive as a dict such as {"error": "..."}.
        if isinstance(response, dict) and "error" in response:
            raise RuntimeError(f"Inference API error: {response['error']}")
        raise RuntimeError(f"Unexpected Inference API response: {response!r}")
|