Spaces:
Runtime error
Runtime error
File size: 1,291 Bytes
2dfd879 1366879 2dfd879 b087f1f 2dfd879 b087f1f 2dfd879 b087f1f 2dfd879 b087f1f 2dfd879 1366879 b087f1f 2dfd879 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 |
import os
import subprocess
import sys

# Best-effort dependency bootstrap: make sure the heavy third-party packages
# imported below are present before we try to import them for real.
required_modules = ['transformers', 'torch', 'numpy']
for module in required_modules:
    try:
        __import__(module)
    except ImportError:
        print(f"Module '{module}' not found. Installing...")
        # subprocess.run with an argument list (shell=False) is robust against
        # interpreter paths containing spaces and avoids shell injection,
        # unlike the os.system(f"...") string it replaces. check=False keeps
        # the original best-effort behavior: a failed install does not abort.
        subprocess.run([sys.executable, "-m", "pip", "install", module],
                       check=False)
# Import necessary libraries
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import numpy as np

# Setup model and tokenizer.
# BUG FIX: "gpt-3.5-turbo" is an OpenAI API model, not a Hugging Face Hub
# checkpoint, so AutoTokenizer.from_pretrained() raises a repository-not-found
# error at startup (the runtime error this script hits). Use a real Hub
# causal-LM checkpoint instead.
model_name = "gpt2"  # replace with any Hugging Face causal-LM checkpoint id
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Function to generate text
def generate_text(prompt, max_length=100):
    """Generate a sampled continuation of *prompt* using the module-level model.

    Args:
        prompt: Text to condition the model on.
        max_length: Total token budget for generation (prompt + continuation).

    Returns:
        The decoded output (prompt included) with special tokens stripped.
        Sampling (top-k/top-p) makes the result non-deterministic.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # Pure inference: no_grad skips autograd bookkeeping and saves memory.
    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            # Pass the mask explicitly — transformers warns (and may behave
            # unexpectedly with padding) when generate() has to guess it.
            attention_mask=inputs.attention_mask,
            max_length=max_length,
            do_sample=True,
            top_k=50,
            top_p=0.95,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Main function
def main():
    """Run an interactive REPL: read a prompt, print the model's reply.

    Typing 'exit' (any case) ends the session.
    """
    print("Welcome to CATGPT! Type 'exit' to quit.")
    # Walrus-condition loop: read the prompt and test the quit sentinel in one
    # place instead of read / compare / break inside the body.
    while (prompt := input("\nEnter your prompt: ")).lower() != 'exit':
        print(f"\nCATGPT: {generate_text(prompt)}")


if __name__ == "__main__":
    main()
|