# NOTE(review): the original paste carried a "Spaces:" / "Runtime error" banner.
# The reported runtime error comes from loading "gpt-3.5-turbo", which is not a
# Hugging Face Hub checkpoint (fixed below).
import os
import sys
import subprocess

# Bootstrap: ensure required third-party modules are importable, installing
# any missing one into the *current* interpreter's environment.
required_modules = ['transformers', 'torch', 'numpy']
for module in required_modules:
    try:
        __import__(module)
    except ImportError:
        print(f"Module '{module}' not found. Installing...")
        # Argument-list subprocess call (shell=False): no shell-injection
        # surface, and a failed install raises instead of being ignored
        # (os.system's return code was silently discarded).
        subprocess.check_call([sys.executable, "-m", "pip", "install", module])
# Import necessary libraries
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import numpy as np

# Setup model and tokenizer.
# BUG FIX: "gpt-3.5-turbo" is an OpenAI API model, not a Hugging Face Hub
# checkpoint — AutoTokenizer.from_pretrained() raised at startup. Use a real
# causal-LM checkpoint; swap in your own model name as needed.
model_name = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()  # inference only: disables dropout etc.
# Function to generate text
def generate_text(prompt, max_length=100):
    """Generate a sampled continuation of *prompt* with the module-level model.

    Args:
        prompt: Input text to continue.
        max_length: Maximum total length in tokens (prompt + generation).

    Returns:
        The decoded text (prompt included), special tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # no_grad: pure inference — skip autograd bookkeeping.
    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,  # silence missing-mask warning
            max_length=max_length,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            # GPT-2-family tokenizers define no pad token; fall back to EOS so
            # generate() does not warn/fail when it needs a pad id.
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Main function
def main():
    """Interactive REPL: read prompts, print model responses until 'exit'."""
    print("Welcome to CATGPT! Type 'exit' to quit.")
    while True:
        try:
            prompt = input("\nEnter your prompt: ")
        except (EOFError, KeyboardInterrupt):
            # Ctrl-D / Ctrl-C: leave cleanly instead of a traceback.
            break
        if prompt.lower() == 'exit':
            break
        response = generate_text(prompt)
        print(f"\nCATGPT: {response}")


if __name__ == "__main__":
    main()