from os import getenv

import openai
from dotenv import load_dotenv
from openai import ChatCompletion
from osbot_utils.decorators.methods.cache_on_self import cache_on_self

OPEN_AI__API_KEY = 'OPEN_AI__API_KEY'                                   # name of the environment variable that holds the key


class Open_API:

    def __init__(self):
        pass

    @cache_on_self                                                      # cache the key so .env is only loaded once per instance
    def api_key(self):
        load_dotenv()                                                   # pull values from a local .env file into the environment
        return getenv(OPEN_AI__API_KEY)

    def create(self):                                                   # start a streaming chat completion request
        history_openai_format = self.messages()
        response = ChatCompletion.create(model       = 'gpt-3.5-turbo',
                                         messages    = history_openai_format,
                                         temperature = 1.0,
                                         stream      = True)
        return self.parse_response(response)

    def messages(self):                                                 # hard-coded single-message conversation history
        return [{"role": "user", "content": 'Hi'}]

    def parse_response(self, response):                                 # accumulate streamed deltas into a growing message
        partial_message = ""
        for chunk in response:
            delta   = chunk['choices'][0]['delta']
            content = delta.get('content')                              # role-only or empty deltas carry no 'content' key
            if content:
                partial_message += content
                yield partial_message                                   # yield the full message received so far

    def setup(self):                                                    # assign the key to the openai module and allow chaining
        openai.api_key = self.api_key()
        return self
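
# Minimal usage sketch (an assumption, not part of the class above): it expects a valid key to be
# reachable via the OPEN_AI__API_KEY environment variable or a local .env file, and the pre-1.0
# `openai` package to be installed. setup() wires the key into the openai module, and create()
# returns a generator that yields the progressively longer partial message as chunks stream in.
if __name__ == '__main__':
    open_api = Open_API().setup()
    for partial_message in open_api.create():
        print(partial_message)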