import os

from llamaapi import LlamaAPI
from openai import OpenAI

# API keys are read from environment variables instead of being hard-coded in
# source. OPENAI_API_KEY is the variable the OpenAI SDK looks for by default;
# LLAMA_API_KEY is an assumed name for the Llama API token.
llama = LlamaAPI(os.environ["LLAMA_API_KEY"])

client = OpenAI()  # picks up OPENAI_API_KEY from the environment

# Sample input; in practice this would be the ESL learner's writing sample.
prompt = "Hello, who are you?"

chat_completion = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {
            "role": "system",
            "content": (
                "Provide feedback on the inputted writing sample from an ESL learner. "
                "Focus on areas such as grammar, vocabulary usage, and overall coherence and organization of the essay. "
                "Offer corrective feedback on errors, suggest improvements, and highlight positive aspects to encourage "
                "the learner. Please ensure the feedback is constructive, clear, and supportive to help the learner "
                "understand and apply the suggestions. Always frame feedback in a positive, constructive manner. "
                "Focus on how the student can improve rather than just highlighting mistakes. Provide clear examples "
                "when pointing out errors or suggesting improvements. Prompt the learner to reflect on specific parts of "
                "their writing."
            ),
        },
        {"role": "user", "content": prompt},
    ],
)

print(chat_completion.choices[0].message.content.strip())
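
# The LlamaAPI client constructed above is never used in this script. The
# sketch below is an assumption about how the same feedback request could be
# sent through it: llama.run() and the request/response shapes follow the
# llamaapi package's documented interface, and the model name is a placeholder.
llama_request = {
    "model": "llama3-8b",  # placeholder model name
    "messages": [
        {"role": "system", "content": "Provide feedback on the inputted writing sample from an ESL learner."},
        {"role": "user", "content": prompt},
    ],
    "stream": False,
}
llama_response = llama.run(llama_request)  # returns a requests.Response
print(llama_response.json()["choices"][0]["message"]["content"])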