# lanpip/bots/feedback.py
import os
import requests
from llamaapi import LlamaAPI
from openai import OpenAI
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel
# Initialize API clients. Keys are read from the environment rather than
# hard-coded so that secrets are not committed with the source.
llama = LlamaAPI(os.environ["LLAMA_API_KEY"])  # assumes LLAMA_API_KEY is set
client = OpenAI()  # the OpenAI client reads OPENAI_API_KEY from the environment
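# Example environment setup before running (shell commands shown as comments;
# the LLAMA_API_KEY variable name is an assumption introduced above):
#
#   export LLAMA_API_KEY="..."
#   export OPENAI_API_KEY="..."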
# Feedback chatbot.
_trocr_processor = None
_trocr_model = None

def transcribe_handwriting(image_url):
    """
    Transcribes text from a handwriting image fetched from a URL using TrOCR.
    Parameters:
    - image_url (str): The URL of the handwriting image file.
    Returns:
    - str: The transcribed text. Returns None if transcription fails.
    """
    global _trocr_processor, _trocr_model
    try:
        # Fetch the image and convert it to RGB for the model.
        image = Image.open(requests.get(image_url, stream=True, timeout=30).raw).convert("RGB")
        # Load the TrOCR processor and model once and cache them at module level.
        if _trocr_model is None:
            _trocr_processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-handwritten')
            _trocr_model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-base-handwritten')
        # Prepare the image for the model and generate the transcription.
        pixel_values = _trocr_processor(images=image, return_tensors="pt").pixel_values
        generated_ids = _trocr_model.generate(pixel_values)
        generated_text = _trocr_processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        return generated_text
    except Exception as e:
        print(f"An error occurred while processing the image: {e}")
        return None
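# A minimal usage sketch (kept as comments so it does not run on import). The
# URL below is a hypothetical placeholder for any publicly reachable image:
#
#   text = transcribe_handwriting("https://example.com/handwriting.jpg")
#   if text:
#       print("Transcription:", text)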
FEEDBACK_SYSTEM_PROMPT = (
    "Provide feedback on the inputted writing sample from an ESL learner. "
    "Focus on areas such as grammar, vocabulary usage, and overall coherence and organization of the essay. "
    "Offer corrective feedback on errors, suggest improvements, and highlight positive aspects to encourage "
    "the learner. Please ensure the feedback is constructive, clear, and supportive to help the learner "
    "understand and apply the suggestions. Always frame feedback in a positive, constructive manner. "
    "Focus on how the student can improve rather than just highlighting mistakes. Provide clear examples "
    "when pointing out errors or suggesting improvements. Prompt the learner to reflect on specific parts of "
    "their writing."
)

def chat_with_model(prompt, model_type="gpt-4"):
    """
    Chat with an OpenAI GPT model or a LLaMA model.
    Parameters:
    - prompt (str): The message or question to send to the model.
    - model_type (str): The model to use ("gpt-4", "gpt-3.5-turbo", or a "llama-*" model).
    Returns:
    - str: The response from the selected model, or an error message.
    """
    # Both backends share the same system prompt and message format.
    messages = [
        {"role": "system", "content": FEEDBACK_SYSTEM_PROMPT},
        {"role": "user", "content": prompt},
    ]
    if model_type.startswith("gpt"):
        try:
            chat_completion = client.chat.completions.create(
                model=model_type,
                messages=messages,
            )
            return chat_completion.choices[0].message.content.strip()
        except Exception as e:
            return f"An error occurred with {model_type}: {e}"
    elif model_type.startswith("llama"):
        api_request_json = {
            "model": model_type,  # e.g. "llama-7b", "llama-13b-chat", etc.
            "messages": messages,
        }
        try:
            response = llama.run(api_request_json)
            response_data = response.json()
            return response_data["choices"][0]["message"]["content"]
        except Exception as e:
            return f"An error occurred with LLaMA: {e}"
    else:
        return "Unsupported model type."
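# A minimal usage sketch (kept as comments so it does not run on import),
# assuming the API keys above are configured; the learner sentence is a
# hypothetical example input:
#
#   feedback = chat_with_model("I goes to school every day.", model_type="gpt-4")
#   print(feedback)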
def feedbacks_main():
print("Hello! I am a chatbot. Which model would you like to use? (llama-13b-chat, gpt-4, gpt-3.5-turbo)")
model_type = input("Choose model or type 'exit' to quit: ").strip()
if model_type.lower() == "exit":
        print("Thanks for using the chatbot! Wishing you all the best in your English learning!")
return
while model_type not in ["llama-13b-chat", "gpt-4", "gpt-3.5-turbo"]:
        print("Invalid model. Please choose one of (llama-13b-chat, gpt-4, gpt-3.5-turbo) or type 'exit' to quit.")
model_type = input("Choose model: ").strip()
if model_type.lower() == "exit":
            print("Thanks for using the chatbot! Wishing you all the best in your English learning!")
return
print(f"Model set to {model_type}. How can I assist you today?\n")
user_inputs = [] # List to store all user inputs
outputs = [] # List to store all chatbot responses
interaction_history = [] # List to store the full interaction history (both inputs and responses)
while True:
upload_decision = input("Do you need to upload a picture for transcription? (yes/no): ").strip().lower()
user_input = ""
if upload_decision == "yes":
            image_url = input("Please provide the URL of your handwriting image: \n")
            generated_text = transcribe_handwriting(image_url)
if not generated_text:
print("Failed to transcribe the image or no text was found.")
else:
print("Transcribed text:", generated_text)
user_input = generated_text
if not user_input:
user_input = input("You: ")
if user_input.lower() == "exit": # Check for 'exit' command to break the loop
            print("Thanks for using the chatbot! Wishing you all the best in your English learning!")
break # Exit the while loop
user_inputs.append(user_input) # Add user input to the list
interaction_history.append(f"You: {user_input}") # Add user input to the interaction history
response = chat_with_model(user_input, model_type)
outputs.append(response) # Add chatbot response to the list
interaction_history.append(f"Chatbot: {response}") # Add chatbot response to the interaction history
print("Chatbot:", response)
return user_inputs, outputs, interaction_history
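
# Entry-point sketch: run the interactive loop when this file is executed
# directly. This is an assumption; the repository may instead import
# feedbacks_main from another module.
if __name__ == "__main__":
    feedbacks_main()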