Parechan committed
Commit f134294 · verified · 1 Parent(s): f915f2e

Upload 18 files
bots/__init__.py ADDED
File without changes
bots/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (151 Bytes)
bots/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (158 Bytes)
bots/__pycache__/assessment.cpython-310.pyc ADDED
Binary file (6 kB)
bots/__pycache__/assessment.cpython-312.pyc ADDED
Binary file (7.41 kB)
bots/__pycache__/classify.cpython-310.pyc ADDED
Binary file (1.2 kB)
bots/__pycache__/classify.cpython-312.pyc ADDED
Binary file (1.61 kB)
bots/__pycache__/feedback.cpython-310.pyc ADDED
Binary file (4.53 kB)
bots/__pycache__/feedback.cpython-312.pyc ADDED
Binary file (6.49 kB)
bots/__pycache__/speaking.cpython-310.pyc ADDED
Binary file (3.09 kB)
bots/__pycache__/speaking.cpython-312.pyc ADDED
Binary file (4.21 kB)
bots/__pycache__/vocab.cpython-310.pyc ADDED
Binary file (2.3 kB)
bots/__pycache__/vocab.cpython-312.pyc ADDED
Binary file (2.94 kB)
bots/assessment.py ADDED
@@ -0,0 +1,180 @@
+ # from transformers import TrOCRProcessor, VisionEncoderDecoderModel
+ # from PIL import Image
+ import requests
+ # from llamaapi import LlamaAPI
+ import sys
+ import numpy as np
+ # import pandas as pd
+ # import matplotlib.pyplot as plt
+ # import tensorflow as tf
+ import cv2
+
+ sys.path.append('./test')
+ from ocr.normalization import word_normalization, letter_normalization
+ from ocr import page, words, characters
+ from ocr.helpers import implt, resize
+ from ocr.tfhelpers import Model
+ from ocr.datahelpers import idx2char
+
+ import os
+ import json
+ from llamaapi import LlamaAPI
+ from openai import OpenAI
+
+ # Initialize
+ llama = LlamaAPI("LL-AirERHEk0jLIE1yEPvMXeobNfLsqLWJWcxLRS53obrZ3XyqMTfZc4EAuOs7r3wso")
+
+ api_key = "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"
+ os.environ["OPENAI_API_KEY"] = api_key
+ client = OpenAI()
+
+ # def transcribe_handwriting(image_path):
+ #     """
+ #     Transcribes text from a handwriting image fetched from a URL using TrOCR.
+ #
+ #     Parameters:
+ #     - image_path (str): The URL of the handwriting image file.
+ #
+ #     Returns:
+ #     - str: The transcribed text. Returns False if transcription fails.
+ #     """
+ #     try:
+ #         url = image_path
+ #         image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+ #
+ #         # Prepare the image for the model
+ #         processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-handwritten')
+ #         model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-base-handwritten')
+ #         pixel_values = processor(images=image, return_tensors="pt").pixel_values
+ #
+ #         generated_ids = model.generate(pixel_values)
+ #         generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+ #
+ #         return generated_text
+ #     except Exception as e:
+ #         print(f"An error occurred while processing the image: {e}")
+ #         return False
+ def transcribe_handwriting(image_path):
+     if image_path[:4] == 'http':
+         response = requests.get(image_path)
+         open('data/test.jpg', 'wb').write(response.content)
+         IMG = 'data/test.jpg'
+     else:
+         IMG = image_path
+     LANG = 'en'
+     # You can use only one of these two models.
+     # You HAVE to train the CTC model yourself using word_classifier_CTC.ipynb
+     MODEL_LOC_CHARS = f'models/char-clas/{LANG}/CharClassifier'
+     MODEL_LOC_CTC = 'models/word-clas/CTC/Classifier1'
+
+     CHARACTER_MODEL = Model(MODEL_LOC_CHARS)
+     CTC_MODEL = Model(MODEL_LOC_CTC, 'word_prediction')
+
+     image = cv2.cvtColor(cv2.imread(IMG), cv2.COLOR_BGR2RGB)
+     # implt(image)
+
+     # Crop image and get bounding boxes
+     crop = page.detection(image)
+     # implt(crop)
+     boxes = words.detection(crop)
+     lines = words.sort_words(boxes)
+
+     def recognise(img):
+         """Recognise a word image using the CTC model."""
+         img = word_normalization(
+             img,
+             64,
+             border=False,
+             tilt=False,
+             hyst_norm=False)
+         length = img.shape[1]
+         # Input has shape [batch_size, height, width, 1]
+         input_imgs = np.zeros(
+             (1, 64, length, 1), dtype=np.uint8)
+         input_imgs[0][:, :length, 0] = img
+
+         pred = CTC_MODEL.eval_feed({
+             'inputs:0': input_imgs,
+             'inputs_length:0': [length],
+             'keep_prob:0': 1})[0]
+
+         word = ''
+         for i in pred:
+             word += idx2char(i + 1)
+         return word
+
+     # implt(crop)
+     out = ''
+     for line in lines:
+         r = " ".join([recognise(crop[y1:y2, x1:x2]) for (x1, y1, x2, y2) in line])
+         out += r + '\n'
+         print(r)
+     return out
+
+
+ def chat_assessment_with_model(prompt, model_type="gpt-4"):
+     """
+     Function to chat with either the GPT-4 or LLaMA model.
+
+     Parameters:
+     - prompt (str): The message or question to send to the model.
+     - model_type (str): The type of model to use ("gpt-4" or "llama").
+
+     Returns:
+     - str: The response from the selected model.
+     """
+     if model_type == "gpt-4":
+         try:
+             chat_completion = client.chat.completions.create(
+                 model="gpt-4",
+                 messages=[
+                     {"role": "system", "content": "Ask for input if the user did not enter a piece of writing. "
+                         "Then, evaluate the student writing based on the ETS rubrics for two task types and provide a score: "
+                         "For Integrated Writing, Score 5: Successfully selects and coherently presents important information from the lecture in relation to the reading. The response is well-organized with only occasional language errors that do not hinder accuracy or clarity. "
+                         "Score 4: Good at selecting and presenting important lecture information in relation to the reading but may have minor inaccuracies or imprecisions. Minor language errors are more frequent but do not significantly affect clarity. "
+                         "Score 3: Contains some important information from the lecture and some relevant connections to the reading but may be vague, imprecise, or contain one major omission. Frequent errors may obscure meanings or connections. "
+                         "Score 2: Contains relevant information from the lecture but has significant language difficulties or inaccuracies in conveying important ideas or connections. Errors likely obscure key points for readers unfamiliar with the topics. "
+                         "Score 1: Provides little to no meaningful content from the lecture, with very low language level making it difficult to derive meaning. "
+                         "Score 0: Merely copies sentences from the reading, off-topic, written in a foreign language, consists of keystroke characters, or is blank. "
+                         "For Academic Discussion, Score 5: Relevant and clearly expressed contribution with consistent facility in language use, showcasing relevant explanations, effective syntactic variety, precise word choice, and almost no errors. "
+                         "Score 4: Relevant contribution that is easily understood, displaying adequate elaboration, syntactic variety, appropriate word choice, and few lexical or grammatical errors. "
+                         "Score 3: Mostly relevant and understandable contribution with some facility in language use. Some parts may be missing, unclear, or irrelevant, with noticeable lexical and grammatical errors. "
+                         "Score 2: Attempt to contribute with limited language use making ideas hard to follow, limited syntactic and vocabulary range, and an accumulation of structural and lexical errors. "
+                         "Score 1: Ineffective attempt with severely limited language use preventing expression of ideas. Few coherent ideas, with any coherent language mostly borrowed. "
+                         "Score 0: Blank, off-topic, not in English, entirely copied, unconnected to the prompt, or consists of arbitrary keystrokes. "
+                         "Lastly, provide feedback and answer any questions the user has."},
+                     {"role": "user", "content": prompt},
+                 ]
+             )
+             return chat_completion.choices[0].message.content.strip()
+         except Exception as e:
+             return f"An error occurred with GPT-4: {e}"
+     elif model_type.startswith("llama"):
+         api_request_json = {
+             "model": model_type,
+             "messages": [
+                 {"role": "system", "content": "Ask for input if the user did not enter a piece of writing. "
+                     "Then, evaluate the student writing based on the ETS rubrics for two task types and provide a score: "
+                     "For Integrated Writing, Score 5: Successfully selects and coherently presents important information from the lecture in relation to the reading. The response is well-organized with only occasional language errors that do not hinder accuracy or clarity. "
+                     "Score 4: Good at selecting and presenting important lecture information in relation to the reading but may have minor inaccuracies or imprecisions. Minor language errors are more frequent but do not significantly affect clarity. "
+                     "Score 3: Contains some important information from the lecture and some relevant connections to the reading but may be vague, imprecise, or contain one major omission. Frequent errors may obscure meanings or connections. "
+                     "Score 2: Contains relevant information from the lecture but has significant language difficulties or inaccuracies in conveying important ideas or connections. Errors likely obscure key points for readers unfamiliar with the topics. "
+                     "Score 1: Provides little to no meaningful content from the lecture, with very low language level making it difficult to derive meaning. "
+                     "Score 0: Merely copies sentences from the reading, off-topic, written in a foreign language, consists of keystroke characters, or is blank. "
+                     "For Academic Discussion, Score 5: Relevant and clearly expressed contribution with consistent facility in language use, showcasing relevant explanations, effective syntactic variety, precise word choice, and almost no errors. "
+                     "Score 4: Relevant contribution that is easily understood, displaying adequate elaboration, syntactic variety, appropriate word choice, and few lexical or grammatical errors. "
+                     "Score 3: Mostly relevant and understandable contribution with some facility in language use. Some parts may be missing, unclear, or irrelevant, with noticeable lexical and grammatical errors. "
+                     "Score 2: Attempt to contribute with limited language use making ideas hard to follow, limited syntactic and vocabulary range, and an accumulation of structural and lexical errors. "
+                     "Score 1: Ineffective attempt with severely limited language use preventing expression of ideas. Few coherent ideas, with any coherent language mostly borrowed. "
+                     "Score 0: Blank, off-topic, not in English, entirely copied, unconnected to the prompt, or consists of arbitrary keystrokes. "
+                     "Lastly, provide feedback and answer any questions the user has."},
+                 {"role": "user", "content": prompt},
+             ]
+         }
+         try:
+             response = llama.run(api_request_json)
+             response_data = response.json()
+             return response_data["choices"][0]["message"]["content"]
+         except Exception as e:
+             return f"An error occurred with LLaMA: {e}"
+     else:
+         return "Unsupported model type."
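For orientation, here is a minimal sketch of how the two entry points in bots/assessment.py might be driven. It assumes the OCR models under models/ exist and the API clients above are configured; the URL and essay text are placeholders, and the driver itself is not part of this commit.

# Hypothetical driver for bots/assessment.py (illustrative only).
from bots.assessment import transcribe_handwriting, chat_assessment_with_model

# OCR a handwritten essay (placeholder URL), then score it against the rubrics.
essay = transcribe_handwriting("https://example.com/essay.jpg")
print(chat_assessment_with_model(essay, model_type="gpt-4"))

# Typed text can be scored directly, here routed through LLaMA via llamaapi.
print(chat_assessment_with_model("My integrated writing draft ...", model_type="llama-13b-chat"))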
bots/classify.py ADDED
@@ -0,0 +1,42 @@
+ import os
+ import json
+ from llamaapi import LlamaAPI
+ from openai import OpenAI
+
+ # Initialize
+ llama = LlamaAPI("LL-AirERHEk0jLIE1yEPvMXeobNfLsqLWJWcxLRS53obrZ3XyqMTfZc4EAuOs7r3wso")
+
+ api_key = "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"
+ os.environ["OPENAI_API_KEY"] = api_key
+ client = OpenAI()
+
+
+ def classify_learning_content(user_input):
+     messages = [
+         {"role": "system", "content": "Classify the need as one of "
+             "'Vocabulary Building', 'Writing Instruction', "
+             "'Speaking Practice', or 'Writing Assessment'."},
+         {"role": "user", "content": user_input}
+     ]
+
+     completion = client.chat.completions.create(
+         model="gpt-4",
+         messages=messages
+     )
+
+     classification_text = completion.choices[0].message.content.strip().lower()  # Normalize the text
+     # Simplify the comparison using keywords, assuming each category is distinct enough
+     if "writing assessment" in classification_text:
+         return 4
+     elif "vocabulary building" in classification_text:
+         return 3
+     elif "writing instruction" in classification_text:
+         return 2
+     elif "speaking practice" in classification_text:
+         return 1
+     else:
+         return 0
+
+
+ if __name__ == '__main__':
+     print(classify_learning_content("Vocabulary"))
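classify_learning_content returns an integer code rather than a label, so a caller has to dispatch on it. One plausible router over the bots in this commit might look like the sketch below; the route function and the mapping of code 2 to the feedback bot are assumptions, not part of the upload.

# Hypothetical router built on classify_learning_content.
from bots.classify import classify_learning_content
from bots.assessment import chat_assessment_with_model
from bots.feedback import chat_with_model
from bots.speaking import audio_chat_with_model
from bots.vocab import vocab_chat_with_model

def route(user_input, model_type="gpt-4"):
    code = classify_learning_content(user_input)
    if code == 4:    # Writing Assessment
        return chat_assessment_with_model(user_input, model_type)
    if code == 3:    # Vocabulary Building
        return vocab_chat_with_model(user_input, model_type)
    if code == 2:    # Writing Instruction (assumed to be handled by the feedback bot)
        return chat_with_model(user_input, model_type)
    if code == 1:    # Speaking Practice
        return audio_chat_with_model(user_input, model_type)
    return "Sorry, I could not classify that request."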
bots/feedback.py ADDED
@@ -0,0 +1,147 @@
+ import os
+ import json
+ import requests
+ from llamaapi import LlamaAPI
+ from openai import OpenAI
+ from PIL import Image
+ from transformers import TrOCRProcessor, VisionEncoderDecoderModel
+
+ # Initialize
+ llama = LlamaAPI("LL-AirERHEk0jLIE1yEPvMXeobNfLsqLWJWcxLRS53obrZ3XyqMTfZc4EAuOs7r3wso")
+
+ api_key = "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"
+ os.environ["OPENAI_API_KEY"] = api_key
+ client = OpenAI()
+
+
+ # Feedback chatbot.
+ def transcribe_handwriting(image_path):
+     """
+     Transcribes text from a handwriting image fetched from a URL using TrOCR.
+
+     Parameters:
+     - image_path (str): The URL of the handwriting image file.
+
+     Returns:
+     - str: The transcribed text. Returns False if transcription fails.
+     """
+     try:
+         url = image_path
+         image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+
+         # Prepare the image for the model
+         processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-handwritten')
+         model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-base-handwritten')
+         pixel_values = processor(images=image, return_tensors="pt").pixel_values
+
+         generated_ids = model.generate(pixel_values)
+         generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+         return generated_text
+     except Exception as e:
+         print(f"An error occurred while processing the image: {e}")
+         return False
+
+
+ def chat_with_model(prompt, model_type="gpt-4"):
+     """
+     Function to chat with either the GPT-4 or LLaMA model.
+
+     Parameters:
+     - prompt (str): The message or question to send to the model.
+     - model_type (str): The type of model to use ("gpt-4" or "llama").
+
+     Returns:
+     - str: The response from the selected model.
+     """
+     if model_type == "gpt-4":
+         try:
+             chat_completion = client.chat.completions.create(
+                 model="gpt-4",
+                 messages=[
+                     {"role": "system", "content": "Provide feedback on the inputted writing sample from an ESL learner. "
+                         "Focus on areas such as grammar, vocabulary usage, and overall coherence and organization of the essay. "
+                         "Offer corrective feedback on errors, suggest improvements, and highlight positive aspects to encourage "
+                         "the learner. Please ensure the feedback is constructive, clear, and supportive to help the learner "
+                         "understand and apply the suggestions. Always frame feedback in a positive, constructive manner. "
+                         "Focus on how the student can improve rather than just highlighting mistakes. Provide clear examples "
+                         "when pointing out errors or suggesting improvements. Prompt the learner to reflect on specific parts of "
+                         "their writing."},
+                     {"role": "user", "content": prompt},
+                 ]
+             )
+             return chat_completion.choices[0].message.content.strip()
+         except Exception as e:
+             return f"An error occurred with GPT-4: {e}"
+     elif model_type.startswith("llama"):
+         api_request_json = {
+             "model": model_type,  # This can be "llama-7b", "llama-13b", etc.
+             "messages": [
+                 {"role": "system", "content": "Provide feedback on the inputted writing sample from an ESL learner. "
+                     "Focus on areas such as grammar, vocabulary usage, and overall coherence and organization of the essay. "
+                     "Offer corrective feedback on errors, suggest improvements, and highlight positive aspects to encourage "
+                     "the learner. Please ensure the feedback is constructive, clear, and supportive to help the learner "
+                     "understand and apply the suggestions. Always frame feedback in a positive, constructive manner. "
+                     "Focus on how the student can improve rather than just highlighting mistakes. Provide clear examples "
+                     "when pointing out errors or suggesting improvements. Prompt the learner to reflect on specific parts of "
+                     "their writing."},
+                 {"role": "user", "content": prompt},
+             ]
+         }
+         try:
+             response = llama.run(api_request_json)
+             response_data = response.json()
+             return response_data["choices"][0]["message"]["content"]
+         except Exception as e:
+             return f"An error occurred with LLaMA: {e}"
+     else:
+         return "Unsupported model type."
+
+
+ def feedbacks_main():
+     print("Hello! I am a chatbot. Which model would you like to use? (llama-13b-chat, gpt-4, gpt-3.5-turbo)")
+     model_type = input("Choose model or type 'exit' to quit: ").strip()
+
+     if model_type.lower() == "exit":
+         print("Thanks for using! Wish you all the best in English learning!")
+         return
+
+     while model_type not in ["llama-13b-chat", "gpt-4", "gpt-3.5-turbo"]:
+         print("Invalid model. Please copy one from (llama-13b-chat, gpt-4, gpt-3.5-turbo) or type 'exit' to quit.")
+         model_type = input("Choose model: ").strip()
+         if model_type.lower() == "exit":
+             print("Thanks for using! Wish you all the best in English learning!")
+             return
+
+     print(f"Model set to {model_type}. How can I assist you today?\n")
+
+     user_inputs = []  # List to store all user inputs
+     outputs = []  # List to store all chatbot responses
+     interaction_history = []  # List to store the full interaction history (both inputs and responses)
+
+     while True:
+         upload_decision = input("Do you need to upload a picture for transcription? (yes/no): ").strip().lower()
+         user_input = ""
+         if upload_decision == "yes":
+             image_path = input("Please provide the link to your handwriting image: \n")
+             generated_text = transcribe_handwriting(image_path)
+             if not generated_text:
+                 print("Failed to transcribe the image or no text was found.")
+             else:
+                 print("Transcribed text:", generated_text)
+                 user_input = generated_text
+         if not user_input:
+             user_input = input("You: ")
+             if user_input.lower() == "exit":  # Check for 'exit' command to break the loop
+                 print("Thanks for using! Wish you all the best in English learning!")
+                 break  # Exit the while loop
+
+         user_inputs.append(user_input)  # Add user input to the list
+         interaction_history.append(f"You: {user_input}")  # Add user input to the interaction history
+
+         response = chat_with_model(user_input, model_type)
+         outputs.append(response)  # Add chatbot response to the list
+         interaction_history.append(f"Chatbot: {response}")  # Add chatbot response to the interaction history
+
+         print("Chatbot:", response)
+
+     return user_inputs, outputs, interaction_history
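feedbacks_main is interactive; for scripted use the two pieces can be called directly, as in this sketch (the image URL is a placeholder and the snippet is not part of the commit):

# Hypothetical non-interactive use of bots/feedback.py.
from bots.feedback import transcribe_handwriting, chat_with_model

text = transcribe_handwriting("https://example.com/handwriting.jpg")  # placeholder URL
if text:
    print(chat_with_model(text, model_type="gpt-4"))
else:
    print("Failed to transcribe the image or no text was found.")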
bots/speaking.py ADDED
@@ -0,0 +1,84 @@
+ import whisper
+ from gtts import gTTS
+ from pydub import AudioSegment
+
+ import os
+ from llamaapi import LlamaAPI
+ from openai import OpenAI
+ from loguru import logger
+
+ # Initialize
+ llama = LlamaAPI("LL-AirERHEk0jLIE1yEPvMXeobNfLsqLWJWcxLRS53obrZ3XyqMTfZc4EAuOs7r3wso")
+
+ api_key = "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"
+ os.environ["OPENAI_API_KEY"] = api_key
+ client = OpenAI()
+
+
+ def convert_to_mp3(input_path, output_path):
+     audio = AudioSegment.from_file(input_path)
+
+     audio.export(output_path, format="mp3")
+     print(f"Audio converted to MP3 and saved as: {output_path}")
+
+
+ # convert_to_mp3("/content/audio_input.wav", "/content/audio_input.mp3")
+
+
+ def transcribe_audio(audio_path):
+     """
+     Transcribes audio to text using Whisper.
+
+     Parameters:
+     - audio_path (str): The path to the audio file.
+
+     Returns:
+     - str: The transcribed text.
+     """
+     model = whisper.load_model("base")
+     audio = whisper.load_audio(audio_path)
+     audio = whisper.pad_or_trim(audio)
+     mel = whisper.log_mel_spectrogram(audio).to(model.device)
+
+     _, probs = model.detect_language(mel)
+     print(f"Detected language: {max(probs, key=probs.get)}")
+
+     options = whisper.DecodingOptions()
+     result = model.decode(mel, options)
+     logger.success(f'Transcription completed, result: {result.text}')
+     return result.text
+
+
+ def text_to_speech(text, filename="/content/response.mp3"):
+     """
+     Converts text to speech using gTTS and saves the audio file.
+
+     Parameters:
+     - text (str): The text to convert to speech.
+     - filename (str): The filename for the saved audio file.
+     """
+     tts = gTTS(text=text, lang='en')
+     tts.save(filename)
+     # display(Audio(filename))
+     # os.remove(filename)  # Optionally remove the file after playing
+
+
+ def audio_chat_with_model(prompt, model_type="gpt-4"):
+     if model_type == "gpt-4":
+         try:
+             chat_completion = client.chat.completions.create(
+                 model="gpt-4",
+                 messages=[
+                     {"role": "system", "content": "Imagine you're a supportive language tutor engaging in a conversation with an ESL learner. "
+                         "The learner is interested in discussing a variety of topics to improve their English speaking skills. "
+                         "Your goal is to encourage the learner to speak as much as possible, gently correct any mistakes, and provide constructive feedback. "
+                         "Adapt your conversation to include vocabulary and grammatical structures relevant to the discussion, ensuring a rich, engaging dialogue. "
+                         "Correct errors in a positive manner and encourage the learner to ask questions and express opinions freely. "
+                         "Conclude with a summary of their strengths during the session and offer specific advice for continued language improvement."},
+                     {"role": "user", "content": prompt},
+                 ]
+             )
+             return chat_completion.choices[0].message.content.strip()
+         except Exception as e:
+             return f"An error occurred with GPT-4: {e}"
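Put together, the helpers in bots/speaking.py form a speech-in, speech-out loop. A minimal round trip might look like this sketch (file paths are placeholders; it assumes ffmpeg is available for pydub and is not part of the commit):

# Hypothetical end-to-end turn for bots/speaking.py.
from bots.speaking import convert_to_mp3, transcribe_audio, audio_chat_with_model, text_to_speech

convert_to_mp3("input.wav", "input.mp3")          # pydub conversion (placeholder paths)
prompt = transcribe_audio("input.mp3")            # Whisper ASR
reply = audio_chat_with_model(prompt, "gpt-4")    # tutor response
text_to_speech(reply, filename="response.mp3")    # gTTS synthesis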
bots/vocab.py ADDED
@@ -0,0 +1,64 @@
+ import os
+ import json
+ from llamaapi import LlamaAPI
+ from openai import OpenAI
+
+ # Initialize
+ llama = LlamaAPI("LL-AirERHEk0jLIE1yEPvMXeobNfLsqLWJWcxLRS53obrZ3XyqMTfZc4EAuOs7r3wso")
+
+ api_key = "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"
+ os.environ["OPENAI_API_KEY"] = api_key
+ client = OpenAI()
+
+
+ def vocab_chat_with_model(prompt, model_type):
+     """
+     Function to chat with either the GPT-4 or LLaMA model.
+
+     Parameters:
+     - prompt (str): The message or question to send to the model.
+     - model_type (str): The type of model to use ("gpt-4" or "llama").
+
+     Returns:
+     - str: The response from the selected model.
+     """
+     if model_type == "gpt-4":
+         try:
+             chat_completion = client.chat.completions.create(
+                 model="gpt-4",
+                 messages=[
+                     {"role": "system", "content": "This interaction is focused on enhancing the user's vocabulary. "
+                         "Start by asking the user to list some vocabulary words they are currently familiar with or interested in learning more about. "
+                         "Once the user provides their words, expand upon each by introducing related words, paying particular attention to affixes "
+                         "(prefixes and suffixes) and roots. Explain how understanding these components can help in deciphering the meanings of unfamiliar words. "
+                         "Provide examples for each word to demonstrate how affixes and roots alter the meaning of base words. "
+                         "Encourage the user to create sentences with the new vocabulary to reinforce their learning. "
+                         "Aim to make this an engaging and informative experience that promotes the user's vocabulary expansion and deepens their understanding of word formation."},
+                     {"role": "user", "content": prompt},
+                 ]
+             )
+             return chat_completion.choices[0].message.content.strip()
+         except Exception as e:
+             return f"An error occurred with GPT-4: {e}"
+     elif model_type.startswith("llama"):
+         api_request_json = {
+             "model": model_type,  # This can be "llama-7b", "llama-13b", etc.
+             "messages": [
+                 {"role": "system", "content": "This interaction is focused on enhancing the user's vocabulary. "
+                     "Start by asking the user to list some vocabulary words they are currently familiar with or interested in learning more about. "
+                     "Once the user provides their words, expand upon each by introducing related words, paying particular attention to affixes "
+                     "(prefixes and suffixes) and roots. Explain how understanding these components can help in deciphering the meanings of unfamiliar words. "
+                     "Provide examples for each word to demonstrate how affixes and roots alter the meaning of base words. "
+                     "Encourage the user to create sentences with the new vocabulary to reinforce their learning. "
+                     "Aim to make this an engaging and informative experience that promotes the user's vocabulary expansion and deepens their understanding of word formation."},
+                 {"role": "user", "content": prompt},
+             ]
+         }
+         try:
+             response = llama.run(api_request_json)
+             response_data = response.json()
+             return response_data["choices"][0]["message"]["content"]
+         except Exception as e:
+             return f"An error occurred with LLaMA: {e}"
+     else:
+         return "Unsupported model type."
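A short usage sketch for bots/vocab.py, with hypothetical prompts (not part of the commit); note that vocab_chat_with_model has no default model_type, so it must be passed explicitly:

# Hypothetical calls into bots/vocab.py.
from bots.vocab import vocab_chat_with_model

print(vocab_chat_with_model("Help me learn words built on the root 'spect'.", "gpt-4"))
print(vocab_chat_with_model("Now quiz me on those words.", "llama-13b-chat"))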