Update app.py
app.py
CHANGED
@@ -0,0 +1,216 @@
import gradio as gr
import pandas as pd
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_community.document_loaders import PyPDFLoader
from langchain_core.prompts import PromptTemplate
import chromadb
import os
from uuid import uuid4

# Initialize the persistent Chroma storage location
CHROMA_PATH = "./vectordb-chroma/gformdb"
os.makedirs(CHROMA_PATH, exist_ok=True)

# Default grading rubric used when the user does not provide one
default_user_aspek = "Jika Jawaban salah berikan nilai 0, jika jawaban benar namun tidak tepat berikan nilai 50, jika jawaban benar dan lengkap serta penjelasan baik, beri nilai 100"


def process_files(csv_file, openai_key, user_aspek=default_user_aspek, pdf_file=None, model_name="gpt-4o-mini"):
    try:
        context = ""

        def context_search(query, source_name=None, k=5):
            # Retrieve the k most similar chunks, optionally filtered by source file
            if source_name:
                results = vectorstore.similarity_search(
                    query=query,
                    k=k,
                    filter={"source": source_name}
                )
            else:
                results = vectorstore.similarity_search(
                    query=query,
                    k=k
                )
            page_content = ""
            metadata = {}
            for res in results:
                page_content += res.page_content + "\n"
                metadata = res.metadata
            return page_content, metadata

        def format_qa_efficient(data_path):
            data = pd.read_csv(data_path)
            qa_efficient = ""
            indexs = 0
            Question = ""
            for column in data.columns[3:]:  # Skip the Timestamp, Email Address, and Nama columns
                qa_efficient += f"Nomor-{indexs+1}.{column}:\n"
                indexs += 1
                Question += column
                for index, row in data.iterrows():
                    name = row['Nama']
                    email = row["Email_Address"]
                    answer = row[column]
                    qa_efficient += f"- {name}|{email}: {answer}\n"

                qa_efficient += "\n"  # Blank line between questions

            return qa_efficient, Question
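        # Note: format_qa_efficient assumes a Google Forms-style CSV export whose
        # first three columns are Timestamp, Email_Address, and Nama, and whose
        # remaining columns each hold one quiz question (hypothetical layout):
        #
        #   Timestamp | Email_Address | Nama | <Question 1 text> | <Question 2 text> | ...
        #
        # Each question column is then rendered as a block such as:
        #   Nomor-1.<Question 1 text>:
        #   - <Nama>|<Email_Address>: <student answer>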
        # Set the OpenAI API key (fall back to the environment if none was entered)
        if not openai_key:
            openai_key = os.getenv('OPENAI_API_KEY')

        os.environ["OPENAI_API_KEY"] = openai_key

        # Process the CSV (Gradio provides the file path directly)
        QA, Question = format_qa_efficient(csv_file)

        if pdf_file:
            # Initialize the Chroma client
            persistent_client = chromadb.PersistentClient(path=CHROMA_PATH)
            collection = persistent_client.get_or_create_collection("RAG")

            vectorstore = Chroma(
                client=persistent_client,
                collection_name=collection.name,
                embedding_function=OpenAIEmbeddings()
            )

            # Index the PDF only if it has not been stored before
            res, metadata = context_search(pdf_file.name, k=1)
            if metadata.get("source") != pdf_file.name:
                # Process the PDF (Gradio provides the file path directly)
                pages = PyPDFLoader(pdf_file.name).load_and_split()
                uuids = [str(uuid4()) for _ in range(len(pages))]
                vectorstore.add_documents(documents=pages, ids=uuids)

            # Retrieve reference material relevant to the quiz questions
            context, metadata = context_search(Question, pdf_file.name)

        else:
            context = "Tidak ada context tambahan yang diberikan, tolong gunakan pengetahuan anda untuk menjawab pertanyaan"
        # Evaluation prompt template
        TEMPLATE = """
Kamu adalah AI evaluator pendidikan. Nilai jawaban siswa berikut menggunakan panduan ini:

Buat output dengan format (format ini untuk satu murid, jadi selesaikan dulu 1 murid untuk semua nomor, baru ke murid berikutnya):
HASIL EVALUASI
=============
Nama dan Email Murid:
[Nomor Soal].
Nilai: [Sesuaikan dengan Aspek Penilaian yang diberikan user]
Alasan: [alasan singkat penilaian]
Saran Perbaikan: [saran]

[buat seperti di atas untuk setiap jawaban]

================
Rata-rata Nilai: [nilai]

Rekomendasi Umum:
[rekomendasi]


MATERI REFERENSI:
{context}

SOAL & JAWABAN:
{QA}

Aspek Penilaian yang diberikan user:
{Aspek}
"""
        PROMPT = PromptTemplate(
            input_variables=["QA", "context", "Aspek"],
            template=TEMPLATE
        )

        # Initialize the chat model and build the chain
        chat = ChatOpenAI(model=model_name)
        chain = PROMPT | chat

        # Generate the evaluation (fall back to the default rubric if the field was left empty)
        Aspek_penilaian = user_aspek if user_aspek else default_user_aspek

        response = chain.invoke({
            "Aspek": Aspek_penilaian,
            "QA": QA,
            "context": context
        })

        return response.content

    except Exception as e:
        return f"Error occurred: {str(e)}\nType: {type(e)}"
# Create the Gradio interface
def create_interface():
    with gr.Blocks(title="Quiz Evaluator", theme=gr.themes.Soft()) as app:
        with gr.Column(scale=1):
            gr.Markdown(
                """
                # 📝 Quiz Response Evaluator
                Upload your quiz responses (CSV) and reference material (PDF) to get an AI-powered evaluation.
                """
            )

            with gr.Row():
                with gr.Column(scale=1):
                    csv_input = gr.File(
                        label="Quiz Responses (CSV)",
                        file_types=[".csv"]
                    )
                with gr.Column(scale=1):
                    pdf_input = gr.File(
                        label="Reference Material (PDF)",
                        file_types=[".pdf"]
                    )

            user_aspek = gr.Textbox(
                label="Aspek",
                placeholder="Jika Jawaban salah berikan nilai 0, jika jawaban benar namun tidak tepat berikan nilai 50, jika jawaban benar dan lengkap serta penjelasan baik, beri nilai 100",
                show_copy_button=True
            )

            model_name = gr.Textbox(
                label="OpenAI model",
                placeholder="Enter the OpenAI model name",
                value="gpt-4o-mini"
            )

            api_key = gr.Textbox(
                label="OpenAI API Key",
                placeholder="Enter your OpenAI API key",
                type="password"
            )

            submit_btn = gr.Button(
                "Evaluate Responses",
                variant="primary",
                size="lg"
            )

            # Use a Textbox instead of Markdown for better formatting of the results
            output = gr.Textbox(
                label="Evaluation Results",
                lines=20,
                max_lines=30,
                show_copy_button=True
            )

            submit_btn.click(
                fn=process_files,
                inputs=[csv_input, api_key, user_aspek, pdf_input, model_name],
                outputs=output
            )

    return app
# Launch the interface
if __name__ == "__main__":
    app = create_interface()
    app.launch(share=True)
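# Optional quick check without the UI (a sketch, not part of the app): assumes a local
# Google Forms export named "responses.csv" and an OPENAI_API_KEY already set in the
# environment; uncomment to run process_files directly and print the evaluation.
#
# if os.getenv("OPENAI_API_KEY"):
#     print(process_files("responses.csv", openai_key=""))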