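"""Gradio app that classifies a Korean book report (λ…μ„œκ°μƒλ¬Έ) as either
written by a student or generated by AI, using a Keras model over Okt noun
tokens plus numeric features (grade, text length, emoticon count, sentence
count, average sentence length)."""
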
import logging
import os
import pickle
import re

import gradio as gr
import numpy as np
from konlpy.tag import Okt
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import tokenizer_from_json

# Logging configuration
logging.basicConfig(filename='app.log', level=logging.DEBUG,
                    format='%(asctime)s:%(levelname)s:%(message)s')

# Environment variables so KoNLPy's Okt can find the JVM
os.environ['JAVA_HOME'] = '/usr/lib/jvm/java-11-openjdk-amd64'
os.environ['PATH'] = os.environ['JAVA_HOME'] + '/bin:' + os.environ['PATH']

# λͺ¨λΈ 및 ν† ν¬λ‚˜μ΄μ € 파일 λ‘œλ“œ
try:
model = load_model('deep_learning_model(okt_drop).h5', compile=False)
logging.info("Model loaded successfully.")
with open('tokenizer(okt_drop).json', 'r', encoding='utf-8') as f:
tokenizer_data = f.read()
tokenizer = tokenizer_from_json(tokenizer_data)
logging.info("Tokenizer loaded successfully.")
with open('scaler.pkl', 'rb') as f:
scaler = pickle.load(f)
logging.info("Scaler loaded successfully.")
except Exception as e:
logging.error("Error loading model, tokenizer, or scaler: %s", str(e))
raise e
def calculate_sentence_stats(paragraph):
    """Return the number of sentences and their average character length."""
    # Collapse runs of periods (e.g. ellipses) into a single period
    paragraph = re.sub(r'\.{2,}', '.', paragraph)
    sentences = re.split(r'[.!?]', paragraph)
    sentence_lengths = [len(s.strip()) for s in sentences if s.strip()]
    sentence_count = len(sentence_lengths)
    average_length = sum(sentence_lengths) / sentence_count if sentence_lengths else 0
    return sentence_count, average_length

def process_text(text):
    """Extract nouns with Okt, tokenize them, and pad to the model's input length."""
    try:
        okt = Okt()
        nouns = ' '.join(okt.nouns(text))
        sequences = tokenizer.texts_to_sequences([nouns])
        max_len = 301  # sequence length the model was trained with
        return pad_sequences(sequences, maxlen=max_len)
    except Exception as e:
        logging.error("Error processing text: %s", str(e))
        raise

def predict_text(text, grade):
    """Predict whether the book report was written by a student or generated by AI."""
    try:
        X = process_text(text)
        sentence_count, sentence_average = calculate_sentence_stats(text)
        length = len(text)
        emoticon = 0  # emoticon-count feature; fixed to 0 for this interface
        # Feature order must match the order the scaler was fitted on
        numeric_features = np.array([[int(grade), length, emoticon,
                                      sentence_count, sentence_average]])
        numeric_features = scaler.transform(numeric_features)
        prediction = model.predict([X, numeric_features])
        # Output labels (Korean): "This book report was generated by AI." /
        # "This book report was written by a human."
        predicted_label = ('인곡지λŠ₯이 μƒμ„±ν•œ λ…μ„œκ°μƒλ¬Έμž…λ‹ˆλ‹€.' if prediction[0][0] > 0.5
                           else 'μ‚¬λžŒμ΄ μž‘μ„±ν•œ λ…μ„œκ°μƒλ¬Έμž…λ‹ˆλ‹€.')
        return predicted_label
    except Exception as e:
        logging.error("Error predicting text: %s", str(e))
        raise

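# Hypothetical local sanity check (not part of the deployed app); assumes the
# model, tokenizer, and scaler files above are present in the working directory:
#
#     sample = "이 책을 읽고 λ§Žμ€ 것을 λŠκΌˆλ‹€. μ£Όμธκ³΅μ˜ μš©κΈ°κ°€ μΈμƒ κΉŠμ—ˆλ‹€."
#     print(predict_text(sample, "5"))
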
# Gradio UI; title/description are in Korean: "Book Report Analyzer",
# "Analyzes whether this book report was written by a student or generated by AI."
iface = gr.Interface(
    fn=predict_text,
    inputs=[
        gr.Textbox(lines=10, placeholder="Enter Text Here..."),
        gr.Textbox(label="Grade"),
    ],
    outputs="text",
    title="λ…μ„œκ°μƒλ¬Έ 뢄석기",
    description="이 λ…μ„œκ°μƒλ¬Έμ΄ 학생에 μ˜ν•΄ μž‘μ„±λ˜μ—ˆλŠ”μ§€, 인곡지λŠ₯에 μ˜ν•΄ μƒμ„±λ˜μ—ˆλŠ”μ§€ λΆ„μ„ν•©λ‹ˆλ‹€."
)

iface.launch(debug=True)