# english_assistant/text_evaluation_cot.py
import gradio as gr
from huggingface_hub import InferenceClient

# Hosted model used for the evaluation, called through the Hugging Face Inference API.
model_name = "Qwen/Qwen2.5-72B-Instruct"
client = InferenceClient(model_name)
def llm_inference(user_sample):
    """Ask the model to rate the CEFR level of a text based on readability metrics."""
    output = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": "You are an expert in English text evaluation.\n"
                           "Evaluate the text (find an answer) focusing on the metrics. Base your answer on the reasoning below.\n"
                           "Answers: A1, A2, B1, B2, C1, C2\n"
                           f"Text: {user_sample}\n"
                           "Metrics:\n"
                           "1. The number of sentences in the text\n"
                           "2. The number of words in the text\n"
                           "3. The number of long words (3 syllables or more)\n"
                           "4. The number of syllables in the text\n"
                           "5. FRE\n"
                           "6. FKGL\n"
                           "7. LIX\n"
                           "8. SMOG\n"
                           "Reasoning: Use the FRE, FKGL, LIX and SMOG interpretations to choose an answer.\n"
                           "Write the answer using the format: This text has <answer> level.\n"
                           "Explain in a few words (10-15 words) why you chose this level based on the FRE, FKGL, LIX and SMOG interpretations.\n"
                           "If the text is in another language, say that you could not evaluate it."
            }
        ],
        stream=False,
        max_tokens=650,
        temperature=0.5,
    )
    # Return only the generated message text.
    return output.choices[0].message.content
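
# Reference sketch (not used by the app): the readability formulas the prompt refers to,
# computed locally with a naive syllable heuristic. The helper names (count_syllables,
# readability_metrics) are illustrative assumptions, not part of the original script.
import re
from math import sqrt

def count_syllables(word):
    # Rough heuristic: count groups of consecutive vowels.
    return max(1, len(re.findall(r"[aeiouy]+", word.lower())))

def readability_metrics(text):
    sentences = max(1, len(re.findall(r"[.!?]+", text)))
    words = re.findall(r"[A-Za-z']+", text)
    n_words = max(1, len(words))
    syllables = sum(count_syllables(w) for w in words)
    polysyllables = sum(1 for w in words if count_syllables(w) >= 3)  # "long words" in the prompt
    long_words_lix = sum(1 for w in words if len(w) > 6)              # LIX counts words longer than 6 letters
    fre = 206.835 - 1.015 * (n_words / sentences) - 84.6 * (syllables / n_words)
    fkgl = 0.39 * (n_words / sentences) + 11.8 * (syllables / n_words) - 15.59
    lix = (n_words / sentences) + 100 * (long_words_lix / n_words)
    smog = 1.043 * sqrt(polysyllables * (30 / sentences)) + 3.1291
    return {"FRE": fre, "FKGL": fkgl, "LIX": lix, "SMOG": smog}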
interface = gr.Interface(
    fn=llm_inference,
    inputs=gr.Textbox(lines=2, placeholder="Write your text here..."),
    outputs="text",
    css=".gradio-container {background-image: url('https://i.pinimg.com/originals/9b/6a/a8/9b6aa8867dbe29f2d475b7a550e06490.jpg')}",
    title="ENGLISH TEXT EVALUATION",
)
interface.launch(debug=True)