DEADLOCK007X committed
Commit 9f58b2a · verified · 1 Parent(s): b3cfce1

Upload 3 files

Files changed (3)
  1. app.py +24 -0
  2. requirements.txt +4 -0
  3. tinyllama_inference.py +61 -0
app.py ADDED
@@ -0,0 +1,24 @@
+ # CODEXGAME/backend/ai_evaluator/app.py
+
+ import gradio as gr
+ from tinyllama_inference import evaluate_code
+
+ def evaluate_interface(question, code):
+     result = evaluate_code(question, code)
+     stars = result.get("stars", 0)
+     feedback = result.get("feedback", "No feedback provided.")
+     return f"Stars: {stars}\nFeedback: {feedback}"
+
+ iface = gr.Interface(
+     fn=evaluate_interface,
+     inputs=[
+         gr.Textbox(lines=2, placeholder="Enter the problem question here..."),
+         gr.Textbox(lines=10, placeholder="Enter your code solution here...")
+     ],
+     outputs="text",
+     title="TinyLlama Code Evaluator",
+     description="Evaluate your coding solution with TinyLlama. Provide the problem statement and your code to get a rating and feedback."
+ )
+
+ if __name__ == "__main__":
+     iface.launch()
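
Once app.py is running, the interface can also be exercised programmatically. A minimal sketch using the gradio_client package, assuming a local launch on Gradio's default port; the URL and endpoint name here are assumptions, not part of this commit:

    # Hypothetical client-side call against a locally running app.py.
    from gradio_client import Client

    client = Client("http://127.0.0.1:7860")  # default local Gradio address (assumed)
    result = client.predict(
        "Write a function that reverses a string.",  # question textbox
        "def rev(s):\n    return s[::-1]",           # code textbox
        api_name="/predict",                         # default endpoint for a gr.Interface
    )
    print(result)  # e.g. "Stars: 4\nFeedback: ..."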
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ torch
+ transformers
+ accelerate
+ gradio
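
None of these dependencies are pinned, so a fresh build pulls the latest releases. A quick sanity check that the environment resolved, as a minimal sketch:

    # Minimal environment check after `pip install -r requirements.txt`.
    import torch, transformers, accelerate, gradio
    print(torch.__version__, transformers.__version__, gradio.__version__)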
tinyllama_inference.py ADDED
@@ -0,0 +1,61 @@
+ # CODEXGAME/backend/ai_evaluator/tinyllama_inference.py
+
+ import json
+ import re
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ # Cache the tokenizer and model at module level so repeated calls
+ # to evaluate_code() do not reload the weights from disk.
+ _tokenizer = None
+ _model = None
+
+ def load_model():
+     global _tokenizer, _model
+     if _model is None:
+         # Change the model identifier if needed; it should be a TinyLlama
+         # variant available on Hugging Face.
+         model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+         _tokenizer = AutoTokenizer.from_pretrained(model_name)
+         _model = AutoModelForCausalLM.from_pretrained(model_name)
+     return _tokenizer, _model
+
+ def evaluate_code(question, code):
+     # Construct a prompt for the AI evaluator.
+     prompt = f"""
+ You are an expert code evaluator.
+ Rate the user's solution to the following problem from 0-5 (0 = completely incorrect, 5 = excellent).
+ Also provide a concise "feedback" message.
+ Problem: "{question}"
+ Solution: "{code}"
+ Return ONLY valid JSON: {{"stars": number, "feedback": string}}
+ Do not include any extra text outside the JSON.
+ """
+     tokenizer, model = load_model()
+     inputs = tokenizer(prompt, return_tensors="pt")
+     with torch.no_grad():
+         outputs = model.generate(**inputs, max_new_tokens=150)
+     # Decode only the newly generated tokens; decoding outputs[0] in full
+     # would echo the prompt back and break JSON parsing.
+     generated = outputs[0][inputs["input_ids"].shape[1]:]
+     response_text = tokenizer.decode(generated, skip_special_tokens=True)
+     try:
+         # The model may wrap the JSON in extra text; extract the first JSON object.
+         match = re.search(r"\{.*\}", response_text, re.DOTALL)
+         result = json.loads(match.group(0)) if match else None
+     except Exception:
+         result = None
+     if result is None:
+         result = {"stars": 0, "feedback": "Evaluation failed. Unable to parse AI response."}
+     return result
+
+ # For direct testing from the command line
+ if __name__ == "__main__":
+     import sys
+     if len(sys.argv) < 3:
+         print(json.dumps({"error": "Please provide a question and code as arguments"}))
+         sys.exit(1)
+     question = sys.argv[1]
+     code = sys.argv[2]
+     result = evaluate_code(question, code)
+     print(json.dumps(result))
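
Besides the command-line hook above, evaluate_code can be called directly from Python. A minimal sketch, with placeholder question and solution strings; note the first call downloads the model weights, which takes a while:

    # Direct use of evaluate_code; the arguments here are illustrative only.
    from tinyllama_inference import evaluate_code

    result = evaluate_code(
        "Write a function that returns the factorial of n.",
        "def fact(n):\n    return 1 if n <= 1 else n * fact(n - 1)",
    )
    print(result["stars"], result["feedback"])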