from flask import Flask, request, jsonify

import torch
import torch.serialization
from transformers import RobertaForSequenceClassification, RobertaTokenizer

app = Flask(__name__)

# CodeBERT tokenizer matching the fine-tuned classification model.
tokenizer = RobertaTokenizer.from_pretrained("microsoft/codebert-base")

# Allowlist the model class, then load the full pickled module on CPU.
# weights_only=False trusts the checkpoint's pickle payload, so model.pth
# must come from a trusted source.
torch.serialization.add_safe_globals([RobertaForSequenceClassification])
model = torch.load("model.pth", map_location=torch.device('cpu'), weights_only=False)
model.eval()
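
# Note: with weights_only=False the add_safe_globals registration above has no
# effect, since the checkpoint is unpickled without restriction. A sketch of
# the stricter load path (an assumption: it presumes the same pickled-module
# checkpoint, and weights_only=True may require allowlisting further classes
# beyond RobertaForSequenceClassification):
#
#   model = torch.load("model.pth", map_location=torch.device('cpu'),
#                      weights_only=True)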


@app.route("/")
def home():
    # Echo the requested URL; serves as a simple liveness check.
    return request.url


@app.route("/predict", methods=["POST"])
def predict():
    try:
        data = request.get_json(silent=True)
        if not data or "code" not in data:
            return jsonify({"error": "Missing 'code' parameter"}), 400

        code_input = data["code"]
        print("Received code:", code_input)

        # Tokenize the snippet into a fixed-length (512-token) model input.
        inputs = tokenizer(
            code_input,
            return_tensors='pt',
            truncation=True,
            padding='max_length',
            max_length=512
        )

        # Run inference without gradient tracking; the squeezed logit is
        # returned as the predicted score.
        with torch.no_grad():
            outputs = model(**inputs)
            prediction = outputs.logits.squeeze().item()

        print(f"Predicted score: {prediction}")
        return jsonify({"predicted_score": prediction})

    except Exception as e:
        return jsonify({"error": str(e)}), 500


if __name__ == "__main__":
    # Listen on all interfaces on port 7860.
    app.run(host="0.0.0.0", port=7860)
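
# Example client call (a sketch: it assumes the server is reachable at
# http://localhost:7860 and that the `requests` package is installed):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:7860/predict",
#       json={"code": "def add(a, b):\n    return a + b"},
#   )
#   print(resp.json())  # {"predicted_score": <float>} on success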