from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, validator
import pickle
import joblib
import numpy as np
import tensorflow as tf
import pandas as pd

app = FastAPI()


# Input validation using Pydantic
class HealthPredictionRequest(BaseModel):
    Gender: str
    Age: int
    SBP: int
    HBP: int
    heart_rate: int
    Glucose: int
    SpO2: int
    Temprature: float  # spelling kept as-is to match the trained model's feature name

    @validator("Gender")
    def validate_gender(cls, value):
        if value not in ["M", "F"]:
            raise ValueError("Gender must be 'M' or 'F'.")
        return value

    @validator("Age", "SBP", "HBP", "heart_rate", "Glucose", "SpO2")
    def validate_positive_integers(cls, value):
        if value <= 0:
            raise ValueError("Values must be positive integers.")
        return value

    @validator("Temprature")
    def validate_temperature(cls, value):
        if value < 95.0 or value > 105.0:  # plausible body-temperature range (°F)
            raise ValueError("Temperature must be between 95.0 and 105.0.")
        return value


# Function to make health predictions
def get_prediction(Gender, Age, SBP, HBP, heart_rate, Glucose, SpO2, Temprature):
    # Load the scaler
    with open('minmax_scaler.pkl', 'rb') as file:
        scaler = pickle.load(file)

    # Load the model
    model_path = 'random_forest_model.pkl'
    with open(model_path, 'rb') as file:
        model = joblib.load(file)

    # Load the label encoder for Gender
    with open('label_encoder.pkl', 'rb') as file:
        label_encoder = pickle.load(file)

    # Convert Gender to numeric
    Gender_encoded = label_encoder.transform([Gender])[0]

    # Create input DataFrame. The trailing spaces in some column names are kept
    # deliberately: they must match the feature names the scaler was fitted with.
    input_data = pd.DataFrame(
        [[Gender_encoded, Age, SBP, HBP, heart_rate, Glucose, SpO2, Temprature]],
        columns=['Gender', 'Age', 'SBP ', 'HBP ', 'heart_rate ',
                 'Glucose ', 'SpO2', 'Temprature ']
    )

    # Scale the input data
    input_data_scaled = scaler.transform(input_data)

    # Make prediction
    prediction = model.predict(input_data_scaled)

    # Map prediction to label
    label_map = {
        0: 'healthy',
        1: 'high BP',
        2: 'low BP',
        3: 'high sugar',
        4: 'low sugar',
        5: 'low oxygen',
        6: 'high temperature',
        7: 'heartbeat is high',
        8: 'risk'
    }
    return label_map[int(prediction[0])]


# Define the input data structure for fraud detection using Pydantic
class FraudInput(BaseModel):
    V1: float
    V2: float
    V3: float
    V4: float
    V5: float
    V6: float
    V7: float
    V8: float
    V9: float
    V10: float
    V11: float
    V12: float
    V13: float
    V14: float
    V15: float
    V16: float
    V17: float
    V18: float
    V19: float
    V20: float
    V21: float
    V22: float
    V23: float
    V24: float
    V25: float
    V26: float
    V27: float
    V28: float
    Amount: float


# Inference method for fraud detection
def fraud_inference(features, scaler_path="fraud_scaler.pkl", model_path="ann_model.h5"):
    # Load scaler and model
    with open(scaler_path, "rb") as f:
        scaler = pickle.load(f)
    ann_model_loaded = tf.keras.models.load_model(model_path)

    # Scale features
    scaled_features = scaler.transform(features)

    # Perform inference: argmax over the class probabilities picks the most
    # likely class
    predictions = ann_model_loaded.predict(scaled_features)
    predicted_label = np.argmax(predictions, axis=-1)

    if predicted_label[0] == 0:
        return 'Not Fraud'
    else:
        return 'Fraud'


class CrimeData(BaseModel):
    Case: str
    Block: str
    IUCR: int
    Primary_Type: str
    Description: str
    Location_Description: str
    FBI_Code: int
    Updated_On: str
    Location: str


def crime_inference(Case, Block, IUCR, Primary_Type, Description,
                    Location_Description, FBI_Code, Updated_On, Location):
    # Load the scaler
    with open('crime_scaler.pkl', 'rb') as file:
        scaler = joblib.load(file)

    # Load the model
    model_path = 'xgboost_model.pkl'
    with open(model_path, 'rb') as file:
        model = joblib.load(file)

    # Load the PCA transformer
    with open('crime_pca.pkl', 'rb') as file:
        pca = joblib.load(file)

    # Load the label encoder
    with open('crime_label_encoder.pkl', 'rb') as file:
        label_encoder = joblib.load(file)

    # Create input DataFrame
    input_data = pd.DataFrame(
        [[Case, Block, IUCR, Primary_Type, Description, Location_Description,
          FBI_Code, Updated_On, Location]],
        columns=['Case Number', 'Block', 'IUCR', 'Primary Type', 'Description',
                 'Location Description', 'FBI Code', 'Updated On', 'Location']
    )

    categorical_cols = ['Case Number', 'Block', 'IUCR', 'Primary Type', 'Description',
                        'Location Description', 'FBI Code', 'Updated On', 'Location']

    # Label encoding for categorical columns. Use transform rather than
    # fit_transform: re-fitting on a single request would map every value to 0
    # and discard the encoding learned at training time. This assumes the saved
    # encoder has already seen the incoming values.
    for col in categorical_cols:
        input_data[col] = label_encoder.transform(input_data[col])

    # Scale the input data
    input_data_scaled = scaler.transform(input_data)

    # Apply PCA transformation
    pca_features = pca.transform(input_data_scaled)

    # Make prediction
    prediction = model.predict(pca_features)

    # Map prediction to label
    label_map = {0: 'not arrest', 1: 'arrest'}
    return label_map[int(prediction[0])]
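# Note: each inference helper above reloads its pickled artifacts from disk on
# every request, which adds avoidable latency. Below is a minimal sketch of
# caching the health-model artifacts with functools.lru_cache; it assumes the
# same artifact paths used above, and the helper name _load_health_artifacts
# is illustrative (get_prediction could call it instead of reopening the files).
from functools import lru_cache


@lru_cache(maxsize=None)
def _load_health_artifacts():
    # Runs once on first call; subsequent calls return the cached objects
    with open('minmax_scaler.pkl', 'rb') as file:
        scaler = pickle.load(file)
    with open('random_forest_model.pkl', 'rb') as file:
        model = joblib.load(file)
    with open('label_encoder.pkl', 'rb') as file:
        label_encoder = pickle.load(file)
    return scaler, model, label_encoder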
# API endpoint for health prediction
@app.post("/health_predict")
def predict_health(request: HealthPredictionRequest):
    try:
        # Call the prediction function
        result = get_prediction(
            Gender=request.Gender,
            Age=request.Age,
            SBP=request.SBP,
            HBP=request.HBP,
            heart_rate=request.heart_rate,
            Glucose=request.Glucose,
            SpO2=request.SpO2,
            Temprature=request.Temprature
        )
        return {"prediction": result}
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))


# Define an endpoint for fraud prediction
@app.post("/fraud_predict")
async def predict_fraud(input_data: FraudInput):
    # Convert input data to a single-row DataFrame
    data_dict = input_data.dict()
    data = pd.DataFrame([data_dict])

    # Call the fraud detection inference method
    label = fraud_inference(data)
    return {"prediction": label}


@app.post("/predict_crime")
async def predict_crime(data: CrimeData):
    result = crime_inference(
        Case=data.Case,
        Block=data.Block,
        IUCR=data.IUCR,
        Primary_Type=data.Primary_Type,
        Description=data.Description,
        Location_Description=data.Location_Description,
        FBI_Code=data.FBI_Code,
        Updated_On=data.Updated_On,
        Location=data.Location
    )
    return {"prediction": result}
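if __name__ == "__main__":
    # Minimal smoke test using FastAPI's TestClient. The payload values are
    # illustrative, and running it assumes the pickled artifacts referenced
    # above exist alongside this file.
    from fastapi.testclient import TestClient

    client = TestClient(app)
    response = client.post("/health_predict", json={
        "Gender": "M", "Age": 45, "SBP": 120, "HBP": 80,
        "heart_rate": 72, "Glucose": 90, "SpO2": 98, "Temprature": 98.6,
    })
    print(response.json())  # e.g. {"prediction": "healthy"}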