import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import joblib
import os
import shutil
# Define the directory for FHE client/server files
fhe_directory = '/tmp/fhe_client_server_files/'

# Create the directory if it does not exist
if not os.path.exists(fhe_directory):
    os.makedirs(fhe_directory)
else:
    # If it exists, delete its contents and recreate it
    shutil.rmtree(fhe_directory)
    os.makedirs(fhe_directory)
data = pd.read_csv('data/heart.xls')
data.info()  # check the dataset info

# Heatmap of the feature correlations
data_corr = data.corr()
plt.figure(figsize=(20, 20))
sns.heatmap(data=data_corr, annot=True)
# Rank features by the absolute value of their correlation with 'output'
feature_value = np.array(data_corr['output'])
for i in range(len(feature_value)):
    if feature_value[i] < 0:
        feature_value[i] = -feature_value[i]
print(feature_value)

features_corr = pd.DataFrame(feature_value, index=data_corr['output'].index, columns=['correlation'])
feature_sorted = features_corr.sort_values(by=['correlation'], ascending=False)
feature_selected = feature_sorted.index
feature_selected  # features most strongly correlated with the target
clean_data = data[feature_selected]

from sklearn.tree import DecisionTreeClassifier  # sklearn's DecisionTreeClassifier
from sklearn.model_selection import train_test_split

# Build the input and output datasets ('output' is the first selected column)
X = clean_data.iloc[:, 1:]
Y = clean_data['output']
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.25, random_state=0)
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)  # data split into training and testing sets
# feature scaling
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
x_train=sc.fit_transform(x_train)
x_test=sc.transform(x_test)
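# Note: the fitted scaler is reused further below when preparing the client-side
# sample, so encrypted inference sees features in the same scale as training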
# Train the model
dt = DecisionTreeClassifier(criterion='entropy', max_depth=6)
dt.fit(x_train, y_train)

# Predict on the testing data
y_pred = dt.predict(x_test)
# Evaluate the model with a confusion matrix
from sklearn.metrics import confusion_matrix
conf_mat = confusion_matrix(y_test, y_pred)
print(conf_mat)

accuracy = dt.score(x_test, y_test)
print("\nThe accuracy of DecisionTreeClassifier on the heart disease prediction dataset is " + str(round(accuracy * 100, 2)) + "%")
joblib.dump(dt, 'heart_disease_dt_model.pkl')
from concrete.ml.sklearn.tree import DecisionTreeClassifier as ConcreteDecisionTreeClassifier

# Convert the trained sklearn tree into a quantized, FHE-compatible Concrete ML model
fhe_compatible = ConcreteDecisionTreeClassifier.from_sklearn_model(dt, x_train, n_bits=10)
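# compile() builds the FHE circuit; x_train acts as the representative input set
# used to determine the value ranges of the encrypted computation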
fhe_compatible.compile(x_train)
#### server
from concrete.ml.deployment import FHEModelDev, FHEModelClient, FHEModelServer
# Setup the development environment
dev = FHEModelDev(path_dir=fhe_directory, model=fhe_compatible)
dev.save()
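# save() writes the deployment artifacts (client and server packages) to fhe_directory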
# Setup the server
server = FHEModelServer(path_dir=fhe_directory)
server.load()
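# In a real deployment the server would run as a separate service and would only
# need the server-side artifact produced above, not the model or the training data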
####### client
from concrete.ml.deployment import FHEModelDev, FHEModelClient, FHEModelServer
# Setup the client
client = FHEModelClient(path_dir=fhe_directory, key_dir="/tmp/keys_client")
serialized_evaluation_keys = client.get_serialized_evaluation_keys()
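# The serialized evaluation keys are public material that must be sent to the
# server once; they let it compute on the encrypted data without decrypting it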
# Load the dataset and select the relevant features
data = pd.read_csv('data/heart.xls')
# Perform the correlation analysis
data_corr = data.corr()
# Select features based on correlation with 'output'
feature_value = np.array(data_corr['output'])
for i in range(len(feature_value)):
    if feature_value[i] < 0:
        feature_value[i] = -feature_value[i]
features_corr = pd.DataFrame(feature_value, index=data_corr['output'].index, columns=['correlation'])
feature_sorted = features_corr.sort_values(by=['correlation'], ascending=False)
feature_selected = feature_sorted.index
# Clean the data by selecting the most correlated features
clean_data = data[feature_selected]
# Extract the first row of feature data for prediction (excluding the 'output' column)
sample_data = clean_data.iloc[0, 1:].values.reshape(1, -1)  # reshape to a 2D array for model input

# Apply the same scaling used during training before quantizing and encrypting
sample_data = sc.transform(sample_data)
encrypted_data = client.quantize_encrypt_serialize(sample_data)
##### end client

# The encrypted sample and the evaluation keys are sent to the server, which runs
# the model homomorphically and returns an encrypted result
encrypted_result = server.run(encrypted_data, serialized_evaluation_keys)

# Only the client holds the private key, so only it can decrypt the result
result = client.deserialize_decrypt_dequantize(encrypted_result)
print(result)
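# Sketch (assumption): the decrypted output is expected to hold the de-quantized
# prediction scores per class, so the predicted label can be recovered with argmax
predicted_class = np.argmax(result, axis=1)
print("Predicted class for the first sample:", predicted_class)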